File: | src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/CodeGen/SelectionDAGNodes.h |
Warning: | line 1142, column 10 Called C++ object pointer is null |
Press '?' to see keyboard shortcuts
Keyboard shortcuts:
1 | //==--- InstrEmitter.cpp - Emit MachineInstrs for the SelectionDAG class ---==// | |||
2 | // | |||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | |||
4 | // See https://llvm.org/LICENSE.txt for license information. | |||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | |||
6 | // | |||
7 | //===----------------------------------------------------------------------===// | |||
8 | // | |||
9 | // This implements the Emit routines for the SelectionDAG class, which creates | |||
10 | // MachineInstrs based on the decisions of the SelectionDAG instruction | |||
11 | // selection. | |||
12 | // | |||
13 | //===----------------------------------------------------------------------===// | |||
14 | ||||
15 | #include "InstrEmitter.h" | |||
16 | #include "SDNodeDbgValue.h" | |||
17 | #include "llvm/ADT/Statistic.h" | |||
18 | #include "llvm/CodeGen/MachineConstantPool.h" | |||
19 | #include "llvm/CodeGen/MachineFunction.h" | |||
20 | #include "llvm/CodeGen/MachineInstrBuilder.h" | |||
21 | #include "llvm/CodeGen/MachineRegisterInfo.h" | |||
22 | #include "llvm/CodeGen/SelectionDAG.h" | |||
23 | #include "llvm/CodeGen/StackMaps.h" | |||
24 | #include "llvm/CodeGen/TargetInstrInfo.h" | |||
25 | #include "llvm/CodeGen/TargetLowering.h" | |||
26 | #include "llvm/CodeGen/TargetSubtargetInfo.h" | |||
27 | #include "llvm/IR/DataLayout.h" | |||
28 | #include "llvm/IR/DebugInfo.h" | |||
29 | #include "llvm/IR/PseudoProbe.h" | |||
30 | #include "llvm/Support/Debug.h" | |||
31 | #include "llvm/Support/ErrorHandling.h" | |||
32 | #include "llvm/Support/MathExtras.h" | |||
33 | #include "llvm/Target/TargetMachine.h" | |||
34 | using namespace llvm; | |||
35 | ||||
// Debug type tag used by LLVM_DEBUG/-debug-only filtering for this file.
// (The dump showed the macro garbled as two adjacent string literals, which
// would concatenate to "instr-emitterinstr-emitter"; restored to one tag.)
#define DEBUG_TYPE "instr-emitter"

/// MinRCSize - Smallest register class we allow when constraining virtual
/// registers. If satisfying all register class constraints would require
/// using a smaller register class, emit a COPY to a new virtual register
/// instead.
const unsigned MinRCSize = 4;
43 | ||||
44 | /// CountResults - The results of target nodes have register or immediate | |||
45 | /// operands first, then an optional chain, and optional glue operands (which do | |||
46 | /// not go into the resulting MachineInstr). | |||
47 | unsigned InstrEmitter::CountResults(SDNode *Node) { | |||
48 | unsigned N = Node->getNumValues(); | |||
49 | while (N && Node->getValueType(N - 1) == MVT::Glue) | |||
50 | --N; | |||
51 | if (N && Node->getValueType(N - 1) == MVT::Other) | |||
52 | --N; // Skip over chain result. | |||
53 | return N; | |||
54 | } | |||
55 | ||||
56 | /// countOperands - The inputs to target nodes have any actual inputs first, | |||
57 | /// followed by an optional chain operand, then an optional glue operand. | |||
58 | /// Compute the number of actual operands that will go into the resulting | |||
59 | /// MachineInstr. | |||
60 | /// | |||
61 | /// Also count physreg RegisterSDNode and RegisterMaskSDNode operands preceding | |||
62 | /// the chain and glue. These operands may be implicit on the machine instr. | |||
63 | static unsigned countOperands(SDNode *Node, unsigned NumExpUses, | |||
64 | unsigned &NumImpUses) { | |||
65 | unsigned N = Node->getNumOperands(); | |||
66 | while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue) | |||
67 | --N; | |||
68 | if (N && Node->getOperand(N - 1).getValueType() == MVT::Other) | |||
69 | --N; // Ignore chain if it exists. | |||
70 | ||||
71 | // Count RegisterSDNode and RegisterMaskSDNode operands for NumImpUses. | |||
72 | NumImpUses = N - NumExpUses; | |||
73 | for (unsigned I = N; I > NumExpUses; --I) { | |||
74 | if (isa<RegisterMaskSDNode>(Node->getOperand(I - 1))) | |||
75 | continue; | |||
76 | if (RegisterSDNode *RN = dyn_cast<RegisterSDNode>(Node->getOperand(I - 1))) | |||
77 | if (Register::isPhysicalRegister(RN->getReg())) | |||
78 | continue; | |||
79 | NumImpUses = N - I; | |||
80 | break; | |||
81 | } | |||
82 | ||||
83 | return N; | |||
84 | } | |||
85 | ||||
/// EmitCopyFromReg - Generate machine code for an CopyFromReg node or an
/// implicit physical register output.
///
/// \param Node      the SDNode producing the value.
/// \param ResNo     which result of \p Node is being copied.
/// \param IsClone/IsCloned  scheduler-cloning flags; cloned nodes skip the
///        CopyToReg coalescing below and replace stale map entries.
/// \param SrcReg    source register (virtual: reuse directly; physical: copy
///        into a fresh vreg unless copying is prohibitively expensive).
/// \param VRBaseMap updated with the vreg chosen for (Node, ResNo).
void InstrEmitter::
EmitCopyFromReg(SDNode *Node, unsigned ResNo, bool IsClone, bool IsCloned,
                Register SrcReg, DenseMap<SDValue, Register> &VRBaseMap) {
  Register VRBase;
  if (SrcReg.isVirtual()) {
    // Just use the input register directly!
    SDValue Op(Node, ResNo);
    if (IsClone)
      VRBaseMap.erase(Op);
    bool isNew = VRBaseMap.insert(std::make_pair(Op, SrcReg)).second;
    (void)isNew; // Silence compiler warning.
    assert(isNew && "Node emitted out of order - early");
    return;
  }

  // If the node is only used by a CopyToReg and the dest reg is a vreg, use
  // the CopyToReg'd destination register instead of creating a new vreg.
  bool MatchReg = true;
  const TargetRegisterClass *UseRC = nullptr;
  MVT VT = Node->getSimpleValueType(ResNo);

  // Stick to the preferred register classes for legal types.
  if (TLI->isTypeLegal(VT))
    UseRC = TLI->getRegClassFor(VT, Node->isDivergent());

  if (!IsClone && !IsCloned)
    for (SDNode *User : Node->uses()) {
      bool Match = true;
      if (User->getOpcode() == ISD::CopyToReg &&
          User->getOperand(2).getNode() == Node &&
          User->getOperand(2).getResNo() == ResNo) {
        Register DestReg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
        if (DestReg.isVirtual()) {
          VRBase = DestReg;
          Match = false;
        } else if (DestReg != SrcReg)
          Match = false;
      } else {
        // Not a CopyToReg use: intersect UseRC with the register class each
        // machine-instr use expects for this value.
        for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
          SDValue Op = User->getOperand(i);
          if (Op.getNode() != Node || Op.getResNo() != ResNo)
            continue;
          MVT VT = Node->getSimpleValueType(Op.getResNo());
          if (VT == MVT::Other || VT == MVT::Glue)
            continue;
          Match = false;
          if (User->isMachineOpcode()) {
            const MCInstrDesc &II = TII->get(User->getMachineOpcode());
            const TargetRegisterClass *RC = nullptr;
            if (i+II.getNumDefs() < II.getNumOperands()) {
              RC = TRI->getAllocatableClass(
                TII->getRegClass(II, i+II.getNumDefs(), TRI, *MF));
            }
            if (!UseRC)
              UseRC = RC;
            else if (RC) {
              const TargetRegisterClass *ComRC =
                TRI->getCommonSubClass(UseRC, RC);
              // If multiple uses expect disjoint register classes, we emit
              // copies in AddRegisterOperand.
              if (ComRC)
                UseRC = ComRC;
            }
          }
        }
      }
      // MatchReg stays true only if every use reads SrcReg directly.
      MatchReg &= Match;
      if (VRBase)
        break;
    }

  const TargetRegisterClass *SrcRC = nullptr, *DstRC = nullptr;
  SrcRC = TRI->getMinimalPhysRegClass(SrcReg, VT);

  // Figure out the register class to create for the destreg.
  if (VRBase) {
    DstRC = MRI->getRegClass(VRBase);
  } else if (UseRC) {
    assert(TRI->isTypeLegalForClass(*UseRC, VT) &&
           "Incompatible phys register def and uses!");
    DstRC = UseRC;
  } else
    DstRC = SrcRC;

  // If all uses are reading from the src physical register and copying the
  // register is either impossible or very expensive, then don't create a copy.
  if (MatchReg && SrcRC->getCopyCost() < 0) {
    VRBase = SrcReg;
  } else {
    // Create the reg, emit the copy.
    VRBase = MRI->createVirtualRegister(DstRC);
    BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TargetOpcode::COPY),
            VRBase).addReg(SrcReg);
  }

  // Record the chosen register; cloned nodes may overwrite an older entry.
  SDValue Op(Node, ResNo);
  if (IsClone)
    VRBaseMap.erase(Op);
  bool isNew = VRBaseMap.insert(std::make_pair(Op, VRBase)).second;
  (void)isNew; // Silence compiler warning.
  assert(isNew && "Node emitted out of order - early");
}
190 | ||||
/// CreateVirtualRegisters - Add result-register operands (defs) of \p Node to
/// the machine instruction being built in \p MIB, creating virtual registers
/// as needed and recording each (Node, result#) -> vreg pairing in
/// \p VRBaseMap. Where possible, a result reuses the destination vreg of a
/// CopyToReg user instead of getting a fresh vreg.
void InstrEmitter::CreateVirtualRegisters(SDNode *Node,
                                       MachineInstrBuilder &MIB,
                                       const MCInstrDesc &II,
                                       bool IsClone, bool IsCloned,
                                       DenseMap<SDValue, Register> &VRBaseMap) {
  assert(Node->getMachineOpcode() != TargetOpcode::IMPLICIT_DEF &&
         "IMPLICIT_DEF should have been handled as a special case elsewhere!");

  unsigned NumResults = CountResults(Node);
  // Variadic defs (and STATEPOINT) get one vreg per SDNode result rather
  // than the fixed def count from the instruction description.
  bool HasVRegVariadicDefs = !MF->getTarget().usesPhysRegsForValues() &&
                             II.isVariadic() && II.variadicOpsAreDefs();
  unsigned NumVRegs = HasVRegVariadicDefs ? NumResults : II.getNumDefs();
  if (Node->getMachineOpcode() == TargetOpcode::STATEPOINT)
    NumVRegs = NumResults;
  for (unsigned i = 0; i < NumVRegs; ++i) {
    // If the specific node value is only used by a CopyToReg and the dest reg
    // is a vreg in the same register class, use the CopyToReg'd destination
    // register instead of creating a new vreg.
    Register VRBase;
    const TargetRegisterClass *RC =
      TRI->getAllocatableClass(TII->getRegClass(II, i, TRI, *MF));
    // Always let the value type influence the used register class. The
    // constraints on the instruction may be too lax to represent the value
    // type correctly. For example, a 64-bit float (X86::FR64) can't live in
    // the 32-bit float super-class (X86::FR32).
    if (i < NumResults && TLI->isTypeLegal(Node->getSimpleValueType(i))) {
      const TargetRegisterClass *VTRC = TLI->getRegClassFor(
          Node->getSimpleValueType(i),
          (Node->isDivergent() || (RC && TRI->isDivergentRegClass(RC))));
      if (RC)
        VTRC = TRI->getCommonSubClass(RC, VTRC);
      if (VTRC)
        RC = VTRC;
    }

    if (II.OpInfo != nullptr && II.OpInfo[i].isOptionalDef()) {
      // Optional def must be a physical register.
      VRBase = cast<RegisterSDNode>(Node->getOperand(i-NumResults))->getReg();
      assert(VRBase.isPhysical());
      MIB.addReg(VRBase, RegState::Define);
    }

    if (!VRBase && !IsClone && !IsCloned)
      for (SDNode *User : Node->uses()) {
        if (User->getOpcode() == ISD::CopyToReg &&
            User->getOperand(2).getNode() == Node &&
            User->getOperand(2).getResNo() == i) {
          unsigned Reg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
          if (Register::isVirtualRegister(Reg)) {
            // Only reuse the CopyToReg destination when its class matches
            // exactly; otherwise fall through and make a fresh vreg.
            const TargetRegisterClass *RegRC = MRI->getRegClass(Reg);
            if (RegRC == RC) {
              VRBase = Reg;
              MIB.addReg(VRBase, RegState::Define);
              break;
            }
          }
        }
      }

    // Create the result registers for this node and add the result regs to
    // the machine instruction.
    if (VRBase == 0) {
      assert(RC && "Isn't a register operand!");
      VRBase = MRI->createVirtualRegister(RC);
      MIB.addReg(VRBase, RegState::Define);
    }

    // If this def corresponds to a result of the SDNode insert the VRBase into
    // the lookup map.
    if (i < NumResults) {
      SDValue Op(Node, i);
      if (IsClone)
        VRBaseMap.erase(Op);
      bool isNew = VRBaseMap.insert(std::make_pair(Op, VRBase)).second;
      (void)isNew; // Silence compiler warning.
      assert(isNew && "Node emitted out of order - early");
    }
  }
}
270 | ||||
271 | /// getVR - Return the virtual register corresponding to the specified result | |||
272 | /// of the specified node. | |||
273 | Register InstrEmitter::getVR(SDValue Op, | |||
274 | DenseMap<SDValue, Register> &VRBaseMap) { | |||
275 | if (Op.isMachineOpcode() && | |||
276 | Op.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF) { | |||
277 | // Add an IMPLICIT_DEF instruction before every use. | |||
278 | // IMPLICIT_DEF can produce any type of result so its MCInstrDesc | |||
279 | // does not include operand register class info. | |||
280 | const TargetRegisterClass *RC = TLI->getRegClassFor( | |||
281 | Op.getSimpleValueType(), Op.getNode()->isDivergent()); | |||
282 | Register VReg = MRI->createVirtualRegister(RC); | |||
283 | BuildMI(*MBB, InsertPos, Op.getDebugLoc(), | |||
284 | TII->get(TargetOpcode::IMPLICIT_DEF), VReg); | |||
285 | return VReg; | |||
286 | } | |||
287 | ||||
288 | DenseMap<SDValue, Register>::iterator I = VRBaseMap.find(Op); | |||
289 | assert(I != VRBaseMap.end() && "Node emitted out of order - late")((void)0); | |||
290 | return I->second; | |||
291 | } | |||
292 | ||||
293 | ||||
/// AddRegisterOperand - Add the specified register as an operand to the
/// specified machine instr. Insert register copies if the register is
/// not in the required register class.
///
/// \param MIB       instruction under construction; receives the reg operand.
/// \param Op        SDValue whose vreg (via getVR) is being added.
/// \param IIOpNum   operand index within \p II used to look up class info.
/// \param II        instruction description, or null when no class constraint
///                  should be applied.
/// \param IsDebug/IsClone/IsCloned  affect kill-flag computation below.
void
InstrEmitter::AddRegisterOperand(MachineInstrBuilder &MIB,
                                 SDValue Op,
                                 unsigned IIOpNum,
                                 const MCInstrDesc *II,
                                 DenseMap<SDValue, Register> &VRBaseMap,
                                 bool IsDebug, bool IsClone, bool IsCloned) {
  assert(Op.getValueType() != MVT::Other &&
         Op.getValueType() != MVT::Glue &&
         "Chain and glue operands should occur at end of operand list!");
  // Get/emit the operand.
  Register VReg = getVR(Op, VRBaseMap);

  const MCInstrDesc &MCID = MIB->getDesc();
  bool isOptDef = IIOpNum < MCID.getNumOperands() &&
                  MCID.OpInfo[IIOpNum].isOptionalDef();

  // If the instruction requires a register in a different class, create
  // a new virtual register and copy the value into it, but first attempt to
  // shrink VReg's register class within reason. For example, if VReg == GR32
  // and II requires a GR32_NOSP, just constrain VReg to GR32_NOSP.
  if (II) {
    const TargetRegisterClass *OpRC = nullptr;
    if (IIOpNum < II->getNumOperands())
      OpRC = TII->getRegClass(*II, IIOpNum, TRI, *MF);

    if (OpRC) {
      // Cheap route first: narrow VReg's class in place (bounded by
      // MinRCSize); if that fails, fall back to an explicit COPY.
      const TargetRegisterClass *ConstrainedRC
        = MRI->constrainRegClass(VReg, OpRC, MinRCSize);
      if (!ConstrainedRC) {
        OpRC = TRI->getAllocatableClass(OpRC);
        assert(OpRC && "Constraints cannot be fulfilled for allocation");
        Register NewVReg = MRI->createVirtualRegister(OpRC);
        BuildMI(*MBB, InsertPos, Op.getNode()->getDebugLoc(),
                TII->get(TargetOpcode::COPY), NewVReg).addReg(VReg);
        VReg = NewVReg;
      } else {
        assert(ConstrainedRC->isAllocatable() &&
               "Constraining an allocatable VReg produced an unallocatable class?");
      }
    }
  }

  // If this value has only one use, that use is a kill. This is a
  // conservative approximation. InstrEmitter does trivial coalescing
  // with CopyFromReg nodes, so don't emit kill flags for them.
  // Avoid kill flags on Schedule cloned nodes, since there will be
  // multiple uses.
  // Tied operands are never killed, so we need to check that. And that
  // means we need to determine the index of the operand.
  bool isKill = Op.hasOneUse() &&
                Op.getNode()->getOpcode() != ISD::CopyFromReg &&
                !IsDebug &&
                !(IsClone || IsCloned);
  if (isKill) {
    // Skip over trailing implicit reg operands so Idx is the position this
    // explicit operand will occupy, then consult the tied-to constraint.
    unsigned Idx = MIB->getNumOperands();
    while (Idx > 0 &&
           MIB->getOperand(Idx-1).isReg() &&
           MIB->getOperand(Idx-1).isImplicit())
      --Idx;
    bool isTied = MCID.getOperandConstraint(Idx, MCOI::TIED_TO) != -1;
    if (isTied)
      isKill = false;
  }

  MIB.addReg(VReg, getDefRegState(isOptDef) | getKillRegState(isKill) |
             getDebugRegState(IsDebug));
}
365 | ||||
/// AddOperand - Add the specified operand to the specified machine instr. II
/// specifies the instruction information for the node, and IIOpNum is the
/// operand number (in the II) that we are adding.
///
/// Dispatches on the concrete SDNode kind of \p Op: register-like values go
/// through AddRegisterOperand; constants, FP constants, register masks,
/// global addresses, basic blocks, frame/jump-table/constant-pool indices,
/// external symbols, MC symbols, block addresses, and target indices each map
/// to the corresponding MachineOperand form.
void InstrEmitter::AddOperand(MachineInstrBuilder &MIB,
                              SDValue Op,
                              unsigned IIOpNum,
                              const MCInstrDesc *II,
                              DenseMap<SDValue, Register> &VRBaseMap,
                              bool IsDebug, bool IsClone, bool IsCloned) {
  if (Op.isMachineOpcode()) {
    AddRegisterOperand(MIB, Op, IIOpNum, II, VRBaseMap,
                       IsDebug, IsClone, IsCloned);
  } else if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
    MIB.addImm(C->getSExtValue());
  } else if (ConstantFPSDNode *F = dyn_cast<ConstantFPSDNode>(Op)) {
    MIB.addFPImm(F->getConstantFPValue());
  } else if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(Op)) {
    Register VReg = R->getReg();
    MVT OpVT = Op.getSimpleValueType();
    // Determine the class the instruction expects (IIRC) and the class
    // preferred for the value type (OpRC); copy into IIRC if they disagree.
    const TargetRegisterClass *IIRC =
        II ? TRI->getAllocatableClass(TII->getRegClass(*II, IIOpNum, TRI, *MF))
           : nullptr;
    const TargetRegisterClass *OpRC =
        TLI->isTypeLegal(OpVT)
            ? TLI->getRegClassFor(OpVT,
                                  Op.getNode()->isDivergent() ||
                                      (IIRC && TRI->isDivergentRegClass(IIRC)))
            : nullptr;

    if (OpRC && IIRC && OpRC != IIRC && Register::isVirtualRegister(VReg)) {
      Register NewVReg = MRI->createVirtualRegister(IIRC);
      BuildMI(*MBB, InsertPos, Op.getNode()->getDebugLoc(),
               TII->get(TargetOpcode::COPY), NewVReg).addReg(VReg);
      VReg = NewVReg;
    }
    // Turn additional physreg operands into implicit uses on non-variadic
    // instructions. This is used by call and return instructions passing
    // arguments in registers.
    bool Imp = II && (IIOpNum >= II->getNumOperands() && !II->isVariadic());
    MIB.addReg(VReg, getImplRegState(Imp));
  } else if (RegisterMaskSDNode *RM = dyn_cast<RegisterMaskSDNode>(Op)) {
    MIB.addRegMask(RM->getRegMask());
  } else if (GlobalAddressSDNode *TGA = dyn_cast<GlobalAddressSDNode>(Op)) {
    MIB.addGlobalAddress(TGA->getGlobal(), TGA->getOffset(),
                         TGA->getTargetFlags());
  } else if (BasicBlockSDNode *BBNode = dyn_cast<BasicBlockSDNode>(Op)) {
    MIB.addMBB(BBNode->getBasicBlock());
  } else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op)) {
    MIB.addFrameIndex(FI->getIndex());
  } else if (JumpTableSDNode *JT = dyn_cast<JumpTableSDNode>(Op)) {
    MIB.addJumpTableIndex(JT->getIndex(), JT->getTargetFlags());
  } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op)) {
    int Offset = CP->getOffset();
    Align Alignment = CP->getAlign();

    // Machine constant-pool entries and plain constants use different
    // getConstantPoolIndex overloads; both yield an index into MCP.
    unsigned Idx;
    MachineConstantPool *MCP = MF->getConstantPool();
    if (CP->isMachineConstantPoolEntry())
      Idx = MCP->getConstantPoolIndex(CP->getMachineCPVal(), Alignment);
    else
      Idx = MCP->getConstantPoolIndex(CP->getConstVal(), Alignment);
    MIB.addConstantPoolIndex(Idx, Offset, CP->getTargetFlags());
  } else if (ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op)) {
    MIB.addExternalSymbol(ES->getSymbol(), ES->getTargetFlags());
  } else if (auto *SymNode = dyn_cast<MCSymbolSDNode>(Op)) {
    MIB.addSym(SymNode->getMCSymbol());
  } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op)) {
    MIB.addBlockAddress(BA->getBlockAddress(),
                        BA->getOffset(),
                        BA->getTargetFlags());
  } else if (TargetIndexSDNode *TI = dyn_cast<TargetIndexSDNode>(Op)) {
    MIB.addTargetIndex(TI->getIndex(), TI->getOffset(), TI->getTargetFlags());
  } else {
    // Fallback: treat the value as a plain register operand.
    assert(Op.getValueType() != MVT::Other &&
           Op.getValueType() != MVT::Glue &&
           "Chain and glue operands should occur at end of operand list!");
    AddRegisterOperand(MIB, Op, IIOpNum, II, VRBaseMap,
                       IsDebug, IsClone, IsCloned);
  }
}
446 | ||||
/// ConstrainForSubReg - Make sure \p VReg is usable with sub-register index
/// \p SubIdx. Returns \p VReg itself when its class can be constrained in
/// place (bounded by MinRCSize), otherwise a new virtual register COPY'd from
/// \p VReg whose class supports \p SubIdx.
Register InstrEmitter::ConstrainForSubReg(Register VReg, unsigned SubIdx,
                                          MVT VT, bool isDivergent, const DebugLoc &DL) {
  const TargetRegisterClass *VRC = MRI->getRegClass(VReg);
  const TargetRegisterClass *RC = TRI->getSubClassWithSubReg(VRC, SubIdx);

  // RC is a sub-class of VRC that supports SubIdx. Try to constrain VReg
  // within reason.
  if (RC && RC != VRC)
    RC = MRI->constrainRegClass(VReg, RC, MinRCSize);

  // VReg has been adjusted. It can be used with SubIdx operands now.
  if (RC)
    return VReg;

  // VReg couldn't be reasonably constrained. Emit a COPY to a new virtual
  // register instead.
  RC = TRI->getSubClassWithSubReg(TLI->getRegClassFor(VT, isDivergent), SubIdx);
  assert(RC && "No legal register class for VT supports that SubIdx");
  Register NewReg = MRI->createVirtualRegister(RC);
  BuildMI(*MBB, InsertPos, DL, TII->get(TargetOpcode::COPY), NewReg)
    .addReg(VReg);
  return NewReg;
}
470 | ||||
/// EmitSubregNode - Generate machine code for subreg nodes.
///
/// Handles EXTRACT_SUBREG (lowered to a subregister COPY),
/// INSERT_SUBREG and SUBREG_TO_REG (emitted as the corresponding target
/// opcodes). Records the result vreg for (Node, 0) in \p VRBaseMap.
void InstrEmitter::EmitSubregNode(SDNode *Node,
                                  DenseMap<SDValue, Register> &VRBaseMap,
                                  bool IsClone, bool IsCloned) {
  Register VRBase;
  unsigned Opc = Node->getMachineOpcode();

  // If the node is only used by a CopyToReg and the dest reg is a vreg, use
  // the CopyToReg'd destination register instead of creating a new vreg.
  for (SDNode *User : Node->uses()) {
    if (User->getOpcode() == ISD::CopyToReg &&
        User->getOperand(2).getNode() == Node) {
      Register DestReg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
      if (DestReg.isVirtual()) {
        VRBase = DestReg;
        break;
      }
    }
  }

  if (Opc == TargetOpcode::EXTRACT_SUBREG) {
    // EXTRACT_SUBREG is lowered as %dst = COPY %src:sub. There are no
    // constraints on the %dst register, COPY can target all legal register
    // classes.
    unsigned SubIdx = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
    const TargetRegisterClass *TRC =
      TLI->getRegClassFor(Node->getSimpleValueType(0), Node->isDivergent());

    // Source may be a physreg RegisterSDNode (no defining MI) or a value
    // carried in a vreg (DefMI is its defining instruction, if any).
    Register Reg;
    MachineInstr *DefMI;
    RegisterSDNode *R = dyn_cast<RegisterSDNode>(Node->getOperand(0));
    if (R && Register::isPhysicalRegister(R->getReg())) {
      Reg = R->getReg();
      DefMI = nullptr;
    } else {
      Reg = R ? R->getReg() : getVR(Node->getOperand(0), VRBaseMap);
      DefMI = MRI->getVRegDef(Reg);
    }

    Register SrcReg, DstReg;
    unsigned DefSubIdx;
    // SrcReg/DstReg/DefSubIdx are only filled in when
    // isCoalescableExtInstr returns true (short-circuit below).
    if (DefMI &&
        TII->isCoalescableExtInstr(*DefMI, SrcReg, DstReg, DefSubIdx) &&
        SubIdx == DefSubIdx &&
        TRC == MRI->getRegClass(SrcReg)) {
      // Optimize these:
      // r1025 = s/zext r1024, 4
      // r1026 = extract_subreg r1025, 4
      // to a copy
      // r1026 = copy r1024
      VRBase = MRI->createVirtualRegister(TRC);
      BuildMI(*MBB, InsertPos, Node->getDebugLoc(),
              TII->get(TargetOpcode::COPY), VRBase).addReg(SrcReg);
      MRI->clearKillFlags(SrcReg);
    } else {
      // Reg may not support a SubIdx sub-register, and we may need to
      // constrain its register class or issue a COPY to a compatible register
      // class.
      if (Reg.isVirtual())
        Reg = ConstrainForSubReg(Reg, SubIdx,
                                 Node->getOperand(0).getSimpleValueType(),
                                 Node->isDivergent(), Node->getDebugLoc());
      // Create the destreg if it is missing.
      if (!VRBase)
        VRBase = MRI->createVirtualRegister(TRC);

      // Create the extract_subreg machine instruction.
      MachineInstrBuilder CopyMI =
          BuildMI(*MBB, InsertPos, Node->getDebugLoc(),
                  TII->get(TargetOpcode::COPY), VRBase);
      // Virtual source: COPY %src:sub. Physical source: COPY of the
      // concrete sub-register.
      if (Reg.isVirtual())
        CopyMI.addReg(Reg, 0, SubIdx);
      else
        CopyMI.addReg(TRI->getSubReg(Reg, SubIdx));
    }
  } else if (Opc == TargetOpcode::INSERT_SUBREG ||
             Opc == TargetOpcode::SUBREG_TO_REG) {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);
    SDValue N2 = Node->getOperand(2);
    unsigned SubIdx = cast<ConstantSDNode>(N2)->getZExtValue();

    // Figure out the register class to create for the destreg. It should be
    // the largest legal register class supporting SubIdx sub-registers.
    // RegisterCoalescer will constrain it further if it decides to eliminate
    // the INSERT_SUBREG instruction.
    //
    //   %dst = INSERT_SUBREG %src, %sub, SubIdx
    //
    // is lowered by TwoAddressInstructionPass to:
    //
    //   %dst = COPY %src
    //   %dst:SubIdx = COPY %sub
    //
    // There is no constraint on the %src register class.
    //
    const TargetRegisterClass *SRC =
        TLI->getRegClassFor(Node->getSimpleValueType(0), Node->isDivergent());
    SRC = TRI->getSubClassWithSubReg(SRC, SubIdx);
    assert(SRC && "No register class supports VT and SubIdx for INSERT_SUBREG");

    if (VRBase == 0 || !SRC->hasSubClassEq(MRI->getRegClass(VRBase)))
      VRBase = MRI->createVirtualRegister(SRC);

    // Create the insert_subreg or subreg_to_reg machine instruction.
    MachineInstrBuilder MIB =
      BuildMI(*MF, Node->getDebugLoc(), TII->get(Opc), VRBase);

    // If creating a subreg_to_reg, then the first input operand
    // is an implicit value immediate, otherwise it's a register
    if (Opc == TargetOpcode::SUBREG_TO_REG) {
      const ConstantSDNode *SD = cast<ConstantSDNode>(N0);
      MIB.addImm(SD->getZExtValue());
    } else
      AddOperand(MIB, N0, 0, nullptr, VRBaseMap, /*IsDebug=*/false,
                 IsClone, IsCloned);
    // Add the subregister being inserted
    AddOperand(MIB, N1, 0, nullptr, VRBaseMap, /*IsDebug=*/false,
               IsClone, IsCloned);
    MIB.addImm(SubIdx);
    MBB->insert(InsertPos, MIB);
  } else
    llvm_unreachable("Node is not insert_subreg, extract_subreg, or subreg_to_reg");

  // Record the result vreg; subreg nodes have exactly one recorded result.
  SDValue Op(Node, 0);
  bool isNew = VRBaseMap.insert(std::make_pair(Op, VRBase)).second;
  (void)isNew; // Silence compiler warning.
  assert(isNew && "Node emitted out of order - early");
}
601 | ||||
602 | /// EmitCopyToRegClassNode - Generate machine code for COPY_TO_REGCLASS nodes. | |||
603 | /// COPY_TO_REGCLASS is just a normal copy, except that the destination | |||
604 | /// register is constrained to be in a particular register class. | |||
605 | /// | |||
606 | void | |||
607 | InstrEmitter::EmitCopyToRegClassNode(SDNode *Node, | |||
608 | DenseMap<SDValue, Register> &VRBaseMap) { | |||
609 | unsigned VReg = getVR(Node->getOperand(0), VRBaseMap); | |||
610 | ||||
611 | // Create the new VReg in the destination class and emit a copy. | |||
612 | unsigned DstRCIdx = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue(); | |||
613 | const TargetRegisterClass *DstRC = | |||
614 | TRI->getAllocatableClass(TRI->getRegClass(DstRCIdx)); | |||
615 | Register NewVReg = MRI->createVirtualRegister(DstRC); | |||
616 | BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TargetOpcode::COPY), | |||
617 | NewVReg).addReg(VReg); | |||
618 | ||||
619 | SDValue Op(Node, 0); | |||
620 | bool isNew = VRBaseMap.insert(std::make_pair(Op, NewVReg)).second; | |||
621 | (void)isNew; // Silence compiler warning. | |||
622 | assert(isNew && "Node emitted out of order - early")((void)0); | |||
623 | } | |||
624 | ||||
/// EmitRegSequence - Generate machine code for REG_SEQUENCE nodes.
///
/// Operand 0 of the node selects the destination register class id; the
/// remaining operands come in (value, subreg-index) pairs. The chosen class
/// may be widened to a matching super-register class as pairs are processed.
void InstrEmitter::EmitRegSequence(SDNode *Node,
                                   DenseMap<SDValue, Register> &VRBaseMap,
                                   bool IsClone, bool IsCloned) {
  unsigned DstRCIdx = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
  const TargetRegisterClass *RC = TRI->getRegClass(DstRCIdx);
  Register NewVReg = MRI->createVirtualRegister(TRI->getAllocatableClass(RC));
  const MCInstrDesc &II = TII->get(TargetOpcode::REG_SEQUENCE);
  MachineInstrBuilder MIB = BuildMI(*MF, Node->getDebugLoc(), II, NewVReg);
  unsigned NumOps = Node->getNumOperands();
  // If the input pattern has a chain, then the root of the corresponding
  // output pattern will get a chain as well. This can happen to be a
  // REG_SEQUENCE (which is not "guarded" by countOperands/CountResults).
  if (NumOps && Node->getOperand(NumOps-1).getValueType() == MVT::Other)
    --NumOps; // Ignore chain if it exists.

  assert((NumOps & 1) == 1 &&
         "REG_SEQUENCE must have an odd number of operands!");
  for (unsigned i = 1; i != NumOps; ++i) {
    SDValue Op = Node->getOperand(i);
    // Even i: Op is a subreg index; operand i-1 is the paired value.
    if ((i & 1) == 0) {
      RegisterSDNode *R = dyn_cast<RegisterSDNode>(Node->getOperand(i-1));
      // Skip physical registers as they don't have a vreg to get and we'll
      // insert copies for them in TwoAddressInstructionPass anyway.
      if (!R || !Register::isPhysicalRegister(R->getReg())) {
        unsigned SubIdx = cast<ConstantSDNode>(Op)->getZExtValue();
        unsigned SubReg = getVR(Node->getOperand(i-1), VRBaseMap);
        const TargetRegisterClass *TRC = MRI->getRegClass(SubReg);
        const TargetRegisterClass *SRC =
        TRI->getMatchingSuperRegClass(RC, TRC, SubIdx);
        // Widen NewVReg's class when the pair demands a super-register class.
        if (SRC && SRC != RC) {
          MRI->setRegClass(NewVReg, SRC);
          RC = SRC;
        }
      }
    }
    AddOperand(MIB, Op, i+1, &II, VRBaseMap, /*IsDebug=*/false,
               IsClone, IsCloned);
  }

  MBB->insert(InsertPos, MIB);
  SDValue Op(Node, 0);
  bool isNew = VRBaseMap.insert(std::make_pair(Op, NewVReg)).second;
  (void)isNew; // Silence compiler warning.
  assert(isNew && "Node emitted out of order - early");
}
672 | ||||
673 | /// EmitDbgValue - Generate machine instruction for a dbg_value node. | |||
674 | /// | |||
MachineInstr *
InstrEmitter::EmitDbgValue(SDDbgValue *SD,
                           DenseMap<SDValue, Register> &VRBaseMap) {
  MDNode *Var = SD->getVariable();
  MDNode *Expr = SD->getExpression();
  DebugLoc DL = SD->getDebugLoc();
  assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");

  // Mark emitted up front, even if we end up producing an undef location
  // below, so the node is not transferred and emitted a second time.
  SD->setIsEmitted();

  ArrayRef<SDDbgOperand> LocationOps = SD->getLocationOps();
  assert(!LocationOps.empty() && "dbg_value with no location operands?");

  // The referenced value is no longer computed: emit an undef DBG_VALUE so
  // earlier location ranges are terminated rather than leaking forward.
  if (SD->isInvalidated())
    return EmitDbgNoLocation(SD);

  // Emit variadic dbg_value nodes as DBG_VALUE_LIST.
  if (SD->isVariadic()) {
    // DBG_VALUE_LIST := "DBG_VALUE_LIST" var, expression, loc (, loc)*
    const MCInstrDesc &DbgValDesc = TII->get(TargetOpcode::DBG_VALUE_LIST);
    // Build the DBG_VALUE_LIST instruction base.
    auto MIB = BuildMI(*MF, DL, DbgValDesc);
    MIB.addMetadata(Var);
    MIB.addMetadata(Expr);
    AddDbgValueLocationOps(MIB, DbgValDesc, LocationOps, VRBaseMap);
    return &*MIB;
  }

  // Attempt to produce a DBG_INSTR_REF if we've been asked to.
  // We currently exclude the possibility of instruction references for
  // variadic nodes; if at some point we enable them, this should be moved
  // above the variadic block.
  if (EmitDebugInstrRefs)
    if (auto *InstrRef = EmitDbgInstrRef(SD, VRBaseMap))
      return InstrRef;

  // Fall back to a plain single-location DBG_VALUE.
  return EmitDbgValueFromSingleOp(SD, VRBaseMap);
}
714 | ||||
/// Append one machine operand per debug location operand, translating each
/// SDDbgOperand kind into the corresponding MachineOperand form.
void InstrEmitter::AddDbgValueLocationOps(
    MachineInstrBuilder &MIB, const MCInstrDesc &DbgValDesc,
    ArrayRef<SDDbgOperand> LocationOps,
    DenseMap<SDValue, Register> &VRBaseMap) {
  for (const SDDbgOperand &Op : LocationOps) {
    switch (Op.getKind()) {
    case SDDbgOperand::FRAMEIX:
      // Stack location: reference the frame index directly.
      MIB.addFrameIndex(Op.getFrameIx());
      break;
    case SDDbgOperand::VREG:
      // The value already lives in a known virtual register.
      MIB.addReg(Op.getVReg(), RegState::Debug);
      break;
    case SDDbgOperand::SDNODE: {
      SDValue V = SDValue(Op.getSDNode(), Op.getResNo());
      // It's possible we replaced this SDNode with other(s) and therefore
      // didn't generate code for it. It's better to catch these cases where
      // they happen and transfer the debug info, but trying to guarantee that
      // in all cases would be very fragile; this is a safeguard for any
      // that were missed.
      if (VRBaseMap.count(V) == 0)
        MIB.addReg(0U); // undef
      else
        AddOperand(MIB, V, (*MIB).getNumOperands(), &DbgValDesc, VRBaseMap,
                   /*IsDebug=*/true, /*IsClone=*/false, /*IsCloned=*/false);
    } break;
    case SDDbgOperand::CONST: {
      const Value *V = Op.getConst();
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
        // Integers wider than 64 bits cannot be carried in a plain immediate;
        // keep them as a ConstantInt operand instead.
        if (CI->getBitWidth() > 64)
          MIB.addCImm(CI);
        else
          MIB.addImm(CI->getSExtValue());
      } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
        MIB.addFPImm(CF);
      } else if (isa<ConstantPointerNull>(V)) {
        // Note: This assumes that all nullptr constants are zero-valued.
        MIB.addImm(0);
      } else {
        // Could be an Undef. In any case insert an Undef so we can see what we
        // dropped.
        MIB.addReg(0U);
      }
    } break;
    }
  }
}
761 | ||||
/// Try to emit a DBG_INSTR_REF for \p SD, referring to the instruction and
/// operand that define the value. Falls back to a DBG_VALUE (via the helpers)
/// or to a vreg-based DBG_INSTR_REF that is patched up later in
/// MachineFunction::finalizeDebugInstrRefs.
MachineInstr *
InstrEmitter::EmitDbgInstrRef(SDDbgValue *SD,
                              DenseMap<SDValue, Register> &VRBaseMap) {
  assert(!SD->isVariadic());
  SDDbgOperand DbgOperand = SD->getLocationOps()[0];
  MDNode *Var = SD->getVariable();
  MDNode *Expr = SD->getExpression();
  DebugLoc DL = SD->getDebugLoc();
  const MCInstrDesc &RefII = TII->get(TargetOpcode::DBG_INSTR_REF);

  // Handle variable locations that don't actually depend on the instructions
  // in the program: constants and stack locations.
  if (DbgOperand.getKind() == SDDbgOperand::FRAMEIX ||
      DbgOperand.getKind() == SDDbgOperand::CONST)
    return EmitDbgValueFromSingleOp(SD, VRBaseMap);

  // It may not be immediately possible to identify the MachineInstr that
  // defines a VReg, it can depend for example on the order blocks are
  // emitted in. When this happens, or when further analysis is needed later,
  // produce an instruction like this:
  //
  //    DBG_INSTR_REF %0:gr64, 0, !123, !456
  //
  // i.e., point the instruction at the vreg, and patch it up later in
  // MachineFunction::finalizeDebugInstrRefs.
  auto EmitHalfDoneInstrRef = [&](unsigned VReg) -> MachineInstr * {
    auto MIB = BuildMI(*MF, DL, RefII);
    MIB.addReg(VReg);
    MIB.addImm(0);
    MIB.addMetadata(Var);
    MIB.addMetadata(Expr);
    return MIB;
  };

  // Try to find both the defined register and the instruction defining it.
  MachineInstr *DefMI = nullptr;
  unsigned VReg;

  if (DbgOperand.getKind() == SDDbgOperand::VREG) {
    VReg = DbgOperand.getVReg();

    // No definition means that block hasn't been emitted yet. Leave a vreg
    // reference to be fixed later.
    if (!MRI->hasOneDef(VReg))
      return EmitHalfDoneInstrRef(VReg);

    DefMI = &*MRI->def_instr_begin(VReg);
  } else {
    assert(DbgOperand.getKind() == SDDbgOperand::SDNODE);
    // Look up the corresponding VReg for the given SDNode, if any.
    SDNode *Node = DbgOperand.getSDNode();
    SDValue Op = SDValue(Node, DbgOperand.getResNo());
    DenseMap<SDValue, Register>::iterator I = VRBaseMap.find(Op);
    // No VReg -> produce a DBG_VALUE $noreg instead.
    if (I==VRBaseMap.end())
      return EmitDbgNoLocation(SD);

    // Try to pick out a defining instruction at this point.
    VReg = getVR(Op, VRBaseMap);

    // Again, if there's no instruction defining the VReg right now, fix it up
    // later.
    if (!MRI->hasOneDef(VReg))
      return EmitHalfDoneInstrRef(VReg);

    DefMI = &*MRI->def_instr_begin(VReg);
  }

  // Avoid copy like instructions: they don't define values, only move them.
  // Leave a virtual-register reference until it can be fixed up later, to find
  // the underlying value definition.
  if (DefMI->isCopyLike() || TII->isCopyInstr(*DefMI))
    return EmitHalfDoneInstrRef(VReg);

  auto MIB = BuildMI(*MF, DL, RefII);

  // Find the operand number which defines the specified VReg.
  unsigned OperandIdx = 0;
  for (const auto &MO : DefMI->operands()) {
    if (MO.isReg() && MO.isDef() && MO.getReg() == VReg)
      break;
    ++OperandIdx;
  }
  assert(OperandIdx < DefMI->getNumOperands());

  // Make the DBG_INSTR_REF refer to that instruction, and that operand.
  unsigned InstrNum = DefMI->getDebugInstrNum();
  MIB.addImm(InstrNum);
  MIB.addImm(OperandIdx);
  MIB.addMetadata(Var);
  MIB.addMetadata(Expr);
  return &*MIB;
}
855 | ||||
856 | MachineInstr *InstrEmitter::EmitDbgNoLocation(SDDbgValue *SD) { | |||
857 | // An invalidated SDNode must generate an undef DBG_VALUE: although the | |||
858 | // original value is no longer computed, earlier DBG_VALUEs live ranges | |||
859 | // must not leak into later code. | |||
860 | MDNode *Var = SD->getVariable(); | |||
861 | MDNode *Expr = SD->getExpression(); | |||
862 | DebugLoc DL = SD->getDebugLoc(); | |||
863 | auto MIB = BuildMI(*MF, DL, TII->get(TargetOpcode::DBG_VALUE)); | |||
864 | MIB.addReg(0U); | |||
865 | MIB.addReg(0U, RegState::Debug); | |||
866 | MIB.addMetadata(Var); | |||
867 | MIB.addMetadata(Expr); | |||
868 | return &*MIB; | |||
869 | } | |||
870 | ||||
871 | MachineInstr * | |||
872 | InstrEmitter::EmitDbgValueFromSingleOp(SDDbgValue *SD, | |||
873 | DenseMap<SDValue, Register> &VRBaseMap) { | |||
874 | MDNode *Var = SD->getVariable(); | |||
875 | MDNode *Expr = SD->getExpression(); | |||
876 | DebugLoc DL = SD->getDebugLoc(); | |||
877 | const MCInstrDesc &II = TII->get(TargetOpcode::DBG_VALUE); | |||
878 | ||||
879 | assert(SD->getLocationOps().size() == 1 &&((void)0) | |||
880 | "Non variadic dbg_value should have only one location op")((void)0); | |||
881 | ||||
882 | // Emit non-variadic dbg_value nodes as DBG_VALUE. | |||
883 | // DBG_VALUE := "DBG_VALUE" loc, isIndirect, var, expr | |||
884 | auto MIB = BuildMI(*MF, DL, II); | |||
885 | AddDbgValueLocationOps(MIB, II, SD->getLocationOps(), VRBaseMap); | |||
886 | ||||
887 | if (SD->isIndirect()) | |||
888 | MIB.addImm(0U); | |||
889 | else | |||
890 | MIB.addReg(0U, RegState::Debug); | |||
891 | ||||
892 | return MIB.addMetadata(Var).addMetadata(Expr); | |||
893 | } | |||
894 | ||||
895 | MachineInstr * | |||
896 | InstrEmitter::EmitDbgLabel(SDDbgLabel *SD) { | |||
897 | MDNode *Label = SD->getLabel(); | |||
898 | DebugLoc DL = SD->getDebugLoc(); | |||
899 | assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(DL) &&((void)0) | |||
900 | "Expected inlined-at fields to agree")((void)0); | |||
901 | ||||
902 | const MCInstrDesc &II = TII->get(TargetOpcode::DBG_LABEL); | |||
903 | MachineInstrBuilder MIB = BuildMI(*MF, DL, II); | |||
904 | MIB.addMetadata(Label); | |||
905 | ||||
906 | return &*MIB; | |||
907 | } | |||
908 | ||||
909 | /// EmitMachineNode - Generate machine code for a target-specific node and | |||
910 | /// needed dependencies. | |||
911 | /// | |||
void InstrEmitter::
EmitMachineNode(SDNode *Node, bool IsClone, bool IsCloned,
                DenseMap<SDValue, Register> &VRBaseMap) {
  unsigned Opc = Node->getMachineOpcode();

  // Handle subreg insert/extract specially
  if (Opc == TargetOpcode::EXTRACT_SUBREG ||
      Opc == TargetOpcode::INSERT_SUBREG ||
      Opc == TargetOpcode::SUBREG_TO_REG) {
    EmitSubregNode(Node, VRBaseMap, IsClone, IsCloned);
    return;
  }

  // Handle COPY_TO_REGCLASS specially.
  if (Opc == TargetOpcode::COPY_TO_REGCLASS) {
    EmitCopyToRegClassNode(Node, VRBaseMap);
    return;
  }

  // Handle REG_SEQUENCE specially.
  if (Opc == TargetOpcode::REG_SEQUENCE) {
    EmitRegSequence(Node, VRBaseMap, IsClone, IsCloned);
    return;
  }

  if (Opc == TargetOpcode::IMPLICIT_DEF)
    // We want a unique VR for each IMPLICIT_DEF use.
    return;

  const MCInstrDesc &II = TII->get(Opc);
  unsigned NumResults = CountResults(Node);
  unsigned NumDefs = II.getNumDefs();
  const MCPhysReg *ScratchRegs = nullptr;

  // Handle STACKMAP and PATCHPOINT specially and then use the generic code.
  if (Opc == TargetOpcode::STACKMAP || Opc == TargetOpcode::PATCHPOINT) {
    // Stackmaps do not have arguments and do not preserve their calling
    // convention. However, to simplify runtime support, they clobber the same
    // scratch registers as AnyRegCC.
    unsigned CC = CallingConv::AnyReg;
    if (Opc == TargetOpcode::PATCHPOINT) {
      CC = Node->getConstantOperandVal(PatchPointOpers::CCPos);
      NumDefs = NumResults;
    }
    ScratchRegs = TLI->getScratchRegisters((CallingConv::ID) CC);
  } else if (Opc == TargetOpcode::STATEPOINT) {
    // STATEPOINT defs are dynamic (GC pointer relocations), not described
    // by the .td file.
    NumDefs = NumResults;
  }

  unsigned NumImpUses = 0;
  unsigned NodeOperands =
    countOperands(Node, II.getNumOperands() - NumDefs, NumImpUses);
  bool HasVRegVariadicDefs = !MF->getTarget().usesPhysRegsForValues() &&
                             II.isVariadic() && II.variadicOpsAreDefs();
  // Results beyond the declared defs must come back in implicit physregs,
  // unless the variadic operands themselves are vreg defs.
  bool HasPhysRegOuts = NumResults > NumDefs &&
                        II.getImplicitDefs() != nullptr && !HasVRegVariadicDefs;
#ifndef NDEBUG
  unsigned NumMIOperands = NodeOperands + NumResults;
  if (II.isVariadic())
    assert(NumMIOperands >= II.getNumOperands() &&
           "Too few operands for a variadic node!");
  else
    assert(NumMIOperands >= II.getNumOperands() &&
           NumMIOperands <= II.getNumOperands() + II.getNumImplicitDefs() +
                                NumImpUses &&
           "#operands for dag node doesn't match .td file!");
#endif

  // Create the new machine instruction.
  MachineInstrBuilder MIB = BuildMI(*MF, Node->getDebugLoc(), II);

  // Add result register values for things that are defined by this
  // instruction.
  if (NumResults) {
    CreateVirtualRegisters(Node, MIB, II, IsClone, IsCloned, VRBaseMap);

    // Transfer any IR flags from the SDNode to the MachineInstr
    MachineInstr *MI = MIB.getInstr();
    const SDNodeFlags Flags = Node->getFlags();
    if (Flags.hasNoSignedZeros())
      MI->setFlag(MachineInstr::MIFlag::FmNsz);

    if (Flags.hasAllowReciprocal())
      MI->setFlag(MachineInstr::MIFlag::FmArcp);

    if (Flags.hasNoNaNs())
      MI->setFlag(MachineInstr::MIFlag::FmNoNans);

    if (Flags.hasNoInfs())
      MI->setFlag(MachineInstr::MIFlag::FmNoInfs);

    if (Flags.hasAllowContract())
      MI->setFlag(MachineInstr::MIFlag::FmContract);

    if (Flags.hasApproximateFuncs())
      MI->setFlag(MachineInstr::MIFlag::FmAfn);

    if (Flags.hasAllowReassociation())
      MI->setFlag(MachineInstr::MIFlag::FmReassoc);

    if (Flags.hasNoUnsignedWrap())
      MI->setFlag(MachineInstr::MIFlag::NoUWrap);

    if (Flags.hasNoSignedWrap())
      MI->setFlag(MachineInstr::MIFlag::NoSWrap);

    if (Flags.hasExact())
      MI->setFlag(MachineInstr::MIFlag::IsExact);

    if (Flags.hasNoFPExcept())
      MI->setFlag(MachineInstr::MIFlag::NoFPExcept);
  }

  // Emit all of the actual operands of this instruction, adding them to the
  // instruction as appropriate.
  bool HasOptPRefs = NumDefs > NumResults;
  assert((!HasOptPRefs || !HasPhysRegOuts) &&
         "Unable to cope with optional defs and phys regs defs!");
  unsigned NumSkip = HasOptPRefs ? NumDefs - NumResults : 0;
  for (unsigned i = NumSkip; i != NodeOperands; ++i)
    AddOperand(MIB, Node->getOperand(i), i-NumSkip+NumDefs, &II,
               VRBaseMap, /*IsDebug=*/false, IsClone, IsCloned);

  // Add scratch registers as implicit def and early clobber
  if (ScratchRegs)
    for (unsigned i = 0; ScratchRegs[i]; ++i)
      MIB.addReg(ScratchRegs[i], RegState::ImplicitDefine |
                                 RegState::EarlyClobber);

  // Set the memory reference descriptions of this instruction now that it is
  // part of the function.
  MIB.setMemRefs(cast<MachineSDNode>(Node)->memoperands());

  // Insert the instruction into position in the block. This needs to
  // happen before any custom inserter hook is called so that the
  // hook knows where in the block to insert the replacement code.
  MBB->insert(InsertPos, MIB);

  // The MachineInstr may also define physregs instead of virtregs.  These
  // physreg values can reach other instructions in different ways:
  //
  // 1. When there is a use of a Node value beyond the explicitly defined
  //    virtual registers, we emit a CopyFromReg for one of the implicitly
  //    defined physregs.  This only happens when HasPhysRegOuts is true.
  //
  // 2. A CopyFromReg reading a physreg may be glued to this instruction.
  //
  // 3. A glued instruction may implicitly use a physreg.
  //
  // 4. A glued instruction may use a RegisterSDNode operand.
  //
  // Collect all the used physreg defs, and make sure that any unused physreg
  // defs are marked as dead.
  SmallVector<Register, 8> UsedRegs;

  // Additional results must be physical register defs.
  if (HasPhysRegOuts) {
    for (unsigned i = NumDefs; i < NumResults; ++i) {
      Register Reg = II.getImplicitDefs()[i - NumDefs];
      if (!Node->hasAnyUseOfValue(i))
        continue;
      // This implicitly defined physreg has a use.
      UsedRegs.push_back(Reg);
      EmitCopyFromReg(Node, i, IsClone, IsCloned, Reg, VRBaseMap);
    }
  }

  // Scan the glue chain for any used physregs.
  if (Node->getValueType(Node->getNumValues()-1) == MVT::Glue) {
    for (SDNode *F = Node->getGluedUser(); F; F = F->getGluedUser()) {
      if (F->getOpcode() == ISD::CopyFromReg) {
        UsedRegs.push_back(cast<RegisterSDNode>(F->getOperand(1))->getReg());
        continue;
      } else if (F->getOpcode() == ISD::CopyToReg) {
        // Skip CopyToReg nodes that are internal to the glue chain.
        continue;
      }
      // Collect declared implicit uses.
      const MCInstrDesc &MCID = TII->get(F->getMachineOpcode());
      UsedRegs.append(MCID.getImplicitUses(),
                      MCID.getImplicitUses() + MCID.getNumImplicitUses());
      // In addition to declared implicit uses, we must also check for
      // direct RegisterSDNode operands.
      for (unsigned i = 0, e = F->getNumOperands(); i != e; ++i)
        if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(F->getOperand(i))) {
          Register Reg = R->getReg();
          if (Reg.isPhysical())
            UsedRegs.push_back(Reg);
        }
    }
  }

  // Finally mark unused registers as dead.
  if (!UsedRegs.empty() || II.getImplicitDefs() || II.hasOptionalDef())
    MIB->setPhysRegsDeadExcept(UsedRegs, *TRI);

  // STATEPOINT is too 'dynamic' to have meaningful machine description.
  // We have to manually tie operands.
  if (Opc == TargetOpcode::STATEPOINT && NumDefs > 0) {
    assert(!HasPhysRegOuts && "STATEPOINT mishandled");
    MachineInstr *MI = MIB;
    unsigned Def = 0;
    // Tie each def to the corresponding register entry in the GC pointer
    // operand list, skipping non-register (spilled/constant) entries.
    int First = StatepointOpers(MI).getFirstGCPtrIdx();
    assert(First > 0 && "Statepoint has Defs but no GC ptr list");
    unsigned Use = (unsigned)First;
    while (Def < NumDefs) {
      if (MI->getOperand(Use).isReg())
        MI->tieOperands(Def++, Use);
      Use = StackMaps::getNextMetaArgIdx(MI, Use);
    }
  }

  // Run post-isel target hook to adjust this instruction if needed.
  if (II.hasPostISelHook())
    TLI->AdjustInstrPostInstrSelection(*MIB, Node);
}
1128 | ||||
1129 | /// EmitSpecialNode - Generate machine code for a target-independent node and | |||
1130 | /// needed dependencies. | |||
void InstrEmitter::
EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned,
                DenseMap<SDValue, Register> &VRBaseMap) {
  switch (Node->getOpcode()) {
  default:
#ifndef NDEBUG
    Node->dump();
#endif
    llvm_unreachable("This target-independent node should have been selected!");
  case ISD::EntryToken:
    llvm_unreachable("EntryToken should have been excluded from the schedule!");
  case ISD::MERGE_VALUES:
  case ISD::TokenFactor: // fall thru
    // Pure scheduling artifacts; no machine code to emit.
    break;
  case ISD::CopyToReg: {
    Register DestReg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
    SDValue SrcVal = Node->getOperand(2);
    if (Register::isVirtualRegister(DestReg) && SrcVal.isMachineOpcode() &&
        SrcVal.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF) {
      // Instead building a COPY to that vreg destination, build an
      // IMPLICIT_DEF instruction instead.
      BuildMI(*MBB, InsertPos, Node->getDebugLoc(),
              TII->get(TargetOpcode::IMPLICIT_DEF), DestReg);
      break;
    }
    Register SrcReg;
    if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(SrcVal))
      SrcReg = R->getReg();
    else
      SrcReg = getVR(SrcVal, VRBaseMap);

    if (SrcReg == DestReg) // Coalesced away the copy? Ignore.
      break;

    BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TargetOpcode::COPY),
            DestReg).addReg(SrcReg);
    break;
  }
  case ISD::CopyFromReg: {
    unsigned SrcReg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
    EmitCopyFromReg(Node, 0, IsClone, IsCloned, SrcReg, VRBaseMap);
    break;
  }
  case ISD::EH_LABEL:
  case ISD::ANNOTATION_LABEL: {
    unsigned Opc = (Node->getOpcode() == ISD::EH_LABEL)
                       ? TargetOpcode::EH_LABEL
                       : TargetOpcode::ANNOTATION_LABEL;
    MCSymbol *S = cast<LabelSDNode>(Node)->getLabel();
    BuildMI(*MBB, InsertPos, Node->getDebugLoc(),
            TII->get(Opc)).addSym(S);
    break;
  }

  case ISD::LIFETIME_START:
  case ISD::LIFETIME_END: {
    unsigned TarOp = (Node->getOpcode() == ISD::LIFETIME_START)
                         ? TargetOpcode::LIFETIME_START
                         : TargetOpcode::LIFETIME_END;
    // Operand 1 holds the frame index whose lifetime is being marked.
    auto *FI = cast<FrameIndexSDNode>(Node->getOperand(1));
    BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TarOp))
    .addFrameIndex(FI->getIndex());
    break;
  }

  case ISD::PSEUDO_PROBE: {
    unsigned TarOp = TargetOpcode::PSEUDO_PROBE;
    auto Guid = cast<PseudoProbeSDNode>(Node)->getGuid();
    auto Index = cast<PseudoProbeSDNode>(Node)->getIndex();
    auto Attr = cast<PseudoProbeSDNode>(Node)->getAttributes();

    BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TarOp))
        .addImm(Guid)
        .addImm(Index)
        .addImm((uint8_t)PseudoProbeType::Block)
        .addImm(Attr);
    break;
  }

  case ISD::INLINEASM:
  case ISD::INLINEASM_BR: {
    unsigned NumOps = Node->getNumOperands();
    if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
      --NumOps;  // Ignore the glue operand.

    // Create the inline asm machine instruction.
    unsigned TgtOpc = Node->getOpcode() == ISD::INLINEASM_BR
                          ? TargetOpcode::INLINEASM_BR
                          : TargetOpcode::INLINEASM;
    MachineInstrBuilder MIB =
        BuildMI(*MF, Node->getDebugLoc(), TII->get(TgtOpc));

    // Add the asm string as an external symbol operand.
    SDValue AsmStrV = Node->getOperand(InlineAsm::Op_AsmString);
    const char *AsmStr = cast<ExternalSymbolSDNode>(AsmStrV)->getSymbol();
    MIB.addExternalSymbol(AsmStr);

    // Add the HasSideEffect, isAlignStack, AsmDialect, MayLoad and MayStore
    // bits.
    int64_t ExtraInfo =
      cast<ConstantSDNode>(Node->getOperand(InlineAsm::Op_ExtraInfo))->
                          getZExtValue();
    MIB.addImm(ExtraInfo);

    // Remember the operand index of each group's flags, so tied uses can be
    // resolved back to their defining group below.
    SmallVector<unsigned, 8> GroupIdx;

    // Remember registers that are part of early-clobber defs.
    SmallVector<unsigned, 8> ECRegs;

    // Add all of the operand registers to the instruction.
    for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
      unsigned Flags =
        cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
      const unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);

      GroupIdx.push_back(MIB->getNumOperands());
      MIB.addImm(Flags);
      ++i;  // Skip the ID value.

      switch (InlineAsm::getKind(Flags)) {
      default: llvm_unreachable("Bad flags!");
        case InlineAsm::Kind_RegDef:
        for (unsigned j = 0; j != NumVals; ++j, ++i) {
          unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
          // FIXME: Add dead flags for physical and virtual registers defined.
          // For now, mark physical register defs as implicit to help fast
          // regalloc. This makes inline asm look a lot like calls.
          MIB.addReg(Reg,
                     RegState::Define |
                         getImplRegState(Register::isPhysicalRegister(Reg)));
        }
        break;
      case InlineAsm::Kind_RegDefEarlyClobber:
      case InlineAsm::Kind_Clobber:
        for (unsigned j = 0; j != NumVals; ++j, ++i) {
          unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
          MIB.addReg(Reg,
                     RegState::Define | RegState::EarlyClobber |
                         getImplRegState(Register::isPhysicalRegister(Reg)));
          ECRegs.push_back(Reg);
        }
        break;
      case InlineAsm::Kind_RegUse:  // Use of register.
      case InlineAsm::Kind_Imm:  // Immediate.
      case InlineAsm::Kind_Mem:  // Addressing mode.
        // The addressing mode has been selected, just add all of the
        // operands to the machine instruction.
        for (unsigned j = 0; j != NumVals; ++j, ++i)
          AddOperand(MIB, Node->getOperand(i), 0, nullptr, VRBaseMap,
                     /*IsDebug=*/false, IsClone, IsCloned);

        // Manually set isTied bits.
        if (InlineAsm::getKind(Flags) == InlineAsm::Kind_RegUse) {
          unsigned DefGroup = 0;
          if (InlineAsm::isUseOperandTiedToDef(Flags, DefGroup)) {
            // +1 skips the group's flags operand to reach its first register.
            unsigned DefIdx = GroupIdx[DefGroup] + 1;
            unsigned UseIdx = GroupIdx.back() + 1;
            for (unsigned j = 0; j != NumVals; ++j)
              MIB->tieOperands(DefIdx + j, UseIdx + j);
          }
        }
        break;
      }
    }

    // GCC inline assembly allows input operands to also be early-clobber
    // output operands (so long as the operand is written only after it's
    // used), but this does not match the semantics of our early-clobber flag.
    // If an early-clobber operand register is also an input operand register,
    // then remove the early-clobber flag.
    for (unsigned Reg : ECRegs) {
      if (MIB->readsRegister(Reg, TRI)) {
        MachineOperand *MO =
            MIB->findRegisterDefOperand(Reg, false, false, TRI);
        assert(MO && "No def operand for clobbered register?");
        MO->setIsEarlyClobber(false);
      }
    }

    // Get the mdnode from the asm if it exists and add it to the instruction.
    SDValue MDV = Node->getOperand(InlineAsm::Op_MDNode);
    const MDNode *MD = cast<MDNodeSDNode>(MDV)->getMD();
    if (MD)
      MIB.addMetadata(MD);

    MBB->insert(InsertPos, MIB);
    break;
  }
  }
}
1322 | ||||
1323 | /// InstrEmitter - Construct an InstrEmitter and set it to start inserting | |||
1324 | /// at the given position in the given block. | |||
InstrEmitter::InstrEmitter(const TargetMachine &TM, MachineBasicBlock *mbb,
                           MachineBasicBlock::iterator insertpos)
    : MF(mbb->getParent()), MRI(&MF->getRegInfo()),
      TII(MF->getSubtarget().getInstrInfo()),
      TRI(MF->getSubtarget().getRegisterInfo()),
      TLI(MF->getSubtarget().getTargetLowering()), MBB(mbb),
      InsertPos(insertpos) {
  // Cache whether DBG_INSTR_REF should be preferred over DBG_VALUE, as
  // selected by the target's value-tracking variable-location option.
  EmitDebugInstrRefs = TM.Options.ValueTrackingVariableLocations;
}
1 | //===- llvm/CodeGen/SelectionDAGNodes.h - SelectionDAG Nodes ----*- C++ -*-===// | |||
2 | // | |||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | |||
4 | // See https://llvm.org/LICENSE.txt for license information. | |||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | |||
6 | // | |||
7 | //===----------------------------------------------------------------------===// | |||
8 | // | |||
9 | // This file declares the SDNode class and derived classes, which are used to | |||
10 | // represent the nodes and operations present in a SelectionDAG. These nodes | |||
11 | // and operations are machine code level operations, with some similarities to | |||
12 | // the GCC RTL representation. | |||
13 | // | |||
14 | // Clients should include the SelectionDAG.h file instead of this file directly. | |||
15 | // | |||
16 | //===----------------------------------------------------------------------===// | |||
17 | ||||
18 | #ifndef LLVM_CODEGEN_SELECTIONDAGNODES_H | |||
19 | #define LLVM_CODEGEN_SELECTIONDAGNODES_H | |||
20 | ||||
21 | #include "llvm/ADT/APFloat.h" | |||
22 | #include "llvm/ADT/ArrayRef.h" | |||
23 | #include "llvm/ADT/BitVector.h" | |||
24 | #include "llvm/ADT/FoldingSet.h" | |||
25 | #include "llvm/ADT/GraphTraits.h" | |||
26 | #include "llvm/ADT/SmallPtrSet.h" | |||
27 | #include "llvm/ADT/SmallVector.h" | |||
28 | #include "llvm/ADT/ilist_node.h" | |||
29 | #include "llvm/ADT/iterator.h" | |||
30 | #include "llvm/ADT/iterator_range.h" | |||
31 | #include "llvm/CodeGen/ISDOpcodes.h" | |||
32 | #include "llvm/CodeGen/MachineMemOperand.h" | |||
33 | #include "llvm/CodeGen/Register.h" | |||
34 | #include "llvm/CodeGen/ValueTypes.h" | |||
35 | #include "llvm/IR/Constants.h" | |||
36 | #include "llvm/IR/DebugLoc.h" | |||
37 | #include "llvm/IR/Instruction.h" | |||
38 | #include "llvm/IR/Instructions.h" | |||
39 | #include "llvm/IR/Metadata.h" | |||
40 | #include "llvm/IR/Operator.h" | |||
41 | #include "llvm/Support/AlignOf.h" | |||
42 | #include "llvm/Support/AtomicOrdering.h" | |||
43 | #include "llvm/Support/Casting.h" | |||
44 | #include "llvm/Support/ErrorHandling.h" | |||
45 | #include "llvm/Support/MachineValueType.h" | |||
46 | #include "llvm/Support/TypeSize.h" | |||
47 | #include <algorithm> | |||
48 | #include <cassert> | |||
49 | #include <climits> | |||
50 | #include <cstddef> | |||
51 | #include <cstdint> | |||
52 | #include <cstring> | |||
53 | #include <iterator> | |||
54 | #include <string> | |||
55 | #include <tuple> | |||
56 | ||||
57 | namespace llvm { | |||
58 | ||||
59 | class APInt; | |||
60 | class Constant; | |||
61 | template <typename T> struct DenseMapInfo; | |||
62 | class GlobalValue; | |||
63 | class MachineBasicBlock; | |||
64 | class MachineConstantPoolValue; | |||
65 | class MCSymbol; | |||
66 | class raw_ostream; | |||
67 | class SDNode; | |||
68 | class SelectionDAG; | |||
69 | class Type; | |||
70 | class Value; | |||
71 | ||||
72 | void checkForCycles(const SDNode *N, const SelectionDAG *DAG = nullptr, | |||
73 | bool force = false); | |||
74 | ||||
/// This represents a list of ValueType's that has been intern'd by
/// a SelectionDAG. Instances of this simple value class are returned by
/// SelectionDAG::getVTList(...).
///
struct SDVTList {
  const EVT *VTs;       // Interned array of result types; storage is owned by the DAG.
  unsigned int NumVTs;  // Number of entries in VTs.
};
83 | ||||
namespace ISD {

/// Node predicates
///
/// These free functions classify the shape of constant vector nodes. They
/// are declarations only; definitions live in the SelectionDAG sources.

/// If N is a BUILD_VECTOR or SPLAT_VECTOR node whose elements are all the
/// same constant or undefined, return true and return the constant value in
/// \p SplatValue.
bool isConstantSplatVector(const SDNode *N, APInt &SplatValue);

/// Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where
/// all of the elements are ~0 or undef. If \p BuildVectorOnly is set to
/// true, it only checks BUILD_VECTOR.
bool isConstantSplatVectorAllOnes(const SDNode *N,
                                  bool BuildVectorOnly = false);

/// Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where
/// all of the elements are 0 or undef. If \p BuildVectorOnly is set to true, it
/// only checks BUILD_VECTOR.
bool isConstantSplatVectorAllZeros(const SDNode *N,
                                   bool BuildVectorOnly = false);

/// Return true if the specified node is a BUILD_VECTOR where all of the
/// elements are ~0 or undef.
bool isBuildVectorAllOnes(const SDNode *N);

/// Return true if the specified node is a BUILD_VECTOR where all of the
/// elements are 0 or undef.
bool isBuildVectorAllZeros(const SDNode *N);

/// Return true if the specified node is a BUILD_VECTOR node of all
/// ConstantSDNode or undef.
bool isBuildVectorOfConstantSDNodes(const SDNode *N);

/// Return true if the specified node is a BUILD_VECTOR node of all
/// ConstantFPSDNode or undef.
bool isBuildVectorOfConstantFPSDNodes(const SDNode *N);

/// Return true if the node has at least one operand and all operands of the
/// specified node are ISD::UNDEF.
bool allOperandsUndef(const SDNode *N);

} // end namespace ISD
126 | ||||
127 | //===----------------------------------------------------------------------===// | |||
128 | /// Unlike LLVM values, Selection DAG nodes may return multiple | |||
129 | /// values as the result of a computation. Many nodes return multiple values, | |||
130 | /// from loads (which define a token and a return value) to ADDC (which returns | |||
131 | /// a result and a carry value), to calls (which may return an arbitrary number | |||
132 | /// of values). | |||
133 | /// | |||
134 | /// As such, each use of a SelectionDAG computation must indicate the node that | |||
135 | /// computes it as well as which return value to use from that node. This pair | |||
136 | /// of information is represented with the SDValue value type. | |||
137 | /// | |||
class SDValue {
  friend struct DenseMapInfo<SDValue>;

  SDNode *Node = nullptr; // The node defining the value we are using.
  unsigned ResNo = 0;     // Which return value of the node we are using.

public:
  // Default constructor yields a null SDValue (Node == nullptr, ResNo == 0).
  SDValue() = default;
  SDValue(SDNode *node, unsigned resno);

  /// get the index which selects a specific result in the SDNode
  unsigned getResNo() const { return ResNo; }

  /// get the SDNode which holds the desired result
  SDNode *getNode() const { return Node; }

  /// set the SDNode
  void setNode(SDNode *N) { Node = N; }

  // Arrow operator forwards to the underlying SDNode. NOTE(review): Node is
  // returned unchecked — calling through a default-constructed (null)
  // SDValue is undefined behavior; this is the kind of call path static
  // analyzers flag as "Called C++ object pointer is null".
  inline SDNode *operator->() const { return Node; }

  // Two SDValues are equal iff they name the same result of the same node.
  bool operator==(const SDValue &O) const {
    return Node == O.Node && ResNo == O.ResNo;
  }
  bool operator!=(const SDValue &O) const {
    return !operator==(O);
  }
  // Lexicographic (Node, ResNo) ordering, useful for sorted containers.
  bool operator<(const SDValue &O) const {
    return std::tie(Node, ResNo) < std::tie(O.Node, O.ResNo);
  }
  // True when this SDValue refers to an actual node.
  explicit operator bool() const {
    return Node != nullptr;
  }

  // Return another result of the same node.
  SDValue getValue(unsigned R) const {
    return SDValue(Node, R);
  }

  /// Return true if this node is an operand of N.
  bool isOperandOf(const SDNode *N) const;

  /// Return the ValueType of the referenced return value.
  inline EVT getValueType() const;

  /// Return the simple ValueType of the referenced return value.
  MVT getSimpleValueType() const {
    return getValueType().getSimpleVT();
  }

  /// Returns the size of the value in bits.
  ///
  /// If the value type is a scalable vector type, the scalable property will
  /// be set and the runtime size will be a positive integer multiple of the
  /// base size.
  TypeSize getValueSizeInBits() const {
    return getValueType().getSizeInBits();
  }

  // Fixed-size-only query on the scalar element type; asserts (inside
  // getFixedSizeInBits) for scalable types.
  uint64_t getScalarValueSizeInBits() const {
    return getValueType().getScalarType().getFixedSizeInBits();
  }

  // Forwarding methods - These forward to the corresponding methods in SDNode.
  // They are defined out-of-line (after SDNode) since they dereference Node.
  inline unsigned getOpcode() const;
  inline unsigned getNumOperands() const;
  inline const SDValue &getOperand(unsigned i) const;
  inline uint64_t getConstantOperandVal(unsigned i) const;
  inline const APInt &getConstantOperandAPInt(unsigned i) const;
  inline bool isTargetMemoryOpcode() const;
  inline bool isTargetOpcode() const;
  inline bool isMachineOpcode() const;
  inline bool isUndef() const;
  inline unsigned getMachineOpcode() const;
  inline const DebugLoc &getDebugLoc() const;
  inline void dump() const;
  inline void dump(const SelectionDAG *G) const;
  inline void dumpr() const;
  inline void dumpr(const SelectionDAG *G) const;

  /// Return true if this operand (which must be a chain) reaches the
  /// specified operand without crossing any side-effecting instructions.
  /// In practice, this looks through token factors and non-volatile loads.
  /// In order to remain efficient, this only
  /// looks a couple of nodes in, it does not do an exhaustive search.
  bool reachesChainWithoutSideEffects(SDValue Dest,
                                      unsigned Depth = 2) const;

  /// Return true if there are no nodes using value ResNo of Node.
  inline bool use_empty() const;

  /// Return true if there is exactly one node using value ResNo of Node.
  inline bool hasOneUse() const;
};
231 | ||||
232 | template<> struct DenseMapInfo<SDValue> { | |||
233 | static inline SDValue getEmptyKey() { | |||
234 | SDValue V; | |||
235 | V.ResNo = -1U; | |||
236 | return V; | |||
237 | } | |||
238 | ||||
239 | static inline SDValue getTombstoneKey() { | |||
240 | SDValue V; | |||
241 | V.ResNo = -2U; | |||
242 | return V; | |||
243 | } | |||
244 | ||||
245 | static unsigned getHashValue(const SDValue &Val) { | |||
246 | return ((unsigned)((uintptr_t)Val.getNode() >> 4) ^ | |||
247 | (unsigned)((uintptr_t)Val.getNode() >> 9)) + Val.getResNo(); | |||
248 | } | |||
249 | ||||
250 | static bool isEqual(const SDValue &LHS, const SDValue &RHS) { | |||
251 | return LHS == RHS; | |||
252 | } | |||
253 | }; | |||
254 | ||||
/// Allow casting operators to work directly on
/// SDValues as if they were SDNode*'s.
template<> struct simplify_type<SDValue> {
  using SimpleType = SDNode *;

  // Unwrap the SDValue to its defining node for isa<>/cast<>/dyn_cast<>.
  static SimpleType getSimplifiedValue(SDValue &Val) {
    return Val.getNode();
  }
};
// Same unwrapping for const SDValue; the SDNode* is deliberately non-const
// (see the commented-out /*const*/) so cast<> results stay mutable.
template<> struct simplify_type<const SDValue> {
  using SimpleType = /*const*/ SDNode *;

  static SimpleType getSimplifiedValue(const SDValue &Val) {
    return Val.getNode();
  }
};
271 | ||||
/// Represents a use of a SDNode. This class holds an SDValue,
/// which records the SDNode being used and the result number, a
/// pointer to the SDNode using the value, and Next and Prev pointers,
/// which link together all the uses of an SDNode.
///
class SDUse {
  /// Val - The value being used.
  SDValue Val;
  /// User - The user of this value.
  SDNode *User = nullptr;
  /// Prev, Next - Pointers to the uses list of the SDNode referred by
  /// this operand.
  /// Prev points at whatever pointer points at *this* use (either the list
  /// head or the previous use's Next field), enabling O(1) unlinking.
  SDUse **Prev = nullptr;
  SDUse *Next = nullptr;

public:
  SDUse() = default;
  // SDUse objects are linked into intrusive lists by address; copying one
  // would corrupt those lists, so copies are forbidden.
  SDUse(const SDUse &U) = delete;
  SDUse &operator=(const SDUse &) = delete;

  /// Normally SDUse will just implicitly convert to an SDValue that it holds.
  operator const SDValue&() const { return Val; }

  /// If implicit conversion to SDValue doesn't work, the get() method returns
  /// the SDValue.
  const SDValue &get() const { return Val; }

  /// This returns the SDNode that contains this Use.
  SDNode *getUser() { return User; }

  /// Get the next SDUse in the use list.
  SDUse *getNext() const { return Next; }

  /// Convenience function for get().getNode().
  SDNode *getNode() const { return Val.getNode(); }
  /// Convenience function for get().getResNo().
  unsigned getResNo() const { return Val.getResNo(); }
  /// Convenience function for get().getValueType().
  EVT getValueType() const { return Val.getValueType(); }

  /// Convenience function for get().operator==
  bool operator==(const SDValue &V) const {
    return Val == V;
  }

  /// Convenience function for get().operator!=
  bool operator!=(const SDValue &V) const {
    return Val != V;
  }

  /// Convenience function for get().operator<
  bool operator<(const SDValue &V) const {
    return Val < V;
  }

private:
  friend class SelectionDAG;
  friend class SDNode;
  // TODO: unfriend HandleSDNode once we fix its operand handling.
  friend class HandleSDNode;

  void setUser(SDNode *p) { User = p; }

  /// Remove this use from its existing use list, assign it the
  /// given value, and add it to the new value's node's use list.
  inline void set(const SDValue &V);
  /// Like set, but only supports initializing a newly-allocated
  /// SDUse with a non-null value.
  inline void setInitial(const SDValue &V);
  /// Like set, but only sets the Node portion of the value,
  /// leaving the ResNo portion unmodified.
  inline void setNode(SDNode *N);

  // Push this use onto the front of the list rooted at *List, wiring up the
  // back-pointer of the old head so it can still unlink itself in O(1).
  void addToList(SDUse **List) {
    Next = *List;
    if (Next) Next->Prev = &Next;
    Prev = List;
    *List = this;
  }

  // Unlink this use in O(1): redirect whatever pointed at us to our Next.
  void removeFromList() {
    *Prev = Next;
    if (Next) Next->Prev = Prev;
  }
};
357 | ||||
/// simplify_type specializations - Allow casting operators to work directly on
/// SDValues as if they were SDNode*'s.
template<> struct simplify_type<SDUse> {
  using SimpleType = SDNode *;

  // Unwrap the use to the node it refers to, for isa<>/cast<>/dyn_cast<>.
  static SimpleType getSimplifiedValue(SDUse &Val) {
    return Val.getNode();
  }
};
367 | ||||
368 | /// These are IR-level optimization flags that may be propagated to SDNodes. | |||
369 | /// TODO: This data structure should be shared by the IR optimizer and the | |||
370 | /// the backend. | |||
371 | struct SDNodeFlags { | |||
372 | private: | |||
373 | bool NoUnsignedWrap : 1; | |||
374 | bool NoSignedWrap : 1; | |||
375 | bool Exact : 1; | |||
376 | bool NoNaNs : 1; | |||
377 | bool NoInfs : 1; | |||
378 | bool NoSignedZeros : 1; | |||
379 | bool AllowReciprocal : 1; | |||
380 | bool AllowContract : 1; | |||
381 | bool ApproximateFuncs : 1; | |||
382 | bool AllowReassociation : 1; | |||
383 | ||||
384 | // We assume instructions do not raise floating-point exceptions by default, | |||
385 | // and only those marked explicitly may do so. We could choose to represent | |||
386 | // this via a positive "FPExcept" flags like on the MI level, but having a | |||
387 | // negative "NoFPExcept" flag here (that defaults to true) makes the flag | |||
388 | // intersection logic more straightforward. | |||
389 | bool NoFPExcept : 1; | |||
390 | ||||
391 | public: | |||
392 | /// Default constructor turns off all optimization flags. | |||
393 | SDNodeFlags() | |||
394 | : NoUnsignedWrap(false), NoSignedWrap(false), Exact(false), NoNaNs(false), | |||
395 | NoInfs(false), NoSignedZeros(false), AllowReciprocal(false), | |||
396 | AllowContract(false), ApproximateFuncs(false), | |||
397 | AllowReassociation(false), NoFPExcept(false) {} | |||
398 | ||||
399 | /// Propagate the fast-math-flags from an IR FPMathOperator. | |||
400 | void copyFMF(const FPMathOperator &FPMO) { | |||
401 | setNoNaNs(FPMO.hasNoNaNs()); | |||
402 | setNoInfs(FPMO.hasNoInfs()); | |||
403 | setNoSignedZeros(FPMO.hasNoSignedZeros()); | |||
404 | setAllowReciprocal(FPMO.hasAllowReciprocal()); | |||
405 | setAllowContract(FPMO.hasAllowContract()); | |||
406 | setApproximateFuncs(FPMO.hasApproxFunc()); | |||
407 | setAllowReassociation(FPMO.hasAllowReassoc()); | |||
408 | } | |||
409 | ||||
410 | // These are mutators for each flag. | |||
411 | void setNoUnsignedWrap(bool b) { NoUnsignedWrap = b; } | |||
412 | void setNoSignedWrap(bool b) { NoSignedWrap = b; } | |||
413 | void setExact(bool b) { Exact = b; } | |||
414 | void setNoNaNs(bool b) { NoNaNs = b; } | |||
415 | void setNoInfs(bool b) { NoInfs = b; } | |||
416 | void setNoSignedZeros(bool b) { NoSignedZeros = b; } | |||
417 | void setAllowReciprocal(bool b) { AllowReciprocal = b; } | |||
418 | void setAllowContract(bool b) { AllowContract = b; } | |||
419 | void setApproximateFuncs(bool b) { ApproximateFuncs = b; } | |||
420 | void setAllowReassociation(bool b) { AllowReassociation = b; } | |||
421 | void setNoFPExcept(bool b) { NoFPExcept = b; } | |||
422 | ||||
423 | // These are accessors for each flag. | |||
424 | bool hasNoUnsignedWrap() const { return NoUnsignedWrap; } | |||
425 | bool hasNoSignedWrap() const { return NoSignedWrap; } | |||
426 | bool hasExact() const { return Exact; } | |||
427 | bool hasNoNaNs() const { return NoNaNs; } | |||
428 | bool hasNoInfs() const { return NoInfs; } | |||
429 | bool hasNoSignedZeros() const { return NoSignedZeros; } | |||
430 | bool hasAllowReciprocal() const { return AllowReciprocal; } | |||
431 | bool hasAllowContract() const { return AllowContract; } | |||
432 | bool hasApproximateFuncs() const { return ApproximateFuncs; } | |||
433 | bool hasAllowReassociation() const { return AllowReassociation; } | |||
434 | bool hasNoFPExcept() const { return NoFPExcept; } | |||
435 | ||||
436 | /// Clear any flags in this flag set that aren't also set in Flags. All | |||
437 | /// flags will be cleared if Flags are undefined. | |||
438 | void intersectWith(const SDNodeFlags Flags) { | |||
439 | NoUnsignedWrap &= Flags.NoUnsignedWrap; | |||
440 | NoSignedWrap &= Flags.NoSignedWrap; | |||
441 | Exact &= Flags.Exact; | |||
442 | NoNaNs &= Flags.NoNaNs; | |||
443 | NoInfs &= Flags.NoInfs; | |||
444 | NoSignedZeros &= Flags.NoSignedZeros; | |||
445 | AllowReciprocal &= Flags.AllowReciprocal; | |||
446 | AllowContract &= Flags.AllowContract; | |||
447 | ApproximateFuncs &= Flags.ApproximateFuncs; | |||
448 | AllowReassociation &= Flags.AllowReassociation; | |||
449 | NoFPExcept &= Flags.NoFPExcept; | |||
450 | } | |||
451 | }; | |||
452 | ||||
453 | /// Represents one node in the SelectionDAG. | |||
454 | /// | |||
455 | class SDNode : public FoldingSetNode, public ilist_node<SDNode> { | |||
456 | private: | |||
457 | /// The operation that this node performs. | |||
458 | int16_t NodeType; | |||
459 | ||||
460 | protected: | |||
461 | // We define a set of mini-helper classes to help us interpret the bits in our | |||
462 | // SubclassData. These are designed to fit within a uint16_t so they pack | |||
463 | // with NodeType. | |||
464 | ||||
465 | #if defined(_AIX) && (!defined(__GNUC__4) || defined(__clang__1)) | |||
466 | // Except for GCC; by default, AIX compilers store bit-fields in 4-byte words | |||
467 | // and give the `pack` pragma push semantics. | |||
468 | #define BEGIN_TWO_BYTE_PACK() _Pragma("pack(2)")pack(2) | |||
469 | #define END_TWO_BYTE_PACK() _Pragma("pack(pop)")pack(pop) | |||
470 | #else | |||
471 | #define BEGIN_TWO_BYTE_PACK() | |||
472 | #define END_TWO_BYTE_PACK() | |||
473 | #endif | |||
474 | ||||
475 | BEGIN_TWO_BYTE_PACK() | |||
476 | class SDNodeBitfields { | |||
477 | friend class SDNode; | |||
478 | friend class MemIntrinsicSDNode; | |||
479 | friend class MemSDNode; | |||
480 | friend class SelectionDAG; | |||
481 | ||||
482 | uint16_t HasDebugValue : 1; | |||
483 | uint16_t IsMemIntrinsic : 1; | |||
484 | uint16_t IsDivergent : 1; | |||
485 | }; | |||
486 | enum { NumSDNodeBits = 3 }; | |||
487 | ||||
488 | class ConstantSDNodeBitfields { | |||
489 | friend class ConstantSDNode; | |||
490 | ||||
491 | uint16_t : NumSDNodeBits; | |||
492 | ||||
493 | uint16_t IsOpaque : 1; | |||
494 | }; | |||
495 | ||||
496 | class MemSDNodeBitfields { | |||
497 | friend class MemSDNode; | |||
498 | friend class MemIntrinsicSDNode; | |||
499 | friend class AtomicSDNode; | |||
500 | ||||
501 | uint16_t : NumSDNodeBits; | |||
502 | ||||
503 | uint16_t IsVolatile : 1; | |||
504 | uint16_t IsNonTemporal : 1; | |||
505 | uint16_t IsDereferenceable : 1; | |||
506 | uint16_t IsInvariant : 1; | |||
507 | }; | |||
508 | enum { NumMemSDNodeBits = NumSDNodeBits + 4 }; | |||
509 | ||||
510 | class LSBaseSDNodeBitfields { | |||
511 | friend class LSBaseSDNode; | |||
512 | friend class MaskedLoadStoreSDNode; | |||
513 | friend class MaskedGatherScatterSDNode; | |||
514 | ||||
515 | uint16_t : NumMemSDNodeBits; | |||
516 | ||||
517 | // This storage is shared between disparate class hierarchies to hold an | |||
518 | // enumeration specific to the class hierarchy in use. | |||
519 | // LSBaseSDNode => enum ISD::MemIndexedMode | |||
520 | // MaskedLoadStoreBaseSDNode => enum ISD::MemIndexedMode | |||
521 | // MaskedGatherScatterSDNode => enum ISD::MemIndexType | |||
522 | uint16_t AddressingMode : 3; | |||
523 | }; | |||
524 | enum { NumLSBaseSDNodeBits = NumMemSDNodeBits + 3 }; | |||
525 | ||||
526 | class LoadSDNodeBitfields { | |||
527 | friend class LoadSDNode; | |||
528 | friend class MaskedLoadSDNode; | |||
529 | friend class MaskedGatherSDNode; | |||
530 | ||||
531 | uint16_t : NumLSBaseSDNodeBits; | |||
532 | ||||
533 | uint16_t ExtTy : 2; // enum ISD::LoadExtType | |||
534 | uint16_t IsExpanding : 1; | |||
535 | }; | |||
536 | ||||
537 | class StoreSDNodeBitfields { | |||
538 | friend class StoreSDNode; | |||
539 | friend class MaskedStoreSDNode; | |||
540 | friend class MaskedScatterSDNode; | |||
541 | ||||
542 | uint16_t : NumLSBaseSDNodeBits; | |||
543 | ||||
544 | uint16_t IsTruncating : 1; | |||
545 | uint16_t IsCompressing : 1; | |||
546 | }; | |||
547 | ||||
548 | union { | |||
549 | char RawSDNodeBits[sizeof(uint16_t)]; | |||
550 | SDNodeBitfields SDNodeBits; | |||
551 | ConstantSDNodeBitfields ConstantSDNodeBits; | |||
552 | MemSDNodeBitfields MemSDNodeBits; | |||
553 | LSBaseSDNodeBitfields LSBaseSDNodeBits; | |||
554 | LoadSDNodeBitfields LoadSDNodeBits; | |||
555 | StoreSDNodeBitfields StoreSDNodeBits; | |||
556 | }; | |||
557 | END_TWO_BYTE_PACK() | |||
558 | #undef BEGIN_TWO_BYTE_PACK | |||
559 | #undef END_TWO_BYTE_PACK | |||
560 | ||||
561 | // RawSDNodeBits must cover the entirety of the union. This means that all of | |||
562 | // the union's members must have size <= RawSDNodeBits. We write the RHS as | |||
563 | // "2" instead of sizeof(RawSDNodeBits) because MSVC can't handle the latter. | |||
564 | static_assert(sizeof(SDNodeBitfields) <= 2, "field too wide"); | |||
565 | static_assert(sizeof(ConstantSDNodeBitfields) <= 2, "field too wide"); | |||
566 | static_assert(sizeof(MemSDNodeBitfields) <= 2, "field too wide"); | |||
567 | static_assert(sizeof(LSBaseSDNodeBitfields) <= 2, "field too wide"); | |||
568 | static_assert(sizeof(LoadSDNodeBitfields) <= 2, "field too wide"); | |||
569 | static_assert(sizeof(StoreSDNodeBitfields) <= 2, "field too wide"); | |||
570 | ||||
571 | private: | |||
572 | friend class SelectionDAG; | |||
573 | // TODO: unfriend HandleSDNode once we fix its operand handling. | |||
574 | friend class HandleSDNode; | |||
575 | ||||
576 | /// Unique id per SDNode in the DAG. | |||
577 | int NodeId = -1; | |||
578 | ||||
579 | /// The values that are used by this operation. | |||
580 | SDUse *OperandList = nullptr; | |||
581 | ||||
582 | /// The types of the values this node defines. SDNode's may | |||
583 | /// define multiple values simultaneously. | |||
584 | const EVT *ValueList; | |||
585 | ||||
586 | /// List of uses for this SDNode. | |||
587 | SDUse *UseList = nullptr; | |||
588 | ||||
589 | /// The number of entries in the Operand/Value list. | |||
590 | unsigned short NumOperands = 0; | |||
591 | unsigned short NumValues; | |||
592 | ||||
593 | // The ordering of the SDNodes. It roughly corresponds to the ordering of the | |||
594 | // original LLVM instructions. | |||
595 | // This is used for turning off scheduling, because we'll forgo | |||
596 | // the normal scheduling algorithms and output the instructions according to | |||
597 | // this ordering. | |||
598 | unsigned IROrder; | |||
599 | ||||
600 | /// Source line information. | |||
601 | DebugLoc debugLoc; | |||
602 | ||||
603 | /// Return a pointer to the specified value type. | |||
604 | static const EVT *getValueTypeList(EVT VT); | |||
605 | ||||
606 | SDNodeFlags Flags; | |||
607 | ||||
608 | public: | |||
609 | /// Unique and persistent id per SDNode in the DAG. | |||
610 | /// Used for debug printing. | |||
611 | uint16_t PersistentId; | |||
612 | ||||
613 | //===--------------------------------------------------------------------===// | |||
614 | // Accessors | |||
615 | // | |||
616 | ||||
617 | /// Return the SelectionDAG opcode value for this node. For | |||
618 | /// pre-isel nodes (those for which isMachineOpcode returns false), these | |||
619 | /// are the opcode values in the ISD and <target>ISD namespaces. For | |||
620 | /// post-isel opcodes, see getMachineOpcode. | |||
621 | unsigned getOpcode() const { return (unsigned short)NodeType; } | |||
622 | ||||
623 | /// Test if this node has a target-specific opcode (in the | |||
624 | /// \<target\>ISD namespace). | |||
625 | bool isTargetOpcode() const { return NodeType >= ISD::BUILTIN_OP_END; } | |||
626 | ||||
627 | /// Test if this node has a target-specific opcode that may raise | |||
628 | /// FP exceptions (in the \<target\>ISD namespace and greater than | |||
629 | /// FIRST_TARGET_STRICTFP_OPCODE). Note that all target memory | |||
630 | /// opcode are currently automatically considered to possibly raise | |||
631 | /// FP exceptions as well. | |||
632 | bool isTargetStrictFPOpcode() const { | |||
633 | return NodeType >= ISD::FIRST_TARGET_STRICTFP_OPCODE; | |||
634 | } | |||
635 | ||||
636 | /// Test if this node has a target-specific | |||
637 | /// memory-referencing opcode (in the \<target\>ISD namespace and | |||
638 | /// greater than FIRST_TARGET_MEMORY_OPCODE). | |||
639 | bool isTargetMemoryOpcode() const { | |||
640 | return NodeType >= ISD::FIRST_TARGET_MEMORY_OPCODE; | |||
641 | } | |||
642 | ||||
643 | /// Return true if the type of the node type undefined. | |||
644 | bool isUndef() const { return NodeType == ISD::UNDEF; } | |||
645 | ||||
  /// Test if this node is a memory intrinsic (with valid pointer information).
  /// INTRINSIC_W_CHAIN and INTRINSIC_VOID nodes are sometimes created for
  /// non-memory intrinsics (with chains) that are not really instances of
  /// MemSDNode. For such nodes, we need some extra state to determine the
  /// proper classof relationship.
  bool isMemIntrinsic() const {
    // Both the opcode and the explicit IsMemIntrinsic bit must agree; the
    // bit alone is meaningless for other opcodes.
    return (NodeType == ISD::INTRINSIC_W_CHAIN ||
            NodeType == ISD::INTRINSIC_VOID) &&
           SDNodeBits.IsMemIntrinsic;
  }
656 | ||||
  /// Test if this node is a strict floating point pseudo-op.
  /// The case list is generated from ConstrainedOps.def, so every constrained
  /// FP operation added there is covered automatically; the two explicit
  /// cases are strict ops that have no ConstrainedOps.def entry.
  bool isStrictFPOpcode() {
    switch (NodeType) {
    default:
      return false;
    case ISD::STRICT_FP16_TO_FP:
    case ISD::STRICT_FP_TO_FP16:
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
  case ISD::STRICT_##DAGN:
#include "llvm/IR/ConstrainedOps.def"
      return true;
    }
  }
670 | ||||
671 | /// Test if this node has a post-isel opcode, directly | |||
672 | /// corresponding to a MachineInstr opcode. | |||
673 | bool isMachineOpcode() const { return NodeType < 0; } | |||
674 | ||||
  /// This may only be called if isMachineOpcode returns
  /// true. It returns the MachineInstr opcode value that the node's opcode
  /// corresponds to.
  unsigned getMachineOpcode() const {
    assert(isMachineOpcode() && "Not a MachineInstr opcode!");
    // Machine opcodes are stored as the bitwise complement of the real
    // opcode (hence NodeType < 0 in isMachineOpcode); undo that here.
    return ~NodeType;
  }
682 | ||||
683 | bool getHasDebugValue() const { return SDNodeBits.HasDebugValue; } | |||
684 | void setHasDebugValue(bool b) { SDNodeBits.HasDebugValue = b; } | |||
685 | ||||
686 | bool isDivergent() const { return SDNodeBits.IsDivergent; } | |||
687 | ||||
688 | /// Return true if there are no uses of this node. | |||
689 | bool use_empty() const { return UseList == nullptr; } | |||
690 | ||||
691 | /// Return true if there is exactly one use of this node. | |||
692 | bool hasOneUse() const { return hasSingleElement(uses()); } | |||
693 | ||||
694 | /// Return the number of uses of this node. This method takes | |||
695 | /// time proportional to the number of uses. | |||
696 | size_t use_size() const { return std::distance(use_begin(), use_end()); } | |||
697 | ||||
698 | /// Return the unique node id. | |||
699 | int getNodeId() const { return NodeId; } | |||
700 | ||||
701 | /// Set unique node id. | |||
702 | void setNodeId(int Id) { NodeId = Id; } | |||
703 | ||||
704 | /// Return the node ordering. | |||
705 | unsigned getIROrder() const { return IROrder; } | |||
706 | ||||
707 | /// Set the node ordering. | |||
708 | void setIROrder(unsigned Order) { IROrder = Order; } | |||
709 | ||||
710 | /// Return the source location info. | |||
711 | const DebugLoc &getDebugLoc() const { return debugLoc; } | |||
712 | ||||
713 | /// Set source location info. Try to avoid this, putting | |||
714 | /// it in the constructor is preferable. | |||
715 | void setDebugLoc(DebugLoc dl) { debugLoc = std::move(dl); } | |||
716 | ||||
  /// This class provides iterator support for SDUse
  /// operands that use a specific SDNode.
  /// It walks the intrusive singly-linked use list (SDUse::Next); the
  /// end iterator is represented by a null Op pointer.
  class use_iterator {
    friend class SDNode;

    SDUse *Op = nullptr;

    explicit use_iterator(SDUse *op) : Op(op) {}

  public:
    using iterator_category = std::forward_iterator_tag;
    using value_type = SDUse;
    using difference_type = std::ptrdiff_t;
    using pointer = value_type *;
    using reference = value_type &;

    use_iterator() = default;
    use_iterator(const use_iterator &I) : Op(I.Op) {}

    bool operator==(const use_iterator &x) const {
      return Op == x.Op;
    }
    bool operator!=(const use_iterator &x) const {
      return !operator==(x);
    }

    /// Return true if this iterator is at the end of uses list.
    bool atEnd() const { return Op == nullptr; }

    // Iterator traversal: forward iteration only.
    use_iterator &operator++() { // Preincrement
      assert(Op && "Cannot increment end iterator!");
      Op = Op->getNext();
      return *this;
    }

    use_iterator operator++(int) { // Postincrement
      use_iterator tmp = *this; ++*this; return tmp;
    }

    /// Retrieve a pointer to the current user node.
    /// Note: dereferencing yields the *using* SDNode, not the SDUse;
    /// use getUse() for the SDUse itself.
    SDNode *operator*() const {
      assert(Op && "Cannot dereference end iterator!");
      return Op->getUser();
    }

    SDNode *operator->() const { return operator*(); }

    SDUse &getUse() const { return *Op; }

    /// Retrieve the operand # of this use in its user.
    unsigned getOperandNo() const {
      assert(Op && "Cannot dereference end iterator!");
      // Uses live in the user's contiguous OperandList, so pointer
      // subtraction gives the operand index directly.
      return (unsigned)(Op - Op->getUser()->OperandList);
    }
  };
773 | ||||
/// Provide iteration support to walk over all uses of an SDNode.
use_iterator use_begin() const {
  return use_iterator(UseList);
}

// The end iterator is the same for every node: a null SDUse link.
static use_iterator use_end() { return use_iterator(nullptr); }

inline iterator_range<use_iterator> uses() {
  return make_range(use_begin(), use_end());
}
inline iterator_range<use_iterator> uses() const {
  return make_range(use_begin(), use_end());
}
787 | ||||
788 | /// Return true if there are exactly NUSES uses of the indicated value. | |||
789 | /// This method ignores uses of other values defined by this operation. | |||
790 | bool hasNUsesOfValue(unsigned NUses, unsigned Value) const; | |||
791 | ||||
792 | /// Return true if there are any use of the indicated value. | |||
793 | /// This method ignores uses of other values defined by this operation. | |||
794 | bool hasAnyUseOfValue(unsigned Value) const; | |||
795 | ||||
796 | /// Return true if this node is the only use of N. | |||
797 | bool isOnlyUserOf(const SDNode *N) const; | |||
798 | ||||
799 | /// Return true if this node is an operand of N. | |||
800 | bool isOperandOf(const SDNode *N) const; | |||
801 | ||||
/// Return true if this node is a predecessor of N.
/// NOTE: Implemented on top of hasPredecessor and every bit as
/// expensive. Use carefully.
bool isPredecessorOf(const SDNode *N) const {
  // Delegates: "this precedes N" == "N has this as a predecessor".
  return N->hasPredecessor(this);
}
808 | ||||
809 | /// Return true if N is a predecessor of this node. | |||
810 | /// N is either an operand of this node, or can be reached by recursively | |||
811 | /// traversing up the operands. | |||
812 | /// NOTE: This is an expensive method. Use it carefully. | |||
813 | bool hasPredecessor(const SDNode *N) const; | |||
814 | ||||
815 | /// Returns true if N is a predecessor of any node in Worklist. This | |||
816 | /// helper keeps Visited and Worklist sets externally to allow unions | |||
817 | /// searches to be performed in parallel, caching of results across | |||
818 | /// queries and incremental addition to Worklist. Stops early if N is | |||
819 | /// found but will resume. Remember to clear Visited and Worklists | |||
820 | /// if DAG changes. MaxSteps gives a maximum number of nodes to visit before | |||
821 | /// giving up. The TopologicalPrune flag signals that positive NodeIds are | |||
822 | /// topologically ordered (Operands have strictly smaller node id) and search | |||
823 | /// can be pruned leveraging this. | |||
static bool hasPredecessorHelper(const SDNode *N,
                                 SmallPtrSetImpl<const SDNode *> &Visited,
                                 SmallVectorImpl<const SDNode *> &Worklist,
                                 unsigned int MaxSteps = 0,
                                 bool TopologicalPrune = false) {
  SmallVector<const SDNode *, 8> DeferredNodes;
  // A previous call with the same Visited set may already have proven N
  // reachable.
  if (Visited.count(N))
    return true;

  // Node Id's are assigned in three places: As a topological
  // ordering (> 0), during legalization (results in values set to
  // 0), new nodes (set to -1). If N has a topolgical id then we
  // know that all nodes with ids smaller than it cannot be
  // successors and we need not check them. Filter out all node
  // that can't be matches. We add them to the worklist before exit
  // in case of multiple calls. Note that during selection the topological id
  // may be violated if a node's predecessor is selected before it. We mark
  // this at selection negating the id of unselected successors and
  // restricting topological pruning to positive ids.

  int NId = N->getNodeId();
  // If we Invalidated the Id, reconstruct original NId.
  if (NId < -1)
    NId = -(NId + 1);

  bool Found = false;
  while (!Worklist.empty()) {
    const SDNode *M = Worklist.pop_back_val();
    int MId = M->getNodeId();
    if (TopologicalPrune && M->getOpcode() != ISD::TokenFactor && (NId > 0) &&
        (MId > 0) && (MId < NId)) {
      // M's id says it cannot reach N; set it aside so later calls with the
      // same Worklist still consider it.
      DeferredNodes.push_back(M);
      continue;
    }
    // Expand M's operands; record newly seen nodes for further traversal.
    for (const SDValue &OpV : M->op_values()) {
      SDNode *Op = OpV.getNode();
      if (Visited.insert(Op).second)
        Worklist.push_back(Op);
      if (Op == N)
        Found = true;
    }
    if (Found)
      break;
    if (MaxSteps != 0 && Visited.size() >= MaxSteps)
      break;
  }
  // Push deferred nodes back on worklist.
  Worklist.append(DeferredNodes.begin(), DeferredNodes.end());
  // If we bailed early, conservatively return found.
  if (MaxSteps != 0 && Visited.size() >= MaxSteps)
    return true;
  return Found;
}
877 | ||||
878 | /// Return true if all the users of N are contained in Nodes. | |||
879 | /// NOTE: Requires at least one match, but doesn't require them all. | |||
880 | static bool areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N); | |||
881 | ||||
/// Return the number of values used by this operation.
unsigned getNumOperands() const { return NumOperands; }

/// Return the maximum number of operands that a SDNode can hold.
static constexpr size_t getMaxNumOperands() {
  // Bounded by the width of the NumOperands bookkeeping field.
  return std::numeric_limits<decltype(SDNode::NumOperands)>::max();
}
889 | ||||
890 | /// Helper method returns the integer value of a ConstantSDNode operand. | |||
891 | inline uint64_t getConstantOperandVal(unsigned Num) const; | |||
892 | ||||
893 | /// Helper method returns the APInt of a ConstantSDNode operand. | |||
894 | inline const APInt &getConstantOperandAPInt(unsigned Num) const; | |||
895 | ||||
/// Return the Num'th operand; asserts on out-of-range indices.
const SDValue &getOperand(unsigned Num) const {
  assert(Num < NumOperands && "Invalid child # of SDNode!")((void)0);
  return OperandList[Num];
}

// Operands are stored as a contiguous array of SDUse records.
using op_iterator = SDUse *;

op_iterator op_begin() const { return OperandList; }
op_iterator op_end() const { return OperandList+NumOperands; }
ArrayRef<SDUse> ops() const { return makeArrayRef(op_begin(), op_end()); }
906 | ||||
/// Iterator for directly iterating over the operand SDValue's.
struct value_op_iterator
    : iterator_adaptor_base<value_op_iterator, op_iterator,
                            std::random_access_iterator_tag, SDValue,
                            ptrdiff_t, value_op_iterator *,
                            value_op_iterator *> {
  explicit value_op_iterator(SDUse *U = nullptr)
    : iterator_adaptor_base(U) {}

  // Unwrap the underlying SDUse to expose the SDValue it holds.
  const SDValue &operator*() const { return I->get(); }
};

/// Range over the operands as SDValues rather than SDUses.
iterator_range<value_op_iterator> op_values() const {
  return make_range(value_op_iterator(op_begin()),
                    value_op_iterator(op_end()));
}
923 | ||||
924 | SDVTList getVTList() const { | |||
925 | SDVTList X = { ValueList, NumValues }; | |||
926 | return X; | |||
927 | } | |||
928 | ||||
929 | /// If this node has a glue operand, return the node | |||
930 | /// to which the glue operand points. Otherwise return NULL. | |||
931 | SDNode *getGluedNode() const { | |||
932 | if (getNumOperands() != 0 && | |||
933 | getOperand(getNumOperands()-1).getValueType() == MVT::Glue) | |||
934 | return getOperand(getNumOperands()-1).getNode(); | |||
935 | return nullptr; | |||
936 | } | |||
937 | ||||
938 | /// If this node has a glue value with a user, return | |||
939 | /// the user (there is at most one). Otherwise return NULL. | |||
940 | SDNode *getGluedUser() const { | |||
941 | for (use_iterator UI = use_begin(), UE = use_end(); UI != UE; ++UI) | |||
942 | if (UI.getUse().get().getValueType() == MVT::Glue) | |||
943 | return *UI; | |||
944 | return nullptr; | |||
945 | } | |||
946 | ||||
// Node-level fast-math / wrap flags.
SDNodeFlags getFlags() const { return Flags; }
void setFlags(SDNodeFlags NewFlags) { Flags = NewFlags; }

/// Clear any flags in this node that aren't also set in Flags.
/// If Flags is not in a defined state then this has no effect.
void intersectFlagsWith(const SDNodeFlags Flags);

/// Return the number of values defined/returned by this operator.
unsigned getNumValues() const { return NumValues; }

/// Return the type of a specified result.
EVT getValueType(unsigned ResNo) const {
  assert(ResNo < NumValues && "Illegal result number!")((void)0);
  return ValueList[ResNo];
}

/// Return the type of a specified result as a simple type.
MVT getSimpleValueType(unsigned ResNo) const {
  return getValueType(ResNo).getSimpleVT();
}

/// Returns MVT::getSizeInBits(getValueType(ResNo)).
///
/// If the value type is a scalable vector type, the scalable property will
/// be set and the runtime size will be a positive integer multiple of the
/// base size.
TypeSize getValueSizeInBits(unsigned ResNo) const {
  return getValueType(ResNo).getSizeInBits();
}

// Result types are stored in a contiguous EVT array (ValueList).
using value_iterator = const EVT *;

value_iterator value_begin() const { return ValueList; }
value_iterator value_end() const { return ValueList+NumValues; }
iterator_range<value_iterator> values() const {
  return llvm::make_range(value_begin(), value_end());
}
984 | ||||
985 | /// Return the opcode of this operation for printing. | |||
986 | std::string getOperationName(const SelectionDAG *G = nullptr) const; | |||
987 | static const char* getIndexedModeName(ISD::MemIndexedMode AM); | |||
988 | void print_types(raw_ostream &OS, const SelectionDAG *G) const; | |||
989 | void print_details(raw_ostream &OS, const SelectionDAG *G) const; | |||
990 | void print(raw_ostream &OS, const SelectionDAG *G = nullptr) const; | |||
991 | void printr(raw_ostream &OS, const SelectionDAG *G = nullptr) const; | |||
992 | ||||
993 | /// Print a SelectionDAG node and all children down to | |||
994 | /// the leaves. The given SelectionDAG allows target-specific nodes | |||
995 | /// to be printed in human-readable form. Unlike printr, this will | |||
996 | /// print the whole DAG, including children that appear multiple | |||
997 | /// times. | |||
998 | /// | |||
999 | void printrFull(raw_ostream &O, const SelectionDAG *G = nullptr) const; | |||
1000 | ||||
1001 | /// Print a SelectionDAG node and children up to | |||
1002 | /// depth "depth." The given SelectionDAG allows target-specific | |||
1003 | /// nodes to be printed in human-readable form. Unlike printr, this | |||
1004 | /// will print children that appear multiple times wherever they are | |||
1005 | /// used. | |||
1006 | /// | |||
1007 | void printrWithDepth(raw_ostream &O, const SelectionDAG *G = nullptr, | |||
1008 | unsigned depth = 100) const; | |||
1009 | ||||
1010 | /// Dump this node, for debugging. | |||
1011 | void dump() const; | |||
1012 | ||||
1013 | /// Dump (recursively) this node and its use-def subgraph. | |||
1014 | void dumpr() const; | |||
1015 | ||||
1016 | /// Dump this node, for debugging. | |||
1017 | /// The given SelectionDAG allows target-specific nodes to be printed | |||
1018 | /// in human-readable form. | |||
1019 | void dump(const SelectionDAG *G) const; | |||
1020 | ||||
1021 | /// Dump (recursively) this node and its use-def subgraph. | |||
1022 | /// The given SelectionDAG allows target-specific nodes to be printed | |||
1023 | /// in human-readable form. | |||
1024 | void dumpr(const SelectionDAG *G) const; | |||
1025 | ||||
1026 | /// printrFull to dbgs(). The given SelectionDAG allows | |||
1027 | /// target-specific nodes to be printed in human-readable form. | |||
1028 | /// Unlike dumpr, this will print the whole DAG, including children | |||
1029 | /// that appear multiple times. | |||
1030 | void dumprFull(const SelectionDAG *G = nullptr) const; | |||
1031 | ||||
1032 | /// printrWithDepth to dbgs(). The given | |||
1033 | /// SelectionDAG allows target-specific nodes to be printed in | |||
1034 | /// human-readable form. Unlike dumpr, this will print children | |||
1035 | /// that appear multiple times wherever they are used. | |||
1036 | /// | |||
1037 | void dumprWithDepth(const SelectionDAG *G = nullptr, | |||
1038 | unsigned depth = 100) const; | |||
1039 | ||||
1040 | /// Gather unique data for the node. | |||
1041 | void Profile(FoldingSetNodeID &ID) const; | |||
1042 | ||||
/// This method should only be used by the SDUse class.
void addUse(SDUse &U) { U.addToList(&UseList); }

protected:
// Build a single-result SDVTList backed by the uniqued EVT storage
// returned by getValueTypeList.
static SDVTList getSDVTList(EVT VT) {
  SDVTList Ret = { getValueTypeList(VT), 1 };
  return Ret;
}
1051 | ||||
/// Create an SDNode.
///
/// SDNodes are created without any operands, and never own the operand
/// storage. To add operands, see SelectionDAG::createOperands.
SDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs)
    : NodeType(Opc), ValueList(VTs.VTs), NumValues(VTs.NumVTs),
      IROrder(Order), debugLoc(std::move(dl)) {
  // Zero all subclass/bitfield state before any subclass touches it.
  memset(&RawSDNodeBits, 0, sizeof(RawSDNodeBits));
  assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor")((void)0);
  // Guards against NumValues (a narrower field) truncating VTs.NumVTs.
  assert(NumValues == VTs.NumVTs &&((void)0)
         "NumValues wasn't wide enough for its operands!")((void)0);
}
1064 | ||||
1065 | /// Release the operands and set this node to have zero operands. | |||
1066 | void DropOperands(); | |||
1067 | }; | |||
1068 | ||||
1069 | /// Wrapper class for IR location info (IR ordering and DebugLoc) to be passed | |||
1070 | /// into SDNode creation functions. | |||
1071 | /// When an SDNode is created from the DAGBuilder, the DebugLoc is extracted | |||
1072 | /// from the original Instruction, and IROrder is the ordinal position of | |||
1073 | /// the instruction. | |||
1074 | /// When an SDNode is created after the DAG is being built, both DebugLoc and | |||
1075 | /// the IROrder are propagated from the original SDNode. | |||
1076 | /// So SDLoc class provides two constructors besides the default one, one to | |||
1077 | /// be used by the DAGBuilder, the other to be used by others. | |||
class SDLoc {
private:
  DebugLoc DL;
  // Ordinal position of the originating IR instruction; 0 when unknown.
  int IROrder = 0;

public:
  SDLoc() = default;
  // Propagate location info from an existing node / value.
  SDLoc(const SDNode *N) : DL(N->getDebugLoc()), IROrder(N->getIROrder()) {}
  SDLoc(const SDValue V) : SDLoc(V.getNode()) {}
  // Used by the DAG builder: take the DebugLoc from the IR instruction
  // (which may be null) and the caller-supplied ordinal.
  SDLoc(const Instruction *I, int Order) : IROrder(Order) {
    assert(Order >= 0 && "bad IROrder")((void)0);
    if (I)
      DL = I->getDebugLoc();
  }

  unsigned getIROrder() const { return IROrder; }
  const DebugLoc &getDebugLoc() const { return DL; }
};
1096 | ||||
1097 | // Define inline functions from the SDValue class. | |||
1098 | ||||
inline SDValue::SDValue(SDNode *node, unsigned resno)
    : Node(node), ResNo(resno) {
  // Explicitly check for !ResNo to avoid use-after-free, because there are
  // callers that use SDValue(N, 0) with a deleted N to indicate successful
  // combines.
  assert((!Node || !ResNo || ResNo < Node->getNumValues()) &&((void)0)
         "Invalid result number for the given node!")((void)0);
  // -1U and -2U are reserved as DenseMap sentinel keys (per the message).
  assert(ResNo < -2U && "Cannot use result numbers reserved for DenseMaps.")((void)0);
}
1108 | ||||
// The SDValue forwarders below dereference Node unconditionally; callers
// must not invoke them on a default-constructed (null-node) SDValue.
inline unsigned SDValue::getOpcode() const {
  return Node->getOpcode();
}

inline EVT SDValue::getValueType() const {
  return Node->getValueType(ResNo);
}

inline unsigned SDValue::getNumOperands() const {
  return Node->getNumOperands();
}

inline const SDValue &SDValue::getOperand(unsigned i) const {
  return Node->getOperand(i);
}
1124 | ||||
// Convenience forwarders to the underlying SDNode.
inline uint64_t SDValue::getConstantOperandVal(unsigned i) const {
  return Node->getConstantOperandVal(i);
}

inline const APInt &SDValue::getConstantOperandAPInt(unsigned i) const {
  return Node->getConstantOperandAPInt(i);
}

inline bool SDValue::isTargetOpcode() const {
  return Node->isTargetOpcode();
}

inline bool SDValue::isTargetMemoryOpcode() const {
  return Node->isTargetMemoryOpcode();
}
1140 | ||||
inline bool SDValue::isMachineOpcode() const {
  // NOTE(review): the static analyzer flags this line ("Called C++ object
  // pointer is null"). Node is dereferenced without a guard, so callers must
  // not invoke this on a default-constructed/null SDValue — same implicit
  // precondition as every other SDValue forwarder in this file.
  return Node->isMachineOpcode();

}
1144 | ||||
inline unsigned SDValue::getMachineOpcode() const {
  return Node->getMachineOpcode();
}

inline bool SDValue::isUndef() const {
  return Node->isUndef();
}

// Use queries are per-result: only uses of this value's ResNo count.
inline bool SDValue::use_empty() const {
  return !Node->hasAnyUseOfValue(ResNo);
}

inline bool SDValue::hasOneUse() const {
  return Node->hasNUsesOfValue(1, ResNo);
}

inline const DebugLoc &SDValue::getDebugLoc() const {
  return Node->getDebugLoc();
}
1164 | ||||
// Debug-print forwarders; the optional SelectionDAG argument enables
// target-specific pretty printing.
inline void SDValue::dump() const {
  return Node->dump();
}

inline void SDValue::dump(const SelectionDAG *G) const {
  return Node->dump(G);
}

inline void SDValue::dumpr() const {
  return Node->dumpr();
}

inline void SDValue::dumpr(const SelectionDAG *G) const {
  return Node->dumpr(G);
}
1180 | ||||
1181 | // Define inline functions from the SDUse class. | |||
1182 | ||||
1183 | inline void SDUse::set(const SDValue &V) { | |||
1184 | if (Val.getNode()) removeFromList(); | |||
1185 | Val = V; | |||
1186 | if (V.getNode()) V.getNode()->addUse(*this); | |||
1187 | } | |||
1188 | ||||
// First-time initialization: this use must not already be on a use list,
// and V's node must be non-null.
inline void SDUse::setInitial(const SDValue &V) {
  Val = V;
  V.getNode()->addUse(*this);
}

// Replace only the node (the result number is preserved), updating use lists.
inline void SDUse::setNode(SDNode *N) {
  if (Val.getNode()) removeFromList();
  Val.setNode(N);
  if (N) N->addUse(*this);
}
1199 | ||||
1200 | /// This class is used to form a handle around another node that | |||
1201 | /// is persistent and is updated across invocations of replaceAllUsesWith on its | |||
1202 | /// operand. This node should be directly created by end-users and not added to | |||
1203 | /// the AllNodes list. | |||
class HandleSDNode : public SDNode {
  // The single operand, stored inline because this node is stack-allocated
  // and owns its own operand storage (unlike ordinary SDNodes).
  SDUse Op;

public:
  explicit HandleSDNode(SDValue X)
    : SDNode(ISD::HANDLENODE, 0, DebugLoc(), getSDVTList(MVT::Other)) {
    // HandleSDNodes are never inserted into the DAG, so they won't be
    // auto-numbered. Use ID 65535 as a sentinel.
    PersistentId = 0xffff;

    // Manually set up the operand list. This node type is special in that it's
    // always stack allocated and SelectionDAG does not manage its operands.
    // TODO: This should either (a) not be in the SDNode hierarchy, or (b) not
    // be so special.
    Op.setUser(this);
    Op.setInitial(X);
    NumOperands = 1;
    OperandList = &Op;
  }
  ~HandleSDNode();

  /// The tracked operand; kept current across replaceAllUsesWith updates.
  const SDValue &getValue() const { return Op; }
};
1227 | ||||
/// SDNode for ISD::ADDRSPACECAST: records the source and destination
/// address spaces of the pointer cast.
class AddrSpaceCastSDNode : public SDNode {
private:
  unsigned SrcAddrSpace;
  unsigned DestAddrSpace;

public:
  AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl, EVT VT,
                      unsigned SrcAS, unsigned DestAS);

  unsigned getSrcAddressSpace() const { return SrcAddrSpace; }
  unsigned getDestAddressSpace() const { return DestAddrSpace; }

  // Methods to support isa and dyn_cast.
  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::ADDRSPACECAST;
  }
};
1244 | ||||
/// This is an abstract virtual class for memory operations.
class MemSDNode : public SDNode {
private:
  // VT of in-memory value.
  EVT MemoryVT;

protected:
  /// Memory reference information.
  MachineMemOperand *MMO;

public:
  MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTs,
            EVT memvt, MachineMemOperand *MMO);

  bool readMem() const { return MMO->isLoad(); }
  bool writeMem() const { return MMO->isStore(); }

  /// Returns alignment and volatility of the memory access
  Align getOriginalAlign() const { return MMO->getBaseAlign(); }
  Align getAlign() const { return MMO->getAlign(); }
  // FIXME: Remove once transition to getAlign is over.
  unsigned getAlignment() const { return MMO->getAlign().value(); }

  /// Return the SubclassData value, without HasDebugValue. This contains an
  /// encoding of the volatile flag, as well as bits used by subclasses. This
  /// function should only be used to compute a FoldingSetNodeID value.
  /// The HasDebugValue bit is masked out because CSE map needs to match
  /// nodes with debug info with nodes without debug info. Same is about
  /// isDivergent bit.
  unsigned getRawSubclassData() const {
    uint16_t Data;
    // Copy the raw bits into a local union so the DebugValue/Divergent bits
    // can be cleared without mutating this node's state.
    union {
      char RawSDNodeBits[sizeof(uint16_t)];
      SDNodeBitfields SDNodeBits;
    };
    memcpy(&RawSDNodeBits, &this->RawSDNodeBits, sizeof(this->RawSDNodeBits));
    SDNodeBits.HasDebugValue = 0;
    SDNodeBits.IsDivergent = false;
    memcpy(&Data, &RawSDNodeBits, sizeof(RawSDNodeBits));
    return Data;
  }

  bool isVolatile() const { return MemSDNodeBits.IsVolatile; }
  bool isNonTemporal() const { return MemSDNodeBits.IsNonTemporal; }
  bool isDereferenceable() const { return MemSDNodeBits.IsDereferenceable; }
  bool isInvariant() const { return MemSDNodeBits.IsInvariant; }

  // Returns the offset from the location of the access.
  int64_t getSrcValueOffset() const { return MMO->getOffset(); }

  /// Returns the AA info that describes the dereference.
  AAMDNodes getAAInfo() const { return MMO->getAAInfo(); }

  /// Returns the Ranges that describes the dereference.
  const MDNode *getRanges() const { return MMO->getRanges(); }

  /// Returns the synchronization scope ID for this memory operation.
  SyncScope::ID getSyncScopeID() const { return MMO->getSyncScopeID(); }

  /// Return the atomic ordering requirements for this memory operation. For
  /// cmpxchg atomic operations, return the atomic ordering requirements when
  /// store occurs.
  AtomicOrdering getSuccessOrdering() const {
    return MMO->getSuccessOrdering();
  }

  /// Return a single atomic ordering that is at least as strong as both the
  /// success and failure orderings for an atomic operation. (For operations
  /// other than cmpxchg, this is equivalent to getSuccessOrdering().)
  AtomicOrdering getMergedOrdering() const { return MMO->getMergedOrdering(); }

  /// Return true if the memory operation ordering is Unordered or higher.
  bool isAtomic() const { return MMO->isAtomic(); }

  /// Returns true if the memory operation doesn't imply any ordering
  /// constraints on surrounding memory operations beyond the normal memory
  /// aliasing rules.
  bool isUnordered() const { return MMO->isUnordered(); }

  /// Returns true if the memory operation is neither atomic or volatile.
  bool isSimple() const { return !isAtomic() && !isVolatile(); }

  /// Return the type of the in-memory value.
  EVT getMemoryVT() const { return MemoryVT; }

  /// Return a MachineMemOperand object describing the memory
  /// reference performed by operation.
  MachineMemOperand *getMemOperand() const { return MMO; }

  const MachinePointerInfo &getPointerInfo() const {
    return MMO->getPointerInfo();
  }

  /// Return the address space for the associated pointer
  unsigned getAddressSpace() const {
    return getPointerInfo().getAddrSpace();
  }

  /// Update this MemSDNode's MachineMemOperand information
  /// to reflect the alignment of NewMMO, if it has a greater alignment.
  /// This must only be used when the new alignment applies to all users of
  /// this MachineMemOperand.
  void refineAlignment(const MachineMemOperand *NewMMO) {
    MMO->refineAlignment(NewMMO);
  }

  const SDValue &getChain() const { return getOperand(0); }

  // The operand index of the base pointer depends on the opcode.
  const SDValue &getBasePtr() const {
    switch (getOpcode()) {
    case ISD::STORE:
    case ISD::MSTORE:
      return getOperand(2);
    case ISD::MGATHER:
    case ISD::MSCATTER:
      return getOperand(3);
    default:
      return getOperand(1);
    }
  }

  // Methods to support isa and dyn_cast
  static bool classof(const SDNode *N) {
    // For some targets, we lower some target intrinsics to a MemIntrinsicNode
    // with either an intrinsic or a target opcode.
    switch (N->getOpcode()) {
    case ISD::LOAD:
    case ISD::STORE:
    case ISD::PREFETCH:
    case ISD::ATOMIC_CMP_SWAP:
    case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
    case ISD::ATOMIC_SWAP:
    case ISD::ATOMIC_LOAD_ADD:
    case ISD::ATOMIC_LOAD_SUB:
    case ISD::ATOMIC_LOAD_AND:
    case ISD::ATOMIC_LOAD_CLR:
    case ISD::ATOMIC_LOAD_OR:
    case ISD::ATOMIC_LOAD_XOR:
    case ISD::ATOMIC_LOAD_NAND:
    case ISD::ATOMIC_LOAD_MIN:
    case ISD::ATOMIC_LOAD_MAX:
    case ISD::ATOMIC_LOAD_UMIN:
    case ISD::ATOMIC_LOAD_UMAX:
    case ISD::ATOMIC_LOAD_FADD:
    case ISD::ATOMIC_LOAD_FSUB:
    case ISD::ATOMIC_LOAD:
    case ISD::ATOMIC_STORE:
    case ISD::MLOAD:
    case ISD::MSTORE:
    case ISD::MGATHER:
    case ISD::MSCATTER:
      return true;
    default:
      return N->isMemIntrinsic() || N->isTargetMemoryOpcode();
    }
  }
};
1402 | ||||
1403 | /// This is an SDNode representing atomic operations. | |||
1404 | class AtomicSDNode : public MemSDNode { | |||
1405 | public: | |||
1406 | AtomicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTL, | |||
1407 | EVT MemVT, MachineMemOperand *MMO) | |||
1408 | : MemSDNode(Opc, Order, dl, VTL, MemVT, MMO) { | |||
1409 | assert(((Opc != ISD::ATOMIC_LOAD && Opc != ISD::ATOMIC_STORE) ||((void)0) | |||
1410 | MMO->isAtomic()) && "then why are we using an AtomicSDNode?")((void)0); | |||
1411 | } | |||
1412 | ||||
1413 | const SDValue &getBasePtr() const { return getOperand(1); } | |||
1414 | const SDValue &getVal() const { return getOperand(2); } | |||
1415 | ||||
1416 | /// Returns true if this SDNode represents cmpxchg atomic operation, false | |||
1417 | /// otherwise. | |||
1418 | bool isCompareAndSwap() const { | |||
1419 | unsigned Op = getOpcode(); | |||
1420 | return Op == ISD::ATOMIC_CMP_SWAP || | |||
1421 | Op == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS; | |||
1422 | } | |||
1423 | ||||
1424 | /// For cmpxchg atomic operations, return the atomic ordering requirements | |||
1425 | /// when store does not occur. | |||
1426 | AtomicOrdering getFailureOrdering() const { | |||
1427 | assert(isCompareAndSwap() && "Must be cmpxchg operation")((void)0); | |||
1428 | return MMO->getFailureOrdering(); | |||
1429 | } | |||
1430 | ||||
1431 | // Methods to support isa and dyn_cast | |||
1432 | static bool classof(const SDNode *N) { | |||
1433 | return N->getOpcode() == ISD::ATOMIC_CMP_SWAP || | |||
1434 | N->getOpcode() == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS || | |||
1435 | N->getOpcode() == ISD::ATOMIC_SWAP || | |||
1436 | N->getOpcode() == ISD::ATOMIC_LOAD_ADD || | |||
1437 | N->getOpcode() == ISD::ATOMIC_LOAD_SUB || | |||
1438 | N->getOpcode() == ISD::ATOMIC_LOAD_AND || | |||
1439 | N->getOpcode() == ISD::ATOMIC_LOAD_CLR || | |||
1440 | N->getOpcode() == ISD::ATOMIC_LOAD_OR || | |||
1441 | N->getOpcode() == ISD::ATOMIC_LOAD_XOR || | |||
1442 | N->getOpcode() == ISD::ATOMIC_LOAD_NAND || | |||
1443 | N->getOpcode() == ISD::ATOMIC_LOAD_MIN || | |||
1444 | N->getOpcode() == ISD::ATOMIC_LOAD_MAX || | |||
1445 | N->getOpcode() == ISD::ATOMIC_LOAD_UMIN || | |||
1446 | N->getOpcode() == ISD::ATOMIC_LOAD_UMAX || | |||
1447 | N->getOpcode() == ISD::ATOMIC_LOAD_FADD || | |||
1448 | N->getOpcode() == ISD::ATOMIC_LOAD_FSUB || | |||
1449 | N->getOpcode() == ISD::ATOMIC_LOAD || | |||
1450 | N->getOpcode() == ISD::ATOMIC_STORE; | |||
1451 | } | |||
1452 | }; | |||
1453 | ||||
1454 | /// This SDNode is used for target intrinsics that touch | |||
1455 | /// memory and need an associated MachineMemOperand. Its opcode may be | |||
1456 | /// INTRINSIC_VOID, INTRINSIC_W_CHAIN, PREFETCH, or a target-specific opcode | |||
1457 | /// with a value not less than FIRST_TARGET_MEMORY_OPCODE. | |||
1458 | class MemIntrinsicSDNode : public MemSDNode { | |||
1459 | public: | |||
1460 | MemIntrinsicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, | |||
1461 | SDVTList VTs, EVT MemoryVT, MachineMemOperand *MMO) | |||
1462 | : MemSDNode(Opc, Order, dl, VTs, MemoryVT, MMO) { | |||
1463 | SDNodeBits.IsMemIntrinsic = true; | |||
1464 | } | |||
1465 | ||||
1466 | // Methods to support isa and dyn_cast | |||
1467 | static bool classof(const SDNode *N) { | |||
1468 | // We lower some target intrinsics to their target opcode | |||
1469 | // early a node with a target opcode can be of this class | |||
1470 | return N->isMemIntrinsic() || | |||
1471 | N->getOpcode() == ISD::PREFETCH || | |||
1472 | N->isTargetMemoryOpcode(); | |||
1473 | } | |||
1474 | }; | |||
1475 | ||||
1476 | /// This SDNode is used to implement the code generator | |||
1477 | /// support for the llvm IR shufflevector instruction. It combines elements | |||
1478 | /// from two input vectors into a new input vector, with the selection and | |||
1479 | /// ordering of elements determined by an array of integers, referred to as | |||
1480 | /// the shuffle mask. For input vectors of width N, mask indices of 0..N-1 | |||
1481 | /// refer to elements from the LHS input, and indices from N to 2N-1 the RHS. | |||
1482 | /// An index of -1 is treated as undef, such that the code generator may put | |||
1483 | /// any value in the corresponding element of the result. | |||
1484 | class ShuffleVectorSDNode : public SDNode { | |||
1485 | // The memory for Mask is owned by the SelectionDAG's OperandAllocator, and | |||
1486 | // is freed when the SelectionDAG object is destroyed. | |||
1487 | const int *Mask; | |||
1488 | ||||
1489 | protected: | |||
1490 | friend class SelectionDAG; | |||
1491 | ||||
1492 | ShuffleVectorSDNode(EVT VT, unsigned Order, const DebugLoc &dl, const int *M) | |||
1493 | : SDNode(ISD::VECTOR_SHUFFLE, Order, dl, getSDVTList(VT)), Mask(M) {} | |||
1494 | ||||
1495 | public: | |||
1496 | ArrayRef<int> getMask() const { | |||
1497 | EVT VT = getValueType(0); | |||
1498 | return makeArrayRef(Mask, VT.getVectorNumElements()); | |||
1499 | } | |||
1500 | ||||
1501 | int getMaskElt(unsigned Idx) const { | |||
1502 | assert(Idx < getValueType(0).getVectorNumElements() && "Idx out of range!")((void)0); | |||
1503 | return Mask[Idx]; | |||
1504 | } | |||
1505 | ||||
1506 | bool isSplat() const { return isSplatMask(Mask, getValueType(0)); } | |||
1507 | ||||
1508 | int getSplatIndex() const { | |||
1509 | assert(isSplat() && "Cannot get splat index for non-splat!")((void)0); | |||
1510 | EVT VT = getValueType(0); | |||
1511 | for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) | |||
1512 | if (Mask[i] >= 0) | |||
1513 | return Mask[i]; | |||
1514 | ||||
1515 | // We can choose any index value here and be correct because all elements | |||
1516 | // are undefined. Return 0 for better potential for callers to simplify. | |||
1517 | return 0; | |||
1518 | } | |||
1519 | ||||
1520 | static bool isSplatMask(const int *Mask, EVT VT); | |||
1521 | ||||
1522 | /// Change values in a shuffle permute mask assuming | |||
1523 | /// the two vector operands have swapped position. | |||
1524 | static void commuteMask(MutableArrayRef<int> Mask) { | |||
1525 | unsigned NumElems = Mask.size(); | |||
1526 | for (unsigned i = 0; i != NumElems; ++i) { | |||
1527 | int idx = Mask[i]; | |||
1528 | if (idx < 0) | |||
1529 | continue; | |||
1530 | else if (idx < (int)NumElems) | |||
1531 | Mask[i] = idx + NumElems; | |||
1532 | else | |||
1533 | Mask[i] = idx - NumElems; | |||
1534 | } | |||
1535 | } | |||
1536 | ||||
1537 | static bool classof(const SDNode *N) { | |||
1538 | return N->getOpcode() == ISD::VECTOR_SHUFFLE; | |||
1539 | } | |||
1540 | }; | |||
1541 | ||||
/// An SDNode holding a compile-time integer constant (ISD::Constant or
/// ISD::TargetConstant). The value itself is stored as an IR ConstantInt.
class ConstantSDNode : public SDNode {
  friend class SelectionDAG;

  const ConstantInt *Value;

  ConstantSDNode(bool isTarget, bool isOpaque, const ConstantInt *val, EVT VT)
      : SDNode(isTarget ? ISD::TargetConstant : ISD::Constant, 0, DebugLoc(),
               getSDVTList(VT)),
        Value(val) {
    ConstantSDNodeBits.IsOpaque = isOpaque;
  }

public:
  const ConstantInt *getConstantIntValue() const { return Value; }
  const APInt &getAPIntValue() const { return Value->getValue(); }
  uint64_t getZExtValue() const { return Value->getZExtValue(); }
  int64_t getSExtValue() const { return Value->getSExtValue(); }
  uint64_t getLimitedValue(uint64_t Limit = UINT64_MAX) {
    return Value->getLimitedValue(Limit);
  }
  MaybeAlign getMaybeAlignValue() const { return Value->getMaybeAlignValue(); }
  Align getAlignValue() const { return Value->getAlignValue(); }

  bool isOne() const { return Value->isOne(); }
  bool isNullValue() const { return Value->isZero(); }
  bool isAllOnesValue() const { return Value->isMinusOne(); }
  bool isMaxSignedValue() const { return Value->isMaxValue(true); }
  bool isMinSignedValue() const { return Value->isMinValue(true); }

  /// Return the IsOpaque flag recorded at construction time.
  bool isOpaque() const { return ConstantSDNodeBits.IsOpaque; }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::Constant ||
           N->getOpcode() == ISD::TargetConstant;
  }
};
1578 | ||||
1579 | uint64_t SDNode::getConstantOperandVal(unsigned Num) const { | |||
1580 | return cast<ConstantSDNode>(getOperand(Num))->getZExtValue(); | |||
1581 | } | |||
1582 | ||||
1583 | const APInt &SDNode::getConstantOperandAPInt(unsigned Num) const { | |||
1584 | return cast<ConstantSDNode>(getOperand(Num))->getAPIntValue(); | |||
1585 | } | |||
1586 | ||||
/// An SDNode holding a compile-time floating-point constant (ISD::ConstantFP
/// or ISD::TargetConstantFP). The value itself is stored as an IR ConstantFP.
class ConstantFPSDNode : public SDNode {
  friend class SelectionDAG;

  const ConstantFP *Value;

  ConstantFPSDNode(bool isTarget, const ConstantFP *val, EVT VT)
      : SDNode(isTarget ? ISD::TargetConstantFP : ISD::ConstantFP, 0,
               DebugLoc(), getSDVTList(VT)),
        Value(val) {}

public:
  const APFloat& getValueAPF() const { return Value->getValueAPF(); }
  const ConstantFP *getConstantFPValue() const { return Value; }

  /// Return true if the value is positive or negative zero.
  bool isZero() const { return Value->isZero(); }

  /// Return true if the value is a NaN.
  bool isNaN() const { return Value->isNaN(); }

  /// Return true if the value is an infinity
  bool isInfinity() const { return Value->isInfinity(); }

  /// Return true if the value is negative.
  bool isNegative() const { return Value->isNegative(); }

  /// We don't rely on operator== working on double values, as
  /// it returns true for things that are clearly not equal, like -0.0 and 0.0.
  /// As such, this method can be used to do an exact bit-for-bit comparison of
  /// two floating point values.

  /// We leave the version with the double argument here because it's just so
  /// convenient to write "2.0" and the like. Without this function we'd
  /// have to duplicate its logic everywhere it's called.
  bool isExactlyValue(double V) const {
    return Value->getValueAPF().isExactlyValue(V);
  }
  bool isExactlyValue(const APFloat& V) const;

  /// Return true if \p Val is a valid value for a constant of type \p VT.
  static bool isValueValidForType(EVT VT, const APFloat& Val);

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::ConstantFP ||
           N->getOpcode() == ISD::TargetConstantFP;
  }
};
1633 | ||||
/// Returns true if \p V is a constant integer zero.
bool isNullConstant(SDValue V);

/// Returns true if \p V is an FP constant with a value of positive zero.
bool isNullFPConstant(SDValue V);

/// Returns true if \p V is an integer constant with all bits set.
bool isAllOnesConstant(SDValue V);

/// Returns true if \p V is a constant integer one.
bool isOneConstant(SDValue V);

/// Return the non-bitcasted source operand of \p V if it exists.
/// If \p V is not a bitcasted value, it is returned as-is.
SDValue peekThroughBitcasts(SDValue V);

/// Return the non-bitcasted and one-use source operand of \p V if it exists.
/// If \p V is not a bitcasted one-use value, it is returned as-is.
SDValue peekThroughOneUseBitcasts(SDValue V);

/// Return the non-extracted vector source operand of \p V if it exists.
/// If \p V is not an extracted subvector, it is returned as-is.
SDValue peekThroughExtractSubvectors(SDValue V);

/// Returns true if \p V is a bitwise not operation. Assumes that an all ones
/// constant is canonicalized to be operand 1.
bool isBitwiseNot(SDValue V, bool AllowUndefs = false);

/// Returns the SDNode if it is a constant splat BuildVector or constant int.
ConstantSDNode *isConstOrConstSplat(SDValue N, bool AllowUndefs = false,
                                    bool AllowTruncation = false);

/// Returns the SDNode if it is a demanded constant splat BuildVector or
/// constant int.
ConstantSDNode *isConstOrConstSplat(SDValue N, const APInt &DemandedElts,
                                    bool AllowUndefs = false,
                                    bool AllowTruncation = false);

/// Returns the SDNode if it is a constant splat BuildVector or constant float.
ConstantFPSDNode *isConstOrConstSplatFP(SDValue N, bool AllowUndefs = false);

/// Returns the SDNode if it is a demanded constant splat BuildVector or
/// constant float.
ConstantFPSDNode *isConstOrConstSplatFP(SDValue N, const APInt &DemandedElts,
                                        bool AllowUndefs = false);

/// Return true if the value is a constant 0 integer or a splatted vector of
/// a constant 0 integer (with no undefs by default).
/// Build vector implicit truncation is not an issue for null values.
bool isNullOrNullSplat(SDValue V, bool AllowUndefs = false);

/// Return true if the value is a constant 1 integer or a splatted vector of a
/// constant 1 integer (with no undefs).
/// Does not permit build vector implicit truncation.
bool isOneOrOneSplat(SDValue V, bool AllowUndefs = false);

/// Return true if the value is a constant -1 integer or a splatted vector of a
/// constant -1 integer (with no undefs).
/// Does not permit build vector implicit truncation.
bool isAllOnesOrAllOnesSplat(SDValue V, bool AllowUndefs = false);

/// Return true if \p V is either an integer or FP constant.
inline bool isIntOrFPConstant(SDValue V) {
  return isa<ConstantSDNode>(V) || isa<ConstantFPSDNode>(V);
}
1699 | ||||
/// An SDNode referencing a GlobalValue, with an optional byte offset and
/// target-specific flags. Covers both plain and TLS global addresses, in
/// pre- and post-legalization (Target*) forms.
class GlobalAddressSDNode : public SDNode {
  friend class SelectionDAG;

  const GlobalValue *TheGlobal;
  int64_t Offset;       // Byte offset added to the global's address.
  unsigned TargetFlags; // Target-specific flags.

  GlobalAddressSDNode(unsigned Opc, unsigned Order, const DebugLoc &DL,
                      const GlobalValue *GA, EVT VT, int64_t o,
                      unsigned TF);

public:
  const GlobalValue *getGlobal() const { return TheGlobal; }
  int64_t getOffset() const { return Offset; }
  unsigned getTargetFlags() const { return TargetFlags; }
  // Return the address space this GlobalAddress belongs to.
  unsigned getAddressSpace() const;

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::GlobalAddress ||
           N->getOpcode() == ISD::TargetGlobalAddress ||
           N->getOpcode() == ISD::GlobalTLSAddress ||
           N->getOpcode() == ISD::TargetGlobalTLSAddress;
  }
};
1725 | ||||
1726 | class FrameIndexSDNode : public SDNode { | |||
1727 | friend class SelectionDAG; | |||
1728 | ||||
1729 | int FI; | |||
1730 | ||||
1731 | FrameIndexSDNode(int fi, EVT VT, bool isTarg) | |||
1732 | : SDNode(isTarg ? ISD::TargetFrameIndex : ISD::FrameIndex, | |||
1733 | 0, DebugLoc(), getSDVTList(VT)), FI(fi) { | |||
1734 | } | |||
1735 | ||||
1736 | public: | |||
1737 | int getIndex() const { return FI; } | |||
1738 | ||||
1739 | static bool classof(const SDNode *N) { | |||
1740 | return N->getOpcode() == ISD::FrameIndex || | |||
1741 | N->getOpcode() == ISD::TargetFrameIndex; | |||
1742 | } | |||
1743 | }; | |||
1744 | ||||
/// This SDNode is used for LIFETIME_START/LIFETIME_END values, which indicate
/// the offset and size that are started/ended in the underlying FrameIndex.
class LifetimeSDNode : public SDNode {
  friend class SelectionDAG;
  int64_t Size;
  int64_t Offset; // -1 if offset is unknown.

  LifetimeSDNode(unsigned Opcode, unsigned Order, const DebugLoc &dl,
                 SDVTList VTs, int64_t Size, int64_t Offset)
      : SDNode(Opcode, Order, dl, VTs), Size(Size), Offset(Offset) {}
public:
  /// Return the frame index this lifetime marker refers to; operand 1 must
  /// be a FrameIndexSDNode (the cast asserts otherwise).
  int64_t getFrameIndex() const {
    return cast<FrameIndexSDNode>(getOperand(1))->getIndex();
  }

  /// True if the offset (and thus the size) is known.
  bool hasOffset() const { return Offset >= 0; }
  int64_t getOffset() const {
    assert(hasOffset() && "offset is unknown");
    return Offset;
  }
  /// Note: like getOffset(), only valid when hasOffset() is true.
  int64_t getSize() const {
    assert(hasOffset() && "offset is unknown");
    return Size;
  }

  // Methods to support isa and dyn_cast
  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::LIFETIME_START ||
           N->getOpcode() == ISD::LIFETIME_END;
  }
};
1776 | ||||
/// This SDNode is used for PSEUDO_PROBE values, which are the function guid and
/// the index of the basic block being probed. A pseudo probe serves as a place
/// holder and will be removed at the end of compilation. It does not have any
/// operand because we do not want the instruction selection to deal with any.
class PseudoProbeSDNode : public SDNode {
  friend class SelectionDAG;
  uint64_t Guid;       // Function GUID.
  uint64_t Index;      // Index of the probed basic block.
  uint32_t Attributes; // Probe attribute bits.

  PseudoProbeSDNode(unsigned Opcode, unsigned Order, const DebugLoc &Dl,
                    SDVTList VTs, uint64_t Guid, uint64_t Index, uint32_t Attr)
      : SDNode(Opcode, Order, Dl, VTs), Guid(Guid), Index(Index),
        Attributes(Attr) {}

public:
  uint64_t getGuid() const { return Guid; }
  uint64_t getIndex() const { return Index; }
  uint32_t getAttributes() const { return Attributes; }

  // Methods to support isa and dyn_cast
  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::PSEUDO_PROBE;
  }
};
1802 | ||||
/// An SDNode wrapping a jump-table index (ISD::JumpTable or
/// ISD::TargetJumpTable).
class JumpTableSDNode : public SDNode {
  friend class SelectionDAG;

  int JTI;              // Jump table index.
  unsigned TargetFlags; // Target-specific flags.

  JumpTableSDNode(int jti, EVT VT, bool isTarg, unsigned TF)
      : SDNode(isTarg ? ISD::TargetJumpTable : ISD::JumpTable,
               0, DebugLoc(), getSDVTList(VT)), JTI(jti), TargetFlags(TF) {
  }

public:
  int getIndex() const { return JTI; }
  unsigned getTargetFlags() const { return TargetFlags; }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::JumpTable ||
           N->getOpcode() == ISD::TargetJumpTable;
  }
};
1823 | ||||
/// An SDNode referencing a constant-pool entry (ISD::ConstantPool or
/// ISD::TargetConstantPool). The entry is either a plain IR Constant or a
/// target-specific MachineConstantPoolValue; which one is active is encoded
/// in the sign bit of Offset.
class ConstantPoolSDNode : public SDNode {
  friend class SelectionDAG;

  union {
    const Constant *ConstVal;
    MachineConstantPoolValue *MachineCPVal;
  } Val;
  int Offset;  // It's a MachineConstantPoolValue if top bit is set.
  Align Alignment;  // Minimum alignment requirement of CP.
  unsigned TargetFlags;

  ConstantPoolSDNode(bool isTarget, const Constant *c, EVT VT, int o,
                     Align Alignment, unsigned TF)
      : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, 0,
               DebugLoc(), getSDVTList(VT)),
        Offset(o), Alignment(Alignment), TargetFlags(TF) {
    // The top bit of Offset is reserved as the union tag, so the incoming
    // offset must be non-negative.
    assert(Offset >= 0 && "Offset is too large");
    Val.ConstVal = c;
  }

  ConstantPoolSDNode(bool isTarget, MachineConstantPoolValue *v, EVT VT, int o,
                     Align Alignment, unsigned TF)
      : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, 0,
               DebugLoc(), getSDVTList(VT)),
        Offset(o), Alignment(Alignment), TargetFlags(TF) {
    assert(Offset >= 0 && "Offset is too large");
    Val.MachineCPVal = v;
    // Set the sign bit of Offset to record that MachineCPVal is the active
    // union member.
    Offset |= 1 << (sizeof(unsigned)*CHAR_BIT-1);
  }

public:
  /// True if the active union member is MachineCPVal (top bit of Offset set).
  bool isMachineConstantPoolEntry() const {
    return Offset < 0;
  }

  const Constant *getConstVal() const {
    assert(!isMachineConstantPoolEntry() && "Wrong constantpool type");
    return Val.ConstVal;
  }

  MachineConstantPoolValue *getMachineCPVal() const {
    assert(isMachineConstantPoolEntry() && "Wrong constantpool type");
    return Val.MachineCPVal;
  }

  /// Return the offset with the union tag bit masked off.
  int getOffset() const {
    return Offset & ~(1 << (sizeof(unsigned)*CHAR_BIT-1));
  }

  // Return the alignment of this constant pool object, which is either 0 (for
  // default alignment) or the desired value.
  Align getAlign() const { return Alignment; }
  unsigned getTargetFlags() const { return TargetFlags; }

  Type *getType() const;

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::ConstantPool ||
           N->getOpcode() == ISD::TargetConstantPool;
  }
};
1885 | ||||
/// Completely target-dependent object reference.
class TargetIndexSDNode : public SDNode {
  friend class SelectionDAG;

  unsigned TargetFlags; // Target-specific flags.
  int Index;            // Target-defined index.
  int64_t Offset;       // Byte offset applied by the target.

public:
  TargetIndexSDNode(int Idx, EVT VT, int64_t Ofs, unsigned TF)
      : SDNode(ISD::TargetIndex, 0, DebugLoc(), getSDVTList(VT)),
        TargetFlags(TF), Index(Idx), Offset(Ofs) {}

  unsigned getTargetFlags() const { return TargetFlags; }
  int getIndex() const { return Index; }
  int64_t getOffset() const { return Offset; }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::TargetIndex;
  }
};
1907 | ||||
/// An SDNode wrapping a MachineBasicBlock (ISD::BasicBlock), used as a
/// branch target.
class BasicBlockSDNode : public SDNode {
  friend class SelectionDAG;

  MachineBasicBlock *MBB;

  /// Debug info is meaningful and potentially useful here, but we create
  /// blocks out of order when they're jumped to, which makes it a bit
  /// harder. Let's see if we need it first.
  explicit BasicBlockSDNode(MachineBasicBlock *mbb)
      : SDNode(ISD::BasicBlock, 0, DebugLoc(), getSDVTList(MVT::Other)), MBB(mbb)
  {}

public:
  MachineBasicBlock *getBasicBlock() const { return MBB; }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::BasicBlock;
  }
};
1927 | ||||
/// A "pseudo-class" with methods for operating on BUILD_VECTORs.
/// It is never instantiated directly; SDNodes with the BUILD_VECTOR opcode
/// are cast to it (see the deleted constructor and classof below).
class BuildVectorSDNode : public SDNode {
public:
  // These are constructed as SDNodes and then cast to BuildVectorSDNodes.
  explicit BuildVectorSDNode() = delete;

  /// Check if this is a constant splat, and if so, find the
  /// smallest element size that splats the vector. If MinSplatBits is
  /// nonzero, the element size must be at least that large. Note that the
  /// splat element may be the entire vector (i.e., a one element vector).
  /// Returns the splat element value in SplatValue. Any undefined bits in
  /// that value are zero, and the corresponding bits in the SplatUndef mask
  /// are set. The SplatBitSize value is set to the splat element size in
  /// bits. HasAnyUndefs is set to true if any bits in the vector are
  /// undefined. isBigEndian describes the endianness of the target.
  bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef,
                       unsigned &SplatBitSize, bool &HasAnyUndefs,
                       unsigned MinSplatBits = 0,
                       bool isBigEndian = false) const;

  /// Returns the demanded splatted value or a null value if this is not a
  /// splat.
  ///
  /// The DemandedElts mask indicates the elements that must be in the splat.
  /// If passed a non-null UndefElements bitvector, it will resize it to match
  /// the vector width and set the bits where elements are undef.
  SDValue getSplatValue(const APInt &DemandedElts,
                        BitVector *UndefElements = nullptr) const;

  /// Returns the splatted value or a null value if this is not a splat.
  ///
  /// If passed a non-null UndefElements bitvector, it will resize it to match
  /// the vector width and set the bits where elements are undef.
  SDValue getSplatValue(BitVector *UndefElements = nullptr) const;

  /// Find the shortest repeating sequence of values in the build vector.
  ///
  /// e.g. { u, X, u, X, u, u, X, u } -> { X }
  ///      { X, Y, u, Y, u, u, X, u } -> { X, Y }
  ///
  /// Currently this must be a power-of-2 build vector.
  /// The DemandedElts mask indicates the elements that must be present,
  /// undemanded elements in Sequence may be null (SDValue()). If passed a
  /// non-null UndefElements bitvector, it will resize it to match the original
  /// vector width and set the bits where elements are undef. If result is
  /// false, Sequence will be empty.
  bool getRepeatedSequence(const APInt &DemandedElts,
                           SmallVectorImpl<SDValue> &Sequence,
                           BitVector *UndefElements = nullptr) const;

  /// Find the shortest repeating sequence of values in the build vector.
  ///
  /// e.g. { u, X, u, X, u, u, X, u } -> { X }
  ///      { X, Y, u, Y, u, u, X, u } -> { X, Y }
  ///
  /// Currently this must be a power-of-2 build vector.
  /// If passed a non-null UndefElements bitvector, it will resize it to match
  /// the original vector width and set the bits where elements are undef.
  /// If result is false, Sequence will be empty.
  bool getRepeatedSequence(SmallVectorImpl<SDValue> &Sequence,
                           BitVector *UndefElements = nullptr) const;

  /// Returns the demanded splatted constant or null if this is not a constant
  /// splat.
  ///
  /// The DemandedElts mask indicates the elements that must be in the splat.
  /// If passed a non-null UndefElements bitvector, it will resize it to match
  /// the vector width and set the bits where elements are undef.
  ConstantSDNode *
  getConstantSplatNode(const APInt &DemandedElts,
                       BitVector *UndefElements = nullptr) const;

  /// Returns the splatted constant or null if this is not a constant
  /// splat.
  ///
  /// If passed a non-null UndefElements bitvector, it will resize it to match
  /// the vector width and set the bits where elements are undef.
  ConstantSDNode *
  getConstantSplatNode(BitVector *UndefElements = nullptr) const;

  /// Returns the demanded splatted constant FP or null if this is not a
  /// constant FP splat.
  ///
  /// The DemandedElts mask indicates the elements that must be in the splat.
  /// If passed a non-null UndefElements bitvector, it will resize it to match
  /// the vector width and set the bits where elements are undef.
  ConstantFPSDNode *
  getConstantFPSplatNode(const APInt &DemandedElts,
                         BitVector *UndefElements = nullptr) const;

  /// Returns the splatted constant FP or null if this is not a constant
  /// FP splat.
  ///
  /// If passed a non-null UndefElements bitvector, it will resize it to match
  /// the vector width and set the bits where elements are undef.
  ConstantFPSDNode *
  getConstantFPSplatNode(BitVector *UndefElements = nullptr) const;

  /// If this is a constant FP splat and the splatted constant FP is an
  /// exact power or 2, return the log base 2 integer value. Otherwise,
  /// return -1.
  ///
  /// The BitWidth specifies the necessary bit precision.
  int32_t getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements,
                                          uint32_t BitWidth) const;

  /// Returns true if every operand of this build vector is a constant.
  bool isConstant() const;

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::BUILD_VECTOR;
  }
};
2040 | ||||
/// An SDNode that holds an arbitrary LLVM IR Value. This is
/// used when the SelectionDAG needs to make a simple reference to something
/// in the LLVM IR representation.
///
class SrcValueSDNode : public SDNode {
  friend class SelectionDAG;

  const Value *V;

  /// Create a SrcValue for a general value.
  explicit SrcValueSDNode(const Value *v)
      : SDNode(ISD::SRCVALUE, 0, DebugLoc(), getSDVTList(MVT::Other)), V(v) {}

public:
  /// Return the contained Value.
  const Value *getValue() const { return V; }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::SRCVALUE;
  }
};
2062 | ||||
/// An SDNode holding a reference to an IR metadata node (ISD::MDNODE_SDNODE).
class MDNodeSDNode : public SDNode {
  friend class SelectionDAG;

  const MDNode *MD;

  explicit MDNodeSDNode(const MDNode *md)
      : SDNode(ISD::MDNODE_SDNODE, 0, DebugLoc(), getSDVTList(MVT::Other)), MD(md)
  {}

public:
  /// Return the wrapped MDNode.
  const MDNode *getMD() const { return MD; }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::MDNODE_SDNODE;
  }
};
2079 | ||||
/// An SDNode wrapping a machine register number (ISD::Register).
class RegisterSDNode : public SDNode {
  friend class SelectionDAG;

  Register Reg;

  RegisterSDNode(Register reg, EVT VT)
      : SDNode(ISD::Register, 0, DebugLoc(), getSDVTList(VT)), Reg(reg) {}

public:
  /// Return the wrapped register.
  Register getReg() const { return Reg; }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::Register;
  }
};
2095 | ||||
/// An SDNode wrapping a register mask (ISD::RegisterMask). The node is typed
/// MVT::Untyped since the mask is not a value.
class RegisterMaskSDNode : public SDNode {
  friend class SelectionDAG;

  // The memory for RegMask is not owned by the node.
  const uint32_t *RegMask;

  RegisterMaskSDNode(const uint32_t *mask)
      : SDNode(ISD::RegisterMask, 0, DebugLoc(), getSDVTList(MVT::Untyped)),
        RegMask(mask) {}

public:
  /// Return the (non-owned) register mask bit array.
  const uint32_t *getRegMask() const { return RegMask; }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::RegisterMask;
  }
};
2113 | ||||
/// An SDNode referencing an IR BlockAddress with an offset and
/// target-specific flags (ISD::BlockAddress / ISD::TargetBlockAddress).
class BlockAddressSDNode : public SDNode {
  friend class SelectionDAG;

  const BlockAddress *BA;
  int64_t Offset;       // Byte offset added to the block address.
  unsigned TargetFlags; // Target-specific flags.

  BlockAddressSDNode(unsigned NodeTy, EVT VT, const BlockAddress *ba,
                     int64_t o, unsigned Flags)
      : SDNode(NodeTy, 0, DebugLoc(), getSDVTList(VT)),
        BA(ba), Offset(o), TargetFlags(Flags) {}

public:
  const BlockAddress *getBlockAddress() const { return BA; }
  int64_t getOffset() const { return Offset; }
  unsigned getTargetFlags() const { return TargetFlags; }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::BlockAddress ||
           N->getOpcode() == ISD::TargetBlockAddress;
  }
};
2136 | ||||
/// An SDNode wrapping an MCSymbol label (ISD::EH_LABEL or
/// ISD::ANNOTATION_LABEL).
class LabelSDNode : public SDNode {
  friend class SelectionDAG;

  MCSymbol *Label;

  LabelSDNode(unsigned Opcode, unsigned Order, const DebugLoc &dl, MCSymbol *L)
      : SDNode(Opcode, Order, dl, getSDVTList(MVT::Other)), Label(L) {
    // The opcode is passed in by the caller; verify it is one of the two
    // label opcodes accepted by classof().
    assert(LabelSDNode::classof(this) && "not a label opcode");
  }

public:
  MCSymbol *getLabel() const { return Label; }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::EH_LABEL ||
           N->getOpcode() == ISD::ANNOTATION_LABEL;
  }
};
2155 | ||||
/// An SDNode referencing an external symbol by name (ISD::ExternalSymbol or
/// ISD::TargetExternalSymbol).
class ExternalSymbolSDNode : public SDNode {
  friend class SelectionDAG;

  const char *Symbol;   // Symbol name; storage is not owned by the node.
  unsigned TargetFlags; // Target-specific flags.

  ExternalSymbolSDNode(bool isTarget, const char *Sym, unsigned TF, EVT VT)
      : SDNode(isTarget ? ISD::TargetExternalSymbol : ISD::ExternalSymbol, 0,
               DebugLoc(), getSDVTList(VT)),
        Symbol(Sym), TargetFlags(TF) {}

public:
  const char *getSymbol() const { return Symbol; }
  unsigned getTargetFlags() const { return TargetFlags; }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::ExternalSymbol ||
           N->getOpcode() == ISD::TargetExternalSymbol;
  }
};
2176 | ||||
2177 | class MCSymbolSDNode : public SDNode { | |||
2178 | friend class SelectionDAG; | |||
2179 | ||||
2180 | MCSymbol *Symbol; | |||
2181 | ||||
2182 | MCSymbolSDNode(MCSymbol *Symbol, EVT VT) | |||
2183 | : SDNode(ISD::MCSymbol, 0, DebugLoc(), getSDVTList(VT)), Symbol(Symbol) {} | |||
2184 | ||||
2185 | public: | |||
2186 | MCSymbol *getMCSymbol() const { return Symbol; } | |||
2187 | ||||
2188 | static bool classof(const SDNode *N) { | |||
2189 | return N->getOpcode() == ISD::MCSymbol; | |||
2190 | } | |||
2191 | }; | |||
2192 | ||||
2193 | class CondCodeSDNode : public SDNode { | |||
2194 | friend class SelectionDAG; | |||
2195 | ||||
2196 | ISD::CondCode Condition; | |||
2197 | ||||
2198 | explicit CondCodeSDNode(ISD::CondCode Cond) | |||
2199 | : SDNode(ISD::CONDCODE, 0, DebugLoc(), getSDVTList(MVT::Other)), | |||
2200 | Condition(Cond) {} | |||
2201 | ||||
2202 | public: | |||
2203 | ISD::CondCode get() const { return Condition; } | |||
2204 | ||||
2205 | static bool classof(const SDNode *N) { | |||
2206 | return N->getOpcode() == ISD::CONDCODE; | |||
2207 | } | |||
2208 | }; | |||
2209 | ||||
2210 | /// This class is used to represent EVT's, which are used | |||
2211 | /// to parameterize some operations. | |||
2212 | class VTSDNode : public SDNode { | |||
2213 | friend class SelectionDAG; | |||
2214 | ||||
2215 | EVT ValueType; | |||
2216 | ||||
2217 | explicit VTSDNode(EVT VT) | |||
2218 | : SDNode(ISD::VALUETYPE, 0, DebugLoc(), getSDVTList(MVT::Other)), | |||
2219 | ValueType(VT) {} | |||
2220 | ||||
2221 | public: | |||
2222 | EVT getVT() const { return ValueType; } | |||
2223 | ||||
2224 | static bool classof(const SDNode *N) { | |||
2225 | return N->getOpcode() == ISD::VALUETYPE; | |||
2226 | } | |||
2227 | }; | |||
2228 | ||||
2229 | /// Base class for LoadSDNode and StoreSDNode | |||
2230 | class LSBaseSDNode : public MemSDNode { | |||
2231 | public: | |||
2232 | LSBaseSDNode(ISD::NodeType NodeTy, unsigned Order, const DebugLoc &dl, | |||
2233 | SDVTList VTs, ISD::MemIndexedMode AM, EVT MemVT, | |||
2234 | MachineMemOperand *MMO) | |||
2235 | : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) { | |||
2236 | LSBaseSDNodeBits.AddressingMode = AM; | |||
2237 | assert(getAddressingMode() == AM && "Value truncated")((void)0); | |||
2238 | } | |||
2239 | ||||
2240 | const SDValue &getOffset() const { | |||
2241 | return getOperand(getOpcode() == ISD::LOAD ? 2 : 3); | |||
2242 | } | |||
2243 | ||||
2244 | /// Return the addressing mode for this load or store: | |||
2245 | /// unindexed, pre-inc, pre-dec, post-inc, or post-dec. | |||
2246 | ISD::MemIndexedMode getAddressingMode() const { | |||
2247 | return static_cast<ISD::MemIndexedMode>(LSBaseSDNodeBits.AddressingMode); | |||
2248 | } | |||
2249 | ||||
2250 | /// Return true if this is a pre/post inc/dec load/store. | |||
2251 | bool isIndexed() const { return getAddressingMode() != ISD::UNINDEXED; } | |||
2252 | ||||
2253 | /// Return true if this is NOT a pre/post inc/dec load/store. | |||
2254 | bool isUnindexed() const { return getAddressingMode() == ISD::UNINDEXED; } | |||
2255 | ||||
2256 | static bool classof(const SDNode *N) { | |||
2257 | return N->getOpcode() == ISD::LOAD || | |||
2258 | N->getOpcode() == ISD::STORE; | |||
2259 | } | |||
2260 | }; | |||
2261 | ||||
2262 | /// This class is used to represent ISD::LOAD nodes. | |||
2263 | class LoadSDNode : public LSBaseSDNode { | |||
2264 | friend class SelectionDAG; | |||
2265 | ||||
2266 | LoadSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs, | |||
2267 | ISD::MemIndexedMode AM, ISD::LoadExtType ETy, EVT MemVT, | |||
2268 | MachineMemOperand *MMO) | |||
2269 | : LSBaseSDNode(ISD::LOAD, Order, dl, VTs, AM, MemVT, MMO) { | |||
2270 | LoadSDNodeBits.ExtTy = ETy; | |||
2271 | assert(readMem() && "Load MachineMemOperand is not a load!")((void)0); | |||
2272 | assert(!writeMem() && "Load MachineMemOperand is a store!")((void)0); | |||
2273 | } | |||
2274 | ||||
2275 | public: | |||
2276 | /// Return whether this is a plain node, | |||
2277 | /// or one of the varieties of value-extending loads. | |||
2278 | ISD::LoadExtType getExtensionType() const { | |||
2279 | return static_cast<ISD::LoadExtType>(LoadSDNodeBits.ExtTy); | |||
2280 | } | |||
2281 | ||||
2282 | const SDValue &getBasePtr() const { return getOperand(1); } | |||
2283 | const SDValue &getOffset() const { return getOperand(2); } | |||
2284 | ||||
2285 | static bool classof(const SDNode *N) { | |||
2286 | return N->getOpcode() == ISD::LOAD; | |||
2287 | } | |||
2288 | }; | |||
2289 | ||||
2290 | /// This class is used to represent ISD::STORE nodes. | |||
2291 | class StoreSDNode : public LSBaseSDNode { | |||
2292 | friend class SelectionDAG; | |||
2293 | ||||
2294 | StoreSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs, | |||
2295 | ISD::MemIndexedMode AM, bool isTrunc, EVT MemVT, | |||
2296 | MachineMemOperand *MMO) | |||
2297 | : LSBaseSDNode(ISD::STORE, Order, dl, VTs, AM, MemVT, MMO) { | |||
2298 | StoreSDNodeBits.IsTruncating = isTrunc; | |||
2299 | assert(!readMem() && "Store MachineMemOperand is a load!")((void)0); | |||
2300 | assert(writeMem() && "Store MachineMemOperand is not a store!")((void)0); | |||
2301 | } | |||
2302 | ||||
2303 | public: | |||
2304 | /// Return true if the op does a truncation before store. | |||
2305 | /// For integers this is the same as doing a TRUNCATE and storing the result. | |||
2306 | /// For floats, it is the same as doing an FP_ROUND and storing the result. | |||
2307 | bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; } | |||
2308 | void setTruncatingStore(bool Truncating) { | |||
2309 | StoreSDNodeBits.IsTruncating = Truncating; | |||
2310 | } | |||
2311 | ||||
2312 | const SDValue &getValue() const { return getOperand(1); } | |||
2313 | const SDValue &getBasePtr() const { return getOperand(2); } | |||
2314 | const SDValue &getOffset() const { return getOperand(3); } | |||
2315 | ||||
2316 | static bool classof(const SDNode *N) { | |||
2317 | return N->getOpcode() == ISD::STORE; | |||
2318 | } | |||
2319 | }; | |||
2320 | ||||
2321 | /// This base class is used to represent MLOAD and MSTORE nodes | |||
2322 | class MaskedLoadStoreSDNode : public MemSDNode { | |||
2323 | public: | |||
2324 | friend class SelectionDAG; | |||
2325 | ||||
2326 | MaskedLoadStoreSDNode(ISD::NodeType NodeTy, unsigned Order, | |||
2327 | const DebugLoc &dl, SDVTList VTs, | |||
2328 | ISD::MemIndexedMode AM, EVT MemVT, | |||
2329 | MachineMemOperand *MMO) | |||
2330 | : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) { | |||
2331 | LSBaseSDNodeBits.AddressingMode = AM; | |||
2332 | assert(getAddressingMode() == AM && "Value truncated")((void)0); | |||
2333 | } | |||
2334 | ||||
2335 | // MaskedLoadSDNode (Chain, ptr, offset, mask, passthru) | |||
2336 | // MaskedStoreSDNode (Chain, data, ptr, offset, mask) | |||
2337 | // Mask is a vector of i1 elements | |||
2338 | const SDValue &getOffset() const { | |||
2339 | return getOperand(getOpcode() == ISD::MLOAD ? 2 : 3); | |||
2340 | } | |||
2341 | const SDValue &getMask() const { | |||
2342 | return getOperand(getOpcode() == ISD::MLOAD ? 3 : 4); | |||
2343 | } | |||
2344 | ||||
2345 | /// Return the addressing mode for this load or store: | |||
2346 | /// unindexed, pre-inc, pre-dec, post-inc, or post-dec. | |||
2347 | ISD::MemIndexedMode getAddressingMode() const { | |||
2348 | return static_cast<ISD::MemIndexedMode>(LSBaseSDNodeBits.AddressingMode); | |||
2349 | } | |||
2350 | ||||
2351 | /// Return true if this is a pre/post inc/dec load/store. | |||
2352 | bool isIndexed() const { return getAddressingMode() != ISD::UNINDEXED; } | |||
2353 | ||||
2354 | /// Return true if this is NOT a pre/post inc/dec load/store. | |||
2355 | bool isUnindexed() const { return getAddressingMode() == ISD::UNINDEXED; } | |||
2356 | ||||
2357 | static bool classof(const SDNode *N) { | |||
2358 | return N->getOpcode() == ISD::MLOAD || | |||
2359 | N->getOpcode() == ISD::MSTORE; | |||
2360 | } | |||
2361 | }; | |||
2362 | ||||
2363 | /// This class is used to represent an MLOAD node | |||
2364 | class MaskedLoadSDNode : public MaskedLoadStoreSDNode { | |||
2365 | public: | |||
2366 | friend class SelectionDAG; | |||
2367 | ||||
2368 | MaskedLoadSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs, | |||
2369 | ISD::MemIndexedMode AM, ISD::LoadExtType ETy, | |||
2370 | bool IsExpanding, EVT MemVT, MachineMemOperand *MMO) | |||
2371 | : MaskedLoadStoreSDNode(ISD::MLOAD, Order, dl, VTs, AM, MemVT, MMO) { | |||
2372 | LoadSDNodeBits.ExtTy = ETy; | |||
2373 | LoadSDNodeBits.IsExpanding = IsExpanding; | |||
2374 | } | |||
2375 | ||||
2376 | ISD::LoadExtType getExtensionType() const { | |||
2377 | return static_cast<ISD::LoadExtType>(LoadSDNodeBits.ExtTy); | |||
2378 | } | |||
2379 | ||||
2380 | const SDValue &getBasePtr() const { return getOperand(1); } | |||
2381 | const SDValue &getOffset() const { return getOperand(2); } | |||
2382 | const SDValue &getMask() const { return getOperand(3); } | |||
2383 | const SDValue &getPassThru() const { return getOperand(4); } | |||
2384 | ||||
2385 | static bool classof(const SDNode *N) { | |||
2386 | return N->getOpcode() == ISD::MLOAD; | |||
2387 | } | |||
2388 | ||||
2389 | bool isExpandingLoad() const { return LoadSDNodeBits.IsExpanding; } | |||
2390 | }; | |||
2391 | ||||
2392 | /// This class is used to represent an MSTORE node | |||
2393 | class MaskedStoreSDNode : public MaskedLoadStoreSDNode { | |||
2394 | public: | |||
2395 | friend class SelectionDAG; | |||
2396 | ||||
2397 | MaskedStoreSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs, | |||
2398 | ISD::MemIndexedMode AM, bool isTrunc, bool isCompressing, | |||
2399 | EVT MemVT, MachineMemOperand *MMO) | |||
2400 | : MaskedLoadStoreSDNode(ISD::MSTORE, Order, dl, VTs, AM, MemVT, MMO) { | |||
2401 | StoreSDNodeBits.IsTruncating = isTrunc; | |||
2402 | StoreSDNodeBits.IsCompressing = isCompressing; | |||
2403 | } | |||
2404 | ||||
2405 | /// Return true if the op does a truncation before store. | |||
2406 | /// For integers this is the same as doing a TRUNCATE and storing the result. | |||
2407 | /// For floats, it is the same as doing an FP_ROUND and storing the result. | |||
2408 | bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; } | |||
2409 | ||||
2410 | /// Returns true if the op does a compression to the vector before storing. | |||
2411 | /// The node contiguously stores the active elements (integers or floats) | |||
2412 | /// in src (those with their respective bit set in writemask k) to unaligned | |||
2413 | /// memory at base_addr. | |||
2414 | bool isCompressingStore() const { return StoreSDNodeBits.IsCompressing; } | |||
2415 | ||||
2416 | const SDValue &getValue() const { return getOperand(1); } | |||
2417 | const SDValue &getBasePtr() const { return getOperand(2); } | |||
2418 | const SDValue &getOffset() const { return getOperand(3); } | |||
2419 | const SDValue &getMask() const { return getOperand(4); } | |||
2420 | ||||
2421 | static bool classof(const SDNode *N) { | |||
2422 | return N->getOpcode() == ISD::MSTORE; | |||
2423 | } | |||
2424 | }; | |||
2425 | ||||
2426 | /// This is a base class used to represent | |||
2427 | /// MGATHER and MSCATTER nodes | |||
2428 | /// | |||
2429 | class MaskedGatherScatterSDNode : public MemSDNode { | |||
2430 | public: | |||
2431 | friend class SelectionDAG; | |||
2432 | ||||
2433 | MaskedGatherScatterSDNode(ISD::NodeType NodeTy, unsigned Order, | |||
2434 | const DebugLoc &dl, SDVTList VTs, EVT MemVT, | |||
2435 | MachineMemOperand *MMO, ISD::MemIndexType IndexType) | |||
2436 | : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) { | |||
2437 | LSBaseSDNodeBits.AddressingMode = IndexType; | |||
2438 | assert(getIndexType() == IndexType && "Value truncated")((void)0); | |||
2439 | } | |||
2440 | ||||
2441 | /// How is Index applied to BasePtr when computing addresses. | |||
2442 | ISD::MemIndexType getIndexType() const { | |||
2443 | return static_cast<ISD::MemIndexType>(LSBaseSDNodeBits.AddressingMode); | |||
2444 | } | |||
2445 | void setIndexType(ISD::MemIndexType IndexType) { | |||
2446 | LSBaseSDNodeBits.AddressingMode = IndexType; | |||
2447 | } | |||
2448 | bool isIndexScaled() const { | |||
2449 | return (getIndexType() == ISD::SIGNED_SCALED) || | |||
2450 | (getIndexType() == ISD::UNSIGNED_SCALED); | |||
2451 | } | |||
2452 | bool isIndexSigned() const { | |||
2453 | return (getIndexType() == ISD::SIGNED_SCALED) || | |||
2454 | (getIndexType() == ISD::SIGNED_UNSCALED); | |||
2455 | } | |||
2456 | ||||
2457 | // In the both nodes address is Op1, mask is Op2: | |||
2458 | // MaskedGatherSDNode (Chain, passthru, mask, base, index, scale) | |||
2459 | // MaskedScatterSDNode (Chain, value, mask, base, index, scale) | |||
2460 | // Mask is a vector of i1 elements | |||
2461 | const SDValue &getBasePtr() const { return getOperand(3); } | |||
2462 | const SDValue &getIndex() const { return getOperand(4); } | |||
2463 | const SDValue &getMask() const { return getOperand(2); } | |||
2464 | const SDValue &getScale() const { return getOperand(5); } | |||
2465 | ||||
2466 | static bool classof(const SDNode *N) { | |||
2467 | return N->getOpcode() == ISD::MGATHER || | |||
2468 | N->getOpcode() == ISD::MSCATTER; | |||
2469 | } | |||
2470 | }; | |||
2471 | ||||
2472 | /// This class is used to represent an MGATHER node | |||
2473 | /// | |||
2474 | class MaskedGatherSDNode : public MaskedGatherScatterSDNode { | |||
2475 | public: | |||
2476 | friend class SelectionDAG; | |||
2477 | ||||
2478 | MaskedGatherSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs, | |||
2479 | EVT MemVT, MachineMemOperand *MMO, | |||
2480 | ISD::MemIndexType IndexType, ISD::LoadExtType ETy) | |||
2481 | : MaskedGatherScatterSDNode(ISD::MGATHER, Order, dl, VTs, MemVT, MMO, | |||
2482 | IndexType) { | |||
2483 | LoadSDNodeBits.ExtTy = ETy; | |||
2484 | } | |||
2485 | ||||
2486 | const SDValue &getPassThru() const { return getOperand(1); } | |||
2487 | ||||
2488 | ISD::LoadExtType getExtensionType() const { | |||
2489 | return ISD::LoadExtType(LoadSDNodeBits.ExtTy); | |||
2490 | } | |||
2491 | ||||
2492 | static bool classof(const SDNode *N) { | |||
2493 | return N->getOpcode() == ISD::MGATHER; | |||
2494 | } | |||
2495 | }; | |||
2496 | ||||
2497 | /// This class is used to represent an MSCATTER node | |||
2498 | /// | |||
2499 | class MaskedScatterSDNode : public MaskedGatherScatterSDNode { | |||
2500 | public: | |||
2501 | friend class SelectionDAG; | |||
2502 | ||||
2503 | MaskedScatterSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs, | |||
2504 | EVT MemVT, MachineMemOperand *MMO, | |||
2505 | ISD::MemIndexType IndexType, bool IsTrunc) | |||
2506 | : MaskedGatherScatterSDNode(ISD::MSCATTER, Order, dl, VTs, MemVT, MMO, | |||
2507 | IndexType) { | |||
2508 | StoreSDNodeBits.IsTruncating = IsTrunc; | |||
2509 | } | |||
2510 | ||||
2511 | /// Return true if the op does a truncation before store. | |||
2512 | /// For integers this is the same as doing a TRUNCATE and storing the result. | |||
2513 | /// For floats, it is the same as doing an FP_ROUND and storing the result. | |||
2514 | bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; } | |||
2515 | ||||
2516 | const SDValue &getValue() const { return getOperand(1); } | |||
2517 | ||||
2518 | static bool classof(const SDNode *N) { | |||
2519 | return N->getOpcode() == ISD::MSCATTER; | |||
2520 | } | |||
2521 | }; | |||
2522 | ||||
2523 | /// An SDNode that represents everything that will be needed | |||
2524 | /// to construct a MachineInstr. These nodes are created during the | |||
2525 | /// instruction selection proper phase. | |||
2526 | /// | |||
2527 | /// Note that the only supported way to set the `memoperands` is by calling the | |||
2528 | /// `SelectionDAG::setNodeMemRefs` function as the memory management happens | |||
2529 | /// inside the DAG rather than in the node. | |||
2530 | class MachineSDNode : public SDNode { | |||
2531 | private: | |||
2532 | friend class SelectionDAG; | |||
2533 | ||||
2534 | MachineSDNode(unsigned Opc, unsigned Order, const DebugLoc &DL, SDVTList VTs) | |||
2535 | : SDNode(Opc, Order, DL, VTs) {} | |||
2536 | ||||
2537 | // We use a pointer union between a single `MachineMemOperand` pointer and | |||
2538 | // a pointer to an array of `MachineMemOperand` pointers. This is null when | |||
2539 | // the number of these is zero, the single pointer variant used when the | |||
2540 | // number is one, and the array is used for larger numbers. | |||
2541 | // | |||
2542 | // The array is allocated via the `SelectionDAG`'s allocator and so will | |||
2543 | // always live until the DAG is cleaned up and doesn't require ownership here. | |||
2544 | // | |||
2545 | // We can't use something simpler like `TinyPtrVector` here because `SDNode` | |||
2546 | // subclasses aren't managed in a conforming C++ manner. See the comments on | |||
2547 | // `SelectionDAG::MorphNodeTo` which details what all goes on, but the | |||
2548 | // constraint here is that these don't manage memory with their constructor or | |||
2549 | // destructor and can be initialized to a good state even if they start off | |||
2550 | // uninitialized. | |||
2551 | PointerUnion<MachineMemOperand *, MachineMemOperand **> MemRefs = {}; | |||
2552 | ||||
2553 | // Note that this could be folded into the above `MemRefs` member if doing so | |||
2554 | // is advantageous at some point. We don't need to store this in most cases. | |||
2555 | // However, at the moment this doesn't appear to make the allocation any | |||
2556 | // smaller and makes the code somewhat simpler to read. | |||
2557 | int NumMemRefs = 0; | |||
2558 | ||||
2559 | public: | |||
2560 | using mmo_iterator = ArrayRef<MachineMemOperand *>::const_iterator; | |||
2561 | ||||
2562 | ArrayRef<MachineMemOperand *> memoperands() const { | |||
2563 | // Special case the common cases. | |||
2564 | if (NumMemRefs == 0) | |||
2565 | return {}; | |||
2566 | if (NumMemRefs == 1) | |||
2567 | return makeArrayRef(MemRefs.getAddrOfPtr1(), 1); | |||
2568 | ||||
2569 | // Otherwise we have an actual array. | |||
2570 | return makeArrayRef(MemRefs.get<MachineMemOperand **>(), NumMemRefs); | |||
2571 | } | |||
2572 | mmo_iterator memoperands_begin() const { return memoperands().begin(); } | |||
2573 | mmo_iterator memoperands_end() const { return memoperands().end(); } | |||
2574 | bool memoperands_empty() const { return memoperands().empty(); } | |||
2575 | ||||
2576 | /// Clear out the memory reference descriptor list. | |||
2577 | void clearMemRefs() { | |||
2578 | MemRefs = nullptr; | |||
2579 | NumMemRefs = 0; | |||
2580 | } | |||
2581 | ||||
2582 | static bool classof(const SDNode *N) { | |||
2583 | return N->isMachineOpcode(); | |||
2584 | } | |||
2585 | }; | |||
2586 | ||||
2587 | /// An SDNode that records if a register contains a value that is guaranteed to | |||
2588 | /// be aligned accordingly. | |||
2589 | class AssertAlignSDNode : public SDNode { | |||
2590 | Align Alignment; | |||
2591 | ||||
2592 | public: | |||
2593 | AssertAlignSDNode(unsigned Order, const DebugLoc &DL, EVT VT, Align A) | |||
2594 | : SDNode(ISD::AssertAlign, Order, DL, getSDVTList(VT)), Alignment(A) {} | |||
2595 | ||||
2596 | Align getAlign() const { return Alignment; } | |||
2597 | ||||
2598 | static bool classof(const SDNode *N) { | |||
2599 | return N->getOpcode() == ISD::AssertAlign; | |||
2600 | } | |||
2601 | }; | |||
2602 | ||||
2603 | class SDNodeIterator { | |||
2604 | const SDNode *Node; | |||
2605 | unsigned Operand; | |||
2606 | ||||
2607 | SDNodeIterator(const SDNode *N, unsigned Op) : Node(N), Operand(Op) {} | |||
2608 | ||||
2609 | public: | |||
2610 | using iterator_category = std::forward_iterator_tag; | |||
2611 | using value_type = SDNode; | |||
2612 | using difference_type = std::ptrdiff_t; | |||
2613 | using pointer = value_type *; | |||
2614 | using reference = value_type &; | |||
2615 | ||||
2616 | bool operator==(const SDNodeIterator& x) const { | |||
2617 | return Operand == x.Operand; | |||
2618 | } | |||
2619 | bool operator!=(const SDNodeIterator& x) const { return !operator==(x); } | |||
2620 | ||||
2621 | pointer operator*() const { | |||
2622 | return Node->getOperand(Operand).getNode(); | |||
2623 | } | |||
2624 | pointer operator->() const { return operator*(); } | |||
2625 | ||||
2626 | SDNodeIterator& operator++() { // Preincrement | |||
2627 | ++Operand; | |||
2628 | return *this; | |||
2629 | } | |||
2630 | SDNodeIterator operator++(int) { // Postincrement | |||
2631 | SDNodeIterator tmp = *this; ++*this; return tmp; | |||
2632 | } | |||
2633 | size_t operator-(SDNodeIterator Other) const { | |||
2634 | assert(Node == Other.Node &&((void)0) | |||
2635 | "Cannot compare iterators of two different nodes!")((void)0); | |||
2636 | return Operand - Other.Operand; | |||
2637 | } | |||
2638 | ||||
2639 | static SDNodeIterator begin(const SDNode *N) { return SDNodeIterator(N, 0); } | |||
2640 | static SDNodeIterator end (const SDNode *N) { | |||
2641 | return SDNodeIterator(N, N->getNumOperands()); | |||
2642 | } | |||
2643 | ||||
2644 | unsigned getOperand() const { return Operand; } | |||
2645 | const SDNode *getNode() const { return Node; } | |||
2646 | }; | |||
2647 | ||||
/// GraphTraits specialization treating an SDNode as a graph whose children
/// are the nodes feeding its operands; this lets LLVM's generic graph
/// machinery (e.g. depth-first iterators) walk a DAG from any node.
template <> struct GraphTraits<SDNode*> {
  using NodeRef = SDNode *;
  using ChildIteratorType = SDNodeIterator;

  // The entry node of the "graph" is the node itself.
  static NodeRef getEntryNode(SDNode *N) { return N; }

  // Children are visited in operand order.
  static ChildIteratorType child_begin(NodeRef N) {
    return SDNodeIterator::begin(N);
  }

  static ChildIteratorType child_end(NodeRef N) {
    return SDNodeIterator::end(N);
  }
};
2662 | ||||
/// A representation of the largest SDNode, for use in sizeof().
///
/// This needs to be a union because the largest node differs on 32 bit systems
/// with 4 and 8 byte pointer alignment, respectively.
// NOTE(review): the member list appears hand-maintained — confirm it still
// names the largest node types when new SDNode subclasses are added.
using LargestSDNode = AlignedCharArrayUnion<AtomicSDNode, TargetIndexSDNode,
                                            BlockAddressSDNode,
                                            GlobalAddressSDNode,
                                            PseudoProbeSDNode>;

/// The SDNode class with the greatest alignment requirement.
using MostAlignedSDNode = GlobalAddressSDNode;
2674 | ||||
2675 | namespace ISD { | |||
2676 | ||||
2677 | /// Returns true if the specified node is a non-extending and unindexed load. | |||
2678 | inline bool isNormalLoad(const SDNode *N) { | |||
2679 | const LoadSDNode *Ld = dyn_cast<LoadSDNode>(N); | |||
2680 | return Ld && Ld->getExtensionType() == ISD::NON_EXTLOAD && | |||
2681 | Ld->getAddressingMode() == ISD::UNINDEXED; | |||
2682 | } | |||
2683 | ||||
2684 | /// Returns true if the specified node is a non-extending load. | |||
2685 | inline bool isNON_EXTLoad(const SDNode *N) { | |||
2686 | return isa<LoadSDNode>(N) && | |||
2687 | cast<LoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD; | |||
2688 | } | |||
2689 | ||||
2690 | /// Returns true if the specified node is a EXTLOAD. | |||
2691 | inline bool isEXTLoad(const SDNode *N) { | |||
2692 | return isa<LoadSDNode>(N) && | |||
2693 | cast<LoadSDNode>(N)->getExtensionType() == ISD::EXTLOAD; | |||
2694 | } | |||
2695 | ||||
2696 | /// Returns true if the specified node is a SEXTLOAD. | |||
2697 | inline bool isSEXTLoad(const SDNode *N) { | |||
2698 | return isa<LoadSDNode>(N) && | |||
2699 | cast<LoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD; | |||
2700 | } | |||
2701 | ||||
2702 | /// Returns true if the specified node is a ZEXTLOAD. | |||
2703 | inline bool isZEXTLoad(const SDNode *N) { | |||
2704 | return isa<LoadSDNode>(N) && | |||
2705 | cast<LoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD; | |||
2706 | } | |||
2707 | ||||
2708 | /// Returns true if the specified node is an unindexed load. | |||
2709 | inline bool isUNINDEXEDLoad(const SDNode *N) { | |||
2710 | return isa<LoadSDNode>(N) && | |||
2711 | cast<LoadSDNode>(N)->getAddressingMode() == ISD::UNINDEXED; | |||
2712 | } | |||
2713 | ||||
2714 | /// Returns true if the specified node is a non-truncating | |||
2715 | /// and unindexed store. | |||
2716 | inline bool isNormalStore(const SDNode *N) { | |||
2717 | const StoreSDNode *St = dyn_cast<StoreSDNode>(N); | |||
2718 | return St && !St->isTruncatingStore() && | |||
2719 | St->getAddressingMode() == ISD::UNINDEXED; | |||
2720 | } | |||
2721 | ||||
2722 | /// Returns true if the specified node is an unindexed store. | |||
2723 | inline bool isUNINDEXEDStore(const SDNode *N) { | |||
2724 | return isa<StoreSDNode>(N) && | |||
2725 | cast<StoreSDNode>(N)->getAddressingMode() == ISD::UNINDEXED; | |||
2726 | } | |||
2727 | ||||
/// Attempt to match a unary predicate against a scalar/splat constant or
/// every element of a constant BUILD_VECTOR.
/// If AllowUndefs is true, then UNDEF elements will pass nullptr to Match.
bool matchUnaryPredicate(SDValue Op,
                         std::function<bool(ConstantSDNode *)> Match,
                         bool AllowUndefs = false);

/// Attempt to match a binary predicate against a pair of scalar/splat
/// constants or every element of a pair of constant BUILD_VECTORs.
/// If AllowUndefs is true, then UNDEF elements will pass nullptr to Match.
/// If AllowTypeMismatch is true then RetType + ArgTypes don't need to match.
bool matchBinaryPredicate(
    SDValue LHS, SDValue RHS,
    std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match,
    bool AllowUndefs = false, bool AllowTypeMismatch = false);
2743 | ||||
2744 | /// Returns true if the specified value is the overflow result from one | |||
2745 | /// of the overflow intrinsic nodes. | |||
2746 | inline bool isOverflowIntrOpRes(SDValue Op) { | |||
2747 | unsigned Opc = Op.getOpcode(); | |||
2748 | return (Op.getResNo() == 1 && | |||
2749 | (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO || | |||
2750 | Opc == ISD::USUBO || Opc == ISD::SMULO || Opc == ISD::UMULO)); | |||
2751 | } | |||
2752 | ||||
2753 | } // end namespace ISD | |||
2754 | ||||
2755 | } // end namespace llvm | |||
2756 | ||||
2757 | #endif // LLVM_CODEGEN_SELECTIONDAGNODES_H |