clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name TargetInstrInfo.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/gnu/usr.bin/clang/libLLVM/obj -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I 
/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Analysis -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ASMParser -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/BinaryFormat -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitcode -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitcode -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitstream -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /include/llvm/CodeGen -I /include/llvm/CodeGen/PBQP -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/IR -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/IR -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Coroutines -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ProfileData/Coverage -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/CodeView -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/DWARF -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/MSF -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/PDB -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Demangle -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine/JITLink -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine/Orc -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend/OpenACC -I 
/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend/OpenMP -I /include/llvm/CodeGen/GlobalISel -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/IRReader -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/InstCombine -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/Transforms/InstCombine -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/LTO -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Linker -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/MC -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/MC/MCParser -I /include/llvm/CodeGen/MIRParser -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Object -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Option -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Passes -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ProfileData -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Scalar -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ADT -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Support -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/Symbolize -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Target -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I 
/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Utils -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Vectorize -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/IPO -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include -I /usr/src/gnu/usr.bin/clang/libLLVM/../include -I /usr/src/gnu/usr.bin/clang/libLLVM/obj -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include -D NDEBUG -D __STDC_LIMIT_MACROS -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D LLVM_PREFIX="/usr" -internal-isystem /usr/include/c++/v1 -internal-isystem /usr/local/lib/clang/13.0.0/include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/usr/src/gnu/usr.bin/clang/libLLVM/obj -ferror-limit 19 -fvisibility-inlines-hidden -fwrapv -stack-protector 2 -fno-rtti -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup 
-fno-builtin-strndup -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/ben/Projects/vmm/scan-build/2022-01-12-194120-40624-1 -x c++ /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/CodeGen/TargetInstrInfo.cpp
1 | |
2 | |
3 | |
4 | |
5 | |
6 | |
7 | |
8 | |
9 | |
10 | |
11 | |
12 | |
13 | #include "llvm/CodeGen/TargetInstrInfo.h" |
14 | #include "llvm/ADT/StringExtras.h" |
15 | #include "llvm/CodeGen/MachineFrameInfo.h" |
16 | #include "llvm/CodeGen/MachineInstrBuilder.h" |
17 | #include "llvm/CodeGen/MachineMemOperand.h" |
18 | #include "llvm/CodeGen/MachineRegisterInfo.h" |
19 | #include "llvm/CodeGen/MachineScheduler.h" |
20 | #include "llvm/CodeGen/PseudoSourceValue.h" |
21 | #include "llvm/CodeGen/ScoreboardHazardRecognizer.h" |
22 | #include "llvm/CodeGen/StackMaps.h" |
23 | #include "llvm/CodeGen/TargetFrameLowering.h" |
24 | #include "llvm/CodeGen/TargetLowering.h" |
25 | #include "llvm/CodeGen/TargetRegisterInfo.h" |
26 | #include "llvm/CodeGen/TargetSchedule.h" |
27 | #include "llvm/IR/DataLayout.h" |
28 | #include "llvm/IR/DebugInfoMetadata.h" |
29 | #include "llvm/MC/MCAsmInfo.h" |
30 | #include "llvm/MC/MCInstrItineraries.h" |
31 | #include "llvm/Support/CommandLine.h" |
32 | #include "llvm/Support/ErrorHandling.h" |
33 | #include "llvm/Support/raw_ostream.h" |
34 | #include "llvm/Target/TargetMachine.h" |
35 | #include <cctype> |
36 | |
37 | using namespace llvm; |
38 | |
// Command-line escape hatch: when set, createScheduleDAGTopologicalSort-style
// pre-RA scheduling runs without a hazard recognizer (see
// CreateTargetHazardRecognizer below, which checks this flag).
static cl::opt<bool> DisableHazardRecognizer(
    "disable-sched-hazard", cl::Hidden, cl::init(false),
    cl::desc("Disable hazard detection during preRA scheduling"));
42 | |
43 | TargetInstrInfo::~TargetInstrInfo() { |
44 | } |
45 | |
46 | const TargetRegisterClass* |
47 | TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum, |
48 | const TargetRegisterInfo *TRI, |
49 | const MachineFunction &MF) const { |
50 | if (OpNum >= MCID.getNumOperands()) |
51 | return nullptr; |
52 | |
53 | short RegClass = MCID.OpInfo[OpNum].RegClass; |
54 | if (MCID.OpInfo[OpNum].isLookupPtrRegClass()) |
55 | return TRI->getPointerRegClass(MF, RegClass); |
56 | |
57 | |
58 | if (RegClass < 0) |
59 | return nullptr; |
60 | |
61 | |
62 | return TRI->getRegClass(RegClass); |
63 | } |
64 | |
65 | |
66 | |
/// Insert a single noop into MBB before MI. There is no target-independent
/// noop instruction, so any target that needs this hook must override it.
void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  llvm_unreachable("Target didn't implement insertNoop!");
}
71 | |
72 | |
73 | |
74 | void TargetInstrInfo::insertNoops(MachineBasicBlock &MBB, |
75 | MachineBasicBlock::iterator MI, |
76 | unsigned Quantity) const { |
77 | for (unsigned i = 0; i < Quantity; ++i) |
78 | insertNoop(MBB, MI); |
79 | } |
80 | |
81 | static bool isAsmComment(const char *Str, const MCAsmInfo &MAI) { |
82 | return strncmp(Str, MAI.getCommentString().data(), |
83 | MAI.getCommentString().size()) == 0; |
84 | } |
85 | |
86 | |
87 | |
88 | |
89 | |
90 | |
91 | |
92 | |
93 | |
94 | |
95 | |
96 | |
97 | |
98 | |
99 | |
/// Measure the specified inline asm to determine an approximation of its
/// length. Comments (which run until the next separator or newline) do not
/// count as an instruction; any other non-whitespace text does, with multiple
/// instructions separated by the target's SeparatorString or newlines. Each
/// instruction is conservatively assumed to be MaxInstLength bytes, except a
/// recognized ".space N" directive which contributes exactly N bytes.
unsigned TargetInstrInfo::getInlineAsmLength(
  const char *Str,
  const MCAsmInfo &MAI, const TargetSubtargetInfo *STI) const {
  // Count the number of instructions in the asm.
  bool AtInsnStart = true;
  unsigned Length = 0;
  const unsigned MaxInstLength = MAI.getMaxInstLength(STI);
  for (; *Str; ++Str) {
    if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
                                strlen(MAI.getSeparatorString())) == 0) {
      AtInsnStart = true;
    } else if (isAsmComment(Str, MAI)) {
      // Stop counting as an instruction after a comment until the next
      // separator.
      AtInsnStart = false;
    }

    if (AtInsnStart && !isSpace(static_cast<unsigned char>(*Str))) {
      unsigned AddLength = MaxInstLength;
      if (strncmp(Str, ".space", 6) == 0) {
        char *EStr;
        int SpaceSize;
        SpaceSize = strtol(Str + 6, &EStr, 10);
        SpaceSize = SpaceSize < 0 ? 0 : SpaceSize;
        // Skip trailing whitespace after the size argument.
        while (*EStr != '\n' && isSpace(static_cast<unsigned char>(*EStr)))
          ++EStr;
        // Only honor the directive if nothing but end-of-string, newline, or
        // a comment follows the parsed argument.
        if (*EStr == '\0' || *EStr == '\n' ||
            isAsmComment(EStr, MAI)) // Successfully parsed .space argument
          AddLength = SpaceSize;
      }
      Length += AddLength;
      AtInsnStart = false;
    }
  }

  return Length;
}
137 | |
138 | |
139 | |
/// Delete every instruction from Tail to the end of its block, rewire the
/// block's CFG edges, and branch to NewDest (unless it is the fallthrough).
void
TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();

  // Remove all the old successors of MBB from the CFG.
  while (!MBB->succ_empty())
    MBB->removeSuccessor(MBB->succ_begin());

  // Save off the debug loc before erasing the instruction.
  DebugLoc DL = Tail->getDebugLoc();

  // Update call site info (before deletion, while the instruction is still
  // valid) and remove all the dead instructions from the end of MBB.
  while (Tail != MBB->end()) {
    auto MI = Tail++; // post-increment: keep a valid iterator across erase()
    if (MI->shouldUpdateCallSiteInfo())
      MBB->getParent()->eraseCallSiteInfo(&*MI);
    MBB->erase(MI);
  }

  // If MBB isn't immediately before NewDest, insert a branch to NewDest;
  // otherwise let it fall through. Either way NewDest becomes a successor.
  if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
    insertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(), DL);
  MBB->addSuccessor(NewDest);
}
166 | |
/// Default implementation of commuteInstructionImpl: swap the register
/// operands at Idx1 and Idx2 together with their subregisters and flags,
/// retargeting a tied def register when needed. Returns the commuted
/// instruction (MI itself, or a fresh clone when NewMI is true), or nullptr
/// when this generic code cannot handle the instruction.
MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr &MI,
                                                      bool NewMI, unsigned Idx1,
                                                      unsigned Idx2) const {
  const MCInstrDesc &MCID = MI.getDesc();
  bool HasDef = MCID.getNumDefs();
  if (HasDef && !MI.getOperand(0).isReg())
    // No idea how to commute this instruction. Target should implement its own.
    return nullptr;

  // Sanity-check (asserts only; the (void) casts silence NDEBUG warnings)
  // that Idx1/Idx2 are exactly what the target reports as commutable.
  unsigned CommutableOpIdx1 = Idx1; (void)CommutableOpIdx1;
  unsigned CommutableOpIdx2 = Idx2; (void)CommutableOpIdx2;
  assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) &&
         CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
         "TargetInstrInfo::CommuteInstructionImpl(): not commutable operands.");
  assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
         "This only knows how to commute register operands so far");

  // Capture registers, subregisters and flags before any rewriting.
  Register Reg0 = HasDef ? MI.getOperand(0).getReg() : Register();
  Register Reg1 = MI.getOperand(Idx1).getReg();
  Register Reg2 = MI.getOperand(Idx2).getReg();
  unsigned SubReg0 = HasDef ? MI.getOperand(0).getSubReg() : 0;
  unsigned SubReg1 = MI.getOperand(Idx1).getSubReg();
  unsigned SubReg2 = MI.getOperand(Idx2).getSubReg();
  bool Reg1IsKill = MI.getOperand(Idx1).isKill();
  bool Reg2IsKill = MI.getOperand(Idx2).isKill();
  bool Reg1IsUndef = MI.getOperand(Idx1).isUndef();
  bool Reg2IsUndef = MI.getOperand(Idx2).isUndef();
  bool Reg1IsInternal = MI.getOperand(Idx1).isInternalRead();
  bool Reg2IsInternal = MI.getOperand(Idx2).isInternalRead();
  // Avoid querying isRenamable for virtual registers: the renamable flag is
  // only meaningful (and only asserted on) for physical registers.
  bool Reg1IsRenamable = Register::isPhysicalRegister(Reg1)
                             ? MI.getOperand(Idx1).isRenamable()
                             : false;
  bool Reg2IsRenamable = Register::isPhysicalRegister(Reg2)
                             ? MI.getOperand(Idx2).isRenamable()
                             : false;

  // If the def is tied to one of the commuted sources, it must end up tied to
  // the other one after the swap, so retarget the def to the other register.
  if (HasDef && Reg0 == Reg1 &&
      MI.getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
    Reg2IsKill = false;
    Reg0 = Reg2;
    SubReg0 = SubReg2;
  } else if (HasDef && Reg0 == Reg2 &&
             MI.getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
    Reg1IsKill = false;
    Reg0 = Reg1;
    SubReg0 = SubReg1;
  }

  MachineInstr *CommutedMI = nullptr;
  if (NewMI) {
    // Create a new instruction.
    MachineFunction &MF = *MI.getMF();
    CommutedMI = MF.CloneMachineInstr(&MI);
  } else {
    CommutedMI = &MI;
  }

  if (HasDef) {
    CommutedMI->getOperand(0).setReg(Reg0);
    CommutedMI->getOperand(0).setSubReg(SubReg0);
  }
  // Cross-assign registers, subregisters and flags: Idx2 gets Idx1's state
  // and vice versa.
  CommutedMI->getOperand(Idx2).setReg(Reg1);
  CommutedMI->getOperand(Idx1).setReg(Reg2);
  CommutedMI->getOperand(Idx2).setSubReg(SubReg1);
  CommutedMI->getOperand(Idx1).setSubReg(SubReg2);
  CommutedMI->getOperand(Idx2).setIsKill(Reg1IsKill);
  CommutedMI->getOperand(Idx1).setIsKill(Reg2IsKill);
  CommutedMI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
  CommutedMI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
  CommutedMI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
  CommutedMI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
  // Only set the renamable flag where it is valid to do so (physregs).
  if (Register::isPhysicalRegister(Reg1))
    CommutedMI->getOperand(Idx2).setIsRenamable(Reg1IsRenamable);
  if (Register::isPhysicalRegister(Reg2))
    CommutedMI->getOperand(Idx1).setIsRenamable(Reg2IsRenamable);
  return CommutedMI;
}
249 | |
/// Public entry point for commuting: resolves CommuteAnyOperandIndex wildcards
/// via findCommutedOpIndices, then dispatches to commuteInstructionImpl.
/// Returns nullptr if no commutable operand pair could be resolved.
MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr &MI, bool NewMI,
                                                  unsigned OpIdx1,
                                                  unsigned OpIdx2) const {
  // If any of the given indices is the wildcard, let the target pick the
  // concrete pair; failure here means the wildcard could not be resolved.
  if ((OpIdx1 == CommuteAnyOperandIndex || OpIdx2 == CommuteAnyOperandIndex) &&
      !findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
    assert(MI.isCommutable() &&
           "Precondition violation: MI must be commutable.");
    return nullptr;
  }
  return commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
}
264 | |
265 | bool TargetInstrInfo::fixCommutedOpIndices(unsigned &ResultIdx1, |
266 | unsigned &ResultIdx2, |
267 | unsigned CommutableOpIdx1, |
268 | unsigned CommutableOpIdx2) { |
269 | if (ResultIdx1 == CommuteAnyOperandIndex && |
270 | ResultIdx2 == CommuteAnyOperandIndex) { |
271 | ResultIdx1 = CommutableOpIdx1; |
272 | ResultIdx2 = CommutableOpIdx2; |
273 | } else if (ResultIdx1 == CommuteAnyOperandIndex) { |
274 | if (ResultIdx2 == CommutableOpIdx1) |
275 | ResultIdx1 = CommutableOpIdx2; |
276 | else if (ResultIdx2 == CommutableOpIdx2) |
277 | ResultIdx1 = CommutableOpIdx1; |
278 | else |
279 | return false; |
280 | } else if (ResultIdx2 == CommuteAnyOperandIndex) { |
281 | if (ResultIdx1 == CommutableOpIdx1) |
282 | ResultIdx2 = CommutableOpIdx2; |
283 | else if (ResultIdx1 == CommutableOpIdx2) |
284 | ResultIdx2 = CommutableOpIdx1; |
285 | else |
286 | return false; |
287 | } else |
288 | |
289 | |
290 | return (ResultIdx1 == CommutableOpIdx1 && ResultIdx2 == CommutableOpIdx2) || |
291 | (ResultIdx1 == CommutableOpIdx2 && ResultIdx2 == CommutableOpIdx1); |
292 | |
293 | return true; |
294 | } |
295 | |
/// Default commutable-operand discovery: assume the two operands immediately
/// after the defs are the commutable pair, then reconcile with the caller's
/// requested indices via fixCommutedOpIndices.
bool TargetInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
                                            unsigned &SrcOpIdx1,
                                            unsigned &SrcOpIdx2) const {
  assert(!MI.isBundle() &&
         "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MCID.isCommutable())
    return false;

  // By default assume the first two source operands (right after the defs)
  // are commutable.
  unsigned CommutableOpIdx1 = MCID.getNumDefs();
  unsigned CommutableOpIdx2 = CommutableOpIdx1 + 1;
  if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
                            CommutableOpIdx1, CommutableOpIdx2))
    return false;

  // This generic code only knows how to commute register operands.
  if (!MI.getOperand(SrcOpIdx1).isReg() || !MI.getOperand(SrcOpIdx2).isReg())
    // No idea.
    return false;
  return true;
}
319 | |
320 | bool TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const { |
321 | if (!MI.isTerminator()) return false; |
322 | |
323 | |
324 | if (MI.isBranch() && !MI.isBarrier()) |
325 | return true; |
326 | if (!MI.isPredicable()) |
327 | return true; |
328 | return !isPredicated(MI); |
329 | } |
330 | |
/// Rewrite MI's predicate operands with the values in Pred. Walks MI's
/// operands with two cursors: i over all operands, j over Pred, advancing j
/// only on predicate operands. Returns true if any operand was rewritten.
bool TargetInstrInfo::PredicateInstruction(
    MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
  bool MadeChange = false;

  assert(!MI.isBundle() &&
         "TargetInstrInfo::PredicateInstruction() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MI.isPredicable())
    return false;

  for (unsigned j = 0, i = 0, e = MI.getNumOperands(); i != e; ++i) {
    if (MCID.OpInfo[i].isPredicate()) {
      MachineOperand &MO = MI.getOperand(i);
      // Copy whichever kind of value the predicate operand holds.
      if (MO.isReg()) {
        MO.setReg(Pred[j].getReg());
        MadeChange = true;
      } else if (MO.isImm()) {
        MO.setImm(Pred[j].getImm());
        MadeChange = true;
      } else if (MO.isMBB()) {
        MO.setMBB(Pred[j].getMBB());
        MadeChange = true;
      }
      ++j;
    }
  }
  return MadeChange;
}
360 | |
361 | bool TargetInstrInfo::hasLoadFromStackSlot( |
362 | const MachineInstr &MI, |
363 | SmallVectorImpl<const MachineMemOperand *> &Accesses) const { |
364 | size_t StartSize = Accesses.size(); |
365 | for (MachineInstr::mmo_iterator o = MI.memoperands_begin(), |
366 | oe = MI.memoperands_end(); |
367 | o != oe; ++o) { |
368 | if ((*o)->isLoad() && |
369 | dyn_cast_or_null<FixedStackPseudoSourceValue>((*o)->getPseudoValue())) |
370 | Accesses.push_back(*o); |
371 | } |
372 | return Accesses.size() != StartSize; |
373 | } |
374 | |
375 | bool TargetInstrInfo::hasStoreToStackSlot( |
376 | const MachineInstr &MI, |
377 | SmallVectorImpl<const MachineMemOperand *> &Accesses) const { |
378 | size_t StartSize = Accesses.size(); |
379 | for (MachineInstr::mmo_iterator o = MI.memoperands_begin(), |
380 | oe = MI.memoperands_end(); |
381 | o != oe; ++o) { |
382 | if ((*o)->isStore() && |
383 | dyn_cast_or_null<FixedStackPseudoSourceValue>((*o)->getPseudoValue())) |
384 | Accesses.push_back(*o); |
385 | } |
386 | return Accesses.size() != StartSize; |
387 | } |
388 | |
/// Compute the byte Size/Offset within a spill slot of RC that corresponds to
/// subregister index SubIdx (the whole slot when SubIdx is 0). Returns false
/// when the subregister is not byte-sized/byte-aligned and so cannot be
/// addressed as a memory range.
bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
                                        unsigned SubIdx, unsigned &Size,
                                        unsigned &Offset,
                                        const MachineFunction &MF) const {
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  if (!SubIdx) {
    // Whole register: the full spill slot.
    Size = TRI->getSpillSize(*RC);
    Offset = 0;
    return true;
  }
  unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
  // Convert bit size and offset to bytes; bail on non-byte-sized subregs.
  if (BitSize % 8)
    return false;

  int BitOffset = TRI->getSubRegIdxOffset(SubIdx);
  if (BitOffset < 0 || BitOffset % 8)
    return false;

  Size = BitSize / 8;
  Offset = (unsigned)BitOffset / 8;

  assert(TRI->getSpillSize(*RC) >= (Offset + Size) && "bad subregister range");

  // Subreg offsets are little-endian by convention; mirror the range for
  // big-endian layouts.
  if (!MF.getDataLayout().isLittleEndian()) {
    Offset = TRI->getSpillSize(*RC) - (Offset + Size);
  }
  return true;
}
418 | |
419 | void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB, |
420 | MachineBasicBlock::iterator I, |
421 | Register DestReg, unsigned SubIdx, |
422 | const MachineInstr &Orig, |
423 | const TargetRegisterInfo &TRI) const { |
424 | MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig); |
425 | MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI); |
426 | MBB.insert(I, MI); |
427 | } |
428 | |
/// Default notion of "produce the same value": the instructions are identical
/// except possibly for which virtual registers they define.
bool TargetInstrInfo::produceSameValue(const MachineInstr &MI0,
                                       const MachineInstr &MI1,
                                       const MachineRegisterInfo *MRI) const {
  return MI0.isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}
434 | |
/// Clone Orig (including any bundled instructions) into MBB before
/// InsertBefore, returning the clone.
MachineInstr &TargetInstrInfo::duplicate(MachineBasicBlock &MBB,
    MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const {
  assert(!Orig.isNotDuplicable() && "Instruction cannot be duplicated");
  MachineFunction &MF = *MBB.getParent();
  return MF.CloneMachineInstrBundle(MBB, InsertBefore, Orig);
}
441 | |
442 | |
443 | |
/// If the COPY instruction MI can be folded by spilling/reloading the operand
/// opposite FoldIdx, return the register class to use for the fold; otherwise
/// return nullptr.
static const TargetRegisterClass *canFoldCopy(const MachineInstr &MI,
                                              unsigned FoldIdx) {
  assert(MI.isCopy() && "MI must be a COPY instruction");
  // Only plain two-operand copies are handled.
  if (MI.getNumOperands() != 2)
    return nullptr;
  assert(FoldIdx<2 && "FoldIdx refers no nonexistent operand");

  const MachineOperand &FoldOp = MI.getOperand(FoldIdx);
  const MachineOperand &LiveOp = MI.getOperand(1 - FoldIdx);

  // Subregister copies cannot be folded here.
  if (FoldOp.getSubReg() || LiveOp.getSubReg())
    return nullptr;

  Register FoldReg = FoldOp.getReg();
  Register LiveReg = LiveOp.getReg();

  assert(Register::isVirtualRegister(FoldReg) && "Cannot fold physregs");

  const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);

  // Against a physical register, FoldReg's class must contain it.
  if (Register::isPhysicalRegister(LiveOp.getReg()))
    return RC->contains(LiveOp.getReg()) ? RC : nullptr;

  // Against a virtual register, the live register's class must be a subclass
  // of (or equal to) FoldReg's class.
  if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
    return RC;

  // FIXME: Allow folding when register classes are memory compatible.
  return nullptr;
}
474 | |
475 | MCInst TargetInstrInfo::getNop() const { llvm_unreachable("Not implemented"); } |
476 | |
/// Return the (NumDefs, StartIdx) half-open operand range [NumDefs, StartIdx)
/// of a stackmap-family instruction whose operands must NOT be folded: the
/// fixed meta operands that precede the variable live values.
std::pair<unsigned, unsigned>
TargetInstrInfo::getPatchpointUnfoldableRange(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  case TargetOpcode::STACKMAP:
    // StackMap has no defs; everything before the variable operands is fixed.
    return std::make_pair(0, StackMapOpers(&MI).getVarIdx());
  case TargetOpcode::PATCHPOINT:
    // For PatchPoint, the call args are not foldable (even though defs may
    // precede them).
    return std::make_pair(0, PatchPointOpers(&MI).getVarIdx());
  case TargetOpcode::STATEPOINT:
    // For statepoints, defs and the call args are not foldable.
    return std::make_pair(MI.getNumDefs(), StatepointOpers(&MI).getVarIdx());
  default:
    llvm_unreachable("unexpected stackmap opcode");
  }
}
494 | |
/// Fold the operands listed in Ops of a stackmap/patchpoint/statepoint MI into
/// stack-slot references, producing a new (uninserted) instruction. Returns
/// nullptr if any requested operand is in the unfoldable fixed range or tied.
static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr &MI,
                                    ArrayRef<unsigned> Ops, int FrameIndex,
                                    const TargetInstrInfo &TII) {
  unsigned StartIdx = 0;
  unsigned NumDefs = 0;
  // Operands in [NumDefs, StartIdx) must not be folded.
  std::tie(NumDefs, StartIdx) = TII.getPatchpointUnfoldableRange(MI);

  // Sentinel: "no def being folded" is encoded as the operand count.
  unsigned DefToFoldIdx = MI.getNumOperands();

  // Validate the folded operands: at most one def, no operands in the fixed
  // range, and no tied operands.
  for (unsigned Op : Ops) {
    if (Op < NumDefs) {
      assert(DefToFoldIdx == MI.getNumOperands() && "Folding multiple defs");
      DefToFoldIdx = Op;
    } else if (Op < StartIdx) {
      return nullptr;
    }
    if (MI.getOperand(Op).isTied())
      return nullptr;
  }

  MachineInstr *NewMI =
      MF.CreateMachineInstr(TII.get(MI.getOpcode()), MI.getDebugLoc(), true);
  MachineInstrBuilder MIB(MF, NewMI);

  // Copy the fixed operands, dropping the folded def if any.
  for (unsigned i = 0; i < StartIdx; ++i)
    if (i != DefToFoldIdx)
      MIB.add(MI.getOperand(i));

  for (unsigned i = StartIdx, e = MI.getNumOperands(); i < e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    unsigned TiedTo = e;
    (void)MI.isRegTiedToDefOperand(i, &TiedTo);

    if (is_contained(Ops, i)) {
      // Replace the register operand with an indirect memory reference
      // (IndirectMemRefOp, size, frame index, offset).
      assert(TiedTo == e && "Cannot fold tied operands");
      unsigned SpillSize;
      unsigned SpillOffset;
      // Compute the spill slot size and offset.
      const TargetRegisterClass *RC =
          MF.getRegInfo().getRegClass(MO.getReg());
      bool Valid =
          TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
      if (!Valid)
        report_fatal_error("cannot spill patchpoint subregister operand");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(SpillSize);
      MIB.addFrameIndex(FrameIndex);
      MIB.addImm(SpillOffset);
    } else {
      MIB.add(MO);
      if (TiedTo < e) {
        // Re-establish the tie, shifting the def index down by one if a
        // folded def was removed before it.
        assert(TiedTo < NumDefs && "Bad tied operand");
        if (TiedTo > DefToFoldIdx)
          --TiedTo;
        NewMI->tieOperands(TiedTo, NewMI->getNumOperands() - 1);
      }
    }
  }
  return NewMI;
}
559 | |
/// Try to fold the operands in Ops of MI into references to stack slot FI.
/// Handles the stackmap family generically, defers other opcodes to the
/// target's foldMemoryOperandImpl, and finally falls back to turning a plain
/// COPY into a spill or reload. Returns the new instruction or nullptr.
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops, int FI,
                                                 LiveIntervals *LIS,
                                                 VirtRegMap *VRM) const {
  // Folding a def produces a store; folding a use produces a load.
  auto Flags = MachineMemOperand::MONone;
  for (unsigned OpIdx : Ops)
    Flags |= MI.getOperand(OpIdx).isDef() ? MachineMemOperand::MOStore
                                          : MachineMemOperand::MOLoad;

  MachineBasicBlock *MBB = MI.getParent();
  assert(MBB && "foldMemoryOperand needs an inserted instruction");
  MachineFunction &MF = *MBB->getParent();

  // Determine the access size: stores cover the whole slot; loads cover the
  // widest folded (sub)register, narrowed by byte-sized subregister indices.
  int64_t MemSize = 0;
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  if (Flags & MachineMemOperand::MOStore) {
    MemSize = MFI.getObjectSize(FI);
  } else {
    for (unsigned OpIdx : Ops) {
      int64_t OpSize = MFI.getObjectSize(FI);

      if (auto SubReg = MI.getOperand(OpIdx).getSubReg()) {
        unsigned SubRegSize = TRI->getSubRegIdxSize(SubReg);
        if (SubRegSize > 0 && !(SubRegSize % 8))
          OpSize = SubRegSize / 8;
      }

      MemSize = std::max(MemSize, OpSize);
    }
  }

  assert(MemSize && "Did not expect a zero-sized stack slot");

  MachineInstr *NewMI = nullptr;

  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT ||
      MI.getOpcode() == TargetOpcode::STATEPOINT) {
    // Fold stackmap/patchpoint operands generically.
    NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
    if (NewMI)
      MBB->insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, LIS, VRM);
  }

  if (NewMI) {
    // Transfer the original memory operands and add one covering the slot.
    NewMI->setMemRefs(MF, MI.memoperands());
    // Sanity-check that the fold direction matches the new opcode.
    assert((!(Flags & MachineMemOperand::MOStore) ||
            NewMI->mayStore()) &&
           "Folded a def to a non-store!");
    assert((!(Flags & MachineMemOperand::MOLoad) ||
            NewMI->mayLoad()) &&
           "Folded a use to a non-load!");
    assert(MFI.getObjectOffset(FI) != -1);
    MachineMemOperand *MMO =
        MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
                                Flags, MemSize, MFI.getObjectAlign(FI));
    NewMI->addMemOperand(MF, MMO);

    // Preserve any pre/post-instruction symbols attached to MI on the new
    // instruction.
    NewMI->cloneInstrSymbols(MF, MI);

    return NewMI;
  }

  // Fallback: only a plain COPY with a single folded operand can be turned
  // into a direct spill or reload.
  if (!MI.isCopy() || Ops.size() != 1)
    return nullptr;

  const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
  if (!RC)
    return nullptr;

  // The live operand is the one opposite the folded operand.
  const MachineOperand &MO = MI.getOperand(1 - Ops[0]);
  MachineBasicBlock::iterator Pos = MI;

  // With a single folded operand Flags is exactly one of MOStore/MOLoad.
  if (Flags == MachineMemOperand::MOStore)
    storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
  else
    loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
  // The spill/reload was inserted immediately before Pos.
  return &*--Pos;
}
651 | |
/// Try to fold the load LoadMI into the uses of MI listed in Ops, producing a
/// combined instruction. Handles the stackmap family when LoadMI reads a
/// stack slot; otherwise defers to the target's foldMemoryOperandImpl.
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops,
                                                 MachineInstr &LoadMI,
                                                 LiveIntervals *LIS) const {
  assert(LoadMI.canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
  // A load can only be folded into uses, never defs.
  for (unsigned OpIdx : Ops)
    assert(MI.getOperand(OpIdx).isUse() && "Folding load into def!");
#endif

  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();

  // Ask the target to do the actual folding.
  MachineInstr *NewMI = nullptr;
  int FrameIndex = 0;

  if ((MI.getOpcode() == TargetOpcode::STACKMAP ||
       MI.getOpcode() == TargetOpcode::PATCHPOINT ||
       MI.getOpcode() == TargetOpcode::STATEPOINT) &&
      isLoadFromStackSlot(LoadMI, FrameIndex)) {
    // Fold stackmap/patchpoint operands generically.
    NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
    if (NewMI)
      NewMI = &*MBB.insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);
  }

  if (!NewMI)
    return nullptr;

  // Copy the memoperands from the load to the folded instruction.
  if (MI.memoperands_empty()) {
    NewMI->setMemRefs(MF, LoadMI.memoperands());
  } else {
    // Handle the rare case of folding multiple loads: keep MI's memoperands
    // and append the load's.
    NewMI->setMemRefs(MF, MI.memoperands());
    for (MachineInstr::mmo_iterator I = LoadMI.memoperands_begin(),
                                    E = LoadMI.memoperands_end();
         I != E; ++I) {
      NewMI->addMemOperand(MF, *I);
    }
  }
  return NewMI;
}
699 | |
/// Return true if both source operands (operands 1 and 2) of \p Inst are
/// virtual registers whose unique defining instructions live in \p MBB —
/// the precondition for reassociating \p Inst with one of those defs.
bool TargetInstrInfo::hasReassociableOperands(
    const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
  const MachineOperand &Op1 = Inst.getOperand(1);
  const MachineOperand &Op2 = Inst.getOperand(2);
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

  // We need virtual register definitions for the operands that we care
  // about; physical registers and missing/unique-less defs leave nullptr.
  MachineInstr *MI1 = nullptr;
  MachineInstr *MI2 = nullptr;
  if (Op1.isReg() && Register::isVirtualRegister(Op1.getReg()))
    MI1 = MRI.getUniqueVRegDef(Op1.getReg());
  if (Op2.isReg() && Register::isVirtualRegister(Op2.getReg()))
    MI2 = MRI.getUniqueVRegDef(Op2.getReg());

  // Both defs must exist and be in the same block as Inst's block.
  return MI1 && MI2 && MI1->getParent() == MBB && MI2->getParent() == MBB;
}
718 | |
/// Return true if \p Inst has a "sibling" — an instruction of the same
/// opcode defining one of Inst's source operands — that is suitable for
/// reassociation. \p Commuted is set when the sibling defines operand 2
/// rather than operand 1.
bool TargetInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
                                             bool &Commuted) const {
  const MachineBasicBlock *MBB = Inst.getParent();
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  // NOTE(review): MI1/MI2 are dereferenced unconditionally below, so this
  // assumes the caller has already established unique in-block defs (as
  // isReassociationCandidate does via hasReassociableOperands) — confirm
  // before adding new callers.
  MachineInstr *MI1 = MRI.getUniqueVRegDef(Inst.getOperand(1).getReg());
  MachineInstr *MI2 = MRI.getUniqueVRegDef(Inst.getOperand(2).getReg());
  unsigned AssocOpcode = Inst.getOpcode();

  // If exactly the second operand's def matches the opcode, swap so the
  // candidate sibling is always MI1, and report that a commute is needed.
  Commuted = MI1->getOpcode() != AssocOpcode && MI2->getOpcode() == AssocOpcode;
  if (Commuted)
    std::swap(MI1, MI2);

  // The sibling qualifies when:
  // 1. it has the same opcode as Inst;
  // 2. it is itself associative and commutative;
  // 3. its own source operands have unique in-block virtual-register defs;
  // 4. its result is used only by Inst (ignoring debug uses).
  return MI1->getOpcode() == AssocOpcode && isAssociativeAndCommutative(*MI1) &&
         hasReassociableOperands(*MI1, MBB) &&
         MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg());
}
744 | |
745 | |
746 | |
747 | |
748 | |
749 | bool TargetInstrInfo::isReassociationCandidate(const MachineInstr &Inst, |
750 | bool &Commuted) const { |
751 | return isAssociativeAndCommutative(Inst) && |
752 | hasReassociableOperands(Inst, Inst.getParent()) && |
753 | hasReassociableSibling(Inst, Commuted); |
754 | } |
755 | |
756 | |
757 | |
758 | |
759 | |
760 | |
761 | |
762 | |
763 | |
764 | |
765 | |
766 | |
767 | |
768 | |
769 | |
770 | |
771 | |
772 | |
773 | |
774 | |
775 | |
776 | |
777 | bool TargetInstrInfo::getMachineCombinerPatterns( |
778 | MachineInstr &Root, SmallVectorImpl<MachineCombinerPattern> &Patterns, |
779 | bool DoRegPressureReduce) const { |
780 | bool Commute; |
781 | if (isReassociationCandidate(Root, Commute)) { |
782 | |
783 | |
784 | |
785 | |
786 | if (Commute) { |
787 | Patterns.push_back(MachineCombinerPattern::REASSOC_AX_YB); |
788 | Patterns.push_back(MachineCombinerPattern::REASSOC_XA_YB); |
789 | } else { |
790 | Patterns.push_back(MachineCombinerPattern::REASSOC_AX_BY); |
791 | Patterns.push_back(MachineCombinerPattern::REASSOC_XA_BY); |
792 | } |
793 | return true; |
794 | } |
795 | |
796 | return false; |
797 | } |
798 | |
799 | |
/// Base implementation: no machine-combiner pattern is classified as
/// throughput-oriented; targets override this to opt patterns in.
bool
TargetInstrInfo::isThroughputPattern(MachineCombinerPattern Pattern) const {
  return false;
}
804 | |
805 | |
806 | |
/// Rewrite the dependent pair (Prev, Root) of associative/commutative
/// operations according to \p Pattern. New instructions are appended to
/// \p InsInstrs and the replaced ones to \p DelInstrs; the caller performs
/// the actual insertion and removal. \p InstrIdxForVirtReg records the
/// InsInstrs index that defines each newly created virtual register.
void TargetInstrInfo::reassociateOps(
    MachineInstr &Root, MachineInstr &Prev,
    MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
  MachineFunction *MF = Root.getMF();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, TRI);

  // Operand-index table, one row per pattern (selected by the switch
  // below). Columns give the positions of {A, B, X, Y}: A and X are read
  // from Prev, B and Y from Root (see the getOperand calls after the
  // switch).
  unsigned OpIdx[4][4] = {
    { 1, 1, 2, 2 },
    { 1, 2, 2, 1 },
    { 2, 1, 1, 2 },
    { 2, 2, 1, 1 }
  };

  // Map the pattern to a row of the table above.
  int Row;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY: Row = 0; break;
  case MachineCombinerPattern::REASSOC_AX_YB: Row = 1; break;
  case MachineCombinerPattern::REASSOC_XA_BY: Row = 2; break;
  case MachineCombinerPattern::REASSOC_XA_YB: Row = 3; break;
  default: llvm_unreachable("unexpected MachineCombinerPattern");
  }

  MachineOperand &OpA = Prev.getOperand(OpIdx[Row][0]);
  MachineOperand &OpB = Root.getOperand(OpIdx[Row][1]);
  MachineOperand &OpX = Prev.getOperand(OpIdx[Row][2]);
  MachineOperand &OpY = Root.getOperand(OpIdx[Row][3]);
  MachineOperand &OpC = Root.getOperand(0); // Final result register.

  Register RegA = OpA.getReg();
  Register RegB = OpB.getReg();
  Register RegX = OpX.getReg();
  Register RegY = OpY.getReg();
  Register RegC = OpC.getReg();

  // Constrain every virtual register involved to Root's register class so
  // the rebuilt instructions remain well-formed.
  if (Register::isVirtualRegister(RegA))
    MRI.constrainRegClass(RegA, RC);
  if (Register::isVirtualRegister(RegB))
    MRI.constrainRegClass(RegB, RC);
  if (Register::isVirtualRegister(RegX))
    MRI.constrainRegClass(RegX, RC);
  if (Register::isVirtualRegister(RegY))
    MRI.constrainRegClass(RegY, RC);
  if (Register::isVirtualRegister(RegC))
    MRI.constrainRegClass(RegC, RC);

  // Create a fresh virtual register for the intermediate result (X op Y)
  // and record which InsInstrs entry (index 0, i.e. MIB1) defines it.
  Register NewVR = MRI.createVirtualRegister(RC);
  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));

  unsigned Opcode = Root.getOpcode();
  bool KillA = OpA.isKill();
  bool KillX = OpX.isKill();
  bool KillY = OpY.isKill();

  // Build the replacement pair:
  //   NewVR = X op Y
  //   RegC  = A op NewVR
  MachineInstrBuilder MIB1 =
      BuildMI(*MF, Prev.getDebugLoc(), TII->get(Opcode), NewVR)
          .addReg(RegX, getKillRegState(KillX))
          .addReg(RegY, getKillRegState(KillY));
  MachineInstrBuilder MIB2 =
      BuildMI(*MF, Root.getDebugLoc(), TII->get(Opcode), RegC)
          .addReg(RegA, getKillRegState(KillA))
          .addReg(NewVR, getKillRegState(true)); // NewVR has no later uses.

  // Let the target transfer instruction-level attributes (target hook).
  setSpecialOperandAttr(Root, Prev, *MIB1, *MIB2);

  // Record new instructions for insertion and the old pair for deletion.
  InsInstrs.push_back(MIB1);
  InsInstrs.push_back(MIB2);
  DelInstrs.push_back(&Prev);
  DelInstrs.push_back(&Root);
}
890 | |
891 | void TargetInstrInfo::genAlternativeCodeSequence( |
892 | MachineInstr &Root, MachineCombinerPattern Pattern, |
893 | SmallVectorImpl<MachineInstr *> &InsInstrs, |
894 | SmallVectorImpl<MachineInstr *> &DelInstrs, |
895 | DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const { |
896 | MachineRegisterInfo &MRI = Root.getMF()->getRegInfo(); |
897 | |
898 | |
899 | MachineInstr *Prev = nullptr; |
| 1 | 'Prev' initialized to a null pointer value | |
|
900 | switch (Pattern) { |
| 2 | | Control jumps to the 'default' case at line 909 | |
|
901 | case MachineCombinerPattern::REASSOC_AX_BY: |
902 | case MachineCombinerPattern::REASSOC_XA_BY: |
903 | Prev = MRI.getUniqueVRegDef(Root.getOperand(1).getReg()); |
904 | break; |
905 | case MachineCombinerPattern::REASSOC_AX_YB: |
906 | case MachineCombinerPattern::REASSOC_XA_YB: |
907 | Prev = MRI.getUniqueVRegDef(Root.getOperand(2).getReg()); |
908 | break; |
909 | default: |
910 | break; |
| 3 | | Execution continues on line 913 | |
|
911 | } |
912 | |
913 | assert(Prev && "Unknown pattern for machine combiner"); |
914 | |
915 | reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, InstIdxForVirtReg); |
| 4 | | Forming reference to null pointer |
|
916 | } |
917 | |
/// Conservative target-independent test for trivial rematerializability:
/// the instruction must define exactly one virtual register from inputs
/// that cannot change between the original location and any remat point.
bool TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(
    const MachineInstr &MI, AAResults *AA) const {
  const MachineFunction &MF = *MI.getMF();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  // Operand 0 must be a register def — that is the value being remat'ed.
  if (!MI.getNumOperands() || !MI.getOperand(0).isReg())
    return false;
  Register DefReg = MI.getOperand(0).getReg();

  // A sub-register def that also reads the register is effectively a
  // read-modify-write of the full virtual register and cannot be moved.
  if (Register::isVirtualRegister(DefReg) && MI.getOperand(0).getSubReg() &&
      MI.readsVirtualRegister(DefReg))
    return false;

  // A load from an immutable stack slot is always rematerializable — a
  // simple, target-independent common case we can accept up front.
  int FrameIdx = 0;
  if (isLoadFromStackSlot(MI, FrameIdx) &&
      MF.getFrameInfo().isImmutableObjectIndex(FrameIdx))
    return true;

  // Reject instructions that are obviously unsafe to duplicate/move.
  if (MI.isNotDuplicable() || MI.mayStore() || MI.mayRaiseFPException() ||
      MI.hasUnmodeledSideEffects())
    return false;

  // Never remat inline asm — its cost and effects are opaque.
  if (MI.isInlineAsm())
    return false;

  // Loads are only acceptable when provably dereferenceable and invariant.
  if (MI.mayLoad() && !MI.isDereferenceableInvariantLoad(AA))
    return false;

  // Inspect every register operand.
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg()) continue;
    Register Reg = MO.getReg();
    if (Reg == 0)
      continue;

    if (Register::isPhysicalRegister(Reg)) {
      if (MO.isUse()) {
        // Physreg uses are only safe when the register is constant for
        // the whole function (e.g. a reserved, never-redefined register).
        if (!MRI.isConstantPhysReg(Reg))
          return false;
      } else {
        // A physreg def cannot be rematerialized elsewhere.
        return false;
      }
      continue;
    }

    // Only DefReg itself may be (re)defined by this instruction.
    if (MO.isDef() && Reg != DefReg)
      return false;

    // No virtual-register uses allowed: remat would extend their live
    // ranges, which is not "trivial".
    if (MO.isUse())
      return false;
  }

  // Everything checked out.
  return true;
}
997 | |
998 | int TargetInstrInfo::getSPAdjust(const MachineInstr &MI) const { |
999 | const MachineFunction *MF = MI.getMF(); |
1000 | const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering(); |
1001 | bool StackGrowsDown = |
1002 | TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown; |
1003 | |
1004 | unsigned FrameSetupOpcode = getCallFrameSetupOpcode(); |
1005 | unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode(); |
1006 | |
1007 | if (!isFrameInstr(MI)) |
1008 | return 0; |
1009 | |
1010 | int SPAdj = TFI->alignSPAdjust(getFrameSize(MI)); |
1011 | |
1012 | if ((!StackGrowsDown && MI.getOpcode() == FrameSetupOpcode) || |
1013 | (StackGrowsDown && MI.getOpcode() == FrameDestroyOpcode)) |
1014 | SPAdj = -SPAdj; |
1015 | |
1016 | return SPAdj; |
1017 | } |
1018 | |
1019 | |
1020 | |
1021 | |
/// Default scheduling-boundary test: terminators, position markers,
/// INLINEASM_BR, and anything modifying the save/restore stack pointer
/// act as boundaries the scheduler must not move instructions across.
bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
                                           const MachineBasicBlock *MBB,
                                           const MachineFunction &MF) const {
  // Terminators and position markers (labels etc.) end scheduling regions.
  if (MI.isTerminator() || MI.isPosition())
    return true;

  // INLINEASM_BR can transfer control to another block.
  if (MI.getOpcode() == TargetOpcode::INLINEASM_BR)
    return true;

  // Treat redefinitions of the stack pointer register as boundaries rather
  // than modeling a dependence from every stack reference to the modifying
  // instruction.
  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  return MI.modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI);
}
1042 | |
1043 | |
1044 | |
/// Pre-RA scheduling uses a hazard recognizer unless it was disabled via
/// the DisableHazardRecognizer command-line flag.
bool TargetInstrInfo::usePreRAHazardRecognizer() const {
  return !DisableHazardRecognizer;
}
1048 | |
1049 | |
/// Default hazard recognizer for pre-RA scheduling: the base
/// ScheduleHazardRecognizer, which reports no hazards.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
                             const ScheduleDAG *DAG) const {
  // Caller takes ownership of the returned recognizer.
  return new ScheduleHazardRecognizer();
}
1056 | |
1057 | |
/// Default hazard recognizer for the machine scheduler: a scoreboard
/// driven by the instruction itineraries. Caller takes ownership.
ScheduleHazardRecognizer *TargetInstrInfo::CreateTargetMIHazardRecognizer(
    const InstrItineraryData *II, const ScheduleDAGMI *DAG) const {
  return new ScoreboardHazardRecognizer(II, DAG, "machine-scheduler");
}
1062 | |
1063 | |
/// Default hazard recognizer for post-RA scheduling: an itinerary-driven
/// scoreboard. Caller takes ownership.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  return new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
}
1069 | |
1070 | |
1071 | bool TargetInstrInfo::getMemOperandWithOffset( |
1072 | const MachineInstr &MI, const MachineOperand *&BaseOp, int64_t &Offset, |
1073 | bool &OffsetIsScalable, const TargetRegisterInfo *TRI) const { |
1074 | SmallVector<const MachineOperand *, 4> BaseOps; |
1075 | unsigned Width; |
1076 | if (!getMemOperandsWithOffsetWidth(MI, BaseOps, Offset, OffsetIsScalable, |
1077 | Width, TRI) || |
1078 | BaseOps.size() != 1) |
1079 | return false; |
1080 | BaseOp = BaseOps.front(); |
1081 | return true; |
1082 | } |
1083 | |
1084 | |
1085 | |
1086 | |
1087 | |
/// SelectionDAG variant of operand latency: look up the def->use latency
/// in the itineraries; -1 means "no information available".
int
TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                   SDNode *DefNode, unsigned DefIdx,
                                   SDNode *UseNode, unsigned UseIdx) const {
  // No (or empty) itineraries: no latency information.
  if (!ItinData || ItinData->isEmpty())
    return -1;

  // Only machine opcodes have scheduling classes.
  if (!DefNode->isMachineOpcode())
    return -1;

  unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
  // Without a machine opcode for the user, fall back to the def's operand
  // cycle alone.
  if (!UseNode->isMachineOpcode())
    return ItinData->getOperandCycle(DefClass, DefIdx);
  unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}
1104 | |
1105 | int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData, |
1106 | SDNode *N) const { |
1107 | if (!ItinData || ItinData->isEmpty()) |
1108 | return 1; |
1109 | |
1110 | if (!N->isMachineOpcode()) |
1111 | return 1; |
1112 | |
1113 | return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass()); |
1114 | } |
1115 | |
1116 | |
1117 | |
1118 | |
1119 | |
1120 | unsigned TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData, |
1121 | const MachineInstr &MI) const { |
1122 | if (!ItinData || ItinData->isEmpty()) |
1123 | return 1; |
1124 | |
1125 | unsigned Class = MI.getDesc().getSchedClass(); |
1126 | int UOps = ItinData->Itineraries[Class].NumMicroOps; |
1127 | if (UOps >= 0) |
1128 | return UOps; |
1129 | |
1130 | |
1131 | |
1132 | return 1; |
1133 | } |
1134 | |
1135 | |
/// Fallback def latency when no itinerary/sched info applies: transient
/// instructions cost 0, loads use the model's LoadLatency, opcodes the
/// target marks high-latency use HighLatency, everything else 1 cycle.
unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel &SchedModel,
                                            const MachineInstr &DefMI) const {
  if (DefMI.isTransient())
    return 0;
  if (DefMI.mayLoad())
    return SchedModel.LoadLatency;
  if (isHighLatencyDef(DefMI.getOpcode()))
    return SchedModel.HighLatency;
  return 1;
}
1146 | |
/// Base implementation: predication adds no cost. Targets override.
unsigned TargetInstrInfo::getPredicationCost(const MachineInstr &) const {
  return 0;
}
1150 | |
/// MachineInstr variant of instruction latency. With no itineraries at all,
/// default to 1 cycle (2 for loads). Note that only a null ItinData — not
/// an empty one — takes the default path; an "empty" itinerary object is
/// still consulted for stage latency below.
unsigned TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                          const MachineInstr &MI,
                                          unsigned *PredCost) const {
  // PredCost is intentionally unused by this base implementation.
  if (!ItinData)
    return MI.mayLoad() ? 2 : 1;

  return ItinData->getStageLatency(MI.getDesc().getSchedClass());
}
1161 | |
1162 | bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel, |
1163 | const MachineInstr &DefMI, |
1164 | unsigned DefIdx) const { |
1165 | const InstrItineraryData *ItinData = SchedModel.getInstrItineraries(); |
1166 | if (!ItinData || ItinData->isEmpty()) |
1167 | return false; |
1168 | |
1169 | unsigned DefClass = DefMI.getDesc().getSchedClass(); |
1170 | int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx); |
1171 | return (DefCycle != -1 && DefCycle <= 1); |
1172 | } |
1173 | |
/// Describe the value \p MI produces in \p Reg as a (machine-operand,
/// DIExpression) pair, or None if no description is possible. Handles
/// plain copies, reg = reg + imm, and single-memoperand loads from
/// non-aliasing (pseudo-source) memory.
Optional<ParamLoadedValue>
TargetInstrInfo::describeLoadedValue(const MachineInstr &MI,
                                     Register Reg) const {
  const MachineFunction *MF = MI.getMF();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  // Start from an empty expression; cases below prepend to it.
  DIExpression *Expr = DIExpression::get(MF->getFunction().getContext(), {});
  int64_t Offset;
  bool OffsetIsScalable;

  // To simplify the sub-register handling, verify that we only need to
  // consider physical registers.
  assert(MF->getProperties().hasProperty(
      MachineFunctionProperties::Property::NoVRegs));

  if (auto DestSrc = isCopyInstr(MI)) {
    Register DestReg = DestSrc->Destination->getReg();

    // If Reg is exactly the copy destination, the value is simply the copy
    // source with the empty expression.
    if (Reg == DestReg)
      return ParamLoadedValue(*DestSrc->Source, Expr);

    // Super-/sub-register relationships between Reg and the destination
    // must be handled by the target's override, not this base version.
    assert(!TRI->isSuperOrSubRegisterEq(Reg, DestReg) &&
           "TargetInstrInfo::describeLoadedValue can't describe super- or "
           "sub-regs for copy instructions");
    return None;
  } else if (auto RegImm = isAddImmediate(MI, Reg)) {
    // Reg = SrcReg + Imm: describe as SrcReg with an offset expression.
    Register SrcReg = RegImm->Reg;
    Offset = RegImm->Imm;
    Expr = DIExpression::prepend(Expr, DIExpression::ApplyOffset, Offset);
    return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr);
  } else if (MI.hasOneMemOperand()) {
    const auto &TII = MF->getSubtarget().getInstrInfo();
    const MachineFrameInfo &MFI = MF->getFrameInfo();
    const MachineMemOperand *MMO = MI.memoperands()[0];
    const PseudoSourceValue *PSV = MMO->getPseudoValue();

    // Only accept loads from pseudo-source memory that cannot alias the
    // frame — other memory may change between here and the use point.
    if (!PSV || PSV->mayAlias(&MFI))
      return None;

    // Need a single base operand plus constant offset to build the address.
    const MachineOperand *BaseOp;
    if (!TII->getMemOperandWithOffset(MI, BaseOp, Offset, OffsetIsScalable,
                                      TRI))
      return None;

    // Scalable offsets cannot be encoded in the fixed DIExpression below.
    if (OffsetIsScalable)
      return None;

    // Bail out on instructions with more than one explicit def; with
    // multiple defs we cannot tell which one \p Reg corresponds to here.
    if (MI.getNumExplicitDefs() != 1)
      return None;

    // Describe the value as *(BaseOp + Offset), with the dereference size
    // taken from the memory operand.
    SmallVector<uint64_t, 8> Ops;
    DIExpression::appendOffset(Ops, Offset);
    Ops.push_back(dwarf::DW_OP_deref_size);
    Ops.push_back(MMO->getSize());
    Expr = DIExpression::prependOpcodes(Expr, Ops);
    return ParamLoadedValue(*BaseOp, Expr);
  }

  return None;
}
1254 | |
1255 | |
1256 | |
1257 | int TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData, |
1258 | const MachineInstr &DefMI, |
1259 | unsigned DefIdx, |
1260 | const MachineInstr &UseMI, |
1261 | unsigned UseIdx) const { |
1262 | unsigned DefClass = DefMI.getDesc().getSchedClass(); |
1263 | unsigned UseClass = UseMI.getDesc().getSchedClass(); |
1264 | return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx); |
1265 | } |
1266 | |
1267 | |
1268 | |
/// Compute a def latency from whatever model information exists. Returns
/// -1 to signal the caller that it must compute the latency itself (the
/// itineraries are present and non-empty).
int TargetInstrInfo::computeDefOperandLatency(
    const InstrItineraryData *ItinData, const MachineInstr &DefMI) const {

  // No itineraries at all: the generic getInstrLatency default applies.
  if (!ItinData)
    return getInstrLatency(ItinData, DefMI);

  // Itineraries exist but are empty: use the sched-model defaults.
  if(ItinData->isEmpty())
    return defaultDefLatency(ItinData->SchedModel, DefMI);

  // Real itinerary data: defer to the caller.
  return -1;
}
1282 | |
/// Collect the (reg, subreg, subidx) inputs of a REG_SEQUENCE, or defer to
/// the target hook for instructions that merely behave like one.
bool TargetInstrInfo::getRegSequenceInputs(
    const MachineInstr &MI, unsigned DefIdx,
    SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
  assert((MI.isRegSequence() ||
          MI.isRegSequenceLike()) && "Instruction do not have the proper type");

  if (!MI.isRegSequence())
    return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);

  // Plain REG_SEQUENCE: operands come in (register, subreg-index) pairs
  // after the single def at operand 0.
  assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
  for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
       OpIdx += 2) {
    const MachineOperand &MOReg = MI.getOperand(OpIdx);
    // Undef inputs contribute nothing.
    if (MOReg.isUndef())
      continue;
    const MachineOperand &MOSubIdx = MI.getOperand(OpIdx + 1);
    assert(MOSubIdx.isImm() &&
           "One of the subindex of the reg_sequence is not an immediate");
    // Record Reg:SubReg inserted at SubIdx.
    InputRegs.push_back(RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
                                            (unsigned)MOSubIdx.getImm()));
  }
  return true;
}
1309 | |
/// Fill \p InputReg with the (reg, subreg, subidx) source of an
/// EXTRACT_SUBREG, or defer to the target hook for "-like" instructions.
/// Returns false for undef sources.
bool TargetInstrInfo::getExtractSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPairAndIdx &InputReg) const {
  assert((MI.isExtractSubreg() ||
          MI.isExtractSubregLike()) && "Instruction do not have the proper type");

  if (!MI.isExtractSubreg())
    return getExtractSubregLikeInputs(MI, DefIdx, InputReg);

  // Plain EXTRACT_SUBREG: operand 1 is the source register, operand 2 the
  // sub-register index being extracted.
  assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
  const MachineOperand &MOReg = MI.getOperand(1);
  // An undef source cannot be described.
  if (MOReg.isUndef())
    return false;
  const MachineOperand &MOSubIdx = MI.getOperand(2);
  assert(MOSubIdx.isImm() &&
         "The subindex of the extract_subreg is not an immediate");

  InputReg.Reg = MOReg.getReg();
  InputReg.SubReg = MOReg.getSubReg();
  InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}
1334 | |
/// Fill \p BaseReg and \p InsertedReg for an INSERT_SUBREG, or defer to
/// the target hook for "-like" instructions. Returns false when the
/// inserted value is undef.
bool TargetInstrInfo::getInsertSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
  assert((MI.isInsertSubreg() ||
          MI.isInsertSubregLike()) && "Instruction do not have the proper type");

  if (!MI.isInsertSubreg())
    return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);

  // Plain INSERT_SUBREG: operand 1 is the base register, operand 2 the
  // value being inserted, operand 3 the destination sub-register index.
  assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
  const MachineOperand &MOBaseReg = MI.getOperand(1);
  const MachineOperand &MOInsertedReg = MI.getOperand(2);
  // An undef inserted value cannot be described.
  if (MOInsertedReg.isUndef())
    return false;
  const MachineOperand &MOSubIdx = MI.getOperand(3);
  assert(MOSubIdx.isImm() &&
         "One of the subindex of the reg_sequence is not an immediate");
  BaseReg.Reg = MOBaseReg.getReg();
  BaseReg.SubReg = MOBaseReg.getSubReg();

  InsertedReg.Reg = MOInsertedReg.getReg();
  InsertedReg.SubReg = MOInsertedReg.getSubReg();
  InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}
1362 | |
1363 | |
/// Build a human-readable comment for operand \p OpIdx of \p MI in MIR
/// output. Only inline-asm operands are commented here: the ExtraInfo
/// word and the per-operand-group flag words. Everything else yields "".
std::string TargetInstrInfo::createMIROperandComment(
    const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
    const TargetRegisterInfo *TRI) const {

  if (!MI.isInlineAsm())
    return "";

  std::string Flags;
  raw_string_ostream OS(Flags);

  if (OpIdx == InlineAsm::MIOp_ExtraInfo) {
    // Print the extra-info flag names, space-separated.
    unsigned ExtraInfo = Op.getImm();
    bool First = true;
    for (StringRef Info : InlineAsm::getExtraInfoNames(ExtraInfo)) {
      if (!First)
        OS << " ";
      First = false;
      OS << Info;
    }

    return OS.str();
  }

  // Only the flag word at the head of each operand group gets a comment;
  // OpIdx values inside a group return "".
  int FlagIdx = MI.findInlineAsmFlagIdx(OpIdx);
  if (FlagIdx < 0 || (unsigned)FlagIdx != OpIdx)
    return "";

  assert(Op.isImm() && "Expected flag operand to be an immediate");
  // Decode the flag word, starting with the operand kind name.
  unsigned Flag = Op.getImm();
  unsigned Kind = InlineAsm::getKind(Flag);
  OS << InlineAsm::getKindName(Kind);

  // Append the register-class constraint — by name when TRI is available,
  // otherwise by numeric ID.
  unsigned RCID = 0;
  if (!InlineAsm::isImmKind(Flag) && !InlineAsm::isMemKind(Flag) &&
      InlineAsm::hasRegClassConstraint(Flag, RCID)) {
    if (TRI) {
      OS << ':' << TRI->getRegClassName(TRI->getRegClass(RCID));
    } else
      OS << ":RC" << RCID;
  }

  // Memory operands also carry a memory-constraint code.
  if (InlineAsm::isMemKind(Flag)) {
    unsigned MCID = InlineAsm::getMemoryConstraintID(Flag);
    OS << ":" << InlineAsm::getMemConstraintName(MCID);
  }

  // Note use operands that are tied to an earlier def.
  unsigned TiedTo = 0;
  if (InlineAsm::isUseOperandTiedToDef(Flag, TiedTo))
    OS << " tiedto:$" << TiedTo;

  return OS.str();
}
1418 | |
1419 | TargetInstrInfo::PipelinerLoopInfo::~PipelinerLoopInfo() {} |