clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name X86InstrInfo.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model pic -pic-level 1 -fhalf-no-semantic-interposition -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/gnu/usr.bin/clang/libLLVM/obj -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Analysis -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ASMParser -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/BinaryFormat -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitcode -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitcode -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitstream -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /include/llvm/CodeGen -I /include/llvm/CodeGen/PBQP -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/IR -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/IR -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Coroutines -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ProfileData/Coverage -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/CodeView -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/DWARF -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/MSF -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/PDB -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Demangle -I 
/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine/JITLink -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine/Orc -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend/OpenACC -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend/OpenMP -I /include/llvm/CodeGen/GlobalISel -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/IRReader -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/InstCombine -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/Transforms/InstCombine -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/LTO -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Linker -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/MC -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/MC/MCParser -I /include/llvm/CodeGen/MIRParser -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Object -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Option -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Passes -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ProfileData -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Scalar -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ADT -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Support -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/Symbolize -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Target -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Utils -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Vectorize -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/IPO -I 
/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include -I /usr/src/gnu/usr.bin/clang/libLLVM/../include -I /usr/src/gnu/usr.bin/clang/libLLVM/obj -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include -D NDEBUG -D __STDC_LIMIT_MACROS -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D LLVM_PREFIX="/usr" -D PIC -internal-isystem /usr/include/c++/v1 -internal-isystem /usr/local/lib/clang/13.0.0/include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/usr/src/gnu/usr.bin/clang/libLLVM/obj -ferror-limit 19 -fvisibility-inlines-hidden -fwrapv -D_RET_PROTECTOR -ret-protector -fno-rtti -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/ben/Projects/vmm/scan-build/2022-01-12-194120-40624-1 -x c++ /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86/X86InstrInfo.cpp
1 | //===-- X86InstrInfo.cpp - X86 Instruction Information -------*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file contains the X86 implementation of the TargetInstrInfo class. |
10 | // |
11 | //===----------------------------------------------------------------------===// |
12 | |
13 | #include "X86InstrInfo.h" |
14 | #include "X86.h" |
15 | #include "X86InstrBuilder.h" |
16 | #include "X86InstrFoldTables.h" |
17 | #include "X86MachineFunctionInfo.h" |
18 | #include "X86Subtarget.h" |
19 | #include "X86TargetMachine.h" |
20 | #include "llvm/ADT/STLExtras.h" |
21 | #include "llvm/ADT/Sequence.h" |
22 | #include "llvm/CodeGen/LivePhysRegs.h" |
23 | #include "llvm/CodeGen/LiveVariables.h" |
24 | #include "llvm/CodeGen/MachineConstantPool.h" |
25 | #include "llvm/CodeGen/MachineDominators.h" |
26 | #include "llvm/CodeGen/MachineFrameInfo.h" |
27 | #include "llvm/CodeGen/MachineInstrBuilder.h" |
28 | #include "llvm/CodeGen/MachineModuleInfo.h" |
29 | #include "llvm/CodeGen/MachineRegisterInfo.h" |
30 | #include "llvm/CodeGen/StackMaps.h" |
31 | #include "llvm/IR/DebugInfoMetadata.h" |
32 | #include "llvm/IR/DerivedTypes.h" |
33 | #include "llvm/IR/Function.h" |
34 | #include "llvm/MC/MCAsmInfo.h" |
35 | #include "llvm/MC/MCExpr.h" |
36 | #include "llvm/MC/MCInst.h" |
37 | #include "llvm/Support/CommandLine.h" |
38 | #include "llvm/Support/Debug.h" |
39 | #include "llvm/Support/ErrorHandling.h" |
40 | #include "llvm/Support/raw_ostream.h" |
41 | #include "llvm/Target/TargetOptions.h" |
42 | |
43 | using namespace llvm; |
44 | |
45 | #define DEBUG_TYPE "x86-instr-info" |
46 | |
47 | #define GET_INSTRINFO_CTOR_DTOR |
48 | #include "X86GenInstrInfo.inc" |
49 | |
50 | static cl::opt<bool> |
51 | NoFusing("disable-spill-fusing", |
52 | cl::desc("Disable fusing of spill code into instructions"), |
53 | cl::Hidden); |
54 | static cl::opt<bool> |
55 | PrintFailedFusing("print-failed-fuse-candidates", |
56 | cl::desc("Print instructions that the allocator wants to" |
57 | " fuse, but the X86 backend currently can't"), |
58 | cl::Hidden); |
59 | static cl::opt<bool> |
60 | ReMatPICStubLoad("remat-pic-stub-load", |
61 | cl::desc("Re-materialize load from stub in PIC mode"), |
62 | cl::init(false), cl::Hidden); |
63 | static cl::opt<unsigned> |
64 | PartialRegUpdateClearance("partial-reg-update-clearance", |
65 | cl::desc("Clearance between two register writes " |
66 | "for inserting XOR to avoid partial " |
67 | "register update"), |
68 | cl::init(64), cl::Hidden); |
69 | static cl::opt<unsigned> |
70 | UndefRegClearance("undef-reg-clearance", |
71 | cl::desc("How many idle instructions we would like before " |
72 | "certain undef register reads"), |
73 | cl::init(128), cl::Hidden); |
74 | |
75 | |
76 | |
77 | void X86InstrInfo::anchor() {} |
78 | |
79 | X86InstrInfo::X86InstrInfo(X86Subtarget &STI) |
80 | : X86GenInstrInfo((STI.isTarget64BitLP64() ? X86::ADJCALLSTACKDOWN64 |
81 | : X86::ADJCALLSTACKDOWN32), |
82 | (STI.isTarget64BitLP64() ? X86::ADJCALLSTACKUP64 |
83 | : X86::ADJCALLSTACKUP32), |
84 | X86::CATCHRET, |
85 | (STI.is64Bit() ? X86::RETQ : X86::RETL)), |
86 | Subtarget(STI), RI(STI.getTargetTriple()) { |
87 | } |
88 | |
89 | bool |
90 | X86InstrInfo::isCoalescableExtInstr(const MachineInstr &MI, |
91 | Register &SrcReg, Register &DstReg, |
92 | unsigned &SubIdx) const { |
93 | switch (MI.getOpcode()) { |
94 | default: break; |
95 | case X86::MOVSX16rr8: |
96 | case X86::MOVZX16rr8: |
97 | case X86::MOVSX32rr8: |
98 | case X86::MOVZX32rr8: |
99 | case X86::MOVSX64rr8: |
100 | if (!Subtarget.is64Bit()) |
101 | // It's not always legal to reference the low 8-bit of the larger |
102 | // register in 32-bit mode. |
103 | return false; |
104 | LLVM_FALLTHROUGH; |
105 | case X86::MOVSX32rr16: |
106 | case X86::MOVZX32rr16: |
107 | case X86::MOVSX64rr16: |
108 | case X86::MOVSX64rr32: { |
109 | if (MI.getOperand(0).getSubReg() || MI.getOperand(1).getSubReg()) |
110 | // Be conservative. |
111 | return false; |
112 | SrcReg = MI.getOperand(1).getReg(); |
113 | DstReg = MI.getOperand(0).getReg(); |
114 | switch (MI.getOpcode()) { |
115 | default: llvm_unreachable("Unreachable!"); |
116 | case X86::MOVSX16rr8: |
117 | case X86::MOVZX16rr8: |
118 | case X86::MOVSX32rr8: |
119 | case X86::MOVZX32rr8: |
120 | case X86::MOVSX64rr8: |
121 | SubIdx = X86::sub_8bit; |
122 | break; |
123 | case X86::MOVSX32rr16: |
124 | case X86::MOVZX32rr16: |
125 | case X86::MOVSX64rr16: |
126 | SubIdx = X86::sub_16bit; |
127 | break; |
128 | case X86::MOVSX64rr32: |
129 | SubIdx = X86::sub_32bit; |
130 | break; |
131 | } |
132 | return true; |
133 | } |
134 | } |
135 | return false; |
136 | } |
137 | |
138 | bool X86InstrInfo::isDataInvariant(MachineInstr &MI) { |
139 | switch (MI.getOpcode()) { |
140 | default: |
141 | // By default, assume that the instruction is not data invariant. |
142 | return false; |
143 | |
144 | // Some target-independent operations that trivially lift to |
145 | // data-invariant instructions. |
146 | case TargetOpcode::COPY: |
147 | case TargetOpcode::INSERT_SUBREG: |
148 | case TargetOpcode::SUBREG_TO_REG: |
149 | return true; |
150 | |
151 | // On x86 it is believed that imul is constant time w.r.t. the loaded data. |
152 | // However, they set flags and are perhaps the most surprisingly constant |
153 | // time operations so we call them out here separately. |
154 | case X86::IMUL16rr: |
155 | case X86::IMUL16rri8: |
156 | case X86::IMUL16rri: |
157 | case X86::IMUL32rr: |
158 | case X86::IMUL32rri8: |
159 | case X86::IMUL32rri: |
160 | case X86::IMUL64rr: |
161 | case X86::IMUL64rri32: |
162 | case X86::IMUL64rri8: |
163 | |
164 | // Bit scanning and counting instructions that are somewhat surprisingly |
165 | // constant time as they scan across bits and do other fairly complex |
166 | // operations like popcnt, but are believed to be constant time on x86. |
167 | // However, these set flags. |
168 | case X86::BSF16rr: |
169 | case X86::BSF32rr: |
170 | case X86::BSF64rr: |
171 | case X86::BSR16rr: |
172 | case X86::BSR32rr: |
173 | case X86::BSR64rr: |
174 | case X86::LZCNT16rr: |
175 | case X86::LZCNT32rr: |
176 | case X86::LZCNT64rr: |
177 | case X86::POPCNT16rr: |
178 | case X86::POPCNT32rr: |
179 | case X86::POPCNT64rr: |
180 | case X86::TZCNT16rr: |
181 | case X86::TZCNT32rr: |
182 | case X86::TZCNT64rr: |
183 | |
184 | |
185 | |
186 | |
187 | case X86::BLCFILL32rr: |
188 | case X86::BLCFILL64rr: |
189 | case X86::BLCI32rr: |
190 | case X86::BLCI64rr: |
191 | case X86::BLCIC32rr: |
192 | case X86::BLCIC64rr: |
193 | case X86::BLCMSK32rr: |
194 | case X86::BLCMSK64rr: |
195 | case X86::BLCS32rr: |
196 | case X86::BLCS64rr: |
197 | case X86::BLSFILL32rr: |
198 | case X86::BLSFILL64rr: |
199 | case X86::BLSI32rr: |
200 | case X86::BLSI64rr: |
201 | case X86::BLSIC32rr: |
202 | case X86::BLSIC64rr: |
203 | case X86::BLSMSK32rr: |
204 | case X86::BLSMSK64rr: |
205 | case X86::BLSR32rr: |
206 | case X86::BLSR64rr: |
207 | case X86::TZMSK32rr: |
208 | case X86::TZMSK64rr: |
209 | |
210 | |
211 | |
212 | case X86::BEXTR32rr: |
213 | case X86::BEXTR64rr: |
214 | case X86::BEXTRI32ri: |
215 | case X86::BEXTRI64ri: |
216 | case X86::BZHI32rr: |
217 | case X86::BZHI64rr: |
218 | |
219 | |
220 | case X86::ROL8r1: |
221 | case X86::ROL16r1: |
222 | case X86::ROL32r1: |
223 | case X86::ROL64r1: |
224 | case X86::ROL8rCL: |
225 | case X86::ROL16rCL: |
226 | case X86::ROL32rCL: |
227 | case X86::ROL64rCL: |
228 | case X86::ROL8ri: |
229 | case X86::ROL16ri: |
230 | case X86::ROL32ri: |
231 | case X86::ROL64ri: |
232 | case X86::ROR8r1: |
233 | case X86::ROR16r1: |
234 | case X86::ROR32r1: |
235 | case X86::ROR64r1: |
236 | case X86::ROR8rCL: |
237 | case X86::ROR16rCL: |
238 | case X86::ROR32rCL: |
239 | case X86::ROR64rCL: |
240 | case X86::ROR8ri: |
241 | case X86::ROR16ri: |
242 | case X86::ROR32ri: |
243 | case X86::ROR64ri: |
244 | case X86::SAR8r1: |
245 | case X86::SAR16r1: |
246 | case X86::SAR32r1: |
247 | case X86::SAR64r1: |
248 | case X86::SAR8rCL: |
249 | case X86::SAR16rCL: |
250 | case X86::SAR32rCL: |
251 | case X86::SAR64rCL: |
252 | case X86::SAR8ri: |
253 | case X86::SAR16ri: |
254 | case X86::SAR32ri: |
255 | case X86::SAR64ri: |
256 | case X86::SHL8r1: |
257 | case X86::SHL16r1: |
258 | case X86::SHL32r1: |
259 | case X86::SHL64r1: |
260 | case X86::SHL8rCL: |
261 | case X86::SHL16rCL: |
262 | case X86::SHL32rCL: |
263 | case X86::SHL64rCL: |
264 | case X86::SHL8ri: |
265 | case X86::SHL16ri: |
266 | case X86::SHL32ri: |
267 | case X86::SHL64ri: |
268 | case X86::SHR8r1: |
269 | case X86::SHR16r1: |
270 | case X86::SHR32r1: |
271 | case X86::SHR64r1: |
272 | case X86::SHR8rCL: |
273 | case X86::SHR16rCL: |
274 | case X86::SHR32rCL: |
275 | case X86::SHR64rCL: |
276 | case X86::SHR8ri: |
277 | case X86::SHR16ri: |
278 | case X86::SHR32ri: |
279 | case X86::SHR64ri: |
280 | case X86::SHLD16rrCL: |
281 | case X86::SHLD32rrCL: |
282 | case X86::SHLD64rrCL: |
283 | case X86::SHLD16rri8: |
284 | case X86::SHLD32rri8: |
285 | case X86::SHLD64rri8: |
286 | case X86::SHRD16rrCL: |
287 | case X86::SHRD32rrCL: |
288 | case X86::SHRD64rrCL: |
289 | case X86::SHRD16rri8: |
290 | case X86::SHRD32rri8: |
291 | case X86::SHRD64rri8: |
292 | |
293 | |
294 | case X86::ADC8rr: |
295 | case X86::ADC8ri: |
296 | case X86::ADC16rr: |
297 | case X86::ADC16ri: |
298 | case X86::ADC16ri8: |
299 | case X86::ADC32rr: |
300 | case X86::ADC32ri: |
301 | case X86::ADC32ri8: |
302 | case X86::ADC64rr: |
303 | case X86::ADC64ri8: |
304 | case X86::ADC64ri32: |
305 | case X86::ADD8rr: |
306 | case X86::ADD8ri: |
307 | case X86::ADD16rr: |
308 | case X86::ADD16ri: |
309 | case X86::ADD16ri8: |
310 | case X86::ADD32rr: |
311 | case X86::ADD32ri: |
312 | case X86::ADD32ri8: |
313 | case X86::ADD64rr: |
314 | case X86::ADD64ri8: |
315 | case X86::ADD64ri32: |
316 | case X86::AND8rr: |
317 | case X86::AND8ri: |
318 | case X86::AND16rr: |
319 | case X86::AND16ri: |
320 | case X86::AND16ri8: |
321 | case X86::AND32rr: |
322 | case X86::AND32ri: |
323 | case X86::AND32ri8: |
324 | case X86::AND64rr: |
325 | case X86::AND64ri8: |
326 | case X86::AND64ri32: |
327 | case X86::OR8rr: |
328 | case X86::OR8ri: |
329 | case X86::OR16rr: |
330 | case X86::OR16ri: |
331 | case X86::OR16ri8: |
332 | case X86::OR32rr: |
333 | case X86::OR32ri: |
334 | case X86::OR32ri8: |
335 | case X86::OR64rr: |
336 | case X86::OR64ri8: |
337 | case X86::OR64ri32: |
338 | case X86::SBB8rr: |
339 | case X86::SBB8ri: |
340 | case X86::SBB16rr: |
341 | case X86::SBB16ri: |
342 | case X86::SBB16ri8: |
343 | case X86::SBB32rr: |
344 | case X86::SBB32ri: |
345 | case X86::SBB32ri8: |
346 | case X86::SBB64rr: |
347 | case X86::SBB64ri8: |
348 | case X86::SBB64ri32: |
349 | case X86::SUB8rr: |
350 | case X86::SUB8ri: |
351 | case X86::SUB16rr: |
352 | case X86::SUB16ri: |
353 | case X86::SUB16ri8: |
354 | case X86::SUB32rr: |
355 | case X86::SUB32ri: |
356 | case X86::SUB32ri8: |
357 | case X86::SUB64rr: |
358 | case X86::SUB64ri8: |
359 | case X86::SUB64ri32: |
360 | case X86::XOR8rr: |
361 | case X86::XOR8ri: |
362 | case X86::XOR16rr: |
363 | case X86::XOR16ri: |
364 | case X86::XOR16ri8: |
365 | case X86::XOR32rr: |
366 | case X86::XOR32ri: |
367 | case X86::XOR32ri8: |
368 | case X86::XOR64rr: |
369 | case X86::XOR64ri8: |
370 | case X86::XOR64ri32: |
371 | |
372 | case X86::ADCX32rr: |
373 | case X86::ADCX64rr: |
374 | case X86::ADOX32rr: |
375 | case X86::ADOX64rr: |
376 | case X86::ANDN32rr: |
377 | case X86::ANDN64rr: |
378 | |
379 | case X86::DEC8r: |
380 | case X86::DEC16r: |
381 | case X86::DEC32r: |
382 | case X86::DEC64r: |
383 | case X86::INC8r: |
384 | case X86::INC16r: |
385 | case X86::INC32r: |
386 | case X86::INC64r: |
387 | case X86::NEG8r: |
388 | case X86::NEG16r: |
389 | case X86::NEG32r: |
390 | case X86::NEG64r: |
391 | |
392 | |
393 | case X86::NOT8r: |
394 | case X86::NOT16r: |
395 | case X86::NOT32r: |
396 | case X86::NOT64r: |
397 | |
398 | |
399 | |
400 | |
401 | case X86::MOVSX16rr8: |
402 | case X86::MOVSX32rr8: |
403 | case X86::MOVSX32rr16: |
404 | case X86::MOVSX64rr8: |
405 | case X86::MOVSX64rr16: |
406 | case X86::MOVSX64rr32: |
407 | case X86::MOVZX16rr8: |
408 | case X86::MOVZX32rr8: |
409 | case X86::MOVZX32rr16: |
410 | case X86::MOVZX64rr8: |
411 | case X86::MOVZX64rr16: |
412 | case X86::MOV32rr: |
413 | |
414 | |
415 | case X86::RORX32ri: |
416 | case X86::RORX64ri: |
417 | case X86::SARX32rr: |
418 | case X86::SARX64rr: |
419 | case X86::SHLX32rr: |
420 | case X86::SHLX64rr: |
421 | case X86::SHRX32rr: |
422 | case X86::SHRX64rr: |
423 | |
424 | |
425 | case X86::LEA16r: |
426 | case X86::LEA32r: |
427 | case X86::LEA64_32r: |
428 | case X86::LEA64r: |
429 | return true; |
430 | } |
431 | } |
432 | |
433 | bool X86InstrInfo::isDataInvariantLoad(MachineInstr &MI) { |
434 | switch (MI.getOpcode()) { |
435 | default: |
436 | // By default, assume that the load will immediately leak. |
437 | return false; |
438 | |
439 | |
440 | |
441 | |
442 | case X86::IMUL16rm: |
443 | case X86::IMUL16rmi8: |
444 | case X86::IMUL16rmi: |
445 | case X86::IMUL32rm: |
446 | case X86::IMUL32rmi8: |
447 | case X86::IMUL32rmi: |
448 | case X86::IMUL64rm: |
449 | case X86::IMUL64rmi32: |
450 | case X86::IMUL64rmi8: |
451 | |
452 | |
453 | |
454 | |
455 | |
456 | case X86::BSF16rm: |
457 | case X86::BSF32rm: |
458 | case X86::BSF64rm: |
459 | case X86::BSR16rm: |
460 | case X86::BSR32rm: |
461 | case X86::BSR64rm: |
462 | case X86::LZCNT16rm: |
463 | case X86::LZCNT32rm: |
464 | case X86::LZCNT64rm: |
465 | case X86::POPCNT16rm: |
466 | case X86::POPCNT32rm: |
467 | case X86::POPCNT64rm: |
468 | case X86::TZCNT16rm: |
469 | case X86::TZCNT32rm: |
470 | case X86::TZCNT64rm: |
471 | |
472 | |
473 | |
474 | |
475 | case X86::BLCFILL32rm: |
476 | case X86::BLCFILL64rm: |
477 | case X86::BLCI32rm: |
478 | case X86::BLCI64rm: |
479 | case X86::BLCIC32rm: |
480 | case X86::BLCIC64rm: |
481 | case X86::BLCMSK32rm: |
482 | case X86::BLCMSK64rm: |
483 | case X86::BLCS32rm: |
484 | case X86::BLCS64rm: |
485 | case X86::BLSFILL32rm: |
486 | case X86::BLSFILL64rm: |
487 | case X86::BLSI32rm: |
488 | case X86::BLSI64rm: |
489 | case X86::BLSIC32rm: |
490 | case X86::BLSIC64rm: |
491 | case X86::BLSMSK32rm: |
492 | case X86::BLSMSK64rm: |
493 | case X86::BLSR32rm: |
494 | case X86::BLSR64rm: |
495 | case X86::TZMSK32rm: |
496 | case X86::TZMSK64rm: |
497 | |
498 | |
499 | |
500 | case X86::BEXTR32rm: |
501 | case X86::BEXTR64rm: |
502 | case X86::BEXTRI32mi: |
503 | case X86::BEXTRI64mi: |
504 | case X86::BZHI32rm: |
505 | case X86::BZHI64rm: |
506 | |
507 | |
508 | case X86::ADC8rm: |
509 | case X86::ADC16rm: |
510 | case X86::ADC32rm: |
511 | case X86::ADC64rm: |
512 | case X86::ADCX32rm: |
513 | case X86::ADCX64rm: |
514 | case X86::ADD8rm: |
515 | case X86::ADD16rm: |
516 | case X86::ADD32rm: |
517 | case X86::ADD64rm: |
518 | case X86::ADOX32rm: |
519 | case X86::ADOX64rm: |
520 | case X86::AND8rm: |
521 | case X86::AND16rm: |
522 | case X86::AND32rm: |
523 | case X86::AND64rm: |
524 | case X86::ANDN32rm: |
525 | case X86::ANDN64rm: |
526 | case X86::OR8rm: |
527 | case X86::OR16rm: |
528 | case X86::OR32rm: |
529 | case X86::OR64rm: |
530 | case X86::SBB8rm: |
531 | case X86::SBB16rm: |
532 | case X86::SBB32rm: |
533 | case X86::SBB64rm: |
534 | case X86::SUB8rm: |
535 | case X86::SUB16rm: |
536 | case X86::SUB32rm: |
537 | case X86::SUB64rm: |
538 | case X86::XOR8rm: |
539 | case X86::XOR16rm: |
540 | case X86::XOR32rm: |
541 | case X86::XOR64rm: |
542 | |
543 | |
544 | |
545 | |
546 | case X86::MULX32rm: |
547 | case X86::MULX64rm: |
548 | |
549 | |
550 | case X86::RORX32mi: |
551 | case X86::RORX64mi: |
552 | case X86::SARX32rm: |
553 | case X86::SARX64rm: |
554 | case X86::SHLX32rm: |
555 | case X86::SHLX64rm: |
556 | case X86::SHRX32rm: |
557 | case X86::SHRX64rm: |
558 | |
559 | |
560 | case X86::CVTTSD2SI64rm: |
561 | case X86::VCVTTSD2SI64rm: |
562 | case X86::VCVTTSD2SI64Zrm: |
563 | case X86::CVTTSD2SIrm: |
564 | case X86::VCVTTSD2SIrm: |
565 | case X86::VCVTTSD2SIZrm: |
566 | case X86::CVTTSS2SI64rm: |
567 | case X86::VCVTTSS2SI64rm: |
568 | case X86::VCVTTSS2SI64Zrm: |
569 | case X86::CVTTSS2SIrm: |
570 | case X86::VCVTTSS2SIrm: |
571 | case X86::VCVTTSS2SIZrm: |
572 | case X86::CVTSI2SDrm: |
573 | case X86::VCVTSI2SDrm: |
574 | case X86::VCVTSI2SDZrm: |
575 | case X86::CVTSI2SSrm: |
576 | case X86::VCVTSI2SSrm: |
577 | case X86::VCVTSI2SSZrm: |
578 | case X86::CVTSI642SDrm: |
579 | case X86::VCVTSI642SDrm: |
580 | case X86::VCVTSI642SDZrm: |
581 | case X86::CVTSI642SSrm: |
582 | case X86::VCVTSI642SSrm: |
583 | case X86::VCVTSI642SSZrm: |
584 | case X86::CVTSS2SDrm: |
585 | case X86::VCVTSS2SDrm: |
586 | case X86::VCVTSS2SDZrm: |
587 | case X86::CVTSD2SSrm: |
588 | case X86::VCVTSD2SSrm: |
589 | case X86::VCVTSD2SSZrm: |
590 | |
591 | case X86::VCVTTSD2USI64Zrm: |
592 | case X86::VCVTTSD2USIZrm: |
593 | case X86::VCVTTSS2USI64Zrm: |
594 | case X86::VCVTTSS2USIZrm: |
595 | case X86::VCVTUSI2SDZrm: |
596 | case X86::VCVTUSI642SDZrm: |
597 | case X86::VCVTUSI2SSZrm: |
598 | case X86::VCVTUSI642SSZrm: |
599 | |
600 | |
601 | case X86::MOV8rm: |
602 | case X86::MOV8rm_NOREX: |
603 | case X86::MOV16rm: |
604 | case X86::MOV32rm: |
605 | case X86::MOV64rm: |
606 | case X86::MOVSX16rm8: |
607 | case X86::MOVSX32rm16: |
608 | case X86::MOVSX32rm8: |
609 | case X86::MOVSX32rm8_NOREX: |
610 | case X86::MOVSX64rm16: |
611 | case X86::MOVSX64rm32: |
612 | case X86::MOVSX64rm8: |
613 | case X86::MOVZX16rm8: |
614 | case X86::MOVZX32rm16: |
615 | case X86::MOVZX32rm8: |
616 | case X86::MOVZX32rm8_NOREX: |
617 | case X86::MOVZX64rm16: |
618 | case X86::MOVZX64rm8: |
619 | return true; |
620 | } |
621 | } |
622 | |
623 | int X86InstrInfo::getSPAdjust(const MachineInstr &MI) const { |
624 | const MachineFunction *MF = MI.getParent()->getParent(); |
625 | const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering(); |
626 | |
627 | if (isFrameInstr(MI)) { |
628 | int SPAdj = alignTo(getFrameSize(MI), TFI->getStackAlign()); |
629 | SPAdj -= getFrameAdjustment(MI); |
630 | if (!isFrameSetup(MI)) |
631 | SPAdj = -SPAdj; |
632 | return SPAdj; |
633 | } |
634 | |
635 | |
636 | |
637 | |
638 | if (MI.isCall()) { |
639 | const MachineBasicBlock *MBB = MI.getParent(); |
640 | auto I = ++MachineBasicBlock::const_iterator(MI); |
641 | for (auto E = MBB->end(); I != E; ++I) { |
642 | if (I->getOpcode() == getCallFrameDestroyOpcode() || |
643 | I->isCall()) |
644 | break; |
645 | } |
646 | |
647 | |
648 | |
649 | if (I->getOpcode() != getCallFrameDestroyOpcode()) |
650 | return 0; |
651 | |
652 | return -(I->getOperand(1).getImm()); |
653 | } |
654 | |
655 | |
656 | |
657 | switch (MI.getOpcode()) { |
658 | default: |
659 | return 0; |
660 | case X86::PUSH32i8: |
661 | case X86::PUSH32r: |
662 | case X86::PUSH32rmm: |
663 | case X86::PUSH32rmr: |
664 | case X86::PUSHi32: |
665 | return 4; |
666 | case X86::PUSH64i8: |
667 | case X86::PUSH64r: |
668 | case X86::PUSH64rmm: |
669 | case X86::PUSH64rmr: |
670 | case X86::PUSH64i32: |
671 | return 8; |
672 | } |
673 | } |
674 | |
675 | /// Return true and the FrameIndex if the specified |
676 | /// operand and follow operands form a reference to the stack frame. |
677 | bool X86InstrInfo::isFrameOperand(const MachineInstr &MI, unsigned int Op, |
678 | int &FrameIndex) const { |
679 | if (MI.getOperand(Op + X86::AddrBaseReg).isFI() && |
680 | MI.getOperand(Op + X86::AddrScaleAmt).isImm() && |
681 | MI.getOperand(Op + X86::AddrIndexReg).isReg() && |
682 | MI.getOperand(Op + X86::AddrDisp).isImm() && |
683 | MI.getOperand(Op + X86::AddrScaleAmt).getImm() == 1 && |
684 | MI.getOperand(Op + X86::AddrIndexReg).getReg() == 0 && |
685 | MI.getOperand(Op + X86::AddrDisp).getImm() == 0) { |
686 | FrameIndex = MI.getOperand(Op + X86::AddrBaseReg).getIndex(); |
687 | return true; |
688 | } |
689 | return false; |
690 | } |
691 | |
692 | static bool isFrameLoadOpcode(int Opcode, unsigned &MemBytes) { |
693 | switch (Opcode) { |
694 | default: |
695 | return false; |
696 | case X86::MOV8rm: |
697 | case X86::KMOVBkm: |
698 | MemBytes = 1; |
699 | return true; |
700 | case X86::MOV16rm: |
701 | case X86::KMOVWkm: |
702 | MemBytes = 2; |
703 | return true; |
704 | case X86::MOV32rm: |
705 | case X86::MOVSSrm: |
706 | case X86::MOVSSrm_alt: |
707 | case X86::VMOVSSrm: |
708 | case X86::VMOVSSrm_alt: |
709 | case X86::VMOVSSZrm: |
710 | case X86::VMOVSSZrm_alt: |
711 | case X86::KMOVDkm: |
712 | MemBytes = 4; |
713 | return true; |
714 | case X86::MOV64rm: |
715 | case X86::LD_Fp64m: |
716 | case X86::MOVSDrm: |
717 | case X86::MOVSDrm_alt: |
718 | case X86::VMOVSDrm: |
719 | case X86::VMOVSDrm_alt: |
720 | case X86::VMOVSDZrm: |
721 | case X86::VMOVSDZrm_alt: |
722 | case X86::MMX_MOVD64rm: |
723 | case X86::MMX_MOVQ64rm: |
724 | case X86::KMOVQkm: |
725 | MemBytes = 8; |
726 | return true; |
727 | case X86::MOVAPSrm: |
728 | case X86::MOVUPSrm: |
729 | case X86::MOVAPDrm: |
730 | case X86::MOVUPDrm: |
731 | case X86::MOVDQArm: |
732 | case X86::MOVDQUrm: |
733 | case X86::VMOVAPSrm: |
734 | case X86::VMOVUPSrm: |
735 | case X86::VMOVAPDrm: |
736 | case X86::VMOVUPDrm: |
737 | case X86::VMOVDQArm: |
738 | case X86::VMOVDQUrm: |
739 | case X86::VMOVAPSZ128rm: |
740 | case X86::VMOVUPSZ128rm: |
741 | case X86::VMOVAPSZ128rm_NOVLX: |
742 | case X86::VMOVUPSZ128rm_NOVLX: |
743 | case X86::VMOVAPDZ128rm: |
744 | case X86::VMOVUPDZ128rm: |
745 | case X86::VMOVDQU8Z128rm: |
746 | case X86::VMOVDQU16Z128rm: |
747 | case X86::VMOVDQA32Z128rm: |
748 | case X86::VMOVDQU32Z128rm: |
749 | case X86::VMOVDQA64Z128rm: |
750 | case X86::VMOVDQU64Z128rm: |
751 | MemBytes = 16; |
752 | return true; |
753 | case X86::VMOVAPSYrm: |
754 | case X86::VMOVUPSYrm: |
755 | case X86::VMOVAPDYrm: |
756 | case X86::VMOVUPDYrm: |
757 | case X86::VMOVDQAYrm: |
758 | case X86::VMOVDQUYrm: |
759 | case X86::VMOVAPSZ256rm: |
760 | case X86::VMOVUPSZ256rm: |
761 | case X86::VMOVAPSZ256rm_NOVLX: |
762 | case X86::VMOVUPSZ256rm_NOVLX: |
763 | case X86::VMOVAPDZ256rm: |
764 | case X86::VMOVUPDZ256rm: |
765 | case X86::VMOVDQU8Z256rm: |
766 | case X86::VMOVDQU16Z256rm: |
767 | case X86::VMOVDQA32Z256rm: |
768 | case X86::VMOVDQU32Z256rm: |
769 | case X86::VMOVDQA64Z256rm: |
770 | case X86::VMOVDQU64Z256rm: |
771 | MemBytes = 32; |
772 | return true; |
773 | case X86::VMOVAPSZrm: |
774 | case X86::VMOVUPSZrm: |
775 | case X86::VMOVAPDZrm: |
776 | case X86::VMOVUPDZrm: |
777 | case X86::VMOVDQU8Zrm: |
778 | case X86::VMOVDQU16Zrm: |
779 | case X86::VMOVDQA32Zrm: |
780 | case X86::VMOVDQU32Zrm: |
781 | case X86::VMOVDQA64Zrm: |
782 | case X86::VMOVDQU64Zrm: |
783 | MemBytes = 64; |
784 | return true; |
785 | } |
786 | } |
787 | |
788 | static bool isFrameStoreOpcode(int Opcode, unsigned &MemBytes) { |
789 | switch (Opcode) { |
790 | default: |
791 | return false; |
792 | case X86::MOV8mr: |
793 | case X86::KMOVBmk: |
794 | MemBytes = 1; |
795 | return true; |
796 | case X86::MOV16mr: |
797 | case X86::KMOVWmk: |
798 | MemBytes = 2; |
799 | return true; |
800 | case X86::MOV32mr: |
801 | case X86::MOVSSmr: |
802 | case X86::VMOVSSmr: |
803 | case X86::VMOVSSZmr: |
804 | case X86::KMOVDmk: |
805 | MemBytes = 4; |
806 | return true; |
807 | case X86::MOV64mr: |
808 | case X86::ST_FpP64m: |
809 | case X86::MOVSDmr: |
810 | case X86::VMOVSDmr: |
811 | case X86::VMOVSDZmr: |
812 | case X86::MMX_MOVD64mr: |
813 | case X86::MMX_MOVQ64mr: |
814 | case X86::MMX_MOVNTQmr: |
815 | case X86::KMOVQmk: |
816 | MemBytes = 8; |
817 | return true; |
818 | case X86::MOVAPSmr: |
819 | case X86::MOVUPSmr: |
820 | case X86::MOVAPDmr: |
821 | case X86::MOVUPDmr: |
822 | case X86::MOVDQAmr: |
823 | case X86::MOVDQUmr: |
824 | case X86::VMOVAPSmr: |
825 | case X86::VMOVUPSmr: |
826 | case X86::VMOVAPDmr: |
827 | case X86::VMOVUPDmr: |
828 | case X86::VMOVDQAmr: |
829 | case X86::VMOVDQUmr: |
830 | case X86::VMOVUPSZ128mr: |
831 | case X86::VMOVAPSZ128mr: |
832 | case X86::VMOVUPSZ128mr_NOVLX: |
833 | case X86::VMOVAPSZ128mr_NOVLX: |
834 | case X86::VMOVUPDZ128mr: |
835 | case X86::VMOVAPDZ128mr: |
836 | case X86::VMOVDQA32Z128mr: |
837 | case X86::VMOVDQU32Z128mr: |
838 | case X86::VMOVDQA64Z128mr: |
839 | case X86::VMOVDQU64Z128mr: |
840 | case X86::VMOVDQU8Z128mr: |
841 | case X86::VMOVDQU16Z128mr: |
842 | MemBytes = 16; |
843 | return true; |
844 | case X86::VMOVUPSYmr: |
845 | case X86::VMOVAPSYmr: |
846 | case X86::VMOVUPDYmr: |
847 | case X86::VMOVAPDYmr: |
848 | case X86::VMOVDQUYmr: |
849 | case X86::VMOVDQAYmr: |
850 | case X86::VMOVUPSZ256mr: |
851 | case X86::VMOVAPSZ256mr: |
852 | case X86::VMOVUPSZ256mr_NOVLX: |
853 | case X86::VMOVAPSZ256mr_NOVLX: |
854 | case X86::VMOVUPDZ256mr: |
855 | case X86::VMOVAPDZ256mr: |
856 | case X86::VMOVDQU8Z256mr: |
857 | case X86::VMOVDQU16Z256mr: |
858 | case X86::VMOVDQA32Z256mr: |
859 | case X86::VMOVDQU32Z256mr: |
860 | case X86::VMOVDQA64Z256mr: |
861 | case X86::VMOVDQU64Z256mr: |
862 | MemBytes = 32; |
863 | return true; |
864 | case X86::VMOVUPSZmr: |
865 | case X86::VMOVAPSZmr: |
866 | case X86::VMOVUPDZmr: |
867 | case X86::VMOVAPDZmr: |
868 | case X86::VMOVDQU8Zmr: |
869 | case X86::VMOVDQU16Zmr: |
870 | case X86::VMOVDQA32Zmr: |
871 | case X86::VMOVDQU32Zmr: |
872 | case X86::VMOVDQA64Zmr: |
873 | case X86::VMOVDQU64Zmr: |
874 | MemBytes = 64; |
875 | return true; |
876 | } |
877 | return false; |
878 | } |
879 | |
880 | unsigned X86InstrInfo::isLoadFromStackSlot(const MachineInstr &MI, |
881 | int &FrameIndex) const { |
882 | unsigned Dummy; |
883 | return X86InstrInfo::isLoadFromStackSlot(MI, FrameIndex, Dummy); |
884 | } |
885 | |
886 | unsigned X86InstrInfo::isLoadFromStackSlot(const MachineInstr &MI, |
887 | int &FrameIndex, |
888 | unsigned &MemBytes) const { |
889 | if (isFrameLoadOpcode(MI.getOpcode(), MemBytes)) |
890 | if (MI.getOperand(0).getSubReg() == 0 && isFrameOperand(MI, 1, FrameIndex)) |
891 | return MI.getOperand(0).getReg(); |
892 | return 0; |
893 | } |
894 | |
895 | unsigned X86InstrInfo::isLoadFromStackSlotPostFE(const MachineInstr &MI, |
896 | int &FrameIndex) const { |
897 | unsigned Dummy; |
898 | if (isFrameLoadOpcode(MI.getOpcode(), Dummy)) { |
899 | unsigned Reg; |
900 | if ((Reg = isLoadFromStackSlot(MI, FrameIndex))) |
901 | return Reg; |
902 | |
903 | SmallVector<const MachineMemOperand *, 1> Accesses; |
904 | if (hasLoadFromStackSlot(MI, Accesses)) { |
905 | FrameIndex = |
906 | cast<FixedStackPseudoSourceValue>(Accesses.front()->getPseudoValue()) |
907 | ->getFrameIndex(); |
908 | return MI.getOperand(0).getReg(); |
909 | } |
910 | } |
911 | return 0; |
912 | } |
913 | |
914 | unsigned X86InstrInfo::isStoreToStackSlot(const MachineInstr &MI, |
915 | int &FrameIndex) const { |
916 | unsigned Dummy; |
917 | return X86InstrInfo::isStoreToStackSlot(MI, FrameIndex, Dummy); |
918 | } |
919 | |
920 | unsigned X86InstrInfo::isStoreToStackSlot(const MachineInstr &MI, |
921 | int &FrameIndex, |
922 | unsigned &MemBytes) const { |
923 | if (isFrameStoreOpcode(MI.getOpcode(), MemBytes)) |
924 | if (MI.getOperand(X86::AddrNumOperands).getSubReg() == 0 && |
925 | isFrameOperand(MI, 0, FrameIndex)) |
926 | return MI.getOperand(X86::AddrNumOperands).getReg(); |
927 | return 0; |
928 | } |
929 | |
930 | unsigned X86InstrInfo::isStoreToStackSlotPostFE(const MachineInstr &MI, |
931 | int &FrameIndex) const { |
932 | unsigned Dummy; |
933 | if (isFrameStoreOpcode(MI.getOpcode(), Dummy)) { |
934 | unsigned Reg; |
935 | if ((Reg = isStoreToStackSlot(MI, FrameIndex))) |
936 | return Reg; |
937 | |
938 | SmallVector<const MachineMemOperand *, 1> Accesses; |
939 | if (hasStoreToStackSlot(MI, Accesses)) { |
940 | FrameIndex = |
941 | cast<FixedStackPseudoSourceValue>(Accesses.front()->getPseudoValue()) |
942 | ->getFrameIndex(); |
943 | return MI.getOperand(X86::AddrNumOperands).getReg(); |
944 | } |
945 | } |
946 | return 0; |
947 | } |
948 | |
949 | |
950 | static bool regIsPICBase(Register BaseReg, const MachineRegisterInfo &MRI) { |
951 | // Don't waste compile time scanning use-def chains of physregs. |
952 | if (!BaseReg.isVirtual()) |
953 | return false; |
954 | bool isPICBase = false; |
955 | for (MachineRegisterInfo::def_instr_iterator I = MRI.def_instr_begin(BaseReg), |
956 | E = MRI.def_instr_end(); I != E; ++I) { |
957 | MachineInstr *DefMI = &*I; |
958 | if (DefMI->getOpcode() != X86::MOVPC32r) |
959 | return false; |
960 | assert(!isPICBase && "More than one PIC base?"); |
961 | isPICBase = true; |
962 | } |
963 | return isPICBase; |
964 | } |
965 | |
966 | bool X86InstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI, |
967 | AAResults *AA) const { |
968 | switch (MI.getOpcode()) { |
969 | default: |
970 | // This function should only be called for opcodes with the ReMaterializable |
971 | // flag set. |
972 | llvm_unreachable("Unknown rematerializable operation!"); |
973 | break; |
974 | |
975 | case X86::LOAD_STACK_GUARD: |
976 | case X86::AVX1_SETALLONES: |
977 | case X86::AVX2_SETALLONES: |
978 | case X86::AVX512_128_SET0: |
979 | case X86::AVX512_256_SET0: |
980 | case X86::AVX512_512_SET0: |
981 | case X86::AVX512_512_SETALLONES: |
982 | case X86::AVX512_FsFLD0SD: |
983 | case X86::AVX512_FsFLD0SS: |
984 | case X86::AVX512_FsFLD0F128: |
985 | case X86::AVX_SET0: |
986 | case X86::FsFLD0SD: |
987 | case X86::FsFLD0SS: |
988 | case X86::FsFLD0F128: |
989 | case X86::KSET0D: |
990 | case X86::KSET0Q: |
991 | case X86::KSET0W: |
992 | case X86::KSET1D: |
993 | case X86::KSET1Q: |
994 | case X86::KSET1W: |
995 | case X86::MMX_SET0: |
996 | case X86::MOV32ImmSExti8: |
997 | case X86::MOV32r0: |
998 | case X86::MOV32r1: |
999 | case X86::MOV32r_1: |
1000 | case X86::MOV32ri64: |
1001 | case X86::MOV64ImmSExti8: |
1002 | case X86::V_SET0: |
1003 | case X86::V_SETALLONES: |
1004 | case X86::MOV16ri: |
1005 | case X86::MOV32ri: |
1006 | case X86::MOV64ri: |
1007 | case X86::MOV64ri32: |
1008 | case X86::MOV8ri: |
1009 | case X86::PTILEZEROV: |
1010 | return true; |
1011 | |
1012 | case X86::MOV8rm: |
1013 | case X86::MOV8rm_NOREX: |
1014 | case X86::MOV16rm: |
1015 | case X86::MOV32rm: |
1016 | case X86::MOV64rm: |
1017 | case X86::MOVSSrm: |
1018 | case X86::MOVSSrm_alt: |
1019 | case X86::MOVSDrm: |
1020 | case X86::MOVSDrm_alt: |
1021 | case X86::MOVAPSrm: |
1022 | case X86::MOVUPSrm: |
1023 | case X86::MOVAPDrm: |
1024 | case X86::MOVUPDrm: |
1025 | case X86::MOVDQArm: |
1026 | case X86::MOVDQUrm: |
1027 | case X86::VMOVSSrm: |
1028 | case X86::VMOVSSrm_alt: |
1029 | case X86::VMOVSDrm: |
1030 | case X86::VMOVSDrm_alt: |
1031 | case X86::VMOVAPSrm: |
1032 | case X86::VMOVUPSrm: |
1033 | case X86::VMOVAPDrm: |
1034 | case X86::VMOVUPDrm: |
1035 | case X86::VMOVDQArm: |
1036 | case X86::VMOVDQUrm: |
1037 | case X86::VMOVAPSYrm: |
1038 | case X86::VMOVUPSYrm: |
1039 | case X86::VMOVAPDYrm: |
1040 | case X86::VMOVUPDYrm: |
1041 | case X86::VMOVDQAYrm: |
1042 | case X86::VMOVDQUYrm: |
1043 | case X86::MMX_MOVD64rm: |
1044 | case X86::MMX_MOVQ64rm: |
1045 | |
1046 | case X86::VMOVSSZrm: |
1047 | case X86::VMOVSSZrm_alt: |
1048 | case X86::VMOVSDZrm: |
1049 | case X86::VMOVSDZrm_alt: |
1050 | case X86::VMOVAPDZ128rm: |
1051 | case X86::VMOVAPDZ256rm: |
1052 | case X86::VMOVAPDZrm: |
1053 | case X86::VMOVAPSZ128rm: |
1054 | case X86::VMOVAPSZ256rm: |
1055 | case X86::VMOVAPSZ128rm_NOVLX: |
1056 | case X86::VMOVAPSZ256rm_NOVLX: |
1057 | case X86::VMOVAPSZrm: |
1058 | case X86::VMOVDQA32Z128rm: |
1059 | case X86::VMOVDQA32Z256rm: |
1060 | case X86::VMOVDQA32Zrm: |
1061 | case X86::VMOVDQA64Z128rm: |
1062 | case X86::VMOVDQA64Z256rm: |
1063 | case X86::VMOVDQA64Zrm: |
1064 | case X86::VMOVDQU16Z128rm: |
1065 | case X86::VMOVDQU16Z256rm: |
1066 | case X86::VMOVDQU16Zrm: |
1067 | case X86::VMOVDQU32Z128rm: |
1068 | case X86::VMOVDQU32Z256rm: |
1069 | case X86::VMOVDQU32Zrm: |
1070 | case X86::VMOVDQU64Z128rm: |
1071 | case X86::VMOVDQU64Z256rm: |
1072 | case X86::VMOVDQU64Zrm: |
1073 | case X86::VMOVDQU8Z128rm: |
1074 | case X86::VMOVDQU8Z256rm: |
1075 | case X86::VMOVDQU8Zrm: |
1076 | case X86::VMOVUPDZ128rm: |
1077 | case X86::VMOVUPDZ256rm: |
1078 | case X86::VMOVUPDZrm: |
1079 | case X86::VMOVUPSZ128rm: |
1080 | case X86::VMOVUPSZ256rm: |
1081 | case X86::VMOVUPSZ128rm_NOVLX: |
1082 | case X86::VMOVUPSZ256rm_NOVLX: |
1083 | case X86::VMOVUPSZrm: { |
1084 | // Loads from constant pools are trivially rematerializable. |
1085 | if (MI.getOperand(1 + X86::AddrBaseReg).isReg() && |
1086 | MI.getOperand(1 + X86::AddrScaleAmt).isImm() && |
1087 | MI.getOperand(1 + X86::AddrIndexReg).isReg() && |
1088 | MI.getOperand(1 + X86::AddrIndexReg).getReg() == 0 && |
1089 | MI.isDereferenceableInvariantLoad(AA)) { |
1090 | Register BaseReg = MI.getOperand(1 + X86::AddrBaseReg).getReg(); |
1091 | if (BaseReg == 0 || BaseReg == X86::RIP) |
1092 | return true; |
1093 | // Allow re-materialization of PIC load. |
1094 | if (!ReMatPICStubLoad && MI.getOperand(1 + X86::AddrDisp).isGlobal()) |
1095 | return false; |
1096 | const MachineFunction &MF = *MI.getParent()->getParent(); |
1097 | const MachineRegisterInfo &MRI = MF.getRegInfo(); |
1098 | return regIsPICBase(BaseReg, MRI); |
1099 | } |
1100 | return false; |
1101 | } |
1102 | |
1103 | case X86::LEA32r: |
1104 | case X86::LEA64r: { |
1105 | if (MI.getOperand(1 + X86::AddrScaleAmt).isImm() && |
1106 | MI.getOperand(1 + X86::AddrIndexReg).isReg() && |
1107 | MI.getOperand(1 + X86::AddrIndexReg).getReg() == 0 && |
1108 | !MI.getOperand(1 + X86::AddrDisp).isReg()) { |
1109 | // lea fi#, lea GV, etc. are all rematerializable. |
1110 | if (!MI.getOperand(1 + X86::AddrBaseReg).isReg()) |
1111 | return true; |
1112 | Register BaseReg = MI.getOperand(1 + X86::AddrBaseReg).getReg(); |
1113 | if (BaseReg == 0) |
1114 | return true; |
1115 | // Allow re-materialization of lea PICBase + x. |
1116 | const MachineFunction &MF = *MI.getParent()->getParent(); |
1117 | const MachineRegisterInfo &MRI = MF.getRegInfo(); |
1118 | return regIsPICBase(BaseReg, MRI); |
1119 | } |
1120 | return false; |
1121 | } |
1122 | } |
1123 | } |
1124 | |
1125 | void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB, |
1126 | MachineBasicBlock::iterator I, |
1127 | Register DestReg, unsigned SubIdx, |
1128 | const MachineInstr &Orig, |
1129 | const TargetRegisterInfo &TRI) const { |
1130 | bool ClobbersEFLAGS = Orig.modifiesRegister(X86::EFLAGS, &TRI); |
1131 | if (ClobbersEFLAGS && MBB.computeRegisterLiveness(&TRI, X86::EFLAGS, I) != |
1132 | MachineBasicBlock::LQR_Dead) { |
1133 | // The instruction clobbers EFLAGS. Re-materialize as MOV32ri to avoid side |
1134 | // effects. |
1135 | int Value; |
1136 | switch (Orig.getOpcode()) { |
1137 | case X86::MOV32r0: Value = 0; break; |
1138 | case X86::MOV32r1: Value = 1; break; |
1139 | case X86::MOV32r_1: Value = -1; break; |
1140 | default: |
1141 | llvm_unreachable("Unexpected instruction!"); |
1142 | } |
1143 | |
1144 | const DebugLoc &DL = Orig.getDebugLoc(); |
1145 | BuildMI(MBB, I, DL, get(X86::MOV32ri)) |
1146 | .add(Orig.getOperand(0)) |
1147 | .addImm(Value); |
1148 | } else { |
1149 | MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig); |
1150 | MBB.insert(I, MI); |
1151 | } |
1152 | |
1153 | MachineInstr &NewMI = *std::prev(I); |
1154 | NewMI.substituteRegister(Orig.getOperand(0).getReg(), DestReg, SubIdx, TRI); |
1155 | } |
1156 | |
1157 | |
1158 | bool X86InstrInfo::hasLiveCondCodeDef(MachineInstr &MI) const { |
1159 | for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { |
1160 | MachineOperand &MO = MI.getOperand(i); |
1161 | if (MO.isReg() && MO.isDef() && |
1162 | MO.getReg() == X86::EFLAGS && !MO.isDead()) { |
1163 | return true; |
1164 | } |
1165 | } |
1166 | return false; |
1167 | } |
1168 | |
1169 | |
1170 | inline static unsigned getTruncatedShiftCount(const MachineInstr &MI, |
1171 | unsigned ShiftAmtOperandIdx) { |
1172 | // The shift count is six bits with the REX.W prefix and five bits without. |
1173 | unsigned ShiftCountMask = (MI.getDesc().TSFlags & X86II::REX_W) ? 63 : 31; |
1174 | unsigned Imm = MI.getOperand(ShiftAmtOperandIdx).getImm(); |
1175 | return Imm & ShiftCountMask; |
1176 | } |
1177 | |
1178 | |
1179 | |
1180 | inline static bool isTruncatedShiftCountForLEA(unsigned ShAmt) { |
1181 | |
1182 | |
1183 | |
1184 | |
1185 | |
1186 | return ShAmt < 4 && ShAmt > 0; |
1187 | } |
1188 | |
1189 | bool X86InstrInfo::classifyLEAReg(MachineInstr &MI, const MachineOperand &Src, |
1190 | unsigned Opc, bool AllowSP, Register &NewSrc, |
1191 | bool &isKill, MachineOperand &ImplicitOp, |
1192 | LiveVariables *LV) const { |
1193 | MachineFunction &MF = *MI.getParent()->getParent(); |
1194 | const TargetRegisterClass *RC; |
1195 | if (AllowSP) { |
1196 | RC = Opc != X86::LEA32r ? &X86::GR64RegClass : &X86::GR32RegClass; |
1197 | } else { |
1198 | RC = Opc != X86::LEA32r ? |
1199 | &X86::GR64_NOSPRegClass : &X86::GR32_NOSPRegClass; |
1200 | } |
1201 | Register SrcReg = Src.getReg(); |
1202 | |
1203 | |
1204 | |
1205 | if (Opc != X86::LEA64_32r) { |
1206 | NewSrc = SrcReg; |
1207 | isKill = Src.isKill(); |
1208 | assert(!Src.isUndef() && "Undef op doesn't need optimization"); |
1209 | |
1210 | if (NewSrc.isVirtual() && !MF.getRegInfo().constrainRegClass(NewSrc, RC)) |
1211 | return false; |
1212 | |
1213 | return true; |
1214 | } |
1215 | |
1216 | |
1217 | |
1218 | if (SrcReg.isPhysical()) { |
1219 | ImplicitOp = Src; |
1220 | ImplicitOp.setImplicit(); |
1221 | |
1222 | NewSrc = getX86SubSuperRegister(Src.getReg(), 64); |
1223 | isKill = Src.isKill(); |
1224 | assert(!Src.isUndef() && "Undef op doesn't need optimization"); |
1225 | } else { |
1226 | |
1227 | |
1228 | NewSrc = MF.getRegInfo().createVirtualRegister(RC); |
1229 | MachineInstr *Copy = |
1230 | BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(TargetOpcode::COPY)) |
1231 | .addReg(NewSrc, RegState::Define | RegState::Undef, X86::sub_32bit) |
1232 | .add(Src); |
1233 | |
1234 | |
1235 | isKill = true; |
1236 | |
1237 | if (LV) |
1238 | LV->replaceKillInstruction(SrcReg, MI, *Copy); |
1239 | } |
1240 | |
1241 | |
1242 | return true; |
1243 | } |
1244 | |
1245 | MachineInstr *X86InstrInfo::convertToThreeAddressWithLEA( |
1246 | unsigned MIOpc, MachineFunction::iterator &MFI, MachineInstr &MI, |
1247 | LiveVariables *LV, bool Is8BitOp) const { |
1248 | |
1249 | MachineRegisterInfo &RegInfo = MFI->getParent()->getRegInfo(); |
1250 | assert((Is8BitOp || RegInfo.getTargetRegisterInfo()->getRegSizeInBits( |
1251 | *RegInfo.getRegClass(MI.getOperand(0).getReg())) == 16) && |
1252 | "Unexpected type for LEA transform"); |
1253 | |
1254 | |
1255 | |
1256 | |
1257 | |
1258 | |
1259 | |
1260 | |
1261 | if (!Subtarget.is64Bit()) |
1262 | return nullptr; |
1263 | |
1264 | unsigned Opcode = X86::LEA64_32r; |
1265 | Register InRegLEA = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass); |
1266 | Register OutRegLEA = RegInfo.createVirtualRegister(&X86::GR32RegClass); |
1267 | |
1268 | |
1269 | |
1270 | |
1271 | |
1272 | |
1273 | |
1274 | |
1275 | MachineBasicBlock::iterator MBBI = MI.getIterator(); |
1276 | Register Dest = MI.getOperand(0).getReg(); |
1277 | Register Src = MI.getOperand(1).getReg(); |
1278 | bool IsDead = MI.getOperand(0).isDead(); |
1279 | bool IsKill = MI.getOperand(1).isKill(); |
1280 | unsigned SubReg = Is8BitOp ? X86::sub_8bit : X86::sub_16bit; |
1281 | assert(!MI.getOperand(1).isUndef() && "Undef op doesn't need optimization"); |
1282 | BuildMI(*MFI, MBBI, MI.getDebugLoc(), get(X86::IMPLICIT_DEF), InRegLEA); |
1283 | MachineInstr *InsMI = |
1284 | BuildMI(*MFI, MBBI, MI.getDebugLoc(), get(TargetOpcode::COPY)) |
1285 | .addReg(InRegLEA, RegState::Define, SubReg) |
1286 | .addReg(Src, getKillRegState(IsKill)); |
1287 | |
1288 | MachineInstrBuilder MIB = |
1289 | BuildMI(*MFI, MBBI, MI.getDebugLoc(), get(Opcode), OutRegLEA); |
1290 | switch (MIOpc) { |
1291 | default: llvm_unreachable("Unreachable!"); |
1292 | case X86::SHL8ri: |
1293 | case X86::SHL16ri: { |
1294 | unsigned ShAmt = MI.getOperand(2).getImm(); |
1295 | MIB.addReg(0).addImm(1ULL << ShAmt) |
1296 | .addReg(InRegLEA, RegState::Kill).addImm(0).addReg(0); |
1297 | break; |
1298 | } |
1299 | case X86::INC8r: |
1300 | case X86::INC16r: |
1301 | addRegOffset(MIB, InRegLEA, true, 1); |
1302 | break; |
1303 | case X86::DEC8r: |
1304 | case X86::DEC16r: |
1305 | addRegOffset(MIB, InRegLEA, true, -1); |
1306 | break; |
1307 | case X86::ADD8ri: |
1308 | case X86::ADD8ri_DB: |
1309 | case X86::ADD16ri: |
1310 | case X86::ADD16ri8: |
1311 | case X86::ADD16ri_DB: |
1312 | case X86::ADD16ri8_DB: |
1313 | addRegOffset(MIB, InRegLEA, true, MI.getOperand(2).getImm()); |
1314 | break; |
1315 | case X86::ADD8rr: |
1316 | case X86::ADD8rr_DB: |
1317 | case X86::ADD16rr: |
1318 | case X86::ADD16rr_DB: { |
1319 | Register Src2 = MI.getOperand(2).getReg(); |
1320 | bool IsKill2 = MI.getOperand(2).isKill(); |
1321 | assert(!MI.getOperand(2).isUndef() && "Undef op doesn't need optimization"); |
1322 | unsigned InRegLEA2 = 0; |
1323 | MachineInstr *InsMI2 = nullptr; |
1324 | if (Src == Src2) { |
1325 | |
1326 | |
1327 | addRegReg(MIB, InRegLEA, true, InRegLEA, false); |
1328 | } else { |
1329 | if (Subtarget.is64Bit()) |
1330 | InRegLEA2 = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass); |
1331 | else |
1332 | InRegLEA2 = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass); |
1333 | |
1334 | |
1335 | BuildMI(*MFI, &*MIB, MI.getDebugLoc(), get(X86::IMPLICIT_DEF), InRegLEA2); |
1336 | InsMI2 = BuildMI(*MFI, &*MIB, MI.getDebugLoc(), get(TargetOpcode::COPY)) |
1337 | .addReg(InRegLEA2, RegState::Define, SubReg) |
1338 | .addReg(Src2, getKillRegState(IsKill2)); |
1339 | addRegReg(MIB, InRegLEA, true, InRegLEA2, true); |
1340 | } |
1341 | if (LV && IsKill2 && InsMI2) |
1342 | LV->replaceKillInstruction(Src2, MI, *InsMI2); |
1343 | break; |
1344 | } |
1345 | } |
1346 | |
1347 | MachineInstr *NewMI = MIB; |
1348 | MachineInstr *ExtMI = |
1349 | BuildMI(*MFI, MBBI, MI.getDebugLoc(), get(TargetOpcode::COPY)) |
1350 | .addReg(Dest, RegState::Define | getDeadRegState(IsDead)) |
1351 | .addReg(OutRegLEA, RegState::Kill, SubReg); |
1352 | |
1353 | if (LV) { |
1354 | |
1355 | LV->getVarInfo(InRegLEA).Kills.push_back(NewMI); |
1356 | LV->getVarInfo(OutRegLEA).Kills.push_back(ExtMI); |
1357 | if (IsKill) |
1358 | LV->replaceKillInstruction(Src, MI, *InsMI); |
1359 | if (IsDead) |
1360 | LV->replaceKillInstruction(Dest, MI, *ExtMI); |
1361 | } |
1362 | |
1363 | return ExtMI; |
1364 | } |
1365 | |
1366 | /// This method must be implemented by targets that |
1367 | /// set the M_CONVERTIBLE_TO_3_ADDR flag.  When this flag is set, the target |
1368 | /// may be able to convert a two-address instruction into a true |
1369 | /// three-address instruction on demand.  This allows the X86 target (for |
1370 | /// example) to convert ADD and SHL instructions into LEA instructions if they |
1371 | /// would require register copies due to two-addressness. |
1372 | /// |
1373 | /// This method returns a null pointer if the transformation cannot be |
1374 | /// performed, otherwise it returns the new instruction. |
1375 | /// |
1376 | MachineInstr * |
1377 | X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, |
1378 | MachineInstr &MI, LiveVariables *LV) const { |
1379 | // The following opcodes also sets the condition code register(s). Only |
1380 | // convert them to equivalent lea if the condition code register def's |
1381 | // are dead! |
1382 | if (hasLiveCondCodeDef(MI)) |
1383 | return nullptr; |
1384 | |
1385 | MachineFunction &MF = *MI.getParent()->getParent(); |
1386 | |
1387 | const MachineOperand &Dest = MI.getOperand(0); |
1388 | const MachineOperand &Src = MI.getOperand(1); |
1389 | |
1390 | |
1391 | |
1392 | |
1393 | |
1394 | if (Src.isUndef()) |
1395 | return nullptr; |
1396 | if (MI.getNumOperands() > 2) |
1397 | if (MI.getOperand(2).isReg() && MI.getOperand(2).isUndef()) |
1398 | return nullptr; |
1399 | |
1400 | MachineInstr *NewMI = nullptr; |
1401 | bool Is64Bit = Subtarget.is64Bit(); |
1402 | |
1403 | bool Is8BitOp = false; |
1404 | unsigned MIOpc = MI.getOpcode(); |
1405 | switch (MIOpc) { |
1406 | default: llvm_unreachable("Unreachable!"); |
1407 | case X86::SHL64ri: { |
1408 | assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!"); |
1409 | unsigned ShAmt = getTruncatedShiftCount(MI, 2); |
1410 | if (!isTruncatedShiftCountForLEA(ShAmt)) return nullptr; |
1411 | |
1412 | |
1413 | if (Src.getReg().isVirtual() && !MF.getRegInfo().constrainRegClass( |
1414 | Src.getReg(), &X86::GR64_NOSPRegClass)) |
1415 | return nullptr; |
1416 | |
1417 | NewMI = BuildMI(MF, MI.getDebugLoc(), get(X86::LEA64r)) |
1418 | .add(Dest) |
1419 | .addReg(0) |
1420 | .addImm(1ULL << ShAmt) |
1421 | .add(Src) |
1422 | .addImm(0) |
1423 | .addReg(0); |
1424 | break; |
1425 | } |
1426 | case X86::SHL32ri: { |
1427 | assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!"); |
1428 | unsigned ShAmt = getTruncatedShiftCount(MI, 2); |
1429 | if (!isTruncatedShiftCountForLEA(ShAmt)) return nullptr; |
1430 | |
1431 | unsigned Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r; |
1432 | |
1433 | |
1434 | bool isKill; |
1435 | Register SrcReg; |
1436 | MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false); |
1437 | if (!classifyLEAReg(MI, Src, Opc, false, |
1438 | SrcReg, isKill, ImplicitOp, LV)) |
1439 | return nullptr; |
1440 | |
1441 | MachineInstrBuilder MIB = |
1442 | BuildMI(MF, MI.getDebugLoc(), get(Opc)) |
1443 | .add(Dest) |
1444 | .addReg(0) |
1445 | .addImm(1ULL << ShAmt) |
1446 | .addReg(SrcReg, getKillRegState(isKill)) |
1447 | .addImm(0) |
1448 | .addReg(0); |
1449 | if (ImplicitOp.getReg() != 0) |
1450 | MIB.add(ImplicitOp); |
1451 | NewMI = MIB; |
1452 | |
1453 | break; |
1454 | } |
1455 | case X86::SHL8ri: |
1456 | Is8BitOp = true; |
1457 | LLVM_FALLTHROUGH; |
1458 | case X86::SHL16ri: { |
1459 | assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!"); |
1460 | unsigned ShAmt = getTruncatedShiftCount(MI, 2); |
1461 | if (!isTruncatedShiftCountForLEA(ShAmt)) |
1462 | return nullptr; |
1463 | return convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV, Is8BitOp); |
1464 | } |
1465 | case X86::INC64r: |
1466 | case X86::INC32r: { |
1467 | assert(MI.getNumOperands() >= 2 && "Unknown inc instruction!"); |
1468 | unsigned Opc = MIOpc == X86::INC64r ? X86::LEA64r : |
1469 | (Is64Bit ? X86::LEA64_32r : X86::LEA32r); |
1470 | bool isKill; |
1471 | Register SrcReg; |
1472 | MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false); |
1473 | if (!classifyLEAReg(MI, Src, Opc, false, SrcReg, isKill, |
1474 | ImplicitOp, LV)) |
1475 | return nullptr; |
1476 | |
1477 | MachineInstrBuilder MIB = |
1478 | BuildMI(MF, MI.getDebugLoc(), get(Opc)) |
1479 | .add(Dest) |
1480 | .addReg(SrcReg, getKillRegState(isKill)); |
1481 | if (ImplicitOp.getReg() != 0) |
1482 | MIB.add(ImplicitOp); |
1483 | |
1484 | NewMI = addOffset(MIB, 1); |
1485 | break; |
1486 | } |
1487 | case X86::DEC64r: |
1488 | case X86::DEC32r: { |
1489 | assert(MI.getNumOperands() >= 2 && "Unknown dec instruction!"); |
1490 | unsigned Opc = MIOpc == X86::DEC64r ? X86::LEA64r |
1491 | : (Is64Bit ? X86::LEA64_32r : X86::LEA32r); |
1492 | |
1493 | bool isKill; |
1494 | Register SrcReg; |
1495 | MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false); |
1496 | if (!classifyLEAReg(MI, Src, Opc, false, SrcReg, isKill, |
1497 | ImplicitOp, LV)) |
1498 | return nullptr; |
1499 | |
1500 | MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc)) |
1501 | .add(Dest) |
1502 | .addReg(SrcReg, getKillRegState(isKill)); |
1503 | if (ImplicitOp.getReg() != 0) |
1504 | MIB.add(ImplicitOp); |
1505 | |
1506 | NewMI = addOffset(MIB, -1); |
1507 | |
1508 | break; |
1509 | } |
1510 | case X86::DEC8r: |
1511 | case X86::INC8r: |
1512 | Is8BitOp = true; |
1513 | LLVM_FALLTHROUGH; |
1514 | case X86::DEC16r: |
1515 | case X86::INC16r: |
1516 | return convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV, Is8BitOp); |
1517 | case X86::ADD64rr: |
1518 | case X86::ADD64rr_DB: |
1519 | case X86::ADD32rr: |
1520 | case X86::ADD32rr_DB: { |
1521 | assert(MI.getNumOperands() >= 3 && "Unknown add instruction!"); |
1522 | unsigned Opc; |
1523 | if (MIOpc == X86::ADD64rr || MIOpc == X86::ADD64rr_DB) |
1524 | Opc = X86::LEA64r; |
1525 | else |
1526 | Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r; |
1527 | |
1528 | bool isKill; |
1529 | Register SrcReg; |
1530 | MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false); |
1531 | if (!classifyLEAReg(MI, Src, Opc, true, |
1532 | SrcReg, isKill, ImplicitOp, LV)) |
1533 | return nullptr; |
1534 | |
1535 | const MachineOperand &Src2 = MI.getOperand(2); |
1536 | bool isKill2; |
1537 | Register SrcReg2; |
1538 | MachineOperand ImplicitOp2 = MachineOperand::CreateReg(0, false); |
1539 | if (!classifyLEAReg(MI, Src2, Opc, false, |
1540 | SrcReg2, isKill2, ImplicitOp2, LV)) |
1541 | return nullptr; |
1542 | |
1543 | MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc)).add(Dest); |
1544 | if (ImplicitOp.getReg() != 0) |
1545 | MIB.add(ImplicitOp); |
1546 | if (ImplicitOp2.getReg() != 0) |
1547 | MIB.add(ImplicitOp2); |
1548 | |
1549 | NewMI = addRegReg(MIB, SrcReg, isKill, SrcReg2, isKill2); |
1550 | if (LV && Src2.isKill()) |
1551 | LV->replaceKillInstruction(SrcReg2, MI, *NewMI); |
1552 | break; |
1553 | } |
1554 | case X86::ADD8rr: |
1555 | case X86::ADD8rr_DB: |
1556 | Is8BitOp = true; |
1557 | LLVM_FALLTHROUGH; |
1558 | case X86::ADD16rr: |
1559 | case X86::ADD16rr_DB: |
1560 | return convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV, Is8BitOp); |
1561 | case X86::ADD64ri32: |
1562 | case X86::ADD64ri8: |
1563 | case X86::ADD64ri32_DB: |
1564 | case X86::ADD64ri8_DB: |
1565 | assert(MI.getNumOperands() >= 3 && "Unknown add instruction!"); |
1566 | NewMI = addOffset( |
1567 | BuildMI(MF, MI.getDebugLoc(), get(X86::LEA64r)).add(Dest).add(Src), |
1568 | MI.getOperand(2)); |
1569 | break; |
1570 | case X86::ADD32ri: |
1571 | case X86::ADD32ri8: |
1572 | case X86::ADD32ri_DB: |
1573 | case X86::ADD32ri8_DB: { |
1574 | assert(MI.getNumOperands() >= 3 && "Unknown add instruction!"); |
1575 | unsigned Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r; |
1576 | |
1577 | bool isKill; |
1578 | Register SrcReg; |
1579 | MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false); |
1580 | if (!classifyLEAReg(MI, Src, Opc, true, |
1581 | SrcReg, isKill, ImplicitOp, LV)) |
1582 | return nullptr; |
1583 | |
1584 | MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc)) |
1585 | .add(Dest) |
1586 | .addReg(SrcReg, getKillRegState(isKill)); |
1587 | if (ImplicitOp.getReg() != 0) |
1588 | MIB.add(ImplicitOp); |
1589 | |
1590 | NewMI = addOffset(MIB, MI.getOperand(2)); |
1591 | break; |
1592 | } |
1593 | case X86::ADD8ri: |
1594 | case X86::ADD8ri_DB: |
1595 | Is8BitOp = true; |
1596 | LLVM_FALLTHROUGH; |
1597 | case X86::ADD16ri: |
1598 | case X86::ADD16ri8: |
1599 | case X86::ADD16ri_DB: |
1600 | case X86::ADD16ri8_DB: |
1601 | return convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV, Is8BitOp); |
1602 | case X86::SUB8ri: |
1603 | case X86::SUB16ri8: |
1604 | case X86::SUB16ri: |
1605 | |
1606 | return nullptr; |
1607 | case X86::SUB32ri8: |
1608 | case X86::SUB32ri: { |
1609 | if (!MI.getOperand(2).isImm()) |
1610 | return nullptr; |
1611 | int64_t Imm = MI.getOperand(2).getImm(); |
1612 | if (!isInt<32>(-Imm)) |
1613 | return nullptr; |
1614 | |
1615 | assert(MI.getNumOperands() >= 3 && "Unknown sub instruction!"); |
1616 | unsigned Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r; |
1617 | |
1618 | bool isKill; |
1619 | Register SrcReg; |
1620 | MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false); |
1621 | if (!classifyLEAReg(MI, Src, Opc, true, |
1622 | SrcReg, isKill, ImplicitOp, LV)) |
1623 | return nullptr; |
1624 | |
1625 | MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc)) |
1626 | .add(Dest) |
1627 | .addReg(SrcReg, getKillRegState(isKill)); |
1628 | if (ImplicitOp.getReg() != 0) |
1629 | MIB.add(ImplicitOp); |
1630 | |
1631 | NewMI = addOffset(MIB, -Imm); |
1632 | break; |
1633 | } |
1634 | |
1635 | case X86::SUB64ri8: |
1636 | case X86::SUB64ri32: { |
1637 | if (!MI.getOperand(2).isImm()) |
1638 | return nullptr; |
1639 | int64_t Imm = MI.getOperand(2).getImm(); |
1640 | if (!isInt<32>(-Imm)) |
1641 | return nullptr; |
1642 | |
1643 | assert(MI.getNumOperands() >= 3 && "Unknown sub instruction!"); |
1644 | |
1645 | MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), |
1646 | get(X86::LEA64r)).add(Dest).add(Src); |
1647 | NewMI = addOffset(MIB, -Imm); |
1648 | break; |
1649 | } |
1650 | |
1651 | case X86::VMOVDQU8Z128rmk: |
1652 | case X86::VMOVDQU8Z256rmk: |
1653 | case X86::VMOVDQU8Zrmk: |
1654 | case X86::VMOVDQU16Z128rmk: |
1655 | case X86::VMOVDQU16Z256rmk: |
1656 | case X86::VMOVDQU16Zrmk: |
1657 | case X86::VMOVDQU32Z128rmk: case X86::VMOVDQA32Z128rmk: |
1658 | case X86::VMOVDQU32Z256rmk: case X86::VMOVDQA32Z256rmk: |
1659 | case X86::VMOVDQU32Zrmk: case X86::VMOVDQA32Zrmk: |
1660 | case X86::VMOVDQU64Z128rmk: case X86::VMOVDQA64Z128rmk: |
1661 | case X86::VMOVDQU64Z256rmk: case X86::VMOVDQA64Z256rmk: |
1662 | case X86::VMOVDQU64Zrmk: case X86::VMOVDQA64Zrmk: |
1663 | case X86::VMOVUPDZ128rmk: case X86::VMOVAPDZ128rmk: |
1664 | case X86::VMOVUPDZ256rmk: case X86::VMOVAPDZ256rmk: |
1665 | case X86::VMOVUPDZrmk: case X86::VMOVAPDZrmk: |
1666 | case X86::VMOVUPSZ128rmk: case X86::VMOVAPSZ128rmk: |
1667 | case X86::VMOVUPSZ256rmk: case X86::VMOVAPSZ256rmk: |
1668 | case X86::VMOVUPSZrmk: case X86::VMOVAPSZrmk: |
1669 | case X86::VBROADCASTSDZ256rmk: |
1670 | case X86::VBROADCASTSDZrmk: |
1671 | case X86::VBROADCASTSSZ128rmk: |
1672 | case X86::VBROADCASTSSZ256rmk: |
1673 | case X86::VBROADCASTSSZrmk: |
1674 | case X86::VPBROADCASTDZ128rmk: |
1675 | case X86::VPBROADCASTDZ256rmk: |
1676 | case X86::VPBROADCASTDZrmk: |
1677 | case X86::VPBROADCASTQZ128rmk: |
1678 | case X86::VPBROADCASTQZ256rmk: |
1679 | case X86::VPBROADCASTQZrmk: { |
1680 | unsigned Opc; |
1681 | switch (MIOpc) { |
1682 | default: llvm_unreachable("Unreachable!"); |
1683 | case X86::VMOVDQU8Z128rmk: Opc = X86::VPBLENDMBZ128rmk; break; |
1684 | case X86::VMOVDQU8Z256rmk: Opc = X86::VPBLENDMBZ256rmk; break; |
1685 | case X86::VMOVDQU8Zrmk: Opc = X86::VPBLENDMBZrmk; break; |
1686 | case X86::VMOVDQU16Z128rmk: Opc = X86::VPBLENDMWZ128rmk; break; |
1687 | case X86::VMOVDQU16Z256rmk: Opc = X86::VPBLENDMWZ256rmk; break; |
1688 | case X86::VMOVDQU16Zrmk: Opc = X86::VPBLENDMWZrmk; break; |
1689 | case X86::VMOVDQU32Z128rmk: Opc = X86::VPBLENDMDZ128rmk; break; |
1690 | case X86::VMOVDQU32Z256rmk: Opc = X86::VPBLENDMDZ256rmk; break; |
1691 | case X86::VMOVDQU32Zrmk: Opc = X86::VPBLENDMDZrmk; break; |
1692 | case X86::VMOVDQU64Z128rmk: Opc = X86::VPBLENDMQZ128rmk; break; |
1693 | case X86::VMOVDQU64Z256rmk: Opc = X86::VPBLENDMQZ256rmk; break; |
1694 | case X86::VMOVDQU64Zrmk: Opc = X86::VPBLENDMQZrmk; break; |
1695 | case X86::VMOVUPDZ128rmk: Opc = X86::VBLENDMPDZ128rmk; break; |
1696 | case X86::VMOVUPDZ256rmk: Opc = X86::VBLENDMPDZ256rmk; break; |
1697 | case X86::VMOVUPDZrmk: Opc = X86::VBLENDMPDZrmk; break; |
1698 | case X86::VMOVUPSZ128rmk: Opc = X86::VBLENDMPSZ128rmk; break; |
1699 | case X86::VMOVUPSZ256rmk: Opc = X86::VBLENDMPSZ256rmk; break; |
1700 | case X86::VMOVUPSZrmk: Opc = X86::VBLENDMPSZrmk; break; |
1701 | case X86::VMOVDQA32Z128rmk: Opc = X86::VPBLENDMDZ128rmk; break; |
1702 | case X86::VMOVDQA32Z256rmk: Opc = X86::VPBLENDMDZ256rmk; break; |
1703 | case X86::VMOVDQA32Zrmk: Opc = X86::VPBLENDMDZrmk; break; |
1704 | case X86::VMOVDQA64Z128rmk: Opc = X86::VPBLENDMQZ128rmk; break; |
1705 | case X86::VMOVDQA64Z256rmk: Opc = X86::VPBLENDMQZ256rmk; break; |
1706 | case X86::VMOVDQA64Zrmk: Opc = X86::VPBLENDMQZrmk; break; |
1707 | case X86::VMOVAPDZ128rmk: Opc = X86::VBLENDMPDZ128rmk; break; |
1708 | case X86::VMOVAPDZ256rmk: Opc = X86::VBLENDMPDZ256rmk; break; |
1709 | case X86::VMOVAPDZrmk: Opc = X86::VBLENDMPDZrmk; break; |
1710 | case X86::VMOVAPSZ128rmk: Opc = X86::VBLENDMPSZ128rmk; break; |
1711 | case X86::VMOVAPSZ256rmk: Opc = X86::VBLENDMPSZ256rmk; break; |
1712 | case X86::VMOVAPSZrmk: Opc = X86::VBLENDMPSZrmk; break; |
1713 | case X86::VBROADCASTSDZ256rmk: Opc = X86::VBLENDMPDZ256rmbk; break; |
1714 | case X86::VBROADCASTSDZrmk: Opc = X86::VBLENDMPDZrmbk; break; |
1715 | case X86::VBROADCASTSSZ128rmk: Opc = X86::VBLENDMPSZ128rmbk; break; |
1716 | case X86::VBROADCASTSSZ256rmk: Opc = X86::VBLENDMPSZ256rmbk; break; |
1717 | case X86::VBROADCASTSSZrmk: Opc = X86::VBLENDMPSZrmbk; break; |
1718 | case X86::VPBROADCASTDZ128rmk: Opc = X86::VPBLENDMDZ128rmbk; break; |
1719 | case X86::VPBROADCASTDZ256rmk: Opc = X86::VPBLENDMDZ256rmbk; break; |
1720 | case X86::VPBROADCASTDZrmk: Opc = X86::VPBLENDMDZrmbk; break; |
1721 | case X86::VPBROADCASTQZ128rmk: Opc = X86::VPBLENDMQZ128rmbk; break; |
1722 | case X86::VPBROADCASTQZ256rmk: Opc = X86::VPBLENDMQZ256rmbk; break; |
1723 | case X86::VPBROADCASTQZrmk: Opc = X86::VPBLENDMQZrmbk; break; |
1724 | } |
1725 | |
1726 | NewMI = BuildMI(MF, MI.getDebugLoc(), get(Opc)) |
1727 | .add(Dest) |
1728 | .add(MI.getOperand(2)) |
1729 | .add(Src) |
1730 | .add(MI.getOperand(3)) |
1731 | .add(MI.getOperand(4)) |
1732 | .add(MI.getOperand(5)) |
1733 | .add(MI.getOperand(6)) |
1734 | .add(MI.getOperand(7)); |
1735 | break; |
1736 | } |
1737 | |
1738 | case X86::VMOVDQU8Z128rrk: |
1739 | case X86::VMOVDQU8Z256rrk: |
1740 | case X86::VMOVDQU8Zrrk: |
1741 | case X86::VMOVDQU16Z128rrk: |
1742 | case X86::VMOVDQU16Z256rrk: |
1743 | case X86::VMOVDQU16Zrrk: |
1744 | case X86::VMOVDQU32Z128rrk: case X86::VMOVDQA32Z128rrk: |
1745 | case X86::VMOVDQU32Z256rrk: case X86::VMOVDQA32Z256rrk: |
1746 | case X86::VMOVDQU32Zrrk: case X86::VMOVDQA32Zrrk: |
1747 | case X86::VMOVDQU64Z128rrk: case X86::VMOVDQA64Z128rrk: |
1748 | case X86::VMOVDQU64Z256rrk: case X86::VMOVDQA64Z256rrk: |
1749 | case X86::VMOVDQU64Zrrk: case X86::VMOVDQA64Zrrk: |
1750 | case X86::VMOVUPDZ128rrk: case X86::VMOVAPDZ128rrk: |
1751 | case X86::VMOVUPDZ256rrk: case X86::VMOVAPDZ256rrk: |
1752 | case X86::VMOVUPDZrrk: case X86::VMOVAPDZrrk: |
1753 | case X86::VMOVUPSZ128rrk: case X86::VMOVAPSZ128rrk: |
1754 | case X86::VMOVUPSZ256rrk: case X86::VMOVAPSZ256rrk: |
1755 | case X86::VMOVUPSZrrk: case X86::VMOVAPSZrrk: { |
1756 | unsigned Opc; |
1757 | switch (MIOpc) { |
1758 | default: llvm_unreachable("Unreachable!"); |
1759 | case X86::VMOVDQU8Z128rrk: Opc = X86::VPBLENDMBZ128rrk; break; |
1760 | case X86::VMOVDQU8Z256rrk: Opc = X86::VPBLENDMBZ256rrk; break; |
1761 | case X86::VMOVDQU8Zrrk: Opc = X86::VPBLENDMBZrrk; break; |
1762 | case X86::VMOVDQU16Z128rrk: Opc = X86::VPBLENDMWZ128rrk; break; |
1763 | case X86::VMOVDQU16Z256rrk: Opc = X86::VPBLENDMWZ256rrk; break; |
1764 | case X86::VMOVDQU16Zrrk: Opc = X86::VPBLENDMWZrrk; break; |
1765 | case X86::VMOVDQU32Z128rrk: Opc = X86::VPBLENDMDZ128rrk; break; |
1766 | case X86::VMOVDQU32Z256rrk: Opc = X86::VPBLENDMDZ256rrk; break; |
1767 | case X86::VMOVDQU32Zrrk: Opc = X86::VPBLENDMDZrrk; break; |
1768 | case X86::VMOVDQU64Z128rrk: Opc = X86::VPBLENDMQZ128rrk; break; |
1769 | case X86::VMOVDQU64Z256rrk: Opc = X86::VPBLENDMQZ256rrk; break; |
1770 | case X86::VMOVDQU64Zrrk: Opc = X86::VPBLENDMQZrrk; break; |
1771 | case X86::VMOVUPDZ128rrk: Opc = X86::VBLENDMPDZ128rrk; break; |
1772 | case X86::VMOVUPDZ256rrk: Opc = X86::VBLENDMPDZ256rrk; break; |
1773 | case X86::VMOVUPDZrrk: Opc = X86::VBLENDMPDZrrk; break; |
1774 | case X86::VMOVUPSZ128rrk: Opc = X86::VBLENDMPSZ128rrk; break; |
1775 | case X86::VMOVUPSZ256rrk: Opc = X86::VBLENDMPSZ256rrk; break; |
1776 | case X86::VMOVUPSZrrk: Opc = X86::VBLENDMPSZrrk; break; |
1777 | case X86::VMOVDQA32Z128rrk: Opc = X86::VPBLENDMDZ128rrk; break; |
1778 | case X86::VMOVDQA32Z256rrk: Opc = X86::VPBLENDMDZ256rrk; break; |
1779 | case X86::VMOVDQA32Zrrk: Opc = X86::VPBLENDMDZrrk; break; |
1780 | case X86::VMOVDQA64Z128rrk: Opc = X86::VPBLENDMQZ128rrk; break; |
1781 | case X86::VMOVDQA64Z256rrk: Opc = X86::VPBLENDMQZ256rrk; break; |
1782 | case X86::VMOVDQA64Zrrk: Opc = X86::VPBLENDMQZrrk; break; |
1783 | case X86::VMOVAPDZ128rrk: Opc = X86::VBLENDMPDZ128rrk; break; |
1784 | case X86::VMOVAPDZ256rrk: Opc = X86::VBLENDMPDZ256rrk; break; |
1785 | case X86::VMOVAPDZrrk: Opc = X86::VBLENDMPDZrrk; break; |
1786 | case X86::VMOVAPSZ128rrk: Opc = X86::VBLENDMPSZ128rrk; break; |
1787 | case X86::VMOVAPSZ256rrk: Opc = X86::VBLENDMPSZ256rrk; break; |
1788 | case X86::VMOVAPSZrrk: Opc = X86::VBLENDMPSZrrk; break; |
1789 | } |
1790 | |
1791 | NewMI = BuildMI(MF, MI.getDebugLoc(), get(Opc)) |
1792 | .add(Dest) |
1793 | .add(MI.getOperand(2)) |
1794 | .add(Src) |
1795 | .add(MI.getOperand(3)); |
1796 | break; |
1797 | } |
1798 | } |
1799 | |
1800 | if (!NewMI) return nullptr; |
1801 | |
1802 | if (LV) { |
1803 | if (Src.isKill()) |
1804 | LV->replaceKillInstruction(Src.getReg(), MI, *NewMI); |
1805 | if (Dest.isDead()) |
1806 | LV->replaceKillInstruction(Dest.getReg(), MI, *NewMI); |
1807 | } |
1808 | |
1809 | MFI->insert(MI.getIterator(), NewMI); |
1810 | return NewMI; |
1811 | } |
1812 | |
1813 | |
1814 | |
1815 | |
1816 | |
1817 | |
1818 | |
1819 | |
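// Given two source operand indices of a three-source instruction, return the
// commute case they describe: 0 for sources 1 and 2, 1 for sources 1 and 3,
// and 2 for sources 2 and 3. A k-mask operand, when present, shifts the
// second and third source indices up by one.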
1820 | static unsigned getThreeSrcCommuteCase(uint64_t TSFlags, unsigned SrcOpIdx1, |
1821 | unsigned SrcOpIdx2) { |
1822 | |
1823 | if (SrcOpIdx1 > SrcOpIdx2) |
1824 | std::swap(SrcOpIdx1, SrcOpIdx2); |
1825 | |
1826 | unsigned Op1 = 1, Op2 = 2, Op3 = 3; |
1827 | if (X86II::isKMasked(TSFlags)) { |
1828 | Op2++; |
1829 | Op3++; |
1830 | } |
1831 | |
1832 | if (SrcOpIdx1 == Op1 && SrcOpIdx2 == Op2) |
1833 | return 0; |
1834 | if (SrcOpIdx1 == Op1 && SrcOpIdx2 == Op3) |
1835 | return 1; |
1836 | if (SrcOpIdx1 == Op2 && SrcOpIdx2 == Op3) |
1837 | return 2; |
1838 | llvm_unreachable("Unknown three src commute case."); |
1839 | } |
1840 | |
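// Return the opcode within MI's FMA3 group (132/213/231 form) that computes
// the same value once operands SrcOpIdx1 and SrcOpIdx2 are swapped. The
// FormMapping table below gives, for each commute case, the new form index
// for every original form.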
1841 | unsigned X86InstrInfo::getFMA3OpcodeToCommuteOperands( |
1842 | const MachineInstr &MI, unsigned SrcOpIdx1, unsigned SrcOpIdx2, |
1843 | const X86InstrFMA3Group &FMA3Group) const { |
1844 | |
1845 | unsigned Opc = MI.getOpcode(); |
1846 | |
1847 | |
1848 | |
1849 | |
1850 | |
1851 | |
1852 | |
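// Intrinsic FMA forms cannot have their first source operand commuted; the
// assert below enforces that neither index refers to operand 1.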
1853 | assert(!(FMA3Group.isIntrinsic() && (SrcOpIdx1 == 1 || SrcOpIdx2 == 1)) && |
1854 | "Intrinsic instructions can't commute operand 1"); |
1855 | |
1856 | |
1857 | unsigned Case = getThreeSrcCommuteCase(MI.getDesc().TSFlags, SrcOpIdx1, |
1858 | SrcOpIdx2); |
1859 | assert(Case < 3 && "Unexpected case number!"); |
1860 | |
1861 | |
1862 | |
1863 | |
1864 | const unsigned Form132Index = 0; |
1865 | const unsigned Form213Index = 1; |
1866 | const unsigned Form231Index = 2; |
1867 | static const unsigned FormMapping[][3] = { |
1868 | |
1869 | |
1870 | |
1871 | |
1872 | { Form231Index, Form213Index, Form132Index }, |
1873 | |
1874 | |
1875 | |
1876 | |
1877 | { Form132Index, Form231Index, Form213Index }, |
1878 | |
1879 | |
1880 | |
1881 | |
1882 | { Form213Index, Form132Index, Form231Index } |
1883 | }; |
1884 | |
1885 | unsigned FMAForms[3]; |
1886 | FMAForms[0] = FMA3Group.get132Opcode(); |
1887 | FMAForms[1] = FMA3Group.get213Opcode(); |
1888 | FMAForms[2] = FMA3Group.get231Opcode(); |
1889 | unsigned FormIndex; |
1890 | for (FormIndex = 0; FormIndex < 3; FormIndex++) |
1891 | if (Opc == FMAForms[FormIndex]) |
1892 | break; |
1893 | |
1894 | |
1895 | FormIndex = FormMapping[Case][FormIndex]; |
1896 | return FMAForms[FormIndex]; |
1897 | } |
1898 | |
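// Commute operands SrcOpIdx1 and SrcOpIdx2 of a VPTERNLOG by rewriting its
// 8-bit truth-table immediate: for each commute case, SwapMasks lists two
// pairs of immediate bits that must exchange places.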
1899 | static void commuteVPTERNLOG(MachineInstr &MI, unsigned SrcOpIdx1, |
1900 | unsigned SrcOpIdx2) { |
1901 | |
1902 | unsigned Case = getThreeSrcCommuteCase(MI.getDesc().TSFlags, SrcOpIdx1, |
1903 | SrcOpIdx2); |
1904 | assert(Case < 3 && "Unexpected case value!"); |
1905 | |
1906 | |
1907 | static const uint8_t SwapMasks[3][4] = { |
1908 | { 0x04, 0x10, 0x08, 0x20 }, |
1909 | { 0x02, 0x10, 0x08, 0x40 }, |
1910 | { 0x02, 0x04, 0x20, 0x40 }, |
1911 | }; |
1912 | |
1913 | uint8_t Imm = MI.getOperand(MI.getNumOperands()-1).getImm(); |
1914 | |
1915 | uint8_t NewImm = Imm & ~(SwapMasks[Case][0] | SwapMasks[Case][1] | |
1916 | SwapMasks[Case][2] | SwapMasks[Case][3]); |
1917 | |
1918 | if (Imm & SwapMasks[Case][0]) NewImm |= SwapMasks[Case][1]; |
1919 | if (Imm & SwapMasks[Case][1]) NewImm |= SwapMasks[Case][0]; |
1920 | if (Imm & SwapMasks[Case][2]) NewImm |= SwapMasks[Case][3]; |
1921 | if (Imm & SwapMasks[Case][3]) NewImm |= SwapMasks[Case][2]; |
1922 | MI.getOperand(MI.getNumOperands()-1).setImm(NewImm); |
1923 | } |
1924 | |
1925 | |
1926 | |
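// Return true if Opcode is one of the VPERMI2/VPERMT2 variants listed below
// (register, memory, broadcast, and zero-masked forms) whose index and data
// sources can be commuted by switching to the opposite opcode.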
1927 | static bool isCommutableVPERMV3Instruction(unsigned Opcode) { |
1928 | #define VPERM_CASES(Suffix) \ |
1929 | case X86::VPERMI2##Suffix##128rr: case X86::VPERMT2##Suffix##128rr: \ |
1930 | case X86::VPERMI2##Suffix##256rr: case X86::VPERMT2##Suffix##256rr: \ |
1931 | case X86::VPERMI2##Suffix##rr: case X86::VPERMT2##Suffix##rr: \ |
1932 | case X86::VPERMI2##Suffix##128rm: case X86::VPERMT2##Suffix##128rm: \ |
1933 | case X86::VPERMI2##Suffix##256rm: case X86::VPERMT2##Suffix##256rm: \ |
1934 | case X86::VPERMI2##Suffix##rm: case X86::VPERMT2##Suffix##rm: \ |
1935 | case X86::VPERMI2##Suffix##128rrkz: case X86::VPERMT2##Suffix##128rrkz: \ |
1936 | case X86::VPERMI2##Suffix##256rrkz: case X86::VPERMT2##Suffix##256rrkz: \ |
1937 | case X86::VPERMI2##Suffix##rrkz: case X86::VPERMT2##Suffix##rrkz: \ |
1938 | case X86::VPERMI2##Suffix##128rmkz: case X86::VPERMT2##Suffix##128rmkz: \ |
1939 | case X86::VPERMI2##Suffix##256rmkz: case X86::VPERMT2##Suffix##256rmkz: \ |
1940 | case X86::VPERMI2##Suffix##rmkz: case X86::VPERMT2##Suffix##rmkz: |
1941 | |
1942 | #define VPERM_CASES_BROADCAST(Suffix) \ |
1943 | VPERM_CASES(Suffix) \ |
1944 | case X86::VPERMI2##Suffix##128rmb: case X86::VPERMT2##Suffix##128rmb: \ |
1945 | case X86::VPERMI2##Suffix##256rmb: case X86::VPERMT2##Suffix##256rmb: \ |
1946 | case X86::VPERMI2##Suffix##rmb: case X86::VPERMT2##Suffix##rmb: \ |
1947 | case X86::VPERMI2##Suffix##128rmbkz: case X86::VPERMT2##Suffix##128rmbkz: \ |
1948 | case X86::VPERMI2##Suffix##256rmbkz: case X86::VPERMT2##Suffix##256rmbkz: \ |
1949 | case X86::VPERMI2##Suffix##rmbkz: case X86::VPERMT2##Suffix##rmbkz: |
1950 | |
1951 | switch (Opcode) { |
1952 | default: return false; |
1953 | VPERM_CASES(B) |
1954 | VPERM_CASES_BROADCAST(D) |
1955 | VPERM_CASES_BROADCAST(PD) |
1956 | VPERM_CASES_BROADCAST(PS) |
1957 | VPERM_CASES_BROADCAST(Q) |
1958 | VPERM_CASES(W) |
1959 | return true; |
1960 | } |
1961 | #undef VPERM_CASES_BROADCAST |
1962 | #undef VPERM_CASES |
1963 | } |
1964 | |
1965 | |
1966 | |
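// Map each VPERMI2* opcode to the corresponding VPERMT2* opcode and vice
// versa, preserving the size, memory, broadcast, and masking suffix.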
1967 | static unsigned getCommutedVPERMV3Opcode(unsigned Opcode) { |
1968 | #define VPERM_CASES(Orig, New) \ |
1969 | case X86::Orig##128rr: return X86::New##128rr; \ |
1970 | case X86::Orig##128rrkz: return X86::New##128rrkz; \ |
1971 | case X86::Orig##128rm: return X86::New##128rm; \ |
1972 | case X86::Orig##128rmkz: return X86::New##128rmkz; \ |
1973 | case X86::Orig##256rr: return X86::New##256rr; \ |
1974 | case X86::Orig##256rrkz: return X86::New##256rrkz; \ |
1975 | case X86::Orig##256rm: return X86::New##256rm; \ |
1976 | case X86::Orig##256rmkz: return X86::New##256rmkz; \ |
1977 | case X86::Orig##rr: return X86::New##rr; \ |
1978 | case X86::Orig##rrkz: return X86::New##rrkz; \ |
1979 | case X86::Orig##rm: return X86::New##rm; \ |
1980 | case X86::Orig##rmkz: return X86::New##rmkz; |
1981 | |
1982 | #define VPERM_CASES_BROADCAST(Orig, New) \ |
1983 | VPERM_CASES(Orig, New) \ |
1984 | case X86::Orig##128rmb: return X86::New##128rmb; \ |
1985 | case X86::Orig##128rmbkz: return X86::New##128rmbkz; \ |
1986 | case X86::Orig##256rmb: return X86::New##256rmb; \ |
1987 | case X86::Orig##256rmbkz: return X86::New##256rmbkz; \ |
1988 | case X86::Orig##rmb: return X86::New##rmb; \ |
1989 | case X86::Orig##rmbkz: return X86::New##rmbkz; |
1990 | |
1991 | switch (Opcode) { |
1992 | VPERM_CASES(VPERMI2B, VPERMT2B) |
1993 | VPERM_CASES_BROADCAST(VPERMI2D, VPERMT2D) |
1994 | VPERM_CASES_BROADCAST(VPERMI2PD, VPERMT2PD) |
1995 | VPERM_CASES_BROADCAST(VPERMI2PS, VPERMT2PS) |
1996 | VPERM_CASES_BROADCAST(VPERMI2Q, VPERMT2Q) |
1997 | VPERM_CASES(VPERMI2W, VPERMT2W) |
1998 | VPERM_CASES(VPERMT2B, VPERMI2B) |
1999 | VPERM_CASES_BROADCAST(VPERMT2D, VPERMI2D) |
2000 | VPERM_CASES_BROADCAST(VPERMT2PD, VPERMI2PD) |
2001 | VPERM_CASES_BROADCAST(VPERMT2PS, VPERMI2PS) |
2002 | VPERM_CASES_BROADCAST(VPERMT2Q, VPERMI2Q) |
2003 | VPERM_CASES(VPERMT2W, VPERMI2W) |
2004 | } |
2005 | |
2006 | llvm_unreachable("Unreachable!"); |
2007 | #undef VPERM_CASES_BROADCAST |
2008 | #undef VPERM_CASES |
2009 | } |
2010 | |
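// Commute operands OpIdx1 and OpIdx2 of MI. Most cases below must also patch
// the opcode or an immediate; they do so on a working copy (a clone when
// NewMI is set, otherwise MI itself) and then defer to the generic
// TargetInstrInfo implementation for the actual operand swap.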
2011 | MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI, |
2012 | unsigned OpIdx1, |
2013 | unsigned OpIdx2) const { |
2014 | auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & { |
2015 | if (NewMI) |
2016 | return *MI.getParent()->getParent()->CloneMachineInstr(&MI); |
2017 | return MI; |
2018 | }; |
2019 | |
2020 | switch (MI.getOpcode()) { |
2021 | case X86::SHRD16rri8: |
2022 | case X86::SHLD16rri8: |
2023 | case X86::SHRD32rri8: |
2024 | case X86::SHLD32rri8: |
2025 | case X86::SHRD64rri8: |
2026 | case X86::SHLD64rri8:{ |
2027 | unsigned Opc; |
2028 | unsigned Size; |
2029 | switch (MI.getOpcode()) { |
2030 | default: llvm_unreachable("Unreachable!"); |
2031 | case X86::SHRD16rri8: Size = 16; Opc = X86::SHLD16rri8; break; |
2032 | case X86::SHLD16rri8: Size = 16; Opc = X86::SHRD16rri8; break; |
2033 | case X86::SHRD32rri8: Size = 32; Opc = X86::SHLD32rri8; break; |
2034 | case X86::SHLD32rri8: Size = 32; Opc = X86::SHRD32rri8; break; |
2035 | case X86::SHRD64rri8: Size = 64; Opc = X86::SHLD64rri8; break; |
2036 | case X86::SHLD64rri8: Size = 64; Opc = X86::SHRD64rri8; break; |
2037 | } |
2038 | unsigned Amt = MI.getOperand(3).getImm(); |
2039 | auto &WorkingMI = cloneIfNew(MI); |
2040 | WorkingMI.setDesc(get(Opc)); |
2041 | WorkingMI.getOperand(3).setImm(Size - Amt); |
2042 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, false, |
2043 | OpIdx1, OpIdx2); |
2044 | } |
2045 | case X86::PFSUBrr: |
2046 | case X86::PFSUBRrr: { |
2047 | |
2048 | |
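// PFSUB computes dst - src and PFSUBR computes src - dst, so swapping the
// operands is the same as switching between the two opcodes.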
2049 | unsigned Opc = |
2050 | (X86::PFSUBRrr == MI.getOpcode() ? X86::PFSUBrr : X86::PFSUBRrr); |
2051 | auto &WorkingMI = cloneIfNew(MI); |
2052 | WorkingMI.setDesc(get(Opc)); |
2053 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, false, |
2054 | OpIdx1, OpIdx2); |
2055 | } |
2056 | case X86::BLENDPDrri: |
2057 | case X86::BLENDPSrri: |
2058 | case X86::VBLENDPDrri: |
2059 | case X86::VBLENDPSrri: |
2060 | |
2061 | if (MI.getParent()->getParent()->getFunction().hasOptSize()) { |
2062 | unsigned Mask, Opc; |
2063 | switch (MI.getOpcode()) { |
2064 | default: llvm_unreachable("Unreachable!"); |
2065 | case X86::BLENDPDrri: Opc = X86::MOVSDrr; Mask = 0x03; break; |
2066 | case X86::BLENDPSrri: Opc = X86::MOVSSrr; Mask = 0x0F; break; |
2067 | case X86::VBLENDPDrri: Opc = X86::VMOVSDrr; Mask = 0x03; break; |
2068 | case X86::VBLENDPSrri: Opc = X86::VMOVSSrr; Mask = 0x0F; break; |
2069 | } |
2070 | if ((MI.getOperand(3).getImm() ^ Mask) == 1) { |
2071 | auto &WorkingMI = cloneIfNew(MI); |
2072 | WorkingMI.setDesc(get(Opc)); |
2073 | WorkingMI.RemoveOperand(3); |
2074 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, |
2075 | false, |
2076 | OpIdx1, OpIdx2); |
2077 | } |
2078 | } |
2079 | LLVM_FALLTHROUGH; |
2080 | case X86::PBLENDWrri: |
2081 | case X86::VBLENDPDYrri: |
2082 | case X86::VBLENDPSYrri: |
2083 | case X86::VPBLENDDrri: |
2084 | case X86::VPBLENDWrri: |
2085 | case X86::VPBLENDDYrri: |
2086 | case X86::VPBLENDWYrri:{ |
2087 | int8_t Mask; |
2088 | switch (MI.getOpcode()) { |
2089 | default: llvm_unreachable("Unreachable!"); |
2090 | case X86::BLENDPDrri: Mask = (int8_t)0x03; break; |
2091 | case X86::BLENDPSrri: Mask = (int8_t)0x0F; break; |
2092 | case X86::PBLENDWrri: Mask = (int8_t)0xFF; break; |
2093 | case X86::VBLENDPDrri: Mask = (int8_t)0x03; break; |
2094 | case X86::VBLENDPSrri: Mask = (int8_t)0x0F; break; |
2095 | case X86::VBLENDPDYrri: Mask = (int8_t)0x0F; break; |
2096 | case X86::VBLENDPSYrri: Mask = (int8_t)0xFF; break; |
2097 | case X86::VPBLENDDrri: Mask = (int8_t)0x0F; break; |
2098 | case X86::VPBLENDWrri: Mask = (int8_t)0xFF; break; |
2099 | case X86::VPBLENDDYrri: Mask = (int8_t)0xFF; break; |
2100 | case X86::VPBLENDWYrri: Mask = (int8_t)0xFF; break; |
2101 | } |
2102 | |
2103 | |
2104 | |
2105 | int8_t Imm = MI.getOperand(3).getImm() & Mask; |
2106 | auto &WorkingMI = cloneIfNew(MI); |
2107 | WorkingMI.getOperand(3).setImm(Mask ^ Imm); |
2108 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, false, |
2109 | OpIdx1, OpIdx2); |
2110 | } |
2111 | case X86::INSERTPSrr: |
2112 | case X86::VINSERTPSrr: |
2113 | case X86::VINSERTPSZrr: { |
2114 | unsigned Imm = MI.getOperand(MI.getNumOperands() - 1).getImm(); |
2115 | unsigned ZMask = Imm & 15; |
2116 | unsigned DstIdx = (Imm >> 4) & 3; |
2117 | unsigned SrcIdx = (Imm >> 6) & 3; |
2118 | |
2119 | |
2120 | |
2121 | if (DstIdx == SrcIdx && (ZMask & (1 << DstIdx)) == 0 && |
2122 | countPopulation(ZMask) == 2) { |
2123 | unsigned AltIdx = findFirstSet((ZMask | (1 << DstIdx)) ^ 15); |
2124 | assert(AltIdx < 4 && "Illegal insertion index"); |
2125 | unsigned AltImm = (AltIdx << 6) | (AltIdx << 4) | ZMask; |
2126 | auto &WorkingMI = cloneIfNew(MI); |
2127 | WorkingMI.getOperand(MI.getNumOperands() - 1).setImm(AltImm); |
2128 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, false, |
2129 | OpIdx1, OpIdx2); |
2130 | } |
2131 | return nullptr; |
2132 | } |
2133 | case X86::MOVSDrr: |
2134 | case X86::MOVSSrr: |
2135 | case X86::VMOVSDrr: |
2136 | case X86::VMOVSSrr:{ |
2137 | |
2138 | if (Subtarget.hasSSE41()) { |
2139 | unsigned Mask, Opc; |
2140 | switch (MI.getOpcode()) { |
2141 | default: llvm_unreachable("Unreachable!"); |
2142 | case X86::MOVSDrr: Opc = X86::BLENDPDrri; Mask = 0x02; break; |
2143 | case X86::MOVSSrr: Opc = X86::BLENDPSrri; Mask = 0x0E; break; |
2144 | case X86::VMOVSDrr: Opc = X86::VBLENDPDrri; Mask = 0x02; break; |
2145 | case X86::VMOVSSrr: Opc = X86::VBLENDPSrri; Mask = 0x0E; break; |
2146 | } |
2147 | |
2148 | auto &WorkingMI = cloneIfNew(MI); |
2149 | WorkingMI.setDesc(get(Opc)); |
2150 | WorkingMI.addOperand(MachineOperand::CreateImm(Mask)); |
2151 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, false, |
2152 | OpIdx1, OpIdx2); |
2153 | } |
2154 | |
2155 | |
2156 | assert(MI.getOpcode() == X86::MOVSDrr && |
2157 | "Can only commute MOVSDrr without SSE4.1"); |
2158 | |
2159 | auto &WorkingMI = cloneIfNew(MI); |
2160 | WorkingMI.setDesc(get(X86::SHUFPDrri)); |
2161 | WorkingMI.addOperand(MachineOperand::CreateImm(0x02)); |
2162 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, false, |
2163 | OpIdx1, OpIdx2); |
2164 | } |
2165 | case X86::SHUFPDrri: { |
2166 | |
2167 | assert(MI.getOperand(3).getImm() == 0x02 && "Unexpected immediate!"); |
2168 | auto &WorkingMI = cloneIfNew(MI); |
2169 | WorkingMI.setDesc(get(X86::MOVSDrr)); |
2170 | WorkingMI.RemoveOperand(3); |
2171 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, false, |
2172 | OpIdx1, OpIdx2); |
2173 | } |
2174 | case X86::PCLMULQDQrr: |
2175 | case X86::VPCLMULQDQrr: |
2176 | case X86::VPCLMULQDQYrr: |
2177 | case X86::VPCLMULQDQZrr: |
2178 | case X86::VPCLMULQDQZ128rr: |
2179 | case X86::VPCLMULQDQZ256rr: { |
2180 | |
2181 | |
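// The PCLMULQDQ immediate picks the low or high quadword of each source
// (bit 0 for the first source, bit 4 for the second); swapping the sources
// therefore just exchanges those two immediate bits.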
2182 | unsigned Imm = MI.getOperand(3).getImm(); |
2183 | unsigned Src1Hi = Imm & 0x01; |
2184 | unsigned Src2Hi = Imm & 0x10; |
2185 | auto &WorkingMI = cloneIfNew(MI); |
2186 | WorkingMI.getOperand(3).setImm((Src1Hi << 4) | (Src2Hi >> 4)); |
2187 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, false, |
2188 | OpIdx1, OpIdx2); |
2189 | } |
2190 | case X86::VPCMPBZ128rri: case X86::VPCMPUBZ128rri: |
2191 | case X86::VPCMPBZ256rri: case X86::VPCMPUBZ256rri: |
2192 | case X86::VPCMPBZrri: case X86::VPCMPUBZrri: |
2193 | case X86::VPCMPDZ128rri: case X86::VPCMPUDZ128rri: |
2194 | case X86::VPCMPDZ256rri: case X86::VPCMPUDZ256rri: |
2195 | case X86::VPCMPDZrri: case X86::VPCMPUDZrri: |
2196 | case X86::VPCMPQZ128rri: case X86::VPCMPUQZ128rri: |
2197 | case X86::VPCMPQZ256rri: case X86::VPCMPUQZ256rri: |
2198 | case X86::VPCMPQZrri: case X86::VPCMPUQZrri: |
2199 | case X86::VPCMPWZ128rri: case X86::VPCMPUWZ128rri: |
2200 | case X86::VPCMPWZ256rri: case X86::VPCMPUWZ256rri: |
2201 | case X86::VPCMPWZrri: case X86::VPCMPUWZrri: |
2202 | case X86::VPCMPBZ128rrik: case X86::VPCMPUBZ128rrik: |
2203 | case X86::VPCMPBZ256rrik: case X86::VPCMPUBZ256rrik: |
2204 | case X86::VPCMPBZrrik: case X86::VPCMPUBZrrik: |
2205 | case X86::VPCMPDZ128rrik: case X86::VPCMPUDZ128rrik: |
2206 | case X86::VPCMPDZ256rrik: case X86::VPCMPUDZ256rrik: |
2207 | case X86::VPCMPDZrrik: case X86::VPCMPUDZrrik: |
2208 | case X86::VPCMPQZ128rrik: case X86::VPCMPUQZ128rrik: |
2209 | case X86::VPCMPQZ256rrik: case X86::VPCMPUQZ256rrik: |
2210 | case X86::VPCMPQZrrik: case X86::VPCMPUQZrrik: |
2211 | case X86::VPCMPWZ128rrik: case X86::VPCMPUWZ128rrik: |
2212 | case X86::VPCMPWZ256rrik: case X86::VPCMPUWZ256rrik: |
2213 | case X86::VPCMPWZrrik: case X86::VPCMPUWZrrik: { |
2214 | |
2215 | unsigned Imm = MI.getOperand(MI.getNumOperands() - 1).getImm() & 0x7; |
2216 | Imm = X86::getSwappedVPCMPImm(Imm); |
2217 | auto &WorkingMI = cloneIfNew(MI); |
2218 | WorkingMI.getOperand(MI.getNumOperands() - 1).setImm(Imm); |
2219 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, false, |
2220 | OpIdx1, OpIdx2); |
2221 | } |
2222 | case X86::VPCOMBri: case X86::VPCOMUBri: |
2223 | case X86::VPCOMDri: case X86::VPCOMUDri: |
2224 | case X86::VPCOMQri: case X86::VPCOMUQri: |
2225 | case X86::VPCOMWri: case X86::VPCOMUWri: { |
2226 | |
2227 | unsigned Imm = MI.getOperand(3).getImm() & 0x7; |
2228 | Imm = X86::getSwappedVPCOMImm(Imm); |
2229 | auto &WorkingMI = cloneIfNew(MI); |
2230 | WorkingMI.getOperand(3).setImm(Imm); |
2231 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, false, |
2232 | OpIdx1, OpIdx2); |
2233 | } |
2234 | case X86::VCMPSDZrr: |
2235 | case X86::VCMPSSZrr: |
2236 | case X86::VCMPPDZrri: |
2237 | case X86::VCMPPSZrri: |
2238 | case X86::VCMPPDZ128rri: |
2239 | case X86::VCMPPSZ128rri: |
2240 | case X86::VCMPPDZ256rri: |
2241 | case X86::VCMPPSZ256rri: |
2242 | case X86::VCMPPDZrrik: |
2243 | case X86::VCMPPSZrrik: |
2244 | case X86::VCMPPDZ128rrik: |
2245 | case X86::VCMPPSZ128rrik: |
2246 | case X86::VCMPPDZ256rrik: |
2247 | case X86::VCMPPSZ256rrik: { |
2248 | unsigned Imm = |
2249 | MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 0x1f; |
2250 | Imm = X86::getSwappedVCMPImm(Imm); |
2251 | auto &WorkingMI = cloneIfNew(MI); |
2252 | WorkingMI.getOperand(MI.getNumExplicitOperands() - 1).setImm(Imm); |
2253 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, false, |
2254 | OpIdx1, OpIdx2); |
2255 | } |
2256 | case X86::VPERM2F128rr: |
2257 | case X86::VPERM2I128rr: { |
2258 | |
2259 | |
2260 | |
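// Each 4-bit field of the VPERM2F128/VPERM2I128 immediate selects one of the
// four 128-bit source lanes; flipping bit 1 of each field (XOR with 0x22)
// swaps which source register the lane comes from.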
2261 | int8_t Imm = MI.getOperand(3).getImm() & 0xFF; |
2262 | auto &WorkingMI = cloneIfNew(MI); |
2263 | WorkingMI.getOperand(3).setImm(Imm ^ 0x22); |
2264 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, false, |
2265 | OpIdx1, OpIdx2); |
2266 | } |
2267 | case X86::MOVHLPSrr: |
2268 | case X86::UNPCKHPDrr: |
2269 | case X86::VMOVHLPSrr: |
2270 | case X86::VUNPCKHPDrr: |
2271 | case X86::VMOVHLPSZrr: |
2272 | case X86::VUNPCKHPDZ128rr: { |
2273 | assert(Subtarget.hasSSE2() && "Commuting MOVHLP/UNPCKHPD requires SSE2!"); |
2274 | |
2275 | unsigned Opc = MI.getOpcode(); |
2276 | switch (Opc) { |
2277 | default: llvm_unreachable("Unreachable!"); |
2278 | case X86::MOVHLPSrr: Opc = X86::UNPCKHPDrr; break; |
2279 | case X86::UNPCKHPDrr: Opc = X86::MOVHLPSrr; break; |
2280 | case X86::VMOVHLPSrr: Opc = X86::VUNPCKHPDrr; break; |
2281 | case X86::VUNPCKHPDrr: Opc = X86::VMOVHLPSrr; break; |
2282 | case X86::VMOVHLPSZrr: Opc = X86::VUNPCKHPDZ128rr; break; |
2283 | case X86::VUNPCKHPDZ128rr: Opc = X86::VMOVHLPSZrr; break; |
2284 | } |
2285 | auto &WorkingMI = cloneIfNew(MI); |
2286 | WorkingMI.setDesc(get(Opc)); |
2287 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, false, |
2288 | OpIdx1, OpIdx2); |
2289 | } |
2290 | case X86::CMOV16rr: case X86::CMOV32rr: case X86::CMOV64rr: { |
2291 | auto &WorkingMI = cloneIfNew(MI); |
2292 | unsigned OpNo = MI.getDesc().getNumOperands() - 1; |
2293 | X86::CondCode CC = static_cast<X86::CondCode>(MI.getOperand(OpNo).getImm()); |
2294 | WorkingMI.getOperand(OpNo).setImm(X86::GetOppositeBranchCondition(CC)); |
2295 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, false, |
2296 | OpIdx1, OpIdx2); |
2297 | } |
2298 | case X86::VPTERNLOGDZrri: case X86::VPTERNLOGDZrmi: |
2299 | case X86::VPTERNLOGDZ128rri: case X86::VPTERNLOGDZ128rmi: |
2300 | case X86::VPTERNLOGDZ256rri: case X86::VPTERNLOGDZ256rmi: |
2301 | case X86::VPTERNLOGQZrri: case X86::VPTERNLOGQZrmi: |
2302 | case X86::VPTERNLOGQZ128rri: case X86::VPTERNLOGQZ128rmi: |
2303 | case X86::VPTERNLOGQZ256rri: case X86::VPTERNLOGQZ256rmi: |
2304 | case X86::VPTERNLOGDZrrik: |
2305 | case X86::VPTERNLOGDZ128rrik: |
2306 | case X86::VPTERNLOGDZ256rrik: |
2307 | case X86::VPTERNLOGQZrrik: |
2308 | case X86::VPTERNLOGQZ128rrik: |
2309 | case X86::VPTERNLOGQZ256rrik: |
2310 | case X86::VPTERNLOGDZrrikz: case X86::VPTERNLOGDZrmikz: |
2311 | case X86::VPTERNLOGDZ128rrikz: case X86::VPTERNLOGDZ128rmikz: |
2312 | case X86::VPTERNLOGDZ256rrikz: case X86::VPTERNLOGDZ256rmikz: |
2313 | case X86::VPTERNLOGQZrrikz: case X86::VPTERNLOGQZrmikz: |
2314 | case X86::VPTERNLOGQZ128rrikz: case X86::VPTERNLOGQZ128rmikz: |
2315 | case X86::VPTERNLOGQZ256rrikz: case X86::VPTERNLOGQZ256rmikz: |
2316 | case X86::VPTERNLOGDZ128rmbi: |
2317 | case X86::VPTERNLOGDZ256rmbi: |
2318 | case X86::VPTERNLOGDZrmbi: |
2319 | case X86::VPTERNLOGQZ128rmbi: |
2320 | case X86::VPTERNLOGQZ256rmbi: |
2321 | case X86::VPTERNLOGQZrmbi: |
2322 | case X86::VPTERNLOGDZ128rmbikz: |
2323 | case X86::VPTERNLOGDZ256rmbikz: |
2324 | case X86::VPTERNLOGDZrmbikz: |
2325 | case X86::VPTERNLOGQZ128rmbikz: |
2326 | case X86::VPTERNLOGQZ256rmbikz: |
2327 | case X86::VPTERNLOGQZrmbikz: { |
2328 | auto &WorkingMI = cloneIfNew(MI); |
2329 | commuteVPTERNLOG(WorkingMI, OpIdx1, OpIdx2); |
2330 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, false, |
2331 | OpIdx1, OpIdx2); |
2332 | } |
2333 | default: { |
2334 | if (isCommutableVPERMV3Instruction(MI.getOpcode())) { |
2335 | unsigned Opc = getCommutedVPERMV3Opcode(MI.getOpcode()); |
2336 | auto &WorkingMI = cloneIfNew(MI); |
2337 | WorkingMI.setDesc(get(Opc)); |
2338 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, false, |
2339 | OpIdx1, OpIdx2); |
2340 | } |
2341 | |
2342 | const X86InstrFMA3Group *FMA3Group = getFMA3Group(MI.getOpcode(), |
2343 | MI.getDesc().TSFlags); |
2344 | if (FMA3Group) { |
2345 | unsigned Opc = |
2346 | getFMA3OpcodeToCommuteOperands(MI, OpIdx1, OpIdx2, *FMA3Group); |
2347 | auto &WorkingMI = cloneIfNew(MI); |
2348 | WorkingMI.setDesc(get(Opc)); |
2349 | return TargetInstrInfo::commuteInstructionImpl(WorkingMI, false, |
2350 | OpIdx1, OpIdx2); |
2351 | } |
2352 | |
2353 | return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2); |
2354 | } |
2355 | } |
2356 | } |
2357 | |
2358 | bool |
2359 | X86InstrInfo::findThreeSrcCommutedOpIndices(const MachineInstr &MI, |
2360 | unsigned &SrcOpIdx1, |
2361 | unsigned &SrcOpIdx2, |
2362 | bool IsIntrinsic) const { |
2363 | uint64_t TSFlags = MI.getDesc().TSFlags; |
2364 | |
2365 | unsigned FirstCommutableVecOp = 1; |
2366 | unsigned LastCommutableVecOp = 3; |
2367 | unsigned KMaskOp = -1U; |
2368 | if (X86II::isKMasked(TSFlags)) { |
2369 | |
2370 | |
2371 | |
2372 | |
2373 | |
2374 | |
2375 | |
2376 | |
2377 | |
2378 | |
2379 | |
2380 | |
2381 | |
2382 | |
2383 | |
2384 | |
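// For masked instructions the k-mask register sits at operand index 2 and
// must never be commuted. Merge-masked and intrinsic forms additionally keep
// their first vector source fixed, and the last commutable source moves one
// slot further along.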
2385 | KMaskOp = 2; |
2386 | |
2387 | |
2388 | |
2389 | if (X86II::isKMergeMasked(TSFlags) || IsIntrinsic) |
2390 | FirstCommutableVecOp = 3; |
2391 | |
2392 | LastCommutableVecOp++; |
2393 | } else if (IsIntrinsic) { |
2394 | |
2395 | |
2396 | FirstCommutableVecOp = 2; |
2397 | } |
2398 | |
2399 | if (isMem(MI, LastCommutableVecOp)) |
2400 | LastCommutableVecOp--; |
2401 | |
2402 | |
2403 | |
2404 | |
2405 | if (SrcOpIdx1 != CommuteAnyOperandIndex && |
2406 | (SrcOpIdx1 < FirstCommutableVecOp || SrcOpIdx1 > LastCommutableVecOp || |
2407 | SrcOpIdx1 == KMaskOp)) |
2408 | return false; |
2409 | if (SrcOpIdx2 != CommuteAnyOperandIndex && |
2410 | (SrcOpIdx2 < FirstCommutableVecOp || SrcOpIdx2 > LastCommutableVecOp || |
2411 | SrcOpIdx2 == KMaskOp)) |
2412 | return false; |
2413 | |
2414 | |
2415 | |
2416 | if (SrcOpIdx1 == CommuteAnyOperandIndex || |
2417 | SrcOpIdx2 == CommuteAnyOperandIndex) { |
2418 | unsigned CommutableOpIdx2 = SrcOpIdx2; |
2419 | |
2420 | |
2421 | |
2422 | if (SrcOpIdx1 == SrcOpIdx2) |
2423 | |
2424 | |
2425 | CommutableOpIdx2 = LastCommutableVecOp; |
2426 | else if (SrcOpIdx2 == CommuteAnyOperandIndex) |
2427 | |
2428 | CommutableOpIdx2 = SrcOpIdx1; |
2429 | |
2430 | |
2431 | |
2432 | Register Op2Reg = MI.getOperand(CommutableOpIdx2).getReg(); |
2433 | |
2434 | unsigned CommutableOpIdx1; |
2435 | for (CommutableOpIdx1 = LastCommutableVecOp; |
2436 | CommutableOpIdx1 >= FirstCommutableVecOp; CommutableOpIdx1--) { |
2437 | |
2438 | if (CommutableOpIdx1 == KMaskOp) |
2439 | continue; |
2440 | |
2441 | |
2442 | |
2443 | |
2444 | if (Op2Reg != MI.getOperand(CommutableOpIdx1).getReg()) |
2445 | break; |
2446 | } |
2447 | |
2448 | |
2449 | if (CommutableOpIdx1 < FirstCommutableVecOp) |
2450 | return false; |
2451 | |
2452 | |
2453 | |
2454 | if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, |
2455 | CommutableOpIdx1, CommutableOpIdx2)) |
2456 | return false; |
2457 | } |
2458 | |
2459 | return true; |
2460 | } |
2461 | |
2462 | bool X86InstrInfo::findCommutedOpIndices(const MachineInstr &MI, |
2463 | unsigned &SrcOpIdx1, |
2464 | unsigned &SrcOpIdx2) const { |
2465 | const MCInstrDesc &Desc = MI.getDesc(); |
2466 | if (!Desc.isCommutable()) |
2467 | return false; |
2468 | |
2469 | switch (MI.getOpcode()) { |
2470 | case X86::CMPSDrr: |
2471 | case X86::CMPSSrr: |
2472 | case X86::CMPPDrri: |
2473 | case X86::CMPPSrri: |
2474 | case X86::VCMPSDrr: |
2475 | case X86::VCMPSSrr: |
2476 | case X86::VCMPPDrri: |
2477 | case X86::VCMPPSrri: |
2478 | case X86::VCMPPDYrri: |
2479 | case X86::VCMPPSYrri: |
2480 | case X86::VCMPSDZrr: |
2481 | case X86::VCMPSSZrr: |
2482 | case X86::VCMPPDZrri: |
2483 | case X86::VCMPPSZrri: |
2484 | case X86::VCMPPDZ128rri: |
2485 | case X86::VCMPPSZ128rri: |
2486 | case X86::VCMPPDZ256rri: |
2487 | case X86::VCMPPSZ256rri: |
2488 | case X86::VCMPPDZrrik: |
2489 | case X86::VCMPPSZrrik: |
2490 | case X86::VCMPPDZ128rrik: |
2491 | case X86::VCMPPSZ128rrik: |
2492 | case X86::VCMPPDZ256rrik: |
2493 | case X86::VCMPPSZ256rrik: { |
2494 | unsigned OpOffset = X86II::isKMasked(Desc.TSFlags) ? 1 : 0; |
2495 | |
2496 | |
2497 | |
2498 | unsigned Imm = MI.getOperand(3 + OpOffset).getImm() & 0x7; |
2499 | switch (Imm) { |
2500 | default: |
2501 | |
2502 | if ((Desc.TSFlags & X86II::EncodingMask) == X86II::EVEX) |
2503 | break; |
2504 | return false; |
2505 | case 0x00: |
2506 | case 0x03: |
2507 | case 0x04: |
2508 | case 0x07: |
2509 | break; |
2510 | } |
2511 | |
2512 | |
2513 | |
2514 | |
2515 | return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1 + OpOffset, |
2516 | 2 + OpOffset); |
2517 | } |
2518 | case X86::MOVSSrr: |
2519 | |
2520 | |
2521 | |
2522 | if (Subtarget.hasSSE41()) |
2523 | return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2); |
2524 | return false; |
2525 | case X86::SHUFPDrri: |
2526 | |
2527 | if (MI.getOperand(3).getImm() == 0x02) |
2528 | return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2); |
2529 | return false; |
2530 | case X86::MOVHLPSrr: |
2531 | case X86::UNPCKHPDrr: |
2532 | case X86::VMOVHLPSrr: |
2533 | case X86::VUNPCKHPDrr: |
2534 | case X86::VMOVHLPSZrr: |
2535 | case X86::VUNPCKHPDZ128rr: |
2536 | if (Subtarget.hasSSE2()) |
2537 | return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2); |
2538 | return false; |
2539 | case X86::VPTERNLOGDZrri: case X86::VPTERNLOGDZrmi: |
2540 | case X86::VPTERNLOGDZ128rri: case X86::VPTERNLOGDZ128rmi: |
2541 | case X86::VPTERNLOGDZ256rri: case X86::VPTERNLOGDZ256rmi: |
2542 | case X86::VPTERNLOGQZrri: case X86::VPTERNLOGQZrmi: |
2543 | case X86::VPTERNLOGQZ128rri: case X86::VPTERNLOGQZ128rmi: |
2544 | case X86::VPTERNLOGQZ256rri: case X86::VPTERNLOGQZ256rmi: |
2545 | case X86::VPTERNLOGDZrrik: |
2546 | case X86::VPTERNLOGDZ128rrik: |
2547 | case X86::VPTERNLOGDZ256rrik: |
2548 | case X86::VPTERNLOGQZrrik: |
2549 | case X86::VPTERNLOGQZ128rrik: |
2550 | case X86::VPTERNLOGQZ256rrik: |
2551 | case X86::VPTERNLOGDZrrikz: case X86::VPTERNLOGDZrmikz: |
2552 | case X86::VPTERNLOGDZ128rrikz: case X86::VPTERNLOGDZ128rmikz: |
2553 | case X86::VPTERNLOGDZ256rrikz: case X86::VPTERNLOGDZ256rmikz: |
2554 | case X86::VPTERNLOGQZrrikz: case X86::VPTERNLOGQZrmikz: |
2555 | case X86::VPTERNLOGQZ128rrikz: case X86::VPTERNLOGQZ128rmikz: |
2556 | case X86::VPTERNLOGQZ256rrikz: case X86::VPTERNLOGQZ256rmikz: |
2557 | case X86::VPTERNLOGDZ128rmbi: |
2558 | case X86::VPTERNLOGDZ256rmbi: |
2559 | case X86::VPTERNLOGDZrmbi: |
2560 | case X86::VPTERNLOGQZ128rmbi: |
2561 | case X86::VPTERNLOGQZ256rmbi: |
2562 | case X86::VPTERNLOGQZrmbi: |
2563 | case X86::VPTERNLOGDZ128rmbikz: |
2564 | case X86::VPTERNLOGDZ256rmbikz: |
2565 | case X86::VPTERNLOGDZrmbikz: |
2566 | case X86::VPTERNLOGQZ128rmbikz: |
2567 | case X86::VPTERNLOGQZ256rmbikz: |
2568 | case X86::VPTERNLOGQZrmbikz: |
2569 | return findThreeSrcCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2); |
2570 | case X86::VPDPWSSDYrr: |
2571 | case X86::VPDPWSSDrr: |
2572 | case X86::VPDPWSSDSYrr: |
2573 | case X86::VPDPWSSDSrr: |
2574 | case X86::VPDPWSSDZ128r: |
2575 | case X86::VPDPWSSDZ128rk: |
2576 | case X86::VPDPWSSDZ128rkz: |
2577 | case X86::VPDPWSSDZ256r: |
2578 | case X86::VPDPWSSDZ256rk: |
2579 | case X86::VPDPWSSDZ256rkz: |
2580 | case X86::VPDPWSSDZr: |
2581 | case X86::VPDPWSSDZrk: |
2582 | case X86::VPDPWSSDZrkz: |
2583 | case X86::VPDPWSSDSZ128r: |
2584 | case X86::VPDPWSSDSZ128rk: |
2585 | case X86::VPDPWSSDSZ128rkz: |
2586 | case X86::VPDPWSSDSZ256r: |
2587 | case X86::VPDPWSSDSZ256rk: |
2588 | case X86::VPDPWSSDSZ256rkz: |
2589 | case X86::VPDPWSSDSZr: |
2590 | case X86::VPDPWSSDSZrk: |
2591 | case X86::VPDPWSSDSZrkz: |
2592 | case X86::VPMADD52HUQZ128r: |
2593 | case X86::VPMADD52HUQZ128rk: |
2594 | case X86::VPMADD52HUQZ128rkz: |
2595 | case X86::VPMADD52HUQZ256r: |
2596 | case X86::VPMADD52HUQZ256rk: |
2597 | case X86::VPMADD52HUQZ256rkz: |
2598 | case X86::VPMADD52HUQZr: |
2599 | case X86::VPMADD52HUQZrk: |
2600 | case X86::VPMADD52HUQZrkz: |
2601 | case X86::VPMADD52LUQZ128r: |
2602 | case X86::VPMADD52LUQZ128rk: |
2603 | case X86::VPMADD52LUQZ128rkz: |
2604 | case X86::VPMADD52LUQZ256r: |
2605 | case X86::VPMADD52LUQZ256rk: |
2606 | case X86::VPMADD52LUQZ256rkz: |
2607 | case X86::VPMADD52LUQZr: |
2608 | case X86::VPMADD52LUQZrk: |
2609 | case X86::VPMADD52LUQZrkz: { |
2610 | unsigned CommutableOpIdx1 = 2; |
2611 | unsigned CommutableOpIdx2 = 3; |
2612 | if (X86II::isKMasked(Desc.TSFlags)) { |
2613 | |
2614 | ++CommutableOpIdx1; |
2615 | ++CommutableOpIdx2; |
2616 | } |
2617 | if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, |
2618 | CommutableOpIdx1, CommutableOpIdx2)) |
2619 | return false; |
2620 | if (!MI.getOperand(SrcOpIdx1).isReg() || |
2621 | !MI.getOperand(SrcOpIdx2).isReg()) |
2622 | |
2623 | return false; |
2624 | return true; |
2625 | } |
2626 | |
2627 | default: |
2628 | const X86InstrFMA3Group *FMA3Group = getFMA3Group(MI.getOpcode(), |
2629 | MI.getDesc().TSFlags); |
2630 | if (FMA3Group) |
2631 | return findThreeSrcCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2, |
2632 | FMA3Group->isIntrinsic()); |
2633 | |
2634 | |
2635 | |
2636 | if (X86II::isKMasked(Desc.TSFlags)) { |
2637 | |
2638 | unsigned CommutableOpIdx1 = Desc.getNumDefs() + 1; |
2639 | unsigned CommutableOpIdx2 = Desc.getNumDefs() + 2; |
2640 | |
2641 | |
2642 | if ((MI.getDesc().getOperandConstraint(Desc.getNumDefs(), |
2643 | MCOI::TIED_TO) != -1)) { |
2644 | |
2645 | |
2646 | |
2647 | |
2648 | |
2649 | if (X86II::isKMergeMasked(Desc.TSFlags)) { |
2650 | ++CommutableOpIdx1; |
2651 | ++CommutableOpIdx2; |
2652 | } else { |
2653 | --CommutableOpIdx1; |
2654 | } |
2655 | } |
2656 | |
2657 | if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, |
2658 | CommutableOpIdx1, CommutableOpIdx2)) |
2659 | return false; |
2660 | |
2661 | if (!MI.getOperand(SrcOpIdx1).isReg() || |
2662 | !MI.getOperand(SrcOpIdx2).isReg()) |
2663 | |
2664 | return false; |
2665 | return true; |
2666 | } |
2667 | |
2668 | return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2); |
2669 | } |
2670 | return false; |
2671 | } |
2672 | |
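// Return true if MI is an LEA32r/LEA64r/LEA64_32r whose address is simply
// base + index (scale 1, zero displacement, no segment), i.e. an LEA that
// could be expressed as a plain register-register add.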
2673 | static bool isConvertibleLEA(MachineInstr *MI) { |
2674 | unsigned Opcode = MI->getOpcode(); |
2675 | if (Opcode != X86::LEA32r && Opcode != X86::LEA64r && |
2676 | Opcode != X86::LEA64_32r) |
2677 | return false; |
2678 | |
2679 | const MachineOperand &Scale = MI->getOperand(1 + X86::AddrScaleAmt); |
2680 | const MachineOperand &Disp = MI->getOperand(1 + X86::AddrDisp); |
2681 | const MachineOperand &Segment = MI->getOperand(1 + X86::AddrSegmentReg); |
2682 | |
2683 | if (Segment.getReg() != 0 || !Disp.isImm() || Disp.getImm() != 0 || |
2684 | Scale.getImm() > 1) |
2685 | return false; |
2686 | |
2687 | return true; |
2688 | } |
2689 | |
2690 | bool X86InstrInfo::hasCommutePreference(MachineInstr &MI, bool &Commute) const { |
2691 | |
2692 | |
2693 | |
2694 | |
2695 | |
2696 | |
2697 | |
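// Only ADD32rr/ADD64rr are considered. If one of the two sources is defined
// by a convertible LEA in the same block, report a preference: commute when
// it is the first source, keep the order when it is the second, presumably so
// the LEA can later be folded into the add.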
2698 | unsigned Opcode = MI.getOpcode(); |
2699 | if (Opcode != X86::ADD32rr && Opcode != X86::ADD64rr) |
2700 | return false; |
2701 | |
2702 | const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); |
2703 | Register Reg1 = MI.getOperand(1).getReg(); |
2704 | Register Reg2 = MI.getOperand(2).getReg(); |
2705 | |
2706 | |
2707 | if (MachineInstr *Inst = MRI.getUniqueVRegDef(Reg1)) { |
2708 | if (isConvertibleLEA(Inst) && Inst->getParent() == MI.getParent()) { |
2709 | Commute = true; |
2710 | return true; |
2711 | } |
2712 | } |
2713 | |
2714 | |
2715 | if (MachineInstr *Inst = MRI.getUniqueVRegDef(Reg2)) { |
2716 | if (isConvertibleLEA(Inst) && Inst->getParent() == MI.getParent()) { |
2717 | Commute = false; |
2718 | return true; |
2719 | } |
2720 | } |
2721 | |
2722 | return false; |
2723 | } |
2724 | |
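// Return the condition code of a conditional branch (JCC_1), or COND_INVALID
// if MI is not one.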
2725 | X86::CondCode X86::getCondFromBranch(const MachineInstr &MI) { |
2726 | switch (MI.getOpcode()) { |
2727 | default: return X86::COND_INVALID; |
2728 | case X86::JCC_1: |
2729 | return static_cast<X86::CondCode>( |
2730 | MI.getOperand(MI.getDesc().getNumOperands() - 1).getImm()); |
2731 | } |
2732 | } |
2733 | |
2734 | |
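// Return the condition code of a SETCC instruction, or COND_INVALID.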
2735 | X86::CondCode X86::getCondFromSETCC(const MachineInstr &MI) { |
2736 | switch (MI.getOpcode()) { |
2737 | default: return X86::COND_INVALID; |
2738 | case X86::SETCCr: case X86::SETCCm: |
2739 | return static_cast<X86::CondCode>( |
2740 | MI.getOperand(MI.getDesc().getNumOperands() - 1).getImm()); |
2741 | } |
2742 | } |
2743 | |
2744 | |
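// Return the condition code of a CMOV instruction, or COND_INVALID.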
2745 | X86::CondCode X86::getCondFromCMov(const MachineInstr &MI) { |
2746 | switch (MI.getOpcode()) { |
2747 | default: return X86::COND_INVALID; |
2748 | case X86::CMOV16rr: case X86::CMOV32rr: case X86::CMOV64rr: |
2749 | case X86::CMOV16rm: case X86::CMOV32rm: case X86::CMOV64rm: |
2750 | return static_cast<X86::CondCode>( |
2751 | MI.getOperand(MI.getDesc().getNumOperands() - 1).getImm()); |
2752 | } |
2753 | } |
2754 | |
2755 | |
2756 | |
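// Return the condition code that is true exactly when CC is false, e.g.
// COND_E <-> COND_NE and COND_L <-> COND_GE.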
2757 | X86::CondCode X86::GetOppositeBranchCondition(X86::CondCode CC) { |
2758 | switch (CC) { |
2759 | default: llvm_unreachable("Illegal condition code!"); |
2760 | case X86::COND_E: return X86::COND_NE; |
2761 | case X86::COND_NE: return X86::COND_E; |
2762 | case X86::COND_L: return X86::COND_GE; |
2763 | case X86::COND_LE: return X86::COND_G; |
2764 | case X86::COND_G: return X86::COND_LE; |
2765 | case X86::COND_GE: return X86::COND_L; |
2766 | case X86::COND_B: return X86::COND_AE; |
2767 | case X86::COND_BE: return X86::COND_A; |
2768 | case X86::COND_A: return X86::COND_BE; |
2769 | case X86::COND_AE: return X86::COND_B; |
2770 | case X86::COND_S: return X86::COND_NS; |
2771 | case X86::COND_NS: return X86::COND_S; |
2772 | case X86::COND_P: return X86::COND_NP; |
2773 | case X86::COND_NP: return X86::COND_P; |
2774 | case X86::COND_O: return X86::COND_NO; |
2775 | case X86::COND_NO: return X86::COND_O; |
2776 | case X86::COND_NE_OR_P: return X86::COND_E_AND_NP; |
2777 | case X86::COND_E_AND_NP: return X86::COND_NE_OR_P; |
2778 | } |
2779 | } |
2780 | |
2781 | |
2782 | |
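// Assuming the flags were produced by comparing (a, b), return the condition
// that tests the same relation once the comparison operands are swapped to
// (b, a), or COND_INVALID if no such code exists.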
2783 | static X86::CondCode getSwappedCondition(X86::CondCode CC) { |
2784 | switch (CC) { |
2785 | default: return X86::COND_INVALID; |
2786 | case X86::COND_E: return X86::COND_E; |
2787 | case X86::COND_NE: return X86::COND_NE; |
2788 | case X86::COND_L: return X86::COND_G; |
2789 | case X86::COND_LE: return X86::COND_GE; |
2790 | case X86::COND_G: return X86::COND_L; |
2791 | case X86::COND_GE: return X86::COND_LE; |
2792 | case X86::COND_B: return X86::COND_A; |
2793 | case X86::COND_BE: return X86::COND_AE; |
2794 | case X86::COND_A: return X86::COND_B; |
2795 | case X86::COND_AE: return X86::COND_BE; |
2796 | } |
2797 | } |
2798 | |
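// Translate an IR CmpInst predicate into an X86 condition code. The second
// result is set when the comparison operands must be swapped before using
// the returned condition (e.g. FCMP_OLT becomes a swapped COND_A).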
2799 | std::pair<X86::CondCode, bool> |
2800 | X86::getX86ConditionCode(CmpInst::Predicate Predicate) { |
2801 | X86::CondCode CC = X86::COND_INVALID; |
2802 | bool NeedSwap = false; |
2803 | switch (Predicate) { |
2804 | default: break; |
2805 | |
2806 | case CmpInst::FCMP_UEQ: CC = X86::COND_E; break; |
2807 | case CmpInst::FCMP_OLT: NeedSwap = true; LLVM_FALLTHROUGH; |
2808 | case CmpInst::FCMP_OGT: CC = X86::COND_A; break; |
2809 | case CmpInst::FCMP_OLE: NeedSwap = true; LLVM_FALLTHROUGH; |
2810 | case CmpInst::FCMP_OGE: CC = X86::COND_AE; break; |
2811 | case CmpInst::FCMP_UGT: NeedSwap = true; LLVM_FALLTHROUGH; |
2812 | case CmpInst::FCMP_ULT: CC = X86::COND_B; break; |
2813 | case CmpInst::FCMP_UGE: NeedSwap = true; LLVM_FALLTHROUGH; |
2814 | case CmpInst::FCMP_ULE: CC = X86::COND_BE; break; |
2815 | case CmpInst::FCMP_ONE: CC = X86::COND_NE; break; |
2816 | case CmpInst::FCMP_UNO: CC = X86::COND_P; break; |
2817 | case CmpInst::FCMP_ORD: CC = X86::COND_NP; break; |
2818 | case CmpInst::FCMP_OEQ: LLVM_FALLTHROUGH; |
2819 | case CmpInst::FCMP_UNE: CC = X86::COND_INVALID; break; |
2820 | |
2821 | |
2822 | case CmpInst::ICMP_EQ: CC = X86::COND_E; break; |
2823 | case CmpInst::ICMP_NE: CC = X86::COND_NE; break; |
2824 | case CmpInst::ICMP_UGT: CC = X86::COND_A; break; |
2825 | case CmpInst::ICMP_UGE: CC = X86::COND_AE; break; |
2826 | case CmpInst::ICMP_ULT: CC = X86::COND_B; break; |
2827 | case CmpInst::ICMP_ULE: CC = X86::COND_BE; break; |
2828 | case CmpInst::ICMP_SGT: CC = X86::COND_G; break; |
2829 | case CmpInst::ICMP_SGE: CC = X86::COND_GE; break; |
2830 | case CmpInst::ICMP_SLT: CC = X86::COND_L; break; |
2831 | case CmpInst::ICMP_SLE: CC = X86::COND_LE; break; |
2832 | } |
2833 | |
2834 | return std::make_pair(CC, NeedSwap); |
2835 | } |
2836 | |
2837 | |
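// Return the SETcc opcode, choosing the memory form when the destination is
// a memory operand.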
2838 | unsigned X86::getSETOpc(bool HasMemoryOperand) { |
2839 | return HasMemoryOperand ? X86::SETCCm : X86::SETCCr; |
2840 | } |
2841 | |
2842 | |
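// Return the CMOV opcode for the given register size in bytes, choosing the
// register-memory form when a memory operand is present.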
2843 | unsigned X86::getCMovOpcode(unsigned RegBytes, bool HasMemoryOperand) { |
2844 | switch(RegBytes) { |
2845 | default: llvm_unreachable("Illegal register size!"); |
2846 | case 2: return HasMemoryOperand ? X86::CMOV16rm : X86::CMOV16rr; |
2847 | case 4: return HasMemoryOperand ? X86::CMOV32rm : X86::CMOV32rr; |
2848 | case 8: return HasMemoryOperand ? X86::CMOV64rm : X86::CMOV64rr; |
2849 | } |
2850 | } |
2851 | |
2852 | |
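// Map an ISD integer SETCC condition onto the AVX-512 VPCMP immediate
// encoding (0 = EQ, 1 = LT, 2 = LE, 4 = NE, 5 = NLT/GE, 6 = NLE/GT).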
2853 | unsigned X86::getVPCMPImmForCond(ISD::CondCode CC) { |
2854 | switch (CC) { |
2855 | default: llvm_unreachable("Unexpected SETCC condition"); |
2856 | case ISD::SETNE: return 4; |
2857 | case ISD::SETEQ: return 0; |
2858 | case ISD::SETULT: |
2859 | case ISD::SETLT: return 1; |
2860 | case ISD::SETUGT: |
2861 | case ISD::SETGT: return 6; |
2862 | case ISD::SETUGE: |
2863 | case ISD::SETGE: return 5; |
2864 | case ISD::SETULE: |
2865 | case ISD::SETLE: return 2; |
2866 | } |
2867 | } |
2868 | |
2869 | |
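// Return the VPCMP immediate that yields the same result after the compare's
// operands are swapped: LT <-> NLE and LE <-> NLT, while EQ, NE, FALSE and
// TRUE are unchanged.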
2870 | unsigned X86::getSwappedVPCMPImm(unsigned Imm) { |
2871 | switch (Imm) { |
2872 | default: llvm_unreachable("Unreachable!"); |
2873 | case 0x01: Imm = 0x06; break; |
2874 | case 0x02: Imm = 0x05; break; |
2875 | case 0x05: Imm = 0x02; break; |
2876 | case 0x06: Imm = 0x01; break; |
2877 | case 0x00: |
2878 | case 0x03: |
2879 | case 0x04: |
2880 | case 0x07: |
2881 | break; |
2882 | } |
2883 | |
2884 | return Imm; |
2885 | } |
2886 | |
2887 | |
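// Return the XOP VPCOM immediate for the operand-swapped compare: LT <-> GT
// and LE <-> GE, with EQ, NE, FALSE and TRUE left alone.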
2888 | unsigned X86::getSwappedVPCOMImm(unsigned Imm) { |
2889 | switch (Imm) { |
2890 | default: llvm_unreachable("Unreachable!"); |
2891 | case 0x00: Imm = 0x02; break; |
2892 | case 0x01: Imm = 0x03; break; |
2893 | case 0x02: Imm = 0x00; break; |
2894 | case 0x03: Imm = 0x01; break; |
2895 | case 0x04: |
2896 | case 0x05: |
2897 | case 0x06: |
2898 | case 0x07: |
2899 | break; |
2900 | } |
2901 | |
2902 | return Imm; |
2903 | } |
2904 | |
2905 | |
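// Return the AVX VCMP immediate for the operand-swapped compare. Predicates
// whose low two bits are 01 or 10 (the LT/LE style encodings) are mapped to
// the matching GT/GE style encoding by inverting the low four bits; the
// remaining predicates are symmetric under an operand swap.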
2906 | unsigned X86::getSwappedVCMPImm(unsigned Imm) { |
2907 | |
2908 | switch (Imm & 0x3) { |
2909 | default: llvm_unreachable("Unreachable!"); |
2910 | case 0x00: case 0x03: |
2911 | |
2912 | break; |
2913 | case 0x01: case 0x02: |
2914 | |
2915 | Imm ^= 0xf; |
2916 | break; |
2917 | } |
2918 | |
2919 | return Imm; |
2920 | } |
2921 | |
2922 | bool X86InstrInfo::isUnconditionalTailCall(const MachineInstr &MI) const { |
2923 | switch (MI.getOpcode()) { |
2924 | case X86::TCRETURNdi: |
2925 | case X86::TCRETURNri: |
2926 | case X86::TCRETURNmi: |
2927 | case X86::TCRETURNdi64: |
2928 | case X86::TCRETURNri64: |
2929 | case X86::TCRETURNmi64: |
2930 | return true; |
2931 | default: |
2932 | return false; |
2933 | } |
2934 | } |
2935 | |
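// A branch can only be folded into a conditional tail call when the call is a
// direct TCRETURNdi/TCRETURNdi64, there is no Win64 CFI, the branch condition
// is a single ordinary condition code, and no return-address or stack
// adjustment is required.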
2936 | bool X86InstrInfo::canMakeTailCallConditional( |
2937 | SmallVectorImpl<MachineOperand> &BranchCond, |
2938 | const MachineInstr &TailCall) const { |
2939 | if (TailCall.getOpcode() != X86::TCRETURNdi && |
2940 | TailCall.getOpcode() != X86::TCRETURNdi64) { |
2941 | |
2942 | return false; |
2943 | } |
2944 | |
2945 | const MachineFunction *MF = TailCall.getParent()->getParent(); |
2946 | if (Subtarget.isTargetWin64() && MF->hasWinCFI()) { |
2947 | |
2948 | return false; |
2949 | } |
2950 | |
2951 | assert(BranchCond.size() == 1); |
2952 | if (BranchCond[0].getImm() > X86::LAST_VALID_COND) { |
2953 | |
2954 | return false; |
2955 | } |
2956 | |
2957 | const X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>(); |
2958 | if (X86FI->getTCReturnAddrDelta() != 0 || |
2959 | TailCall.getOperand(1).getImm() != 0) { |
2960 | |
2961 | return false; |
2962 | } |
2963 | |
2964 | return true; |
2965 | } |
2966 | |
2967 | void X86InstrInfo::replaceBranchWithTailCall( |
2968 | MachineBasicBlock &MBB, SmallVectorImpl<MachineOperand> &BranchCond, |
2969 | const MachineInstr &TailCall) const { |
2970 | assert(canMakeTailCallConditional(BranchCond, TailCall)); |
2971 | |
2972 | MachineBasicBlock::iterator I = MBB.end(); |
2973 | while (I != MBB.begin()) { |
2974 | --I; |
2975 | if (I->isDebugInstr()) |
2976 | continue; |
2977 | if (!I->isBranch()) |
2978 | assert(0 && "Can't find the branch to replace!"); |
2979 | |
2980 | X86::CondCode CC = X86::getCondFromBranch(*I); |
2981 | assert(BranchCond.size() == 1); |
2982 | if (CC != BranchCond[0].getImm()) |
2983 | continue; |
2984 | |
2985 | break; |
2986 | } |
2987 | |
2988 | unsigned Opc = TailCall.getOpcode() == X86::TCRETURNdi ? X86::TCRETURNdicc |
2989 | : X86::TCRETURNdi64cc; |
2990 | |
2991 | auto MIB = BuildMI(MBB, I, MBB.findDebugLoc(I), get(Opc)); |
2992 | MIB->addOperand(TailCall.getOperand(0)); |
2993 | MIB.addImm(0); |
2994 | MIB->addOperand(BranchCond[0]); |
2995 | MIB.copyImplicitOps(TailCall); |
2996 | |
2997 | |
2998 | |
2999 | LivePhysRegs LiveRegs(getRegisterInfo()); |
3000 | LiveRegs.addLiveOuts(MBB); |
3001 | SmallVector<std::pair<MCPhysReg, const MachineOperand *>, 8> Clobbers; |
3002 | LiveRegs.stepForward(*MIB, Clobbers); |
3003 | for (const auto &C : Clobbers) { |
3004 | MIB.addReg(C.first, RegState::Implicit); |
3005 | MIB.addReg(C.first, RegState::Implicit | RegState::Define); |
3006 | } |
3007 | |
3008 | I->eraseFromParent(); |
3009 | } |
3010 | |
3011 | |
3012 | |
3013 | |
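// Given a block and its taken successor TBB, return the successor that would
// be reached by falling through (ignoring EH pads), or nullptr if it cannot
// be identified unambiguously.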
3014 | static MachineBasicBlock *getFallThroughMBB(MachineBasicBlock *MBB, |
3015 | MachineBasicBlock *TBB) { |
3016 | |
3017 | |
3018 | |
3019 | |
3020 | MachineBasicBlock *FallthroughBB = nullptr; |
3021 | for (auto SI = MBB->succ_begin(), SE = MBB->succ_end(); SI != SE; ++SI) { |
3022 | if ((*SI)->isEHPad() || (*SI == TBB && FallthroughBB)) |
3023 | continue; |
3024 | |
3025 | if (FallthroughBB && FallthroughBB != TBB) |
3026 | return nullptr; |
3027 | FallthroughBB = *SI; |
3028 | } |
3029 | return FallthroughBB; |
3030 | } |
3031 | |
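// Shared implementation of analyzeBranch and analyzeBranchPredicate: walk the
// terminators of MBB from the bottom, record the taken and fallthrough blocks
// plus the branch condition, and, when AllowModify is set, simplify redundant
// branches in place. Returns true if the terminators cannot be analyzed.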
3032 | bool X86InstrInfo::AnalyzeBranchImpl( |
3033 | MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, |
3034 | SmallVectorImpl<MachineOperand> &Cond, |
3035 | SmallVectorImpl<MachineInstr *> &CondBranches, bool AllowModify) const { |
3036 | |
3037 | |
3038 | |
3039 | MachineBasicBlock::iterator I = MBB.end(); |
3040 | MachineBasicBlock::iterator UnCondBrIter = MBB.end(); |
3041 | while (I != MBB.begin()) { |
3042 | --I; |
3043 | if (I->isDebugInstr()) |
3044 | continue; |
3045 | |
3046 | |
3047 | |
3048 | if (!isUnpredicatedTerminator(*I)) |
3049 | break; |
3050 | |
3051 | |
3052 | |
3053 | if (!I->isBranch()) |
3054 | return true; |
3055 | |
3056 | |
3057 | if (I->getOpcode() == X86::JMP_1) { |
3058 | UnCondBrIter = I; |
3059 | |
3060 | if (!AllowModify) { |
3061 | TBB = I->getOperand(0).getMBB(); |
3062 | continue; |
3063 | } |
3064 | |
3065 | |
3066 | while (std::next(I) != MBB.end()) |
3067 | std::next(I)->eraseFromParent(); |
3068 | |
3069 | Cond.clear(); |
3070 | FBB = nullptr; |
3071 | |
3072 | |
3073 | if (MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) { |
3074 | TBB = nullptr; |
3075 | I->eraseFromParent(); |
3076 | I = MBB.end(); |
3077 | UnCondBrIter = MBB.end(); |
3078 | continue; |
3079 | } |
3080 | |
3081 | |
3082 | TBB = I->getOperand(0).getMBB(); |
3083 | continue; |
3084 | } |
3085 | |
3086 | |
3087 | X86::CondCode BranchCode = X86::getCondFromBranch(*I); |
3088 | if (BranchCode == X86::COND_INVALID) |
3089 | return true; |
3090 | |
3091 | |
3092 | |
3093 | if (I->findRegisterUseOperand(X86::EFLAGS)->isUndef()) |
3094 | return true; |
3095 | |
3096 | |
3097 | if (Cond.empty()) { |
3098 | MachineBasicBlock *TargetBB = I->getOperand(0).getMBB(); |
3099 | if (AllowModify && UnCondBrIter != MBB.end() && |
3100 | MBB.isLayoutSuccessor(TargetBB)) { |
3101 | |
3102 | |
3103 | |
3104 | |
3105 | |
3106 | |
3107 | |
3108 | |
3109 | |
3110 | |
3111 | |
3112 | |
3113 | |
3114 | |
3115 | |
3116 | |
3117 | |
3118 | BranchCode = GetOppositeBranchCondition(BranchCode); |
3119 | MachineBasicBlock::iterator OldInst = I; |
3120 | |
3121 | BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(X86::JCC_1)) |
3122 | .addMBB(UnCondBrIter->getOperand(0).getMBB()) |
3123 | .addImm(BranchCode); |
3124 | BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(X86::JMP_1)) |
3125 | .addMBB(TargetBB); |
3126 | |
3127 | OldInst->eraseFromParent(); |
3128 | UnCondBrIter->eraseFromParent(); |
3129 | |
3130 | |
3131 | UnCondBrIter = MBB.end(); |
3132 | I = MBB.end(); |
3133 | continue; |
3134 | } |
3135 | |
3136 | FBB = TBB; |
3137 | TBB = I->getOperand(0).getMBB(); |
3138 | Cond.push_back(MachineOperand::CreateImm(BranchCode)); |
3139 | CondBranches.push_back(&*I); |
3140 | continue; |
3141 | } |
3142 | |
3143 | |
3144 | |
3145 | |
3146 | assert(Cond.size() == 1); |
3147 | assert(TBB); |
3148 | |
3149 | |
3150 | X86::CondCode OldBranchCode = (X86::CondCode)Cond[0].getImm(); |
3151 | auto NewTBB = I->getOperand(0).getMBB(); |
3152 | if (OldBranchCode == BranchCode && TBB == NewTBB) |
3153 | continue; |
3154 | |
3155 | |
3156 | |
3157 | |
3158 | if (TBB == NewTBB && |
3159 | ((OldBranchCode == X86::COND_P && BranchCode == X86::COND_NE) || |
3160 | (OldBranchCode == X86::COND_NE && BranchCode == X86::COND_P))) { |
3161 | BranchCode = X86::COND_NE_OR_P; |
3162 | } else if ((OldBranchCode == X86::COND_NP && BranchCode == X86::COND_NE) || |
3163 | (OldBranchCode == X86::COND_E && BranchCode == X86::COND_P)) { |
3164 | if (NewTBB != (FBB ? FBB : getFallThroughMBB(&MBB, TBB))) |
3165 | return true; |
3166 | // X86::COND_E_AND_NP usually has two different branch destinations: |
3167 | // |
3168 | //     JP B1 |
3169 | //     JE B2 |
3170 | //     JMP B3 |
3171 | //   B1: (jump to B3) |
3172 | //   B2: (fall through to B3) |
3173 | //   B3: |
3174 | // |
3175 | // Here this condition branches to B2 only if NP && E. It has another |
3176 | // equivalent form: |
3177 | // |
3178 | //     JNE B1 |
3179 | //     JNP B2 |
3180 | //     JMP B3 |
3181 | //   B1: (jump to B3) |
3182 | //   B2: (fall through to B3) |
3183 | //   B3: |
3184 | // |
3185 | // Similarly it branches to B2 only if E && NP, hence the name. |
3186 | BranchCode = X86::COND_E_AND_NP; |
3187 | } else |
3188 | return true; |
3189 | |
3190 | // Update the MachineOperand. |
3191 | Cond[0].setImm(BranchCode); |
3192 | CondBranches.push_back(&*I); |
3193 | } |
3194 | |
3195 | return false; |
3196 | } |
3197 | |
3198 | bool X86InstrInfo::analyzeBranch(MachineBasicBlock &MBB, |
3199 | MachineBasicBlock *&TBB, |
3200 | MachineBasicBlock *&FBB, |
3201 | SmallVectorImpl<MachineOperand> &Cond, |
3202 | bool AllowModify) const { |
3203 | SmallVector<MachineInstr *, 4> CondBranches; |
3204 | return AnalyzeBranchImpl(MBB, TBB, FBB, Cond, CondBranches, AllowModify); |
3205 | } |
3206 | |
3207 | bool X86InstrInfo::analyzeBranchPredicate(MachineBasicBlock &MBB, |
3208 | MachineBranchPredicate &MBP, |
3209 | bool AllowModify) const { |
3210 | using namespace std::placeholders; |
3211 | |
3212 | SmallVector<MachineOperand, 4> Cond; |
3213 | SmallVector<MachineInstr *, 4> CondBranches; |
3214 | if (AnalyzeBranchImpl(MBB, MBP.TrueDest, MBP.FalseDest, Cond, CondBranches, |
3215 | AllowModify)) |
3216 | return true; |
3217 | |
3218 | if (Cond.size() != 1) |
3219 | return true; |
3220 | |
3221 | assert(MBP.TrueDest && "expected!"); |
3222 | |
3223 | if (!MBP.FalseDest) |
3224 | MBP.FalseDest = MBB.getNextNode(); |
3225 | |
3226 | const TargetRegisterInfo *TRI = &getRegisterInfo(); |
3227 | |
3228 | MachineInstr *ConditionDef = nullptr; |
3229 | bool SingleUseCondition = true; |
3230 | |
3231 | for (auto I = std::next(MBB.rbegin()), E = MBB.rend(); I != E; ++I) { |
3232 | if (I->modifiesRegister(X86::EFLAGS, TRI)) { |
3233 | ConditionDef = &*I; |
3234 | break; |
3235 | } |
3236 | |
3237 | if (I->readsRegister(X86::EFLAGS, TRI)) |
3238 | SingleUseCondition = false; |
3239 | } |
3240 | |
3241 | if (!ConditionDef) |
3242 | return true; |
3243 | |
3244 | if (SingleUseCondition) { |
3245 | for (auto *Succ : MBB.successors()) |
3246 | if (Succ->isLiveIn(X86::EFLAGS)) |
3247 | SingleUseCondition = false; |
3248 | } |
3249 | |
3250 | MBP.ConditionDef = ConditionDef; |
3251 | MBP.SingleUseCondition = SingleUseCondition; |
3252 | |
3253 | // Currently we only recognize the simple pattern: |
3254 | // |
3255 | //   test %reg, %reg |
3256 | //   je/jne %label |
3257 | // |
3258 | const unsigned TestOpcode = |
3259 | Subtarget.is64Bit() ? X86::TEST64rr : X86::TEST32rr; |
3260 | |
3261 | if (ConditionDef->getOpcode() == TestOpcode && |
3262 | ConditionDef->getNumOperands() == 3 && |
3263 | ConditionDef->getOperand(0).isIdenticalTo(ConditionDef->getOperand(1)) && |
3264 | (Cond[0].getImm() == X86::COND_NE || Cond[0].getImm() == X86::COND_E)) { |
3265 | MBP.LHS = ConditionDef->getOperand(0); |
3266 | MBP.RHS = MachineOperand::CreateImm(0); |
3267 | MBP.Predicate = Cond[0].getImm() == X86::COND_NE |
3268 | ? MachineBranchPredicate::PRED_NE |
3269 | : MachineBranchPredicate::PRED_EQ; |
3270 | return false; |
3271 | } |
3272 | |
3273 | return true; |
3274 | } |
3275 | |
3276 | unsigned X86InstrInfo::removeBranch(MachineBasicBlock &MBB, |
3277 | int *BytesRemoved) const { |
3278 | assert(!BytesRemoved && "code size not handled"); |
3279 | |
3280 | MachineBasicBlock::iterator I = MBB.end(); |
3281 | unsigned Count = 0; |
3282 | |
3283 | while (I != MBB.begin()) { |
3284 | --I; |
3285 | if (I->isDebugInstr()) |
3286 | continue; |
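     | // Only unconditional JMP_1 and conditional JCC terminators are removed |
     | // and counted; stop at the first instruction that is neither. |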
3287 | if (I->getOpcode() != X86::JMP_1 && |
3288 | X86::getCondFromBranch(*I) == X86::COND_INVALID) |
3289 | break; |
3290 | |
3291 | I->eraseFromParent(); |
3292 | I = MBB.end(); |
3293 | ++Count; |
3294 | } |
3295 | |
3296 | return Count; |
3297 | } |
3298 | |
3299 | unsigned X86InstrInfo::insertBranch(MachineBasicBlock &MBB, |
3300 | MachineBasicBlock *TBB, |
3301 | MachineBasicBlock *FBB, |
3302 | ArrayRef<MachineOperand> Cond, |
3303 | const DebugLoc &DL, |
3304 | int *BytesAdded) const { |
3305 | |
3306 | assert(TBB && "insertBranch must not be told to insert a fallthrough"); |
3307 | assert((Cond.size() == 1 || Cond.size() == 0) && |
3308 | "X86 branch conditions have one component!"); |
3309 | assert(!BytesAdded && "code size not handled"); |
3310 | |
3311 | if (Cond.empty()) { |
3312 | |
3313 | assert(!FBB && "Unconditional branch with multiple successors!"); |
3314 | BuildMI(&MBB, DL, get(X86::JMP_1)).addMBB(TBB); |
3315 | return 1; |
3316 | } |
3317 | |
3318 | |
3319 | bool FallThru = FBB == nullptr; |
3320 | |
3321 | // Conditional branch. |
3322 | unsigned Count = 0; |
3323 | X86::CondCode CC = (X86::CondCode)Cond[0].getImm(); |
3324 | switch (CC) { |
3325 | case X86::COND_NE_OR_P: |
3326 | // Synthesize NE_OR_P with two branches. |
3327 | BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(TBB).addImm(X86::COND_NE); |
3328 | ++Count; |
3329 | BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(TBB).addImm(X86::COND_P); |
3330 | ++Count; |
3331 | break; |
3332 | case X86::COND_E_AND_NP: |
3333 | // Synthesize COND_E_AND_NP with two branches. |
3334 | if (FBB == nullptr) { |
3335 | FBB = getFallThroughMBB(&MBB, TBB); |
3336 | assert(FBB && "MBB cannot be the last block in function when the false " |
3337 | "body is a fall-through."); |
3338 | } |
3339 | |
3340 | BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(FBB).addImm(X86::COND_NE); |
3341 | ++Count; |
3342 | BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(TBB).addImm(X86::COND_NP); |
3343 | ++Count; |
3344 | break; |
3345 | default: { |
3346 | BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(TBB).addImm(CC); |
3347 | ++Count; |
3348 | } |
3349 | } |
3350 | if (!FallThru) { |
3351 | |
3352 | BuildMI(&MBB, DL, get(X86::JMP_1)).addMBB(FBB); |
3353 | ++Count; |
3354 | } |
3355 | return Count; |
3356 | } |
3357 | |
3358 | bool X86InstrInfo::canInsertSelect(const MachineBasicBlock &MBB, |
3359 | ArrayRef<MachineOperand> Cond, |
3360 | Register DstReg, Register TrueReg, |
3361 | Register FalseReg, int &CondCycles, |
3362 | int &TrueCycles, int &FalseCycles) const { |
3363 | // Not all subtargets have cmov instructions. |
3364 | if (!Subtarget.hasCMov()) |
3365 | return false; |
3366 | if (Cond.size() != 1) |
3367 | return false; |
3368 | |
3369 | if ((X86::CondCode)Cond[0].getImm() > X86::LAST_VALID_COND) |
3370 | return false; |
3371 | |
3372 | // Check register classes. |
3373 | const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); |
3374 | const TargetRegisterClass *RC = |
3375 | RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg)); |
3376 | if (!RC) |
3377 | return false; |
3378 | |
3379 | // We have cmov instructions for 16, 32, and 64 bit general purpose registers. |
3380 | if (X86::GR16RegClass.hasSubClassEq(RC) || |
3381 | X86::GR32RegClass.hasSubClassEq(RC) || |
3382 | X86::GR64RegClass.hasSubClassEq(RC)) { |
3383 | |
3384 | // Rough latency estimates for the CMOV-based select. |
3385 | CondCycles = 2; |
3386 | TrueCycles = 2; |
3387 | FalseCycles = 2; |
3388 | return true; |
3389 | } |
3390 | |
3391 | |
3392 | return false; |
3393 | } |
3394 | |
3395 | void X86InstrInfo::insertSelect(MachineBasicBlock &MBB, |
3396 | MachineBasicBlock::iterator I, |
3397 | const DebugLoc &DL, Register DstReg, |
3398 | ArrayRef<MachineOperand> Cond, Register TrueReg, |
3399 | Register FalseReg) const { |
3400 | MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); |
3401 | const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo(); |
3402 | const TargetRegisterClass &RC = *MRI.getRegClass(DstReg); |
3403 | assert(Cond.size() == 1 && "Invalid Cond array"); |
3404 | unsigned Opc = X86::getCMovOpcode(TRI.getRegSizeInBits(RC) / 8, |
3405 | false /*HasMemoryOperand*/); |
3406 | BuildMI(MBB, I, DL, get(Opc), DstReg) |
3407 | .addReg(FalseReg) |
3408 | .addReg(TrueReg) |
3409 | .addImm(Cond[0].getImm()); |
3410 | } |
3411 | |
3412 | |
3413 | static bool isHReg(unsigned Reg) { |
3414 | return X86::GR8_ABCD_HRegClass.contains(Reg); |
3415 | } |
3416 | |
3417 | |
3418 | static unsigned CopyToFromAsymmetricReg(unsigned DestReg, unsigned SrcReg, |
3419 | const X86Subtarget &Subtarget) { |
3420 | bool HasAVX = Subtarget.hasAVX(); |
3421 | bool HasAVX512 = Subtarget.hasAVX512(); |
3422 | |
3423 | |
3424 | // SrcReg(MaskReg) -> DestReg(GR64) |
3425 | // SrcReg(MaskReg) -> DestReg(GR32) |
3426 | // All KMASK RegClasses hold the same k registers, can be tested against anyone. |
3427 | if (X86::VK16RegClass.contains(SrcReg)) { |
3428 | if (X86::GR64RegClass.contains(DestReg)) { |
3429 | assert(Subtarget.hasBWI()); |
3430 | return X86::KMOVQrk; |
3431 | } |
3432 | if (X86::GR32RegClass.contains(DestReg)) |
3433 | return Subtarget.hasBWI() ? X86::KMOVDrk : X86::KMOVWrk; |
3434 | } |
3435 | |
3436 | |
3437 | |
3438 | |
3439 | |
3440 | if (X86::VK16RegClass.contains(DestReg)) { |
3441 | if (X86::GR64RegClass.contains(SrcReg)) { |
3442 | assert(Subtarget.hasBWI()); |
3443 | return X86::KMOVQkr; |
3444 | } |
3445 | if (X86::GR32RegClass.contains(SrcReg)) |
3446 | return Subtarget.hasBWI() ? X86::KMOVDkr : X86::KMOVWkr; |
3447 | } |
3448 | |
3449 | |
3450 | // SrcReg(VR128) -> DestReg(GR64) |
3451 | // SrcReg(VR64)  -> DestReg(GR64) |
3452 | // SrcReg(GR64)  -> DestReg(VR128) |
3453 | // SrcReg(GR64)  -> DestReg(VR64) |
3454 | |
3455 | if (X86::GR64RegClass.contains(DestReg)) { |
3456 | if (X86::VR128XRegClass.contains(SrcReg)) |
3457 | |
3458 | return HasAVX512 ? X86::VMOVPQIto64Zrr : |
3459 | HasAVX ? X86::VMOVPQIto64rr : |
3460 | X86::MOVPQIto64rr; |
3461 | if (X86::VR64RegClass.contains(SrcReg)) |
3462 | |
3463 | return X86::MMX_MOVD64from64rr; |
3464 | } else if (X86::GR64RegClass.contains(SrcReg)) { |
3465 | |
3466 | if (X86::VR128XRegClass.contains(DestReg)) |
3467 | return HasAVX512 ? X86::VMOV64toPQIZrr : |
3468 | HasAVX ? X86::VMOV64toPQIrr : |
3469 | X86::MOV64toPQIrr; |
3470 | |
3471 | if (X86::VR64RegClass.contains(DestReg)) |
3472 | return X86::MMX_MOVD64to64rr; |
3473 | } |
3474 | |
3475 | |
3476 | |
3477 | |
3478 | if (X86::GR32RegClass.contains(DestReg) && |
3479 | X86::VR128XRegClass.contains(SrcReg)) |
3480 | |
3481 | return HasAVX512 ? X86::VMOVPDI2DIZrr : |
3482 | HasAVX ? X86::VMOVPDI2DIrr : |
3483 | X86::MOVPDI2DIrr; |
3484 | |
3485 | if (X86::VR128XRegClass.contains(DestReg) && |
3486 | X86::GR32RegClass.contains(SrcReg)) |
3487 | |
3488 | return HasAVX512 ? X86::VMOVDI2PDIZrr : |
3489 | HasAVX ? X86::VMOVDI2PDIrr : |
3490 | X86::MOVDI2PDIrr; |
3491 | return 0; |
3492 | } |
3493 | |
3494 | void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB, |
3495 | MachineBasicBlock::iterator MI, |
3496 | const DebugLoc &DL, MCRegister DestReg, |
3497 | MCRegister SrcReg, bool KillSrc) const { |
3498 | // First deal with the normal symmetric copies. |
3499 | bool HasAVX = Subtarget.hasAVX(); |
3500 | bool HasVLX = Subtarget.hasVLX(); |
3501 | unsigned Opc = 0; |
3502 | if (X86::GR64RegClass.contains(DestReg, SrcReg)) |
3503 | Opc = X86::MOV64rr; |
3504 | else if (X86::GR32RegClass.contains(DestReg, SrcReg)) |
3505 | Opc = X86::MOV32rr; |
3506 | else if (X86::GR16RegClass.contains(DestReg, SrcReg)) |
3507 | Opc = X86::MOV16rr; |
3508 | else if (X86::GR8RegClass.contains(DestReg, SrcReg)) { |
3509 | // Copying to or from a physical H register on x86-64 requires a NOREX |
3510 | // move. Otherwise use a normal move. |
3511 | if ((isHReg(DestReg) || isHReg(SrcReg)) && |
3512 | Subtarget.is64Bit()) { |
3513 | Opc = X86::MOV8rr_NOREX; |
3514 | |
3515 | assert(X86::GR8_NOREXRegClass.contains(SrcReg, DestReg) && |
3516 | "8-bit H register can not be copied outside GR8_NOREX"); |
3517 | } else |
3518 | Opc = X86::MOV8rr; |
3519 | } |
3520 | else if (X86::VR64RegClass.contains(DestReg, SrcReg)) |
3521 | Opc = X86::MMX_MOVQ64rr; |
3522 | else if (X86::VR128XRegClass.contains(DestReg, SrcReg)) { |
3523 | if (HasVLX) |
3524 | Opc = X86::VMOVAPSZ128rr; |
3525 | else if (X86::VR128RegClass.contains(DestReg, SrcReg)) |
3526 | Opc = HasAVX ? X86::VMOVAPSrr : X86::MOVAPSrr; |
3527 | else { |
3528 | |
3529 | |
3530 | Opc = X86::VMOVAPSZrr; |
3531 | const TargetRegisterInfo *TRI = &getRegisterInfo(); |
3532 | DestReg = TRI->getMatchingSuperReg(DestReg, X86::sub_xmm, |
3533 | &X86::VR512RegClass); |
3534 | SrcReg = TRI->getMatchingSuperReg(SrcReg, X86::sub_xmm, |
3535 | &X86::VR512RegClass); |
3536 | } |
3537 | } else if (X86::VR256XRegClass.contains(DestReg, SrcReg)) { |
3538 | if (HasVLX) |
3539 | Opc = X86::VMOVAPSZ256rr; |
3540 | else if (X86::VR256RegClass.contains(DestReg, SrcReg)) |
3541 | Opc = X86::VMOVAPSYrr; |
3542 | else { |
3543 | |
3544 | |
3545 | Opc = X86::VMOVAPSZrr; |
3546 | const TargetRegisterInfo *TRI = &getRegisterInfo(); |
3547 | DestReg = TRI->getMatchingSuperReg(DestReg, X86::sub_ymm, |
3548 | &X86::VR512RegClass); |
3549 | SrcReg = TRI->getMatchingSuperReg(SrcReg, X86::sub_ymm, |
3550 | &X86::VR512RegClass); |
3551 | } |
3552 | } else if (X86::VR512RegClass.contains(DestReg, SrcReg)) |
3553 | Opc = X86::VMOVAPSZrr; |
3554 | |
3555 | else if (X86::VK16RegClass.contains(DestReg, SrcReg)) |
3556 | Opc = Subtarget.hasBWI() ? X86::KMOVQkk : X86::KMOVWkk; |
3557 | if (!Opc) |
3558 | Opc = CopyToFromAsymmetricReg(DestReg, SrcReg, Subtarget); |
3559 | |
3560 | if (Opc) { |
3561 | BuildMI(MBB, MI, DL, get(Opc), DestReg) |
3562 | .addReg(SrcReg, getKillRegState(KillSrc)); |
3563 | return; |
3564 | } |
3565 | |
3566 | if (SrcReg == X86::EFLAGS || DestReg == X86::EFLAGS) { |
3567 | |
3568 | // Copies involving EFLAGS are deliberately not supported here; fail |
3569 | // loudly so that any case that reaches this point gets reported rather |
3570 | // than silently miscompiled. |
3571 | report_fatal_error("Unable to copy EFLAGS physical register!"); |
3572 | } |
3573 | |
3574 | LLVM_DEBUG(dbgs() << "Cannot copy " << RI.getName(SrcReg) << " to " |
3575 | << RI.getName(DestReg) << '\n'); |
3576 | report_fatal_error("Cannot emit physreg copy instruction"); |
3577 | } |
3578 | |
3579 | Optional<DestSourcePair> |
3580 | X86InstrInfo::isCopyInstrImpl(const MachineInstr &MI) const { |
3581 | if (MI.isMoveReg()) |
3582 | return DestSourcePair{MI.getOperand(0), MI.getOperand(1)}; |
3583 | return None; |
3584 | } |
3585 | |
3586 | static unsigned getLoadStoreRegOpcode(Register Reg, |
3587 | const TargetRegisterClass *RC, |
3588 | bool IsStackAligned, |
3589 | const X86Subtarget &STI, bool load) { |
3590 | bool HasAVX = STI.hasAVX(); |
3591 | bool HasAVX512 = STI.hasAVX512(); |
3592 | bool HasVLX = STI.hasVLX(); |
3593 | |
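     | // Dispatch on the spill size (in bytes) of the register class. |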
3594 | switch (STI.getRegisterInfo()->getSpillSize(*RC)) { |
3595 | default: |
3596 | llvm_unreachable("Unknown spill size"); |
3597 | case 1: |
3598 | assert(X86::GR8RegClass.hasSubClassEq(RC) && "Unknown 1-byte regclass"); |
3599 | if (STI.is64Bit()) |
3600 | // Copying to or from a physical H register on x86-64 requires a NOREX |
3601 | // move. Otherwise use a normal move. |
3602 | if (isHReg(Reg) || X86::GR8_ABCD_HRegClass.hasSubClassEq(RC)) |
3603 | return load ? X86::MOV8rm_NOREX : X86::MOV8mr_NOREX; |
3604 | return load ? X86::MOV8rm : X86::MOV8mr; |
3605 | case 2: |
3606 | if (X86::VK16RegClass.hasSubClassEq(RC)) |
3607 | return load ? X86::KMOVWkm : X86::KMOVWmk; |
3608 | assert(X86::GR16RegClass.hasSubClassEq(RC) && "Unknown 2-byte regclass"); |
3609 | return load ? X86::MOV16rm : X86::MOV16mr; |
3610 | case 4: |
3611 | if (X86::GR32RegClass.hasSubClassEq(RC)) |
3612 | return load ? X86::MOV32rm : X86::MOV32mr; |
3613 | if (X86::FR32XRegClass.hasSubClassEq(RC)) |
3614 | return load ? |
3615 | (HasAVX512 ? X86::VMOVSSZrm_alt : |
3616 | HasAVX ? X86::VMOVSSrm_alt : |
3617 | X86::MOVSSrm_alt) : |
3618 | (HasAVX512 ? X86::VMOVSSZmr : |
3619 | HasAVX ? X86::VMOVSSmr : |
3620 | X86::MOVSSmr); |
3621 | if (X86::RFP32RegClass.hasSubClassEq(RC)) |
3622 | return load ? X86::LD_Fp32m : X86::ST_Fp32m; |
3623 | if (X86::VK32RegClass.hasSubClassEq(RC)) { |
3624 | assert(STI.hasBWI() && "KMOVD requires BWI"); |
3625 | return load ? X86::KMOVDkm : X86::KMOVDmk; |
3626 | } |
3627 | // All of the mask-pair register classes spill with the same MASKPAIR16 |
3628 | // pseudo load/store opcodes. |
3629 | if (X86::VK1PAIRRegClass.hasSubClassEq(RC) || |
3630 | X86::VK2PAIRRegClass.hasSubClassEq(RC) || |
3631 | X86::VK4PAIRRegClass.hasSubClassEq(RC) || |
3632 | X86::VK8PAIRRegClass.hasSubClassEq(RC) || |
3633 | X86::VK16PAIRRegClass.hasSubClassEq(RC)) |
3634 | return load ? X86::MASKPAIR16LOAD : X86::MASKPAIR16STORE; |
3635 | llvm_unreachable("Unknown 4-byte regclass"); |
3636 | case 8: |
3637 | if (X86::GR64RegClass.hasSubClassEq(RC)) |
3638 | return load ? X86::MOV64rm : X86::MOV64mr; |
3639 | if (X86::FR64XRegClass.hasSubClassEq(RC)) |
3640 | return load ? |
3641 | (HasAVX512 ? X86::VMOVSDZrm_alt : |
3642 | HasAVX ? X86::VMOVSDrm_alt : |
3643 | X86::MOVSDrm_alt) : |
3644 | (HasAVX512 ? X86::VMOVSDZmr : |
3645 | HasAVX ? X86::VMOVSDmr : |
3646 | X86::MOVSDmr); |
3647 | if (X86::VR64RegClass.hasSubClassEq(RC)) |
3648 | return load ? X86::MMX_MOVQ64rm : X86::MMX_MOVQ64mr; |
3649 | if (X86::RFP64RegClass.hasSubClassEq(RC)) |
3650 | return load ? X86::LD_Fp64m : X86::ST_Fp64m; |
3651 | if (X86::VK64RegClass.hasSubClassEq(RC)) { |
3652 | assert(STI.hasBWI() && "KMOVQ requires BWI"); |
3653 | return load ? X86::KMOVQkm : X86::KMOVQmk; |
3654 | } |
3655 | llvm_unreachable("Unknown 8-byte regclass"); |
3656 | case 10: |
3657 | assert(X86::RFP80RegClass.hasSubClassEq(RC) && "Unknown 10-byte regclass"); |
3658 | return load ? X86::LD_Fp80m : X86::ST_FpP80m; |
3659 | case 16: { |
3660 | if (X86::VR128XRegClass.hasSubClassEq(RC)) { |
3661 | |
3662 | if (IsStackAligned) |
3663 | return load ? |
3664 | (HasVLX ? X86::VMOVAPSZ128rm : |
3665 | HasAVX512 ? X86::VMOVAPSZ128rm_NOVLX : |
3666 | HasAVX ? X86::VMOVAPSrm : |
3667 | X86::MOVAPSrm): |
3668 | (HasVLX ? X86::VMOVAPSZ128mr : |
3669 | HasAVX512 ? X86::VMOVAPSZ128mr_NOVLX : |
3670 | HasAVX ? X86::VMOVAPSmr : |
3671 | X86::MOVAPSmr); |
3672 | else |
3673 | return load ? |
3674 | (HasVLX ? X86::VMOVUPSZ128rm : |
3675 | HasAVX512 ? X86::VMOVUPSZ128rm_NOVLX : |
3676 | HasAVX ? X86::VMOVUPSrm : |
3677 | X86::MOVUPSrm): |
3678 | (HasVLX ? X86::VMOVUPSZ128mr : |
3679 | HasAVX512 ? X86::VMOVUPSZ128mr_NOVLX : |
3680 | HasAVX ? X86::VMOVUPSmr : |
3681 | X86::MOVUPSmr); |
3682 | } |
3683 | if (X86::BNDRRegClass.hasSubClassEq(RC)) { |
3684 | if (STI.is64Bit()) |
3685 | return load ? X86::BNDMOV64rm : X86::BNDMOV64mr; |
3686 | else |
3687 | return load ? X86::BNDMOV32rm : X86::BNDMOV32mr; |
3688 | } |
3689 | llvm_unreachable("Unknown 16-byte regclass"); |
3690 | } |
3691 | case 32: |
3692 | assert(X86::VR256XRegClass.hasSubClassEq(RC) && "Unknown 32-byte regclass"); |
3693 | |
3694 | if (IsStackAligned) |
3695 | return load ? |
3696 | (HasVLX ? X86::VMOVAPSZ256rm : |
3697 | HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX : |
3698 | X86::VMOVAPSYrm) : |
3699 | (HasVLX ? X86::VMOVAPSZ256mr : |
3700 | HasAVX512 ? X86::VMOVAPSZ256mr_NOVLX : |
3701 | X86::VMOVAPSYmr); |
3702 | else |
3703 | return load ? |
3704 | (HasVLX ? X86::VMOVUPSZ256rm : |
3705 | HasAVX512 ? X86::VMOVUPSZ256rm_NOVLX : |
3706 | X86::VMOVUPSYrm) : |
3707 | (HasVLX ? X86::VMOVUPSZ256mr : |
3708 | HasAVX512 ? X86::VMOVUPSZ256mr_NOVLX : |
3709 | X86::VMOVUPSYmr); |
3710 | case 64: |
3711 | assert(X86::VR512RegClass.hasSubClassEq(RC) && "Unknown 64-byte regclass"); |
3712 | assert(STI.hasAVX512() && "Using 512-bit register requires AVX512"); |
3713 | if (IsStackAligned) |
3714 | return load ? X86::VMOVAPSZrm : X86::VMOVAPSZmr; |
3715 | else |
3716 | return load ? X86::VMOVUPSZrm : X86::VMOVUPSZmr; |
3717 | } |
3718 | } |
3719 | |
3720 | Optional<ExtAddrMode> |
3721 | X86InstrInfo::getAddrModeFromMemoryOp(const MachineInstr &MemI, |
3722 | const TargetRegisterInfo *TRI) const { |
3723 | const MCInstrDesc &Desc = MemI.getDesc(); |
3724 | int MemRefBegin = X86II::getMemoryOperandNo(Desc.TSFlags); |
3725 | if (MemRefBegin < 0) |
3726 | return None; |
3727 | |
3728 | MemRefBegin += X86II::getOperandBias(Desc); |
3729 | |
3730 | auto &BaseOp = MemI.getOperand(MemRefBegin + X86::AddrBaseReg); |
3731 | if (!BaseOp.isReg()) |
3732 | return None; |
3733 | |
3734 | const MachineOperand &DispMO = MemI.getOperand(MemRefBegin + X86::AddrDisp); |
3735 | |
3736 | if (!DispMO.isImm()) |
3737 | return None; |
3738 | |
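     | // Translate the X86 memory operand tuple (base, scale, index, disp) into |
     | // the target-independent ExtAddrMode. |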
3739 | ExtAddrMode AM; |
3740 | AM.BaseReg = BaseOp.getReg(); |
3741 | AM.ScaledReg = MemI.getOperand(MemRefBegin + X86::AddrIndexReg).getReg(); |
3742 | AM.Scale = MemI.getOperand(MemRefBegin + X86::AddrScaleAmt).getImm(); |
3743 | AM.Displacement = DispMO.getImm(); |
3744 | return AM; |
3745 | } |
3746 | |
3747 | bool X86InstrInfo::getConstValDefinedInReg(const MachineInstr &MI, |
3748 | const Register Reg, |
3749 | int64_t &ImmVal) const { |
3750 | if (MI.getOpcode() != X86::MOV32ri && MI.getOpcode() != X86::MOV64ri) |
3751 | return false; |
3752 | |
3753 | if (!MI.getOperand(1).isImm() || MI.getOperand(0).getReg() != Reg) |
3754 | return false; |
3755 | ImmVal = MI.getOperand(1).getImm(); |
3756 | return true; |
3757 | } |
3758 | |
3759 | bool X86InstrInfo::preservesZeroValueInReg( |
3760 | const MachineInstr *MI, const Register NullValueReg, |
3761 | const TargetRegisterInfo *TRI) const { |
3762 | if (!MI->modifiesRegister(NullValueReg, TRI)) |
3763 | return true; |
3764 | switch (MI->getOpcode()) { |
3765 | |
3766 | // Shifting the null register by an immediate still yields zero. |
3767 | case X86::SHR64ri: |
3768 | case X86::SHR32ri: |
3769 | case X86::SHL64ri: |
3770 | case X86::SHL32ri: |
3771 | assert(MI->getOperand(0).isDef() && MI->getOperand(1).isUse() && |
3772 | "expected for shift opcode!"); |
3773 | return MI->getOperand(0).getReg() == NullValueReg && |
3774 | MI->getOperand(1).getReg() == NullValueReg; |
3775 | // A 32-bit register move within the null register keeps it zero; the |
3776 | // upper 32 bits are implicitly cleared as well. |
3777 | case X86::MOV32rr: |
3778 | return llvm::all_of(MI->operands(), [&](const MachineOperand &MO) { |
3779 | return TRI->isSubRegisterEq(NullValueReg, MO.getReg()); |
3780 | }); |
3781 | default: |
3782 | return false; |
3783 | } |
3784 | llvm_unreachable("Should be handled above!"); |
3785 | } |
3786 | |
3787 | bool X86InstrInfo::getMemOperandsWithOffsetWidth( |
3788 | const MachineInstr &MemOp, SmallVectorImpl<const MachineOperand *> &BaseOps, |
3789 | int64_t &Offset, bool &OffsetIsScalable, unsigned &Width, |
3790 | const TargetRegisterInfo *TRI) const { |
3791 | const MCInstrDesc &Desc = MemOp.getDesc(); |
3792 | int MemRefBegin = X86II::getMemoryOperandNo(Desc.TSFlags); |
3793 | if (MemRefBegin < 0) |
3794 | return false; |
3795 | |
3796 | MemRefBegin += X86II::getOperandBias(Desc); |
3797 | |
3798 | const MachineOperand *BaseOp = |
3799 | &MemOp.getOperand(MemRefBegin + X86::AddrBaseReg); |
3800 | if (!BaseOp->isReg()) |
3801 | return false; |
3802 | |
3803 | if (MemOp.getOperand(MemRefBegin + X86::AddrScaleAmt).getImm() != 1) |
3804 | return false; |
3805 | |
3806 | if (MemOp.getOperand(MemRefBegin + X86::AddrIndexReg).getReg() != |
3807 | X86::NoRegister) |
3808 | return false; |
3809 | |
3810 | const MachineOperand &DispMO = MemOp.getOperand(MemRefBegin + X86::AddrDisp); |
3811 | |
3812 | |
3813 | if (!DispMO.isImm()) |
3814 | return false; |
3815 | |
3816 | Offset = DispMO.getImm(); |
3817 | |
3818 | if (!BaseOp->isReg()) |
3819 | return false; |
3820 | |
3821 | OffsetIsScalable = false; |
3822 | |
3823 | |
3824 | |
3825 | Width = |
3826 | !MemOp.memoperands_empty() ? MemOp.memoperands().front()->getSize() : 0; |
3827 | BaseOps.push_back(BaseOp); |
3828 | return true; |
3829 | } |
3830 | |
3831 | static unsigned getStoreRegOpcode(Register SrcReg, |
3832 | const TargetRegisterClass *RC, |
3833 | bool IsStackAligned, |
3834 | const X86Subtarget &STI) { |
3835 | return getLoadStoreRegOpcode(SrcReg, RC, IsStackAligned, STI, false); |
3836 | } |
3837 | |
3838 | static unsigned getLoadRegOpcode(Register DestReg, |
3839 | const TargetRegisterClass *RC, |
3840 | bool IsStackAligned, const X86Subtarget &STI) { |
3841 | return getLoadStoreRegOpcode(DestReg, RC, IsStackAligned, STI, true); |
3842 | } |
3843 | |
3844 | void X86InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, |
3845 | MachineBasicBlock::iterator MI, |
3846 | Register SrcReg, bool isKill, int FrameIdx, |
3847 | const TargetRegisterClass *RC, |
3848 | const TargetRegisterInfo *TRI) const { |
3849 | const MachineFunction &MF = *MBB.getParent(); |
3850 | const MachineFrameInfo &MFI = MF.getFrameInfo(); |
3851 | assert(MFI.getObjectSize(FrameIdx) >= TRI->getSpillSize(*RC) && |
3852 | "Stack slot too small for store"); |
3853 | if (RC->getID() == X86::TILERegClassID) { |
3854 | unsigned Opc = X86::TILESTORED; |
3855 | // tilestored %tmm, (%sp, %idx) |
3856 | MachineRegisterInfo &RegInfo = MBB.getParent()->getRegInfo(); |
3857 | Register VirtReg = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass); |
3858 | BuildMI(MBB, MI, DebugLoc(), get(X86::MOV64ri), VirtReg).addImm(64); |
3859 | MachineInstr *NewMI = |
3860 | addFrameReference(BuildMI(MBB, MI, DebugLoc(), get(Opc)), FrameIdx) |
3861 | .addReg(SrcReg, getKillRegState(isKill)); |
3862 | MachineOperand &MO = NewMI->getOperand(2); |
3863 | MO.setReg(VirtReg); |
3864 | MO.setIsKill(true); |
3865 | } else { |
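     | // For all other register classes, pick an aligned or unaligned spill |
     | // opcode depending on whether the slot is known to be sufficiently aligned. |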
3866 | unsigned Alignment = std::max<uint32_t>(TRI->getSpillSize(*RC), 16); |
3867 | bool isAligned = |
3868 | (Subtarget.getFrameLowering()->getStackAlign() >= Alignment) || |
3869 | (RI.canRealignStack(MF) && !MFI.isFixedObjectIndex(FrameIdx)); |
3870 | unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, Subtarget); |
3871 | addFrameReference(BuildMI(MBB, MI, DebugLoc(), get(Opc)), FrameIdx) |
3872 | .addReg(SrcReg, getKillRegState(isKill)); |
3873 | } |
3874 | } |
3875 | |
3876 | void X86InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB, |
3877 | MachineBasicBlock::iterator MI, |
3878 | Register DestReg, int FrameIdx, |
3879 | const TargetRegisterClass *RC, |
3880 | const TargetRegisterInfo *TRI) const { |
3881 | if (RC->getID() == X86::TILERegClassID) { |
3882 | unsigned Opc = X86::TILELOADD; |
3883 | // tileloadd (%sp, %idx), %tmm |
3884 | MachineRegisterInfo &RegInfo = MBB.getParent()->getRegInfo(); |
3885 | Register VirtReg = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass); |
3886 | MachineInstr *NewMI = |
3887 | BuildMI(MBB, MI, DebugLoc(), get(X86::MOV64ri), VirtReg).addImm(64); |
3888 | NewMI = addFrameReference(BuildMI(MBB, MI, DebugLoc(), get(Opc), DestReg), |
3889 | FrameIdx); |
3890 | MachineOperand &MO = NewMI->getOperand(3); |
3891 | MO.setReg(VirtReg); |
3892 | MO.setIsKill(true); |
3893 | } else { |
3894 | const MachineFunction &MF = *MBB.getParent(); |
3895 | const MachineFrameInfo &MFI = MF.getFrameInfo(); |
3896 | unsigned Alignment = std::max<uint32_t>(TRI->getSpillSize(*RC), 16); |
3897 | bool isAligned = |
3898 | (Subtarget.getFrameLowering()->getStackAlign() >= Alignment) || |
3899 | (RI.canRealignStack(MF) && !MFI.isFixedObjectIndex(FrameIdx)); |
3900 | unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, Subtarget); |
3901 | addFrameReference(BuildMI(MBB, MI, DebugLoc(), get(Opc), DestReg), |
3902 | FrameIdx); |
3903 | } |
3904 | } |
3905 | |
3906 | bool X86InstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg, |
3907 | Register &SrcReg2, int &CmpMask, |
3908 | int &CmpValue) const { |
3909 | switch (MI.getOpcode()) { |
3910 | default: break; |
3911 | case X86::CMP64ri32: |
3912 | case X86::CMP64ri8: |
3913 | case X86::CMP32ri: |
3914 | case X86::CMP32ri8: |
3915 | case X86::CMP16ri: |
3916 | case X86::CMP16ri8: |
3917 | case X86::CMP8ri: |
3918 | SrcReg = MI.getOperand(0).getReg(); |
3919 | SrcReg2 = 0; |
3920 | if (MI.getOperand(1).isImm()) { |
3921 | CmpMask = ~0; |
3922 | CmpValue = MI.getOperand(1).getImm(); |
3923 | } else { |
3924 | CmpMask = CmpValue = 0; |
3925 | } |
3926 | return true; |
3927 | |
3928 | case X86::SUB64rm: |
3929 | case X86::SUB32rm: |
3930 | case X86::SUB16rm: |
3931 | case X86::SUB8rm: |
3932 | SrcReg = MI.getOperand(1).getReg(); |
3933 | SrcReg2 = 0; |
3934 | CmpMask = 0; |
3935 | CmpValue = 0; |
3936 | return true; |
3937 | case X86::SUB64rr: |
3938 | case X86::SUB32rr: |
3939 | case X86::SUB16rr: |
3940 | case X86::SUB8rr: |
3941 | SrcReg = MI.getOperand(1).getReg(); |
3942 | SrcReg2 = MI.getOperand(2).getReg(); |
3943 | CmpMask = 0; |
3944 | CmpValue = 0; |
3945 | return true; |
3946 | case X86::SUB64ri32: |
3947 | case X86::SUB64ri8: |
3948 | case X86::SUB32ri: |
3949 | case X86::SUB32ri8: |
3950 | case X86::SUB16ri: |
3951 | case X86::SUB16ri8: |
3952 | case X86::SUB8ri: |
3953 | SrcReg = MI.getOperand(1).getReg(); |
3954 | SrcReg2 = 0; |
3955 | if (MI.getOperand(2).isImm()) { |
3956 | CmpMask = ~0; |
3957 | CmpValue = MI.getOperand(2).getImm(); |
3958 | } else { |
3959 | CmpMask = CmpValue = 0; |
3960 | } |
3961 | return true; |
3962 | case X86::CMP64rr: |
3963 | case X86::CMP32rr: |
3964 | case X86::CMP16rr: |
3965 | case X86::CMP8rr: |
3966 | SrcReg = MI.getOperand(0).getReg(); |
3967 | SrcReg2 = MI.getOperand(1).getReg(); |
3968 | CmpMask = 0; |
3969 | CmpValue = 0; |
3970 | return true; |
3971 | case X86::TEST8rr: |
3972 | case X86::TEST16rr: |
3973 | case X86::TEST32rr: |
3974 | case X86::TEST64rr: |
3975 | SrcReg = MI.getOperand(0).getReg(); |
3976 | if (MI.getOperand(1).getReg() != SrcReg) |
3977 | return false; |
3978 | |
3979 | SrcReg2 = 0; |
3980 | CmpMask = ~0; |
3981 | CmpValue = 0; |
3982 | return true; |
3983 | } |
3984 | return false; |
3985 | } |
3986 | |
3987 | /// Check whether the first instruction, whose only purpose is to update |
3988 | /// flags, can be made redundant. CMPrr can be made redundant by SUBrr if |
3989 | /// the operands are the same. This function can be extended later on. |
3990 | /// SrcReg, SrcReg2: register operands for FlagI. |
3991 | /// ImmMask, ImmValue: immediate for FlagI if it takes an immediate. |
3992 | /// |
3993 | inline static bool isRedundantFlagInstr(const MachineInstr &FlagI, |
3994 | Register SrcReg, Register SrcReg2, |
3995 | int ImmMask, int ImmValue, |
3996 | const MachineInstr &OI) { |
3997 | if (((FlagI.getOpcode() == X86::CMP64rr && OI.getOpcode() == X86::SUB64rr) || |
3998 | (FlagI.getOpcode() == X86::CMP32rr && OI.getOpcode() == X86::SUB32rr) || |
3999 | (FlagI.getOpcode() == X86::CMP16rr && OI.getOpcode() == X86::SUB16rr) || |
4000 | (FlagI.getOpcode() == X86::CMP8rr && OI.getOpcode() == X86::SUB8rr)) && |
4001 | ((OI.getOperand(1).getReg() == SrcReg && |
4002 | OI.getOperand(2).getReg() == SrcReg2) || |
4003 | (OI.getOperand(1).getReg() == SrcReg2 && |
4004 | OI.getOperand(2).getReg() == SrcReg))) |
4005 | return true; |
4006 | |
4007 | if (ImmMask != 0 && |
4008 | ((FlagI.getOpcode() == X86::CMP64ri32 && |
4009 | OI.getOpcode() == X86::SUB64ri32) || |
4010 | (FlagI.getOpcode() == X86::CMP64ri8 && |
4011 | OI.getOpcode() == X86::SUB64ri8) || |
4012 | (FlagI.getOpcode() == X86::CMP32ri && OI.getOpcode() == X86::SUB32ri) || |
4013 | (FlagI.getOpcode() == X86::CMP32ri8 && |
4014 | OI.getOpcode() == X86::SUB32ri8) || |
4015 | (FlagI.getOpcode() == X86::CMP16ri && OI.getOpcode() == X86::SUB16ri) || |
4016 | (FlagI.getOpcode() == X86::CMP16ri8 && |
4017 | OI.getOpcode() == X86::SUB16ri8) || |
4018 | (FlagI.getOpcode() == X86::CMP8ri && OI.getOpcode() == X86::SUB8ri)) && |
4019 | OI.getOperand(1).getReg() == SrcReg && |
4020 | OI.getOperand(2).getImm() == ImmValue) |
4021 | return true; |
4022 | return false; |
4023 | } |
4024 | |
4025 | /// Check whether the definition can be converted |
4026 | /// to remove a comparison against zero. |
4027 | inline static bool isDefConvertible(const MachineInstr &MI, bool &NoSignFlag, |
4028 | bool &ClearsOverflowFlag) { |
4029 | NoSignFlag = false; |
4030 | ClearsOverflowFlag = false; |
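     | // NoSignFlag and ClearsOverflowFlag are out-parameters describing which |
     | // EFLAGS bits the defining instruction leaves usable for the caller. |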
4031 | |
4032 | switch (MI.getOpcode()) { |
4033 | default: return false; |
4034 | |
4035 | // Shifts leave EFLAGS unmodified when the (truncated) shift count is zero, |
4036 | // so only accept them when the count is provably non-zero. |
4037 | case X86::SAR8ri: case X86::SAR16ri: case X86::SAR32ri:case X86::SAR64ri: |
4038 | case X86::SHR8ri: case X86::SHR16ri: case X86::SHR32ri:case X86::SHR64ri: |
4039 | return getTruncatedShiftCount(MI, 2) != 0; |
4040 | |
4041 | |
4042 | |
4043 | case X86::SHL8ri: case X86::SHL16ri: case X86::SHL32ri:case X86::SHL64ri:{ |
4044 | unsigned ShAmt = getTruncatedShiftCount(MI, 2); |
4045 | if (isTruncatedShiftCountForLEA(ShAmt)) return false; |
4046 | return ShAmt != 0; |
4047 | } |
4048 | |
4049 | case X86::SHRD16rri8:case X86::SHRD32rri8:case X86::SHRD64rri8: |
4050 | case X86::SHLD16rri8:case X86::SHLD32rri8:case X86::SHLD64rri8: |
4051 | return getTruncatedShiftCount(MI, 3) != 0; |
4052 | |
4053 | case X86::SUB64ri32: case X86::SUB64ri8: case X86::SUB32ri: |
4054 | case X86::SUB32ri8: case X86::SUB16ri: case X86::SUB16ri8: |
4055 | case X86::SUB8ri: case X86::SUB64rr: case X86::SUB32rr: |
4056 | case X86::SUB16rr: case X86::SUB8rr: case X86::SUB64rm: |
4057 | case X86::SUB32rm: case X86::SUB16rm: case X86::SUB8rm: |
4058 | case X86::DEC64r: case X86::DEC32r: case X86::DEC16r: case X86::DEC8r: |
4059 | case X86::ADD64ri32: case X86::ADD64ri8: case X86::ADD32ri: |
4060 | case X86::ADD32ri8: case X86::ADD16ri: case X86::ADD16ri8: |
4061 | case X86::ADD8ri: case X86::ADD64rr: case X86::ADD32rr: |
4062 | case X86::ADD16rr: case X86::ADD8rr: case X86::ADD64rm: |
4063 | case X86::ADD32rm: case X86::ADD16rm: case X86::ADD8rm: |
4064 | case X86::INC64r: case X86::INC32r: case X86::INC16r: case X86::INC8r: |
4065 | case X86::ADC64ri32: case X86::ADC64ri8: case X86::ADC32ri: |
4066 | case X86::ADC32ri8: case X86::ADC16ri: case X86::ADC16ri8: |
4067 | case X86::ADC8ri: case X86::ADC64rr: case X86::ADC32rr: |
4068 | case X86::ADC16rr: case X86::ADC8rr: case X86::ADC64rm: |
4069 | case X86::ADC32rm: case X86::ADC16rm: case X86::ADC8rm: |
4070 | case X86::SBB64ri32: case X86::SBB64ri8: case X86::SBB32ri: |
4071 | case X86::SBB32ri8: case X86::SBB16ri: case X86::SBB16ri8: |
4072 | case X86::SBB8ri: case X86::SBB64rr: case X86::SBB32rr: |
4073 | case X86::SBB16rr: case X86::SBB8rr: case X86::SBB64rm: |
4074 | case X86::SBB32rm: case X86::SBB16rm: case X86::SBB8rm: |
4075 | case X86::NEG8r: case X86::NEG16r: case X86::NEG32r: case X86::NEG64r: |
4076 | case X86::SAR8r1: case X86::SAR16r1: case X86::SAR32r1:case X86::SAR64r1: |
4077 | case X86::SHR8r1: case X86::SHR16r1: case X86::SHR32r1:case X86::SHR64r1: |
4078 | case X86::SHL8r1: case X86::SHL16r1: case X86::SHL32r1:case X86::SHL64r1: |
4079 | case X86::LZCNT16rr: case X86::LZCNT16rm: |
4080 | case X86::LZCNT32rr: case X86::LZCNT32rm: |
4081 | case X86::LZCNT64rr: case X86::LZCNT64rm: |
4082 | case X86::POPCNT16rr:case X86::POPCNT16rm: |
4083 | case X86::POPCNT32rr:case X86::POPCNT32rm: |
4084 | case X86::POPCNT64rr:case X86::POPCNT64rm: |
4085 | case X86::TZCNT16rr: case X86::TZCNT16rm: |
4086 | case X86::TZCNT32rr: case X86::TZCNT32rm: |
4087 | case X86::TZCNT64rr: case X86::TZCNT64rm: |
4088 | return true; |
4089 | case X86::AND64ri32: case X86::AND64ri8: case X86::AND32ri: |
4090 | case X86::AND32ri8: case X86::AND16ri: case X86::AND16ri8: |
4091 | case X86::AND8ri: case X86::AND64rr: case X86::AND32rr: |
4092 | case X86::AND16rr: case X86::AND8rr: case X86::AND64rm: |
4093 | case X86::AND32rm: case X86::AND16rm: case X86::AND8rm: |
4094 | case X86::XOR64ri32: case X86::XOR64ri8: case X86::XOR32ri: |
4095 | case X86::XOR32ri8: case X86::XOR16ri: case X86::XOR16ri8: |
4096 | case X86::XOR8ri: case X86::XOR64rr: case X86::XOR32rr: |
4097 | case X86::XOR16rr: case X86::XOR8rr: case X86::XOR64rm: |
4098 | case X86::XOR32rm: case X86::XOR16rm: case X86::XOR8rm: |
4099 | case X86::OR64ri32: case X86::OR64ri8: case X86::OR32ri: |
4100 | case X86::OR32ri8: case X86::OR16ri: case X86::OR16ri8: |
4101 | case X86::OR8ri: case X86::OR64rr: case X86::OR32rr: |
4102 | case X86::OR16rr: case X86::OR8rr: case X86::OR64rm: |
4103 | case X86::OR32rm: case X86::OR16rm: case X86::OR8rm: |
4104 | case X86::ANDN32rr: case X86::ANDN32rm: |
4105 | case X86::ANDN64rr: case X86::ANDN64rm: |
4106 | case X86::BLSI32rr: case X86::BLSI32rm: |
4107 | case X86::BLSI64rr: case X86::BLSI64rm: |
4108 | case X86::BLSMSK32rr: case X86::BLSMSK32rm: |
4109 | case X86::BLSMSK64rr: case X86::BLSMSK64rm: |
4110 | case X86::BLSR32rr: case X86::BLSR32rm: |
4111 | case X86::BLSR64rr: case X86::BLSR64rm: |
4112 | case X86::BLCFILL32rr: case X86::BLCFILL32rm: |
4113 | case X86::BLCFILL64rr: case X86::BLCFILL64rm: |
4114 | case X86::BLCI32rr: case X86::BLCI32rm: |
4115 | case X86::BLCI64rr: case X86::BLCI64rm: |
4116 | case X86::BLCIC32rr: case X86::BLCIC32rm: |
4117 | case X86::BLCIC64rr: case X86::BLCIC64rm: |
4118 | case X86::BLCMSK32rr: case X86::BLCMSK32rm: |
4119 | case X86::BLCMSK64rr: case X86::BLCMSK64rm: |
4120 | case X86::BLCS32rr: case X86::BLCS32rm: |
4121 | case X86::BLCS64rr: case X86::BLCS64rm: |
4122 | case X86::BLSFILL32rr: case X86::BLSFILL32rm: |
4123 | case X86::BLSFILL64rr: case X86::BLSFILL64rm: |
4124 | case X86::BLSIC32rr: case X86::BLSIC32rm: |
4125 | case X86::BLSIC64rr: case X86::BLSIC64rm: |
4126 | case X86::BZHI32rr: case X86::BZHI32rm: |
4127 | case X86::BZHI64rr: case X86::BZHI64rm: |
4128 | case X86::T1MSKC32rr: case X86::T1MSKC32rm: |
4129 | case X86::T1MSKC64rr: case X86::T1MSKC64rm: |
4130 | case X86::TZMSK32rr: case X86::TZMSK32rm: |
4131 | case X86::TZMSK64rr: case X86::TZMSK64rm: |
4132 | |
4133 | // The logical and BMI instructions above always clear the overflow flag, |
4134 | // so OF-based conditions stay correct once the compare with zero is removed. |
4135 | ClearsOverflowFlag = true; |
4136 | return true; |
4137 | case X86::BEXTR32rr: case X86::BEXTR64rr: |
4138 | case X86::BEXTR32rm: case X86::BEXTR64rm: |
4139 | case X86::BEXTRI32ri: case X86::BEXTRI32mi: |
4140 | case X86::BEXTRI64ri: case X86::BEXTRI64mi: |
4141 | |
4142 | // BEXTR leaves the sign flag undefined, so SF-based conditions cannot be used. |
4143 | NoSignFlag = true; |
4144 | return true; |
4145 | } |
4146 | } |
4147 | |
4148 | |
4149 | static X86::CondCode isUseDefConvertible(const MachineInstr &MI) { |
4150 | switch (MI.getOpcode()) { |
4151 | default: return X86::COND_INVALID; |
4152 | case X86::NEG8r: |
4153 | case X86::NEG16r: |
4154 | case X86::NEG32r: |
4155 | case X86::NEG64r: |
4156 | return X86::COND_AE; |
4157 | case X86::LZCNT16rr: |
4158 | case X86::LZCNT32rr: |
4159 | case X86::LZCNT64rr: |
4160 | return X86::COND_B; |
4161 | case X86::POPCNT16rr: |
4162 | case X86::POPCNT32rr: |
4163 | case X86::POPCNT64rr: |
4164 | return X86::COND_E; |
4165 | case X86::TZCNT16rr: |
4166 | case X86::TZCNT32rr: |
4167 | case X86::TZCNT64rr: |
4168 | return X86::COND_B; |
4169 | case X86::BSF16rr: |
4170 | case X86::BSF32rr: |
4171 | case X86::BSF64rr: |
4172 | case X86::BSR16rr: |
4173 | case X86::BSR32rr: |
4174 | case X86::BSR64rr: |
4175 | return X86::COND_E; |
4176 | case X86::BLSI32rr: |
4177 | case X86::BLSI64rr: |
4178 | return X86::COND_AE; |
4179 | case X86::BLSR32rr: |
4180 | case X86::BLSR64rr: |
4181 | case X86::BLSMSK32rr: |
4182 | case X86::BLSMSK64rr: |
4183 | return X86::COND_B; |
4184 | |
4185 | } |
4186 | } |
4187 | |
4188 | /// Check if there exists an earlier instruction that operates on the same |
4189 | /// source operands and sets EFLAGS in the same way as this compare; if so, |
4190 | /// remove the compare. |
4191 | bool X86InstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg, |
4192 | Register SrcReg2, int CmpMask, |
4193 | int CmpValue, |
4194 | const MachineRegisterInfo *MRI) const { |
4195 | |
4196 | switch (CmpInstr.getOpcode()) { |
4197 | default: break; |
4198 | case X86::SUB64ri32: |
4199 | case X86::SUB64ri8: |
4200 | case X86::SUB32ri: |
4201 | case X86::SUB32ri8: |
4202 | case X86::SUB16ri: |
4203 | case X86::SUB16ri8: |
4204 | case X86::SUB8ri: |
4205 | case X86::SUB64rm: |
4206 | case X86::SUB32rm: |
4207 | case X86::SUB16rm: |
4208 | case X86::SUB8rm: |
4209 | case X86::SUB64rr: |
4210 | case X86::SUB32rr: |
4211 | case X86::SUB16rr: |
4212 | case X86::SUB8rr: { |
4213 | if (!MRI->use_nodbg_empty(CmpInstr.getOperand(0).getReg())) |
4214 | return false; |
4215 | |
4216 | unsigned NewOpcode = 0; |
4217 | switch (CmpInstr.getOpcode()) { |
4218 | default: llvm_unreachable("Unreachable!"); |
4219 | case X86::SUB64rm: NewOpcode = X86::CMP64rm; break; |
4220 | case X86::SUB32rm: NewOpcode = X86::CMP32rm; break; |
4221 | case X86::SUB16rm: NewOpcode = X86::CMP16rm; break; |
4222 | case X86::SUB8rm: NewOpcode = X86::CMP8rm; break; |
4223 | case X86::SUB64rr: NewOpcode = X86::CMP64rr; break; |
4224 | case X86::SUB32rr: NewOpcode = X86::CMP32rr; break; |
4225 | case X86::SUB16rr: NewOpcode = X86::CMP16rr; break; |
4226 | case X86::SUB8rr: NewOpcode = X86::CMP8rr; break; |
4227 | case X86::SUB64ri32: NewOpcode = X86::CMP64ri32; break; |
4228 | case X86::SUB64ri8: NewOpcode = X86::CMP64ri8; break; |
4229 | case X86::SUB32ri: NewOpcode = X86::CMP32ri; break; |
4230 | case X86::SUB32ri8: NewOpcode = X86::CMP32ri8; break; |
4231 | case X86::SUB16ri: NewOpcode = X86::CMP16ri; break; |
4232 | case X86::SUB16ri8: NewOpcode = X86::CMP16ri8; break; |
4233 | case X86::SUB8ri: NewOpcode = X86::CMP8ri; break; |
4234 | } |
4235 | CmpInstr.setDesc(get(NewOpcode)); |
4236 | CmpInstr.RemoveOperand(0); |
4237 | |
4238 | CmpInstr.dropDebugNumber(); |
4239 | |
4240 | if (NewOpcode == X86::CMP64rm || NewOpcode == X86::CMP32rm || |
4241 | NewOpcode == X86::CMP16rm || NewOpcode == X86::CMP8rm) |
4242 | return false; |
4243 | } |
4244 | } |
4245 | |
4246 | |
4247 | MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg); |
4248 | if (!MI) return false; |
4249 | |
4250 | |
4251 | MachineBasicBlock::iterator I = CmpInstr, Def = MI; |
4252 | |
4253 | // If we are comparing against zero, check whether we can use MI to update |
4254 | // EFLAGS. If MI is not in the same BB as CmpInstr, do not optimize. |
4255 | bool IsCmpZero = (CmpMask != 0 && CmpValue == 0); |
4256 | if (IsCmpZero && MI->getParent() != CmpInstr.getParent()) |
4257 | return false; |
4258 | |
4259 | |
4260 | |
4261 | |
4262 | bool ShouldUpdateCC = false; |
4263 | bool NoSignFlag = false; |
4264 | bool ClearsOverflowFlag = false; |
4265 | X86::CondCode NewCC = X86::COND_INVALID; |
4266 | if (IsCmpZero && !isDefConvertible(*MI, NoSignFlag, ClearsOverflowFlag)) { |
4267 | |
4268 | |
4269 | for (MachineBasicBlock::iterator J = MI;; ++J) { |
4270 | |
4271 | NewCC = isUseDefConvertible(*J); |
4272 | if (NewCC != X86::COND_INVALID && J->getOperand(1).isReg() && |
4273 | J->getOperand(1).getReg() == SrcReg) { |
4274 | assert(J->definesRegister(X86::EFLAGS) && "Must be an EFLAGS def!"); |
4275 | ShouldUpdateCC = true; |
4276 | |
4277 | |
4278 | Def = J; |
4279 | MI = &*Def; |
4280 | break; |
4281 | } |
4282 | |
4283 | if (J == I) |
4284 | return false; |
4285 | } |
4286 | } |
4287 | |
4288 | |
4289 | |
4290 | MachineInstr *Sub = nullptr; |
4291 | const TargetRegisterInfo *TRI = &getRegisterInfo(); |
4292 | |
4293 | |
4294 | |
4295 | |
4296 | |
4297 | |
4298 | MachineBasicBlock::reverse_iterator |
4299 | RI = ++I.getReverse(), |
4300 | RE = CmpInstr.getParent() == MI->getParent() |
4301 | ? Def.getReverse() |
4302 | : CmpInstr.getParent()->rend(); |
4303 | MachineInstr *Movr0Inst = nullptr; |
4304 | for (; RI != RE; ++RI) { |
4305 | MachineInstr &Instr = *RI; |
4306 | |
4307 | if (!IsCmpZero && isRedundantFlagInstr(CmpInstr, SrcReg, SrcReg2, CmpMask, |
4308 | CmpValue, Instr)) { |
4309 | Sub = &Instr; |
4310 | break; |
4311 | } |
4312 | |
4313 | if (Instr.modifiesRegister(X86::EFLAGS, TRI) || |
4314 | Instr.readsRegister(X86::EFLAGS, TRI)) { |
4315 | |
4316 | |
4317 | |
4318 | |
4319 | |
4320 | if (!Movr0Inst && Instr.getOpcode() == X86::MOV32r0 && |
4321 | Instr.registerDefIsDead(X86::EFLAGS, TRI)) { |
4322 | Movr0Inst = &Instr; |
4323 | continue; |
4324 | } |
4325 | |
4326 | |
4327 | return false; |
4328 | } |
4329 | } |
4330 | |
4331 | |
4332 | if (!IsCmpZero && !Sub) |
4333 | return false; |
4334 | |
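     | // IsSwapped: the candidate SUB computes SrcReg2 - SrcReg, i.e. the compare |
     | // operands in reverse order, so condition codes on its users must be swapped. |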
4335 | bool IsSwapped = |
4336 | (SrcReg2 != 0 && Sub && Sub->getOperand(1).getReg() == SrcReg2 && |
4337 | Sub->getOperand(2).getReg() == SrcReg); |
4338 | |
4339 | // Scan forward from the instruction after CmpInstr for uses of EFLAGS. |
4340 | // It is safe to remove CmpInstr if EFLAGS is redefined or killed. |
4341 | // If we are done with the basic block, we need to check whether EFLAGS |
4342 | // is live-out. |
4343 | bool IsSafe = false; |
4344 | SmallVector<std::pair<MachineInstr*, X86::CondCode>, 4> OpsToUpdate; |
4345 | MachineBasicBlock::iterator E = CmpInstr.getParent()->end(); |
4346 | for (++I; I != E; ++I) { |
4347 | const MachineInstr &Instr = *I; |
4348 | bool ModifyEFLAGS = Instr.modifiesRegister(X86::EFLAGS, TRI); |
4349 | bool UseEFLAGS = Instr.readsRegister(X86::EFLAGS, TRI); |
4350 | |
4351 | if (!UseEFLAGS && ModifyEFLAGS) { |
4352 | |
4353 | IsSafe = true; |
4354 | break; |
4355 | } |
4356 | if (!UseEFLAGS && !ModifyEFLAGS) |
4357 | continue; |
4358 | |
4359 | |
4360 | X86::CondCode OldCC = X86::COND_INVALID; |
4361 | if (IsCmpZero || IsSwapped) { |
4362 | |
4363 | if (Instr.isBranch()) |
4364 | OldCC = X86::getCondFromBranch(Instr); |
4365 | else { |
4366 | OldCC = X86::getCondFromSETCC(Instr); |
4367 | if (OldCC == X86::COND_INVALID) |
4368 | OldCC = X86::getCondFromCMov(Instr); |
4369 | } |
4370 | if (OldCC == X86::COND_INVALID) return false; |
4371 | } |
4372 | X86::CondCode ReplacementCC = X86::COND_INVALID; |
4373 | if (IsCmpZero) { |
4374 | switch (OldCC) { |
4375 | default: break; |
4376 | case X86::COND_A: case X86::COND_AE: |
4377 | case X86::COND_B: case X86::COND_BE: |
4378 | |
4379 | return false; |
4380 | case X86::COND_G: case X86::COND_GE: |
4381 | case X86::COND_L: case X86::COND_LE: |
4382 | case X86::COND_O: case X86::COND_NO: |
4383 | |
4384 | if (!ClearsOverflowFlag) |
4385 | return false; |
4386 | break; |
4387 | case X86::COND_S: case X86::COND_NS: |
4388 | |
4389 | |
4390 | if (NoSignFlag) |
4391 | return false; |
4392 | break; |
4393 | } |
4394 | |
4395 | |
4396 | |
4397 | if (ShouldUpdateCC) |
4398 | switch (OldCC) { |
4399 | default: |
4400 | return false; |
4401 | case X86::COND_E: |
4402 | ReplacementCC = NewCC; |
4403 | break; |
4404 | case X86::COND_NE: |
4405 | ReplacementCC = GetOppositeBranchCondition(NewCC); |
4406 | break; |
4407 | } |
4408 | } else if (IsSwapped) { |
4409 | |
4410 | // If we have SUB(r1, r2) and CMP(r2, r1), the condition code on every |
4411 | // user of the flags needs to be swapped. |
4412 | ReplacementCC = getSwappedCondition(OldCC); |
4413 | if (ReplacementCC == X86::COND_INVALID) return false; |
4414 | } |
4415 | |
4416 | if ((ShouldUpdateCC || IsSwapped) && ReplacementCC != OldCC) { |
4417 | |
4418 | // Record the instruction so its condition code can be rewritten once we |
4419 | // know it is safe to remove CmpInstr. |
4420 | OpsToUpdate.push_back(std::make_pair(&*I, ReplacementCC)); |
4421 | } |
4422 | if (ModifyEFLAGS || Instr.killsRegister(X86::EFLAGS, TRI)) { |
4423 | |
4424 | IsSafe = true; |
4425 | break; |
4426 | } |
4427 | } |
4428 | |
4429 | // If EFLAGS is not killed nor re-defined, we should check whether it is |
4430 | // live-out. If it is live-out, do not optimize. |
4431 | if ((IsCmpZero || IsSwapped) && !IsSafe) { |
4432 | MachineBasicBlock *MBB = CmpInstr.getParent(); |
4433 | for (MachineBasicBlock *Successor : MBB->successors()) |
4434 | if (Successor->isLiveIn(X86::EFLAGS)) |
4435 | return false; |
4436 | } |
4437 | |
4438 | |
4439 | Sub = IsCmpZero ? MI : Sub; |
4440 | |
4441 | if (Movr0Inst) { |
4442 | |
4443 | Def = Sub; |
4444 | MachineBasicBlock::reverse_iterator InsertI = Def.getReverse(), |
4445 | InsertE = Sub->getParent()->rend(); |
4446 | for (; InsertI != InsertE; ++InsertI) { |
4447 | MachineInstr *Instr = &*InsertI; |
4448 | if (!Instr->readsRegister(X86::EFLAGS, TRI) && |
4449 | Instr->modifiesRegister(X86::EFLAGS, TRI)) { |
4450 | Sub->getParent()->remove(Movr0Inst); |
4451 | Instr->getParent()->insert(MachineBasicBlock::iterator(Instr), |
4452 | Movr0Inst); |
4453 | break; |
4454 | } |
4455 | } |
4456 | if (InsertI == InsertE) |
4457 | return false; |
4458 | } |
4459 | |
4460 | |
4461 | MachineOperand *FlagDef = Sub->findRegisterDefOperand(X86::EFLAGS); |
4462 | assert(FlagDef && "Unable to locate a def EFLAGS operand"); |
4463 | FlagDef->setIsDead(false); |
4464 | |
4465 | CmpInstr.eraseFromParent(); |
4466 | |
4467 | |
4468 | for (auto &Op : OpsToUpdate) { |
4469 | Op.first->getOperand(Op.first->getDesc().getNumOperands() - 1) |
4470 | .setImm(Op.second); |
4471 | } |
4472 | return true; |
4473 | } |
4474 | |
4475 | /// Try to remove the load by folding it to a register operand at the use. |
4476 | /// We fold the load instructions if the load defines a virtual register, |
4477 | /// the virtual register is used once in the same BB, and the instructions |
4478 | /// in-between do not load or store, and have no side effects. |
4479 | MachineInstr *X86InstrInfo::optimizeLoadInstr(MachineInstr &MI, |
4480 | const MachineRegisterInfo *MRI, |
4481 | Register &FoldAsLoadDefReg, |
4482 | MachineInstr *&DefMI) const { |
4483 | |
4484 | DefMI = MRI->getVRegDef(FoldAsLoadDefReg); |
4485 | assert(DefMI); |
4486 | bool SawStore = false; |
4487 | if (!DefMI->isSafeToMove(nullptr, SawStore)) |
4488 | return nullptr; |
4489 | |
4490 | |
4491 | SmallVector<unsigned, 1> SrcOperandIds; |
4492 | for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { |
4493 | MachineOperand &MO = MI.getOperand(i); |
4494 | if (!MO.isReg()) |
4495 | continue; |
4496 | Register Reg = MO.getReg(); |
4497 | if (Reg != FoldAsLoadDefReg) |
4498 | continue; |
4499 | |
4500 | if (MO.getSubReg() || MO.isDef()) |
4501 | return nullptr; |
4502 | SrcOperandIds.push_back(i); |
4503 | } |
4504 | if (SrcOperandIds.empty()) |
4505 | return nullptr; |
4506 | |
4507 | |
4508 | if (MachineInstr *FoldMI = foldMemoryOperand(MI, SrcOperandIds, *DefMI)) { |
4509 | FoldAsLoadDefReg = 0; |
4510 | return FoldMI; |
4511 | } |
4512 | |
4513 | return nullptr; |
4514 | } |
4515 | |
4516 | |
4517 | /// Expand a single-def pseudo instruction to a two-addr instruction with |
4518 | /// two undef reads of the register being defined. This is used for mapping: |
4519 | ///   %xmm4 = V_SET0 |
4520 | /// to: |
4521 | ///   %xmm4 = PXORrr undef %xmm4, undef %xmm4 |
4522 | /// |
4523 | static bool Expand2AddrUndef(MachineInstrBuilder &MIB, |
4524 | const MCInstrDesc &Desc) { |
4525 | assert(Desc.getNumOperands() == 3 && "Expected two-addr instruction."); |
4526 | Register Reg = MIB.getReg(0); |
4527 | MIB->setDesc(Desc); |
4528 | |
4529 | |
4530 | |
4531 | MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef); |
4532 | |
4533 | assert(MIB.getReg(1) == Reg && |
4534 | MIB.getReg(2) == Reg && "Misplaced operand"); |
4535 | return true; |
4536 | } |
4537 | |
4538 | /// Expand a single-def pseudo instruction to a two-addr instruction with |
4539 | /// two k0 reads. This is used for mapping: |
4540 | ///   %k4 = K_SET1 |
4541 | /// to: |
4542 | ///   %k4 = KXNORrr k0, k0 |
4543 | /// |
4544 | static bool Expand2AddrKreg(MachineInstrBuilder &MIB, const MCInstrDesc &Desc, |
4545 | Register Reg) { |
4546 | assert(Desc.getNumOperands() == 3 && "Expected two-addr instruction."); |
4547 | MIB->setDesc(Desc); |
4548 | MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef); |
4549 | return true; |
4550 | } |
4551 | |
4552 | static bool expandMOV32r1(MachineInstrBuilder &MIB, const TargetInstrInfo &TII, |
4553 | bool MinusOne) { |
4554 | MachineBasicBlock &MBB = *MIB->getParent(); |
4555 | const DebugLoc &DL = MIB->getDebugLoc(); |
4556 | Register Reg = MIB.getReg(0); |
4557 | |
4558 | // Insert the XOR. |
4559 | BuildMI(MBB, MIB.getInstr(), DL, TII.get(X86::XOR32rr), Reg) |
4560 | .addReg(Reg, RegState::Undef) |
4561 | .addReg(Reg, RegState::Undef); |
4562 | |
4563 | |
4564 | // Turn the pseudo into an INC or DEC. |
4565 | MIB.addReg(Reg); |
4566 | |
4567 | return true; |
4568 | } |
4569 | |
4570 | static bool ExpandMOVImmSExti8(MachineInstrBuilder &MIB, |
4571 | const TargetInstrInfo &TII, |
4572 | const X86Subtarget &Subtarget) { |
4573 | MachineBasicBlock &MBB = *MIB->getParent(); |
4574 | const DebugLoc &DL = MIB->getDebugLoc(); |
4575 | int64_t Imm = MIB->getOperand(1).getImm(); |
4576 | assert(Imm != 0 && "Using push/pop for 0 is not efficient."); |
4577 | MachineBasicBlock::iterator I = MIB.getInstr(); |
4578 | |
4579 | int StackAdjustment; |
4580 | |
4581 | if (Subtarget.is64Bit()) { |
4582 | assert(MIB->getOpcode() == X86::MOV64ImmSExti8 || |
4583 | MIB->getOpcode() == X86::MOV32ImmSExti8); |
4584 | |
4585 | // Can't use the push/pop lowering if the function uses the red zone. |
4586 | X86MachineFunctionInfo *X86FI = |
4587 | MBB.getParent()->getInfo<X86MachineFunctionInfo>(); |
4588 | if (X86FI->getUsesRedZone()) { |
4589 | MIB->setDesc(TII.get(MIB->getOpcode() == |
4590 | X86::MOV32ImmSExti8 ? X86::MOV32ri : X86::MOV64ri)); |
4591 | return true; |
4592 | } |
4593 | |
4594 | // 64-bit mode doesn't have a 32-bit push/pop, so use 64-bit operations |
4595 | // and widen the register if necessary. |
4596 | StackAdjustment = 8; |
4597 | BuildMI(MBB, I, DL, TII.get(X86::PUSH64i8)).addImm(Imm); |
4598 | MIB->setDesc(TII.get(X86::POP64r)); |
4599 | MIB->getOperand(0) |
4600 | .setReg(getX86SubSuperRegister(MIB.getReg(0), 64)); |
4601 | } else { |
4602 | assert(MIB->getOpcode() == X86::MOV32ImmSExti8); |
4603 | StackAdjustment = 4; |
4604 | BuildMI(MBB, I, DL, TII.get(X86::PUSH32i8)).addImm(Imm); |
4605 | MIB->setDesc(TII.get(X86::POP32r)); |
4606 | } |
4607 | MIB->RemoveOperand(1); |
4608 | MIB->addImplicitDefUseOperands(*MBB.getParent()); |
4609 | |
4610 | |
4611 | MachineFunction &MF = *MBB.getParent(); |
4612 | const X86FrameLowering *TFL = Subtarget.getFrameLowering(); |
4613 | bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI(); |
4614 | bool NeedsDwarfCFI = !IsWin64Prologue && MF.needsFrameMoves(); |
4615 | bool EmitCFI = !TFL->hasFP(MF) && NeedsDwarfCFI; |
4616 | if (EmitCFI) { |
4617 | TFL->BuildCFI(MBB, I, DL, |
4618 | MCCFIInstruction::createAdjustCfaOffset(nullptr, StackAdjustment)); |
4619 | TFL->BuildCFI(MBB, std::next(I), DL, |
4620 | MCCFIInstruction::createAdjustCfaOffset(nullptr, -StackAdjustment)); |
4621 | } |
4622 | |
4623 | return true; |
4624 | } |
4625 | |
4626 | // Expand LOAD_STACK_GUARD: load the guard's address via a RIP-relative |
4627 | // GOT entry, then load the guard value through that pointer. |
4628 | static void expandLoadStackGuard(MachineInstrBuilder &MIB, |
4629 | const TargetInstrInfo &TII) { |
4630 | MachineBasicBlock &MBB = *MIB->getParent(); |
4631 | const DebugLoc &DL = MIB->getDebugLoc(); |
4632 | Register Reg = MIB.getReg(0); |
4633 | const GlobalValue *GV = |
4634 | cast<GlobalValue>((*MIB->memoperands_begin())->getValue()); |
4635 | auto Flags = MachineMemOperand::MOLoad | |
4636 | MachineMemOperand::MODereferenceable | |
4637 | MachineMemOperand::MOInvariant; |
4638 | MachineMemOperand *MMO = MBB.getParent()->getMachineMemOperand( |
4639 | MachinePointerInfo::getGOT(*MBB.getParent()), Flags, 8, Align(8)); |
4640 | MachineBasicBlock::iterator I = MIB.getInstr(); |
4641 | |
4642 | BuildMI(MBB, I, DL, TII.get(X86::MOV64rm), Reg).addReg(X86::RIP).addImm(1) |
4643 | .addReg(0).addGlobalAddress(GV, 0, X86II::MO_GOTPCREL).addReg(0) |
4644 | .addMemOperand(MMO); |
4645 | MIB->setDebugLoc(DL); |
4646 | MIB->setDesc(TII.get(X86::MOV64rm)); |
4647 | MIB.addReg(Reg, RegState::Kill).addImm(1).addReg(0).addImm(0).addReg(0); |
4648 | } |
4649 | |
4650 | static bool expandXorFP(MachineInstrBuilder &MIB, const TargetInstrInfo &TII) { |
4651 | MachineBasicBlock &MBB = *MIB->getParent(); |
4652 | MachineFunction &MF = *MBB.getParent(); |
4653 | const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>(); |
4654 | const X86RegisterInfo *TRI = Subtarget.getRegisterInfo(); |
4655 | unsigned XorOp = |
4656 | MIB->getOpcode() == X86::XOR64_FP ? X86::XOR64rr : X86::XOR32rr; |
4657 | MIB->setDesc(TII.get(XorOp)); |
4658 | MIB.addReg(TRI->getFrameRegister(MF), RegState::Undef); |
4659 | return true; |
4660 | } |
4661 | |
4662 | // Handle loads of 128/256-bit registers when AVX512 is available but VLX |
4663 | // is not: low registers use the ordinary VEX-encoded load, while extended |
4664 | // registers are loaded via a broadcast into the containing ZMM register. |
4665 | static bool expandNOVLXLoad(MachineInstrBuilder &MIB, |
4666 | const TargetRegisterInfo *TRI, |
4667 | const MCInstrDesc &LoadDesc, |
4668 | const MCInstrDesc &BroadcastDesc, |
4669 | unsigned SubIdx) { |
4670 | Register DestReg = MIB.getReg(0); |
4671 | |
4672 | if (TRI->getEncodingValue(DestReg) < 16) { |
4673 | |
4674 | MIB->setDesc(LoadDesc); |
4675 | } else { |
4676 | |
4677 | MIB->setDesc(BroadcastDesc); |
4678 | |
4679 | DestReg = TRI->getMatchingSuperReg(DestReg, SubIdx, &X86::VR512RegClass); |
4680 | MIB->getOperand(0).setReg(DestReg); |
4681 | } |
4682 | return true; |
4683 | } |
4684 | |
4685 | |
4686 | |
4687 | |
4688 | static bool expandNOVLXStore(MachineInstrBuilder &MIB, |
4689 | const TargetRegisterInfo *TRI, |
4690 | const MCInstrDesc &StoreDesc, |
4691 | const MCInstrDesc &ExtractDesc, |
4692 | unsigned SubIdx) { |
4693 | Register SrcReg = MIB.getReg(X86::AddrNumOperands); |
4694 | |
4695 | if (TRI->getEncodingValue(SrcReg) < 16) { |
4696 | |
4697 | MIB->setDesc(StoreDesc); |
4698 | } else { |
4699 | |
4700 | MIB->setDesc(ExtractDesc); |
4701 | |
4702 | SrcReg = TRI->getMatchingSuperReg(SrcReg, SubIdx, &X86::VR512RegClass); |
4703 | MIB->getOperand(X86::AddrNumOperands).setReg(SrcReg); |
4704 | MIB.addImm(0x0); |
4705 | } |
4706 | |
4707 | return true; |
4708 | } |
4709 | |
4710 | static bool expandSHXDROT(MachineInstrBuilder &MIB, const MCInstrDesc &Desc) { |
4711 | MIB->setDesc(Desc); |
4712 | int64_t ShiftAmt = MIB->getOperand(2).getImm(); |
4713 | |
4714 | MIB->RemoveOperand(2); |
4715 | |
4716 | MIB.addReg(MIB.getReg(1), |
4717 | getUndefRegState(MIB->getOperand(1).isUndef())); |
4718 | |
4719 | MIB.addImm(ShiftAmt); |
4720 | return true; |
4721 | } |
4722 | |
4723 | bool X86InstrInfo::expandPostRAPseudo(MachineInstr &MI) const { |
4724 | bool HasAVX = Subtarget.hasAVX(); |
4725 | MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI); |
4726 | switch (MI.getOpcode()) { |
4727 | case X86::MOV32r0: |
4728 | return Expand2AddrUndef(MIB, get(X86::XOR32rr)); |
4729 | case X86::MOV32r1: |
4730 | return expandMOV32r1(MIB, *this, false); |
4731 | case X86::MOV32r_1: |
4732 | return expandMOV32r1(MIB, *this, true); |
4733 | case X86::MOV32ImmSExti8: |
4734 | case X86::MOV64ImmSExti8: |
4735 | return ExpandMOVImmSExti8(MIB, *this, Subtarget); |
4736 | case X86::SETB_C32r: |
4737 | return Expand2AddrUndef(MIB, get(X86::SBB32rr)); |
4738 | case X86::SETB_C64r: |
4739 | return Expand2AddrUndef(MIB, get(X86::SBB64rr)); |
4740 | case X86::MMX_SET0: |
4741 | return Expand2AddrUndef(MIB, get(X86::MMX_PXORirr)); |
4742 | case X86::V_SET0: |
4743 | case X86::FsFLD0SS: |
4744 | case X86::FsFLD0SD: |
4745 | case X86::FsFLD0F128: |
4746 | return Expand2AddrUndef(MIB, get(HasAVX ? X86::VXORPSrr : X86::XORPSrr)); |
4747 | case X86::AVX_SET0: { |
4748 | assert(HasAVX && "AVX not supported"); |
4749 | const TargetRegisterInfo *TRI = &getRegisterInfo(); |
4750 | Register SrcReg = MIB.getReg(0); |
4751 | Register XReg = TRI->getSubReg(SrcReg, X86::sub_xmm); |
4752 | MIB->getOperand(0).setReg(XReg); |
4753 | Expand2AddrUndef(MIB, get(X86::VXORPSrr)); |
4754 | MIB.addReg(SrcReg, RegState::ImplicitDefine); |
4755 | return true; |
4756 | } |
4757 | case X86::AVX512_128_SET0: |
4758 | case X86::AVX512_FsFLD0SS: |
4759 | case X86::AVX512_FsFLD0SD: |
4760 | case X86::AVX512_FsFLD0F128: { |
4761 | bool HasVLX = Subtarget.hasVLX(); |
4762 | Register SrcReg = MIB.getReg(0); |
4763 | const TargetRegisterInfo *TRI = &getRegisterInfo(); |
4764 | if (HasVLX || TRI->getEncodingValue(SrcReg) < 16) |
4765 | return Expand2AddrUndef(MIB, |
4766 | get(HasVLX ? X86::VPXORDZ128rr : X86::VXORPSrr)); |
4767 | |
4768 | SrcReg = |
4769 | TRI->getMatchingSuperReg(SrcReg, X86::sub_xmm, &X86::VR512RegClass); |
4770 | MIB->getOperand(0).setReg(SrcReg); |
4771 | return Expand2AddrUndef(MIB, get(X86::VPXORDZrr)); |
4772 | } |
4773 | case X86::AVX512_256_SET0: |
4774 | case X86::AVX512_512_SET0: { |
4775 | bool HasVLX = Subtarget.hasVLX(); |
4776 | Register SrcReg = MIB.getReg(0); |
4777 | const TargetRegisterInfo *TRI = &getRegisterInfo(); |
4778 | if (HasVLX || TRI->getEncodingValue(SrcReg) < 16) { |
4779 | Register XReg = TRI->getSubReg(SrcReg, X86::sub_xmm); |
4780 | MIB->getOperand(0).setReg(XReg); |
4781 | Expand2AddrUndef(MIB, |
4782 | get(HasVLX ? X86::VPXORDZ128rr : X86::VXORPSrr)); |
4783 | MIB.addReg(SrcReg, RegState::ImplicitDefine); |
4784 | return true; |
4785 | } |
4786 | if (MI.getOpcode() == X86::AVX512_256_SET0) { |
4787 | |
4788 | unsigned ZReg = |
4789 | TRI->getMatchingSuperReg(SrcReg, X86::sub_ymm, &X86::VR512RegClass); |
4790 | MIB->getOperand(0).setReg(ZReg); |
4791 | } |
4792 | return Expand2AddrUndef(MIB, get(X86::VPXORDZrr)); |
4793 | } |
4794 | case X86::V_SETALLONES: |
4795 | return Expand2AddrUndef(MIB, get(HasAVX ? X86::VPCMPEQDrr : X86::PCMPEQDrr)); |
4796 | case X86::AVX2_SETALLONES: |
4797 | return Expand2AddrUndef(MIB, get(X86::VPCMPEQDYrr)); |
4798 | case X86::AVX1_SETALLONES: { |
4799 | Register Reg = MIB.getReg(0); |
4800 | // VCMPPSYrri with immediate 0xf (TRUE) sets every bit of the destination. |
4801 | MIB->setDesc(get(X86::VCMPPSYrri)); |
4802 | MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef).addImm(0xf); |
4803 | return true; |
4804 | } |
4805 | case X86::AVX512_512_SETALLONES: { |
4806 | Register Reg = MIB.getReg(0); |
4807 | MIB->setDesc(get(X86::VPTERNLOGDZrri)); |
4808 | // VPTERNLOGD takes three register inputs plus an immediate truth table; |
4809 | // 0xff yields all ones regardless of the inputs. |
4810 | MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef) |
4811 | .addReg(Reg, RegState::Undef).addImm(0xff); |
4812 | return true; |
4813 | } |
4814 | case X86::AVX512_512_SEXT_MASK_32: |
4815 | case X86::AVX512_512_SEXT_MASK_64: { |
4816 | Register Reg = MIB.getReg(0); |
4817 | Register MaskReg = MIB.getReg(1); |
4818 | unsigned MaskState = getRegState(MIB->getOperand(1)); |
4819 | unsigned Opc = (MI.getOpcode() == X86::AVX512_512_SEXT_MASK_64) ? |
4820 | X86::VPTERNLOGQZrrikz : X86::VPTERNLOGDZrrikz; |
4821 | MI.RemoveOperand(1); |
4822 | MIB->setDesc(get(Opc)); |
4823 | // VPTERNLOG with truth table 0xff produces all ones; with zeroing masking |
4824 | // this sign-extends each mask bit across the corresponding element. |
4825 | MIB.addReg(Reg, RegState::Undef).addReg(MaskReg, MaskState) |
4826 | .addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef).addImm(0xff); |
4827 | return true; |
4828 | } |
4829 | case X86::VMOVAPSZ128rm_NOVLX: |
4830 | return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVAPSrm), |
4831 | get(X86::VBROADCASTF32X4rm), X86::sub_xmm); |
4832 | case X86::VMOVUPSZ128rm_NOVLX: |
4833 | return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVUPSrm), |
4834 | get(X86::VBROADCASTF32X4rm), X86::sub_xmm); |
4835 | case X86::VMOVAPSZ256rm_NOVLX: |
4836 | return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVAPSYrm), |
4837 | get(X86::VBROADCASTF64X4rm), X86::sub_ymm); |
4838 | case X86::VMOVUPSZ256rm_NOVLX: |
4839 | return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVUPSYrm), |
4840 | get(X86::VBROADCASTF64X4rm), X86::sub_ymm); |
4841 | case X86::VMOVAPSZ128mr_NOVLX: |
4842 | return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVAPSmr), |
4843 | get(X86::VEXTRACTF32x4Zmr), X86::sub_xmm); |
4844 | case X86::VMOVUPSZ128mr_NOVLX: |
4845 | return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVUPSmr), |
4846 | get(X86::VEXTRACTF32x4Zmr), X86::sub_xmm); |
4847 | case X86::VMOVAPSZ256mr_NOVLX: |
4848 | return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVAPSYmr), |
4849 | get(X86::VEXTRACTF64x4Zmr), X86::sub_ymm); |
4850 | case X86::VMOVUPSZ256mr_NOVLX: |
4851 | return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVUPSYmr), |
4852 | get(X86::VEXTRACTF64x4Zmr), X86::sub_ymm); |
4853 | case X86::MOV32ri64: { |
4854 | Register Reg = MIB.getReg(0); |
4855 | Register Reg32 = RI.getSubReg(Reg, X86::sub_32bit); |
4856 | MI.setDesc(get(X86::MOV32ri)); |
4857 | MIB->getOperand(0).setReg(Reg32); |
4858 | MIB.addReg(Reg, RegState::ImplicitDefine); |
4859 | return true; |
4860 | } |
4861 | // Mask-register set-to-zero / set-to-ones pseudos expand to kxor / kxnor of |
4862 | // %k0 with itself. The hardware does not treat these as dependency-breaking |
4863 | // idioms, so a fixed source is read; %k0 is presumably the cheapest choice |
4864 | // because it can never be used as a write mask. |
4865 | |
4866 | |
4867 | |
4868 | |
4869 | case X86::KSET0W: return Expand2AddrKreg(MIB, get(X86::KXORWrr), X86::K0); |
4870 | case X86::KSET0D: return Expand2AddrKreg(MIB, get(X86::KXORDrr), X86::K0); |
4871 | case X86::KSET0Q: return Expand2AddrKreg(MIB, get(X86::KXORQrr), X86::K0); |
4872 | case X86::KSET1W: return Expand2AddrKreg(MIB, get(X86::KXNORWrr), X86::K0); |
4873 | case X86::KSET1D: return Expand2AddrKreg(MIB, get(X86::KXNORDrr), X86::K0); |
4874 | case X86::KSET1Q: return Expand2AddrKreg(MIB, get(X86::KXNORQrr), X86::K0); |
4875 | case TargetOpcode::LOAD_STACK_GUARD: |
4876 | expandLoadStackGuard(MIB, *this); |
4877 | return true; |
4878 | case X86::XOR64_FP: |
4879 | case X86::XOR32_FP: |
4880 | return expandXorFP(MIB, *this); |
4881 | case X86::SHLDROT32ri: return expandSHXDROT(MIB, get(X86::SHLD32rri8)); |
4882 | case X86::SHLDROT64ri: return expandSHXDROT(MIB, get(X86::SHLD64rri8)); |
4883 | case X86::SHRDROT32ri: return expandSHXDROT(MIB, get(X86::SHRD32rri8)); |
4884 | case X86::SHRDROT64ri: return expandSHXDROT(MIB, get(X86::SHRD64rri8)); |
4885 | case X86::ADD8rr_DB: MIB->setDesc(get(X86::OR8rr)); break; |
4886 | case X86::ADD16rr_DB: MIB->setDesc(get(X86::OR16rr)); break; |
4887 | case X86::ADD32rr_DB: MIB->setDesc(get(X86::OR32rr)); break; |
4888 | case X86::ADD64rr_DB: MIB->setDesc(get(X86::OR64rr)); break; |
4889 | case X86::ADD8ri_DB: MIB->setDesc(get(X86::OR8ri)); break; |
4890 | case X86::ADD16ri_DB: MIB->setDesc(get(X86::OR16ri)); break; |
4891 | case X86::ADD32ri_DB: MIB->setDesc(get(X86::OR32ri)); break; |
4892 | case X86::ADD64ri32_DB: MIB->setDesc(get(X86::OR64ri32)); break; |
4893 | case X86::ADD16ri8_DB: MIB->setDesc(get(X86::OR16ri8)); break; |
4894 | case X86::ADD32ri8_DB: MIB->setDesc(get(X86::OR32ri8)); break; |
4895 | case X86::ADD64ri8_DB: MIB->setDesc(get(X86::OR64ri8)); break; |
4896 | } |
4897 | return false; |
4898 | } |
4899 | |
4900 | |
4901 | |
4902 | |
4903 | |
4904 | |
4905 | |
4906 | |
4907 | |
4908 | |
4909 | |
4910 | /// Return true if the given opcode only writes part of its destination |
4911 | /// register, or carries a false output dependency (e.g. POPCNT/LZCNT/TZCNT on |
4912 | /// subtargets that report it), so that the scheduler should try to insert a |
4913 | /// dependency-breaking instruction or extra clearance in front of it. |
4914 | /// \p ForLoadFold is set when the query is made on behalf of memory-operand |
4915 | /// folding; some opcodes only matter in one of the two situations. |
4916 | static bool hasPartialRegUpdate(unsigned Opcode, |
4917 | const X86Subtarget &Subtarget, |
4918 | bool ForLoadFold = false) { |
4919 | switch (Opcode) { |
4920 | case X86::CVTSI2SSrr: |
4921 | case X86::CVTSI2SSrm: |
4922 | case X86::CVTSI642SSrr: |
4923 | case X86::CVTSI642SSrm: |
4924 | case X86::CVTSI2SDrr: |
4925 | case X86::CVTSI2SDrm: |
4926 | case X86::CVTSI642SDrr: |
4927 | case X86::CVTSI642SDrm: |
4928 | // These only write the low element of the XMM destination; folding a load |
4929 | // is still fine because the memory operand is the integer source. |
4930 | return !ForLoadFold; |
4931 | case X86::CVTSD2SSrr: |
4932 | case X86::CVTSD2SSrm: |
4933 | case X86::CVTSS2SDrr: |
4934 | case X86::CVTSS2SDrm: |
4935 | case X86::MOVHPDrm: |
4936 | case X86::MOVHPSrm: |
4937 | case X86::MOVLPDrm: |
4938 | case X86::MOVLPSrm: |
4939 | case X86::RCPSSr: |
4940 | case X86::RCPSSm: |
4941 | case X86::RCPSSr_Int: |
4942 | case X86::RCPSSm_Int: |
4943 | case X86::ROUNDSDr: |
4944 | case X86::ROUNDSDm: |
4945 | case X86::ROUNDSSr: |
4946 | case X86::ROUNDSSm: |
4947 | case X86::RSQRTSSr: |
4948 | case X86::RSQRTSSm: |
4949 | case X86::RSQRTSSr_Int: |
4950 | case X86::RSQRTSSm_Int: |
4951 | case X86::SQRTSSr: |
4952 | case X86::SQRTSSm: |
4953 | case X86::SQRTSSr_Int: |
4954 | case X86::SQRTSSm_Int: |
4955 | case X86::SQRTSDr: |
4956 | case X86::SQRTSDm: |
4957 | case X86::SQRTSDr_Int: |
4958 | case X86::SQRTSDm_Int: |
4959 | return true; |
4960 | // These GPR instructions have a false dependency on the destination register on some CPUs. |
4961 | case X86::POPCNT32rm: |
4962 | case X86::POPCNT32rr: |
4963 | case X86::POPCNT64rm: |
4964 | case X86::POPCNT64rr: |
4965 | return Subtarget.hasPOPCNTFalseDeps(); |
4966 | case X86::LZCNT32rm: |
4967 | case X86::LZCNT32rr: |
4968 | case X86::LZCNT64rm: |
4969 | case X86::LZCNT64rr: |
4970 | case X86::TZCNT32rm: |
4971 | case X86::TZCNT32rr: |
4972 | case X86::TZCNT64rm: |
4973 | case X86::TZCNT64rr: |
4974 | return Subtarget.hasLZCNTFalseDeps(); |
4975 | } |
4976 | |
4977 | return false; |
4978 | } |
4979 | |
4980 | /// Return the clearance (in instructions) wanted before \p MI, which only |
4981 | /// partially updates operand \p OpNum; 0 means no special scheduling is needed. |
4982 | unsigned X86InstrInfo::getPartialRegUpdateClearance( |
4983 | const MachineInstr &MI, unsigned OpNum, |
4984 | const TargetRegisterInfo *TRI) const { |
4985 | if (OpNum != 0 || !hasPartialRegUpdate(MI.getOpcode(), Subtarget)) |
4986 | return 0; |
4987 | |
4988 | // If MI itself reads the register, merging with the old value is intended. |
4989 | const MachineOperand &MO = MI.getOperand(0); |
4990 | Register Reg = MO.getReg(); |
4991 | if (Reg.isVirtual()) { |
4992 | if (MO.readsReg() || MI.readsVirtualRegister(Reg)) |
4993 | return 0; |
4994 | } else { |
4995 | if (MI.readsRegister(Reg, TRI)) |
4996 | return 0; |
4997 | } |
4998 | |
4999 | |
5000 | // Otherwise ask for clearance, or for a dependency-breaking instruction to |
5001 | // be inserted, in front of this partial register write. |
5002 | return PartialRegUpdateClearance; |
5003 | } |
5004 | |
5005 | /// Return true if operand \p OpNum of \p Opcode may be tagged undef and, when |
5006 | /// it is, reading the stale physical register can create a false dependency |
5007 | /// that BreakFalseDeps should break. \p ForLoadFold limits the answer to the |
5008 | /// cases that matter when folding a load into the instruction. |
5009 | static bool hasUndefRegUpdate(unsigned Opcode, unsigned OpNum, |
5010 | bool ForLoadFold = false) { |
5011 | |
5012 | switch (Opcode) { |
5013 | case X86::MMX_PUNPCKHBWirr: |
5014 | case X86::MMX_PUNPCKHWDirr: |
5015 | case X86::MMX_PUNPCKHDQirr: |
5016 | case X86::MMX_PUNPCKLBWirr: |
5017 | case X86::MMX_PUNPCKLWDirr: |
5018 | case X86::MMX_PUNPCKLDQirr: |
5019 | case X86::MOVHLPSrr: |
5020 | case X86::PACKSSWBrr: |
5021 | case X86::PACKUSWBrr: |
5022 | case X86::PACKSSDWrr: |
5023 | case X86::PACKUSDWrr: |
5024 | case X86::PUNPCKHBWrr: |
5025 | case X86::PUNPCKLBWrr: |
5026 | case X86::PUNPCKHWDrr: |
5027 | case X86::PUNPCKLWDrr: |
5028 | case X86::PUNPCKHDQrr: |
5029 | case X86::PUNPCKLDQrr: |
5030 | case X86::PUNPCKHQDQrr: |
5031 | case X86::PUNPCKLQDQrr: |
5032 | case X86::SHUFPDrri: |
5033 | case X86::SHUFPSrri: |
5034 | // These instructions are sometimes used with an undef second source. |
5035 | // Reporting operand 2 lets BreakFalseDeps assign it the same register as the |
5036 | // first source, avoiding a false dependency on an unrelated old value. |
5037 | // Operand 1 is tied to the destination in these SSE forms, so only operand 2 |
5038 | // can be freely reassigned. |
5039 | return OpNum == 2 && !ForLoadFold; |
5040 | |
5041 | case X86::VMOVLHPSrr: |
5042 | case X86::VMOVLHPSZrr: |
5043 | case X86::VPACKSSWBrr: |
5044 | case X86::VPACKUSWBrr: |
5045 | case X86::VPACKSSDWrr: |
5046 | case X86::VPACKUSDWrr: |
5047 | case X86::VPACKSSWBZ128rr: |
5048 | case X86::VPACKUSWBZ128rr: |
5049 | case X86::VPACKSSDWZ128rr: |
5050 | case X86::VPACKUSDWZ128rr: |
5051 | case X86::VPERM2F128rr: |
5052 | case X86::VPERM2I128rr: |
5053 | case X86::VSHUFF32X4Z256rri: |
5054 | case X86::VSHUFF32X4Zrri: |
5055 | case X86::VSHUFF64X2Z256rri: |
5056 | case X86::VSHUFF64X2Zrri: |
5057 | case X86::VSHUFI32X4Z256rri: |
5058 | case X86::VSHUFI32X4Zrri: |
5059 | case X86::VSHUFI64X2Z256rri: |
5060 | case X86::VSHUFI64X2Zrri: |
5061 | case X86::VPUNPCKHBWrr: |
5062 | case X86::VPUNPCKLBWrr: |
5063 | case X86::VPUNPCKHBWYrr: |
5064 | case X86::VPUNPCKLBWYrr: |
5065 | case X86::VPUNPCKHBWZ128rr: |
5066 | case X86::VPUNPCKLBWZ128rr: |
5067 | case X86::VPUNPCKHBWZ256rr: |
5068 | case X86::VPUNPCKLBWZ256rr: |
5069 | case X86::VPUNPCKHBWZrr: |
5070 | case X86::VPUNPCKLBWZrr: |
5071 | case X86::VPUNPCKHWDrr: |
5072 | case X86::VPUNPCKLWDrr: |
5073 | case X86::VPUNPCKHWDYrr: |
5074 | case X86::VPUNPCKLWDYrr: |
5075 | case X86::VPUNPCKHWDZ128rr: |
5076 | case X86::VPUNPCKLWDZ128rr: |
5077 | case X86::VPUNPCKHWDZ256rr: |
5078 | case X86::VPUNPCKLWDZ256rr: |
5079 | case X86::VPUNPCKHWDZrr: |
5080 | case X86::VPUNPCKLWDZrr: |
5081 | case X86::VPUNPCKHDQrr: |
5082 | case X86::VPUNPCKLDQrr: |
5083 | case X86::VPUNPCKHDQYrr: |
5084 | case X86::VPUNPCKLDQYrr: |
5085 | case X86::VPUNPCKHDQZ128rr: |
5086 | case X86::VPUNPCKLDQZ128rr: |
5087 | case X86::VPUNPCKHDQZ256rr: |
5088 | case X86::VPUNPCKLDQZ256rr: |
5089 | case X86::VPUNPCKHDQZrr: |
5090 | case X86::VPUNPCKLDQZrr: |
5091 | case X86::VPUNPCKHQDQrr: |
5092 | case X86::VPUNPCKLQDQrr: |
5093 | case X86::VPUNPCKHQDQYrr: |
5094 | case X86::VPUNPCKLQDQYrr: |
5095 | case X86::VPUNPCKHQDQZ128rr: |
5096 | case X86::VPUNPCKLQDQZ128rr: |
5097 | case X86::VPUNPCKHQDQZ256rr: |
5098 | case X86::VPUNPCKLQDQZ256rr: |
5099 | case X86::VPUNPCKHQDQZrr: |
5100 | case X86::VPUNPCKLQDQZrr: |
5101 | // The VEX/EVEX forms are not destructive, so either source may be the undef |
5102 | // one; report both so BreakFalseDeps can pick matching registers and avoid a |
5103 | // false dependency on whichever input is undef. |
5104 | return (OpNum == 1 || OpNum == 2) && !ForLoadFold; |
5105 | |
5106 | case X86::VCVTSI2SSrr: |
5107 | case X86::VCVTSI2SSrm: |
5108 | case X86::VCVTSI2SSrr_Int: |
5109 | case X86::VCVTSI2SSrm_Int: |
5110 | case X86::VCVTSI642SSrr: |
5111 | case X86::VCVTSI642SSrm: |
5112 | case X86::VCVTSI642SSrr_Int: |
5113 | case X86::VCVTSI642SSrm_Int: |
5114 | case X86::VCVTSI2SDrr: |
5115 | case X86::VCVTSI2SDrm: |
5116 | case X86::VCVTSI2SDrr_Int: |
5117 | case X86::VCVTSI2SDrm_Int: |
5118 | case X86::VCVTSI642SDrr: |
5119 | case X86::VCVTSI642SDrm: |
5120 | case X86::VCVTSI642SDrr_Int: |
5121 | case X86::VCVTSI642SDrm_Int: |
5122 | |
5123 | case X86::VCVTSI2SSZrr: |
5124 | case X86::VCVTSI2SSZrm: |
5125 | case X86::VCVTSI2SSZrr_Int: |
5126 | case X86::VCVTSI2SSZrrb_Int: |
5127 | case X86::VCVTSI2SSZrm_Int: |
5128 | case X86::VCVTSI642SSZrr: |
5129 | case X86::VCVTSI642SSZrm: |
5130 | case X86::VCVTSI642SSZrr_Int: |
5131 | case X86::VCVTSI642SSZrrb_Int: |
5132 | case X86::VCVTSI642SSZrm_Int: |
5133 | case X86::VCVTSI2SDZrr: |
5134 | case X86::VCVTSI2SDZrm: |
5135 | case X86::VCVTSI2SDZrr_Int: |
5136 | case X86::VCVTSI2SDZrm_Int: |
5137 | case X86::VCVTSI642SDZrr: |
5138 | case X86::VCVTSI642SDZrm: |
5139 | case X86::VCVTSI642SDZrr_Int: |
5140 | case X86::VCVTSI642SDZrrb_Int: |
5141 | case X86::VCVTSI642SDZrm_Int: |
5142 | case X86::VCVTUSI2SSZrr: |
5143 | case X86::VCVTUSI2SSZrm: |
5144 | case X86::VCVTUSI2SSZrr_Int: |
5145 | case X86::VCVTUSI2SSZrrb_Int: |
5146 | case X86::VCVTUSI2SSZrm_Int: |
5147 | case X86::VCVTUSI642SSZrr: |
5148 | case X86::VCVTUSI642SSZrm: |
5149 | case X86::VCVTUSI642SSZrr_Int: |
5150 | case X86::VCVTUSI642SSZrrb_Int: |
5151 | case X86::VCVTUSI642SSZrm_Int: |
5152 | case X86::VCVTUSI2SDZrr: |
5153 | case X86::VCVTUSI2SDZrm: |
5154 | case X86::VCVTUSI2SDZrr_Int: |
5155 | case X86::VCVTUSI2SDZrm_Int: |
5156 | case X86::VCVTUSI642SDZrr: |
5157 | case X86::VCVTUSI642SDZrm: |
5158 | case X86::VCVTUSI642SDZrr_Int: |
5159 | case X86::VCVTUSI642SDZrrb_Int: |
5160 | case X86::VCVTUSI642SDZrm_Int: |
5161 | // Only operand 1, the XMM register the scalar result is merged into, can be |
5162 | // undef here; the integer input does not contribute a false dependency. |
5163 | return OpNum == 1 && !ForLoadFold; |
5164 | case X86::VCVTSD2SSrr: |
5165 | case X86::VCVTSD2SSrm: |
5166 | case X86::VCVTSD2SSrr_Int: |
5167 | case X86::VCVTSD2SSrm_Int: |
5168 | case X86::VCVTSS2SDrr: |
5169 | case X86::VCVTSS2SDrm: |
5170 | case X86::VCVTSS2SDrr_Int: |
5171 | case X86::VCVTSS2SDrm_Int: |
5172 | case X86::VRCPSSr: |
5173 | case X86::VRCPSSr_Int: |
5174 | case X86::VRCPSSm: |
5175 | case X86::VRCPSSm_Int: |
5176 | case X86::VROUNDSDr: |
5177 | case X86::VROUNDSDm: |
5178 | case X86::VROUNDSDr_Int: |
5179 | case X86::VROUNDSDm_Int: |
5180 | case X86::VROUNDSSr: |
5181 | case X86::VROUNDSSm: |
5182 | case X86::VROUNDSSr_Int: |
5183 | case X86::VROUNDSSm_Int: |
5184 | case X86::VRSQRTSSr: |
5185 | case X86::VRSQRTSSr_Int: |
5186 | case X86::VRSQRTSSm: |
5187 | case X86::VRSQRTSSm_Int: |
5188 | case X86::VSQRTSSr: |
5189 | case X86::VSQRTSSr_Int: |
5190 | case X86::VSQRTSSm: |
5191 | case X86::VSQRTSSm_Int: |
5192 | case X86::VSQRTSDr: |
5193 | case X86::VSQRTSDr_Int: |
5194 | case X86::VSQRTSDm: |
5195 | case X86::VSQRTSDm_Int: |
5196 | |
5197 | case X86::VCVTSD2SSZrr: |
5198 | case X86::VCVTSD2SSZrr_Int: |
5199 | case X86::VCVTSD2SSZrrb_Int: |
5200 | case X86::VCVTSD2SSZrm: |
5201 | case X86::VCVTSD2SSZrm_Int: |
5202 | case X86::VCVTSS2SDZrr: |
5203 | case X86::VCVTSS2SDZrr_Int: |
5204 | case X86::VCVTSS2SDZrrb_Int: |
5205 | case X86::VCVTSS2SDZrm: |
5206 | case X86::VCVTSS2SDZrm_Int: |
5207 | case X86::VGETEXPSDZr: |
5208 | case X86::VGETEXPSDZrb: |
5209 | case X86::VGETEXPSDZm: |
5210 | case X86::VGETEXPSSZr: |
5211 | case X86::VGETEXPSSZrb: |
5212 | case X86::VGETEXPSSZm: |
5213 | case X86::VGETMANTSDZrri: |
5214 | case X86::VGETMANTSDZrrib: |
5215 | case X86::VGETMANTSDZrmi: |
5216 | case X86::VGETMANTSSZrri: |
5217 | case X86::VGETMANTSSZrrib: |
5218 | case X86::VGETMANTSSZrmi: |
5219 | case X86::VRNDSCALESDZr: |
5220 | case X86::VRNDSCALESDZr_Int: |
5221 | case X86::VRNDSCALESDZrb_Int: |
5222 | case X86::VRNDSCALESDZm: |
5223 | case X86::VRNDSCALESDZm_Int: |
5224 | case X86::VRNDSCALESSZr: |
5225 | case X86::VRNDSCALESSZr_Int: |
5226 | case X86::VRNDSCALESSZrb_Int: |
5227 | case X86::VRNDSCALESSZm: |
5228 | case X86::VRNDSCALESSZm_Int: |
5229 | case X86::VRCP14SDZrr: |
5230 | case X86::VRCP14SDZrm: |
5231 | case X86::VRCP14SSZrr: |
5232 | case X86::VRCP14SSZrm: |
5233 | case X86::VRCP28SDZr: |
5234 | case X86::VRCP28SDZrb: |
5235 | case X86::VRCP28SDZm: |
5236 | case X86::VRCP28SSZr: |
5237 | case X86::VRCP28SSZrb: |
5238 | case X86::VRCP28SSZm: |
5239 | case X86::VREDUCESSZrmi: |
5240 | case X86::VREDUCESSZrri: |
5241 | case X86::VREDUCESSZrrib: |
5242 | case X86::VRSQRT14SDZrr: |
5243 | case X86::VRSQRT14SDZrm: |
5244 | case X86::VRSQRT14SSZrr: |
5245 | case X86::VRSQRT14SSZrm: |
5246 | case X86::VRSQRT28SDZr: |
5247 | case X86::VRSQRT28SDZrb: |
5248 | case X86::VRSQRT28SDZm: |
5249 | case X86::VRSQRT28SSZr: |
5250 | case X86::VRSQRT28SSZrb: |
5251 | case X86::VRSQRT28SSZm: |
5252 | case X86::VSQRTSSZr: |
5253 | case X86::VSQRTSSZr_Int: |
5254 | case X86::VSQRTSSZrb_Int: |
5255 | case X86::VSQRTSSZm: |
5256 | case X86::VSQRTSSZm_Int: |
5257 | case X86::VSQRTSDZr: |
5258 | case X86::VSQRTSDZr_Int: |
5259 | case X86::VSQRTSDZrb_Int: |
5260 | case X86::VSQRTSDZm: |
5261 | case X86::VSQRTSDZm_Int: |
5262 | return OpNum == 1; |
5263 | case X86::VMOVSSZrrk: |
5264 | case X86::VMOVSDZrrk: |
5265 | return OpNum == 3 && !ForLoadFold; |
5266 | case X86::VMOVSSZrrkz: |
5267 | case X86::VMOVSDZrrkz: |
5268 | return OpNum == 2 && !ForLoadFold; |
5269 | } |
5270 | |
5271 | return false; |
5272 | } |
5273 | |
5274 | |
5275 | |
5276 | |
5277 | |
5278 | |
5279 | |
5280 | |
5281 | |
5282 | |
5283 | |
5284 | /// Return how many instructions of clearance the scheduler should try to |
5285 | /// provide before \p MI when its operand \p OpNum is marked undef, so that |
5286 | /// reading the stale physical register does not introduce a false dependency. |
5287 | /// A result of 0 means no special handling is required. |
5288 | unsigned |
5289 | X86InstrInfo::getUndefRegClearance(const MachineInstr &MI, unsigned OpNum, |
5290 | const TargetRegisterInfo *TRI) const { |
5291 | const MachineOperand &MO = MI.getOperand(OpNum); |
5292 | if (Register::isPhysicalRegister(MO.getReg()) && |
5293 | hasUndefRegUpdate(MI.getOpcode(), OpNum)) |
5294 | return UndefRegClearance; |
5295 | |
5296 | return 0; |
5297 | } |
5298 | |
5299 | void X86InstrInfo::breakPartialRegDependency( |
5300 | MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const { |
5301 | Register Reg = MI.getOperand(OpNum).getReg(); |
5302 | |
5303 | // If MI kills the register, the old value is dead and no false dependency remains. |
5304 | return; |
5305 | |
5306 | if (X86::VR128RegClass.contains(Reg)) { |
5307 | // For a full XMM register, xor it with itself: the zeroing idiom is |
5308 | // recognized by the hardware and breaks the dependency on the old value. |
5309 | unsigned Opc = Subtarget.hasAVX() ? X86::VXORPSrr : X86::XORPSrr; |
5310 | BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(Opc), Reg) |
5311 | .addReg(Reg, RegState::Undef) |
5312 | .addReg(Reg, RegState::Undef); |
5313 | MI.addRegisterKilled(Reg, TRI, true); |
5314 | } else if (X86::VR256RegClass.contains(Reg)) { |
5315 | // For a YMM register, xor the low XMM sub-register; the VEX encoding zeroes |
5316 | // the upper lanes, so the whole YMM is implicitly redefined. |
5317 | Register XReg = TRI->getSubReg(Reg, X86::sub_xmm); |
5318 | BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::VXORPSrr), XReg) |
5319 | .addReg(XReg, RegState::Undef) |
5320 | .addReg(XReg, RegState::Undef) |
5321 | .addReg(Reg, RegState::ImplicitDefine); |
5322 | MI.addRegisterKilled(Reg, TRI, true); |
5323 | } else if (X86::GR64RegClass.contains(Reg)) { |
5324 | // For a 64-bit GPR, xor the 32-bit sub-register: the encoding is shorter and |
5325 | // the upper 32 bits are zeroed implicitly. |
5326 | Register XReg = TRI->getSubReg(Reg, X86::sub_32bit); |
5327 | BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::XOR32rr), XReg) |
5328 | .addReg(XReg, RegState::Undef) |
5329 | .addReg(XReg, RegState::Undef) |
5330 | .addReg(Reg, RegState::ImplicitDefine); |
5331 | MI.addRegisterKilled(Reg, TRI, true); |
5332 | } else if (X86::GR32RegClass.contains(Reg)) { |
5333 | BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::XOR32rr), Reg) |
5334 | .addReg(Reg, RegState::Undef) |
5335 | .addReg(Reg, RegState::Undef); |
5336 | MI.addRegisterKilled(Reg, TRI, true); |
5337 | } |
5338 | } |
5339 | |
5340 | static void addOperands(MachineInstrBuilder &MIB, ArrayRef<MachineOperand> MOs, |
5341 | int PtrOffset = 0) { |
5342 | unsigned NumAddrOps = MOs.size(); |
5343 | |
5344 | if (NumAddrOps < 4) { |
5345 | |
5346 | for (unsigned i = 0; i != NumAddrOps; ++i) |
5347 | MIB.add(MOs[i]); |
5348 | addOffset(MIB, PtrOffset); |
5349 | } else { |
5350 | // Full x86 memory reference (5 operands): fold any extra pointer offset into |
5351 | // the existing displacement operand. |
5352 | assert(MOs.size() == 5 && "Unexpected memory operand list length"); |
5353 | for (unsigned i = 0; i != NumAddrOps; ++i) { |
5354 | const MachineOperand &MO = MOs[i]; |
5355 | if (i == 3 && PtrOffset != 0) { |
5356 | MIB.addDisp(MO, PtrOffset); |
5357 | } else { |
5358 | MIB.add(MO); |
5359 | } |
5360 | } |
5361 | } |
5362 | } |
5363 | |
5364 | static void updateOperandRegConstraints(MachineFunction &MF, |
5365 | MachineInstr &NewMI, |
5366 | const TargetInstrInfo &TII) { |
5367 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
5368 | const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo(); |
5369 | |
5370 | for (int Idx : llvm::seq<int>(0, NewMI.getNumOperands())) { |
5371 | MachineOperand &MO = NewMI.getOperand(Idx); |
5372 | |
5373 | if (!MO.isReg()) |
5374 | continue; |
5375 | Register Reg = MO.getReg(); |
5376 | if (!Reg.isVirtual()) |
5377 | continue; |
5378 | |
5379 | auto *NewRC = MRI.constrainRegClass( |
5380 | Reg, TII.getRegClass(NewMI.getDesc(), Idx, &TRI, MF)); |
5381 | if (!NewRC) { |
5382 | LLVM_DEBUG( |
5383 | dbgs() << "WARNING: Unable to update register constraint for operand " |
5384 | << Idx << " of instruction:\n"; |
5385 | NewMI.dump(); dbgs() << "\n"); |
5386 | } |
5387 | } |
5388 | } |
5389 | |
5390 | static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode, |
5391 | ArrayRef<MachineOperand> MOs, |
5392 | MachineBasicBlock::iterator InsertPt, |
5393 | MachineInstr &MI, |
5394 | const TargetInstrInfo &TII) { |
5395 | // Create the new instruction with the memory reference first; it replaces |
5396 | // both the tied def and use of the original two-address instruction. |
5397 | MachineInstr *NewMI = |
5398 | MF.CreateMachineInstr(TII.get(Opcode), MI.getDebugLoc(), true); |
5399 | MachineInstrBuilder MIB(MF, NewMI); |
5400 | addOperands(MIB, MOs); |
5401 | |
5402 | // Copy the remaining explicit operands, then any trailing implicit operands. |
5403 | unsigned NumOps = MI.getDesc().getNumOperands() - 2; |
5404 | for (unsigned i = 0; i != NumOps; ++i) { |
5405 | MachineOperand &MO = MI.getOperand(i + 2); |
5406 | MIB.add(MO); |
5407 | } |
5408 | for (unsigned i = NumOps + 2, e = MI.getNumOperands(); i != e; ++i) { |
5409 | MachineOperand &MO = MI.getOperand(i); |
5410 | MIB.add(MO); |
5411 | } |
5412 | |
5413 | updateOperandRegConstraints(MF, *NewMI, TII); |
5414 | |
5415 | MachineBasicBlock *MBB = InsertPt->getParent(); |
5416 | MBB->insert(InsertPt, NewMI); |
5417 | |
5418 | return MIB; |
5419 | } |
5420 | |
5421 | static MachineInstr *FuseInst(MachineFunction &MF, unsigned Opcode, |
5422 | unsigned OpNo, ArrayRef<MachineOperand> MOs, |
5423 | MachineBasicBlock::iterator InsertPt, |
5424 | MachineInstr &MI, const TargetInstrInfo &TII, |
5425 | int PtrOffset = 0) { |
5426 | // Rebuild the instruction, substituting the memory reference for operand OpNo. |
5427 | MachineInstr *NewMI = |
5428 | MF.CreateMachineInstr(TII.get(Opcode), MI.getDebugLoc(), true); |
5429 | MachineInstrBuilder MIB(MF, NewMI); |
5430 | |
5431 | for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { |
5432 | MachineOperand &MO = MI.getOperand(i); |
5433 | if (i == OpNo) { |
5434 | assert(MO.isReg() && "Expected to fold into reg operand!"); |
5435 | addOperands(MIB, MOs, PtrOffset); |
5436 | } else { |
5437 | MIB.add(MO); |
5438 | } |
5439 | } |
5440 | |
5441 | updateOperandRegConstraints(MF, *NewMI, TII); |
5442 | |
5443 | // Preserve the NoFPExcept flag on the folded instruction. |
5444 | if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept)) |
5445 | NewMI->setFlag(MachineInstr::MIFlag::NoFPExcept); |
5446 | |
5447 | MachineBasicBlock *MBB = InsertPt->getParent(); |
5448 | MBB->insert(InsertPt, NewMI); |
5449 | |
5450 | return MIB; |
5451 | } |
5452 | |
5453 | static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII, unsigned Opcode, |
5454 | ArrayRef<MachineOperand> MOs, |
5455 | MachineBasicBlock::iterator InsertPt, |
5456 | MachineInstr &MI) { |
5457 | MachineInstrBuilder MIB = BuildMI(*InsertPt->getParent(), InsertPt, |
5458 | MI.getDebugLoc(), TII.get(Opcode)); |
5459 | addOperands(MIB, MOs); |
5460 | return MIB.addImm(0); |
5461 | } |
5462 | |
5463 | MachineInstr *X86InstrInfo::foldMemoryOperandCustom( |
5464 | MachineFunction &MF, MachineInstr &MI, unsigned OpNum, |
5465 | ArrayRef<MachineOperand> MOs, MachineBasicBlock::iterator InsertPt, |
5466 | unsigned Size, Align Alignment) const { |
5467 | switch (MI.getOpcode()) { |
5468 | case X86::INSERTPSrr: |
5469 | case X86::VINSERTPSrr: |
5470 | case X86::VINSERTPSZrr: |
5471 | // INSERTPS: when the folded operand is the source vector, the 16-byte load |
5472 | // can be narrowed to a 4-byte load of just the selected element. |
5473 | if (OpNum == 2) { |
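     | // INSERTPS immediate layout: bits 7:6 select the source element, bits 5:4 |
     | // the destination slot, bits 3:0 the zero mask. Loading the chosen element |
     | // directly lets the source-select bits be dropped from the new immediate. |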
5474 | unsigned Imm = MI.getOperand(MI.getNumOperands() - 1).getImm(); |
5475 | unsigned ZMask = Imm & 15; |
5476 | unsigned DstIdx = (Imm >> 4) & 3; |
5477 | unsigned SrcIdx = (Imm >> 6) & 3; |
5478 | |
5479 | const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); |
5480 | const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI, MF); |
5481 | unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8; |
5482 | if ((Size == 0 || Size >= 16) && RCSize >= 16 && Alignment >= Align(4)) { |
5483 | int PtrOffset = SrcIdx * 4; |
5484 | unsigned NewImm = (DstIdx << 4) | ZMask; |
5485 | unsigned NewOpCode = |
5486 | (MI.getOpcode() == X86::VINSERTPSZrr) ? X86::VINSERTPSZrm : |
5487 | (MI.getOpcode() == X86::VINSERTPSrr) ? X86::VINSERTPSrm : |
5488 | X86::INSERTPSrm; |
5489 | MachineInstr *NewMI = |
5490 | FuseInst(MF, NewOpCode, OpNum, MOs, InsertPt, MI, *this, PtrOffset); |
5491 | NewMI->getOperand(NewMI->getNumOperands() - 1).setImm(NewImm); |
5492 | return NewMI; |
5493 | } |
5494 | } |
5495 | break; |
5496 | case X86::MOVHLPSrr: |
5497 | case X86::VMOVHLPSrr: |
5498 | case X86::VMOVHLPSZrr: |
5499 | // MOVHLPS copies the high 64 bits of operand 2 into the low 64 bits of the |
5500 | // destination. When folding a sufficiently aligned 16-byte source we can |
5501 | // instead load the upper 8 bytes directly with MOVLPS at offset 8. |
5502 | if (OpNum == 2) { |
5503 | const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); |
5504 | const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI, MF); |
5505 | unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8; |
5506 | if ((Size == 0 || Size >= 16) && RCSize >= 16 && Alignment >= Align(8)) { |
5507 | unsigned NewOpCode = |
5508 | (MI.getOpcode() == X86::VMOVHLPSZrr) ? X86::VMOVLPSZ128rm : |
5509 | (MI.getOpcode() == X86::VMOVHLPSrr) ? X86::VMOVLPSrm : |
5510 | X86::MOVLPSrm; |
5511 | MachineInstr *NewMI = |
5512 | FuseInst(MF, NewOpCode, OpNum, MOs, InsertPt, MI, *this, 8); |
5513 | return NewMI; |
5514 | } |
5515 | } |
5516 | break; |
5517 | case X86::UNPCKLPDrr: |
5518 | // UNPCKLPD with a memory operand requires a 16-byte aligned load; when the |
5519 | // operand is under-aligned, load only the low 8 bytes into the high half of |
5520 | // the destination with MOVHPD instead. |
5521 | if (OpNum == 2) { |
5522 | const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); |
5523 | const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI, MF); |
5524 | unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8; |
5525 | if ((Size == 0 || Size >= 16) && RCSize >= 16 && Alignment < Align(16)) { |
5526 | MachineInstr *NewMI = |
5527 | FuseInst(MF, X86::MOVHPDrm, OpNum, MOs, InsertPt, MI, *this); |
5528 | return NewMI; |
5529 | } |
5530 | } |
5531 | break; |
5532 | } |
5533 | |
5534 | return nullptr; |
5535 | } |
5536 | |
5537 | static bool shouldPreventUndefRegUpdateMemFold(MachineFunction &MF, |
5538 | MachineInstr &MI) { |
5539 | if (!hasUndefRegUpdate(MI.getOpcode(), 1, true) || |
5540 | !MI.getOperand(1).isReg()) |
5541 | return false; |
5542 | |
5543 | // Only worry about instructions whose operand 1 may carry an undef update |
5544 | // when a load is folded. The operand counts as undefined both when it is |
5545 | // explicitly flagged undef and when the virtual register feeding it is |
5546 | // defined only by an IMPLICIT_DEF. |
5547 | |
5548 | if (MI.getOperand(1).isUndef()) |
5549 | return true; |
5550 | |
5551 | MachineRegisterInfo &RegInfo = MF.getRegInfo(); |
5552 | MachineInstr *VRegDef = RegInfo.getUniqueVRegDef(MI.getOperand(1).getReg()); |
5553 | return VRegDef && VRegDef->isImplicitDef(); |
5554 | } |
5555 | |
5556 | MachineInstr *X86InstrInfo::foldMemoryOperandImpl( |
5557 | MachineFunction &MF, MachineInstr &MI, unsigned OpNum, |
5558 | ArrayRef<MachineOperand> MOs, MachineBasicBlock::iterator InsertPt, |
5559 | unsigned Size, Align Alignment, bool AllowCommute) const { |
5560 | bool isSlowTwoMemOps = Subtarget.slowTwoMemOps(); |
5561 | bool isTwoAddrFold = false; |
5562 | |
5563 | // On CPUs where an instruction with two memory operands is slow, do not fold |
5564 | // a load into a call or push (which already touches memory), unless we are |
5565 | // optimizing for minimum size. |
5566 | if (isSlowTwoMemOps && !MF.getFunction().hasMinSize() && |
5567 | (MI.getOpcode() == X86::CALL32r || MI.getOpcode() == X86::CALL64r || |
5568 | MI.getOpcode() == X86::PUSH16r || MI.getOpcode() == X86::PUSH32r || |
5569 | MI.getOpcode() == X86::PUSH64r)) |
5570 | return nullptr; |
5571 | |
5572 | // Avoid partial and undef register update stalls unless optimizing for size. |
5573 | if (!MF.getFunction().hasOptSize() && |
5574 | (hasPartialRegUpdate(MI.getOpcode(), Subtarget, true) || |
5575 | shouldPreventUndefRegUpdateMemFold(MF, MI))) |
5576 | return nullptr; |
5577 | |
5578 | unsigned NumOps = MI.getDesc().getNumOperands(); |
5579 | bool isTwoAddr = |
5580 | NumOps > 1 && MI.getDesc().getOperandConstraint(1, MCOI::TIED_TO) != -1; |
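     | // "Two-address" means operand 1 is tied to the definition, as in most |
     | // destructive x86 ALU instructions; such folds use a dedicated table below. |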
5581 | |
5582 | // Do not fold an ADD32ri whose immediate carries the MO_GOT_ABSOLUTE_ADDRESS |
5583 | // target flag; that is the 32-bit PIC global-base computation and must stay in register form. |
5584 | if (MI.getOpcode() == X86::ADD32ri && |
5585 | MI.getOperand(2).getTargetFlags() == X86II::MO_GOT_ABSOLUTE_ADDRESS) |
5586 | return nullptr; |
5587 | |
5588 | // A displacement carrying a GOTTPOFF relocation (TLS initial-exec) may only |
5589 | // be folded into the add of the thread pointer, i.e. ADD64rr. |
5590 | |
5591 | if (MOs.size() == X86::AddrNumOperands && |
5592 | MOs[X86::AddrDisp].getTargetFlags() == X86II::MO_GOTTPOFF && |
5593 | MI.getOpcode() != X86::ADD64rr) |
5594 | return nullptr; |
5595 | |
5596 | MachineInstr *NewMI = nullptr; |
5597 | |
5598 | |
5599 | if (MachineInstr *CustomMI = foldMemoryOperandCustom( |
5600 | MF, MI, OpNum, MOs, InsertPt, Size, Alignment)) |
5601 | return CustomMI; |
5602 | |
5603 | const X86MemoryFoldTableEntry *I = nullptr; |
5604 | // Folding into the tied def/use pair of a two-address instruction is special: |
5605 | // the memory reference replaces *both* registers, so it uses a separate fold |
5606 | // table from the per-operand one. |
5607 | |
5608 | if (isTwoAddr && NumOps >= 2 && OpNum < 2 && MI.getOperand(0).isReg() && |
5609 | MI.getOperand(1).isReg() && |
5610 | MI.getOperand(0).getReg() == MI.getOperand(1).getReg()) { |
5611 | I = lookupTwoAddrFoldTable(MI.getOpcode()); |
5612 | isTwoAddrFold = true; |
5613 | } else { |
5614 | if (OpNum == 0) { |
5615 | if (MI.getOpcode() == X86::MOV32r0) { |
5616 | NewMI = MakeM0Inst(*this, X86::MOV32mi, MOs, InsertPt, MI); |
5617 | if (NewMI) |
5618 | return NewMI; |
5619 | } |
5620 | } |
5621 | |
5622 | I = lookupFoldTable(MI.getOpcode(), OpNum); |
5623 | } |
5624 | |
5625 | if (I != nullptr) { |
5626 | unsigned Opcode = I->DstOp; |
5627 | bool FoldedLoad = |
5628 | isTwoAddrFold || (OpNum == 0 && I->Flags & TB_FOLDED_LOAD) || OpNum > 0; |
5629 | bool FoldedStore = |
5630 | isTwoAddrFold || (OpNum == 0 && I->Flags & TB_FOLDED_STORE); |
5631 | MaybeAlign MinAlign = |
5632 | decodeMaybeAlign((I->Flags & TB_ALIGN_MASK) >> TB_ALIGN_SHIFT); |
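     | // The fold table can require a minimum alignment (the folded form would use |
     | // an aligned load or store); give up if the memory operand cannot provide it. |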
5633 | if (MinAlign && Alignment < *MinAlign) |
5634 | return nullptr; |
5635 | bool NarrowToMOV32rm = false; |
5636 | if (Size) { |
5637 | const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); |
5638 | const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, |
5639 | &RI, MF); |
5640 | unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8; |
5641 | // Check that folding the load is safe: if the stack object is narrower than |
5642 | // the register being loaded, the folded load would read past the object. |
5643 | |
5644 | if (FoldedLoad && Size < RCSize) { |
5645 | // Exception: a 64-bit MOV from a 4-byte slot can be narrowed to MOV32rm, |
5646 | // whose result is implicitly zero-extended (this typically comes from |
5647 | // rematerializing a 32-bit value into a 64-bit register). |
5648 | if (Opcode != X86::MOV64rm || RCSize != 8 || Size != 4) |
5649 | return nullptr; |
5650 | if (MI.getOperand(0).getSubReg() || MI.getOperand(1).getSubReg()) |
5651 | return nullptr; |
5652 | Opcode = X86::MOV32rm; |
5653 | NarrowToMOV32rm = true; |
5654 | } |
5655 | // For a folded store the slot must match the register size exactly, |
5656 | // otherwise the store would touch memory outside the object or drop bits. |
5657 | |
5658 | if (FoldedStore && Size != RCSize) |
5659 | return nullptr; |
5660 | } |
5661 | |
5662 | if (isTwoAddrFold) |
5663 | NewMI = FuseTwoAddrInst(MF, Opcode, MOs, InsertPt, MI, *this); |
5664 | else |
5665 | NewMI = FuseInst(MF, Opcode, OpNum, MOs, InsertPt, MI, *this); |
5666 | |
5667 | if (NarrowToMOV32rm) { |
5668 | // We narrowed the load to MOV32rm above; rewrite the destination to the |
5669 | // corresponding 32-bit register, or to a sub-register index if virtual. |
5670 | |
5671 | Register DstReg = NewMI->getOperand(0).getReg(); |
5672 | if (DstReg.isPhysical()) |
5673 | NewMI->getOperand(0).setReg(RI.getSubReg(DstReg, X86::sub_32bit)); |
5674 | else |
5675 | NewMI->getOperand(0).setSubReg(X86::sub_32bit); |
5676 | } |
5677 | return NewMI; |
5678 | } |
5679 | |
5680 | // No table entry for this operand: try commuting the instruction and folding |
5681 | // into the other source operand instead. |
5682 | if (AllowCommute) { |
5683 | unsigned CommuteOpIdx1 = OpNum, CommuteOpIdx2 = CommuteAnyOperandIndex; |
5684 | if (findCommutedOpIndices(MI, CommuteOpIdx1, CommuteOpIdx2)) { |
5685 | bool HasDef = MI.getDesc().getNumDefs(); |
5686 | Register Reg0 = HasDef ? MI.getOperand(0).getReg() : Register(); |
5687 | Register Reg1 = MI.getOperand(CommuteOpIdx1).getReg(); |
5688 | Register Reg2 = MI.getOperand(CommuteOpIdx2).getReg(); |
5689 | bool Tied1 = |
5690 | 0 == MI.getDesc().getOperandConstraint(CommuteOpIdx1, MCOI::TIED_TO); |
5691 | bool Tied2 = |
5692 | 0 == MI.getDesc().getOperandConstraint(CommuteOpIdx2, MCOI::TIED_TO); |
5693 | |
5694 | // Do not commute if one of the operands being swapped is tied to the |
5695 | // definition; that would change which register receives the result. |
5696 | if ((HasDef && Reg0 == Reg1 && Tied1) || |
5697 | (HasDef && Reg0 == Reg2 && Tied2)) |
5698 | return nullptr; |
5699 | |
5700 | MachineInstr *CommutedMI = |
5701 | commuteInstruction(MI, false, CommuteOpIdx1, CommuteOpIdx2); |
5702 | if (!CommutedMI) { |
5703 | |
5704 | return nullptr; |
5705 | } |
5706 | if (CommutedMI != &MI) { |
5707 | |
5708 | CommutedMI->eraseFromParent(); |
5709 | return nullptr; |
5710 | } |
5711 | // Try folding into the commuted operand position. AllowCommute is false on |
5712 | // the recursive call so we cannot loop. |
5713 | NewMI = foldMemoryOperandImpl(MF, MI, CommuteOpIdx2, MOs, InsertPt, Size, |
5714 | Alignment, false); |
5715 | if (NewMI) |
5716 | return NewMI; |
5717 | |
5718 | // Folding still failed: commute back to restore the original operand order. |
5719 | MachineInstr *UncommutedMI = |
5720 | commuteInstruction(MI, false, CommuteOpIdx1, CommuteOpIdx2); |
5721 | if (!UncommutedMI) { |
5722 | |
5723 | return nullptr; |
5724 | } |
5725 | if (UncommutedMI != &MI) { |
5726 | |
5727 | UncommutedMI->eraseFromParent(); |
5728 | return nullptr; |
5729 | } |
5730 | |
5731 | |
5732 | return nullptr; |
5733 | } |
5734 | } |
5735 | |
5736 | |
5737 | if (PrintFailedFusing && !MI.isCopy()) |
5738 | dbgs() << "We failed to fuse operand " << OpNum << " in " << MI; |
5739 | return nullptr; |
5740 | } |
5741 | |
5742 | MachineInstr * |
5743 | X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, |
5744 | ArrayRef<unsigned> Ops, |
5745 | MachineBasicBlock::iterator InsertPt, |
5746 | int FrameIndex, LiveIntervals *LIS, |
5747 | VirtRegMap *VRM) const { |
5748 | |
5749 | if (NoFusing) |
5750 | return nullptr; |
5751 | |
5752 | |
5753 | if (!MF.getFunction().hasOptSize() && |
5754 | (hasPartialRegUpdate(MI.getOpcode(), Subtarget, true) || |
5755 | shouldPreventUndefRegUpdateMemFold(MF, MI))) |
5756 | return nullptr; |
5757 | |
5758 | // Do not fold sub-register spills, or reloads that use the high 8-bit subregister. |
5759 | for (auto Op : Ops) { |
5760 | MachineOperand &MO = MI.getOperand(Op); |
5761 | auto SubReg = MO.getSubReg(); |
5762 | if (SubReg && (MO.isDef() || SubReg == X86::sub_8bit_hi)) |
5763 | return nullptr; |
5764 | } |
5765 | |
5766 | const MachineFrameInfo &MFI = MF.getFrameInfo(); |
5767 | unsigned Size = MFI.getObjectSize(FrameIndex); |
5768 | Align Alignment = MFI.getObjectAlign(FrameIndex); |
5769 | |
5770 | // Without stack realignment, a slot's effective alignment is capped by the incoming stack alignment. |
5771 | if (!RI.hasStackRealignment(MF)) |
5772 | Alignment = |
5773 | std::min(Alignment, Subtarget.getFrameLowering()->getStackAlign()); |
5774 | if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) { |
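     | // Folding both operands of TESTrr reg, reg: rewrite it as CMPri reg, 0 so |
     | // that a single memory operand can then be folded into the remaining use. |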
5775 | unsigned NewOpc = 0; |
5776 | unsigned RCSize = 0; |
5777 | switch (MI.getOpcode()) { |
5778 | default: return nullptr; |
5779 | case X86::TEST8rr: NewOpc = X86::CMP8ri; RCSize = 1; break; |
5780 | case X86::TEST16rr: NewOpc = X86::CMP16ri8; RCSize = 2; break; |
5781 | case X86::TEST32rr: NewOpc = X86::CMP32ri8; RCSize = 4; break; |
5782 | case X86::TEST64rr: NewOpc = X86::CMP64ri8; RCSize = 8; break; |
5783 | } |
5784 | // The CMP formed from TESTrr reads a full register's width from memory, so |
5785 | // the stack slot must be at least that large. |
5786 | if (Size < RCSize) |
5787 | return nullptr; |
5788 | |
5789 | MI.setDesc(get(NewOpc)); |
5790 | MI.getOperand(1).ChangeToImmediate(0); |
5791 | } else if (Ops.size() != 1) |
5792 | return nullptr; |
5793 | |
5794 | return foldMemoryOperandImpl(MF, MI, Ops[0], |
5795 | MachineOperand::CreateFI(FrameIndex), InsertPt, |
5796 | Size, Alignment, true); |
5797 | } |
5798 | |
5799 | |
5800 | |
5801 | |
5802 | |
5803 | |
5804 | |
5805 | |
5806 | |
5807 | |
5808 | |
5809 | /// Return true if folding \p LoadMI (a scalar load such as MOVSS/MOVSD) into |
5810 | /// \p UserMI would be unsafe because \p UserMI may read more of the register |
5811 | /// than the load defines. Scalar-intrinsic users that only read the low |
5812 | /// element are listed below and remain foldable. |
5813 | static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI, |
5814 | const MachineInstr &UserMI, |
5815 | const MachineFunction &MF) { |
5816 | unsigned Opc = LoadMI.getOpcode(); |
5817 | unsigned UserOpc = UserMI.getOpcode(); |
5818 | const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); |
5819 | const TargetRegisterClass *RC = |
5820 | MF.getRegInfo().getRegClass(LoadMI.getOperand(0).getReg()); |
5821 | unsigned RegSize = TRI.getRegSizeInBits(*RC); |
5822 | |
5823 | if ((Opc == X86::MOVSSrm || Opc == X86::VMOVSSrm || Opc == X86::VMOVSSZrm || |
5824 | Opc == X86::MOVSSrm_alt || Opc == X86::VMOVSSrm_alt || |
5825 | Opc == X86::VMOVSSZrm_alt) && |
5826 | RegSize > 32) { |
5827 | // MOVSS only defines the low 32 bits of the register. If the user is not one |
5828 | // of the scalar single-precision operations below, it may read the undefined |
5829 | // upper bits, so the fold must be rejected. |
5830 | switch (UserOpc) { |
5831 | case X86::CVTSS2SDrr_Int: |
5832 | case X86::VCVTSS2SDrr_Int: |
5833 | case X86::VCVTSS2SDZrr_Int: |
5834 | case X86::VCVTSS2SDZrr_Intk: |
5835 | case X86::VCVTSS2SDZrr_Intkz: |
5836 | case X86::CVTSS2SIrr_Int: case X86::CVTSS2SI64rr_Int: |
5837 | case X86::VCVTSS2SIrr_Int: case X86::VCVTSS2SI64rr_Int: |
5838 | case X86::VCVTSS2SIZrr_Int: case X86::VCVTSS2SI64Zrr_Int: |
5839 | case X86::CVTTSS2SIrr_Int: case X86::CVTTSS2SI64rr_Int: |
5840 | case X86::VCVTTSS2SIrr_Int: case X86::VCVTTSS2SI64rr_Int: |
5841 | case X86::VCVTTSS2SIZrr_Int: case X86::VCVTTSS2SI64Zrr_Int: |
5842 | case X86::VCVTSS2USIZrr_Int: case X86::VCVTSS2USI64Zrr_Int: |
5843 | case X86::VCVTTSS2USIZrr_Int: case X86::VCVTTSS2USI64Zrr_Int: |
5844 | case X86::RCPSSr_Int: case X86::VRCPSSr_Int: |
5845 | case X86::RSQRTSSr_Int: case X86::VRSQRTSSr_Int: |
5846 | case X86::ROUNDSSr_Int: case X86::VROUNDSSr_Int: |
5847 | case X86::COMISSrr_Int: case X86::VCOMISSrr_Int: case X86::VCOMISSZrr_Int: |
5848 | case X86::UCOMISSrr_Int: case X86::VUCOMISSrr_Int: case X86::VUCOMISSZrr_Int: |
5849 | case X86::ADDSSrr_Int: case X86::VADDSSrr_Int: case X86::VADDSSZrr_Int: |
5850 | case X86::CMPSSrr_Int: case X86::VCMPSSrr_Int: case X86::VCMPSSZrr_Int: |
5851 | case X86::DIVSSrr_Int: case X86::VDIVSSrr_Int: case X86::VDIVSSZrr_Int: |
5852 | case X86::MAXSSrr_Int: case X86::VMAXSSrr_Int: case X86::VMAXSSZrr_Int: |
5853 | case X86::MINSSrr_Int: case X86::VMINSSrr_Int: case X86::VMINSSZrr_Int: |
5854 | case X86::MULSSrr_Int: case X86::VMULSSrr_Int: case X86::VMULSSZrr_Int: |
5855 | case X86::SQRTSSr_Int: case X86::VSQRTSSr_Int: case X86::VSQRTSSZr_Int: |
5856 | case X86::SUBSSrr_Int: case X86::VSUBSSrr_Int: case X86::VSUBSSZrr_Int: |
5857 | case X86::VADDSSZrr_Intk: case X86::VADDSSZrr_Intkz: |
5858 | case X86::VCMPSSZrr_Intk: |
5859 | case X86::VDIVSSZrr_Intk: case X86::VDIVSSZrr_Intkz: |
5860 | case X86::VMAXSSZrr_Intk: case X86::VMAXSSZrr_Intkz: |
5861 | case X86::VMINSSZrr_Intk: case X86::VMINSSZrr_Intkz: |
5862 | case X86::VMULSSZrr_Intk: case X86::VMULSSZrr_Intkz: |
5863 | case X86::VSQRTSSZr_Intk: case X86::VSQRTSSZr_Intkz: |
5864 | case X86::VSUBSSZrr_Intk: case X86::VSUBSSZrr_Intkz: |
5865 | case X86::VFMADDSS4rr_Int: case X86::VFNMADDSS4rr_Int: |
5866 | case X86::VFMSUBSS4rr_Int: case X86::VFNMSUBSS4rr_Int: |
5867 | case X86::VFMADD132SSr_Int: case X86::VFNMADD132SSr_Int: |
5868 | case X86::VFMADD213SSr_Int: case X86::VFNMADD213SSr_Int: |
5869 | case X86::VFMADD231SSr_Int: case X86::VFNMADD231SSr_Int: |
5870 | case X86::VFMSUB132SSr_Int: case X86::VFNMSUB132SSr_Int: |
5871 | case X86::VFMSUB213SSr_Int: case X86::VFNMSUB213SSr_Int: |
5872 | case X86::VFMSUB231SSr_Int: case X86::VFNMSUB231SSr_Int: |
5873 | case X86::VFMADD132SSZr_Int: case X86::VFNMADD132SSZr_Int: |
5874 | case X86::VFMADD213SSZr_Int: case X86::VFNMADD213SSZr_Int: |
5875 | case X86::VFMADD231SSZr_Int: case X86::VFNMADD231SSZr_Int: |
5876 | case X86::VFMSUB132SSZr_Int: case X86::VFNMSUB132SSZr_Int: |
5877 | case X86::VFMSUB213SSZr_Int: case X86::VFNMSUB213SSZr_Int: |
5878 | case X86::VFMSUB231SSZr_Int: case X86::VFNMSUB231SSZr_Int: |
5879 | case X86::VFMADD132SSZr_Intk: case X86::VFNMADD132SSZr_Intk: |
5880 | case X86::VFMADD213SSZr_Intk: case X86::VFNMADD213SSZr_Intk: |
5881 | case X86::VFMADD231SSZr_Intk: case X86::VFNMADD231SSZr_Intk: |
5882 | case X86::VFMSUB132SSZr_Intk: case X86::VFNMSUB132SSZr_Intk: |
5883 | case X86::VFMSUB213SSZr_Intk: case X86::VFNMSUB213SSZr_Intk: |
5884 | case X86::VFMSUB231SSZr_Intk: case X86::VFNMSUB231SSZr_Intk: |
5885 | case X86::VFMADD132SSZr_Intkz: case X86::VFNMADD132SSZr_Intkz: |
5886 | case X86::VFMADD213SSZr_Intkz: case X86::VFNMADD213SSZr_Intkz: |
5887 | case X86::VFMADD231SSZr_Intkz: case X86::VFNMADD231SSZr_Intkz: |
5888 | case X86::VFMSUB132SSZr_Intkz: case X86::VFNMSUB132SSZr_Intkz: |
5889 | case X86::VFMSUB213SSZr_Intkz: case X86::VFNMSUB213SSZr_Intkz: |
5890 | case X86::VFMSUB231SSZr_Intkz: case X86::VFNMSUB231SSZr_Intkz: |
5891 | case X86::VFIXUPIMMSSZrri: |
5892 | case X86::VFIXUPIMMSSZrrik: |
5893 | case X86::VFIXUPIMMSSZrrikz: |
5894 | case X86::VFPCLASSSSZrr: |
5895 | case X86::VFPCLASSSSZrrk: |
5896 | case X86::VGETEXPSSZr: |
5897 | case X86::VGETEXPSSZrk: |
5898 | case X86::VGETEXPSSZrkz: |
5899 | case X86::VGETMANTSSZrri: |
5900 | case X86::VGETMANTSSZrrik: |
5901 | case X86::VGETMANTSSZrrikz: |
5902 | case X86::VRANGESSZrri: |
5903 | case X86::VRANGESSZrrik: |
5904 | case X86::VRANGESSZrrikz: |
5905 | case X86::VRCP14SSZrr: |
5906 | case X86::VRCP14SSZrrk: |
5907 | case X86::VRCP14SSZrrkz: |
5908 | case X86::VRCP28SSZr: |
5909 | case X86::VRCP28SSZrk: |
5910 | case X86::VRCP28SSZrkz: |
5911 | case X86::VREDUCESSZrri: |
5912 | case X86::VREDUCESSZrrik: |
5913 | case X86::VREDUCESSZrrikz: |
5914 | case X86::VRNDSCALESSZr_Int: |
5915 | case X86::VRNDSCALESSZr_Intk: |
5916 | case X86::VRNDSCALESSZr_Intkz: |
5917 | case X86::VRSQRT14SSZrr: |
5918 | case X86::VRSQRT14SSZrrk: |
5919 | case X86::VRSQRT14SSZrrkz: |
5920 | case X86::VRSQRT28SSZr: |
5921 | case X86::VRSQRT28SSZrk: |
5922 | case X86::VRSQRT28SSZrkz: |
5923 | case X86::VSCALEFSSZrr: |
5924 | case X86::VSCALEFSSZrrk: |
5925 | case X86::VSCALEFSSZrrkz: |
5926 | return false; |
5927 | default: |
5928 | return true; |
5929 | } |
5930 | } |
5931 | |
5932 | if ((Opc == X86::MOVSDrm || Opc == X86::VMOVSDrm || Opc == X86::VMOVSDZrm || |
5933 | Opc == X86::MOVSDrm_alt || Opc == X86::VMOVSDrm_alt || |
5934 | Opc == X86::VMOVSDZrm_alt) && |
5935 | RegSize > 64) { |
5936 | // Likewise, MOVSD only defines the low 64 bits; reject the fold unless the |
5937 | // user is a scalar double-precision operation that reads just the low element. |
5938 | |
5939 | switch (UserOpc) { |
5940 | case X86::CVTSD2SSrr_Int: |
5941 | case X86::VCVTSD2SSrr_Int: |
5942 | case X86::VCVTSD2SSZrr_Int: |
5943 | case X86::VCVTSD2SSZrr_Intk: |
5944 | case X86::VCVTSD2SSZrr_Intkz: |
5945 | case X86::CVTSD2SIrr_Int: case X86::CVTSD2SI64rr_Int: |
5946 | case X86::VCVTSD2SIrr_Int: case X86::VCVTSD2SI64rr_Int: |
5947 | case X86::VCVTSD2SIZrr_Int: case X86::VCVTSD2SI64Zrr_Int: |
5948 | case X86::CVTTSD2SIrr_Int: case X86::CVTTSD2SI64rr_Int: |
5949 | case X86::VCVTTSD2SIrr_Int: case X86::VCVTTSD2SI64rr_Int: |
5950 | case X86::VCVTTSD2SIZrr_Int: case X86::VCVTTSD2SI64Zrr_Int: |
5951 | case X86::VCVTSD2USIZrr_Int: case X86::VCVTSD2USI64Zrr_Int: |
5952 | case X86::VCVTTSD2USIZrr_Int: case X86::VCVTTSD2USI64Zrr_Int: |
5953 | case X86::ROUNDSDr_Int: case X86::VROUNDSDr_Int: |
5954 | case X86::COMISDrr_Int: case X86::VCOMISDrr_Int: case X86::VCOMISDZrr_Int: |
5955 | case X86::UCOMISDrr_Int: case X86::VUCOMISDrr_Int: case X86::VUCOMISDZrr_Int: |
5956 | case X86::ADDSDrr_Int: case X86::VADDSDrr_Int: case X86::VADDSDZrr_Int: |
5957 | case X86::CMPSDrr_Int: case X86::VCMPSDrr_Int: case X86::VCMPSDZrr_Int: |
5958 | case X86::DIVSDrr_Int: case X86::VDIVSDrr_Int: case X86::VDIVSDZrr_Int: |
5959 | case X86::MAXSDrr_Int: case X86::VMAXSDrr_Int: case X86::VMAXSDZrr_Int: |
5960 | case X86::MINSDrr_Int: case X86::VMINSDrr_Int: case X86::VMINSDZrr_Int: |
5961 | case X86::MULSDrr_Int: case X86::VMULSDrr_Int: case X86::VMULSDZrr_Int: |
5962 | case X86::SQRTSDr_Int: case X86::VSQRTSDr_Int: case X86::VSQRTSDZr_Int: |
5963 | case X86::SUBSDrr_Int: case X86::VSUBSDrr_Int: case X86::VSUBSDZrr_Int: |
5964 | case X86::VADDSDZrr_Intk: case X86::VADDSDZrr_Intkz: |
5965 | case X86::VCMPSDZrr_Intk: |
5966 | case X86::VDIVSDZrr_Intk: case X86::VDIVSDZrr_Intkz: |
5967 | case X86::VMAXSDZrr_Intk: case X86::VMAXSDZrr_Intkz: |
5968 | case X86::VMINSDZrr_Intk: case X86::VMINSDZrr_Intkz: |
5969 | case X86::VMULSDZrr_Intk: case X86::VMULSDZrr_Intkz: |
5970 | case X86::VSQRTSDZr_Intk: case X86::VSQRTSDZr_Intkz: |
5971 | case X86::VSUBSDZrr_Intk: case X86::VSUBSDZrr_Intkz: |
5972 | case X86::VFMADDSD4rr_Int: case X86::VFNMADDSD4rr_Int: |
5973 | case X86::VFMSUBSD4rr_Int: case X86::VFNMSUBSD4rr_Int: |
5974 | case X86::VFMADD132SDr_Int: case X86::VFNMADD132SDr_Int: |
5975 | case X86::VFMADD213SDr_Int: case X86::VFNMADD213SDr_Int: |
5976 | case X86::VFMADD231SDr_Int: case X86::VFNMADD231SDr_Int: |
5977 | case X86::VFMSUB132SDr_Int: case X86::VFNMSUB132SDr_Int: |
5978 | case X86::VFMSUB213SDr_Int: case X86::VFNMSUB213SDr_Int: |
5979 | case X86::VFMSUB231SDr_Int: case X86::VFNMSUB231SDr_Int: |
5980 | case X86::VFMADD132SDZr_Int: case X86::VFNMADD132SDZr_Int: |
5981 | case X86::VFMADD213SDZr_Int: case X86::VFNMADD213SDZr_Int: |
5982 | case X86::VFMADD231SDZr_Int: case X86::VFNMADD231SDZr_Int: |
5983 | case X86::VFMSUB132SDZr_Int: case X86::VFNMSUB132SDZr_Int: |
5984 | case X86::VFMSUB213SDZr_Int: case X86::VFNMSUB213SDZr_Int: |
5985 | case X86::VFMSUB231SDZr_Int: case X86::VFNMSUB231SDZr_Int: |
5986 | case X86::VFMADD132SDZr_Intk: case X86::VFNMADD132SDZr_Intk: |
5987 | case X86::VFMADD213SDZr_Intk: case X86::VFNMADD213SDZr_Intk: |
5988 | case X86::VFMADD231SDZr_Intk: case X86::VFNMADD231SDZr_Intk: |
5989 | case X86::VFMSUB132SDZr_Intk: case X86::VFNMSUB132SDZr_Intk: |
5990 | case X86::VFMSUB213SDZr_Intk: case X86::VFNMSUB213SDZr_Intk: |
5991 | case X86::VFMSUB231SDZr_Intk: case X86::VFNMSUB231SDZr_Intk: |
5992 | case X86::VFMADD132SDZr_Intkz: case X86::VFNMADD132SDZr_Intkz: |
5993 | case X86::VFMADD213SDZr_Intkz: case X86::VFNMADD213SDZr_Intkz: |
5994 | case X86::VFMADD231SDZr_Intkz: case X86::VFNMADD231SDZr_Intkz: |
5995 | case X86::VFMSUB132SDZr_Intkz: case X86::VFNMSUB132SDZr_Intkz: |
5996 | case X86::VFMSUB213SDZr_Intkz: case X86::VFNMSUB213SDZr_Intkz: |
5997 | case X86::VFMSUB231SDZr_Intkz: case X86::VFNMSUB231SDZr_Intkz: |
5998 | case X86::VFIXUPIMMSDZrri: |
5999 | case X86::VFIXUPIMMSDZrrik: |
6000 | case X86::VFIXUPIMMSDZrrikz: |
6001 | case X86::VFPCLASSSDZrr: |
6002 | case X86::VFPCLASSSDZrrk: |
6003 | case X86::VGETEXPSDZr: |
6004 | case X86::VGETEXPSDZrk: |
6005 | case X86::VGETEXPSDZrkz: |
6006 | case X86::VGETMANTSDZrri: |
6007 | case X86::VGETMANTSDZrrik: |
6008 | case X86::VGETMANTSDZrrikz: |
6009 | case X86::VRANGESDZrri: |
6010 | case X86::VRANGESDZrrik: |
6011 | case X86::VRANGESDZrrikz: |
6012 | case X86::VRCP14SDZrr: |
6013 | case X86::VRCP14SDZrrk: |
6014 | case X86::VRCP14SDZrrkz: |
6015 | case X86::VRCP28SDZr: |
6016 | case X86::VRCP28SDZrk: |
6017 | case X86::VRCP28SDZrkz: |
6018 | case X86::VREDUCESDZrri: |
6019 | case X86::VREDUCESDZrrik: |
6020 | case X86::VREDUCESDZrrikz: |
6021 | case X86::VRNDSCALESDZr_Int: |
6022 | case X86::VRNDSCALESDZr_Intk: |
6023 | case X86::VRNDSCALESDZr_Intkz: |
6024 | case X86::VRSQRT14SDZrr: |
6025 | case X86::VRSQRT14SDZrrk: |
6026 | case X86::VRSQRT14SDZrrkz: |
6027 | case X86::VRSQRT28SDZr: |
6028 | case X86::VRSQRT28SDZrk: |
6029 | case X86::VRSQRT28SDZrkz: |
6030 | case X86::VSCALEFSDZrr: |
6031 | case X86::VSCALEFSDZrrk: |
6032 | case X86::VSCALEFSDZrrkz: |
6033 | return false; |
6034 | default: |
6035 | return true; |
6036 | } |
6037 | } |
6038 | |
6039 | return false; |
6040 | } |
6041 | |
6042 | MachineInstr *X86InstrInfo::foldMemoryOperandImpl( |
6043 | MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops, |
6044 | MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI, |
6045 | LiveIntervals *LIS) const { |
6046 | // TODO: support the case where LoadMI loads a wide register but MI only uses |
6047 | // a sub-register of it. |
6048 | |
6049 | for (auto Op : Ops) { |
6050 | if (MI.getOperand(Op).getSubReg()) |
6051 | return nullptr; |
6052 | } |
6053 | |
6054 | // If LoadMI is itself a load from a stack slot, fold via the frame index path. |
6055 | unsigned NumOps = LoadMI.getDesc().getNumOperands(); |
6056 | int FrameIndex; |
6057 | if (isLoadFromStackSlot(LoadMI, FrameIndex)) { |
6058 | if (isNonFoldablePartialRegisterLoad(LoadMI, MI, MF)) |
6059 | return nullptr; |
6060 | return foldMemoryOperandImpl(MF, MI, Ops, InsertPt, FrameIndex, LIS); |
6061 | } |
6062 | |
6063 | |
6064 | if (NoFusing) return nullptr; |
6065 | |
6066 | |
6067 | if (!MF.getFunction().hasOptSize() && |
6068 | (hasPartialRegUpdate(MI.getOpcode(), Subtarget, true) || |
6069 | shouldPreventUndefRegUpdateMemFold(MF, MI))) |
6070 | return nullptr; |
6071 | |
6072 | |
6073 | Align Alignment; |
6074 | if (LoadMI.hasOneMemOperand()) |
6075 | Alignment = (*LoadMI.memoperands_begin())->getAlign(); |
6076 | else |
6077 | switch (LoadMI.getOpcode()) { |
6078 | case X86::AVX512_512_SET0: |
6079 | case X86::AVX512_512_SETALLONES: |
6080 | Alignment = Align(64); |
6081 | break; |
6082 | case X86::AVX2_SETALLONES: |
6083 | case X86::AVX1_SETALLONES: |
6084 | case X86::AVX_SET0: |
6085 | case X86::AVX512_256_SET0: |
6086 | Alignment = Align(32); |
6087 | break; |
6088 | case X86::V_SET0: |
6089 | case X86::V_SETALLONES: |
6090 | case X86::AVX512_128_SET0: |
6091 | case X86::FsFLD0F128: |
6092 | case X86::AVX512_FsFLD0F128: |
6093 | Alignment = Align(16); |
6094 | break; |
6095 | case X86::MMX_SET0: |
6096 | case X86::FsFLD0SD: |
6097 | case X86::AVX512_FsFLD0SD: |
6098 | Alignment = Align(8); |
6099 | break; |
6100 | case X86::FsFLD0SS: |
6101 | case X86::AVX512_FsFLD0SS: |
6102 | Alignment = Align(4); |
6103 | break; |
6104 | default: |
6105 | return nullptr; |
6106 | } |
6107 | if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) { |
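     | // Same TESTrr -> CMPri 0 rewrite as in the frame-index path above. |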
6108 | unsigned NewOpc = 0; |
6109 | switch (MI.getOpcode()) { |
6110 | default: return nullptr; |
6111 | case X86::TEST8rr: NewOpc = X86::CMP8ri; break; |
6112 | case X86::TEST16rr: NewOpc = X86::CMP16ri8; break; |
6113 | case X86::TEST32rr: NewOpc = X86::CMP32ri8; break; |
6114 | case X86::TEST64rr: NewOpc = X86::CMP64ri8; break; |
6115 | } |
6116 | |
6117 | MI.setDesc(get(NewOpc)); |
6118 | MI.getOperand(1).ChangeToImmediate(0); |
6119 | } else if (Ops.size() != 1) |
6120 | return nullptr; |
6121 | |
6122 | // The load result and the operand being replaced must use the same |
6123 | // sub-register index, otherwise the folded access would change size. |
6124 | if (LoadMI.getOperand(0).getSubReg() != MI.getOperand(Ops[0]).getSubReg()) |
6125 | return nullptr; |
6126 | |
6127 | SmallVector<MachineOperand, X86::AddrNumOperands> MOs; |
6128 | switch (LoadMI.getOpcode()) { |
6129 | case X86::MMX_SET0: |
6130 | case X86::V_SET0: |
6131 | case X86::V_SETALLONES: |
6132 | case X86::AVX2_SETALLONES: |
6133 | case X86::AVX1_SETALLONES: |
6134 | case X86::AVX_SET0: |
6135 | case X86::AVX512_128_SET0: |
6136 | case X86::AVX512_256_SET0: |
6137 | case X86::AVX512_512_SET0: |
6138 | case X86::AVX512_512_SETALLONES: |
6139 | case X86::FsFLD0SD: |
6140 | case X86::AVX512_FsFLD0SD: |
6141 | case X86::FsFLD0SS: |
6142 | case X86::AVX512_FsFLD0SS: |
6143 | case X86::FsFLD0F128: |
6144 | case X86::AVX512_FsFLD0F128: { |
6145 | // These pseudos materialize a constant; instead of keeping them in a |
6146 | // register we can fold a load from a constant-pool entry, easing register |
6147 | // pressure. |
6148 | // Medium and large code models cannot address the constant pool this way. |
6149 | if (MF.getTarget().getCodeModel() != CodeModel::Small && |
6150 | MF.getTarget().getCodeModel() != CodeModel::Kernel) |
6151 | return nullptr; |
6152 | |
6153 | |
6154 | unsigned PICBase = 0; |
6155 | |
6156 | |
6157 | if (Subtarget.is64Bit()) { |
6158 | PICBase = X86::RIP; |
6159 | } else if (MF.getTarget().isPositionIndependent()) { |
6160 | // FIXME: ideally 32-bit PIC code would use the function's global base |
6161 | // register here, but it may have been spilled or may not be live at this |
6162 | // point, so the fold is simply refused. |
6163 | |
6164 | return nullptr; |
6165 | } |
6166 | |
6167 | |
6168 | MachineConstantPool &MCP = *MF.getConstantPool(); |
6169 | Type *Ty; |
6170 | unsigned Opc = LoadMI.getOpcode(); |
6171 | if (Opc == X86::FsFLD0SS || Opc == X86::AVX512_FsFLD0SS) |
6172 | Ty = Type::getFloatTy(MF.getFunction().getContext()); |
6173 | else if (Opc == X86::FsFLD0SD || Opc == X86::AVX512_FsFLD0SD) |
6174 | Ty = Type::getDoubleTy(MF.getFunction().getContext()); |
6175 | else if (Opc == X86::FsFLD0F128 || Opc == X86::AVX512_FsFLD0F128) |
6176 | Ty = Type::getFP128Ty(MF.getFunction().getContext()); |
6177 | else if (Opc == X86::AVX512_512_SET0 || Opc == X86::AVX512_512_SETALLONES) |
6178 | Ty = FixedVectorType::get(Type::getInt32Ty(MF.getFunction().getContext()), |
6179 | 16); |
6180 | else if (Opc == X86::AVX2_SETALLONES || Opc == X86::AVX_SET0 || |
6181 | Opc == X86::AVX512_256_SET0 || Opc == X86::AVX1_SETALLONES) |
6182 | Ty = FixedVectorType::get(Type::getInt32Ty(MF.getFunction().getContext()), |
6183 | 8); |
6184 | else if (Opc == X86::MMX_SET0) |
6185 | Ty = FixedVectorType::get(Type::getInt32Ty(MF.getFunction().getContext()), |
6186 | 2); |
6187 | else |
6188 | Ty = FixedVectorType::get(Type::getInt32Ty(MF.getFunction().getContext()), |
6189 | 4); |
6190 | |
6191 | bool IsAllOnes = (Opc == X86::V_SETALLONES || Opc == X86::AVX2_SETALLONES || |
6192 | Opc == X86::AVX512_512_SETALLONES || |
6193 | Opc == X86::AVX1_SETALLONES); |
6194 | const Constant *C = IsAllOnes ? Constant::getAllOnesValue(Ty) : |
6195 | Constant::getNullValue(Ty); |
6196 | unsigned CPI = MCP.getConstantPoolIndex(C, Alignment); |
6197 | // Build the five address operands for the constant-pool load: |
6198 | // base = PICBase, scale = 1, no index register, disp = the CPI, no segment. |
6199 | MOs.push_back(MachineOperand::CreateReg(PICBase, false)); |
6200 | MOs.push_back(MachineOperand::CreateImm(1)); |
6201 | MOs.push_back(MachineOperand::CreateReg(0, false)); |
6202 | MOs.push_back(MachineOperand::CreateCPI(CPI, 0)); |
6203 | MOs.push_back(MachineOperand::CreateReg(0, false)); |
6204 | break; |
6205 | } |
6206 | default: { |
6207 | if (isNonFoldablePartialRegisterLoad(LoadMI, MI, MF)) |
6208 | return nullptr; |
6209 | |
6210 | // Folding an ordinary load: just copy its address operands. |
6211 | MOs.append(LoadMI.operands_begin() + NumOps - X86::AddrNumOperands, |
6212 | LoadMI.operands_begin() + NumOps); |
6213 | break; |
6214 | } |
6215 | } |
6216 | return foldMemoryOperandImpl(MF, MI, Ops[0], MOs, InsertPt, |
6217 | 0, Alignment, true); |
6218 | } |
6219 | |
6220 | static SmallVector<MachineMemOperand *, 2> |
6221 | extractLoadMMOs(ArrayRef<MachineMemOperand *> MMOs, MachineFunction &MF) { |
6222 | SmallVector<MachineMemOperand *, 2> LoadMMOs; |
6223 | |
6224 | for (MachineMemOperand *MMO : MMOs) { |
6225 | if (!MMO->isLoad()) |
6226 | continue; |
6227 | |
6228 | if (!MMO->isStore()) { |
6229 | |
6230 | LoadMMOs.push_back(MMO); |
6231 | } else { |
6232 | // The MMO is both a load and a store; clone it with the store flag cleared. |
6233 | LoadMMOs.push_back(MF.getMachineMemOperand( |
6234 | MMO, MMO->getFlags() & ~MachineMemOperand::MOStore)); |
6235 | } |
6236 | } |
6237 | |
6238 | return LoadMMOs; |
6239 | } |
6240 | |
6241 | static SmallVector<MachineMemOperand *, 2> |
6242 | extractStoreMMOs(ArrayRef<MachineMemOperand *> MMOs, MachineFunction &MF) { |
6243 | SmallVector<MachineMemOperand *, 2> StoreMMOs; |
6244 | |
6245 | for (MachineMemOperand *MMO : MMOs) { |
6246 | if (!MMO->isStore()) |
6247 | continue; |
6248 | |
6249 | if (!MMO->isLoad()) { |
6250 | |
6251 | StoreMMOs.push_back(MMO); |
6252 | } else { |
6253 | // The MMO is both a load and a store; clone it with the load flag cleared. |
6254 | StoreMMOs.push_back(MF.getMachineMemOperand( |
6255 | MMO, MMO->getFlags() & ~MachineMemOperand::MOLoad)); |
6256 | } |
6257 | } |
6258 | |
6259 | return StoreMMOs; |
6260 | } |
6261 | /// Pick the AVX-512 broadcast-load opcode matching the fold-table entry's broadcast width and the register class's spill size. |
6262 | static unsigned getBroadcastOpcode(const X86MemoryFoldTableEntry *I, |
6263 | const TargetRegisterClass *RC, |
6264 | const X86Subtarget &STI) { |
6265 | assert(STI.hasAVX512() && "Expected at least AVX512!"); |
6266 | unsigned SpillSize = STI.getRegisterInfo()->getSpillSize(*RC); |
6267 | assert((SpillSize == 64 || STI.hasVLX()) && |
6268 | "Can't broadcast less than 64 bytes without AVX512VL!"); |
6269 | |
6270 | switch (I->Flags & TB_BCAST_MASK) { |
6271 | default: llvm_unreachable("Unexpected broadcast type!"); |
6272 | case TB_BCAST_D: |
6273 | switch (SpillSize) { |
6274 | default: llvm_unreachable("Unknown spill size"); |
6275 | case 16: return X86::VPBROADCASTDZ128rm; |
6276 | case 32: return X86::VPBROADCASTDZ256rm; |
6277 | case 64: return X86::VPBROADCASTDZrm; |
6278 | } |
6279 | break; |
6280 | case TB_BCAST_Q: |
6281 | switch (SpillSize) { |
6282 | default: llvm_unreachable("Unknown spill size"); |
6283 | case 16: return X86::VPBROADCASTQZ128rm; |
6284 | case 32: return X86::VPBROADCASTQZ256rm; |
6285 | case 64: return X86::VPBROADCASTQZrm; |
6286 | } |
6287 | break; |
6288 | case TB_BCAST_SS: |
6289 | switch (SpillSize) { |
6290 | default: llvm_unreachable("Unknown spill size"); |
6291 | case 16: return X86::VBROADCASTSSZ128rm; |
6292 | case 32: return X86::VBROADCASTSSZ256rm; |
6293 | case 64: return X86::VBROADCASTSSZrm; |
6294 | } |
6295 | break; |
6296 | case TB_BCAST_SD: |
6297 | switch (SpillSize) { |
6298 | default: llvm_unreachable("Unknown spill size"); |
6299 | case 16: return X86::VMOVDDUPZ128rm; |
6300 | case 32: return X86::VBROADCASTSDZ256rm; |
6301 | case 64: return X86::VBROADCASTSDZrm; |
6302 | } |
6303 | break; |
6304 | } |
6305 | } |
6306 | |
6307 | bool X86InstrInfo::unfoldMemoryOperand( |
6308 | MachineFunction &MF, MachineInstr &MI, unsigned Reg, bool UnfoldLoad, |
6309 | bool UnfoldStore, SmallVectorImpl<MachineInstr *> &NewMIs) const { |
6310 | const X86MemoryFoldTableEntry *I = lookupUnfoldTable(MI.getOpcode()); |
6311 | if (I == nullptr) |
6312 | return false; |
6313 | unsigned Opc = I->DstOp; |
6314 | unsigned Index = I->Flags & TB_INDEX_MASK; |
6315 | bool FoldedLoad = I->Flags & TB_FOLDED_LOAD; |
6316 | bool FoldedStore = I->Flags & TB_FOLDED_STORE; |
6317 | bool FoldedBCast = I->Flags & TB_FOLDED_BCAST; |
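     | // The unfold-table flags record which operand held the memory reference and |
     | // whether the fold was a load, a store, and/or a broadcast. |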
6318 | if (UnfoldLoad && !FoldedLoad) |
6319 | return false; |
6320 | UnfoldLoad &= FoldedLoad; |
6321 | if (UnfoldStore && !FoldedStore) |
6322 | return false; |
6323 | UnfoldStore &= FoldedStore; |
6324 | |
6325 | const MCInstrDesc &MCID = get(Opc); |
6326 | |
6327 | const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI, MF); |
6328 | const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); |
6329 | |
6330 | if (!MI.hasOneMemOperand() && RC == &X86::VR128RegClass && |
6331 | Subtarget.isUnalignedMem16Slow()) |
6332 | // Without a memory operand we must conservatively assume the address is |
6333 | // unaligned, and unaligned 128-bit accesses are slow on this subtarget. |
6334 | |
6335 | return false; |
6336 | SmallVector<MachineOperand, X86::AddrNumOperands> AddrOps; |
6337 | SmallVector<MachineOperand,2> BeforeOps; |
6338 | SmallVector<MachineOperand,2> AfterOps; |
6339 | SmallVector<MachineOperand,4> ImpOps; |
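     | // Partition MI's operands into the memory address, explicit operands before |
     | // and after it, and implicit register operands. |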
6340 | for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { |
6341 | MachineOperand &Op = MI.getOperand(i); |
6342 | if (i >= Index && i < Index + X86::AddrNumOperands) |
6343 | AddrOps.push_back(Op); |
6344 | else if (Op.isReg() && Op.isImplicit()) |
6345 | ImpOps.push_back(Op); |
6346 | else if (i < Index) |
6347 | BeforeOps.push_back(Op); |
6348 | else if (i > Index) |
6349 | AfterOps.push_back(Op); |
6350 | } |
6351 | |
6352 | |
6353 | if (UnfoldLoad) { |
6354 | auto MMOs = extractLoadMMOs(MI.memoperands(), MF); |
6355 | |
6356 | unsigned Opc; |
6357 | if (FoldedBCast) { |
6358 | Opc = getBroadcastOpcode(I, RC, Subtarget); |
6359 | } else { |
6360 | unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16); |
6361 | bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment; |
6362 | Opc = getLoadRegOpcode(Reg, RC, isAligned, Subtarget); |
6363 | } |
6364 | |
6365 | DebugLoc DL; |
6366 | MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), Reg); |
6367 | for (unsigned i = 0, e = AddrOps.size(); i != e; ++i) |
6368 | MIB.add(AddrOps[i]); |
6369 | MIB.setMemRefs(MMOs); |
6370 | NewMIs.push_back(MIB); |
6371 | |
6372 | if (UnfoldStore) { |
6373 | |
6374 | for (unsigned i = 1; i != 1 + X86::AddrNumOperands; ++i) { |
6375 | MachineOperand &MO = NewMIs[0]->getOperand(i); |
6376 | if (MO.isReg()) |
6377 | MO.setIsKill(false); |
6378 | } |
6379 | } |
6380 | } |
6381 | |
6382 | |
6383 | MachineInstr *DataMI = MF.CreateMachineInstr(MCID, MI.getDebugLoc(), true); |
6384 | MachineInstrBuilder MIB(MF, DataMI); |
6385 | |
6386 | if (FoldedStore) |
6387 | MIB.addReg(Reg, RegState::Define); |
6388 | for (MachineOperand &BeforeOp : BeforeOps) |
6389 | MIB.add(BeforeOp); |
6390 | if (FoldedLoad) |
6391 | MIB.addReg(Reg); |
6392 | for (MachineOperand &AfterOp : AfterOps) |
6393 | MIB.add(AfterOp); |
6394 | for (MachineOperand &ImpOp : ImpOps) { |
6395 | MIB.addReg(ImpOp.getReg(), |
6396 | getDefRegState(ImpOp.isDef()) | |
6397 | RegState::Implicit | |
6398 | getKillRegState(ImpOp.isKill()) | |
6399 | getDeadRegState(ImpOp.isDead()) | |
6400 | getUndefRegState(ImpOp.isUndef())); |
6401 | } |
6402 | |
6403 | switch (DataMI->getOpcode()) { |
6404 | default: break; |
6405 | case X86::CMP64ri32: |
6406 | case X86::CMP64ri8: |
6407 | case X86::CMP32ri: |
6408 | case X86::CMP32ri8: |
6409 | case X86::CMP16ri: |
6410 | case X86::CMP16ri8: |
6411 | case X86::CMP8ri: { |
6412 | MachineOperand &MO0 = DataMI->getOperand(0); |
6413 | MachineOperand &MO1 = DataMI->getOperand(1); |
6414 | if (MO1.isImm() && MO1.getImm() == 0) { |
6415 | unsigned NewOpc; |
6416 | switch (DataMI->getOpcode()) { |
6417 | default: llvm_unreachable("Unreachable!"); |
6418 | case X86::CMP64ri8: |
6419 | case X86::CMP64ri32: NewOpc = X86::TEST64rr; break; |
6420 | case X86::CMP32ri8: |
6421 | case X86::CMP32ri: NewOpc = X86::TEST32rr; break; |
6422 | case X86::CMP16ri8: |
6423 | case X86::CMP16ri: NewOpc = X86::TEST16rr; break; |
6424 | case X86::CMP8ri: NewOpc = X86::TEST8rr; break; |
6425 | } |
6426 | DataMI->setDesc(get(NewOpc)); |
6427 | MO1.ChangeToRegister(MO0.getReg(), false); |
6428 | } |
6429 | } |
6430 | } |
6431 | NewMIs.push_back(DataMI); |
6432 | |
6433 | |
6434 | if (UnfoldStore) { |
6435 | const TargetRegisterClass *DstRC = getRegClass(MCID, 0, &RI, MF); |
6436 | auto MMOs = extractStoreMMOs(MI.memoperands(), MF); |
6437 | unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*DstRC), 16); |
6438 | bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment; |
6439 | unsigned Opc = getStoreRegOpcode(Reg, DstRC, isAligned, Subtarget); |
6440 | DebugLoc DL; |
6441 | MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc)); |
6442 | for (unsigned i = 0, e = AddrOps.size(); i != e; ++i) |
6443 | MIB.add(AddrOps[i]); |
6444 | MIB.addReg(Reg, RegState::Kill); |
6445 | MIB.setMemRefs(MMOs); |
6446 | NewMIs.push_back(MIB); |
6447 | } |
6448 | |
6449 | return true; |
6450 | } |
6451 | |
6452 | bool |
6453 | X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N, |
6454 | SmallVectorImpl<SDNode*> &NewNodes) const { |
6455 | if (!N->isMachineOpcode()) |
6456 | return false; |
6457 | |
6458 | const X86MemoryFoldTableEntry *I = lookupUnfoldTable(N->getMachineOpcode()); |
6459 | if (I == nullptr) |
6460 | return false; |
6461 | unsigned Opc = I->DstOp; |
6462 | unsigned Index = I->Flags & TB_INDEX_MASK; |
6463 | bool FoldedLoad = I->Flags & TB_FOLDED_LOAD; |
6464 | bool FoldedStore = I->Flags & TB_FOLDED_STORE; |
6465 | bool FoldedBCast = I->Flags & TB_FOLDED_BCAST; |
6466 | const MCInstrDesc &MCID = get(Opc); |
6467 | MachineFunction &MF = DAG.getMachineFunction(); |
6468 | const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); |
6469 | const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI, MF); |
6470 | unsigned NumDefs = MCID.NumDefs; |
6471 | std::vector<SDValue> AddrOps; |
6472 | std::vector<SDValue> BeforeOps; |
6473 | std::vector<SDValue> AfterOps; |
6474 | SDLoc dl(N); |
6475 | unsigned NumOps = N->getNumOperands(); |
6476 | for (unsigned i = 0; i != NumOps-1; ++i) { |
6477 | SDValue Op = N->getOperand(i); |
6478 | if (i >= Index-NumDefs && i < Index-NumDefs + X86::AddrNumOperands) |
6479 | AddrOps.push_back(Op); |
6480 | else if (i < Index-NumDefs) |
6481 | BeforeOps.push_back(Op); |
6482 | else if (i > Index-NumDefs) |
6483 | AfterOps.push_back(Op); |
6484 | } |
6485 | SDValue Chain = N->getOperand(NumOps-1); |
6486 | AddrOps.push_back(Chain); |
6487 | |
6488 | |
6489 | SDNode *Load = nullptr; |
6490 | if (FoldedLoad) { |
6491 | EVT VT = *TRI.legalclasstypes_begin(*RC); |
6492 | auto MMOs = extractLoadMMOs(cast<MachineSDNode>(N)->memoperands(), MF); |
6493 | if (MMOs.empty() && RC == &X86::VR128RegClass && |
6494 | Subtarget.isUnalignedMem16Slow()) |
6495 | |
6496 | return false; |
6497 | |
6498 | |
6499 | |
6500 | unsigned Opc; |
6501 | if (FoldedBCast) { |
6502 | Opc = getBroadcastOpcode(I, RC, Subtarget); |
6503 | } else { |
6504 | unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16); |
6505 | bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment; |
6506 | Opc = getLoadRegOpcode(0, RC, isAligned, Subtarget); |
6507 | } |
6508 | |
6509 | Load = DAG.getMachineNode(Opc, dl, VT, MVT::Other, AddrOps); |
6510 | NewNodes.push_back(Load); |
6511 | |
6512 | |
6513 | DAG.setNodeMemRefs(cast<MachineSDNode>(Load), MMOs); |
6514 | } |
6515 | |
6516 | |
6517 | std::vector<EVT> VTs; |
6518 | const TargetRegisterClass *DstRC = nullptr; |
6519 | if (MCID.getNumDefs() > 0) { |
6520 | DstRC = getRegClass(MCID, 0, &RI, MF); |
6521 | VTs.push_back(*TRI.legalclasstypes_begin(*DstRC)); |
6522 | } |
6523 | for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) { |
6524 | EVT VT = N->getValueType(i); |
6525 | if (VT != MVT::Other && i >= (unsigned)MCID.getNumDefs()) |
6526 | VTs.push_back(VT); |
6527 | } |
6528 | if (Load) |
6529 | BeforeOps.push_back(SDValue(Load, 0)); |
6530 | llvm::append_range(BeforeOps, AfterOps); |
6531 | |
6532 | switch (Opc) { |
6533 | default: break; |
6534 | case X86::CMP64ri32: |
6535 | case X86::CMP64ri8: |
6536 | case X86::CMP32ri: |
6537 | case X86::CMP32ri8: |
6538 | case X86::CMP16ri: |
6539 | case X86::CMP16ri8: |
6540 | case X86::CMP8ri: |
6541 | if (isNullConstant(BeforeOps[1])) { |
6542 | switch (Opc) { |
6543 | default: llvm_unreachable("Unreachable!"); |
6544 | case X86::CMP64ri8: |
6545 | case X86::CMP64ri32: Opc = X86::TEST64rr; break; |
6546 | case X86::CMP32ri8: |
6547 | case X86::CMP32ri: Opc = X86::TEST32rr; break; |
6548 | case X86::CMP16ri8: |
6549 | case X86::CMP16ri: Opc = X86::TEST16rr; break; |
6550 | case X86::CMP8ri: Opc = X86::TEST8rr; break; |
6551 | } |
6552 | BeforeOps[1] = BeforeOps[0]; |
6553 | } |
6554 | } |
6555 | SDNode *NewNode= DAG.getMachineNode(Opc, dl, VTs, BeforeOps); |
6556 | NewNodes.push_back(NewNode); |
6557 | |
6558 | |
6559 | if (FoldedStore) { |
6560 | AddrOps.pop_back(); |
6561 | AddrOps.push_back(SDValue(NewNode, 0)); |
6562 | AddrOps.push_back(Chain); |
6563 | auto MMOs = extractStoreMMOs(cast<MachineSDNode>(N)->memoperands(), MF); |
6564 | if (MMOs.empty() && RC == &X86::VR128RegClass && |
6565 | Subtarget.isUnalignedMem16Slow()) |
6566 | |
6567 | return false; |
6568 | |
6569 | |
6570 | unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16); |
6571 | bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment; |
6572 | SDNode *Store = |
6573 | DAG.getMachineNode(getStoreRegOpcode(0, DstRC, isAligned, Subtarget), |
6574 | dl, MVT::Other, AddrOps); |
6575 | NewNodes.push_back(Store); |
6576 | |
6577 | |
6578 | DAG.setNodeMemRefs(cast<MachineSDNode>(Store), MMOs); |
6579 | } |
6580 | |
6581 | return true; |
6582 | } |
6583 | |
6584 | unsigned X86InstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc, |
6585 | bool UnfoldLoad, bool UnfoldStore, |
6586 | unsigned *LoadRegIndex) const { |
6587 | const X86MemoryFoldTableEntry *I = lookupUnfoldTable(Opc); |
6588 | if (I == nullptr) |
6589 | return 0; |
6590 | bool FoldedLoad = I->Flags & TB_FOLDED_LOAD; |
6591 | bool FoldedStore = I->Flags & TB_FOLDED_STORE; |
6592 | if (UnfoldLoad && !FoldedLoad) |
6593 | return 0; |
6594 | if (UnfoldStore && !FoldedStore) |
6595 | return 0; |
6596 | if (LoadRegIndex) |
6597 | *LoadRegIndex = I->Flags & TB_INDEX_MASK; |
6598 | return I->DstOp; |
6599 | } |
6600 | |
6601 | bool |
6602 | X86InstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, |
6603 | int64_t &Offset1, int64_t &Offset2) const { |
6604 | if (!Load1->isMachineOpcode() || !Load2->isMachineOpcode()) |
6605 | return false; |
6606 | unsigned Opc1 = Load1->getMachineOpcode(); |
6607 | unsigned Opc2 = Load2->getMachineOpcode(); |
6608 | switch (Opc1) { |
6609 | default: return false; |
6610 | case X86::MOV8rm: |
6611 | case X86::MOV16rm: |
6612 | case X86::MOV32rm: |
6613 | case X86::MOV64rm: |
6614 | case X86::LD_Fp32m: |
6615 | case X86::LD_Fp64m: |
6616 | case X86::LD_Fp80m: |
6617 | case X86::MOVSSrm: |
6618 | case X86::MOVSSrm_alt: |
6619 | case X86::MOVSDrm: |
6620 | case X86::MOVSDrm_alt: |
6621 | case X86::MMX_MOVD64rm: |
6622 | case X86::MMX_MOVQ64rm: |
6623 | case X86::MOVAPSrm: |
6624 | case X86::MOVUPSrm: |
6625 | case X86::MOVAPDrm: |
6626 | case X86::MOVUPDrm: |
6627 | case X86::MOVDQArm: |
6628 | case X86::MOVDQUrm: |
6629 | |
6630 | case X86::VMOVSSrm: |
6631 | case X86::VMOVSSrm_alt: |
6632 | case X86::VMOVSDrm: |
6633 | case X86::VMOVSDrm_alt: |
6634 | case X86::VMOVAPSrm: |
6635 | case X86::VMOVUPSrm: |
6636 | case X86::VMOVAPDrm: |
6637 | case X86::VMOVUPDrm: |
6638 | case X86::VMOVDQArm: |
6639 | case X86::VMOVDQUrm: |
6640 | case X86::VMOVAPSYrm: |
6641 | case X86::VMOVUPSYrm: |
6642 | case X86::VMOVAPDYrm: |
6643 | case X86::VMOVUPDYrm: |
6644 | case X86::VMOVDQAYrm: |
6645 | case X86::VMOVDQUYrm: |
6646 | |
6647 | case X86::VMOVSSZrm: |
6648 | case X86::VMOVSSZrm_alt: |
6649 | case X86::VMOVSDZrm: |
6650 | case X86::VMOVSDZrm_alt: |
6651 | case X86::VMOVAPSZ128rm: |
6652 | case X86::VMOVUPSZ128rm: |
6653 | case X86::VMOVAPSZ128rm_NOVLX: |
6654 | case X86::VMOVUPSZ128rm_NOVLX: |
6655 | case X86::VMOVAPDZ128rm: |
6656 | case X86::VMOVUPDZ128rm: |
6657 | case X86::VMOVDQU8Z128rm: |
6658 | case X86::VMOVDQU16Z128rm: |
6659 | case X86::VMOVDQA32Z128rm: |
6660 | case X86::VMOVDQU32Z128rm: |
6661 | case X86::VMOVDQA64Z128rm: |
6662 | case X86::VMOVDQU64Z128rm: |
6663 | case X86::VMOVAPSZ256rm: |
6664 | case X86::VMOVUPSZ256rm: |
6665 | case X86::VMOVAPSZ256rm_NOVLX: |
6666 | case X86::VMOVUPSZ256rm_NOVLX: |
6667 | case X86::VMOVAPDZ256rm: |
6668 | case X86::VMOVUPDZ256rm: |
6669 | case X86::VMOVDQU8Z256rm: |
6670 | case X86::VMOVDQU16Z256rm: |
6671 | case X86::VMOVDQA32Z256rm: |
6672 | case X86::VMOVDQU32Z256rm: |
6673 | case X86::VMOVDQA64Z256rm: |
6674 | case X86::VMOVDQU64Z256rm: |
6675 | case X86::VMOVAPSZrm: |
6676 | case X86::VMOVUPSZrm: |
6677 | case X86::VMOVAPDZrm: |
6678 | case X86::VMOVUPDZrm: |
6679 | case X86::VMOVDQU8Zrm: |
6680 | case X86::VMOVDQU16Zrm: |
6681 | case X86::VMOVDQA32Zrm: |
6682 | case X86::VMOVDQU32Zrm: |
6683 | case X86::VMOVDQA64Zrm: |
6684 | case X86::VMOVDQU64Zrm: |
6685 | case X86::KMOVBkm: |
6686 | case X86::KMOVWkm: |
6687 | case X86::KMOVDkm: |
6688 | case X86::KMOVQkm: |
6689 | break; |
6690 | } |
6691 | switch (Opc2) { |
6692 | default: return false; |
6693 | case X86::MOV8rm: |
6694 | case X86::MOV16rm: |
6695 | case X86::MOV32rm: |
6696 | case X86::MOV64rm: |
6697 | case X86::LD_Fp32m: |
6698 | case X86::LD_Fp64m: |
6699 | case X86::LD_Fp80m: |
6700 | case X86::MOVSSrm: |
6701 | case X86::MOVSSrm_alt: |
6702 | case X86::MOVSDrm: |
6703 | case X86::MOVSDrm_alt: |
6704 | case X86::MMX_MOVD64rm: |
6705 | case X86::MMX_MOVQ64rm: |
6706 | case X86::MOVAPSrm: |
6707 | case X86::MOVUPSrm: |
6708 | case X86::MOVAPDrm: |
6709 | case X86::MOVUPDrm: |
6710 | case X86::MOVDQArm: |
6711 | case X86::MOVDQUrm: |
6712 | |
6713 | case X86::VMOVSSrm: |
6714 | case X86::VMOVSSrm_alt: |
6715 | case X86::VMOVSDrm: |
6716 | case X86::VMOVSDrm_alt: |
6717 | case X86::VMOVAPSrm: |
6718 | case X86::VMOVUPSrm: |
6719 | case X86::VMOVAPDrm: |
6720 | case X86::VMOVUPDrm: |
6721 | case X86::VMOVDQArm: |
6722 | case X86::VMOVDQUrm: |
6723 | case X86::VMOVAPSYrm: |
6724 | case X86::VMOVUPSYrm: |
6725 | case X86::VMOVAPDYrm: |
6726 | case X86::VMOVUPDYrm: |
6727 | case X86::VMOVDQAYrm: |
6728 | case X86::VMOVDQUYrm: |
6729 | |
6730 | case X86::VMOVSSZrm: |
6731 | case X86::VMOVSSZrm_alt: |
6732 | case X86::VMOVSDZrm: |
6733 | case X86::VMOVSDZrm_alt: |
6734 | case X86::VMOVAPSZ128rm: |
6735 | case X86::VMOVUPSZ128rm: |
6736 | case X86::VMOVAPSZ128rm_NOVLX: |
6737 | case X86::VMOVUPSZ128rm_NOVLX: |
6738 | case X86::VMOVAPDZ128rm: |
6739 | case X86::VMOVUPDZ128rm: |
6740 | case X86::VMOVDQU8Z128rm: |
6741 | case X86::VMOVDQU16Z128rm: |
6742 | case X86::VMOVDQA32Z128rm: |
6743 | case X86::VMOVDQU32Z128rm: |
6744 | case X86::VMOVDQA64Z128rm: |
6745 | case X86::VMOVDQU64Z128rm: |
6746 | case X86::VMOVAPSZ256rm: |
6747 | case X86::VMOVUPSZ256rm: |
6748 | case X86::VMOVAPSZ256rm_NOVLX: |
6749 | case X86::VMOVUPSZ256rm_NOVLX: |
6750 | case X86::VMOVAPDZ256rm: |
6751 | case X86::VMOVUPDZ256rm: |
6752 | case X86::VMOVDQU8Z256rm: |
6753 | case X86::VMOVDQU16Z256rm: |
6754 | case X86::VMOVDQA32Z256rm: |
6755 | case X86::VMOVDQU32Z256rm: |
6756 | case X86::VMOVDQA64Z256rm: |
6757 | case X86::VMOVDQU64Z256rm: |
6758 | case X86::VMOVAPSZrm: |
6759 | case X86::VMOVUPSZrm: |
6760 | case X86::VMOVAPDZrm: |
6761 | case X86::VMOVUPDZrm: |
6762 | case X86::VMOVDQU8Zrm: |
6763 | case X86::VMOVDQU16Zrm: |
6764 | case X86::VMOVDQA32Zrm: |
6765 | case X86::VMOVDQU32Zrm: |
6766 | case X86::VMOVDQA64Zrm: |
6767 | case X86::VMOVDQU64Zrm: |
6768 | case X86::KMOVBkm: |
6769 | case X86::KMOVWkm: |
6770 | case X86::KMOVDkm: |
6771 | case X86::KMOVQkm: |
6772 | break; |
6773 | } |
6774 | |
6775 | |
6776 | auto HasSameOp = [&](int I) { |
6777 | return Load1->getOperand(I) == Load2->getOperand(I); |
6778 | }; |
6779 | |
6780 | |
6781 | if (!HasSameOp(X86::AddrBaseReg) || !HasSameOp(X86::AddrScaleAmt) || |
6782 | !HasSameOp(X86::AddrIndexReg) || !HasSameOp(X86::AddrSegmentReg)) |
6783 | return false; |
6784 | |
6785 | |
6786 | if (!HasSameOp(5)) |
6787 | return false; |
6788 | |
6789 | |
6790 | auto Disp1 = dyn_cast<ConstantSDNode>(Load1->getOperand(X86::AddrDisp)); |
6791 | auto Disp2 = dyn_cast<ConstantSDNode>(Load2->getOperand(X86::AddrDisp)); |
6792 | if (!Disp1 || !Disp2) |
6793 | return false; |
6794 | |
6795 | Offset1 = Disp1->getSExtValue(); |
6796 | Offset2 = Disp2->getSExtValue(); |
6797 | return true; |
6798 | } |
6799 | |
6800 | bool X86InstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2, |
6801 | int64_t Offset1, int64_t Offset2, |
6802 | unsigned NumLoads) const { |
6803 | assert(Offset2 > Offset1); |
6804 | if ((Offset2 - Offset1) / 8 > 64) |
6805 | return false; |
6806 | |
6807 | unsigned Opc1 = Load1->getMachineOpcode(); |
6808 | unsigned Opc2 = Load2->getMachineOpcode(); |
6809 | if (Opc1 != Opc2) |
6810 | return false; |
6811 | |
6812 | switch (Opc1) { |
6813 | default: break; |
6814 | case X86::LD_Fp32m: |
6815 | case X86::LD_Fp64m: |
6816 | case X86::LD_Fp80m: |
6817 | case X86::MMX_MOVD64rm: |
6818 | case X86::MMX_MOVQ64rm: |
6819 | return false; |
6820 | } |
6821 | |
6822 | EVT VT = Load1->getValueType(0); |
6823 | switch (VT.getSimpleVT().SimpleTy) { |
6824 | default: |
6825 | |
6826 | |
6827 | if (Subtarget.is64Bit()) { |
6828 | if (NumLoads >= 3) |
6829 | return false; |
6830 | } else if (NumLoads) { |
6831 | return false; |
6832 | } |
6833 | break; |
6834 | case MVT::i8: |
6835 | case MVT::i16: |
6836 | case MVT::i32: |
6837 | case MVT::i64: |
6838 | case MVT::f32: |
6839 | case MVT::f64: |
6840 | if (NumLoads) |
6841 | return false; |
6842 | break; |
6843 | } |
6844 | |
6845 | return true; |
6846 | } |
6847 | |
6848 | bool X86InstrInfo::isSchedulingBoundary(const MachineInstr &MI, |
6849 | const MachineBasicBlock *MBB, |
6850 | const MachineFunction &MF) const { |
6851 | |
6852 | |
6853 | unsigned Opcode = MI.getOpcode(); |
6854 | if (Opcode == X86::ENDBR64 || Opcode == X86::ENDBR32 || |
6855 | Opcode == X86::LDTILECFG) |
6856 | return true; |
6857 | |
6858 | return TargetInstrInfo::isSchedulingBoundary(MI, MBB, MF); |
6859 | } |
6860 | |
6861 | bool X86InstrInfo:: |
6862 | reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const { |
6863 | assert(Cond.size() == 1 && "Invalid X86 branch condition!"); |
6864 | X86::CondCode CC = static_cast<X86::CondCode>(Cond[0].getImm()); |
6865 | Cond[0].setImm(GetOppositeBranchCondition(CC)); |
6866 | return false; |
6867 | } |
6868 | |
6869 | bool X86InstrInfo:: |
6870 | isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const { |
6871 | |
6872 | |
6873 | return !(RC == &X86::CCRRegClass || RC == &X86::DFCCRRegClass || |
6874 | RC == &X86::RFP32RegClass || RC == &X86::RFP64RegClass || |
6875 | RC == &X86::RFP80RegClass); |
6876 | } |
6877 | |
6878 | |
6879 | |
6880 | |
6881 | |
6882 | |
6883 | |
6884 | unsigned X86InstrInfo::getGlobalBaseReg(MachineFunction *MF) const { |
6885 | assert((!Subtarget.is64Bit() || |
6886 | MF->getTarget().getCodeModel() == CodeModel::Medium || |
6887 | MF->getTarget().getCodeModel() == CodeModel::Large) && |
6888 | "X86-64 PIC uses RIP relative addressing"); |
6889 | |
6890 | X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>(); |
6891 | Register GlobalBaseReg = X86FI->getGlobalBaseReg(); |
6892 | if (GlobalBaseReg != 0) |
6893 | return GlobalBaseReg; |
6894 | |
6895 | |
6896 | |
6897 | MachineRegisterInfo &RegInfo = MF->getRegInfo(); |
6898 | GlobalBaseReg = RegInfo.createVirtualRegister( |
6899 | Subtarget.is64Bit() ? &X86::GR64_NOSPRegClass : &X86::GR32_NOSPRegClass); |
6900 | X86FI->setGlobalBaseReg(GlobalBaseReg); |
6901 | return GlobalBaseReg; |
6902 | } |
6903 | |
6904 | |
6905 | |
6906 | |
6907 | static const uint16_t ReplaceableInstrs[][3] = { |
6908 | |
6909 | { X86::MOVAPSmr, X86::MOVAPDmr, X86::MOVDQAmr }, |
6910 | { X86::MOVAPSrm, X86::MOVAPDrm, X86::MOVDQArm }, |
6911 | { X86::MOVAPSrr, X86::MOVAPDrr, X86::MOVDQArr }, |
6912 | { X86::MOVUPSmr, X86::MOVUPDmr, X86::MOVDQUmr }, |
6913 | { X86::MOVUPSrm, X86::MOVUPDrm, X86::MOVDQUrm }, |
6914 | { X86::MOVLPSmr, X86::MOVLPDmr, X86::MOVPQI2QImr }, |
6915 | { X86::MOVSDmr, X86::MOVSDmr, X86::MOVPQI2QImr }, |
6916 | { X86::MOVSSmr, X86::MOVSSmr, X86::MOVPDI2DImr }, |
6917 | { X86::MOVSDrm, X86::MOVSDrm, X86::MOVQI2PQIrm }, |
6918 | { X86::MOVSDrm_alt,X86::MOVSDrm_alt,X86::MOVQI2PQIrm }, |
6919 | { X86::MOVSSrm, X86::MOVSSrm, X86::MOVDI2PDIrm }, |
6920 | { X86::MOVSSrm_alt,X86::MOVSSrm_alt,X86::MOVDI2PDIrm }, |
6921 | { X86::MOVNTPSmr, X86::MOVNTPDmr, X86::MOVNTDQmr }, |
6922 | { X86::ANDNPSrm, X86::ANDNPDrm, X86::PANDNrm }, |
6923 | { X86::ANDNPSrr, X86::ANDNPDrr, X86::PANDNrr }, |
6924 | { X86::ANDPSrm, X86::ANDPDrm, X86::PANDrm }, |
6925 | { X86::ANDPSrr, X86::ANDPDrr, X86::PANDrr }, |
6926 | { X86::ORPSrm, X86::ORPDrm, X86::PORrm }, |
6927 | { X86::ORPSrr, X86::ORPDrr, X86::PORrr }, |
6928 | { X86::XORPSrm, X86::XORPDrm, X86::PXORrm }, |
6929 | { X86::XORPSrr, X86::XORPDrr, X86::PXORrr }, |
6930 | { X86::UNPCKLPDrm, X86::UNPCKLPDrm, X86::PUNPCKLQDQrm }, |
6931 | { X86::MOVLHPSrr, X86::UNPCKLPDrr, X86::PUNPCKLQDQrr }, |
6932 | { X86::UNPCKHPDrm, X86::UNPCKHPDrm, X86::PUNPCKHQDQrm }, |
6933 | { X86::UNPCKHPDrr, X86::UNPCKHPDrr, X86::PUNPCKHQDQrr }, |
6934 | { X86::UNPCKLPSrm, X86::UNPCKLPSrm, X86::PUNPCKLDQrm }, |
6935 | { X86::UNPCKLPSrr, X86::UNPCKLPSrr, X86::PUNPCKLDQrr }, |
6936 | { X86::UNPCKHPSrm, X86::UNPCKHPSrm, X86::PUNPCKHDQrm }, |
6937 | { X86::UNPCKHPSrr, X86::UNPCKHPSrr, X86::PUNPCKHDQrr }, |
6938 | { X86::EXTRACTPSmr, X86::EXTRACTPSmr, X86::PEXTRDmr }, |
6939 | { X86::EXTRACTPSrr, X86::EXTRACTPSrr, X86::PEXTRDrr }, |
6940 | |
6941 | { X86::VMOVAPSmr, X86::VMOVAPDmr, X86::VMOVDQAmr }, |
6942 | { X86::VMOVAPSrm, X86::VMOVAPDrm, X86::VMOVDQArm }, |
6943 | { X86::VMOVAPSrr, X86::VMOVAPDrr, X86::VMOVDQArr }, |
6944 | { X86::VMOVUPSmr, X86::VMOVUPDmr, X86::VMOVDQUmr }, |
6945 | { X86::VMOVUPSrm, X86::VMOVUPDrm, X86::VMOVDQUrm }, |
6946 | { X86::VMOVLPSmr, X86::VMOVLPDmr, X86::VMOVPQI2QImr }, |
6947 | { X86::VMOVSDmr, X86::VMOVSDmr, X86::VMOVPQI2QImr }, |
6948 | { X86::VMOVSSmr, X86::VMOVSSmr, X86::VMOVPDI2DImr }, |
6949 | { X86::VMOVSDrm, X86::VMOVSDrm, X86::VMOVQI2PQIrm }, |
6950 | { X86::VMOVSDrm_alt,X86::VMOVSDrm_alt,X86::VMOVQI2PQIrm }, |
6951 | { X86::VMOVSSrm, X86::VMOVSSrm, X86::VMOVDI2PDIrm }, |
6952 | { X86::VMOVSSrm_alt,X86::VMOVSSrm_alt,X86::VMOVDI2PDIrm }, |
6953 | { X86::VMOVNTPSmr, X86::VMOVNTPDmr, X86::VMOVNTDQmr }, |
6954 | { X86::VANDNPSrm, X86::VANDNPDrm, X86::VPANDNrm }, |
6955 | { X86::VANDNPSrr, X86::VANDNPDrr, X86::VPANDNrr }, |
6956 | { X86::VANDPSrm, X86::VANDPDrm, X86::VPANDrm }, |
6957 | { X86::VANDPSrr, X86::VANDPDrr, X86::VPANDrr }, |
6958 | { X86::VORPSrm, X86::VORPDrm, X86::VPORrm }, |
6959 | { X86::VORPSrr, X86::VORPDrr, X86::VPORrr }, |
6960 | { X86::VXORPSrm, X86::VXORPDrm, X86::VPXORrm }, |
6961 | { X86::VXORPSrr, X86::VXORPDrr, X86::VPXORrr }, |
6962 | { X86::VUNPCKLPDrm, X86::VUNPCKLPDrm, X86::VPUNPCKLQDQrm }, |
6963 | { X86::VMOVLHPSrr, X86::VUNPCKLPDrr, X86::VPUNPCKLQDQrr }, |
6964 | { X86::VUNPCKHPDrm, X86::VUNPCKHPDrm, X86::VPUNPCKHQDQrm }, |
6965 | { X86::VUNPCKHPDrr, X86::VUNPCKHPDrr, X86::VPUNPCKHQDQrr }, |
6966 | { X86::VUNPCKLPSrm, X86::VUNPCKLPSrm, X86::VPUNPCKLDQrm }, |
6967 | { X86::VUNPCKLPSrr, X86::VUNPCKLPSrr, X86::VPUNPCKLDQrr }, |
6968 | { X86::VUNPCKHPSrm, X86::VUNPCKHPSrm, X86::VPUNPCKHDQrm }, |
6969 | { X86::VUNPCKHPSrr, X86::VUNPCKHPSrr, X86::VPUNPCKHDQrr }, |
6970 | { X86::VEXTRACTPSmr, X86::VEXTRACTPSmr, X86::VPEXTRDmr }, |
6971 | { X86::VEXTRACTPSrr, X86::VEXTRACTPSrr, X86::VPEXTRDrr }, |
6972 | |
6973 | { X86::VMOVAPSYmr, X86::VMOVAPDYmr, X86::VMOVDQAYmr }, |
6974 | { X86::VMOVAPSYrm, X86::VMOVAPDYrm, X86::VMOVDQAYrm }, |
6975 | { X86::VMOVAPSYrr, X86::VMOVAPDYrr, X86::VMOVDQAYrr }, |
6976 | { X86::VMOVUPSYmr, X86::VMOVUPDYmr, X86::VMOVDQUYmr }, |
6977 | { X86::VMOVUPSYrm, X86::VMOVUPDYrm, X86::VMOVDQUYrm }, |
6978 | { X86::VMOVNTPSYmr, X86::VMOVNTPDYmr, X86::VMOVNTDQYmr }, |
6979 | { X86::VPERMPSYrm, X86::VPERMPSYrm, X86::VPERMDYrm }, |
6980 | { X86::VPERMPSYrr, X86::VPERMPSYrr, X86::VPERMDYrr }, |
6981 | { X86::VPERMPDYmi, X86::VPERMPDYmi, X86::VPERMQYmi }, |
6982 | { X86::VPERMPDYri, X86::VPERMPDYri, X86::VPERMQYri }, |
6983 | |
6984 | { X86::VMOVLPSZ128mr, X86::VMOVLPDZ128mr, X86::VMOVPQI2QIZmr }, |
6985 | { X86::VMOVNTPSZ128mr, X86::VMOVNTPDZ128mr, X86::VMOVNTDQZ128mr }, |
6986 | { X86::VMOVNTPSZ256mr, X86::VMOVNTPDZ256mr, X86::VMOVNTDQZ256mr }, |
6987 | { X86::VMOVNTPSZmr, X86::VMOVNTPDZmr, X86::VMOVNTDQZmr }, |
6988 | { X86::VMOVSDZmr, X86::VMOVSDZmr, X86::VMOVPQI2QIZmr }, |
6989 | { X86::VMOVSSZmr, X86::VMOVSSZmr, X86::VMOVPDI2DIZmr }, |
6990 | { X86::VMOVSDZrm, X86::VMOVSDZrm, X86::VMOVQI2PQIZrm }, |
6991 | { X86::VMOVSDZrm_alt, X86::VMOVSDZrm_alt, X86::VMOVQI2PQIZrm }, |
6992 | { X86::VMOVSSZrm, X86::VMOVSSZrm, X86::VMOVDI2PDIZrm }, |
6993 | { X86::VMOVSSZrm_alt, X86::VMOVSSZrm_alt, X86::VMOVDI2PDIZrm }, |
6994 | { X86::VBROADCASTSSZ128rr,X86::VBROADCASTSSZ128rr,X86::VPBROADCASTDZ128rr }, |
6995 | { X86::VBROADCASTSSZ128rm,X86::VBROADCASTSSZ128rm,X86::VPBROADCASTDZ128rm }, |
6996 | { X86::VBROADCASTSSZ256rr,X86::VBROADCASTSSZ256rr,X86::VPBROADCASTDZ256rr }, |
6997 | { X86::VBROADCASTSSZ256rm,X86::VBROADCASTSSZ256rm,X86::VPBROADCASTDZ256rm }, |
6998 | { X86::VBROADCASTSSZrr, X86::VBROADCASTSSZrr, X86::VPBROADCASTDZrr }, |
6999 | { X86::VBROADCASTSSZrm, X86::VBROADCASTSSZrm, X86::VPBROADCASTDZrm }, |
7000 | { X86::VMOVDDUPZ128rr, X86::VMOVDDUPZ128rr, X86::VPBROADCASTQZ128rr }, |
7001 | { X86::VMOVDDUPZ128rm, X86::VMOVDDUPZ128rm, X86::VPBROADCASTQZ128rm }, |
7002 | { X86::VBROADCASTSDZ256rr,X86::VBROADCASTSDZ256rr,X86::VPBROADCASTQZ256rr }, |
7003 | { X86::VBROADCASTSDZ256rm,X86::VBROADCASTSDZ256rm,X86::VPBROADCASTQZ256rm }, |
7004 | { X86::VBROADCASTSDZrr, X86::VBROADCASTSDZrr, X86::VPBROADCASTQZrr }, |
7005 | { X86::VBROADCASTSDZrm, X86::VBROADCASTSDZrm, X86::VPBROADCASTQZrm }, |
7006 | { X86::VINSERTF32x4Zrr, X86::VINSERTF32x4Zrr, X86::VINSERTI32x4Zrr }, |
7007 | { X86::VINSERTF32x4Zrm, X86::VINSERTF32x4Zrm, X86::VINSERTI32x4Zrm }, |
7008 | { X86::VINSERTF32x8Zrr, X86::VINSERTF32x8Zrr, X86::VINSERTI32x8Zrr }, |
7009 | { X86::VINSERTF32x8Zrm, X86::VINSERTF32x8Zrm, X86::VINSERTI32x8Zrm }, |
7010 | { X86::VINSERTF64x2Zrr, X86::VINSERTF64x2Zrr, X86::VINSERTI64x2Zrr }, |
7011 | { X86::VINSERTF64x2Zrm, X86::VINSERTF64x2Zrm, X86::VINSERTI64x2Zrm }, |
7012 | { X86::VINSERTF64x4Zrr, X86::VINSERTF64x4Zrr, X86::VINSERTI64x4Zrr }, |
7013 | { X86::VINSERTF64x4Zrm, X86::VINSERTF64x4Zrm, X86::VINSERTI64x4Zrm }, |
7014 | { X86::VINSERTF32x4Z256rr,X86::VINSERTF32x4Z256rr,X86::VINSERTI32x4Z256rr }, |
7015 | { X86::VINSERTF32x4Z256rm,X86::VINSERTF32x4Z256rm,X86::VINSERTI32x4Z256rm }, |
7016 | { X86::VINSERTF64x2Z256rr,X86::VINSERTF64x2Z256rr,X86::VINSERTI64x2Z256rr }, |
7017 | { X86::VINSERTF64x2Z256rm,X86::VINSERTF64x2Z256rm,X86::VINSERTI64x2Z256rm }, |
7018 | { X86::VEXTRACTF32x4Zrr, X86::VEXTRACTF32x4Zrr, X86::VEXTRACTI32x4Zrr }, |
7019 | { X86::VEXTRACTF32x4Zmr, X86::VEXTRACTF32x4Zmr, X86::VEXTRACTI32x4Zmr }, |
7020 | { X86::VEXTRACTF32x8Zrr, X86::VEXTRACTF32x8Zrr, X86::VEXTRACTI32x8Zrr }, |
7021 | { X86::VEXTRACTF32x8Zmr, X86::VEXTRACTF32x8Zmr, X86::VEXTRACTI32x8Zmr }, |
7022 | { X86::VEXTRACTF64x2Zrr, X86::VEXTRACTF64x2Zrr, X86::VEXTRACTI64x2Zrr }, |
7023 | { X86::VEXTRACTF64x2Zmr, X86::VEXTRACTF64x2Zmr, X86::VEXTRACTI64x2Zmr }, |
7024 | { X86::VEXTRACTF64x4Zrr, X86::VEXTRACTF64x4Zrr, X86::VEXTRACTI64x4Zrr }, |
7025 | { X86::VEXTRACTF64x4Zmr, X86::VEXTRACTF64x4Zmr, X86::VEXTRACTI64x4Zmr }, |
7026 | { X86::VEXTRACTF32x4Z256rr,X86::VEXTRACTF32x4Z256rr,X86::VEXTRACTI32x4Z256rr }, |
7027 | { X86::VEXTRACTF32x4Z256mr,X86::VEXTRACTF32x4Z256mr,X86::VEXTRACTI32x4Z256mr }, |
7028 | { X86::VEXTRACTF64x2Z256rr,X86::VEXTRACTF64x2Z256rr,X86::VEXTRACTI64x2Z256rr }, |
7029 | { X86::VEXTRACTF64x2Z256mr,X86::VEXTRACTF64x2Z256mr,X86::VEXTRACTI64x2Z256mr }, |
7030 | { X86::VPERMILPSmi, X86::VPERMILPSmi, X86::VPSHUFDmi }, |
7031 | { X86::VPERMILPSri, X86::VPERMILPSri, X86::VPSHUFDri }, |
7032 | { X86::VPERMILPSZ128mi, X86::VPERMILPSZ128mi, X86::VPSHUFDZ128mi }, |
7033 | { X86::VPERMILPSZ128ri, X86::VPERMILPSZ128ri, X86::VPSHUFDZ128ri }, |
7034 | { X86::VPERMILPSZ256mi, X86::VPERMILPSZ256mi, X86::VPSHUFDZ256mi }, |
7035 | { X86::VPERMILPSZ256ri, X86::VPERMILPSZ256ri, X86::VPSHUFDZ256ri }, |
7036 | { X86::VPERMILPSZmi, X86::VPERMILPSZmi, X86::VPSHUFDZmi }, |
7037 | { X86::VPERMILPSZri, X86::VPERMILPSZri, X86::VPSHUFDZri }, |
7038 | { X86::VPERMPSZ256rm, X86::VPERMPSZ256rm, X86::VPERMDZ256rm }, |
7039 | { X86::VPERMPSZ256rr, X86::VPERMPSZ256rr, X86::VPERMDZ256rr }, |
7040 | { X86::VPERMPDZ256mi, X86::VPERMPDZ256mi, X86::VPERMQZ256mi }, |
7041 | { X86::VPERMPDZ256ri, X86::VPERMPDZ256ri, X86::VPERMQZ256ri }, |
7042 | { X86::VPERMPDZ256rm, X86::VPERMPDZ256rm, X86::VPERMQZ256rm }, |
7043 | { X86::VPERMPDZ256rr, X86::VPERMPDZ256rr, X86::VPERMQZ256rr }, |
7044 | { X86::VPERMPSZrm, X86::VPERMPSZrm, X86::VPERMDZrm }, |
7045 | { X86::VPERMPSZrr, X86::VPERMPSZrr, X86::VPERMDZrr }, |
7046 | { X86::VPERMPDZmi, X86::VPERMPDZmi, X86::VPERMQZmi }, |
7047 | { X86::VPERMPDZri, X86::VPERMPDZri, X86::VPERMQZri }, |
7048 | { X86::VPERMPDZrm, X86::VPERMPDZrm, X86::VPERMQZrm }, |
7049 | { X86::VPERMPDZrr, X86::VPERMPDZrr, X86::VPERMQZrr }, |
7050 | { X86::VUNPCKLPDZ256rm, X86::VUNPCKLPDZ256rm, X86::VPUNPCKLQDQZ256rm }, |
7051 | { X86::VUNPCKLPDZ256rr, X86::VUNPCKLPDZ256rr, X86::VPUNPCKLQDQZ256rr }, |
7052 | { X86::VUNPCKHPDZ256rm, X86::VUNPCKHPDZ256rm, X86::VPUNPCKHQDQZ256rm }, |
7053 | { X86::VUNPCKHPDZ256rr, X86::VUNPCKHPDZ256rr, X86::VPUNPCKHQDQZ256rr }, |
7054 | { X86::VUNPCKLPSZ256rm, X86::VUNPCKLPSZ256rm, X86::VPUNPCKLDQZ256rm }, |
7055 | { X86::VUNPCKLPSZ256rr, X86::VUNPCKLPSZ256rr, X86::VPUNPCKLDQZ256rr }, |
7056 | { X86::VUNPCKHPSZ256rm, X86::VUNPCKHPSZ256rm, X86::VPUNPCKHDQZ256rm }, |
7057 | { X86::VUNPCKHPSZ256rr, X86::VUNPCKHPSZ256rr, X86::VPUNPCKHDQZ256rr }, |
7058 | { X86::VUNPCKLPDZ128rm, X86::VUNPCKLPDZ128rm, X86::VPUNPCKLQDQZ128rm }, |
7059 | { X86::VMOVLHPSZrr, X86::VUNPCKLPDZ128rr, X86::VPUNPCKLQDQZ128rr }, |
7060 | { X86::VUNPCKHPDZ128rm, X86::VUNPCKHPDZ128rm, X86::VPUNPCKHQDQZ128rm }, |
7061 | { X86::VUNPCKHPDZ128rr, X86::VUNPCKHPDZ128rr, X86::VPUNPCKHQDQZ128rr }, |
7062 | { X86::VUNPCKLPSZ128rm, X86::VUNPCKLPSZ128rm, X86::VPUNPCKLDQZ128rm }, |
7063 | { X86::VUNPCKLPSZ128rr, X86::VUNPCKLPSZ128rr, X86::VPUNPCKLDQZ128rr }, |
7064 | { X86::VUNPCKHPSZ128rm, X86::VUNPCKHPSZ128rm, X86::VPUNPCKHDQZ128rm }, |
7065 | { X86::VUNPCKHPSZ128rr, X86::VUNPCKHPSZ128rr, X86::VPUNPCKHDQZ128rr }, |
7066 | { X86::VUNPCKLPDZrm, X86::VUNPCKLPDZrm, X86::VPUNPCKLQDQZrm }, |
7067 | { X86::VUNPCKLPDZrr, X86::VUNPCKLPDZrr, X86::VPUNPCKLQDQZrr }, |
7068 | { X86::VUNPCKHPDZrm, X86::VUNPCKHPDZrm, X86::VPUNPCKHQDQZrm }, |
7069 | { X86::VUNPCKHPDZrr, X86::VUNPCKHPDZrr, X86::VPUNPCKHQDQZrr }, |
7070 | { X86::VUNPCKLPSZrm, X86::VUNPCKLPSZrm, X86::VPUNPCKLDQZrm }, |
7071 | { X86::VUNPCKLPSZrr, X86::VUNPCKLPSZrr, X86::VPUNPCKLDQZrr }, |
7072 | { X86::VUNPCKHPSZrm, X86::VUNPCKHPSZrm, X86::VPUNPCKHDQZrm }, |
7073 | { X86::VUNPCKHPSZrr, X86::VUNPCKHPSZrr, X86::VPUNPCKHDQZrr }, |
7074 | { X86::VEXTRACTPSZmr, X86::VEXTRACTPSZmr, X86::VPEXTRDZmr }, |
7075 | { X86::VEXTRACTPSZrr, X86::VEXTRACTPSZrr, X86::VPEXTRDZrr }, |
7076 | }; |
7077 | |
7078 | static const uint16_t ReplaceableInstrsAVX2[][3] = { |
7079 | |
7080 | { X86::VANDNPSYrm, X86::VANDNPDYrm, X86::VPANDNYrm }, |
7081 | { X86::VANDNPSYrr, X86::VANDNPDYrr, X86::VPANDNYrr }, |
7082 | { X86::VANDPSYrm, X86::VANDPDYrm, X86::VPANDYrm }, |
7083 | { X86::VANDPSYrr, X86::VANDPDYrr, X86::VPANDYrr }, |
7084 | { X86::VORPSYrm, X86::VORPDYrm, X86::VPORYrm }, |
7085 | { X86::VORPSYrr, X86::VORPDYrr, X86::VPORYrr }, |
7086 | { X86::VXORPSYrm, X86::VXORPDYrm, X86::VPXORYrm }, |
7087 | { X86::VXORPSYrr, X86::VXORPDYrr, X86::VPXORYrr }, |
7088 | { X86::VPERM2F128rm, X86::VPERM2F128rm, X86::VPERM2I128rm }, |
7089 | { X86::VPERM2F128rr, X86::VPERM2F128rr, X86::VPERM2I128rr }, |
7090 | { X86::VBROADCASTSSrm, X86::VBROADCASTSSrm, X86::VPBROADCASTDrm}, |
7091 | { X86::VBROADCASTSSrr, X86::VBROADCASTSSrr, X86::VPBROADCASTDrr}, |
7092 | { X86::VMOVDDUPrm, X86::VMOVDDUPrm, X86::VPBROADCASTQrm}, |
7093 | { X86::VMOVDDUPrr, X86::VMOVDDUPrr, X86::VPBROADCASTQrr}, |
7094 | { X86::VBROADCASTSSYrr, X86::VBROADCASTSSYrr, X86::VPBROADCASTDYrr}, |
7095 | { X86::VBROADCASTSSYrm, X86::VBROADCASTSSYrm, X86::VPBROADCASTDYrm}, |
7096 | { X86::VBROADCASTSDYrr, X86::VBROADCASTSDYrr, X86::VPBROADCASTQYrr}, |
7097 | { X86::VBROADCASTSDYrm, X86::VBROADCASTSDYrm, X86::VPBROADCASTQYrm}, |
7098 | { X86::VBROADCASTF128, X86::VBROADCASTF128, X86::VBROADCASTI128 }, |
7099 | { X86::VBLENDPSYrri, X86::VBLENDPSYrri, X86::VPBLENDDYrri }, |
7100 | { X86::VBLENDPSYrmi, X86::VBLENDPSYrmi, X86::VPBLENDDYrmi }, |
7101 | { X86::VPERMILPSYmi, X86::VPERMILPSYmi, X86::VPSHUFDYmi }, |
7102 | { X86::VPERMILPSYri, X86::VPERMILPSYri, X86::VPSHUFDYri }, |
7103 | { X86::VUNPCKLPDYrm, X86::VUNPCKLPDYrm, X86::VPUNPCKLQDQYrm }, |
7104 | { X86::VUNPCKLPDYrr, X86::VUNPCKLPDYrr, X86::VPUNPCKLQDQYrr }, |
7105 | { X86::VUNPCKHPDYrm, X86::VUNPCKHPDYrm, X86::VPUNPCKHQDQYrm }, |
7106 | { X86::VUNPCKHPDYrr, X86::VUNPCKHPDYrr, X86::VPUNPCKHQDQYrr }, |
7107 | { X86::VUNPCKLPSYrm, X86::VUNPCKLPSYrm, X86::VPUNPCKLDQYrm }, |
7108 | { X86::VUNPCKLPSYrr, X86::VUNPCKLPSYrr, X86::VPUNPCKLDQYrr }, |
7109 | { X86::VUNPCKHPSYrm, X86::VUNPCKHPSYrm, X86::VPUNPCKHDQYrm }, |
7110 | { X86::VUNPCKHPSYrr, X86::VUNPCKHPSYrr, X86::VPUNPCKHDQYrr }, |
7111 | }; |
7112 | |
7113 | static const uint16_t ReplaceableInstrsFP[][3] = { |
7114 | |
7115 | { X86::MOVLPSrm, X86::MOVLPDrm, X86::INSTRUCTION_LIST_END }, |
7116 | { X86::MOVHPSrm, X86::MOVHPDrm, X86::INSTRUCTION_LIST_END }, |
7117 | { X86::MOVHPSmr, X86::MOVHPDmr, X86::INSTRUCTION_LIST_END }, |
7118 | { X86::VMOVLPSrm, X86::VMOVLPDrm, X86::INSTRUCTION_LIST_END }, |
7119 | { X86::VMOVHPSrm, X86::VMOVHPDrm, X86::INSTRUCTION_LIST_END }, |
7120 | { X86::VMOVHPSmr, X86::VMOVHPDmr, X86::INSTRUCTION_LIST_END }, |
7121 | { X86::VMOVLPSZ128rm, X86::VMOVLPDZ128rm, X86::INSTRUCTION_LIST_END }, |
7122 | { X86::VMOVHPSZ128rm, X86::VMOVHPDZ128rm, X86::INSTRUCTION_LIST_END }, |
7123 | { X86::VMOVHPSZ128mr, X86::VMOVHPDZ128mr, X86::INSTRUCTION_LIST_END }, |
7124 | }; |
7125 | |
7126 | static const uint16_t ReplaceableInstrsAVX2InsertExtract[][3] = { |
7127 | |
7128 | { X86::VEXTRACTF128mr, X86::VEXTRACTF128mr, X86::VEXTRACTI128mr }, |
7129 | { X86::VEXTRACTF128rr, X86::VEXTRACTF128rr, X86::VEXTRACTI128rr }, |
7130 | { X86::VINSERTF128rm, X86::VINSERTF128rm, X86::VINSERTI128rm }, |
7131 | { X86::VINSERTF128rr, X86::VINSERTF128rr, X86::VINSERTI128rr }, |
7132 | }; |
7133 | |
7134 | static const uint16_t ReplaceableInstrsAVX512[][4] = { |
7135 | |
7136 | |
7137 | { X86::VMOVAPSZ128mr, X86::VMOVAPDZ128mr, X86::VMOVDQA64Z128mr, X86::VMOVDQA32Z128mr }, |
7138 | { X86::VMOVAPSZ128rm, X86::VMOVAPDZ128rm, X86::VMOVDQA64Z128rm, X86::VMOVDQA32Z128rm }, |
7139 | { X86::VMOVAPSZ128rr, X86::VMOVAPDZ128rr, X86::VMOVDQA64Z128rr, X86::VMOVDQA32Z128rr }, |
7140 | { X86::VMOVUPSZ128mr, X86::VMOVUPDZ128mr, X86::VMOVDQU64Z128mr, X86::VMOVDQU32Z128mr }, |
7141 | { X86::VMOVUPSZ128rm, X86::VMOVUPDZ128rm, X86::VMOVDQU64Z128rm, X86::VMOVDQU32Z128rm }, |
7142 | { X86::VMOVAPSZ256mr, X86::VMOVAPDZ256mr, X86::VMOVDQA64Z256mr, X86::VMOVDQA32Z256mr }, |
7143 | { X86::VMOVAPSZ256rm, X86::VMOVAPDZ256rm, X86::VMOVDQA64Z256rm, X86::VMOVDQA32Z256rm }, |
7144 | { X86::VMOVAPSZ256rr, X86::VMOVAPDZ256rr, X86::VMOVDQA64Z256rr, X86::VMOVDQA32Z256rr }, |
7145 | { X86::VMOVUPSZ256mr, X86::VMOVUPDZ256mr, X86::VMOVDQU64Z256mr, X86::VMOVDQU32Z256mr }, |
7146 | { X86::VMOVUPSZ256rm, X86::VMOVUPDZ256rm, X86::VMOVDQU64Z256rm, X86::VMOVDQU32Z256rm }, |
7147 | { X86::VMOVAPSZmr, X86::VMOVAPDZmr, X86::VMOVDQA64Zmr, X86::VMOVDQA32Zmr }, |
7148 | { X86::VMOVAPSZrm, X86::VMOVAPDZrm, X86::VMOVDQA64Zrm, X86::VMOVDQA32Zrm }, |
7149 | { X86::VMOVAPSZrr, X86::VMOVAPDZrr, X86::VMOVDQA64Zrr, X86::VMOVDQA32Zrr }, |
7150 | { X86::VMOVUPSZmr, X86::VMOVUPDZmr, X86::VMOVDQU64Zmr, X86::VMOVDQU32Zmr }, |
7151 | { X86::VMOVUPSZrm, X86::VMOVUPDZrm, X86::VMOVDQU64Zrm, X86::VMOVDQU32Zrm }, |
7152 | }; |
7153 | |
7154 | static const uint16_t ReplaceableInstrsAVX512DQ[][4] = { |
7155 | |
7156 | |
7157 | { X86::VANDNPSZ128rm, X86::VANDNPDZ128rm, X86::VPANDNQZ128rm, X86::VPANDNDZ128rm }, |
7158 | { X86::VANDNPSZ128rr, X86::VANDNPDZ128rr, X86::VPANDNQZ128rr, X86::VPANDNDZ128rr }, |
7159 | { X86::VANDPSZ128rm, X86::VANDPDZ128rm, X86::VPANDQZ128rm, X86::VPANDDZ128rm }, |
7160 | { X86::VANDPSZ128rr, X86::VANDPDZ128rr, X86::VPANDQZ128rr, X86::VPANDDZ128rr }, |
7161 | { X86::VORPSZ128rm, X86::VORPDZ128rm, X86::VPORQZ128rm, X86::VPORDZ128rm }, |
7162 | { X86::VORPSZ128rr, X86::VORPDZ128rr, X86::VPORQZ128rr, X86::VPORDZ128rr }, |
7163 | { X86::VXORPSZ128rm, X86::VXORPDZ128rm, X86::VPXORQZ128rm, X86::VPXORDZ128rm }, |
7164 | { X86::VXORPSZ128rr, X86::VXORPDZ128rr, X86::VPXORQZ128rr, X86::VPXORDZ128rr }, |
7165 | { X86::VANDNPSZ256rm, X86::VANDNPDZ256rm, X86::VPANDNQZ256rm, X86::VPANDNDZ256rm }, |
7166 | { X86::VANDNPSZ256rr, X86::VANDNPDZ256rr, X86::VPANDNQZ256rr, X86::VPANDNDZ256rr }, |
7167 | { X86::VANDPSZ256rm, X86::VANDPDZ256rm, X86::VPANDQZ256rm, X86::VPANDDZ256rm }, |
7168 | { X86::VANDPSZ256rr, X86::VANDPDZ256rr, X86::VPANDQZ256rr, X86::VPANDDZ256rr }, |
7169 | { X86::VORPSZ256rm, X86::VORPDZ256rm, X86::VPORQZ256rm, X86::VPORDZ256rm }, |
7170 | { X86::VORPSZ256rr, X86::VORPDZ256rr, X86::VPORQZ256rr, X86::VPORDZ256rr }, |
7171 | { X86::VXORPSZ256rm, X86::VXORPDZ256rm, X86::VPXORQZ256rm, X86::VPXORDZ256rm }, |
7172 | { X86::VXORPSZ256rr, X86::VXORPDZ256rr, X86::VPXORQZ256rr, X86::VPXORDZ256rr }, |
7173 | { X86::VANDNPSZrm, X86::VANDNPDZrm, X86::VPANDNQZrm, X86::VPANDNDZrm }, |
7174 | { X86::VANDNPSZrr, X86::VANDNPDZrr, X86::VPANDNQZrr, X86::VPANDNDZrr }, |
7175 | { X86::VANDPSZrm, X86::VANDPDZrm, X86::VPANDQZrm, X86::VPANDDZrm }, |
7176 | { X86::VANDPSZrr, X86::VANDPDZrr, X86::VPANDQZrr, X86::VPANDDZrr }, |
7177 | { X86::VORPSZrm, X86::VORPDZrm, X86::VPORQZrm, X86::VPORDZrm }, |
7178 | { X86::VORPSZrr, X86::VORPDZrr, X86::VPORQZrr, X86::VPORDZrr }, |
7179 | { X86::VXORPSZrm, X86::VXORPDZrm, X86::VPXORQZrm, X86::VPXORDZrm }, |
7180 | { X86::VXORPSZrr, X86::VXORPDZrr, X86::VPXORQZrr, X86::VPXORDZrr }, |
7181 | }; |
7182 | |
7183 | static const uint16_t ReplaceableInstrsAVX512DQMasked[][4] = { |
7184 | |
7185 | |
7186 | |
7187 | { X86::VANDNPSZ128rmk, X86::VANDNPDZ128rmk, |
7188 | X86::VPANDNQZ128rmk, X86::VPANDNDZ128rmk }, |
7189 | { X86::VANDNPSZ128rmkz, X86::VANDNPDZ128rmkz, |
7190 | X86::VPANDNQZ128rmkz, X86::VPANDNDZ128rmkz }, |
7191 | { X86::VANDNPSZ128rrk, X86::VANDNPDZ128rrk, |
7192 | X86::VPANDNQZ128rrk, X86::VPANDNDZ128rrk }, |
7193 | { X86::VANDNPSZ128rrkz, X86::VANDNPDZ128rrkz, |
7194 | X86::VPANDNQZ128rrkz, X86::VPANDNDZ128rrkz }, |
7195 | { X86::VANDPSZ128rmk, X86::VANDPDZ128rmk, |
7196 | X86::VPANDQZ128rmk, X86::VPANDDZ128rmk }, |
7197 | { X86::VANDPSZ128rmkz, X86::VANDPDZ128rmkz, |
7198 | X86::VPANDQZ128rmkz, X86::VPANDDZ128rmkz }, |
7199 | { X86::VANDPSZ128rrk, X86::VANDPDZ128rrk, |
7200 | X86::VPANDQZ128rrk, X86::VPANDDZ128rrk }, |
7201 | { X86::VANDPSZ128rrkz, X86::VANDPDZ128rrkz, |
7202 | X86::VPANDQZ128rrkz, X86::VPANDDZ128rrkz }, |
7203 | { X86::VORPSZ128rmk, X86::VORPDZ128rmk, |
7204 | X86::VPORQZ128rmk, X86::VPORDZ128rmk }, |
7205 | { X86::VORPSZ128rmkz, X86::VORPDZ128rmkz, |
7206 | X86::VPORQZ128rmkz, X86::VPORDZ128rmkz }, |
7207 | { X86::VORPSZ128rrk, X86::VORPDZ128rrk, |
7208 | X86::VPORQZ128rrk, X86::VPORDZ128rrk }, |
7209 | { X86::VORPSZ128rrkz, X86::VORPDZ128rrkz, |
7210 | X86::VPORQZ128rrkz, X86::VPORDZ128rrkz }, |
7211 | { X86::VXORPSZ128rmk, X86::VXORPDZ128rmk, |
7212 | X86::VPXORQZ128rmk, X86::VPXORDZ128rmk }, |
7213 | { X86::VXORPSZ128rmkz, X86::VXORPDZ128rmkz, |
7214 | X86::VPXORQZ128rmkz, X86::VPXORDZ128rmkz }, |
7215 | { X86::VXORPSZ128rrk, X86::VXORPDZ128rrk, |
7216 | X86::VPXORQZ128rrk, X86::VPXORDZ128rrk }, |
7217 | { X86::VXORPSZ128rrkz, X86::VXORPDZ128rrkz, |
7218 | X86::VPXORQZ128rrkz, X86::VPXORDZ128rrkz }, |
7219 | { X86::VANDNPSZ256rmk, X86::VANDNPDZ256rmk, |
7220 | X86::VPANDNQZ256rmk, X86::VPANDNDZ256rmk }, |
7221 | { X86::VANDNPSZ256rmkz, X86::VANDNPDZ256rmkz, |
7222 | X86::VPANDNQZ256rmkz, X86::VPANDNDZ256rmkz }, |
7223 | { X86::VANDNPSZ256rrk, X86::VANDNPDZ256rrk, |
7224 | X86::VPANDNQZ256rrk, X86::VPANDNDZ256rrk }, |
7225 | { X86::VANDNPSZ256rrkz, X86::VANDNPDZ256rrkz, |
7226 | X86::VPANDNQZ256rrkz, X86::VPANDNDZ256rrkz }, |
7227 | { X86::VANDPSZ256rmk, X86::VANDPDZ256rmk, |
7228 | X86::VPANDQZ256rmk, X86::VPANDDZ256rmk }, |
7229 | { X86::VANDPSZ256rmkz, X86::VANDPDZ256rmkz, |
7230 | X86::VPANDQZ256rmkz, X86::VPANDDZ256rmkz }, |
7231 | { X86::VANDPSZ256rrk, X86::VANDPDZ256rrk, |
7232 | X86::VPANDQZ256rrk, X86::VPANDDZ256rrk }, |
7233 | { X86::VANDPSZ256rrkz, X86::VANDPDZ256rrkz, |
7234 | X86::VPANDQZ256rrkz, X86::VPANDDZ256rrkz }, |
7235 | { X86::VORPSZ256rmk, X86::VORPDZ256rmk, |
7236 | X86::VPORQZ256rmk, X86::VPORDZ256rmk }, |
7237 | { X86::VORPSZ256rmkz, X86::VORPDZ256rmkz, |
7238 | X86::VPORQZ256rmkz, X86::VPORDZ256rmkz }, |
7239 | { X86::VORPSZ256rrk, X86::VORPDZ256rrk, |
7240 | X86::VPORQZ256rrk, X86::VPORDZ256rrk }, |
7241 | { X86::VORPSZ256rrkz, X86::VORPDZ256rrkz, |
7242 | X86::VPORQZ256rrkz, X86::VPORDZ256rrkz }, |
7243 | { X86::VXORPSZ256rmk, X86::VXORPDZ256rmk, |
7244 | X86::VPXORQZ256rmk, X86::VPXORDZ256rmk }, |
7245 | { X86::VXORPSZ256rmkz, X86::VXORPDZ256rmkz, |
7246 | X86::VPXORQZ256rmkz, X86::VPXORDZ256rmkz }, |
7247 | { X86::VXORPSZ256rrk, X86::VXORPDZ256rrk, |
7248 | X86::VPXORQZ256rrk, X86::VPXORDZ256rrk }, |
7249 | { X86::VXORPSZ256rrkz, X86::VXORPDZ256rrkz, |
7250 | X86::VPXORQZ256rrkz, X86::VPXORDZ256rrkz }, |
7251 | { X86::VANDNPSZrmk, X86::VANDNPDZrmk, |
7252 | X86::VPANDNQZrmk, X86::VPANDNDZrmk }, |
7253 | { X86::VANDNPSZrmkz, X86::VANDNPDZrmkz, |
7254 | X86::VPANDNQZrmkz, X86::VPANDNDZrmkz }, |
7255 | { X86::VANDNPSZrrk, X86::VANDNPDZrrk, |
7256 | X86::VPANDNQZrrk, X86::VPANDNDZrrk }, |
7257 | { X86::VANDNPSZrrkz, X86::VANDNPDZrrkz, |
7258 | X86::VPANDNQZrrkz, X86::VPANDNDZrrkz }, |
7259 | { X86::VANDPSZrmk, X86::VANDPDZrmk, |
7260 | X86::VPANDQZrmk, X86::VPANDDZrmk }, |
7261 | { X86::VANDPSZrmkz, X86::VANDPDZrmkz, |
7262 | X86::VPANDQZrmkz, X86::VPANDDZrmkz }, |
7263 | { X86::VANDPSZrrk, X86::VANDPDZrrk, |
7264 | X86::VPANDQZrrk, X86::VPANDDZrrk }, |
7265 | { X86::VANDPSZrrkz, X86::VANDPDZrrkz, |
7266 | X86::VPANDQZrrkz, X86::VPANDDZrrkz }, |
7267 | { X86::VORPSZrmk, X86::VORPDZrmk, |
7268 | X86::VPORQZrmk, X86::VPORDZrmk }, |
7269 | { X86::VORPSZrmkz, X86::VORPDZrmkz, |
7270 | X86::VPORQZrmkz, X86::VPORDZrmkz }, |
7271 | { X86::VORPSZrrk, X86::VORPDZrrk, |
7272 | X86::VPORQZrrk, X86::VPORDZrrk }, |
7273 | { X86::VORPSZrrkz, X86::VORPDZrrkz, |
7274 | X86::VPORQZrrkz, X86::VPORDZrrkz }, |
7275 | { X86::VXORPSZrmk, X86::VXORPDZrmk, |
7276 | X86::VPXORQZrmk, X86::VPXORDZrmk }, |
7277 | { X86::VXORPSZrmkz, X86::VXORPDZrmkz, |
7278 | X86::VPXORQZrmkz, X86::VPXORDZrmkz }, |
7279 | { X86::VXORPSZrrk, X86::VXORPDZrrk, |
7280 | X86::VPXORQZrrk, X86::VPXORDZrrk }, |
7281 | { X86::VXORPSZrrkz, X86::VXORPDZrrkz, |
7282 | X86::VPXORQZrrkz, X86::VPXORDZrrkz }, |
7283 | |
7284 | |
7285 | { X86::VANDNPSZ128rmb, X86::VANDNPDZ128rmb, |
7286 | X86::VPANDNQZ128rmb, X86::VPANDNDZ128rmb }, |
7287 | { X86::VANDPSZ128rmb, X86::VANDPDZ128rmb, |
7288 | X86::VPANDQZ128rmb, X86::VPANDDZ128rmb }, |
7289 | { X86::VORPSZ128rmb, X86::VORPDZ128rmb, |
7290 | X86::VPORQZ128rmb, X86::VPORDZ128rmb }, |
7291 | { X86::VXORPSZ128rmb, X86::VXORPDZ128rmb, |
7292 | X86::VPXORQZ128rmb, X86::VPXORDZ128rmb }, |
7293 | { X86::VANDNPSZ256rmb, X86::VANDNPDZ256rmb, |
7294 | X86::VPANDNQZ256rmb, X86::VPANDNDZ256rmb }, |
7295 | { X86::VANDPSZ256rmb, X86::VANDPDZ256rmb, |
7296 | X86::VPANDQZ256rmb, X86::VPANDDZ256rmb }, |
7297 | { X86::VORPSZ256rmb, X86::VORPDZ256rmb, |
7298 | X86::VPORQZ256rmb, X86::VPORDZ256rmb }, |
7299 | { X86::VXORPSZ256rmb, X86::VXORPDZ256rmb, |
7300 | X86::VPXORQZ256rmb, X86::VPXORDZ256rmb }, |
7301 | { X86::VANDNPSZrmb, X86::VANDNPDZrmb, |
7302 | X86::VPANDNQZrmb, X86::VPANDNDZrmb }, |
7303 | { X86::VANDPSZrmb, X86::VANDPDZrmb, |
7304 | X86::VPANDQZrmb, X86::VPANDDZrmb }, |
7305 | { X86::VANDPSZrmb, X86::VANDPDZrmb, |
7306 | X86::VPANDQZrmb, X86::VPANDDZrmb }, |
7307 | { X86::VORPSZrmb, X86::VORPDZrmb, |
7308 | X86::VPORQZrmb, X86::VPORDZrmb }, |
7309 | { X86::VXORPSZrmb, X86::VXORPDZrmb, |
7310 | X86::VPXORQZrmb, X86::VPXORDZrmb }, |
7311 | { X86::VANDNPSZ128rmbk, X86::VANDNPDZ128rmbk, |
7312 | X86::VPANDNQZ128rmbk, X86::VPANDNDZ128rmbk }, |
7313 | { X86::VANDPSZ128rmbk, X86::VANDPDZ128rmbk, |
7314 | X86::VPANDQZ128rmbk, X86::VPANDDZ128rmbk }, |
7315 | { X86::VORPSZ128rmbk, X86::VORPDZ128rmbk, |
7316 | X86::VPORQZ128rmbk, X86::VPORDZ128rmbk }, |
7317 | { X86::VXORPSZ128rmbk, X86::VXORPDZ128rmbk, |
7318 | X86::VPXORQZ128rmbk, X86::VPXORDZ128rmbk }, |
7319 | { X86::VANDNPSZ256rmbk, X86::VANDNPDZ256rmbk, |
7320 | X86::VPANDNQZ256rmbk, X86::VPANDNDZ256rmbk }, |
7321 | { X86::VANDPSZ256rmbk, X86::VANDPDZ256rmbk, |
7322 | X86::VPANDQZ256rmbk, X86::VPANDDZ256rmbk }, |
7323 | { X86::VORPSZ256rmbk, X86::VORPDZ256rmbk, |
7324 | X86::VPORQZ256rmbk, X86::VPORDZ256rmbk }, |
7325 | { X86::VXORPSZ256rmbk, X86::VXORPDZ256rmbk, |
7326 | X86::VPXORQZ256rmbk, X86::VPXORDZ256rmbk }, |
7327 | { X86::VANDNPSZrmbk, X86::VANDNPDZrmbk, |
7328 | X86::VPANDNQZrmbk, X86::VPANDNDZrmbk }, |
7329 | { X86::VANDPSZrmbk, X86::VANDPDZrmbk, |
7330 | X86::VPANDQZrmbk, X86::VPANDDZrmbk }, |
7331 | { X86::VANDPSZrmbk, X86::VANDPDZrmbk, |
7332 | X86::VPANDQZrmbk, X86::VPANDDZrmbk }, |
7333 | { X86::VORPSZrmbk, X86::VORPDZrmbk, |
7334 | X86::VPORQZrmbk, X86::VPORDZrmbk }, |
7335 | { X86::VXORPSZrmbk, X86::VXORPDZrmbk, |
7336 | X86::VPXORQZrmbk, X86::VPXORDZrmbk }, |
7337 | { X86::VANDNPSZ128rmbkz,X86::VANDNPDZ128rmbkz, |
7338 | X86::VPANDNQZ128rmbkz,X86::VPANDNDZ128rmbkz}, |
7339 | { X86::VANDPSZ128rmbkz, X86::VANDPDZ128rmbkz, |
7340 | X86::VPANDQZ128rmbkz, X86::VPANDDZ128rmbkz }, |
7341 | { X86::VORPSZ128rmbkz, X86::VORPDZ128rmbkz, |
7342 | X86::VPORQZ128rmbkz, X86::VPORDZ128rmbkz }, |
7343 | { X86::VXORPSZ128rmbkz, X86::VXORPDZ128rmbkz, |
7344 | X86::VPXORQZ128rmbkz, X86::VPXORDZ128rmbkz }, |
7345 | { X86::VANDNPSZ256rmbkz,X86::VANDNPDZ256rmbkz, |
7346 | X86::VPANDNQZ256rmbkz,X86::VPANDNDZ256rmbkz}, |
7347 | { X86::VANDPSZ256rmbkz, X86::VANDPDZ256rmbkz, |
7348 | X86::VPANDQZ256rmbkz, X86::VPANDDZ256rmbkz }, |
7349 | { X86::VORPSZ256rmbkz, X86::VORPDZ256rmbkz, |
7350 | X86::VPORQZ256rmbkz, X86::VPORDZ256rmbkz }, |
7351 | { X86::VXORPSZ256rmbkz, X86::VXORPDZ256rmbkz, |
7352 | X86::VPXORQZ256rmbkz, X86::VPXORDZ256rmbkz }, |
7353 | { X86::VANDNPSZrmbkz, X86::VANDNPDZrmbkz, |
7354 | X86::VPANDNQZrmbkz, X86::VPANDNDZrmbkz }, |
7355 | { X86::VANDPSZrmbkz, X86::VANDPDZrmbkz, |
7356 | X86::VPANDQZrmbkz, X86::VPANDDZrmbkz }, |
7357 | { X86::VANDPSZrmbkz, X86::VANDPDZrmbkz, |
7358 | X86::VPANDQZrmbkz, X86::VPANDDZrmbkz }, |
7359 | { X86::VORPSZrmbkz, X86::VORPDZrmbkz, |
7360 | X86::VPORQZrmbkz, X86::VPORDZrmbkz }, |
7361 | { X86::VXORPSZrmbkz, X86::VXORPDZrmbkz, |
7362 | X86::VPXORQZrmbkz, X86::VPXORDZrmbkz }, |
7363 | }; |
7364 | |
7365 | |
7366 | static const uint16_t ReplaceableBlendInstrs[][3] = { |
7367 | |
7368 | { X86::BLENDPSrmi, X86::BLENDPDrmi, X86::PBLENDWrmi }, |
7369 | { X86::BLENDPSrri, X86::BLENDPDrri, X86::PBLENDWrri }, |
7370 | { X86::VBLENDPSrmi, X86::VBLENDPDrmi, X86::VPBLENDWrmi }, |
7371 | { X86::VBLENDPSrri, X86::VBLENDPDrri, X86::VPBLENDWrri }, |
7372 | { X86::VBLENDPSYrmi, X86::VBLENDPDYrmi, X86::VPBLENDWYrmi }, |
7373 | { X86::VBLENDPSYrri, X86::VBLENDPDYrri, X86::VPBLENDWYrri }, |
7374 | }; |
7375 | static const uint16_t ReplaceableBlendAVX2Instrs[][3] = { |
7376 | |
7377 | { X86::VBLENDPSrmi, X86::VBLENDPDrmi, X86::VPBLENDDrmi }, |
7378 | { X86::VBLENDPSrri, X86::VBLENDPDrri, X86::VPBLENDDrri }, |
7379 | { X86::VBLENDPSYrmi, X86::VBLENDPDYrmi, X86::VPBLENDDYrmi }, |
7380 | { X86::VBLENDPSYrri, X86::VBLENDPDYrri, X86::VPBLENDDYrri }, |
7381 | }; |
7382 | |
7383 | |
7384 | |
7385 | static const uint16_t ReplaceableCustomAVX512LogicInstrs[][4] = { |
7386 | |
7387 | |
7388 | { X86::VANDNPSrm, X86::VANDNPDrm, X86::VPANDNQZ128rm, X86::VPANDNDZ128rm }, |
7389 | { X86::VANDNPSrr, X86::VANDNPDrr, X86::VPANDNQZ128rr, X86::VPANDNDZ128rr }, |
7390 | { X86::VANDPSrm, X86::VANDPDrm, X86::VPANDQZ128rm, X86::VPANDDZ128rm }, |
7391 | { X86::VANDPSrr, X86::VANDPDrr, X86::VPANDQZ128rr, X86::VPANDDZ128rr }, |
7392 | { X86::VORPSrm, X86::VORPDrm, X86::VPORQZ128rm, X86::VPORDZ128rm }, |
7393 | { X86::VORPSrr, X86::VORPDrr, X86::VPORQZ128rr, X86::VPORDZ128rr }, |
7394 | { X86::VXORPSrm, X86::VXORPDrm, X86::VPXORQZ128rm, X86::VPXORDZ128rm }, |
7395 | { X86::VXORPSrr, X86::VXORPDrr, X86::VPXORQZ128rr, X86::VPXORDZ128rr }, |
7396 | { X86::VANDNPSYrm, X86::VANDNPDYrm, X86::VPANDNQZ256rm, X86::VPANDNDZ256rm }, |
7397 | { X86::VANDNPSYrr, X86::VANDNPDYrr, X86::VPANDNQZ256rr, X86::VPANDNDZ256rr }, |
7398 | { X86::VANDPSYrm, X86::VANDPDYrm, X86::VPANDQZ256rm, X86::VPANDDZ256rm }, |
7399 | { X86::VANDPSYrr, X86::VANDPDYrr, X86::VPANDQZ256rr, X86::VPANDDZ256rr }, |
7400 | { X86::VORPSYrm, X86::VORPDYrm, X86::VPORQZ256rm, X86::VPORDZ256rm }, |
7401 | { X86::VORPSYrr, X86::VORPDYrr, X86::VPORQZ256rr, X86::VPORDZ256rr }, |
7402 | { X86::VXORPSYrm, X86::VXORPDYrm, X86::VPXORQZ256rm, X86::VPXORDZ256rm }, |
7403 | { X86::VXORPSYrr, X86::VXORPDYrr, X86::VPXORQZ256rr, X86::VPXORDZ256rr }, |
7404 | }; |
7405 | |
7406 | |
7407 | |
7408 | |
7409 | static const uint16_t *lookup(unsigned opcode, unsigned domain, |
7410 | ArrayRef<uint16_t[3]> Table) { |
7411 | for (const uint16_t (&Row)[3] : Table) |
7412 | if (Row[domain-1] == opcode) |
7413 | return Row; |
7414 | return nullptr; |
7415 | } |
7416 | |
7417 | static const uint16_t *lookupAVX512(unsigned opcode, unsigned domain, |
7418 | ArrayRef<uint16_t[4]> Table) { |
7419 | |
7420 | for (const uint16_t (&Row)[4] : Table) |
7421 | if (Row[domain-1] == opcode || (domain == 3 && Row[3] == opcode)) |
7422 | return Row; |
7423 | return nullptr; |
7424 | } |
7425 | |
7426 | |
7427 | static bool AdjustBlendMask(unsigned OldMask, unsigned OldWidth, |
7428 | unsigned NewWidth, unsigned *pNewMask = nullptr) { |
7429 | assert(((OldWidth % NewWidth) == 0 || (NewWidth % OldWidth) == 0) && |
7430 | "Illegal blend mask scale"); |
7431 | unsigned NewMask = 0; |
7432 | |
7433 | if ((OldWidth % NewWidth) == 0) { |
7434 | unsigned Scale = OldWidth / NewWidth; |
7435 | unsigned SubMask = (1u << Scale) - 1; |
7436 | for (unsigned i = 0; i != NewWidth; ++i) { |
7437 | unsigned Sub = (OldMask >> (i * Scale)) & SubMask; |
7438 | if (Sub == SubMask) |
7439 | NewMask |= (1u << i); |
7440 | else if (Sub != 0x0) |
7441 | return false; |
7442 | } |
7443 | } else { |
7444 | unsigned Scale = NewWidth / OldWidth; |
7445 | unsigned SubMask = (1u << Scale) - 1; |
7446 | for (unsigned i = 0; i != OldWidth; ++i) { |
7447 | if (OldMask & (1 << i)) { |
7448 | NewMask |= (SubMask << (i * Scale)); |
7449 | } |
7450 | } |
7451 | } |
7452 | |
7453 | if (pNewMask) |
7454 | *pNewMask = NewMask; |
7455 | return true; |
7456 | } |
7457 | |
7458 | uint16_t X86InstrInfo::getExecutionDomainCustom(const MachineInstr &MI) const { |
7459 | unsigned Opcode = MI.getOpcode(); |
7460 | unsigned NumOperands = MI.getDesc().getNumOperands(); |
7461 | |
7462 | auto GetBlendDomains = [&](unsigned ImmWidth, bool Is256) { |
7463 | uint16_t validDomains = 0; |
7464 | if (MI.getOperand(NumOperands - 1).isImm()) { |
7465 | unsigned Imm = MI.getOperand(NumOperands - 1).getImm(); |
7466 | if (AdjustBlendMask(Imm, ImmWidth, Is256 ? 8 : 4)) |
7467 | validDomains |= 0x2; |
7468 | if (AdjustBlendMask(Imm, ImmWidth, Is256 ? 4 : 2)) |
7469 | validDomains |= 0x4; |
7470 | if (!Is256 || Subtarget.hasAVX2()) |
7471 | validDomains |= 0x8; |
7472 | } |
7473 | return validDomains; |
7474 | }; |
7475 | |
7476 | switch (Opcode) { |
7477 | case X86::BLENDPDrmi: |
7478 | case X86::BLENDPDrri: |
7479 | case X86::VBLENDPDrmi: |
7480 | case X86::VBLENDPDrri: |
7481 | return GetBlendDomains(2, false); |
7482 | case X86::VBLENDPDYrmi: |
7483 | case X86::VBLENDPDYrri: |
7484 | return GetBlendDomains(4, true); |
7485 | case X86::BLENDPSrmi: |
7486 | case X86::BLENDPSrri: |
7487 | case X86::VBLENDPSrmi: |
7488 | case X86::VBLENDPSrri: |
7489 | case X86::VPBLENDDrmi: |
7490 | case X86::VPBLENDDrri: |
7491 | return GetBlendDomains(4, false); |
7492 | case X86::VBLENDPSYrmi: |
7493 | case X86::VBLENDPSYrri: |
7494 | case X86::VPBLENDDYrmi: |
7495 | case X86::VPBLENDDYrri: |
7496 | return GetBlendDomains(8, true); |
7497 | case X86::PBLENDWrmi: |
7498 | case X86::PBLENDWrri: |
7499 | case X86::VPBLENDWrmi: |
7500 | case X86::VPBLENDWrri: |
7501 | |
7502 | case X86::VPBLENDWYrmi: |
7503 | case X86::VPBLENDWYrri: |
7504 | return GetBlendDomains(8, false); |
7505 | case X86::VPANDDZ128rr: case X86::VPANDDZ128rm: |
7506 | case X86::VPANDDZ256rr: case X86::VPANDDZ256rm: |
7507 | case X86::VPANDQZ128rr: case X86::VPANDQZ128rm: |
7508 | case X86::VPANDQZ256rr: case X86::VPANDQZ256rm: |
7509 | case X86::VPANDNDZ128rr: case X86::VPANDNDZ128rm: |
7510 | case X86::VPANDNDZ256rr: case X86::VPANDNDZ256rm: |
7511 | case X86::VPANDNQZ128rr: case X86::VPANDNQZ128rm: |
7512 | case X86::VPANDNQZ256rr: case X86::VPANDNQZ256rm: |
7513 | case X86::VPORDZ128rr: case X86::VPORDZ128rm: |
7514 | case X86::VPORDZ256rr: case X86::VPORDZ256rm: |
7515 | case X86::VPORQZ128rr: case X86::VPORQZ128rm: |
7516 | case X86::VPORQZ256rr: case X86::VPORQZ256rm: |
7517 | case X86::VPXORDZ128rr: case X86::VPXORDZ128rm: |
7518 | case X86::VPXORDZ256rr: case X86::VPXORDZ256rm: |
7519 | case X86::VPXORQZ128rr: case X86::VPXORQZ128rm: |
7520 | case X86::VPXORQZ256rr: case X86::VPXORQZ256rm: |
7521 | |
7522 | |
7523 | if (Subtarget.hasDQI()) |
7524 | return 0; |
7525 | |
7526 | if (RI.getEncodingValue(MI.getOperand(0).getReg()) >= 16) |
7527 | return 0; |
7528 | if (RI.getEncodingValue(MI.getOperand(1).getReg()) >= 16) |
7529 | return 0; |
7530 | |
7531 | if (NumOperands == 3 && |
7532 | RI.getEncodingValue(MI.getOperand(2).getReg()) >= 16) |
7533 | return 0; |
7534 | |
7535 | |
7536 | return 0xe; |
7537 | case X86::MOVHLPSrr: |
7538 | |
7539 | |
7540 | |
7541 | |
7542 | |
7543 | |
7544 | if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg() && |
7545 | MI.getOperand(0).getSubReg() == 0 && |
7546 | MI.getOperand(1).getSubReg() == 0 && |
7547 | MI.getOperand(2).getSubReg() == 0) |
7548 | return 0x6; |
7549 | return 0; |
7550 | case X86::SHUFPDrri: |
7551 | return 0x6; |
7552 | } |
7553 | return 0; |
7554 | } |
7555 | |
7556 | bool X86InstrInfo::setExecutionDomainCustom(MachineInstr &MI, |
7557 | unsigned Domain) const { |
7558 | assert(Domain > 0 && Domain < 4 && "Invalid execution domain"); |
7559 | uint16_t dom = (MI.getDesc().TSFlags >> X86II::SSEDomainShift) & 3; |
7560 | assert(dom && "Not an SSE instruction"); |
7561 | |
7562 | unsigned Opcode = MI.getOpcode(); |
7563 | unsigned NumOperands = MI.getDesc().getNumOperands(); |
7564 | |
7565 | auto SetBlendDomain = [&](unsigned ImmWidth, bool Is256) { |
7566 | if (MI.getOperand(NumOperands - 1).isImm()) { |
7567 | unsigned Imm = MI.getOperand(NumOperands - 1).getImm() & 255; |
7568 | Imm = (ImmWidth == 16 ? ((Imm << 8) | Imm) : Imm); |
7569 | unsigned NewImm = Imm; |
7570 | |
7571 | const uint16_t *table = lookup(Opcode, dom, ReplaceableBlendInstrs); |
7572 | if (!table) |
7573 | table = lookup(Opcode, dom, ReplaceableBlendAVX2Instrs); |
7574 | |
7575 | if (Domain == 1) { |
7576 | AdjustBlendMask(Imm, ImmWidth, Is256 ? 8 : 4, &NewImm); |
7577 | } else if (Domain == 2) { |
7578 | AdjustBlendMask(Imm, ImmWidth, Is256 ? 4 : 2, &NewImm); |
7579 | } else if (Domain == 3) { |
7580 | if (Subtarget.hasAVX2()) { |
7581 | |
7582 | if ((ImmWidth / (Is256 ? 2 : 1)) != 8) { |
7583 | table = lookup(Opcode, dom, ReplaceableBlendAVX2Instrs); |
7584 | AdjustBlendMask(Imm, ImmWidth, Is256 ? 8 : 4, &NewImm); |
7585 | } |
7586 | } else { |
7587 | assert(!Is256 && "128-bit vector expected"); |
7588 | AdjustBlendMask(Imm, ImmWidth, 8, &NewImm); |
7589 | } |
7590 | } |
7591 | |
7592 | assert(table && table[Domain - 1] && "Unknown domain op"); |
7593 | MI.setDesc(get(table[Domain - 1])); |
7594 | MI.getOperand(NumOperands - 1).setImm(NewImm & 255); |
7595 | } |
7596 | return true; |
7597 | }; |
7598 | |
7599 | switch (Opcode) { |
| 2 | Control jumps to 'case UNPCKHPDrr:' at line 7658 |
7600 | case X86::BLENDPDrmi: |
7601 | case X86::BLENDPDrri: |
7602 | case X86::VBLENDPDrmi: |
7603 | case X86::VBLENDPDrri: |
7604 | return SetBlendDomain(2, false); |
7605 | case X86::VBLENDPDYrmi: |
7606 | case X86::VBLENDPDYrri: |
7607 | return SetBlendDomain(4, true); |
7608 | case X86::BLENDPSrmi: |
7609 | case X86::BLENDPSrri: |
7610 | case X86::VBLENDPSrmi: |
7611 | case X86::VBLENDPSrri: |
7612 | case X86::VPBLENDDrmi: |
7613 | case X86::VPBLENDDrri: |
7614 | return SetBlendDomain(4, false); |
7615 | case X86::VBLENDPSYrmi: |
7616 | case X86::VBLENDPSYrri: |
7617 | case X86::VPBLENDDYrmi: |
7618 | case X86::VPBLENDDYrri: |
7619 | return SetBlendDomain(8, true); |
7620 | case X86::PBLENDWrmi: |
7621 | case X86::PBLENDWrri: |
7622 | case X86::VPBLENDWrmi: |
7623 | case X86::VPBLENDWrri: |
7624 | return SetBlendDomain(8, false); |
7625 | case X86::VPBLENDWYrmi: |
7626 | case X86::VPBLENDWYrri: |
7627 | return SetBlendDomain(16, true); |
7628 | case X86::VPANDDZ128rr: case X86::VPANDDZ128rm: |
7629 | case X86::VPANDDZ256rr: case X86::VPANDDZ256rm: |
7630 | case X86::VPANDQZ128rr: case X86::VPANDQZ128rm: |
7631 | case X86::VPANDQZ256rr: case X86::VPANDQZ256rm: |
7632 | case X86::VPANDNDZ128rr: case X86::VPANDNDZ128rm: |
7633 | case X86::VPANDNDZ256rr: case X86::VPANDNDZ256rm: |
7634 | case X86::VPANDNQZ128rr: case X86::VPANDNQZ128rm: |
7635 | case X86::VPANDNQZ256rr: case X86::VPANDNQZ256rm: |
7636 | case X86::VPORDZ128rr: case X86::VPORDZ128rm: |
7637 | case X86::VPORDZ256rr: case X86::VPORDZ256rm: |
7638 | case X86::VPORQZ128rr: case X86::VPORQZ128rm: |
7639 | case X86::VPORQZ256rr: case X86::VPORQZ256rm: |
7640 | case X86::VPXORDZ128rr: case X86::VPXORDZ128rm: |
7641 | case X86::VPXORDZ256rr: case X86::VPXORDZ256rm: |
7642 | case X86::VPXORQZ128rr: case X86::VPXORQZ128rm: |
7643 | case X86::VPXORQZ256rr: case X86::VPXORQZ256rm: { |
7644 | |
7645 | if (Subtarget.hasDQI()) |
7646 | return false; |
7647 | |
7648 | const uint16_t *table = lookupAVX512(MI.getOpcode(), dom, |
7649 | ReplaceableCustomAVX512LogicInstrs); |
7650 | assert(table && "Instruction not found in table?"); |
7651 | |
7652 | |
7653 | if (Domain == 3 && (dom == 1 || table[3] == MI.getOpcode())) |
7654 | Domain = 4; |
7655 | MI.setDesc(get(table[Domain - 1])); |
7656 | return true; |
7657 | } |
7658 | case X86::UNPCKHPDrr: |
7659 | case X86::MOVHLPSrr: |
7660 | |
7661 | if (Domain != dom && Domain != 3 && |
| 3 | Assuming 'Domain' is equal to 'dom' |
7662 | MI.getOperand(1).getReg() == MI.getOperand(2).getReg() && |
7663 | MI.getOperand(0).getSubReg() == 0 && |
7664 | MI.getOperand(1).getSubReg() == 0 && |
7665 | MI.getOperand(2).getSubReg() == 0) { |
7666 | commuteInstruction(MI, false); |
7667 | return true; |
7668 | } |
7669 | |
7670 | if (Opcode == X86::MOVHLPSrr) |
7671 | return true; |
7672 | break; |
| 6 | Execution continues on line 7685 |
7673 | case X86::SHUFPDrri: { |
7674 | if (Domain == 1) { |
7675 | unsigned Imm = MI.getOperand(3).getImm(); |
7676 | unsigned NewImm = 0x44; |
7677 | if (Imm & 1) NewImm |= 0x0a; |
7678 | if (Imm & 2) NewImm |= 0xa0; |
7679 | MI.getOperand(3).setImm(NewImm); |
7680 | MI.setDesc(get(X86::SHUFPSrri)); |
7681 | } |
7682 | return true; |
7683 | } |
7684 | } |
7685 | return false; |
| 7 | Returning zero, which participates in a condition later |
7686 | } |
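
The "Returning zero, which participates in a condition later" note refers to the
`if (setExecutionDomainCustom(MI, Domain))` check at line 7735 below: a false return sends
setExecutionDomain() into the chain of table lookups that the remaining notes follow. For
the blend opcodes handled by SetBlendDomain, the immediate rewrite amounts to replicating
mask bits across element widths; one packed double covers two packed singles, so, for
example, a BLENDPD immediate of 0b10 corresponds to a BLENDPS immediate of 0b1100 and 0b01
to 0b0011. (AdjustBlendMask, which performs that scaling, is defined elsewhere in this file
and is not shown in this excerpt.)
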
7687 | |
7688 | std::pair<uint16_t, uint16_t> |
7689 | X86InstrInfo::getExecutionDomain(const MachineInstr &MI) const { |
7690 | uint16_t domain = (MI.getDesc().TSFlags >> X86II::SSEDomainShift) & 3; |
7691 | unsigned opcode = MI.getOpcode(); |
7692 | uint16_t validDomains = 0; |
7693 | if (domain) { |
7694 | |
7695 | validDomains = getExecutionDomainCustom(MI); |
7696 | if (validDomains) |
7697 | return std::make_pair(domain, validDomains); |
7698 | |
7699 | if (lookup(opcode, domain, ReplaceableInstrs)) { |
7700 | validDomains = 0xe; |
7701 | } else if (lookup(opcode, domain, ReplaceableInstrsAVX2)) { |
7702 | validDomains = Subtarget.hasAVX2() ? 0xe : 0x6; |
7703 | } else if (lookup(opcode, domain, ReplaceableInstrsFP)) { |
7704 | validDomains = 0x6; |
7705 | } else if (lookup(opcode, domain, ReplaceableInstrsAVX2InsertExtract)) { |
7706 | |
7707 | |
7708 | if (!Subtarget.hasAVX2()) |
7709 | return std::make_pair(0, 0); |
7710 | validDomains = 0xe; |
7711 | } else if (lookupAVX512(opcode, domain, ReplaceableInstrsAVX512)) { |
7712 | validDomains = 0xe; |
7713 | } else if (Subtarget.hasDQI() && lookupAVX512(opcode, domain, |
7714 | ReplaceableInstrsAVX512DQ)) { |
7715 | validDomains = 0xe; |
7716 | } else if (Subtarget.hasDQI()) { |
7717 | if (const uint16_t *table = lookupAVX512(opcode, domain, |
7718 | ReplaceableInstrsAVX512DQMasked)) { |
7719 | if (domain == 1 || (domain == 3 && table[3] == opcode)) |
7720 | validDomains = 0xa; |
7721 | else |
7722 | validDomains = 0xc; |
7723 | } |
7724 | } |
7725 | } |
7726 | return std::make_pair(domain, validDomains); |
7727 | } |
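
The pair returned here is <current domain, mask of selectable domains>, with the domain
numbering taken from the TSFlags field decoded above (in this encoding 1 = PackedSingle,
2 = PackedDouble, 3 = PackedInt). Bit k of validDomains means domain k may later be
requested through setExecutionDomain(), so 0xe permits all three domains, 0x6 only the two
floating-point ones, and 0xa/0xc the restricted AVX-512DQ masked cases. A minimal sketch of
a consumer (the helper name is illustrative and not part of this file):

    #include <cstdint>
    #include <vector>

    // Decode the validDomains mask from getExecutionDomain(): bit k set means
    // execution domain k (1..3) is a legal argument for setExecutionDomain().
    static std::vector<unsigned> selectableDomains(uint16_t ValidDomains) {
      std::vector<unsigned> Domains;
      for (unsigned D = 1; D <= 3; ++D)
        if (ValidDomains & (1u << D))
          Domains.push_back(D);
      return Domains; // 0xe -> {1,2,3}, 0x6 -> {1,2}, 0xa -> {1,3}, 0xc -> {2,3}
    }
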
7728 | |
7729 | void X86InstrInfo::setExecutionDomain(MachineInstr &MI, unsigned Domain) const { |
7730 | assert(Domain>0 && Domain<4 && "Invalid execution domain"); |
7731 | uint16_t dom = (MI.getDesc().TSFlags >> X86II::SSEDomainShift) & 3; |
7732 | assert(dom && "Not an SSE instruction"); |
7733 | |
7734 | |
7735 | if (setExecutionDomainCustom(MI, Domain)) |
| 1 | Calling 'X86InstrInfo::setExecutionDomainCustom' |
| 8 | Returning from 'X86InstrInfo::setExecutionDomainCustom' |
7736 | return; |
7737 | |
7738 | const uint16_t *table = lookup(MI.getOpcode(), dom, ReplaceableInstrs); |
7739 | if (!table) { |
| 10 | Assuming 'table' is null |
7740 | assert((Subtarget.hasAVX2() || Domain < 3) && |
7741 | "256-bit vector operations only available in AVX2"); |
7742 | table = lookup(MI.getOpcode(), dom, ReplaceableInstrsAVX2); |
7743 | } |
7744 | if (!table) { |
| 12 | Assuming 'table' is null |
7745 | table = lookup(MI.getOpcode(), dom, ReplaceableInstrsFP); |
7746 | assert((!table || Domain < 3) && |
7747 | "Can only select PackedSingle or PackedDouble"); |
7748 | } |
7749 | if (!table) { |
| 14 | Assuming 'table' is null |
7750 | assert(Subtarget.hasAVX2() && |
7751 | "256-bit insert/extract only available in AVX2"); |
7752 | table = lookup(MI.getOpcode(), dom, ReplaceableInstrsAVX2InsertExtract); |
7753 | } |
7754 | if (!table) { |
| 16 | Assuming 'table' is null |
7755 | assert(Subtarget.hasAVX512() && "Requires AVX-512"); |
7756 | table = lookupAVX512(MI.getOpcode(), dom, ReplaceableInstrsAVX512); |
7757 | |
7758 | if (table && Domain == 3 && table[3] == MI.getOpcode()) |
| 18 | Assuming 'table' is null |
7759 | Domain = 4; |
7760 | } |
7761 | if (!table) { |
7762 | assert((Subtarget.hasDQI() || Domain >= 3) && "Requires AVX-512DQ"); |
7763 | table = lookupAVX512(MI.getOpcode(), dom, ReplaceableInstrsAVX512DQ); |
7764 | |
7765 | |
7766 | if (table && Domain == 3 && (dom == 1 || table[3] == MI.getOpcode())) |
| 20 | Assuming 'table' is null |
7767 | Domain = 4; |
7768 | } |
7769 | if (!table) { |
7770 | assert((Subtarget.hasDQI() || Domain >= 3) && "Requires AVX-512DQ"); |
7771 | table = lookupAVX512(MI.getOpcode(), dom, ReplaceableInstrsAVX512DQMasked); |
| 22 | Value assigned to 'table' |
7772 | if (table && Domain == 3 && (dom == 1 || table[3] == MI.getOpcode())) |
| 23 | Assuming 'table' is null |
7773 | Domain = 4; |
7774 | } |
7775 | assert(table && "Cannot change domain"); |
7776 | MI.setDesc(get(table[Domain - 1])); |
| 24 | Array access (from variable 'table') results in a null pointer dereference |
7777 | } |
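
Notes 1-24 spell out the path being reported: setExecutionDomainCustom() returns false,
every lookup()/lookupAVX512() call is then assumed to return null, and because assert()
compiles away under NDEBUG none of the assertions can end that path, so 'table' is still
null when 'table[Domain - 1]' is evaluated at line 7776. The code's intent is that one of
the tables always matches an instruction whose TSFlags domain field is non-zero, so the
report rests on that asserted invariant rather than on a demonstrated caller. A sketch of
how the release-build dereference could be made structurally impossible (illustration only,
not a change present in the file; llvm_unreachable or report_fatal_error would serve the
same purpose):

    if (!table)
      return; // bail out instead of indexing through a null table in NDEBUG builds
    MI.setDesc(get(table[Domain - 1]));
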
7778 | |
7779 | |
7780 | MCInst X86InstrInfo::getNop() const { |
7781 | MCInst Nop; |
7782 | Nop.setOpcode(X86::NOOP); |
7783 | return Nop; |
7784 | } |
7785 | |
7786 | bool X86InstrInfo::isHighLatencyDef(int opc) const { |
7787 | switch (opc) { |
7788 | default: return false; |
7789 | case X86::DIVPDrm: |
7790 | case X86::DIVPDrr: |
7791 | case X86::DIVPSrm: |
7792 | case X86::DIVPSrr: |
7793 | case X86::DIVSDrm: |
7794 | case X86::DIVSDrm_Int: |
7795 | case X86::DIVSDrr: |
7796 | case X86::DIVSDrr_Int: |
7797 | case X86::DIVSSrm: |
7798 | case X86::DIVSSrm_Int: |
7799 | case X86::DIVSSrr: |
7800 | case X86::DIVSSrr_Int: |
7801 | case X86::SQRTPDm: |
7802 | case X86::SQRTPDr: |
7803 | case X86::SQRTPSm: |
7804 | case X86::SQRTPSr: |
7805 | case X86::SQRTSDm: |
7806 | case X86::SQRTSDm_Int: |
7807 | case X86::SQRTSDr: |
7808 | case X86::SQRTSDr_Int: |
7809 | case X86::SQRTSSm: |
7810 | case X86::SQRTSSm_Int: |
7811 | case X86::SQRTSSr: |
7812 | case X86::SQRTSSr_Int: |
7813 | |
7814 | case X86::VDIVPDrm: |
7815 | case X86::VDIVPDrr: |
7816 | case X86::VDIVPDYrm: |
7817 | case X86::VDIVPDYrr: |
7818 | case X86::VDIVPSrm: |
7819 | case X86::VDIVPSrr: |
7820 | case X86::VDIVPSYrm: |
7821 | case X86::VDIVPSYrr: |
7822 | case X86::VDIVSDrm: |
7823 | case X86::VDIVSDrm_Int: |
7824 | case X86::VDIVSDrr: |
7825 | case X86::VDIVSDrr_Int: |
7826 | case X86::VDIVSSrm: |
7827 | case X86::VDIVSSrm_Int: |
7828 | case X86::VDIVSSrr: |
7829 | case X86::VDIVSSrr_Int: |
7830 | case X86::VSQRTPDm: |
7831 | case X86::VSQRTPDr: |
7832 | case X86::VSQRTPDYm: |
7833 | case X86::VSQRTPDYr: |
7834 | case X86::VSQRTPSm: |
7835 | case X86::VSQRTPSr: |
7836 | case X86::VSQRTPSYm: |
7837 | case X86::VSQRTPSYr: |
7838 | case X86::VSQRTSDm: |
7839 | case X86::VSQRTSDm_Int: |
7840 | case X86::VSQRTSDr: |
7841 | case X86::VSQRTSDr_Int: |
7842 | case X86::VSQRTSSm: |
7843 | case X86::VSQRTSSm_Int: |
7844 | case X86::VSQRTSSr: |
7845 | case X86::VSQRTSSr_Int: |
7846 | |
7847 | case X86::VDIVPDZ128rm: |
7848 | case X86::VDIVPDZ128rmb: |
7849 | case X86::VDIVPDZ128rmbk: |
7850 | case X86::VDIVPDZ128rmbkz: |
7851 | case X86::VDIVPDZ128rmk: |
7852 | case X86::VDIVPDZ128rmkz: |
7853 | case X86::VDIVPDZ128rr: |
7854 | case X86::VDIVPDZ128rrk: |
7855 | case X86::VDIVPDZ128rrkz: |
7856 | case X86::VDIVPDZ256rm: |
7857 | case X86::VDIVPDZ256rmb: |
7858 | case X86::VDIVPDZ256rmbk: |
7859 | case X86::VDIVPDZ256rmbkz: |
7860 | case X86::VDIVPDZ256rmk: |
7861 | case X86::VDIVPDZ256rmkz: |
7862 | case X86::VDIVPDZ256rr: |
7863 | case X86::VDIVPDZ256rrk: |
7864 | case X86::VDIVPDZ256rrkz: |
7865 | case X86::VDIVPDZrrb: |
7866 | case X86::VDIVPDZrrbk: |
7867 | case X86::VDIVPDZrrbkz: |
7868 | case X86::VDIVPDZrm: |
7869 | case X86::VDIVPDZrmb: |
7870 | case X86::VDIVPDZrmbk: |
7871 | case X86::VDIVPDZrmbkz: |
7872 | case X86::VDIVPDZrmk: |
7873 | case X86::VDIVPDZrmkz: |
7874 | case X86::VDIVPDZrr: |
7875 | case X86::VDIVPDZrrk: |
7876 | case X86::VDIVPDZrrkz: |
7877 | case X86::VDIVPSZ128rm: |
7878 | case X86::VDIVPSZ128rmb: |
7879 | case X86::VDIVPSZ128rmbk: |
7880 | case X86::VDIVPSZ128rmbkz: |
7881 | case X86::VDIVPSZ128rmk: |
7882 | case X86::VDIVPSZ128rmkz: |
7883 | case X86::VDIVPSZ128rr: |
7884 | case X86::VDIVPSZ128rrk: |
7885 | case X86::VDIVPSZ128rrkz: |
7886 | case X86::VDIVPSZ256rm: |
7887 | case X86::VDIVPSZ256rmb: |
7888 | case X86::VDIVPSZ256rmbk: |
7889 | case X86::VDIVPSZ256rmbkz: |
7890 | case X86::VDIVPSZ256rmk: |
7891 | case X86::VDIVPSZ256rmkz: |
7892 | case X86::VDIVPSZ256rr: |
7893 | case X86::VDIVPSZ256rrk: |
7894 | case X86::VDIVPSZ256rrkz: |
7895 | case X86::VDIVPSZrrb: |
7896 | case X86::VDIVPSZrrbk: |
7897 | case X86::VDIVPSZrrbkz: |
7898 | case X86::VDIVPSZrm: |
7899 | case X86::VDIVPSZrmb: |
7900 | case X86::VDIVPSZrmbk: |
7901 | case X86::VDIVPSZrmbkz: |
7902 | case X86::VDIVPSZrmk: |
7903 | case X86::VDIVPSZrmkz: |
7904 | case X86::VDIVPSZrr: |
7905 | case X86::VDIVPSZrrk: |
7906 | case X86::VDIVPSZrrkz: |
7907 | case X86::VDIVSDZrm: |
7908 | case X86::VDIVSDZrr: |
7909 | case X86::VDIVSDZrm_Int: |
7910 | case X86::VDIVSDZrm_Intk: |
7911 | case X86::VDIVSDZrm_Intkz: |
7912 | case X86::VDIVSDZrr_Int: |
7913 | case X86::VDIVSDZrr_Intk: |
7914 | case X86::VDIVSDZrr_Intkz: |
7915 | case X86::VDIVSDZrrb_Int: |
7916 | case X86::VDIVSDZrrb_Intk: |
7917 | case X86::VDIVSDZrrb_Intkz: |
7918 | case X86::VDIVSSZrm: |
7919 | case X86::VDIVSSZrr: |
7920 | case X86::VDIVSSZrm_Int: |
7921 | case X86::VDIVSSZrm_Intk: |
7922 | case X86::VDIVSSZrm_Intkz: |
7923 | case X86::VDIVSSZrr_Int: |
7924 | case X86::VDIVSSZrr_Intk: |
7925 | case X86::VDIVSSZrr_Intkz: |
7926 | case X86::VDIVSSZrrb_Int: |
7927 | case X86::VDIVSSZrrb_Intk: |
7928 | case X86::VDIVSSZrrb_Intkz: |
7929 | case X86::VSQRTPDZ128m: |
7930 | case X86::VSQRTPDZ128mb: |
7931 | case X86::VSQRTPDZ128mbk: |
7932 | case X86::VSQRTPDZ128mbkz: |
7933 | case X86::VSQRTPDZ128mk: |
7934 | case X86::VSQRTPDZ128mkz: |
7935 | case X86::VSQRTPDZ128r: |
7936 | case X86::VSQRTPDZ128rk: |
7937 | case X86::VSQRTPDZ128rkz: |
7938 | case X86::VSQRTPDZ256m: |
7939 | case X86::VSQRTPDZ256mb: |
7940 | case X86::VSQRTPDZ256mbk: |
7941 | case X86::VSQRTPDZ256mbkz: |
7942 | case X86::VSQRTPDZ256mk: |
7943 | case X86::VSQRTPDZ256mkz: |
7944 | case X86::VSQRTPDZ256r: |
7945 | case X86::VSQRTPDZ256rk: |
7946 | case X86::VSQRTPDZ256rkz: |
7947 | case X86::VSQRTPDZm: |
7948 | case X86::VSQRTPDZmb: |
7949 | case X86::VSQRTPDZmbk: |
7950 | case X86::VSQRTPDZmbkz: |
7951 | case X86::VSQRTPDZmk: |
7952 | case X86::VSQRTPDZmkz: |
7953 | case X86::VSQRTPDZr: |
7954 | case X86::VSQRTPDZrb: |
7955 | case X86::VSQRTPDZrbk: |
7956 | case X86::VSQRTPDZrbkz: |
7957 | case X86::VSQRTPDZrk: |
7958 | case X86::VSQRTPDZrkz: |
7959 | case X86::VSQRTPSZ128m: |
7960 | case X86::VSQRTPSZ128mb: |
7961 | case X86::VSQRTPSZ128mbk: |
7962 | case X86::VSQRTPSZ128mbkz: |
7963 | case X86::VSQRTPSZ128mk: |
7964 | case X86::VSQRTPSZ128mkz: |
7965 | case X86::VSQRTPSZ128r: |
7966 | case X86::VSQRTPSZ128rk: |
7967 | case X86::VSQRTPSZ128rkz: |
7968 | case X86::VSQRTPSZ256m: |
7969 | case X86::VSQRTPSZ256mb: |
7970 | case X86::VSQRTPSZ256mbk: |
7971 | case X86::VSQRTPSZ256mbkz: |
7972 | case X86::VSQRTPSZ256mk: |
7973 | case X86::VSQRTPSZ256mkz: |
7974 | case X86::VSQRTPSZ256r: |
7975 | case X86::VSQRTPSZ256rk: |
7976 | case X86::VSQRTPSZ256rkz: |
7977 | case X86::VSQRTPSZm: |
7978 | case X86::VSQRTPSZmb: |
7979 | case X86::VSQRTPSZmbk: |
7980 | case X86::VSQRTPSZmbkz: |
7981 | case X86::VSQRTPSZmk: |
7982 | case X86::VSQRTPSZmkz: |
7983 | case X86::VSQRTPSZr: |
7984 | case X86::VSQRTPSZrb: |
7985 | case X86::VSQRTPSZrbk: |
7986 | case X86::VSQRTPSZrbkz: |
7987 | case X86::VSQRTPSZrk: |
7988 | case X86::VSQRTPSZrkz: |
7989 | case X86::VSQRTSDZm: |
7990 | case X86::VSQRTSDZm_Int: |
7991 | case X86::VSQRTSDZm_Intk: |
7992 | case X86::VSQRTSDZm_Intkz: |
7993 | case X86::VSQRTSDZr: |
7994 | case X86::VSQRTSDZr_Int: |
7995 | case X86::VSQRTSDZr_Intk: |
7996 | case X86::VSQRTSDZr_Intkz: |
7997 | case X86::VSQRTSDZrb_Int: |
7998 | case X86::VSQRTSDZrb_Intk: |
7999 | case X86::VSQRTSDZrb_Intkz: |
8000 | case X86::VSQRTSSZm: |
8001 | case X86::VSQRTSSZm_Int: |
8002 | case X86::VSQRTSSZm_Intk: |
8003 | case X86::VSQRTSSZm_Intkz: |
8004 | case X86::VSQRTSSZr: |
8005 | case X86::VSQRTSSZr_Int: |
8006 | case X86::VSQRTSSZr_Intk: |
8007 | case X86::VSQRTSSZr_Intkz: |
8008 | case X86::VSQRTSSZrb_Int: |
8009 | case X86::VSQRTSSZrb_Intk: |
8010 | case X86::VSQRTSSZrb_Intkz: |
8011 | |
8012 | case X86::VGATHERDPDYrm: |
8013 | case X86::VGATHERDPDZ128rm: |
8014 | case X86::VGATHERDPDZ256rm: |
8015 | case X86::VGATHERDPDZrm: |
8016 | case X86::VGATHERDPDrm: |
8017 | case X86::VGATHERDPSYrm: |
8018 | case X86::VGATHERDPSZ128rm: |
8019 | case X86::VGATHERDPSZ256rm: |
8020 | case X86::VGATHERDPSZrm: |
8021 | case X86::VGATHERDPSrm: |
8022 | case X86::VGATHERPF0DPDm: |
8023 | case X86::VGATHERPF0DPSm: |
8024 | case X86::VGATHERPF0QPDm: |
8025 | case X86::VGATHERPF0QPSm: |
8026 | case X86::VGATHERPF1DPDm: |
8027 | case X86::VGATHERPF1DPSm: |
8028 | case X86::VGATHERPF1QPDm: |
8029 | case X86::VGATHERPF1QPSm: |
8030 | case X86::VGATHERQPDYrm: |
8031 | case X86::VGATHERQPDZ128rm: |
8032 | case X86::VGATHERQPDZ256rm: |
8033 | case X86::VGATHERQPDZrm: |
8034 | case X86::VGATHERQPDrm: |
8035 | case X86::VGATHERQPSYrm: |
8036 | case X86::VGATHERQPSZ128rm: |
8037 | case X86::VGATHERQPSZ256rm: |
8038 | case X86::VGATHERQPSZrm: |
8039 | case X86::VGATHERQPSrm: |
8040 | case X86::VPGATHERDDYrm: |
8041 | case X86::VPGATHERDDZ128rm: |
8042 | case X86::VPGATHERDDZ256rm: |
8043 | case X86::VPGATHERDDZrm: |
8044 | case X86::VPGATHERDDrm: |
8045 | case X86::VPGATHERDQYrm: |
8046 | case X86::VPGATHERDQZ128rm: |
8047 | case X86::VPGATHERDQZ256rm: |
8048 | case X86::VPGATHERDQZrm: |
8049 | case X86::VPGATHERDQrm: |
8050 | case X86::VPGATHERQDYrm: |
8051 | case X86::VPGATHERQDZ128rm: |
8052 | case X86::VPGATHERQDZ256rm: |
8053 | case X86::VPGATHERQDZrm: |
8054 | case X86::VPGATHERQDrm: |
8055 | case X86::VPGATHERQQYrm: |
8056 | case X86::VPGATHERQQZ128rm: |
8057 | case X86::VPGATHERQQZ256rm: |
8058 | case X86::VPGATHERQQZrm: |
8059 | case X86::VPGATHERQQrm: |
8060 | case X86::VSCATTERDPDZ128mr: |
8061 | case X86::VSCATTERDPDZ256mr: |
8062 | case X86::VSCATTERDPDZmr: |
8063 | case X86::VSCATTERDPSZ128mr: |
8064 | case X86::VSCATTERDPSZ256mr: |
8065 | case X86::VSCATTERDPSZmr: |
8066 | case X86::VSCATTERPF0DPDm: |
8067 | case X86::VSCATTERPF0DPSm: |
8068 | case X86::VSCATTERPF0QPDm: |
8069 | case X86::VSCATTERPF0QPSm: |
8070 | case X86::VSCATTERPF1DPDm: |
8071 | case X86::VSCATTERPF1DPSm: |
8072 | case X86::VSCATTERPF1QPDm: |
8073 | case X86::VSCATTERPF1QPSm: |
8074 | case X86::VSCATTERQPDZ128mr: |
8075 | case X86::VSCATTERQPDZ256mr: |
8076 | case X86::VSCATTERQPDZmr: |
8077 | case X86::VSCATTERQPSZ128mr: |
8078 | case X86::VSCATTERQPSZ256mr: |
8079 | case X86::VSCATTERQPSZmr: |
8080 | case X86::VPSCATTERDDZ128mr: |
8081 | case X86::VPSCATTERDDZ256mr: |
8082 | case X86::VPSCATTERDDZmr: |
8083 | case X86::VPSCATTERDQZ128mr: |
8084 | case X86::VPSCATTERDQZ256mr: |
8085 | case X86::VPSCATTERDQZmr: |
8086 | case X86::VPSCATTERQDZ128mr: |
8087 | case X86::VPSCATTERQDZ256mr: |
8088 | case X86::VPSCATTERQDZmr: |
8089 | case X86::VPSCATTERQQZ128mr: |
8090 | case X86::VPSCATTERQQZ256mr: |
8091 | case X86::VPSCATTERQQZmr: |
8092 | return true; |
8093 | } |
8094 | } |
8095 | |
8096 | bool X86InstrInfo::hasHighOperandLatency(const TargetSchedModel &SchedModel, |
8097 | const MachineRegisterInfo *MRI, |
8098 | const MachineInstr &DefMI, |
8099 | unsigned DefIdx, |
8100 | const MachineInstr &UseMI, |
8101 | unsigned UseIdx) const { |
8102 | return isHighLatencyDef(DefMI.getOpcode()); |
8103 | } |
8104 | |
8105 | bool X86InstrInfo::hasReassociableOperands(const MachineInstr &Inst, |
8106 | const MachineBasicBlock *MBB) const { |
8107 | assert(Inst.getNumExplicitOperands() == 3 && Inst.getNumExplicitDefs() == 1 && |
8108 | Inst.getNumDefs() <= 2 && "Reassociation needs binary operators"); |
8109 | |
8110 | |
8111 | |
8112 | |
8113 | |
8114 | |
8115 | |
8116 | const MachineOperand *FlagDef = Inst.findRegisterDefOperand(X86::EFLAGS); |
8117 | assert((Inst.getNumDefs() == 1 || FlagDef) && |
8118 | "Implicit def isn't flags?"); |
8119 | if (FlagDef && !FlagDef->isDead()) |
8120 | return false; |
8121 | |
8122 | return TargetInstrInfo::hasReassociableOperands(Inst, MBB); |
8123 | } |
8124 | |
8125 | |
8126 | |
8127 | |
8128 | |
8129 | bool X86InstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst) const { |
8130 | switch (Inst.getOpcode()) { |
8131 | case X86::AND8rr: |
8132 | case X86::AND16rr: |
8133 | case X86::AND32rr: |
8134 | case X86::AND64rr: |
8135 | case X86::OR8rr: |
8136 | case X86::OR16rr: |
8137 | case X86::OR32rr: |
8138 | case X86::OR64rr: |
8139 | case X86::XOR8rr: |
8140 | case X86::XOR16rr: |
8141 | case X86::XOR32rr: |
8142 | case X86::XOR64rr: |
8143 | case X86::IMUL16rr: |
8144 | case X86::IMUL32rr: |
8145 | case X86::IMUL64rr: |
8146 | case X86::PANDrr: |
8147 | case X86::PORrr: |
8148 | case X86::PXORrr: |
8149 | case X86::ANDPDrr: |
8150 | case X86::ANDPSrr: |
8151 | case X86::ORPDrr: |
8152 | case X86::ORPSrr: |
8153 | case X86::XORPDrr: |
8154 | case X86::XORPSrr: |
8155 | case X86::PADDBrr: |
8156 | case X86::PADDWrr: |
8157 | case X86::PADDDrr: |
8158 | case X86::PADDQrr: |
8159 | case X86::PMULLWrr: |
8160 | case X86::PMULLDrr: |
8161 | case X86::PMAXSBrr: |
8162 | case X86::PMAXSDrr: |
8163 | case X86::PMAXSWrr: |
8164 | case X86::PMAXUBrr: |
8165 | case X86::PMAXUDrr: |
8166 | case X86::PMAXUWrr: |
8167 | case X86::PMINSBrr: |
8168 | case X86::PMINSDrr: |
8169 | case X86::PMINSWrr: |
8170 | case X86::PMINUBrr: |
8171 | case X86::PMINUDrr: |
8172 | case X86::PMINUWrr: |
8173 | case X86::VPANDrr: |
8174 | case X86::VPANDYrr: |
8175 | case X86::VPANDDZ128rr: |
8176 | case X86::VPANDDZ256rr: |
8177 | case X86::VPANDDZrr: |
8178 | case X86::VPANDQZ128rr: |
8179 | case X86::VPANDQZ256rr: |
8180 | case X86::VPANDQZrr: |
8181 | case X86::VPORrr: |
8182 | case X86::VPORYrr: |
8183 | case X86::VPORDZ128rr: |
8184 | case X86::VPORDZ256rr: |
8185 | case X86::VPORDZrr: |
8186 | case X86::VPORQZ128rr: |
8187 | case X86::VPORQZ256rr: |
8188 | case X86::VPORQZrr: |
8189 | case X86::VPXORrr: |
8190 | case X86::VPXORYrr: |
8191 | case X86::VPXORDZ128rr: |
8192 | case X86::VPXORDZ256rr: |
8193 | case X86::VPXORDZrr: |
8194 | case X86::VPXORQZ128rr: |
8195 | case X86::VPXORQZ256rr: |
8196 | case X86::VPXORQZrr: |
8197 | case X86::VANDPDrr: |
8198 | case X86::VANDPSrr: |
8199 | case X86::VANDPDYrr: |
8200 | case X86::VANDPSYrr: |
8201 | case X86::VANDPDZ128rr: |
8202 | case X86::VANDPSZ128rr: |
8203 | case X86::VANDPDZ256rr: |
8204 | case X86::VANDPSZ256rr: |
8205 | case X86::VANDPDZrr: |
8206 | case X86::VANDPSZrr: |
8207 | case X86::VORPDrr: |
8208 | case X86::VORPSrr: |
8209 | case X86::VORPDYrr: |
8210 | case X86::VORPSYrr: |
8211 | case X86::VORPDZ128rr: |
8212 | case X86::VORPSZ128rr: |
8213 | case X86::VORPDZ256rr: |
8214 | case X86::VORPSZ256rr: |
8215 | case X86::VORPDZrr: |
8216 | case X86::VORPSZrr: |
8217 | case X86::VXORPDrr: |
8218 | case X86::VXORPSrr: |
8219 | case X86::VXORPDYrr: |
8220 | case X86::VXORPSYrr: |
8221 | case X86::VXORPDZ128rr: |
8222 | case X86::VXORPSZ128rr: |
8223 | case X86::VXORPDZ256rr: |
8224 | case X86::VXORPSZ256rr: |
8225 | case X86::VXORPDZrr: |
8226 | case X86::VXORPSZrr: |
8227 | case X86::KADDBrr: |
8228 | case X86::KADDWrr: |
8229 | case X86::KADDDrr: |
8230 | case X86::KADDQrr: |
8231 | case X86::KANDBrr: |
8232 | case X86::KANDWrr: |
8233 | case X86::KANDDrr: |
8234 | case X86::KANDQrr: |
8235 | case X86::KORBrr: |
8236 | case X86::KORWrr: |
8237 | case X86::KORDrr: |
8238 | case X86::KORQrr: |
8239 | case X86::KXORBrr: |
8240 | case X86::KXORWrr: |
8241 | case X86::KXORDrr: |
8242 | case X86::KXORQrr: |
8243 | case X86::VPADDBrr: |
8244 | case X86::VPADDWrr: |
8245 | case X86::VPADDDrr: |
8246 | case X86::VPADDQrr: |
8247 | case X86::VPADDBYrr: |
8248 | case X86::VPADDWYrr: |
8249 | case X86::VPADDDYrr: |
8250 | case X86::VPADDQYrr: |
8251 | case X86::VPADDBZ128rr: |
8252 | case X86::VPADDWZ128rr: |
8253 | case X86::VPADDDZ128rr: |
8254 | case X86::VPADDQZ128rr: |
8255 | case X86::VPADDBZ256rr: |
8256 | case X86::VPADDWZ256rr: |
8257 | case X86::VPADDDZ256rr: |
8258 | case X86::VPADDQZ256rr: |
8259 | case X86::VPADDBZrr: |
8260 | case X86::VPADDWZrr: |
8261 | case X86::VPADDDZrr: |
8262 | case X86::VPADDQZrr: |
8263 | case X86::VPMULLWrr: |
8264 | case X86::VPMULLWYrr: |
8265 | case X86::VPMULLWZ128rr: |
8266 | case X86::VPMULLWZ256rr: |
8267 | case X86::VPMULLWZrr: |
8268 | case X86::VPMULLDrr: |
8269 | case X86::VPMULLDYrr: |
8270 | case X86::VPMULLDZ128rr: |
8271 | case X86::VPMULLDZ256rr: |
8272 | case X86::VPMULLDZrr: |
8273 | case X86::VPMULLQZ128rr: |
8274 | case X86::VPMULLQZ256rr: |
8275 | case X86::VPMULLQZrr: |
8276 | case X86::VPMAXSBrr: |
8277 | case X86::VPMAXSBYrr: |
8278 | case X86::VPMAXSBZ128rr: |
8279 | case X86::VPMAXSBZ256rr: |
8280 | case X86::VPMAXSBZrr: |
8281 | case X86::VPMAXSDrr: |
8282 | case X86::VPMAXSDYrr: |
8283 | case X86::VPMAXSDZ128rr: |
8284 | case X86::VPMAXSDZ256rr: |
8285 | case X86::VPMAXSDZrr: |
8286 | case X86::VPMAXSQZ128rr: |
8287 | case X86::VPMAXSQZ256rr: |
8288 | case X86::VPMAXSQZrr: |
8289 | case X86::VPMAXSWrr: |
8290 | case X86::VPMAXSWYrr: |
8291 | case X86::VPMAXSWZ128rr: |
8292 | case X86::VPMAXSWZ256rr: |
8293 | case X86::VPMAXSWZrr: |
8294 | case X86::VPMAXUBrr: |
8295 | case X86::VPMAXUBYrr: |
8296 | case X86::VPMAXUBZ128rr: |
8297 | case X86::VPMAXUBZ256rr: |
8298 | case X86::VPMAXUBZrr: |
8299 | case X86::VPMAXUDrr: |
8300 | case X86::VPMAXUDYrr: |
8301 | case X86::VPMAXUDZ128rr: |
8302 | case X86::VPMAXUDZ256rr: |
8303 | case X86::VPMAXUDZrr: |
8304 | case X86::VPMAXUQZ128rr: |
8305 | case X86::VPMAXUQZ256rr: |
8306 | case X86::VPMAXUQZrr: |
8307 | case X86::VPMAXUWrr: |
8308 | case X86::VPMAXUWYrr: |
8309 | case X86::VPMAXUWZ128rr: |
8310 | case X86::VPMAXUWZ256rr: |
8311 | case X86::VPMAXUWZrr: |
8312 | case X86::VPMINSBrr: |
8313 | case X86::VPMINSBYrr: |
8314 | case X86::VPMINSBZ128rr: |
8315 | case X86::VPMINSBZ256rr: |
8316 | case X86::VPMINSBZrr: |
8317 | case X86::VPMINSDrr: |
8318 | case X86::VPMINSDYrr: |
8319 | case X86::VPMINSDZ128rr: |
8320 | case X86::VPMINSDZ256rr: |
8321 | case X86::VPMINSDZrr: |
8322 | case X86::VPMINSQZ128rr: |
8323 | case X86::VPMINSQZ256rr: |
8324 | case X86::VPMINSQZrr: |
8325 | case X86::VPMINSWrr: |
8326 | case X86::VPMINSWYrr: |
8327 | case X86::VPMINSWZ128rr: |
8328 | case X86::VPMINSWZ256rr: |
8329 | case X86::VPMINSWZrr: |
8330 | case X86::VPMINUBrr: |
8331 | case X86::VPMINUBYrr: |
8332 | case X86::VPMINUBZ128rr: |
8333 | case X86::VPMINUBZ256rr: |
8334 | case X86::VPMINUBZrr: |
8335 | case X86::VPMINUDrr: |
8336 | case X86::VPMINUDYrr: |
8337 | case X86::VPMINUDZ128rr: |
8338 | case X86::VPMINUDZ256rr: |
8339 | case X86::VPMINUDZrr: |
8340 | case X86::VPMINUQZ128rr: |
8341 | case X86::VPMINUQZ256rr: |
8342 | case X86::VPMINUQZrr: |
8343 | case X86::VPMINUWrr: |
8344 | case X86::VPMINUWYrr: |
8345 | case X86::VPMINUWZ128rr: |
8346 | case X86::VPMINUWZ256rr: |
8347 | case X86::VPMINUWZrr: |
8348 | |
8349 | |
8350 | |
8351 | case X86::MAXCPDrr: |
8352 | case X86::MAXCPSrr: |
8353 | case X86::MAXCSDrr: |
8354 | case X86::MAXCSSrr: |
8355 | case X86::MINCPDrr: |
8356 | case X86::MINCPSrr: |
8357 | case X86::MINCSDrr: |
8358 | case X86::MINCSSrr: |
8359 | case X86::VMAXCPDrr: |
8360 | case X86::VMAXCPSrr: |
8361 | case X86::VMAXCPDYrr: |
8362 | case X86::VMAXCPSYrr: |
8363 | case X86::VMAXCPDZ128rr: |
8364 | case X86::VMAXCPSZ128rr: |
8365 | case X86::VMAXCPDZ256rr: |
8366 | case X86::VMAXCPSZ256rr: |
8367 | case X86::VMAXCPDZrr: |
8368 | case X86::VMAXCPSZrr: |
8369 | case X86::VMAXCSDrr: |
8370 | case X86::VMAXCSSrr: |
8371 | case X86::VMAXCSDZrr: |
8372 | case X86::VMAXCSSZrr: |
8373 | case X86::VMINCPDrr: |
8374 | case X86::VMINCPSrr: |
8375 | case X86::VMINCPDYrr: |
8376 | case X86::VMINCPSYrr: |
8377 | case X86::VMINCPDZ128rr: |
8378 | case X86::VMINCPSZ128rr: |
8379 | case X86::VMINCPDZ256rr: |
8380 | case X86::VMINCPSZ256rr: |
8381 | case X86::VMINCPDZrr: |
8382 | case X86::VMINCPSZrr: |
8383 | case X86::VMINCSDrr: |
8384 | case X86::VMINCSSrr: |
8385 | case X86::VMINCSDZrr: |
8386 | case X86::VMINCSSZrr: |
8387 | return true; |
8388 | case X86::ADDPDrr: |
8389 | case X86::ADDPSrr: |
8390 | case X86::ADDSDrr: |
8391 | case X86::ADDSSrr: |
8392 | case X86::MULPDrr: |
8393 | case X86::MULPSrr: |
8394 | case X86::MULSDrr: |
8395 | case X86::MULSSrr: |
8396 | case X86::VADDPDrr: |
8397 | case X86::VADDPSrr: |
8398 | case X86::VADDPDYrr: |
8399 | case X86::VADDPSYrr: |
8400 | case X86::VADDPDZ128rr: |
8401 | case X86::VADDPSZ128rr: |
8402 | case X86::VADDPDZ256rr: |
8403 | case X86::VADDPSZ256rr: |
8404 | case X86::VADDPDZrr: |
8405 | case X86::VADDPSZrr: |
8406 | case X86::VADDSDrr: |
8407 | case X86::VADDSSrr: |
8408 | case X86::VADDSDZrr: |
8409 | case X86::VADDSSZrr: |
8410 | case X86::VMULPDrr: |
8411 | case X86::VMULPSrr: |
8412 | case X86::VMULPDYrr: |
8413 | case X86::VMULPSYrr: |
8414 | case X86::VMULPDZ128rr: |
8415 | case X86::VMULPSZ128rr: |
8416 | case X86::VMULPDZ256rr: |
8417 | case X86::VMULPSZ256rr: |
8418 | case X86::VMULPDZrr: |
8419 | case X86::VMULPSZrr: |
8420 | case X86::VMULSDrr: |
8421 | case X86::VMULSSrr: |
8422 | case X86::VMULSDZrr: |
8423 | case X86::VMULSSZrr: |
8424 | return Inst.getFlag(MachineInstr::MIFlag::FmReassoc) && |
8425 | Inst.getFlag(MachineInstr::MIFlag::FmNsz); |
8426 | default: |
8427 | return false; |
8428 | } |
8429 | } |
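
The long run of integer, logic, and min/max opcodes above is treated as unconditionally
reassociable, while the floating-point add/multiply opcodes at the bottom only qualify when
the individual MachineInstr carries both the reassoc and nsz fast-math flags (lines
8424-8425), since reordering FP operations can change rounding and the sign of zero. A
minimal sketch of that per-instruction check (the free function is illustrative, not part
of this file):

    #include "llvm/CodeGen/MachineInstr.h"

    // FP add/mul may only be reassociated when the instruction itself was tagged
    // with both fast-math flags; the decision is per-instruction, not per-function.
    static bool fpReassocAllowed(const llvm::MachineInstr &MI) {
      return MI.getFlag(llvm::MachineInstr::MIFlag::FmReassoc) &&
             MI.getFlag(llvm::MachineInstr::MIFlag::FmNsz);
    }
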
8430 | |
8431 | |
8432 | |
8433 | |
8434 | static Optional<ParamLoadedValue> |
8435 | describeMOVrrLoadedValue(const MachineInstr &MI, Register DescribedReg, |
8436 | const TargetRegisterInfo *TRI) { |
8437 | Register DestReg = MI.getOperand(0).getReg(); |
8438 | Register SrcReg = MI.getOperand(1).getReg(); |
8439 | |
8440 | auto Expr = DIExpression::get(MI.getMF()->getFunction().getContext(), {}); |
8441 | |
8442 | |
8443 | if (DestReg == DescribedReg) |
8444 | return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr); |
8445 | |
8446 | |
8447 | |
8448 | if (unsigned SubRegIdx = TRI->getSubRegIndex(DestReg, DescribedReg)) { |
8449 | Register SrcSubReg = TRI->getSubReg(SrcReg, SubRegIdx); |
8450 | return ParamLoadedValue(MachineOperand::CreateReg(SrcSubReg, false), Expr); |
8451 | } |
8452 | |
8453 | |
8454 | |
8455 | |
8456 | |
8457 | |
8458 | |
8459 | if (MI.getOpcode() == X86::MOV8rr || MI.getOpcode() == X86::MOV16rr || |
8460 | !TRI->isSuperRegister(DestReg, DescribedReg)) |
8461 | return None; |
8462 | |
8463 | assert(MI.getOpcode() == X86::MOV32rr && "Unexpected super-register case"); |
8464 | return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr); |
8465 | } |
8466 | |
8467 | Optional<ParamLoadedValue> |
8468 | X86InstrInfo::describeLoadedValue(const MachineInstr &MI, Register Reg) const { |
8469 | const MachineOperand *Op = nullptr; |
8470 | DIExpression *Expr = nullptr; |
8471 | |
8472 | const TargetRegisterInfo *TRI = &getRegisterInfo(); |
8473 | |
8474 | switch (MI.getOpcode()) { |
8475 | case X86::LEA32r: |
8476 | case X86::LEA64r: |
8477 | case X86::LEA64_32r: { |
8478 | |
8479 | if (!TRI->isSuperRegisterEq(MI.getOperand(0).getReg(), Reg)) |
8480 | return None; |
8481 | |
8482 | |
8483 | |
8484 | if (!MI.getOperand(4).isImm() || !MI.getOperand(2).isImm()) |
8485 | return None; |
8486 | |
8487 | const MachineOperand &Op1 = MI.getOperand(1); |
8488 | const MachineOperand &Op2 = MI.getOperand(3); |
8489 | assert(Op2.isReg() && (Op2.getReg() == X86::NoRegister || |
8490 | Register::isPhysicalRegister(Op2.getReg()))); |
8491 | |
8492 | |
8493 | |
8494 | if ((Op1.isReg() && Op1.getReg() == MI.getOperand(0).getReg()) || |
8495 | Op2.getReg() == MI.getOperand(0).getReg()) |
8496 | return None; |
8497 | else if ((Op1.isReg() && Op1.getReg() != X86::NoRegister && |
8498 | TRI->regsOverlap(Op1.getReg(), MI.getOperand(0).getReg())) || |
8499 | (Op2.getReg() != X86::NoRegister && |
8500 | TRI->regsOverlap(Op2.getReg(), MI.getOperand(0).getReg()))) |
8501 | return None; |
8502 | |
8503 | int64_t Coef = MI.getOperand(2).getImm(); |
8504 | int64_t Offset = MI.getOperand(4).getImm(); |
8505 | SmallVector<uint64_t, 8> Ops; |
8506 | |
8507 | if ((Op1.isReg() && Op1.getReg() != X86::NoRegister)) { |
8508 | Op = &Op1; |
8509 | } else if (Op1.isFI()) |
8510 | Op = &Op1; |
8511 | |
8512 | if (Op && Op->isReg() && Op->getReg() == Op2.getReg() && Coef > 0) { |
8513 | Ops.push_back(dwarf::DW_OP_constu); |
8514 | Ops.push_back(Coef + 1); |
8515 | Ops.push_back(dwarf::DW_OP_mul); |
8516 | } else { |
8517 | if (Op && Op2.getReg() != X86::NoRegister) { |
8518 | int dwarfReg = TRI->getDwarfRegNum(Op2.getReg(), false); |
8519 | if (dwarfReg < 0) |
8520 | return None; |
8521 | else if (dwarfReg < 32) { |
8522 | Ops.push_back(dwarf::DW_OP_breg0 + dwarfReg); |
8523 | Ops.push_back(0); |
8524 | } else { |
8525 | Ops.push_back(dwarf::DW_OP_bregx); |
8526 | Ops.push_back(dwarfReg); |
8527 | Ops.push_back(0); |
8528 | } |
8529 | } else if (!Op) { |
8530 | assert(Op2.getReg() != X86::NoRegister); |
8531 | Op = &Op2; |
8532 | } |
8533 | |
8534 | if (Coef > 1) { |
8535 | assert(Op2.getReg() != X86::NoRegister); |
8536 | Ops.push_back(dwarf::DW_OP_constu); |
8537 | Ops.push_back(Coef); |
8538 | Ops.push_back(dwarf::DW_OP_mul); |
8539 | } |
8540 | |
8541 | if (((Op1.isReg() && Op1.getReg() != X86::NoRegister) || Op1.isFI()) && |
8542 | Op2.getReg() != X86::NoRegister) { |
8543 | Ops.push_back(dwarf::DW_OP_plus); |
8544 | } |
8545 | } |
8546 | |
8547 | DIExpression::appendOffset(Ops, Offset); |
8548 | Expr = DIExpression::get(MI.getMF()->getFunction().getContext(), Ops); |
8549 | |
8550 | return ParamLoadedValue(*Op, Expr); |
8551 | } |
8552 | case X86::MOV8ri: |
8553 | case X86::MOV16ri: |
8554 | |
8555 | return None; |
8556 | case X86::MOV32ri: |
8557 | case X86::MOV64ri: |
8558 | case X86::MOV64ri32: |
8559 | |
8560 | |
8561 | if (!TRI->isSuperRegisterEq(MI.getOperand(0).getReg(), Reg)) |
8562 | return None; |
8563 | return ParamLoadedValue(MI.getOperand(1), Expr); |
8564 | case X86::MOV8rr: |
8565 | case X86::MOV16rr: |
8566 | case X86::MOV32rr: |
8567 | case X86::MOV64rr: |
8568 | return describeMOVrrLoadedValue(MI, Reg, TRI); |
8569 | case X86::XOR32rr: { |
8570 | |
8571 | |
8572 | if (!TRI->isSuperRegisterEq(MI.getOperand(0).getReg(), Reg)) |
8573 | return None; |
8574 | if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg()) |
8575 | return ParamLoadedValue(MachineOperand::CreateImm(0), Expr); |
8576 | return None; |
8577 | } |
8578 | case X86::MOVSX64rr32: { |
8579 | |
8580 | |
8581 | |
8582 | |
8583 | |
8584 | |
8585 | if (!TRI->isSubRegisterEq(MI.getOperand(0).getReg(), Reg)) |
8586 | return None; |
8587 | |
8588 | Expr = DIExpression::get(MI.getMF()->getFunction().getContext(), {}); |
8589 | |
8590 | |
8591 | |
8592 | |
8593 | |
8594 | |
8595 | if (Reg == MI.getOperand(0).getReg()) |
8596 | Expr = DIExpression::appendExt(Expr, 32, 64, true); |
8597 | else |
8598 | assert(X86MCRegisterClasses[X86::GR32RegClassID].contains(Reg) && |
8599 | "Unhandled sub-register case for MOVSX64rr32"); |
8600 | |
8601 | return ParamLoadedValue(MI.getOperand(1), Expr); |
8602 | } |
8603 | default: |
8604 | assert(!MI.isMoveImmediate() && "Unexpected MoveImm instruction"); |
8605 | return TargetInstrInfo::describeLoadedValue(MI, Reg); |
8606 | } |
8607 | } |
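
The LEA case rebuilds the address arithmetic as a DWARF expression evaluated on top of the
base operand that is returned as the machine operand. Two worked examples, assuming the
usual x86-64 DWARF register numbering (the registers and constants are illustrative):

    leaq 16(%rdi,%rdi,2), %rax      base == index, Coef = 2
      -> operand %rdi, expression DW_OP_constu 3, DW_OP_mul, DW_OP_plus_uconst 16
         (the loaded value is rdi * 3 + 16)

    leaq 8(%rdi,%rsi,4), %rax       distinct base and index
      -> operand %rdi, expression DW_OP_breg4 (rsi) 0, DW_OP_constu 4, DW_OP_mul,
         DW_OP_plus, DW_OP_plus_uconst 8
         (the loaded value is rdi + rsi * 4 + 8)

MOV8ri and MOV16ri are currently not described at all, and MOVSX64rr32 appends a 32-to-64
sign-extension to the expression only when the full 64-bit destination register is being
described.
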
8608 | |
8609 | |
8610 | |
8611 | void X86InstrInfo::setSpecialOperandAttr(MachineInstr &OldMI1, |
8612 | MachineInstr &OldMI2, |
8613 | MachineInstr &NewMI1, |
8614 | MachineInstr &NewMI2) const { |
8615 | |
8616 | |
8617 | |
8618 | uint16_t IntersectedFlags = OldMI1.getFlags() & OldMI2.getFlags(); |
8619 | NewMI1.setFlags(IntersectedFlags); |
8620 | NewMI1.clearFlag(MachineInstr::MIFlag::NoSWrap); |
8621 | NewMI1.clearFlag(MachineInstr::MIFlag::NoUWrap); |
8622 | NewMI1.clearFlag(MachineInstr::MIFlag::IsExact); |
8623 | |
8624 | NewMI2.setFlags(IntersectedFlags); |
8625 | NewMI2.clearFlag(MachineInstr::MIFlag::NoSWrap); |
8626 | NewMI2.clearFlag(MachineInstr::MIFlag::NoUWrap); |
8627 | NewMI2.clearFlag(MachineInstr::MIFlag::IsExact); |
8628 | |
8629 | |
8630 | MachineOperand *OldFlagDef1 = OldMI1.findRegisterDefOperand(X86::EFLAGS); |
8631 | MachineOperand *OldFlagDef2 = OldMI2.findRegisterDefOperand(X86::EFLAGS); |
8632 | |
8633 | assert(!OldFlagDef1 == !OldFlagDef2 && |
8634 | "Unexpected instruction type for reassociation"); |
8635 | |
8636 | if (!OldFlagDef1 || !OldFlagDef2) |
8637 | return; |
8638 | |
8639 | assert(OldFlagDef1->isDead() && OldFlagDef2->isDead() && |
8640 | "Must have dead EFLAGS operand in reassociable instruction"); |
8641 | |
8642 | MachineOperand *NewFlagDef1 = NewMI1.findRegisterDefOperand(X86::EFLAGS); |
8643 | MachineOperand *NewFlagDef2 = NewMI2.findRegisterDefOperand(X86::EFLAGS); |
8644 | |
8645 | assert(NewFlagDef1 && NewFlagDef2 && |
8646 | "Unexpected operand in reassociable instruction"); |
8647 | |
8648 | |
8649 | |
8650 | |
8651 | |
8652 | NewFlagDef1->setIsDead(); |
8653 | NewFlagDef2->setIsDead(); |
8654 | } |
8655 | |
8656 | std::pair<unsigned, unsigned> |
8657 | X86InstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const { |
8658 | return std::make_pair(TF, 0u); |
8659 | } |
8660 | |
8661 | ArrayRef<std::pair<unsigned, const char *>> |
8662 | X86InstrInfo::getSerializableDirectMachineOperandTargetFlags() const { |
8663 | using namespace X86II; |
8664 | static const std::pair<unsigned, const char *> TargetFlags[] = { |
8665 | {MO_GOT_ABSOLUTE_ADDRESS, "x86-got-absolute-address"}, |
8666 | {MO_PIC_BASE_OFFSET, "x86-pic-base-offset"}, |
8667 | {MO_GOT, "x86-got"}, |
8668 | {MO_GOTOFF, "x86-gotoff"}, |
8669 | {MO_GOTPCREL, "x86-gotpcrel"}, |
8670 | {MO_PLT, "x86-plt"}, |
8671 | {MO_TLSGD, "x86-tlsgd"}, |
8672 | {MO_TLSLD, "x86-tlsld"}, |
8673 | {MO_TLSLDM, "x86-tlsldm"}, |
8674 | {MO_GOTTPOFF, "x86-gottpoff"}, |
8675 | {MO_INDNTPOFF, "x86-indntpoff"}, |
8676 | {MO_TPOFF, "x86-tpoff"}, |
8677 | {MO_DTPOFF, "x86-dtpoff"}, |
8678 | {MO_NTPOFF, "x86-ntpoff"}, |
8679 | {MO_GOTNTPOFF, "x86-gotntpoff"}, |
8680 | {MO_DLLIMPORT, "x86-dllimport"}, |
8681 | {MO_DARWIN_NONLAZY, "x86-darwin-nonlazy"}, |
8682 | {MO_DARWIN_NONLAZY_PIC_BASE, "x86-darwin-nonlazy-pic-base"}, |
8683 | {MO_TLVP, "x86-tlvp"}, |
8684 | {MO_TLVP_PIC_BASE, "x86-tlvp-pic-base"}, |
8685 | {MO_SECREL, "x86-secrel"}, |
8686 | {MO_COFFSTUB, "x86-coffstub"}}; |
8687 | return makeArrayRef(TargetFlags); |
8688 | } |
8689 | |
8690 | namespace { |
8691 | |
8692 | |
8693 | struct CGBR : public MachineFunctionPass { |
8694 | static char ID; |
8695 | CGBR() : MachineFunctionPass(ID) {} |
8696 | |
8697 | bool runOnMachineFunction(MachineFunction &MF) override { |
8698 | const X86TargetMachine *TM = |
8699 | static_cast<const X86TargetMachine *>(&MF.getTarget()); |
8700 | const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>(); |
8701 | |
8702 | |
8703 | |
8704 | if (STI.is64Bit() && (TM->getCodeModel() == CodeModel::Small || |
8705 | TM->getCodeModel() == CodeModel::Kernel)) |
8706 | return false; |
8707 | |
8708 | |
8709 | if (!TM->isPositionIndependent()) |
8710 | return false; |
8711 | |
8712 | X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>(); |
8713 | Register GlobalBaseReg = X86FI->getGlobalBaseReg(); |
8714 | |
8715 | |
8716 | if (GlobalBaseReg == 0) |
8717 | return false; |
8718 | |
8719 | |
8720 | MachineBasicBlock &FirstMBB = MF.front(); |
8721 | MachineBasicBlock::iterator MBBI = FirstMBB.begin(); |
8722 | DebugLoc DL = FirstMBB.findDebugLoc(MBBI); |
8723 | MachineRegisterInfo &RegInfo = MF.getRegInfo(); |
8724 | const X86InstrInfo *TII = STI.getInstrInfo(); |
8725 | |
8726 | Register PC; |
8727 | if (STI.isPICStyleGOT()) |
8728 | PC = RegInfo.createVirtualRegister(&X86::GR32RegClass); |
8729 | else |
8730 | PC = GlobalBaseReg; |
8731 | |
8732 | if (STI.is64Bit()) { |
8733 | if (TM->getCodeModel() == CodeModel::Medium) { |
8734 | |
8735 | |
8736 | BuildMI(FirstMBB, MBBI, DL, TII->get(X86::LEA64r), PC) |
8737 | .addReg(X86::RIP) |
8738 | .addImm(0) |
8739 | .addReg(0) |
8740 | .addExternalSymbol("_GLOBAL_OFFSET_TABLE_") |
8741 | .addReg(0); |
8742 | } else if (TM->getCodeModel() == CodeModel::Large) { |
8743 | |
8744 | |
8745 | |
8746 | |
8747 | |
8748 | |
8749 | Register PBReg = RegInfo.createVirtualRegister(&X86::GR64RegClass); |
8750 | Register GOTReg = RegInfo.createVirtualRegister(&X86::GR64RegClass); |
8751 | BuildMI(FirstMBB, MBBI, DL, TII->get(X86::LEA64r), PBReg) |
8752 | .addReg(X86::RIP) |
8753 | .addImm(0) |
8754 | .addReg(0) |
8755 | .addSym(MF.getPICBaseSymbol()) |
8756 | .addReg(0); |
8757 | std::prev(MBBI)->setPreInstrSymbol(MF, MF.getPICBaseSymbol()); |
8758 | BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOV64ri), GOTReg) |
8759 | .addExternalSymbol("_GLOBAL_OFFSET_TABLE_", |
8760 | X86II::MO_PIC_BASE_OFFSET); |
8761 | BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD64rr), PC) |
8762 | .addReg(PBReg, RegState::Kill) |
8763 | .addReg(GOTReg, RegState::Kill); |
8764 | } else { |
8765 | llvm_unreachable("unexpected code model"); |
8766 | } |
8767 | } else { |
8768 | |
8769 | |
8770 | BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOVPC32r), PC).addImm(0); |
8771 | |
8772 | |
8773 | |
8774 | if (STI.isPICStyleGOT()) { |
8775 | |
8776 | |
8777 | BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD32ri), GlobalBaseReg) |
8778 | .addReg(PC) |
8779 | .addExternalSymbol("_GLOBAL_OFFSET_TABLE_", |
8780 | X86II::MO_GOT_ABSOLUTE_ADDRESS); |
8781 | } |
8782 | } |
8783 | |
8784 | return true; |
8785 | } |
8786 | |
8787 | StringRef getPassName() const override { |
8788 | return "X86 PIC Global Base Reg Initialization"; |
8789 | } |
8790 | |
8791 | void getAnalysisUsage(AnalysisUsage &AU) const override { |
8792 | AU.setPreservesCFG(); |
8793 | MachineFunctionPass::getAnalysisUsage(AU); |
8794 | } |
8795 | }; |
8796 | } |
8797 | |
8798 | char CGBR::ID = 0; |
8799 | FunctionPass* |
8800 | llvm::createX86GlobalBaseRegPass() { return new CGBR(); } |
8801 | |
8802 | namespace { |
8803 | struct LDTLSCleanup : public MachineFunctionPass { |
8804 | static char ID; |
8805 | LDTLSCleanup() : MachineFunctionPass(ID) {} |
8806 | |
8807 | bool runOnMachineFunction(MachineFunction &MF) override { |
8808 | if (skipFunction(MF.getFunction())) |
8809 | return false; |
8810 | |
8811 | X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>(); |
8812 | if (MFI->getNumLocalDynamicTLSAccesses() < 2) { |
8813 | |
8814 | return false; |
8815 | } |
8816 | |
8817 | MachineDominatorTree *DT = &getAnalysis<MachineDominatorTree>(); |
8818 | return VisitNode(DT->getRootNode(), 0); |
8819 | } |
8820 | |
8821 | |
8822 | |
8823 | |
8824 | |
8825 | |
8826 | bool VisitNode(MachineDomTreeNode *Node, unsigned TLSBaseAddrReg) { |
8827 | MachineBasicBlock *BB = Node->getBlock(); |
8828 | bool Changed = false; |
8829 | |
8830 | |
8831 | for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; |
8832 | ++I) { |
8833 | switch (I->getOpcode()) { |
8834 | case X86::TLS_base_addr32: |
8835 | case X86::TLS_base_addr64: |
8836 | if (TLSBaseAddrReg) |
8837 | I = ReplaceTLSBaseAddrCall(*I, TLSBaseAddrReg); |
8838 | else |
8839 | I = SetRegister(*I, &TLSBaseAddrReg); |
8840 | Changed = true; |
8841 | break; |
8842 | default: |
8843 | break; |
8844 | } |
8845 | } |
8846 | |
8847 | |
8848 | for (auto I = Node->begin(), E = Node->end(); I != E; ++I) { |
8849 | Changed |= VisitNode(*I, TLSBaseAddrReg); |
8850 | } |
8851 | |
8852 | return Changed; |
8853 | } |
8854 | |
8855 | |
8856 | |
8857 | MachineInstr *ReplaceTLSBaseAddrCall(MachineInstr &I, |
8858 | unsigned TLSBaseAddrReg) { |
8859 | MachineFunction *MF = I.getParent()->getParent(); |
8860 | const X86Subtarget &STI = MF->getSubtarget<X86Subtarget>(); |
8861 | const bool is64Bit = STI.is64Bit(); |
8862 | const X86InstrInfo *TII = STI.getInstrInfo(); |
8863 | |
8864 | |
8865 | MachineInstr *Copy = |
8866 | BuildMI(*I.getParent(), I, I.getDebugLoc(), |
8867 | TII->get(TargetOpcode::COPY), is64Bit ? X86::RAX : X86::EAX) |
8868 | .addReg(TLSBaseAddrReg); |
8869 | |
8870 | |
8871 | I.eraseFromParent(); |
8872 | |
8873 | return Copy; |
8874 | } |
8875 | |
8876 | |
8877 | |
8878 | MachineInstr *SetRegister(MachineInstr &I, unsigned *TLSBaseAddrReg) { |
8879 | MachineFunction *MF = I.getParent()->getParent(); |
8880 | const X86Subtarget &STI = MF->getSubtarget<X86Subtarget>(); |
8881 | const bool is64Bit = STI.is64Bit(); |
8882 | const X86InstrInfo *TII = STI.getInstrInfo(); |
8883 | |
8884 | |
8885 | MachineRegisterInfo &RegInfo = MF->getRegInfo(); |
8886 | *TLSBaseAddrReg = RegInfo.createVirtualRegister(is64Bit |
8887 | ? &X86::GR64RegClass |
8888 | : &X86::GR32RegClass); |
8889 | |
8890 | |
8891 | MachineInstr *Next = I.getNextNode(); |
8892 | MachineInstr *Copy = |
8893 | BuildMI(*I.getParent(), Next, I.getDebugLoc(), |
8894 | TII->get(TargetOpcode::COPY), *TLSBaseAddrReg) |
8895 | .addReg(is64Bit ? X86::RAX : X86::EAX); |
8896 | |
8897 | return Copy; |
8898 | } |
8899 | |
8900 | StringRef getPassName() const override { |
8901 | return "Local Dynamic TLS Access Clean-up"; |
8902 | } |
8903 | |
8904 | void getAnalysisUsage(AnalysisUsage &AU) const override { |
8905 | AU.setPreservesCFG(); |
8906 | AU.addRequired<MachineDominatorTree>(); |
8907 | MachineFunctionPass::getAnalysisUsage(AU); |
8908 | } |
8909 | }; |
8910 | } |
8911 | |
8912 | char LDTLSCleanup::ID = 0; |
8913 | FunctionPass* |
8914 | llvm::createCleanupLocalDynamicTLSPass() { return new LDTLSCleanup(); } |
8915 | |
8916 | |
8917 | |
8918 | |
8919 | |
8920 | |
8921 | |
8922 | |
8923 | |
8924 | |
8925 | |
8926 | |
8927 | |
8928 | |
8929 | |
8930 | |
8931 | |
8932 | |
8933 | |
8934 | |
8935 | |
8936 | |
8937 | |
8938 | |
8939 | |
8940 | |
8941 | |
8942 | |
8943 | |
8944 | enum MachineOutlinerClass { |
8945 | MachineOutlinerDefault, |
8946 | MachineOutlinerTailCall |
8947 | }; |
8948 | |
8949 | outliner::OutlinedFunction X86InstrInfo::getOutliningCandidateInfo( |
8950 | std::vector<outliner::Candidate> &RepeatedSequenceLocs) const { |
8951 | unsigned SequenceSize = |
8952 | std::accumulate(RepeatedSequenceLocs[0].front(), |
8953 | std::next(RepeatedSequenceLocs[0].back()), 0, |
8954 | [](unsigned Sum, const MachineInstr &MI) { |
8955 | |
8956 | |
8957 | |
8958 | if (MI.isDebugInstr() || MI.isKill()) |
8959 | return Sum; |
8960 | return Sum + 1; |
8961 | }); |
8962 | |
8963 | |
8964 | |
8965 | unsigned CFICount = 0; |
8966 | MachineBasicBlock::iterator MBBI = RepeatedSequenceLocs[0].front(); |
8967 | for (unsigned Loc = RepeatedSequenceLocs[0].getStartIdx(); |
8968 | Loc < RepeatedSequenceLocs[0].getEndIdx() + 1; Loc++) { |
8969 | const std::vector<MCCFIInstruction> &CFIInstructions = |
8970 | RepeatedSequenceLocs[0].getMF()->getFrameInstructions(); |
8971 | if (MBBI->isCFIInstruction()) { |
8972 | unsigned CFIIndex = MBBI->getOperand(0).getCFIIndex(); |
8973 | MCCFIInstruction CFI = CFIInstructions[CFIIndex]; |
8974 | CFICount++; |
8975 | } |
8976 | MBBI++; |
8977 | } |
8978 | |
8979 | |
8980 | |
8981 | |
8982 | |
8983 | |
8984 | for (outliner::Candidate &C : RepeatedSequenceLocs) { |
8985 | std::vector<MCCFIInstruction> CFIInstructions = |
8986 | C.getMF()->getFrameInstructions(); |
8987 | |
8988 | if (CFICount > 0 && CFICount != CFIInstructions.size()) |
8989 | return outliner::OutlinedFunction(); |
8990 | } |
8991 | |
8992 | |
8993 | if (RepeatedSequenceLocs[0].back()->isTerminator()) { |
8994 | for (outliner::Candidate &C : RepeatedSequenceLocs) |
8995 | C.setCallInfo(MachineOutlinerTailCall, 1); |
8996 | |
8997 | return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize, |
8998 | 0, |
8999 | MachineOutlinerTailCall |
9000 | ); |
9001 | } |
9002 | |
9003 | if (CFICount > 0) |
9004 | return outliner::OutlinedFunction(); |
9005 | |
9006 | for (outliner::Candidate &C : RepeatedSequenceLocs) |
9007 | C.setCallInfo(MachineOutlinerDefault, 1); |
9008 | |
9009 | return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize, 1, |
9010 | MachineOutlinerDefault); |
9011 | } |
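
The constants used here are instruction-count overheads: a candidate whose last instruction
is a terminator is outlined as a tail call (call overhead 1 for the branch, frame overhead
0), while the default case charges 1 per call site plus 1 for the frame, matching the ret
appended in buildOutlinedFrame() below. Candidates containing CFI instructions are only
accepted when every CFI instruction of the containing function is inside the sequence, and
even then only in the tail-call form. Under the generic outliner cost model this roughly
means outlining a SequenceSize-instruction body repeated N times pays off when
N * SequenceSize > N * 1 + 1 + SequenceSize; a 3-instruction sequence, for example, needs
at least 3 occurrences.
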
9012 | |
9013 | bool X86InstrInfo::isFunctionSafeToOutlineFrom(MachineFunction &MF, |
9014 | bool OutlineFromLinkOnceODRs) const { |
9015 | const Function &F = MF.getFunction(); |
9016 | |
9017 | |
9018 | |
9019 | if (Subtarget.getFrameLowering()->has128ByteRedZone(MF)) { |
9020 | |
9021 | const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>(); |
9022 | if (!X86FI || X86FI->getUsesRedZone()) |
9023 | return false; |
9024 | } |
9025 | |
9026 | |
9027 | |
9028 | if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage()) |
9029 | return false; |
9030 | |
9031 | |
9032 | return true; |
9033 | } |
9034 | |
9035 | outliner::InstrType |
9036 | X86InstrInfo::getOutliningType(MachineBasicBlock::iterator &MIT, unsigned Flags) const { |
9037 | MachineInstr &MI = *MIT; |
9038 | |
9039 | if (MI.isDebugInstr() || MI.isIndirectDebugValue()) |
9040 | return outliner::InstrType::Invisible; |
9041 | |
9042 | |
9043 | |
9044 | if (MI.isKill()) |
9045 | return outliner::InstrType::Invisible; |
9046 | |
9047 | |
9048 | if (isTailCall(MI)) |
9049 | return outliner::InstrType::Legal; |
9050 | |
9051 | |
9052 | if (MI.isTerminator() || MI.isReturn()) { |
9053 | |
9054 | |
9055 | if (MI.getParent()->succ_empty()) |
9056 | return outliner::InstrType::Legal; |
9057 | |
9058 | |
9059 | return outliner::InstrType::Illegal; |
9060 | } |
9061 | |
9062 | |
9063 | |
9064 | |
9065 | |
9066 | |
9067 | |
9068 | |
9069 | |
9070 | |
9071 | if (MI.modifiesRegister(X86::RSP, &RI) || MI.readsRegister(X86::RSP, &RI) || |
9072 | MI.getDesc().hasImplicitUseOfPhysReg(X86::RSP) || |
9073 | MI.getDesc().hasImplicitDefOfPhysReg(X86::RSP)) |
9074 | return outliner::InstrType::Illegal; |
9075 | |
9076 | |
9077 | if (MI.readsRegister(X86::RIP, &RI) || |
9078 | MI.getDesc().hasImplicitUseOfPhysReg(X86::RIP) || |
9079 | MI.getDesc().hasImplicitDefOfPhysReg(X86::RIP)) |
9080 | return outliner::InstrType::Illegal; |
9081 | |
9082 | |
9083 | if (MI.isPosition()) |
9084 | return outliner::InstrType::Illegal; |
9085 | |
9086 | |
9087 | for (const MachineOperand &MOP : MI.operands()) |
9088 | if (MOP.isCPI() || MOP.isJTI() || MOP.isCFIIndex() || MOP.isFI() || |
9089 | MOP.isTargetIndex()) |
9090 | return outliner::InstrType::Illegal; |
9091 | |
9092 | return outliner::InstrType::Legal; |
9093 | } |
9094 | |
9095 | void X86InstrInfo::buildOutlinedFrame(MachineBasicBlock &MBB, |
9096 | MachineFunction &MF, |
9097 | const outliner::OutlinedFunction &OF) |
9098 | const { |
9099 | |
9100 | if (OF.FrameConstructionID == MachineOutlinerTailCall) |
9101 | return; |
9102 | |
9103 | |
9104 | |
9105 | MachineInstr *retq = BuildMI(MF, DebugLoc(), get(X86::RETQ)); |
9106 | MBB.insert(MBB.end(), retq); |
9107 | } |
9108 | |
9109 | MachineBasicBlock::iterator |
9110 | X86InstrInfo::insertOutlinedCall(Module &M, MachineBasicBlock &MBB, |
9111 | MachineBasicBlock::iterator &It, |
9112 | MachineFunction &MF, |
9113 | const outliner::Candidate &C) const { |
9114 | |
9115 | if (C.CallConstructionID == MachineOutlinerTailCall) { |
9116 | |
9117 | It = MBB.insert(It, |
9118 | BuildMI(MF, DebugLoc(), get(X86::TAILJMPd64)) |
9119 | .addGlobalAddress(M.getNamedValue(MF.getName()))); |
9120 | } else { |
9121 | |
9122 | It = MBB.insert(It, |
9123 | BuildMI(MF, DebugLoc(), get(X86::CALL64pcrel32)) |
9124 | .addGlobalAddress(M.getNamedValue(MF.getName()))); |
9125 | } |
9126 | |
9127 | return It; |
9128 | } |
9129 | |
9130 | #define GET_INSTRINFO_HELPERS |
9131 | #include "X86GenInstrInfo.inc" |