Bug Summary

File: src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
Warning: line 340, column 26: Forming reference to null pointer
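
In short, the analyzer's path through translateCompare (events 23 to 36 in the annotated source below) is: the compare operand U is a ConstantExpr rather than a CmpInst, so dyn_cast<CmpInst>(&U) leaves CI null; the predicate read from the constant expression is neither an integer predicate nor FCMP_FALSE/FCMP_TRUE; and because the build defines NDEBUG, the assert(CI && ...) guarding the final branch is compiled away, so MachineInstr::copyFlagsFromInstruction(*CI) forms a reference through a null pointer at line 340. The self-contained sketch below mirrors that control flow with hypothetical stand-in types and predicate values (illustrative only, not LLVM code):

// Minimal sketch of the reported path. FakeCmpInst and the predicate values
// are stand-ins invented for illustration; build with -DNDEBUG (as in the
// report) so the assert is a no-op and the last call dereferences null.
#include <cassert>

struct FakeCmpInst {
  int getPredicate() const { return 1; }
  unsigned getFlags() const { return 0; }
};

unsigned sketchTranslateCompare(const FakeCmpInst *CI, int ConstExprPred) {
  // CI is null when the compare came from a constant expression.
  int Pred = CI ? CI->getPredicate() : ConstExprPred;
  if (Pred == 1) return 0;                       // integer predicate: not this path
  if (Pred == 2) return 0;                       // FCMP_FALSE: not this path
  if (Pred == 3) return 0;                       // FCMP_TRUE: not this path
  assert(CI && "Instruction should be CmpInst"); // compiled out under NDEBUG
  return CI->getFlags();                         // null dereference, cf. line 340
}

int main() {
  return sketchTranslateCompare(nullptr, /*ConstExprPred=*/9); // crashes here
}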

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name IRTranslator.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/gnu/usr.bin/clang/libLLVM/obj -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Analysis -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ASMParser -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/BinaryFormat -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitcode -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitcode -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitstream -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /include/llvm/CodeGen -I /include/llvm/CodeGen/PBQP -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/IR -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/IR -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Coroutines -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ProfileData/Coverage -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/CodeView -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/DWARF -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/MSF -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/PDB -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Demangle -I 
/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine/JITLink -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine/Orc -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend/OpenACC -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend/OpenMP -I /include/llvm/CodeGen/GlobalISel -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/IRReader -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/InstCombine -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/Transforms/InstCombine -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/LTO -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Linker -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/MC -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/MC/MCParser -I /include/llvm/CodeGen/MIRParser -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Object -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Option -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Passes -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ProfileData -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Scalar -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ADT -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Support -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/Symbolize -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Target -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Utils -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Vectorize -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/IPO -I 
/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include -I /usr/src/gnu/usr.bin/clang/libLLVM/../include -I /usr/src/gnu/usr.bin/clang/libLLVM/obj -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include -D NDEBUG -D __STDC_LIMIT_MACROS -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D LLVM_PREFIX="/usr" -internal-isystem /usr/include/c++/v1 -internal-isystem /usr/local/lib/clang/13.0.0/include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/usr/src/gnu/usr.bin/clang/libLLVM/obj -ferror-limit 19 -fvisibility-inlines-hidden -fwrapv -stack-protector 2 -fno-rtti -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/ben/Projects/vmm/scan-build/2022-01-12-194120-40624-1 -x c++ /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp

/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp

1//===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file implements the IRTranslator class.
10//===----------------------------------------------------------------------===//
11
12#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
13#include "llvm/ADT/PostOrderIterator.h"
14#include "llvm/ADT/STLExtras.h"
15#include "llvm/ADT/ScopeExit.h"
16#include "llvm/ADT/SmallSet.h"
17#include "llvm/ADT/SmallVector.h"
18#include "llvm/Analysis/BranchProbabilityInfo.h"
19#include "llvm/Analysis/Loads.h"
20#include "llvm/Analysis/OptimizationRemarkEmitter.h"
21#include "llvm/Analysis/ValueTracking.h"
22#include "llvm/CodeGen/Analysis.h"
23#include "llvm/CodeGen/GlobalISel/CallLowering.h"
24#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
25#include "llvm/CodeGen/GlobalISel/InlineAsmLowering.h"
26#include "llvm/CodeGen/LowLevelType.h"
27#include "llvm/CodeGen/MachineBasicBlock.h"
28#include "llvm/CodeGen/MachineFrameInfo.h"
29#include "llvm/CodeGen/MachineFunction.h"
30#include "llvm/CodeGen/MachineInstrBuilder.h"
31#include "llvm/CodeGen/MachineMemOperand.h"
32#include "llvm/CodeGen/MachineModuleInfo.h"
33#include "llvm/CodeGen/MachineOperand.h"
34#include "llvm/CodeGen/MachineRegisterInfo.h"
35#include "llvm/CodeGen/StackProtector.h"
36#include "llvm/CodeGen/SwitchLoweringUtils.h"
37#include "llvm/CodeGen/TargetFrameLowering.h"
38#include "llvm/CodeGen/TargetInstrInfo.h"
39#include "llvm/CodeGen/TargetLowering.h"
40#include "llvm/CodeGen/TargetPassConfig.h"
41#include "llvm/CodeGen/TargetRegisterInfo.h"
42#include "llvm/CodeGen/TargetSubtargetInfo.h"
43#include "llvm/IR/BasicBlock.h"
44#include "llvm/IR/CFG.h"
45#include "llvm/IR/Constant.h"
46#include "llvm/IR/Constants.h"
47#include "llvm/IR/DataLayout.h"
48#include "llvm/IR/DebugInfo.h"
49#include "llvm/IR/DerivedTypes.h"
50#include "llvm/IR/Function.h"
51#include "llvm/IR/GetElementPtrTypeIterator.h"
52#include "llvm/IR/InlineAsm.h"
53#include "llvm/IR/InstrTypes.h"
54#include "llvm/IR/Instructions.h"
55#include "llvm/IR/IntrinsicInst.h"
56#include "llvm/IR/Intrinsics.h"
57#include "llvm/IR/LLVMContext.h"
58#include "llvm/IR/Metadata.h"
59#include "llvm/IR/PatternMatch.h"
60#include "llvm/IR/Type.h"
61#include "llvm/IR/User.h"
62#include "llvm/IR/Value.h"
63#include "llvm/InitializePasses.h"
64#include "llvm/MC/MCContext.h"
65#include "llvm/Pass.h"
66#include "llvm/Support/Casting.h"
67#include "llvm/Support/CodeGen.h"
68#include "llvm/Support/Debug.h"
69#include "llvm/Support/ErrorHandling.h"
70#include "llvm/Support/LowLevelTypeImpl.h"
71#include "llvm/Support/MathExtras.h"
72#include "llvm/Support/raw_ostream.h"
73#include "llvm/Target/TargetIntrinsicInfo.h"
74#include "llvm/Target/TargetMachine.h"
75#include "llvm/Transforms/Utils/MemoryOpRemark.h"
76#include <algorithm>
77#include <cassert>
78#include <cstddef>
79#include <cstdint>
80#include <iterator>
81#include <string>
82#include <utility>
83#include <vector>
84
85#define DEBUG_TYPE"irtranslator" "irtranslator"
86
87using namespace llvm;
88
89static cl::opt<bool>
90 EnableCSEInIRTranslator("enable-cse-in-irtranslator",
91 cl::desc("Should enable CSE in irtranslator"),
92 cl::Optional, cl::init(false));
93char IRTranslator::ID = 0;
94
95INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",static void *initializeIRTranslatorPassOnce(PassRegistry &Registry) {
96 false, false)static void *initializeIRTranslatorPassOnce(PassRegistry &Registry) {
97INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)initializeTargetPassConfigPass(Registry);
98INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass)initializeGISelCSEAnalysisWrapperPassPass(Registry);
99INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)initializeBlockFrequencyInfoWrapperPassPass(Registry);
100INITIALIZE_PASS_DEPENDENCY(StackProtector)initializeStackProtectorPass(Registry);
101INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)initializeTargetLibraryInfoWrapperPassPass(Registry);
102INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",PassInfo *PI = new PassInfo( "IRTranslator LLVM IR -> MI", "irtranslator", &IRTranslator::ID, PassInfo::NormalCtor_t(callDefaultCtor<IRTranslator>), false, false); Registry.registerPass(*PI, true); return PI; } static llvm::once_flag InitializeIRTranslatorPassFlag; void llvm::initializeIRTranslatorPass(PassRegistry &Registry) { llvm::call_once(InitializeIRTranslatorPassFlag, initializeIRTranslatorPassOnce, std::ref(Registry)); }
103 false, false)PassInfo *PI = new PassInfo( "IRTranslator LLVM IR -> MI", "irtranslator", &IRTranslator::ID, PassInfo::NormalCtor_t(callDefaultCtor<IRTranslator>), false, false); Registry.registerPass(*PI, true); return PI; } static llvm::once_flag InitializeIRTranslatorPassFlag; void llvm::initializeIRTranslatorPass(PassRegistry &Registry) { llvm::call_once(InitializeIRTranslatorPassFlag, initializeIRTranslatorPassOnce, std::ref(Registry)); }
104
105static void reportTranslationError(MachineFunction &MF,
106 const TargetPassConfig &TPC,
107 OptimizationRemarkEmitter &ORE,
108 OptimizationRemarkMissed &R) {
109 MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
110
111 // Print the function name explicitly if we don't have a debug location (which
112 // makes the diagnostic less useful) or if we're going to emit a raw error.
113 if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
114 R << (" (in function: " + MF.getName() + ")").str();
115
116 if (TPC.isGlobalISelAbortEnabled())
117 report_fatal_error(R.getMsg());
118 else
119 ORE.emit(R);
120}
121
122IRTranslator::IRTranslator(CodeGenOpt::Level optlevel)
123 : MachineFunctionPass(ID), OptLevel(optlevel) {}
124
125#ifndef NDEBUG1
126namespace {
127/// Verify that every instruction created has the same DILocation as the
128/// instruction being translated.
129class DILocationVerifier : public GISelChangeObserver {
130 const Instruction *CurrInst = nullptr;
131
132public:
133 DILocationVerifier() = default;
134 ~DILocationVerifier() = default;
135
136 const Instruction *getCurrentInst() const { return CurrInst; }
137 void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; }
138
139 void erasingInstr(MachineInstr &MI) override {}
140 void changingInstr(MachineInstr &MI) override {}
141 void changedInstr(MachineInstr &MI) override {}
142
143 void createdInstr(MachineInstr &MI) override {
144 assert(getCurrentInst() && "Inserted instruction without a current MI")((void)0);
145
146 // Only print the check message if we're actually checking it.
147#ifndef NDEBUG1
148 LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInstdo { } while (false)
149 << " was copied to " << MI)do { } while (false);
150#endif
151 // We allow insts in the entry block to have a debug loc line of 0 because
152 // they could have originated from constants, and we don't want a jumpy
153 // debug experience.
154 assert((CurrInst->getDebugLoc() == MI.getDebugLoc() ||((void)0)
155 MI.getDebugLoc().getLine() == 0) &&((void)0)
156 "Line info was not transferred to all instructions")((void)0);
157 }
158};
159} // namespace
160#endif // ifndef NDEBUG
161
162
163void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
164 AU.addRequired<StackProtector>();
165 AU.addRequired<TargetPassConfig>();
166 AU.addRequired<GISelCSEAnalysisWrapperPass>();
167 if (OptLevel != CodeGenOpt::None)
168 AU.addRequired<BranchProbabilityInfoWrapperPass>();
169 AU.addRequired<TargetLibraryInfoWrapperPass>();
170 AU.addPreserved<TargetLibraryInfoWrapperPass>();
171 getSelectionDAGFallbackAnalysisUsage(AU);
172 MachineFunctionPass::getAnalysisUsage(AU);
173}
174
175IRTranslator::ValueToVRegInfo::VRegListT &
176IRTranslator::allocateVRegs(const Value &Val) {
177 auto VRegsIt = VMap.findVRegs(Val);
178 if (VRegsIt != VMap.vregs_end())
179 return *VRegsIt->second;
180 auto *Regs = VMap.getVRegs(Val);
181 auto *Offsets = VMap.getOffsets(Val);
182 SmallVector<LLT, 4> SplitTys;
183 computeValueLLTs(*DL, *Val.getType(), SplitTys,
184 Offsets->empty() ? Offsets : nullptr);
185 for (unsigned i = 0; i < SplitTys.size(); ++i)
186 Regs->push_back(0);
187 return *Regs;
188}
189
190ArrayRef<Register> IRTranslator::getOrCreateVRegs(const Value &Val) {
191 auto VRegsIt = VMap.findVRegs(Val);
192 if (VRegsIt != VMap.vregs_end())
193 return *VRegsIt->second;
194
195 if (Val.getType()->isVoidTy())
196 return *VMap.getVRegs(Val);
197
198 // Create entry for this type.
199 auto *VRegs = VMap.getVRegs(Val);
200 auto *Offsets = VMap.getOffsets(Val);
201
202 assert(Val.getType()->isSized() &&((void)0)
203 "Don't know how to create an empty vreg")((void)0);
204
205 SmallVector<LLT, 4> SplitTys;
206 computeValueLLTs(*DL, *Val.getType(), SplitTys,
207 Offsets->empty() ? Offsets : nullptr);
208
209 if (!isa<Constant>(Val)) {
210 for (auto Ty : SplitTys)
211 VRegs->push_back(MRI->createGenericVirtualRegister(Ty));
212 return *VRegs;
213 }
214
215 if (Val.getType()->isAggregateType()) {
216 // UndefValue, ConstantAggregateZero
217 auto &C = cast<Constant>(Val);
218 unsigned Idx = 0;
219 while (auto Elt = C.getAggregateElement(Idx++)) {
220 auto EltRegs = getOrCreateVRegs(*Elt);
221 llvm::copy(EltRegs, std::back_inserter(*VRegs));
222 }
223 } else {
224 assert(SplitTys.size() == 1 && "unexpectedly split LLT")((void)0);
225 VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
226 bool Success = translate(cast<Constant>(Val), VRegs->front());
227 if (!Success) {
228 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
229 MF->getFunction().getSubprogram(),
230 &MF->getFunction().getEntryBlock());
231 R << "unable to translate constant: " << ore::NV("Type", Val.getType());
232 reportTranslationError(*MF, *TPC, *ORE, R);
233 return *VRegs;
234 }
235 }
236
237 return *VRegs;
238}
239
240int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
241 auto MapEntry = FrameIndices.find(&AI);
242 if (MapEntry != FrameIndices.end())
243 return MapEntry->second;
244
245 uint64_t ElementSize = DL->getTypeAllocSize(AI.getAllocatedType());
246 uint64_t Size =
247 ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();
248
249 // Always allocate at least one byte.
250 Size = std::max<uint64_t>(Size, 1u);
251
252 int &FI = FrameIndices[&AI];
253 FI = MF->getFrameInfo().CreateStackObject(Size, AI.getAlign(), false, &AI);
254 return FI;
255}
256
257Align IRTranslator::getMemOpAlign(const Instruction &I) {
258 if (const StoreInst *SI = dyn_cast<StoreInst>(&I))
259 return SI->getAlign();
260 if (const LoadInst *LI = dyn_cast<LoadInst>(&I))
261 return LI->getAlign();
262 if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I))
263 return AI->getAlign();
264 if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I))
265 return AI->getAlign();
266
267 OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
268 R << "unable to translate memop: " << ore::NV("Opcode", &I);
269 reportTranslationError(*MF, *TPC, *ORE, R);
270 return Align(1);
271}
272
273MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
274 MachineBasicBlock *&MBB = BBToMBB[&BB];
275 assert(MBB && "BasicBlock was not encountered before")((void)0);
276 return *MBB;
277}
278
279void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
280 assert(NewPred && "new predecessor must be a real MachineBasicBlock")((void)0);
281 MachinePreds[Edge].push_back(NewPred);
282}
283
284bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
285 MachineIRBuilder &MIRBuilder) {
286 // Get or create a virtual register for each value.
287 // Unless the value is a Constant => loadimm cst?
288 // or inline constant each time?
289 // Creation of a virtual register needs to have a size.
290 Register Op0 = getOrCreateVReg(*U.getOperand(0));
291 Register Op1 = getOrCreateVReg(*U.getOperand(1));
292 Register Res = getOrCreateVReg(U);
293 uint16_t Flags = 0;
294 if (isa<Instruction>(U)) {
295 const Instruction &I = cast<Instruction>(U);
296 Flags = MachineInstr::copyFlagsFromInstruction(I);
297 }
298
299 MIRBuilder.buildInstr(Opcode, {Res}, {Op0, Op1}, Flags);
300 return true;
301}
302
303bool IRTranslator::translateUnaryOp(unsigned Opcode, const User &U,
304 MachineIRBuilder &MIRBuilder) {
305 Register Op0 = getOrCreateVReg(*U.getOperand(0));
306 Register Res = getOrCreateVReg(U);
307 uint16_t Flags = 0;
308 if (isa<Instruction>(U)) {
309 const Instruction &I = cast<Instruction>(U);
310 Flags = MachineInstr::copyFlagsFromInstruction(I);
311 }
312 MIRBuilder.buildInstr(Opcode, {Res}, {Op0}, Flags);
313 return true;
314}
315
316bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) {
317 return translateUnaryOp(TargetOpcode::G_FNEG, U, MIRBuilder);
318}
319
320bool IRTranslator::translateCompare(const User &U,
321 MachineIRBuilder &MIRBuilder) {
322 auto *CI = dyn_cast<CmpInst>(&U);
23: Assuming the object is not a 'CmpInst'
24: 'CI' initialized to a null pointer value
323 Register Op0 = getOrCreateVReg(*U.getOperand(0));
324 Register Op1 = getOrCreateVReg(*U.getOperand(1));
325 Register Res = getOrCreateVReg(U);
326 CmpInst::Predicate Pred =
327 CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>(
24.1: 'CI' is null
25: '?' condition is false
328 cast<ConstantExpr>(U).getPredicate());
26: 'U' is a 'ConstantExpr'
329 if (CmpInst::isIntPredicate(Pred))
27: Calling 'CmpInst::isIntPredicate'
30: Returning from 'CmpInst::isIntPredicate'
31: Taking false branch
330 MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
331 else if (Pred == CmpInst::FCMP_FALSE)
32: Assuming 'Pred' is not equal to FCMP_FALSE
33: Taking false branch
332 MIRBuilder.buildCopy(
333 Res, getOrCreateVReg(*Constant::getNullValue(U.getType())));
334 else if (Pred == CmpInst::FCMP_TRUE)
34: Assuming 'Pred' is not equal to FCMP_TRUE
35: Taking false branch
335 MIRBuilder.buildCopy(
336 Res, getOrCreateVReg(*Constant::getAllOnesValue(U.getType())));
337 else {
338 assert(CI && "Instruction should be CmpInst")((void)0);
339 MIRBuilder.buildFCmp(Pred, Res, Op0, Op1,
340 MachineInstr::copyFlagsFromInstruction(*CI));
36: Forming reference to null pointer
341 }
342
343 return true;
344}
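
A defensive variant of the call flagged at line 340, sketched below, would read the instruction flags only when CI is non-null inside this final else branch; this is an illustrative guard under that assumption, not necessarily how the issue was resolved upstream:

    // Hypothetical guard (illustrative only): a ConstantExpr compare carries no
    // instruction fast-math flags, so pass none when CI is null.
    uint16_t Flags = CI ? MachineInstr::copyFlagsFromInstruction(*CI) : 0;
    MIRBuilder.buildFCmp(Pred, Res, Op0, Op1, Flags);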
345
346bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
347 const ReturnInst &RI = cast<ReturnInst>(U);
348 const Value *Ret = RI.getReturnValue();
349 if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0)
350 Ret = nullptr;
351
352 ArrayRef<Register> VRegs;
353 if (Ret)
354 VRegs = getOrCreateVRegs(*Ret);
355
356 Register SwiftErrorVReg = 0;
357 if (CLI->supportSwiftError() && SwiftError.getFunctionArg()) {
358 SwiftErrorVReg = SwiftError.getOrCreateVRegUseAt(
359 &RI, &MIRBuilder.getMBB(), SwiftError.getFunctionArg());
360 }
361
362 // The target may mess with the insertion point, but
363 // this is not important as a return is the last instruction
364 // of the block anyway.
365 return CLI->lowerReturn(MIRBuilder, Ret, VRegs, FuncInfo, SwiftErrorVReg);
366}
367
368void IRTranslator::emitBranchForMergedCondition(
369 const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
370 MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB,
371 BranchProbability TProb, BranchProbability FProb, bool InvertCond) {
372 // If the leaf of the tree is a comparison, merge the condition into
373 // the caseblock.
374 if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
375 CmpInst::Predicate Condition;
376 if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
377 Condition = InvertCond ? IC->getInversePredicate() : IC->getPredicate();
378 } else {
379 const FCmpInst *FC = cast<FCmpInst>(Cond);
380 Condition = InvertCond ? FC->getInversePredicate() : FC->getPredicate();
381 }
382
383 SwitchCG::CaseBlock CB(Condition, false, BOp->getOperand(0),
384 BOp->getOperand(1), nullptr, TBB, FBB, CurBB,
385 CurBuilder->getDebugLoc(), TProb, FProb);
386 SL->SwitchCases.push_back(CB);
387 return;
388 }
389
390 // Create a CaseBlock record representing this branch.
391 CmpInst::Predicate Pred = InvertCond ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ;
392 SwitchCG::CaseBlock CB(
393 Pred, false, Cond, ConstantInt::getTrue(MF->getFunction().getContext()),
394 nullptr, TBB, FBB, CurBB, CurBuilder->getDebugLoc(), TProb, FProb);
395 SL->SwitchCases.push_back(CB);
396}
397
398static bool isValInBlock(const Value *V, const BasicBlock *BB) {
399 if (const Instruction *I = dyn_cast<Instruction>(V))
400 return I->getParent() == BB;
401 return true;
402}
403
404void IRTranslator::findMergedConditions(
405 const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
406 MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB,
407 Instruction::BinaryOps Opc, BranchProbability TProb,
408 BranchProbability FProb, bool InvertCond) {
409 using namespace PatternMatch;
410 assert((Opc == Instruction::And || Opc == Instruction::Or) &&((void)0)
411 "Expected Opc to be AND/OR")((void)0);
412 // Skip over nodes that are not part of the tree and remember to invert the
413 // op and operands at the next level.
414 Value *NotCond;
415 if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) &&
416 isValInBlock(NotCond, CurBB->getBasicBlock())) {
417 findMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
418 !InvertCond);
419 return;
420 }
421
422 const Instruction *BOp = dyn_cast<Instruction>(Cond);
423 const Value *BOpOp0, *BOpOp1;
424 // Compute the effective opcode for Cond, taking into account whether it needs
425 // to be inverted, e.g.
426 // and (not (or A, B)), C
427 // gets lowered as
428 // and (and (not A, not B), C)
429 Instruction::BinaryOps BOpc = (Instruction::BinaryOps)0;
430 if (BOp) {
431 BOpc = match(BOp, m_LogicalAnd(m_Value(BOpOp0), m_Value(BOpOp1)))
432 ? Instruction::And
433 : (match(BOp, m_LogicalOr(m_Value(BOpOp0), m_Value(BOpOp1)))
434 ? Instruction::Or
435 : (Instruction::BinaryOps)0);
436 if (InvertCond) {
437 if (BOpc == Instruction::And)
438 BOpc = Instruction::Or;
439 else if (BOpc == Instruction::Or)
440 BOpc = Instruction::And;
441 }
442 }
443
444 // If this node is not part of the or/and tree, emit it as a branch.
445 // Note that all nodes in the tree should have same opcode.
446 bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse();
447 if (!BOpIsInOrAndTree || BOp->getParent() != CurBB->getBasicBlock() ||
448 !isValInBlock(BOpOp0, CurBB->getBasicBlock()) ||
449 !isValInBlock(BOpOp1, CurBB->getBasicBlock())) {
450 emitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB, TProb, FProb,
451 InvertCond);
452 return;
453 }
454
455 // Create TmpBB after CurBB.
456 MachineFunction::iterator BBI(CurBB);
457 MachineBasicBlock *TmpBB =
458 MF->CreateMachineBasicBlock(CurBB->getBasicBlock());
459 CurBB->getParent()->insert(++BBI, TmpBB);
460
461 if (Opc == Instruction::Or) {
462 // Codegen X | Y as:
463 // BB1:
464 // jmp_if_X TBB
465 // jmp TmpBB
466 // TmpBB:
467 // jmp_if_Y TBB
468 // jmp FBB
469 //
470
471 // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
472 // The requirement is that
473 // TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
474 // = TrueProb for original BB.
475 // Assuming the original probabilities are A and B, one choice is to set
476 // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
477 // A/(1+B) and 2B/(1+B). This choice assumes that
478 // TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
479 // Another choice is to assume TrueProb for BB1 equals to TrueProb for
480 // TmpBB, but the math is more complicated.
481
482 auto NewTrueProb = TProb / 2;
483 auto NewFalseProb = TProb / 2 + FProb;
484 // Emit the LHS condition.
485 findMergedConditions(BOpOp0, TBB, TmpBB, CurBB, SwitchBB, Opc, NewTrueProb,
486 NewFalseProb, InvertCond);
487
488 // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
489 SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
490 BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
491 // Emit the RHS condition into TmpBB.
492 findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
493 Probs[1], InvertCond);
494 } else {
495 assert(Opc == Instruction::And && "Unknown merge op!")((void)0);
496 // Codegen X & Y as:
497 // BB1:
498 // jmp_if_X TmpBB
499 // jmp FBB
500 // TmpBB:
501 // jmp_if_Y TBB
502 // jmp FBB
503 //
504 // This requires creation of TmpBB after CurBB.
505
506 // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
507 // The requirement is that
508 // FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
509 // = FalseProb for original BB.
510 // Assuming the original probabilities are A and B, one choice is to set
511 // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
512 // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 ==
513 // TrueProb for BB1 * FalseProb for TmpBB.
514
515 auto NewTrueProb = TProb + FProb / 2;
516 auto NewFalseProb = FProb / 2;
517 // Emit the LHS condition.
518 findMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB, Opc, NewTrueProb,
519 NewFalseProb, InvertCond);
520
521 // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
522 SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
523 BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
524 // Emit the RHS condition into TmpBB.
525 findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
526 Probs[1], InvertCond);
527 }
528}
529
530bool IRTranslator::shouldEmitAsBranches(
531 const std::vector<SwitchCG::CaseBlock> &Cases) {
532 // For multiple cases, it's better to emit as branches.
533 if (Cases.size() != 2)
534 return true;
535
536 // If this is two comparisons of the same values or'd or and'd together, they
537 // will get folded into a single comparison, so don't emit two blocks.
538 if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
539 Cases[0].CmpRHS == Cases[1].CmpRHS) ||
540 (Cases[0].CmpRHS == Cases[1].CmpLHS &&
541 Cases[0].CmpLHS == Cases[1].CmpRHS)) {
542 return false;
543 }
544
545 // Handle: (X != null) | (Y != null) --> (X|Y) != 0
546 // Handle: (X == null) & (Y == null) --> (X|Y) == 0
547 if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
548 Cases[0].PredInfo.Pred == Cases[1].PredInfo.Pred &&
549 isa<Constant>(Cases[0].CmpRHS) &&
550 cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
551 if (Cases[0].PredInfo.Pred == CmpInst::ICMP_EQ &&
552 Cases[0].TrueBB == Cases[1].ThisBB)
553 return false;
554 if (Cases[0].PredInfo.Pred == CmpInst::ICMP_NE &&
555 Cases[0].FalseBB == Cases[1].ThisBB)
556 return false;
557 }
558
559 return true;
560}
561
562bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
563 const BranchInst &BrInst = cast<BranchInst>(U);
564 auto &CurMBB = MIRBuilder.getMBB();
565 auto *Succ0MBB = &getMBB(*BrInst.getSuccessor(0));
566
567 if (BrInst.isUnconditional()) {
568 // If the unconditional target is the layout successor, fallthrough.
569 if (!CurMBB.isLayoutSuccessor(Succ0MBB))
570 MIRBuilder.buildBr(*Succ0MBB);
571
572 // Link successors.
573 for (const BasicBlock *Succ : successors(&BrInst))
574 CurMBB.addSuccessor(&getMBB(*Succ));
575 return true;
576 }
577
578 // If this condition is one of the special cases we handle, do special stuff
579 // now.
580 const Value *CondVal = BrInst.getCondition();
581 MachineBasicBlock *Succ1MBB = &getMBB(*BrInst.getSuccessor(1));
582
583 const auto &TLI = *MF->getSubtarget().getTargetLowering();
584
585 // If this is a series of conditions that are or'd or and'd together, emit
586 // this as a sequence of branches instead of setcc's with and/or operations.
587 // As long as jumps are not expensive (exceptions for multi-use logic ops,
588 // unpredictable branches, and vector extracts because those jumps are likely
589 // expensive for any target), this should improve performance.
590 // For example, instead of something like:
591 // cmp A, B
592 // C = seteq
593 // cmp D, E
594 // F = setle
595 // or C, F
596 // jnz foo
597 // Emit:
598 // cmp A, B
599 // je foo
600 // cmp D, E
601 // jle foo
602 using namespace PatternMatch;
603 const Instruction *CondI = dyn_cast<Instruction>(CondVal);
604 if (!TLI.isJumpExpensive() && CondI && CondI->hasOneUse() &&
605 !BrInst.hasMetadata(LLVMContext::MD_unpredictable)) {
606 Instruction::BinaryOps Opcode = (Instruction::BinaryOps)0;
607 Value *Vec;
608 const Value *BOp0, *BOp1;
609 if (match(CondI, m_LogicalAnd(m_Value(BOp0), m_Value(BOp1))))
610 Opcode = Instruction::And;
611 else if (match(CondI, m_LogicalOr(m_Value(BOp0), m_Value(BOp1))))
612 Opcode = Instruction::Or;
613
614 if (Opcode && !(match(BOp0, m_ExtractElt(m_Value(Vec), m_Value())) &&
615 match(BOp1, m_ExtractElt(m_Specific(Vec), m_Value())))) {
616 findMergedConditions(CondI, Succ0MBB, Succ1MBB, &CurMBB, &CurMBB, Opcode,
617 getEdgeProbability(&CurMBB, Succ0MBB),
618 getEdgeProbability(&CurMBB, Succ1MBB),
619 /*InvertCond=*/false);
620 assert(SL->SwitchCases[0].ThisBB == &CurMBB && "Unexpected lowering!")((void)0);
621
622 // Allow some cases to be rejected.
623 if (shouldEmitAsBranches(SL->SwitchCases)) {
624 // Emit the branch for this block.
625 emitSwitchCase(SL->SwitchCases[0], &CurMBB, *CurBuilder);
626 SL->SwitchCases.erase(SL->SwitchCases.begin());
627 return true;
628 }
629
630 // Okay, we decided not to do this, remove any inserted MBB's and clear
631 // SwitchCases.
632 for (unsigned I = 1, E = SL->SwitchCases.size(); I != E; ++I)
633 MF->erase(SL->SwitchCases[I].ThisBB);
634
635 SL->SwitchCases.clear();
636 }
637 }
638
639 // Create a CaseBlock record representing this branch.
640 SwitchCG::CaseBlock CB(CmpInst::ICMP_EQ, false, CondVal,
641 ConstantInt::getTrue(MF->getFunction().getContext()),
642 nullptr, Succ0MBB, Succ1MBB, &CurMBB,
643 CurBuilder->getDebugLoc());
644
645 // Use emitSwitchCase to actually insert the fast branch sequence for this
646 // cond branch.
647 emitSwitchCase(CB, &CurMBB, *CurBuilder);
648 return true;
649}
650
651void IRTranslator::addSuccessorWithProb(MachineBasicBlock *Src,
652 MachineBasicBlock *Dst,
653 BranchProbability Prob) {
654 if (!FuncInfo.BPI) {
655 Src->addSuccessorWithoutProb(Dst);
656 return;
657 }
658 if (Prob.isUnknown())
659 Prob = getEdgeProbability(Src, Dst);
660 Src->addSuccessor(Dst, Prob);
661}
662
663BranchProbability
664IRTranslator::getEdgeProbability(const MachineBasicBlock *Src,
665 const MachineBasicBlock *Dst) const {
666 const BasicBlock *SrcBB = Src->getBasicBlock();
667 const BasicBlock *DstBB = Dst->getBasicBlock();
668 if (!FuncInfo.BPI) {
669 // If BPI is not available, set the default probability as 1 / N, where N is
670 // the number of successors.
671 auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
672 return BranchProbability(1, SuccSize);
673 }
674 return FuncInfo.BPI->getEdgeProbability(SrcBB, DstBB);
675}
676
677bool IRTranslator::translateSwitch(const User &U, MachineIRBuilder &MIB) {
678 using namespace SwitchCG;
679 // Extract cases from the switch.
680 const SwitchInst &SI = cast<SwitchInst>(U);
681 BranchProbabilityInfo *BPI = FuncInfo.BPI;
682 CaseClusterVector Clusters;
683 Clusters.reserve(SI.getNumCases());
684 for (auto &I : SI.cases()) {
685 MachineBasicBlock *Succ = &getMBB(*I.getCaseSuccessor());
686 assert(Succ && "Could not find successor mbb in mapping")((void)0);
687 const ConstantInt *CaseVal = I.getCaseValue();
688 BranchProbability Prob =
689 BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
690 : BranchProbability(1, SI.getNumCases() + 1);
691 Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
692 }
693
694 MachineBasicBlock *DefaultMBB = &getMBB(*SI.getDefaultDest());
695
696 // Cluster adjacent cases with the same destination. We do this at all
697 // optimization levels because it's cheap to do and will make codegen faster
698 // if there are many clusters.
699 sortAndRangeify(Clusters);
700
701 MachineBasicBlock *SwitchMBB = &getMBB(*SI.getParent());
702
703 // If there is only the default destination, jump there directly.
704 if (Clusters.empty()) {
705 SwitchMBB->addSuccessor(DefaultMBB);
706 if (DefaultMBB != SwitchMBB->getNextNode())
707 MIB.buildBr(*DefaultMBB);
708 return true;
709 }
710
711 SL->findJumpTables(Clusters, &SI, DefaultMBB, nullptr, nullptr);
712 SL->findBitTestClusters(Clusters, &SI);
713
714 LLVM_DEBUG({do { } while (false)
715 dbgs() << "Case clusters: ";do { } while (false)
716 for (const CaseCluster &C : Clusters) {do { } while (false)
717 if (C.Kind == CC_JumpTable)do { } while (false)
718 dbgs() << "JT:";do { } while (false)
719 if (C.Kind == CC_BitTests)do { } while (false)
720 dbgs() << "BT:";do { } while (false)
721
722 C.Low->getValue().print(dbgs(), true);do { } while (false)
723 if (C.Low != C.High) {do { } while (false)
724 dbgs() << '-';do { } while (false)
725 C.High->getValue().print(dbgs(), true);do { } while (false)
726 }do { } while (false)
727 dbgs() << ' ';do { } while (false)
728 }do { } while (false)
729 dbgs() << '\n';do { } while (false)
730 })do { } while (false);
731
732 assert(!Clusters.empty())((void)0);
733 SwitchWorkList WorkList;
734 CaseClusterIt First = Clusters.begin();
735 CaseClusterIt Last = Clusters.end() - 1;
736 auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB);
737 WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr, DefaultProb});
738
739 // FIXME: At the moment we don't do any splitting optimizations here like
740 // SelectionDAG does, so this worklist only has one entry.
741 while (!WorkList.empty()) {
742 SwitchWorkListItem W = WorkList.back();
743 WorkList.pop_back();
744 if (!lowerSwitchWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB, MIB))
745 return false;
746 }
747 return true;
748}
749
750void IRTranslator::emitJumpTable(SwitchCG::JumpTable &JT,
751 MachineBasicBlock *MBB) {
752 // Emit the code for the jump table
753 assert(JT.Reg != -1U && "Should lower JT Header first!")((void)0);
754 MachineIRBuilder MIB(*MBB->getParent());
755 MIB.setMBB(*MBB);
756 MIB.setDebugLoc(CurBuilder->getDebugLoc());
757
758 Type *PtrIRTy = Type::getInt8PtrTy(MF->getFunction().getContext());
759 const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
760
761 auto Table = MIB.buildJumpTable(PtrTy, JT.JTI);
762 MIB.buildBrJT(Table.getReg(0), JT.JTI, JT.Reg);
763}
764
765bool IRTranslator::emitJumpTableHeader(SwitchCG::JumpTable &JT,
766 SwitchCG::JumpTableHeader &JTH,
767 MachineBasicBlock *HeaderBB) {
768 MachineIRBuilder MIB(*HeaderBB->getParent());
769 MIB.setMBB(*HeaderBB);
770 MIB.setDebugLoc(CurBuilder->getDebugLoc());
771
772 const Value &SValue = *JTH.SValue;
773 // Subtract the lowest switch case value from the value being switched on.
774 const LLT SwitchTy = getLLTForType(*SValue.getType(), *DL);
775 Register SwitchOpReg = getOrCreateVReg(SValue);
776 auto FirstCst = MIB.buildConstant(SwitchTy, JTH.First);
777 auto Sub = MIB.buildSub({SwitchTy}, SwitchOpReg, FirstCst);
778
779 // This value may be smaller or larger than the target's pointer type, and
780 // therefore require extension or truncation.
781 Type *PtrIRTy = SValue.getType()->getPointerTo();
782 const LLT PtrScalarTy = LLT::scalar(DL->getTypeSizeInBits(PtrIRTy));
783 Sub = MIB.buildZExtOrTrunc(PtrScalarTy, Sub);
784
785 JT.Reg = Sub.getReg(0);
786
787 if (JTH.OmitRangeCheck) {
788 if (JT.MBB != HeaderBB->getNextNode())
789 MIB.buildBr(*JT.MBB);
790 return true;
791 }
792
793 // Emit the range check for the jump table, and branch to the default block
794 // for the switch statement if the value being switched on exceeds the
795 // largest case in the switch.
796 auto Cst = getOrCreateVReg(
797 *ConstantInt::get(SValue.getType(), JTH.Last - JTH.First));
798 Cst = MIB.buildZExtOrTrunc(PtrScalarTy, Cst).getReg(0);
799 auto Cmp = MIB.buildICmp(CmpInst::ICMP_UGT, LLT::scalar(1), Sub, Cst);
800
801 auto BrCond = MIB.buildBrCond(Cmp.getReg(0), *JT.Default);
802
803 // Avoid emitting unnecessary branches to the next block.
804 if (JT.MBB != HeaderBB->getNextNode())
805 BrCond = MIB.buildBr(*JT.MBB);
806 return true;
807}
808
809void IRTranslator::emitSwitchCase(SwitchCG::CaseBlock &CB,
810 MachineBasicBlock *SwitchBB,
811 MachineIRBuilder &MIB) {
812 Register CondLHS = getOrCreateVReg(*CB.CmpLHS);
813 Register Cond;
814 DebugLoc OldDbgLoc = MIB.getDebugLoc();
815 MIB.setDebugLoc(CB.DbgLoc);
816 MIB.setMBB(*CB.ThisBB);
817
818 if (CB.PredInfo.NoCmp) {
819 // Branch or fall through to TrueBB.
820 addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);
821 addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
822 CB.ThisBB);
823 CB.ThisBB->normalizeSuccProbs();
824 if (CB.TrueBB != CB.ThisBB->getNextNode())
825 MIB.buildBr(*CB.TrueBB);
826 MIB.setDebugLoc(OldDbgLoc);
827 return;
828 }
829
830 const LLT i1Ty = LLT::scalar(1);
831 // Build the compare.
832 if (!CB.CmpMHS) {
833 const auto *CI = dyn_cast<ConstantInt>(CB.CmpRHS);
834 // For conditional branch lowering, we might try to do something silly like
835 // emit an G_ICMP to compare an existing G_ICMP i1 result with true. If so,
836 // just re-use the existing condition vreg.
837 if (MRI->getType(CondLHS).getSizeInBits() == 1 && CI &&
838 CI->getZExtValue() == 1 && CB.PredInfo.Pred == CmpInst::ICMP_EQ) {
839 Cond = CondLHS;
840 } else {
841 Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
842 if (CmpInst::isFPPredicate(CB.PredInfo.Pred))
843 Cond =
844 MIB.buildFCmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
845 else
846 Cond =
847 MIB.buildICmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
848 }
849 } else {
850 assert(CB.PredInfo.Pred == CmpInst::ICMP_SLE &&((void)0)
851 "Can only handle SLE ranges")((void)0);
852
853 const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
854 const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
855
856 Register CmpOpReg = getOrCreateVReg(*CB.CmpMHS);
857 if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
858 Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
859 Cond =
860 MIB.buildICmp(CmpInst::ICMP_SLE, i1Ty, CmpOpReg, CondRHS).getReg(0);
861 } else {
862 const LLT CmpTy = MRI->getType(CmpOpReg);
863 auto Sub = MIB.buildSub({CmpTy}, CmpOpReg, CondLHS);
864 auto Diff = MIB.buildConstant(CmpTy, High - Low);
865 Cond = MIB.buildICmp(CmpInst::ICMP_ULE, i1Ty, Sub, Diff).getReg(0);
866 }
867 }
868
869 // Update successor info
870 addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);
871
872 addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
873 CB.ThisBB);
874
875 // TrueBB and FalseBB are always different unless the incoming IR is
876 // degenerate. This only happens when running llc on weird IR.
877 if (CB.TrueBB != CB.FalseBB)
878 addSuccessorWithProb(CB.ThisBB, CB.FalseBB, CB.FalseProb);
879 CB.ThisBB->normalizeSuccProbs();
880
881 addMachineCFGPred({SwitchBB->getBasicBlock(), CB.FalseBB->getBasicBlock()},
882 CB.ThisBB);
883
884 MIB.buildBrCond(Cond, *CB.TrueBB);
885 MIB.buildBr(*CB.FalseBB);
886 MIB.setDebugLoc(OldDbgLoc);
887}
888
889bool IRTranslator::lowerJumpTableWorkItem(SwitchCG::SwitchWorkListItem W,
890 MachineBasicBlock *SwitchMBB,
891 MachineBasicBlock *CurMBB,
892 MachineBasicBlock *DefaultMBB,
893 MachineIRBuilder &MIB,
894 MachineFunction::iterator BBI,
895 BranchProbability UnhandledProbs,
896 SwitchCG::CaseClusterIt I,
897 MachineBasicBlock *Fallthrough,
898 bool FallthroughUnreachable) {
899 using namespace SwitchCG;
900 MachineFunction *CurMF = SwitchMBB->getParent();
901 // FIXME: Optimize away range check based on pivot comparisons.
902 JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
903 SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
904 BranchProbability DefaultProb = W.DefaultProb;
905
906 // The jump block hasn't been inserted yet; insert it here.
907 MachineBasicBlock *JumpMBB = JT->MBB;
908 CurMF->insert(BBI, JumpMBB);
909
910 // Since the jump table block is separate from the switch block, we need
911 // to keep track of it as a machine predecessor to the default block,
912 // otherwise we lose the phi edges.
913 addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
914 CurMBB);
915 addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
916 JumpMBB);
917
918 auto JumpProb = I->Prob;
919 auto FallthroughProb = UnhandledProbs;
920
921 // If the default statement is a target of the jump table, we evenly
922 // distribute the default probability to successors of CurMBB. Also
923 // update the probability on the edge from JumpMBB to Fallthrough.
924 for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
925 SE = JumpMBB->succ_end();
926 SI != SE; ++SI) {
927 if (*SI == DefaultMBB) {
928 JumpProb += DefaultProb / 2;
929 FallthroughProb -= DefaultProb / 2;
930 JumpMBB->setSuccProbability(SI, DefaultProb / 2);
931 JumpMBB->normalizeSuccProbs();
932 } else {
933 // Also record edges from the jump table block to its successors.
934 addMachineCFGPred({SwitchMBB->getBasicBlock(), (*SI)->getBasicBlock()},
935 JumpMBB);
936 }
937 }
938
939 // Skip the range check if the fallthrough block is unreachable.
940 if (FallthroughUnreachable)
941 JTH->OmitRangeCheck = true;
942
943 if (!JTH->OmitRangeCheck)
944 addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
945 addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
946 CurMBB->normalizeSuccProbs();
947
948 // The jump table header will be inserted in our current block, do the
949 // range check, and fall through to our fallthrough block.
950 JTH->HeaderBB = CurMBB;
951 JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.
952
953 // If we're in the right place, emit the jump table header right now.
954 if (CurMBB == SwitchMBB) {
955 if (!emitJumpTableHeader(*JT, *JTH, CurMBB))
956 return false;
957 JTH->Emitted = true;
958 }
959 return true;
960}
961bool IRTranslator::lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I,
962 Value *Cond,
963 MachineBasicBlock *Fallthrough,
964 bool FallthroughUnreachable,
965 BranchProbability UnhandledProbs,
966 MachineBasicBlock *CurMBB,
967 MachineIRBuilder &MIB,
968 MachineBasicBlock *SwitchMBB) {
969 using namespace SwitchCG;
970 const Value *RHS, *LHS, *MHS;
971 CmpInst::Predicate Pred;
972 if (I->Low == I->High) {
973 // Check Cond == I->Low.
974 Pred = CmpInst::ICMP_EQ;
975 LHS = Cond;
976 RHS = I->Low;
977 MHS = nullptr;
978 } else {
979 // Check I->Low <= Cond <= I->High.
980 Pred = CmpInst::ICMP_SLE;
981 LHS = I->Low;
982 MHS = Cond;
983 RHS = I->High;
984 }
985
986 // If Fallthrough is unreachable, fold away the comparison.
987 // The false probability is the sum of all unhandled cases.
988 CaseBlock CB(Pred, FallthroughUnreachable, LHS, RHS, MHS, I->MBB, Fallthrough,
989 CurMBB, MIB.getDebugLoc(), I->Prob, UnhandledProbs);
990
991 emitSwitchCase(CB, SwitchMBB, MIB);
992 return true;
993}
994
995void IRTranslator::emitBitTestHeader(SwitchCG::BitTestBlock &B,
996 MachineBasicBlock *SwitchBB) {
997 MachineIRBuilder &MIB = *CurBuilder;
998 MIB.setMBB(*SwitchBB);
999
1000 // Subtract the minimum value.
1001 Register SwitchOpReg = getOrCreateVReg(*B.SValue);
1002
1003 LLT SwitchOpTy = MRI->getType(SwitchOpReg);
1004 Register MinValReg = MIB.buildConstant(SwitchOpTy, B.First).getReg(0);
1005 auto RangeSub = MIB.buildSub(SwitchOpTy, SwitchOpReg, MinValReg);
1006
1007 // Ensure that the type will fit the mask value.
1008 LLT MaskTy = SwitchOpTy;
1009 for (unsigned I = 0, E = B.Cases.size(); I != E; ++I) {
1010 if (!isUIntN(SwitchOpTy.getSizeInBits(), B.Cases[I].Mask)) {
1011 // Switch table case ranges are encoded into a series of masks.
1012 // Just use pointer type, it's guaranteed to fit.
1013 MaskTy = LLT::scalar(64);
1014 break;
1015 }
1016 }
1017 Register SubReg = RangeSub.getReg(0);
1018 if (SwitchOpTy != MaskTy)
1019 SubReg = MIB.buildZExtOrTrunc(MaskTy, SubReg).getReg(0);
1020
1021 B.RegVT = getMVTForLLT(MaskTy);
1022 B.Reg = SubReg;
1023
1024 MachineBasicBlock *MBB = B.Cases[0].ThisBB;
1025
1026 if (!B.OmitRangeCheck)
1027 addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
1028 addSuccessorWithProb(SwitchBB, MBB, B.Prob);
1029
1030 SwitchBB->normalizeSuccProbs();
1031
1032 if (!B.OmitRangeCheck) {
1033 // Conditional branch to the default block.
1034 auto RangeCst = MIB.buildConstant(SwitchOpTy, B.Range);
1035 auto RangeCmp = MIB.buildICmp(CmpInst::Predicate::ICMP_UGT, LLT::scalar(1),
1036 RangeSub, RangeCst);
1037 MIB.buildBrCond(RangeCmp, *B.Default);
1038 }
1039
1040 // Avoid emitting unnecessary branches to the next block.
1041 if (MBB != SwitchBB->getNextNode())
1042 MIB.buildBr(*MBB);
1043}
1044
1045void IRTranslator::emitBitTestCase(SwitchCG::BitTestBlock &BB,
1046 MachineBasicBlock *NextMBB,
1047 BranchProbability BranchProbToNext,
1048 Register Reg, SwitchCG::BitTestCase &B,
1049 MachineBasicBlock *SwitchBB) {
1050 MachineIRBuilder &MIB = *CurBuilder;
1051 MIB.setMBB(*SwitchBB);
1052
1053 LLT SwitchTy = getLLTForMVT(BB.RegVT);
1054 Register Cmp;
1055 unsigned PopCount = countPopulation(B.Mask);
1056 if (PopCount == 1) {
1057 // Testing for a single bit; just compare the shift count with what it
1058 // would need to be to shift a 1 bit in that position.
1059 auto MaskTrailingZeros =
1060 MIB.buildConstant(SwitchTy, countTrailingZeros(B.Mask));
1061 Cmp =
1062 MIB.buildICmp(ICmpInst::ICMP_EQ, LLT::scalar(1), Reg, MaskTrailingZeros)
1063 .getReg(0);
1064 } else if (PopCount == BB.Range) {
1065 // There is only one zero bit in the range, test for it directly.
1066 auto MaskTrailingOnes =
1067 MIB.buildConstant(SwitchTy, countTrailingOnes(B.Mask));
1068 Cmp = MIB.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Reg, MaskTrailingOnes)
1069 .getReg(0);
1070 } else {
1071 // Make desired shift.
1072 auto CstOne = MIB.buildConstant(SwitchTy, 1);
1073 auto SwitchVal = MIB.buildShl(SwitchTy, CstOne, Reg);
1074
1075 // Emit bit tests and jumps.
1076 auto CstMask = MIB.buildConstant(SwitchTy, B.Mask);
1077 auto AndOp = MIB.buildAnd(SwitchTy, SwitchVal, CstMask);
1078 auto CstZero = MIB.buildConstant(SwitchTy, 0);
1079 Cmp = MIB.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), AndOp, CstZero)
1080 .getReg(0);
1081 }
1082
1083 // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
1084 addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
1085 // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
1086 addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
1087 // It is not guaranteed that the sum of B.ExtraProb and BranchProbToNext is
1088 // one as they are relative probabilities (and thus work more like weights),
1089 // and hence we need to normalize them to let the sum of them become one.
1090 SwitchBB->normalizeSuccProbs();
1091
1092 // Record the fact that the IR edge from the header to the bit test target
1093 // will go through our new block. Needed for PHIs to have nodes added.
1094 addMachineCFGPred({BB.Parent->getBasicBlock(), B.TargetBB->getBasicBlock()},
1095 SwitchBB);
1096
1097 MIB.buildBrCond(Cmp, *B.TargetBB);
1098
1099 // Avoid emitting unnecessary branches to the next block.
1100 if (NextMBB != SwitchBB->getNextNode())
1101 MIB.buildBr(*NextMBB);
1102}
1103
1104bool IRTranslator::lowerBitTestWorkItem(
1105 SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
1106 MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
1107 MachineIRBuilder &MIB, MachineFunction::iterator BBI,
1108 BranchProbability DefaultProb, BranchProbability UnhandledProbs,
1109 SwitchCG::CaseClusterIt I, MachineBasicBlock *Fallthrough,
1110 bool FallthroughUnreachable) {
1111 using namespace SwitchCG;
1112 MachineFunction *CurMF = SwitchMBB->getParent();
1113 // FIXME: Optimize away range check based on pivot comparisons.
1114 BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];
1115 // The bit test blocks haven't been inserted yet; insert them here.
1116 for (BitTestCase &BTC : BTB->Cases)
1117 CurMF->insert(BBI, BTC.ThisBB);
1118
1119 // Fill in fields of the BitTestBlock.
1120 BTB->Parent = CurMBB;
1121 BTB->Default = Fallthrough;
1122
1123 BTB->DefaultProb = UnhandledProbs;
1124 // If the cases in bit test don't form a contiguous range, we evenly
1125 // distribute the probability on the edge to Fallthrough to two
1126 // successors of CurMBB.
1127 if (!BTB->ContiguousRange) {
1128 BTB->Prob += DefaultProb / 2;
1129 BTB->DefaultProb -= DefaultProb / 2;
1130 }
1131
1132 if (FallthroughUnreachable) {
1133 // Skip the range check if the fallthrough block is unreachable.
1134 BTB->OmitRangeCheck = true;
1135 }
1136
1137 // If we're in the right place, emit the bit test header right now.
1138 if (CurMBB == SwitchMBB) {
1139 emitBitTestHeader(*BTB, SwitchMBB);
1140 BTB->Emitted = true;
1141 }
1142 return true;
1143}
1144
1145bool IRTranslator::lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W,
1146 Value *Cond,
1147 MachineBasicBlock *SwitchMBB,
1148 MachineBasicBlock *DefaultMBB,
1149 MachineIRBuilder &MIB) {
1150 using namespace SwitchCG;
1151 MachineFunction *CurMF = FuncInfo.MF;
1152 MachineBasicBlock *NextMBB = nullptr;
1153 MachineFunction::iterator BBI(W.MBB);
1154 if (++BBI != FuncInfo.MF->end())
1155 NextMBB = &*BBI;
1156
1157 if (EnableOpts) {
1158 // Here, we order cases by probability so the most likely case will be
1159 // checked first. However, two clusters can have the same probability in
1160 // which case their relative ordering is non-deterministic. So we use Low
1161 // as a tie-breaker as clusters are guaranteed to never overlap.
1162 llvm::sort(W.FirstCluster, W.LastCluster + 1,
1163 [](const CaseCluster &a, const CaseCluster &b) {
1164 return a.Prob != b.Prob
1165 ? a.Prob > b.Prob
1166 : a.Low->getValue().slt(b.Low->getValue());
1167 });
1168
1169 // Rearrange the case blocks so that the last one falls through if possible
1170 // without changing the order of probabilities.
1171 for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster;) {
1172 --I;
1173 if (I->Prob > W.LastCluster->Prob)
1174 break;
1175 if (I->Kind == CC_Range && I->MBB == NextMBB) {
1176 std::swap(*I, *W.LastCluster);
1177 break;
1178 }
1179 }
1180 }
1181
1182 // Compute total probability.
1183 BranchProbability DefaultProb = W.DefaultProb;
1184 BranchProbability UnhandledProbs = DefaultProb;
1185 for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
1186 UnhandledProbs += I->Prob;
1187
1188 MachineBasicBlock *CurMBB = W.MBB;
1189 for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
1190 bool FallthroughUnreachable = false;
1191 MachineBasicBlock *Fallthrough;
1192 if (I == W.LastCluster) {
1193 // For the last cluster, fall through to the default destination.
1194 Fallthrough = DefaultMBB;
1195 FallthroughUnreachable = isa<UnreachableInst>(
1196 DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());
1197 } else {
1198 Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
1199 CurMF->insert(BBI, Fallthrough);
1200 }
1201 UnhandledProbs -= I->Prob;
1202
1203 switch (I->Kind) {
1204 case CC_BitTests: {
1205 if (!lowerBitTestWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
1206 DefaultProb, UnhandledProbs, I, Fallthrough,
1207 FallthroughUnreachable)) {
1208 LLVM_DEBUG(dbgs() << "Failed to lower bit test for switch")do { } while (false);
1209 return false;
1210 }
1211 break;
1212 }
1213
1214 case CC_JumpTable: {
1215 if (!lowerJumpTableWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
1216 UnhandledProbs, I, Fallthrough,
1217 FallthroughUnreachable)) {
1218 LLVM_DEBUG(dbgs() << "Failed to lower jump table")do { } while (false);
1219 return false;
1220 }
1221 break;
1222 }
1223 case CC_Range: {
1224 if (!lowerSwitchRangeWorkItem(I, Cond, Fallthrough,
1225 FallthroughUnreachable, UnhandledProbs,
1226 CurMBB, MIB, SwitchMBB)) {
1227 LLVM_DEBUG(dbgs() << "Failed to lower switch range")do { } while (false);
1228 return false;
1229 }
1230 break;
1231 }
1232 }
1233 CurMBB = Fallthrough;
1234 }
1235
1236 return true;
1237}
1238
1239bool IRTranslator::translateIndirectBr(const User &U,
1240 MachineIRBuilder &MIRBuilder) {
1241 const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);
1242
1243 const Register Tgt = getOrCreateVReg(*BrInst.getAddress());
1244 MIRBuilder.buildBrIndirect(Tgt);
1245
1246 // Link successors.
1247 SmallPtrSet<const BasicBlock *, 32> AddedSuccessors;
1248 MachineBasicBlock &CurBB = MIRBuilder.getMBB();
1249 for (const BasicBlock *Succ : successors(&BrInst)) {
1250 // It's legal for indirectbr instructions to have duplicate blocks in the
1251 // destination list. We don't allow this in MIR. Skip anything that's
1252 // already a successor.
1253 if (!AddedSuccessors.insert(Succ).second)
1254 continue;
1255 CurBB.addSuccessor(&getMBB(*Succ));
1256 }
1257
1258 return true;
1259}
1260
1261static bool isSwiftError(const Value *V) {
1262 if (auto Arg = dyn_cast<Argument>(V))
1263 return Arg->hasSwiftErrorAttr();
1264 if (auto AI = dyn_cast<AllocaInst>(V))
1265 return AI->isSwiftError();
1266 return false;
1267}
1268
1269bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
1270 const LoadInst &LI = cast<LoadInst>(U);
1271 if (DL->getTypeStoreSize(LI.getType()) == 0)
1272 return true;
1273
1274 ArrayRef<Register> Regs = getOrCreateVRegs(LI);
1275 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
1276 Register Base = getOrCreateVReg(*LI.getPointerOperand());
1277
1278 Type *OffsetIRTy = DL->getIntPtrType(LI.getPointerOperandType());
1279 LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
1280
1281 if (CLI->supportSwiftError() && isSwiftError(LI.getPointerOperand())) {
1282 assert(Regs.size() == 1 && "swifterror should be single pointer");
1283 Register VReg = SwiftError.getOrCreateVRegUseAt(&LI, &MIRBuilder.getMBB(),
1284 LI.getPointerOperand());
1285 MIRBuilder.buildCopy(Regs[0], VReg);
1286 return true;
1287 }
1288
1289 auto &TLI = *MF->getSubtarget().getTargetLowering();
1290 MachineMemOperand::Flags Flags = TLI.getLoadMemOperandFlags(LI, *DL);
1291
1292 const MDNode *Ranges =
1293 Regs.size() == 1 ? LI.getMetadata(LLVMContext::MD_range) : nullptr;
1294 for (unsigned i = 0; i < Regs.size(); ++i) {
1295 Register Addr;
1296 MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8);
1297
1298 MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
1299 Align BaseAlign = getMemOpAlign(LI);
1300 AAMDNodes AAMetadata;
1301 LI.getAAMetadata(AAMetadata);
1302 auto MMO = MF->getMachineMemOperand(
1303 Ptr, Flags, MRI->getType(Regs[i]),
1304 commonAlignment(BaseAlign, Offsets[i] / 8), AAMetadata, Ranges,
1305 LI.getSyncScopeID(), LI.getOrdering());
1306 MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
1307 }
1308
1309 return true;
1310}
1311
1312bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
1313 const StoreInst &SI = cast<StoreInst>(U);
1314 if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0)
1315 return true;
1316
1317 ArrayRef<Register> Vals = getOrCreateVRegs(*SI.getValueOperand());
1318 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
1319 Register Base = getOrCreateVReg(*SI.getPointerOperand());
1320
1321 Type *OffsetIRTy = DL->getIntPtrType(SI.getPointerOperandType());
1322 LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
1323
1324 if (CLI->supportSwiftError() && isSwiftError(SI.getPointerOperand())) {
1325 assert(Vals.size() == 1 && "swifterror should be single pointer");
1326
1327 Register VReg = SwiftError.getOrCreateVRegDefAt(&SI, &MIRBuilder.getMBB(),
1328 SI.getPointerOperand());
1329 MIRBuilder.buildCopy(VReg, Vals[0]);
1330 return true;
1331 }
1332
1333 auto &TLI = *MF->getSubtarget().getTargetLowering();
1334 MachineMemOperand::Flags Flags = TLI.getStoreMemOperandFlags(SI, *DL);
1335
1336 for (unsigned i = 0; i < Vals.size(); ++i) {
1337 Register Addr;
1338 MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8);
1339
1340 MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
1341 Align BaseAlign = getMemOpAlign(SI);
1342 AAMDNodes AAMetadata;
1343 SI.getAAMetadata(AAMetadata);
1344 auto MMO = MF->getMachineMemOperand(
1345 Ptr, Flags, MRI->getType(Vals[i]),
1346 commonAlignment(BaseAlign, Offsets[i] / 8), AAMetadata, nullptr,
1347 SI.getSyncScopeID(), SI.getOrdering());
1348 MIRBuilder.buildStore(Vals[i], Addr, *MMO);
1349 }
1350 return true;
1351}
1352
1353static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) {
1354 const Value *Src = U.getOperand(0);
1355 Type *Int32Ty = Type::getInt32Ty(U.getContext());
1356
1357 // getIndexedOffsetInType is designed for GEPs, so the first index is the
1358 // usual array element rather than looking into the actual aggregate.
1359 SmallVector<Value *, 1> Indices;
1360 Indices.push_back(ConstantInt::get(Int32Ty, 0));
1361
1362 if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
1363 for (auto Idx : EVI->indices())
1364 Indices.push_back(ConstantInt::get(Int32Ty, Idx));
1365 } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
1366 for (auto Idx : IVI->indices())
1367 Indices.push_back(ConstantInt::get(Int32Ty, Idx));
1368 } else {
1369 for (unsigned i = 1; i < U.getNumOperands(); ++i)
1370 Indices.push_back(U.getOperand(i));
1371 }
1372
1373 return 8 * static_cast<uint64_t>(
1374 DL.getIndexedOffsetInType(Src->getType(), Indices));
1375}
1376
1377bool IRTranslator::translateExtractValue(const User &U,
1378 MachineIRBuilder &MIRBuilder) {
1379 const Value *Src = U.getOperand(0);
1380 uint64_t Offset = getOffsetFromIndices(U, *DL);
1381 ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
1382 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
1383 unsigned Idx = llvm::lower_bound(Offsets, Offset) - Offsets.begin();
1384 auto &DstRegs = allocateVRegs(U);
1385
1386 for (unsigned i = 0; i < DstRegs.size(); ++i)
1387 DstRegs[i] = SrcRegs[Idx++];
1388
1389 return true;
1390}
1391
1392bool IRTranslator::translateInsertValue(const User &U,
1393 MachineIRBuilder &MIRBuilder) {
1394 const Value *Src = U.getOperand(0);
1395 uint64_t Offset = getOffsetFromIndices(U, *DL);
1396 auto &DstRegs = allocateVRegs(U);
1397 ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
1398 ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
1399 ArrayRef<Register> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
1400 auto InsertedIt = InsertedRegs.begin();
1401
1402 for (unsigned i = 0; i < DstRegs.size(); ++i) {
1403 if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
1404 DstRegs[i] = *InsertedIt++;
1405 else
1406 DstRegs[i] = SrcRegs[i];
1407 }
1408
1409 return true;
1410}
1411
1412bool IRTranslator::translateSelect(const User &U,
1413 MachineIRBuilder &MIRBuilder) {
1414 Register Tst = getOrCreateVReg(*U.getOperand(0));
1415 ArrayRef<Register> ResRegs = getOrCreateVRegs(U);
1416 ArrayRef<Register> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
1417 ArrayRef<Register> Op1Regs = getOrCreateVRegs(*U.getOperand(2));
1418
1419 uint16_t Flags = 0;
1420 if (const SelectInst *SI = dyn_cast<SelectInst>(&U))
1421 Flags = MachineInstr::copyFlagsFromInstruction(*SI);
1422
1423 for (unsigned i = 0; i < ResRegs.size(); ++i) {
1424 MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i], Flags);
1425 }
1426
1427 return true;
1428}
1429
1430bool IRTranslator::translateCopy(const User &U, const Value &V,
1431 MachineIRBuilder &MIRBuilder) {
1432 Register Src = getOrCreateVReg(V);
1433 auto &Regs = *VMap.getVRegs(U);
1434 if (Regs.empty()) {
1435 Regs.push_back(Src);
1436 VMap.getOffsets(U)->push_back(0);
1437 } else {
1438 // If we already assigned a vreg for this instruction, we can't change that.
1439 // Emit a copy to satisfy the users we already emitted.
1440 MIRBuilder.buildCopy(Regs[0], Src);
1441 }
1442 return true;
1443}
1444
1445bool IRTranslator::translateBitCast(const User &U,
1446 MachineIRBuilder &MIRBuilder) {
1447 // If we're bitcasting to the source type, we can reuse the source vreg.
1448 if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
1449 getLLTForType(*U.getType(), *DL))
1450 return translateCopy(U, *U.getOperand(0), MIRBuilder);
1451
1452 return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
1453}
1454
1455bool IRTranslator::translateCast(unsigned Opcode, const User &U,
1456 MachineIRBuilder &MIRBuilder) {
1457 Register Op = getOrCreateVReg(*U.getOperand(0));
1458 Register Res = getOrCreateVReg(U);
1459 MIRBuilder.buildInstr(Opcode, {Res}, {Op});
1460 return true;
1461}
1462
1463bool IRTranslator::translateGetElementPtr(const User &U,
1464 MachineIRBuilder &MIRBuilder) {
1465 Value &Op0 = *U.getOperand(0);
1466 Register BaseReg = getOrCreateVReg(Op0);
1467 Type *PtrIRTy = Op0.getType();
1468 LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
1469 Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy);
1470 LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
1471
1472 // Normalize Vector GEP - all scalar operands should be converted to the
1473 // splat vector.
1474 unsigned VectorWidth = 0;
1475
1476 // True if we should use a splat vector; using VectorWidth alone is not
1477 // sufficient.
1478 bool WantSplatVector = false;
1479 if (auto *VT = dyn_cast<VectorType>(U.getType())) {
1480 VectorWidth = cast<FixedVectorType>(VT)->getNumElements();
1481 // We don't produce 1 x N vectors; those are treated as scalars.
1482 WantSplatVector = VectorWidth > 1;
1483 }
1484
1485 // We might need to splat the base pointer into a vector if the offsets
1486 // are vectors.
1487 if (WantSplatVector && !PtrTy.isVector()) {
1488 BaseReg =
1489 MIRBuilder
1490 .buildSplatVector(LLT::fixed_vector(VectorWidth, PtrTy), BaseReg)
1491 .getReg(0);
1492 PtrIRTy = FixedVectorType::get(PtrIRTy, VectorWidth);
1493 PtrTy = getLLTForType(*PtrIRTy, *DL);
1494 OffsetIRTy = DL->getIntPtrType(PtrIRTy);
1495 OffsetTy = getLLTForType(*OffsetIRTy, *DL);
1496 }
1497
1498 int64_t Offset = 0;
1499 for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
1500 GTI != E; ++GTI) {
1501 const Value *Idx = GTI.getOperand();
1502 if (StructType *StTy = GTI.getStructTypeOrNull()) {
1503 unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
1504 Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
1505 continue;
1506 } else {
1507 uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());
1508
1509 // If this is a scalar constant or a splat vector of constants,
1510 // handle it quickly.
1511 if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
1512 Offset += ElementSize * CI->getSExtValue();
1513 continue;
1514 }
1515
1516 if (Offset != 0) {
1517 auto OffsetMIB = MIRBuilder.buildConstant({OffsetTy}, Offset);
1518 BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, OffsetMIB.getReg(0))
1519 .getReg(0);
1520 Offset = 0;
1521 }
1522
1523 Register IdxReg = getOrCreateVReg(*Idx);
1524 LLT IdxTy = MRI->getType(IdxReg);
1525 if (IdxTy != OffsetTy) {
1526 if (!IdxTy.isVector() && WantSplatVector) {
1527 IdxReg = MIRBuilder.buildSplatVector(
1528 OffsetTy.changeElementType(IdxTy), IdxReg).getReg(0);
1529 }
1530
1531 IdxReg = MIRBuilder.buildSExtOrTrunc(OffsetTy, IdxReg).getReg(0);
1532 }
1533
1534 // N = N + Idx * ElementSize;
1535 // Avoid doing it for ElementSize of 1.
1536 Register GepOffsetReg;
1537 if (ElementSize != 1) {
1538 auto ElementSizeMIB = MIRBuilder.buildConstant(
1539 getLLTForType(*OffsetIRTy, *DL), ElementSize);
1540 GepOffsetReg =
1541 MIRBuilder.buildMul(OffsetTy, IdxReg, ElementSizeMIB).getReg(0);
1542 } else
1543 GepOffsetReg = IdxReg;
1544
1545 BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, GepOffsetReg).getReg(0);
1546 }
1547 }
1548
1549 if (Offset != 0) {
1550 auto OffsetMIB =
1551 MIRBuilder.buildConstant(OffsetTy, Offset);
1552 MIRBuilder.buildPtrAdd(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0));
1553 return true;
1554 }
1555
1556 MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
1557 return true;
1558}
1559
1560bool IRTranslator::translateMemFunc(const CallInst &CI,
1561 MachineIRBuilder &MIRBuilder,
1562 unsigned Opcode) {
1563
1564 // If the source is undef, then just emit a nop.
1565 if (isa<UndefValue>(CI.getArgOperand(1)))
1566 return true;
1567
1568 SmallVector<Register, 3> SrcRegs;
1569
1570 unsigned MinPtrSize = UINT_MAX;
1571 for (auto AI = CI.arg_begin(), AE = CI.arg_end(); std::next(AI) != AE; ++AI) {
1572 Register SrcReg = getOrCreateVReg(**AI);
1573 LLT SrcTy = MRI->getType(SrcReg);
1574 if (SrcTy.isPointer())
1575 MinPtrSize = std::min<unsigned>(SrcTy.getSizeInBits(), MinPtrSize);
1576 SrcRegs.push_back(SrcReg);
1577 }
1578
1579 LLT SizeTy = LLT::scalar(MinPtrSize);
1580
1581 // The size operand should be the minimum of the pointer sizes.
1582 Register &SizeOpReg = SrcRegs[SrcRegs.size() - 1];
1583 if (MRI->getType(SizeOpReg) != SizeTy)
1584 SizeOpReg = MIRBuilder.buildZExtOrTrunc(SizeTy, SizeOpReg).getReg(0);
1585
1586 auto ICall = MIRBuilder.buildInstr(Opcode);
1587 for (Register SrcReg : SrcRegs)
1588 ICall.addUse(SrcReg);
1589
1590 Align DstAlign;
1591 Align SrcAlign;
1592 unsigned IsVol =
1593 cast<ConstantInt>(CI.getArgOperand(CI.getNumArgOperands() - 1))
1594 ->getZExtValue();
1595
1596 if (auto *MCI = dyn_cast<MemCpyInst>(&CI)) {
1597 DstAlign = MCI->getDestAlign().valueOrOne();
1598 SrcAlign = MCI->getSourceAlign().valueOrOne();
1599 } else if (auto *MCI = dyn_cast<MemCpyInlineInst>(&CI)) {
1600 DstAlign = MCI->getDestAlign().valueOrOne();
1601 SrcAlign = MCI->getSourceAlign().valueOrOne();
1602 } else if (auto *MMI = dyn_cast<MemMoveInst>(&CI)) {
1603 DstAlign = MMI->getDestAlign().valueOrOne();
1604 SrcAlign = MMI->getSourceAlign().valueOrOne();
1605 } else {
1606 auto *MSI = cast<MemSetInst>(&CI);
1607 DstAlign = MSI->getDestAlign().valueOrOne();
1608 }
1609
1610 if (Opcode != TargetOpcode::G_MEMCPY_INLINE) {
1611 // We need to propagate the tail call flag from the IR inst as an argument.
1612 // Otherwise, we have to pessimize and assume later that we cannot tail call
1613 // any memory intrinsics.
1614 ICall.addImm(CI.isTailCall() ? 1 : 0);
1615 }
1616
1617 // Create mem operands to store the alignment and volatile info.
1618 auto VolFlag = IsVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
1619 ICall.addMemOperand(MF->getMachineMemOperand(
1620 MachinePointerInfo(CI.getArgOperand(0)),
1621 MachineMemOperand::MOStore | VolFlag, 1, DstAlign));
1622 if (Opcode != TargetOpcode::G_MEMSET)
1623 ICall.addMemOperand(MF->getMachineMemOperand(
1624 MachinePointerInfo(CI.getArgOperand(1)),
1625 MachineMemOperand::MOLoad | VolFlag, 1, SrcAlign));
1626
1627 return true;
1628}
1629
1630void IRTranslator::getStackGuard(Register DstReg,
1631 MachineIRBuilder &MIRBuilder) {
1632 const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
1633 MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));
1634 auto MIB =
1635 MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD, {DstReg}, {});
1636
1637 auto &TLI = *MF->getSubtarget().getTargetLowering();
1638 Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent());
1639 if (!Global)
1640 return;
1641
1642 unsigned AddrSpace = Global->getType()->getPointerAddressSpace();
1643 LLT PtrTy = LLT::pointer(AddrSpace, DL->getPointerSizeInBits(AddrSpace));
1644
1645 MachinePointerInfo MPInfo(Global);
1646 auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
1647 MachineMemOperand::MODereferenceable;
1648 MachineMemOperand *MemRef = MF->getMachineMemOperand(
1649 MPInfo, Flags, PtrTy, DL->getPointerABIAlignment(AddrSpace));
1650 MIB.setMemRefs({MemRef});
1651}
1652
1653bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
1654 MachineIRBuilder &MIRBuilder) {
1655 ArrayRef<Register> ResRegs = getOrCreateVRegs(CI);
1656 MIRBuilder.buildInstr(
1657 Op, {ResRegs[0], ResRegs[1]},
1658 {getOrCreateVReg(*CI.getOperand(0)), getOrCreateVReg(*CI.getOperand(1))});
1659
1660 return true;
1661}
1662
1663bool IRTranslator::translateFixedPointIntrinsic(unsigned Op, const CallInst &CI,
1664 MachineIRBuilder &MIRBuilder) {
1665 Register Dst = getOrCreateVReg(CI);
1666 Register Src0 = getOrCreateVReg(*CI.getOperand(0));
1667 Register Src1 = getOrCreateVReg(*CI.getOperand(1));
1668 uint64_t Scale = cast<ConstantInt>(CI.getOperand(2))->getZExtValue();
1669 MIRBuilder.buildInstr(Op, {Dst}, { Src0, Src1, Scale });
1670 return true;
1671}
1672
1673unsigned IRTranslator::getSimpleIntrinsicOpcode(Intrinsic::ID ID) {
1674 switch (ID) {
1675 default:
1676 break;
1677 case Intrinsic::bswap:
1678 return TargetOpcode::G_BSWAP;
1679 case Intrinsic::bitreverse:
1680 return TargetOpcode::G_BITREVERSE;
1681 case Intrinsic::fshl:
1682 return TargetOpcode::G_FSHL;
1683 case Intrinsic::fshr:
1684 return TargetOpcode::G_FSHR;
1685 case Intrinsic::ceil:
1686 return TargetOpcode::G_FCEIL;
1687 case Intrinsic::cos:
1688 return TargetOpcode::G_FCOS;
1689 case Intrinsic::ctpop:
1690 return TargetOpcode::G_CTPOP;
1691 case Intrinsic::exp:
1692 return TargetOpcode::G_FEXP;
1693 case Intrinsic::exp2:
1694 return TargetOpcode::G_FEXP2;
1695 case Intrinsic::fabs:
1696 return TargetOpcode::G_FABS;
1697 case Intrinsic::copysign:
1698 return TargetOpcode::G_FCOPYSIGN;
1699 case Intrinsic::minnum:
1700 return TargetOpcode::G_FMINNUM;
1701 case Intrinsic::maxnum:
1702 return TargetOpcode::G_FMAXNUM;
1703 case Intrinsic::minimum:
1704 return TargetOpcode::G_FMINIMUM;
1705 case Intrinsic::maximum:
1706 return TargetOpcode::G_FMAXIMUM;
1707 case Intrinsic::canonicalize:
1708 return TargetOpcode::G_FCANONICALIZE;
1709 case Intrinsic::floor:
1710 return TargetOpcode::G_FFLOOR;
1711 case Intrinsic::fma:
1712 return TargetOpcode::G_FMA;
1713 case Intrinsic::log:
1714 return TargetOpcode::G_FLOG;
1715 case Intrinsic::log2:
1716 return TargetOpcode::G_FLOG2;
1717 case Intrinsic::log10:
1718 return TargetOpcode::G_FLOG10;
1719 case Intrinsic::nearbyint:
1720 return TargetOpcode::G_FNEARBYINT;
1721 case Intrinsic::pow:
1722 return TargetOpcode::G_FPOW;
1723 case Intrinsic::powi:
1724 return TargetOpcode::G_FPOWI;
1725 case Intrinsic::rint:
1726 return TargetOpcode::G_FRINT;
1727 case Intrinsic::round:
1728 return TargetOpcode::G_INTRINSIC_ROUND;
1729 case Intrinsic::roundeven:
1730 return TargetOpcode::G_INTRINSIC_ROUNDEVEN;
1731 case Intrinsic::sin:
1732 return TargetOpcode::G_FSIN;
1733 case Intrinsic::sqrt:
1734 return TargetOpcode::G_FSQRT;
1735 case Intrinsic::trunc:
1736 return TargetOpcode::G_INTRINSIC_TRUNC;
1737 case Intrinsic::readcyclecounter:
1738 return TargetOpcode::G_READCYCLECOUNTER;
1739 case Intrinsic::ptrmask:
1740 return TargetOpcode::G_PTRMASK;
1741 case Intrinsic::lrint:
1742 return TargetOpcode::G_INTRINSIC_LRINT;
1743 // FADD/FMUL require checking the FMF, so are handled elsewhere.
1744 case Intrinsic::vector_reduce_fmin:
1745 return TargetOpcode::G_VECREDUCE_FMIN;
1746 case Intrinsic::vector_reduce_fmax:
1747 return TargetOpcode::G_VECREDUCE_FMAX;
1748 case Intrinsic::vector_reduce_add:
1749 return TargetOpcode::G_VECREDUCE_ADD;
1750 case Intrinsic::vector_reduce_mul:
1751 return TargetOpcode::G_VECREDUCE_MUL;
1752 case Intrinsic::vector_reduce_and:
1753 return TargetOpcode::G_VECREDUCE_AND;
1754 case Intrinsic::vector_reduce_or:
1755 return TargetOpcode::G_VECREDUCE_OR;
1756 case Intrinsic::vector_reduce_xor:
1757 return TargetOpcode::G_VECREDUCE_XOR;
1758 case Intrinsic::vector_reduce_smax:
1759 return TargetOpcode::G_VECREDUCE_SMAX;
1760 case Intrinsic::vector_reduce_smin:
1761 return TargetOpcode::G_VECREDUCE_SMIN;
1762 case Intrinsic::vector_reduce_umax:
1763 return TargetOpcode::G_VECREDUCE_UMAX;
1764 case Intrinsic::vector_reduce_umin:
1765 return TargetOpcode::G_VECREDUCE_UMIN;
1766 }
1767 return Intrinsic::not_intrinsic;
1768}
1769
1770bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI,
1771 Intrinsic::ID ID,
1772 MachineIRBuilder &MIRBuilder) {
1773
1774 unsigned Op = getSimpleIntrinsicOpcode(ID);
1775
1776 // Is this a simple intrinsic?
1777 if (Op == Intrinsic::not_intrinsic)
1778 return false;
1779
1780 // Yes. Let's translate it.
1781 SmallVector<llvm::SrcOp, 4> VRegs;
1782 for (auto &Arg : CI.arg_operands())
1783 VRegs.push_back(getOrCreateVReg(*Arg));
1784
1785 MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs,
1786 MachineInstr::copyFlagsFromInstruction(CI));
1787 return true;
1788}
1789
1790 // TODO: Include ConstrainedOps.def when all strict instructions are defined.
1791static unsigned getConstrainedOpcode(Intrinsic::ID ID) {
1792 switch (ID) {
1793 case Intrinsic::experimental_constrained_fadd:
1794 return TargetOpcode::G_STRICT_FADD;
1795 case Intrinsic::experimental_constrained_fsub:
1796 return TargetOpcode::G_STRICT_FSUB;
1797 case Intrinsic::experimental_constrained_fmul:
1798 return TargetOpcode::G_STRICT_FMUL;
1799 case Intrinsic::experimental_constrained_fdiv:
1800 return TargetOpcode::G_STRICT_FDIV;
1801 case Intrinsic::experimental_constrained_frem:
1802 return TargetOpcode::G_STRICT_FREM;
1803 case Intrinsic::experimental_constrained_fma:
1804 return TargetOpcode::G_STRICT_FMA;
1805 case Intrinsic::experimental_constrained_sqrt:
1806 return TargetOpcode::G_STRICT_FSQRT;
1807 default:
1808 return 0;
1809 }
1810}
1811
1812bool IRTranslator::translateConstrainedFPIntrinsic(
1813 const ConstrainedFPIntrinsic &FPI, MachineIRBuilder &MIRBuilder) {
1814 fp::ExceptionBehavior EB = FPI.getExceptionBehavior().getValue();
1815
1816 unsigned Opcode = getConstrainedOpcode(FPI.getIntrinsicID());
1817 if (!Opcode)
1818 return false;
1819
1820 unsigned Flags = MachineInstr::copyFlagsFromInstruction(FPI);
1821 if (EB == fp::ExceptionBehavior::ebIgnore)
1822 Flags |= MachineInstr::NoFPExcept;
1823
1824 SmallVector<llvm::SrcOp, 4> VRegs;
1825 VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(0)));
1826 if (!FPI.isUnaryOp())
1827 VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(1)));
1828 if (FPI.isTernaryOp())
1829 VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(2)));
1830
1831 MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(FPI)}, VRegs, Flags);
1832 return true;
1833}
1834
1835bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
1836 MachineIRBuilder &MIRBuilder) {
1837 if (auto *MI = dyn_cast<AnyMemIntrinsic>(&CI)) {
1838 if (ORE->enabled()) {
1839 const Function &F = *MI->getParent()->getParent();
1840 auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
1841 if (MemoryOpRemark::canHandle(MI, TLI)) {
1842 MemoryOpRemark R(*ORE, "gisel-irtranslator-memsize", *DL, TLI);
1843 R.visit(MI);
1844 }
1845 }
1846 }
1847
1848 // If this is a simple intrinsic (that is, we just need to add a def of
1849 // a vreg, and uses for each arg operand), then translate it.
1850 if (translateSimpleIntrinsic(CI, ID, MIRBuilder))
1851 return true;
1852
1853 switch (ID) {
1854 default:
1855 break;
1856 case Intrinsic::lifetime_start:
1857 case Intrinsic::lifetime_end: {
1858 // No stack colouring in O0, discard region information.
1859 if (MF->getTarget().getOptLevel() == CodeGenOpt::None)
1860 return true;
1861
1862 unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START
1863 : TargetOpcode::LIFETIME_END;
1864
1865 // Get the underlying objects for the location passed on the lifetime
1866 // marker.
1867 SmallVector<const Value *, 4> Allocas;
1868 getUnderlyingObjects(CI.getArgOperand(1), Allocas);
1869
1870 // Iterate over each underlying object, creating lifetime markers for each
1871 // static alloca. Quit if we find a non-static alloca.
1872 for (const Value *V : Allocas) {
1873 const AllocaInst *AI = dyn_cast<AllocaInst>(V);
1874 if (!AI)
1875 continue;
1876
1877 if (!AI->isStaticAlloca())
1878 return true;
1879
1880 MIRBuilder.buildInstr(Op).addFrameIndex(getOrCreateFrameIndex(*AI));
1881 }
1882 return true;
1883 }
1884 case Intrinsic::dbg_declare: {
1885 const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI);
1886 assert(DI.getVariable() && "Missing variable");
1887
1888 const Value *Address = DI.getAddress();
1889 if (!Address || isa<UndefValue>(Address)) {
1890 LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
1891 return true;
1892 }
1893
1894 assert(DI.getVariable()->isValidLocationForIntrinsic(
1895 MIRBuilder.getDebugLoc()) &&
1896 "Expected inlined-at fields to agree");
1897 auto AI = dyn_cast<AllocaInst>(Address);
1898 if (AI && AI->isStaticAlloca()) {
1899 // Static allocas are tracked at the MF level, no need for DBG_VALUE
1900 // instructions (in fact, they get ignored if they *do* exist).
1901 MF->setVariableDbgInfo(DI.getVariable(), DI.getExpression(),
1902 getOrCreateFrameIndex(*AI), DI.getDebugLoc());
1903 } else {
1904 // A dbg.declare describes the address of a source variable, so lower it
1905 // into an indirect DBG_VALUE.
1906 MIRBuilder.buildIndirectDbgValue(getOrCreateVReg(*Address),
1907 DI.getVariable(), DI.getExpression());
1908 }
1909 return true;
1910 }
1911 case Intrinsic::dbg_label: {
1912 const DbgLabelInst &DI = cast<DbgLabelInst>(CI);
1913 assert(DI.getLabel() && "Missing label");
1914
1915 assert(DI.getLabel()->isValidLocationForIntrinsic(
1916 MIRBuilder.getDebugLoc()) &&
1917 "Expected inlined-at fields to agree");
1918
1919 MIRBuilder.buildDbgLabel(DI.getLabel());
1920 return true;
1921 }
1922 case Intrinsic::vaend:
1923 // No target I know of cares about va_end. Certainly no in-tree target
1924 // does. Simplest intrinsic ever!
1925 return true;
1926 case Intrinsic::vastart: {
1927 auto &TLI = *MF->getSubtarget().getTargetLowering();
1928 Value *Ptr = CI.getArgOperand(0);
1929 unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8;
1930
1931 // FIXME: Get alignment
1932 MIRBuilder.buildInstr(TargetOpcode::G_VASTART, {}, {getOrCreateVReg(*Ptr)})
1933 .addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Ptr),
1934 MachineMemOperand::MOStore,
1935 ListSize, Align(1)));
1936 return true;
1937 }
1938 case Intrinsic::dbg_value: {
1939 // This form of DBG_VALUE is target-independent.
1940 const DbgValueInst &DI = cast<DbgValueInst>(CI);
1941 const Value *V = DI.getValue();
1942 assert(DI.getVariable()->isValidLocationForIntrinsic(
1943 MIRBuilder.getDebugLoc()) &&
1944 "Expected inlined-at fields to agree");
1945 if (!V || DI.hasArgList()) {
1946 // DI cannot produce a valid DBG_VALUE, so produce an undef DBG_VALUE to
1947 // terminate any prior location.
1948 MIRBuilder.buildIndirectDbgValue(0, DI.getVariable(), DI.getExpression());
1949 } else if (const auto *CI = dyn_cast<Constant>(V)) {
1950 MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression());
1951 } else {
1952 for (Register Reg : getOrCreateVRegs(*V)) {
1953 // FIXME: This does not handle register-indirect values at offset 0. The
1954 // direct/indirect thing shouldn't really be handled by something as
1955 // implicit as reg+noreg vs reg+imm in the first place, but it seems
1956 // pretty baked in right now.
1957 MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(), DI.getExpression());
1958 }
1959 }
1960 return true;
1961 }
1962 case Intrinsic::uadd_with_overflow:
1963 return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);
1964 case Intrinsic::sadd_with_overflow:
1965 return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
1966 case Intrinsic::usub_with_overflow:
1967 return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);
1968 case Intrinsic::ssub_with_overflow:
1969 return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
1970 case Intrinsic::umul_with_overflow:
1971 return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
1972 case Intrinsic::smul_with_overflow:
1973 return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
1974 case Intrinsic::uadd_sat:
1975 return translateBinaryOp(TargetOpcode::G_UADDSAT, CI, MIRBuilder);
1976 case Intrinsic::sadd_sat:
1977 return translateBinaryOp(TargetOpcode::G_SADDSAT, CI, MIRBuilder);
1978 case Intrinsic::usub_sat:
1979 return translateBinaryOp(TargetOpcode::G_USUBSAT, CI, MIRBuilder);
1980 case Intrinsic::ssub_sat:
1981 return translateBinaryOp(TargetOpcode::G_SSUBSAT, CI, MIRBuilder);
1982 case Intrinsic::ushl_sat:
1983 return translateBinaryOp(TargetOpcode::G_USHLSAT, CI, MIRBuilder);
1984 case Intrinsic::sshl_sat:
1985 return translateBinaryOp(TargetOpcode::G_SSHLSAT, CI, MIRBuilder);
1986 case Intrinsic::umin:
1987 return translateBinaryOp(TargetOpcode::G_UMIN, CI, MIRBuilder);
1988 case Intrinsic::umax:
1989 return translateBinaryOp(TargetOpcode::G_UMAX, CI, MIRBuilder);
1990 case Intrinsic::smin:
1991 return translateBinaryOp(TargetOpcode::G_SMIN, CI, MIRBuilder);
1992 case Intrinsic::smax:
1993 return translateBinaryOp(TargetOpcode::G_SMAX, CI, MIRBuilder);
1994 case Intrinsic::abs:
1995 // TODO: Preserve "int min is poison" arg in GMIR?
1996 return translateUnaryOp(TargetOpcode::G_ABS, CI, MIRBuilder);
1997 case Intrinsic::smul_fix:
1998 return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIX, CI, MIRBuilder);
1999 case Intrinsic::umul_fix:
2000 return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIX, CI, MIRBuilder);
2001 case Intrinsic::smul_fix_sat:
2002 return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIXSAT, CI, MIRBuilder);
2003 case Intrinsic::umul_fix_sat:
2004 return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIXSAT, CI, MIRBuilder);
2005 case Intrinsic::sdiv_fix:
2006 return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIX, CI, MIRBuilder);
2007 case Intrinsic::udiv_fix:
2008 return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIX, CI, MIRBuilder);
2009 case Intrinsic::sdiv_fix_sat:
2010 return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIXSAT, CI, MIRBuilder);
2011 case Intrinsic::udiv_fix_sat:
2012 return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIXSAT, CI, MIRBuilder);
2013 case Intrinsic::fmuladd: {
2014 const TargetMachine &TM = MF->getTarget();
2015 const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
2016 Register Dst = getOrCreateVReg(CI);
2017 Register Op0 = getOrCreateVReg(*CI.getArgOperand(0));
2018 Register Op1 = getOrCreateVReg(*CI.getArgOperand(1));
2019 Register Op2 = getOrCreateVReg(*CI.getArgOperand(2));
2020 if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
2021 TLI.isFMAFasterThanFMulAndFAdd(*MF,
2022 TLI.getValueType(*DL, CI.getType()))) {
2023 // TODO: Revisit this to see if we should move this part of the
2024 // lowering to the combiner.
2025 MIRBuilder.buildFMA(Dst, Op0, Op1, Op2,
2026 MachineInstr::copyFlagsFromInstruction(CI));
2027 } else {
2028 LLT Ty = getLLTForType(*CI.getType(), *DL);
2029 auto FMul = MIRBuilder.buildFMul(
2030 Ty, Op0, Op1, MachineInstr::copyFlagsFromInstruction(CI));
2031 MIRBuilder.buildFAdd(Dst, FMul, Op2,
2032 MachineInstr::copyFlagsFromInstruction(CI));
2033 }
2034 return true;
2035 }
2036 case Intrinsic::convert_from_fp16:
2037 // FIXME: This intrinsic should probably be removed from the IR.
2038 MIRBuilder.buildFPExt(getOrCreateVReg(CI),
2039 getOrCreateVReg(*CI.getArgOperand(0)),
2040 MachineInstr::copyFlagsFromInstruction(CI));
2041 return true;
2042 case Intrinsic::convert_to_fp16:
2043 // FIXME: This intrinsic should probably be removed from the IR.
2044 MIRBuilder.buildFPTrunc(getOrCreateVReg(CI),
2045 getOrCreateVReg(*CI.getArgOperand(0)),
2046 MachineInstr::copyFlagsFromInstruction(CI));
2047 return true;
2048 case Intrinsic::memcpy_inline:
2049 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY_INLINE);
2050 case Intrinsic::memcpy:
2051 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY);
2052 case Intrinsic::memmove:
2053 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMMOVE);
2054 case Intrinsic::memset:
2055 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMSET);
2056 case Intrinsic::eh_typeid_for: {
2057 GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
2058 Register Reg = getOrCreateVReg(CI);
2059 unsigned TypeID = MF->getTypeIDFor(GV);
2060 MIRBuilder.buildConstant(Reg, TypeID);
2061 return true;
2062 }
2063 case Intrinsic::objectsize:
2064 llvm_unreachable("llvm.objectsize.* should have been lowered already");
2065
2066 case Intrinsic::is_constant:
2067 llvm_unreachable("llvm.is.constant.* should have been lowered already");
2068
2069 case Intrinsic::stackguard:
2070 getStackGuard(getOrCreateVReg(CI), MIRBuilder);
2071 return true;
2072 case Intrinsic::stackprotector: {
2073 LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
2074 Register GuardVal = MRI->createGenericVirtualRegister(PtrTy);
2075 getStackGuard(GuardVal, MIRBuilder);
2076
2077 AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
2078 int FI = getOrCreateFrameIndex(*Slot);
2079 MF->getFrameInfo().setStackProtectorIndex(FI);
2080
2081 MIRBuilder.buildStore(
2082 GuardVal, getOrCreateVReg(*Slot),
2083 *MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
2084 MachineMemOperand::MOStore |
2085 MachineMemOperand::MOVolatile,
2086 PtrTy, Align(8)));
2087 return true;
2088 }
2089 case Intrinsic::stacksave: {
2090 // Save the stack pointer to the location provided by the intrinsic.
2091 Register Reg = getOrCreateVReg(CI);
2092 Register StackPtr = MF->getSubtarget()
2093 .getTargetLowering()
2094 ->getStackPointerRegisterToSaveRestore();
2095
2096 // If the target doesn't specify a stack pointer, then fall back.
2097 if (!StackPtr)
2098 return false;
2099
2100 MIRBuilder.buildCopy(Reg, StackPtr);
2101 return true;
2102 }
2103 case Intrinsic::stackrestore: {
2104 // Restore the stack pointer from the location provided by the intrinsic.
2105 Register Reg = getOrCreateVReg(*CI.getArgOperand(0));
2106 Register StackPtr = MF->getSubtarget()
2107 .getTargetLowering()
2108 ->getStackPointerRegisterToSaveRestore();
2109
2110 // If the target doesn't specify a stack pointer, then fall back.
2111 if (!StackPtr)
2112 return false;
2113
2114 MIRBuilder.buildCopy(StackPtr, Reg);
2115 return true;
2116 }
2117 case Intrinsic::cttz:
2118 case Intrinsic::ctlz: {
2119 ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1));
2120 bool isTrailing = ID == Intrinsic::cttz;
2121 unsigned Opcode = isTrailing
2122 ? Cst->isZero() ? TargetOpcode::G_CTTZ
2123 : TargetOpcode::G_CTTZ_ZERO_UNDEF
2124 : Cst->isZero() ? TargetOpcode::G_CTLZ
2125 : TargetOpcode::G_CTLZ_ZERO_UNDEF;
2126 MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(CI)},
2127 {getOrCreateVReg(*CI.getArgOperand(0))});
2128 return true;
2129 }
2130 case Intrinsic::invariant_start: {
2131 LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
2132 Register Undef = MRI->createGenericVirtualRegister(PtrTy);
2133 MIRBuilder.buildUndef(Undef);
2134 return true;
2135 }
2136 case Intrinsic::invariant_end:
2137 return true;
2138 case Intrinsic::expect:
2139 case Intrinsic::annotation:
2140 case Intrinsic::ptr_annotation:
2141 case Intrinsic::launder_invariant_group:
2142 case Intrinsic::strip_invariant_group: {
2143 // Drop the intrinsic, but forward the value.
2144 MIRBuilder.buildCopy(getOrCreateVReg(CI),
2145 getOrCreateVReg(*CI.getArgOperand(0)));
2146 return true;
2147 }
2148 case Intrinsic::assume:
2149 case Intrinsic::experimental_noalias_scope_decl:
2150 case Intrinsic::var_annotation:
2151 case Intrinsic::sideeffect:
2152 // Discard annotate attributes, assumptions, and artificial side-effects.
2153 return true;
2154 case Intrinsic::read_volatile_register:
2155 case Intrinsic::read_register: {
2156 Value *Arg = CI.getArgOperand(0);
2157 MIRBuilder
2158 .buildInstr(TargetOpcode::G_READ_REGISTER, {getOrCreateVReg(CI)}, {})
2159 .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()));
2160 return true;
2161 }
2162 case Intrinsic::write_register: {
2163 Value *Arg = CI.getArgOperand(0);
2164 MIRBuilder.buildInstr(TargetOpcode::G_WRITE_REGISTER)
2165 .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()))
2166 .addUse(getOrCreateVReg(*CI.getArgOperand(1)));
2167 return true;
2168 }
2169 case Intrinsic::localescape: {
2170 MachineBasicBlock &EntryMBB = MF->front();
2171 StringRef EscapedName = GlobalValue::dropLLVMManglingEscape(MF->getName());
2172
2173 // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission
2174 // is the same on all targets.
2175 for (unsigned Idx = 0, E = CI.getNumArgOperands(); Idx < E; ++Idx) {
2176 Value *Arg = CI.getArgOperand(Idx)->stripPointerCasts();
2177 if (isa<ConstantPointerNull>(Arg))
2178 continue; // Skip null pointers. They represent a hole in index space.
2179
2180 int FI = getOrCreateFrameIndex(*cast<AllocaInst>(Arg));
2181 MCSymbol *FrameAllocSym =
2182 MF->getMMI().getContext().getOrCreateFrameAllocSymbol(EscapedName,
2183 Idx);
2184
2185 // This should be inserted at the start of the entry block.
2186 auto LocalEscape =
2187 MIRBuilder.buildInstrNoInsert(TargetOpcode::LOCAL_ESCAPE)
2188 .addSym(FrameAllocSym)
2189 .addFrameIndex(FI);
2190
2191 EntryMBB.insert(EntryMBB.begin(), LocalEscape);
2192 }
2193
2194 return true;
2195 }
2196 case Intrinsic::vector_reduce_fadd:
2197 case Intrinsic::vector_reduce_fmul: {
2198 // Need to check for the reassoc flag to decide whether we want a
2199 // sequential reduction opcode or not.
2200 Register Dst = getOrCreateVReg(CI);
2201 Register ScalarSrc = getOrCreateVReg(*CI.getArgOperand(0));
2202 Register VecSrc = getOrCreateVReg(*CI.getArgOperand(1));
2203 unsigned Opc = 0;
2204 if (!CI.hasAllowReassoc()) {
2205 // The sequential ordering case.
2206 Opc = ID == Intrinsic::vector_reduce_fadd
2207 ? TargetOpcode::G_VECREDUCE_SEQ_FADD
2208 : TargetOpcode::G_VECREDUCE_SEQ_FMUL;
2209 MIRBuilder.buildInstr(Opc, {Dst}, {ScalarSrc, VecSrc},
2210 MachineInstr::copyFlagsFromInstruction(CI));
2211 return true;
2212 }
2213 // We split the operation into a separate G_FADD/G_FMUL + the reduce,
2214 // since the associativity doesn't matter.
2215 unsigned ScalarOpc;
2216 if (ID == Intrinsic::vector_reduce_fadd) {
2217 Opc = TargetOpcode::G_VECREDUCE_FADD;
2218 ScalarOpc = TargetOpcode::G_FADD;
2219 } else {
2220 Opc = TargetOpcode::G_VECREDUCE_FMUL;
2221 ScalarOpc = TargetOpcode::G_FMUL;
2222 }
2223 LLT DstTy = MRI->getType(Dst);
2224 auto Rdx = MIRBuilder.buildInstr(
2225 Opc, {DstTy}, {VecSrc}, MachineInstr::copyFlagsFromInstruction(CI));
2226 MIRBuilder.buildInstr(ScalarOpc, {Dst}, {ScalarSrc, Rdx},
2227 MachineInstr::copyFlagsFromInstruction(CI));
2228
2229 return true;
2230 }
2231#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
2232 case Intrinsic::INTRINSIC:
2233#include "llvm/IR/ConstrainedOps.def"
2234 return translateConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(CI),
2235 MIRBuilder);
2236
2237 }
2238 return false;
2239}
2240
2241bool IRTranslator::translateInlineAsm(const CallBase &CB,
2242 MachineIRBuilder &MIRBuilder) {
2243
2244 const InlineAsmLowering *ALI = MF->getSubtarget().getInlineAsmLowering();
2245
2246 if (!ALI) {
2247 LLVM_DEBUG(
2248 dbgs() << "Inline asm lowering is not supported for this target yet\n");
2249 return false;
2250 }
2251
2252 return ALI->lowerInlineAsm(
2253 MIRBuilder, CB, [&](const Value &Val) { return getOrCreateVRegs(Val); });
2254}
2255
2256bool IRTranslator::translateCallBase(const CallBase &CB,
2257 MachineIRBuilder &MIRBuilder) {
2258 ArrayRef<Register> Res = getOrCreateVRegs(CB);
2259
2260 SmallVector<ArrayRef<Register>, 8> Args;
2261 Register SwiftInVReg = 0;
2262 Register SwiftErrorVReg = 0;
2263 for (auto &Arg : CB.args()) {
2264 if (CLI->supportSwiftError() && isSwiftError(Arg)) {
2265 assert(SwiftInVReg == 0 && "Expected only one swift error argument");
2266 LLT Ty = getLLTForType(*Arg->getType(), *DL);
2267 SwiftInVReg = MRI->createGenericVirtualRegister(Ty);
2268 MIRBuilder.buildCopy(SwiftInVReg, SwiftError.getOrCreateVRegUseAt(
2269 &CB, &MIRBuilder.getMBB(), Arg));
2270 Args.emplace_back(makeArrayRef(SwiftInVReg));
2271 SwiftErrorVReg =
2272 SwiftError.getOrCreateVRegDefAt(&CB, &MIRBuilder.getMBB(), Arg);
2273 continue;
2274 }
2275 Args.push_back(getOrCreateVRegs(*Arg));
2276 }
2277
2278 if (auto *CI = dyn_cast<CallInst>(&CB)) {
2279 if (ORE->enabled()) {
2280 const Function &F = *CI->getParent()->getParent();
2281 auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
2282 if (MemoryOpRemark::canHandle(CI, TLI)) {
2283 MemoryOpRemark R(*ORE, "gisel-irtranslator-memsize", *DL, TLI);
2284 R.visit(CI);
2285 }
2286 }
2287 }
2288
2289 // We don't set HasCalls on MFI here yet because call lowering may decide to
2290 // optimize into tail calls. Instead, we defer that to selection where a final
2291 // scan is done to check if any instructions are calls.
2292 bool Success =
2293 CLI->lowerCall(MIRBuilder, CB, Res, Args, SwiftErrorVReg,
2294 [&]() { return getOrCreateVReg(*CB.getCalledOperand()); });
2295
2296 // Check if we just inserted a tail call.
2297 if (Success) {
2298 assert(!HasTailCall && "Can't tail call return twice from block?");
2299 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
2300 HasTailCall = TII->isTailCall(*std::prev(MIRBuilder.getInsertPt()));
2301 }
2302
2303 return Success;
2304}
2305
2306bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
2307 const CallInst &CI = cast<CallInst>(U);
2308 auto TII = MF->getTarget().getIntrinsicInfo();
2309 const Function *F = CI.getCalledFunction();
2310
2311 // FIXME: support Windows dllimport function calls.
2312 if (F && (F->hasDLLImportStorageClass() ||
2313 (MF->getTarget().getTargetTriple().isOSWindows() &&
2314 F->hasExternalWeakLinkage())))
2315 return false;
2316
2317 // FIXME: support control flow guard targets.
2318 if (CI.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget))
2319 return false;
2320
2321 if (CI.isInlineAsm())
2322 return translateInlineAsm(CI, MIRBuilder);
2323
2324 Intrinsic::ID ID = Intrinsic::not_intrinsic;
2325 if (F && F->isIntrinsic()) {
2326 ID = F->getIntrinsicID();
2327 if (TII && ID == Intrinsic::not_intrinsic)
2328 ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));
2329 }
2330
2331 if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic)
2332 return translateCallBase(CI, MIRBuilder);
2333
2334 assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");
2335
2336 if (translateKnownIntrinsic(CI, ID, MIRBuilder))
2337 return true;
2338
2339 ArrayRef<Register> ResultRegs;
2340 if (!CI.getType()->isVoidTy())
2341 ResultRegs = getOrCreateVRegs(CI);
2342
2343 // Ignore the callsite attributes. Backend code is most likely not expecting
2344 // an intrinsic to sometimes have side effects and sometimes not.
2345 MachineInstrBuilder MIB =
2346 MIRBuilder.buildIntrinsic(ID, ResultRegs, !F->doesNotAccessMemory());
2347 if (isa<FPMathOperator>(CI))
2348 MIB->copyIRFlags(CI);
2349
2350 for (auto &Arg : enumerate(CI.arg_operands())) {
2351 // If this is required to be an immediate, don't materialize it in a
2352 // register.
2353 if (CI.paramHasAttr(Arg.index(), Attribute::ImmArg)) {
2354 if (ConstantInt *CI = dyn_cast<ConstantInt>(Arg.value())) {
2355 // imm arguments are more convenient than cimm (and realistically
2356 // probably sufficient), so use them.
2357 assert(CI->getBitWidth() <= 64 &&
2358 "large intrinsic immediates not handled");
2359 MIB.addImm(CI->getSExtValue());
2360 } else {
2361 MIB.addFPImm(cast<ConstantFP>(Arg.value()));
2362 }
2363 } else if (auto MD = dyn_cast<MetadataAsValue>(Arg.value())) {
2364 auto *MDN = dyn_cast<MDNode>(MD->getMetadata());
2365 if (!MDN) // This was probably an MDString.
2366 return false;
2367 MIB.addMetadata(MDN);
2368 } else {
2369 ArrayRef<Register> VRegs = getOrCreateVRegs(*Arg.value());
2370 if (VRegs.size() > 1)
2371 return false;
2372 MIB.addUse(VRegs[0]);
2373 }
2374 }
2375
2376 // Add a MachineMemOperand if it is a target mem intrinsic.
2377 const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
2378 TargetLowering::IntrinsicInfo Info;
2379 // TODO: Add a GlobalISel version of getTgtMemIntrinsic.
2380 if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) {
2381 Align Alignment = Info.align.getValueOr(
2382 DL->getABITypeAlign(Info.memVT.getTypeForEVT(F->getContext())));
2383 LLT MemTy = Info.memVT.isSimple()
2384 ? getLLTForMVT(Info.memVT.getSimpleVT())
2385 : LLT::scalar(Info.memVT.getStoreSizeInBits());
2386 MIB.addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Info.ptrVal),
2387 Info.flags, MemTy, Alignment));
2388 }
2389
2390 return true;
2391}
2392
2393bool IRTranslator::findUnwindDestinations(
2394 const BasicBlock *EHPadBB,
2395 BranchProbability Prob,
2396 SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
2397 &UnwindDests) {
2398 EHPersonality Personality = classifyEHPersonality(
2399 EHPadBB->getParent()->getFunction().getPersonalityFn());
2400 bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
2401 bool IsCoreCLR = Personality == EHPersonality::CoreCLR;
2402 bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX;
2403 bool IsSEH = isAsynchronousEHPersonality(Personality);
2404
2405 if (IsWasmCXX) {
2406 // Ignore this for now.
2407 return false;
2408 }
2409
2410 while (EHPadBB) {
2411 const Instruction *Pad = EHPadBB->getFirstNonPHI();
2412 BasicBlock *NewEHPadBB = nullptr;
2413 if (isa<LandingPadInst>(Pad)) {
2414 // Stop on landingpads. They are not funclets.
2415 UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
2416 break;
2417 }
2418 if (isa<CleanupPadInst>(Pad)) {
2419 // Stop on cleanup pads. Cleanups are always funclet entries for all known
2420 // personalities.
2421 UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
2422 UnwindDests.back().first->setIsEHScopeEntry();
2423 UnwindDests.back().first->setIsEHFuncletEntry();
2424 break;
2425 }
2426 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
2427 // Add the catchpad handlers to the possible destinations.
2428 for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2429 UnwindDests.emplace_back(&getMBB(*CatchPadBB), Prob);
2430 // For MSVC++ and the CLR, catchblocks are funclets and need prologues.
2431 if (IsMSVCCXX || IsCoreCLR)
2432 UnwindDests.back().first->setIsEHFuncletEntry();
2433 if (!IsSEH)
2434 UnwindDests.back().first->setIsEHScopeEntry();
2435 }
2436 NewEHPadBB = CatchSwitch->getUnwindDest();
2437 } else {
2438 continue;
2439 }
2440
2441 BranchProbabilityInfo *BPI = FuncInfo.BPI;
2442 if (BPI && NewEHPadBB)
2443 Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB);
2444 EHPadBB = NewEHPadBB;
2445 }
2446 return true;
2447}
2448
2449bool IRTranslator::translateInvoke(const User &U,
2450 MachineIRBuilder &MIRBuilder) {
2451 const InvokeInst &I = cast<InvokeInst>(U);
2452 MCContext &Context = MF->getContext();
2453
2454 const BasicBlock *ReturnBB = I.getSuccessor(0);
2455 const BasicBlock *EHPadBB = I.getSuccessor(1);
2456
2457 const Function *Fn = I.getCalledFunction();
2458
2459 // FIXME: support invoking patchpoint and statepoint intrinsics.
2460 if (Fn && Fn->isIntrinsic())
2461 return false;
2462
2463 // FIXME: support whatever these are.
2464 if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
2465 return false;
2466
2467 // FIXME: support control flow guard targets.
2468 if (I.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget))
2469 return false;
2470
2471 // FIXME: support Windows exception handling.
2472 if (!isa<LandingPadInst>(EHPadBB->getFirstNonPHI()))
2473 return false;
2474
2475 bool LowerInlineAsm = false;
2476 if (I.isInlineAsm()) {
2477 const InlineAsm *IA = cast<InlineAsm>(I.getCalledOperand());
2478 if (!IA->canThrow()) {
2479 // Fast path without emitting EH_LABELs.
2480
2481 if (!translateInlineAsm(I, MIRBuilder))
2482 return false;
2483
2484 MachineBasicBlock *InvokeMBB = &MIRBuilder.getMBB(),
2485 *ReturnMBB = &getMBB(*ReturnBB);
2486
2487 // Update successor info.
2488 addSuccessorWithProb(InvokeMBB, ReturnMBB, BranchProbability::getOne());
2489
2490 MIRBuilder.buildBr(*ReturnMBB);
2491 return true;
2492 } else {
2493 LowerInlineAsm = true;
2494 }
2495 }
2496
2497 // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
2498 // the region covered by the try.
2499 MCSymbol *BeginSymbol = Context.createTempSymbol();
2500 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);
2501
2502 if (LowerInlineAsm) {
2503 if (!translateInlineAsm(I, MIRBuilder))
2504 return false;
2505 } else if (!translateCallBase(I, MIRBuilder))
2506 return false;
2507
2508 MCSymbol *EndSymbol = Context.createTempSymbol();
2509 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);
2510
2511 SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
2512 BranchProbabilityInfo *BPI = FuncInfo.BPI;
2513 MachineBasicBlock *InvokeMBB = &MIRBuilder.getMBB();
2514 BranchProbability EHPadBBProb =
2515 BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB)
2516 : BranchProbability::getZero();
2517
2518 if (!findUnwindDestinations(EHPadBB, EHPadBBProb, UnwindDests))
2519 return false;
2520
2521 MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
2522 &ReturnMBB = getMBB(*ReturnBB);
2523 // Update successor info.
2524 addSuccessorWithProb(InvokeMBB, &ReturnMBB);
2525 for (auto &UnwindDest : UnwindDests) {
2526 UnwindDest.first->setIsEHPad();
2527 addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
2528 }
2529 InvokeMBB->normalizeSuccProbs();
2530
2531 MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
2532 MIRBuilder.buildBr(ReturnMBB);
2533 return true;
2534}
2535
2536bool IRTranslator::translateCallBr(const User &U,
2537 MachineIRBuilder &MIRBuilder) {
2538 // FIXME: Implement this.
2539 return false;
2540}
2541
2542bool IRTranslator::translateLandingPad(const User &U,
2543 MachineIRBuilder &MIRBuilder) {
2544 const LandingPadInst &LP = cast<LandingPadInst>(U);
2545
2546 MachineBasicBlock &MBB = MIRBuilder.getMBB();
2547
2548 MBB.setIsEHPad();
2549
2550 // If there aren't registers to copy the values into (e.g., during SjLj
2551 // exceptions), then don't bother.
2552 auto &TLI = *MF->getSubtarget().getTargetLowering();
2553 const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
2554 if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
2555 TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
2556 return true;
2557
2558 // If landingpad's return type is token type, we don't create DAG nodes
2559 // for its exception pointer and selector value. The extraction of exception
2560 // pointer or selector value from token type landingpads is not currently
2561 // supported.
2562 if (LP.getType()->isTokenTy())
2563 return true;
2564
2565 // Add a label to mark the beginning of the landing pad. Deletion of the
2566 // landing pad can thus be detected via the MachineModuleInfo.
2567 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
2568 .addSym(MF->addLandingPad(&MBB));
2569
2570 // If the unwinder does not preserve all registers, ensure that the
2571 // function marks the clobbered registers as used.
2572 const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo();
2573 if (auto *RegMask = TRI.getCustomEHPadPreservedMask(*MF))
2574 MF->getRegInfo().addPhysRegsUsedFromRegMask(RegMask);
2575
2576 LLT Ty = getLLTForType(*LP.getType(), *DL);
2577 Register Undef = MRI->createGenericVirtualRegister(Ty);
2578 MIRBuilder.buildUndef(Undef);
2579
2580 SmallVector<LLT, 2> Tys;
2581 for (Type *Ty : cast<StructType>(LP.getType())->elements())
2582 Tys.push_back(getLLTForType(*Ty, *DL));
2583 assert(Tys.size() == 2 && "Only two-valued landingpads are supported");
2584
2585 // Mark exception register as live in.
2586 Register ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
2587 if (!ExceptionReg)
2588 return false;
2589
2590 MBB.addLiveIn(ExceptionReg);
2591 ArrayRef<Register> ResRegs = getOrCreateVRegs(LP);
2592 MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);
2593
2594 Register SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
2595 if (!SelectorReg)
2596 return false;
2597
2598 MBB.addLiveIn(SelectorReg);
2599 Register PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
2600 MIRBuilder.buildCopy(PtrVReg, SelectorReg);
2601 MIRBuilder.buildCast(ResRegs[1], PtrVReg);
2602
2603 return true;
2604}
2605
2606bool IRTranslator::translateAlloca(const User &U,
2607 MachineIRBuilder &MIRBuilder) {
2608 auto &AI = cast<AllocaInst>(U);
2609
2610 if (AI.isSwiftError())
2611 return true;
2612
2613 if (AI.isStaticAlloca()) {
2614 Register Res = getOrCreateVReg(AI);
2615 int FI = getOrCreateFrameIndex(AI);
2616 MIRBuilder.buildFrameIndex(Res, FI);
2617 return true;
2618 }
2619
2620 // FIXME: support stack probing for Windows.
2621 if (MF->getTarget().getTargetTriple().isOSWindows())
2622 return false;
2623
2624 // Now we're in the harder dynamic case.
2625 Register NumElts = getOrCreateVReg(*AI.getArraySize());
2626 Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
2627 LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
2628 if (MRI->getType(NumElts) != IntPtrTy) {
2629 Register ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
2630 MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
2631 NumElts = ExtElts;
2632 }
2633
2634 Type *Ty = AI.getAllocatedType();
2635
2636 Register AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
2637 Register TySize =
2638 getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, DL->getTypeAllocSize(Ty)));
2639 MIRBuilder.buildMul(AllocSize, NumElts, TySize);
2640
2641 // Round the size of the allocation up to the stack alignment size
2642 // by adding SA-1 to the size. This doesn't overflow because we're computing
2643 // an address inside an alloca.
2644 Align StackAlign = MF->getSubtarget().getFrameLowering()->getStackAlign();
2645 auto SAMinusOne = MIRBuilder.buildConstant(IntPtrTy, StackAlign.value() - 1);
2646 auto AllocAdd = MIRBuilder.buildAdd(IntPtrTy, AllocSize, SAMinusOne,
2647 MachineInstr::NoUWrap);
2648 auto AlignCst =
2649 MIRBuilder.buildConstant(IntPtrTy, ~(uint64_t)(StackAlign.value() - 1));
2650 auto AlignedAlloc = MIRBuilder.buildAnd(IntPtrTy, AllocAdd, AlignCst);
2651
2652 Align Alignment = std::max(AI.getAlign(), DL->getPrefTypeAlign(Ty));
2653 if (Alignment <= StackAlign)
2654 Alignment = Align(1);
2655 MIRBuilder.buildDynStackAlloc(getOrCreateVReg(AI), AlignedAlloc, Alignment);
2656
2657 MF->getFrameInfo().CreateVariableSizedObject(Alignment, &AI);
2658 assert(MF->getFrameInfo().hasVarSizedObjects());
2659 return true;
2660}
2661
2662bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
2663 // FIXME: We may need more info about the type. Because of how LLT works,
2664 // we're completely discarding the i64/double distinction here (amongst
2665 // others). Fortunately the ABIs I know of where that matters don't use va_arg
2666 // anyway but that's not guaranteed.
2667 MIRBuilder.buildInstr(TargetOpcode::G_VAARG, {getOrCreateVReg(U)},
2668 {getOrCreateVReg(*U.getOperand(0)),
2669 DL->getABITypeAlign(U.getType()).value()});
2670 return true;
2671}
2672
2673bool IRTranslator::translateInsertElement(const User &U,
2674 MachineIRBuilder &MIRBuilder) {
2675 // If it is a <1 x Ty> vector, use the scalar as it is
2676 // not a legal vector type in LLT.
2677 if (cast<FixedVectorType>(U.getType())->getNumElements() == 1)
2678 return translateCopy(U, *U.getOperand(1), MIRBuilder);
2679
2680 Register Res = getOrCreateVReg(U);
2681 Register Val = getOrCreateVReg(*U.getOperand(0));
2682 Register Elt = getOrCreateVReg(*U.getOperand(1));
2683 Register Idx = getOrCreateVReg(*U.getOperand(2));
2684 MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
2685 return true;
2686}
2687
2688bool IRTranslator::translateExtractElement(const User &U,
2689 MachineIRBuilder &MIRBuilder) {
2690 // If it is a <1 x Ty> vector, use the scalar as it is
2691 // not a legal vector type in LLT.
2692 if (cast<FixedVectorType>(U.getOperand(0)->getType())->getNumElements() == 1)
2693 return translateCopy(U, *U.getOperand(0), MIRBuilder);
2694
2695 Register Res = getOrCreateVReg(U);
2696 Register Val = getOrCreateVReg(*U.getOperand(0));
2697 const auto &TLI = *MF->getSubtarget().getTargetLowering();
2698 unsigned PreferredVecIdxWidth = TLI.getVectorIdxTy(*DL).getSizeInBits();
2699 Register Idx;
2700 if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) {
2701 if (CI->getBitWidth() != PreferredVecIdxWidth) {
2702 APInt NewIdx = CI->getValue().sextOrTrunc(PreferredVecIdxWidth);
2703 auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
2704 Idx = getOrCreateVReg(*NewIdxCI);
2705 }
2706 }
2707 if (!Idx)
2708 Idx = getOrCreateVReg(*U.getOperand(1));
2709 if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
2710 const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
2711 Idx = MIRBuilder.buildSExtOrTrunc(VecIdxTy, Idx).getReg(0);
2712 }
2713 MIRBuilder.buildExtractVectorElement(Res, Val, Idx);
2714 return true;
2715}
2716
2717bool IRTranslator::translateShuffleVector(const User &U,
2718 MachineIRBuilder &MIRBuilder) {
2719 ArrayRef<int> Mask;
2720 if (auto *SVI = dyn_cast<ShuffleVectorInst>(&U))
2721 Mask = SVI->getShuffleMask();
2722 else
2723 Mask = cast<ConstantExpr>(U).getShuffleMask();
2724 ArrayRef<int> MaskAlloc = MF->allocateShuffleMask(Mask);
2725 MIRBuilder
2726 .buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {getOrCreateVReg(U)},
2727 {getOrCreateVReg(*U.getOperand(0)),
2728 getOrCreateVReg(*U.getOperand(1))})
2729 .addShuffleMask(MaskAlloc);
2730 return true;
2731}
2732
2733bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
2734 const PHINode &PI = cast<PHINode>(U);
2735
2736 SmallVector<MachineInstr *, 4> Insts;
2737 for (auto Reg : getOrCreateVRegs(PI)) {
2738 auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {});
2739 Insts.push_back(MIB.getInstr());
2740 }
2741
2742 PendingPHIs.emplace_back(&PI, std::move(Insts));
2743 return true;
2744}
2745
2746bool IRTranslator::translateAtomicCmpXchg(const User &U,
2747 MachineIRBuilder &MIRBuilder) {
2748 const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U);
2749
2750 auto &TLI = *MF->getSubtarget().getTargetLowering();
2751 auto Flags = TLI.getAtomicMemOperandFlags(I, *DL);
2752
2753 auto Res = getOrCreateVRegs(I);
2754 Register OldValRes = Res[0];
2755 Register SuccessRes = Res[1];
2756 Register Addr = getOrCreateVReg(*I.getPointerOperand());
2757 Register Cmp = getOrCreateVReg(*I.getCompareOperand());
2758 Register NewVal = getOrCreateVReg(*I.getNewValOperand());
2759
2760 AAMDNodes AAMetadata;
2761 I.getAAMetadata(AAMetadata);
2762
2763 MIRBuilder.buildAtomicCmpXchgWithSuccess(
2764 OldValRes, SuccessRes, Addr, Cmp, NewVal,
2765 *MF->getMachineMemOperand(
2766 MachinePointerInfo(I.getPointerOperand()), Flags, MRI->getType(Cmp),
2767 getMemOpAlign(I), AAMetadata, nullptr, I.getSyncScopeID(),
2768 I.getSuccessOrdering(), I.getFailureOrdering()));
2769 return true;
2770}
2771
2772bool IRTranslator::translateAtomicRMW(const User &U,
2773 MachineIRBuilder &MIRBuilder) {
2774 const AtomicRMWInst &I = cast<AtomicRMWInst>(U);
2775 auto &TLI = *MF->getSubtarget().getTargetLowering();
2776 auto Flags = TLI.getAtomicMemOperandFlags(I, *DL);
2777
2778 Register Res = getOrCreateVReg(I);
2779 Register Addr = getOrCreateVReg(*I.getPointerOperand());
2780 Register Val = getOrCreateVReg(*I.getValOperand());
2781
2782 unsigned Opcode = 0;
2783 switch (I.getOperation()) {
2784 default:
2785 return false;
2786 case AtomicRMWInst::Xchg:
2787 Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
2788 break;
2789 case AtomicRMWInst::Add:
2790 Opcode = TargetOpcode::G_ATOMICRMW_ADD;
2791 break;
2792 case AtomicRMWInst::Sub:
2793 Opcode = TargetOpcode::G_ATOMICRMW_SUB;
2794 break;
2795 case AtomicRMWInst::And:
2796 Opcode = TargetOpcode::G_ATOMICRMW_AND;
2797 break;
2798 case AtomicRMWInst::Nand:
2799 Opcode = TargetOpcode::G_ATOMICRMW_NAND;
2800 break;
2801 case AtomicRMWInst::Or:
2802 Opcode = TargetOpcode::G_ATOMICRMW_OR;
2803 break;
2804 case AtomicRMWInst::Xor:
2805 Opcode = TargetOpcode::G_ATOMICRMW_XOR;
2806 break;
2807 case AtomicRMWInst::Max:
2808 Opcode = TargetOpcode::G_ATOMICRMW_MAX;
2809 break;
2810 case AtomicRMWInst::Min:
2811 Opcode = TargetOpcode::G_ATOMICRMW_MIN;
2812 break;
2813 case AtomicRMWInst::UMax:
2814 Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
2815 break;
2816 case AtomicRMWInst::UMin:
2817 Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
2818 break;
2819 case AtomicRMWInst::FAdd:
2820 Opcode = TargetOpcode::G_ATOMICRMW_FADD;
2821 break;
2822 case AtomicRMWInst::FSub:
2823 Opcode = TargetOpcode::G_ATOMICRMW_FSUB;
2824 break;
2825 }
2826
2827 AAMDNodes AAMetadata;
2828 I.getAAMetadata(AAMetadata);
2829
2830 MIRBuilder.buildAtomicRMW(
2831 Opcode, Res, Addr, Val,
2832 *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
2833 Flags, MRI->getType(Val), getMemOpAlign(I),
2834 AAMetadata, nullptr, I.getSyncScopeID(),
2835 I.getOrdering()));
2836 return true;
2837}
2838
2839bool IRTranslator::translateFence(const User &U,
2840 MachineIRBuilder &MIRBuilder) {
2841 const FenceInst &Fence = cast<FenceInst>(U);
2842 MIRBuilder.buildFence(static_cast<unsigned>(Fence.getOrdering()),
2843 Fence.getSyncScopeID());
2844 return true;
2845}
2846
2847bool IRTranslator::translateFreeze(const User &U,
2848 MachineIRBuilder &MIRBuilder) {
2849 const ArrayRef<Register> DstRegs = getOrCreateVRegs(U);
2850 const ArrayRef<Register> SrcRegs = getOrCreateVRegs(*U.getOperand(0));
2851
2852 assert(DstRegs.size() == SrcRegs.size() &&
2853 "Freeze with different source and destination type?");
2854
2855 for (unsigned I = 0; I < DstRegs.size(); ++I) {
2856 MIRBuilder.buildFreeze(DstRegs[I], SrcRegs[I]);
2857 }
2858
2859 return true;
2860}
2861
2862void IRTranslator::finishPendingPhis() {
2863#ifndef NDEBUG
2864 DILocationVerifier Verifier;
2865 GISelObserverWrapper WrapperObserver(&Verifier);
2866 RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
2867#endif // ifndef NDEBUG
2868 for (auto &Phi : PendingPHIs) {
2869 const PHINode *PI = Phi.first;
2870 ArrayRef<MachineInstr *> ComponentPHIs = Phi.second;
2871 MachineBasicBlock *PhiMBB = ComponentPHIs[0]->getParent();
2872 EntryBuilder->setDebugLoc(PI->getDebugLoc());
2873#ifndef NDEBUG
2874 Verifier.setCurrentInst(PI);
2875#endif // ifndef NDEBUG
2876
2877 SmallSet<const MachineBasicBlock *, 16> SeenPreds;
2878 for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
2879 auto IRPred = PI->getIncomingBlock(i);
2880 ArrayRef<Register> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
2881 for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
2882 if (SeenPreds.count(Pred) || !PhiMBB->isPredecessor(Pred))
2883 continue;
2884 SeenPreds.insert(Pred);
2885 for (unsigned j = 0; j < ValRegs.size(); ++j) {
2886 MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
2887 MIB.addUse(ValRegs[j]);
2888 MIB.addMBB(Pred);
2889 }
2890 }
2891 }
2892 }
2893}
2894
2895bool IRTranslator::valueIsSplit(const Value &V,
2896 SmallVectorImpl<uint64_t> *Offsets) {
2897 SmallVector<LLT, 4> SplitTys;
2898 if (Offsets && !Offsets->empty())
2899 Offsets->clear();
2900 computeValueLLTs(*DL, *V.getType(), SplitTys, Offsets);
2901 return SplitTys.size() > 1;
2902}
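
As a rough illustration of when valueIsSplit() returns true (a sketch under assumed types and an assumed data layout string, not code from the analyzed file): an aggregate that flattens to more than one LLT is treated as split.

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/LowLevelTypeImpl.h"

// Hypothetical check: a {i32, double} struct yields two LLTs (s32 and s64),
// so the equivalent of valueIsSplit() would report it as split.
bool structWouldBeSplit() {
  llvm::LLVMContext Ctx;
  llvm::DataLayout DL("e-m:e-i64:64-n8:16:32:64-S128"); // assumed layout string
  auto *STy = llvm::StructType::get(llvm::Type::getInt32Ty(Ctx),
                                    llvm::Type::getDoubleTy(Ctx));
  llvm::SmallVector<llvm::LLT, 4> SplitTys;
  llvm::computeValueLLTs(DL, *STy, SplitTys);
  return SplitTys.size() > 1; // expected: true
}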
2903
2904bool IRTranslator::translate(const Instruction &Inst) {
2905 CurBuilder->setDebugLoc(Inst.getDebugLoc());
2906
2907 auto &TLI = *MF->getSubtarget().getTargetLowering();
2908 if (TLI.fallBackToDAGISel(Inst))
18: Assuming the condition is false
19: Taking false branch
2909 return false;
2910
2911 switch (Inst.getOpcode()) {
20: Control jumps to 'case FCmp:' at line 207
2912#define HANDLE_INST(NUM, OPCODE, CLASS) \
2913 case Instruction::OPCODE: \
2914 return translate##OPCODE(Inst, *CurBuilder.get());
2915#include "llvm/IR/Instruction.def"
2916 default:
2917 return false;
2918 }
2919}
2920
2921bool IRTranslator::translate(const Constant &C, Register Reg) {
2922 // We only emit constants into the entry block from here. To prevent jumpy
2923 // debug behaviour set the line to 0.
2924 if (auto CurrInstDL = CurBuilder->getDL())
2925 EntryBuilder->setDebugLoc(DILocation::get(C.getContext(), 0, 0,
2926 CurrInstDL.getScope(),
2927 CurrInstDL.getInlinedAt()));
2928
2929 if (auto CI = dyn_cast<ConstantInt>(&C))
2930 EntryBuilder->buildConstant(Reg, *CI);
2931 else if (auto CF = dyn_cast<ConstantFP>(&C))
2932 EntryBuilder->buildFConstant(Reg, *CF);
2933 else if (isa<UndefValue>(C))
2934 EntryBuilder->buildUndef(Reg);
2935 else if (isa<ConstantPointerNull>(C))
2936 EntryBuilder->buildConstant(Reg, 0);
2937 else if (auto GV = dyn_cast<GlobalValue>(&C))
2938 EntryBuilder->buildGlobalValue(Reg, GV);
2939 else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
2940 if (!isa<FixedVectorType>(CAZ->getType()))
2941 return false;
2942 // Return the scalar if it is a <1 x Ty> vector.
2943 unsigned NumElts = CAZ->getElementCount().getFixedValue();
2944 if (NumElts == 1)
2945 return translateCopy(C, *CAZ->getElementValue(0u), *EntryBuilder.get());
2946 SmallVector<Register, 4> Ops;
2947 for (unsigned I = 0; I < NumElts; ++I) {
2948 Constant &Elt = *CAZ->getElementValue(I);
2949 Ops.push_back(getOrCreateVReg(Elt));
2950 }
2951 EntryBuilder->buildBuildVector(Reg, Ops);
2952 } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
2953 // Return the scalar if it is a <1 x Ty> vector.
2954 if (CV->getNumElements() == 1)
2955 return translateCopy(C, *CV->getElementAsConstant(0),
2956 *EntryBuilder.get());
2957 SmallVector<Register, 4> Ops;
2958 for (unsigned i = 0; i < CV->getNumElements(); ++i) {
2959 Constant &Elt = *CV->getElementAsConstant(i);
2960 Ops.push_back(getOrCreateVReg(Elt));
2961 }
2962 EntryBuilder->buildBuildVector(Reg, Ops);
2963 } else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
2964 switch(CE->getOpcode()) {
2965#define HANDLE_INST(NUM, OPCODE, CLASS) \
2966 case Instruction::OPCODE: \
2967 return translate##OPCODE(*CE, *EntryBuilder.get());
2968#include "llvm/IR/Instruction.def"
2969 default:
2970 return false;
2971 }
2972 } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
2973 if (CV->getNumOperands() == 1)
2974 return translateCopy(C, *CV->getOperand(0), *EntryBuilder.get());
2975 SmallVector<Register, 4> Ops;
2976 for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
2977 Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
2978 }
2979 EntryBuilder->buildBuildVector(Reg, Ops);
2980 } else if (auto *BA = dyn_cast<BlockAddress>(&C)) {
2981 EntryBuilder->buildBlockAddress(Reg, BA);
2982 } else
2983 return false;
2984
2985 return true;
2986}
2987
2988void IRTranslator::finalizeBasicBlock() {
2989 for (auto &BTB : SL->BitTestCases) {
2990 // Emit header first, if it wasn't already emitted.
2991 if (!BTB.Emitted)
2992 emitBitTestHeader(BTB, BTB.Parent);
2993
2994 BranchProbability UnhandledProb = BTB.Prob;
2995 for (unsigned j = 0, ej = BTB.Cases.size(); j != ej; ++j) {
2996 UnhandledProb -= BTB.Cases[j].ExtraProb;
2997 // Set the current basic block to the mbb we wish to insert the code into
2998 MachineBasicBlock *MBB = BTB.Cases[j].ThisBB;
2999 // If all cases cover a contiguous range, it is not necessary to jump to
3000 // the default block after the last bit test fails. This is because the
3001 // range check during bit test header creation has guaranteed that every
3002 // case here doesn't go outside the range. In this case, there is no need
3003 // to perform the last bit test, as it will always be true. Instead, make
3004 // the second-to-last bit-test fall through to the target of the last bit
3005 // test, and delete the last bit test.
3006
3007 MachineBasicBlock *NextMBB;
3008 if (BTB.ContiguousRange && j + 2 == ej) {
3009 // Second-to-last bit-test with contiguous range: fall through to the
3010 // target of the final bit test.
3011 NextMBB = BTB.Cases[j + 1].TargetBB;
3012 } else if (j + 1 == ej) {
3013 // For the last bit test, fall through to Default.
3014 NextMBB = BTB.Default;
3015 } else {
3016 // Otherwise, fall through to the next bit test.
3017 NextMBB = BTB.Cases[j + 1].ThisBB;
3018 }
3019
3020 emitBitTestCase(BTB, NextMBB, UnhandledProb, BTB.Reg, BTB.Cases[j], MBB);
3021
3022 if (BTB.ContiguousRange && j + 2 == ej) {
3023 // We need to record the replacement phi edge here that normally
3024 // happens in emitBitTestCase before we delete the case, otherwise the
3025 // phi edge will be lost.
3026 addMachineCFGPred({BTB.Parent->getBasicBlock(),
3027 BTB.Cases[ej - 1].TargetBB->getBasicBlock()},
3028 MBB);
3029 // Since we're not going to use the final bit test, remove it.
3030 BTB.Cases.pop_back();
3031 break;
3032 }
3033 }
3034 // This is "default" BB. We have two jumps to it. From "header" BB and from
3035 // last "case" BB, unless the latter was skipped.
3036 CFGEdge HeaderToDefaultEdge = {BTB.Parent->getBasicBlock(),
3037 BTB.Default->getBasicBlock()};
3038 addMachineCFGPred(HeaderToDefaultEdge, BTB.Parent);
3039 if (!BTB.ContiguousRange) {
3040 addMachineCFGPred(HeaderToDefaultEdge, BTB.Cases.back().ThisBB);
3041 }
3042 }
3043 SL->BitTestCases.clear();
3044
3045 for (auto &JTCase : SL->JTCases) {
3046 // Emit header first, if it wasn't already emitted.
3047 if (!JTCase.first.Emitted)
3048 emitJumpTableHeader(JTCase.second, JTCase.first, JTCase.first.HeaderBB);
3049
3050 emitJumpTable(JTCase.second, JTCase.second.MBB);
3051 }
3052 SL->JTCases.clear();
3053
3054 for (auto &SwCase : SL->SwitchCases)
3055 emitSwitchCase(SwCase, &CurBuilder->getMBB(), *CurBuilder);
3056 SL->SwitchCases.clear();
3057}
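
To make the bit-test machinery above concrete, here is a hedged sketch of the check each emitted case block effectively performs once the header has range-checked the switch value (illustrative scalar code, not the generated MIR):

#include <cstdint>

// After the header subtracts the low bound and range-checks the result, each
// bit-test block tests the shifted value against a mask of the case values
// that branch to its target block.
bool bitTestHits(uint64_t SwitchVal, uint64_t LowBound, uint64_t TargetMask) {
  return (((uint64_t)1 << (SwitchVal - LowBound)) & TargetMask) != 0;
}
// e.g. cases {1, 3, 5} sharing one target give TargetMask 0b10101 with
// LowBound 1, so bitTestHits(3, 1, 0b10101) is true and
// bitTestHits(2, 1, 0b10101) is false.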
3058
3059void IRTranslator::finalizeFunction() {
3060 // Release the memory used by the different maps we
3061 // needed during the translation.
3062 PendingPHIs.clear();
3063 VMap.reset();
3064 FrameIndices.clear();
3065 MachinePreds.clear();
3066 // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it
3067 // to avoid accessing free’d memory (in runOnMachineFunction) and to avoid
3068 // destroying it twice (in ~IRTranslator() and ~LLVMContext())
3069 EntryBuilder.reset();
3070 CurBuilder.reset();
3071 FuncInfo.clear();
3072}
3073
3074/// Returns true if a BasicBlock \p BB within a variadic function contains a
3075/// variadic musttail call.
3076static bool checkForMustTailInVarArgFn(bool IsVarArg, const BasicBlock &BB) {
3077 if (!IsVarArg)
3078 return false;
3079
3080 // Walk the block backwards, because tail calls usually only appear at the end
3081 // of a block.
3082 return std::any_of(BB.rbegin(), BB.rend(), [](const Instruction &I) {
3083 const auto *CI = dyn_cast<CallInst>(&I);
3084 return CI && CI->isMustTailCall();
3085 });
3086}
3087
3088bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
3089 MF = &CurMF;
3090 const Function &F = MF->getFunction();
3091 if (F.empty())
1: Assuming the condition is false
2: Taking false branch
3092 return false;
3093 GISelCSEAnalysisWrapper &Wrapper =
3094 getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
3095 // Set the CSEConfig and run the analysis.
3096 GISelCSEInfo *CSEInfo = nullptr;
3097 TPC = &getAnalysis<TargetPassConfig>();
3098 bool EnableCSE = EnableCSEInIRTranslator.getNumOccurrences()
3: Assuming the condition is false
4: '?' condition is false
3099 ? EnableCSEInIRTranslator
3100 : TPC->isGISelCSEEnabled();
3101
3102 if (EnableCSE) {
5: Assuming 'EnableCSE' is false
6: Taking false branch
3103 EntryBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
3104 CSEInfo = &Wrapper.get(TPC->getCSEConfig());
3105 EntryBuilder->setCSEInfo(CSEInfo);
3106 CurBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
3107 CurBuilder->setCSEInfo(CSEInfo);
3108 } else {
3109 EntryBuilder = std::make_unique<MachineIRBuilder>();
3110 CurBuilder = std::make_unique<MachineIRBuilder>();
3111 }
3112 CLI = MF->getSubtarget().getCallLowering();
3113 CurBuilder->setMF(*MF);
3114 EntryBuilder->setMF(*MF);
3115 MRI = &MF->getRegInfo();
3116 DL = &F.getParent()->getDataLayout();
3117 ORE = std::make_unique<OptimizationRemarkEmitter>(&F);
3118 const TargetMachine &TM = MF->getTarget();
3119 TM.resetTargetOptions(F);
3120 EnableOpts = OptLevel != CodeGenOpt::None && !skipFunction(F);
7: Assuming field 'OptLevel' is equal to None
3121 FuncInfo.MF = MF;
3122 if (EnableOpts)
7.1: Field 'EnableOpts' is false
8: Taking false branch
3123 FuncInfo.BPI = &getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();
3124 else
3125 FuncInfo.BPI = nullptr;
3126
3127 FuncInfo.CanLowerReturn = CLI->checkReturnTypeForCallConv(*MF);
3128
3129 const auto &TLI = *MF->getSubtarget().getTargetLowering();
3130
3131 SL = std::make_unique<GISelSwitchLowering>(this, FuncInfo);
3132 SL->init(TLI, TM, *DL);
3133
3134
3135
3136 assert(PendingPHIs.empty() && "stale PHIs");
3137
3138 // Targets which want to use big endian can enable it using
3139 // enableBigEndian()
3140 if (!DL->isLittleEndian() && !CLI->enableBigEndian()) {
9: Assuming the condition is false
10: Taking false branch
3141 // Currently we don't properly handle big endian code.
3142 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
3143 F.getSubprogram(), &F.getEntryBlock());
3144 R << "unable to translate in big endian mode";
3145 reportTranslationError(*MF, *TPC, *ORE, R);
3146 }
3147
3148 // Release the per-function state when we return, whether we succeeded or not.
3149 auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });
3150
3151 // Setup a separate basic-block for the arguments and constants
3152 MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
3153 MF->push_back(EntryBB);
3154 EntryBuilder->setMBB(*EntryBB);
3155
3156 DebugLoc DbgLoc = F.getEntryBlock().getFirstNonPHI()->getDebugLoc();
3157 SwiftError.setFunction(CurMF);
3158 SwiftError.createEntriesInEntryBlock(DbgLoc);
3159
3160 bool IsVarArg = F.isVarArg();
3161 bool HasMustTailInVarArgFn = false;
3162
3163 // Create all blocks, in IR order, to preserve the layout.
3164 for (const BasicBlock &BB: F) {
3165 auto *&MBB = BBToMBB[&BB];
3166
3167 MBB = MF->CreateMachineBasicBlock(&BB);
3168 MF->push_back(MBB);
3169
3170 if (BB.hasAddressTaken())
3171 MBB->setHasAddressTaken();
3172
3173 if (!HasMustTailInVarArgFn)
3174 HasMustTailInVarArgFn = checkForMustTailInVarArgFn(IsVarArg, BB);
3175 }
3176
3177 MF->getFrameInfo().setHasMustTailInVarArgFunc(HasMustTailInVarArgFn);
3178
3179 // Make our arguments/constants entry block fallthrough to the IR entry block.
3180 EntryBB->addSuccessor(&getMBB(F.front()));
3181
3182 if (CLI->fallBackToDAGISel(*MF)) {
11: Assuming the condition is false
12: Taking false branch
3183 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
3184 F.getSubprogram(), &F.getEntryBlock());
3185 R << "unable to lower function: " << ore::NV("Prototype", F.getType());
3186 reportTranslationError(*MF, *TPC, *ORE, R);
3187 return false;
3188 }
3189
3190 // Lower the actual args into this basic block.
3191 SmallVector<ArrayRef<Register>, 8> VRegArgs;
3192 for (const Argument &Arg: F.args()) {
13: Assuming '__begin1' is equal to '__end1'
3193 if (DL->getTypeStoreSize(Arg.getType()).isZero())
3194 continue; // Don't handle zero sized types.
3195 ArrayRef<Register> VRegs = getOrCreateVRegs(Arg);
3196 VRegArgs.push_back(VRegs);
3197
3198 if (Arg.hasSwiftErrorAttr()) {
3199 assert(VRegs.size() == 1 && "Too many vregs for Swift error");
3200 SwiftError.setCurrentVReg(EntryBB, SwiftError.getFunctionArg(), VRegs[0]);
3201 }
3202 }
3203
3204 if (!CLI->lowerFormalArguments(*EntryBuilder.get(), F, VRegArgs, FuncInfo)) {
14: Assuming the condition is false
15: Taking false branch
3205 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
3206 F.getSubprogram(), &F.getEntryBlock());
3207 R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
3208 reportTranslationError(*MF, *TPC, *ORE, R);
3209 return false;
3210 }
3211
3212 // Need to visit defs before uses when translating instructions.
3213 GISelObserverWrapper WrapperObserver;
3214 if (EnableCSE && CSEInfo)
15.1: 'EnableCSE' is false
3215 WrapperObserver.addObserver(CSEInfo);
3216 {
3217 ReversePostOrderTraversal<const Function *> RPOT(&F);
3218#ifndef NDEBUG
3219 DILocationVerifier Verifier;
3220 WrapperObserver.addObserver(&Verifier);
3221#endif // ifndef NDEBUG
3222 RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
3223 RAIIMFObserverInstaller ObsInstall(*MF, WrapperObserver);
3224 for (const BasicBlock *BB : RPOT) {
3225 MachineBasicBlock &MBB = getMBB(*BB);
3226 // Set the insertion point of all the following translations to
3227 // the end of this basic block.
3228 CurBuilder->setMBB(MBB);
3229 HasTailCall = false;
3230 for (const Instruction &Inst : *BB) {
3231 // If we translated a tail call in the last step, then we know
3232 // everything after the call is either a return, or something that is
3233 // handled by the call itself. (E.g. a lifetime marker or assume
3234 // intrinsic.) In this case, we should stop translating the block and
3235 // move on.
3236 if (HasTailCall)
15.2: Field 'HasTailCall' is false
16: Taking false branch
3237 break;
3238#ifndef NDEBUG
3239 Verifier.setCurrentInst(&Inst);
3240#endif // ifndef NDEBUG
3241 if (translate(Inst))
17: Calling 'IRTranslator::translate'
3242 continue;
3243
3244 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
3245 Inst.getDebugLoc(), BB);
3246 R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);
3247
3248 if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
3249 std::string InstStrStorage;
3250 raw_string_ostream InstStr(InstStrStorage);
3251 InstStr << Inst;
3252
3253 R << ": '" << InstStr.str() << "'";
3254 }
3255
3256 reportTranslationError(*MF, *TPC, *ORE, R);
3257 return false;
3258 }
3259
3260 finalizeBasicBlock();
3261 }
3262#ifndef NDEBUG
3263 WrapperObserver.removeObserver(&Verifier);
3264#endif
3265 }
3266
3267 finishPendingPhis();
3268
3269 SwiftError.propagateVRegs();
3270
3271 // Merge the argument lowering and constants block with its single
3272 // successor, the LLVM-IR entry block. We want the basic block to
3273 // be maximal.
3274 assert(EntryBB->succ_size() == 1 &&
3275 "Custom BB used for lowering should have only one successor");
3276 // Get the successor of the current entry block.
3277 MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
3278 assert(NewEntryBB.pred_size() == 1 &&
3279 "LLVM-IR entry block has a predecessor!?");
3280 // Move all the instructions from the current entry block to the
3281 // new entry block.
3282 NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
3283 EntryBB->end());
3284
3285 // Update the live-in information for the new entry block.
3286 for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
3287 NewEntryBB.addLiveIn(LiveIn);
3288 NewEntryBB.sortUniqueLiveIns();
3289
3290 // Get rid of the now empty basic block.
3291 EntryBB->removeSuccessor(&NewEntryBB);
3292 MF->remove(EntryBB);
3293 MF->DeleteMachineBasicBlock(EntryBB);
3294
3295 assert(&MF->front() == &NewEntryBB &&
3296 "New entry wasn't next in the list of basic block!");
3297
3298 // Initialize stack protector information.
3299 StackProtector &SP = getAnalysis<StackProtector>();
3300 SP.copyToMachineFrameInfo(MF->getFrameInfo());
3301
3302 return false;
3303}

/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/IR/Instruction.def

1//===-- llvm/Instruction.def - File that describes Instructions -*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains descriptions of the various LLVM instructions. This is
10// used as a central place for enumerating the different instructions and
11// should eventually be the place to put comments about the instructions.
12//
13//===----------------------------------------------------------------------===//
14
15// NOTE: NO INCLUDE GUARD DESIRED!
16
17// Provide definitions of macros so that users of this file do not have to
18// define everything to use it...
19//
20#ifndef FIRST_TERM_INST
21#define FIRST_TERM_INST(num)
22#endif
23#ifndef HANDLE_TERM_INST
24#ifndef HANDLE_INST
25#define HANDLE_TERM_INST(num, opcode, Class)
26#else
27#define HANDLE_TERM_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class)
28#endif
29#endif
30#ifndef LAST_TERM_INST
31#define LAST_TERM_INST(num)
32#endif
33
34#ifndef FIRST_UNARY_INST
35#define FIRST_UNARY_INST(num)
36#endif
37#ifndef HANDLE_UNARY_INST
38#ifndef HANDLE_INST
39#define HANDLE_UNARY_INST(num, opcode, instclass)
40#else
41#define HANDLE_UNARY_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class)
42#endif
43#endif
44#ifndef LAST_UNARY_INST
45#define LAST_UNARY_INST(num)
46#endif
47
48#ifndef FIRST_BINARY_INST
49#define FIRST_BINARY_INST(num)
50#endif
51#ifndef HANDLE_BINARY_INST
52#ifndef HANDLE_INST
53#define HANDLE_BINARY_INST(num, opcode, instclass)
54#else
55#define HANDLE_BINARY_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class)
56#endif
57#endif
58#ifndef LAST_BINARY_INST
59#define LAST_BINARY_INST(num)
60#endif
61
62#ifndef FIRST_MEMORY_INST
63#define FIRST_MEMORY_INST(num)
64#endif
65#ifndef HANDLE_MEMORY_INST
66#ifndef HANDLE_INST
67#define HANDLE_MEMORY_INST(num, opcode, Class)
68#else
69#define HANDLE_MEMORY_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class)
70#endif
71#endif
72#ifndef LAST_MEMORY_INST
73#define LAST_MEMORY_INST(num)
74#endif
75
76#ifndef FIRST_CAST_INST
77#define FIRST_CAST_INST(num)
78#endif
79#ifndef HANDLE_CAST_INST
80#ifndef HANDLE_INST
81#define HANDLE_CAST_INST(num, opcode, Class)
82#else
83#define HANDLE_CAST_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class)
84#endif
85#endif
86#ifndef LAST_CAST_INST
87#define LAST_CAST_INST(num)
88#endif
89
90#ifndef FIRST_FUNCLETPAD_INST
91#define FIRST_FUNCLETPAD_INST(num)
92#endif
93#ifndef HANDLE_FUNCLETPAD_INST
94#ifndef HANDLE_INST
95#define HANDLE_FUNCLETPAD_INST(num, opcode, Class)
96#else
97#define HANDLE_FUNCLETPAD_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class)
98#endif
99#endif
100#ifndef LAST_FUNCLETPAD_INST
101#define LAST_FUNCLETPAD_INST(num)
102#endif
103
104#ifndef FIRST_OTHER_INST
105#define FIRST_OTHER_INST(num)
106#endif
107#ifndef HANDLE_OTHER_INST
108#ifndef HANDLE_INST
109#define HANDLE_OTHER_INST(num, opcode, Class)
110#else
111#define HANDLE_OTHER_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class)
112#endif
113#endif
114#ifndef LAST_OTHER_INST
115#define LAST_OTHER_INST(num)
116#endif
117
118#ifndef HANDLE_USER_INST
119#define HANDLE_USER_INST(num, opc, Class) HANDLE_OTHER_INST(num, opc, Class)
120#endif
121
122// Terminator Instructions - These instructions are used to terminate a basic
123// block of the program. Every basic block must end with one of these
124// instructions for it to be a well formed basic block.
125//
126 FIRST_TERM_INST ( 1)
127HANDLE_TERM_INST ( 1, Ret , ReturnInst)
128HANDLE_TERM_INST ( 2, Br , BranchInst)
129HANDLE_TERM_INST ( 3, Switch , SwitchInst)
130HANDLE_TERM_INST ( 4, IndirectBr , IndirectBrInst)
131HANDLE_TERM_INST ( 5, Invoke , InvokeInst)
132HANDLE_TERM_INST ( 6, Resume , ResumeInst)
133HANDLE_TERM_INST ( 7, Unreachable , UnreachableInst)
134HANDLE_TERM_INST ( 8, CleanupRet , CleanupReturnInst)
135HANDLE_TERM_INST ( 9, CatchRet , CatchReturnInst)
136HANDLE_TERM_INST (10, CatchSwitch , CatchSwitchInst)
137HANDLE_TERM_INST (11, CallBr , CallBrInst) // A call-site terminator
138 LAST_TERM_INST (11)
139
140// Standard unary operators...
141 FIRST_UNARY_INST(12)
142HANDLE_UNARY_INST(12, FNeg , UnaryOperator)
143 LAST_UNARY_INST(12)
144
145// Standard binary operators...
146 FIRST_BINARY_INST(13)
147HANDLE_BINARY_INST(13, Add , BinaryOperator)
148HANDLE_BINARY_INST(14, FAdd , BinaryOperator)
149HANDLE_BINARY_INST(15, Sub , BinaryOperator)
150HANDLE_BINARY_INST(16, FSub , BinaryOperator)
151HANDLE_BINARY_INST(17, Mul , BinaryOperator)
152HANDLE_BINARY_INST(18, FMul , BinaryOperator)
153HANDLE_BINARY_INST(19, UDiv , BinaryOperator)
154HANDLE_BINARY_INST(20, SDiv , BinaryOperator)
155HANDLE_BINARY_INST(21, FDiv , BinaryOperator)
156HANDLE_BINARY_INST(22, URem , BinaryOperator)
157HANDLE_BINARY_INST(23, SRem , BinaryOperator)
158HANDLE_BINARY_INST(24, FRem , BinaryOperator)
159
160// Logical operators (integer operands)
161HANDLE_BINARY_INST(25, Shl , BinaryOperator) // Shift left (logical)
162HANDLE_BINARY_INST(26, LShr , BinaryOperator) // Shift right (logical)
163HANDLE_BINARY_INST(27, AShr , BinaryOperator) // Shift right (arithmetic)
164HANDLE_BINARY_INST(28, And , BinaryOperator)
165HANDLE_BINARY_INST(29, Or , BinaryOperator)
166HANDLE_BINARY_INST(30, Xor , BinaryOperator)
167 LAST_BINARY_INST(30)
168
169// Memory operators...
170 FIRST_MEMORY_INST(31)
171HANDLE_MEMORY_INST(31, Alloca, AllocaInst) // Stack management
172HANDLE_MEMORY_INST(32, Load , LoadInst ) // Memory manipulation instrs
173HANDLE_MEMORY_INST(33, Store , StoreInst )
174HANDLE_MEMORY_INST(34, GetElementPtr, GetElementPtrInst)
175HANDLE_MEMORY_INST(35, Fence , FenceInst )
176HANDLE_MEMORY_INST(36, AtomicCmpXchg , AtomicCmpXchgInst )
177HANDLE_MEMORY_INST(37, AtomicRMW , AtomicRMWInst )
178 LAST_MEMORY_INST(37)
179
180// Cast operators ...
181// NOTE: The order matters here because CastInst::isEliminableCastPair
182// NOTE: (see Instructions.cpp) encodes a table based on this ordering.
183 FIRST_CAST_INST(38)
184HANDLE_CAST_INST(38, Trunc , TruncInst ) // Truncate integers
185HANDLE_CAST_INST(39, ZExt , ZExtInst ) // Zero extend integers
186HANDLE_CAST_INST(40, SExt , SExtInst ) // Sign extend integers
187HANDLE_CAST_INST(41, FPToUI , FPToUIInst ) // floating point -> UInt
188HANDLE_CAST_INST(42, FPToSI , FPToSIInst ) // floating point -> SInt
189HANDLE_CAST_INST(43, UIToFP , UIToFPInst ) // UInt -> floating point
190HANDLE_CAST_INST(44, SIToFP , SIToFPInst ) // SInt -> floating point
191HANDLE_CAST_INST(45, FPTrunc , FPTruncInst ) // Truncate floating point
192HANDLE_CAST_INST(46, FPExt , FPExtInst ) // Extend floating point
193HANDLE_CAST_INST(47, PtrToInt, PtrToIntInst) // Pointer -> Integer
194HANDLE_CAST_INST(48, IntToPtr, IntToPtrInst) // Integer -> Pointer
195HANDLE_CAST_INST(49, BitCast , BitCastInst ) // Type cast
196HANDLE_CAST_INST(50, AddrSpaceCast, AddrSpaceCastInst) // addrspace cast
197 LAST_CAST_INST(50)
198
199 FIRST_FUNCLETPAD_INST(51)
200HANDLE_FUNCLETPAD_INST(51, CleanupPad, CleanupPadInst)
201HANDLE_FUNCLETPAD_INST(52, CatchPad , CatchPadInst)
202 LAST_FUNCLETPAD_INST(52)
203
204// Other operators...
205 FIRST_OTHER_INST(53)
206HANDLE_OTHER_INST(53, ICmp , ICmpInst ) // Integer comparison instruction
207HANDLE_OTHER_INST(54, FCmp , FCmpInst ) // Floating point comparison instr.
21: Calling 'IRTranslator::translateFCmp'
208HANDLE_OTHER_INST(55, PHI , PHINode ) // PHI node instruction
209HANDLE_OTHER_INST(56, Call , CallInst ) // Call a function
210HANDLE_OTHER_INST(57, Select , SelectInst ) // select instruction
211HANDLE_USER_INST (58, UserOp1, Instruction) // May be used internally in a pass
212HANDLE_USER_INST (59, UserOp2, Instruction) // Internal to passes only
213HANDLE_OTHER_INST(60, VAArg , VAArgInst ) // vaarg instruction
214HANDLE_OTHER_INST(61, ExtractElement, ExtractElementInst)// extract from vector
215HANDLE_OTHER_INST(62, InsertElement, InsertElementInst) // insert into vector
216HANDLE_OTHER_INST(63, ShuffleVector, ShuffleVectorInst) // shuffle two vectors.
217HANDLE_OTHER_INST(64, ExtractValue, ExtractValueInst)// extract from aggregate
218HANDLE_OTHER_INST(65, InsertValue, InsertValueInst) // insert into aggregate
219HANDLE_OTHER_INST(66, LandingPad, LandingPadInst) // Landing pad instruction.
220HANDLE_OTHER_INST(67, Freeze, FreezeInst) // Freeze instruction.
221 LAST_OTHER_INST(67)
222
223#undef FIRST_TERM_INST
224#undef HANDLE_TERM_INST
225#undef LAST_TERM_INST
226
227#undef FIRST_UNARY_INST
228#undef HANDLE_UNARY_INST
229#undef LAST_UNARY_INST
230
231#undef FIRST_BINARY_INST
232#undef HANDLE_BINARY_INST
233#undef LAST_BINARY_INST
234
235#undef FIRST_MEMORY_INST
236#undef HANDLE_MEMORY_INST
237#undef LAST_MEMORY_INST
238
239#undef FIRST_CAST_INST
240#undef HANDLE_CAST_INST
241#undef LAST_CAST_INST
242
243#undef FIRST_FUNCLETPAD_INST
244#undef HANDLE_FUNCLETPAD_INST
245#undef LAST_FUNCLETPAD_INST
246
247#undef FIRST_OTHER_INST
248#undef HANDLE_OTHER_INST
249#undef LAST_OTHER_INST
250
251#undef HANDLE_USER_INST
252
253#ifdef HANDLE_INST
254#undef HANDLE_INST
255#endif

/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h

1//===- llvm/CodeGen/GlobalISel/IRTranslator.h - IRTranslator ----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file declares the IRTranslator pass.
10/// This pass is responsible for translating LLVM IR into MachineInstr.
11/// It uses target hooks to lower the ABI but, aside from that, the code
12/// it generates is generic. This is the default translator used for
13/// GlobalISel.
14///
15/// \todo Replace the comments with actual doxygen comments.
16//===----------------------------------------------------------------------===//
17
18#ifndef LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H
19#define LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H
20
21#include "llvm/ADT/DenseMap.h"
22#include "llvm/ADT/SmallVector.h"
23#include "llvm/CodeGen/FunctionLoweringInfo.h"
24#include "llvm/CodeGen/GlobalISel/CSEMIRBuilder.h"
25#include "llvm/CodeGen/MachineFunctionPass.h"
26#include "llvm/CodeGen/SwiftErrorValueTracking.h"
27#include "llvm/CodeGen/SwitchLoweringUtils.h"
28#include "llvm/IR/Intrinsics.h"
29#include "llvm/Support/Allocator.h"
30#include "llvm/Support/CodeGen.h"
31#include <memory>
32#include <utility>
33
34namespace llvm {
35
36class AllocaInst;
37class BasicBlock;
38class CallInst;
39class CallLowering;
40class Constant;
41class ConstrainedFPIntrinsic;
42class DataLayout;
43class Instruction;
44class MachineBasicBlock;
45class MachineFunction;
46class MachineInstr;
47class MachineRegisterInfo;
48class OptimizationRemarkEmitter;
49class PHINode;
50class TargetPassConfig;
51class User;
52class Value;
53
54 // Technically the pass should run on a hypothetical MachineModule,
55// since it should translate Global into some sort of MachineGlobal.
56// The MachineGlobal should ultimately just be a transfer of ownership of
57// the interesting bits that are relevant to represent a global value.
58 // That being said, we could investigate what it would cost to just duplicate
59// the information from the LLVM IR.
60// The idea is that ultimately we would be able to free up the memory used
61// by the LLVM IR as soon as the translation is over.
62class IRTranslator : public MachineFunctionPass {
63public:
64 static char ID;
65
66private:
67 /// Interface used to lower everything related to calls.
68 const CallLowering *CLI;
69
70 /// This class contains the mapping from Values to vreg-related data.
71 class ValueToVRegInfo {
72 public:
73 ValueToVRegInfo() = default;
74
75 using VRegListT = SmallVector<Register, 1>;
76 using OffsetListT = SmallVector<uint64_t, 1>;
77
78 using const_vreg_iterator =
79 DenseMap<const Value *, VRegListT *>::const_iterator;
80 using const_offset_iterator =
81 DenseMap<const Value *, OffsetListT *>::const_iterator;
82
83 inline const_vreg_iterator vregs_end() const { return ValToVRegs.end(); }
84
85 VRegListT *getVRegs(const Value &V) {
86 auto It = ValToVRegs.find(&V);
87 if (It != ValToVRegs.end())
88 return It->second;
89
90 return insertVRegs(V);
91 }
92
93 OffsetListT *getOffsets(const Value &V) {
94 auto It = TypeToOffsets.find(V.getType());
95 if (It != TypeToOffsets.end())
96 return It->second;
97
98 return insertOffsets(V);
99 }
100
101 const_vreg_iterator findVRegs(const Value &V) const {
102 return ValToVRegs.find(&V);
103 }
104
105 bool contains(const Value &V) const {
106 return ValToVRegs.find(&V) != ValToVRegs.end();
107 }
108
109 void reset() {
110 ValToVRegs.clear();
111 TypeToOffsets.clear();
112 VRegAlloc.DestroyAll();
113 OffsetAlloc.DestroyAll();
114 }
115
116 private:
117 VRegListT *insertVRegs(const Value &V) {
118 assert(ValToVRegs.find(&V) == ValToVRegs.end() && "Value already exists");
119
120 // We placement new using our fast allocator since we never try to free
121 // the vectors until translation is finished.
122 auto *VRegList = new (VRegAlloc.Allocate()) VRegListT();
123 ValToVRegs[&V] = VRegList;
124 return VRegList;
125 }
126
127 OffsetListT *insertOffsets(const Value &V) {
128 assert(TypeToOffsets.find(V.getType()) == TypeToOffsets.end() &&
129 "Type already exists");
130
131 auto *OffsetList = new (OffsetAlloc.Allocate()) OffsetListT();
132 TypeToOffsets[V.getType()] = OffsetList;
133 return OffsetList;
134 }
135 SpecificBumpPtrAllocator<VRegListT> VRegAlloc;
136 SpecificBumpPtrAllocator<OffsetListT> OffsetAlloc;
137
138 // We store pointers to vectors here since references may be invalidated
139 // while we hold them if we stored the vectors directly.
140 DenseMap<const Value *, VRegListT*> ValToVRegs;
141 DenseMap<const Type *, OffsetListT*> TypeToOffsets;
142 };
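
The rationale in the comment above is the usual DenseMap growth hazard. A small hedged sketch of what storing the vectors by value would risk (illustrative only, not LLVM code):

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"

// If the vreg lists were stored by value, a reference obtained before further
// insertions could dangle once the map rehashes and moves its buckets.
void denseMapReferenceHazard() {
  llvm::DenseMap<int, llvm::SmallVector<int, 1>> ByValue;
  llvm::SmallVector<int, 1> &First = ByValue[0]; // reference into map storage
  for (int I = 1; I < 128; ++I)
    ByValue[I].push_back(I);                     // growth may invalidate 'First'
  (void)First; // ValueToVRegInfo avoids this by allocating the lists separately
               // and keeping stable pointers in the map.
}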
143
144 /// Mapping of the values of the current LLVM IR function to the related
145 /// virtual registers and offsets.
146 ValueToVRegInfo VMap;
147
148 // N.b. it's not completely obvious that this will be sufficient for every
149 // LLVM IR construct (with "invoke" being the obvious candidate to mess up our
150 // lives).
151 DenseMap<const BasicBlock *, MachineBasicBlock *> BBToMBB;
152
153 // One BasicBlock can be translated to multiple MachineBasicBlocks. For such
154 // BasicBlocks translated to multiple MachineBasicBlocks, MachinePreds retains
155 // a mapping between the edges arriving at the BasicBlock to the corresponding
156 // created MachineBasicBlocks. Some BasicBlocks that get translated to a
157 // single MachineBasicBlock may also end up in this Map.
158 using CFGEdge = std::pair<const BasicBlock *, const BasicBlock *>;
159 DenseMap<CFGEdge, SmallVector<MachineBasicBlock *, 1>> MachinePreds;
160
161 // List of stubbed PHI instructions, for values and basic blocks to be filled
162 // in once all MachineBasicBlocks have been created.
163 SmallVector<std::pair<const PHINode *, SmallVector<MachineInstr *, 1>>, 4>
164 PendingPHIs;
165
166 /// Record of what frame index has been allocated to specified allocas for
167 /// this function.
168 DenseMap<const AllocaInst *, int> FrameIndices;
169
170 SwiftErrorValueTracking SwiftError;
171
172 /// \name Methods for translating from LLVM IR to MachineInstr.
173 /// \see ::translate for general information on the translate methods.
174 /// @{
175
176 /// Translate \p Inst into its corresponding MachineInstr instruction(s).
177 /// Insert the newly translated instruction(s) right where the CurBuilder
178 /// is set.
179 ///
180 /// The general algorithm is:
181 /// 1. Look for a virtual register for each operand or
182 /// create one.
183 /// 2. Update the VMap accordingly.
184 /// 2.alt. For constant arguments, if they are compile time constants,
185 /// produce an immediate in the right operand and do not touch
186 /// ValToReg. Actually we will go with a virtual register for each
187 /// constants because it may be expensive to actually materialize the
188 /// constant. Moreover, if the constant spans on several instructions,
189 /// CSE may not catch them.
190 /// => Update ValToVReg and remember that we saw a constant in Constants.
191 /// We will materialize all the constants in finalize.
192 /// Note: we would need to do something so that we can recognize such operand
193 /// as constants.
194 /// 3. Create the generic instruction.
195 ///
196 /// \return true if the translation succeeded.
197 bool translate(const Instruction &Inst);
198
199 /// Materialize \p C into virtual-register \p Reg. The generic instructions
200 /// performing this materialization will be inserted into the entry block of
201 /// the function.
202 ///
203 /// \return true if the materialization succeeded.
204 bool translate(const Constant &C, Register Reg);
205
206 // Translate U as a copy of V.
207 bool translateCopy(const User &U, const Value &V,
208 MachineIRBuilder &MIRBuilder);
209
210 /// Translate an LLVM bitcast into generic IR. Either a COPY or a G_BITCAST is
211 /// emitted.
212 bool translateBitCast(const User &U, MachineIRBuilder &MIRBuilder);
213
214 /// Translate an LLVM load instruction into generic IR.
215 bool translateLoad(const User &U, MachineIRBuilder &MIRBuilder);
216
217 /// Translate an LLVM store instruction into generic IR.
218 bool translateStore(const User &U, MachineIRBuilder &MIRBuilder);
219
220 /// Translate an LLVM string intrinsic (memcpy, memset, ...).
221 bool translateMemFunc(const CallInst &CI, MachineIRBuilder &MIRBuilder,
222 unsigned Opcode);
223
224 void getStackGuard(Register DstReg, MachineIRBuilder &MIRBuilder);
225
226 bool translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
227 MachineIRBuilder &MIRBuilder);
228 bool translateFixedPointIntrinsic(unsigned Op, const CallInst &CI,
229 MachineIRBuilder &MIRBuilder);
230
231 /// Helper function for translateSimpleIntrinsic.
232 /// \return The generic opcode for \p IntrinsicID if \p IntrinsicID is a
233 /// simple intrinsic (ceil, fabs, etc.). Otherwise, returns
234 /// Intrinsic::not_intrinsic.
235 unsigned getSimpleIntrinsicOpcode(Intrinsic::ID ID);
236
237 /// Translates the intrinsics defined in getSimpleIntrinsicOpcode.
238 /// \return true if the translation succeeded.
239 bool translateSimpleIntrinsic(const CallInst &CI, Intrinsic::ID ID,
240 MachineIRBuilder &MIRBuilder);
241
242 bool translateConstrainedFPIntrinsic(const ConstrainedFPIntrinsic &FPI,
243 MachineIRBuilder &MIRBuilder);
244
245 bool translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
246 MachineIRBuilder &MIRBuilder);
247
248 bool translateInlineAsm(const CallBase &CB, MachineIRBuilder &MIRBuilder);
249
250 /// Returns true if the value should be split into multiple LLTs.
251 /// If \p Offsets is given then the split type's offsets will be stored in it.
252 /// If \p Offsets is not empty it will be cleared first.
253 bool valueIsSplit(const Value &V,
254 SmallVectorImpl<uint64_t> *Offsets = nullptr);
255
256 /// Common code for translating normal calls or invokes.
257 bool translateCallBase(const CallBase &CB, MachineIRBuilder &MIRBuilder);
258
259 /// Translate call instruction.
260 /// \pre \p U is a call instruction.
261 bool translateCall(const User &U, MachineIRBuilder &MIRBuilder);
262
263 /// When an invoke or a cleanupret unwinds to the next EH pad, there are
264 /// many places it could ultimately go. In the IR, we have a single unwind
265 /// destination, but in the machine CFG, we enumerate all the possible blocks.
266 /// This function skips over imaginary basic blocks that hold catchswitch
267 /// instructions, and finds all the "real" machine
268 /// basic block destinations. As those destinations may not be successors of
269 /// EHPadBB, here we also calculate the edge probability to those
270 /// destinations. The passed-in Prob is the edge probability to EHPadBB.
271 bool findUnwindDestinations(
272 const BasicBlock *EHPadBB, BranchProbability Prob,
273 SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
274 &UnwindDests);
275
276 bool translateInvoke(const User &U, MachineIRBuilder &MIRBuilder);
277
278 bool translateCallBr(const User &U, MachineIRBuilder &MIRBuilder);
279
280 bool translateLandingPad(const User &U, MachineIRBuilder &MIRBuilder);
281
282 /// Translate one of LLVM's cast instructions into MachineInstrs, with the
283 /// given generic Opcode.
284 bool translateCast(unsigned Opcode, const User &U,
285 MachineIRBuilder &MIRBuilder);
286
287 /// Translate a phi instruction.
288 bool translatePHI(const User &U, MachineIRBuilder &MIRBuilder);
289
290 /// Translate a comparison (icmp or fcmp) instruction or constant.
291 bool translateCompare(const User &U, MachineIRBuilder &MIRBuilder);
292
293 /// Translate an integer compare instruction (or constant).
294 bool translateICmp(const User &U, MachineIRBuilder &MIRBuilder) {
295 return translateCompare(U, MIRBuilder);
296 }
297
298 /// Translate a floating-point compare instruction (or constant).
299 bool translateFCmp(const User &U, MachineIRBuilder &MIRBuilder) {
300 return translateCompare(U, MIRBuilder);
22: Calling 'IRTranslator::translateCompare'
301 }
302
303 /// Add remaining operands onto phis we've translated. Executed after all
304 /// MachineBasicBlocks for the function have been created.
305 void finishPendingPhis();
306
307 /// Translate \p Inst into a unary operation \p Opcode.
308 /// \pre \p U is a unary operation.
309 bool translateUnaryOp(unsigned Opcode, const User &U,
310 MachineIRBuilder &MIRBuilder);
311
312 /// Translate \p Inst into a binary operation \p Opcode.
313 /// \pre \p U is a binary operation.
314 bool translateBinaryOp(unsigned Opcode, const User &U,
315 MachineIRBuilder &MIRBuilder);
316
317 /// If the set of cases should be emitted as a series of branches, return
318 /// true. If we should emit this as a bunch of and/or'd together conditions,
319 /// return false.
320 bool shouldEmitAsBranches(const std::vector<SwitchCG::CaseBlock> &Cases);
321 /// Helper method for findMergedConditions.
322 /// This function emits a branch and is used at the leaves of an OR or an
323 /// AND operator tree.
324 void emitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB,
325 MachineBasicBlock *FBB,
326 MachineBasicBlock *CurBB,
327 MachineBasicBlock *SwitchBB,
328 BranchProbability TProb,
329 BranchProbability FProb, bool InvertCond);
330 /// Used during condbr translation to find trees of conditions that can be
331 /// optimized.
332 void findMergedConditions(const Value *Cond, MachineBasicBlock *TBB,
333 MachineBasicBlock *FBB, MachineBasicBlock *CurBB,
334 MachineBasicBlock *SwitchBB,
335 Instruction::BinaryOps Opc, BranchProbability TProb,
336 BranchProbability FProb, bool InvertCond);
337
338 /// Translate branch (br) instruction.
339 /// \pre \p U is a branch instruction.
340 bool translateBr(const User &U, MachineIRBuilder &MIRBuilder);
341
342 // Begin switch lowering functions.
343 bool emitJumpTableHeader(SwitchCG::JumpTable &JT,
344 SwitchCG::JumpTableHeader &JTH,
345 MachineBasicBlock *HeaderBB);
346 void emitJumpTable(SwitchCG::JumpTable &JT, MachineBasicBlock *MBB);
347
348 void emitSwitchCase(SwitchCG::CaseBlock &CB, MachineBasicBlock *SwitchBB,
349 MachineIRBuilder &MIB);
350
351 /// Generate code for the BitTest header block, which precedes each sequence of
352 /// BitTestCases.
353 void emitBitTestHeader(SwitchCG::BitTestBlock &BTB,
354 MachineBasicBlock *SwitchMBB);
355 /// Generate code to produce one "bit test" for a given BitTestCase \p B.
356 void emitBitTestCase(SwitchCG::BitTestBlock &BB, MachineBasicBlock *NextMBB,
357 BranchProbability BranchProbToNext, Register Reg,
358 SwitchCG::BitTestCase &B, MachineBasicBlock *SwitchBB);
359
360 bool lowerJumpTableWorkItem(
361 SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
362 MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
363 MachineIRBuilder &MIB, MachineFunction::iterator BBI,
364 BranchProbability UnhandledProbs, SwitchCG::CaseClusterIt I,
365 MachineBasicBlock *Fallthrough, bool FallthroughUnreachable);
366
367 bool lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I, Value *Cond,
368 MachineBasicBlock *Fallthrough,
369 bool FallthroughUnreachable,
370 BranchProbability UnhandledProbs,
371 MachineBasicBlock *CurMBB,
372 MachineIRBuilder &MIB,
373 MachineBasicBlock *SwitchMBB);
374
375 bool lowerBitTestWorkItem(
376 SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
377 MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
378 MachineIRBuilder &MIB, MachineFunction::iterator BBI,
379 BranchProbability DefaultProb, BranchProbability UnhandledProbs,
380 SwitchCG::CaseClusterIt I, MachineBasicBlock *Fallthrough,
381 bool FallthroughUnreachable);
382
383 bool lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W, Value *Cond,
384 MachineBasicBlock *SwitchMBB,
385 MachineBasicBlock *DefaultMBB,
386 MachineIRBuilder &MIB);
387
388 bool translateSwitch(const User &U, MachineIRBuilder &MIRBuilder);
389 // End switch lowering section.
390
391 bool translateIndirectBr(const User &U, MachineIRBuilder &MIRBuilder);
392
393 bool translateExtractValue(const User &U, MachineIRBuilder &MIRBuilder);
394
395 bool translateInsertValue(const User &U, MachineIRBuilder &MIRBuilder);
396
397 bool translateSelect(const User &U, MachineIRBuilder &MIRBuilder);
398
399 bool translateGetElementPtr(const User &U, MachineIRBuilder &MIRBuilder);
400
401 bool translateAlloca(const User &U, MachineIRBuilder &MIRBuilder);
402
403 /// Translate return (ret) instruction.
404 /// The target needs to implement CallLowering::lowerReturn for
405 /// this to succeed.
406 /// \pre \p U is a return instruction.
407 bool translateRet(const User &U, MachineIRBuilder &MIRBuilder);
408
409 bool translateFNeg(const User &U, MachineIRBuilder &MIRBuilder);
410
411 bool translateAdd(const User &U, MachineIRBuilder &MIRBuilder) {
412 return translateBinaryOp(TargetOpcode::G_ADD, U, MIRBuilder);
413 }
414 bool translateSub(const User &U, MachineIRBuilder &MIRBuilder) {
415 return translateBinaryOp(TargetOpcode::G_SUB, U, MIRBuilder);
416 }
417 bool translateAnd(const User &U, MachineIRBuilder &MIRBuilder) {
418 return translateBinaryOp(TargetOpcode::G_AND, U, MIRBuilder);
419 }
420 bool translateMul(const User &U, MachineIRBuilder &MIRBuilder) {
421 return translateBinaryOp(TargetOpcode::G_MUL, U, MIRBuilder);
422 }
423 bool translateOr(const User &U, MachineIRBuilder &MIRBuilder) {
424 return translateBinaryOp(TargetOpcode::G_OR, U, MIRBuilder);
425 }
426 bool translateXor(const User &U, MachineIRBuilder &MIRBuilder) {
427 return translateBinaryOp(TargetOpcode::G_XOR, U, MIRBuilder);
428 }
429
430 bool translateUDiv(const User &U, MachineIRBuilder &MIRBuilder) {
431 return translateBinaryOp(TargetOpcode::G_UDIV, U, MIRBuilder);
432 }
433 bool translateSDiv(const User &U, MachineIRBuilder &MIRBuilder) {
434 return translateBinaryOp(TargetOpcode::G_SDIV, U, MIRBuilder);
435 }
436 bool translateURem(const User &U, MachineIRBuilder &MIRBuilder) {
437 return translateBinaryOp(TargetOpcode::G_UREM, U, MIRBuilder);
438 }
439 bool translateSRem(const User &U, MachineIRBuilder &MIRBuilder) {
440 return translateBinaryOp(TargetOpcode::G_SREM, U, MIRBuilder);
441 }
442 bool translateIntToPtr(const User &U, MachineIRBuilder &MIRBuilder) {
443 return translateCast(TargetOpcode::G_INTTOPTR, U, MIRBuilder);
444 }
445 bool translatePtrToInt(const User &U, MachineIRBuilder &MIRBuilder) {
446 return translateCast(TargetOpcode::G_PTRTOINT, U, MIRBuilder);
447 }
448 bool translateTrunc(const User &U, MachineIRBuilder &MIRBuilder) {
449 return translateCast(TargetOpcode::G_TRUNC, U, MIRBuilder);
450 }
451 bool translateFPTrunc(const User &U, MachineIRBuilder &MIRBuilder) {
452 return translateCast(TargetOpcode::G_FPTRUNC, U, MIRBuilder);
453 }
454 bool translateFPExt(const User &U, MachineIRBuilder &MIRBuilder) {
455 return translateCast(TargetOpcode::G_FPEXT, U, MIRBuilder);
456 }
457 bool translateFPToUI(const User &U, MachineIRBuilder &MIRBuilder) {
458 return translateCast(TargetOpcode::G_FPTOUI, U, MIRBuilder);
459 }
460 bool translateFPToSI(const User &U, MachineIRBuilder &MIRBuilder) {
461 return translateCast(TargetOpcode::G_FPTOSI, U, MIRBuilder);
462 }
463 bool translateUIToFP(const User &U, MachineIRBuilder &MIRBuilder) {
464 return translateCast(TargetOpcode::G_UITOFP, U, MIRBuilder);
465 }
466 bool translateSIToFP(const User &U, MachineIRBuilder &MIRBuilder) {
467 return translateCast(TargetOpcode::G_SITOFP, U, MIRBuilder);
468 }
469 bool translateUnreachable(const User &U, MachineIRBuilder &MIRBuilder) {
470 return true;
471 }
472 bool translateSExt(const User &U, MachineIRBuilder &MIRBuilder) {
473 return translateCast(TargetOpcode::G_SEXT, U, MIRBuilder);
474 }
475
476 bool translateZExt(const User &U, MachineIRBuilder &MIRBuilder) {
477 return translateCast(TargetOpcode::G_ZEXT, U, MIRBuilder);
478 }
479
480 bool translateShl(const User &U, MachineIRBuilder &MIRBuilder) {
481 return translateBinaryOp(TargetOpcode::G_SHL, U, MIRBuilder);
482 }
483 bool translateLShr(const User &U, MachineIRBuilder &MIRBuilder) {
484 return translateBinaryOp(TargetOpcode::G_LSHR, U, MIRBuilder);
485 }
486 bool translateAShr(const User &U, MachineIRBuilder &MIRBuilder) {
487 return translateBinaryOp(TargetOpcode::G_ASHR, U, MIRBuilder);
488 }
489
490 bool translateFAdd(const User &U, MachineIRBuilder &MIRBuilder) {
491 return translateBinaryOp(TargetOpcode::G_FADD, U, MIRBuilder);
492 }
493 bool translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
494 return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
495 }
496 bool translateFMul(const User &U, MachineIRBuilder &MIRBuilder) {
497 return translateBinaryOp(TargetOpcode::G_FMUL, U, MIRBuilder);
498 }
499 bool translateFDiv(const User &U, MachineIRBuilder &MIRBuilder) {
500 return translateBinaryOp(TargetOpcode::G_FDIV, U, MIRBuilder);
501 }
502 bool translateFRem(const User &U, MachineIRBuilder &MIRBuilder) {
503 return translateBinaryOp(TargetOpcode::G_FREM, U, MIRBuilder);
504 }
505
506 bool translateVAArg(const User &U, MachineIRBuilder &MIRBuilder);
507
508 bool translateInsertElement(const User &U, MachineIRBuilder &MIRBuilder);
509
510 bool translateExtractElement(const User &U, MachineIRBuilder &MIRBuilder);
511
512 bool translateShuffleVector(const User &U, MachineIRBuilder &MIRBuilder);
513
514 bool translateAtomicCmpXchg(const User &U, MachineIRBuilder &MIRBuilder);
515 bool translateAtomicRMW(const User &U, MachineIRBuilder &MIRBuilder);
516 bool translateFence(const User &U, MachineIRBuilder &MIRBuilder);
517 bool translateFreeze(const User &U, MachineIRBuilder &MIRBuilder);
518
519 // Stubs to keep the compiler happy while we implement the rest of the
520 // translation.
521 bool translateResume(const User &U, MachineIRBuilder &MIRBuilder) {
522 return false;
523 }
524 bool translateCleanupRet(const User &U, MachineIRBuilder &MIRBuilder) {
525 return false;
526 }
527 bool translateCatchRet(const User &U, MachineIRBuilder &MIRBuilder) {
528 return false;
529 }
530 bool translateCatchSwitch(const User &U, MachineIRBuilder &MIRBuilder) {
531 return false;
532 }
533 bool translateAddrSpaceCast(const User &U, MachineIRBuilder &MIRBuilder) {
534 return translateCast(TargetOpcode::G_ADDRSPACE_CAST, U, MIRBuilder);
535 }
536 bool translateCleanupPad(const User &U, MachineIRBuilder &MIRBuilder) {
537 return false;
538 }
539 bool translateCatchPad(const User &U, MachineIRBuilder &MIRBuilder) {
540 return false;
541 }
542 bool translateUserOp1(const User &U, MachineIRBuilder &MIRBuilder) {
543 return false;
544 }
545 bool translateUserOp2(const User &U, MachineIRBuilder &MIRBuilder) {
546 return false;
547 }
548
549 /// @}
550
551 // Builder for machine instruction a la IRBuilder.
552 // I.e., compared to regular MIBuilder, this one also inserts the instruction
553 // in the current block, it can create blocks, etc., basically a kind of
554 // IRBuilder, but for Machine IR.
555 // CSEMIRBuilder CurBuilder;
556 std::unique_ptr<MachineIRBuilder> CurBuilder;
557
558 // Builder set to the entry block (just after ABI lowering instructions). Used
559 // as a convenient location for Constants.
560 // CSEMIRBuilder EntryBuilder;
561 std::unique_ptr<MachineIRBuilder> EntryBuilder;
562
563 // The MachineFunction currently being translated.
564 MachineFunction *MF;
565
566 /// MachineRegisterInfo used to create virtual registers.
567 MachineRegisterInfo *MRI = nullptr;
568
569 const DataLayout *DL;
570
571 /// Current target configuration. Controls how the pass handles errors.
572 const TargetPassConfig *TPC;
573
574 CodeGenOpt::Level OptLevel;
575
576 /// Current optimization remark emitter. Used to report failures.
577 std::unique_ptr<OptimizationRemarkEmitter> ORE;
578
579 FunctionLoweringInfo FuncInfo;
580
581 // True when optimizations are enabled for this function: the Target Machine
582 // requests optimization and the function is not skipped (e.g. via optnone).
583 bool EnableOpts = false;
584
585 /// True when the block contains a tail call. This allows the IRTranslator to
586 /// stop translating such blocks early.
587 bool HasTailCall = false;
588
589 /// Switch analysis and optimization.
590 class GISelSwitchLowering : public SwitchCG::SwitchLowering {
591 public:
592 GISelSwitchLowering(IRTranslator *irt, FunctionLoweringInfo &funcinfo)
593 : SwitchLowering(funcinfo), IRT(irt) {
594 assert(irt && "irt is null!");
595 }
596
597 virtual void addSuccessorWithProb(
598 MachineBasicBlock *Src, MachineBasicBlock *Dst,
599 BranchProbability Prob = BranchProbability::getUnknown()) override {
600 IRT->addSuccessorWithProb(Src, Dst, Prob);
601 }
602
603 virtual ~GISelSwitchLowering() = default;
604
605 private:
606 IRTranslator *IRT;
607 };
608
609 std::unique_ptr<GISelSwitchLowering> SL;
610
611 // * Insert all the code needed to materialize the constants
612 // at the proper place. E.g., Entry block or dominator block
613 // of each constant depending on how fancy we want to be.
614 // * Clear the different maps.
615 void finalizeFunction();
616
617 // Handle emitting jump tables for each basic block.
618 void finalizeBasicBlock();
619
620 /// Get the VRegs that represent \p Val.
621 /// Non-aggregate types have just one corresponding VReg and the list can be
622 /// used as a single "unsigned". Aggregates get flattened. If such VRegs do
623 /// not exist, they are created.
624 ArrayRef<Register> getOrCreateVRegs(const Value &Val);
625
626 Register getOrCreateVReg(const Value &Val) {
627 auto Regs = getOrCreateVRegs(Val);
628 if (Regs.empty())
629 return 0;
630     assert(Regs.size() == 1 &&
631            "attempt to get single VReg for aggregate or void");
632 return Regs[0];
633 }
634
635 /// Allocate some vregs and offsets in the VMap. Then populate just the
636 /// offsets while leaving the vregs empty.
637 ValueToVRegInfo::VRegListT &allocateVRegs(const Value &Val);
638
639  /// Get the frame index that represents \p AI.
640  /// If such a frame index does not exist, it is created.
641 int getOrCreateFrameIndex(const AllocaInst &AI);
642
643 /// Get the alignment of the given memory operation instruction. This will
644 /// either be the explicitly specified value or the ABI-required alignment for
645 /// the type being accessed (according to the Module's DataLayout).
646 Align getMemOpAlign(const Instruction &I);
647
648 /// Get the MachineBasicBlock that represents \p BB. Specifically, the block
649 /// returned will be the head of the translated block (suitable for branch
650 /// destinations).
651 MachineBasicBlock &getMBB(const BasicBlock &BB);
652
653 /// Record \p NewPred as a Machine predecessor to `Edge.second`, corresponding
654 /// to `Edge.first` at the IR level. This is used when IRTranslation creates
655 /// multiple MachineBasicBlocks for a given IR block and the CFG is no longer
656 /// represented simply by the IR-level CFG.
657 void addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred);
658
659 /// Returns the Machine IR predecessors for the given IR CFG edge. Usually
660 /// this is just the single MachineBasicBlock corresponding to the predecessor
661 /// in the IR. More complex lowering can result in multiple MachineBasicBlocks
662 /// preceding the original though (e.g. switch instructions).
663 SmallVector<MachineBasicBlock *, 1> getMachinePredBBs(CFGEdge Edge) {
664 auto RemappedEdge = MachinePreds.find(Edge);
665 if (RemappedEdge != MachinePreds.end())
666 return RemappedEdge->second;
667 return SmallVector<MachineBasicBlock *, 4>(1, &getMBB(*Edge.first));
668 }
669
670 /// Return branch probability calculated by BranchProbabilityInfo for IR
671 /// blocks.
672 BranchProbability getEdgeProbability(const MachineBasicBlock *Src,
673 const MachineBasicBlock *Dst) const;
674
675 void addSuccessorWithProb(
676 MachineBasicBlock *Src, MachineBasicBlock *Dst,
677 BranchProbability Prob = BranchProbability::getUnknown());
678
679public:
680 IRTranslator(CodeGenOpt::Level OptLevel = CodeGenOpt::None);
681
682 StringRef getPassName() const override { return "IRTranslator"; }
683
684 void getAnalysisUsage(AnalysisUsage &AU) const override;
685
686 // Algo:
687 // CallLowering = MF.subtarget.getCallLowering()
688 // F = MF.getParent()
689 // MIRBuilder.reset(MF)
690 // getMBB(F.getEntryBB())
691 // CallLowering->translateArguments(MIRBuilder, F, ValToVReg)
692 // for each bb in F
693 // getMBB(bb)
694 // for each inst in bb
695 // if (!translate(MIRBuilder, inst, ValToVReg, ConstantToSequence))
696 // report_fatal_error("Don't know how to translate input");
697 // finalize()
698 bool runOnMachineFunction(MachineFunction &MF) override;
699};
700
701} // end namespace llvm
702
703#endif // LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H
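
For readers unfamiliar with the pass, the "Algo" comment above boils down to a straight walk over the IR function, handing every instruction to a per-opcode translate* method and bailing out on the first failure. The sketch below is not LLVM's actual implementation; translateFunctionSketch and the Translate callback are illustrative stand-ins that only show the traversal order.

#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"

// Visit every basic block and instruction of F in order, as the "Algo"
// comment describes; Translate stands in for IRTranslator's translate(Inst).
static bool translateFunctionSketch(
    const llvm::Function &F,
    llvm::function_ref<bool(const llvm::Instruction &)> Translate) {
  for (const llvm::BasicBlock &BB : F)    // getMBB(bb) happens per block
    for (const llvm::Instruction &I : BB)
      if (!Translate(I))                  // the real pass reports an error here
        return false;
  return true;
}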

/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/IR/InstrTypes.h

1//===- llvm/InstrTypes.h - Important Instruction subclasses -----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines various meta classes of instructions that exist in the VM
10// representation. Specific concrete subclasses of these may be found in the
11// i*.h files...
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_IR_INSTRTYPES_H
16#define LLVM_IR_INSTRTYPES_H
17
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/None.h"
20#include "llvm/ADT/Optional.h"
21#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/StringMap.h"
23#include "llvm/ADT/StringRef.h"
24#include "llvm/ADT/Twine.h"
25#include "llvm/ADT/iterator_range.h"
26#include "llvm/IR/Attributes.h"
27#include "llvm/IR/CallingConv.h"
28#include "llvm/IR/Constants.h"
29#include "llvm/IR/DerivedTypes.h"
30#include "llvm/IR/Function.h"
31#include "llvm/IR/Instruction.h"
32#include "llvm/IR/LLVMContext.h"
33#include "llvm/IR/OperandTraits.h"
34#include "llvm/IR/Type.h"
35#include "llvm/IR/User.h"
36#include "llvm/IR/Value.h"
37#include "llvm/Support/Casting.h"
38#include "llvm/Support/ErrorHandling.h"
39#include <algorithm>
40#include <cassert>
41#include <cstddef>
42#include <cstdint>
43#include <iterator>
44#include <string>
45#include <vector>
46
47namespace llvm {
48
49namespace Intrinsic {
50typedef unsigned ID;
51}
52
53//===----------------------------------------------------------------------===//
54// UnaryInstruction Class
55//===----------------------------------------------------------------------===//
56
57class UnaryInstruction : public Instruction {
58protected:
59 UnaryInstruction(Type *Ty, unsigned iType, Value *V,
60 Instruction *IB = nullptr)
61 : Instruction(Ty, iType, &Op<0>(), 1, IB) {
62 Op<0>() = V;
63 }
64 UnaryInstruction(Type *Ty, unsigned iType, Value *V, BasicBlock *IAE)
65 : Instruction(Ty, iType, &Op<0>(), 1, IAE) {
66 Op<0>() = V;
67 }
68
69public:
70 // allocate space for exactly one operand
71 void *operator new(size_t S) { return User::operator new(S, 1); }
72 void operator delete(void *Ptr) { User::operator delete(Ptr); }
73
74 /// Transparently provide more efficient getOperand methods.
75   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
76
77 // Methods for support type inquiry through isa, cast, and dyn_cast:
78 static bool classof(const Instruction *I) {
79 return I->isUnaryOp() ||
80 I->getOpcode() == Instruction::Alloca ||
81 I->getOpcode() == Instruction::Load ||
82 I->getOpcode() == Instruction::VAArg ||
83 I->getOpcode() == Instruction::ExtractValue ||
84 (I->getOpcode() >= CastOpsBegin && I->getOpcode() < CastOpsEnd);
85 }
86 static bool classof(const Value *V) {
87 return isa<Instruction>(V) && classof(cast<Instruction>(V));
88 }
89};
90
91template <>
92struct OperandTraits<UnaryInstruction> :
93 public FixedNumOperandTraits<UnaryInstruction, 1> {
94};
95
96DEFINE_TRANSPARENT_OPERAND_ACCESSORS(UnaryInstruction, Value)
97
98//===----------------------------------------------------------------------===//
99// UnaryOperator Class
100//===----------------------------------------------------------------------===//
101
102class UnaryOperator : public UnaryInstruction {
103 void AssertOK();
104
105protected:
106 UnaryOperator(UnaryOps iType, Value *S, Type *Ty,
107 const Twine &Name, Instruction *InsertBefore);
108 UnaryOperator(UnaryOps iType, Value *S, Type *Ty,
109 const Twine &Name, BasicBlock *InsertAtEnd);
110
111 // Note: Instruction needs to be a friend here to call cloneImpl.
112 friend class Instruction;
113
114 UnaryOperator *cloneImpl() const;
115
116public:
117
118 /// Construct a unary instruction, given the opcode and an operand.
119 /// Optionally (if InstBefore is specified) insert the instruction
120 /// into a BasicBlock right before the specified instruction. The specified
121 /// Instruction is allowed to be a dereferenced end iterator.
122 ///
123 static UnaryOperator *Create(UnaryOps Op, Value *S,
124 const Twine &Name = Twine(),
125 Instruction *InsertBefore = nullptr);
126
127 /// Construct a unary instruction, given the opcode and an operand.
128 /// Also automatically insert this instruction to the end of the
129 /// BasicBlock specified.
130 ///
131 static UnaryOperator *Create(UnaryOps Op, Value *S,
132 const Twine &Name,
133 BasicBlock *InsertAtEnd);
134
135 /// These methods just forward to Create, and are useful when you
136 /// statically know what type of instruction you're going to create. These
137 /// helpers just save some typing.
138#define HANDLE_UNARY_INST(N, OPC, CLASS) \
139 static UnaryOperator *Create##OPC(Value *V, const Twine &Name = "") {\
140 return Create(Instruction::OPC, V, Name);\
141 }
142#include "llvm/IR/Instruction.def"
143#define HANDLE_UNARY_INST(N, OPC, CLASS) \
144 static UnaryOperator *Create##OPC(Value *V, const Twine &Name, \
145 BasicBlock *BB) {\
146 return Create(Instruction::OPC, V, Name, BB);\
147 }
148#include "llvm/IR/Instruction.def"
149#define HANDLE_UNARY_INST(N, OPC, CLASS) \
150 static UnaryOperator *Create##OPC(Value *V, const Twine &Name, \
151 Instruction *I) {\
152 return Create(Instruction::OPC, V, Name, I);\
153 }
154#include "llvm/IR/Instruction.def"
155
156 static UnaryOperator *
157 CreateWithCopiedFlags(UnaryOps Opc, Value *V, Instruction *CopyO,
158 const Twine &Name = "",
159 Instruction *InsertBefore = nullptr) {
160 UnaryOperator *UO = Create(Opc, V, Name, InsertBefore);
161 UO->copyIRFlags(CopyO);
162 return UO;
163 }
164
165 static UnaryOperator *CreateFNegFMF(Value *Op, Instruction *FMFSource,
166 const Twine &Name = "",
167 Instruction *InsertBefore = nullptr) {
168 return CreateWithCopiedFlags(Instruction::FNeg, Op, FMFSource, Name,
169 InsertBefore);
170 }
171
172 UnaryOps getOpcode() const {
173 return static_cast<UnaryOps>(Instruction::getOpcode());
174 }
175
176 // Methods for support type inquiry through isa, cast, and dyn_cast:
177 static bool classof(const Instruction *I) {
178 return I->isUnaryOp();
179 }
180 static bool classof(const Value *V) {
181 return isa<Instruction>(V) && classof(cast<Instruction>(V));
182 }
183};
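
As a quick illustration of the Create* helpers declared above (a minimal sketch; the enclosing function and the names X and InsertPt are ours, not the header's):

#include "llvm/IR/InstrTypes.h"

// Negate a floating-point value and insert the new instruction before an
// existing one; equivalent to Create(Instruction::FNeg, X, "neg", InsertPt).
llvm::Value *negateBefore(llvm::Value *X, llvm::Instruction *InsertPt) {
  return llvm::UnaryOperator::CreateFNeg(X, "neg", InsertPt);
}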
184
185//===----------------------------------------------------------------------===//
186// BinaryOperator Class
187//===----------------------------------------------------------------------===//
188
189class BinaryOperator : public Instruction {
190 void AssertOK();
191
192protected:
193 BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,
194 const Twine &Name, Instruction *InsertBefore);
195 BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,
196 const Twine &Name, BasicBlock *InsertAtEnd);
197
198 // Note: Instruction needs to be a friend here to call cloneImpl.
199 friend class Instruction;
200
201 BinaryOperator *cloneImpl() const;
202
203public:
204 // allocate space for exactly two operands
205 void *operator new(size_t S) { return User::operator new(S, 2); }
206 void operator delete(void *Ptr) { User::operator delete(Ptr); }
207
208 /// Transparently provide more efficient getOperand methods.
209   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
210
211 /// Construct a binary instruction, given the opcode and the two
212 /// operands. Optionally (if InstBefore is specified) insert the instruction
213 /// into a BasicBlock right before the specified instruction. The specified
214 /// Instruction is allowed to be a dereferenced end iterator.
215 ///
216 static BinaryOperator *Create(BinaryOps Op, Value *S1, Value *S2,
217 const Twine &Name = Twine(),
218 Instruction *InsertBefore = nullptr);
219
220 /// Construct a binary instruction, given the opcode and the two
221 /// operands. Also automatically insert this instruction to the end of the
222 /// BasicBlock specified.
223 ///
224 static BinaryOperator *Create(BinaryOps Op, Value *S1, Value *S2,
225 const Twine &Name, BasicBlock *InsertAtEnd);
226
227 /// These methods just forward to Create, and are useful when you
228 /// statically know what type of instruction you're going to create. These
229 /// helpers just save some typing.
230#define HANDLE_BINARY_INST(N, OPC, CLASS) \
231 static BinaryOperator *Create##OPC(Value *V1, Value *V2, \
232 const Twine &Name = "") {\
233 return Create(Instruction::OPC, V1, V2, Name);\
234 }
235#include "llvm/IR/Instruction.def"
236#define HANDLE_BINARY_INST(N, OPC, CLASS) \
237 static BinaryOperator *Create##OPC(Value *V1, Value *V2, \
238 const Twine &Name, BasicBlock *BB) {\
239 return Create(Instruction::OPC, V1, V2, Name, BB);\
240 }
241#include "llvm/IR/Instruction.def"
242#define HANDLE_BINARY_INST(N, OPC, CLASS) \
243 static BinaryOperator *Create##OPC(Value *V1, Value *V2, \
244 const Twine &Name, Instruction *I) {\
245 return Create(Instruction::OPC, V1, V2, Name, I);\
246 }
247#include "llvm/IR/Instruction.def"
248
249 static BinaryOperator *
250 CreateWithCopiedFlags(BinaryOps Opc, Value *V1, Value *V2, Instruction *CopyO,
251 const Twine &Name = "",
252 Instruction *InsertBefore = nullptr) {
253 BinaryOperator *BO = Create(Opc, V1, V2, Name, InsertBefore);
254 BO->copyIRFlags(CopyO);
255 return BO;
256 }
257
258 static BinaryOperator *CreateFAddFMF(Value *V1, Value *V2,
259 Instruction *FMFSource,
260 const Twine &Name = "") {
261 return CreateWithCopiedFlags(Instruction::FAdd, V1, V2, FMFSource, Name);
262 }
263 static BinaryOperator *CreateFSubFMF(Value *V1, Value *V2,
264 Instruction *FMFSource,
265 const Twine &Name = "") {
266 return CreateWithCopiedFlags(Instruction::FSub, V1, V2, FMFSource, Name);
267 }
268 static BinaryOperator *CreateFMulFMF(Value *V1, Value *V2,
269 Instruction *FMFSource,
270 const Twine &Name = "") {
271 return CreateWithCopiedFlags(Instruction::FMul, V1, V2, FMFSource, Name);
272 }
273 static BinaryOperator *CreateFDivFMF(Value *V1, Value *V2,
274 Instruction *FMFSource,
275 const Twine &Name = "") {
276 return CreateWithCopiedFlags(Instruction::FDiv, V1, V2, FMFSource, Name);
277 }
278 static BinaryOperator *CreateFRemFMF(Value *V1, Value *V2,
279 Instruction *FMFSource,
280 const Twine &Name = "") {
281 return CreateWithCopiedFlags(Instruction::FRem, V1, V2, FMFSource, Name);
282 }
283
284 static BinaryOperator *CreateNSW(BinaryOps Opc, Value *V1, Value *V2,
285 const Twine &Name = "") {
286 BinaryOperator *BO = Create(Opc, V1, V2, Name);
287 BO->setHasNoSignedWrap(true);
288 return BO;
289 }
290 static BinaryOperator *CreateNSW(BinaryOps Opc, Value *V1, Value *V2,
291 const Twine &Name, BasicBlock *BB) {
292 BinaryOperator *BO = Create(Opc, V1, V2, Name, BB);
293 BO->setHasNoSignedWrap(true);
294 return BO;
295 }
296 static BinaryOperator *CreateNSW(BinaryOps Opc, Value *V1, Value *V2,
297 const Twine &Name, Instruction *I) {
298 BinaryOperator *BO = Create(Opc, V1, V2, Name, I);
299 BO->setHasNoSignedWrap(true);
300 return BO;
301 }
302
303 static BinaryOperator *CreateNUW(BinaryOps Opc, Value *V1, Value *V2,
304 const Twine &Name = "") {
305 BinaryOperator *BO = Create(Opc, V1, V2, Name);
306 BO->setHasNoUnsignedWrap(true);
307 return BO;
308 }
309 static BinaryOperator *CreateNUW(BinaryOps Opc, Value *V1, Value *V2,
310 const Twine &Name, BasicBlock *BB) {
311 BinaryOperator *BO = Create(Opc, V1, V2, Name, BB);
312 BO->setHasNoUnsignedWrap(true);
313 return BO;
314 }
315 static BinaryOperator *CreateNUW(BinaryOps Opc, Value *V1, Value *V2,
316 const Twine &Name, Instruction *I) {
317 BinaryOperator *BO = Create(Opc, V1, V2, Name, I);
318 BO->setHasNoUnsignedWrap(true);
319 return BO;
320 }
321
322 static BinaryOperator *CreateExact(BinaryOps Opc, Value *V1, Value *V2,
323 const Twine &Name = "") {
324 BinaryOperator *BO = Create(Opc, V1, V2, Name);
325 BO->setIsExact(true);
326 return BO;
327 }
328 static BinaryOperator *CreateExact(BinaryOps Opc, Value *V1, Value *V2,
329 const Twine &Name, BasicBlock *BB) {
330 BinaryOperator *BO = Create(Opc, V1, V2, Name, BB);
331 BO->setIsExact(true);
332 return BO;
333 }
334 static BinaryOperator *CreateExact(BinaryOps Opc, Value *V1, Value *V2,
335 const Twine &Name, Instruction *I) {
336 BinaryOperator *BO = Create(Opc, V1, V2, Name, I);
337 BO->setIsExact(true);
338 return BO;
339 }
340
341#define DEFINE_HELPERS(OPC, NUWNSWEXACT) \
342 static BinaryOperator *Create##NUWNSWEXACT##OPC(Value *V1, Value *V2, \
343 const Twine &Name = "") { \
344 return Create##NUWNSWEXACT(Instruction::OPC, V1, V2, Name); \
345 } \
346 static BinaryOperator *Create##NUWNSWEXACT##OPC( \
347 Value *V1, Value *V2, const Twine &Name, BasicBlock *BB) { \
348 return Create##NUWNSWEXACT(Instruction::OPC, V1, V2, Name, BB); \
349 } \
350 static BinaryOperator *Create##NUWNSWEXACT##OPC( \
351 Value *V1, Value *V2, const Twine &Name, Instruction *I) { \
352 return Create##NUWNSWEXACT(Instruction::OPC, V1, V2, Name, I); \
353 }
354
355 DEFINE_HELPERS(Add, NSW) // CreateNSWAdd
356 DEFINE_HELPERS(Add, NUW) // CreateNUWAdd
357 DEFINE_HELPERS(Sub, NSW) // CreateNSWSub
358 DEFINE_HELPERS(Sub, NUW) // CreateNUWSub
359 DEFINE_HELPERS(Mul, NSW) // CreateNSWMul
360 DEFINE_HELPERS(Mul, NUW) // CreateNUWMul
361 DEFINE_HELPERS(Shl, NSW) // CreateNSWShl
362 DEFINE_HELPERS(Shl, NUW) // CreateNUWShl
363
364 DEFINE_HELPERS(SDiv, Exact) // CreateExactSDiv
365 DEFINE_HELPERS(UDiv, Exact) // CreateExactUDiv
366 DEFINE_HELPERS(AShr, Exact) // CreateExactAShr
367 DEFINE_HELPERS(LShr, Exact) // CreateExactLShr
368
369#undef DEFINE_HELPERS
370
371 /// Helper functions to construct and inspect unary operations (NEG and NOT)
372 /// via binary operators SUB and XOR:
373 ///
374 /// Create the NEG and NOT instructions out of SUB and XOR instructions.
375 ///
376 static BinaryOperator *CreateNeg(Value *Op, const Twine &Name = "",
377 Instruction *InsertBefore = nullptr);
378 static BinaryOperator *CreateNeg(Value *Op, const Twine &Name,
379 BasicBlock *InsertAtEnd);
380 static BinaryOperator *CreateNSWNeg(Value *Op, const Twine &Name = "",
381 Instruction *InsertBefore = nullptr);
382 static BinaryOperator *CreateNSWNeg(Value *Op, const Twine &Name,
383 BasicBlock *InsertAtEnd);
384 static BinaryOperator *CreateNUWNeg(Value *Op, const Twine &Name = "",
385 Instruction *InsertBefore = nullptr);
386 static BinaryOperator *CreateNUWNeg(Value *Op, const Twine &Name,
387 BasicBlock *InsertAtEnd);
388 static BinaryOperator *CreateNot(Value *Op, const Twine &Name = "",
389 Instruction *InsertBefore = nullptr);
390 static BinaryOperator *CreateNot(Value *Op, const Twine &Name,
391 BasicBlock *InsertAtEnd);
392
393 BinaryOps getOpcode() const {
394 return static_cast<BinaryOps>(Instruction::getOpcode());
395 }
396
397 /// Exchange the two operands to this instruction.
398 /// This instruction is safe to use on any binary instruction and
399 /// does not modify the semantics of the instruction. If the instruction
400  /// cannot be reversed (i.e., it's a Div), then return true.
401 ///
402 bool swapOperands();
403
404 // Methods for support type inquiry through isa, cast, and dyn_cast:
405 static bool classof(const Instruction *I) {
406 return I->isBinaryOp();
407 }
408 static bool classof(const Value *V) {
409 return isa<Instruction>(V) && classof(cast<Instruction>(V));
410 }
411};
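
A minimal sketch of the flag-setting conveniences above (CreateNSWAdd is one of the DEFINE_HELPERS expansions; A, B and the enclosing function are illustrative):

#include "llvm/IR/InstrTypes.h"

// Build an add carrying the nsw flag; shorthand for
// CreateNSW(Instruction::Add, A, B, "sum"). With no insertion point the
// instruction is created detached, so the caller decides where it goes.
llvm::BinaryOperator *addNoSignedWrap(llvm::Value *A, llvm::Value *B) {
  return llvm::BinaryOperator::CreateNSWAdd(A, B, "sum");
}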
412
413template <>
414struct OperandTraits<BinaryOperator> :
415 public FixedNumOperandTraits<BinaryOperator, 2> {
416};
417
418DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BinaryOperator, Value)
419
420//===----------------------------------------------------------------------===//
421// CastInst Class
422//===----------------------------------------------------------------------===//
423
424/// This is the base class for all instructions that perform data
425/// casts. It is simply provided so that instruction category testing
426/// can be performed with code like:
427///
428/// if (isa<CastInst>(Instr)) { ... }
429/// Base class of casting instructions.
430class CastInst : public UnaryInstruction {
431protected:
432 /// Constructor with insert-before-instruction semantics for subclasses
433 CastInst(Type *Ty, unsigned iType, Value *S,
434 const Twine &NameStr = "", Instruction *InsertBefore = nullptr)
435 : UnaryInstruction(Ty, iType, S, InsertBefore) {
436 setName(NameStr);
437 }
438 /// Constructor with insert-at-end-of-block semantics for subclasses
439 CastInst(Type *Ty, unsigned iType, Value *S,
440 const Twine &NameStr, BasicBlock *InsertAtEnd)
441 : UnaryInstruction(Ty, iType, S, InsertAtEnd) {
442 setName(NameStr);
443 }
444
445public:
446 /// Provides a way to construct any of the CastInst subclasses using an
447 /// opcode instead of the subclass's constructor. The opcode must be in the
448 /// CastOps category (Instruction::isCast(opcode) returns true). This
449 /// constructor has insert-before-instruction semantics to automatically
450 /// insert the new CastInst before InsertBefore (if it is non-null).
451 /// Construct any of the CastInst subclasses
452 static CastInst *Create(
453 Instruction::CastOps, ///< The opcode of the cast instruction
454 Value *S, ///< The value to be casted (operand 0)
455 Type *Ty, ///< The type to which cast should be made
456 const Twine &Name = "", ///< Name for the instruction
457 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
458 );
459 /// Provides a way to construct any of the CastInst subclasses using an
460 /// opcode instead of the subclass's constructor. The opcode must be in the
461 /// CastOps category. This constructor has insert-at-end-of-block semantics
462 /// to automatically insert the new CastInst at the end of InsertAtEnd (if
463  /// it's non-null).
464 /// Construct any of the CastInst subclasses
465 static CastInst *Create(
466 Instruction::CastOps, ///< The opcode for the cast instruction
467 Value *S, ///< The value to be casted (operand 0)
468 Type *Ty, ///< The type to which operand is casted
469 const Twine &Name, ///< The name for the instruction
470 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
471 );
472
473 /// Create a ZExt or BitCast cast instruction
474 static CastInst *CreateZExtOrBitCast(
475 Value *S, ///< The value to be casted (operand 0)
476 Type *Ty, ///< The type to which cast should be made
477 const Twine &Name = "", ///< Name for the instruction
478 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
479 );
480
481 /// Create a ZExt or BitCast cast instruction
482 static CastInst *CreateZExtOrBitCast(
483 Value *S, ///< The value to be casted (operand 0)
484 Type *Ty, ///< The type to which operand is casted
485 const Twine &Name, ///< The name for the instruction
486 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
487 );
488
489 /// Create a SExt or BitCast cast instruction
490 static CastInst *CreateSExtOrBitCast(
491 Value *S, ///< The value to be casted (operand 0)
492 Type *Ty, ///< The type to which cast should be made
493 const Twine &Name = "", ///< Name for the instruction
494 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
495 );
496
497 /// Create a SExt or BitCast cast instruction
498 static CastInst *CreateSExtOrBitCast(
499 Value *S, ///< The value to be casted (operand 0)
500 Type *Ty, ///< The type to which operand is casted
501 const Twine &Name, ///< The name for the instruction
502 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
503 );
504
505  /// Create a BitCast, AddrSpaceCast, or a PtrToInt cast instruction.
506 static CastInst *CreatePointerCast(
507 Value *S, ///< The pointer value to be casted (operand 0)
508 Type *Ty, ///< The type to which operand is casted
509 const Twine &Name, ///< The name for the instruction
510 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
511 );
512
513 /// Create a BitCast, AddrSpaceCast or a PtrToInt cast instruction.
514 static CastInst *CreatePointerCast(
515 Value *S, ///< The pointer value to be casted (operand 0)
516 Type *Ty, ///< The type to which cast should be made
517 const Twine &Name = "", ///< Name for the instruction
518 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
519 );
520
521 /// Create a BitCast or an AddrSpaceCast cast instruction.
522 static CastInst *CreatePointerBitCastOrAddrSpaceCast(
523 Value *S, ///< The pointer value to be casted (operand 0)
524 Type *Ty, ///< The type to which operand is casted
525 const Twine &Name, ///< The name for the instruction
526 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
527 );
528
529 /// Create a BitCast or an AddrSpaceCast cast instruction.
530 static CastInst *CreatePointerBitCastOrAddrSpaceCast(
531 Value *S, ///< The pointer value to be casted (operand 0)
532 Type *Ty, ///< The type to which cast should be made
533 const Twine &Name = "", ///< Name for the instruction
534 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
535 );
536
537  /// Create a BitCast, a PtrToInt, or an IntToPtr cast instruction.
538 ///
539 /// If the value is a pointer type and the destination an integer type,
540 /// creates a PtrToInt cast. If the value is an integer type and the
541 /// destination a pointer type, creates an IntToPtr cast. Otherwise, creates
542 /// a bitcast.
543 static CastInst *CreateBitOrPointerCast(
544 Value *S, ///< The pointer value to be casted (operand 0)
545 Type *Ty, ///< The type to which cast should be made
546 const Twine &Name = "", ///< Name for the instruction
547 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
548 );
549
550 /// Create a ZExt, BitCast, or Trunc for int -> int casts.
551 static CastInst *CreateIntegerCast(
552     Value *S,              ///< The integer value to be casted (operand 0)
553 Type *Ty, ///< The type to which cast should be made
554 bool isSigned, ///< Whether to regard S as signed or not
555 const Twine &Name = "", ///< Name for the instruction
556 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
557 );
558
559 /// Create a ZExt, BitCast, or Trunc for int -> int casts.
560 static CastInst *CreateIntegerCast(
561 Value *S, ///< The integer value to be casted (operand 0)
562 Type *Ty, ///< The integer type to which operand is casted
563 bool isSigned, ///< Whether to regard S as signed or not
564 const Twine &Name, ///< The name for the instruction
565 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
566 );
567
568 /// Create an FPExt, BitCast, or FPTrunc for fp -> fp casts
569 static CastInst *CreateFPCast(
570 Value *S, ///< The floating point value to be casted
571 Type *Ty, ///< The floating point type to cast to
572 const Twine &Name = "", ///< Name for the instruction
573 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
574 );
575
576 /// Create an FPExt, BitCast, or FPTrunc for fp -> fp casts
577 static CastInst *CreateFPCast(
578 Value *S, ///< The floating point value to be casted
579 Type *Ty, ///< The floating point type to cast to
580 const Twine &Name, ///< The name for the instruction
581 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
582 );
583
584 /// Create a Trunc or BitCast cast instruction
585 static CastInst *CreateTruncOrBitCast(
586 Value *S, ///< The value to be casted (operand 0)
587 Type *Ty, ///< The type to which cast should be made
588 const Twine &Name = "", ///< Name for the instruction
589 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
590 );
591
592 /// Create a Trunc or BitCast cast instruction
593 static CastInst *CreateTruncOrBitCast(
594 Value *S, ///< The value to be casted (operand 0)
595 Type *Ty, ///< The type to which operand is casted
596 const Twine &Name, ///< The name for the instruction
597 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
598 );
599
600 /// Check whether a bitcast between these types is valid
601 static bool isBitCastable(
602 Type *SrcTy, ///< The Type from which the value should be cast.
603 Type *DestTy ///< The Type to which the value should be cast.
604 );
605
606 /// Check whether a bitcast, inttoptr, or ptrtoint cast between these
607 /// types is valid and a no-op.
608 ///
609 /// This ensures that any pointer<->integer cast has enough bits in the
610 /// integer and any other cast is a bitcast.
611 static bool isBitOrNoopPointerCastable(
612 Type *SrcTy, ///< The Type from which the value should be cast.
613 Type *DestTy, ///< The Type to which the value should be cast.
614 const DataLayout &DL);
615
616 /// Returns the opcode necessary to cast Val into Ty using usual casting
617 /// rules.
618 /// Infer the opcode for cast operand and type
619 static Instruction::CastOps getCastOpcode(
620 const Value *Val, ///< The value to cast
621 bool SrcIsSigned, ///< Whether to treat the source as signed
622 Type *Ty, ///< The Type to which the value should be casted
623     bool DstIsSigned       ///< Whether to treat the dest. as signed
624 );
625
626 /// There are several places where we need to know if a cast instruction
627 /// only deals with integer source and destination types. To simplify that
628 /// logic, this method is provided.
629 /// @returns true iff the cast has only integral typed operand and dest type.
630 /// Determine if this is an integer-only cast.
631 bool isIntegerCast() const;
632
633 /// A lossless cast is one that does not alter the basic value. It implies
634 /// a no-op cast but is more stringent, preventing things like int->float,
635 /// long->double, or int->ptr.
636 /// @returns true iff the cast is lossless.
637 /// Determine if this is a lossless cast.
638 bool isLosslessCast() const;
639
640 /// A no-op cast is one that can be effected without changing any bits.
641 /// It implies that the source and destination types are the same size. The
642 /// DataLayout argument is to determine the pointer size when examining casts
643 /// involving Integer and Pointer types. They are no-op casts if the integer
644 /// is the same size as the pointer. However, pointer size varies with
645 /// platform. Note that a precondition of this method is that the cast is
646 /// legal - i.e. the instruction formed with these operands would verify.
647 static bool isNoopCast(
648 Instruction::CastOps Opcode, ///< Opcode of cast
649 Type *SrcTy, ///< SrcTy of cast
650 Type *DstTy, ///< DstTy of cast
651 const DataLayout &DL ///< DataLayout to get the Int Ptr type from.
652 );
653
654 /// Determine if this cast is a no-op cast.
655 ///
656 /// \param DL is the DataLayout to determine pointer size.
657 bool isNoopCast(const DataLayout &DL) const;
658
659 /// Determine how a pair of casts can be eliminated, if they can be at all.
660 /// This is a helper function for both CastInst and ConstantExpr.
661 /// @returns 0 if the CastInst pair can't be eliminated, otherwise
662 /// returns Instruction::CastOps value for a cast that can replace
663 /// the pair, casting SrcTy to DstTy.
664 /// Determine if a cast pair is eliminable
665 static unsigned isEliminableCastPair(
666 Instruction::CastOps firstOpcode, ///< Opcode of first cast
667 Instruction::CastOps secondOpcode, ///< Opcode of second cast
668 Type *SrcTy, ///< SrcTy of 1st cast
669 Type *MidTy, ///< DstTy of 1st cast & SrcTy of 2nd cast
670 Type *DstTy, ///< DstTy of 2nd cast
671 Type *SrcIntPtrTy, ///< Integer type corresponding to Ptr SrcTy, or null
672 Type *MidIntPtrTy, ///< Integer type corresponding to Ptr MidTy, or null
673 Type *DstIntPtrTy ///< Integer type corresponding to Ptr DstTy, or null
674 );
675
676 /// Return the opcode of this CastInst
677 Instruction::CastOps getOpcode() const {
678 return Instruction::CastOps(Instruction::getOpcode());
679 }
680
681 /// Return the source type, as a convenience
682 Type* getSrcTy() const { return getOperand(0)->getType(); }
683 /// Return the destination type, as a convenience
684 Type* getDestTy() const { return getType(); }
685
686 /// This method can be used to determine if a cast from SrcTy to DstTy using
687 /// Opcode op is valid or not.
688 /// @returns true iff the proposed cast is valid.
689 /// Determine if a cast is valid without creating one.
690 static bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy);
691 static bool castIsValid(Instruction::CastOps op, Value *S, Type *DstTy) {
692 return castIsValid(op, S->getType(), DstTy);
693 }
694
695 /// Methods for support type inquiry through isa, cast, and dyn_cast:
696 static bool classof(const Instruction *I) {
697 return I->isCast();
698 }
699 static bool classof(const Value *V) {
700 return isa<Instruction>(V) && classof(cast<Instruction>(V));
701 }
702};
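
Putting the cast helpers above together (a sketch; Val, DestTy, InsertPt and the signedness choices are illustrative):

#include "llvm/IR/InstrTypes.h"
#include <cassert>

// Infer the correct cast opcode for Val -> DestTy, check it, and build it.
llvm::Instruction *castTo(llvm::Value *Val, llvm::Type *DestTy,
                          llvm::Instruction *InsertPt) {
  llvm::Instruction::CastOps Op = llvm::CastInst::getCastOpcode(
      Val, /*SrcIsSigned=*/true, DestTy, /*DstIsSigned=*/true);
  assert(llvm::CastInst::castIsValid(Op, Val, DestTy) && "illegal cast");
  return llvm::CastInst::Create(Op, Val, DestTy, "cast", InsertPt);
}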
703
704//===----------------------------------------------------------------------===//
705// CmpInst Class
706//===----------------------------------------------------------------------===//
707
708/// This class is the base class for the comparison instructions.
709/// Abstract base class of comparison instructions.
710class CmpInst : public Instruction {
711public:
712 /// This enumeration lists the possible predicates for CmpInst subclasses.
713 /// Values in the range 0-31 are reserved for FCmpInst, while values in the
714 /// range 32-64 are reserved for ICmpInst. This is necessary to ensure the
715 /// predicate values are not overlapping between the classes.
716 ///
717 /// Some passes (e.g. InstCombine) depend on the bit-wise characteristics of
718 /// FCMP_* values. Changing the bit patterns requires a potential change to
719 /// those passes.
720 enum Predicate : unsigned {
721 // Opcode U L G E Intuitive operation
722 FCMP_FALSE = 0, ///< 0 0 0 0 Always false (always folded)
723 FCMP_OEQ = 1, ///< 0 0 0 1 True if ordered and equal
724 FCMP_OGT = 2, ///< 0 0 1 0 True if ordered and greater than
725 FCMP_OGE = 3, ///< 0 0 1 1 True if ordered and greater than or equal
726 FCMP_OLT = 4, ///< 0 1 0 0 True if ordered and less than
727 FCMP_OLE = 5, ///< 0 1 0 1 True if ordered and less than or equal
728 FCMP_ONE = 6, ///< 0 1 1 0 True if ordered and operands are unequal
729 FCMP_ORD = 7, ///< 0 1 1 1 True if ordered (no nans)
730 FCMP_UNO = 8, ///< 1 0 0 0 True if unordered: isnan(X) | isnan(Y)
731 FCMP_UEQ = 9, ///< 1 0 0 1 True if unordered or equal
732 FCMP_UGT = 10, ///< 1 0 1 0 True if unordered or greater than
733 FCMP_UGE = 11, ///< 1 0 1 1 True if unordered, greater than, or equal
734 FCMP_ULT = 12, ///< 1 1 0 0 True if unordered or less than
735 FCMP_ULE = 13, ///< 1 1 0 1 True if unordered, less than, or equal
736 FCMP_UNE = 14, ///< 1 1 1 0 True if unordered or not equal
737 FCMP_TRUE = 15, ///< 1 1 1 1 Always true (always folded)
738 FIRST_FCMP_PREDICATE = FCMP_FALSE,
739 LAST_FCMP_PREDICATE = FCMP_TRUE,
740 BAD_FCMP_PREDICATE = FCMP_TRUE + 1,
741 ICMP_EQ = 32, ///< equal
742 ICMP_NE = 33, ///< not equal
743 ICMP_UGT = 34, ///< unsigned greater than
744 ICMP_UGE = 35, ///< unsigned greater or equal
745 ICMP_ULT = 36, ///< unsigned less than
746 ICMP_ULE = 37, ///< unsigned less or equal
747 ICMP_SGT = 38, ///< signed greater than
748 ICMP_SGE = 39, ///< signed greater or equal
749 ICMP_SLT = 40, ///< signed less than
750 ICMP_SLE = 41, ///< signed less or equal
751 FIRST_ICMP_PREDICATE = ICMP_EQ,
752 LAST_ICMP_PREDICATE = ICMP_SLE,
753 BAD_ICMP_PREDICATE = ICMP_SLE + 1
754 };
755 using PredicateField =
756 Bitfield::Element<Predicate, 0, 6, LAST_ICMP_PREDICATE>;
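
The "U L G E" column in the comment above encodes each FCMP predicate as four bits: unordered (8), less (4), greater (2), equal (1). For instance FCMP_UGE = 11 = 0b1011 is exactly unordered | greater | equal; the helper below is only an illustration of that encoding and is not part of the header.

#include "llvm/IR/InstrTypes.h"

// FCMP_UGE (1011) combines the unordered, greater and equal bits.
static bool fcmpBitsMatch() {
  using llvm::CmpInst;
  return CmpInst::FCMP_UGE ==
         (CmpInst::FCMP_UNO | CmpInst::FCMP_OGT | CmpInst::FCMP_OEQ);
}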
757
758protected:
759 CmpInst(Type *ty, Instruction::OtherOps op, Predicate pred,
760 Value *LHS, Value *RHS, const Twine &Name = "",
761 Instruction *InsertBefore = nullptr,
762 Instruction *FlagsSource = nullptr);
763
764 CmpInst(Type *ty, Instruction::OtherOps op, Predicate pred,
765 Value *LHS, Value *RHS, const Twine &Name,
766 BasicBlock *InsertAtEnd);
767
768public:
769 // allocate space for exactly two operands
770 void *operator new(size_t S) { return User::operator new(S, 2); }
771 void operator delete(void *Ptr) { User::operator delete(Ptr); }
772
773 /// Construct a compare instruction, given the opcode, the predicate and
774 /// the two operands. Optionally (if InstBefore is specified) insert the
775 /// instruction into a BasicBlock right before the specified instruction.
776 /// The specified Instruction is allowed to be a dereferenced end iterator.
777 /// Create a CmpInst
778 static CmpInst *Create(OtherOps Op,
779 Predicate predicate, Value *S1,
780 Value *S2, const Twine &Name = "",
781 Instruction *InsertBefore = nullptr);
782
783 /// Construct a compare instruction, given the opcode, the predicate and the
784 /// two operands. Also automatically insert this instruction to the end of
785 /// the BasicBlock specified.
786 /// Create a CmpInst
787 static CmpInst *Create(OtherOps Op, Predicate predicate, Value *S1,
788 Value *S2, const Twine &Name, BasicBlock *InsertAtEnd);
789
790 /// Get the opcode casted to the right type
791 OtherOps getOpcode() const {
792 return static_cast<OtherOps>(Instruction::getOpcode());
793 }
794
795 /// Return the predicate for this instruction.
796 Predicate getPredicate() const { return getSubclassData<PredicateField>(); }
797
798 /// Set the predicate for this instruction to the specified value.
799 void setPredicate(Predicate P) { setSubclassData<PredicateField>(P); }
800
801 static bool isFPPredicate(Predicate P) {
802 static_assert(FIRST_FCMP_PREDICATE == 0,
803 "FIRST_FCMP_PREDICATE is required to be 0");
804 return P <= LAST_FCMP_PREDICATE;
805 }
806
807 static bool isIntPredicate(Predicate P) {
808 return P >= FIRST_ICMP_PREDICATE && P <= LAST_ICMP_PREDICATE;
28    Assuming 'P' is < FIRST_ICMP_PREDICATE
29    Returning zero, which participates in a condition later
809 }
810
811 static StringRef getPredicateName(Predicate P);
812
813 bool isFPPredicate() const { return isFPPredicate(getPredicate()); }
814 bool isIntPredicate() const { return isIntPredicate(getPredicate()); }
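
The analyzer note above (steps 28-29) hinges on isIntPredicate() returning false. Note that the two range checks are not exhaustive: any value in the gap between LAST_FCMP_PREDICATE and FIRST_ICMP_PREDICATE (e.g. BAD_FCMP_PREDICATE) fails both, so a caller that treats "not an FP predicate" as "must be an integer predicate" can end up on an unintended path. A minimal illustration (classifyPredicate is ours, not the header's):

#include "llvm/IR/InstrTypes.h"

static const char *classifyPredicate(llvm::CmpInst::Predicate P) {
  if (llvm::CmpInst::isFPPredicate(P))
    return "fp";
  if (llvm::CmpInst::isIntPredicate(P))
    return "int";
  return "neither"; // e.g. BAD_FCMP_PREDICATE (16) lands here
}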
815
816 /// For example, EQ -> NE, UGT -> ULE, SLT -> SGE,
817 /// OEQ -> UNE, UGT -> OLE, OLT -> UGE, etc.
818 /// @returns the inverse predicate for the instruction's current predicate.
819 /// Return the inverse of the instruction's predicate.
820 Predicate getInversePredicate() const {
821 return getInversePredicate(getPredicate());
822 }
823
824 /// For example, EQ -> NE, UGT -> ULE, SLT -> SGE,
825 /// OEQ -> UNE, UGT -> OLE, OLT -> UGE, etc.
826 /// @returns the inverse predicate for predicate provided in \p pred.
827 /// Return the inverse of a given predicate
828 static Predicate getInversePredicate(Predicate pred);
829
830 /// For example, EQ->EQ, SLE->SGE, ULT->UGT,
831 /// OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
832 /// @returns the predicate that would be the result of exchanging the two
833 /// operands of the CmpInst instruction without changing the result
834 /// produced.
835 /// Return the predicate as if the operands were swapped
836 Predicate getSwappedPredicate() const {
837 return getSwappedPredicate(getPredicate());
838 }
839
840 /// This is a static version that you can use without an instruction
841 /// available.
842 /// Return the predicate as if the operands were swapped.
843 static Predicate getSwappedPredicate(Predicate pred);
844
845 /// This is a static version that you can use without an instruction
846 /// available.
847 /// @returns true if the comparison predicate is strict, false otherwise.
848 static bool isStrictPredicate(Predicate predicate);
849
850 /// @returns true if the comparison predicate is strict, false otherwise.
851  /// Determine if this instruction is using a strict comparison predicate.
852 bool isStrictPredicate() const { return isStrictPredicate(getPredicate()); }
853
854 /// This is a static version that you can use without an instruction
855 /// available.
856 /// @returns true if the comparison predicate is non-strict, false otherwise.
857 static bool isNonStrictPredicate(Predicate predicate);
858
859 /// @returns true if the comparison predicate is non-strict, false otherwise.
860  /// Determine if this instruction is using a non-strict comparison predicate.
861 bool isNonStrictPredicate() const {
862 return isNonStrictPredicate(getPredicate());
863 }
864
865 /// For example, SGE -> SGT, SLE -> SLT, ULE -> ULT, UGE -> UGT.
866 /// Returns the strict version of non-strict comparisons.
867 Predicate getStrictPredicate() const {
868 return getStrictPredicate(getPredicate());
869 }
870
871 /// This is a static version that you can use without an instruction
872 /// available.
873 /// @returns the strict version of comparison provided in \p pred.
874 /// If \p pred is not a strict comparison predicate, returns \p pred.
875 /// Returns the strict version of non-strict comparisons.
876 static Predicate getStrictPredicate(Predicate pred);
877
878 /// For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
879 /// Returns the non-strict version of strict comparisons.
880 Predicate getNonStrictPredicate() const {
881 return getNonStrictPredicate(getPredicate());
882 }
883
884 /// This is a static version that you can use without an instruction
885 /// available.
886 /// @returns the non-strict version of comparison provided in \p pred.
887 /// If \p pred is not a strict comparison predicate, returns \p pred.
888 /// Returns the non-strict version of strict comparisons.
889 static Predicate getNonStrictPredicate(Predicate pred);
890
891 /// This is a static version that you can use without an instruction
892 /// available.
893 /// Return the flipped strictness of predicate
894 static Predicate getFlippedStrictnessPredicate(Predicate pred);
895
896 /// For predicate of kind "is X or equal to 0" returns the predicate "is X".
897 /// For predicate of kind "is X" returns the predicate "is X or equal to 0".
898  /// Does not support other kinds of predicates.
899  /// @returns the predicate with the "or equal to zero" part removed if it
900  /// had one, and added if it did not.
901 /// Return the flipped strictness of predicate
902 Predicate getFlippedStrictnessPredicate() const {
903 return getFlippedStrictnessPredicate(getPredicate());
904 }
905
906 /// Provide more efficient getOperand methods.
907   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
908
909 /// This is just a convenience that dispatches to the subclasses.
910 /// Swap the operands and adjust predicate accordingly to retain
911 /// the same comparison.
912 void swapOperands();
913
914 /// This is just a convenience that dispatches to the subclasses.
915 /// Determine if this CmpInst is commutative.
916 bool isCommutative() const;
917
918 /// Determine if this is an equals/not equals predicate.
919 /// This is a static version that you can use without an instruction
920 /// available.
921 static bool isEquality(Predicate pred);
922
923 /// Determine if this is an equals/not equals predicate.
924 bool isEquality() const { return isEquality(getPredicate()); }
925
926 /// Return true if the predicate is relational (not EQ or NE).
927 static bool isRelational(Predicate P) { return !isEquality(P); }
928
929 /// Return true if the predicate is relational (not EQ or NE).
930 bool isRelational() const { return !isEquality(); }
931
932 /// @returns true if the comparison is signed, false otherwise.
933 /// Determine if this instruction is using a signed comparison.
934 bool isSigned() const {
935 return isSigned(getPredicate());
936 }
937
938 /// @returns true if the comparison is unsigned, false otherwise.
939 /// Determine if this instruction is using an unsigned comparison.
940 bool isUnsigned() const {
941 return isUnsigned(getPredicate());
942 }
943
944 /// For example, ULT->SLT, ULE->SLE, UGT->SGT, UGE->SGE, SLT->Failed assert
945 /// @returns the signed version of the unsigned predicate pred.
946 /// return the signed version of a predicate
947 static Predicate getSignedPredicate(Predicate pred);
948
949 /// For example, ULT->SLT, ULE->SLE, UGT->SGT, UGE->SGE, SLT->Failed assert
950 /// @returns the signed version of the predicate for this instruction (which
951 /// has to be an unsigned predicate).
952 /// return the signed version of a predicate
953 Predicate getSignedPredicate() {
954 return getSignedPredicate(getPredicate());
955 }
956
957 /// For example, SLT->ULT, SLE->ULE, SGT->UGT, SGE->UGE, ULT->Failed assert
958 /// @returns the unsigned version of the signed predicate pred.
959 static Predicate getUnsignedPredicate(Predicate pred);
960
961 /// For example, SLT->ULT, SLE->ULE, SGT->UGT, SGE->UGE, ULT->Failed assert
962 /// @returns the unsigned version of the predicate for this instruction (which
963  /// has to be a signed predicate).
964 /// return the unsigned version of a predicate
965 Predicate getUnsignedPredicate() {
966 return getUnsignedPredicate(getPredicate());
967 }
968
969 /// For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->Failed assert
970 /// @returns the unsigned version of the signed predicate pred or
971  /// the signed version of the unsigned predicate pred.
972 static Predicate getFlippedSignednessPredicate(Predicate pred);
973
974 /// For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->Failed assert
975 /// @returns the unsigned version of the signed predicate pred or
976  /// the signed version of the unsigned predicate pred.
977 Predicate getFlippedSignednessPredicate() {
978 return getFlippedSignednessPredicate(getPredicate());
979 }
980
981 /// This is just a convenience.
982 /// Determine if this is true when both operands are the same.
983 bool isTrueWhenEqual() const {
984 return isTrueWhenEqual(getPredicate());
985 }
986
987 /// This is just a convenience.
988 /// Determine if this is false when both operands are the same.
989 bool isFalseWhenEqual() const {
990 return isFalseWhenEqual(getPredicate());
991 }
992
993 /// @returns true if the predicate is unsigned, false otherwise.
994 /// Determine if the predicate is an unsigned operation.
995 static bool isUnsigned(Predicate predicate);
996
997 /// @returns true if the predicate is signed, false otherwise.
998  /// Determine if the predicate is a signed operation.
999 static bool isSigned(Predicate predicate);
1000
1001 /// Determine if the predicate is an ordered operation.
1002 static bool isOrdered(Predicate predicate);
1003
1004 /// Determine if the predicate is an unordered operation.
1005 static bool isUnordered(Predicate predicate);
1006
1007 /// Determine if the predicate is true when comparing a value with itself.
1008 static bool isTrueWhenEqual(Predicate predicate);
1009
1010 /// Determine if the predicate is false when comparing a value with itself.
1011 static bool isFalseWhenEqual(Predicate predicate);
1012
1013 /// Determine if Pred1 implies Pred2 is true when two compares have matching
1014 /// operands.
1015 static bool isImpliedTrueByMatchingCmp(Predicate Pred1, Predicate Pred2);
1016
1017 /// Determine if Pred1 implies Pred2 is false when two compares have matching
1018 /// operands.
1019 static bool isImpliedFalseByMatchingCmp(Predicate Pred1, Predicate Pred2);
1020
1021 /// Methods for support type inquiry through isa, cast, and dyn_cast:
1022 static bool classof(const Instruction *I) {
1023 return I->getOpcode() == Instruction::ICmp ||
1024 I->getOpcode() == Instruction::FCmp;
1025 }
1026 static bool classof(const Value *V) {
1027 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1028 }
1029
1030 /// Create a result type for fcmp/icmp
1031 static Type* makeCmpResultType(Type* opnd_type) {
1032 if (VectorType* vt = dyn_cast<VectorType>(opnd_type)) {
1033 return VectorType::get(Type::getInt1Ty(opnd_type->getContext()),
1034 vt->getElementCount());
1035 }
1036 return Type::getInt1Ty(opnd_type->getContext());
1037 }
1038
1039private:
1040 // Shadow Value::setValueSubclassData with a private forwarding method so that
1041 // subclasses cannot accidentally use it.
1042 void setValueSubclassData(unsigned short D) {
1043 Value::setValueSubclassData(D);
1044 }
1045};
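
A minimal sketch of CmpInst::Create above: a signed less-than integer comparison whose result type is i1 (or a vector of i1), as makeCmpResultType describes. LHS, RHS and InsertPt are illustrative:

#include "llvm/IR/InstrTypes.h"

llvm::CmpInst *makeSignedLessThan(llvm::Value *LHS, llvm::Value *RHS,
                                  llvm::Instruction *InsertPt) {
  return llvm::CmpInst::Create(llvm::Instruction::ICmp,
                               llvm::CmpInst::ICMP_SLT, LHS, RHS, "cmp",
                               InsertPt);
}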
1046
1047// FIXME: these are redundant if CmpInst < BinaryOperator
1048template <>
1049struct OperandTraits<CmpInst> : public FixedNumOperandTraits<CmpInst, 2> {
1050};
1051
1052DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CmpInst, Value)
1053
1054/// A lightweight accessor for an operand bundle meant to be passed
1055/// around by value.
1056struct OperandBundleUse {
1057 ArrayRef<Use> Inputs;
1058
1059 OperandBundleUse() = default;
1060 explicit OperandBundleUse(StringMapEntry<uint32_t> *Tag, ArrayRef<Use> Inputs)
1061 : Inputs(Inputs), Tag(Tag) {}
1062
1063 /// Return true if the operand at index \p Idx in this operand bundle
1064 /// has the attribute A.
1065 bool operandHasAttr(unsigned Idx, Attribute::AttrKind A) const {
1066 if (isDeoptOperandBundle())
1067 if (A == Attribute::ReadOnly || A == Attribute::NoCapture)
1068 return Inputs[Idx]->getType()->isPointerTy();
1069
1070 // Conservative answer: no operands have any attributes.
1071 return false;
1072 }
1073
1074 /// Return the tag of this operand bundle as a string.
1075 StringRef getTagName() const {
1076 return Tag->getKey();
1077 }
1078
1079 /// Return the tag of this operand bundle as an integer.
1080 ///
1081 /// Operand bundle tags are interned by LLVMContextImpl::getOrInsertBundleTag,
1082 /// and this function returns the unique integer that getOrInsertBundleTag
1083 /// associated with the tag of this operand bundle.
1084 uint32_t getTagID() const {
1085 return Tag->getValue();
1086 }
1087
1088 /// Return true if this is a "deopt" operand bundle.
1089 bool isDeoptOperandBundle() const {
1090 return getTagID() == LLVMContext::OB_deopt;
1091 }
1092
1093 /// Return true if this is a "funclet" operand bundle.
1094 bool isFuncletOperandBundle() const {
1095 return getTagID() == LLVMContext::OB_funclet;
1096 }
1097
1098 /// Return true if this is a "cfguardtarget" operand bundle.
1099 bool isCFGuardTargetOperandBundle() const {
1100 return getTagID() == LLVMContext::OB_cfguardtarget;
1101 }
1102
1103private:
1104 /// Pointer to an entry in LLVMContextImpl::getOrInsertBundleTag.
1105 StringMapEntry<uint32_t> *Tag;
1106};
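
OperandBundleUse values are typically obtained from a call site and examined by tag, as in this sketch (hasDeoptBundle is ours; getNumOperandBundles() and getOperandBundleAt() are CallBase members declared later in this header):

#include "llvm/IR/InstrTypes.h"

// Walk the bundles attached to a call and look for a "deopt" bundle.
static bool hasDeoptBundle(const llvm::CallBase &CB) {
  for (unsigned i = 0, e = CB.getNumOperandBundles(); i != e; ++i)
    if (CB.getOperandBundleAt(i).isDeoptOperandBundle())
      return true;
  return false;
}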
1107
1108/// A container for an operand bundle being viewed as a set of values
1109/// rather than a set of uses.
1110///
1111/// Unlike OperandBundleUse, OperandBundleDefT owns the memory it carries, and
1112/// so it is possible to create and pass around "self-contained" instances of
1113/// OperandBundleDef and ConstOperandBundleDef.
1114template <typename InputTy> class OperandBundleDefT {
1115 std::string Tag;
1116 std::vector<InputTy> Inputs;
1117
1118public:
1119 explicit OperandBundleDefT(std::string Tag, std::vector<InputTy> Inputs)
1120 : Tag(std::move(Tag)), Inputs(std::move(Inputs)) {}
1121 explicit OperandBundleDefT(std::string Tag, ArrayRef<InputTy> Inputs)
1122 : Tag(std::move(Tag)), Inputs(Inputs) {}
1123
1124 explicit OperandBundleDefT(const OperandBundleUse &OBU) {
1125 Tag = std::string(OBU.getTagName());
1126 llvm::append_range(Inputs, OBU.Inputs);
1127 }
1128
1129 ArrayRef<InputTy> inputs() const { return Inputs; }
1130
1131 using input_iterator = typename std::vector<InputTy>::const_iterator;
1132
1133 size_t input_size() const { return Inputs.size(); }
1134 input_iterator input_begin() const { return Inputs.begin(); }
1135 input_iterator input_end() const { return Inputs.end(); }
1136
1137 StringRef getTag() const { return Tag; }
1138};
1139
1140using OperandBundleDef = OperandBundleDefT<Value *>;
1141using ConstOperandBundleDef = OperandBundleDefT<const Value *>;
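
And the owning counterpart in action (a sketch; withDeoptState and LiveValues are illustrative, addOperandBundle is the CallBase helper declared below):

#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/LLVMContext.h"
#include <vector>

// Build a "deopt" bundle from a list of values and get back a clone of CB
// that carries it; the clone is not inserted anywhere, so the caller decides
// where it goes and what happens to the original call.
static llvm::CallBase *withDeoptState(llvm::CallBase *CB,
                                      std::vector<llvm::Value *> LiveValues) {
  llvm::OperandBundleDef Deopt("deopt", std::move(LiveValues));
  return llvm::CallBase::addOperandBundle(CB, llvm::LLVMContext::OB_deopt,
                                          Deopt);
}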
1142
1143//===----------------------------------------------------------------------===//
1144// CallBase Class
1145//===----------------------------------------------------------------------===//
1146
1147/// Base class for all callable instructions (InvokeInst and CallInst)
1148/// Holds everything related to calling a function.
1149///
1150/// All call-like instructions are required to use a common operand layout:
1151/// - Zero or more arguments to the call,
1152/// - Zero or more operand bundles with zero or more operand inputs each
1153/// bundle,
1154/// - Zero or more subclass controlled operands
1155/// - The called function.
1156///
1157/// This allows this base class to easily access the called function and the
1158/// start of the arguments without knowing how many other operands a particular
1159/// subclass requires. Note that accessing the end of the argument list isn't
1160/// as cheap as most other operations on the base class.
1161class CallBase : public Instruction {
1162protected:
1163 // The first two bits are reserved by CallInst for fast retrieval,
1164 using CallInstReservedField = Bitfield::Element<unsigned, 0, 2>;
1165 using CallingConvField =
1166 Bitfield::Element<CallingConv::ID, CallInstReservedField::NextBit, 10,
1167 CallingConv::MaxID>;
1168 static_assert(
1169 Bitfield::areContiguous<CallInstReservedField, CallingConvField>(),
1170 "Bitfields must be contiguous");
1171
1172 /// The last operand is the called operand.
1173 static constexpr int CalledOperandOpEndIdx = -1;
1174
1175 AttributeList Attrs; ///< parameter attributes for callable
1176 FunctionType *FTy;
1177
1178 template <class... ArgsTy>
1179 CallBase(AttributeList const &A, FunctionType *FT, ArgsTy &&... Args)
1180 : Instruction(std::forward<ArgsTy>(Args)...), Attrs(A), FTy(FT) {}
1181
1182 using Instruction::Instruction;
1183
1184 bool hasDescriptor() const { return Value::HasDescriptor; }
1185
1186 unsigned getNumSubclassExtraOperands() const {
1187 switch (getOpcode()) {
1188 case Instruction::Call:
1189 return 0;
1190 case Instruction::Invoke:
1191 return 2;
1192 case Instruction::CallBr:
1193 return getNumSubclassExtraOperandsDynamic();
1194 }
1195 llvm_unreachable("Invalid opcode!");
1196 }
1197
1198 /// Get the number of extra operands for instructions that don't have a fixed
1199 /// number of extra operands.
1200 unsigned getNumSubclassExtraOperandsDynamic() const;
1201
1202public:
1203 using Instruction::getContext;
1204
1205 /// Create a clone of \p CB with a different set of operand bundles and
1206 /// insert it before \p InsertPt.
1207 ///
1208 /// The returned call instruction is identical to \p CB in every way except that
1209 /// the operand bundles for the new instruction are set to the operand bundles
1210 /// in \p Bundles.
1211 static CallBase *Create(CallBase *CB, ArrayRef<OperandBundleDef> Bundles,
1212 Instruction *InsertPt = nullptr);
1213
1214 /// Create a clone of \p CB with the operand bundle with the tag matching
1215 /// \p Bundle's tag replaced with Bundle, and insert it before \p InsertPt.
1216 ///
1217 /// The returned call instruction is identical to \p CB in every way except that
1218 /// the specified operand bundle has been replaced.
1219 static CallBase *Create(CallBase *CB,
1220 OperandBundleDef Bundle,
1221 Instruction *InsertPt = nullptr);
1222
1223 /// Create a clone of \p CB with operand bundle \p OB added.
1224 static CallBase *addOperandBundle(CallBase *CB, uint32_t ID,
1225 OperandBundleDef OB,
1226 Instruction *InsertPt = nullptr);
1227
1228 /// Create a clone of \p CB with operand bundle \p ID removed.
1229 static CallBase *removeOperandBundle(CallBase *CB, uint32_t ID,
1230 Instruction *InsertPt = nullptr);
1231
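A minimal sketch (not part of the listed header) of how these factory functions are typically used: a call's bundle layout is fixed at construction, so attaching a bundle means creating a clone and letting the caller retire the original. The helper name withDeoptState is an assumption for illustration.

    #include "llvm/IR/InstrTypes.h"
    #include "llvm/IR/LLVMContext.h"

    // Clone CB with an extra "deopt" bundle, inserted directly before CB.
    // The caller is still responsible for replacing uses of CB and erasing it.
    llvm::CallBase *withDeoptState(llvm::CallBase *CB,
                                   llvm::ArrayRef<llvm::Value *> State) {
      llvm::OperandBundleDef OB("deopt", State);
      llvm::CallBase *NewCB =
          llvm::CallBase::addOperandBundle(CB, llvm::LLVMContext::OB_deopt, OB, CB);
      NewCB->takeName(CB);
      return NewCB;
    }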
1232 static bool classof(const Instruction *I) {
1233 return I->getOpcode() == Instruction::Call ||
1234 I->getOpcode() == Instruction::Invoke ||
1235 I->getOpcode() == Instruction::CallBr;
1236 }
1237 static bool classof(const Value *V) {
1238 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1239 }
1240
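Because classof accepts the Call, Invoke, and CallBr opcodes, the usual isa/dyn_cast machinery works on CallBase directly. A minimal sketch (not part of the listed header); the helper name directCallee is illustrative.

    #include "llvm/IR/Function.h"
    #include "llvm/IR/InstrTypes.h"
    #include "llvm/IR/Instruction.h"
    #include "llvm/Support/Casting.h"

    // Return the statically known callee of any call-like instruction, or
    // null for non-calls and for indirect calls.
    const llvm::Function *directCallee(const llvm::Instruction &I) {
      if (const auto *CB = llvm::dyn_cast<llvm::CallBase>(&I))
        return CB->getCalledFunction();
      return nullptr;
    }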
1241 FunctionType *getFunctionType() const { return FTy; }
1242
1243 void mutateFunctionType(FunctionType *FTy) {
1244 Value::mutateType(FTy->getReturnType());
1245 this->FTy = FTy;
1246 }
1247
1248 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1249
1250 /// data_operands_begin/data_operands_end - Return iterators iterating over
1251 /// the call / invoke argument list and bundle operands. For invokes, this is
1252 /// the set of instruction operands except the invoke target and the two
1253 /// successor blocks; and for calls this is the set of instruction operands
1254 /// except the call target.
1255 User::op_iterator data_operands_begin() { return op_begin(); }
1256 User::const_op_iterator data_operands_begin() const {
1257 return const_cast<CallBase *>(this)->data_operands_begin();
1258 }
1259 User::op_iterator data_operands_end() {
1260 // Walk from the end of the operands over the called operand and any
1261 // subclass operands.
1262 return op_end() - getNumSubclassExtraOperands() - 1;
1263 }
1264 User::const_op_iterator data_operands_end() const {
1265 return const_cast<CallBase *>(this)->data_operands_end();
1266 }
1267 iterator_range<User::op_iterator> data_ops() {
1268 return make_range(data_operands_begin(), data_operands_end());
1269 }
1270 iterator_range<User::const_op_iterator> data_ops() const {
1271 return make_range(data_operands_begin(), data_operands_end());
1272 }
1273 bool data_operands_empty() const {
1274 return data_operands_end() == data_operands_begin();
1275 }
1276 unsigned data_operands_size() const {
1277 return std::distance(data_operands_begin(), data_operands_end());
1278 }
1279
1280 bool isDataOperand(const Use *U) const {
1281 assert(this == U->getUser() &&
1282 "Only valid to query with a use of this instruction!");
1283 return data_operands_begin() <= U && U < data_operands_end();
1284 }
1285 bool isDataOperand(Value::const_user_iterator UI) const {
1286 return isDataOperand(&UI.getUse());
1287 }
1288
1289 /// Given a value use iterator, return the data operand number corresponding to it.
1290 /// Iterator must actually correspond to a data operand.
1291 unsigned getDataOperandNo(Value::const_user_iterator UI) const {
1292 return getDataOperandNo(&UI.getUse());
1293 }
1294
1295 /// Given a use for a data operand, get the data operand number that
1296 /// corresponds to it.
1297 unsigned getDataOperandNo(const Use *U) const {
1298 assert(isDataOperand(U) && "Data operand # out of range!");
1299 return U - data_operands_begin();
1300 }
1301
1302 /// Return the iterator pointing to the beginning of the argument list.
1303 User::op_iterator arg_begin() { return op_begin(); }
1304 User::const_op_iterator arg_begin() const {
1305 return const_cast<CallBase *>(this)->arg_begin();
1306 }
1307
1308 /// Return the iterator pointing to the end of the argument list.
1309 User::op_iterator arg_end() {
1310 // From the end of the data operands, walk backwards past the bundle
1311 // operands.
1312 return data_operands_end() - getNumTotalBundleOperands();
1313 }
1314 User::const_op_iterator arg_end() const {
1315 return const_cast<CallBase *>(this)->arg_end();
1316 }
1317
1318 /// Iteration adapter for range-for loops.
1319 iterator_range<User::op_iterator> args() {
1320 return make_range(arg_begin(), arg_end());
1321 }
1322 iterator_range<User::const_op_iterator> args() const {
1323 return make_range(arg_begin(), arg_end());
1324 }
1325 bool arg_empty() const { return arg_end() == arg_begin(); }
1326 unsigned arg_size() const { return arg_end() - arg_begin(); }
1327
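A minimal sketch (not part of the listed header) of walking the argument range; args() excludes the bundle operands and the callee, so the indices printed here line up with getArgOperand(). The helper name is illustrative.

    #include "llvm/IR/InstrTypes.h"
    #include "llvm/Support/raw_ostream.h"

    // Print every explicit argument of a call site together with its index.
    void printCallArgs(const llvm::CallBase &CB) {
      for (unsigned I = 0, E = CB.arg_size(); I != E; ++I) {
        llvm::errs() << "arg " << I << ": ";
        CB.getArgOperand(I)->print(llvm::errs());
        llvm::errs() << "\n";
      }
    }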
1328 // Legacy API names that duplicate the above and will be removed once users
1329 // are migrated.
1330 iterator_range<User::op_iterator> arg_operands() {
1331 return make_range(arg_begin(), arg_end());
1332 }
1333 iterator_range<User::const_op_iterator> arg_operands() const {
1334 return make_range(arg_begin(), arg_end());
1335 }
1336 unsigned getNumArgOperands() const { return arg_size(); }
1337
1338 Value *getArgOperand(unsigned i) const {
1339 assert(i < getNumArgOperands() && "Out of bounds!");
1340 return getOperand(i);
1341 }
1342
1343 void setArgOperand(unsigned i, Value *v) {
1344 assert(i < getNumArgOperands() && "Out of bounds!");
1345 setOperand(i, v);
1346 }
1347
1348 /// Wrappers for getting the \c Use of a call argument.
1349 const Use &getArgOperandUse(unsigned i) const {
1350 assert(i < getNumArgOperands() && "Out of bounds!");
1351 return User::getOperandUse(i);
1352 }
1353 Use &getArgOperandUse(unsigned i) {
1354 assert(i < getNumArgOperands() && "Out of bounds!");
1355 return User::getOperandUse(i);
1356 }
1357
1358 bool isArgOperand(const Use *U) const {
1359 assert(this == U->getUser() &&
1360 "Only valid to query with a use of this instruction!");
1361 return arg_begin() <= U && U < arg_end();
1362 }
1363 bool isArgOperand(Value::const_user_iterator UI) const {
1364 return isArgOperand(&UI.getUse());
1365 }
1366
1367 /// Given a use for an arg operand, get the arg operand number that
1368 /// corresponds to it.
1369 unsigned getArgOperandNo(const Use *U) const {
1370 assert(isArgOperand(U) && "Arg operand # out of range!");
1371 return U - arg_begin();
1372 }
1373
1374 /// Given a value use iterator, return the arg operand number corresponding to
1375 /// it. The iterator must actually correspond to an arg operand.
1376 unsigned getArgOperandNo(Value::const_user_iterator UI) const {
1377 return getArgOperandNo(&UI.getUse());
1378 }
1379
1380 /// Returns true if this CallSite passes the given Value* as an argument to
1381 /// the called function.
1382 bool hasArgument(const Value *V) const {
1383 return llvm::is_contained(args(), V);
1384 }
1385
1386 Value *getCalledOperand() const { return Op<CalledOperandOpEndIdx>(); }
1387
1388 const Use &getCalledOperandUse() const { return Op<CalledOperandOpEndIdx>(); }
1389 Use &getCalledOperandUse() { return Op<CalledOperandOpEndIdx>(); }
1390
1391 /// Returns the function called, or null if this is an
1392 /// indirect function invocation.
1393 Function *getCalledFunction() const {
1394 return dyn_cast_or_null<Function>(getCalledOperand());
1395 }
1396
1397 /// Return true if the callsite is an indirect call.
1398 bool isIndirectCall() const;
1399
1400 /// Determine whether the passed iterator points to the callee operand's Use.
1401 bool isCallee(Value::const_user_iterator UI) const {
1402 return isCallee(&UI.getUse());
1403 }
1404
1405 /// Determine whether this Use is the callee operand's Use.
1406 bool isCallee(const Use *U) const { return &getCalledOperandUse() == U; }
1407
1408 /// Helper to get the caller (the parent function).
1409 Function *getCaller();
1410 const Function *getCaller() const {
1411 return const_cast<CallBase *>(this)->getCaller();
1412 }
1413
1414 /// Tests if this call site must be tail call optimized. Only a CallInst can
1415 /// be tail call optimized.
1416 bool isMustTailCall() const;
1417
1418 /// Tests if this call site is marked as a tail call.
1419 bool isTailCall() const;
1420
1421 /// Returns the intrinsic ID of the intrinsic called or
1422 /// Intrinsic::not_intrinsic if the called function is not an intrinsic, or if
1423 /// this is an indirect call.
1424 Intrinsic::ID getIntrinsicID() const;
1425
1426 void setCalledOperand(Value *V) { Op<CalledOperandOpEndIdx>() = V; }
1427
1428 /// Sets the function called, including updating the function type.
1429 void setCalledFunction(Function *Fn) {
1430 setCalledFunction(Fn->getFunctionType(), Fn);
1431 }
1432
1433 /// Sets the function called, including updating the function type.
1434 void setCalledFunction(FunctionCallee Fn) {
1435 setCalledFunction(Fn.getFunctionType(), Fn.getCallee());
1436 }
1437
1438 /// Sets the function called, including updating to the specified function
1439 /// type.
1440 void setCalledFunction(FunctionType *FTy, Value *Fn) {
1441 this->FTy = FTy;
1442 assert(cast<PointerType>(Fn->getType())->isOpaqueOrPointeeTypeMatches(FTy));
1443 // This function doesn't mutate the return type, only the function
1444 // type. Seems broken, but I'm just gonna stick an assert in for now.
1445 assert(getType() == FTy->getReturnType());
1446 setCalledOperand(Fn);
1447 }
1448
1449 CallingConv::ID getCallingConv() const {
1450 return getSubclassData<CallingConvField>();
1451 }
1452
1453 void setCallingConv(CallingConv::ID CC) {
1454 setSubclassData<CallingConvField>(CC);
1455 }
1456
1457 /// Check if this call is an inline asm statement.
1458 bool isInlineAsm() const { return isa<InlineAsm>(getCalledOperand()); }
1459
1460 /// \name Attribute API
1461 ///
1462 /// These methods access and modify attributes on this call (including
1463 /// looking through to the attributes on the called function when necessary).
1464 ///@{
1465
1466 /// Return the parameter attributes for this call.
1467 ///
1468 AttributeList getAttributes() const { return Attrs; }
1469
1470 /// Set the parameter attributes for this call.
1471 ///
1472 void setAttributes(AttributeList A) { Attrs = A; }
1473
1474 /// Determine whether this call has the given attribute. If it does not
1475 /// then determine if the called function has the attribute, but only if
1476 /// the attribute is allowed for the call.
1477 bool hasFnAttr(Attribute::AttrKind Kind) const {
1478 assert(Kind != Attribute::NoBuiltin &&
1479 "Use CallBase::isNoBuiltin() to check for Attribute::NoBuiltin");
1480 return hasFnAttrImpl(Kind);
1481 }
1482
1483 /// Determine whether this call has the given attribute. If it does not
1484 /// then determine if the called function has the attribute, but only if
1485 /// the attribute is allowed for the call.
1486 bool hasFnAttr(StringRef Kind) const { return hasFnAttrImpl(Kind); }
1487
1488 /// adds the attribute to the list of attributes.
1489 void addAttribute(unsigned i, Attribute::AttrKind Kind) {
1490 AttributeList PAL = getAttributes();
1491 PAL = PAL.addAttribute(getContext(), i, Kind);
1492 setAttributes(PAL);
1493 }
1494
1495 /// adds the attribute to the list of attributes.
1496 void addAttribute(unsigned i, Attribute Attr) {
1497 AttributeList PAL = getAttributes();
1498 PAL = PAL.addAttribute(getContext(), i, Attr);
1499 setAttributes(PAL);
1500 }
1501
1502 /// Adds the attribute to the indicated argument
1503 void addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
1504 assert(ArgNo < getNumArgOperands() && "Out of bounds");
1505 AttributeList PAL = getAttributes();
1506 PAL = PAL.addParamAttribute(getContext(), ArgNo, Kind);
1507 setAttributes(PAL);
1508 }
1509
1510 /// Adds the attribute to the indicated argument
1511 void addParamAttr(unsigned ArgNo, Attribute Attr) {
1512 assert(ArgNo < getNumArgOperands() && "Out of bounds");
1513 AttributeList PAL = getAttributes();
1514 PAL = PAL.addParamAttribute(getContext(), ArgNo, Attr);
1515 setAttributes(PAL);
1516 }
1517
1518 /// removes the attribute from the list of attributes.
1519 void removeAttribute(unsigned i, Attribute::AttrKind Kind) {
1520 AttributeList PAL = getAttributes();
1521 PAL = PAL.removeAttribute(getContext(), i, Kind);
1522 setAttributes(PAL);
1523 }
1524
1525 /// removes the attribute from the list of attributes.
1526 void removeAttribute(unsigned i, StringRef Kind) {
1527 AttributeList PAL = getAttributes();
1528 PAL = PAL.removeAttribute(getContext(), i, Kind);
1529 setAttributes(PAL);
1530 }
1531
1532 void removeAttributes(unsigned i, const AttrBuilder &Attrs) {
1533 AttributeList PAL = getAttributes();
1534 PAL = PAL.removeAttributes(getContext(), i, Attrs);
1535 setAttributes(PAL);
1536 }
1537
1538 /// Removes the attribute from the given argument
1539 void removeParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
1540 assert(ArgNo < getNumArgOperands() && "Out of bounds");
1541 AttributeList PAL = getAttributes();
1542 PAL = PAL.removeParamAttribute(getContext(), ArgNo, Kind);
1543 setAttributes(PAL);
1544 }
1545
1546 /// Removes the attribute from the given argument
1547 void removeParamAttr(unsigned ArgNo, StringRef Kind) {
1548 assert(ArgNo < getNumArgOperands() && "Out of bounds");
1549 AttributeList PAL = getAttributes();
1550 PAL = PAL.removeParamAttribute(getContext(), ArgNo, Kind);
1551 setAttributes(PAL);
1552 }
1553
1554 /// Removes the attributes from the given argument
1555 void removeParamAttrs(unsigned ArgNo, const AttrBuilder &Attrs) {
1556 AttributeList PAL = getAttributes();
1557 PAL = PAL.removeParamAttributes(getContext(), ArgNo, Attrs);
1558 setAttributes(PAL);
1559 }
1560
1561 /// adds the dereferenceable attribute to the list of attributes.
1562 void addDereferenceableAttr(unsigned i, uint64_t Bytes) {
1563 AttributeList PAL = getAttributes();
1564 PAL = PAL.addDereferenceableAttr(getContext(), i, Bytes);
1565 setAttributes(PAL);
1566 }
1567
1568 /// adds the dereferenceable_or_null attribute to the list of
1569 /// attributes.
1570 void addDereferenceableOrNullAttr(unsigned i, uint64_t Bytes) {
1571 AttributeList PAL = getAttributes();
1572 PAL = PAL.addDereferenceableOrNullAttr(getContext(), i, Bytes);
1573 setAttributes(PAL);
1574 }
1575
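A minimal sketch (not part of the listed header) of the add/remove pattern used by all of the wrappers above: copy the AttributeList, modify it, and store it back, since AttributeList itself is immutable. The helper name and the particular attributes chosen are assumptions for illustration.

    #include "llvm/IR/Attributes.h"
    #include "llvm/IR/InstrTypes.h"

    // Mark the first argument nonnull and the call itself nounwind.
    void tightenCallSite(llvm::CallBase &CB) {
      if (CB.getNumArgOperands() > 0)
        CB.addParamAttr(0, llvm::Attribute::NonNull);
      CB.addAttribute(llvm::AttributeList::FunctionIndex,
                      llvm::Attribute::NoUnwind);
    }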
1576 /// Determine whether the return value has the given attribute.
1577 bool hasRetAttr(Attribute::AttrKind Kind) const {
1578 return hasRetAttrImpl(Kind);
1579 }
1580 /// Determine whether the return value has the given attribute.
1581 bool hasRetAttr(StringRef Kind) const { return hasRetAttrImpl(Kind); }
1582
1583 /// Determine whether the argument or parameter has the given attribute.
1584 bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const;
1585
1586 /// Get the attribute of a given kind at a position.
1587 Attribute getAttribute(unsigned i, Attribute::AttrKind Kind) const {
1588 return getAttributes().getAttribute(i, Kind);
1589 }
1590
1591 /// Get the attribute of a given kind at a position.
1592 Attribute getAttribute(unsigned i, StringRef Kind) const {
1593 return getAttributes().getAttribute(i, Kind);
1594 }
1595
1596 /// Get the attribute of a given kind from a given arg
1597 Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
1598 assert(ArgNo < getNumArgOperands() && "Out of bounds");
1599 return getAttributes().getParamAttr(ArgNo, Kind);
1600 }
1601
1602 /// Get the attribute of a given kind from a given arg
1603 Attribute getParamAttr(unsigned ArgNo, StringRef Kind) const {
1604 assert(ArgNo < getNumArgOperands() && "Out of bounds");
1605 return getAttributes().getParamAttr(ArgNo, Kind);
1606 }
1607
1608 /// Return true if the data operand at index \p i has the attribute \p
1609 /// Kind.
1610 ///
1611 /// Data operands include call arguments and values used in operand bundles,
1612 /// but does not include the callee operand. This routine dispatches to the
1613 /// underlying AttributeList or the OperandBundleUser as appropriate.
1614 ///
1615 /// The index \p i is interpreted as
1616 ///
1617 /// \p i == AttributeList::ReturnIndex -> the return value
1618 /// \p i in [1, arg_size + 1) -> argument number (\p i - 1)
1619 /// \p i in [arg_size + 1, data_operand_size + 1) -> bundle operand at index
1620 /// (\p i - 1) in the operand list.
1621 bool dataOperandHasImpliedAttr(unsigned i, Attribute::AttrKind Kind) const {
1622 // Note that we have to add one because `i` isn't zero-indexed.
1623 assert(i < (getNumArgOperands() + getNumTotalBundleOperands() + 1) &&
1624 "Data operand index out of bounds!");
1625
1626 // The attribute Kind can either be directly specified, if the operand in
1627 // question is a call argument; or be indirectly implied by the kind of its
1628 // containing operand bundle, if the operand is a bundle operand.
1629
1630 if (i == AttributeList::ReturnIndex)
1631 return hasRetAttr(Kind);
1632
1633 // FIXME: Avoid these i - 1 calculations and update the API to use
1634 // zero-based indices.
1635 if (i < (getNumArgOperands() + 1))
1636 return paramHasAttr(i - 1, Kind);
1637
1638 assert(hasOperandBundles() && i >= (getBundleOperandsStartIndex() + 1) &&
1639 "Must be either a call argument or an operand bundle!");
1640 return bundleOperandHasAttr(i - 1, Kind);
1641 }
1642
1643 /// Determine whether this data operand is not captured.
1644 // FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
1645 // better indicate that this may return a conservative answer.
1646 bool doesNotCapture(unsigned OpNo) const {
1647 return dataOperandHasImpliedAttr(OpNo + 1, Attribute::NoCapture);
1648 }
1649
1650 /// Determine whether this argument is passed by value.
1651 bool isByValArgument(unsigned ArgNo) const {
1652 return paramHasAttr(ArgNo, Attribute::ByVal);
1653 }
1654
1655 /// Determine whether this argument is passed in an alloca.
1656 bool isInAllocaArgument(unsigned ArgNo) const {
1657 return paramHasAttr(ArgNo, Attribute::InAlloca);
1658 }
1659
1660 /// Determine whether this argument is passed by value, in an alloca, or is
1661 /// preallocated.
1662 bool isPassPointeeByValueArgument(unsigned ArgNo) const {
1663 return paramHasAttr(ArgNo, Attribute::ByVal) ||
1664 paramHasAttr(ArgNo, Attribute::InAlloca) ||
1665 paramHasAttr(ArgNo, Attribute::Preallocated);
1666 }
1667
1668 /// Determine whether passing undef to this argument is undefined behavior.
1669 /// If passing undef to this argument is UB, passing poison is UB as well
1670 /// because poison is more undefined than undef.
1671 bool isPassingUndefUB(unsigned ArgNo) const {
1672 return paramHasAttr(ArgNo, Attribute::NoUndef) ||
1673 // dereferenceable implies noundef.
1674 paramHasAttr(ArgNo, Attribute::Dereferenceable) ||
1675 // dereferenceable implies noundef, and null is a well-defined value.
1676 paramHasAttr(ArgNo, Attribute::DereferenceableOrNull);
1677 }
1678
1679 /// Determine if there is an inalloca argument. Only the last argument can
1680 /// have the inalloca attribute.
1681 bool hasInAllocaArgument() const {
1682 return !arg_empty() && paramHasAttr(arg_size() - 1, Attribute::InAlloca);
1683 }
1684
1685 // FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
1686 // better indicate that this may return a conservative answer.
1687 bool doesNotAccessMemory(unsigned OpNo) const {
1688 return dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadNone);
1689 }
1690
1691 // FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
1692 // better indicate that this may return a conservative answer.
1693 bool onlyReadsMemory(unsigned OpNo) const {
1694 return dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadOnly) ||
1695 dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadNone);
1696 }
1697
1698 // FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
1699 // better indicate that this may return a conservative answer.
1700 bool doesNotReadMemory(unsigned OpNo) const {
1701 return dataOperandHasImpliedAttr(OpNo + 1, Attribute::WriteOnly) ||
1702 dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadNone);
1703 }
1704
1705 /// Extract the alignment of the return value.
1706 MaybeAlign getRetAlign() const { return Attrs.getRetAlignment(); }
1707
1708 /// Extract the alignment for a call or parameter (0=unknown).
1709 MaybeAlign getParamAlign(unsigned ArgNo) const {
1710 return Attrs.getParamAlignment(ArgNo);
1711 }
1712
1713 MaybeAlign getParamStackAlign(unsigned ArgNo) const {
1714 return Attrs.getParamStackAlignment(ArgNo);
1715 }
1716
1717 /// Extract the byval type for a call or parameter.
1718 Type *getParamByValType(unsigned ArgNo) const {
1719 if (auto *Ty = Attrs.getParamByValType(ArgNo))
1720 return Ty;
1721 if (const Function *F = getCalledFunction())
1722 return F->getAttributes().getParamByValType(ArgNo);
1723 return nullptr;
1724 }
1725
1726 /// Extract the preallocated type for a call or parameter.
1727 Type *getParamPreallocatedType(unsigned ArgNo) const {
1728 if (auto *Ty = Attrs.getParamPreallocatedType(ArgNo))
1729 return Ty;
1730 if (const Function *F = getCalledFunction())
1731 return F->getAttributes().getParamPreallocatedType(ArgNo);
1732 return nullptr;
1733 }
1734
1735 /// Extract the inalloca type for a call or parameter.
1736 Type *getParamInAllocaType(unsigned ArgNo) const {
1737 if (auto *Ty = Attrs.getParamInAllocaType(ArgNo))
1738 return Ty;
1739 if (const Function *F = getCalledFunction())
1740 return F->getAttributes().getParamInAllocaType(ArgNo);
1741 return nullptr;
1742 }
1743
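A minimal sketch (not part of the listed header) showing why these getters fall back to the callee: the call site's attribute list may omit the pointee type even when the function's attributes carry it. The DataLayout parameter and the helper name are assumptions for illustration.

    #include "llvm/ADT/Optional.h"
    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/InstrTypes.h"

    // Size in bytes of a byval argument, or None if the argument is not byval.
    llvm::Optional<uint64_t> byValSizeInBytes(const llvm::CallBase &CB,
                                              const llvm::DataLayout &DL,
                                              unsigned ArgNo) {
      if (llvm::Type *Ty = CB.getParamByValType(ArgNo))
        return DL.getTypeAllocSize(Ty).getFixedSize();
      return llvm::None;
    }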
1744 /// Extract the number of dereferenceable bytes for a call or
1745 /// parameter (0=unknown).
1746 uint64_t getDereferenceableBytes(unsigned i) const {
1747 return Attrs.getDereferenceableBytes(i);
1748 }
1749
1750 /// Extract the number of dereferenceable_or_null bytes for a call or
1751 /// parameter (0=unknown).
1752 uint64_t getDereferenceableOrNullBytes(unsigned i) const {
1753 return Attrs.getDereferenceableOrNullBytes(i);
1754 }
1755
1756 /// Return true if the return value is known to be not null.
1757 /// This may be because it has the nonnull attribute, or because at least
1758 /// one byte is dereferenceable and the pointer is in addrspace(0).
1759 bool isReturnNonNull() const;
1760
1761 /// Determine if the return value is marked with NoAlias attribute.
1762 bool returnDoesNotAlias() const {
1763 return Attrs.hasAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
1764 }
1765
1766 /// If one of the arguments has the 'returned' attribute, returns its
1767 /// operand value. Otherwise, return nullptr.
1768 Value *getReturnedArgOperand() const;
1769
1770 /// Return true if the call should not be treated as a call to a
1771 /// builtin.
1772 bool isNoBuiltin() const {
1773 return hasFnAttrImpl(Attribute::NoBuiltin) &&
1774 !hasFnAttrImpl(Attribute::Builtin);
1775 }
1776
1777 /// Determine if the call requires strict floating point semantics.
1778 bool isStrictFP() const { return hasFnAttr(Attribute::StrictFP); }
1779
1780 /// Return true if the call should not be inlined.
1781 bool isNoInline() const { return hasFnAttr(Attribute::NoInline); }
1782 void setIsNoInline() {
1783 addAttribute(AttributeList::FunctionIndex, Attribute::NoInline);
1784 }
1785 /// Determine if the call does not access memory.
1786 bool doesNotAccessMemory() const { return hasFnAttr(Attribute::ReadNone); }
1787 void setDoesNotAccessMemory() {
1788 addAttribute(AttributeList::FunctionIndex, Attribute::ReadNone);
1789 }
1790
1791 /// Determine if the call does not access or only reads memory.
1792 bool onlyReadsMemory() const {
1793 return doesNotAccessMemory() || hasFnAttr(Attribute::ReadOnly);
1794 }
1795
1796 void setOnlyReadsMemory() {
1797 addAttribute(AttributeList::FunctionIndex, Attribute::ReadOnly);
1798 }
1799
1800 /// Determine if the call does not access or only writes memory.
1801 bool doesNotReadMemory() const {
1802 return doesNotAccessMemory() || hasFnAttr(Attribute::WriteOnly);
1803 }
1804 void setDoesNotReadMemory() {
1805 addAttribute(AttributeList::FunctionIndex, Attribute::WriteOnly);
1806 }
1807
1808 /// Determine if the call can access memory only using pointers based
1809 /// on its arguments.
1810 bool onlyAccessesArgMemory() const {
1811 return hasFnAttr(Attribute::ArgMemOnly);
1812 }
1813 void setOnlyAccessesArgMemory() {
1814 addAttribute(AttributeList::FunctionIndex, Attribute::ArgMemOnly);
1815 }
1816
1817 /// Determine if the function may only access memory that is
1818 /// inaccessible from the IR.
1819 bool onlyAccessesInaccessibleMemory() const {
1820 return hasFnAttr(Attribute::InaccessibleMemOnly);
1821 }
1822 void setOnlyAccessesInaccessibleMemory() {
1823 addAttribute(AttributeList::FunctionIndex, Attribute::InaccessibleMemOnly);
1824 }
1825
1826 /// Determine if the function may only access memory that is
1827 /// either inaccessible from the IR or pointed to by its arguments.
1828 bool onlyAccessesInaccessibleMemOrArgMem() const {
1829 return hasFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
1830 }
1831 void setOnlyAccessesInaccessibleMemOrArgMem() {
1832 addAttribute(AttributeList::FunctionIndex,
1833 Attribute::InaccessibleMemOrArgMemOnly);
1834 }
1835 /// Determine if the call cannot return.
1836 bool doesNotReturn() const { return hasFnAttr(Attribute::NoReturn); }
1837 void setDoesNotReturn() {
1838 addAttribute(AttributeList::FunctionIndex, Attribute::NoReturn);
1839 }
1840
1841 /// Determine if the call should not perform indirect branch tracking.
1842 bool doesNoCfCheck() const { return hasFnAttr(Attribute::NoCfCheck); }
1843
1844 /// Determine if the call cannot unwind.
1845 bool doesNotThrow() const { return hasFnAttr(Attribute::NoUnwind); }
1846 void setDoesNotThrow() {
1847 addAttribute(AttributeList::FunctionIndex, Attribute::NoUnwind);
1848 }
1849
1850 /// Determine if the invoke cannot be duplicated.
1851 bool cannotDuplicate() const { return hasFnAttr(Attribute::NoDuplicate); }
1852 void setCannotDuplicate() {
1853 addAttribute(AttributeList::FunctionIndex, Attribute::NoDuplicate);
1854 }
1855
1856 /// Determine if the call cannot be tail merged.
1857 bool cannotMerge() const { return hasFnAttr(Attribute::NoMerge); }
1858 void setCannotMerge() {
1859 addAttribute(AttributeList::FunctionIndex, Attribute::NoMerge);
1860 }
1861
1862 /// Determine if the invoke is convergent
1863 bool isConvergent() const { return hasFnAttr(Attribute::Convergent); }
1864 void setConvergent() {
1865 addAttribute(AttributeList::FunctionIndex, Attribute::Convergent);
1866 }
1867 void setNotConvergent() {
1868 removeAttribute(AttributeList::FunctionIndex, Attribute::Convergent);
1869 }
1870
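A minimal sketch (not part of the listed header) combining several of the predicates above into one conservative filter; the helper name and the exact combination of checks are illustrative assumptions.

    #include "llvm/IR/InstrTypes.h"

    // True only when the call's attributes say it does not write memory,
    // cannot unwind, and is not convergent.
    bool isEasyToReorder(const llvm::CallBase &CB) {
      return CB.onlyReadsMemory() && CB.doesNotThrow() && !CB.isConvergent();
    }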
1871 /// Determine if the call returns a structure through its first
1872 /// pointer argument.
1873 bool hasStructRetAttr() const {
1874 if (getNumArgOperands() == 0)
1875 return false;
1876
1877 // Be friendly and also check the callee.
1878 return paramHasAttr(0, Attribute::StructRet);
1879 }
1880
1881 /// Determine if any call argument is an aggregate passed by value.
1882 bool hasByValArgument() const {
1883 return Attrs.hasAttrSomewhere(Attribute::ByVal);
1884 }
1885
1886 ///@{
1887 // End of attribute API.
1888
1889 /// \name Operand Bundle API
1890 ///
1891 /// This group of methods provides the API to access and manipulate operand
1892 /// bundles on this call.
1893 /// @{
1894
1895 /// Return the number of operand bundles associated with this User.
1896 unsigned getNumOperandBundles() const {
1897 return std::distance(bundle_op_info_begin(), bundle_op_info_end());
1898 }
1899
1900 /// Return true if this User has any operand bundles.
1901 bool hasOperandBundles() const { return getNumOperandBundles() != 0; }
1902
1903 /// Return the index of the first bundle operand in the Use array.
1904 unsigned getBundleOperandsStartIndex() const {
1905 assert(hasOperandBundles() && "Don't call otherwise!");
1906 return bundle_op_info_begin()->Begin;
1907 }
1908
1909 /// Return the index of the last bundle operand in the Use array.
1910 unsigned getBundleOperandsEndIndex() const {
1911 assert(hasOperandBundles() && "Don't call otherwise!");
1912 return bundle_op_info_end()[-1].End;
1913 }
1914
1915 /// Return true if the operand at index \p Idx is a bundle operand.
1916 bool isBundleOperand(unsigned Idx) const {
1917 return hasOperandBundles() && Idx >= getBundleOperandsStartIndex() &&
1918 Idx < getBundleOperandsEndIndex();
1919 }
1920
1921 /// Returns true if the use is a bundle operand.
1922 bool isBundleOperand(const Use *U) const {
1923 assert(this == U->getUser() &&
1924 "Only valid to query with a use of this instruction!");
1925 return hasOperandBundles() && isBundleOperand(U - op_begin());
1926 }
1927 bool isBundleOperand(Value::const_user_iterator UI) const {
1928 return isBundleOperand(&UI.getUse());
1929 }
1930
1931 /// Return the total number of operands (not operand bundles) used by
1932 /// every operand bundle in this OperandBundleUser.
1933 unsigned getNumTotalBundleOperands() const {
1934 if (!hasOperandBundles())
1935 return 0;
1936
1937 unsigned Begin = getBundleOperandsStartIndex();
1938 unsigned End = getBundleOperandsEndIndex();
1939
1940 assert(Begin <= End && "Should be!");
1941 return End - Begin;
1942 }
1943
1944 /// Return the operand bundle at a specific index.
1945 OperandBundleUse getOperandBundleAt(unsigned Index) const {
1946 assert(Index < getNumOperandBundles() && "Index out of bounds!");
1947 return operandBundleFromBundleOpInfo(*(bundle_op_info_begin() + Index));
1948 }
1949
1950 /// Return the number of operand bundles with the tag Name attached to
1951 /// this instruction.
1952 unsigned countOperandBundlesOfType(StringRef Name) const {
1953 unsigned Count = 0;
1954 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
1955 if (getOperandBundleAt(i).getTagName() == Name)
1956 Count++;
1957
1958 return Count;
1959 }
1960
1961 /// Return the number of operand bundles with the tag ID attached to
1962 /// this instruction.
1963 unsigned countOperandBundlesOfType(uint32_t ID) const {
1964 unsigned Count = 0;
1965 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
1966 if (getOperandBundleAt(i).getTagID() == ID)
1967 Count++;
1968
1969 return Count;
1970 }
1971
1972 /// Return an operand bundle by name, if present.
1973 ///
1974 /// It is an error to call this for operand bundle types that may have
1975 /// multiple instances on the same instruction.
1976 Optional<OperandBundleUse> getOperandBundle(StringRef Name) const {
1977 assert(countOperandBundlesOfType(Name) < 2 && "Precondition violated!");
1978
1979 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i) {
1980 OperandBundleUse U = getOperandBundleAt(i);
1981 if (U.getTagName() == Name)
1982 return U;
1983 }
1984
1985 return None;
1986 }
1987
1988 /// Return an operand bundle by tag ID, if present.
1989 ///
1990 /// It is an error to call this for operand bundle types that may have
1991 /// multiple instances on the same instruction.
1992 Optional<OperandBundleUse> getOperandBundle(uint32_t ID) const {
1993 assert(countOperandBundlesOfType(ID) < 2 && "Precondition violated!");
1994
1995 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i) {
1996 OperandBundleUse U = getOperandBundleAt(i);
1997 if (U.getTagID() == ID)
1998 return U;
1999 }
2000
2001 return None;
2002 }
2003
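A minimal sketch (not part of the listed header), assuming the IR verifier's rule that a call carries at most one "deopt" bundle, which is what makes the single-result lookup above safe. The helper name is illustrative.

    #include "llvm/ADT/Optional.h"
    #include "llvm/IR/InstrTypes.h"
    #include "llvm/IR/LLVMContext.h"

    // Number of values recorded in the call's "deopt" bundle, or 0 if absent.
    size_t numDeoptInputs(const llvm::CallBase &CB) {
      if (llvm::Optional<llvm::OperandBundleUse> OB =
              CB.getOperandBundle(llvm::LLVMContext::OB_deopt))
        return OB->Inputs.size();
      return 0;
    }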
2004 /// Return the list of operand bundles attached to this instruction as
2005 /// a vector of OperandBundleDefs.
2006 ///
2007 /// This function copies the OperandBundleUse instances associated with this
2008 /// OperandBundleUser to a vector of OperandBundleDefs. Note:
2009 /// OperandBundleUses and OperandBundleDefs are non-trivially *different*
2010 /// representations of operand bundles (see documentation above).
2011 void getOperandBundlesAsDefs(SmallVectorImpl<OperandBundleDef> &Defs) const;
2012
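A minimal sketch (not part of the listed header) of the copy-out pattern this function exists for: materialize the bundles as OperandBundleDefs so they can be handed back to CallBase::Create when building a replacement call. The helper name is illustrative.

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/IR/InstrTypes.h"

    // Clone CB immediately before itself while preserving its operand bundles.
    llvm::CallBase *cloneKeepingBundles(llvm::CallBase *CB) {
      llvm::SmallVector<llvm::OperandBundleDef, 2> Defs;
      CB->getOperandBundlesAsDefs(Defs);
      return llvm::CallBase::Create(CB, Defs, /*InsertPt=*/CB);
    }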
2013 /// Return the operand bundle for the operand at index OpIdx.
2014 ///
2015 /// It is an error to call this with an OpIdx that does not correspond to a
2016 /// bundle operand.
2017 OperandBundleUse getOperandBundleForOperand(unsigned OpIdx) const {
2018 return operandBundleFromBundleOpInfo(getBundleOpInfoForOperand(OpIdx));
2019 }
2020
2021 /// Return true if this operand bundle user has operand bundles that
2022 /// may read from the heap.
2023 bool hasReadingOperandBundles() const;
2024
2025 /// Return true if this operand bundle user has operand bundles that
2026 /// may write to the heap.
2027 bool hasClobberingOperandBundles() const {
2028 for (auto &BOI : bundle_op_infos()) {
2029 if (BOI.Tag->second == LLVMContext::OB_deopt ||
2030 BOI.Tag->second == LLVMContext::OB_funclet)
2031 continue;
2032
2033 // This instruction has an operand bundle that is not known to us.
2034 // Assume the worst.
2035 return true;
2036 }
2037
2038 return false;
2039 }
2040
2041 /// Return true if the bundle operand at index \p OpIdx has the
2042 /// attribute \p A.
2043 bool bundleOperandHasAttr(unsigned OpIdx, Attribute::AttrKind A) const {
2044 auto &BOI = getBundleOpInfoForOperand(OpIdx);
2045 auto OBU = operandBundleFromBundleOpInfo(BOI);
2046 return OBU.operandHasAttr(OpIdx - BOI.Begin, A);
2047 }
2048
2049 /// Return true if \p Other has the same sequence of operand bundle
2050 /// tags with the same number of operands on each one of them as this
2051 /// OperandBundleUser.
2052 bool hasIdenticalOperandBundleSchema(const CallBase &Other) const {
2053 if (getNumOperandBundles() != Other.getNumOperandBundles())
2054 return false;
2055
2056 return std::equal(bundle_op_info_begin(), bundle_op_info_end(),
2057 Other.bundle_op_info_begin());
2058 }
2059
2060 /// Return true if this operand bundle user contains operand bundles
2061 /// with tags other than those specified in \p IDs.
2062 bool hasOperandBundlesOtherThan(ArrayRef<uint32_t> IDs) const {
2063 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i) {
2064 uint32_t ID = getOperandBundleAt(i).getTagID();
2065 if (!is_contained(IDs, ID))
2066 return true;
2067 }
2068 return false;
2069 }
2070
2071 /// Is the function attribute S disallowed by some operand bundle on
2072 /// this operand bundle user?
2073 bool isFnAttrDisallowedByOpBundle(StringRef S) const {
2074 // Operand bundles only possibly disallow readnone, readonly and argmemonly
2075 // attributes. All String attributes are fine.
2076 return false;
2077 }
2078
2079 /// Is the function attribute A disallowed by some operand bundle on
2080 /// this operand bundle user?
2081 bool isFnAttrDisallowedByOpBundle(Attribute::AttrKind A) const {
2082 switch (A) {
2083 default:
2084 return false;
2085
2086 case Attribute::InaccessibleMemOrArgMemOnly:
2087 return hasReadingOperandBundles();
2088
2089 case Attribute::InaccessibleMemOnly:
2090 return hasReadingOperandBundles();
2091
2092 case Attribute::ArgMemOnly:
2093 return hasReadingOperandBundles();
2094
2095 case Attribute::ReadNone:
2096 return hasReadingOperandBundles();
2097
2098 case Attribute::ReadOnly:
2099 return hasClobberingOperandBundles();
2100 }
2101
2102 llvm_unreachable("switch has a default case!");
2103 }
2104
2105 /// Used to keep track of an operand bundle. See the main comment on
2106 /// OperandBundleUser above.
2107 struct BundleOpInfo {
2108 /// The operand bundle tag, interned by
2109 /// LLVMContextImpl::getOrInsertBundleTag.
2110 StringMapEntry<uint32_t> *Tag;
2111
2112 /// The index in the Use& vector where operands for this operand
2113 /// bundle starts.
2114 uint32_t Begin;
2115
2116 /// The index in the Use& vector where operands for this operand
2117 /// bundle ends.
2118 uint32_t End;
2119
2120 bool operator==(const BundleOpInfo &Other) const {
2121 return Tag == Other.Tag && Begin == Other.Begin && End == Other.End;
2122 }
2123 };
2124
2125 /// Simple helper function to map a BundleOpInfo to an
2126 /// OperandBundleUse.
2127 OperandBundleUse
2128 operandBundleFromBundleOpInfo(const BundleOpInfo &BOI) const {
2129 auto begin = op_begin();
2130 ArrayRef<Use> Inputs(begin + BOI.Begin, begin + BOI.End);
2131 return OperandBundleUse(BOI.Tag, Inputs);
2132 }
2133
2134 using bundle_op_iterator = BundleOpInfo *;
2135 using const_bundle_op_iterator = const BundleOpInfo *;
2136
2137 /// Return the start of the list of BundleOpInfo instances associated
2138 /// with this OperandBundleUser.
2139 ///
2140 /// OperandBundleUser uses the descriptor area co-allocated with the host User
2141 /// to store some meta information about which operands are "normal" operands,
2142 /// and which ones belong to some operand bundle.
2143 ///
2144 /// The layout of an operand bundle user is
2145 ///
2146 /// +-----------uint32_t End-------------------------------------+
2147 /// | |
2148 /// | +--------uint32_t Begin--------------------+ |
2149 /// | | | |
2150 /// ^ ^ v v
2151 /// |------|------|----|----|----|----|----|---------|----|---------|----|-----
2152 /// | BOI0 | BOI1 | .. | DU | U0 | U1 | .. | BOI0_U0 | .. | BOI1_U0 | .. | Un
2153 /// |------|------|----|----|----|----|----|---------|----|---------|----|-----
2154 /// v v ^ ^
2155 /// | | | |
2156 /// | +--------uint32_t Begin------------+ |
2157 /// | |
2158 /// +-----------uint32_t End-----------------------------+
2159 ///
2160 ///
2161 /// BOI0, BOI1 ... are descriptions of operand bundles in this User's use
2162 /// list. These descriptions are installed and managed by this class, and
2163 /// they're all instances of OperandBundleUser<T>::BundleOpInfo.
2164 ///
2165 /// DU is an additional descriptor installed by User's 'operator new' to keep
2166 /// track of the 'BOI0 ... BOIN' co-allocation. OperandBundleUser does not
2167 /// access or modify DU in any way, it's an implementation detail private to
2168 /// User.
2169 ///
2170 /// The regular Use& vector for the User starts at U0. The operand bundle
2171 /// uses are part of the Use& vector, just like normal uses. In the diagram
2172 /// above, the operand bundle uses start at BOI0_U0. Each instance of
2173 /// BundleOpInfo has information about a contiguous set of uses constituting
2174 /// an operand bundle, and the total set of operand bundle uses themselves
2175 /// form a contiguous set of uses (i.e. there are no gaps between uses
2176 /// corresponding to individual operand bundles).
2177 ///
2178 /// This class does not know the location of the set of operand bundle uses
2179 /// within the use list -- that is decided by the User using this class via
2180 /// the BeginIdx argument in populateBundleOperandInfos.
2181 ///
2182 /// Currently operand bundle users with hung-off operands are not supported.
2183 bundle_op_iterator bundle_op_info_begin() {
2184 if (!hasDescriptor())
2185 return nullptr;
2186
2187 uint8_t *BytesBegin = getDescriptor().begin();
2188 return reinterpret_cast<bundle_op_iterator>(BytesBegin);
2189 }
2190
2191 /// Return the start of the list of BundleOpInfo instances associated
2192 /// with this OperandBundleUser.
2193 const_bundle_op_iterator bundle_op_info_begin() const {
2194 auto *NonConstThis = const_cast<CallBase *>(this);
2195 return NonConstThis->bundle_op_info_begin();
2196 }
2197
2198 /// Return the end of the list of BundleOpInfo instances associated
2199 /// with this OperandBundleUser.
2200 bundle_op_iterator bundle_op_info_end() {
2201 if (!hasDescriptor())
2202 return nullptr;
2203
2204 uint8_t *BytesEnd = getDescriptor().end();
2205 return reinterpret_cast<bundle_op_iterator>(BytesEnd);
2206 }
2207
2208 /// Return the end of the list of BundleOpInfo instances associated
2209 /// with this OperandBundleUser.
2210 const_bundle_op_iterator bundle_op_info_end() const {
2211 auto *NonConstThis = const_cast<CallBase *>(this);
2212 return NonConstThis->bundle_op_info_end();
2213 }
2214
2215 /// Return the range [\p bundle_op_info_begin, \p bundle_op_info_end).
2216 iterator_range<bundle_op_iterator> bundle_op_infos() {
2217 return make_range(bundle_op_info_begin(), bundle_op_info_end());
2218 }
2219
2220 /// Return the range [\p bundle_op_info_begin, \p bundle_op_info_end).
2221 iterator_range<const_bundle_op_iterator> bundle_op_infos() const {
2222 return make_range(bundle_op_info_begin(), bundle_op_info_end());
2223 }
2224
2225 /// Populate the BundleOpInfo instances and the Use& vector from \p
2226 /// Bundles. Return the op_iterator pointing to the Use& one past the
2227 /// last bundle operand use.
2228 ///
2229 /// Each \p OperandBundleDef instance is tracked by a OperandBundleInfo
2230 /// instance allocated in this User's descriptor.
2231 op_iterator populateBundleOperandInfos(ArrayRef<OperandBundleDef> Bundles,
2232 const unsigned BeginIndex);
2233
2234public:
2235 /// Return the BundleOpInfo for the operand at index OpIdx.
2236 ///
2237 /// It is an error to call this with an OpIdx that does not correspond to a
2238 /// bundle operand.
2239 BundleOpInfo &getBundleOpInfoForOperand(unsigned OpIdx);
2240 const BundleOpInfo &getBundleOpInfoForOperand(unsigned OpIdx) const {
2241 return const_cast<CallBase *>(this)->getBundleOpInfoForOperand(OpIdx);
2242 }
2243
2244protected:
2245 /// Return the total number of values used in \p Bundles.
2246 static unsigned CountBundleInputs(ArrayRef<OperandBundleDef> Bundles) {
2247 unsigned Total = 0;
2248 for (auto &B : Bundles)
2249 Total += B.input_size();
2250 return Total;
2251 }
2252
2253 /// @}
2254 // End of operand bundle API.
2255
2256private:
2257 bool hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const;
2258 bool hasFnAttrOnCalledFunction(StringRef Kind) const;
2259
2260 template <typename AttrKind> bool hasFnAttrImpl(AttrKind Kind) const {
2261 if (Attrs.hasFnAttribute(Kind))
2262 return true;
2263
2264 // Operand bundles override attributes on the called function, but don't
2265 // override attributes directly present on the call instruction.
2266 if (isFnAttrDisallowedByOpBundle(Kind))
2267 return false;
2268
2269 return hasFnAttrOnCalledFunction(Kind);
2270 }
2271
2272 /// Determine whether the return value has the given attribute. Supports
2273 /// Attribute::AttrKind and StringRef as \p AttrKind types.
2274 template <typename AttrKind> bool hasRetAttrImpl(AttrKind Kind) const {
2275 if (Attrs.hasAttribute(AttributeList::ReturnIndex, Kind))
2276 return true;
2277
2278 // Look at the callee, if available.
2279 if (const Function *F = getCalledFunction())
2280 return F->getAttributes().hasAttribute(AttributeList::ReturnIndex, Kind);
2281 return false;
2282 }
2283};
2284
2285template <>
2286struct OperandTraits<CallBase> : public VariadicOperandTraits<CallBase, 1> {};
2287
2288 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CallBase, Value)
2289
2290//===----------------------------------------------------------------------===//
2291// FuncletPadInst Class
2292//===----------------------------------------------------------------------===//
2293class FuncletPadInst : public Instruction {
2294private:
2295 FuncletPadInst(const FuncletPadInst &CPI);
2296
2297 explicit FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
2298 ArrayRef<Value *> Args, unsigned Values,
2299 const Twine &NameStr, Instruction *InsertBefore);
2300 explicit FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
2301 ArrayRef<Value *> Args, unsigned Values,
2302 const Twine &NameStr, BasicBlock *InsertAtEnd);
2303
2304 void init(Value *ParentPad, ArrayRef<Value *> Args, const Twine &NameStr);
2305
2306protected:
2307 // Note: Instruction needs to be a friend here to call cloneImpl.
2308 friend class Instruction;
2309 friend class CatchPadInst;
2310 friend class CleanupPadInst;
2311
2312 FuncletPadInst *cloneImpl() const;
2313
2314public:
2315 /// Provide fast operand accessors
2316 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2317
2318 /// getNumArgOperands - Return the number of funcletpad arguments.
2319 ///
2320 unsigned getNumArgOperands() const { return getNumOperands() - 1; }
2321
2322 /// Convenience accessors
2323
2324 /// Return the outer EH-pad this funclet is nested within.
2325 ///
2326 /// Note: This returns the associated CatchSwitchInst if this FuncletPadInst
2327 /// is a CatchPadInst.
2328 Value *getParentPad() const { return Op<-1>(); }
2329 void setParentPad(Value *ParentPad) {
2330 assert(ParentPad);
2331 Op<-1>() = ParentPad;
2332 }
2333
2334 /// getArgOperand/setArgOperand - Return/set the i-th funcletpad argument.
2335 ///
2336 Value *getArgOperand(unsigned i) const { return getOperand(i); }
2337 void setArgOperand(unsigned i, Value *v) { setOperand(i, v); }
2338
2339 /// arg_operands - iteration adapter for range-for loops.
2340 op_range arg_operands() { return op_range(op_begin(), op_end() - 1); }
2341
2342 /// arg_operands - iteration adapter for range-for loops.
2343 const_op_range arg_operands() const {
2344 return const_op_range(op_begin(), op_end() - 1);
2345 }
2346
2347 // Methods for support type inquiry through isa, cast, and dyn_cast:
2348 static bool classof(const Instruction *I) { return I->isFuncletPad(); }
2349 static bool classof(const Value *V) {
2350 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2351 }
2352};
2353
2354template <>
2355struct OperandTraits<FuncletPadInst>
2356 : public VariadicOperandTraits<FuncletPadInst, /*MINARITY=*/1> {};
2357
2358 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(FuncletPadInst, Value)
2359
2360} // end namespace llvm
2361
2362#endif // LLVM_IR_INSTRTYPES_H