Bug Summary

File: src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Support/Alignment.h
Warning: line 85, column 47
The result of the left shift is undefined due to shifting by '255', which is greater or equal to the width of type 'uint64_t'
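
The path below starts in MachineMemOperand::print (MachineOperand.cpp, line 1183), goes through getAlign() and commonAlignment() into the Align(uint64_t) constructor, where the analyzer takes the value returned by Log2_64 to be 255; Align::value() then shifts uint64_t(1) by that amount. Because the build defines NDEBUG (see the invocation below), the assertions in the constructor are compiled out and do not bound ShiftValue on this path. The following stand-alone sketch uses made-up names (TinyAlign, log2OfPow2) and is not the LLVM code; it only reproduces the shape of the flagged shift.

#include <cassert>
#include <cstdint>

// Stand-in for Log2_64; returning 255 for 0 here mirrors the value the
// analyzer assumes reaches ShiftValue on the reported path.
static unsigned log2OfPow2(uint64_t Value) {
  if (Value == 0)
    return 255;
  unsigned Log = 0;
  while (Value >>= 1)
    ++Log;
  return Log;
}

struct TinyAlign {
  uint8_t ShiftValue = 0; // log2 of the alignment, expected to stay below 64

  explicit TinyAlign(uint64_t Value) {
    ShiftValue = static_cast<uint8_t>(log2OfPow2(Value));
    // With -D NDEBUG this assert is a no-op, so nothing on the path proves
    // ShiftValue < 64 to the analyzer.
    assert(ShiftValue < 64 && "Broken invariant");
  }

  uint64_t value() const {
    // Undefined behaviour if ShiftValue >= 64 (the width of uint64_t);
    // this is the pattern flagged at Alignment.h line 85.
    return uint64_t(1) << ShiftValue;
  }
};

int main() {
  TinyAlign A(8);                            // well-formed: ShiftValue == 3
  return static_cast<int>(A.value() & 0xff); // returns 8
}

In the real header, the power-of-two invariant (and the "ShiftValue is less than 64 by construction" comment) keeps the shift in range at runtime; the warning reflects what the analyzer can prove on this particular path, not an observed crash.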

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name MachineOperand.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model pic -pic-level 1 -fhalf-no-semantic-interposition -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/gnu/usr.bin/clang/libLLVM/obj -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Analysis -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ASMParser -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/BinaryFormat -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitcode -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitcode -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitstream -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /include/llvm/CodeGen -I /include/llvm/CodeGen/PBQP -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/IR -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/IR -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Coroutines -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ProfileData/Coverage -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/CodeView -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/DWARF -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/MSF -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/PDB -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Demangle -I 
/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine/JITLink -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine/Orc -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend/OpenACC -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend/OpenMP -I /include/llvm/CodeGen/GlobalISel -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/IRReader -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/InstCombine -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/Transforms/InstCombine -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/LTO -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Linker -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/MC -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/MC/MCParser -I /include/llvm/CodeGen/MIRParser -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Object -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Option -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Passes -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ProfileData -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Scalar -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ADT -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Support -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/Symbolize -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Target -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Utils -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Vectorize -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/IPO -I 
/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include -I /usr/src/gnu/usr.bin/clang/libLLVM/../include -I /usr/src/gnu/usr.bin/clang/libLLVM/obj -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include -D NDEBUG -D __STDC_LIMIT_MACROS -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D LLVM_PREFIX="/usr" -D PIC -internal-isystem /usr/include/c++/v1 -internal-isystem /usr/local/lib/clang/13.0.0/include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/usr/src/gnu/usr.bin/clang/libLLVM/obj -ferror-limit 19 -fvisibility-inlines-hidden -fwrapv -D_RET_PROTECTOR -ret-protector -fno-rtti -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/ben/Projects/vmm/scan-build/2022-01-12-194120-40624-1 -x c++ /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/CodeGen/MachineOperand.cpp

/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/CodeGen/MachineOperand.cpp

1//===- lib/CodeGen/MachineOperand.cpp -------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file Methods common to all machine operands.
10//
11//===----------------------------------------------------------------------===//
12
13#include "llvm/CodeGen/MachineOperand.h"
14#include "llvm/ADT/FoldingSet.h"
15#include "llvm/ADT/StringExtras.h"
16#include "llvm/Analysis/Loads.h"
17#include "llvm/Analysis/MemoryLocation.h"
18#include "llvm/CodeGen/MIRFormatter.h"
19#include "llvm/CodeGen/MIRPrinter.h"
20#include "llvm/CodeGen/MachineFrameInfo.h"
21#include "llvm/CodeGen/MachineJumpTableInfo.h"
22#include "llvm/CodeGen/MachineRegisterInfo.h"
23#include "llvm/CodeGen/TargetInstrInfo.h"
24#include "llvm/CodeGen/TargetRegisterInfo.h"
25#include "llvm/Config/llvm-config.h"
26#include "llvm/IR/Constants.h"
27#include "llvm/IR/IRPrintingPasses.h"
28#include "llvm/IR/Instructions.h"
29#include "llvm/IR/ModuleSlotTracker.h"
30#include "llvm/MC/MCDwarf.h"
31#include "llvm/Target/TargetIntrinsicInfo.h"
32#include "llvm/Target/TargetMachine.h"
33
34using namespace llvm;
35
36static cl::opt<int>
37 PrintRegMaskNumRegs("print-regmask-num-regs",
38 cl::desc("Number of registers to limit to when "
39 "printing regmask operands in IR dumps. "
40 "unlimited = -1"),
41 cl::init(32), cl::Hidden);
42
43static const MachineFunction *getMFIfAvailable(const MachineOperand &MO) {
44 if (const MachineInstr *MI = MO.getParent())
45 if (const MachineBasicBlock *MBB = MI->getParent())
46 if (const MachineFunction *MF = MBB->getParent())
47 return MF;
48 return nullptr;
49}
50static MachineFunction *getMFIfAvailable(MachineOperand &MO) {
51 return const_cast<MachineFunction *>(
52 getMFIfAvailable(const_cast<const MachineOperand &>(MO)));
53}
54
55void MachineOperand::setReg(Register Reg) {
56 if (getReg() == Reg)
57 return; // No change.
58
59 // Clear the IsRenamable bit to keep it conservatively correct.
60 IsRenamable = false;
61
62 // Otherwise, we have to change the register. If this operand is embedded
63 // into a machine function, we need to update the old and new register's
64 // use/def lists.
65 if (MachineFunction *MF = getMFIfAvailable(*this)) {
66 MachineRegisterInfo &MRI = MF->getRegInfo();
67 MRI.removeRegOperandFromUseList(this);
68 SmallContents.RegNo = Reg;
69 MRI.addRegOperandToUseList(this);
70 return;
71 }
72
73 // Otherwise, just change the register, no problem. :)
74 SmallContents.RegNo = Reg;
75}
76
77void MachineOperand::substVirtReg(Register Reg, unsigned SubIdx,
78 const TargetRegisterInfo &TRI) {
79 assert(Reg.isVirtual());
80 if (SubIdx && getSubReg())
81 SubIdx = TRI.composeSubRegIndices(SubIdx, getSubReg());
82 setReg(Reg);
83 if (SubIdx)
84 setSubReg(SubIdx);
85}
86
87void MachineOperand::substPhysReg(MCRegister Reg, const TargetRegisterInfo &TRI) {
88 assert(Register::isPhysicalRegister(Reg));
89 if (getSubReg()) {
90 Reg = TRI.getSubReg(Reg, getSubReg());
91 // Note that getSubReg() may return 0 if the sub-register doesn't exist.
92 // That won't happen in legal code.
93 setSubReg(0);
94 if (isDef())
95 setIsUndef(false);
96 }
97 setReg(Reg);
98}
99
100/// Change a def to a use, or a use to a def.
101void MachineOperand::setIsDef(bool Val) {
102 assert(isReg() && "Wrong MachineOperand accessor");
103 assert((!Val || !isDebug()) && "Marking a debug operation as def");
104 if (IsDef == Val)
105 return;
106 assert(!IsDeadOrKill && "Changing def/use with dead/kill set not supported");
107 // MRI may keep uses and defs in different list positions.
108 if (MachineFunction *MF = getMFIfAvailable(*this)) {
109 MachineRegisterInfo &MRI = MF->getRegInfo();
110 MRI.removeRegOperandFromUseList(this);
111 IsDef = Val;
112 MRI.addRegOperandToUseList(this);
113 return;
114 }
115 IsDef = Val;
116}
117
118bool MachineOperand::isRenamable() const {
119 assert(isReg() && "Wrong MachineOperand accessor");
120 assert(Register::isPhysicalRegister(getReg()) &&
121 "isRenamable should only be checked on physical registers");
122 if (!IsRenamable)
123 return false;
124
125 const MachineInstr *MI = getParent();
126 if (!MI)
127 return true;
128
129 if (isDef())
130 return !MI->hasExtraDefRegAllocReq(MachineInstr::IgnoreBundle);
131
132 assert(isUse() && "Reg is not def or use");
133 return !MI->hasExtraSrcRegAllocReq(MachineInstr::IgnoreBundle);
134}
135
136void MachineOperand::setIsRenamable(bool Val) {
137 assert(isReg() && "Wrong MachineOperand accessor");
138 assert(Register::isPhysicalRegister(getReg()) &&
139 "setIsRenamable should only be called on physical registers");
140 IsRenamable = Val;
141}
142
143// If this operand is currently a register operand, and if this is in a
144// function, deregister the operand from the register's use/def list.
145void MachineOperand::removeRegFromUses() {
146 if (!isReg() || !isOnRegUseList())
147 return;
148
149 if (MachineFunction *MF = getMFIfAvailable(*this))
150 MF->getRegInfo().removeRegOperandFromUseList(this);
151}
152
153/// ChangeToImmediate - Replace this operand with a new immediate operand of
154/// the specified value. If an operand is known to be an immediate already,
155/// the setImm method should be used.
156void MachineOperand::ChangeToImmediate(int64_t ImmVal, unsigned TargetFlags) {
157 assert((!isReg() || !isTied()) && "Cannot change a tied operand into an imm");
158
159 removeRegFromUses();
160
161 OpKind = MO_Immediate;
162 Contents.ImmVal = ImmVal;
163 setTargetFlags(TargetFlags);
164}
165
166void MachineOperand::ChangeToFPImmediate(const ConstantFP *FPImm,
167 unsigned TargetFlags) {
168 assert((!isReg() || !isTied()) && "Cannot change a tied operand into an imm");
169
170 removeRegFromUses();
171
172 OpKind = MO_FPImmediate;
173 Contents.CFP = FPImm;
174 setTargetFlags(TargetFlags);
175}
176
177void MachineOperand::ChangeToES(const char *SymName,
178 unsigned TargetFlags) {
179 assert((!isReg() || !isTied()) &&
180 "Cannot change a tied operand into an external symbol");
181
182 removeRegFromUses();
183
184 OpKind = MO_ExternalSymbol;
185 Contents.OffsetedInfo.Val.SymbolName = SymName;
186 setOffset(0); // Offset is always 0.
187 setTargetFlags(TargetFlags);
188}
189
190void MachineOperand::ChangeToGA(const GlobalValue *GV, int64_t Offset,
191 unsigned TargetFlags) {
192 assert((!isReg() || !isTied()) &&
193 "Cannot change a tied operand into a global address");
194
195 removeRegFromUses();
196
197 OpKind = MO_GlobalAddress;
198 Contents.OffsetedInfo.Val.GV = GV;
199 setOffset(Offset);
200 setTargetFlags(TargetFlags);
201}
202
203void MachineOperand::ChangeToMCSymbol(MCSymbol *Sym, unsigned TargetFlags) {
204 assert((!isReg() || !isTied()) &&
205 "Cannot change a tied operand into an MCSymbol");
206
207 removeRegFromUses();
208
209 OpKind = MO_MCSymbol;
210 Contents.Sym = Sym;
211 setTargetFlags(TargetFlags);
212}
213
214void MachineOperand::ChangeToFrameIndex(int Idx, unsigned TargetFlags) {
215 assert((!isReg() || !isTied()) &&
216 "Cannot change a tied operand into a FrameIndex");
217
218 removeRegFromUses();
219
220 OpKind = MO_FrameIndex;
221 setIndex(Idx);
222 setTargetFlags(TargetFlags);
223}
224
225void MachineOperand::ChangeToTargetIndex(unsigned Idx, int64_t Offset,
226 unsigned TargetFlags) {
227 assert((!isReg() || !isTied()) &&
228 "Cannot change a tied operand into a FrameIndex");
229
230 removeRegFromUses();
231
232 OpKind = MO_TargetIndex;
233 setIndex(Idx);
234 setOffset(Offset);
235 setTargetFlags(TargetFlags);
236}
237
238/// ChangeToRegister - Replace this operand with a new register operand of
239/// the specified value. If an operand is known to be an register already,
240/// the setReg method should be used.
241void MachineOperand::ChangeToRegister(Register Reg, bool isDef, bool isImp,
242 bool isKill, bool isDead, bool isUndef,
243 bool isDebug) {
244 MachineRegisterInfo *RegInfo = nullptr;
245 if (MachineFunction *MF = getMFIfAvailable(*this))
246 RegInfo = &MF->getRegInfo();
247 // If this operand is already a register operand, remove it from the
248 // register's use/def lists.
249 bool WasReg = isReg();
250 if (RegInfo && WasReg)
251 RegInfo->removeRegOperandFromUseList(this);
252
253 // Change this to a register and set the reg#.
254 assert(!(isDead && !isDef) && "Dead flag on non-def");
255 assert(!(isKill && isDef) && "Kill flag on def");
256 OpKind = MO_Register;
257 SmallContents.RegNo = Reg;
258 SubReg_TargetFlags = 0;
259 IsDef = isDef;
260 IsImp = isImp;
261 IsDeadOrKill = isKill | isDead;
262 IsRenamable = false;
263 IsUndef = isUndef;
264 IsInternalRead = false;
265 IsEarlyClobber = false;
266 IsDebug = isDebug;
267 // Ensure isOnRegUseList() returns false.
268 Contents.Reg.Prev = nullptr;
269 // Preserve the tie when the operand was already a register.
270 if (!WasReg)
271 TiedTo = 0;
272
273 // If this operand is embedded in a function, add the operand to the
274 // register's use/def list.
275 if (RegInfo)
276 RegInfo->addRegOperandToUseList(this);
277}
278
279/// isIdenticalTo - Return true if this operand is identical to the specified
280/// operand. Note that this should stay in sync with the hash_value overload
281/// below.
282bool MachineOperand::isIdenticalTo(const MachineOperand &Other) const {
283 if (getType() != Other.getType() ||
284 getTargetFlags() != Other.getTargetFlags())
285 return false;
286
287 switch (getType()) {
288 case MachineOperand::MO_Register:
289 return getReg() == Other.getReg() && isDef() == Other.isDef() &&
290 getSubReg() == Other.getSubReg();
291 case MachineOperand::MO_Immediate:
292 return getImm() == Other.getImm();
293 case MachineOperand::MO_CImmediate:
294 return getCImm() == Other.getCImm();
295 case MachineOperand::MO_FPImmediate:
296 return getFPImm() == Other.getFPImm();
297 case MachineOperand::MO_MachineBasicBlock:
298 return getMBB() == Other.getMBB();
299 case MachineOperand::MO_FrameIndex:
300 return getIndex() == Other.getIndex();
301 case MachineOperand::MO_ConstantPoolIndex:
302 case MachineOperand::MO_TargetIndex:
303 return getIndex() == Other.getIndex() && getOffset() == Other.getOffset();
304 case MachineOperand::MO_JumpTableIndex:
305 return getIndex() == Other.getIndex();
306 case MachineOperand::MO_GlobalAddress:
307 return getGlobal() == Other.getGlobal() && getOffset() == Other.getOffset();
308 case MachineOperand::MO_ExternalSymbol:
309 return strcmp(getSymbolName(), Other.getSymbolName()) == 0 &&
310 getOffset() == Other.getOffset();
311 case MachineOperand::MO_BlockAddress:
312 return getBlockAddress() == Other.getBlockAddress() &&
313 getOffset() == Other.getOffset();
314 case MachineOperand::MO_RegisterMask:
315 case MachineOperand::MO_RegisterLiveOut: {
316 // Shallow compare of the two RegMasks
317 const uint32_t *RegMask = getRegMask();
318 const uint32_t *OtherRegMask = Other.getRegMask();
319 if (RegMask == OtherRegMask)
320 return true;
321
322 if (const MachineFunction *MF = getMFIfAvailable(*this)) {
323 // Calculate the size of the RegMask
324 const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
325 unsigned RegMaskSize = (TRI->getNumRegs() + 31) / 32;
326
327 // Deep compare of the two RegMasks
328 return std::equal(RegMask, RegMask + RegMaskSize, OtherRegMask);
329 }
330 // We don't know the size of the RegMask, so we can't deep compare the two
331 // reg masks.
332 return false;
333 }
334 case MachineOperand::MO_MCSymbol:
335 return getMCSymbol() == Other.getMCSymbol();
336 case MachineOperand::MO_CFIIndex:
337 return getCFIIndex() == Other.getCFIIndex();
338 case MachineOperand::MO_Metadata:
339 return getMetadata() == Other.getMetadata();
340 case MachineOperand::MO_IntrinsicID:
341 return getIntrinsicID() == Other.getIntrinsicID();
342 case MachineOperand::MO_Predicate:
343 return getPredicate() == Other.getPredicate();
344 case MachineOperand::MO_ShuffleMask:
345 return getShuffleMask() == Other.getShuffleMask();
346 }
347 llvm_unreachable("Invalid machine operand type");
348}
349
350// Note: this must stay exactly in sync with isIdenticalTo above.
351hash_code llvm::hash_value(const MachineOperand &MO) {
352 switch (MO.getType()) {
353 case MachineOperand::MO_Register:
354 // Register operands don't have target flags.
355 return hash_combine(MO.getType(), (unsigned)MO.getReg(), MO.getSubReg(), MO.isDef());
356 case MachineOperand::MO_Immediate:
357 return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getImm());
358 case MachineOperand::MO_CImmediate:
359 return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getCImm());
360 case MachineOperand::MO_FPImmediate:
361 return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getFPImm());
362 case MachineOperand::MO_MachineBasicBlock:
363 return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getMBB());
364 case MachineOperand::MO_FrameIndex:
365 return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getIndex());
366 case MachineOperand::MO_ConstantPoolIndex:
367 case MachineOperand::MO_TargetIndex:
368 return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getIndex(),
369 MO.getOffset());
370 case MachineOperand::MO_JumpTableIndex:
371 return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getIndex());
372 case MachineOperand::MO_ExternalSymbol:
373 return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getOffset(),
374 StringRef(MO.getSymbolName()));
375 case MachineOperand::MO_GlobalAddress:
376 return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getGlobal(),
377 MO.getOffset());
378 case MachineOperand::MO_BlockAddress:
379 return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getBlockAddress(),
380 MO.getOffset());
381 case MachineOperand::MO_RegisterMask:
382 case MachineOperand::MO_RegisterLiveOut:
383 return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getRegMask());
384 case MachineOperand::MO_Metadata:
385 return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getMetadata());
386 case MachineOperand::MO_MCSymbol:
387 return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getMCSymbol());
388 case MachineOperand::MO_CFIIndex:
389 return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getCFIIndex());
390 case MachineOperand::MO_IntrinsicID:
391 return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getIntrinsicID());
392 case MachineOperand::MO_Predicate:
393 return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getPredicate());
394 case MachineOperand::MO_ShuffleMask:
395 return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getShuffleMask());
396 }
397 llvm_unreachable("Invalid machine operand type");
398}
399
400// Try to crawl up to the machine function and get TRI and IntrinsicInfo from
401// it.
402static void tryToGetTargetInfo(const MachineOperand &MO,
403 const TargetRegisterInfo *&TRI,
404 const TargetIntrinsicInfo *&IntrinsicInfo) {
405 if (const MachineFunction *MF = getMFIfAvailable(MO)) {
406 TRI = MF->getSubtarget().getRegisterInfo();
407 IntrinsicInfo = MF->getTarget().getIntrinsicInfo();
408 }
409}
410
411static const char *getTargetIndexName(const MachineFunction &MF, int Index) {
412 const auto *TII = MF.getSubtarget().getInstrInfo();
413 assert(TII && "expected instruction info");
414 auto Indices = TII->getSerializableTargetIndices();
415 auto Found = find_if(Indices, [&](const std::pair<int, const char *> &I) {
416 return I.first == Index;
417 });
418 if (Found != Indices.end())
419 return Found->second;
420 return nullptr;
421}
422
423const char *MachineOperand::getTargetIndexName() const {
424 const MachineFunction *MF = getMFIfAvailable(*this);
425 return MF ? ::getTargetIndexName(*MF, this->getIndex()) : nullptr;
426}
427
428static const char *getTargetFlagName(const TargetInstrInfo *TII, unsigned TF) {
429 auto Flags = TII->getSerializableDirectMachineOperandTargetFlags();
430 for (const auto &I : Flags) {
431 if (I.first == TF) {
432 return I.second;
433 }
434 }
435 return nullptr;
436}
437
438static void printCFIRegister(unsigned DwarfReg, raw_ostream &OS,
439 const TargetRegisterInfo *TRI) {
440 if (!TRI) {
441 OS << "%dwarfreg." << DwarfReg;
442 return;
443 }
444
445 if (Optional<unsigned> Reg = TRI->getLLVMRegNum(DwarfReg, true))
446 OS << printReg(*Reg, TRI);
447 else
448 OS << "<badreg>";
449}
450
451static void printIRBlockReference(raw_ostream &OS, const BasicBlock &BB,
452 ModuleSlotTracker &MST) {
453 OS << "%ir-block.";
454 if (BB.hasName()) {
455 printLLVMNameWithoutPrefix(OS, BB.getName());
456 return;
457 }
458 Optional<int> Slot;
459 if (const Function *F = BB.getParent()) {
460 if (F == MST.getCurrentFunction()) {
461 Slot = MST.getLocalSlot(&BB);
462 } else if (const Module *M = F->getParent()) {
463 ModuleSlotTracker CustomMST(M, /*ShouldInitializeAllMetadata=*/false);
464 CustomMST.incorporateFunction(*F);
465 Slot = CustomMST.getLocalSlot(&BB);
466 }
467 }
468 if (Slot)
469 MachineOperand::printIRSlotNumber(OS, *Slot);
470 else
471 OS << "<unknown>";
472}
473
474static void printSyncScope(raw_ostream &OS, const LLVMContext &Context,
475 SyncScope::ID SSID,
476 SmallVectorImpl<StringRef> &SSNs) {
477 switch (SSID) {
478 case SyncScope::System:
479 break;
480 default:
481 if (SSNs.empty())
482 Context.getSyncScopeNames(SSNs);
483
484 OS << "syncscope(\"";
485 printEscapedString(SSNs[SSID], OS);
486 OS << "\") ";
487 break;
488 }
489}
490
491static const char *getTargetMMOFlagName(const TargetInstrInfo &TII,
492 unsigned TMMOFlag) {
493 auto Flags = TII.getSerializableMachineMemOperandTargetFlags();
494 for (const auto &I : Flags) {
495 if (I.first == TMMOFlag) {
496 return I.second;
497 }
498 }
499 return nullptr;
500}
501
502static void printFrameIndex(raw_ostream& OS, int FrameIndex, bool IsFixed,
503 const MachineFrameInfo *MFI) {
504 StringRef Name;
505 if (MFI) {
506 IsFixed = MFI->isFixedObjectIndex(FrameIndex);
507 if (const AllocaInst *Alloca = MFI->getObjectAllocation(FrameIndex))
508 if (Alloca->hasName())
509 Name = Alloca->getName();
510 if (IsFixed)
511 FrameIndex -= MFI->getObjectIndexBegin();
512 }
513 MachineOperand::printStackObjectReference(OS, FrameIndex, IsFixed, Name);
514}
515
516void MachineOperand::printSubRegIdx(raw_ostream &OS, uint64_t Index,
517 const TargetRegisterInfo *TRI) {
518 OS << "%subreg.";
519 if (TRI)
520 OS << TRI->getSubRegIndexName(Index);
521 else
522 OS << Index;
523}
524
525void MachineOperand::printTargetFlags(raw_ostream &OS,
526 const MachineOperand &Op) {
527 if (!Op.getTargetFlags())
528 return;
529 const MachineFunction *MF = getMFIfAvailable(Op);
530 if (!MF)
531 return;
532
533 const auto *TII = MF->getSubtarget().getInstrInfo();
534 assert(TII && "expected instruction info");
535 auto Flags = TII->decomposeMachineOperandsTargetFlags(Op.getTargetFlags());
536 OS << "target-flags(";
537 const bool HasDirectFlags = Flags.first;
538 const bool HasBitmaskFlags = Flags.second;
539 if (!HasDirectFlags && !HasBitmaskFlags) {
540 OS << "<unknown>) ";
541 return;
542 }
543 if (HasDirectFlags) {
544 if (const auto *Name = getTargetFlagName(TII, Flags.first))
545 OS << Name;
546 else
547 OS << "<unknown target flag>";
548 }
549 if (!HasBitmaskFlags) {
550 OS << ") ";
551 return;
552 }
553 bool IsCommaNeeded = HasDirectFlags;
554 unsigned BitMask = Flags.second;
555 auto BitMasks = TII->getSerializableBitmaskMachineOperandTargetFlags();
556 for (const auto &Mask : BitMasks) {
557 // Check if the flag's bitmask has the bits of the current mask set.
558 if ((BitMask & Mask.first) == Mask.first) {
559 if (IsCommaNeeded)
560 OS << ", ";
561 IsCommaNeeded = true;
562 OS << Mask.second;
563 // Clear the bits which were serialized from the flag's bitmask.
564 BitMask &= ~(Mask.first);
565 }
566 }
567 if (BitMask) {
568 // When the resulting flag's bitmask isn't zero, we know that we didn't
569 // serialize all of the bit flags.
570 if (IsCommaNeeded)
571 OS << ", ";
572 OS << "<unknown bitmask target flag>";
573 }
574 OS << ") ";
575}
576
577void MachineOperand::printSymbol(raw_ostream &OS, MCSymbol &Sym) {
578 OS << "<mcsymbol " << Sym << ">";
579}
580
581void MachineOperand::printStackObjectReference(raw_ostream &OS,
582 unsigned FrameIndex,
583 bool IsFixed, StringRef Name) {
584 if (IsFixed) {
585 OS << "%fixed-stack." << FrameIndex;
586 return;
587 }
588
589 OS << "%stack." << FrameIndex;
590 if (!Name.empty())
591 OS << '.' << Name;
592}
593
594void MachineOperand::printOperandOffset(raw_ostream &OS, int64_t Offset) {
595 if (Offset == 0)
596 return;
597 if (Offset < 0) {
598 OS << " - " << -Offset;
599 return;
600 }
601 OS << " + " << Offset;
602}
603
604void MachineOperand::printIRSlotNumber(raw_ostream &OS, int Slot) {
605 if (Slot == -1)
606 OS << "<badref>";
607 else
608 OS << Slot;
609}
610
611static void printCFI(raw_ostream &OS, const MCCFIInstruction &CFI,
612 const TargetRegisterInfo *TRI) {
613 switch (CFI.getOperation()) {
614 case MCCFIInstruction::OpSameValue:
615 OS << "same_value ";
616 if (MCSymbol *Label = CFI.getLabel())
617 MachineOperand::printSymbol(OS, *Label);
618 printCFIRegister(CFI.getRegister(), OS, TRI);
619 break;
620 case MCCFIInstruction::OpRememberState:
621 OS << "remember_state ";
622 if (MCSymbol *Label = CFI.getLabel())
623 MachineOperand::printSymbol(OS, *Label);
624 break;
625 case MCCFIInstruction::OpRestoreState:
626 OS << "restore_state ";
627 if (MCSymbol *Label = CFI.getLabel())
628 MachineOperand::printSymbol(OS, *Label);
629 break;
630 case MCCFIInstruction::OpOffset:
631 OS << "offset ";
632 if (MCSymbol *Label = CFI.getLabel())
633 MachineOperand::printSymbol(OS, *Label);
634 printCFIRegister(CFI.getRegister(), OS, TRI);
635 OS << ", " << CFI.getOffset();
636 break;
637 case MCCFIInstruction::OpDefCfaRegister:
638 OS << "def_cfa_register ";
639 if (MCSymbol *Label = CFI.getLabel())
640 MachineOperand::printSymbol(OS, *Label);
641 printCFIRegister(CFI.getRegister(), OS, TRI);
642 break;
643 case MCCFIInstruction::OpDefCfaOffset:
644 OS << "def_cfa_offset ";
645 if (MCSymbol *Label = CFI.getLabel())
646 MachineOperand::printSymbol(OS, *Label);
647 OS << CFI.getOffset();
648 break;
649 case MCCFIInstruction::OpDefCfa:
650 OS << "def_cfa ";
651 if (MCSymbol *Label = CFI.getLabel())
652 MachineOperand::printSymbol(OS, *Label);
653 printCFIRegister(CFI.getRegister(), OS, TRI);
654 OS << ", " << CFI.getOffset();
655 break;
656 case MCCFIInstruction::OpLLVMDefAspaceCfa:
657 OS << "llvm_def_aspace_cfa ";
658 if (MCSymbol *Label = CFI.getLabel())
659 MachineOperand::printSymbol(OS, *Label);
660 printCFIRegister(CFI.getRegister(), OS, TRI);
661 OS << ", " << CFI.getOffset();
662 OS << ", " << CFI.getAddressSpace();
663 break;
664 case MCCFIInstruction::OpRelOffset:
665 OS << "rel_offset ";
666 if (MCSymbol *Label = CFI.getLabel())
667 MachineOperand::printSymbol(OS, *Label);
668 printCFIRegister(CFI.getRegister(), OS, TRI);
669 OS << ", " << CFI.getOffset();
670 break;
671 case MCCFIInstruction::OpAdjustCfaOffset:
672 OS << "adjust_cfa_offset ";
673 if (MCSymbol *Label = CFI.getLabel())
674 MachineOperand::printSymbol(OS, *Label);
675 OS << CFI.getOffset();
676 break;
677 case MCCFIInstruction::OpRestore:
678 OS << "restore ";
679 if (MCSymbol *Label = CFI.getLabel())
680 MachineOperand::printSymbol(OS, *Label);
681 printCFIRegister(CFI.getRegister(), OS, TRI);
682 break;
683 case MCCFIInstruction::OpEscape: {
684 OS << "escape ";
685 if (MCSymbol *Label = CFI.getLabel())
686 MachineOperand::printSymbol(OS, *Label);
687 if (!CFI.getValues().empty()) {
688 size_t e = CFI.getValues().size() - 1;
689 for (size_t i = 0; i < e; ++i)
690 OS << format("0x%02x", uint8_t(CFI.getValues()[i])) << ", ";
691 OS << format("0x%02x", uint8_t(CFI.getValues()[e]));
692 }
693 break;
694 }
695 case MCCFIInstruction::OpUndefined:
696 OS << "undefined ";
697 if (MCSymbol *Label = CFI.getLabel())
698 MachineOperand::printSymbol(OS, *Label);
699 printCFIRegister(CFI.getRegister(), OS, TRI);
700 break;
701 case MCCFIInstruction::OpRegister:
702 OS << "register ";
703 if (MCSymbol *Label = CFI.getLabel())
704 MachineOperand::printSymbol(OS, *Label);
705 printCFIRegister(CFI.getRegister(), OS, TRI);
706 OS << ", ";
707 printCFIRegister(CFI.getRegister2(), OS, TRI);
708 break;
709 case MCCFIInstruction::OpWindowSave:
710 OS << "window_save ";
711 if (MCSymbol *Label = CFI.getLabel())
712 MachineOperand::printSymbol(OS, *Label);
713 break;
714 case MCCFIInstruction::OpNegateRAState:
715 OS << "negate_ra_sign_state ";
716 if (MCSymbol *Label = CFI.getLabel())
717 MachineOperand::printSymbol(OS, *Label);
718 break;
719 default:
720 // TODO: Print the other CFI Operations.
721 OS << "<unserializable cfi directive>";
722 break;
723 }
724}
725
726void MachineOperand::print(raw_ostream &OS, const TargetRegisterInfo *TRI,
727 const TargetIntrinsicInfo *IntrinsicInfo) const {
728 print(OS, LLT{}, TRI, IntrinsicInfo);
729}
730
731void MachineOperand::print(raw_ostream &OS, LLT TypeToPrint,
732 const TargetRegisterInfo *TRI,
733 const TargetIntrinsicInfo *IntrinsicInfo) const {
734 tryToGetTargetInfo(*this, TRI, IntrinsicInfo);
735 ModuleSlotTracker DummyMST(nullptr);
736 print(OS, DummyMST, TypeToPrint, None, /*PrintDef=*/false,
737 /*IsStandalone=*/true,
738 /*ShouldPrintRegisterTies=*/true,
739 /*TiedOperandIdx=*/0, TRI, IntrinsicInfo);
740}
741
742void MachineOperand::print(raw_ostream &OS, ModuleSlotTracker &MST,
743 LLT TypeToPrint, Optional<unsigned> OpIdx, bool PrintDef,
744 bool IsStandalone, bool ShouldPrintRegisterTies,
745 unsigned TiedOperandIdx,
746 const TargetRegisterInfo *TRI,
747 const TargetIntrinsicInfo *IntrinsicInfo) const {
748 printTargetFlags(OS, *this);
749 switch (getType()) {
750 case MachineOperand::MO_Register: {
751 Register Reg = getReg();
752 if (isImplicit())
753 OS << (isDef() ? "implicit-def " : "implicit ");
754 else if (PrintDef && isDef())
755 // Print the 'def' flag only when the operand is defined after '='.
756 OS << "def ";
757 if (isInternalRead())
758 OS << "internal ";
759 if (isDead())
760 OS << "dead ";
761 if (isKill())
762 OS << "killed ";
763 if (isUndef())
764 OS << "undef ";
765 if (isEarlyClobber())
766 OS << "early-clobber ";
767 if (Register::isPhysicalRegister(getReg()) && isRenamable())
768 OS << "renamable ";
769 // isDebug() is exactly true for register operands of a DBG_VALUE. So we
770 // simply infer it when parsing and do not need to print it.
771
772 const MachineRegisterInfo *MRI = nullptr;
773 if (Register::isVirtualRegister(Reg)) {
774 if (const MachineFunction *MF = getMFIfAvailable(*this)) {
775 MRI = &MF->getRegInfo();
776 }
777 }
778
779 OS << printReg(Reg, TRI, 0, MRI);
780 // Print the sub register.
781 if (unsigned SubReg = getSubReg()) {
782 if (TRI)
783 OS << '.' << TRI->getSubRegIndexName(SubReg);
784 else
785 OS << ".subreg" << SubReg;
786 }
787 // Print the register class / bank.
788 if (Register::isVirtualRegister(Reg)) {
789 if (const MachineFunction *MF = getMFIfAvailable(*this)) {
790 const MachineRegisterInfo &MRI = MF->getRegInfo();
791 if (IsStandalone || !PrintDef || MRI.def_empty(Reg)) {
792 OS << ':';
793 OS << printRegClassOrBank(Reg, MRI, TRI);
794 }
795 }
796 }
797 // Print ties.
798 if (ShouldPrintRegisterTies && isTied() && !isDef())
799 OS << "(tied-def " << TiedOperandIdx << ")";
800 // Print types.
801 if (TypeToPrint.isValid())
802 OS << '(' << TypeToPrint << ')';
803 break;
804 }
805 case MachineOperand::MO_Immediate: {
806 const MIRFormatter *Formatter = nullptr;
807 if (const MachineFunction *MF = getMFIfAvailable(*this)) {
808 const auto *TII = MF->getSubtarget().getInstrInfo();
809 assert(TII && "expected instruction info");
810 Formatter = TII->getMIRFormatter();
811 }
812 if (Formatter)
813 Formatter->printImm(OS, *getParent(), OpIdx, getImm());
814 else
815 OS << getImm();
816 break;
817 }
818 case MachineOperand::MO_CImmediate:
819 getCImm()->printAsOperand(OS, /*PrintType=*/true, MST);
820 break;
821 case MachineOperand::MO_FPImmediate:
822 getFPImm()->printAsOperand(OS, /*PrintType=*/true, MST);
823 break;
824 case MachineOperand::MO_MachineBasicBlock:
825 OS << printMBBReference(*getMBB());
826 break;
827 case MachineOperand::MO_FrameIndex: {
828 int FrameIndex = getIndex();
829 bool IsFixed = false;
830 const MachineFrameInfo *MFI = nullptr;
831 if (const MachineFunction *MF = getMFIfAvailable(*this))
832 MFI = &MF->getFrameInfo();
833 printFrameIndex(OS, FrameIndex, IsFixed, MFI);
834 break;
835 }
836 case MachineOperand::MO_ConstantPoolIndex:
837 OS << "%const." << getIndex();
838 printOperandOffset(OS, getOffset());
839 break;
840 case MachineOperand::MO_TargetIndex: {
841 OS << "target-index(";
842 const char *Name = "<unknown>";
843 if (const MachineFunction *MF = getMFIfAvailable(*this))
844 if (const auto *TargetIndexName = ::getTargetIndexName(*MF, getIndex()))
845 Name = TargetIndexName;
846 OS << Name << ')';
847 printOperandOffset(OS, getOffset());
848 break;
849 }
850 case MachineOperand::MO_JumpTableIndex:
851 OS << printJumpTableEntryReference(getIndex());
852 break;
853 case MachineOperand::MO_GlobalAddress:
854 getGlobal()->printAsOperand(OS, /*PrintType=*/false, MST);
855 printOperandOffset(OS, getOffset());
856 break;
857 case MachineOperand::MO_ExternalSymbol: {
858 StringRef Name = getSymbolName();
859 OS << '&';
860 if (Name.empty()) {
861 OS << "\"\"";
862 } else {
863 printLLVMNameWithoutPrefix(OS, Name);
864 }
865 printOperandOffset(OS, getOffset());
866 break;
867 }
868 case MachineOperand::MO_BlockAddress: {
869 OS << "blockaddress(";
870 getBlockAddress()->getFunction()->printAsOperand(OS, /*PrintType=*/false,
871 MST);
872 OS << ", ";
873 printIRBlockReference(OS, *getBlockAddress()->getBasicBlock(), MST);
874 OS << ')';
875 MachineOperand::printOperandOffset(OS, getOffset());
876 break;
877 }
878 case MachineOperand::MO_RegisterMask: {
879 OS << "<regmask";
880 if (TRI) {
881 unsigned NumRegsInMask = 0;
882 unsigned NumRegsEmitted = 0;
883 for (unsigned i = 0; i < TRI->getNumRegs(); ++i) {
884 unsigned MaskWord = i / 32;
885 unsigned MaskBit = i % 32;
886 if (getRegMask()[MaskWord] & (1 << MaskBit)) {
887 if (PrintRegMaskNumRegs < 0 ||
888 NumRegsEmitted <= static_cast<unsigned>(PrintRegMaskNumRegs)) {
889 OS << " " << printReg(i, TRI);
890 NumRegsEmitted++;
891 }
892 NumRegsInMask++;
893 }
894 }
895 if (NumRegsEmitted != NumRegsInMask)
896 OS << " and " << (NumRegsInMask - NumRegsEmitted) << " more...";
897 } else {
898 OS << " ...";
899 }
900 OS << ">";
901 break;
902 }
903 case MachineOperand::MO_RegisterLiveOut: {
904 const uint32_t *RegMask = getRegLiveOut();
905 OS << "liveout(";
906 if (!TRI) {
907 OS << "<unknown>";
908 } else {
909 bool IsCommaNeeded = false;
910 for (unsigned Reg = 0, E = TRI->getNumRegs(); Reg < E; ++Reg) {
911 if (RegMask[Reg / 32] & (1U << (Reg % 32))) {
912 if (IsCommaNeeded)
913 OS << ", ";
914 OS << printReg(Reg, TRI);
915 IsCommaNeeded = true;
916 }
917 }
918 }
919 OS << ")";
920 break;
921 }
922 case MachineOperand::MO_Metadata:
923 getMetadata()->printAsOperand(OS, MST);
924 break;
925 case MachineOperand::MO_MCSymbol:
926 printSymbol(OS, *getMCSymbol());
927 break;
928 case MachineOperand::MO_CFIIndex: {
929 if (const MachineFunction *MF = getMFIfAvailable(*this))
930 printCFI(OS, MF->getFrameInstructions()[getCFIIndex()], TRI);
931 else
932 OS << "<cfi directive>";
933 break;
934 }
935 case MachineOperand::MO_IntrinsicID: {
936 Intrinsic::ID ID = getIntrinsicID();
937 if (ID < Intrinsic::num_intrinsics)
938 OS << "intrinsic(@" << Intrinsic::getBaseName(ID) << ')';
939 else if (IntrinsicInfo)
940 OS << "intrinsic(@" << IntrinsicInfo->getName(ID) << ')';
941 else
942 OS << "intrinsic(" << ID << ')';
943 break;
944 }
945 case MachineOperand::MO_Predicate: {
946 auto Pred = static_cast<CmpInst::Predicate>(getPredicate());
947 OS << (CmpInst::isIntPredicate(Pred) ? "int" : "float") << "pred("
948 << CmpInst::getPredicateName(Pred) << ')';
949 break;
950 }
951 case MachineOperand::MO_ShuffleMask:
952 OS << "shufflemask(";
953 ArrayRef<int> Mask = getShuffleMask();
954 StringRef Separator;
955 for (int Elt : Mask) {
956 if (Elt == -1)
957 OS << Separator << "undef";
958 else
959 OS << Separator << Elt;
960 Separator = ", ";
961 }
962
963 OS << ')';
964 break;
965 }
966}
967
968#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
969LLVM_DUMP_METHOD void MachineOperand::dump() const { dbgs() << *this << '\n'; }
970#endif
971
972//===----------------------------------------------------------------------===//
973// MachineMemOperand Implementation
974//===----------------------------------------------------------------------===//
975
976/// getAddrSpace - Return the LLVM IR address space number that this pointer
977/// points into.
978unsigned MachinePointerInfo::getAddrSpace() const { return AddrSpace; }
979
980/// isDereferenceable - Return true if V is always dereferenceable for
981/// Offset + Size byte.
982bool MachinePointerInfo::isDereferenceable(unsigned Size, LLVMContext &C,
983 const DataLayout &DL) const {
984 if (!V.is<const Value *>())
985 return false;
986
987 const Value *BasePtr = V.get<const Value *>();
988 if (BasePtr == nullptr)
989 return false;
990
991 return isDereferenceableAndAlignedPointer(
992 BasePtr, Align(1), APInt(DL.getPointerSizeInBits(), Offset + Size), DL);
993}
994
995/// getConstantPool - Return a MachinePointerInfo record that refers to the
996/// constant pool.
997MachinePointerInfo MachinePointerInfo::getConstantPool(MachineFunction &MF) {
998 return MachinePointerInfo(MF.getPSVManager().getConstantPool());
999}
1000
1001/// getFixedStack - Return a MachinePointerInfo record that refers to the
1002/// the specified FrameIndex.
1003MachinePointerInfo MachinePointerInfo::getFixedStack(MachineFunction &MF,
1004 int FI, int64_t Offset) {
1005 return MachinePointerInfo(MF.getPSVManager().getFixedStack(FI), Offset);
1006}
1007
1008MachinePointerInfo MachinePointerInfo::getJumpTable(MachineFunction &MF) {
1009 return MachinePointerInfo(MF.getPSVManager().getJumpTable());
1010}
1011
1012MachinePointerInfo MachinePointerInfo::getGOT(MachineFunction &MF) {
1013 return MachinePointerInfo(MF.getPSVManager().getGOT());
1014}
1015
1016MachinePointerInfo MachinePointerInfo::getStack(MachineFunction &MF,
1017 int64_t Offset, uint8_t ID) {
1018 return MachinePointerInfo(MF.getPSVManager().getStack(), Offset, ID);
1019}
1020
1021MachinePointerInfo MachinePointerInfo::getUnknownStack(MachineFunction &MF) {
1022 return MachinePointerInfo(MF.getDataLayout().getAllocaAddrSpace());
1023}
1024
1025MachineMemOperand::MachineMemOperand(MachinePointerInfo ptrinfo, Flags f,
1026 LLT type, Align a, const AAMDNodes &AAInfo,
1027 const MDNode *Ranges, SyncScope::ID SSID,
1028 AtomicOrdering Ordering,
1029 AtomicOrdering FailureOrdering)
1030 : PtrInfo(ptrinfo), MemoryType(type), FlagVals(f), BaseAlign(a),
1031 AAInfo(AAInfo), Ranges(Ranges) {
1032 assert((PtrInfo.V.isNull() || PtrInfo.V.is<const PseudoSourceValue *>() ||
1033 isa<PointerType>(PtrInfo.V.get<const Value *>()->getType())) &&
1034 "invalid pointer value");
1035 assert((isLoad() || isStore()) && "Not a load/store!");
1036
1037 AtomicInfo.SSID = static_cast<unsigned>(SSID);
1038 assert(getSyncScopeID() == SSID && "Value truncated");
1039 AtomicInfo.Ordering = static_cast<unsigned>(Ordering);
1040 assert(getSuccessOrdering() == Ordering && "Value truncated");
1041 AtomicInfo.FailureOrdering = static_cast<unsigned>(FailureOrdering);
1042 assert(getFailureOrdering() == FailureOrdering && "Value truncated");
1043}
1044
1045MachineMemOperand::MachineMemOperand(MachinePointerInfo ptrinfo, Flags f,
1046 uint64_t s, Align a,
1047 const AAMDNodes &AAInfo,
1048 const MDNode *Ranges, SyncScope::ID SSID,
1049 AtomicOrdering Ordering,
1050 AtomicOrdering FailureOrdering)
1051 : MachineMemOperand(ptrinfo, f,
1052 s == ~UINT64_C(0) ? LLT() : LLT::scalar(8 * s), a,
1053 AAInfo, Ranges, SSID, Ordering, FailureOrdering) {}
1054
1055/// Profile - Gather unique data for the object.
1056///
1057void MachineMemOperand::Profile(FoldingSetNodeID &ID) const {
1058 ID.AddInteger(getOffset());
1059 ID.AddInteger(getMemoryType().getUniqueRAWLLTData());
1060 ID.AddPointer(getOpaqueValue());
1061 ID.AddInteger(getFlags());
1062 ID.AddInteger(getBaseAlign().value());
1063}
1064
1065void MachineMemOperand::refineAlignment(const MachineMemOperand *MMO) {
1066 // The Value and Offset may differ due to CSE. But the flags and size
1067 // should be the same.
1068 assert(MMO->getFlags() == getFlags() && "Flags mismatch!");
1069 assert(MMO->getSize() == getSize() && "Size mismatch!");
1070
1071 if (MMO->getBaseAlign() >= getBaseAlign()) {
1072 // Update the alignment value.
1073 BaseAlign = MMO->getBaseAlign();
1074 // Also update the base and offset, because the new alignment may
1075 // not be applicable with the old ones.
1076 PtrInfo = MMO->PtrInfo;
1077 }
1078}
1079
1080/// getAlign - Return the minimum known alignment in bytes of the
1081/// actual memory reference.
1082Align MachineMemOperand::getAlign() const {
1083 return commonAlignment(getBaseAlign(), getOffset());
28
Calling 'commonAlignment'
35
Returning from 'commonAlignment'
1084}
1085
1086void MachineMemOperand::print(raw_ostream &OS, ModuleSlotTracker &MST,
1087 SmallVectorImpl<StringRef> &SSNs,
1088 const LLVMContext &Context,
1089 const MachineFrameInfo *MFI,
1090 const TargetInstrInfo *TII) const {
1091 OS << '(';
1092 if (isVolatile())
1
Assuming the condition is false
2
Taking false branch
1093 OS << "volatile ";
1094 if (isNonTemporal())
3
Assuming the condition is false
4
Taking false branch
1095 OS << "non-temporal ";
1096 if (isDereferenceable())
5
Assuming the condition is false
6
Taking false branch
1097 OS << "dereferenceable ";
1098 if (isInvariant())
7
Assuming the condition is false
8
Taking false branch
1099 OS << "invariant ";
1100 if (getFlags() & MachineMemOperand::MOTargetFlag1)
9
Assuming the condition is false
10
Taking false branch
1101 OS << '"' << getTargetMMOFlagName(*TII, MachineMemOperand::MOTargetFlag1)
1102 << "\" ";
1103 if (getFlags() & MachineMemOperand::MOTargetFlag2)
11
Assuming the condition is false
12
Taking false branch
1104 OS << '"' << getTargetMMOFlagName(*TII, MachineMemOperand::MOTargetFlag2)
1105 << "\" ";
1106 if (getFlags() & MachineMemOperand::MOTargetFlag3)
13
Assuming the condition is false
14
Taking false branch
1107 OS << '"' << getTargetMMOFlagName(*TII, MachineMemOperand::MOTargetFlag3)
1108 << "\" ";
1109
1110 assert((isLoad() || isStore()) &&
1111 "machine memory operand must be a load or store (or both)");
1112 if (isLoad())
15
Assuming the condition is false
16
Taking false branch
1113 OS << "load ";
1114 if (isStore())
17
Assuming the condition is false
18
Taking false branch
1115 OS << "store ";
1116
1117 printSyncScope(OS, Context, getSyncScopeID(), SSNs);
1118
1119 if (getSuccessOrdering() != AtomicOrdering::NotAtomic)
19
Assuming the condition is false
20
Taking false branch
1120 OS << toIRString(getSuccessOrdering()) << ' ';
1121 if (getFailureOrdering() != AtomicOrdering::NotAtomic)
21
Assuming the condition is false
22
Taking false branch
1122 OS << toIRString(getFailureOrdering()) << ' ';
1123
1124 if (getMemoryType().isValid())
23
Taking false branch
1125 OS << '(' << getMemoryType() << ')';
1126 else
1127 OS << "unknown-size";
1128
1129 if (const Value *Val = getValue()) {
23.1
'Val' is null
24
Taking false branch
1130 OS << ((isLoad() && isStore()) ? " on " : isLoad() ? " from " : " into ");
1131 MIRFormatter::printIRValue(OS, *Val, MST);
1132 } else if (const PseudoSourceValue *PVal = getPseudoValue()) {
24.1
'PVal' is null
25
Taking false branch
1133 OS << ((isLoad() && isStore()) ? " on " : isLoad() ? " from " : " into ");
1134 assert(PVal && "Expected a pseudo source value");
1135 switch (PVal->kind()) {
1136 case PseudoSourceValue::Stack:
1137 OS << "stack";
1138 break;
1139 case PseudoSourceValue::GOT:
1140 OS << "got";
1141 break;
1142 case PseudoSourceValue::JumpTable:
1143 OS << "jump-table";
1144 break;
1145 case PseudoSourceValue::ConstantPool:
1146 OS << "constant-pool";
1147 break;
1148 case PseudoSourceValue::FixedStack: {
1149 int FrameIndex = cast<FixedStackPseudoSourceValue>(PVal)->getFrameIndex();
1150 bool IsFixed = true;
1151 printFrameIndex(OS, FrameIndex, IsFixed, MFI);
1152 break;
1153 }
1154 case PseudoSourceValue::GlobalValueCallEntry:
1155 OS << "call-entry ";
1156 cast<GlobalValuePseudoSourceValue>(PVal)->getValue()->printAsOperand(
1157 OS, /*PrintType=*/false, MST);
1158 break;
1159 case PseudoSourceValue::ExternalSymbolCallEntry:
1160 OS << "call-entry &";
1161 printLLVMNameWithoutPrefix(
1162 OS, cast<ExternalSymbolPseudoSourceValue>(PVal)->getSymbol());
1163 break;
1164 default: {
1165 const MIRFormatter *Formatter = TII->getMIRFormatter();
1166 // FIXME: This is not necessarily the correct MIR serialization format for
1167 // a custom pseudo source value, but at least it allows
1168 // MIR printing to work on a target with custom pseudo source
1169 // values.
1170 OS << "custom \"";
1171 Formatter->printCustomPseudoSourceValue(OS, MST, *PVal);
1172 OS << '\"';
1173 break;
1174 }
1175 }
1176 } else if (getOpaqueValue() == nullptr && getOffset() != 0) {
26
Assuming the condition is false
1177 OS << ((isLoad() && isStore()) ? " on "
1178 : isLoad() ? " from "
1179 : " into ")
1180 << "unknown-address";
1181 }
1182 MachineOperand::printOperandOffset(OS, getOffset());
1183 if (getSize() > 0 && getAlign() != getSize())
27
Calling 'MachineMemOperand::getAlign'
36
Returning from 'MachineMemOperand::getAlign'
37
Calling 'operator!='
1184 OS << ", align " << getAlign().value();
1185 if (getAlign() != getBaseAlign())
1186 OS << ", basealign " << getBaseAlign().value();
1187 auto AAInfo = getAAInfo();
1188 if (AAInfo.TBAA) {
1189 OS << ", !tbaa ";
1190 AAInfo.TBAA->printAsOperand(OS, MST);
1191 }
1192 if (AAInfo.Scope) {
1193 OS << ", !alias.scope ";
1194 AAInfo.Scope->printAsOperand(OS, MST);
1195 }
1196 if (AAInfo.NoAlias) {
1197 OS << ", !noalias ";
1198 AAInfo.NoAlias->printAsOperand(OS, MST);
1199 }
1200 if (getRanges()) {
1201 OS << ", !range ";
1202 getRanges()->printAsOperand(OS, MST);
1203 }
1204 // FIXME: Implement addrspace printing/parsing in MIR.
1205 // For now, print this even though parsing it is not available in MIR.
1206 if (unsigned AS = getAddrSpace())
1207 OS << ", addrspace " << AS;
1208
1209 OS << ')';
1210}

/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Support/Alignment.h

1//===-- llvm/Support/Alignment.h - Useful alignment functions ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains types to represent alignments.
10// They are instrumented to guarantee some invariants are preserved and prevent
11// invalid manipulations.
12//
13// - Align represents an alignment in bytes, it is always set and always a valid
14// power of two, its minimum value is 1 which means no alignment requirements.
15//
16// - MaybeAlign is an optional type, it may be undefined or set. When it's set
17// you can get the underlying Align type by using the getValue() method.
18//
19//===----------------------------------------------------------------------===//
20
21#ifndef LLVM_SUPPORT_ALIGNMENT_H_
22#define LLVM_SUPPORT_ALIGNMENT_H_
23
24#include "llvm/ADT/Optional.h"
25#include "llvm/Support/MathExtras.h"
26#include <cassert>
27#ifndef NDEBUG
28#include <string>
29#endif // NDEBUG
30
31namespace llvm {
32
33#define ALIGN_CHECK_ISPOSITIVE(decl) \
34 assert(decl > 0 && (#decl " should be defined"))
35
36/// This struct is a compact representation of a valid (non-zero power of two)
37/// alignment.
38/// It is suitable for use as static global constants.
39struct Align {
40private:
41 uint8_t ShiftValue = 0; /// The log2 of the required alignment.
42 /// ShiftValue is less than 64 by construction.
43
44 friend struct MaybeAlign;
45 friend unsigned Log2(Align);
46 friend bool operator==(Align Lhs, Align Rhs);
47 friend bool operator!=(Align Lhs, Align Rhs);
48 friend bool operator<=(Align Lhs, Align Rhs);
49 friend bool operator>=(Align Lhs, Align Rhs);
50 friend bool operator<(Align Lhs, Align Rhs);
51 friend bool operator>(Align Lhs, Align Rhs);
52 friend unsigned encode(struct MaybeAlign A);
53 friend struct MaybeAlign decodeMaybeAlign(unsigned Value);
54
55 /// A trivial type to allow construction of constexpr Align.
56 /// This is currently needed to workaround a bug in GCC 5.3 which prevents
57 /// definition of constexpr assign operators.
58 /// https://stackoverflow.com/questions/46756288/explicitly-defaulted-function-cannot-be-declared-as-constexpr-because-the-implic
59 /// FIXME: Remove this, make all assign operators constexpr and introduce user
60 /// defined literals when we don't have to support GCC 5.3 anymore.
61 /// https://llvm.org/docs/GettingStarted.html#getting-a-modern-host-c-toolchain
62 struct LogValue {
63 uint8_t Log;
64 };
65
66public:
67 /// Default is byte-aligned.
68 constexpr Align() = default;
69 /// Do not perform checks in case of copy/move construct/assign, because the
70 /// checks have been performed when building `Other`.
71 constexpr Align(const Align &Other) = default;
72 constexpr Align(Align &&Other) = default;
73 Align &operator=(const Align &Other) = default;
74 Align &operator=(Align &&Other) = default;
75
76 explicit Align(uint64_t Value) {
77 assert(Value > 0 && "Value must not be 0");
78 assert(llvm::isPowerOf2_64(Value) && "Alignment is not a power of 2");
79 ShiftValue = Log2_64(Value);
30
Calling 'Log2_64'
32
Returning from 'Log2_64'
33
The value 255 is assigned to 'Lhs.ShiftValue'
80 assert(ShiftValue < 64 && "Broken invariant");
81 }
82
83 /// This is a hole in the type system and should not be abused.
84 /// Needed to interact with C for instance.
85 uint64_t value() const { return uint64_t(1) << ShiftValue; }
39
The result of the left shift is undefined due to shifting by '255', which is greater or equal to the width of type 'uint64_t'
86
87 /// Allow constructions of constexpr Align.
88 template <size_t kValue> constexpr static LogValue Constant() {
89 return LogValue{static_cast<uint8_t>(CTLog2<kValue>())};
90 }
91
92 /// Allow constructions of constexpr Align from types.
93 /// Compile time equivalent to Align(alignof(T)).
94 template <typename T> constexpr static LogValue Of() {
95 return Constant<std::alignment_of<T>::value>();
96 }
97
98 /// Constexpr constructor from LogValue type.
99 constexpr Align(LogValue CA) : ShiftValue(CA.Log) {}
100};
101
102/// Treats the value 0 as a 1, so Align is always at least 1.
103inline Align assumeAligned(uint64_t Value) {
104 return Value ? Align(Value) : Align();
105}
106
107/// This struct is a compact representation of a valid (power of two) or
108/// undefined (0) alignment.
109struct MaybeAlign : public llvm::Optional<Align> {
110private:
111 using UP = llvm::Optional<Align>;
112
113public:
114 /// Default is undefined.
115 MaybeAlign() = default;
116 /// Do not perform checks in case of copy/move construct/assign, because the
117 /// checks have been performed when building `Other`.
118 MaybeAlign(const MaybeAlign &Other) = default;
119 MaybeAlign &operator=(const MaybeAlign &Other) = default;
120 MaybeAlign(MaybeAlign &&Other) = default;
121 MaybeAlign &operator=(MaybeAlign &&Other) = default;
122
123 /// Use llvm::Optional<Align> constructor.
124 using UP::UP;
125
126 explicit MaybeAlign(uint64_t Value) {
127 assert((Value == 0 || llvm::isPowerOf2_64(Value)) &&
128 "Alignment is neither 0 nor a power of 2");
129 if (Value)
130 emplace(Value);
131 }
132
133 /// For convenience, returns a valid alignment or 1 if undefined.
134 Align valueOrOne() const { return hasValue() ? getValue() : Align(); }
135};
136
137/// Checks that SizeInBytes is a multiple of the alignment.
138inline bool isAligned(Align Lhs, uint64_t SizeInBytes) {
139 return SizeInBytes % Lhs.value() == 0;
140}
141
142/// Checks that Addr is a multiple of the alignment.
143inline bool isAddrAligned(Align Lhs, const void *Addr) {
144 return isAligned(Lhs, reinterpret_cast<uintptr_t>(Addr));
145}
146
147/// Returns a multiple of A needed to store `Size` bytes.
148inline uint64_t alignTo(uint64_t Size, Align A) {
149 const uint64_t Value = A.value();
150 // The following line is equivalent to `(Size + Value - 1) / Value * Value`.
151
152 // The division followed by a multiplication can be thought of as a right
153 // shift followed by a left shift which zeros out the extra bits produced in
154 // the bump; `~(Value - 1)` is a mask where all those bits being zeroed out
155 // are just zero.
156
157 // Most compilers can generate this code but the pattern may be missed when
158 // multiple functions get inlined.
159 return (Size + Value - 1) & ~(Value - 1U);
160}
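The mask form above gives the same result as the divide-and-multiply form whenever Value is a power of two; a quick stand-alone check of that identity with concrete numbers (not LLVM code):

  #include <cassert>
  #include <cstdint>

  static uint64_t alignToSketch(uint64_t Size, uint64_t Value) { // Value: power of 2
    return (Size + Value - 1) & ~(Value - 1U);
  }

  int main() {
    assert(alignToSketch(5, 8) == 8);    // (5 + 7) & ~7 == 12 & ~7 == 8
    assert(alignToSketch(16, 8) == 16);  // already a multiple, unchanged
    assert(alignToSketch(17, 8) == 24);  // same as (17 + 7) / 8 * 8
    return 0;
  }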
161
162/// If non-zero \p Skew is specified, the return value will be a minimal integer
163/// that is greater than or equal to \p Size and equal to \p A * N + \p Skew for
164/// some integer N. If \p Skew is larger than \p A, its value is adjusted to '\p
165/// Skew mod \p A'.
166///
167/// Examples:
168/// \code
169/// alignTo(5, Align(8), 7) = 7
170/// alignTo(17, Align(8), 1) = 17
171/// alignTo(~0LL, Align(8), 3) = 3
172/// \endcode
173inline uint64_t alignTo(uint64_t Size, Align A, uint64_t Skew) {
174 const uint64_t Value = A.value();
175 Skew %= Value;
176 return ((Size + Value - 1 - Skew) & ~(Value - 1U)) + Skew;
177}
178
179/// Returns a multiple of A needed to store `Size` bytes.
180/// Returns `Size` if current alignment is undefined.
181inline uint64_t alignTo(uint64_t Size, MaybeAlign A) {
182 return A ? alignTo(Size, A.getValue()) : Size;
183}
184
185/// Aligns `Addr` to `Alignment` bytes, rounding up.
186inline uintptr_t alignAddr(const void *Addr, Align Alignment) {
187 uintptr_t ArithAddr = reinterpret_cast<uintptr_t>(Addr);
188 assert(static_cast<uintptr_t>(ArithAddr + Alignment.value() - 1) >=
189 ArithAddr &&
190 "Overflow");
191 return alignTo(ArithAddr, Alignment);
192}
193
194/// Returns the offset to the next integer (mod 2**64) that is greater than
195/// or equal to \p Value and is a multiple of \p Align.
196inline uint64_t offsetToAlignment(uint64_t Value, Align Alignment) {
197 return alignTo(Value, Alignment) - Value;
198}
199
200/// Returns the necessary adjustment for aligning `Addr` to `Alignment`
201/// bytes, rounding up.
202inline uint64_t offsetToAlignedAddr(const void *Addr, Align Alignment) {
203 return offsetToAlignment(reinterpret_cast<uintptr_t>(Addr), Alignment);
204}
205
206/// Returns the log2 of the alignment.
207inline unsigned Log2(Align A) { return A.ShiftValue; }
208
209/// Returns the alignment that satisfies both alignments.
210/// Same semantic as MinAlign.
211inline Align commonAlignment(Align A, Align B) { return std::min(A, B); }
212
213/// Returns the alignment that satisfies both alignments.
214/// Same semantic as MinAlign.
215inline Align commonAlignment(Align A, uint64_t Offset) {
216 return Align(MinAlign(A.value(), Offset));
29. Calling constructor for 'Align'
34. Returning from constructor for 'Align'
217}
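This overload is where the analyzer's path starts (step 29): the result of MinAlign(A.value(), Offset), defined in MathExtras.h below, goes straight into the Align constructor, so a zero result reaches Log2_64. For ordinary inputs it simply yields the largest power of two dividing both arguments; a short usage sketch, assuming LLVM headers matching this listing:

  #include "llvm/Support/Alignment.h"

  void commonAlignmentSketch() {
    llvm::Align A = llvm::commonAlignment(llvm::Align(16), 8);  // Align(8)
    llvm::Align B = llvm::commonAlignment(llvm::Align(16), 12); // Align(4)
    (void)A;
    (void)B;
  }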
218
219/// Returns the alignment that satisfies both alignments.
220/// Same semantic as MinAlign.
221inline MaybeAlign commonAlignment(MaybeAlign A, MaybeAlign B) {
222 return A && B ? commonAlignment(*A, *B) : A ? A : B;
223}
224
225/// Returns the alignment that satisfies both alignments.
226/// Same semantic as MinAlign.
227inline MaybeAlign commonAlignment(MaybeAlign A, uint64_t Offset) {
228 return MaybeAlign(MinAlign((*A).value(), Offset));
229}
230
231/// Returns a representation of the alignment that encodes undefined as 0.
232inline unsigned encode(MaybeAlign A) { return A ? A->ShiftValue + 1 : 0; }
233
234/// Dual operation of the encode function above.
235inline MaybeAlign decodeMaybeAlign(unsigned Value) {
236 if (Value == 0)
237 return MaybeAlign();
238 Align Out;
239 Out.ShiftValue = Value - 1;
240 return Out;
241}
242
243/// Returns a representation of the alignment, the encoded value is positive by
244/// definition.
245inline unsigned encode(Align A) { return encode(MaybeAlign(A)); }
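The encoding reserves 0 for "undefined" and stores log2(value) + 1 otherwise, so encode and decodeMaybeAlign round-trip. A brief sketch, assuming LLVM headers matching this listing (which still expose hasValue/getValue on Optional):

  #include "llvm/Support/Alignment.h"
  #include <cassert>

  void encodeSketch() {
    assert(llvm::encode(llvm::MaybeAlign()) == 0);  // undefined -> 0
    assert(llvm::encode(llvm::Align(1)) == 1);      // log2(1) + 1
    assert(llvm::encode(llvm::Align(8)) == 4);      // log2(8) + 1
    assert(llvm::decodeMaybeAlign(4).getValue() == llvm::Align(8));
  }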
246
247/// Comparisons between Align and scalars. Rhs must be positive.
248inline bool operator==(Align Lhs, uint64_t Rhs) {
249 ALIGN_CHECK_ISPOSITIVE(Rhs);
250 return Lhs.value() == Rhs;
251}
252inline bool operator!=(Align Lhs, uint64_t Rhs) {
253 ALIGN_CHECK_ISPOSITIVE(Rhs);
254 return Lhs.value() != Rhs;
38. Calling 'Align::value'
255}
256inline bool operator<=(Align Lhs, uint64_t Rhs) {
257 ALIGN_CHECK_ISPOSITIVE(Rhs);
258 return Lhs.value() <= Rhs;
259}
260inline bool operator>=(Align Lhs, uint64_t Rhs) {
261 ALIGN_CHECK_ISPOSITIVE(Rhs);
262 return Lhs.value() >= Rhs;
263}
264inline bool operator<(Align Lhs, uint64_t Rhs) {
265 ALIGN_CHECK_ISPOSITIVE(Rhs);
266 return Lhs.value() < Rhs;
267}
268inline bool operator>(Align Lhs, uint64_t Rhs) {
269 ALIGN_CHECK_ISPOSITIVE(Rhs);
270 return Lhs.value() > Rhs;
271}
272
273/// Comparisons between MaybeAlign and scalars.
274inline bool operator==(MaybeAlign Lhs, uint64_t Rhs) {
275 return Lhs ? (*Lhs).value() == Rhs : Rhs == 0;
276}
277inline bool operator!=(MaybeAlign Lhs, uint64_t Rhs) {
278 return Lhs ? (*Lhs).value() != Rhs : Rhs != 0;
279}
280
281/// Comparisons operators between Align.
282inline bool operator==(Align Lhs, Align Rhs) {
283 return Lhs.ShiftValue == Rhs.ShiftValue;
284}
285inline bool operator!=(Align Lhs, Align Rhs) {
286 return Lhs.ShiftValue != Rhs.ShiftValue;
287}
288inline bool operator<=(Align Lhs, Align Rhs) {
289 return Lhs.ShiftValue <= Rhs.ShiftValue;
290}
291inline bool operator>=(Align Lhs, Align Rhs) {
292 return Lhs.ShiftValue >= Rhs.ShiftValue;
293}
294inline bool operator<(Align Lhs, Align Rhs) {
295 return Lhs.ShiftValue < Rhs.ShiftValue;
296}
297inline bool operator>(Align Lhs, Align Rhs) {
298 return Lhs.ShiftValue > Rhs.ShiftValue;
299}
300
301// Don't allow relational comparisons with MaybeAlign.
302bool operator<=(Align Lhs, MaybeAlign Rhs) = delete;
303bool operator>=(Align Lhs, MaybeAlign Rhs) = delete;
304bool operator<(Align Lhs, MaybeAlign Rhs) = delete;
305bool operator>(Align Lhs, MaybeAlign Rhs) = delete;
306
307bool operator<=(MaybeAlign Lhs, Align Rhs) = delete;
308bool operator>=(MaybeAlign Lhs, Align Rhs) = delete;
309bool operator<(MaybeAlign Lhs, Align Rhs) = delete;
310bool operator>(MaybeAlign Lhs, Align Rhs) = delete;
311
312bool operator<=(MaybeAlign Lhs, MaybeAlign Rhs) = delete;
313bool operator>=(MaybeAlign Lhs, MaybeAlign Rhs) = delete;
314bool operator<(MaybeAlign Lhs, MaybeAlign Rhs) = delete;
315bool operator>(MaybeAlign Lhs, MaybeAlign Rhs) = delete;
316
317inline Align operator*(Align Lhs, uint64_t Rhs) {
318 assert(Rhs > 0 && "Rhs must be positive");
319 return Align(Lhs.value() * Rhs);
320}
321
322inline MaybeAlign operator*(MaybeAlign Lhs, uint64_t Rhs) {
323 assert(Rhs > 0 && "Rhs must be positive");
324 return Lhs ? Lhs.getValue() * Rhs : MaybeAlign();
325}
326
327inline Align operator/(Align Lhs, uint64_t Divisor) {
328 assert(llvm::isPowerOf2_64(Divisor) &&
329 "Divisor must be positive and a power of 2");
330 assert(Lhs != 1 && "Can't halve byte alignment");
331 return Align(Lhs.value() / Divisor);
332}
333
334inline MaybeAlign operator/(MaybeAlign Lhs, uint64_t Divisor) {
335 assert(llvm::isPowerOf2_64(Divisor) &&
336 "Divisor must be positive and a power of 2");
337 return Lhs ? Lhs.getValue() / Divisor : MaybeAlign();
338}
339
340inline Align max(MaybeAlign Lhs, Align Rhs) {
341 return Lhs && *Lhs > Rhs ? *Lhs : Rhs;
342}
343
344inline Align max(Align Lhs, MaybeAlign Rhs) {
345 return Rhs && *Rhs > Lhs ? *Rhs : Lhs;
346}
347
348#ifndef NDEBUG
349// For usage in LLVM_DEBUG macros.
350inline std::string DebugStr(const Align &A) {
351 return std::to_string(A.value());
352}
353// For usage in LLVM_DEBUG macros.
354inline std::string DebugStr(const MaybeAlign &MA) {
355 if (MA)
356 return std::to_string(MA->value());
357 return "None";
358}
359#endif // NDEBUG
360
361#undef ALIGN_CHECK_ISPOSITIVE
362
363} // namespace llvm
364
365#endif // LLVM_SUPPORT_ALIGNMENT_H_

/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Support/MathExtras.h

1//===-- llvm/Support/MathExtras.h - Useful math functions -------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains some functions that are useful for math stuff.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_SUPPORT_MATHEXTRAS_H
14#define LLVM_SUPPORT_MATHEXTRAS_H
15
16#include "llvm/Support/Compiler.h"
17#include <cassert>
18#include <climits>
19#include <cmath>
20#include <cstdint>
21#include <cstring>
22#include <limits>
23#include <type_traits>
24
25#ifdef __ANDROID_NDK__
26#include <android/api-level.h>
27#endif
28
29#ifdef _MSC_VER
30// Declare these intrinsics manually rather including intrin.h. It's very
31// expensive, and MathExtras.h is popular.
32// #include <intrin.h>
33extern "C" {
34unsigned char _BitScanForward(unsigned long *_Index, unsigned long _Mask);
35unsigned char _BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask);
36unsigned char _BitScanReverse(unsigned long *_Index, unsigned long _Mask);
37unsigned char _BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask);
38}
39#endif
40
41namespace llvm {
42
43/// The behavior an operation has on an input of 0.
44enum ZeroBehavior {
45 /// The returned value is undefined.
46 ZB_Undefined,
47 /// The returned value is numeric_limits<T>::max()
48 ZB_Max,
49 /// The returned value is numeric_limits<T>::digits
50 ZB_Width
51};
52
53/// Mathematical constants.
54namespace numbers {
55// TODO: Track C++20 std::numbers.
56// TODO: Favor using the hexadecimal FP constants (requires C++17).
57constexpr double e = 2.7182818284590452354, // (0x1.5bf0a8b145749P+1) https://oeis.org/A001113
58 egamma = .57721566490153286061, // (0x1.2788cfc6fb619P-1) https://oeis.org/A001620
59 ln2 = .69314718055994530942, // (0x1.62e42fefa39efP-1) https://oeis.org/A002162
60 ln10 = 2.3025850929940456840, // (0x1.26bb1bbb55516P+1) https://oeis.org/A002392
61 log2e = 1.4426950408889634074, // (0x1.71547652b82feP+0)
62 log10e = .43429448190325182765, // (0x1.bcb7b1526e50eP-2)
63 pi = 3.1415926535897932385, // (0x1.921fb54442d18P+1) https://oeis.org/A000796
64 inv_pi = .31830988618379067154, // (0x1.45f306bc9c883P-2) https://oeis.org/A049541
65 sqrtpi = 1.7724538509055160273, // (0x1.c5bf891b4ef6bP+0) https://oeis.org/A002161
66 inv_sqrtpi = .56418958354775628695, // (0x1.20dd750429b6dP-1) https://oeis.org/A087197
67 sqrt2 = 1.4142135623730950488, // (0x1.6a09e667f3bcdP+0) https://oeis.org/A002193
68 inv_sqrt2 = .70710678118654752440, // (0x1.6a09e667f3bcdP-1)
69 sqrt3 = 1.7320508075688772935, // (0x1.bb67ae8584caaP+0) https://oeis.org/A002194
70 inv_sqrt3 = .57735026918962576451, // (0x1.279a74590331cP-1)
71 phi = 1.6180339887498948482; // (0x1.9e3779b97f4a8P+0) https://oeis.org/A001622
72constexpr float ef = 2.71828183F, // (0x1.5bf0a8P+1) https://oeis.org/A001113
73 egammaf = .577215665F, // (0x1.2788d0P-1) https://oeis.org/A001620
74 ln2f = .693147181F, // (0x1.62e430P-1) https://oeis.org/A002162
75 ln10f = 2.30258509F, // (0x1.26bb1cP+1) https://oeis.org/A002392
76 log2ef = 1.44269504F, // (0x1.715476P+0)
77 log10ef = .434294482F, // (0x1.bcb7b2P-2)
78 pif = 3.14159265F, // (0x1.921fb6P+1) https://oeis.org/A000796
79 inv_pif = .318309886F, // (0x1.45f306P-2) https://oeis.org/A049541
80 sqrtpif = 1.77245385F, // (0x1.c5bf8aP+0) https://oeis.org/A002161
81 inv_sqrtpif = .564189584F, // (0x1.20dd76P-1) https://oeis.org/A087197
82 sqrt2f = 1.41421356F, // (0x1.6a09e6P+0) https://oeis.org/A002193
83 inv_sqrt2f = .707106781F, // (0x1.6a09e6P-1)
84 sqrt3f = 1.73205081F, // (0x1.bb67aeP+0) https://oeis.org/A002194
85 inv_sqrt3f = .577350269F, // (0x1.279a74P-1)
86 phif = 1.61803399F; // (0x1.9e377aP+0) https://oeis.org/A001622
87} // namespace numbers
88
89namespace detail {
90template <typename T, std::size_t SizeOfT> struct TrailingZerosCounter {
91 static unsigned count(T Val, ZeroBehavior) {
92 if (!Val)
93 return std::numeric_limits<T>::digits;
94 if (Val & 0x1)
95 return 0;
96
97 // Bisection method.
98 unsigned ZeroBits = 0;
99 T Shift = std::numeric_limits<T>::digits >> 1;
100 T Mask = std::numeric_limits<T>::max() >> Shift;
101 while (Shift) {
102 if ((Val & Mask) == 0) {
103 Val >>= Shift;
104 ZeroBits |= Shift;
105 }
106 Shift >>= 1;
107 Mask >>= Shift;
108 }
109 return ZeroBits;
110 }
111};
112
113#if defined(__GNUC__) || defined(_MSC_VER)
114template <typename T> struct TrailingZerosCounter<T, 4> {
115 static unsigned count(T Val, ZeroBehavior ZB) {
116 if (ZB != ZB_Undefined && Val == 0)
117 return 32;
118
119#if __has_builtin(__builtin_ctz) || defined(__GNUC__)
120 return __builtin_ctz(Val);
121#elif defined(_MSC_VER)
122 unsigned long Index;
123 _BitScanForward(&Index, Val);
124 return Index;
125#endif
126 }
127};
128
129#if !defined(_MSC_VER) || defined(_M_X64)
130template <typename T> struct TrailingZerosCounter<T, 8> {
131 static unsigned count(T Val, ZeroBehavior ZB) {
132 if (ZB != ZB_Undefined && Val == 0)
133 return 64;
134
135#if __has_builtin(__builtin_ctzll) || defined(__GNUC__)
136 return __builtin_ctzll(Val);
137#elif defined(_MSC_VER)
138 unsigned long Index;
139 _BitScanForward64(&Index, Val);
140 return Index;
141#endif
142 }
143};
144#endif
145#endif
146} // namespace detail
147
148/// Count number of 0's from the least significant bit to the most
149/// stopping at the first 1.
150///
151/// Only unsigned integral types are allowed.
152///
153/// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are
154/// valid arguments.
155template <typename T>
156unsigned countTrailingZeros(T Val, ZeroBehavior ZB = ZB_Width) {
157 static_assert(std::numeric_limits<T>::is_integer &&
158 !std::numeric_limits<T>::is_signed,
159 "Only unsigned integral types are allowed.");
160 return llvm::detail::TrailingZerosCounter<T, sizeof(T)>::count(Val, ZB);
161}
162
163namespace detail {
164template <typename T, std::size_t SizeOfT> struct LeadingZerosCounter {
165 static unsigned count(T Val, ZeroBehavior) {
166 if (!Val)
167 return std::numeric_limits<T>::digits;
168
169 // Bisection method.
170 unsigned ZeroBits = 0;
171 for (T Shift = std::numeric_limits<T>::digits >> 1; Shift; Shift >>= 1) {
172 T Tmp = Val >> Shift;
173 if (Tmp)
174 Val = Tmp;
175 else
176 ZeroBits |= Shift;
177 }
178 return ZeroBits;
179 }
180};
181
182#if defined(__GNUC__) || defined(_MSC_VER)
183template <typename T> struct LeadingZerosCounter<T, 4> {
184 static unsigned count(T Val, ZeroBehavior ZB) {
185 if (ZB != ZB_Undefined && Val == 0)
186 return 32;
187
188#if __has_builtin(__builtin_clz) || defined(__GNUC__)
189 return __builtin_clz(Val);
190#elif defined(_MSC_VER)
191 unsigned long Index;
192 _BitScanReverse(&Index, Val);
193 return Index ^ 31;
194#endif
195 }
196};
197
198#if !defined(_MSC_VER) || defined(_M_X64)
199template <typename T> struct LeadingZerosCounter<T, 8> {
200 static unsigned count(T Val, ZeroBehavior ZB) {
201 if (ZB != ZB_Undefined && Val == 0)
202 return 64;
203
204#if __has_builtin(__builtin_clzll) || defined(__GNUC__)
205 return __builtin_clzll(Val);
206#elif defined(_MSC_VER)
207 unsigned long Index;
208 _BitScanReverse64(&Index, Val);
209 return Index ^ 63;
210#endif
211 }
212};
213#endif
214#endif
215} // namespace detail
216
217/// Count number of 0's from the most significant bit to the least
218/// stopping at the first 1.
219///
220/// Only unsigned integral types are allowed.
221///
222/// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are
223/// valid arguments.
224template <typename T>
225unsigned countLeadingZeros(T Val, ZeroBehavior ZB = ZB_Width) {
226 static_assert(std::numeric_limits<T>::is_integer &&
227 !std::numeric_limits<T>::is_signed,
228 "Only unsigned integral types are allowed.");
229 return llvm::detail::LeadingZerosCounter<T, sizeof(T)>::count(Val, ZB);
230}
231
232/// Get the index of the first set bit starting from the least
233/// significant bit.
234///
235/// Only unsigned integral types are allowed.
236///
237/// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are
238/// valid arguments.
239template <typename T> T findFirstSet(T Val, ZeroBehavior ZB = ZB_Max) {
240 if (ZB == ZB_Max && Val == 0)
241 return std::numeric_limits<T>::max();
242
243 return countTrailingZeros(Val, ZB_Undefined);
244}
245
246/// Create a bitmask with the N right-most bits set to 1, and all other
247/// bits set to 0. Only unsigned types are allowed.
248template <typename T> T maskTrailingOnes(unsigned N) {
249 static_assert(std::is_unsigned<T>::value, "Invalid type!");
250 const unsigned Bits = CHAR_BIT * sizeof(T);
251 assert(N <= Bits && "Invalid bit index");
252 return N == 0 ? 0 : (T(-1) >> (Bits - N));
253}
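The N == 0 special case exists for the same reason the report flags value(): shifting a value by its full width (or more) is undefined, so the full-width shift is avoided rather than computed. A stand-alone 64-bit version for illustration:

  #include <cassert>
  #include <cstdint>

  static uint64_t maskTrailingOnes64(unsigned N) { // N must be <= 64
    return N == 0 ? 0 : (~uint64_t(0) >> (64 - N));
  }

  int main() {
    assert(maskTrailingOnes64(0) == 0);             // no shift by 64
    assert(maskTrailingOnes64(8) == 0xFF);
    assert(maskTrailingOnes64(64) == ~uint64_t(0));
    return 0;
  }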
254
255/// Create a bitmask with the N left-most bits set to 1, and all other
256/// bits set to 0. Only unsigned types are allowed.
257template <typename T> T maskLeadingOnes(unsigned N) {
258 return ~maskTrailingOnes<T>(CHAR_BIT * sizeof(T) - N);
259}
260
261/// Create a bitmask with the N right-most bits set to 0, and all other
262/// bits set to 1. Only unsigned types are allowed.
263template <typename T> T maskTrailingZeros(unsigned N) {
264 return maskLeadingOnes<T>(CHAR_BIT * sizeof(T) - N);
265}
266
267/// Create a bitmask with the N left-most bits set to 0, and all other
268/// bits set to 1. Only unsigned types are allowed.
269template <typename T> T maskLeadingZeros(unsigned N) {
270 return maskTrailingOnes<T>(CHAR_BIT * sizeof(T) - N);
271}
272
273/// Get the index of the last set bit starting from the least
274/// significant bit.
275///
276/// Only unsigned integral types are allowed.
277///
278/// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are
279/// valid arguments.
280template <typename T> T findLastSet(T Val, ZeroBehavior ZB = ZB_Max) {
281 if (ZB == ZB_Max && Val == 0)
282 return std::numeric_limits<T>::max();
283
284 // Use ^ instead of - because both gcc and llvm can remove the associated ^
285 // in the __builtin_clz intrinsic on x86.
286 return countLeadingZeros(Val, ZB_Undefined) ^
287 (std::numeric_limits<T>::digits - 1);
288}
289
290/// Macro compressed bit reversal table for 256 bits.
291///
292/// http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
293static const unsigned char BitReverseTable256[256] = {
294#define R2(n) n, n + 2 * 64, n + 1 * 64, n + 3 * 64
295#define R4(n) R2(n), R2(n + 2 * 16), R2(n + 1 * 16), R2(n + 3 * 16)
296#define R6(n) R4(n), R4(n + 2 * 4), R4(n + 1 * 4), R4(n + 3 * 4)
297 R6(0), R6(2), R6(1), R6(3)
298#undef R2
299#undef R4
300#undef R6
301};
302
303/// Reverse the bits in \p Val.
304template <typename T>
305T reverseBits(T Val) {
306 unsigned char in[sizeof(Val)];
307 unsigned char out[sizeof(Val)];
308 std::memcpy(in, &Val, sizeof(Val));
309 for (unsigned i = 0; i < sizeof(Val); ++i)
310 out[(sizeof(Val) - i) - 1] = BitReverseTable256[in[i]];
311 std::memcpy(&Val, out, sizeof(Val));
312 return Val;
313}
314
315#if __has_builtin(__builtin_bitreverse8)
316template<>
317inline uint8_t reverseBits<uint8_t>(uint8_t Val) {
318 return __builtin_bitreverse8(Val);
319}
320#endif
321
322#if __has_builtin(__builtin_bitreverse16)
323template<>
324inline uint16_t reverseBits<uint16_t>(uint16_t Val) {
325 return __builtin_bitreverse16(Val);
326}
327#endif
328
329#if __has_builtin(__builtin_bitreverse32)
330template<>
331inline uint32_t reverseBits<uint32_t>(uint32_t Val) {
332 return __builtin_bitreverse32(Val);
333}
334#endif
335
336#if __has_builtin(__builtin_bitreverse64)
337template<>
338inline uint64_t reverseBits<uint64_t>(uint64_t Val) {
339 return __builtin_bitreverse64(Val);
340}
341#endif
342
343// NOTE: The following support functions use the _32/_64 extensions instead of
344// type overloading so that signed and unsigned integers can be used without
345// ambiguity.
346
347/// Return the high 32 bits of a 64 bit value.
348constexpr inline uint32_t Hi_32(uint64_t Value) {
349 return static_cast<uint32_t>(Value >> 32);
350}
351
352/// Return the low 32 bits of a 64 bit value.
353constexpr inline uint32_t Lo_32(uint64_t Value) {
354 return static_cast<uint32_t>(Value);
355}
356
357/// Make a 64-bit integer from a high / low pair of 32-bit integers.
358constexpr inline uint64_t Make_64(uint32_t High, uint32_t Low) {
359 return ((uint64_t)High << 32) | (uint64_t)Low;
360}
361
362/// Checks if an integer fits into the given bit width.
363template <unsigned N> constexpr inline bool isInt(int64_t x) {
364 return N >= 64 || (-(INT64_C(1)<<(N-1)) <= x && x < (INT64_C(1)<<(N-1)));
365}
366// Template specializations to get better code for common cases.
367template <> constexpr inline bool isInt<8>(int64_t x) {
368 return static_cast<int8_t>(x) == x;
369}
370template <> constexpr inline bool isInt<16>(int64_t x) {
371 return static_cast<int16_t>(x) == x;
372}
373template <> constexpr inline bool isInt<32>(int64_t x) {
374 return static_cast<int32_t>(x) == x;
375}
376
377/// Checks if a signed integer is an N bit number shifted left by S.
378template <unsigned N, unsigned S>
379constexpr inline bool isShiftedInt(int64_t x) {
380 static_assert(
381 N > 0, "isShiftedInt<0> doesn't make sense (refers to a 0-bit number).");
382 static_assert(N + S <= 64, "isShiftedInt<N, S> with N + S > 64 is too wide.");
383 return isInt<N + S>(x) && (x % (UINT64_C(1) << S) == 0);
384}
385
386/// Checks if an unsigned integer fits into the given bit width.
387///
388/// This is written as two functions rather than as simply
389///
390/// return N >= 64 || X < (UINT64_C(1) << N);
391///
392/// to keep MSVC from (incorrectly) warning on isUInt<64> that we're shifting
393/// left too many places.
394template <unsigned N>
395constexpr inline std::enable_if_t<(N < 64), bool> isUInt(uint64_t X) {
396 static_assert(N > 0, "isUInt<0> doesn't make sense");
397 return X < (UINT64_C(1) << (N));
398}
399template <unsigned N>
400constexpr inline std::enable_if_t<N >= 64, bool> isUInt(uint64_t) {
401 return true;
402}
403
404// Template specializations to get better code for common cases.
405template <> constexpr inline bool isUInt<8>(uint64_t x) {
406 return static_cast<uint8_t>(x) == x;
407}
408template <> constexpr inline bool isUInt<16>(uint64_t x) {
409 return static_cast<uint16_t>(x) == x;
410}
411template <> constexpr inline bool isUInt<32>(uint64_t x) {
412 return static_cast<uint32_t>(x) == x;
413}
414
415/// Checks if an unsigned integer is an N bit number shifted left by S.
416template <unsigned N, unsigned S>
417constexpr inline bool isShiftedUInt(uint64_t x) {
418 static_assert(
419 N > 0, "isShiftedUInt<0> doesn't make sense (refers to a 0-bit number)");
420 static_assert(N + S <= 64,
421 "isShiftedUInt<N, S> with N + S > 64 is too wide.");
422 // Per the two static_asserts above, S must be strictly less than 64. So
423 // 1 << S is not undefined behavior.
424 return isUInt<N + S>(x) && (x % (UINT64_C(1) << S) == 0);
425}
426
427/// Gets the maximum value for a N-bit unsigned integer.
428inline uint64_t maxUIntN(uint64_t N) {
429 assert(N > 0 && N <= 64 && "integer width out of range");
430
431 // uint64_t(1) << 64 is undefined behavior, so we can't do
432 // (uint64_t(1) << N) - 1
433 // without checking first that N != 64. But this works and doesn't have a
434 // branch.
435 return UINT64_MAX >> (64 - N);
436}
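Right-shifting UINT64_MAX sidesteps the 1 << 64 that the naive (1 << N) - 1 would perform at N == 64; a quick stand-alone check:

  #include <cassert>
  #include <cstdint>

  int main() {
    auto maxUIntNSketch = [](unsigned N) { return UINT64_MAX >> (64 - N); };
    assert(maxUIntNSketch(1) == 1);
    assert(maxUIntNSketch(8) == 255);
    assert(maxUIntNSketch(64) == UINT64_MAX);       // no undefined shift
    return 0;
  }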
437
438/// Gets the minimum value for a N-bit signed integer.
439inline int64_t minIntN(int64_t N) {
440 assert(N > 0 && N <= 64 && "integer width out of range");
441
442 return UINT64_C(1) + ~(UINT64_C(1) << (N - 1));
443}
444
445/// Gets the maximum value for a N-bit signed integer.
446inline int64_t maxIntN(int64_t N) {
447 assert(N > 0 && N <= 64 && "integer width out of range");
448
449 // This relies on two's complement wraparound when N == 64, so we convert to
450 // int64_t only at the very end to avoid UB.
451 return (UINT64_C(1) << (N - 1)) - 1;
452}
453
454/// Checks if an unsigned integer fits into the given (dynamic) bit width.
455inline bool isUIntN(unsigned N, uint64_t x) {
456 return N >= 64 || x <= maxUIntN(N);
457}
458
459/// Checks if a signed integer fits into the given (dynamic) bit width.
460inline bool isIntN(unsigned N, int64_t x) {
461 return N >= 64 || (minIntN(N) <= x && x <= maxIntN(N));
462}
463
464/// Return true if the argument is a non-empty sequence of ones starting at the
465/// least significant bit with the remainder zero (32 bit version).
466/// Ex. isMask_32(0x0000FFFFU) == true.
467constexpr inline bool isMask_32(uint32_t Value) {
468 return Value && ((Value + 1) & Value) == 0;
469}
470
471/// Return true if the argument is a non-empty sequence of ones starting at the
472/// least significant bit with the remainder zero (64 bit version).
473constexpr inline bool isMask_64(uint64_t Value) {
474 return Value && ((Value + 1) & Value) == 0;
475}
476
477/// Return true if the argument contains a non-empty sequence of ones with the
478/// remainder zero (32 bit version.) Ex. isShiftedMask_32(0x0000FF00U) == true.
479constexpr inline bool isShiftedMask_32(uint32_t Value) {
480 return Value && isMask_32((Value - 1) | Value);
481}
482
483/// Return true if the argument contains a non-empty sequence of ones with the
484/// remainder zero (64 bit version.)
485constexpr inline bool isShiftedMask_64(uint64_t Value) {
486 return Value && isMask_64((Value - 1) | Value);
487}
488
489/// Return true if the argument is a power of two > 0.
490/// Ex. isPowerOf2_32(0x00100000U) == true (32 bit edition.)
491constexpr inline bool isPowerOf2_32(uint32_t Value) {
492 return Value && !(Value & (Value - 1));
493}
494
495/// Return true if the argument is a power of two > 0 (64 bit edition.)
496constexpr inline bool isPowerOf2_64(uint64_t Value) {
497 return Value && !(Value & (Value - 1));
498}
499
500/// Count the number of ones from the most significant bit to the first
501/// zero bit.
502///
503/// Ex. countLeadingOnes(0xFF0FFF00) == 8.
504/// Only unsigned integral types are allowed.
505///
506/// \param ZB the behavior on an input of all ones. Only ZB_Width and
507/// ZB_Undefined are valid arguments.
508template <typename T>
509unsigned countLeadingOnes(T Value, ZeroBehavior ZB = ZB_Width) {
510 static_assert(std::numeric_limits<T>::is_integer &&
511 !std::numeric_limits<T>::is_signed,
512 "Only unsigned integral types are allowed.");
513 return countLeadingZeros<T>(~Value, ZB);
514}
515
516/// Count the number of ones from the least significant bit to the first
517/// zero bit.
518///
519/// Ex. countTrailingOnes(0x00FF00FF) == 8.
520/// Only unsigned integral types are allowed.
521///
522/// \param ZB the behavior on an input of all ones. Only ZB_Width and
523/// ZB_Undefined are valid arguments.
524template <typename T>
525unsigned countTrailingOnes(T Value, ZeroBehavior ZB = ZB_Width) {
526 static_assert(std::numeric_limits<T>::is_integer &&
527 !std::numeric_limits<T>::is_signed,
528 "Only unsigned integral types are allowed.");
529 return countTrailingZeros<T>(~Value, ZB);
530}
531
532namespace detail {
533template <typename T, std::size_t SizeOfT> struct PopulationCounter {
534 static unsigned count(T Value) {
535 // Generic version, forward to 32 bits.
536 static_assert(SizeOfT <= 4, "Not implemented!");
537#if defined(__GNUC__)
538 return __builtin_popcount(Value);
539#else
540 uint32_t v = Value;
541 v = v - ((v >> 1) & 0x55555555);
542 v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
543 return ((v + (v >> 4) & 0xF0F0F0F) * 0x1010101) >> 24;
544#endif
545 }
546};
547
548template <typename T> struct PopulationCounter<T, 8> {
549 static unsigned count(T Value) {
550#if defined(__GNUC__)
551 return __builtin_popcountll(Value);
552#else
553 uint64_t v = Value;
554 v = v - ((v >> 1) & 0x5555555555555555ULL);
555 v = (v & 0x3333333333333333ULL) + ((v >> 2) & 0x3333333333333333ULL);
556 v = (v + (v >> 4)) & 0x0F0F0F0F0F0F0F0FULL;
557 return unsigned((uint64_t)(v * 0x0101010101010101ULL) >> 56);
558#endif
559 }
560};
561} // namespace detail
562
563/// Count the number of set bits in a value.
564/// Ex. countPopulation(0xF000F000) = 8
565/// Returns 0 if the word is zero.
566template <typename T>
567inline unsigned countPopulation(T Value) {
568 static_assert(std::numeric_limits<T>::is_integer &&
569 !std::numeric_limits<T>::is_signed,
570 "Only unsigned integral types are allowed.");
571 return detail::PopulationCounter<T, sizeof(T)>::count(Value);
572}
573
574/// Compile time Log2.
575/// Valid only for positive powers of two.
576template <size_t kValue> constexpr inline size_t CTLog2() {
577 static_assert(kValue > 0 && llvm::isPowerOf2_64(kValue),
578 "Value is not a valid power of 2");
579 return 1 + CTLog2<kValue / 2>();
580}
581
582template <> constexpr inline size_t CTLog2<1>() { return 0; }
583
584/// Return the log base 2 of the specified value.
585inline double Log2(double Value) {
586#if defined(__ANDROID_API__) && __ANDROID_API__ < 18
587 return __builtin_log(Value) / __builtin_log(2.0);
588#else
589 return log2(Value);
590#endif
591}
592
593/// Return the floor log base 2 of the specified value, -1 if the value is zero.
594/// (32 bit edition.)
595/// Ex. Log2_32(32) == 5, Log2_32(1) == 0, Log2_32(0) == -1, Log2_32(6) == 2
596inline unsigned Log2_32(uint32_t Value) {
597 return 31 - countLeadingZeros(Value);
598}
599
600/// Return the floor log base 2 of the specified value, -1 if the value is zero.
601/// (64 bit edition.)
602inline unsigned Log2_64(uint64_t Value) {
603 return 63 - countLeadingZeros(Value);
31. Returning the value 4294967295
604}
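This is where the note above comes from: with the default ZB_Width behaviour, countLeadingZeros(0) is 64, so the subtraction is carried out in unsigned arithmetic as 63 - 64 = 2^32 - 1 = 4294967295; it matches the documented "-1 if the value is zero" only when the caller reinterprets the result as signed. Stored into the 8-bit ShiftValue of Align, 4294967295 mod 256 = 255, which is the shift amount the warning at Alignment.h line 85 reports.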
605
606/// Return the ceil log base 2 of the specified value, 32 if the value is zero.
607/// (32 bit edition).
608/// Ex. Log2_32_Ceil(32) == 5, Log2_32_Ceil(1) == 0, Log2_32_Ceil(6) == 3
609inline unsigned Log2_32_Ceil(uint32_t Value) {
610 return 32 - countLeadingZeros(Value - 1);
611}
612
613/// Return the ceil log base 2 of the specified value, 64 if the value is zero.
614/// (64 bit edition.)
615inline unsigned Log2_64_Ceil(uint64_t Value) {
616 return 64 - countLeadingZeros(Value - 1);
617}
618
619/// Return the greatest common divisor of the values using Euclid's algorithm.
620template <typename T>
621inline T greatestCommonDivisor(T A, T B) {
622 while (B) {
623 T Tmp = B;
624 B = A % B;
625 A = Tmp;
626 }
627 return A;
628}
629
630inline uint64_t GreatestCommonDivisor64(uint64_t A, uint64_t B) {
631 return greatestCommonDivisor<uint64_t>(A, B);
632}
633
634/// This function takes a 64-bit integer and returns the bit equivalent double.
635inline double BitsToDouble(uint64_t Bits) {
636 double D;
637 static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes");
638 memcpy(&D, &Bits, sizeof(Bits));
639 return D;
640}
641
642/// This function takes a 32-bit integer and returns the bit equivalent float.
643inline float BitsToFloat(uint32_t Bits) {
644 float F;
645 static_assert(sizeof(uint32_t) == sizeof(float), "Unexpected type sizes");
646 memcpy(&F, &Bits, sizeof(Bits));
647 return F;
648}
649
650/// This function takes a double and returns the bit equivalent 64-bit integer.
651/// Note that copying doubles around changes the bits of NaNs on some hosts,
652/// notably x86, so this routine cannot be used if these bits are needed.
653inline uint64_t DoubleToBits(double Double) {
654 uint64_t Bits;
655 static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes");
656 memcpy(&Bits, &Double, sizeof(Double));
657 return Bits;
658}
659
660/// This function takes a float and returns the bit equivalent 32-bit integer.
661/// Note that copying floats around changes the bits of NaNs on some hosts,
662/// notably x86, so this routine cannot be used if these bits are needed.
663inline uint32_t FloatToBits(float Float) {
664 uint32_t Bits;
665 static_assert(sizeof(uint32_t) == sizeof(float), "Unexpected type sizes");
666 memcpy(&Bits, &Float, sizeof(Float));
667 return Bits;
668}
669
670/// A and B are either alignments or offsets. Return the minimum alignment that
671/// may be assumed after adding the two together.
672constexpr inline uint64_t MinAlign(uint64_t A, uint64_t B) {
673 // The largest power of 2 that divides both A and B.
674 //
675 // Replace "-Value" by "1+~Value" in the following commented code to avoid
676 // MSVC warning C4146
677 // return (A | B) & -(A | B);
678 return (A | B) & (1 + ~(A | B));
679}
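(A | B) & -(A | B) isolates the lowest set bit of A | B, i.e. the largest power of two dividing both inputs; this is also the function whose zero result the analyzer assumes reaches the Align constructor in commonAlignment above. A stand-alone check with concrete values:

  #include <cassert>
  #include <cstdint>

  static uint64_t minAlignSketch(uint64_t A, uint64_t B) {
    return (A | B) & (1 + ~(A | B));    // same formula, MSVC-friendly negation
  }

  int main() {
    assert(minAlignSketch(16, 8) == 8);
    assert(minAlignSketch(16, 12) == 4);
    assert(minAlignSketch(0, 0) == 0);  // the degenerate case on the reported path
    return 0;
  }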
680
681/// Returns the next power of two (in 64-bits) that is strictly greater than A.
682/// Returns zero on overflow.
683inline uint64_t NextPowerOf2(uint64_t A) {
684 A |= (A >> 1);
685 A |= (A >> 2);
686 A |= (A >> 4);
687 A |= (A >> 8);
688 A |= (A >> 16);
689 A |= (A >> 32);
690 return A + 1;
691}
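The shift/or cascade smears the highest set bit of A into every lower bit position, so adding 1 lands on the next power of two strictly above A (and wraps to 0 when bit 63 is already set). Worked through for A = 10:

  #include <cassert>
  #include <cstdint>

  int main() {
    uint64_t A = 10;                    // 0b1010
    A |= A >> 1;                        // 0b1111
    A |= A >> 2; A |= A >> 4;           // stays 0b1111
    A |= A >> 8; A |= A >> 16; A |= A >> 32;
    assert(A == 15);                    // every bit below the top bit is now set
    assert(A + 1 == 16);                // NextPowerOf2(10) == 16
    return 0;
  }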
692
693/// Returns the power of two which is less than or equal to the given value.
694/// Essentially, it is a floor operation across the domain of powers of two.
695inline uint64_t PowerOf2Floor(uint64_t A) {
696 if (!A) return 0;
697 return 1ull << (63 - countLeadingZeros(A, ZB_Undefined));
698}
699
700/// Returns the power of two which is greater than or equal to the given value.
701/// Essentially, it is a ceil operation across the domain of powers of two.
702inline uint64_t PowerOf2Ceil(uint64_t A) {
703 if (!A)
704 return 0;
705 return NextPowerOf2(A - 1);
706}
707
708/// Returns the next integer (mod 2**64) that is greater than or equal to
709/// \p Value and is a multiple of \p Align. \p Align must be non-zero.
710///
711/// If non-zero \p Skew is specified, the return value will be a minimal
712/// integer that is greater than or equal to \p Value and equal to
713/// \p Align * N + \p Skew for some integer N. If \p Skew is larger than
714/// \p Align, its value is adjusted to '\p Skew mod \p Align'.
715///
716/// Examples:
717/// \code
718/// alignTo(5, 8) = 8
719/// alignTo(17, 8) = 24
720/// alignTo(~0LL, 8) = 0
721/// alignTo(321, 255) = 510
722///
723/// alignTo(5, 8, 7) = 7
724/// alignTo(17, 8, 1) = 17
725/// alignTo(~0LL, 8, 3) = 3
726/// alignTo(321, 255, 42) = 552
727/// \endcode
728inline uint64_t alignTo(uint64_t Value, uint64_t Align, uint64_t Skew = 0) {
729 assert(Align != 0u && "Align can't be 0.");
730 Skew %= Align;
731 return (Value + Align - 1 - Skew) / Align * Align + Skew;
732}
733
734/// Returns the next integer (mod 2**64) that is greater than or equal to
735/// \p Value and is a multiple of \c Align. \c Align must be non-zero.
736template <uint64_t Align> constexpr inline uint64_t alignTo(uint64_t Value) {
737 static_assert(Align != 0u, "Align must be non-zero");
738 return (Value + Align - 1) / Align * Align;
739}
740
741/// Returns the integer ceil(Numerator / Denominator).
742inline uint64_t divideCeil(uint64_t Numerator, uint64_t Denominator) {
743 return alignTo(Numerator, Denominator) / Denominator;
744}
745
746/// Returns the integer nearest(Numerator / Denominator).
747inline uint64_t divideNearest(uint64_t Numerator, uint64_t Denominator) {
748 return (Numerator + (Denominator / 2)) / Denominator;
749}
750
751/// Returns the largest uint64_t that is less than or equal to \p Value and is
752/// \p Skew mod \p Align. \p Align must be non-zero.
753inline uint64_t alignDown(uint64_t Value, uint64_t Align, uint64_t Skew = 0) {
754 assert(Align != 0u && "Align can't be 0.");
755 Skew %= Align;
756 return (Value - Skew) / Align * Align + Skew;
757}
758
759/// Sign-extend the number in the bottom B bits of X to a 32-bit integer.
760/// Requires 0 < B <= 32.
761template <unsigned B> constexpr inline int32_t SignExtend32(uint32_t X) {
762 static_assert(B > 0, "Bit width can't be 0.");
763 static_assert(B <= 32, "Bit width out of range.");
764 return int32_t(X << (32 - B)) >> (32 - B);
765}
766
767/// Sign-extend the number in the bottom B bits of X to a 32-bit integer.
768/// Requires 0 < B <= 32.
769inline int32_t SignExtend32(uint32_t X, unsigned B) {
770 assert(B > 0 && "Bit width can't be 0.");
771 assert(B <= 32 && "Bit width out of range.");
772 return int32_t(X << (32 - B)) >> (32 - B);
773}
774
775/// Sign-extend the number in the bottom B bits of X to a 64-bit integer.
776/// Requires 0 < B <= 64.
777template <unsigned B> constexpr inline int64_t SignExtend64(uint64_t x) {
778 static_assert(B > 0, "Bit width can't be 0.");
779 static_assert(B <= 64, "Bit width out of range.");
780 return int64_t(x << (64 - B)) >> (64 - B);
781}
782
783/// Sign-extend the number in the bottom B bits of X to a 64-bit integer.
784/// Requires 0 < B <= 64.
785inline int64_t SignExtend64(uint64_t X, unsigned B) {
786 assert(B > 0 && "Bit width can't be 0.");
787 assert(B <= 64 && "Bit width out of range.");
788 return int64_t(X << (64 - B)) >> (64 - B);
789}
790
791/// Subtract two unsigned integers, X and Y, of type T and return the absolute
792/// value of the result.
793template <typename T>
794std::enable_if_t<std::is_unsigned<T>::value, T> AbsoluteDifference(T X, T Y) {
795 return X > Y ? (X - Y) : (Y - X);
796}
797
798/// Add two unsigned integers, X and Y, of type T. Clamp the result to the
799/// maximum representable value of T on overflow. ResultOverflowed indicates if
800/// the result is larger than the maximum representable value of type T.
801template <typename T>
802std::enable_if_t<std::is_unsigned<T>::value, T>
803SaturatingAdd(T X, T Y, bool *ResultOverflowed = nullptr) {
804 bool Dummy;
805 bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;
806 // Hacker's Delight, p. 29
807 T Z = X + Y;
808 Overflowed = (Z < X || Z < Y);
809 if (Overflowed)
810 return std::numeric_limits<T>::max();
811 else
812 return Z;
813}
814
815/// Multiply two unsigned integers, X and Y, of type T. Clamp the result to the
816/// maximum representable value of T on overflow. ResultOverflowed indicates if
817/// the result is larger than the maximum representable value of type T.
818template <typename T>
819std::enable_if_t<std::is_unsigned<T>::value, T>
820SaturatingMultiply(T X, T Y, bool *ResultOverflowed = nullptr) {
821 bool Dummy;
822 bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;
823
824 // Hacker's Delight, p. 30 has a different algorithm, but we don't use that
825 // because it fails for uint16_t (where multiplication can have undefined
826 // behavior due to promotion to int), and requires a division in addition
827 // to the multiplication.
828
829 Overflowed = false;
830
831 // Log2(Z) would be either Log2Z or Log2Z + 1.
832 // Special case: if X or Y is 0, Log2_64 gives -1, and Log2Z
833 // will necessarily be less than Log2Max as desired.
834 int Log2Z = Log2_64(X) + Log2_64(Y);
835 const T Max = std::numeric_limits<T>::max();
836 int Log2Max = Log2_64(Max);
837 if (Log2Z < Log2Max) {
838 return X * Y;
839 }
840 if (Log2Z > Log2Max) {
841 Overflowed = true;
842 return Max;
843 }
844
845 // We're going to use the top bit, and maybe overflow one
846 // bit past it. Multiply all but the bottom bit then add
847 // that on at the end.
848 T Z = (X >> 1) * Y;
849 if (Z & ~(Max >> 1)) {
850 Overflowed = true;
851 return Max;
852 }
853 Z <<= 1;
854 if (X & 1)
855 return SaturatingAdd(Z, Y, ResultOverflowed);
856
857 return Z;
858}
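A short usage sketch, assuming LLVM headers matching this listing; the clamp-to-max behaviour is what distinguishes it from plain wrapping multiplication:

  #include "llvm/Support/MathExtras.h"
  #include <cassert>
  #include <cstdint>

  void saturatingSketch() {
    bool Overflowed = false;
    uint32_t R = llvm::SaturatingMultiply<uint32_t>(0xFFFFFFFFu, 2u, &Overflowed);
    assert(Overflowed && R == 0xFFFFFFFFu);         // clamped, not wrapped
    uint32_t S = llvm::SaturatingMultiply<uint32_t>(6u, 7u, &Overflowed);
    assert(!Overflowed && S == 42u);
  }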
859
860/// Multiply two unsigned integers, X and Y, and add the unsigned integer, A to
861/// the product. Clamp the result to the maximum representable value of T on
862/// overflow. ResultOverflowed indicates if the result is larger than the
863/// maximum representable value of type T.
864template <typename T>
865std::enable_if_t<std::is_unsigned<T>::value, T>
866SaturatingMultiplyAdd(T X, T Y, T A, bool *ResultOverflowed = nullptr) {
867 bool Dummy;
868 bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;
869
870 T Product = SaturatingMultiply(X, Y, &Overflowed);
871 if (Overflowed)
872 return Product;
873
874 return SaturatingAdd(A, Product, &Overflowed);
875}
876
877/// Use this rather than HUGE_VALF; the latter causes warnings on MSVC.
878extern const float huge_valf;
879
880
881/// Add two signed integers, computing the two's complement truncated result,
882/// returning true if overflow occurred.
883template <typename T>
884std::enable_if_t<std::is_signed<T>::value, T> AddOverflow(T X, T Y, T &Result) {
885#if __has_builtin(__builtin_add_overflow)
886 return __builtin_add_overflow(X, Y, &Result);
887#else
888 // Perform the unsigned addition.
889 using U = std::make_unsigned_t<T>;
890 const U UX = static_cast<U>(X);
891 const U UY = static_cast<U>(Y);
892 const U UResult = UX + UY;
893
894 // Convert to signed.
895 Result = static_cast<T>(UResult);
896
897 // Adding two positive numbers should result in a positive number.
898 if (X > 0 && Y > 0)
899 return Result <= 0;
900 // Adding two negatives should result in a negative number.
901 if (X < 0 && Y < 0)
902 return Result >= 0;
903 return false;
904#endif
905}
906
907/// Subtract two signed integers, computing the two's complement truncated
908/// result, returning true if an overflow occurred.
909template <typename T>
910std::enable_if_t<std::is_signed<T>::value, T> SubOverflow(T X, T Y, T &Result) {
911#if __has_builtin(__builtin_sub_overflow)
912 return __builtin_sub_overflow(X, Y, &Result);
913#else
914 // Perform the unsigned subtraction.
915 using U = std::make_unsigned_t<T>;
916 const U UX = static_cast<U>(X);
917 const U UY = static_cast<U>(Y);
918 const U UResult = UX - UY;
919
920 // Convert to signed.
921 Result = static_cast<T>(UResult);
922
923 // Subtracting a positive number from a negative results in a negative number.
924 if (X <= 0 && Y > 0)
925 return Result >= 0;
926 // Subtracting a negative number from a positive results in a positive number.
927 if (X >= 0 && Y < 0)
928 return Result <= 0;
929 return false;
930#endif
931}
932
933/// Multiply two signed integers, computing the two's complement truncated
934/// result, returning true if an overflow occurred.
935template <typename T>
936std::enable_if_t<std::is_signed<T>::value, T> MulOverflow(T X, T Y, T &Result) {
937 // Perform the unsigned multiplication on absolute values.
938 using U = std::make_unsigned_t<T>;
939 const U UX = X < 0 ? (0 - static_cast<U>(X)) : static_cast<U>(X);
940 const U UY = Y < 0 ? (0 - static_cast<U>(Y)) : static_cast<U>(Y);
941 const U UResult = UX * UY;
942
943 // Convert to signed.
944 const bool IsNegative = (X < 0) ^ (Y < 0);
945 Result = IsNegative ? (0 - UResult) : UResult;
946
947 // If any of the args was 0, result is 0 and no overflow occurs.
948 if (UX == 0 || UY == 0)
949 return false;
950
951 // UX and UY are in [1, 2^n], where n is the number of digits.
952 // Check how the max allowed absolute value (2^n for negative, 2^(n-1) for
953 // positive) divided by an argument compares to the other.
954 if (IsNegative)
955 return UX > (static_cast<U>(std::numeric_limits<T>::max()) + U(1)) / UY;
956 else
957 return UX > (static_cast<U>(std::numeric_limits<T>::max())) / UY;
958}
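A usage sketch, assuming LLVM headers matching this listing; note the return value is truthy on overflow and Result always receives the truncated two's-complement product:

  #include "llvm/Support/MathExtras.h"
  #include <cassert>
  #include <cstdint>

  void mulOverflowSketch() {
    int32_t R;
    assert(!llvm::MulOverflow<int32_t>(1000, 1000, R) && R == 1000000);
    assert(llvm::MulOverflow<int32_t>(1 << 30, 4, R)); // overflows int32_t
  }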
959
960} // End llvm namespace
961
962#endif