Bug Summary

File: llvm/include/llvm/IR/Instructions.h
Warning: line 1234, column 33
Called C++ object pointer is null

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name CGExprScalar.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -fhalf-no-semantic-interposition -mframe-pointer=none -relaxed-aliasing -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/build-llvm/tools/clang/lib/CodeGen -resource-dir /usr/lib/llvm-13/lib/clang/13.0.0 -D CLANG_ROUND_TRIP_CC1_ARGS=ON -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/build-llvm/tools/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/include -I /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/build-llvm/tools/clang/include -I /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/build-llvm/include -I /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include -U NDEBUG -internal-isystem 
/usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/lib/llvm-13/lib/clang/13.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/build-llvm/tools/clang/lib/CodeGen -fdebug-prefix-map=/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f=. -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-04-14-063029-18377-1 -x c++ /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp

/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp

1//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGCXXABI.h"
14#include "CGCleanup.h"
15#include "CGDebugInfo.h"
16#include "CGObjCRuntime.h"
17#include "CGOpenMPRuntime.h"
18#include "CodeGenFunction.h"
19#include "CodeGenModule.h"
20#include "ConstantEmitter.h"
21#include "TargetInfo.h"
22#include "clang/AST/ASTContext.h"
23#include "clang/AST/Attr.h"
24#include "clang/AST/DeclObjC.h"
25#include "clang/AST/Expr.h"
26#include "clang/AST/RecordLayout.h"
27#include "clang/AST/StmtVisitor.h"
28#include "clang/Basic/CodeGenOptions.h"
29#include "clang/Basic/TargetInfo.h"
30#include "llvm/ADT/APFixedPoint.h"
31#include "llvm/ADT/Optional.h"
32#include "llvm/IR/CFG.h"
33#include "llvm/IR/Constants.h"
34#include "llvm/IR/DataLayout.h"
35#include "llvm/IR/FixedPointBuilder.h"
36#include "llvm/IR/Function.h"
37#include "llvm/IR/GetElementPtrTypeIterator.h"
38#include "llvm/IR/GlobalVariable.h"
39#include "llvm/IR/Intrinsics.h"
40#include "llvm/IR/IntrinsicsPowerPC.h"
41#include "llvm/IR/MatrixBuilder.h"
42#include "llvm/IR/Module.h"
43#include <cstdarg>
44
45using namespace clang;
46using namespace CodeGen;
47using llvm::Value;
48
49//===----------------------------------------------------------------------===//
50// Scalar Expression Emitter
51//===----------------------------------------------------------------------===//
52
53namespace {
54
55/// Determine whether the given binary operation may overflow.
56/// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
57/// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
58/// the returned overflow check is precise. The returned value is 'true' for
59/// all other opcodes, to be conservative.
60bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
61 BinaryOperator::Opcode Opcode, bool Signed,
62 llvm::APInt &Result) {
63 // Assume overflow is possible, unless we can prove otherwise.
64 bool Overflow = true;
65 const auto &LHSAP = LHS->getValue();
66 const auto &RHSAP = RHS->getValue();
67 if (Opcode == BO_Add) {
68 if (Signed)
69 Result = LHSAP.sadd_ov(RHSAP, Overflow);
70 else
71 Result = LHSAP.uadd_ov(RHSAP, Overflow);
72 } else if (Opcode == BO_Sub) {
73 if (Signed)
74 Result = LHSAP.ssub_ov(RHSAP, Overflow);
75 else
76 Result = LHSAP.usub_ov(RHSAP, Overflow);
77 } else if (Opcode == BO_Mul) {
78 if (Signed)
79 Result = LHSAP.smul_ov(RHSAP, Overflow);
80 else
81 Result = LHSAP.umul_ov(RHSAP, Overflow);
82 } else if (Opcode == BO_Div || Opcode == BO_Rem) {
83 if (Signed && !RHS->isZero())
84 Result = LHSAP.sdiv_ov(RHSAP, Overflow);
85 else
86 return false;
87 }
88 return Overflow;
89}
90
91struct BinOpInfo {
92 Value *LHS;
93 Value *RHS;
94 QualType Ty; // Computation Type.
95 BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
96 FPOptions FPFeatures;
97 const Expr *E; // Entire expr, for error unsupported. May not be binop.
98
99 /// Check if the binop can result in integer overflow.
100 bool mayHaveIntegerOverflow() const {
101 // Without constant input, we can't rule out overflow.
102 auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
103 auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
104 if (!LHSCI || !RHSCI)
105 return true;
106
107 llvm::APInt Result;
108 return ::mayHaveIntegerOverflow(
109 LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
110 }
111
112 /// Check if the binop computes a division or a remainder.
113 bool isDivremOp() const {
114 return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
115 Opcode == BO_RemAssign;
116 }
117
118 /// Check if the binop can result in an integer division by zero.
119 bool mayHaveIntegerDivisionByZero() const {
120 if (isDivremOp())
121 if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
122 return CI->isZero();
123 return true;
124 }
125
126 /// Check if the binop can result in a float division by zero.
127 bool mayHaveFloatDivisionByZero() const {
128 if (isDivremOp())
129 if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
130 return CFP->isZero();
131 return true;
132 }
133
134 /// Check if at least one operand is a fixed point type. In such cases, this
135 /// operation did not follow usual arithmetic conversion and both operands
136 /// might not be of the same type.
137 bool isFixedPointOp() const {
138 // We cannot simply check the result type since comparison operations return
139 // an int.
140 if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
141 QualType LHSType = BinOp->getLHS()->getType();
142 QualType RHSType = BinOp->getRHS()->getType();
143 return LHSType->isFixedPointType() || RHSType->isFixedPointType();
144 }
145 if (const auto *UnOp = dyn_cast<UnaryOperator>(E))
146 return UnOp->getSubExpr()->getType()->isFixedPointType();
147 return false;
148 }
149};
150
151static bool MustVisitNullValue(const Expr *E) {
152 // If a null pointer expression's type is the C++0x nullptr_t, then
153 // it's not necessarily a simple constant and it must be evaluated
154 // for its potential side effects.
155 return E->getType()->isNullPtrType();
156}
157
158/// If \p E is a widened promoted integer, get its base (unpromoted) type.
159static llvm::Optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
160 const Expr *E) {
161 const Expr *Base = E->IgnoreImpCasts();
162 if (E == Base)
163 return llvm::None;
164
165 QualType BaseTy = Base->getType();
166 if (!BaseTy->isPromotableIntegerType() ||
167 Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
168 return llvm::None;
169
170 return BaseTy;
171}
172
173/// Check if \p E is a widened promoted integer.
174static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
175 return getUnwidenedIntegerType(Ctx, E).hasValue();
176}
177
178/// Check if we can skip the overflow check for \p Op.
179static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
180 assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&(((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>
(Op.E)) && "Expected a unary or binary operator") ? static_cast
<void> (0) : __assert_fail ("(isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) && \"Expected a unary or binary operator\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 181, __PRETTY_FUNCTION__))
181 "Expected a unary or binary operator")(((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>
(Op.E)) && "Expected a unary or binary operator") ? static_cast
<void> (0) : __assert_fail ("(isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) && \"Expected a unary or binary operator\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 181, __PRETTY_FUNCTION__))
;
182
183 // If the binop has constant inputs and we can prove there is no overflow,
184 // we can elide the overflow check.
185 if (!Op.mayHaveIntegerOverflow())
186 return true;
187
188 // If a unary op has a widened operand, the op cannot overflow.
189 if (const auto *UO = dyn_cast<UnaryOperator>(Op.E))
190 return !UO->canOverflow();
191
192 // We usually don't need overflow checks for binops with widened operands.
193 // Multiplication with promoted unsigned operands is a special case.
194 const auto *BO = cast<BinaryOperator>(Op.E);
195 auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
196 if (!OptionalLHSTy)
197 return false;
198
199 auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
200 if (!OptionalRHSTy)
201 return false;
202
203 QualType LHSTy = *OptionalLHSTy;
204 QualType RHSTy = *OptionalRHSTy;
205
206 // This is the simple case: binops without unsigned multiplication, and with
207 // widened operands. No overflow check is needed here.
208 if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
209 !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
210 return true;
211
212 // For unsigned multiplication the overflow check can be elided if either one
213 // of the unpromoted types are less than half the size of the promoted type.
214 unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
215 return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
216 (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
217}
218
219class ScalarExprEmitter
220 : public StmtVisitor<ScalarExprEmitter, Value*> {
221 CodeGenFunction &CGF;
222 CGBuilderTy &Builder;
223 bool IgnoreResultAssign;
224 llvm::LLVMContext &VMContext;
225public:
226
227 ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
228 : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
229 VMContext(cgf.getLLVMContext()) {
230 }
231
232 //===--------------------------------------------------------------------===//
233 // Utilities
234 //===--------------------------------------------------------------------===//
235
236 bool TestAndClearIgnoreResultAssign() {
237 bool I = IgnoreResultAssign;
238 IgnoreResultAssign = false;
239 return I;
240 }
241
242 llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
243 LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
244 LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
245 return CGF.EmitCheckedLValue(E, TCK);
246 }
247
248 void EmitBinOpCheck(ArrayRef<std::pair<Value *, SanitizerMask>> Checks,
249 const BinOpInfo &Info);
250
251 Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
252 return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
253 }
254
255 void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
256 const AlignValueAttr *AVAttr = nullptr;
257 if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
258 const ValueDecl *VD = DRE->getDecl();
259
260 if (VD->getType()->isReferenceType()) {
261 if (const auto *TTy =
262 dyn_cast<TypedefType>(VD->getType().getNonReferenceType()))
263 AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
264 } else {
265 // Assumptions for function parameters are emitted at the start of the
266 // function, so there is no need to repeat that here,
267 // unless the alignment-assumption sanitizer is enabled,
268 // then we prefer the assumption over alignment attribute
269 // on IR function param.
270 if (isa<ParmVarDecl>(VD) && !CGF.SanOpts.has(SanitizerKind::Alignment))
271 return;
272
273 AVAttr = VD->getAttr<AlignValueAttr>();
274 }
275 }
276
277 if (!AVAttr)
278 if (const auto *TTy =
279 dyn_cast<TypedefType>(E->getType()))
280 AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
281
282 if (!AVAttr)
283 return;
284
285 Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
286 llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
287 CGF.emitAlignmentAssumption(V, E, AVAttr->getLocation(), AlignmentCI);
288 }
289
290 /// EmitLoadOfLValue - Given an expression with complex type that represents a
291 /// value l-value, this method emits the address of the l-value, then loads
292 /// and returns the result.
293 Value *EmitLoadOfLValue(const Expr *E) {
294 Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
295 E->getExprLoc());
296
297 EmitLValueAlignmentAssumption(E, V);
298 return V;
299 }
300
301 /// EmitConversionToBool - Convert the specified expression value to a
302 /// boolean (i1) truth value. This is equivalent to "Val != 0".
303 Value *EmitConversionToBool(Value *Src, QualType DstTy);
304
305 /// Emit a check that a conversion from a floating-point type does not
306 /// overflow.
307 void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
308 Value *Src, QualType SrcType, QualType DstType,
309 llvm::Type *DstTy, SourceLocation Loc);
310
311 /// Known implicit conversion check kinds.
312 /// Keep in sync with the enum of the same name in ubsan_handlers.h
313 enum ImplicitConversionCheckKind : unsigned char {
314 ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
315 ICCK_UnsignedIntegerTruncation = 1,
316 ICCK_SignedIntegerTruncation = 2,
317 ICCK_IntegerSignChange = 3,
318 ICCK_SignedIntegerTruncationOrSignChange = 4,
319 };
320
321 /// Emit a check that an [implicit] truncation of an integer does not
322 /// discard any bits. It is not UB, so we use the value after truncation.
323 void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
324 QualType DstType, SourceLocation Loc);
325
326 /// Emit a check that an [implicit] conversion of an integer does not change
327 /// the sign of the value. It is not UB, so we use the value after conversion.
328 /// NOTE: Src and Dst may be the exact same value! (point to the same thing)
329 void EmitIntegerSignChangeCheck(Value *Src, QualType SrcType, Value *Dst,
330 QualType DstType, SourceLocation Loc);
331
332 /// Emit a conversion from the specified type to the specified destination
333 /// type, both of which are LLVM scalar types.
334 struct ScalarConversionOpts {
335 bool TreatBooleanAsSigned;
336 bool EmitImplicitIntegerTruncationChecks;
337 bool EmitImplicitIntegerSignChangeChecks;
338
339 ScalarConversionOpts()
340 : TreatBooleanAsSigned(false),
341 EmitImplicitIntegerTruncationChecks(false),
342 EmitImplicitIntegerSignChangeChecks(false) {}
343
344 ScalarConversionOpts(clang::SanitizerSet SanOpts)
345 : TreatBooleanAsSigned(false),
346 EmitImplicitIntegerTruncationChecks(
347 SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
348 EmitImplicitIntegerSignChangeChecks(
349 SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
350 };
351 Value *EmitScalarCast(Value *Src, QualType SrcType, QualType DstType,
352 llvm::Type *SrcTy, llvm::Type *DstTy,
353 ScalarConversionOpts Opts);
354 Value *
355 EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
356 SourceLocation Loc,
357 ScalarConversionOpts Opts = ScalarConversionOpts());
358
359 /// Convert between either a fixed point and other fixed point or fixed point
360 /// and an integer.
361 Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
362 SourceLocation Loc);
363
364 /// Emit a conversion from the specified complex type to the specified
365 /// destination type, where the destination type is an LLVM scalar type.
366 Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
367 QualType SrcTy, QualType DstTy,
368 SourceLocation Loc);
369
370 /// EmitNullValue - Emit a value that corresponds to null for the given type.
371 Value *EmitNullValue(QualType Ty);
372
373 /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
374 Value *EmitFloatToBoolConversion(Value *V) {
375 // Compare against 0.0 for fp scalars.
376 llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
377 return Builder.CreateFCmpUNE(V, Zero, "tobool");
378 }
379
380 /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
381 Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
382 Value *Zero = CGF.CGM.getNullPointer(cast<llvm::PointerType>(V->getType()), QT);
383
384 return Builder.CreateICmpNE(V, Zero, "tobool");
385 }
386
387 Value *EmitIntToBoolConversion(Value *V) {
388 // Because of the type rules of C, we often end up computing a
389 // logical value, then zero extending it to int, then wanting it
390 // as a logical value again. Optimize this common case.
391 if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
392 if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
393 Value *Result = ZI->getOperand(0);
394 // If there aren't any more uses, zap the instruction to save space.
395 // Note that there can be more uses, for example if this
396 // is the result of an assignment.
397 if (ZI->use_empty())
398 ZI->eraseFromParent();
399 return Result;
400 }
401 }
402
403 return Builder.CreateIsNotNull(V, "tobool");
404 }
405
406 //===--------------------------------------------------------------------===//
407 // Visitor Methods
408 //===--------------------------------------------------------------------===//
409
410 Value *Visit(Expr *E) {
411 ApplyDebugLocation DL(CGF, E);
412 return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
413 }
414
415 Value *VisitStmt(Stmt *S) {
416 S->dump(llvm::errs(), CGF.getContext());
417 llvm_unreachable("Stmt can't have complex result type!")::llvm::llvm_unreachable_internal("Stmt can't have complex result type!"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 417)
;
418 }
419 Value *VisitExpr(Expr *S);
420
421 Value *VisitConstantExpr(ConstantExpr *E) {
422 if (Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
423 if (E->isGLValue())
424 return CGF.Builder.CreateLoad(Address(
425 Result, CGF.getContext().getTypeAlignInChars(E->getType())));
426 return Result;
427 }
428 return Visit(E->getSubExpr());
429 }
430 Value *VisitParenExpr(ParenExpr *PE) {
431 return Visit(PE->getSubExpr());
432 }
433 Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
434 return Visit(E->getReplacement());
435 }
436 Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
437 return Visit(GE->getResultExpr());
438 }
439 Value *VisitCoawaitExpr(CoawaitExpr *S) {
440 return CGF.EmitCoawaitExpr(*S).getScalarVal();
441 }
442 Value *VisitCoyieldExpr(CoyieldExpr *S) {
443 return CGF.EmitCoyieldExpr(*S).getScalarVal();
444 }
445 Value *VisitUnaryCoawait(const UnaryOperator *E) {
446 return Visit(E->getSubExpr());
447 }
448
449 // Leaves.
450 Value *VisitIntegerLiteral(const IntegerLiteral *E) {
451 return Builder.getInt(E->getValue());
452 }
453 Value *VisitFixedPointLiteral(const FixedPointLiteral *E) {
454 return Builder.getInt(E->getValue());
455 }
456 Value *VisitFloatingLiteral(const FloatingLiteral *E) {
457 return llvm::ConstantFP::get(VMContext, E->getValue());
458 }
459 Value *VisitCharacterLiteral(const CharacterLiteral *E) {
460 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
461 }
462 Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
463 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
464 }
465 Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
466 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
467 }
468 Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
469 return EmitNullValue(E->getType());
470 }
471 Value *VisitGNUNullExpr(const GNUNullExpr *E) {
472 return EmitNullValue(E->getType());
473 }
474 Value *VisitOffsetOfExpr(OffsetOfExpr *E);
475 Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
476 Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
477 llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
478 return Builder.CreateBitCast(V, ConvertType(E->getType()));
479 }
480
481 Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
482 return llvm::ConstantInt::get(ConvertType(E->getType()),E->getPackLength());
483 }
484
485 Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
486 return CGF.EmitPseudoObjectRValue(E).getScalarVal();
487 }
488
489 Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
490 if (E->isGLValue())
491 return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
492 E->getExprLoc());
493
494 // Otherwise, assume the mapping is the scalar directly.
495 return CGF.getOrCreateOpaqueRValueMapping(E).getScalarVal();
496 }
497
498 // l-values.
499 Value *VisitDeclRefExpr(DeclRefExpr *E) {
500 if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E))
501 return CGF.emitScalarConstant(Constant, E);
502 return EmitLoadOfLValue(E);
503 }
504
505 Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
506 return CGF.EmitObjCSelectorExpr(E);
507 }
508 Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
509 return CGF.EmitObjCProtocolExpr(E);
510 }
511 Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
512 return EmitLoadOfLValue(E);
513 }
514 Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
515 if (E->getMethodDecl() &&
516 E->getMethodDecl()->getReturnType()->isReferenceType())
517 return EmitLoadOfLValue(E);
518 return CGF.EmitObjCMessageExpr(E).getScalarVal();
519 }
520
521 Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
522 LValue LV = CGF.EmitObjCIsaExpr(E);
523 Value *V = CGF.EmitLoadOfLValue(LV, E->getExprLoc()).getScalarVal();
524 return V;
525 }
526
527 Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
528 VersionTuple Version = E->getVersion();
529
530 // If we're checking for a platform older than our minimum deployment
531 // target, we can fold the check away.
532 if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
533 return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);
534
535 return CGF.EmitBuiltinAvailable(Version);
536 }
537
538 Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
539 Value *VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E);
540 Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
541 Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
542 Value *VisitMemberExpr(MemberExpr *E);
543 Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
544 Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
545 // Strictly speaking, we shouldn't be calling EmitLoadOfLValue, which
546 // transitively calls EmitCompoundLiteralLValue, here in C++ since compound
547 // literals aren't l-values in C++. We do so simply because that's the
548 // cleanest way to handle compound literals in C++.
549 // See the discussion here: https://reviews.llvm.org/D64464
550 return EmitLoadOfLValue(E);
551 }
552
553 Value *VisitInitListExpr(InitListExpr *E);
554
555 Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
556 assert(CGF.getArrayInitIndex() &&((CGF.getArrayInitIndex() && "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?"
) ? static_cast<void> (0) : __assert_fail ("CGF.getArrayInitIndex() && \"ArrayInitIndexExpr not inside an ArrayInitLoopExpr?\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 557, __PRETTY_FUNCTION__))
557 "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?")((CGF.getArrayInitIndex() && "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?"
) ? static_cast<void> (0) : __assert_fail ("CGF.getArrayInitIndex() && \"ArrayInitIndexExpr not inside an ArrayInitLoopExpr?\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 557, __PRETTY_FUNCTION__))
;
558 return CGF.getArrayInitIndex();
559 }
560
561 Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
562 return EmitNullValue(E->getType());
563 }
564 Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
565 CGF.CGM.EmitExplicitCastExprType(E, &CGF);
566 return VisitCastExpr(E);
567 }
568 Value *VisitCastExpr(CastExpr *E);
569
570 Value *VisitCallExpr(const CallExpr *E) {
571 if (E->getCallReturnType(CGF.getContext())->isReferenceType())
572 return EmitLoadOfLValue(E);
573
574 Value *V = CGF.EmitCallExpr(E).getScalarVal();
575
576 EmitLValueAlignmentAssumption(E, V);
577 return V;
578 }
579
580 Value *VisitStmtExpr(const StmtExpr *E);
581
582 // Unary Operators.
583 Value *VisitUnaryPostDec(const UnaryOperator *E) {
584 LValue LV = EmitLValue(E->getSubExpr());
585 return EmitScalarPrePostIncDec(E, LV, false, false);
586 }
587 Value *VisitUnaryPostInc(const UnaryOperator *E) {
588 LValue LV = EmitLValue(E->getSubExpr());
589 return EmitScalarPrePostIncDec(E, LV, true, false);
590 }
591 Value *VisitUnaryPreDec(const UnaryOperator *E) {
592 LValue LV = EmitLValue(E->getSubExpr());
593 return EmitScalarPrePostIncDec(E, LV, false, true);
594 }
595 Value *VisitUnaryPreInc(const UnaryOperator *E) {
596 LValue LV = EmitLValue(E->getSubExpr());
597 return EmitScalarPrePostIncDec(E, LV, true, true);
598 }
599
600 llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
601 llvm::Value *InVal,
602 bool IsInc);
603
604 llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
605 bool isInc, bool isPre);
606
607
608 Value *VisitUnaryAddrOf(const UnaryOperator *E) {
609 if (isa<MemberPointerType>(E->getType())) // never sugared
610 return CGF.CGM.getMemberPointerConstant(E);
611
612 return EmitLValue(E->getSubExpr()).getPointer(CGF);
613 }
614 Value *VisitUnaryDeref(const UnaryOperator *E) {
615 if (E->getType()->isVoidType())
616 return Visit(E->getSubExpr()); // the actual value should be unused
617 return EmitLoadOfLValue(E);
618 }
619 Value *VisitUnaryPlus(const UnaryOperator *E) {
620 // This differs from gcc, though, most likely due to a bug in gcc.
621 TestAndClearIgnoreResultAssign();
622 return Visit(E->getSubExpr());
623 }
624 Value *VisitUnaryMinus (const UnaryOperator *E);
625 Value *VisitUnaryNot (const UnaryOperator *E);
626 Value *VisitUnaryLNot (const UnaryOperator *E);
627 Value *VisitUnaryReal (const UnaryOperator *E);
628 Value *VisitUnaryImag (const UnaryOperator *E);
629 Value *VisitUnaryExtension(const UnaryOperator *E) {
630 return Visit(E->getSubExpr());
631 }
632
633 // C++
634 Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
635 return EmitLoadOfLValue(E);
636 }
637 Value *VisitSourceLocExpr(SourceLocExpr *SLE) {
638 auto &Ctx = CGF.getContext();
639 APValue Evaluated =
640 SLE->EvaluateInContext(Ctx, CGF.CurSourceLocExprScope.getDefaultExpr());
641 return ConstantEmitter(CGF).emitAbstract(SLE->getLocation(), Evaluated,
642 SLE->getType());
643 }
644
645 Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
646 CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
647 return Visit(DAE->getExpr());
648 }
649 Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
650 CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
651 return Visit(DIE->getExpr());
652 }
653 Value *VisitCXXThisExpr(CXXThisExpr *TE) {
654 return CGF.LoadCXXThis();
655 }
656
657 Value *VisitExprWithCleanups(ExprWithCleanups *E);
658 Value *VisitCXXNewExpr(const CXXNewExpr *E) {
659 return CGF.EmitCXXNewExpr(E);
660 }
661 Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
662 CGF.EmitCXXDeleteExpr(E);
663 return nullptr;
664 }
665
666 Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
667 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
668 }
669
670 Value *VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E) {
671 return Builder.getInt1(E->isSatisfied());
672 }
673
674 Value *VisitRequiresExpr(const RequiresExpr *E) {
675 return Builder.getInt1(E->isSatisfied());
676 }
677
678 Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
679 return llvm::ConstantInt::get(Builder.getInt32Ty(), E->getValue());
680 }
681
682 Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
683 return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
684 }
685
686 Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
687 // C++ [expr.pseudo]p1:
688 // The result shall only be used as the operand for the function call
689 // operator (), and the result of such a call has type void. The only
690 // effect is the evaluation of the postfix-expression before the dot or
691 // arrow.
692 CGF.EmitScalarExpr(E->getBase());
693 return nullptr;
694 }
695
696 Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
697 return EmitNullValue(E->getType());
698 }
699
700 Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
701 CGF.EmitCXXThrowExpr(E);
702 return nullptr;
703 }
704
705 Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
706 return Builder.getInt1(E->getValue());
707 }
708
709 // Binary Operators.
710 Value *EmitMul(const BinOpInfo &Ops) {
711 if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
712 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
713 case LangOptions::SOB_Defined:
714 return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
715 case LangOptions::SOB_Undefined:
716 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
717 return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
718 LLVM_FALLTHROUGH[[gnu::fallthrough]];
719 case LangOptions::SOB_Trapping:
720 if (CanElideOverflowCheck(CGF.getContext(), Ops))
721 return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
722 return EmitOverflowCheckedBinOp(Ops);
723 }
724 }
725
726 if (Ops.Ty->isConstantMatrixType()) {
727 llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
728 // We need to check the types of the operands of the operator to get the
729 // correct matrix dimensions.
730 auto *BO = cast<BinaryOperator>(Ops.E);
731 auto *LHSMatTy = dyn_cast<ConstantMatrixType>(
732 BO->getLHS()->getType().getCanonicalType());
733 auto *RHSMatTy = dyn_cast<ConstantMatrixType>(
734 BO->getRHS()->getType().getCanonicalType());
735 if (LHSMatTy && RHSMatTy)
736 return MB.CreateMatrixMultiply(Ops.LHS, Ops.RHS, LHSMatTy->getNumRows(),
737 LHSMatTy->getNumColumns(),
738 RHSMatTy->getNumColumns());
739 return MB.CreateScalarMultiply(Ops.LHS, Ops.RHS);
740 }
741
742 if (Ops.Ty->isUnsignedIntegerType() &&
743 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
744 !CanElideOverflowCheck(CGF.getContext(), Ops))
745 return EmitOverflowCheckedBinOp(Ops);
746
747 if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
748 // Preserve the old values
749 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
750 return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
751 }
752 if (Ops.isFixedPointOp())
753 return EmitFixedPointBinOp(Ops);
754 return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
755 }
756 /// Create a binary op that checks for overflow.
757 /// Currently only supports +, - and *.
758 Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);
759
760 // Check for undefined division and modulus behaviors.
761 void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
762 llvm::Value *Zero,bool isDiv);
763 // Common helper for getting how wide LHS of shift is.
764 static Value *GetWidthMinusOneValue(Value* LHS,Value* RHS);
765
766 // Used for shifting constraints for OpenCL, do mask for powers of 2, URem for
767 // non powers of two.
768 Value *ConstrainShiftValue(Value *LHS, Value *RHS, const Twine &Name);
769
770 Value *EmitDiv(const BinOpInfo &Ops);
771 Value *EmitRem(const BinOpInfo &Ops);
772 Value *EmitAdd(const BinOpInfo &Ops);
773 Value *EmitSub(const BinOpInfo &Ops);
774 Value *EmitShl(const BinOpInfo &Ops);
775 Value *EmitShr(const BinOpInfo &Ops);
776 Value *EmitAnd(const BinOpInfo &Ops) {
777 return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
778 }
779 Value *EmitXor(const BinOpInfo &Ops) {
780 return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
781 }
782 Value *EmitOr (const BinOpInfo &Ops) {
783 return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
784 }
785
786 // Helper functions for fixed point binary operations.
787 Value *EmitFixedPointBinOp(const BinOpInfo &Ops);
788
789 BinOpInfo EmitBinOps(const BinaryOperator *E);
790 LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
791 Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
792 Value *&Result);
793
794 Value *EmitCompoundAssign(const CompoundAssignOperator *E,
795 Value *(ScalarExprEmitter::*F)(const BinOpInfo &));
796
797 // Binary operators and binary compound assignment operators.
  // Expand a Visit method for each binary opcode and its compound-assignment
  // form. VisitBinMul(E) evaluates both operands via EmitBinOps and dispatches
  // to the matching Emit<OP> helper above; VisitBinMulAssign routes through
  // EmitCompoundAssign with a pointer to that same helper.
 798#define HANDLEBINOP(OP) \
 799 Value *VisitBin ## OP(const BinaryOperator *E) { \
 800 return Emit ## OP(EmitBinOps(E)); \
 801 } \
 802 Value *VisitBin ## OP ## Assign(const CompoundAssignOperator *E) { \
 803 return EmitCompoundAssign(E, &ScalarExprEmitter::Emit ## OP); \
 804 }
 805 HANDLEBINOP(Mul)
 806 HANDLEBINOP(Div)
 807 HANDLEBINOP(Rem)
 808 HANDLEBINOP(Add)
 809 HANDLEBINOP(Sub)
 810 HANDLEBINOP(Shl)
 811 HANDLEBINOP(Shr)
 812 HANDLEBINOP(And)
 813 HANDLEBINOP(Xor)
 814 HANDLEBINOP(Or)
 815#undef HANDLEBINOP
816
817 // Comparisons.
818 Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
819 llvm::CmpInst::Predicate SICmpOpc,
820 llvm::CmpInst::Predicate FCmpOpc, bool IsSignaling);
  // Expand a Visit method per comparison opcode, forwarding to EmitCompare
  // with the unsigned-int, signed-int, and floating-point predicates plus a
  // flag for whether the FP compare is signaling (ordering comparisons are;
  // equality comparisons are not).
 821#define VISITCOMP(CODE, UI, SI, FP, SIG) \
 822 Value *VisitBin##CODE(const BinaryOperator *E) { \
 823 return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
 824 llvm::FCmpInst::FP, SIG); }
 825 VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT, true)
 826 VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT, true)
 827 VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE, true)
 828 VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE, true)
 829 VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ, false)
 830 VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE, false)
 831#undef VISITCOMP
832
833 Value *VisitBinAssign (const BinaryOperator *E);
834
835 Value *VisitBinLAnd (const BinaryOperator *E);
836 Value *VisitBinLOr (const BinaryOperator *E);
837 Value *VisitBinComma (const BinaryOperator *E);
838
839 Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
840 Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }
841
842 Value *VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
843 return Visit(E->getSemanticForm());
844 }
845
846 // Other Operators.
847 Value *VisitBlockExpr(const BlockExpr *BE);
848 Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
849 Value *VisitChooseExpr(ChooseExpr *CE);
850 Value *VisitVAArgExpr(VAArgExpr *VE);
851 Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
852 return CGF.EmitObjCStringLiteral(E);
853 }
854 Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
855 return CGF.EmitObjCBoxedExpr(E);
856 }
857 Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
858 return CGF.EmitObjCArrayLiteral(E);
859 }
860 Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
861 return CGF.EmitObjCDictionaryLiteral(E);
862 }
863 Value *VisitAsTypeExpr(AsTypeExpr *CE);
864 Value *VisitAtomicExpr(AtomicExpr *AE);
865};
866} // end anonymous namespace.
867
868//===----------------------------------------------------------------------===//
869// Utilities
870//===----------------------------------------------------------------------===//
871
872/// EmitConversionToBool - Convert the specified expression value to a
873/// boolean (i1) truth value. This is equivalent to "Val != 0".
874Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
875 assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs")((SrcType.isCanonical() && "EmitScalarConversion strips typedefs"
) ? static_cast<void> (0) : __assert_fail ("SrcType.isCanonical() && \"EmitScalarConversion strips typedefs\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 875, __PRETTY_FUNCTION__))
;
876
877 if (SrcType->isRealFloatingType())
878 return EmitFloatToBoolConversion(Src);
879
880 if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
881 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);
882
883 assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&(((SrcType->isIntegerType() || isa<llvm::PointerType>
(Src->getType())) && "Unknown scalar type to convert"
) ? static_cast<void> (0) : __assert_fail ("(SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) && \"Unknown scalar type to convert\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 884, __PRETTY_FUNCTION__))
884 "Unknown scalar type to convert")(((SrcType->isIntegerType() || isa<llvm::PointerType>
(Src->getType())) && "Unknown scalar type to convert"
) ? static_cast<void> (0) : __assert_fail ("(SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) && \"Unknown scalar type to convert\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 884, __PRETTY_FUNCTION__))
;
885
886 if (isa<llvm::IntegerType>(Src->getType()))
887 return EmitIntToBoolConversion(Src);
888
889 assert(isa<llvm::PointerType>(Src->getType()))((isa<llvm::PointerType>(Src->getType())) ? static_cast
<void> (0) : __assert_fail ("isa<llvm::PointerType>(Src->getType())"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 889, __PRETTY_FUNCTION__))
;
890 return EmitPointerToBoolConversion(Src, SrcType);
891}
892
893void ScalarExprEmitter::EmitFloatConversionCheck(
894 Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
895 QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
896 assert(SrcType->isFloatingType() && "not a conversion from floating point")((SrcType->isFloatingType() && "not a conversion from floating point"
) ? static_cast<void> (0) : __assert_fail ("SrcType->isFloatingType() && \"not a conversion from floating point\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 896, __PRETTY_FUNCTION__))
;
897 if (!isa<llvm::IntegerType>(DstTy))
898 return;
899
900 CodeGenFunction::SanitizerScope SanScope(&CGF);
901 using llvm::APFloat;
902 using llvm::APSInt;
903
904 llvm::Value *Check = nullptr;
905 const llvm::fltSemantics &SrcSema =
906 CGF.getContext().getFloatTypeSemantics(OrigSrcType);
907
908 // Floating-point to integer. This has undefined behavior if the source is
909 // +-Inf, NaN, or doesn't fit into the destination type (after truncation
910 // to an integer).
911 unsigned Width = CGF.getContext().getIntWidth(DstType);
912 bool Unsigned = DstType->isUnsignedIntegerOrEnumerationType();
913
914 APSInt Min = APSInt::getMinValue(Width, Unsigned);
915 APFloat MinSrc(SrcSema, APFloat::uninitialized);
916 if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
917 APFloat::opOverflow)
918 // Don't need an overflow check for lower bound. Just check for
919 // -Inf/NaN.
920 MinSrc = APFloat::getInf(SrcSema, true);
921 else
922 // Find the largest value which is too small to represent (before
923 // truncation toward zero).
924 MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);
925
926 APSInt Max = APSInt::getMaxValue(Width, Unsigned);
927 APFloat MaxSrc(SrcSema, APFloat::uninitialized);
928 if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
929 APFloat::opOverflow)
930 // Don't need an overflow check for upper bound. Just check for
931 // +Inf/NaN.
932 MaxSrc = APFloat::getInf(SrcSema, false);
933 else
934 // Find the smallest value which is too large to represent (before
935 // truncation toward zero).
936 MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);
937
938 // If we're converting from __half, convert the range to float to match
939 // the type of src.
940 if (OrigSrcType->isHalfType()) {
941 const llvm::fltSemantics &Sema =
942 CGF.getContext().getFloatTypeSemantics(SrcType);
943 bool IsInexact;
944 MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
945 MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
946 }
947
948 llvm::Value *GE =
949 Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
950 llvm::Value *LE =
951 Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
952 Check = Builder.CreateAnd(GE, LE);
953
954 llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
955 CGF.EmitCheckTypeDescriptor(OrigSrcType),
956 CGF.EmitCheckTypeDescriptor(DstType)};
957 CGF.EmitCheck(std::make_pair(Check, SanitizerKind::FloatCastOverflow),
958 SanitizerHandler::FloatCastOverflow, StaticArgs, OrigSrc);
959}
960
961// Should be called within CodeGenFunction::SanitizerScope RAII scope.
962// Returns 'i1 false' when the truncation Src -> Dst was lossy.
963static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
964 std::pair<llvm::Value *, SanitizerMask>>
965EmitIntegerTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst,
966 QualType DstType, CGBuilderTy &Builder) {
967 llvm::Type *SrcTy = Src->getType();
968 llvm::Type *DstTy = Dst->getType();
969 (void)DstTy; // Only used in assert()
970
971 // This should be truncation of integral types.
972 assert(Src != Dst)((Src != Dst) ? static_cast<void> (0) : __assert_fail (
"Src != Dst", "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 972, __PRETTY_FUNCTION__))
;
973 assert(SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits())((SrcTy->getScalarSizeInBits() > Dst->getType()->
getScalarSizeInBits()) ? static_cast<void> (0) : __assert_fail
("SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits()"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 973, __PRETTY_FUNCTION__))
;
974 assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&((isa<llvm::IntegerType>(SrcTy) && isa<llvm::
IntegerType>(DstTy) && "non-integer llvm type") ? static_cast
<void> (0) : __assert_fail ("isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) && \"non-integer llvm type\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 975, __PRETTY_FUNCTION__))
975 "non-integer llvm type")((isa<llvm::IntegerType>(SrcTy) && isa<llvm::
IntegerType>(DstTy) && "non-integer llvm type") ? static_cast
<void> (0) : __assert_fail ("isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) && \"non-integer llvm type\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 975, __PRETTY_FUNCTION__))
;
976
977 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
978 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
979
980 // If both (src and dst) types are unsigned, then it's an unsigned truncation.
981 // Else, it is a signed truncation.
982 ScalarExprEmitter::ImplicitConversionCheckKind Kind;
983 SanitizerMask Mask;
984 if (!SrcSigned && !DstSigned) {
985 Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
986 Mask = SanitizerKind::ImplicitUnsignedIntegerTruncation;
987 } else {
988 Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
989 Mask = SanitizerKind::ImplicitSignedIntegerTruncation;
990 }
991
992 llvm::Value *Check = nullptr;
993 // 1. Extend the truncated value back to the same width as the Src.
994 Check = Builder.CreateIntCast(Dst, SrcTy, DstSigned, "anyext");
995 // 2. Equality-compare with the original source value
996 Check = Builder.CreateICmpEQ(Check, Src, "truncheck");
997 // If the comparison result is 'i1 false', then the truncation was lossy.
998 return std::make_pair(Kind, std::make_pair(Check, Mask));
999}
1000
1001static bool PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
1002 QualType SrcType, QualType DstType) {
1003 return SrcType->isIntegerType() && DstType->isIntegerType();
1004}
1005
1006void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
1007 Value *Dst, QualType DstType,
1008 SourceLocation Loc) {
1009 if (!CGF.SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation))
1010 return;
1011
1012 // We only care about int->int conversions here.
1013 // We ignore conversions to/from pointer and/or bool.
1014 if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
1015 DstType))
1016 return;
1017
1018 unsigned SrcBits = Src->getType()->getScalarSizeInBits();
1019 unsigned DstBits = Dst->getType()->getScalarSizeInBits();
1020 // This must be truncation. Else we do not care.
1021 if (SrcBits <= DstBits)
1022 return;
1023
1024 assert(!DstType->isBooleanType() && "we should not get here with booleans.")((!DstType->isBooleanType() && "we should not get here with booleans."
) ? static_cast<void> (0) : __assert_fail ("!DstType->isBooleanType() && \"we should not get here with booleans.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 1024, __PRETTY_FUNCTION__))
;
1025
1026 // If the integer sign change sanitizer is enabled,
1027 // and we are truncating from larger unsigned type to smaller signed type,
1028 // let that next sanitizer deal with it.
1029 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1030 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1031 if (CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange) &&
1032 (!SrcSigned && DstSigned))
1033 return;
1034
1035 CodeGenFunction::SanitizerScope SanScope(&CGF);
1036
1037 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1038 std::pair<llvm::Value *, SanitizerMask>>
1039 Check =
1040 EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1041 // If the comparison result is 'i1 false', then the truncation was lossy.
1042
1043 // Do we care about this type of truncation?
1044 if (!CGF.SanOpts.has(Check.second.second))
1045 return;
1046
1047 llvm::Constant *StaticArgs[] = {
1048 CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
1049 CGF.EmitCheckTypeDescriptor(DstType),
1050 llvm::ConstantInt::get(Builder.getInt8Ty(), Check.first)};
1051 CGF.EmitCheck(Check.second, SanitizerHandler::ImplicitConversion, StaticArgs,
1052 {Src, Dst});
1053}
1054
1055// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1056// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
1057static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1058 std::pair<llvm::Value *, SanitizerMask>>
1059EmitIntegerSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst,
1060 QualType DstType, CGBuilderTy &Builder) {
1061 llvm::Type *SrcTy = Src->getType();
1062 llvm::Type *DstTy = Dst->getType();
1063
1064 assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&((isa<llvm::IntegerType>(SrcTy) && isa<llvm::
IntegerType>(DstTy) && "non-integer llvm type") ? static_cast
<void> (0) : __assert_fail ("isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) && \"non-integer llvm type\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 1065, __PRETTY_FUNCTION__))
1065 "non-integer llvm type")((isa<llvm::IntegerType>(SrcTy) && isa<llvm::
IntegerType>(DstTy) && "non-integer llvm type") ? static_cast
<void> (0) : __assert_fail ("isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) && \"non-integer llvm type\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 1065, __PRETTY_FUNCTION__))
;
1066
1067 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1068 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1069 (void)SrcSigned; // Only used in assert()
1070 (void)DstSigned; // Only used in assert()
1071 unsigned SrcBits = SrcTy->getScalarSizeInBits();
1072 unsigned DstBits = DstTy->getScalarSizeInBits();
1073 (void)SrcBits; // Only used in assert()
1074 (void)DstBits; // Only used in assert()
1075
1076 assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&((((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
"either the widths should be different, or the signednesses."
) ? static_cast<void> (0) : __assert_fail ("((SrcBits != DstBits) || (SrcSigned != DstSigned)) && \"either the widths should be different, or the signednesses.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 1077, __PRETTY_FUNCTION__))
1077 "either the widths should be different, or the signednesses.")((((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
"either the widths should be different, or the signednesses."
) ? static_cast<void> (0) : __assert_fail ("((SrcBits != DstBits) || (SrcSigned != DstSigned)) && \"either the widths should be different, or the signednesses.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 1077, __PRETTY_FUNCTION__))
;
1078
1079 // NOTE: zero value is considered to be non-negative.
1080 auto EmitIsNegativeTest = [&Builder](Value *V, QualType VType,
1081 const char *Name) -> Value * {
1082 // Is this value a signed type?
1083 bool VSigned = VType->isSignedIntegerOrEnumerationType();
1084 llvm::Type *VTy = V->getType();
1085 if (!VSigned) {
1086 // If the value is unsigned, then it is never negative.
1087 // FIXME: can we encounter non-scalar VTy here?
1088 return llvm::ConstantInt::getFalse(VTy->getContext());
1089 }
1090 // Get the zero of the same type with which we will be comparing.
1091 llvm::Constant *Zero = llvm::ConstantInt::get(VTy, 0);
1092 // %V.isnegative = icmp slt %V, 0
1093 // I.e is %V *strictly* less than zero, does it have negative value?
1094 return Builder.CreateICmp(llvm::ICmpInst::ICMP_SLT, V, Zero,
1095 llvm::Twine(Name) + "." + V->getName() +
1096 ".negativitycheck");
1097 };
1098
1099 // 1. Was the old Value negative?
1100 llvm::Value *SrcIsNegative = EmitIsNegativeTest(Src, SrcType, "src");
1101 // 2. Is the new Value negative?
1102 llvm::Value *DstIsNegative = EmitIsNegativeTest(Dst, DstType, "dst");
1103 // 3. Now, was the 'negativity status' preserved during the conversion?
1104 // NOTE: conversion from negative to zero is considered to change the sign.
1105 // (We want to get 'false' when the conversion changed the sign)
1106 // So we should just equality-compare the negativity statuses.
1107 llvm::Value *Check = nullptr;
1108 Check = Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "signchangecheck");
1109 // If the comparison result is 'false', then the conversion changed the sign.
1110 return std::make_pair(
1111 ScalarExprEmitter::ICCK_IntegerSignChange,
1112 std::make_pair(Check, SanitizerKind::ImplicitIntegerSignChange));
1113}
1114
1115void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src, QualType SrcType,
1116 Value *Dst, QualType DstType,
1117 SourceLocation Loc) {
1118 if (!CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange))
1119 return;
1120
1121 llvm::Type *SrcTy = Src->getType();
1122 llvm::Type *DstTy = Dst->getType();
1123
1124 // We only care about int->int conversions here.
1125 // We ignore conversions to/from pointer and/or bool.
1126 if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
1127 DstType))
1128 return;
1129
1130 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1131 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1132 unsigned SrcBits = SrcTy->getScalarSizeInBits();
1133 unsigned DstBits = DstTy->getScalarSizeInBits();
1134
1135 // Now, we do not need to emit the check in *all* of the cases.
1136 // We can avoid emitting it in some obvious cases where it would have been
1137 // dropped by the opt passes (instcombine) always anyways.
1138 // If it's a cast between effectively the same type, no check.
1139 // NOTE: this is *not* equivalent to checking the canonical types.
1140 if (SrcSigned == DstSigned && SrcBits == DstBits)
1141 return;
1142 // At least one of the values needs to have signed type.
1143 // If both are unsigned, then obviously, neither of them can be negative.
1144 if (!SrcSigned && !DstSigned)
1145 return;
1146 // If the conversion is to *larger* *signed* type, then no check is needed.
1147 // Because either sign-extension happens (so the sign will remain),
1148 // or zero-extension will happen (the sign bit will be zero.)
1149 if ((DstBits > SrcBits) && DstSigned)
1150 return;
1151 if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
1152 (SrcBits > DstBits) && SrcSigned) {
1153 // If the signed integer truncation sanitizer is enabled,
1154 // and this is a truncation from signed type, then no check is needed.
1155 // Because here sign change check is interchangeable with truncation check.
1156 return;
1157 }
1158 // That's it. We can't rule out any more cases with the data we have.
1159
1160 CodeGenFunction::SanitizerScope SanScope(&CGF);
1161
1162 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1163 std::pair<llvm::Value *, SanitizerMask>>
1164 Check;
1165
1166 // Each of these checks needs to return 'false' when an issue was detected.
1167 ImplicitConversionCheckKind CheckKind;
1168 llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
1169 // So we can 'and' all the checks together, and still get 'false',
1170 // if at least one of the checks detected an issue.
1171
1172 Check = EmitIntegerSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
1173 CheckKind = Check.first;
1174 Checks.emplace_back(Check.second);
1175
1176 if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
1177 (SrcBits > DstBits) && !SrcSigned && DstSigned) {
1178 // If the signed integer truncation sanitizer was enabled,
1179 // and we are truncating from larger unsigned type to smaller signed type,
1180 // let's handle the case we skipped in that check.
1181 Check =
1182 EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1183 CheckKind = ICCK_SignedIntegerTruncationOrSignChange;
1184 Checks.emplace_back(Check.second);
1185 // If the comparison result is 'i1 false', then the truncation was lossy.
1186 }
1187
1188 llvm::Constant *StaticArgs[] = {
1189 CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
1190 CGF.EmitCheckTypeDescriptor(DstType),
1191 llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind)};
1192 // EmitCheck() will 'and' all the checks together.
1193 CGF.EmitCheck(Checks, SanitizerHandler::ImplicitConversion, StaticArgs,
1194 {Src, Dst});
1195}
1196
1197Value *ScalarExprEmitter::EmitScalarCast(Value *Src, QualType SrcType,
1198 QualType DstType, llvm::Type *SrcTy,
1199 llvm::Type *DstTy,
1200 ScalarConversionOpts Opts) {
1201 // The Element types determine the type of cast to perform.
1202 llvm::Type *SrcElementTy;
1203 llvm::Type *DstElementTy;
1204 QualType SrcElementType;
1205 QualType DstElementType;
1206 if (SrcType->isMatrixType() && DstType->isMatrixType()) {
1207 // Allow bitcast between matrixes of the same size.
1208 if (SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits())
1209 return Builder.CreateBitCast(Src, DstTy, "conv");
1210
1211 SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
1212 DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
1213 SrcElementType = SrcType->castAs<MatrixType>()->getElementType();
1214 DstElementType = DstType->castAs<MatrixType>()->getElementType();
1215 } else {
1216 assert(!SrcType->isMatrixType() && !DstType->isMatrixType() &&((!SrcType->isMatrixType() && !DstType->isMatrixType
() && "cannot cast between matrix and non-matrix types"
) ? static_cast<void> (0) : __assert_fail ("!SrcType->isMatrixType() && !DstType->isMatrixType() && \"cannot cast between matrix and non-matrix types\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 1217, __PRETTY_FUNCTION__))
1217 "cannot cast between matrix and non-matrix types")((!SrcType->isMatrixType() && !DstType->isMatrixType
() && "cannot cast between matrix and non-matrix types"
) ? static_cast<void> (0) : __assert_fail ("!SrcType->isMatrixType() && !DstType->isMatrixType() && \"cannot cast between matrix and non-matrix types\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 1217, __PRETTY_FUNCTION__))
;
1218 SrcElementTy = SrcTy;
1219 DstElementTy = DstTy;
1220 SrcElementType = SrcType;
1221 DstElementType = DstType;
1222 }
1223
1224 if (isa<llvm::IntegerType>(SrcElementTy)) {
1225 bool InputSigned = SrcElementType->isSignedIntegerOrEnumerationType();
1226 if (SrcElementType->isBooleanType() && Opts.TreatBooleanAsSigned) {
1227 InputSigned = true;
1228 }
1229
1230 if (isa<llvm::IntegerType>(DstElementTy))
1231 return Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
1232 if (InputSigned)
1233 return Builder.CreateSIToFP(Src, DstTy, "conv");
1234 return Builder.CreateUIToFP(Src, DstTy, "conv");
1235 }
1236
1237 if (isa<llvm::IntegerType>(DstElementTy)) {
1238 assert(SrcElementTy->isFloatingPointTy() && "Unknown real conversion")((SrcElementTy->isFloatingPointTy() && "Unknown real conversion"
) ? static_cast<void> (0) : __assert_fail ("SrcElementTy->isFloatingPointTy() && \"Unknown real conversion\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 1238, __PRETTY_FUNCTION__))
;
1239 if (DstElementType->isSignedIntegerOrEnumerationType())
1240 return Builder.CreateFPToSI(Src, DstTy, "conv");
1241 return Builder.CreateFPToUI(Src, DstTy, "conv");
1242 }
1243
1244 if (DstElementTy->getTypeID() < SrcElementTy->getTypeID())
1245 return Builder.CreateFPTrunc(Src, DstTy, "conv");
1246 return Builder.CreateFPExt(Src, DstTy, "conv");
1247}
1248
1249/// Emit a conversion from the specified type to the specified destination type,
1250/// both of which are LLVM scalar types.
1251Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
1252 QualType DstType,
1253 SourceLocation Loc,
1254 ScalarConversionOpts Opts) {
1255 // All conversions involving fixed point types should be handled by the
1256 // EmitFixedPoint family functions. This is done to prevent bloating up this
1257 // function more, and although fixed point numbers are represented by
1258 // integers, we do not want to follow any logic that assumes they should be
1259 // treated as integers.
1260 // TODO(leonardchan): When necessary, add another if statement checking for
1261 // conversions to fixed point types from other types.
1262 if (SrcType->isFixedPointType()) {
1263 if (DstType->isBooleanType())
1264 // It is important that we check this before checking if the dest type is
1265 // an integer because booleans are technically integer types.
1266 // We do not need to check the padding bit on unsigned types if unsigned
1267 // padding is enabled because overflow into this bit is undefined
1268 // behavior.
1269 return Builder.CreateIsNotNull(Src, "tobool");
1270 if (DstType->isFixedPointType() || DstType->isIntegerType() ||
1271 DstType->isRealFloatingType())
1272 return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1273
1274 llvm_unreachable(::llvm::llvm_unreachable_internal("Unhandled scalar conversion from a fixed point type to another type."
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 1275)
1275 "Unhandled scalar conversion from a fixed point type to another type.")::llvm::llvm_unreachable_internal("Unhandled scalar conversion from a fixed point type to another type."
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 1275)
;
1276 } else if (DstType->isFixedPointType()) {
1277 if (SrcType->isIntegerType() || SrcType->isRealFloatingType())
1278 // This also includes converting booleans and enums to fixed point types.
1279 return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1280
1281 llvm_unreachable(::llvm::llvm_unreachable_internal("Unhandled scalar conversion to a fixed point type from another type."
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 1282)
1282 "Unhandled scalar conversion to a fixed point type from another type.")::llvm::llvm_unreachable_internal("Unhandled scalar conversion to a fixed point type from another type."
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 1282)
;
1283 }
1284
1285 QualType NoncanonicalSrcType = SrcType;
1286 QualType NoncanonicalDstType = DstType;
1287
1288 SrcType = CGF.getContext().getCanonicalType(SrcType);
1289 DstType = CGF.getContext().getCanonicalType(DstType);
1290 if (SrcType == DstType) return Src;
1291
1292 if (DstType->isVoidType()) return nullptr;
1293
1294 llvm::Value *OrigSrc = Src;
1295 QualType OrigSrcType = SrcType;
1296 llvm::Type *SrcTy = Src->getType();
1297
1298 // Handle conversions to bool first, they are special: comparisons against 0.
1299 if (DstType->isBooleanType())
1300 return EmitConversionToBool(Src, SrcType);
1301
1302 llvm::Type *DstTy = ConvertType(DstType);
1303
1304 // Cast from half through float if half isn't a native type.
1305 if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1306 // Cast to FP using the intrinsic if the half type itself isn't supported.
1307 if (DstTy->isFloatingPointTy()) {
1308 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
1309 return Builder.CreateCall(
1310 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, DstTy),
1311 Src);
1312 } else {
1313 // Cast to other types through float, using either the intrinsic or FPExt,
1314 // depending on whether the half type itself is supported
1315 // (as opposed to operations on half, available with NativeHalfType).
1316 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
1317 Src = Builder.CreateCall(
1318 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
1319 CGF.CGM.FloatTy),
1320 Src);
1321 } else {
1322 Src = Builder.CreateFPExt(Src, CGF.CGM.FloatTy, "conv");
1323 }
1324 SrcType = CGF.getContext().FloatTy;
1325 SrcTy = CGF.FloatTy;
1326 }
1327 }
1328
1329 // Ignore conversions like int -> uint.
1330 if (SrcTy == DstTy) {
1331 if (Opts.EmitImplicitIntegerSignChangeChecks)
1332 EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Src,
1333 NoncanonicalDstType, Loc);
1334
1335 return Src;
1336 }
1337
1338 // Handle pointer conversions next: pointers can only be converted to/from
1339 // other pointers and integers. Check for pointer types in terms of LLVM, as
1340 // some native types (like Obj-C id) may map to a pointer type.
1341 if (auto DstPT = dyn_cast<llvm::PointerType>(DstTy)) {
1342 // The source value may be an integer, or a pointer.
1343 if (isa<llvm::PointerType>(SrcTy))
1344 return Builder.CreateBitCast(Src, DstTy, "conv");
1345
1346 assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?")((SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?"
) ? static_cast<void> (0) : __assert_fail ("SrcType->isIntegerType() && \"Not ptr->ptr or int->ptr conversion?\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 1346, __PRETTY_FUNCTION__))
;
1347 // First, convert to the correct width so that we control the kind of
1348 // extension.
1349 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
1350 bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
1351 llvm::Value* IntResult =
1352 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
1353 // Then, cast to pointer.
1354 return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
1355 }
1356
1357 if (isa<llvm::PointerType>(SrcTy)) {
1358 // Must be an ptr to int cast.
1359 assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?")((isa<llvm::IntegerType>(DstTy) && "not ptr->int?"
) ? static_cast<void> (0) : __assert_fail ("isa<llvm::IntegerType>(DstTy) && \"not ptr->int?\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 1359, __PRETTY_FUNCTION__))
;
1360 return Builder.CreatePtrToInt(Src, DstTy, "conv");
1361 }
1362
1363 // A scalar can be splatted to an extended vector of the same element type
1364 if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
1365 // Sema should add casts to make sure that the source expression's type is
1366 // the same as the vector's element type (sans qualifiers)
1367 assert(DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==((DstType->castAs<ExtVectorType>()->getElementType
().getTypePtr() == SrcType.getTypePtr() && "Splatted expr doesn't match with vector element type?"
) ? static_cast<void> (0) : __assert_fail ("DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() == SrcType.getTypePtr() && \"Splatted expr doesn't match with vector element type?\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 1369, __PRETTY_FUNCTION__))
1368 SrcType.getTypePtr() &&((DstType->castAs<ExtVectorType>()->getElementType
().getTypePtr() == SrcType.getTypePtr() && "Splatted expr doesn't match with vector element type?"
) ? static_cast<void> (0) : __assert_fail ("DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() == SrcType.getTypePtr() && \"Splatted expr doesn't match with vector element type?\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 1369, __PRETTY_FUNCTION__))
1369 "Splatted expr doesn't match with vector element type?")((DstType->castAs<ExtVectorType>()->getElementType
().getTypePtr() == SrcType.getTypePtr() && "Splatted expr doesn't match with vector element type?"
) ? static_cast<void> (0) : __assert_fail ("DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() == SrcType.getTypePtr() && \"Splatted expr doesn't match with vector element type?\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 1369, __PRETTY_FUNCTION__))
;
1370
1371 // Splat the element across to all elements
1372 unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
1373 return Builder.CreateVectorSplat(NumElements, Src, "splat");
1374 }
1375
1376 if (SrcType->isMatrixType() && DstType->isMatrixType())
1377 return EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1378
1379 if (isa<llvm::VectorType>(SrcTy) || isa<llvm::VectorType>(DstTy)) {
1380 // Allow bitcast from vector to integer/fp of the same size.
1381 unsigned SrcSize = SrcTy->getPrimitiveSizeInBits();
1382 unsigned DstSize = DstTy->getPrimitiveSizeInBits();
1383 if (SrcSize == DstSize)
1384 return Builder.CreateBitCast(Src, DstTy, "conv");
1385
1386 // Conversions between vectors of different sizes are not allowed except
1387 // when vectors of half are involved. Operations on storage-only half
1388 // vectors require promoting half vector operands to float vectors and
1389 // truncating the result, which is either an int or float vector, to a
1390 // short or half vector.
1391
1392 // Source and destination are both expected to be vectors.
1393 llvm::Type *SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
1394 llvm::Type *DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
1395 (void)DstElementTy;
1396
1397 assert(((SrcElementTy->isIntegerTy() &&((((SrcElementTy->isIntegerTy() && DstElementTy->
isIntegerTy()) || (SrcElementTy->isFloatingPointTy() &&
DstElementTy->isFloatingPointTy())) && "unexpected conversion between a floating-point vector and an "
"integer vector") ? static_cast<void> (0) : __assert_fail
("((SrcElementTy->isIntegerTy() && DstElementTy->isIntegerTy()) || (SrcElementTy->isFloatingPointTy() && DstElementTy->isFloatingPointTy())) && \"unexpected conversion between a floating-point vector and an \" \"integer vector\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 1402, __PRETTY_FUNCTION__))
1398 DstElementTy->isIntegerTy()) ||((((SrcElementTy->isIntegerTy() && DstElementTy->
isIntegerTy()) || (SrcElementTy->isFloatingPointTy() &&
DstElementTy->isFloatingPointTy())) && "unexpected conversion between a floating-point vector and an "
"integer vector") ? static_cast<void> (0) : __assert_fail
("((SrcElementTy->isIntegerTy() && DstElementTy->isIntegerTy()) || (SrcElementTy->isFloatingPointTy() && DstElementTy->isFloatingPointTy())) && \"unexpected conversion between a floating-point vector and an \" \"integer vector\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 1402, __PRETTY_FUNCTION__))
1399 (SrcElementTy->isFloatingPointTy() &&((((SrcElementTy->isIntegerTy() && DstElementTy->
isIntegerTy()) || (SrcElementTy->isFloatingPointTy() &&
DstElementTy->isFloatingPointTy())) && "unexpected conversion between a floating-point vector and an "
"integer vector") ? static_cast<void> (0) : __assert_fail
("((SrcElementTy->isIntegerTy() && DstElementTy->isIntegerTy()) || (SrcElementTy->isFloatingPointTy() && DstElementTy->isFloatingPointTy())) && \"unexpected conversion between a floating-point vector and an \" \"integer vector\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 1402, __PRETTY_FUNCTION__))
1400 DstElementTy->isFloatingPointTy())) &&((((SrcElementTy->isIntegerTy() && DstElementTy->
isIntegerTy()) || (SrcElementTy->isFloatingPointTy() &&
DstElementTy->isFloatingPointTy())) && "unexpected conversion between a floating-point vector and an "
"integer vector") ? static_cast<void> (0) : __assert_fail
("((SrcElementTy->isIntegerTy() && DstElementTy->isIntegerTy()) || (SrcElementTy->isFloatingPointTy() && DstElementTy->isFloatingPointTy())) && \"unexpected conversion between a floating-point vector and an \" \"integer vector\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 1402, __PRETTY_FUNCTION__))
1401 "unexpected conversion between a floating-point vector and an "((((SrcElementTy->isIntegerTy() && DstElementTy->
isIntegerTy()) || (SrcElementTy->isFloatingPointTy() &&
DstElementTy->isFloatingPointTy())) && "unexpected conversion between a floating-point vector and an "
"integer vector") ? static_cast<void> (0) : __assert_fail
("((SrcElementTy->isIntegerTy() && DstElementTy->isIntegerTy()) || (SrcElementTy->isFloatingPointTy() && DstElementTy->isFloatingPointTy())) && \"unexpected conversion between a floating-point vector and an \" \"integer vector\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 1402, __PRETTY_FUNCTION__))
1402 "integer vector")((((SrcElementTy->isIntegerTy() && DstElementTy->
isIntegerTy()) || (SrcElementTy->isFloatingPointTy() &&
DstElementTy->isFloatingPointTy())) && "unexpected conversion between a floating-point vector and an "
"integer vector") ? static_cast<void> (0) : __assert_fail
("((SrcElementTy->isIntegerTy() && DstElementTy->isIntegerTy()) || (SrcElementTy->isFloatingPointTy() && DstElementTy->isFloatingPointTy())) && \"unexpected conversion between a floating-point vector and an \" \"integer vector\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 1402, __PRETTY_FUNCTION__))
;
1403
1404 // Truncate an i32 vector to an i16 vector.
1405 if (SrcElementTy->isIntegerTy())
1406 return Builder.CreateIntCast(Src, DstTy, false, "conv");
1407
1408 // Truncate a float vector to a half vector.
1409 if (SrcSize > DstSize)
1410 return Builder.CreateFPTrunc(Src, DstTy, "conv");
1411
1412 // Promote a half vector to a float vector.
1413 return Builder.CreateFPExt(Src, DstTy, "conv");
1414 }
1415
1416 // Finally, we have the arithmetic types: real int/float.
1417 Value *Res = nullptr;
1418 llvm::Type *ResTy = DstTy;
1419
1420 // An overflowing conversion has undefined behavior if either the source type
1421 // or the destination type is a floating-point type. However, we consider the
1422 // range of representable values for all floating-point types to be
1423 // [-inf,+inf], so no overflow can ever happen when the destination type is a
1424 // floating-point type.
1425 if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) &&
1426 OrigSrcType->isFloatingType())
1427 EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
1428 Loc);
1429
1430 // Cast to half through float if half isn't a native type.
1431 if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1432 // Make sure we cast in a single step if from another FP type.
1433 if (SrcTy->isFloatingPointTy()) {
1434 // Use the intrinsic if the half type itself isn't supported
1435 // (as opposed to operations on half, available with NativeHalfType).
1436 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
1437 return Builder.CreateCall(
1438 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, SrcTy), Src);
1439 // If the half type is supported, just use an fptrunc.
1440 return Builder.CreateFPTrunc(Src, DstTy);
1441 }
1442 DstTy = CGF.FloatTy;
1443 }
1444
1445 Res = EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1446
1447 if (DstTy != ResTy) {
1448 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
1449 assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion")((ResTy->isIntegerTy(16) && "Only half FP requires extra conversion"
) ? static_cast<void> (0) : __assert_fail ("ResTy->isIntegerTy(16) && \"Only half FP requires extra conversion\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 1449, __PRETTY_FUNCTION__))
;
1450 Res = Builder.CreateCall(
1451 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, CGF.CGM.FloatTy),
1452 Res);
1453 } else {
1454 Res = Builder.CreateFPTrunc(Res, ResTy, "conv");
1455 }
1456 }
1457
1458 if (Opts.EmitImplicitIntegerTruncationChecks)
1459 EmitIntegerTruncationCheck(Src, NoncanonicalSrcType, Res,
1460 NoncanonicalDstType, Loc);
1461
1462 if (Opts.EmitImplicitIntegerSignChangeChecks)
1463 EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Res,
1464 NoncanonicalDstType, Loc);
1465
1466 return Res;
1467}
1468
1469Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
1470 QualType DstTy,
1471 SourceLocation Loc) {
1472 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
1473 llvm::Value *Result;
1474 if (SrcTy->isRealFloatingType())
1475 Result = FPBuilder.CreateFloatingToFixed(Src,
1476 CGF.getContext().getFixedPointSemantics(DstTy));
1477 else if (DstTy->isRealFloatingType())
1478 Result = FPBuilder.CreateFixedToFloating(Src,
1479 CGF.getContext().getFixedPointSemantics(SrcTy),
1480 ConvertType(DstTy));
1481 else {
1482 auto SrcFPSema = CGF.getContext().getFixedPointSemantics(SrcTy);
1483 auto DstFPSema = CGF.getContext().getFixedPointSemantics(DstTy);
1484
1485 if (DstTy->isIntegerType())
1486 Result = FPBuilder.CreateFixedToInteger(Src, SrcFPSema,
1487 DstFPSema.getWidth(),
1488 DstFPSema.isSigned());
1489 else if (SrcTy->isIntegerType())
1490 Result = FPBuilder.CreateIntegerToFixed(Src, SrcFPSema.isSigned(),
1491 DstFPSema);
1492 else
1493 Result = FPBuilder.CreateFixedToFixed(Src, SrcFPSema, DstFPSema);
1494 }
1495 return Result;
1496}
1497
1498/// Emit a conversion from the specified complex type to the specified
1499/// destination type, where the destination type is an LLVM scalar type.
1500Value *ScalarExprEmitter::EmitComplexToScalarConversion(
1501 CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy,
1502 SourceLocation Loc) {
1503 // Get the source element type.
1504 SrcTy = SrcTy->castAs<ComplexType>()->getElementType();
1505
1506 // Handle conversions to bool first, they are special: comparisons against 0.
1507 if (DstTy->isBooleanType()) {
1508 // Complex != 0 -> (Real != 0) | (Imag != 0)
1509 Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1510 Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy, Loc);
1511 return Builder.CreateOr(Src.first, Src.second, "tobool");
1512 }
1513
1514 // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
1515 // the imaginary part of the complex value is discarded and the value of the
1516 // real part is converted according to the conversion rules for the
1517 // corresponding real type.
1518 return EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1519}
1520
1521Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
1522 return CGF.EmitFromMemory(CGF.CGM.EmitNullConstant(Ty), Ty);
1523}
1524
1525/// Emit a sanitization check for the given "binary" operation (which
1526/// might actually be a unary increment which has been lowered to a binary
1527/// operation). The check passes if all values in \p Checks (which are \c i1),
1528/// are \c true.
1529void ScalarExprEmitter::EmitBinOpCheck(
1530 ArrayRef<std::pair<Value *, SanitizerMask>> Checks, const BinOpInfo &Info) {
1531 assert(CGF.IsSanitizerScope)((CGF.IsSanitizerScope) ? static_cast<void> (0) : __assert_fail
("CGF.IsSanitizerScope", "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 1531, __PRETTY_FUNCTION__))
;
1532 SanitizerHandler Check;
1533 SmallVector<llvm::Constant *, 4> StaticData;
1534 SmallVector<llvm::Value *, 2> DynamicData;
1535
1536 BinaryOperatorKind Opcode = Info.Opcode;
1537 if (BinaryOperator::isCompoundAssignmentOp(Opcode))
1538 Opcode = BinaryOperator::getOpForCompoundAssignment(Opcode);
1539
1540 StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc()));
1541 const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E);
1542 if (UO && UO->getOpcode() == UO_Minus) {
1543 Check = SanitizerHandler::NegateOverflow;
1544 StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType()));
1545 DynamicData.push_back(Info.RHS);
1546 } else {
1547 if (BinaryOperator::isShiftOp(Opcode)) {
1548 // Shift LHS negative or too large, or RHS out of bounds.
1549 Check = SanitizerHandler::ShiftOutOfBounds;
1550 const BinaryOperator *BO = cast<BinaryOperator>(Info.E);
1551 StaticData.push_back(
1552 CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType()));
1553 StaticData.push_back(
1554 CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType()));
1555 } else if (Opcode == BO_Div || Opcode == BO_Rem) {
1556 // Divide or modulo by zero, or signed overflow (eg INT_MAX / -1).
1557 Check = SanitizerHandler::DivremOverflow;
1558 StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
1559 } else {
1560 // Arithmetic overflow (+, -, *).
1561 switch (Opcode) {
1562 case BO_Add: Check = SanitizerHandler::AddOverflow; break;
1563 case BO_Sub: Check = SanitizerHandler::SubOverflow; break;
1564 case BO_Mul: Check = SanitizerHandler::MulOverflow; break;
1565 default: llvm_unreachable("unexpected opcode for bin op check")::llvm::llvm_unreachable_internal("unexpected opcode for bin op check"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 1565)
;
1566 }
1567 StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
1568 }
1569 DynamicData.push_back(Info.LHS);
1570 DynamicData.push_back(Info.RHS);
1571 }
1572
1573 CGF.EmitCheck(Checks, Check, StaticData, DynamicData);
1574}
1575
1576//===----------------------------------------------------------------------===//
1577// Visitor Methods
1578//===----------------------------------------------------------------------===//
1579
1580Value *ScalarExprEmitter::VisitExpr(Expr *E) {
1581 CGF.ErrorUnsupported(E, "scalar expression");
1582 if (E->getType()->isVoidType())
1583 return nullptr;
1584 return llvm::UndefValue::get(CGF.ConvertType(E->getType()));
1585}
1586
1587Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
1588 // Vector Mask Case
1589 if (E->getNumSubExprs() == 2) {
1590 Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));
1591 Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
1592 Value *Mask;
1593
1594 auto *LTy = cast<llvm::FixedVectorType>(LHS->getType());
1595 unsigned LHSElts = LTy->getNumElements();
1596
1597 Mask = RHS;
1598
1599 auto *MTy = cast<llvm::FixedVectorType>(Mask->getType());
1600
1601 // Mask off the high bits of each shuffle index.
1602 Value *MaskBits =
1603 llvm::ConstantInt::get(MTy, llvm::NextPowerOf2(LHSElts - 1) - 1);
1604 Mask = Builder.CreateAnd(Mask, MaskBits, "mask");
1605
1606 // newv = undef
1607 // mask = mask & maskbits
1608 // for each elt
1609 // n = extract mask i
1610 // x = extract val n
1611 // newv = insert newv, x, i
1612 auto *RTy = llvm::FixedVectorType::get(LTy->getElementType(),
1613 MTy->getNumElements());
1614 Value* NewV = llvm::UndefValue::get(RTy);
1615 for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
1616 Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i);
1617 Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx");
1618
1619 Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt");
1620 NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins");
1621 }
1622 return NewV;
1623 }
1624
1625 Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
1626 Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
1627
1628 SmallVector<int, 32> Indices;
1629 for (unsigned i = 2; i < E->getNumSubExprs(); ++i) {
1630 llvm::APSInt Idx = E->getShuffleMaskIdx(CGF.getContext(), i-2);
1631 // Check for -1 and output it as undef in the IR.
1632 if (Idx.isSigned() && Idx.isAllOnesValue())
1633 Indices.push_back(-1);
1634 else
1635 Indices.push_back(Idx.getZExtValue());
1636 }
1637
1638 return Builder.CreateShuffleVector(V1, V2, Indices, "shuffle");
1639}
1640
1641Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
1642 QualType SrcType = E->getSrcExpr()->getType(),
1643 DstType = E->getType();
1644
1645 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
1646
1647 SrcType = CGF.getContext().getCanonicalType(SrcType);
1648 DstType = CGF.getContext().getCanonicalType(DstType);
1649 if (SrcType == DstType) return Src;
1650
1651 assert(SrcType->isVectorType() &&((SrcType->isVectorType() && "ConvertVector source type must be a vector"
) ? static_cast<void> (0) : __assert_fail ("SrcType->isVectorType() && \"ConvertVector source type must be a vector\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 1652, __PRETTY_FUNCTION__))
1652 "ConvertVector source type must be a vector")((SrcType->isVectorType() && "ConvertVector source type must be a vector"
) ? static_cast<void> (0) : __assert_fail ("SrcType->isVectorType() && \"ConvertVector source type must be a vector\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 1652, __PRETTY_FUNCTION__))
;
1653 assert(DstType->isVectorType() &&((DstType->isVectorType() && "ConvertVector destination type must be a vector"
) ? static_cast<void> (0) : __assert_fail ("DstType->isVectorType() && \"ConvertVector destination type must be a vector\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 1654, __PRETTY_FUNCTION__))
1654 "ConvertVector destination type must be a vector")((DstType->isVectorType() && "ConvertVector destination type must be a vector"
) ? static_cast<void> (0) : __assert_fail ("DstType->isVectorType() && \"ConvertVector destination type must be a vector\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 1654, __PRETTY_FUNCTION__))
;
1655
1656 llvm::Type *SrcTy = Src->getType();
1657 llvm::Type *DstTy = ConvertType(DstType);
1658
1659 // Ignore conversions like int -> uint.
1660 if (SrcTy == DstTy)
1661 return Src;
1662
1663 QualType SrcEltType = SrcType->castAs<VectorType>()->getElementType(),
1664 DstEltType = DstType->castAs<VectorType>()->getElementType();
1665
1666 assert(SrcTy->isVectorTy() &&((SrcTy->isVectorTy() && "ConvertVector source IR type must be a vector"
) ? static_cast<void> (0) : __assert_fail ("SrcTy->isVectorTy() && \"ConvertVector source IR type must be a vector\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 1667, __PRETTY_FUNCTION__))
1667 "ConvertVector source IR type must be a vector")((SrcTy->isVectorTy() && "ConvertVector source IR type must be a vector"
) ? static_cast<void> (0) : __assert_fail ("SrcTy->isVectorTy() && \"ConvertVector source IR type must be a vector\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 1667, __PRETTY_FUNCTION__))
;
1668 assert(DstTy->isVectorTy() &&((DstTy->isVectorTy() && "ConvertVector destination IR type must be a vector"
) ? static_cast<void> (0) : __assert_fail ("DstTy->isVectorTy() && \"ConvertVector destination IR type must be a vector\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 1669, __PRETTY_FUNCTION__))
1669 "ConvertVector destination IR type must be a vector")((DstTy->isVectorTy() && "ConvertVector destination IR type must be a vector"
) ? static_cast<void> (0) : __assert_fail ("DstTy->isVectorTy() && \"ConvertVector destination IR type must be a vector\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 1669, __PRETTY_FUNCTION__))
;
1670
1671 llvm::Type *SrcEltTy = cast<llvm::VectorType>(SrcTy)->getElementType(),
1672 *DstEltTy = cast<llvm::VectorType>(DstTy)->getElementType();
1673
1674 if (DstEltType->isBooleanType()) {
1675 assert((SrcEltTy->isFloatingPointTy() ||(((SrcEltTy->isFloatingPointTy() || isa<llvm::IntegerType
>(SrcEltTy)) && "Unknown boolean conversion") ? static_cast
<void> (0) : __assert_fail ("(SrcEltTy->isFloatingPointTy() || isa<llvm::IntegerType>(SrcEltTy)) && \"Unknown boolean conversion\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 1676, __PRETTY_FUNCTION__))
1676 isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion")(((SrcEltTy->isFloatingPointTy() || isa<llvm::IntegerType
>(SrcEltTy)) && "Unknown boolean conversion") ? static_cast
<void> (0) : __assert_fail ("(SrcEltTy->isFloatingPointTy() || isa<llvm::IntegerType>(SrcEltTy)) && \"Unknown boolean conversion\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 1676, __PRETTY_FUNCTION__))
;
1677
1678 llvm::Value *Zero = llvm::Constant::getNullValue(SrcTy);
1679 if (SrcEltTy->isFloatingPointTy()) {
1680 return Builder.CreateFCmpUNE(Src, Zero, "tobool");
1681 } else {
1682 return Builder.CreateICmpNE(Src, Zero, "tobool");
1683 }
1684 }
1685
1686 // We have the arithmetic types: real int/float.
1687 Value *Res = nullptr;
1688
1689 if (isa<llvm::IntegerType>(SrcEltTy)) {
1690 bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
1691 if (isa<llvm::IntegerType>(DstEltTy))
1692 Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
1693 else if (InputSigned)
1694 Res = Builder.CreateSIToFP(Src, DstTy, "conv");
1695 else
1696 Res = Builder.CreateUIToFP(Src, DstTy, "conv");
1697 } else if (isa<llvm::IntegerType>(DstEltTy)) {
1698 assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion")((SrcEltTy->isFloatingPointTy() && "Unknown real conversion"
) ? static_cast<void> (0) : __assert_fail ("SrcEltTy->isFloatingPointTy() && \"Unknown real conversion\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 1698, __PRETTY_FUNCTION__))
;
1699 if (DstEltType->isSignedIntegerOrEnumerationType())
1700 Res = Builder.CreateFPToSI(Src, DstTy, "conv");
1701 else
1702 Res = Builder.CreateFPToUI(Src, DstTy, "conv");
1703 } else {
1704 assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&((SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy
() && "Unknown real conversion") ? static_cast<void
> (0) : __assert_fail ("SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() && \"Unknown real conversion\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 1705, __PRETTY_FUNCTION__))
1705 "Unknown real conversion")((SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy
() && "Unknown real conversion") ? static_cast<void
> (0) : __assert_fail ("SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() && \"Unknown real conversion\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 1705, __PRETTY_FUNCTION__))
;
1706 if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
1707 Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
1708 else
1709 Res = Builder.CreateFPExt(Src, DstTy, "conv");
1710 }
1711
1712 return Res;
1713}
1714
1715Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
1716 if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E)) {
1717 CGF.EmitIgnoredExpr(E->getBase());
1718 return CGF.emitScalarConstant(Constant, E);
1719 } else {
1720 Expr::EvalResult Result;
1721 if (E->EvaluateAsInt(Result, CGF.getContext(), Expr::SE_AllowSideEffects)) {
1722 llvm::APSInt Value = Result.Val.getInt();
1723 CGF.EmitIgnoredExpr(E->getBase());
1724 return Builder.getInt(Value);
1725 }
1726 }
1727
1728 return EmitLoadOfLValue(E);
1729}
1730
1731Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
1732 TestAndClearIgnoreResultAssign();
1733
1734 // Emit subscript expressions in rvalue context's. For most cases, this just
1735 // loads the lvalue formed by the subscript expr. However, we have to be
1736 // careful, because the base of a vector subscript is occasionally an rvalue,
1737 // so we can't get it as an lvalue.
1738 if (!E->getBase()->getType()->isVectorType())
1739 return EmitLoadOfLValue(E);
1740
1741 // Handle the vector case. The base must be a vector, the index must be an
1742 // integer value.
1743 Value *Base = Visit(E->getBase());
1744 Value *Idx = Visit(E->getIdx());
1745 QualType IdxTy = E->getIdx()->getType();
1746
1747 if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
1748 CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true);
1749
1750 return Builder.CreateExtractElement(Base, Idx, "vecext");
1751}
1752
1753Value *ScalarExprEmitter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
1754 TestAndClearIgnoreResultAssign();
1755
1756 // Handle the vector case. The base must be a vector, the index must be an
1757 // integer value.
1758 Value *RowIdx = Visit(E->getRowIdx());
1759 Value *ColumnIdx = Visit(E->getColumnIdx());
1760 Value *Matrix = Visit(E->getBase());
1761
1762 // TODO: Should we emit bounds checks with SanitizerKind::ArrayBounds?
1763 llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
1764 return MB.CreateExtractElement(
1765 Matrix, RowIdx, ColumnIdx,
1766 E->getBase()->getType()->castAs<ConstantMatrixType>()->getNumRows());
1767}
1768
1769static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
1770 unsigned Off) {
1771 int MV = SVI->getMaskValue(Idx);
1772 if (MV == -1)
1773 return -1;
1774 return Off + MV;
1775}
1776
1777static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
1778 assert(llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) &&((llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue
()) && "Index operand too large for shufflevector mask!"
) ? static_cast<void> (0) : __assert_fail ("llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) && \"Index operand too large for shufflevector mask!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 1779, __PRETTY_FUNCTION__))
1779 "Index operand too large for shufflevector mask!")((llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue
()) && "Index operand too large for shufflevector mask!"
) ? static_cast<void> (0) : __assert_fail ("llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) && \"Index operand too large for shufflevector mask!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 1779, __PRETTY_FUNCTION__))
;
1780 return C->getZExtValue();
1781}
1782
// VisitInitListExpr - Emit the scalar or vector value of an initializer list.
// For a non-vector destination type this is either a null value (empty list,
// C++11 value-initialization) or simply the first initializer. For vectors it
// builds the value with insertelement/shufflevector, folding ExtVector
// swizzle sources into the shuffles where the widths line up.
// NOTE: asserts below appear macro-expanded because this is a preprocessed
// analyzer dump of the original source.
1783Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
1784 bool Ignore = TestAndClearIgnoreResultAssign();
1785 (void)Ignore;
1786 assert (Ignore == false && "init list ignored")((Ignore == false && "init list ignored") ? static_cast
<void> (0) : __assert_fail ("Ignore == false && \"init list ignored\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 1786, __PRETTY_FUNCTION__))
;
1787 unsigned NumInitElements = E->getNumInits();
1788
1789 if (E->hadArrayRangeDesignator())
1790 CGF.ErrorUnsupported(E, "GNU array range designator extension");
1791
1792 llvm::VectorType *VType =
1793 dyn_cast<llvm::VectorType>(ConvertType(E->getType()));
1794
1795 if (!VType) {
1796 if (NumInitElements == 0) {
1797 // C++11 value-initialization for the scalar.
1798 return EmitNullValue(E->getType());
1799 }
1800 // We have a scalar in braces. Just use the first element.
1801 return Visit(E->getInit(0));
1802 }
1803
1804 unsigned ResElts = cast<llvm::FixedVectorType>(VType)->getNumElements();
1805
1806 // Loop over initializers collecting the Value for each, and remembering
1807 // whether the source was swizzle (ExtVectorElementExpr). This will allow
1808 // us to fold the shuffle for the swizzle into the shuffle for the vector
1809 // initializer, since LLVM optimizers generally do not want to touch
1810 // shuffles.
1811 unsigned CurIdx = 0;
1812 bool VIsUndefShuffle = false;
1813 llvm::Value *V = llvm::UndefValue::get(VType);
1814 for (unsigned i = 0; i != NumInitElements; ++i) {
1815 Expr *IE = E->getInit(i);
1816 Value *Init = Visit(IE);
1817 SmallVector<int, 16> Args;
1818
1819 llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());
1820
1821 // Handle scalar elements. If the scalar initializer is actually one
1822 // element of a different vector of the same width, use shuffle instead of
1823 // extract+insert.
1824 if (!VVT) {
1825 if (isa<ExtVectorElementExpr>(IE)) {
1826 llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);
1827
1828 if (cast<llvm::FixedVectorType>(EI->getVectorOperandType())
1829 ->getNumElements() == ResElts) {
1830 llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
// LHS/RHS stay null only when neither branch below fires; in that case
// Args is also left empty, so the CreateShuffleVector call is guarded
// by !Args.empty() and never sees a null operand (the analyzer report
// about a null object pointer here is a false positive).
1831 Value *LHS = nullptr, *RHS = nullptr;
1832 if (CurIdx == 0) {
1833 // insert into undef -> shuffle (src, undef)
1834 // shufflemask must use an i32
1835 Args.push_back(getAsInt32(C, CGF.Int32Ty));
1836 Args.resize(ResElts, -1);
1837
1838 LHS = EI->getVectorOperand();
1839 RHS = V;
1840 VIsUndefShuffle = true;
1841 } else if (VIsUndefShuffle) {
1842 // insert into undefshuffle && size match -> shuffle (v, src)
1843 llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
1844 for (unsigned j = 0; j != CurIdx; ++j)
1845 Args.push_back(getMaskElt(SVV, j, 0));
1846 Args.push_back(ResElts + C->getZExtValue());
1847 Args.resize(ResElts, -1);
1848
1849 LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
1850 RHS = EI->getVectorOperand();
1851 VIsUndefShuffle = false;
1852 }
1853 if (!Args.empty()) {
1854 V = Builder.CreateShuffleVector(LHS, RHS, Args);
1855 ++CurIdx;
1856 continue;
1857 }
1858 }
1859 }
// Plain scalar element: insert it at the current position.
1860 V = Builder.CreateInsertElement(V, Init, Builder.getInt32(CurIdx),
1861 "vecinit");
1862 VIsUndefShuffle = false;
1863 ++CurIdx;
1864 continue;
1865 }
1866
1867 unsigned InitElts = cast<llvm::FixedVectorType>(VVT)->getNumElements();
1868
1869 // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
1870 // input is the same width as the vector being constructed, generate an
1871 // optimized shuffle of the swizzle input into the result.
1872 unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
1873 if (isa<ExtVectorElementExpr>(IE)) {
1874 llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
1875 Value *SVOp = SVI->getOperand(0);
1876 auto *OpTy = cast<llvm::FixedVectorType>(SVOp->getType());
1877
1878 if (OpTy->getNumElements() == ResElts) {
1879 for (unsigned j = 0; j != CurIdx; ++j) {
1880 // If the current vector initializer is a shuffle with undef, merge
1881 // this shuffle directly into it.
1882 if (VIsUndefShuffle) {
1883 Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0));
1884 } else {
1885 Args.push_back(j);
1886 }
1887 }
1888 for (unsigned j = 0, je = InitElts; j != je; ++j)
1889 Args.push_back(getMaskElt(SVI, j, Offset));
1890 Args.resize(ResElts, -1);
1891
1892 if (VIsUndefShuffle)
1893 V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
1894
1895 Init = SVOp;
1896 }
1897 }
1898
1899 // Extend init to result vector length, and then shuffle its contribution
1900 // to the vector initializer into V.
1901 if (Args.empty()) {
1902 for (unsigned j = 0; j != InitElts; ++j)
1903 Args.push_back(j);
1904 Args.resize(ResElts, -1);
1905 Init = Builder.CreateShuffleVector(Init, Args, "vext");
1906
1907 Args.clear();
1908 for (unsigned j = 0; j != CurIdx; ++j)
1909 Args.push_back(j);
1910 for (unsigned j = 0; j != InitElts; ++j)
1911 Args.push_back(j + Offset);
1912 Args.resize(ResElts, -1);
1913 }
1914
1915 // If V is undef, make sure it ends up on the RHS of the shuffle to aid
1916 // merging subsequent shuffles into this one.
1917 if (CurIdx == 0)
1918 std::swap(V, Init);
1919 V = Builder.CreateShuffleVector(V, Init, Args, "vecinit");
1920 VIsUndefShuffle = isa<llvm::UndefValue>(Init);
1921 CurIdx += InitElts;
1922 }
1923
1924 // FIXME: evaluate codegen vs. shuffling against constant null vector.
1925 // Emit remaining default initializers.
1926 llvm::Type *EltTy = VType->getElementType();
1927
1928 // Emit remaining default initializers
1929 for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
1930 Value *Idx = Builder.getInt32(CurIdx);
1931 llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
1932 V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
1933 }
1934 return V;
1935}
1936
// ShouldNullCheckClassCastValue - Decide whether a class pointer cast needs a
// runtime null check. Returns false for casts whose operand is known non-null:
// unchecked derived-to-base casts, casts of 'this', and implicit glvalue
// casts. Returns true (check needed) otherwise.
1937bool CodeGenFunction::ShouldNullCheckClassCastValue(const CastExpr *CE) {
1938 const Expr *E = CE->getSubExpr();
1939
1940 if (CE->getCastKind() == CK_UncheckedDerivedToBase)
1941 return false;
1942
1943 if (isa<CXXThisExpr>(E->IgnoreParens())) {
1944 // We always assume that 'this' is never null.
1945 return false;
1946 }
1947
1948 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
1949 // And that glvalue casts are never null.
1950 if (ICE->getValueKind() != VK_RValue)
1951 return false;
1952 }
1953
1954 return true;
1955}
1956
1957// VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
1958// have to handle a more broad range of conversions than explicit casts, as they
1959// handle things like function to ptr-to-function decay etc.
// Returns the llvm::Value of the converted expression; CK_ToVoid returns
// nullptr. The switch covers every CastKind (no default) so the compiler
// flags missing cases. Asserts/unreachables below appear macro-expanded
// because this is a preprocessed analyzer dump of the original source.
1960Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
1961 Expr *E = CE->getSubExpr();
1962 QualType DestTy = CE->getType();
1963 CastKind Kind = CE->getCastKind();
1964
1965 // These cases are generally not written to ignore the result of
1966 // evaluating their sub-expressions, so we clear this now.
1967 bool Ignored = TestAndClearIgnoreResultAssign();
1968
1969 // Since almost all cast kinds apply to scalars, this switch doesn't have
1970 // a default case, so the compiler will warn on a missing case. The cases
1971 // are in the same order as in the CastKind enum.
1972 switch (Kind) {
1973 case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!")::llvm::llvm_unreachable_internal("dependent cast kind in IR gen!"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 1973)
;
1974 case CK_BuiltinFnToFnPtr:
1975 llvm_unreachable("builtin functions are handled elsewhere")::llvm::llvm_unreachable_internal("builtin functions are handled elsewhere"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 1975)
;
1976
1977 case CK_LValueBitCast:
1978 case CK_ObjCObjectLValueCast: {
1979 Address Addr = EmitLValue(E).getAddress(CGF);
1980 Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy));
1981 LValue LV = CGF.MakeAddrLValue(Addr, DestTy);
1982 return EmitLoadOfLValue(LV, CE->getExprLoc());
1983 }
1984
1985 case CK_LValueToRValueBitCast: {
1986 LValue SourceLVal = CGF.EmitLValue(E);
1987 Address Addr = Builder.CreateElementBitCast(SourceLVal.getAddress(CGF),
1988 CGF.ConvertTypeForMem(DestTy));
1989 LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
1990 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
1991 return EmitLoadOfLValue(DestLV, CE->getExprLoc());
1992 }
1993
1994 case CK_CPointerToObjCPointerCast:
1995 case CK_BlockPointerToObjCPointerCast:
1996 case CK_AnyPointerToBlockPointerCast:
1997 case CK_BitCast: {
1998 Value *Src = Visit(const_cast<Expr*>(E));
1999 llvm::Type *SrcTy = Src->getType();
2000 llvm::Type *DstTy = ConvertType(DestTy);
2001 if (SrcTy->isPtrOrPtrVectorTy() && DstTy->isPtrOrPtrVectorTy() &&
2002 SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace()) {
2003 llvm_unreachable("wrong cast for pointers in different address spaces"::llvm::llvm_unreachable_internal("wrong cast for pointers in different address spaces"
"(must be an address space cast)!", "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 2004)
2004 "(must be an address space cast)!")::llvm::llvm_unreachable_internal("wrong cast for pointers in different address spaces"
"(must be an address space cast)!", "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 2004)
;
2005 }

2007 if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
2008 if (auto PT = DestTy->getAs<PointerType>())
2009 CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Src,
2010 /*MayBeNull=*/true,
2011 CodeGenFunction::CFITCK_UnrelatedCast,
2012 CE->getBeginLoc());
2013 }
2014
2015 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2016 const QualType SrcType = E->getType();
2017
2018 if (SrcType.mayBeNotDynamicClass() && DestTy.mayBeDynamicClass()) {
2019 // Casting to pointer that could carry dynamic information (provided by
2020 // invariant.group) requires launder.
2021 Src = Builder.CreateLaunderInvariantGroup(Src);
2022 } else if (SrcType.mayBeDynamicClass() && DestTy.mayBeNotDynamicClass()) {
2023 // Casting to pointer that does not carry dynamic information (provided
2024 // by invariant.group) requires stripping it. Note that we don't do it
2025 // if the source could not be dynamic type and destination could be
2026 // dynamic because dynamic information is already laundered. It is
2027 // because launder(strip(src)) == launder(src), so there is no need to
2028 // add extra strip before launder.
2029 Src = Builder.CreateStripInvariantGroup(Src);
2030 }
2031 }
2032
2033 // Update heapallocsite metadata when there is an explicit pointer cast.
2034 if (auto *CI = dyn_cast<llvm::CallBase>(Src)) {
2035 if (CI->getMetadata("heapallocsite") && isa<ExplicitCastExpr>(CE)) {
2036 QualType PointeeType = DestTy->getPointeeType();
2037 if (!PointeeType.isNull())
2038 CGF.getDebugInfo()->addHeapAllocSiteMetadata(CI, PointeeType,
2039 CE->getExprLoc());
2040 }
2041 }
2042
2043 // If Src is a fixed vector and Dst is a scalable vector, and both have the
2044 // same element type, use the llvm.experimental.vector.insert intrinsic to
2045 // perform the bitcast.
2046 if (const auto *FixedSrc = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
2047 if (const auto *ScalableDst = dyn_cast<llvm::ScalableVectorType>(DstTy)) {
2048 if (FixedSrc->getElementType() == ScalableDst->getElementType()) {
2049 llvm::Value *UndefVec = llvm::UndefValue::get(DstTy);
2050 llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
2051 return Builder.CreateInsertVector(DstTy, UndefVec, Src, Zero,
2052 "castScalableSve");
2053 }
2054 }
2055 }
2056
2057 // If Src is a scalable vector and Dst is a fixed vector, and both have the
2058 // same element type, use the llvm.experimental.vector.extract intrinsic to
2059 // perform the bitcast.
2060 if (const auto *ScalableSrc = dyn_cast<llvm::ScalableVectorType>(SrcTy)) {
2061 if (const auto *FixedDst = dyn_cast<llvm::FixedVectorType>(DstTy)) {
2062 if (ScalableSrc->getElementType() == FixedDst->getElementType()) {
2063 llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
2064 return Builder.CreateExtractVector(DstTy, Src, Zero, "castFixedSve");
2065 }
2066 }
2067 }
2068
2069 // Perform VLAT <-> VLST bitcast through memory.
2070 // TODO: since the llvm.experimental.vector.{insert,extract} intrinsics
2071 // require the element types of the vectors to be the same, we
2072 // need to keep this around for casting between predicates, or more
2073 // generally for bitcasts between VLAT <-> VLST where the element
2074 // types of the vectors are not the same, until we figure out a better
2075 // way of doing these casts.
2076 if ((isa<llvm::FixedVectorType>(SrcTy) &&
2077 isa<llvm::ScalableVectorType>(DstTy)) ||
2078 (isa<llvm::ScalableVectorType>(SrcTy) &&
2079 isa<llvm::FixedVectorType>(DstTy))) {
// NOTE(review): this inner 'CE' deliberately shadows the function
// parameter 'CE'; the getExprLoc() call below refers to the CallExpr.
2080 if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
2081 // Call expressions can't have a scalar return unless the return type
2082 // is a reference type so an lvalue can't be emitted. Create a temp
2083 // alloca to store the call, bitcast the address then load.
2084 QualType RetTy = CE->getCallReturnType(CGF.getContext());
2085 Address Addr =
2086 CGF.CreateDefaultAlignTempAlloca(SrcTy, "saved-call-rvalue");
2087 LValue LV = CGF.MakeAddrLValue(Addr, RetTy);
2088 CGF.EmitStoreOfScalar(Src, LV);
2089 Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy),
2090 "castFixedSve");
2091 LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
2092 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
2093 return EmitLoadOfLValue(DestLV, CE->getExprLoc());
2094 }

2096 Address Addr = EmitLValue(E).getAddress(CGF);
2097 Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy));
2098 LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
2099 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
2100 return EmitLoadOfLValue(DestLV, CE->getExprLoc());
2101 }
2102
2103 return Builder.CreateBitCast(Src, DstTy);
2104 }
2105 case CK_AddressSpaceConversion: {
2106 Expr::EvalResult Result;
2107 if (E->EvaluateAsRValue(Result, CGF.getContext()) &&
2108 Result.Val.isNullPointer()) {
2109 // If E has side effect, it is emitted even if its final result is a
2110 // null pointer. In that case, a DCE pass should be able to
2111 // eliminate the useless instructions emitted during translating E.
2112 if (Result.HasSideEffects)
2113 Visit(E);
2114 return CGF.CGM.getNullPointer(cast<llvm::PointerType>(
2115 ConvertType(DestTy)), DestTy);
2116 }
2117 // Since target may map different address spaces in AST to the same address
2118 // space, an address space conversion may end up as a bitcast.
2119 return CGF.CGM.getTargetCodeGenInfo().performAddrSpaceCast(
2120 CGF, Visit(E), E->getType()->getPointeeType().getAddressSpace(),
2121 DestTy->getPointeeType().getAddressSpace(), ConvertType(DestTy));
2122 }
2123 case CK_AtomicToNonAtomic:
2124 case CK_NonAtomicToAtomic:
2125 case CK_NoOp:
2126 case CK_UserDefinedConversion:
2127 return Visit(const_cast<Expr*>(E));
2128
2129 case CK_BaseToDerived: {
2130 const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
2131 assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!")((DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!"
) ? static_cast<void> (0) : __assert_fail ("DerivedClassDecl && \"BaseToDerived arg isn't a C++ object pointer!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 2131, __PRETTY_FUNCTION__))
;
2132
2133 Address Base = CGF.EmitPointerWithAlignment(E);
2134 Address Derived =
2135 CGF.GetAddressOfDerivedClass(Base, DerivedClassDecl,
2136 CE->path_begin(), CE->path_end(),
2137 CGF.ShouldNullCheckClassCastValue(CE));
2138
2139 // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
2140 // performed and the object is not of the derived type.
2141 if (CGF.sanitizePerformTypeCheck())
2142 CGF.EmitTypeCheck(CodeGenFunction::TCK_DowncastPointer, CE->getExprLoc(),
2143 Derived.getPointer(), DestTy->getPointeeType());
2144
2145 if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast))
2146 CGF.EmitVTablePtrCheckForCast(
2147 DestTy->getPointeeType(), Derived.getPointer(),
2148 /*MayBeNull=*/true, CodeGenFunction::CFITCK_DerivedCast,
2149 CE->getBeginLoc());
2150
2151 return Derived.getPointer();
2152 }
2153 case CK_UncheckedDerivedToBase:
2154 case CK_DerivedToBase: {
2155 // The EmitPointerWithAlignment path does this fine; just discard
2156 // the alignment.
2157 return CGF.EmitPointerWithAlignment(CE).getPointer();
2158 }
2159
2160 case CK_Dynamic: {
2161 Address V = CGF.EmitPointerWithAlignment(E);
2162 const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
2163 return CGF.EmitDynamicCast(V, DCE);
2164 }
2165
2166 case CK_ArrayToPointerDecay:
2167 return CGF.EmitArrayToPointerDecay(E).getPointer();
2168 case CK_FunctionToPointerDecay:
2169 return EmitLValue(E).getPointer(CGF);
2170
2171 case CK_NullToPointer:
2172 if (MustVisitNullValue(E))
2173 CGF.EmitIgnoredExpr(E);
2174
2175 return CGF.CGM.getNullPointer(cast<llvm::PointerType>(ConvertType(DestTy)),
2176 DestTy);
2177
2178 case CK_NullToMemberPointer: {
2179 if (MustVisitNullValue(E))
2180 CGF.EmitIgnoredExpr(E);
2181
2182 const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
2183 return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
2184 }
2185
2186 case CK_ReinterpretMemberPointer:
2187 case CK_BaseToDerivedMemberPointer:
2188 case CK_DerivedToBaseMemberPointer: {
2189 Value *Src = Visit(E);
2190
2191 // Note that the AST doesn't distinguish between checked and
2192 // unchecked member pointer conversions, so we always have to
2193 // implement checked conversions here. This is inefficient when
2194 // actual control flow may be required in order to perform the
2195 // check, which it is for data member pointers (but not member
2196 // function pointers on Itanium and ARM).
2197 return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
2198 }
2199
2200 case CK_ARCProduceObject:
2201 return CGF.EmitARCRetainScalarExpr(E);
2202 case CK_ARCConsumeObject:
2203 return CGF.EmitObjCConsumeObject(E->getType(), Visit(E));
2204 case CK_ARCReclaimReturnedObject:
2205 return CGF.EmitARCReclaimReturnedObject(E, /*allowUnsafe*/ Ignored);
2206 case CK_ARCExtendBlockObject:
2207 return CGF.EmitARCExtendBlockObject(E);
2208
2209 case CK_CopyAndAutoreleaseBlockObject:
2210 return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType());
2211
2212 case CK_FloatingRealToComplex:
2213 case CK_FloatingComplexCast:
2214 case CK_IntegralRealToComplex:
2215 case CK_IntegralComplexCast:
2216 case CK_IntegralComplexToFloatingComplex:
2217 case CK_FloatingComplexToIntegralComplex:
2218 case CK_ConstructorConversion:
2219 case CK_ToUnion:
2220 llvm_unreachable("scalar cast to non-scalar value")::llvm::llvm_unreachable_internal("scalar cast to non-scalar value"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 2220)
;
2221
2222 case CK_LValueToRValue:
2223 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy))((CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy
)) ? static_cast<void> (0) : __assert_fail ("CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy)"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 2223, __PRETTY_FUNCTION__))
;
2224 assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!")((E->isGLValue() && "lvalue-to-rvalue applied to r-value!"
) ? static_cast<void> (0) : __assert_fail ("E->isGLValue() && \"lvalue-to-rvalue applied to r-value!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 2224, __PRETTY_FUNCTION__))
;
2225 return Visit(const_cast<Expr*>(E));
2226
2227 case CK_IntegralToPointer: {
2228 Value *Src = Visit(const_cast<Expr*>(E));
2229
2230 // First, convert to the correct width so that we control the kind of
2231 // extension.
2232 auto DestLLVMTy = ConvertType(DestTy);
2233 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy);
2234 bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
2235 llvm::Value* IntResult =
2236 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
2237
2238 auto *IntToPtr = Builder.CreateIntToPtr(IntResult, DestLLVMTy);
2239
2240 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2241 // Going from integer to pointer that could be dynamic requires reloading
2242 // dynamic information from invariant.group.
2243 if (DestTy.mayBeDynamicClass())
2244 IntToPtr = Builder.CreateLaunderInvariantGroup(IntToPtr);
2245 }
2246 return IntToPtr;
2247 }
2248 case CK_PointerToIntegral: {
2249 assert(!DestTy->isBooleanType() && "bool should use PointerToBool")((!DestTy->isBooleanType() && "bool should use PointerToBool"
) ? static_cast<void> (0) : __assert_fail ("!DestTy->isBooleanType() && \"bool should use PointerToBool\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 2249, __PRETTY_FUNCTION__))
;
2250 auto *PtrExpr = Visit(E);
2251
2252 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2253 const QualType SrcType = E->getType();
2254
2255 // Casting to integer requires stripping dynamic information as it does
2256 // not carries it.
2257 if (SrcType.mayBeDynamicClass())
2258 PtrExpr = Builder.CreateStripInvariantGroup(PtrExpr);
2259 }
2260
2261 return Builder.CreatePtrToInt(PtrExpr, ConvertType(DestTy));
2262 }
2263 case CK_ToVoid: {
2264 CGF.EmitIgnoredExpr(E);
2265 return nullptr;
2266 }
2267 case CK_MatrixCast: {
2268 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2269 CE->getExprLoc());
2270 }
2271 case CK_VectorSplat: {
2272 llvm::Type *DstTy = ConvertType(DestTy);
2273 Value *Elt = Visit(const_cast<Expr*>(E));
2274 // Splat the element across to all elements
2275 unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
2276 return Builder.CreateVectorSplat(NumElements, Elt, "splat");
2277 }
2278
2279 case CK_FixedPointCast:
2280 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2281 CE->getExprLoc());
2282
2283 case CK_FixedPointToBoolean:
2284 assert(E->getType()->isFixedPointType() &&((E->getType()->isFixedPointType() && "Expected src type to be fixed point type"
) ? static_cast<void> (0) : __assert_fail ("E->getType()->isFixedPointType() && \"Expected src type to be fixed point type\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 2285, __PRETTY_FUNCTION__))
2285 "Expected src type to be fixed point type")((E->getType()->isFixedPointType() && "Expected src type to be fixed point type"
) ? static_cast<void> (0) : __assert_fail ("E->getType()->isFixedPointType() && \"Expected src type to be fixed point type\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 2285, __PRETTY_FUNCTION__))
;
2286 assert(DestTy->isBooleanType() && "Expected dest type to be boolean type")((DestTy->isBooleanType() && "Expected dest type to be boolean type"
) ? static_cast<void> (0) : __assert_fail ("DestTy->isBooleanType() && \"Expected dest type to be boolean type\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 2286, __PRETTY_FUNCTION__))
;
2287 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2288 CE->getExprLoc());
2289
2290 case CK_FixedPointToIntegral:
2291 assert(E->getType()->isFixedPointType() &&((E->getType()->isFixedPointType() && "Expected src type to be fixed point type"
) ? static_cast<void> (0) : __assert_fail ("E->getType()->isFixedPointType() && \"Expected src type to be fixed point type\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 2292, __PRETTY_FUNCTION__))
2292 "Expected src type to be fixed point type")((E->getType()->isFixedPointType() && "Expected src type to be fixed point type"
) ? static_cast<void> (0) : __assert_fail ("E->getType()->isFixedPointType() && \"Expected src type to be fixed point type\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 2292, __PRETTY_FUNCTION__))
;
2293 assert(DestTy->isIntegerType() && "Expected dest type to be an integer")((DestTy->isIntegerType() && "Expected dest type to be an integer"
) ? static_cast<void> (0) : __assert_fail ("DestTy->isIntegerType() && \"Expected dest type to be an integer\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 2293, __PRETTY_FUNCTION__))
;
2294 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2295 CE->getExprLoc());
2296
2297 case CK_IntegralToFixedPoint:
2298 assert(E->getType()->isIntegerType() &&((E->getType()->isIntegerType() && "Expected src type to be an integer"
) ? static_cast<void> (0) : __assert_fail ("E->getType()->isIntegerType() && \"Expected src type to be an integer\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 2299, __PRETTY_FUNCTION__))
2299 "Expected src type to be an integer")((E->getType()->isIntegerType() && "Expected src type to be an integer"
) ? static_cast<void> (0) : __assert_fail ("E->getType()->isIntegerType() && \"Expected src type to be an integer\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 2299, __PRETTY_FUNCTION__))
;
2300 assert(DestTy->isFixedPointType() &&((DestTy->isFixedPointType() && "Expected dest type to be fixed point type"
) ? static_cast<void> (0) : __assert_fail ("DestTy->isFixedPointType() && \"Expected dest type to be fixed point type\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 2301, __PRETTY_FUNCTION__))
2301 "Expected dest type to be fixed point type")((DestTy->isFixedPointType() && "Expected dest type to be fixed point type"
) ? static_cast<void> (0) : __assert_fail ("DestTy->isFixedPointType() && \"Expected dest type to be fixed point type\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 2301, __PRETTY_FUNCTION__))
;
2302 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2303 CE->getExprLoc());
2304
2305 case CK_IntegralCast: {
2306 ScalarConversionOpts Opts;
2307 if (auto *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
2308 if (!ICE->isPartOfExplicitCast())
2309 Opts = ScalarConversionOpts(CGF.SanOpts);
2310 }
2311 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2312 CE->getExprLoc(), Opts);
2313 }
2314 case CK_IntegralToFloating:
2315 case CK_FloatingToIntegral:
2316 case CK_FloatingCast:
2317 case CK_FixedPointToFloating:
2318 case CK_FloatingToFixedPoint: {
2319 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2320 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2321 CE->getExprLoc());
2322 }
2323 case CK_BooleanToSignedIntegral: {
2324 ScalarConversionOpts Opts;
2325 Opts.TreatBooleanAsSigned = true;
2326 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2327 CE->getExprLoc(), Opts);
2328 }
2329 case CK_IntegralToBoolean:
2330 return EmitIntToBoolConversion(Visit(E));
2331 case CK_PointerToBoolean:
2332 return EmitPointerToBoolConversion(Visit(E), E->getType());
2333 case CK_FloatingToBoolean: {
2334 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2335 return EmitFloatToBoolConversion(Visit(E));
2336 }
2337 case CK_MemberPointerToBoolean: {
2338 llvm::Value *MemPtr = Visit(E);
2339 const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
2340 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
2341 }
2342
2343 case CK_FloatingComplexToReal:
2344 case CK_IntegralComplexToReal:
2345 return CGF.EmitComplexExpr(E, false, true).first;
2346
2347 case CK_FloatingComplexToBoolean:
2348 case CK_IntegralComplexToBoolean: {
2349 CodeGenFunction::ComplexPairTy V = CGF.EmitComplexExpr(E);
2350
2351 // TODO: kill this function off, inline appropriate case here
2352 return EmitComplexToScalarConversion(V, E->getType(), DestTy,
2353 CE->getExprLoc());
2354 }
2355
2356 case CK_ZeroToOCLOpaqueType: {
2357 assert((DestTy->isEventT() || DestTy->isQueueT() ||(((DestTy->isEventT() || DestTy->isQueueT() || DestTy->
isOCLIntelSubgroupAVCType()) && "CK_ZeroToOCLEvent cast on non-event type"
) ? static_cast<void> (0) : __assert_fail ("(DestTy->isEventT() || DestTy->isQueueT() || DestTy->isOCLIntelSubgroupAVCType()) && \"CK_ZeroToOCLEvent cast on non-event type\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 2359, __PRETTY_FUNCTION__))
2358 DestTy->isOCLIntelSubgroupAVCType()) &&(((DestTy->isEventT() || DestTy->isQueueT() || DestTy->
isOCLIntelSubgroupAVCType()) && "CK_ZeroToOCLEvent cast on non-event type"
) ? static_cast<void> (0) : __assert_fail ("(DestTy->isEventT() || DestTy->isQueueT() || DestTy->isOCLIntelSubgroupAVCType()) && \"CK_ZeroToOCLEvent cast on non-event type\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 2359, __PRETTY_FUNCTION__))
2359 "CK_ZeroToOCLEvent cast on non-event type")(((DestTy->isEventT() || DestTy->isQueueT() || DestTy->
isOCLIntelSubgroupAVCType()) && "CK_ZeroToOCLEvent cast on non-event type"
) ? static_cast<void> (0) : __assert_fail ("(DestTy->isEventT() || DestTy->isQueueT() || DestTy->isOCLIntelSubgroupAVCType()) && \"CK_ZeroToOCLEvent cast on non-event type\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 2359, __PRETTY_FUNCTION__))
;
2360 return llvm::Constant::getNullValue(ConvertType(DestTy));
2361 }
2362
2363 case CK_IntToOCLSampler:
2364 return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF);
2365
2366 } // end of switch
2367
2368 llvm_unreachable("unknown scalar cast")::llvm::llvm_unreachable_internal("unknown scalar cast", "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 2368)
;
2369}
2370
// VisitStmtExpr - Emit a GNU statement expression ({ ...; value; }). The
// compound statement is emitted into a temporary alloca when the result type
// is non-void; the scalar result is then loaded from it. Returns nullptr
// when there is no result (invalid alloca, e.g. void type).
2371Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
2372 CodeGenFunction::StmtExprEvaluation eval(CGF);
2373 Address RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(),
2374 !E->getType()->isVoidType());
2375 if (!RetAlloca.isValid())
2376 return nullptr;
2377 return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()),
2378 E->getExprLoc());
}
2380
// VisitExprWithCleanups - Emit the subexpression inside a cleanups scope and
// force the cleanups before returning the value.
2381Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
2382 CodeGenFunction::RunCleanupsScope Scope(CGF);
2383 Value *V = Visit(E->getSubExpr());
2384 // Defend against dominance problems caused by jumps out of expression
2385 // evaluation through the shared cleanup block.
2386 Scope.ForceCleanup({&V});
2387 return V;
2388}
2389
2390//===----------------------------------------------------------------------===//
2391// Unary Operators
2392//===----------------------------------------------------------------------===//
2393
// createBinOpInfoFromIncDec - Package a ++/-- operation as an equivalent
// add/sub-by-1 BinOpInfo so it can be routed through the overflow-checked
// binary-operator emission path.
2394static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E,
2395 llvm::Value *InVal, bool IsInc,
2396 FPOptions FPFeatures) {
2397 BinOpInfo BinOp;
2398 BinOp.LHS = InVal;
// RHS is the constant 1 of the operand's type; IsInc selects Add vs Sub.
2399 BinOp.RHS = llvm::ConstantInt::get(InVal->getType(), 1, false);
2400 BinOp.Ty = E->getType();
2401 BinOp.Opcode = IsInc ? BO_Add : BO_Sub;
2402 BinOp.FPFeatures = FPFeatures;
2403 BinOp.E = E;
2404 return BinOp;
2405}
2406
// EmitIncDecConsiderOverflowBehavior - Emit a signed integer ++/-- honoring
// the language's signed-overflow mode: plain add when overflow is defined,
// nsw add when it is undefined (and the overflow sanitizer is off), and a
// checked binop when trapping/sanitizing and the operation can overflow.
2407llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
2408 const UnaryOperator *E, llvm::Value *InVal, bool IsInc) {
// Signed +1 / -1 of the operand's type covers both increment and decrement.
2409 llvm::Value *Amount =
2410 llvm::ConstantInt::get(InVal->getType(), IsInc ? 1 : -1, true);
2411 StringRef Name = IsInc ? "inc" : "dec";
2412 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
2413 case LangOptions::SOB_Defined:
2414 return Builder.CreateAdd(InVal, Amount, Name);
2415 case LangOptions::SOB_Undefined:
2416 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
2417 return Builder.CreateNSWAdd(InVal, Amount, Name);
// With the overflow sanitizer enabled, deliberately fall through to the
// trapping/checked path.
2418 LLVM_FALLTHROUGH[[gnu::fallthrough]];
2419 case LangOptions::SOB_Trapping:
2420 if (!E->canOverflow())
2421 return Builder.CreateNSWAdd(InVal, Amount, Name);
2422 return EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(
2423 E, InVal, IsInc, E->getFPFeaturesInEffect(CGF.getLangOpts())));
2424 }
2425 llvm_unreachable("Unknown SignedOverflowBehaviorTy")::llvm::llvm_unreachable_internal("Unknown SignedOverflowBehaviorTy"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 2425)
;
2426}
2427
2428namespace {
2429/// Handles check and update for lastprivate conditional variables.
///
/// RAII guard: on destruction, when OpenMP is enabled, notifies the OpenMP
/// runtime so it can emit the lastprivate-conditional update for the
/// unary operator's subexpression. Does nothing outside OpenMP mode.
2430class OMPLastprivateConditionalUpdateRAII {
2431private:
2432 CodeGenFunction &CGF;
2433 const UnaryOperator *E;
2434
2435public:
2436 OMPLastprivateConditionalUpdateRAII(CodeGenFunction &CGF,
2437 const UnaryOperator *E)
2438 : CGF(CGF), E(E) {}
2439 ~OMPLastprivateConditionalUpdateRAII() {
2440 if (CGF.getLangOpts().OpenMP)
2441 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(
2442 CGF, E->getSubExpr());
2443 }
2444};
2445} // namespace
2446
/// Emit a scalar pre/post increment or decrement (++x, x++, --x, x--).
///
/// \param E      the unary operator being emitted.
/// \param LV     the lvalue of the operand; the updated value is stored back
///               through it (unless the atomicrmw fast paths return early).
/// \param isInc  true for ++, false for --.
/// \param isPre  true for prefix (return the updated value), false for
///               postfix (return the value originally loaded).
llvm::Value *
ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                                           bool isInc, bool isPre) {
  // On scope exit, give the OpenMP runtime a chance to record a
  // lastprivate-conditional update (no-op when OpenMP is off).
  OMPLastprivateConditionalUpdateRAII OMPRegion(CGF, E);
  QualType type = E->getSubExpr()->getType();
  llvm::PHINode *atomicPHI = nullptr;
  llvm::Value *value;  // updated value (prefix result)
  llvm::Value *input;  // value as originally loaded (postfix result)

  int amount = (isInc ? 1 : -1);
  bool isSubtraction = !isInc;

  if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
    type = atomicTy->getValueType();
    if (isInc && type->isBooleanType()) {
      llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type);
      if (isPre) {
        Builder.CreateStore(True, LV.getAddress(CGF), LV.isVolatileQualified())
            ->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent);
        return Builder.getTrue();
      }
      // For atomic bool increment, we just store true and return it for
      // preincrement, do an atomic swap with true for postincrement
      return Builder.CreateAtomicRMW(
          llvm::AtomicRMWInst::Xchg, LV.getPointer(CGF), True,
          llvm::AtomicOrdering::SequentiallyConsistent);
    }
    // Special case for atomic increment / decrement on integers, emit
    // atomicrmw instructions. We skip this if we want to be doing overflow
    // checking, and fall into the slow path with the atomic cmpxchg loop.
    if (!type->isBooleanType() && type->isIntegerType() &&
        !(type->isUnsignedIntegerType() &&
          CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
        CGF.getLangOpts().getSignedOverflowBehavior() !=
            LangOptions::SOB_Trapping) {
      llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add :
        llvm::AtomicRMWInst::Sub;
      llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add :
        llvm::Instruction::Sub;
      llvm::Value *amt = CGF.EmitToMemory(
          llvm::ConstantInt::get(ConvertType(type), 1, true), type);
      llvm::Value *old =
          Builder.CreateAtomicRMW(aop, LV.getPointer(CGF), amt,
                                  llvm::AtomicOrdering::SequentiallyConsistent);
      // atomicrmw yields the OLD value, so a prefix op recomputes the new
      // value; a postfix op wants the old value as-is.
      return isPre ? Builder.CreateBinOp(op, old, amt) : old;
    }
    value = EmitLoadOfLValue(LV, E->getExprLoc());
    input = value;
    // For every other atomic operation, we need to emit a load-op-cmpxchg loop
    llvm::BasicBlock *startBB = Builder.GetInsertBlock();
    llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
    value = CGF.EmitToMemory(value, type);
    Builder.CreateBr(opBB);
    Builder.SetInsertPoint(opBB);
    // The PHI's second incoming edge (the cmpxchg failure value) is added in
    // the `if (atomicPHI)` epilogue at the bottom of this function.
    atomicPHI = Builder.CreatePHI(value->getType(), 2);
    atomicPHI->addIncoming(value, startBB);
    value = atomicPHI;
  } else {
    value = EmitLoadOfLValue(LV, E->getExprLoc());
    input = value;
  }

  // Special case of integer increment that we have to check first: bool++.
  // Due to promotion rules, we get:
  //   bool++ -> bool = bool + 1
  //          -> bool = (int)bool + 1
  //          -> bool = ((int)bool + 1 != 0)
  // An interesting aspect of this is that increment is always true.
  // Decrement does not have this property.
  if (isInc && type->isBooleanType()) {
    value = Builder.getTrue();

  // Most common case by far: integer increment.
  } else if (type->isIntegerType()) {
    QualType promotedType;
    bool canPerformLossyDemotionCheck = false;
    if (type->isPromotableIntegerType()) {
      promotedType = CGF.getContext().getPromotedIntegerType(type);
      assert(promotedType != type && "Shouldn't promote to the same type.");
      canPerformLossyDemotionCheck = true;
      canPerformLossyDemotionCheck &=
          CGF.getContext().getCanonicalType(type) !=
          CGF.getContext().getCanonicalType(promotedType);
      canPerformLossyDemotionCheck &=
          PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
              type, promotedType);
      assert((!canPerformLossyDemotionCheck ||
              type->isSignedIntegerOrEnumerationType() ||
              promotedType->isSignedIntegerOrEnumerationType() ||
              ConvertType(type)->getScalarSizeInBits() ==
                  ConvertType(promotedType)->getScalarSizeInBits()) &&
             "The following check expects that if we do promotion to different "
             "underlying canonical type, at least one of the types (either "
             "base or promoted) will be signed, or the bitwidths will match.");
    }
    if (CGF.SanOpts.hasOneOf(
            SanitizerKind::ImplicitIntegerArithmeticValueChange) &&
        canPerformLossyDemotionCheck) {
      // While `x += 1` (for `x` with width less than int) is modeled as
      // promotion+arithmetics+demotion, and we can catch lossy demotion with
      // ease; inc/dec with width less than int can't overflow because of
      // promotion rules, so we omit promotion+demotion, which means that we can
      // not catch lossy "demotion". Because we still want to catch these cases
      // when the sanitizer is enabled, we perform the promotion, then perform
      // the increment/decrement in the wider type, and finally
      // perform the demotion. This will catch lossy demotions.

      value = EmitScalarConversion(value, type, promotedType, E->getExprLoc());
      Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
      value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
      // Do pass non-default ScalarConversionOpts so that sanitizer check is
      // emitted.
      value = EmitScalarConversion(value, promotedType, type, E->getExprLoc(),
                                   ScalarConversionOpts(CGF.SanOpts));

      // Note that signed integer inc/dec with width less than int can't
      // overflow because of promotion rules; we're just eliding a few steps
      // here.
    } else if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
      value = EmitIncDecConsiderOverflowBehavior(E, value, isInc);
    } else if (E->canOverflow() && type->isUnsignedIntegerType() &&
               CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) {
      value = EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(
          E, value, isInc, E->getFPFeaturesInEffect(CGF.getLangOpts())));
    } else {
      // No overflow concerns: plain add of +1/-1.
      llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
      value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
    }

  // Next most common: pointer increment.
  } else if (const PointerType *ptr = type->getAs<PointerType>()) {
    QualType type = ptr->getPointeeType();

    // VLA types don't have constant size.
    if (const VariableArrayType *vla
          = CGF.getContext().getAsVariableArrayType(type)) {
      llvm::Value *numElts = CGF.getVLASize(vla).NumElts;
      if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
      if (CGF.getLangOpts().isSignedOverflowDefined())
        value = Builder.CreateGEP(value, numElts, "vla.inc");
      else
        value = CGF.EmitCheckedInBoundsGEP(
            value, numElts, /*SignedIndices=*/false, isSubtraction,
            E->getExprLoc(), "vla.inc");

    // Arithmetic on function pointers (!) is just +-1.
    } else if (type->isFunctionType()) {
      llvm::Value *amt = Builder.getInt32(amount);

      // GEP over i8* since function types have no meaningful size to index by.
      value = CGF.EmitCastToVoidPtr(value);
      if (CGF.getLangOpts().isSignedOverflowDefined())
        value = Builder.CreateGEP(value, amt, "incdec.funcptr");
      else
        value = CGF.EmitCheckedInBoundsGEP(value, amt, /*SignedIndices=*/false,
                                           isSubtraction, E->getExprLoc(),
                                           "incdec.funcptr");
      value = Builder.CreateBitCast(value, input->getType());

    // For everything else, we can just do a simple increment.
    } else {
      llvm::Value *amt = Builder.getInt32(amount);
      if (CGF.getLangOpts().isSignedOverflowDefined())
        value = Builder.CreateGEP(value, amt, "incdec.ptr");
      else
        value = CGF.EmitCheckedInBoundsGEP(value, amt, /*SignedIndices=*/false,
                                           isSubtraction, E->getExprLoc(),
                                           "incdec.ptr");
    }

  // Vector increment/decrement.
  } else if (type->isVectorType()) {
    if (type->hasIntegerRepresentation()) {
      llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount);

      value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
    } else {
      value = Builder.CreateFAdd(
          value,
          llvm::ConstantFP::get(value->getType(), amount),
          isInc ? "inc" : "dec");
    }

  // Floating point.
  } else if (type->isRealFloatingType()) {
    // Add the inc/dec to the real part.
    llvm::Value *amt;
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);

    if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
      // Another special case: half FP increment should be done via float
      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
        value = Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
                                 CGF.CGM.FloatTy),
            input, "incdec.conv");
      } else {
        value = Builder.CreateFPExt(input, CGF.CGM.FloatTy, "incdec.conv");
      }
    }

    if (value->getType()->isFloatTy())
      amt = llvm::ConstantFP::get(VMContext,
                                  llvm::APFloat(static_cast<float>(amount)));
    else if (value->getType()->isDoubleTy())
      amt = llvm::ConstantFP::get(VMContext,
                                  llvm::APFloat(static_cast<double>(amount)));
    else {
      // Remaining types are Half, LongDouble or __float128. Convert from float.
      llvm::APFloat F(static_cast<float>(amount));
      bool ignored;
      const llvm::fltSemantics *FS;
      // Don't use getFloatTypeSemantics because Half isn't
      // necessarily represented using the "half" LLVM type.
      if (value->getType()->isFP128Ty())
        FS = &CGF.getTarget().getFloat128Format();
      else if (value->getType()->isHalfTy())
        FS = &CGF.getTarget().getHalfFormat();
      else
        FS = &CGF.getTarget().getLongDoubleFormat();
      F.convert(*FS, llvm::APFloat::rmTowardZero, &ignored);
      amt = llvm::ConstantFP::get(VMContext, F);
    }
    value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec");

    // If we widened half to float above, truncate back to the storage type.
    if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
        value = Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16,
                                 CGF.CGM.FloatTy),
            value, "incdec.conv");
      } else {
        value = Builder.CreateFPTrunc(value, input->getType(), "incdec.conv");
      }
    }

  // Fixed-point types.
  } else if (type->isFixedPointType()) {
    // Fixed-point types are tricky. In some cases, it isn't possible to
    // represent a 1 or a -1 in the type at all. Piggyback off of
    // EmitFixedPointBinOp to avoid having to reimplement saturation.
    BinOpInfo Info;
    Info.E = E;
    Info.Ty = E->getType();
    Info.Opcode = isInc ? BO_Add : BO_Sub;
    Info.LHS = value;
    Info.RHS = llvm::ConstantInt::get(value->getType(), 1, false);
    // If the type is signed, it's better to represent this as +(-1) or -(-1),
    // since -1 is guaranteed to be representable.
    if (type->isSignedFixedPointType()) {
      Info.Opcode = isInc ? BO_Sub : BO_Add;
      Info.RHS = Builder.CreateNeg(Info.RHS);
    }
    // Now, convert from our invented integer literal to the type of the unary
    // op. This will upscale and saturate if necessary. This value can become
    // undef in some cases.
    llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
    auto DstSema = CGF.getContext().getFixedPointSemantics(Info.Ty);
    Info.RHS = FPBuilder.CreateIntegerToFixed(Info.RHS, true, DstSema);
    value = EmitFixedPointBinOp(Info);

  // Objective-C pointer types.
  } else {
    const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
    value = CGF.EmitCastToVoidPtr(value);

    // Step by the size of the pointee object (negated for decrement).
    CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType());
    if (!isInc) size = -size;
    llvm::Value *sizeValue =
      llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity());

    if (CGF.getLangOpts().isSignedOverflowDefined())
      value = Builder.CreateGEP(value, sizeValue, "incdec.objptr");
    else
      value = CGF.EmitCheckedInBoundsGEP(value, sizeValue,
                                         /*SignedIndices=*/false, isSubtraction,
                                         E->getExprLoc(), "incdec.objptr");
    value = Builder.CreateBitCast(value, input->getType());
  }

  // Finish the cmpxchg loop begun in the atomic slow path: try to publish
  // the new value; on failure, feed the observed value back into the PHI
  // and retry.
  if (atomicPHI) {
    llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
    llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
    auto Pair = CGF.EmitAtomicCompareExchange(
        LV, RValue::get(atomicPHI), RValue::get(value), E->getExprLoc());
    llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), type);
    llvm::Value *success = Pair.second;
    atomicPHI->addIncoming(old, curBlock);
    Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
    Builder.SetInsertPoint(contBB);
    return isPre ? value : input;
  }

  // Store the updated result through the lvalue.
  if (LV.isBitField())
    CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value);
  else
    CGF.EmitStoreThroughLValue(RValue::get(value), LV);

  // If this is a postinc, return the value read from memory, otherwise use the
  // updated value.
  return isPre ? value : input;
}
2749
2750
2751
2752Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
2753 TestAndClearIgnoreResultAssign();
2754 Value *Op = Visit(E->getSubExpr());
2755
2756 // Generate a unary FNeg for FP ops.
2757 if (Op->getType()->isFPOrFPVectorTy())
1
Taking false branch
2758 return Builder.CreateFNeg(Op, "fneg");
2759
2760 // Emit unary minus with EmitSub so we handle overflow cases etc.
2761 BinOpInfo BinOp;
2762 BinOp.RHS = Op;
2763 BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
2764 BinOp.Ty = E->getType();
2765 BinOp.Opcode = BO_Sub;
2766 BinOp.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
2767 BinOp.E = E;
2768 return EmitSub(BinOp);
2
Calling 'ScalarExprEmitter::EmitSub'
2769}
2770
2771Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
2772 TestAndClearIgnoreResultAssign();
2773 Value *Op = Visit(E->getSubExpr());
2774 return Builder.CreateNot(Op, "neg");
2775}
2776
2777Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
2778 // Perform vector logical not on comparison with zero vector.
2779 if (E->getType()->isVectorType() &&
2780 E->getType()->castAs<VectorType>()->getVectorKind() ==
2781 VectorType::GenericVector) {
2782 Value *Oper = Visit(E->getSubExpr());
2783 Value *Zero = llvm::Constant::getNullValue(Oper->getType());
2784 Value *Result;
2785 if (Oper->getType()->isFPOrFPVectorTy()) {
2786 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
2787 CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
2788 Result = Builder.CreateFCmp(llvm::CmpInst::FCMP_OEQ, Oper, Zero, "cmp");
2789 } else
2790 Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp");
2791 return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
2792 }
2793
2794 // Compare operand to zero.
2795 Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());
2796
2797 // Invert value.
2798 // TODO: Could dynamically modify easy computations here. For example, if
2799 // the operand is an icmp ne, turn into icmp eq.
2800 BoolVal = Builder.CreateNot(BoolVal, "lnot");
2801
2802 // ZExt result to the expr type.
2803 return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
2804}
2805
/// Emit __builtin_offsetof. First attempts constant folding; otherwise walks
/// the offsetof components (array indices, fields, base classes), threading
/// the current subobject type through CurrentType and accumulating the byte
/// offset in Result.
Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
  // Try folding the offsetof to a constant.
  Expr::EvalResult EVResult;
  if (E->EvaluateAsInt(EVResult, CGF.getContext())) {
    llvm::APSInt Value = EVResult.Val.getInt();
    return Builder.getInt(Value);
  }

  // Loop over the components of the offsetof to compute the value.
  unsigned n = E->getNumComponents();
  llvm::Type* ResultType = ConvertType(E->getType());
  llvm::Value* Result = llvm::Constant::getNullValue(ResultType);
  QualType CurrentType = E->getTypeSourceInfo()->getType();
  for (unsigned i = 0; i != n; ++i) {
    OffsetOfNode ON = E->getComponent(i);
    llvm::Value *Offset = nullptr;
    switch (ON.getKind()) {
    case OffsetOfNode::Array: {
      // Compute the index
      Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex());
      llvm::Value* Idx = CGF.EmitScalarExpr(IdxExpr);
      bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType();
      Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv");

      // Save the element type
      CurrentType =
          CGF.getContext().getAsArrayType(CurrentType)->getElementType();

      // Compute the element size
      llvm::Value* ElemSize = llvm::ConstantInt::get(ResultType,
          CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity());

      // Multiply out to compute the result
      Offset = Builder.CreateMul(Idx, ElemSize);
      break;
    }

    case OffsetOfNode::Field: {
      FieldDecl *MemberDecl = ON.getField();
      RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
      const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);

      // Compute the index of the field in its parent.
      unsigned i = 0;
      // FIXME: It would be nice if we didn't have to loop here!
      for (RecordDecl::field_iterator Field = RD->field_begin(),
                                      FieldEnd = RD->field_end();
           Field != FieldEnd; ++Field, ++i) {
        if (*Field == MemberDecl)
          break;
      }
      assert(i < RL.getFieldCount() && "offsetof field in wrong type");

      // Compute the offset to the field
      // (field offsets are stored in bits; convert to char units).
      int64_t OffsetInt = RL.getFieldOffset(i) /
                          CGF.getContext().getCharWidth();
      Offset = llvm::ConstantInt::get(ResultType, OffsetInt);

      // Save the element type.
      CurrentType = MemberDecl->getType();
      break;
    }

    case OffsetOfNode::Identifier:
      // Dependent offsetofs are resolved before CodeGen; this is unreachable.
      llvm_unreachable("dependent __builtin_offsetof");

    case OffsetOfNode::Base: {
      if (ON.getBase()->isVirtual()) {
        CGF.ErrorUnsupported(E, "virtual base in offsetof");
        continue;
      }

      RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
      const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);

      // Save the element type.
      CurrentType = ON.getBase()->getType();

      // Compute the offset to the base.
      const RecordType *BaseRT = CurrentType->getAs<RecordType>();
      CXXRecordDecl *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl());
      CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD);
      Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity());
      break;
    }
    }
    Result = Builder.CreateAdd(Result, Offset);
  }
  return Result;
}
2896
2897/// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type of
2898/// argument of the sizeof expression as an integer.
2899Value *
2900ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
2901 const UnaryExprOrTypeTraitExpr *E) {
2902 QualType TypeToSize = E->getTypeOfArgument();
2903 if (E->getKind() == UETT_SizeOf) {
2904 if (const VariableArrayType *VAT =
2905 CGF.getContext().getAsVariableArrayType(TypeToSize)) {
2906 if (E->isArgumentType()) {
2907 // sizeof(type) - make sure to emit the VLA size.
2908 CGF.EmitVariablyModifiedType(TypeToSize);
2909 } else {
2910 // C99 6.5.3.4p2: If the argument is an expression of type
2911 // VLA, it is evaluated.
2912 CGF.EmitIgnoredExpr(E->getArgumentExpr());
2913 }
2914
2915 auto VlaSize = CGF.getVLASize(VAT);
2916 llvm::Value *size = VlaSize.NumElts;
2917
2918 // Scale the number of non-VLA elements by the non-VLA element size.
2919 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(VlaSize.Type);
2920 if (!eltSize.isOne())
2921 size = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), size);
2922
2923 return size;
2924 }
2925 } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) {
2926 auto Alignment =
2927 CGF.getContext()
2928 .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
2929 E->getTypeOfArgument()->getPointeeType()))
2930 .getQuantity();
2931 return llvm::ConstantInt::get(CGF.SizeTy, Alignment);
2932 }
2933
2934 // If this isn't sizeof(vla), the result must be constant; use the constant
2935 // folding logic so we don't have to duplicate it here.
2936 return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext()));
2937}
2938
2939Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) {
2940 Expr *Op = E->getSubExpr();
2941 if (Op->getType()->isAnyComplexType()) {
2942 // If it's an l-value, load through the appropriate subobject l-value.
2943 // Note that we have to ask E because Op might be an l-value that
2944 // this won't work for, e.g. an Obj-C property.
2945 if (E->isGLValue())
2946 return CGF.EmitLoadOfLValue(CGF.EmitLValue(E),
2947 E->getExprLoc()).getScalarVal();
2948
2949 // Otherwise, calculate and project.
2950 return CGF.EmitComplexExpr(Op, false, true).first;
2951 }
2952
2953 return Visit(Op);
2954}
2955
2956Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
2957 Expr *Op = E->getSubExpr();
2958 if (Op->getType()->isAnyComplexType()) {
2959 // If it's an l-value, load through the appropriate subobject l-value.
2960 // Note that we have to ask E because Op might be an l-value that
2961 // this won't work for, e.g. an Obj-C property.
2962 if (Op->isGLValue())
2963 return CGF.EmitLoadOfLValue(CGF.EmitLValue(E),
2964 E->getExprLoc()).getScalarVal();
2965
2966 // Otherwise, calculate and project.
2967 return CGF.EmitComplexExpr(Op, true, false).second;
2968 }
2969
2970 // __imag on a scalar returns zero. Emit the subexpr to ensure side
2971 // effects are evaluated, but not the actual value.
2972 if (Op->isGLValue())
2973 CGF.EmitLValue(Op);
2974 else
2975 CGF.EmitScalarExpr(Op, true);
2976 return llvm::Constant::getNullValue(ConvertType(E->getType()));
2977}
2978
2979//===----------------------------------------------------------------------===//
2980// Binary Operators
2981//===----------------------------------------------------------------------===//
2982
2983BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) {
2984 TestAndClearIgnoreResultAssign();
2985 BinOpInfo Result;
2986 Result.LHS = Visit(E->getLHS());
2987 Result.RHS = Visit(E->getRHS());
2988 Result.Ty = E->getType();
2989 Result.Opcode = E->getOpcode();
2990 Result.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
2991 Result.E = E;
2992 return Result;
2993}
2994
// Emit a compound assignment such as "lhs += rhs" and return the LHS l-value.
// 'Func' is the member function that emits the computed binary operation
// (e.g. EmitAdd); 'Result' is an out-parameter that receives the computed
// value after conversion back to the LHS type. Complex computation result
// types are delegated to EmitScalarCompoundAssignWithComplex.
2995LValue ScalarExprEmitter::EmitCompoundAssignLValue(
2996 const CompoundAssignOperator *E,
2997 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
2998 Value *&Result) {
2999 QualType LHSTy = E->getLHS()->getType();
3000 BinOpInfo OpInfo;
3001
3002 if (E->getComputationResultType()->isAnyComplexType())
3003 return CGF.EmitScalarCompoundAssignWithComplex(E, Result);
3004
3005 // Emit the RHS first. __block variables need to have the rhs evaluated
3006 // first, plus this should improve codegen a little.
3007 OpInfo.RHS = Visit(E->getRHS());
3008 OpInfo.Ty = E->getComputationResultType();
3009 OpInfo.Opcode = E->getOpcode();
3010 OpInfo.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
3011 OpInfo.E = E;
3012 // Load/convert the LHS.
3013 LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
3014
// For an _Atomic LHS: integer operations that map onto an atomicrmw
// instruction are lowered directly to atomicrmw below; everything else
// falls back to a load / compute / cmpxchg retry loop built around
// 'atomicPHI'.
3015 llvm::PHINode *atomicPHI = nullptr;
3016 if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {
3017 QualType type = atomicTy->getValueType();
3018 if (!type->isBooleanType() && type->isIntegerType() &&
3019 !(type->isUnsignedIntegerType() &&
3020 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
3021 CGF.getLangOpts().getSignedOverflowBehavior() !=
3022 LangOptions::SOB_Trapping) {
3023 llvm::AtomicRMWInst::BinOp AtomicOp = llvm::AtomicRMWInst::BAD_BINOP;
3024 llvm::Instruction::BinaryOps Op;
3025 switch (OpInfo.Opcode) {
3026 // We don't have atomicrmw operands for *, %, /, <<, >>
3027 case BO_MulAssign: case BO_DivAssign:
3028 case BO_RemAssign:
3029 case BO_ShlAssign:
3030 case BO_ShrAssign:
3031 break;
3032 case BO_AddAssign:
3033 AtomicOp = llvm::AtomicRMWInst::Add;
3034 Op = llvm::Instruction::Add;
3035 break;
3036 case BO_SubAssign:
3037 AtomicOp = llvm::AtomicRMWInst::Sub;
3038 Op = llvm::Instruction::Sub;
3039 break;
3040 case BO_AndAssign:
3041 AtomicOp = llvm::AtomicRMWInst::And;
3042 Op = llvm::Instruction::And;
3043 break;
3044 case BO_XorAssign:
3045 AtomicOp = llvm::AtomicRMWInst::Xor;
3046 Op = llvm::Instruction::Xor;
3047 break;
3048 case BO_OrAssign:
3049 AtomicOp = llvm::AtomicRMWInst::Or;
3050 Op = llvm::Instruction::Or;
3051 break;
3052 default:
// NOTE(review): the text below is an analyzer-expanded rendering of
// llvm_unreachable("Invalid compound assignment type"); in the real source.
3053 llvm_unreachable("Invalid compound assignment type")::llvm::llvm_unreachable_internal("Invalid compound assignment type"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 3053)
;
3054 }
// Direct atomicrmw path: emit the RMW, then recompute the result value
// locally from the old value (atomicrmw returns the value *before* the op).
3055 if (AtomicOp != llvm::AtomicRMWInst::BAD_BINOP) {
3056 llvm::Value *Amt = CGF.EmitToMemory(
3057 EmitScalarConversion(OpInfo.RHS, E->getRHS()->getType(), LHSTy,
3058 E->getExprLoc()),
3059 LHSTy);
3060 Value *OldVal = Builder.CreateAtomicRMW(
3061 AtomicOp, LHSLV.getPointer(CGF), Amt,
3062 llvm::AtomicOrdering::SequentiallyConsistent);
3063
3064 // Since operation is atomic, the result type is guaranteed to be the
3065 // same as the input in LLVM terms.
3066 Result = Builder.CreateBinOp(Op, OldVal, Amt);
3067 return LHSLV;
3068 }
3069 }
3070 // FIXME: For floating point types, we should be saving and restoring the
3071 // floating point environment in the loop.
// Fallback path: load the current value, start the "atomic_op" block, and
// feed the loaded value into atomicPHI; the cmpxchg loop is closed further
// down once the new value has been computed.
3072 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
3073 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
3074 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
3075 OpInfo.LHS = CGF.EmitToMemory(OpInfo.LHS, type);
3076 Builder.CreateBr(opBB);
3077 Builder.SetInsertPoint(opBB);
3078 atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2);
3079 atomicPHI->addIncoming(OpInfo.LHS, startBB);
3080 OpInfo.LHS = atomicPHI;
3081 }
3082 else
3083 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
3084
3085 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, OpInfo.FPFeatures);
3086 SourceLocation Loc = E->getExprLoc();
3087 OpInfo.LHS =
3088 EmitScalarConversion(OpInfo.LHS, LHSTy, E->getComputationLHSType(), Loc);
3089
3090 // Expand the binary operator.
3091 Result = (this->*Func)(OpInfo);
3092
3093 // Convert the result back to the LHS type,
3094 // potentially with Implicit Conversion sanitizer check.
3095 Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy,
3096 Loc, ScalarConversionOpts(CGF.SanOpts));
3097
// Close the cmpxchg retry loop: if the exchange failed, branch back to the
// op block with the freshly observed value feeding the PHI; on success the
// store has already happened, so just continue.
3098 if (atomicPHI) {
3099 llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
3100 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
3101 auto Pair = CGF.EmitAtomicCompareExchange(
3102 LHSLV, RValue::get(atomicPHI), RValue::get(Result), E->getExprLoc());
3103 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), LHSTy);
3104 llvm::Value *success = Pair.second;
3105 atomicPHI->addIncoming(old, curBlock);
3106 Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
3107 Builder.SetInsertPoint(contBB);
3108 return LHSLV;
3109 }
3110
3111 // Store the result value into the LHS lvalue. Bit-fields are handled
3112 // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
3113 // 'An assignment expression has the value of the left operand after the
3114 // assignment...'.
3115 if (LHSLV.isBitField())
3116 CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, &Result);
3117 else
3118 CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV);
3119
// OpenMP: a store to the LHS may require a lastprivate-conditional update.
3120 if (CGF.getLangOpts().OpenMP)
3121 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF,
3122 E->getLHS());
3123 return LHSLV;
3124}
3125
3126Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
3127 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
3128 bool Ignore = TestAndClearIgnoreResultAssign();
3129 Value *RHS = nullptr;
3130 LValue LHS = EmitCompoundAssignLValue(E, Func, RHS);
3131
3132 // If the result is clearly ignored, return now.
3133 if (Ignore)
3134 return nullptr;
3135
3136 // The result of an assignment in C is the assigned r-value.
3137 if (!CGF.getLangOpts().CPlusPlus)
3138 return RHS;
3139
3140 // If the lvalue is non-volatile, return the computed value of the assignment.
3141 if (!LHS.isVolatileQualified())
3142 return RHS;
3143
3144 // Otherwise, reload the value.
3145 return EmitLoadOfLValue(LHS, E->getExprLoc());
3146}
3147
3148void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
3149 const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
3150 SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
3151
3152 if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {
3153 Checks.push_back(std::make_pair(Builder.CreateICmpNE(Ops.RHS, Zero),
3154 SanitizerKind::IntegerDivideByZero));
3155 }
3156
3157 const auto *BO = cast<BinaryOperator>(Ops.E);
3158 if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) &&
3159 Ops.Ty->hasSignedIntegerRepresentation() &&
3160 !IsWidenedIntegerOp(CGF.getContext(), BO->getLHS()) &&
3161 Ops.mayHaveIntegerOverflow()) {
3162 llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
3163
3164 llvm::Value *IntMin =
3165 Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
3166 llvm::Value *NegOne = llvm::Constant::getAllOnesValue(Ty);
3167
3168 llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin);
3169 llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne);
3170 llvm::Value *NotOverflow = Builder.CreateOr(LHSCmp, RHSCmp, "or");
3171 Checks.push_back(
3172 std::make_pair(NotOverflow, SanitizerKind::SignedIntegerOverflow));
3173 }
3174
3175 if (Checks.size() > 0)
3176 EmitBinOpCheck(Checks, Ops);
3177}
3178
3179Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
3180 {
3181 CodeGenFunction::SanitizerScope SanScope(&CGF);
3182 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
3183 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
3184 Ops.Ty->isIntegerType() &&
3185 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
3186 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
3187 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
3188 } else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) &&
3189 Ops.Ty->isRealFloatingType() &&
3190 Ops.mayHaveFloatDivisionByZero()) {
3191 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
3192 llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero);
3193 EmitBinOpCheck(std::make_pair(NonZero, SanitizerKind::FloatDivideByZero),
3194 Ops);
3195 }
3196 }
3197
3198 if (Ops.Ty->isConstantMatrixType()) {
3199 llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
3200 // We need to check the types of the operands of the operator to get the
3201 // correct matrix dimensions.
3202 auto *BO = cast<BinaryOperator>(Ops.E);
3203 (void)BO;
3204 assert(((isa<ConstantMatrixType>(BO->getLHS()->getType()
.getCanonicalType()) && "first operand must be a matrix"
) ? static_cast<void> (0) : __assert_fail ("isa<ConstantMatrixType>(BO->getLHS()->getType().getCanonicalType()) && \"first operand must be a matrix\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 3206, __PRETTY_FUNCTION__))
3205 isa<ConstantMatrixType>(BO->getLHS()->getType().getCanonicalType()) &&((isa<ConstantMatrixType>(BO->getLHS()->getType()
.getCanonicalType()) && "first operand must be a matrix"
) ? static_cast<void> (0) : __assert_fail ("isa<ConstantMatrixType>(BO->getLHS()->getType().getCanonicalType()) && \"first operand must be a matrix\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 3206, __PRETTY_FUNCTION__))
3206 "first operand must be a matrix")((isa<ConstantMatrixType>(BO->getLHS()->getType()
.getCanonicalType()) && "first operand must be a matrix"
) ? static_cast<void> (0) : __assert_fail ("isa<ConstantMatrixType>(BO->getLHS()->getType().getCanonicalType()) && \"first operand must be a matrix\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 3206, __PRETTY_FUNCTION__))
;
3207 assert(BO->getRHS()->getType().getCanonicalType()->isArithmeticType() &&((BO->getRHS()->getType().getCanonicalType()->isArithmeticType
() && "second operand must be an arithmetic type") ? static_cast
<void> (0) : __assert_fail ("BO->getRHS()->getType().getCanonicalType()->isArithmeticType() && \"second operand must be an arithmetic type\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 3208, __PRETTY_FUNCTION__))
3208 "second operand must be an arithmetic type")((BO->getRHS()->getType().getCanonicalType()->isArithmeticType
() && "second operand must be an arithmetic type") ? static_cast
<void> (0) : __assert_fail ("BO->getRHS()->getType().getCanonicalType()->isArithmeticType() && \"second operand must be an arithmetic type\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 3208, __PRETTY_FUNCTION__))
;
3209 return MB.CreateScalarDiv(Ops.LHS, Ops.RHS,
3210 Ops.Ty->hasUnsignedIntegerRepresentation());
3211 }
3212
3213 if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
3214 llvm::Value *Val;
3215 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
3216 Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
3217 if (CGF.getLangOpts().OpenCL &&
3218 !CGF.CGM.getCodeGenOpts().CorrectlyRoundedDivSqrt) {
3219 // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp
3220 // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
3221 // build option allows an application to specify that single precision
3222 // floating-point divide (x/y and 1/x) and sqrt used in the program
3223 // source are correctly rounded.
3224 llvm::Type *ValTy = Val->getType();
3225 if (ValTy->isFloatTy() ||
3226 (isa<llvm::VectorType>(ValTy) &&
3227 cast<llvm::VectorType>(ValTy)->getElementType()->isFloatTy()))
3228 CGF.SetFPAccuracy(Val, 2.5);
3229 }
3230 return Val;
3231 }
3232 else if (Ops.isFixedPointOp())
3233 return EmitFixedPointBinOp(Ops);
3234 else if (Ops.Ty->hasUnsignedIntegerRepresentation())
3235 return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
3236 else
3237 return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
3238}
3239
3240Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
3241 // Rem in C can't be a floating point type: C99 6.5.5p2.
3242 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
3243 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
3244 Ops.Ty->isIntegerType() &&
3245 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
3246 CodeGenFunction::SanitizerScope SanScope(&CGF);
3247 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
3248 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
3249 }
3250
3251 if (Ops.Ty->hasUnsignedIntegerRepresentation())
3252 return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
3253 else
3254 return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
3255}
3256
3257Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
3258 unsigned IID;
3259 unsigned OpID = 0;
3260 SanitizerHandler OverflowKind;
3261
3262 bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
3263 switch (Ops.Opcode) {
3264 case BO_Add:
3265 case BO_AddAssign:
3266 OpID = 1;
3267 IID = isSigned ? llvm::Intrinsic::sadd_with_overflow :
3268 llvm::Intrinsic::uadd_with_overflow;
3269 OverflowKind = SanitizerHandler::AddOverflow;
3270 break;
3271 case BO_Sub:
3272 case BO_SubAssign:
3273 OpID = 2;
3274 IID = isSigned ? llvm::Intrinsic::ssub_with_overflow :
3275 llvm::Intrinsic::usub_with_overflow;
3276 OverflowKind = SanitizerHandler::SubOverflow;
3277 break;
3278 case BO_Mul:
3279 case BO_MulAssign:
3280 OpID = 3;
3281 IID = isSigned ? llvm::Intrinsic::smul_with_overflow :
3282 llvm::Intrinsic::umul_with_overflow;
3283 OverflowKind = SanitizerHandler::MulOverflow;
3284 break;
3285 default:
3286 llvm_unreachable("Unsupported operation for overflow detection")::llvm::llvm_unreachable_internal("Unsupported operation for overflow detection"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 3286)
;
3287 }
3288 OpID <<= 1;
3289 if (isSigned)
3290 OpID |= 1;
3291
3292 CodeGenFunction::SanitizerScope SanScope(&CGF);
3293 llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);
3294
3295 llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy);
3296
3297 Value *resultAndOverflow = Builder.CreateCall(intrinsic, {Ops.LHS, Ops.RHS});
3298 Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
3299 Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);
3300
3301 // Handle overflow with llvm.trap if no custom handler has been specified.
3302 const std::string *handlerName =
3303 &CGF.getLangOpts().OverflowHandler;
3304 if (handlerName->empty()) {
3305 // If the signed-integer-overflow sanitizer is enabled, emit a call to its
3306 // runtime. Otherwise, this is a -ftrapv check, so just emit a trap.
3307 if (!isSigned || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) {
3308 llvm::Value *NotOverflow = Builder.CreateNot(overflow);
3309 SanitizerMask Kind = isSigned ? SanitizerKind::SignedIntegerOverflow
3310 : SanitizerKind::UnsignedIntegerOverflow;
3311 EmitBinOpCheck(std::make_pair(NotOverflow, Kind), Ops);
3312 } else
3313 CGF.EmitTrapCheck(Builder.CreateNot(overflow), OverflowKind);
3314 return result;
3315 }
3316
3317 // Branch in case of overflow.
3318 llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
3319 llvm::BasicBlock *continueBB =
3320 CGF.createBasicBlock("nooverflow", CGF.CurFn, initialBB->getNextNode());
3321 llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
3322
3323 Builder.CreateCondBr(overflow, overflowBB, continueBB);
3324
3325 // If an overflow handler is set, then we want to call it and then use its
3326 // result, if it returns.
3327 Builder.SetInsertPoint(overflowBB);
3328
3329 // Get the overflow handler.
3330 llvm::Type *Int8Ty = CGF.Int8Ty;
3331 llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
3332 llvm::FunctionType *handlerTy =
3333 llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
3334 llvm::FunctionCallee handler =
3335 CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);
3336
3337 // Sign extend the args to 64-bit, so that we can use the same handler for
3338 // all types of overflow.
3339 llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty);
3340 llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty);
3341
3342 // Call the handler with the two arguments, the operation, and the size of
3343 // the result.
3344 llvm::Value *handlerArgs[] = {
3345 lhs,
3346 rhs,
3347 Builder.getInt8(OpID),
3348 Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth())
3349 };
3350 llvm::Value *handlerResult =
3351 CGF.EmitNounwindRuntimeCall(handler, handlerArgs);
3352
3353 // Truncate the result back to the desired size.
3354 handlerResult = Builder.CreateTrunc(handlerResult, opTy);
3355 Builder.CreateBr(continueBB);
3356
3357 Builder.SetInsertPoint(continueBB);
3358 llvm::PHINode *phi = Builder.CreatePHI(opTy, 2);
3359 phi->addIncoming(result, initialBB);
3360 phi->addIncoming(handlerResult, overflowBB);
3361
3362 return phi;
3363}
3364
// NOTE(review): this region is a clang static-analyzer rendering of the
// original source. The bare numeric/step lines (e.g. "6.1",
// "'isSubtraction' is true") and the expanded assert/llvm_unreachable macro
// text are analyzer artifacts, not part of the original code. The region
// contains emitPointerArithmetic, buildFMulAdd, tryEmitFMulAdd, EmitAdd and
// the start of EmitFixedPointBinOp collapsed onto shared lines; the code is
// left byte-identical here.
3365/// Emit pointer + index arithmetic.
3366static Value *emitPointerArithmetic(CodeGenFunction &CGF,
3367 const BinOpInfo &op,
3368 bool isSubtraction) {
3369 // Must have binary (not unary) expr here. Unary pointer
3370 // increment/decrement doesn't use this path.
3371 const BinaryOperator *expr = cast<BinaryOperator>(op.E);
6
Field 'E' is a 'BinaryOperator'
3372
3373 Value *pointer = op.LHS;
3374 Expr *pointerOperand = expr->getLHS();
3375 Value *index = op.RHS;
3376 Expr *indexOperand = expr->getRHS();
3377
3378 // In a subtraction, the LHS is always the pointer.
3379 if (!isSubtraction
6.1
'isSubtraction' is true
6.1
'isSubtraction' is true
6.1
'isSubtraction' is true
6.1
'isSubtraction' is true
6.1
'isSubtraction' is true
&& !pointer->getType()->isPointerTy()) {
3380 std::swap(pointer, index);
3381 std::swap(pointerOperand, indexOperand);
3382 }
3383
3384 bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();
3385
3386 unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
7
The object is a 'IntegerType'
3387 auto &DL = CGF.CGM.getDataLayout();
3388 auto PtrTy = cast<llvm::PointerType>(pointer->getType());
8
The object is a 'PointerType'
3389
3390 // Some versions of glibc and gcc use idioms (particularly in their malloc
3391 // routines) that add a pointer-sized integer (known to be a pointer value)
3392 // to a null pointer in order to cast the value back to an integer or as
3393 // part of a pointer alignment algorithm. This is undefined behavior, but
3394 // we'd like to be able to compile programs that use it.
3395 //
3396 // Normally, we'd generate a GEP with a null-pointer base here in response
3397 // to that code, but it's also UB to dereference a pointer created that
3398 // way. Instead (as an acknowledged hack to tolerate the idiom) we will
3399 // generate a direct cast of the integer value to a pointer.
3400 //
3401 // The idiom (p = nullptr + N) is not met if any of the following are true:
3402 //
3403 // The operation is subtraction.
3404 // The index is not pointer-sized.
3405 // The pointer type is not byte-sized.
3406 //
3407 if (BinaryOperator::isNullPointerArithmeticExtension(CGF.getContext(),
9
Assuming the condition is false
10
Taking false branch
3408 op.Opcode,
3409 expr->getLHS(),
3410 expr->getRHS()))
3411 return CGF.Builder.CreateIntToPtr(index, pointer->getType());
3412
3413 if (width != DL.getIndexTypeSizeInBits(PtrTy)) {
11
Assuming the condition is false
12
Taking false branch
3414 // Zero-extend or sign-extend the pointer value according to
3415 // whether the index is signed or not.
3416 index = CGF.Builder.CreateIntCast(index, DL.getIndexType(PtrTy), isSigned,
3417 "idx.ext");
3418 }
3419
3420 // If this is subtraction, negate the index.
3421 if (isSubtraction
12.1
'isSubtraction' is true
12.1
'isSubtraction' is true
12.1
'isSubtraction' is true
12.1
'isSubtraction' is true
12.1
'isSubtraction' is true
)
13
Taking true branch
3422 index = CGF.Builder.CreateNeg(index, "idx.neg");
3423
3424 if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
14
Assuming the condition is false
15
Taking false branch
3425 CGF.EmitBoundsCheck(op.E, pointerOperand, index, indexOperand->getType(),
3426 /*Accessed*/ false);
3427
3428 const PointerType *pointerType
3429 = pointerOperand->getType()->getAs<PointerType>();
16
Assuming the object is a 'PointerType'
3430 if (!pointerType) {
17
Assuming 'pointerType' is non-null
18
Taking false branch
3431 QualType objectType = pointerOperand->getType()
3432 ->castAs<ObjCObjectPointerType>()
3433 ->getPointeeType();
3434 llvm::Value *objectSize
3435 = CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(objectType));
3436
3437 index = CGF.Builder.CreateMul(index, objectSize);
3438
3439 Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy);
3440 result = CGF.Builder.CreateGEP(result, index, "add.ptr");
3441 return CGF.Builder.CreateBitCast(result, pointer->getType());
3442 }
3443
3444 QualType elementType = pointerType->getPointeeType();
3445 if (const VariableArrayType *vla
18.1
'vla' is non-null
18.1
'vla' is non-null
18.1
'vla' is non-null
18.1
'vla' is non-null
18.1
'vla' is non-null
19
Taking true branch
3446 = CGF.getContext().getAsVariableArrayType(elementType)) { 3447 // The element count here is the total number of non-VLA elements. 3448 llvm::Value *numElements = CGF.getVLASize(vla).NumElts; 3449 3450 // Effectively, the multiply by the VLA size is part of the GEP. 3451 // GEP indexes are signed, and scaling an index isn't permitted to 3452 // signed-overflow, so we use the same semantics for our explicit 3453 // multiply. We suppress this if overflow is not undefined behavior. 3454 if (CGF.getLangOpts().isSignedOverflowDefined()) {
20
Calling 'LangOptions::isSignedOverflowDefined'
23
Returning from 'LangOptions::isSignedOverflowDefined'
24
Taking false branch
// NOTE(review): the analyzer's reported null-pointer warning
// (Instructions.h:1234, "Called C++ object pointer is null") is reached
// through the EmitCheckedInBoundsGEP call below — see the report header.
3455 index = CGF.Builder.CreateMul(index, numElements, "vla.index"); 3456 pointer = CGF.Builder.CreateGEP(pointer, index, "add.ptr"); 3457 } else { 3458 index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index"); 3459 pointer = 3460 CGF.EmitCheckedInBoundsGEP(pointer, index, isSigned, isSubtraction,
25
Calling 'CodeGenFunction::EmitCheckedInBoundsGEP'
3461 op.E->getExprLoc(), "add.ptr"); 3462 } 3463 return pointer; 3464 } 3465 3466 // Explicitly handle GNU void* and function pointer arithmetic extensions. The 3467 // GNU void* casts amount to no-ops since our void* type is i8*, but this is 3468 // future proof. 3469 if (elementType->isVoidType() || elementType->isFunctionType()) { 3470 Value *result = CGF.EmitCastToVoidPtr(pointer); 3471 result = CGF.Builder.CreateGEP(result, index, "add.ptr"); 3472 return CGF.Builder.CreateBitCast(result, pointer->getType()); 3473 } 3474 3475 if (CGF.getLangOpts().isSignedOverflowDefined()) 3476 return CGF.Builder.CreateGEP(pointer, index, "add.ptr"); 3477 3478 return CGF.EmitCheckedInBoundsGEP(pointer, index, isSigned, isSubtraction, 3479 op.E->getExprLoc(), "add.ptr"); 3480} 3481 3482// Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and 3483// Addend. Use negMul and negAdd to negate the first operand of the Mul or 3484// the add operand respectively. This allows fmuladd to represent a*b-c, or 3485// c-a*b. Patterns in LLVM should catch the negated forms and translate them to 3486// efficient operations. 3487static Value* buildFMulAdd(llvm::Instruction *MulOp, Value *Addend, 3488 const CodeGenFunction &CGF, CGBuilderTy &Builder, 3489 bool negMul, bool negAdd) { 3490 assert(!(negMul && negAdd) && "Only one of negMul and negAdd should be set.")((!(negMul && negAdd) && "Only one of negMul and negAdd should be set."
) ? static_cast<void> (0) : __assert_fail ("!(negMul && negAdd) && \"Only one of negMul and negAdd should be set.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 3490, __PRETTY_FUNCTION__))
; 3491 3492 Value *MulOp0 = MulOp->getOperand(0); 3493 Value *MulOp1 = MulOp->getOperand(1); 3494 if (negMul) 3495 MulOp0 = Builder.CreateFNeg(MulOp0, "neg"); 3496 if (negAdd) 3497 Addend = Builder.CreateFNeg(Addend, "neg"); 3498 3499 Value *FMulAdd = nullptr; 3500 if (Builder.getIsFPConstrained()) { 3501 assert(isa<llvm::ConstrainedFPIntrinsic>(MulOp) &&((isa<llvm::ConstrainedFPIntrinsic>(MulOp) && "Only constrained operation should be created when Builder is in FP "
"constrained mode") ? static_cast<void> (0) : __assert_fail
("isa<llvm::ConstrainedFPIntrinsic>(MulOp) && \"Only constrained operation should be created when Builder is in FP \" \"constrained mode\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 3503, __PRETTY_FUNCTION__))
3502 "Only constrained operation should be created when Builder is in FP "((isa<llvm::ConstrainedFPIntrinsic>(MulOp) && "Only constrained operation should be created when Builder is in FP "
"constrained mode") ? static_cast<void> (0) : __assert_fail
("isa<llvm::ConstrainedFPIntrinsic>(MulOp) && \"Only constrained operation should be created when Builder is in FP \" \"constrained mode\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 3503, __PRETTY_FUNCTION__))
3503 "constrained mode")((isa<llvm::ConstrainedFPIntrinsic>(MulOp) && "Only constrained operation should be created when Builder is in FP "
"constrained mode") ? static_cast<void> (0) : __assert_fail
("isa<llvm::ConstrainedFPIntrinsic>(MulOp) && \"Only constrained operation should be created when Builder is in FP \" \"constrained mode\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 3503, __PRETTY_FUNCTION__))
; 3504 FMulAdd = Builder.CreateConstrainedFPCall( 3505 CGF.CGM.getIntrinsic(llvm::Intrinsic::experimental_constrained_fmuladd, 3506 Addend->getType()), 3507 {MulOp0, MulOp1, Addend}); 3508 } else { 3509 FMulAdd = Builder.CreateCall( 3510 CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()), 3511 {MulOp0, MulOp1, Addend}); 3512 } 3513 MulOp->eraseFromParent(); 3514 3515 return FMulAdd; 3516} 3517 3518// Check whether it would be legal to emit an fmuladd intrinsic call to 3519// represent op and if so, build the fmuladd. 3520// 3521// Checks that (a) the operation is fusable, and (b) -ffp-contract=on. 3522// Does NOT check the type of the operation - it's assumed that this function 3523// will be called from contexts where it's known that the type is contractable. 3524static Value* tryEmitFMulAdd(const BinOpInfo &op, 3525 const CodeGenFunction &CGF, CGBuilderTy &Builder, 3526 bool isSub=false) { 3527 3528 assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign ||(((op.Opcode == BO_Add || op.Opcode == BO_AddAssign || op.Opcode
== BO_Sub || op.Opcode == BO_SubAssign) && "Only fadd/fsub can be the root of an fmuladd."
) ? static_cast<void> (0) : __assert_fail ("(op.Opcode == BO_Add || op.Opcode == BO_AddAssign || op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) && \"Only fadd/fsub can be the root of an fmuladd.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 3530, __PRETTY_FUNCTION__))
3529 op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) &&(((op.Opcode == BO_Add || op.Opcode == BO_AddAssign || op.Opcode
== BO_Sub || op.Opcode == BO_SubAssign) && "Only fadd/fsub can be the root of an fmuladd."
) ? static_cast<void> (0) : __assert_fail ("(op.Opcode == BO_Add || op.Opcode == BO_AddAssign || op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) && \"Only fadd/fsub can be the root of an fmuladd.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 3530, __PRETTY_FUNCTION__))
3530 "Only fadd/fsub can be the root of an fmuladd.")(((op.Opcode == BO_Add || op.Opcode == BO_AddAssign || op.Opcode
== BO_Sub || op.Opcode == BO_SubAssign) && "Only fadd/fsub can be the root of an fmuladd."
) ? static_cast<void> (0) : __assert_fail ("(op.Opcode == BO_Add || op.Opcode == BO_AddAssign || op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) && \"Only fadd/fsub can be the root of an fmuladd.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 3530, __PRETTY_FUNCTION__))
; 3531 3532 // Check whether this op is marked as fusable. 3533 if (!op.FPFeatures.allowFPContractWithinStatement()) 3534 return nullptr; 3535 3536 // We have a potentially fusable op. Look for a mul on one of the operands. 3537 // Also, make sure that the mul result isn't used directly. In that case, 3538 // there's no point creating a muladd operation. 3539 if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(op.LHS)) { 3540 if (LHSBinOp->getOpcode() == llvm::Instruction::FMul && 3541 LHSBinOp->use_empty()) 3542 return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub); 3543 } 3544 if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(op.RHS)) { 3545 if (RHSBinOp->getOpcode() == llvm::Instruction::FMul && 3546 RHSBinOp->use_empty()) 3547 return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false); 3548 } 3549 3550 if (auto *LHSBinOp = dyn_cast<llvm::CallBase>(op.LHS)) { 3551 if (LHSBinOp->getIntrinsicID() == 3552 llvm::Intrinsic::experimental_constrained_fmul && 3553 LHSBinOp->use_empty()) 3554 return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub); 3555 } 3556 if (auto *RHSBinOp = dyn_cast<llvm::CallBase>(op.RHS)) { 3557 if (RHSBinOp->getIntrinsicID() == 3558 llvm::Intrinsic::experimental_constrained_fmul && 3559 RHSBinOp->use_empty()) 3560 return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false); 3561 } 3562 3563 return nullptr; 3564} 3565 3566Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) { 3567 if (op.LHS->getType()->isPointerTy() || 3568 op.RHS->getType()->isPointerTy()) 3569 return emitPointerArithmetic(CGF, op, CodeGenFunction::NotSubtraction); 3570 3571 if (op.Ty->isSignedIntegerOrEnumerationType()) { 3572 switch (CGF.getLangOpts().getSignedOverflowBehavior()) { 3573 case LangOptions::SOB_Defined: 3574 return Builder.CreateAdd(op.LHS, op.RHS, "add"); 3575 case LangOptions::SOB_Undefined: 3576 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) 3577 return Builder.CreateNSWAdd(op.LHS, op.RHS, "add"); 3578 
LLVM_FALLTHROUGH[[gnu::fallthrough]]; 3579 case LangOptions::SOB_Trapping: 3580 if (CanElideOverflowCheck(CGF.getContext(), op)) 3581 return Builder.CreateNSWAdd(op.LHS, op.RHS, "add"); 3582 return EmitOverflowCheckedBinOp(op); 3583 } 3584 } 3585 3586 if (op.Ty->isConstantMatrixType()) { 3587 llvm::MatrixBuilder<CGBuilderTy> MB(Builder); 3588 return MB.CreateAdd(op.LHS, op.RHS); 3589 } 3590 3591 if (op.Ty->isUnsignedIntegerType() && 3592 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) && 3593 !CanElideOverflowCheck(CGF.getContext(), op)) 3594 return EmitOverflowCheckedBinOp(op); 3595 3596 if (op.LHS->getType()->isFPOrFPVectorTy()) { 3597 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures); 3598 // Try to form an fmuladd. 3599 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder)) 3600 return FMulAdd; 3601 3602 return Builder.CreateFAdd(op.LHS, op.RHS, "add"); 3603 } 3604 3605 if (op.isFixedPointOp()) 3606 return EmitFixedPointBinOp(op); 3607 3608 return Builder.CreateAdd(op.LHS, op.RHS, "add"); 3609} 3610 3611/// The resulting value must be calculated with exact precision, so the operands 3612/// may not be the same type. 3613Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) { 3614 using llvm::APSInt; 3615 using llvm::ConstantInt; 3616 3617 // This is either a binary operation where at least one of the operands is 3618 // a fixed-point type, or a unary operation where the operand is a fixed-point 3619 // type. The result type of a binary operation is determined by 3620 // Sema::handleFixedPointConversions(). 
3621 QualType ResultTy = op.Ty; 3622 QualType LHSTy, RHSTy; 3623 if (const auto *BinOp = dyn_cast<BinaryOperator>(op.E)) { 3624 RHSTy = BinOp->getRHS()->getType(); 3625 if (const auto *CAO = dyn_cast<CompoundAssignOperator>(BinOp)) { 3626 // For compound assignment, the effective type of the LHS at this point 3627 // is the computation LHS type, not the actual LHS type, and the final 3628 // result type is not the type of the expression but rather the 3629 // computation result type. 3630 LHSTy = CAO->getComputationLHSType(); 3631 ResultTy = CAO->getComputationResultType(); 3632 } else 3633 LHSTy = BinOp->getLHS()->getType(); 3634 } else if (const auto *UnOp = dyn_cast<UnaryOperator>(op.E)) { 3635 LHSTy = UnOp->getSubExpr()->getType(); 3636 RHSTy = UnOp->getSubExpr()->getType(); 3637 } 3638 ASTContext &Ctx = CGF.getContext(); 3639 Value *LHS = op.LHS; 3640 Value *RHS = op.RHS; 3641 3642 auto LHSFixedSema = Ctx.getFixedPointSemantics(LHSTy); 3643 auto RHSFixedSema = Ctx.getFixedPointSemantics(RHSTy); 3644 auto ResultFixedSema = Ctx.getFixedPointSemantics(ResultTy); 3645 auto CommonFixedSema = LHSFixedSema.getCommonSemantics(RHSFixedSema); 3646 3647 // Perform the actual operation. 
3648 Value *Result; 3649 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder); 3650 switch (op.Opcode) { 3651 case BO_AddAssign: 3652 case BO_Add: 3653 Result = FPBuilder.CreateAdd(LHS, LHSFixedSema, RHS, RHSFixedSema); 3654 break; 3655 case BO_SubAssign: 3656 case BO_Sub: 3657 Result = FPBuilder.CreateSub(LHS, LHSFixedSema, RHS, RHSFixedSema); 3658 break; 3659 case BO_MulAssign: 3660 case BO_Mul: 3661 Result = FPBuilder.CreateMul(LHS, LHSFixedSema, RHS, RHSFixedSema); 3662 break; 3663 case BO_DivAssign: 3664 case BO_Div: 3665 Result = FPBuilder.CreateDiv(LHS, LHSFixedSema, RHS, RHSFixedSema); 3666 break; 3667 case BO_ShlAssign: 3668 case BO_Shl: 3669 Result = FPBuilder.CreateShl(LHS, LHSFixedSema, RHS); 3670 break; 3671 case BO_ShrAssign: 3672 case BO_Shr: 3673 Result = FPBuilder.CreateShr(LHS, LHSFixedSema, RHS); 3674 break; 3675 case BO_LT: 3676 return FPBuilder.CreateLT(LHS, LHSFixedSema, RHS, RHSFixedSema); 3677 case BO_GT: 3678 return FPBuilder.CreateGT(LHS, LHSFixedSema, RHS, RHSFixedSema); 3679 case BO_LE: 3680 return FPBuilder.CreateLE(LHS, LHSFixedSema, RHS, RHSFixedSema); 3681 case BO_GE: 3682 return FPBuilder.CreateGE(LHS, LHSFixedSema, RHS, RHSFixedSema); 3683 case BO_EQ: 3684 // For equality operations, we assume any padding bits on unsigned types are 3685 // zero'd out. They could be overwritten through non-saturating operations 3686 // that cause overflow, but this leads to undefined behavior. 3687 return FPBuilder.CreateEQ(LHS, LHSFixedSema, RHS, RHSFixedSema); 3688 case BO_NE: 3689 return FPBuilder.CreateNE(LHS, LHSFixedSema, RHS, RHSFixedSema); 3690 case BO_Cmp: 3691 case BO_LAnd: 3692 case BO_LOr: 3693 llvm_unreachable("Found unimplemented fixed point binary operation")::llvm::llvm_unreachable_internal("Found unimplemented fixed point binary operation"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 3693)
; 3694 case BO_PtrMemD: 3695 case BO_PtrMemI: 3696 case BO_Rem: 3697 case BO_Xor: 3698 case BO_And: 3699 case BO_Or: 3700 case BO_Assign: 3701 case BO_RemAssign: 3702 case BO_AndAssign: 3703 case BO_XorAssign: 3704 case BO_OrAssign: 3705 case BO_Comma: 3706 llvm_unreachable("Found unsupported binary operation for fixed point types.")::llvm::llvm_unreachable_internal("Found unsupported binary operation for fixed point types."
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 3706)
; 3707 } 3708 3709 bool IsShift = BinaryOperator::isShiftOp(op.Opcode) || 3710 BinaryOperator::isShiftAssignOp(op.Opcode); 3711 // Convert to the result type. 3712 return FPBuilder.CreateFixedToFixed(Result, IsShift ? LHSFixedSema 3713 : CommonFixedSema, 3714 ResultFixedSema); 3715} 3716 3717Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) { 3718 // The LHS is always a pointer if either side is. 3719 if (!op.LHS->getType()->isPointerTy()) {
3
Taking false branch
3720 if (op.Ty->isSignedIntegerOrEnumerationType()) { 3721 switch (CGF.getLangOpts().getSignedOverflowBehavior()) { 3722 case LangOptions::SOB_Defined: 3723 return Builder.CreateSub(op.LHS, op.RHS, "sub"); 3724 case LangOptions::SOB_Undefined: 3725 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) 3726 return Builder.CreateNSWSub(op.LHS, op.RHS, "sub"); 3727 LLVM_FALLTHROUGH[[gnu::fallthrough]]; 3728 case LangOptions::SOB_Trapping: 3729 if (CanElideOverflowCheck(CGF.getContext(), op)) 3730 return Builder.CreateNSWSub(op.LHS, op.RHS, "sub"); 3731 return EmitOverflowCheckedBinOp(op); 3732 } 3733 } 3734 3735 if (op.Ty->isConstantMatrixType()) { 3736 llvm::MatrixBuilder<CGBuilderTy> MB(Builder); 3737 return MB.CreateSub(op.LHS, op.RHS); 3738 } 3739 3740 if (op.Ty->isUnsignedIntegerType() && 3741 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) && 3742 !CanElideOverflowCheck(CGF.getContext(), op)) 3743 return EmitOverflowCheckedBinOp(op); 3744 3745 if (op.LHS->getType()->isFPOrFPVectorTy()) { 3746 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures); 3747 // Try to form an fmuladd. 3748 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true)) 3749 return FMulAdd; 3750 return Builder.CreateFSub(op.LHS, op.RHS, "sub"); 3751 } 3752 3753 if (op.isFixedPointOp()) 3754 return EmitFixedPointBinOp(op); 3755 3756 return Builder.CreateSub(op.LHS, op.RHS, "sub"); 3757 } 3758 3759 // If the RHS is not a pointer, then we have normal pointer 3760 // arithmetic. 3761 if (!op.RHS->getType()->isPointerTy())
4
Taking true branch
3762 return emitPointerArithmetic(CGF, op, CodeGenFunction::IsSubtraction);
5
Calling 'emitPointerArithmetic'
3763 3764 // Otherwise, this is a pointer subtraction. 3765 3766 // Do the raw subtraction part. 3767 llvm::Value *LHS 3768 = Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast"); 3769 llvm::Value *RHS 3770 = Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast"); 3771 Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub"); 3772 3773 // Okay, figure out the element size. 3774 const BinaryOperator *expr = cast<BinaryOperator>(op.E); 3775 QualType elementType = expr->getLHS()->getType()->getPointeeType(); 3776 3777 llvm::Value *divisor = nullptr; 3778 3779 // For a variable-length array, this is going to be non-constant. 3780 if (const VariableArrayType *vla 3781 = CGF.getContext().getAsVariableArrayType(elementType)) { 3782 auto VlaSize = CGF.getVLASize(vla); 3783 elementType = VlaSize.Type; 3784 divisor = VlaSize.NumElts; 3785 3786 // Scale the number of non-VLA elements by the non-VLA element size. 3787 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType); 3788 if (!eltSize.isOne()) 3789 divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor); 3790 3791 // For everything elese, we can just compute it, safe in the 3792 // assumption that Sema won't let anything through that we can't 3793 // safely compute the size of. 3794 } else { 3795 CharUnits elementSize; 3796 // Handle GCC extension for pointer arithmetic on void* and 3797 // function pointer types. 3798 if (elementType->isVoidType() || elementType->isFunctionType()) 3799 elementSize = CharUnits::One(); 3800 else 3801 elementSize = CGF.getContext().getTypeSizeInChars(elementType); 3802 3803 // Don't even emit the divide for element size of 1. 3804 if (elementSize.isOne()) 3805 return diffInChars; 3806 3807 divisor = CGF.CGM.getSize(elementSize); 3808 } 3809 3810 // Otherwise, do a full sdiv. 
This uses the "exact" form of sdiv, since 3811 // pointer difference in C is only defined in the case where both operands 3812 // are pointing to elements of an array. 3813 return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div"); 3814} 3815 3816Value *ScalarExprEmitter::GetWidthMinusOneValue(Value* LHS,Value* RHS) { 3817 llvm::IntegerType *Ty; 3818 if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(LHS->getType())) 3819 Ty = cast<llvm::IntegerType>(VT->getElementType()); 3820 else 3821 Ty = cast<llvm::IntegerType>(LHS->getType()); 3822 return llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth() - 1); 3823} 3824 3825Value *ScalarExprEmitter::ConstrainShiftValue(Value *LHS, Value *RHS, 3826 const Twine &Name) { 3827 llvm::IntegerType *Ty; 3828 if (auto *VT = dyn_cast<llvm::VectorType>(LHS->getType())) 3829 Ty = cast<llvm::IntegerType>(VT->getElementType()); 3830 else 3831 Ty = cast<llvm::IntegerType>(LHS->getType()); 3832 3833 if (llvm::isPowerOf2_64(Ty->getBitWidth())) 3834 return Builder.CreateAnd(RHS, GetWidthMinusOneValue(LHS, RHS), Name); 3835 3836 return Builder.CreateURem( 3837 RHS, llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth()), Name); 3838} 3839 3840Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) { 3841 // TODO: This misses out on the sanitizer check below. 3842 if (Ops.isFixedPointOp()) 3843 return EmitFixedPointBinOp(Ops); 3844 3845 // LLVM requires the LHS and RHS to be the same type: promote or truncate the 3846 // RHS to the same size as the LHS. 
3847 Value *RHS = Ops.RHS; 3848 if (Ops.LHS->getType() != RHS->getType()) 3849 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom"); 3850 3851 bool SanitizeSignedBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) && 3852 Ops.Ty->hasSignedIntegerRepresentation() && 3853 !CGF.getLangOpts().isSignedOverflowDefined() && 3854 !CGF.getLangOpts().CPlusPlus20; 3855 bool SanitizeUnsignedBase = 3856 CGF.SanOpts.has(SanitizerKind::UnsignedShiftBase) && 3857 Ops.Ty->hasUnsignedIntegerRepresentation(); 3858 bool SanitizeBase = SanitizeSignedBase || SanitizeUnsignedBase; 3859 bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent); 3860 // OpenCL 6.3j: shift values are effectively % word size of LHS. 3861 if (CGF.getLangOpts().OpenCL) 3862 RHS = ConstrainShiftValue(Ops.LHS, RHS, "shl.mask"); 3863 else if ((SanitizeBase || SanitizeExponent) && 3864 isa<llvm::IntegerType>(Ops.LHS->getType())) { 3865 CodeGenFunction::SanitizerScope SanScope(&CGF); 3866 SmallVector<std::pair<Value *, SanitizerMask>, 2> Checks; 3867 llvm::Value *WidthMinusOne = GetWidthMinusOneValue(Ops.LHS, Ops.RHS); 3868 llvm::Value *ValidExponent = Builder.CreateICmpULE(Ops.RHS, WidthMinusOne); 3869 3870 if (SanitizeExponent) { 3871 Checks.push_back( 3872 std::make_pair(ValidExponent, SanitizerKind::ShiftExponent)); 3873 } 3874 3875 if (SanitizeBase) { 3876 // Check whether we are shifting any non-zero bits off the top of the 3877 // integer. We only emit this check if exponent is valid - otherwise 3878 // instructions below will have undefined behavior themselves. 3879 llvm::BasicBlock *Orig = Builder.GetInsertBlock(); 3880 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont"); 3881 llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock("check"); 3882 Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont); 3883 llvm::Value *PromotedWidthMinusOne = 3884 (RHS == Ops.RHS) ? 
WidthMinusOne 3885 : GetWidthMinusOneValue(Ops.LHS, RHS); 3886 CGF.EmitBlock(CheckShiftBase); 3887 llvm::Value *BitsShiftedOff = Builder.CreateLShr( 3888 Ops.LHS, Builder.CreateSub(PromotedWidthMinusOne, RHS, "shl.zeros", 3889 /*NUW*/ true, /*NSW*/ true), 3890 "shl.check"); 3891 if (SanitizeUnsignedBase || CGF.getLangOpts().CPlusPlus) { 3892 // In C99, we are not permitted to shift a 1 bit into the sign bit. 3893 // Under C++11's rules, shifting a 1 bit into the sign bit is 3894 // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't 3895 // define signed left shifts, so we use the C99 and C++11 rules there). 3896 // Unsigned shifts can always shift into the top bit. 3897 llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1); 3898 BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One); 3899 } 3900 llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0); 3901 llvm::Value *ValidBase = Builder.CreateICmpEQ(BitsShiftedOff, Zero); 3902 CGF.EmitBlock(Cont); 3903 llvm::PHINode *BaseCheck = Builder.CreatePHI(ValidBase->getType(), 2); 3904 BaseCheck->addIncoming(Builder.getTrue(), Orig); 3905 BaseCheck->addIncoming(ValidBase, CheckShiftBase); 3906 Checks.push_back(std::make_pair( 3907 BaseCheck, SanitizeSignedBase ? SanitizerKind::ShiftBase 3908 : SanitizerKind::UnsignedShiftBase)); 3909 } 3910 3911 assert(!Checks.empty())((!Checks.empty()) ? static_cast<void> (0) : __assert_fail
("!Checks.empty()", "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 3911, __PRETTY_FUNCTION__))
; 3912 EmitBinOpCheck(Checks, Ops); 3913 } 3914 3915 return Builder.CreateShl(Ops.LHS, RHS, "shl"); 3916} 3917 3918Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) { 3919 // TODO: This misses out on the sanitizer check below. 3920 if (Ops.isFixedPointOp()) 3921 return EmitFixedPointBinOp(Ops); 3922 3923 // LLVM requires the LHS and RHS to be the same type: promote or truncate the 3924 // RHS to the same size as the LHS. 3925 Value *RHS = Ops.RHS; 3926 if (Ops.LHS->getType() != RHS->getType()) 3927 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom"); 3928 3929 // OpenCL 6.3j: shift values are effectively % word size of LHS. 3930 if (CGF.getLangOpts().OpenCL) 3931 RHS = ConstrainShiftValue(Ops.LHS, RHS, "shr.mask"); 3932 else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) && 3933 isa<llvm::IntegerType>(Ops.LHS->getType())) { 3934 CodeGenFunction::SanitizerScope SanScope(&CGF); 3935 llvm::Value *Valid = 3936 Builder.CreateICmpULE(RHS, GetWidthMinusOneValue(Ops.LHS, RHS)); 3937 EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::ShiftExponent), Ops); 3938 } 3939 3940 if (Ops.Ty->hasUnsignedIntegerRepresentation()) 3941 return Builder.CreateLShr(Ops.LHS, RHS, "shr"); 3942 return Builder.CreateAShr(Ops.LHS, RHS, "shr"); 3943} 3944 3945enum IntrinsicType { VCMPEQ, VCMPGT }; 3946// return corresponding comparison intrinsic for given vector type 3947static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT, 3948 BuiltinType::Kind ElemKind) { 3949 switch (ElemKind) { 3950 default: llvm_unreachable("unexpected element type")::llvm::llvm_unreachable_internal("unexpected element type", "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 3950)
; 3951 case BuiltinType::Char_U: 3952 case BuiltinType::UChar: 3953 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p : 3954 llvm::Intrinsic::ppc_altivec_vcmpgtub_p; 3955 case BuiltinType::Char_S: 3956 case BuiltinType::SChar: 3957 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p : 3958 llvm::Intrinsic::ppc_altivec_vcmpgtsb_p; 3959 case BuiltinType::UShort: 3960 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p : 3961 llvm::Intrinsic::ppc_altivec_vcmpgtuh_p; 3962 case BuiltinType::Short: 3963 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p : 3964 llvm::Intrinsic::ppc_altivec_vcmpgtsh_p; 3965 case BuiltinType::UInt: 3966 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p : 3967 llvm::Intrinsic::ppc_altivec_vcmpgtuw_p; 3968 case BuiltinType::Int: 3969 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p : 3970 llvm::Intrinsic::ppc_altivec_vcmpgtsw_p; 3971 case BuiltinType::ULong: 3972 case BuiltinType::ULongLong: 3973 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p : 3974 llvm::Intrinsic::ppc_altivec_vcmpgtud_p; 3975 case BuiltinType::Long: 3976 case BuiltinType::LongLong: 3977 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p : 3978 llvm::Intrinsic::ppc_altivec_vcmpgtsd_p; 3979 case BuiltinType::Float: 3980 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p : 3981 llvm::Intrinsic::ppc_altivec_vcmpgtfp_p; 3982 case BuiltinType::Double: 3983 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p : 3984 llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p; 3985 case BuiltinType::UInt128: 3986 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p 3987 : llvm::Intrinsic::ppc_altivec_vcmpgtuq_p; 3988 case BuiltinType::Int128: 3989 return (IT == VCMPEQ) ? 
llvm::Intrinsic::ppc_altivec_vcmpequq_p 3990 : llvm::Intrinsic::ppc_altivec_vcmpgtsq_p; 3991 } 3992} 3993 3994Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E, 3995 llvm::CmpInst::Predicate UICmpOpc, 3996 llvm::CmpInst::Predicate SICmpOpc, 3997 llvm::CmpInst::Predicate FCmpOpc, 3998 bool IsSignaling) { 3999 TestAndClearIgnoreResultAssign(); 4000 Value *Result; 4001 QualType LHSTy = E->getLHS()->getType(); 4002 QualType RHSTy = E->getRHS()->getType(); 4003 if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) { 4004 assert(E->getOpcode() == BO_EQ ||((E->getOpcode() == BO_EQ || E->getOpcode() == BO_NE) ?
static_cast<void> (0) : __assert_fail ("E->getOpcode() == BO_EQ || E->getOpcode() == BO_NE"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 4005, __PRETTY_FUNCTION__))
4005 E->getOpcode() == BO_NE)((E->getOpcode() == BO_EQ || E->getOpcode() == BO_NE) ?
static_cast<void> (0) : __assert_fail ("E->getOpcode() == BO_EQ || E->getOpcode() == BO_NE"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 4005, __PRETTY_FUNCTION__))
; 4006 Value *LHS = CGF.EmitScalarExpr(E->getLHS()); 4007 Value *RHS = CGF.EmitScalarExpr(E->getRHS()); 4008 Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison( 4009 CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE); 4010 } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) { 4011 BinOpInfo BOInfo = EmitBinOps(E); 4012 Value *LHS = BOInfo.LHS; 4013 Value *RHS = BOInfo.RHS; 4014 4015 // If AltiVec, the comparison results in a numeric type, so we use 4016 // intrinsics comparing vectors and giving 0 or 1 as a result 4017 if (LHSTy->isVectorType() && !E->getType()->isVectorType()) { 4018 // constants for mapping CR6 register bits to predicate result 4019 enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6; 4020 4021 llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic; 4022 4023 // in several cases vector arguments order will be reversed 4024 Value *FirstVecArg = LHS, 4025 *SecondVecArg = RHS; 4026 4027 QualType ElTy = LHSTy->castAs<VectorType>()->getElementType(); 4028 BuiltinType::Kind ElementKind = ElTy->castAs<BuiltinType>()->getKind(); 4029 4030 switch(E->getOpcode()) { 4031 default: llvm_unreachable("is not a comparison operation")::llvm::llvm_unreachable_internal("is not a comparison operation"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 4031)
; 4032 case BO_EQ: 4033 CR6 = CR6_LT; 4034 ID = GetIntrinsic(VCMPEQ, ElementKind); 4035 break; 4036 case BO_NE: 4037 CR6 = CR6_EQ; 4038 ID = GetIntrinsic(VCMPEQ, ElementKind); 4039 break; 4040 case BO_LT: 4041 CR6 = CR6_LT; 4042 ID = GetIntrinsic(VCMPGT, ElementKind); 4043 std::swap(FirstVecArg, SecondVecArg); 4044 break; 4045 case BO_GT: 4046 CR6 = CR6_LT; 4047 ID = GetIntrinsic(VCMPGT, ElementKind); 4048 break; 4049 case BO_LE: 4050 if (ElementKind == BuiltinType::Float) { 4051 CR6 = CR6_LT; 4052 ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p; 4053 std::swap(FirstVecArg, SecondVecArg); 4054 } 4055 else { 4056 CR6 = CR6_EQ; 4057 ID = GetIntrinsic(VCMPGT, ElementKind); 4058 } 4059 break; 4060 case BO_GE: 4061 if (ElementKind == BuiltinType::Float) { 4062 CR6 = CR6_LT; 4063 ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p; 4064 } 4065 else { 4066 CR6 = CR6_EQ; 4067 ID = GetIntrinsic(VCMPGT, ElementKind); 4068 std::swap(FirstVecArg, SecondVecArg); 4069 } 4070 break; 4071 } 4072 4073 Value *CR6Param = Builder.getInt32(CR6); 4074 llvm::Function *F = CGF.CGM.getIntrinsic(ID); 4075 Result = Builder.CreateCall(F, {CR6Param, FirstVecArg, SecondVecArg}); 4076 4077 // The result type of intrinsic may not be same as E->getType(). 4078 // If E->getType() is not BoolTy, EmitScalarConversion will do the 4079 // conversion work. If E->getType() is BoolTy, EmitScalarConversion will 4080 // do nothing, if ResultTy is not i1 at the same time, it will cause 4081 // crash later. 
4082 llvm::IntegerType *ResultTy = cast<llvm::IntegerType>(Result->getType()); 4083 if (ResultTy->getBitWidth() > 1 && 4084 E->getType() == CGF.getContext().BoolTy) 4085 Result = Builder.CreateTrunc(Result, Builder.getInt1Ty()); 4086 return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(), 4087 E->getExprLoc()); 4088 } 4089 4090 if (BOInfo.isFixedPointOp()) { 4091 Result = EmitFixedPointBinOp(BOInfo); 4092 } else if (LHS->getType()->isFPOrFPVectorTy()) { 4093 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, BOInfo.FPFeatures); 4094 if (!IsSignaling) 4095 Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp"); 4096 else 4097 Result = Builder.CreateFCmpS(FCmpOpc, LHS, RHS, "cmp"); 4098 } else if (LHSTy->hasSignedIntegerRepresentation()) { 4099 Result = Builder.CreateICmp(SICmpOpc, LHS, RHS, "cmp"); 4100 } else { 4101 // Unsigned integers and pointers. 4102 4103 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers && 4104 !isa<llvm::ConstantPointerNull>(LHS) && 4105 !isa<llvm::ConstantPointerNull>(RHS)) { 4106 4107 // Dynamic information is required to be stripped for comparisons, 4108 // because it could leak the dynamic information. Based on comparisons 4109 // of pointers to dynamic objects, the optimizer can replace one pointer 4110 // with another, which might be incorrect in presence of invariant 4111 // groups. Comparison with null is safe because null does not carry any 4112 // dynamic information. 4113 if (LHSTy.mayBeDynamicClass()) 4114 LHS = Builder.CreateStripInvariantGroup(LHS); 4115 if (RHSTy.mayBeDynamicClass()) 4116 RHS = Builder.CreateStripInvariantGroup(RHS); 4117 } 4118 4119 Result = Builder.CreateICmp(UICmpOpc, LHS, RHS, "cmp"); 4120 } 4121 4122 // If this is a vector comparison, sign extend the result to the appropriate 4123 // vector integer type and return it (don't convert to bool). 
4124 if (LHSTy->isVectorType()) 4125 return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext"); 4126 4127 } else { 4128 // Complex Comparison: can only be an equality comparison. 4129 CodeGenFunction::ComplexPairTy LHS, RHS; 4130 QualType CETy; 4131 if (auto *CTy = LHSTy->getAs<ComplexType>()) { 4132 LHS = CGF.EmitComplexExpr(E->getLHS()); 4133 CETy = CTy->getElementType(); 4134 } else { 4135 LHS.first = Visit(E->getLHS()); 4136 LHS.second = llvm::Constant::getNullValue(LHS.first->getType()); 4137 CETy = LHSTy; 4138 } 4139 if (auto *CTy = RHSTy->getAs<ComplexType>()) { 4140 RHS = CGF.EmitComplexExpr(E->getRHS()); 4141 assert(CGF.getContext().hasSameUnqualifiedType(CETy,((CGF.getContext().hasSameUnqualifiedType(CETy, CTy->getElementType
()) && "The element types must always match.") ? static_cast
<void> (0) : __assert_fail ("CGF.getContext().hasSameUnqualifiedType(CETy, CTy->getElementType()) && \"The element types must always match.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 4143, __PRETTY_FUNCTION__))
4142 CTy->getElementType()) &&((CGF.getContext().hasSameUnqualifiedType(CETy, CTy->getElementType
()) && "The element types must always match.") ? static_cast
<void> (0) : __assert_fail ("CGF.getContext().hasSameUnqualifiedType(CETy, CTy->getElementType()) && \"The element types must always match.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 4143, __PRETTY_FUNCTION__))
4143 "The element types must always match.")((CGF.getContext().hasSameUnqualifiedType(CETy, CTy->getElementType
()) && "The element types must always match.") ? static_cast
<void> (0) : __assert_fail ("CGF.getContext().hasSameUnqualifiedType(CETy, CTy->getElementType()) && \"The element types must always match.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 4143, __PRETTY_FUNCTION__))
; 4144 (void)CTy; 4145 } else { 4146 RHS.first = Visit(E->getRHS()); 4147 RHS.second = llvm::Constant::getNullValue(RHS.first->getType()); 4148 assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&((CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
"The element types must always match.") ? static_cast<void
> (0) : __assert_fail ("CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) && \"The element types must always match.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 4149, __PRETTY_FUNCTION__))
4149 "The element types must always match.")((CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
"The element types must always match.") ? static_cast<void
> (0) : __assert_fail ("CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) && \"The element types must always match.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 4149, __PRETTY_FUNCTION__))
; 4150 } 4151 4152 Value *ResultR, *ResultI; 4153 if (CETy->isRealFloatingType()) { 4154 // As complex comparisons can only be equality comparisons, they 4155 // are never signaling comparisons. 4156 ResultR = Builder.CreateFCmp(FCmpOpc, LHS.first, RHS.first, "cmp.r"); 4157 ResultI = Builder.CreateFCmp(FCmpOpc, LHS.second, RHS.second, "cmp.i"); 4158 } else { 4159 // Complex comparisons can only be equality comparisons. As such, signed 4160 // and unsigned opcodes are the same. 4161 ResultR = Builder.CreateICmp(UICmpOpc, LHS.first, RHS.first, "cmp.r"); 4162 ResultI = Builder.CreateICmp(UICmpOpc, LHS.second, RHS.second, "cmp.i"); 4163 } 4164 4165 if (E->getOpcode() == BO_EQ) { 4166 Result = Builder.CreateAnd(ResultR, ResultI, "and.ri"); 4167 } else { 4168 assert(E->getOpcode() == BO_NE &&((E->getOpcode() == BO_NE && "Complex comparison other than == or != ?"
) ? static_cast<void> (0) : __assert_fail ("E->getOpcode() == BO_NE && \"Complex comparison other than == or != ?\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 4169, __PRETTY_FUNCTION__))
4169 "Complex comparison other than == or != ?")((E->getOpcode() == BO_NE && "Complex comparison other than == or != ?"
) ? static_cast<void> (0) : __assert_fail ("E->getOpcode() == BO_NE && \"Complex comparison other than == or != ?\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 4169, __PRETTY_FUNCTION__))
; 4170 Result = Builder.CreateOr(ResultR, ResultI, "or.ri"); 4171 } 4172 } 4173 4174 return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(), 4175 E->getExprLoc()); 4176} 4177 4178Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) { 4179 bool Ignore = TestAndClearIgnoreResultAssign(); 4180 4181 Value *RHS; 4182 LValue LHS; 4183 4184 switch (E->getLHS()->getType().getObjCLifetime()) { 4185 case Qualifiers::OCL_Strong: 4186 std::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore); 4187 break; 4188 4189 case Qualifiers::OCL_Autoreleasing: 4190 std::tie(LHS, RHS) = CGF.EmitARCStoreAutoreleasing(E); 4191 break; 4192 4193 case Qualifiers::OCL_ExplicitNone: 4194 std::tie(LHS, RHS) = CGF.EmitARCStoreUnsafeUnretained(E, Ignore); 4195 break; 4196 4197 case Qualifiers::OCL_Weak: 4198 RHS = Visit(E->getRHS()); 4199 LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store); 4200 RHS = CGF.EmitARCStoreWeak(LHS.getAddress(CGF), RHS, Ignore); 4201 break; 4202 4203 case Qualifiers::OCL_None: 4204 // __block variables need to have the rhs evaluated first, plus 4205 // this should improve codegen just a little. 4206 RHS = Visit(E->getRHS()); 4207 LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store); 4208 4209 // Store the value into the LHS. Bit-fields are handled specially 4210 // because the result is altered by the store, i.e., [C99 6.5.16p1] 4211 // 'An assignment expression has the value of the left operand after 4212 // the assignment...'. 4213 if (LHS.isBitField()) { 4214 CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS); 4215 } else { 4216 CGF.EmitNullabilityCheck(LHS, RHS, E->getExprLoc()); 4217 CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS); 4218 } 4219 } 4220 4221 // If the result is clearly ignored, return now. 4222 if (Ignore) 4223 return nullptr; 4224 4225 // The result of an assignment in C is the assigned r-value. 
4226 if (!CGF.getLangOpts().CPlusPlus) 4227 return RHS; 4228 4229 // If the lvalue is non-volatile, return the computed value of the assignment. 4230 if (!LHS.isVolatileQualified()) 4231 return RHS; 4232 4233 // Otherwise, reload the value. 4234 return EmitLoadOfLValue(LHS, E->getExprLoc()); 4235} 4236 4237Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) { 4238 // Perform vector logical and on comparisons with zero vectors. 4239 if (E->getType()->isVectorType()) { 4240 CGF.incrementProfileCounter(E); 4241 4242 Value *LHS = Visit(E->getLHS()); 4243 Value *RHS = Visit(E->getRHS()); 4244 Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType()); 4245 if (LHS->getType()->isFPOrFPVectorTy()) { 4246 CodeGenFunction::CGFPOptionsRAII FPOptsRAII( 4247 CGF, E->getFPFeaturesInEffect(CGF.getLangOpts())); 4248 LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp"); 4249 RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp"); 4250 } else { 4251 LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp"); 4252 RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp"); 4253 } 4254 Value *And = Builder.CreateAnd(LHS, RHS); 4255 return Builder.CreateSExt(And, ConvertType(E->getType()), "sext"); 4256 } 4257 4258 bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr(); 4259 llvm::Type *ResTy = ConvertType(E->getType()); 4260 4261 // If we have 0 && RHS, see if we can elide RHS, if so, just return 0. 4262 // If we have 1 && X, just emit X without inserting the control flow. 4263 bool LHSCondVal; 4264 if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) { 4265 if (LHSCondVal) { // If we have 1 && X, just emit X. 4266 CGF.incrementProfileCounter(E); 4267 4268 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS()); 4269 4270 // If we're generating for profiling or coverage, generate a branch to a 4271 // block that increments the RHS counter needed to track branch condition 4272 // coverage. 
In this case, use "FBlock" as both the final "TrueBlock" and 4273 // "FalseBlock" after the increment is done. 4274 if (InstrumentRegions && 4275 CodeGenFunction::isInstrumentedCondition(E->getRHS())) { 4276 llvm::BasicBlock *FBlock = CGF.createBasicBlock("land.end"); 4277 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt"); 4278 Builder.CreateCondBr(RHSCond, RHSBlockCnt, FBlock); 4279 CGF.EmitBlock(RHSBlockCnt); 4280 CGF.incrementProfileCounter(E->getRHS()); 4281 CGF.EmitBranch(FBlock); 4282 CGF.EmitBlock(FBlock); 4283 } 4284 4285 // ZExt result to int or bool. 4286 return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext"); 4287 } 4288 4289 // 0 && RHS: If it is safe, just elide the RHS, and return 0/false. 4290 if (!CGF.ContainsLabel(E->getRHS())) 4291 return llvm::Constant::getNullValue(ResTy); 4292 } 4293 4294 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end"); 4295 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs"); 4296 4297 CodeGenFunction::ConditionalEvaluation eval(CGF); 4298 4299 // Branch on the LHS first. If it is false, go to the failure (cont) block. 4300 CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock, 4301 CGF.getProfileCount(E->getRHS())); 4302 4303 // Any edges into the ContBlock are now from an (indeterminate number of) 4304 // edges from this first condition. All of these values will be false. Start 4305 // setting up the PHI node in the Cont Block for this. 4306 llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2, 4307 "", ContBlock); 4308 for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock); 4309 PI != PE; ++PI) 4310 PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI); 4311 4312 eval.begin(CGF); 4313 CGF.EmitBlock(RHSBlock); 4314 CGF.incrementProfileCounter(E); 4315 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS()); 4316 eval.end(CGF); 4317 4318 // Reaquire the RHS block, as there may be subblocks inserted. 
  RHSBlock = Builder.GetInsertBlock();

  // If we're generating for profiling or coverage, generate a branch on the
  // RHS to a block that increments the RHS true counter needed to track branch
  // condition coverage.
  if (InstrumentRegions &&
      CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
    llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
    Builder.CreateCondBr(RHSCond, RHSBlockCnt, ContBlock);
    CGF.EmitBlock(RHSBlockCnt);
    CGF.incrementProfileCounter(E->getRHS());
    CGF.EmitBranch(ContBlock);
    PN->addIncoming(RHSCond, RHSBlockCnt);
  }

  // Emit an unconditional branch from this block to ContBlock.
  {
    // There is no need to emit line number for unconditional branch.
    auto NL = ApplyDebugLocation::CreateEmpty(CGF);
    CGF.EmitBlock(ContBlock);
  }
  // Insert an entry into the phi node for the edge with the value of RHSCond.
  PN->addIncoming(RHSCond, RHSBlock);

  // Artificial location to preserve the scope information
  {
    auto NL = ApplyDebugLocation::CreateArtificial(CGF);
    PN->setDebugLoc(Builder.getCurrentDebugLocation());
  }

  // ZExt result to int.
  return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
}

/// Emit "LHS || RHS". Scalar operands get short-circuit control flow (the RHS
/// is only evaluated when the LHS is false); vector operands are lowered to a
/// lane-wise compare-against-zero and a bitwise OR, with no control flow.
Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
  // Perform vector logical or on comparisons with zero vectors.
  if (E->getType()->isVectorType()) {
    CGF.incrementProfileCounter(E);

    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());
    Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
    if (LHS->getType()->isFPOrFPVectorTy()) {
      // Scope the FP environment (fast-math flags etc.) to these compares.
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
          CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
      LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
      RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
    } else {
      LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
      RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
    }
    Value *Or = Builder.CreateOr(LHS, RHS);
    return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext");
  }

  bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
  llvm::Type *ResTy = ConvertType(E->getType());

  // If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
  // If we have 0 || X, just emit X without inserting the control flow.
  bool LHSCondVal;
  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
    if (!LHSCondVal) { // If we have 0 || X, just emit X.
      CGF.incrementProfileCounter(E);

      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

      // If we're generating for profiling or coverage, generate a branch to a
      // block that increments the RHS counter need to track branch condition
      // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
      // "FalseBlock" after the increment is done.
      if (InstrumentRegions &&
          CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
        llvm::BasicBlock *FBlock = CGF.createBasicBlock("lor.end");
        llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
        Builder.CreateCondBr(RHSCond, FBlock, RHSBlockCnt);
        CGF.EmitBlock(RHSBlockCnt);
        CGF.incrementProfileCounter(E->getRHS());
        CGF.EmitBranch(FBlock);
        CGF.EmitBlock(FBlock);
      }

      // ZExt result to int or bool.
      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
    }

    // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
    // (Unsafe when the RHS contains a label that a goto could jump into.)
    if (!CGF.ContainsLabel(E->getRHS()))
      return llvm::ConstantInt::get(ResTy, 1);
  }

  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");

  CodeGenFunction::ConditionalEvaluation eval(CGF);

  // Branch on the LHS first. If it is true, go to the success (cont) block.
  CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock,
                           CGF.getCurrentProfileCount() -
                               CGF.getProfileCount(E->getRHS()));

  // Any edges into the ContBlock are now from an (indeterminate number of)
  // edges from this first condition. All of these values will be true. Start
  // setting up the PHI node in the Cont Block for this.
  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
                                            "", ContBlock);
  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);

  eval.begin(CGF);

  // Emit the RHS condition as a bool value.
  CGF.EmitBlock(RHSBlock);
  CGF.incrementProfileCounter(E);
  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

  eval.end(CGF);

  // Reaquire the RHS block, as there may be subblocks inserted.
  RHSBlock = Builder.GetInsertBlock();

  // If we're generating for profiling or coverage, generate a branch on the
  // RHS to a block that increments the RHS true counter needed to track branch
  // condition coverage.
  if (InstrumentRegions &&
      CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
    llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
    Builder.CreateCondBr(RHSCond, ContBlock, RHSBlockCnt);
    CGF.EmitBlock(RHSBlockCnt);
    CGF.incrementProfileCounter(E->getRHS());
    CGF.EmitBranch(ContBlock);
    PN->addIncoming(RHSCond, RHSBlockCnt);
  }

  // Emit an unconditional branch from this block to ContBlock. Insert an entry
  // into the phi node for the edge with the value of RHSCond.
  CGF.EmitBlock(ContBlock);
  PN->addIncoming(RHSCond, RHSBlock);

  // ZExt result to int.
  return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
}

/// Emit the comma operator: evaluate and discard the LHS, then yield the RHS.
Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitIgnoredExpr(E->getLHS());
  // The LHS may have terminated the current block (e.g. via a throw); make
  // sure there is a valid insertion point for the RHS.
  CGF.EnsureInsertPoint();
  return Visit(E->getRHS());
}

//===----------------------------------------------------------------------===//
//                             Other Operators
//===----------------------------------------------------------------------===//

/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
/// expression is cheap enough and side-effect-free enough to evaluate
/// unconditionally instead of conditionally. This is used to convert control
/// flow into selects in some cases.
static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E,
                                                   CodeGenFunction &CGF) {
  // Anything that is an integer or floating point constant is fine.
  return E->IgnoreParens()->isEvaluatable(CGF.getContext());

  // Even non-volatile automatic variables can't be evaluated unconditionally.
  // Referencing a thread_local may cause non-trivial initialization work to
  // occur. If we're inside a lambda and one of the variables is from the scope
  // outside the lambda, that function may have returned already. Reading its
  // locals is a bad idea. Also, these reads may introduce races there didn't
  // exist in the source-level program.
}

/// Emit a conditional operator (?:), handling, in order: a constant-foldable
/// condition (emit only the live arm), vector conditions (lowered to masking
/// or a select with no control flow), cheap arms (lowered to a select), and
/// finally full branch-and-PHI control flow.
Value *ScalarExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
  TestAndClearIgnoreResultAssign();

  // Bind the common expression if necessary.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);

  Expr *condExpr = E->getCond();
  Expr *lhsExpr = E->getTrueExpr();
  Expr *rhsExpr = E->getFalseExpr();

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm.
  bool CondExprBool;
  if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
    Expr *live = lhsExpr, *dead = rhsExpr;
    if (!CondExprBool) std::swap(live, dead);

    // If the dead side doesn't have labels we need, just emit the Live part.
    if (!CGF.ContainsLabel(dead)) {
      if (CondExprBool)
        CGF.incrementProfileCounter(E);
      Value *Result = Visit(live);

      // If the live part is a throw expression, it acts like it has a void
      // type, so evaluating it returns a null Value*. However, a conditional
      // with non-void type must return a non-null Value*.
      if (!Result && !E->getType()->isVoidType())
        Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));

      return Result;
    }
  }

  // OpenCL: If the condition is a vector, we can treat this condition like
  // the select function.
  if ((CGF.getLangOpts().OpenCL && condExpr->getType()->isVectorType()) ||
      condExpr->getType()->isExtVectorType()) {
    CGF.incrementProfileCounter(E);

    llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
    llvm::Value *LHS = Visit(lhsExpr);
    llvm::Value *RHS = Visit(rhsExpr);

    llvm::Type *condType = ConvertType(condExpr->getType());
    auto *vecTy = cast<llvm::FixedVectorType>(condType);

    unsigned numElem = vecTy->getNumElements();
    llvm::Type *elemType = vecTy->getElementType();

    // A lane selects the "true" operand iff the MSB of the corresponding
    // condition lane is set (ICmpSLT against zero tests the sign bit); the
    // sign-extended compare result is used as a full-width per-lane mask.
    llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
    llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
    llvm::Value *tmp = Builder.CreateSExt(
        TestMSB, llvm::FixedVectorType::get(elemType, numElem), "sext");
    llvm::Value *tmp2 = Builder.CreateNot(tmp);

    // Cast float to int to perform ANDs if necessary.
    llvm::Value *RHSTmp = RHS;
    llvm::Value *LHSTmp = LHS;
    bool wasCast = false;
    llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
    if (rhsVTy->getElementType()->isFloatingPointTy()) {
      RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
      LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
      wasCast = true;
    }

    // Blend: (RHS & ~mask) | (LHS & mask).
    llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
    llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
    llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
    if (wasCast)
      tmp5 = Builder.CreateBitCast(tmp5, RHS->getType());

    return tmp5;
  }

  if (condExpr->getType()->isVectorType()) {
    CGF.incrementProfileCounter(E);

    llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
    llvm::Value *LHS = Visit(lhsExpr);
    llvm::Value *RHS = Visit(rhsExpr);

    llvm::Type *CondType = ConvertType(condExpr->getType());
    auto *VecTy = cast<llvm::VectorType>(CondType);
    llvm::Value *ZeroVec = llvm::Constant::getNullValue(VecTy);

    CondV = Builder.CreateICmpNE(CondV, ZeroVec, "vector_cond");
    return Builder.CreateSelect(CondV, LHS, RHS, "vector_select");
  }

  // If this is a really simple expression (like x ? 4 : 5), emit this as a
  // select instead of as control flow. We can only do this if it is cheap and
  // safe to evaluate the LHS and RHS unconditionally.
  if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) &&
      isCheapEnoughToEvaluateUnconditionally(rhsExpr, CGF)) {
    llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
    llvm::Value *StepV = Builder.CreateZExtOrBitCast(CondV, CGF.Int64Ty);

    CGF.incrementProfileCounter(E, StepV);

    llvm::Value *LHS = Visit(lhsExpr);
    llvm::Value *RHS = Visit(rhsExpr);
    if (!LHS) {
      // If the conditional has void type, make sure we return a null Value*.
      assert(!RHS && "LHS and RHS types must match");
      return nullptr;
    }
    return Builder.CreateSelect(CondV, LHS, RHS, "cond");
  }

  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock,
                           CGF.getProfileCount(lhsExpr));

  CGF.EmitBlock(LHSBlock);
  CGF.incrementProfileCounter(E);
  eval.begin(CGF);
  Value *LHS = Visit(lhsExpr);
  eval.end(CGF);

  // Reacquire the block: emitting the arm may have inserted subblocks.
  LHSBlock = Builder.GetInsertBlock();
  Builder.CreateBr(ContBlock);

  CGF.EmitBlock(RHSBlock);
  eval.begin(CGF);
  Value *RHS = Visit(rhsExpr);
  eval.end(CGF);

  RHSBlock = Builder.GetInsertBlock();
  CGF.EmitBlock(ContBlock);

  // If the LHS or RHS is a throw expression, it will be legitimately null.
  if (!LHS)
    return RHS;
  if (!RHS)
    return LHS;

  // Create a PHI node for the real part.
  llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond");
  PN->addIncoming(LHS, LHSBlock);
  PN->addIncoming(RHS, RHSBlock);
  return PN;
}

/// __builtin_choose_expr: Sema already picked the live arm; just emit it.
Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
  return Visit(E->getChosenSubExpr());
}

Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  QualType Ty = VE->getType();

  if (Ty->isVariablyModifiedType())
    CGF.EmitVariablyModifiedType(Ty);

  Address ArgValue = Address::invalid();
  Address ArgPtr = CGF.EmitVAArg(VE, ArgValue);

  llvm::Type *ArgTy = ConvertType(VE->getType());

  // If EmitVAArg fails, emit an error.
  if (!ArgPtr.isValid()) {
    CGF.ErrorUnsupported(VE, "va_arg expression");
    return llvm::UndefValue::get(ArgTy);
  }

  // FIXME Volatility.
  llvm::Value *Val = Builder.CreateLoad(ArgPtr);

  // If EmitVAArg promoted the type, we must truncate it.
  if (ArgTy != Val->getType()) {
    if (ArgTy->isPointerTy() && !Val->getType()->isPointerTy())
      Val = Builder.CreateIntToPtr(Val, ArgTy);
    else
      Val = Builder.CreateTrunc(Val, ArgTy);
  }

  return Val;
}

/// Emit a block literal expression; delegates to the block-emission machinery.
Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
  return CGF.EmitBlockLiteral(block);
}

// Convert a vec3 to vec4, or vice versa.
static Value *ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF,
                                 Value *Src, unsigned NumElementsDst) {
  // The -1 mask entry yields an undef lane, so the same mask widens
  // (NumElementsDst == 4) or, truncated to three entries, narrows
  // (NumElementsDst == 3).
  static constexpr int Mask[] = {0, 1, 2, -1};
  return Builder.CreateShuffleVector(Src,
                                     llvm::makeArrayRef(Mask, NumElementsDst));
}

// Create cast instructions for converting LLVM value \p Src to LLVM type \p
// DstTy. \p Src has the same size as \p DstTy. Both are single value types
// but could be scalar or vectors of different lengths, and either can be
// pointer.
// There are 4 cases:
// 1. non-pointer -> non-pointer  : needs 1 bitcast
// 2. pointer -> pointer          : needs 1 bitcast or addrspacecast
// 3. pointer -> non-pointer
//   a) pointer -> intptr_t       : needs 1 ptrtoint
//   b) pointer -> non-intptr_t   : needs 1 ptrtoint then 1 bitcast
// 4. non-pointer -> pointer
//   a) intptr_t -> pointer       : needs 1 inttoptr
//   b) non-intptr_t -> pointer   : needs 1 bitcast then 1 inttoptr
// Note: for cases 3b and 4b two casts are required since LLVM casts do not
// allow casting directly between pointer types and non-integer non-pointer
// types.
static Value *createCastsForTypeOfSameSize(CGBuilderTy &Builder,
                                           const llvm::DataLayout &DL,
                                           Value *Src, llvm::Type *DstTy,
                                           StringRef Name = "") {
  auto SrcTy = Src->getType();

  // Case 1.
  if (!SrcTy->isPointerTy() && !DstTy->isPointerTy())
    return Builder.CreateBitCast(Src, DstTy, Name);

  // Case 2.
  if (SrcTy->isPointerTy() && DstTy->isPointerTy())
    return Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy, Name);

  // Case 3.
  if (SrcTy->isPointerTy() && !DstTy->isPointerTy()) {
    // Case 3b.
    if (!DstTy->isIntegerTy())
      Src = Builder.CreatePtrToInt(Src, DL.getIntPtrType(SrcTy));
    // Cases 3a and 3b.
    return Builder.CreateBitOrPointerCast(Src, DstTy, Name);
  }

  // Case 4b.
  if (!SrcTy->isIntegerTy())
    Src = Builder.CreateBitCast(Src, DL.getIntPtrType(DstTy));
  // Cases 4a and 4b.
  return Builder.CreateIntToPtr(Src, DstTy, Name);
}

/// Emit an OpenCL as_type() reinterpret-cast, with special handling for the
/// vec3 <-> non-vec3 cases (vec3 is stored as vec4 unless PreserveVec3Type).
Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
  Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
  llvm::Type *DstTy = ConvertType(E->getType());

  llvm::Type *SrcTy = Src->getType();
  // Element counts are 0 for non-vector (scalar) operands.
  unsigned NumElementsSrc =
      isa<llvm::VectorType>(SrcTy)
          ? cast<llvm::FixedVectorType>(SrcTy)->getNumElements()
          : 0;
  unsigned NumElementsDst =
      isa<llvm::VectorType>(DstTy)
          ? cast<llvm::FixedVectorType>(DstTy)->getNumElements()
          : 0;

  // Going from vec3 to non-vec3 is a special case and requires a shuffle
  // vector to get a vec4, then a bitcast if the target type is different.
  if (NumElementsSrc == 3 && NumElementsDst != 3) {
    Src = ConvertVec3AndVec4(Builder, CGF, Src, 4);

    if (!CGF.CGM.getCodeGenOpts().PreserveVec3Type) {
      Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
                                         DstTy);
    }

    Src->setName("astype");
    return Src;
  }

  // Going from non-vec3 to vec3 is a special case and requires a bitcast
  // to vec4 if the original type is not vec4, then a shuffle vector to
  // get a vec3.
  if (NumElementsSrc != 3 && NumElementsDst == 3) {
    if (!CGF.CGM.getCodeGenOpts().PreserveVec3Type) {
      auto *Vec4Ty = llvm::FixedVectorType::get(
          cast<llvm::VectorType>(DstTy)->getElementType(), 4);
      Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
                                         Vec4Ty);
    }

    Src = ConvertVec3AndVec4(Builder, CGF, Src, 3);
    Src->setName("astype");
    return Src;
  }

  // Same-shape reinterpretation: a single (possibly two-step) cast suffices.
  return createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(),
                                      Src, DstTy, "astype");
}

/// Emit a C11/C++11 atomic expression and extract its scalar result.
Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
  return CGF.EmitAtomicExpr(E).getScalarVal();
}

//===----------------------------------------------------------------------===//
//                         Entry Point into this File
//===----------------------------------------------------------------------===//

/// Emit the computation of the specified expression of scalar type, ignoring
/// the result.
Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
  assert(E && hasScalarEvaluationKind(E->getType()) &&
         "Invalid scalar expression to emit");

  // Thin wrapper: all the work happens in the ScalarExprEmitter visitor.
  return ScalarExprEmitter(*this, IgnoreResultAssign)
      .Visit(const_cast<Expr *>(E));
}

/// Emit a conversion from the specified type to the specified destination type,
/// both of which are LLVM scalar types.
Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy,
                                             QualType DstTy,
                                             SourceLocation Loc) {
  assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) &&
         "Invalid scalar expression to emit");
  // Thin wrapper: delegate to the visitor's conversion helper.
  return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy, Loc);
}

/// Emit a conversion from the specified complex type to the specified
/// destination type, where the destination type is an LLVM scalar type.
Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
                                                      QualType SrcTy,
                                                      QualType DstTy,
                                                      SourceLocation Loc) {
  assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&
         "Invalid complex -> scalar conversion");
  return ScalarExprEmitter(*this)
      .EmitComplexToScalarConversion(Src, SrcTy, DstTy, Loc);
}

/// Emit a pre/post ++/-- on a scalar lvalue; delegates to the visitor.
llvm::Value *CodeGenFunction::
EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                        bool isInc, bool isPre) {
  return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
}

LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
  // object->isa or (*object).isa
  // Generate code as for: *(Class*)object

  Expr *BaseExpr = E->getBase();
  Address Addr = Address::invalid();
  if (BaseExpr->isRValue()) {
    Addr = Address(EmitScalarExpr(BaseExpr), getPointerAlign());
  } else {
    Addr = EmitLValue(BaseExpr).getAddress(*this);
  }

  // Cast the address to Class*.
  Addr = Builder.CreateElementBitCast(Addr, ConvertType(E->getType()));
  return MakeAddrLValue(Addr, E->getType());
}

/// Emit the lvalue result of a compound assignment (+=, -=, ...) by
/// dispatching on the opcode to the matching EmitCompoundAssignLValue helper.
/// Non-compound opcodes are unreachable here.
LValue CodeGenFunction::EmitCompoundAssignmentLValue(
                                              const CompoundAssignOperator *E) {
  ScalarExprEmitter Scalar(*this);
  Value *Result = nullptr;
  switch (E->getOpcode()) {
#define COMPOUND_OP(Op)                                                        \
  case BO_##Op##Assign:                                                        \
    return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op,    \
                                           Result)
  COMPOUND_OP(Mul);
  COMPOUND_OP(Div);
  COMPOUND_OP(Rem);
  COMPOUND_OP(Add);
  COMPOUND_OP(Sub);
  COMPOUND_OP(Shl);
  COMPOUND_OP(Shr);
  COMPOUND_OP(And);
  COMPOUND_OP(Xor);
  COMPOUND_OP(Or);
#undef COMPOUND_OP

  case BO_PtrMemD:
  case BO_PtrMemI:
  case BO_Mul:
  case BO_Div:
  case BO_Rem:
  case BO_Add:
  case BO_Sub:
  case BO_Shl:
  case BO_Shr:
  case BO_LT:
  case BO_GT:
  case BO_LE:
  case BO_GE:
  case BO_EQ:
  case BO_NE:
  case BO_Cmp:
  case BO_And:
  case BO_Xor:
  case BO_Or:
  case BO_LAnd:
  case BO_LOr:
  case BO_Assign:
  case BO_Comma:
    llvm_unreachable("Not valid compound assignment operators");
  }

  llvm_unreachable("Unhandled compound assignment operator");
}

struct GEPOffsetAndOverflow {
  // The total (signed) byte offset for the GEP.
  llvm::Value *TotalOffset;
  // The offset overflow flag - true if the total offset overflows.
  llvm::Value *OffsetOverflows;
};

/// Evaluate given GEPVal, which is either an inbounds GEP, or a constant,
/// and compute the total offset it applies from it's base pointer BasePtr.
/// Returns offset in bytes and a boolean flag whether an overflow happened
/// during evaluation.
static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value *GEPVal,
                                                 llvm::LLVMContext &VMContext,
                                                 CodeGenModule &CGM,
                                                 CGBuilderTy &Builder) {
  const auto &DL = CGM.getDataLayout();

  // The total (signed) byte offset for the GEP.
  llvm::Value *TotalOffset = nullptr;

  // Was the GEP already reduced to a constant?
  if (isa<llvm::Constant>(GEPVal)) {
    // Compute the offset by casting both pointers to integers and subtracting:
    // GEPVal = BasePtr + ptr(Offset) <--> Offset = int(GEPVal) - int(BasePtr)
    Value *BasePtr_int =
        Builder.CreatePtrToInt(BasePtr, DL.getIntPtrType(BasePtr->getType()));
    Value *GEPVal_int =
        Builder.CreatePtrToInt(GEPVal, DL.getIntPtrType(GEPVal->getType()));
    TotalOffset = Builder.CreateSub(GEPVal_int, BasePtr_int);
    return {TotalOffset, /*OffsetOverflows=*/Builder.getFalse()};
  }

  auto *GEP = cast<llvm::GEPOperator>(GEPVal);
  assert(GEP->getPointerOperand() == BasePtr &&
) ? static_cast<void> (0) : __assert_fail ("GEP->getPointerOperand() == BasePtr && \"BasePtr must be the the base of the GEP.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 4930, __PRETTY_FUNCTION__))
4930 "BasePtr must be the the base of the GEP.")((GEP->getPointerOperand() == BasePtr && "BasePtr must be the the base of the GEP."
) ? static_cast<void> (0) : __assert_fail ("GEP->getPointerOperand() == BasePtr && \"BasePtr must be the the base of the GEP.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 4930, __PRETTY_FUNCTION__))
; 4931 assert(GEP->isInBounds() && "Expected inbounds GEP")((GEP->isInBounds() && "Expected inbounds GEP") ? static_cast
<void> (0) : __assert_fail ("GEP->isInBounds() && \"Expected inbounds GEP\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 4931, __PRETTY_FUNCTION__))
; 4932 4933 auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType()); 4934 4935 // Grab references to the signed add/mul overflow intrinsics for intptr_t. 4936 auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy); 4937 auto *SAddIntrinsic = 4938 CGM.getIntrinsic(llvm::Intrinsic::sadd_with_overflow, IntPtrTy); 4939 auto *SMulIntrinsic = 4940 CGM.getIntrinsic(llvm::Intrinsic::smul_with_overflow, IntPtrTy); 4941 4942 // The offset overflow flag - true if the total offset overflows. 4943 llvm::Value *OffsetOverflows = Builder.getFalse(); 4944 4945 /// Return the result of the given binary operation. 4946 auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS, 4947 llvm::Value *RHS) -> llvm::Value * { 4948 assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop")(((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop"
) ? static_cast<void> (0) : __assert_fail ("(Opcode == BO_Add || Opcode == BO_Mul) && \"Can't eval binop\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 4948, __PRETTY_FUNCTION__))
; 4949 4950 // If the operands are constants, return a constant result. 4951 if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS)) { 4952 if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS)) { 4953 llvm::APInt N; 4954 bool HasOverflow = mayHaveIntegerOverflow(LHSCI, RHSCI, Opcode, 4955 /*Signed=*/true, N); 4956 if (HasOverflow) 4957 OffsetOverflows = Builder.getTrue(); 4958 return llvm::ConstantInt::get(VMContext, N); 4959 } 4960 } 4961 4962 // Otherwise, compute the result with checked arithmetic. 4963 auto *ResultAndOverflow = Builder.CreateCall( 4964 (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, {LHS, RHS}); 4965 OffsetOverflows = Builder.CreateOr( 4966 Builder.CreateExtractValue(ResultAndOverflow, 1), OffsetOverflows); 4967 return Builder.CreateExtractValue(ResultAndOverflow, 0); 4968 }; 4969 4970 // Determine the total byte offset by looking at each GEP operand. 4971 for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP); 4972 GTI != GTE; ++GTI) { 4973 llvm::Value *LocalOffset; 4974 auto *Index = GTI.getOperand(); 4975 // Compute the local offset contributed by this indexing step: 4976 if (auto *STy = GTI.getStructTypeOrNull()) { 4977 // For struct indexing, the local offset is the byte position of the 4978 // specified field. 4979 unsigned FieldNo = cast<llvm::ConstantInt>(Index)->getZExtValue(); 4980 LocalOffset = llvm::ConstantInt::get( 4981 IntPtrTy, DL.getStructLayout(STy)->getElementOffset(FieldNo)); 4982 } else { 4983 // Otherwise this is array-like indexing. The local offset is the index 4984 // multiplied by the element size. 4985 auto *ElementSize = llvm::ConstantInt::get( 4986 IntPtrTy, DL.getTypeAllocSize(GTI.getIndexedType())); 4987 auto *IndexS = Builder.CreateIntCast(Index, IntPtrTy, /*isSigned=*/true); 4988 LocalOffset = eval(BO_Mul, ElementSize, IndexS); 4989 } 4990 4991 // If this is the first offset, set it as the total offset. Otherwise, add 4992 // the local offset into the running total. 
4993 if (!TotalOffset || TotalOffset == Zero) 4994 TotalOffset = LocalOffset; 4995 else 4996 TotalOffset = eval(BO_Add, TotalOffset, LocalOffset); 4997 } 4998 4999 return {TotalOffset, OffsetOverflows}; 5000} 5001 5002Value * 5003CodeGenFunction::EmitCheckedInBoundsGEP(Value *Ptr, ArrayRef<Value *> IdxList, 5004 bool SignedIndices, bool IsSubtraction, 5005 SourceLocation Loc, const Twine &Name) { 5006 Value *GEPVal = Builder.CreateInBoundsGEP(Ptr, IdxList, Name); 5007 5008 // If the pointer overflow sanitizer isn't enabled, do nothing. 5009 if (!SanOpts.has(SanitizerKind::PointerOverflow))
26
Calling 'SanitizerSet::has'
29
Returning from 'SanitizerSet::has'
30
Assuming the condition is false
31
Taking false branch
5010 return GEPVal; 5011 5012 llvm::Type *PtrTy = Ptr->getType(); 5013 5014 // Perform nullptr-and-offset check unless the nullptr is defined. 5015 bool PerformNullCheck = !NullPointerIsDefined(
32
Assuming the condition is false
5016 Builder.GetInsertBlock()->getParent(), PtrTy->getPointerAddressSpace()); 5017 // Check for overflows unless the GEP got constant-folded, 5018 // and only in the default address space 5019 bool PerformOverflowCheck = 5020 !isa<llvm::Constant>(GEPVal) && PtrTy->getPointerAddressSpace() == 0;
33
Assuming 'GEPVal' is not a 'Constant'
34
Assuming the condition is true
5021 5022 if (!(PerformNullCheck
34.1
'PerformNullCheck' is false
34.1
'PerformNullCheck' is false
34.1
'PerformNullCheck' is false
34.1
'PerformNullCheck' is false
34.1
'PerformNullCheck' is false
|| PerformOverflowCheck))
35
Taking false branch
5023 return GEPVal; 5024 5025 const auto &DL = CGM.getDataLayout(); 5026 5027 SanitizerScope SanScope(this); 5028 llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy); 5029 5030 GEPOffsetAndOverflow EvaluatedGEP = 5031 EmitGEPOffsetInBytes(Ptr, GEPVal, getLLVMContext(), CGM, Builder);
36
Null pointer value stored to 'EvaluatedGEP.TotalOffset'
5032 5033 assert((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) ||(((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) || EvaluatedGEP
.OffsetOverflows == Builder.getFalse()) && "If the offset got constant-folded, we don't expect that there was an "
"overflow.") ? static_cast<void> (0) : __assert_fail (
"(!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) || EvaluatedGEP.OffsetOverflows == Builder.getFalse()) && \"If the offset got constant-folded, we don't expect that there was an \" \"overflow.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 5036, __PRETTY_FUNCTION__))
37
Assuming field 'TotalOffset' is not a 'Constant'
38
'?' condition is true
5034 EvaluatedGEP.OffsetOverflows == Builder.getFalse()) &&(((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) || EvaluatedGEP
.OffsetOverflows == Builder.getFalse()) && "If the offset got constant-folded, we don't expect that there was an "
"overflow.") ? static_cast<void> (0) : __assert_fail (
"(!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) || EvaluatedGEP.OffsetOverflows == Builder.getFalse()) && \"If the offset got constant-folded, we don't expect that there was an \" \"overflow.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 5036, __PRETTY_FUNCTION__))
5035 "If the offset got constant-folded, we don't expect that there was an "(((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) || EvaluatedGEP
.OffsetOverflows == Builder.getFalse()) && "If the offset got constant-folded, we don't expect that there was an "
"overflow.") ? static_cast<void> (0) : __assert_fail (
"(!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) || EvaluatedGEP.OffsetOverflows == Builder.getFalse()) && \"If the offset got constant-folded, we don't expect that there was an \" \"overflow.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 5036, __PRETTY_FUNCTION__))
5036 "overflow.")(((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) || EvaluatedGEP
.OffsetOverflows == Builder.getFalse()) && "If the offset got constant-folded, we don't expect that there was an "
"overflow.") ? static_cast<void> (0) : __assert_fail (
"(!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) || EvaluatedGEP.OffsetOverflows == Builder.getFalse()) && \"If the offset got constant-folded, we don't expect that there was an \" \"overflow.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 5036, __PRETTY_FUNCTION__))
; 5037 5038 auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy); 5039 5040 // Common case: if the total offset is zero, and we are using C++ semantics, 5041 // where nullptr+0 is defined, don't emit a check. 5042 if (EvaluatedGEP.TotalOffset == Zero && CGM.getLangOpts().CPlusPlus)
39
Assuming 'Zero' is not equal to field 'TotalOffset'
5043 return GEPVal; 5044 5045 // Now that we've computed the total offset, add it to the base pointer (with 5046 // wrapping semantics). 5047 auto *IntPtr = Builder.CreatePtrToInt(Ptr, IntPtrTy); 5048 auto *ComputedGEP = Builder.CreateAdd(IntPtr, EvaluatedGEP.TotalOffset); 5049 5050 llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks; 5051 5052 if (PerformNullCheck
39.1
'PerformNullCheck' is false
39.1
'PerformNullCheck' is false
39.1
'PerformNullCheck' is false
39.1
'PerformNullCheck' is false
39.1
'PerformNullCheck' is false
) {
40
Taking false branch
5053 // In C++, if the base pointer evaluates to a null pointer value, 5054 // the only valid pointer this inbounds GEP can produce is also 5055 // a null pointer, so the offset must also evaluate to zero. 5056 // Likewise, if we have non-zero base pointer, we can not get null pointer 5057 // as a result, so the offset can not be -intptr_t(BasePtr). 5058 // In other words, both pointers are either null, or both are non-null, 5059 // or the behaviour is undefined. 5060 // 5061 // C, however, is more strict in this regard, and gives more 5062 // optimization opportunities: in C, additionally, nullptr+0 is undefined. 5063 // So both the input to the 'gep inbounds' AND the output must not be null. 5064 auto *BaseIsNotNullptr = Builder.CreateIsNotNull(Ptr); 5065 auto *ResultIsNotNullptr = Builder.CreateIsNotNull(ComputedGEP); 5066 auto *Valid = 5067 CGM.getLangOpts().CPlusPlus 5068 ? Builder.CreateICmpEQ(BaseIsNotNullptr, ResultIsNotNullptr) 5069 : Builder.CreateAnd(BaseIsNotNullptr, ResultIsNotNullptr); 5070 Checks.emplace_back(Valid, SanitizerKind::PointerOverflow); 5071 } 5072 5073 if (PerformOverflowCheck
40.1
'PerformOverflowCheck' is true
40.1
'PerformOverflowCheck' is true
40.1
'PerformOverflowCheck' is true
40.1
'PerformOverflowCheck' is true
40.1
'PerformOverflowCheck' is true
) {
41
Taking true branch
5074 // The GEP is valid if: 5075 // 1) The total offset doesn't overflow, and 5076 // 2) The sign of the difference between the computed address and the base 5077 // pointer matches the sign of the total offset. 5078 llvm::Value *ValidGEP; 5079 auto *NoOffsetOverflow = Builder.CreateNot(EvaluatedGEP.OffsetOverflows); 5080 if (SignedIndices) {
42
Assuming 'SignedIndices' is true
43
Taking true branch
5081 // GEP is computed as `unsigned base + signed offset`, therefore: 5082 // * If offset was positive, then the computed pointer can not be 5083 // [unsigned] less than the base pointer, unless it overflowed. 5084 // * If offset was negative, then the computed pointer can not be 5085 // [unsigned] greater than the bas pointere, unless it overflowed. 5086 auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr); 5087 auto *PosOrZeroOffset = 5088 Builder.CreateICmpSGE(EvaluatedGEP.TotalOffset, Zero);
44
Passing null pointer value via 1st parameter 'LHS'
45
Calling 'IRBuilderBase::CreateICmpSGE'
5089 llvm::Value *NegValid = Builder.CreateICmpULT(ComputedGEP, IntPtr); 5090 ValidGEP = 5091 Builder.CreateSelect(PosOrZeroOffset, PosOrZeroValid, NegValid); 5092 } else if (!IsSubtraction) { 5093 // GEP is computed as `unsigned base + unsigned offset`, therefore the 5094 // computed pointer can not be [unsigned] less than base pointer, 5095 // unless there was an overflow. 5096 // Equivalent to `@llvm.uadd.with.overflow(%base, %offset)`. 5097 ValidGEP = Builder.CreateICmpUGE(ComputedGEP, IntPtr); 5098 } else { 5099 // GEP is computed as `unsigned base - unsigned offset`, therefore the 5100 // computed pointer can not be [unsigned] greater than base pointer, 5101 // unless there was an overflow. 5102 // Equivalent to `@llvm.usub.with.overflow(%base, sub(0, %offset))`. 5103 ValidGEP = Builder.CreateICmpULE(ComputedGEP, IntPtr); 5104 } 5105 ValidGEP = Builder.CreateAnd(ValidGEP, NoOffsetOverflow); 5106 Checks.emplace_back(ValidGEP, SanitizerKind::PointerOverflow); 5107 } 5108 5109 assert(!Checks.empty() && "Should have produced some checks.")((!Checks.empty() && "Should have produced some checks."
) ? static_cast<void> (0) : __assert_fail ("!Checks.empty() && \"Should have produced some checks.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/lib/CodeGen/CGExprScalar.cpp"
, 5109, __PRETTY_FUNCTION__))
; 5110 5111 llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)}; 5112 // Pass the computed GEP to the runtime to avoid emitting poisoned arguments. 5113 llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP}; 5114 EmitCheck(Checks, SanitizerHandler::PointerOverflow, StaticArgs, DynamicArgs); 5115 5116 return GEPVal; 5117}

/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/include/clang/Basic/LangOptions.h

1//===- LangOptions.h - C Language Family Language Options -------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// Defines the clang::LangOptions interface.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_CLANG_BASIC_LANGOPTIONS_H
15#define LLVM_CLANG_BASIC_LANGOPTIONS_H
16
17#include "clang/Basic/CommentOptions.h"
18#include "clang/Basic/LLVM.h"
19#include "clang/Basic/LangStandard.h"
20#include "clang/Basic/ObjCRuntime.h"
21#include "clang/Basic/Sanitizers.h"
22#include "clang/Basic/Visibility.h"
23#include "llvm/ADT/FloatingPointMode.h"
24#include "llvm/ADT/StringRef.h"
25#include "llvm/ADT/Triple.h"
26#include "llvm/MC/MCTargetOptions.h"
27#include <string>
28#include <vector>
29
30namespace clang {
31
32/// Bitfields of LangOptions, split out from LangOptions in order to ensure that
33/// this large collection of bitfields is a trivial class type.
34class LangOptionsBase {
35 friend class CompilerInvocation;
36
37public:
38 // Define simple language options (with no accessors).
39#define LANGOPT(Name, Bits, Default, Description) unsigned Name : Bits;
40#define ENUM_LANGOPT(Name, Type, Bits, Default, Description)
41#include "clang/Basic/LangOptions.def"
42
43protected:
44 // Define language options of enumeration type. These are private, and will
45 // have accessors (below).
46#define LANGOPT(Name, Bits, Default, Description)
47#define ENUM_LANGOPT(Name, Type, Bits, Default, Description) \
48 unsigned Name : Bits;
49#include "clang/Basic/LangOptions.def"
50};
51
52/// In the Microsoft ABI, this controls the placement of virtual displacement
53/// members used to implement virtual inheritance.
54enum class MSVtorDispMode { Never, ForVBaseOverride, ForVFTable };
55
56/// Keeps track of the various options that can be
57/// enabled, which controls the dialect of C or C++ that is accepted.
58class LangOptions : public LangOptionsBase {
59public:
60 using Visibility = clang::Visibility;
61 using RoundingMode = llvm::RoundingMode;
62
63 enum GCMode { NonGC, GCOnly, HybridGC };
64 enum StackProtectorMode { SSPOff, SSPOn, SSPStrong, SSPReq };
65
66 // Automatic variables live on the stack, and when trivial they're usually
67 // uninitialized because it's undefined behavior to use them without
68 // initializing them.
69 enum class TrivialAutoVarInitKind { Uninitialized, Zero, Pattern };
70
71 enum SignedOverflowBehaviorTy {
72 // Default C standard behavior.
73 SOB_Undefined,
74
75 // -fwrapv
76 SOB_Defined,
77
78 // -ftrapv
79 SOB_Trapping
80 };
81
82 // FIXME: Unify with TUKind.
83 enum CompilingModuleKind {
84 /// Not compiling a module interface at all.
85 CMK_None,
86
87 /// Compiling a module from a module map.
88 CMK_ModuleMap,
89
90 /// Compiling a module from a list of header files.
91 CMK_HeaderModule,
92
93 /// Compiling a C++ modules TS module interface unit.
94 CMK_ModuleInterface,
95 };
96
97 enum PragmaMSPointersToMembersKind {
98 PPTMK_BestCase,
99 PPTMK_FullGeneralitySingleInheritance,
100 PPTMK_FullGeneralityMultipleInheritance,
101 PPTMK_FullGeneralityVirtualInheritance
102 };
103
104 using MSVtorDispMode = clang::MSVtorDispMode;
105
106 enum DefaultCallingConvention {
107 DCC_None,
108 DCC_CDecl,
109 DCC_FastCall,
110 DCC_StdCall,
111 DCC_VectorCall,
112 DCC_RegCall
113 };
114
115 enum AddrSpaceMapMangling { ASMM_Target, ASMM_On, ASMM_Off };
116
117 // Corresponds to _MSC_VER
118 enum MSVCMajorVersion {
119 MSVC2010 = 1600,
120 MSVC2012 = 1700,
121 MSVC2013 = 1800,
122 MSVC2015 = 1900,
123 MSVC2017 = 1910,
124 MSVC2017_5 = 1912,
125 MSVC2017_7 = 1914,
126 MSVC2019 = 1920,
127 MSVC2019_8 = 1928,
128 };
129
130 enum SYCLMajorVersion {
131 SYCL_None,
132 SYCL_2017,
133 };
134
135 /// Clang versions with different platform ABI conformance.
136 enum class ClangABI {
137 /// Attempt to be ABI-compatible with code generated by Clang 3.8.x
138 /// (SVN r257626). This causes <1 x long long> to be passed in an
139 /// integer register instead of an SSE register on x64_64.
140 Ver3_8,
141
142 /// Attempt to be ABI-compatible with code generated by Clang 4.0.x
143 /// (SVN r291814). This causes move operations to be ignored when
144 /// determining whether a class type can be passed or returned directly.
145 Ver4,
146
147 /// Attempt to be ABI-compatible with code generated by Clang 6.0.x
148 /// (SVN r321711). This causes determination of whether a type is
149 /// standard-layout to ignore collisions between empty base classes
150 /// and between base classes and member subobjects, which affects
151 /// whether we reuse base class tail padding in some ABIs.
152 Ver6,
153
154 /// Attempt to be ABI-compatible with code generated by Clang 7.0.x
155 /// (SVN r338536). This causes alignof (C++) and _Alignof (C11) to be
156 /// compatible with __alignof (i.e., return the preferred alignment)
157 /// rather than returning the required alignment.
158 Ver7,
159
160 /// Attempt to be ABI-compatible with code generated by Clang 9.0.x
161 /// (SVN r351319). This causes vectors of __int128 to be passed in memory
162 /// instead of passing in multiple scalar registers on x86_64 on Linux and
163 /// NetBSD.
164 Ver9,
165
166 /// Attempt to be ABI-compatible with code generated by Clang 11.0.x
167 /// (git 2e10b7a39b93). This causes clang to pass unions with a 256-bit
168 /// vector member on the stack instead of using registers, to not properly
169 /// mangle substitutions for template names in some cases, and to mangle
170 /// declaration template arguments without a cast to the parameter type
171 /// even when that can lead to mangling collisions.
172 Ver11,
173
174 /// Conform to the underlying platform's C and C++ ABIs as closely
175 /// as we can.
176 Latest
177 };
178
179 enum class CoreFoundationABI {
180 /// No interoperability ABI has been specified
181 Unspecified,
182 /// CoreFoundation does not have any language interoperability
183 Standalone,
184 /// Interoperability with the ObjectiveC runtime
185 ObjectiveC,
186 /// Interoperability with the latest known version of the Swift runtime
187 Swift,
188 /// Interoperability with the Swift 5.0 runtime
189 Swift5_0,
190 /// Interoperability with the Swift 4.2 runtime
191 Swift4_2,
192 /// Interoperability with the Swift 4.1 runtime
193 Swift4_1,
194 };
195
196 enum FPModeKind {
197 // Disable the floating point pragma
198 FPM_Off,
199
200 // Enable the floating point pragma
201 FPM_On,
202
203 // Aggressively fuse FP ops (E.g. FMA) disregarding pragmas.
204 FPM_Fast,
205
206 // Aggressively fuse FP ops and honor pragmas.
207 FPM_FastHonorPragmas
208 };
209
210 /// Alias for RoundingMode::NearestTiesToEven.
211 static constexpr unsigned FPR_ToNearest =
212 static_cast<unsigned>(llvm::RoundingMode::NearestTiesToEven);
213
214 /// Possible floating point exception behavior.
215 enum FPExceptionModeKind {
216 /// Assume that floating-point exceptions are masked.
217 FPE_Ignore,
218 /// Transformations do not cause new exceptions but may hide some.
219 FPE_MayTrap,
220 /// Strictly preserve the floating-point exception semantics.
221 FPE_Strict
222 };
223
224 /// Possible exception handling behavior.
225 using ExceptionHandlingKind = llvm::ExceptionHandling;
226
227 enum class LaxVectorConversionKind {
228 /// Permit no implicit vector bitcasts.
229 None,
230 /// Permit vector bitcasts between integer vectors with different numbers
231 /// of elements but the same total bit-width.
232 Integer,
233 /// Permit vector bitcasts between all vectors with the same total
234 /// bit-width.
235 All,
236 };
237
238 enum class SignReturnAddressScopeKind {
239 /// No signing for any function.
240 None,
241 /// Sign the return address of functions that spill LR.
242 NonLeaf,
243 /// Sign the return address of all functions,
244 All
245 };
246
247 enum class SignReturnAddressKeyKind {
248 /// Return address signing uses APIA key.
249 AKey,
250 /// Return address signing uses APIB key.
251 BKey
252 };
253
254 enum class ThreadModelKind {
255 /// POSIX Threads.
256 POSIX,
257 /// Single Threaded Environment.
258 Single
259 };
260
261public:
262 /// The used language standard.
263 LangStandard::Kind LangStd;
264
265 /// Set of enabled sanitizers.
266 SanitizerSet Sanitize;
267
268 /// Paths to files specifying which objects
269 /// (files, functions, variables) should not be instrumented.
270 std::vector<std::string> NoSanitizeFiles;
271
272 /// Paths to the XRay "always instrument" files specifying which
273 /// objects (files, functions, variables) should be imbued with the XRay
274 /// "always instrument" attribute.
275 /// WARNING: This is a deprecated field and will go away in the future.
276 std::vector<std::string> XRayAlwaysInstrumentFiles;
277
278 /// Paths to the XRay "never instrument" files specifying which
279 /// objects (files, functions, variables) should be imbued with the XRay
280 /// "never instrument" attribute.
281 /// WARNING: This is a deprecated field and will go away in the future.
282 std::vector<std::string> XRayNeverInstrumentFiles;
283
284 /// Paths to the XRay attribute list files, specifying which objects
285 /// (files, functions, variables) should be imbued with the appropriate XRay
286 /// attribute(s).
287 std::vector<std::string> XRayAttrListFiles;
288
289 /// Paths to special case list files specifying which entities
290 /// (files, functions) should or should not be instrumented.
291 std::vector<std::string> ProfileListFiles;
292
293 clang::ObjCRuntime ObjCRuntime;
294
295 CoreFoundationABI CFRuntime = CoreFoundationABI::Unspecified;
296
297 std::string ObjCConstantStringClass;
298
299 /// The name of the handler function to be called when -ftrapv is
300 /// specified.
301 ///
302 /// If none is specified, abort (GCC-compatible behaviour).
303 std::string OverflowHandler;
304
305 /// The module currently being compiled as specified by -fmodule-name.
306 std::string ModuleName;
307
308 /// The name of the current module, of which the main source file
309 /// is a part. If CompilingModule is set, we are compiling the interface
310 /// of this module, otherwise we are compiling an implementation file of
311 /// it. This starts as ModuleName in case -fmodule-name is provided and
312 /// changes during compilation to reflect the current module.
313 std::string CurrentModule;
314
315 /// The names of any features to enable in module 'requires' decls
316 /// in addition to the hard-coded list in Module.cpp and the target features.
317 ///
318 /// This list is sorted.
319 std::vector<std::string> ModuleFeatures;
320
321 /// Options for parsing comments.
322 CommentOptions CommentOpts;
323
324 /// A list of all -fno-builtin-* function names (e.g., memset).
325 std::vector<std::string> NoBuiltinFuncs;
326
327 /// Triples of the OpenMP targets that the host code codegen should
328 /// take into account in order to generate accurate offloading descriptors.
329 std::vector<llvm::Triple> OMPTargetTriples;
330
331 /// Name of the IR file that contains the result of the OpenMP target
332 /// host code generation.
333 std::string OMPHostIRFile;
334
335 /// The user provided compilation unit ID, if non-empty. This is used to
336 /// externalize static variables which is needed to support accessing static
337 /// device variables in host code for single source offloading languages
338 /// like CUDA/HIP.
339 std::string CUID;
340
341 /// Indicates whether the front-end is explicitly told that the
342 /// input is a header file (i.e. -x c-header).
343 bool IsHeaderFile = false;
344
345 LangOptions();
346
347 // Define accessors/mutators for language options of enumeration type.
348#define LANGOPT(Name, Bits, Default, Description)
349#define ENUM_LANGOPT(Name, Type, Bits, Default, Description) \
350 Type get##Name() const { return static_cast<Type>(Name); } \
351 void set##Name(Type Value) { Name = static_cast<unsigned>(Value); }
352#include "clang/Basic/LangOptions.def"
353
354 /// Are we compiling a module interface (.cppm or module map)?
355 bool isCompilingModule() const {
356 return getCompilingModule() != CMK_None;
357 }
358
359 /// Do we need to track the owning module for a local declaration?
360 bool trackLocalOwningModule() const {
361 return isCompilingModule() || ModulesLocalVisibility;
362 }
363
364 bool isSignedOverflowDefined() const {
365 return getSignedOverflowBehavior() == SOB_Defined;
21
Assuming the condition is false
22
Returning zero, which participates in a condition later
366 }
367
368 bool isSubscriptPointerArithmetic() const {
369 return ObjCRuntime.isSubscriptPointerArithmetic() &&
370 !ObjCSubscriptingLegacyRuntime;
371 }
372
373 bool isCompatibleWithMSVC(MSVCMajorVersion MajorVersion) const {
374 return MSCompatibilityVersion >= MajorVersion * 100000U;
375 }
376
377 /// Reset all of the options that are not considered when building a
378 /// module.
379 void resetNonModularOptions();
380
381 /// Is this a libc/libm function that is no longer recognized as a
382 /// builtin because a -fno-builtin-* option has been specified?
383 bool isNoBuiltinFunc(StringRef Name) const;
384
385 /// True if any ObjC types may have non-trivial lifetime qualifiers.
386 bool allowsNonTrivialObjCLifetimeQualifiers() const {
387 return ObjCAutoRefCount || ObjCWeak;
388 }
389
390 bool assumeFunctionsAreConvergent() const {
391 return ConvergentFunctions;
392 }
393
394 /// Return the OpenCL C or C++ version as a VersionTuple.
395 VersionTuple getOpenCLVersionTuple() const;
396
397 /// Check if return address signing is enabled.
398 bool hasSignReturnAddress() const {
399 return getSignReturnAddressScope() != SignReturnAddressScopeKind::None;
400 }
401
402 /// Check if return address signing uses AKey.
403 bool isSignReturnAddressWithAKey() const {
404 return getSignReturnAddressKey() == SignReturnAddressKeyKind::AKey;
405 }
406
407 /// Check if leaf functions are also signed.
408 bool isSignReturnAddressScopeAll() const {
409 return getSignReturnAddressScope() == SignReturnAddressScopeKind::All;
410 }
411
412 bool hasSjLjExceptions() const {
413 return getExceptionHandling() == llvm::ExceptionHandling::SjLj;
414 }
415
416 bool hasSEHExceptions() const {
417 return getExceptionHandling() == llvm::ExceptionHandling::WinEH;
418 }
419
420 bool hasDWARFExceptions() const {
421 return getExceptionHandling() == llvm::ExceptionHandling::DwarfCFI;
422 }
423
424 bool hasWasmExceptions() const {
425 return getExceptionHandling() == llvm::ExceptionHandling::Wasm;
426 }
427};
428
429/// Floating point control options
430class FPOptionsOverride;
431class FPOptions {
432public:
433 // We start by defining the layout.
434 using storage_type = uint16_t;
435
436 using RoundingMode = llvm::RoundingMode;
437
438 static constexpr unsigned StorageBitSize = 8 * sizeof(storage_type);
439
440 // Define a fake option named "First" so that we have a PREVIOUS even for the
441 // real first option.
442 static constexpr storage_type FirstShift = 0, FirstWidth = 0;
443#define OPTION(NAME, TYPE, WIDTH, PREVIOUS) \
444 static constexpr storage_type NAME##Shift = \
445 PREVIOUS##Shift + PREVIOUS##Width; \
446 static constexpr storage_type NAME##Width = WIDTH; \
447 static constexpr storage_type NAME##Mask = ((1 << NAME##Width) - 1) \
448 << NAME##Shift;
449#include "clang/Basic/FPOptions.def"
450
451 static constexpr storage_type TotalWidth = 0
452#define OPTION(NAME, TYPE, WIDTH, PREVIOUS) +WIDTH
453#include "clang/Basic/FPOptions.def"
454 ;
455 static_assert(TotalWidth <= StorageBitSize, "Too short type for FPOptions");
456
457private:
458 storage_type Value;
459
460public:
461 FPOptions() : Value(0) {
462 setFPContractMode(LangOptions::FPM_Off);
463 setRoundingMode(static_cast<RoundingMode>(LangOptions::FPR_ToNearest));
464 setFPExceptionMode(LangOptions::FPE_Ignore);
465 }
466 explicit FPOptions(const LangOptions &LO) {
467 Value = 0;
468 // The language fp contract option FPM_FastHonorPragmas has the same effect
469 // as FPM_Fast in frontend. For simplicity, use FPM_Fast uniformly in
470 // frontend.
471 auto LangOptContractMode = LO.getDefaultFPContractMode();
472 if (LangOptContractMode == LangOptions::FPM_FastHonorPragmas)
473 LangOptContractMode = LangOptions::FPM_Fast;
474 setFPContractMode(LangOptContractMode);
475 setRoundingMode(LO.getFPRoundingMode());
476 setFPExceptionMode(LO.getFPExceptionMode());
477 setAllowFPReassociate(LO.AllowFPReassoc);
478 setNoHonorNaNs(LO.NoHonorNaNs);
479 setNoHonorInfs(LO.NoHonorInfs);
480 setNoSignedZero(LO.NoSignedZero);
481 setAllowReciprocal(LO.AllowRecip);
482 setAllowApproxFunc(LO.ApproxFunc);
483 if (getFPContractMode() == LangOptions::FPM_On &&
484 getRoundingMode() == llvm::RoundingMode::Dynamic &&
485 getFPExceptionMode() == LangOptions::FPE_Strict)
486 // If the FP settings are set to the "strict" model, then
487 // FENV access is set to true. (ffp-model=strict)
488 setAllowFEnvAccess(true);
489 else
490 setAllowFEnvAccess(LangOptions::FPM_Off);
491 }
492
493 bool allowFPContractWithinStatement() const {
494 return getFPContractMode() == LangOptions::FPM_On;
495 }
496 void setAllowFPContractWithinStatement() {
497 setFPContractMode(LangOptions::FPM_On);
498 }
499
500 bool allowFPContractAcrossStatement() const {
501 return getFPContractMode() == LangOptions::FPM_Fast;
502 }
503 void setAllowFPContractAcrossStatement() {
504 setFPContractMode(LangOptions::FPM_Fast);
505 }
506
507 bool isFPConstrained() const {
508 return getRoundingMode() != llvm::RoundingMode::NearestTiesToEven ||
509 getFPExceptionMode() != LangOptions::FPE_Ignore ||
510 getAllowFEnvAccess();
511 }
512
513 bool operator==(FPOptions other) const { return Value == other.Value; }
514
515 /// Return the default value of FPOptions that's used when trailing
516 /// storage isn't required.
517 static FPOptions defaultWithoutTrailingStorage(const LangOptions &LO);
518
519 storage_type getAsOpaqueInt() const { return Value; }
520 static FPOptions getFromOpaqueInt(storage_type Value) {
521 FPOptions Opts;
522 Opts.Value = Value;
523 return Opts;
524 }
525
526 // We can define most of the accessors automatically:
527#define OPTION(NAME, TYPE, WIDTH, PREVIOUS) \
528 TYPE get##NAME() const { \
529 return static_cast<TYPE>((Value & NAME##Mask) >> NAME##Shift); \
530 } \
531 void set##NAME(TYPE value) { \
532 Value = (Value & ~NAME##Mask) | (storage_type(value) << NAME##Shift); \
533 }
534#include "clang/Basic/FPOptions.def"
535 LLVM_DUMP_METHOD__attribute__((noinline)) __attribute__((__used__)) void dump();
536};
537
538/// Represents difference between two FPOptions values.
539///
540/// The effect of language constructs changing the set of floating point options
541/// is usually a change of some FP properties while leaving others intact. This
542/// class describes such changes by keeping information about what FP options
543/// are overridden.
544///
545/// The integral set of FP options, described by the class FPOptions, may be
546/// represented as a default FP option set, defined by language standard and
547/// command line options, with the overrides introduced by pragmas.
548///
549/// The is implemented as a value of the new FPOptions plus a mask showing which
550/// fields are actually set in it.
551class FPOptionsOverride {
552 FPOptions Options = FPOptions::getFromOpaqueInt(0);
553 FPOptions::storage_type OverrideMask = 0;
554
555public:
556 using RoundingMode = llvm::RoundingMode;
557
558 /// The type suitable for storing values of FPOptionsOverride. Must be twice
559 /// as wide as bit size of FPOption.
560 using storage_type = uint32_t;
561 static_assert(sizeof(storage_type) >= 2 * sizeof(FPOptions::storage_type),
562 "Too short type for FPOptionsOverride");
563
564 /// Bit mask selecting bits of OverrideMask in serialized representation of
565 /// FPOptionsOverride.
566 static constexpr storage_type OverrideMaskBits =
567 (static_cast<storage_type>(1) << FPOptions::StorageBitSize) - 1;
568
569 FPOptionsOverride() {}
570 FPOptionsOverride(const LangOptions &LO)
571 : Options(LO), OverrideMask(OverrideMaskBits) {}
572 FPOptionsOverride(FPOptions FPO)
573 : Options(FPO), OverrideMask(OverrideMaskBits) {}
574
575 bool requiresTrailingStorage() const { return OverrideMask != 0; }
576
577 void setAllowFPContractWithinStatement() {
578 setFPContractModeOverride(LangOptions::FPM_On);
579 }
580
581 void setAllowFPContractAcrossStatement() {
582 setFPContractModeOverride(LangOptions::FPM_Fast);
583 }
584
585 void setDisallowFPContract() {
586 setFPContractModeOverride(LangOptions::FPM_Off);
587 }
588
589 void setFPPreciseEnabled(bool Value) {
590 setAllowFPReassociateOverride(!Value);
591 setNoHonorNaNsOverride(!Value);
592 setNoHonorInfsOverride(!Value);
593 setNoSignedZeroOverride(!Value);
594 setAllowReciprocalOverride(!Value);
595 setAllowApproxFuncOverride(!Value);
596 if (Value)
597 /* Precise mode implies fp_contract=on and disables ffast-math */
598 setAllowFPContractWithinStatement();
599 else
600 /* Precise mode disabled sets fp_contract=fast and enables ffast-math */
601 setAllowFPContractAcrossStatement();
602 }
603
604 storage_type getAsOpaqueInt() const {
605 return (static_cast<storage_type>(Options.getAsOpaqueInt())
606 << FPOptions::StorageBitSize) |
607 OverrideMask;
608 }
609 static FPOptionsOverride getFromOpaqueInt(storage_type I) {
610 FPOptionsOverride Opts;
611 Opts.OverrideMask = I & OverrideMaskBits;
612 Opts.Options = FPOptions::getFromOpaqueInt(I >> FPOptions::StorageBitSize);
613 return Opts;
614 }
615
616 FPOptions applyOverrides(FPOptions Base) {
617 FPOptions Result =
618 FPOptions::getFromOpaqueInt((Base.getAsOpaqueInt() & ~OverrideMask) |
619 (Options.getAsOpaqueInt() & OverrideMask));
620 return Result;
621 }
622
623 FPOptions applyOverrides(const LangOptions &LO) {
624 return applyOverrides(FPOptions(LO));
625 }
626
627 bool operator==(FPOptionsOverride other) const {
628 return Options == other.Options && OverrideMask == other.OverrideMask;
629 }
630 bool operator!=(FPOptionsOverride other) const { return !(*this == other); }
631
632#define OPTION(NAME, TYPE, WIDTH, PREVIOUS) \
633 bool has##NAME##Override() const { \
634 return OverrideMask & FPOptions::NAME##Mask; \
635 } \
636 TYPE get##NAME##Override() const { \
637 assert(has##NAME##Override())((has##NAME##Override()) ? static_cast<void> (0) : __assert_fail
("has##NAME##Override()", "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/include/clang/Basic/LangOptions.h"
, 637, __PRETTY_FUNCTION__))
; \
638 return Options.get##NAME(); \
639 } \
640 void clear##NAME##Override() { \
641 /* Clear the actual value so that we don't have spurious differences when \
642 * testing equality. */ \
643 Options.set##NAME(TYPE(0)); \
644 OverrideMask &= ~FPOptions::NAME##Mask; \
645 } \
646 void set##NAME##Override(TYPE value) { \
647 Options.set##NAME(value); \
648 OverrideMask |= FPOptions::NAME##Mask; \
649 }
650#include "clang/Basic/FPOptions.def"
651 LLVM_DUMP_METHOD__attribute__((noinline)) __attribute__((__used__)) void dump();
652};
653
654/// Describes the kind of translation unit being processed.
655enum TranslationUnitKind {
656 /// The translation unit is a complete translation unit.
657 TU_Complete,
658
659 /// The translation unit is a prefix to a translation unit, and is
660 /// not complete.
661 TU_Prefix,
662
663 /// The translation unit is a module.
664 TU_Module
665};
666
667} // namespace clang
668
669#endif // LLVM_CLANG_BASIC_LANGOPTIONS_H

/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/include/clang/Basic/Sanitizers.h

1//===- Sanitizers.h - C Language Family Language Options --------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// Defines the clang::SanitizerKind enum.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_CLANG_BASIC_SANITIZERS_H
15#define LLVM_CLANG_BASIC_SANITIZERS_H
16
17#include "clang/Basic/LLVM.h"
18#include "llvm/ADT/StringRef.h"
19#include "llvm/Support/MathExtras.h"
20#include "llvm/Transforms/Instrumentation/AddressSanitizerOptions.h"
21#include <cassert>
22#include <cstdint>
23
24namespace llvm {
25class hash_code;
26}
27
28namespace clang {
29
class SanitizerMask {
  // NOTE: this class assumes kNumElem == 2 in most of the constexpr functions,
  // in order to work within the C++11 constexpr function constraints. If you
  // change kNumElem, you'll need to update those member functions as well.

  /// Number of array elements.
  static constexpr unsigned kNumElem = 2;
  /// Mask value initialized to 0.
  uint64_t maskLoToHigh[kNumElem]{};
  /// Number of bits in a mask.
  static constexpr unsigned kNumBits = sizeof(decltype(maskLoToHigh)) * 8;
  /// Number of bits in a mask element.
  static constexpr unsigned kNumBitElem = sizeof(decltype(maskLoToHigh[0])) * 8;

  // Private: clients build masks via bitPosToMask() or the public bitwise
  // operators rather than from raw words.
  constexpr SanitizerMask(uint64_t mask1, uint64_t mask2)
      : maskLoToHigh{mask1, mask2} {}

public:
  SanitizerMask() = default;

  /// Returns true if bit position \p Pos fits within the mask's storage.
  static constexpr bool checkBitPos(const unsigned Pos) {
    return Pos < kNumBits;
  }

  /// Create a mask with a bit enabled at position Pos.
  static constexpr SanitizerMask bitPosToMask(const unsigned Pos) {
    // Exactly one of the two words receives the bit; the other stays zero.
    uint64_t mask1 = (Pos < kNumBitElem) ? 1ULL << (Pos % kNumBitElem) : 0;
    uint64_t mask2 = (Pos >= kNumBitElem && Pos < (kNumBitElem * 2))
                         ? 1ULL << (Pos % kNumBitElem)
                         : 0;
    return SanitizerMask(mask1, mask2);
  }

  /// Total number of set bits across both words.
  unsigned countPopulation() const {
    unsigned total = 0;
    for (const auto &Val : maskLoToHigh)
      total += llvm::countPopulation(Val);
    return total;
  }

  /// Invert every bit of the mask in place.
  void flipAllBits() {
    for (auto &Val : maskLoToHigh)
      Val = ~Val;
  }

  /// True when exactly one bit is set, i.e. a single sanitizer kind.
  bool isPowerOf2() const {
    return countPopulation() == 1;
  }

  llvm::hash_code hash_value() const;

  /// True when any bit is set.
  constexpr explicit operator bool() const {
    return maskLoToHigh[0] || maskLoToHigh[1];
  }

  constexpr bool operator==(const SanitizerMask &V) const {
    return maskLoToHigh[0] == V.maskLoToHigh[0] &&
           maskLoToHigh[1] == V.maskLoToHigh[1];
  }

  SanitizerMask &operator&=(const SanitizerMask &RHS) {
    for (unsigned k = 0; k < kNumElem; k++)
      maskLoToHigh[k] &= RHS.maskLoToHigh[k];
    return *this;
  }

  SanitizerMask &operator|=(const SanitizerMask &RHS) {
    for (unsigned k = 0; k < kNumElem; k++)
      maskLoToHigh[k] |= RHS.maskLoToHigh[k];
    return *this;
  }

  constexpr bool operator!() const { return !bool(*this); }

  constexpr bool operator!=(const SanitizerMask &RHS) const {
    return !((*this) == RHS);
  }

  friend constexpr inline SanitizerMask operator~(SanitizerMask v) {
    return SanitizerMask(~v.maskLoToHigh[0], ~v.maskLoToHigh[1]);
  }

  friend constexpr inline SanitizerMask operator&(SanitizerMask a,
                                                  const SanitizerMask &b) {
    return SanitizerMask(a.maskLoToHigh[0] & b.maskLoToHigh[0],
                         a.maskLoToHigh[1] & b.maskLoToHigh[1]);
  }

  friend constexpr inline SanitizerMask operator|(SanitizerMask a,
                                                  const SanitizerMask &b) {
    return SanitizerMask(a.maskLoToHigh[0] | b.maskLoToHigh[0],
                         a.maskLoToHigh[1] | b.maskLoToHigh[1]);
  }
};
124
125// Declaring in clang namespace so that it can be found by ADL.
126llvm::hash_code hash_value(const clang::SanitizerMask &Arg);
127
// Define the set of sanitizer kinds, as well as the set of sanitizers each
// sanitizer group expands into.
struct SanitizerKind {
  // Assign ordinals to possible values of -fsanitize= flag, which we will use
  // as bit positions.
  enum SanitizerOrdinal : uint64_t {
#define SANITIZER(NAME, ID) SO_##ID,
#define SANITIZER_GROUP(NAME, ID, ALIAS) SO_##ID##Group,
#include "clang/Basic/Sanitizers.def"
  SO_Count
  };

  // For every sanitizer in Sanitizers.def, declare a constexpr mask with its
  // ordinal bit set; groups additionally get an ID mask built from ALIAS and a
  // dedicated ID##Group bit. The static_asserts keep every ordinal within the
  // mask's storage.
#define SANITIZER(NAME, ID) \
  static constexpr SanitizerMask ID = SanitizerMask::bitPosToMask(SO_##ID); \
  static_assert(SanitizerMask::checkBitPos(SO_##ID), "Bit position too big.");
#define SANITIZER_GROUP(NAME, ID, ALIAS) \
  static constexpr SanitizerMask ID = SanitizerMask(ALIAS); \
  static constexpr SanitizerMask ID##Group = \
      SanitizerMask::bitPosToMask(SO_##ID##Group); \
  static_assert(SanitizerMask::checkBitPos(SO_##ID##Group), \
                "Bit position too big.");
#include "clang/Basic/Sanitizers.def"
}; // SanitizerKind
151
152struct SanitizerSet {
153 /// Check if a certain (single) sanitizer is enabled.
154 bool has(SanitizerMask K) const {
155 assert(K.isPowerOf2() && "Has to be a single sanitizer.")((K.isPowerOf2() && "Has to be a single sanitizer.") ?
static_cast<void> (0) : __assert_fail ("K.isPowerOf2() && \"Has to be a single sanitizer.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/include/clang/Basic/Sanitizers.h"
, 155, __PRETTY_FUNCTION__))
;
27
'?' condition is true
156 return static_cast<bool>(Mask & K);
28
Returning value, which participates in a condition later
157 }
158
159 /// Check if one or more sanitizers are enabled.
160 bool hasOneOf(SanitizerMask K) const { return static_cast<bool>(Mask & K); }
161
162 /// Enable or disable a certain (single) sanitizer.
163 void set(SanitizerMask K, bool Value) {
164 assert(K.isPowerOf2() && "Has to be a single sanitizer.")((K.isPowerOf2() && "Has to be a single sanitizer.") ?
static_cast<void> (0) : __assert_fail ("K.isPowerOf2() && \"Has to be a single sanitizer.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/clang/include/clang/Basic/Sanitizers.h"
, 164, __PRETTY_FUNCTION__))
;
165 Mask = Value ? (Mask | K) : (Mask & ~K);
166 }
167
168 /// Disable the sanitizers specified in \p K.
169 void clear(SanitizerMask K = SanitizerKind::All) { Mask &= ~K; }
170
171 /// Returns true if no sanitizers are enabled.
172 bool empty() const { return !Mask; }
173
174 /// Bitmask of enabled sanitizers.
175 SanitizerMask Mask;
176};
177
178/// Parse a single value from a -fsanitize= or -fno-sanitize= value list.
179/// Returns a non-zero SanitizerMask, or \c 0 if \p Value is not known.
180SanitizerMask parseSanitizerValue(StringRef Value, bool AllowGroups);
181
182/// Serialize a SanitizerSet into values for -fsanitize= or -fno-sanitize=.
183void serializeSanitizerSet(SanitizerSet Set,
184 SmallVectorImpl<StringRef> &Values);
185
186/// For each sanitizer group bit set in \p Kinds, set the bits for sanitizers
187/// this group enables.
188SanitizerMask expandSanitizerGroups(SanitizerMask Kinds);
189
190/// Return the sanitizers which do not affect preprocessing.
191inline SanitizerMask getPPTransparentSanitizers() {
192 return SanitizerKind::CFI | SanitizerKind::Integer |
193 SanitizerKind::ImplicitConversion | SanitizerKind::Nullability |
194 SanitizerKind::Undefined | SanitizerKind::FloatDivideByZero;
195}
196
197StringRef AsanDtorKindToString(llvm::AsanDtorKind kind);
198
199llvm::AsanDtorKind AsanDtorKindFromString(StringRef kind);
200
201} // namespace clang
202
203#endif // LLVM_CLANG_BASIC_SANITIZERS_H

/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/IRBuilder.h

1//===- llvm/IRBuilder.h - Builder for LLVM Instructions ---------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the IRBuilder class, which is used as a convenient way
10// to create LLVM instructions with a consistent and simplified interface.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_IR_IRBUILDER_H
15#define LLVM_IR_IRBUILDER_H
16
17#include "llvm-c/Types.h"
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/None.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/StringRef.h"
22#include "llvm/ADT/Twine.h"
23#include "llvm/IR/BasicBlock.h"
24#include "llvm/IR/Constant.h"
25#include "llvm/IR/ConstantFolder.h"
26#include "llvm/IR/Constants.h"
27#include "llvm/IR/DataLayout.h"
28#include "llvm/IR/DebugInfoMetadata.h"
29#include "llvm/IR/DebugLoc.h"
30#include "llvm/IR/DerivedTypes.h"
31#include "llvm/IR/Function.h"
32#include "llvm/IR/GlobalVariable.h"
33#include "llvm/IR/InstrTypes.h"
34#include "llvm/IR/Instruction.h"
35#include "llvm/IR/Instructions.h"
36#include "llvm/IR/IntrinsicInst.h"
37#include "llvm/IR/LLVMContext.h"
38#include "llvm/IR/Module.h"
39#include "llvm/IR/Operator.h"
40#include "llvm/IR/Type.h"
41#include "llvm/IR/Value.h"
42#include "llvm/IR/ValueHandle.h"
43#include "llvm/Support/AtomicOrdering.h"
44#include "llvm/Support/CBindingWrapping.h"
45#include "llvm/Support/Casting.h"
46#include <cassert>
47#include <cstddef>
48#include <cstdint>
49#include <functional>
50#include <utility>
51
52namespace llvm {
53
54class APInt;
55class MDNode;
56class Use;
57
58/// This provides the default implementation of the IRBuilder
59/// 'InsertHelper' method that is called whenever an instruction is created by
60/// IRBuilder and needs to be inserted.
61///
62/// By default, this inserts the instruction at the insertion point.
63class IRBuilderDefaultInserter {
64public:
65 virtual ~IRBuilderDefaultInserter();
66
67 virtual void InsertHelper(Instruction *I, const Twine &Name,
68 BasicBlock *BB,
69 BasicBlock::iterator InsertPt) const {
70 if (BB) BB->getInstList().insert(InsertPt, I);
71 I->setName(Name);
72 }
73};
74
75/// Provides an 'InsertHelper' that calls a user-provided callback after
76/// performing the default insertion.
77class IRBuilderCallbackInserter : public IRBuilderDefaultInserter {
78 std::function<void(Instruction *)> Callback;
79
80public:
81 virtual ~IRBuilderCallbackInserter();
82
83 IRBuilderCallbackInserter(std::function<void(Instruction *)> Callback)
84 : Callback(std::move(Callback)) {}
85
86 void InsertHelper(Instruction *I, const Twine &Name,
87 BasicBlock *BB,
88 BasicBlock::iterator InsertPt) const override {
89 IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
90 Callback(I);
91 }
92};
93
94/// Common base class shared among various IRBuilders.
95class IRBuilderBase {
96 /// Pairs of (metadata kind, MDNode *) that should be added to all newly
97 /// created instructions, like !dbg metadata.
98 SmallVector<std::pair<unsigned, MDNode *>, 2> MetadataToCopy;
99
100 /// Add or update the an entry (Kind, MD) to MetadataToCopy, if \p MD is not
101 /// null. If \p MD is null, remove the entry with \p Kind.
102 void AddOrRemoveMetadataToCopy(unsigned Kind, MDNode *MD) {
103 if (!MD) {
104 erase_if(MetadataToCopy, [Kind](const std::pair<unsigned, MDNode *> &KV) {
105 return KV.first == Kind;
106 });
107 return;
108 }
109
110 for (auto &KV : MetadataToCopy)
111 if (KV.first == Kind) {
112 KV.second = MD;
113 return;
114 }
115
116 MetadataToCopy.emplace_back(Kind, MD);
117 }
118
119protected:
120 BasicBlock *BB;
121 BasicBlock::iterator InsertPt;
122 LLVMContext &Context;
123 const IRBuilderFolder &Folder;
124 const IRBuilderDefaultInserter &Inserter;
125
126 MDNode *DefaultFPMathTag;
127 FastMathFlags FMF;
128
129 bool IsFPConstrained;
130 fp::ExceptionBehavior DefaultConstrainedExcept;
131 RoundingMode DefaultConstrainedRounding;
132
133 ArrayRef<OperandBundleDef> DefaultOperandBundles;
134
135public:
  IRBuilderBase(LLVMContext &context, const IRBuilderFolder &Folder,
                const IRBuilderDefaultInserter &Inserter,
                MDNode *FPMathTag, ArrayRef<OperandBundleDef> OpBundles)
      : Context(context), Folder(Folder), Inserter(Inserter),
        DefaultFPMathTag(FPMathTag), IsFPConstrained(false),
        DefaultConstrainedExcept(fp::ebStrict),
        DefaultConstrainedRounding(RoundingMode::Dynamic),
        DefaultOperandBundles(OpBundles) {
    // Start with no insertion point; callers must position the builder with
    // SetInsertPoint() before creating instructions into a block.
    ClearInsertionPoint();
  }
146
  /// Insert and return the specified instruction.
  template<typename InstTy>
  InstTy *Insert(InstTy *I, const Twine &Name = "") const {
    // Place the instruction at the current insertion point (if any), then
    // attach all pending metadata (e.g. !dbg) collected on this builder.
    Inserter.InsertHelper(I, Name, BB, InsertPt);
    AddMetadataToInst(I);
    return I;
  }

  /// No-op overload to handle constants.
  Constant *Insert(Constant *C, const Twine& = "") const {
    return C;
  }
159
160 Value *Insert(Value *V, const Twine &Name = "") const {
161 if (Instruction *I = dyn_cast<Instruction>(V))
162 return Insert(I, Name);
163 assert(isa<Constant>(V))((isa<Constant>(V)) ? static_cast<void> (0) : __assert_fail
("isa<Constant>(V)", "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/IRBuilder.h"
, 163, __PRETTY_FUNCTION__))
;
164 return V;
165 }
166
167 //===--------------------------------------------------------------------===//
168 // Builder configuration methods
169 //===--------------------------------------------------------------------===//
170
  /// Clear the insertion point: created instructions will not be
  /// inserted into a block.
  void ClearInsertionPoint() {
    BB = nullptr;
    InsertPt = BasicBlock::iterator();
  }

  /// Return the block new instructions go into (null when the insertion
  /// point is cleared).
  BasicBlock *GetInsertBlock() const { return BB; }
  /// Return the position new instructions are inserted before.
  BasicBlock::iterator GetInsertPoint() const { return InsertPt; }
  /// Return the LLVMContext this builder creates IR in.
  LLVMContext &getContext() const { return Context; }

  /// This specifies that created instructions should be appended to the
  /// end of the specified block.
  void SetInsertPoint(BasicBlock *TheBB) {
    BB = TheBB;
    InsertPt = BB->end();
  }
188
189 /// This specifies that created instructions should be inserted before
190 /// the specified instruction.
191 void SetInsertPoint(Instruction *I) {
192 BB = I->getParent();
193 InsertPt = I->getIterator();
194 assert(InsertPt != BB->end() && "Can't read debug loc from end()")((InsertPt != BB->end() && "Can't read debug loc from end()"
) ? static_cast<void> (0) : __assert_fail ("InsertPt != BB->end() && \"Can't read debug loc from end()\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/IRBuilder.h"
, 194, __PRETTY_FUNCTION__))
;
195 SetCurrentDebugLocation(I->getDebugLoc());
196 }
197
  /// This specifies that created instructions should be inserted at the
  /// specified point.
  void SetInsertPoint(BasicBlock *TheBB, BasicBlock::iterator IP) {
    BB = TheBB;
    InsertPt = IP;
    // end() has no instruction to read a debug location from.
    if (IP != TheBB->end())
      SetCurrentDebugLocation(IP->getDebugLoc());
  }

  /// Set location information used by debugging information.
  void SetCurrentDebugLocation(DebugLoc L) {
    // A null location removes any previously recorded !dbg entry.
    AddOrRemoveMetadataToCopy(LLVMContext::MD_dbg, L.getAsMDNode());
  }

  /// Collect metadata with IDs \p MetadataKinds from \p Src which should be
  /// added to all created instructions. Entries present in MetadataToCopy but
  /// not on \p Src will be dropped from MetadataToCopy.
  void CollectMetadataToCopy(Instruction *Src,
                             ArrayRef<unsigned> MetadataKinds) {
    for (unsigned K : MetadataKinds)
      AddOrRemoveMetadataToCopy(K, Src->getMetadata(K));
  }

  /// Get location information used by debugging information.
  DebugLoc getCurrentDebugLocation() const {
    for (auto &KV : MetadataToCopy)
      if (KV.first == LLVMContext::MD_dbg)
        return {cast<DILocation>(KV.second)};

    // No pending !dbg entry: return an empty location.
    return {};
  }
229
  /// If this builder has a current debug location, set it on the
  /// specified instruction.
  void SetInstDebugLocation(Instruction *I) const {
    for (const auto &KV : MetadataToCopy)
      if (KV.first == LLVMContext::MD_dbg) {
        I->setDebugLoc(DebugLoc(KV.second));
        return;
      }
  }

  /// Add all entries in MetadataToCopy to \p I.
  void AddMetadataToInst(Instruction *I) const {
    for (auto &KV : MetadataToCopy)
      I->setMetadata(KV.first, KV.second);
  }

  /// Get the return type of the current function that we're emitting
  /// into.
  Type *getCurrentFunctionReturnType() const;
249
  /// InsertPoint - A saved insertion point.
  class InsertPoint {
    // Null Block means "not set"; Point is only meaningful when Block is set.
    BasicBlock *Block = nullptr;
    BasicBlock::iterator Point;

  public:
    /// Creates a new insertion point which doesn't point to anything.
    InsertPoint() = default;

    /// Creates a new insertion point at the given location.
    InsertPoint(BasicBlock *InsertBlock, BasicBlock::iterator InsertPoint)
        : Block(InsertBlock), Point(InsertPoint) {}

    /// Returns true if this insert point is set.
    bool isSet() const { return (Block != nullptr); }

    /// Block the saved point lives in (null when unset).
    BasicBlock *getBlock() const { return Block; }
    /// Iterator position within the saved block.
    BasicBlock::iterator getPoint() const { return Point; }
  };
269
  /// Returns the current insert point.
  InsertPoint saveIP() const {
    return InsertPoint(GetInsertBlock(), GetInsertPoint());
  }

  /// Returns the current insert point, clearing it in the process.
  InsertPoint saveAndClearIP() {
    InsertPoint IP(GetInsertBlock(), GetInsertPoint());
    ClearInsertionPoint();
    return IP;
  }

  /// Sets the current insert point to a previously-saved location.
  void restoreIP(InsertPoint IP) {
    // An unset InsertPoint restores the "no insertion point" state.
    if (IP.isSet())
      SetInsertPoint(IP.getBlock(), IP.getPoint());
    else
      ClearInsertionPoint();
  }
289
  /// Get the floating point math metadata being used.
  MDNode *getDefaultFPMathTag() const { return DefaultFPMathTag; }

  /// Get the flags to be applied to created floating point ops
  FastMathFlags getFastMathFlags() const { return FMF; }

  /// Mutable access to the fast-math flags (allows in-place modification).
  FastMathFlags &getFastMathFlags() { return FMF; }

  /// Clear the fast-math flags.
  void clearFastMathFlags() { FMF.clear(); }

  /// Set the floating point math metadata to be used.
  void setDefaultFPMathTag(MDNode *FPMathTag) { DefaultFPMathTag = FPMathTag; }

  /// Set the fast-math flags to be used with generated fp-math operators
  void setFastMathFlags(FastMathFlags NewFMF) { FMF = NewFMF; }

  /// Enable/Disable use of constrained floating point math. When
  /// enabled the CreateF<op>() calls instead create constrained
  /// floating point intrinsic calls. Fast math flags are unaffected
  /// by this setting.
  void setIsFPConstrained(bool IsCon) { IsFPConstrained = IsCon; }

  /// Query for the use of constrained floating point math
  bool getIsFPConstrained() { return IsFPConstrained; }
315
316 /// Set the exception handling to be used with constrained floating point
317 void setDefaultConstrainedExcept(fp::ExceptionBehavior NewExcept) {
318#ifndef NDEBUG
319 Optional<StringRef> ExceptStr = ExceptionBehaviorToStr(NewExcept);
320 assert(ExceptStr.hasValue() && "Garbage strict exception behavior!")((ExceptStr.hasValue() && "Garbage strict exception behavior!"
) ? static_cast<void> (0) : __assert_fail ("ExceptStr.hasValue() && \"Garbage strict exception behavior!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/IRBuilder.h"
, 320, __PRETTY_FUNCTION__))
;
321#endif
322 DefaultConstrainedExcept = NewExcept;
323 }
324
325 /// Set the rounding mode handling to be used with constrained floating point
326 void setDefaultConstrainedRounding(RoundingMode NewRounding) {
327#ifndef NDEBUG
328 Optional<StringRef> RoundingStr = RoundingModeToStr(NewRounding);
329 assert(RoundingStr.hasValue() && "Garbage strict rounding mode!")((RoundingStr.hasValue() && "Garbage strict rounding mode!"
) ? static_cast<void> (0) : __assert_fail ("RoundingStr.hasValue() && \"Garbage strict rounding mode!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/IRBuilder.h"
, 329, __PRETTY_FUNCTION__))
;
330#endif
331 DefaultConstrainedRounding = NewRounding;
332 }
333
  /// Get the exception handling used with constrained floating point
  fp::ExceptionBehavior getDefaultConstrainedExcept() {
    return DefaultConstrainedExcept;
  }

  /// Get the rounding mode handling used with constrained floating point
  RoundingMode getDefaultConstrainedRounding() {
    return DefaultConstrainedRounding;
  }
343
344 void setConstrainedFPFunctionAttr() {
345 assert(BB && "Must have a basic block to set any function attributes!")((BB && "Must have a basic block to set any function attributes!"
) ? static_cast<void> (0) : __assert_fail ("BB && \"Must have a basic block to set any function attributes!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/IRBuilder.h"
, 345, __PRETTY_FUNCTION__))
;
346
347 Function *F = BB->getParent();
348 if (!F->hasFnAttribute(Attribute::StrictFP)) {
349 F->addFnAttr(Attribute::StrictFP);
350 }
351 }
352
  /// Mark the call \p I with the StrictFP function-level attribute.
  void setConstrainedFPCallAttr(CallBase *I) {
    I->addAttribute(AttributeList::FunctionIndex, Attribute::StrictFP);
  }

  /// Replace the default operand bundles recorded on this builder.
  void setDefaultOperandBundles(ArrayRef<OperandBundleDef> OpBundles) {
    DefaultOperandBundles = OpBundles;
  }
360
361 //===--------------------------------------------------------------------===//
362 // RAII helpers.
363 //===--------------------------------------------------------------------===//
364
  // RAII object that stores the current insertion point and restores it
  // when the object is destroyed. This includes the debug location.
  class InsertPointGuard {
    IRBuilderBase &Builder;
    // AssertingVH traps the case where the saved block is deleted while
    // the guard is alive.
    AssertingVH<BasicBlock> Block;
    BasicBlock::iterator Point;
    DebugLoc DbgLoc;

  public:
    // Snapshot the builder's insertion point and debug location.
    InsertPointGuard(IRBuilderBase &B)
        : Builder(B), Block(B.GetInsertBlock()), Point(B.GetInsertPoint()),
          DbgLoc(B.getCurrentDebugLocation()) {}

    InsertPointGuard(const InsertPointGuard &) = delete;
    InsertPointGuard &operator=(const InsertPointGuard &) = delete;

    // Restore both the insertion point and the debug location.
    ~InsertPointGuard() {
      Builder.restoreIP(InsertPoint(Block, Point));
      Builder.SetCurrentDebugLocation(DbgLoc);
    }
  };
386
  // RAII object that stores the current fast math settings and restores
  // them when the object is destroyed.
  class FastMathFlagGuard {
    IRBuilderBase &Builder;
    FastMathFlags FMF;
    MDNode *FPMathTag;
    bool IsFPConstrained;
    fp::ExceptionBehavior DefaultConstrainedExcept;
    RoundingMode DefaultConstrainedRounding;

  public:
    // Snapshot all FP-related builder state.
    FastMathFlagGuard(IRBuilderBase &B)
        : Builder(B), FMF(B.FMF), FPMathTag(B.DefaultFPMathTag),
          IsFPConstrained(B.IsFPConstrained),
          DefaultConstrainedExcept(B.DefaultConstrainedExcept),
          DefaultConstrainedRounding(B.DefaultConstrainedRounding) {}

    FastMathFlagGuard(const FastMathFlagGuard &) = delete;
    FastMathFlagGuard &operator=(const FastMathFlagGuard &) = delete;

    // Write every saved setting back to the builder.
    ~FastMathFlagGuard() {
      Builder.FMF = FMF;
      Builder.DefaultFPMathTag = FPMathTag;
      Builder.IsFPConstrained = IsFPConstrained;
      Builder.DefaultConstrainedExcept = DefaultConstrainedExcept;
      Builder.DefaultConstrainedRounding = DefaultConstrainedRounding;
    }
  };
415
  // RAII object that stores the current default operand bundles and restores
  // them when the object is destroyed.
  class OperandBundlesGuard {
    IRBuilderBase &Builder;
    ArrayRef<OperandBundleDef> DefaultOperandBundles;

  public:
    // Snapshot the builder's default operand bundles.
    OperandBundlesGuard(IRBuilderBase &B)
        : Builder(B), DefaultOperandBundles(B.DefaultOperandBundles) {}

    OperandBundlesGuard(const OperandBundlesGuard &) = delete;
    OperandBundlesGuard &operator=(const OperandBundlesGuard &) = delete;

    // Restore the saved bundles on destruction.
    ~OperandBundlesGuard() {
      Builder.DefaultOperandBundles = DefaultOperandBundles;
    }
  };
433
434
435 //===--------------------------------------------------------------------===//
436 // Miscellaneous creation methods.
437 //===--------------------------------------------------------------------===//
438
439 /// Make a new global variable with initializer type i8*
440 ///
441 /// Make a new global variable with an initializer that has array of i8 type
442 /// filled in with the null terminated string value specified. The new global
443 /// variable will be marked mergable with any others of the same contents. If
444 /// Name is specified, it is the name of the global variable created.
445 ///
446 /// If no module is given via \p M, it is take from the insertion point basic
447 /// block.
448 GlobalVariable *CreateGlobalString(StringRef Str, const Twine &Name = "",
449 unsigned AddressSpace = 0,
450 Module *M = nullptr);
451
  /// Get a constant value representing either true or false.
  ConstantInt *getInt1(bool V) {
    return ConstantInt::get(getInt1Ty(), V);
  }

  /// Get the constant value for i1 true.
  ConstantInt *getTrue() {
    return ConstantInt::getTrue(Context);
  }

  /// Get the constant value for i1 false.
  ConstantInt *getFalse() {
    return ConstantInt::getFalse(Context);
  }

  /// Get a constant 8-bit value.
  ConstantInt *getInt8(uint8_t C) {
    return ConstantInt::get(getInt8Ty(), C);
  }

  /// Get a constant 16-bit value.
  ConstantInt *getInt16(uint16_t C) {
    return ConstantInt::get(getInt16Ty(), C);
  }

  /// Get a constant 32-bit value.
  ConstantInt *getInt32(uint32_t C) {
    return ConstantInt::get(getInt32Ty(), C);
  }

  /// Get a constant 64-bit value.
  ConstantInt *getInt64(uint64_t C) {
    return ConstantInt::get(getInt64Ty(), C);
  }

  /// Get a constant N-bit value, zero extended or truncated from
  /// a 64-bit value.
  ConstantInt *getIntN(unsigned N, uint64_t C) {
    return ConstantInt::get(getIntNTy(N), C);
  }

  /// Get a constant integer value whose width is taken from \p AI.
  ConstantInt *getInt(const APInt &AI) {
    return ConstantInt::get(Context, AI);
  }
497
498 //===--------------------------------------------------------------------===//
499 // Type creation methods
500 //===--------------------------------------------------------------------===//
501
  /// Fetch the type representing a single bit
  IntegerType *getInt1Ty() {
    return Type::getInt1Ty(Context);
  }

  /// Fetch the type representing an 8-bit integer.
  IntegerType *getInt8Ty() {
    return Type::getInt8Ty(Context);
  }

  /// Fetch the type representing a 16-bit integer.
  IntegerType *getInt16Ty() {
    return Type::getInt16Ty(Context);
  }

  /// Fetch the type representing a 32-bit integer.
  IntegerType *getInt32Ty() {
    return Type::getInt32Ty(Context);
  }

  /// Fetch the type representing a 64-bit integer.
  IntegerType *getInt64Ty() {
    return Type::getInt64Ty(Context);
  }

  /// Fetch the type representing a 128-bit integer.
  IntegerType *getInt128Ty() { return Type::getInt128Ty(Context); }

  /// Fetch the type representing an N-bit integer.
  IntegerType *getIntNTy(unsigned N) {
    return Type::getIntNTy(Context, N);
  }

  /// Fetch the type representing a 16-bit floating point value.
  Type *getHalfTy() {
    return Type::getHalfTy(Context);
  }

  /// Fetch the type representing a 16-bit brain floating point value.
  Type *getBFloatTy() {
    return Type::getBFloatTy(Context);
  }

  /// Fetch the type representing a 32-bit floating point value.
  Type *getFloatTy() {
    return Type::getFloatTy(Context);
  }

  /// Fetch the type representing a 64-bit floating point value.
  Type *getDoubleTy() {
    return Type::getDoubleTy(Context);
  }

  /// Fetch the type representing void.
  Type *getVoidTy() {
    return Type::getVoidTy(Context);
  }

  /// Fetch the type representing a pointer to an 8-bit integer value.
  PointerType *getInt8PtrTy(unsigned AddrSpace = 0) {
    return Type::getInt8PtrTy(Context, AddrSpace);
  }

  /// Fetch the pointer-sized integer type for \p AddrSpace per \p DL.
  IntegerType *getIntPtrTy(const DataLayout &DL, unsigned AddrSpace = 0) {
    return DL.getIntPtrType(Context, AddrSpace);
  }
569
570 //===--------------------------------------------------------------------===//
571 // Intrinsic creation methods
572 //===--------------------------------------------------------------------===//
573
  /// Create and insert a memset to the specified pointer and the
  /// specified value.
  ///
  /// If the pointer isn't an i8*, it will be converted. If a TBAA tag is
  /// specified, it will be added to the instruction. Likewise with alias.scope
  /// and noalias tags.
  CallInst *CreateMemSet(Value *Ptr, Value *Val, uint64_t Size,
                         MaybeAlign Align, bool isVolatile = false,
                         MDNode *TBAATag = nullptr, MDNode *ScopeTag = nullptr,
                         MDNode *NoAliasTag = nullptr) {
    // Convenience overload: wrap the constant size in an i64 and defer to the
    // Value-sized variant.
    return CreateMemSet(Ptr, Val, getInt64(Size), Align, isVolatile,
                        TBAATag, ScopeTag, NoAliasTag);
  }
587
588 CallInst *CreateMemSet(Value *Ptr, Value *Val, Value *Size, MaybeAlign Align,
589 bool isVolatile = false, MDNode *TBAATag = nullptr,
590 MDNode *ScopeTag = nullptr,
591 MDNode *NoAliasTag = nullptr);
592
593 /// Create and insert an element unordered-atomic memset of the region of
594 /// memory starting at the given pointer to the given value.
595 ///
596 /// If the pointer isn't an i8*, it will be converted. If a TBAA tag is
597 /// specified, it will be added to the instruction. Likewise with alias.scope
598 /// and noalias tags.
599 CallInst *CreateElementUnorderedAtomicMemSet(Value *Ptr, Value *Val,
600 uint64_t Size, Align Alignment,
601 uint32_t ElementSize,
602 MDNode *TBAATag = nullptr,
603 MDNode *ScopeTag = nullptr,
604 MDNode *NoAliasTag = nullptr) {
605 return CreateElementUnorderedAtomicMemSet(Ptr, Val, getInt64(Size),
606 Align(Alignment), ElementSize,
607 TBAATag, ScopeTag, NoAliasTag);
608 }
609
610 CallInst *CreateElementUnorderedAtomicMemSet(Value *Ptr, Value *Val,
611 Value *Size, Align Alignment,
612 uint32_t ElementSize,
613 MDNode *TBAATag = nullptr,
614 MDNode *ScopeTag = nullptr,
615 MDNode *NoAliasTag = nullptr);
616
  /// Create and insert a memcpy between the specified pointers.
  ///
  /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
  /// specified, it will be added to the instruction. Likewise with alias.scope
  /// and noalias tags.
  CallInst *CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src,
                         MaybeAlign SrcAlign, uint64_t Size,
                         bool isVolatile = false, MDNode *TBAATag = nullptr,
                         MDNode *TBAAStructTag = nullptr,
                         MDNode *ScopeTag = nullptr,
                         MDNode *NoAliasTag = nullptr) {
    // Convenience overload: wrap the constant size in an i64 and defer to the
    // Value-sized variant.
    return CreateMemCpy(Dst, DstAlign, Src, SrcAlign, getInt64(Size),
                        isVolatile, TBAATag, TBAAStructTag, ScopeTag,
                        NoAliasTag);
  }
632
633 CallInst *CreateMemTransferInst(
634 Intrinsic::ID IntrID, Value *Dst, MaybeAlign DstAlign, Value *Src,
635 MaybeAlign SrcAlign, Value *Size, bool isVolatile = false,
636 MDNode *TBAATag = nullptr, MDNode *TBAAStructTag = nullptr,
637 MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr);
638
  /// Create and insert a memcpy with a Value-typed size; emitted through the
  /// generic mem-transfer helper with the llvm.memcpy intrinsic ID.
  CallInst *CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src,
                         MaybeAlign SrcAlign, Value *Size,
                         bool isVolatile = false, MDNode *TBAATag = nullptr,
                         MDNode *TBAAStructTag = nullptr,
                         MDNode *ScopeTag = nullptr,
                         MDNode *NoAliasTag = nullptr) {
    return CreateMemTransferInst(Intrinsic::memcpy, Dst, DstAlign, Src,
                                 SrcAlign, Size, isVolatile, TBAATag,
                                 TBAAStructTag, ScopeTag, NoAliasTag);
  }
649
650 CallInst *CreateMemCpyInline(Value *Dst, MaybeAlign DstAlign, Value *Src,
651 MaybeAlign SrcAlign, Value *Size);
652
653 /// Create and insert an element unordered-atomic memcpy between the
654 /// specified pointers.
655 ///
656 /// DstAlign/SrcAlign are the alignments of the Dst/Src pointers, respectively.
657 ///
658 /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
659 /// specified, it will be added to the instruction. Likewise with alias.scope
660 /// and noalias tags.
661 CallInst *CreateElementUnorderedAtomicMemCpy(
662 Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
663 uint32_t ElementSize, MDNode *TBAATag = nullptr,
664 MDNode *TBAAStructTag = nullptr, MDNode *ScopeTag = nullptr,
665 MDNode *NoAliasTag = nullptr);
666
  /// Create and insert a memmove with a constant size; wraps the size in an
  /// i64 and defers to the Value-sized variant.
  CallInst *CreateMemMove(Value *Dst, MaybeAlign DstAlign, Value *Src,
                          MaybeAlign SrcAlign, uint64_t Size,
                          bool isVolatile = false, MDNode *TBAATag = nullptr,
                          MDNode *ScopeTag = nullptr,
                          MDNode *NoAliasTag = nullptr) {
    return CreateMemMove(Dst, DstAlign, Src, SrcAlign, getInt64(Size),
                         isVolatile, TBAATag, ScopeTag, NoAliasTag);
  }
675
676 CallInst *CreateMemMove(Value *Dst, MaybeAlign DstAlign, Value *Src,
677 MaybeAlign SrcAlign, Value *Size,
678 bool isVolatile = false, MDNode *TBAATag = nullptr,
679 MDNode *ScopeTag = nullptr,
680 MDNode *NoAliasTag = nullptr);
681
682 /// \brief Create and insert an element unordered-atomic memmove between the
683 /// specified pointers.
684 ///
685 /// DstAlign/SrcAlign are the alignments of the Dst/Src pointers,
686 /// respectively.
687 ///
688 /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
689 /// specified, it will be added to the instruction. Likewise with alias.scope
690 /// and noalias tags.
691 CallInst *CreateElementUnorderedAtomicMemMove(
692 Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
693 uint32_t ElementSize, MDNode *TBAATag = nullptr,
694 MDNode *TBAAStructTag = nullptr, MDNode *ScopeTag = nullptr,
695 MDNode *NoAliasTag = nullptr);
696
697 /// Create a vector fadd reduction intrinsic of the source vector.
698 /// The first parameter is a scalar accumulator value for ordered reductions.
699 CallInst *CreateFAddReduce(Value *Acc, Value *Src);
700
701 /// Create a vector fmul reduction intrinsic of the source vector.
702 /// The first parameter is a scalar accumulator value for ordered reductions.
703 CallInst *CreateFMulReduce(Value *Acc, Value *Src);
704
705 /// Create a vector int add reduction intrinsic of the source vector.
706 CallInst *CreateAddReduce(Value *Src);
707
708 /// Create a vector int mul reduction intrinsic of the source vector.
709 CallInst *CreateMulReduce(Value *Src);
710
711 /// Create a vector int AND reduction intrinsic of the source vector.
712 CallInst *CreateAndReduce(Value *Src);
713
714 /// Create a vector int OR reduction intrinsic of the source vector.
715 CallInst *CreateOrReduce(Value *Src);
716
717 /// Create a vector int XOR reduction intrinsic of the source vector.
718 CallInst *CreateXorReduce(Value *Src);
719
720 /// Create a vector integer max reduction intrinsic of the source
721 /// vector.
722 CallInst *CreateIntMaxReduce(Value *Src, bool IsSigned = false);
723
724 /// Create a vector integer min reduction intrinsic of the source
725 /// vector.
726 CallInst *CreateIntMinReduce(Value *Src, bool IsSigned = false);
727
728 /// Create a vector float max reduction intrinsic of the source
729 /// vector.
730 CallInst *CreateFPMaxReduce(Value *Src);
731
732 /// Create a vector float min reduction intrinsic of the source
733 /// vector.
734 CallInst *CreateFPMinReduce(Value *Src);
735
736 /// Create a lifetime.start intrinsic.
737 ///
738 /// If the pointer isn't i8* it will be converted.
739 CallInst *CreateLifetimeStart(Value *Ptr, ConstantInt *Size = nullptr);
740
741 /// Create a lifetime.end intrinsic.
742 ///
743 /// If the pointer isn't i8* it will be converted.
744 CallInst *CreateLifetimeEnd(Value *Ptr, ConstantInt *Size = nullptr);
745
746 /// Create a call to invariant.start intrinsic.
747 ///
748 /// If the pointer isn't i8* it will be converted.
749 CallInst *CreateInvariantStart(Value *Ptr, ConstantInt *Size = nullptr);
750
751 /// Create a call to Masked Load intrinsic
752 CallInst *CreateMaskedLoad(Value *Ptr, Align Alignment, Value *Mask,
753 Value *PassThru = nullptr, const Twine &Name = "");
754
755 /// Create a call to Masked Store intrinsic
756 CallInst *CreateMaskedStore(Value *Val, Value *Ptr, Align Alignment,
757 Value *Mask);
758
759 /// Create a call to Masked Gather intrinsic
760 CallInst *CreateMaskedGather(Value *Ptrs, Align Alignment,
761 Value *Mask = nullptr, Value *PassThru = nullptr,
762 const Twine &Name = "");
763
764 /// Create a call to Masked Scatter intrinsic
765 CallInst *CreateMaskedScatter(Value *Val, Value *Ptrs, Align Alignment,
766 Value *Mask = nullptr);
767
768 /// Create an assume intrinsic call that allows the optimizer to
769 /// assume that the provided condition will be true.
770 ///
771 /// The optional argument \p OpBundles specifies operand bundles that are
772 /// added to the call instruction.
773 CallInst *CreateAssumption(Value *Cond,
774 ArrayRef<OperandBundleDef> OpBundles = llvm::None);
775
776 /// Create a llvm.experimental.noalias.scope.decl intrinsic call.
777 Instruction *CreateNoAliasScopeDeclaration(Value *Scope);
778 Instruction *CreateNoAliasScopeDeclaration(MDNode *ScopeTag) {
779 return CreateNoAliasScopeDeclaration(
780 MetadataAsValue::get(Context, ScopeTag));
781 }
782
783 /// Create a call to the experimental.gc.statepoint intrinsic to
784 /// start a new statepoint sequence.
785 CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
786 Value *ActualCallee,
787 ArrayRef<Value *> CallArgs,
788 Optional<ArrayRef<Value *>> DeoptArgs,
789 ArrayRef<Value *> GCArgs,
790 const Twine &Name = "");
791
792 /// Create a call to the experimental.gc.statepoint intrinsic to
793 /// start a new statepoint sequence.
794 CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
795 Value *ActualCallee, uint32_t Flags,
796 ArrayRef<Value *> CallArgs,
797 Optional<ArrayRef<Use>> TransitionArgs,
798 Optional<ArrayRef<Use>> DeoptArgs,
799 ArrayRef<Value *> GCArgs,
800 const Twine &Name = "");
801
802 /// Conveninence function for the common case when CallArgs are filled
803 /// in using makeArrayRef(CS.arg_begin(), CS.arg_end()); Use needs to be
804 /// .get()'ed to get the Value pointer.
805 CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
806 Value *ActualCallee, ArrayRef<Use> CallArgs,
807 Optional<ArrayRef<Value *>> DeoptArgs,
808 ArrayRef<Value *> GCArgs,
809 const Twine &Name = "");
810
811 /// Create an invoke to the experimental.gc.statepoint intrinsic to
812 /// start a new statepoint sequence.
813 InvokeInst *
814 CreateGCStatepointInvoke(uint64_t ID, uint32_t NumPatchBytes,
815 Value *ActualInvokee, BasicBlock *NormalDest,
816 BasicBlock *UnwindDest, ArrayRef<Value *> InvokeArgs,
817 Optional<ArrayRef<Value *>> DeoptArgs,
818 ArrayRef<Value *> GCArgs, const Twine &Name = "");
819
820 /// Create an invoke to the experimental.gc.statepoint intrinsic to
821 /// start a new statepoint sequence.
822 InvokeInst *CreateGCStatepointInvoke(
823 uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
824 BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags,
825 ArrayRef<Value *> InvokeArgs, Optional<ArrayRef<Use>> TransitionArgs,
826 Optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
827 const Twine &Name = "");
828
829 // Convenience function for the common case when CallArgs are filled in using
830 // makeArrayRef(CS.arg_begin(), CS.arg_end()); Use needs to be .get()'ed to
831 // get the Value *.
832 InvokeInst *
833 CreateGCStatepointInvoke(uint64_t ID, uint32_t NumPatchBytes,
834 Value *ActualInvokee, BasicBlock *NormalDest,
835 BasicBlock *UnwindDest, ArrayRef<Use> InvokeArgs,
836 Optional<ArrayRef<Value *>> DeoptArgs,
837 ArrayRef<Value *> GCArgs, const Twine &Name = "");
838
839 /// Create a call to the experimental.gc.result intrinsic to extract
840 /// the result from a call wrapped in a statepoint.
841 CallInst *CreateGCResult(Instruction *Statepoint,
842 Type *ResultType,
843 const Twine &Name = "");
844
845 /// Create a call to the experimental.gc.relocate intrinsics to
846 /// project the relocated value of one pointer from the statepoint.
847 CallInst *CreateGCRelocate(Instruction *Statepoint,
848 int BaseOffset,
849 int DerivedOffset,
850 Type *ResultType,
851 const Twine &Name = "");
852
853 /// Create a call to llvm.vscale, multiplied by \p Scaling. The type of VScale
854 /// will be the same type as that of \p Scaling.
855 Value *CreateVScale(Constant *Scaling, const Twine &Name = "");
856
857 /// Creates a vector of type \p DstType with the linear sequence <0, 1, ...>
858 Value *CreateStepVector(Type *DstType, const Twine &Name = "");
859
860 /// Create a call to intrinsic \p ID with 1 operand which is mangled on its
861 /// type.
862 CallInst *CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V,
863 Instruction *FMFSource = nullptr,
864 const Twine &Name = "");
865
866 /// Create a call to intrinsic \p ID with 2 operands which is mangled on the
867 /// first type.
868 CallInst *CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS,
869 Instruction *FMFSource = nullptr,
870 const Twine &Name = "");
871
872 /// Create a call to intrinsic \p ID with \p args, mangled using \p Types. If
873 /// \p FMFSource is provided, copy fast-math-flags from that instruction to
874 /// the intrinsic.
875 CallInst *CreateIntrinsic(Intrinsic::ID ID, ArrayRef<Type *> Types,
876 ArrayRef<Value *> Args,
877 Instruction *FMFSource = nullptr,
878 const Twine &Name = "");
879
880 /// Create call to the minnum intrinsic.
881 CallInst *CreateMinNum(Value *LHS, Value *RHS, const Twine &Name = "") {
882 return CreateBinaryIntrinsic(Intrinsic::minnum, LHS, RHS, nullptr, Name);
883 }
884
885 /// Create call to the maxnum intrinsic.
886 CallInst *CreateMaxNum(Value *LHS, Value *RHS, const Twine &Name = "") {
887 return CreateBinaryIntrinsic(Intrinsic::maxnum, LHS, RHS, nullptr, Name);
888 }
889
890 /// Create call to the minimum intrinsic.
891 CallInst *CreateMinimum(Value *LHS, Value *RHS, const Twine &Name = "") {
892 return CreateBinaryIntrinsic(Intrinsic::minimum, LHS, RHS, nullptr, Name);
893 }
894
895 /// Create call to the maximum intrinsic.
896 CallInst *CreateMaximum(Value *LHS, Value *RHS, const Twine &Name = "") {
897 return CreateBinaryIntrinsic(Intrinsic::maximum, LHS, RHS, nullptr, Name);
898 }
899
900 /// Create a call to the experimental.vector.extract intrinsic.
901 CallInst *CreateExtractVector(Type *DstType, Value *SrcVec, Value *Idx,
902 const Twine &Name = "") {
903 return CreateIntrinsic(Intrinsic::experimental_vector_extract,
904 {DstType, SrcVec->getType()}, {SrcVec, Idx}, nullptr,
905 Name);
906 }
907
908 /// Create a call to the experimental.vector.insert intrinsic.
909 CallInst *CreateInsertVector(Type *DstType, Value *SrcVec, Value *SubVec,
910 Value *Idx, const Twine &Name = "") {
911 return CreateIntrinsic(Intrinsic::experimental_vector_insert,
912 {DstType, SubVec->getType()}, {SrcVec, SubVec, Idx},
913 nullptr, Name);
914 }
915
916private:
917 /// Create a call to a masked intrinsic with given Id.
918 CallInst *CreateMaskedIntrinsic(Intrinsic::ID Id, ArrayRef<Value *> Ops,
919 ArrayRef<Type *> OverloadedTypes,
920 const Twine &Name = "");
921
922 Value *getCastedInt8PtrValue(Value *Ptr);
923
924 //===--------------------------------------------------------------------===//
925 // Instruction creation methods: Terminators
926 //===--------------------------------------------------------------------===//
927
928private:
929 /// Helper to add branch weight and unpredictable metadata onto an
930 /// instruction.
931 /// \returns The annotated instruction.
932 template <typename InstTy>
933 InstTy *addBranchMetadata(InstTy *I, MDNode *Weights, MDNode *Unpredictable) {
934 if (Weights)
935 I->setMetadata(LLVMContext::MD_prof, Weights);
936 if (Unpredictable)
937 I->setMetadata(LLVMContext::MD_unpredictable, Unpredictable);
938 return I;
939 }
940
941public:
942 /// Create a 'ret void' instruction.
943 ReturnInst *CreateRetVoid() {
944 return Insert(ReturnInst::Create(Context));
945 }
946
947 /// Create a 'ret <val>' instruction.
948 ReturnInst *CreateRet(Value *V) {
949 return Insert(ReturnInst::Create(Context, V));
950 }
951
952 /// Create a sequence of N insertvalue instructions,
953 /// with one Value from the retVals array each, that build a aggregate
954 /// return value one value at a time, and a ret instruction to return
955 /// the resulting aggregate value.
956 ///
957 /// This is a convenience function for code that uses aggregate return values
958 /// as a vehicle for having multiple return values.
959 ReturnInst *CreateAggregateRet(Value *const *retVals, unsigned N) {
960 Value *V = UndefValue::get(getCurrentFunctionReturnType());
961 for (unsigned i = 0; i != N; ++i)
962 V = CreateInsertValue(V, retVals[i], i, "mrv");
963 return Insert(ReturnInst::Create(Context, V));
964 }
965
966 /// Create an unconditional 'br label X' instruction.
967 BranchInst *CreateBr(BasicBlock *Dest) {
968 return Insert(BranchInst::Create(Dest));
969 }
970
971 /// Create a conditional 'br Cond, TrueDest, FalseDest'
972 /// instruction.
973 BranchInst *CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False,
974 MDNode *BranchWeights = nullptr,
975 MDNode *Unpredictable = nullptr) {
976 return Insert(addBranchMetadata(BranchInst::Create(True, False, Cond),
977 BranchWeights, Unpredictable));
978 }
979
980 /// Create a conditional 'br Cond, TrueDest, FalseDest'
981 /// instruction. Copy branch meta data if available.
982 BranchInst *CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False,
983 Instruction *MDSrc) {
984 BranchInst *Br = BranchInst::Create(True, False, Cond);
985 if (MDSrc) {
986 unsigned WL[4] = {LLVMContext::MD_prof, LLVMContext::MD_unpredictable,
987 LLVMContext::MD_make_implicit, LLVMContext::MD_dbg};
988 Br->copyMetadata(*MDSrc, makeArrayRef(&WL[0], 4));
989 }
990 return Insert(Br);
991 }
992
993 /// Create a switch instruction with the specified value, default dest,
994 /// and with a hint for the number of cases that will be added (for efficient
995 /// allocation).
996 SwitchInst *CreateSwitch(Value *V, BasicBlock *Dest, unsigned NumCases = 10,
997 MDNode *BranchWeights = nullptr,
998 MDNode *Unpredictable = nullptr) {
999 return Insert(addBranchMetadata(SwitchInst::Create(V, Dest, NumCases),
1000 BranchWeights, Unpredictable));
1001 }
1002
1003 /// Create an indirect branch instruction with the specified address
1004 /// operand, with an optional hint for the number of destinations that will be
1005 /// added (for efficient allocation).
1006 IndirectBrInst *CreateIndirectBr(Value *Addr, unsigned NumDests = 10) {
1007 return Insert(IndirectBrInst::Create(Addr, NumDests));
1008 }
1009
1010 /// Create an invoke instruction.
1011 InvokeInst *CreateInvoke(FunctionType *Ty, Value *Callee,
1012 BasicBlock *NormalDest, BasicBlock *UnwindDest,
1013 ArrayRef<Value *> Args,
1014 ArrayRef<OperandBundleDef> OpBundles,
1015 const Twine &Name = "") {
1016 InvokeInst *II =
1017 InvokeInst::Create(Ty, Callee, NormalDest, UnwindDest, Args, OpBundles);
1018 if (IsFPConstrained)
1019 setConstrainedFPCallAttr(II);
1020 return Insert(II, Name);
1021 }
1022 InvokeInst *CreateInvoke(FunctionType *Ty, Value *Callee,
1023 BasicBlock *NormalDest, BasicBlock *UnwindDest,
1024 ArrayRef<Value *> Args = None,
1025 const Twine &Name = "") {
1026 InvokeInst *II =
1027 InvokeInst::Create(Ty, Callee, NormalDest, UnwindDest, Args);
1028 if (IsFPConstrained)
1029 setConstrainedFPCallAttr(II);
1030 return Insert(II, Name);
1031 }
1032
1033 InvokeInst *CreateInvoke(FunctionCallee Callee, BasicBlock *NormalDest,
1034 BasicBlock *UnwindDest, ArrayRef<Value *> Args,
1035 ArrayRef<OperandBundleDef> OpBundles,
1036 const Twine &Name = "") {
1037 return CreateInvoke(Callee.getFunctionType(), Callee.getCallee(),
1038 NormalDest, UnwindDest, Args, OpBundles, Name);
1039 }
1040
1041 InvokeInst *CreateInvoke(FunctionCallee Callee, BasicBlock *NormalDest,
1042 BasicBlock *UnwindDest,
1043 ArrayRef<Value *> Args = None,
1044 const Twine &Name = "") {
1045 return CreateInvoke(Callee.getFunctionType(), Callee.getCallee(),
1046 NormalDest, UnwindDest, Args, Name);
1047 }
1048
1049 /// \brief Create a callbr instruction.
1050 CallBrInst *CreateCallBr(FunctionType *Ty, Value *Callee,
1051 BasicBlock *DefaultDest,
1052 ArrayRef<BasicBlock *> IndirectDests,
1053 ArrayRef<Value *> Args = None,
1054 const Twine &Name = "") {
1055 return Insert(CallBrInst::Create(Ty, Callee, DefaultDest, IndirectDests,
1056 Args), Name);
1057 }
1058 CallBrInst *CreateCallBr(FunctionType *Ty, Value *Callee,
1059 BasicBlock *DefaultDest,
1060 ArrayRef<BasicBlock *> IndirectDests,
1061 ArrayRef<Value *> Args,
1062 ArrayRef<OperandBundleDef> OpBundles,
1063 const Twine &Name = "") {
1064 return Insert(
1065 CallBrInst::Create(Ty, Callee, DefaultDest, IndirectDests, Args,
1066 OpBundles), Name);
1067 }
1068
1069 CallBrInst *CreateCallBr(FunctionCallee Callee, BasicBlock *DefaultDest,
1070 ArrayRef<BasicBlock *> IndirectDests,
1071 ArrayRef<Value *> Args = None,
1072 const Twine &Name = "") {
1073 return CreateCallBr(Callee.getFunctionType(), Callee.getCallee(),
1074 DefaultDest, IndirectDests, Args, Name);
1075 }
1076 CallBrInst *CreateCallBr(FunctionCallee Callee, BasicBlock *DefaultDest,
1077 ArrayRef<BasicBlock *> IndirectDests,
1078 ArrayRef<Value *> Args,
1079 ArrayRef<OperandBundleDef> OpBundles,
1080 const Twine &Name = "") {
1081 return CreateCallBr(Callee.getFunctionType(), Callee.getCallee(),
1082 DefaultDest, IndirectDests, Args, Name);
1083 }
1084
1085 ResumeInst *CreateResume(Value *Exn) {
1086 return Insert(ResumeInst::Create(Exn));
1087 }
1088
1089 CleanupReturnInst *CreateCleanupRet(CleanupPadInst *CleanupPad,
1090 BasicBlock *UnwindBB = nullptr) {
1091 return Insert(CleanupReturnInst::Create(CleanupPad, UnwindBB));
1092 }
1093
1094 CatchSwitchInst *CreateCatchSwitch(Value *ParentPad, BasicBlock *UnwindBB,
1095 unsigned NumHandlers,
1096 const Twine &Name = "") {
1097 return Insert(CatchSwitchInst::Create(ParentPad, UnwindBB, NumHandlers),
1098 Name);
1099 }
1100
1101 CatchPadInst *CreateCatchPad(Value *ParentPad, ArrayRef<Value *> Args,
1102 const Twine &Name = "") {
1103 return Insert(CatchPadInst::Create(ParentPad, Args), Name);
1104 }
1105
1106 CleanupPadInst *CreateCleanupPad(Value *ParentPad,
1107 ArrayRef<Value *> Args = None,
1108 const Twine &Name = "") {
1109 return Insert(CleanupPadInst::Create(ParentPad, Args), Name);
1110 }
1111
1112 CatchReturnInst *CreateCatchRet(CatchPadInst *CatchPad, BasicBlock *BB) {
1113 return Insert(CatchReturnInst::Create(CatchPad, BB));
1114 }
1115
1116 UnreachableInst *CreateUnreachable() {
1117 return Insert(new UnreachableInst(Context));
1118 }
1119
1120 //===--------------------------------------------------------------------===//
1121 // Instruction creation methods: Binary Operators
1122 //===--------------------------------------------------------------------===//
1123private:
1124 BinaryOperator *CreateInsertNUWNSWBinOp(BinaryOperator::BinaryOps Opc,
1125 Value *LHS, Value *RHS,
1126 const Twine &Name,
1127 bool HasNUW, bool HasNSW) {
1128 BinaryOperator *BO = Insert(BinaryOperator::Create(Opc, LHS, RHS), Name);
1129 if (HasNUW) BO->setHasNoUnsignedWrap();
1130 if (HasNSW) BO->setHasNoSignedWrap();
1131 return BO;
1132 }
1133
1134 Instruction *setFPAttrs(Instruction *I, MDNode *FPMD,
1135 FastMathFlags FMF) const {
1136 if (!FPMD)
1137 FPMD = DefaultFPMathTag;
1138 if (FPMD)
1139 I->setMetadata(LLVMContext::MD_fpmath, FPMD);
1140 I->setFastMathFlags(FMF);
1141 return I;
1142 }
1143
1144 Value *foldConstant(Instruction::BinaryOps Opc, Value *L,
1145 Value *R, const Twine &Name) const {
1146 auto *LC = dyn_cast<Constant>(L);
1147 auto *RC = dyn_cast<Constant>(R);
1148 return (LC && RC) ? Insert(Folder.CreateBinOp(Opc, LC, RC), Name) : nullptr;
1149 }
1150
1151 Value *getConstrainedFPRounding(Optional<RoundingMode> Rounding) {
1152 RoundingMode UseRounding = DefaultConstrainedRounding;
1153
1154 if (Rounding.hasValue())
1155 UseRounding = Rounding.getValue();
1156
1157 Optional<StringRef> RoundingStr = RoundingModeToStr(UseRounding);
1158 assert(RoundingStr.hasValue() && "Garbage strict rounding mode!")((RoundingStr.hasValue() && "Garbage strict rounding mode!"
) ? static_cast<void> (0) : __assert_fail ("RoundingStr.hasValue() && \"Garbage strict rounding mode!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/IRBuilder.h"
, 1158, __PRETTY_FUNCTION__))
;
1159 auto *RoundingMDS = MDString::get(Context, RoundingStr.getValue());
1160
1161 return MetadataAsValue::get(Context, RoundingMDS);
1162 }
1163
1164 Value *getConstrainedFPExcept(Optional<fp::ExceptionBehavior> Except) {
1165 fp::ExceptionBehavior UseExcept = DefaultConstrainedExcept;
1166
1167 if (Except.hasValue())
1168 UseExcept = Except.getValue();
1169
1170 Optional<StringRef> ExceptStr = ExceptionBehaviorToStr(UseExcept);
1171 assert(ExceptStr.hasValue() && "Garbage strict exception behavior!")((ExceptStr.hasValue() && "Garbage strict exception behavior!"
) ? static_cast<void> (0) : __assert_fail ("ExceptStr.hasValue() && \"Garbage strict exception behavior!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/IRBuilder.h"
, 1171, __PRETTY_FUNCTION__))
;
1172 auto *ExceptMDS = MDString::get(Context, ExceptStr.getValue());
1173
1174 return MetadataAsValue::get(Context, ExceptMDS);
1175 }
1176
1177 Value *getConstrainedFPPredicate(CmpInst::Predicate Predicate) {
1178 assert(CmpInst::isFPPredicate(Predicate) &&((CmpInst::isFPPredicate(Predicate) && Predicate != CmpInst
::FCMP_FALSE && Predicate != CmpInst::FCMP_TRUE &&
"Invalid constrained FP comparison predicate!") ? static_cast
<void> (0) : __assert_fail ("CmpInst::isFPPredicate(Predicate) && Predicate != CmpInst::FCMP_FALSE && Predicate != CmpInst::FCMP_TRUE && \"Invalid constrained FP comparison predicate!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/IRBuilder.h"
, 1181, __PRETTY_FUNCTION__))
1179 Predicate != CmpInst::FCMP_FALSE &&((CmpInst::isFPPredicate(Predicate) && Predicate != CmpInst
::FCMP_FALSE && Predicate != CmpInst::FCMP_TRUE &&
"Invalid constrained FP comparison predicate!") ? static_cast
<void> (0) : __assert_fail ("CmpInst::isFPPredicate(Predicate) && Predicate != CmpInst::FCMP_FALSE && Predicate != CmpInst::FCMP_TRUE && \"Invalid constrained FP comparison predicate!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/IRBuilder.h"
, 1181, __PRETTY_FUNCTION__))
1180 Predicate != CmpInst::FCMP_TRUE &&((CmpInst::isFPPredicate(Predicate) && Predicate != CmpInst
::FCMP_FALSE && Predicate != CmpInst::FCMP_TRUE &&
"Invalid constrained FP comparison predicate!") ? static_cast
<void> (0) : __assert_fail ("CmpInst::isFPPredicate(Predicate) && Predicate != CmpInst::FCMP_FALSE && Predicate != CmpInst::FCMP_TRUE && \"Invalid constrained FP comparison predicate!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/IRBuilder.h"
, 1181, __PRETTY_FUNCTION__))
1181 "Invalid constrained FP comparison predicate!")((CmpInst::isFPPredicate(Predicate) && Predicate != CmpInst
::FCMP_FALSE && Predicate != CmpInst::FCMP_TRUE &&
"Invalid constrained FP comparison predicate!") ? static_cast
<void> (0) : __assert_fail ("CmpInst::isFPPredicate(Predicate) && Predicate != CmpInst::FCMP_FALSE && Predicate != CmpInst::FCMP_TRUE && \"Invalid constrained FP comparison predicate!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/IRBuilder.h"
, 1181, __PRETTY_FUNCTION__))
;
1182
1183 StringRef PredicateStr = CmpInst::getPredicateName(Predicate);
1184 auto *PredicateMDS = MDString::get(Context, PredicateStr);
1185
1186 return MetadataAsValue::get(Context, PredicateMDS);
1187 }
1188
1189public:
1190 Value *CreateAdd(Value *LHS, Value *RHS, const Twine &Name = "",
1191 bool HasNUW = false, bool HasNSW = false) {
1192 if (auto *LC = dyn_cast<Constant>(LHS))
1193 if (auto *RC = dyn_cast<Constant>(RHS))
1194 return Insert(Folder.CreateAdd(LC, RC, HasNUW, HasNSW), Name);
1195 return CreateInsertNUWNSWBinOp(Instruction::Add, LHS, RHS, Name,
1196 HasNUW, HasNSW);
1197 }
1198
1199 Value *CreateNSWAdd(Value *LHS, Value *RHS, const Twine &Name = "") {
1200 return CreateAdd(LHS, RHS, Name, false, true);
1201 }
1202
1203 Value *CreateNUWAdd(Value *LHS, Value *RHS, const Twine &Name = "") {
1204 return CreateAdd(LHS, RHS, Name, true, false);
1205 }
1206
1207 Value *CreateSub(Value *LHS, Value *RHS, const Twine &Name = "",
1208 bool HasNUW = false, bool HasNSW = false) {
1209 if (auto *LC = dyn_cast<Constant>(LHS))
1210 if (auto *RC = dyn_cast<Constant>(RHS))
1211 return Insert(Folder.CreateSub(LC, RC, HasNUW, HasNSW), Name);
1212 return CreateInsertNUWNSWBinOp(Instruction::Sub, LHS, RHS, Name,
1213 HasNUW, HasNSW);
1214 }
1215
1216 Value *CreateNSWSub(Value *LHS, Value *RHS, const Twine &Name = "") {
1217 return CreateSub(LHS, RHS, Name, false, true);
1218 }
1219
1220 Value *CreateNUWSub(Value *LHS, Value *RHS, const Twine &Name = "") {
1221 return CreateSub(LHS, RHS, Name, true, false);
1222 }
1223
1224 Value *CreateMul(Value *LHS, Value *RHS, const Twine &Name = "",
1225 bool HasNUW = false, bool HasNSW = false) {
1226 if (auto *LC = dyn_cast<Constant>(LHS))
1227 if (auto *RC = dyn_cast<Constant>(RHS))
1228 return Insert(Folder.CreateMul(LC, RC, HasNUW, HasNSW), Name);
1229 return CreateInsertNUWNSWBinOp(Instruction::Mul, LHS, RHS, Name,
1230 HasNUW, HasNSW);
1231 }
1232
1233 Value *CreateNSWMul(Value *LHS, Value *RHS, const Twine &Name = "") {
1234 return CreateMul(LHS, RHS, Name, false, true);
1235 }
1236
1237 Value *CreateNUWMul(Value *LHS, Value *RHS, const Twine &Name = "") {
1238 return CreateMul(LHS, RHS, Name, true, false);
1239 }
1240
1241 Value *CreateUDiv(Value *LHS, Value *RHS, const Twine &Name = "",
1242 bool isExact = false) {
1243 if (auto *LC = dyn_cast<Constant>(LHS))
1244 if (auto *RC = dyn_cast<Constant>(RHS))
1245 return Insert(Folder.CreateUDiv(LC, RC, isExact), Name);
1246 if (!isExact)
1247 return Insert(BinaryOperator::CreateUDiv(LHS, RHS), Name);
1248 return Insert(BinaryOperator::CreateExactUDiv(LHS, RHS), Name);
1249 }
1250
1251 Value *CreateExactUDiv(Value *LHS, Value *RHS, const Twine &Name = "") {
1252 return CreateUDiv(LHS, RHS, Name, true);
1253 }
1254
1255 Value *CreateSDiv(Value *LHS, Value *RHS, const Twine &Name = "",
1256 bool isExact = false) {
1257 if (auto *LC = dyn_cast<Constant>(LHS))
1258 if (auto *RC = dyn_cast<Constant>(RHS))
1259 return Insert(Folder.CreateSDiv(LC, RC, isExact), Name);
1260 if (!isExact)
1261 return Insert(BinaryOperator::CreateSDiv(LHS, RHS), Name);
1262 return Insert(BinaryOperator::CreateExactSDiv(LHS, RHS), Name);
1263 }
1264
1265 Value *CreateExactSDiv(Value *LHS, Value *RHS, const Twine &Name = "") {
1266 return CreateSDiv(LHS, RHS, Name, true);
1267 }
1268
1269 Value *CreateURem(Value *LHS, Value *RHS, const Twine &Name = "") {
1270 if (Value *V = foldConstant(Instruction::URem, LHS, RHS, Name)) return V;
1271 return Insert(BinaryOperator::CreateURem(LHS, RHS), Name);
1272 }
1273
1274 Value *CreateSRem(Value *LHS, Value *RHS, const Twine &Name = "") {
1275 if (Value *V = foldConstant(Instruction::SRem, LHS, RHS, Name)) return V;
1276 return Insert(BinaryOperator::CreateSRem(LHS, RHS), Name);
1277 }
1278
1279 Value *CreateShl(Value *LHS, Value *RHS, const Twine &Name = "",
1280 bool HasNUW = false, bool HasNSW = false) {
1281 if (auto *LC = dyn_cast<Constant>(LHS))
1282 if (auto *RC = dyn_cast<Constant>(RHS))
1283 return Insert(Folder.CreateShl(LC, RC, HasNUW, HasNSW), Name);
1284 return CreateInsertNUWNSWBinOp(Instruction::Shl, LHS, RHS, Name,
1285 HasNUW, HasNSW);
1286 }
1287
1288 Value *CreateShl(Value *LHS, const APInt &RHS, const Twine &Name = "",
1289 bool HasNUW = false, bool HasNSW = false) {
1290 return CreateShl(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
1291 HasNUW, HasNSW);
1292 }
1293
1294 Value *CreateShl(Value *LHS, uint64_t RHS, const Twine &Name = "",
1295 bool HasNUW = false, bool HasNSW = false) {
1296 return CreateShl(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
1297 HasNUW, HasNSW);
1298 }
1299
1300 Value *CreateLShr(Value *LHS, Value *RHS, const Twine &Name = "",
1301 bool isExact = false) {
1302 if (auto *LC = dyn_cast<Constant>(LHS))
1303 if (auto *RC = dyn_cast<Constant>(RHS))
1304 return Insert(Folder.CreateLShr(LC, RC, isExact), Name);
1305 if (!isExact)
1306 return Insert(BinaryOperator::CreateLShr(LHS, RHS), Name);
1307 return Insert(BinaryOperator::CreateExactLShr(LHS, RHS), Name);
1308 }
1309
1310 Value *CreateLShr(Value *LHS, const APInt &RHS, const Twine &Name = "",
1311 bool isExact = false) {
1312 return CreateLShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
1313 }
1314
1315 Value *CreateLShr(Value *LHS, uint64_t RHS, const Twine &Name = "",
1316 bool isExact = false) {
1317 return CreateLShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
1318 }
1319
1320 Value *CreateAShr(Value *LHS, Value *RHS, const Twine &Name = "",
1321 bool isExact = false) {
1322 if (auto *LC = dyn_cast<Constant>(LHS))
1323 if (auto *RC = dyn_cast<Constant>(RHS))
1324 return Insert(Folder.CreateAShr(LC, RC, isExact), Name);
1325 if (!isExact)
1326 return Insert(BinaryOperator::CreateAShr(LHS, RHS), Name);
1327 return Insert(BinaryOperator::CreateExactAShr(LHS, RHS), Name);
1328 }
1329
1330 Value *CreateAShr(Value *LHS, const APInt &RHS, const Twine &Name = "",
1331 bool isExact = false) {
1332 return CreateAShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
1333 }
1334
1335 Value *CreateAShr(Value *LHS, uint64_t RHS, const Twine &Name = "",
1336 bool isExact = false) {
1337 return CreateAShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
1338 }
1339
1340 Value *CreateAnd(Value *LHS, Value *RHS, const Twine &Name = "") {
1341 if (auto *RC = dyn_cast<Constant>(RHS)) {
1342 if (isa<ConstantInt>(RC) && cast<ConstantInt>(RC)->isMinusOne())
1343 return LHS; // LHS & -1 -> LHS
1344 if (auto *LC = dyn_cast<Constant>(LHS))
1345 return Insert(Folder.CreateAnd(LC, RC), Name);
1346 }
1347 return Insert(BinaryOperator::CreateAnd(LHS, RHS), Name);
1348 }
1349
1350 Value *CreateAnd(Value *LHS, const APInt &RHS, const Twine &Name = "") {
1351 return CreateAnd(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1352 }
1353
1354 Value *CreateAnd(Value *LHS, uint64_t RHS, const Twine &Name = "") {
1355 return CreateAnd(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1356 }
1357
1358 Value *CreateAnd(ArrayRef<Value*> Ops) {
1359 assert(!Ops.empty())((!Ops.empty()) ? static_cast<void> (0) : __assert_fail
("!Ops.empty()", "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/IRBuilder.h"
, 1359, __PRETTY_FUNCTION__))
;
1360 Value *Accum = Ops[0];
1361 for (unsigned i = 1; i < Ops.size(); i++)
1362 Accum = CreateAnd(Accum, Ops[i]);
1363 return Accum;
1364 }
1365
1366 Value *CreateOr(Value *LHS, Value *RHS, const Twine &Name = "") {
1367 if (auto *RC = dyn_cast<Constant>(RHS)) {
1368 if (RC->isNullValue())
1369 return LHS; // LHS | 0 -> LHS
1370 if (auto *LC = dyn_cast<Constant>(LHS))
1371 return Insert(Folder.CreateOr(LC, RC), Name);
1372 }
1373 return Insert(BinaryOperator::CreateOr(LHS, RHS), Name);
1374 }
1375
1376 Value *CreateOr(Value *LHS, const APInt &RHS, const Twine &Name = "") {
1377 return CreateOr(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1378 }
1379
1380 Value *CreateOr(Value *LHS, uint64_t RHS, const Twine &Name = "") {
1381 return CreateOr(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1382 }
1383
1384 Value *CreateOr(ArrayRef<Value*> Ops) {
1385 assert(!Ops.empty())((!Ops.empty()) ? static_cast<void> (0) : __assert_fail
("!Ops.empty()", "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/IRBuilder.h"
, 1385, __PRETTY_FUNCTION__))
;
1386 Value *Accum = Ops[0];
1387 for (unsigned i = 1; i < Ops.size(); i++)
1388 Accum = CreateOr(Accum, Ops[i]);
1389 return Accum;
1390 }
1391
1392 Value *CreateXor(Value *LHS, Value *RHS, const Twine &Name = "") {
1393 if (Value *V = foldConstant(Instruction::Xor, LHS, RHS, Name)) return V;
1394 return Insert(BinaryOperator::CreateXor(LHS, RHS), Name);
1395 }
1396
1397 Value *CreateXor(Value *LHS, const APInt &RHS, const Twine &Name = "") {
1398 return CreateXor(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1399 }
1400
1401 Value *CreateXor(Value *LHS, uint64_t RHS, const Twine &Name = "") {
1402 return CreateXor(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1403 }
1404
1405 Value *CreateFAdd(Value *L, Value *R, const Twine &Name = "",
1406 MDNode *FPMD = nullptr) {
1407 if (IsFPConstrained)
1408 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fadd,
1409 L, R, nullptr, Name, FPMD);
1410
1411 if (Value *V = foldConstant(Instruction::FAdd, L, R, Name)) return V;
1412 Instruction *I = setFPAttrs(BinaryOperator::CreateFAdd(L, R), FPMD, FMF);
1413 return Insert(I, Name);
1414 }
1415
1416 /// Copy fast-math-flags from an instruction rather than using the builder's
1417 /// default FMF.
1418 Value *CreateFAddFMF(Value *L, Value *R, Instruction *FMFSource,
1419 const Twine &Name = "") {
1420 if (IsFPConstrained)
1421 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fadd,
1422 L, R, FMFSource, Name);
1423
1424 if (Value *V = foldConstant(Instruction::FAdd, L, R, Name)) return V;
1425 Instruction *I = setFPAttrs(BinaryOperator::CreateFAdd(L, R), nullptr,
1426 FMFSource->getFastMathFlags());
1427 return Insert(I, Name);
1428 }
1429
1430 Value *CreateFSub(Value *L, Value *R, const Twine &Name = "",
1431 MDNode *FPMD = nullptr) {
1432 if (IsFPConstrained)
1433 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fsub,
1434 L, R, nullptr, Name, FPMD);
1435
1436 if (Value *V = foldConstant(Instruction::FSub, L, R, Name)) return V;
1437 Instruction *I = setFPAttrs(BinaryOperator::CreateFSub(L, R), FPMD, FMF);
1438 return Insert(I, Name);
1439 }
1440
1441 /// Copy fast-math-flags from an instruction rather than using the builder's
1442 /// default FMF.
1443 Value *CreateFSubFMF(Value *L, Value *R, Instruction *FMFSource,
1444 const Twine &Name = "") {
1445 if (IsFPConstrained)
1446 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fsub,
1447 L, R, FMFSource, Name);
1448
1449 if (Value *V = foldConstant(Instruction::FSub, L, R, Name)) return V;
1450 Instruction *I = setFPAttrs(BinaryOperator::CreateFSub(L, R), nullptr,
1451 FMFSource->getFastMathFlags());
1452 return Insert(I, Name);
1453 }
1454
1455 Value *CreateFMul(Value *L, Value *R, const Twine &Name = "",
1456 MDNode *FPMD = nullptr) {
1457 if (IsFPConstrained)
1458 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fmul,
1459 L, R, nullptr, Name, FPMD);
1460
1461 if (Value *V = foldConstant(Instruction::FMul, L, R, Name)) return V;
1462 Instruction *I = setFPAttrs(BinaryOperator::CreateFMul(L, R), FPMD, FMF);
1463 return Insert(I, Name);
1464 }
1465
1466 /// Copy fast-math-flags from an instruction rather than using the builder's
1467 /// default FMF.
1468 Value *CreateFMulFMF(Value *L, Value *R, Instruction *FMFSource,
1469 const Twine &Name = "") {
1470 if (IsFPConstrained)
1471 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fmul,
1472 L, R, FMFSource, Name);
1473
1474 if (Value *V = foldConstant(Instruction::FMul, L, R, Name)) return V;
1475 Instruction *I = setFPAttrs(BinaryOperator::CreateFMul(L, R), nullptr,
1476 FMFSource->getFastMathFlags());
1477 return Insert(I, Name);
1478 }
1479
1480 Value *CreateFDiv(Value *L, Value *R, const Twine &Name = "",
1481 MDNode *FPMD = nullptr) {
1482 if (IsFPConstrained)
1483 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fdiv,
1484 L, R, nullptr, Name, FPMD);
1485
1486 if (Value *V = foldConstant(Instruction::FDiv, L, R, Name)) return V;
1487 Instruction *I = setFPAttrs(BinaryOperator::CreateFDiv(L, R), FPMD, FMF);
1488 return Insert(I, Name);
1489 }
1490
1491 /// Copy fast-math-flags from an instruction rather than using the builder's
1492 /// default FMF.
1493 Value *CreateFDivFMF(Value *L, Value *R, Instruction *FMFSource,
1494 const Twine &Name = "") {
1495 if (IsFPConstrained)
1496 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fdiv,
1497 L, R, FMFSource, Name);
1498
1499 if (Value *V = foldConstant(Instruction::FDiv, L, R, Name)) return V;
1500 Instruction *I = setFPAttrs(BinaryOperator::CreateFDiv(L, R), nullptr,
1501 FMFSource->getFastMathFlags());
1502 return Insert(I, Name);
1503 }
1504
1505 Value *CreateFRem(Value *L, Value *R, const Twine &Name = "",
1506 MDNode *FPMD = nullptr) {
1507 if (IsFPConstrained)
1508 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_frem,
1509 L, R, nullptr, Name, FPMD);
1510
1511 if (Value *V = foldConstant(Instruction::FRem, L, R, Name)) return V;
1512 Instruction *I = setFPAttrs(BinaryOperator::CreateFRem(L, R), FPMD, FMF);
1513 return Insert(I, Name);
1514 }
1515
1516 /// Copy fast-math-flags from an instruction rather than using the builder's
1517 /// default FMF.
1518 Value *CreateFRemFMF(Value *L, Value *R, Instruction *FMFSource,
1519 const Twine &Name = "") {
1520 if (IsFPConstrained)
1521 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_frem,
1522 L, R, FMFSource, Name);
1523
1524 if (Value *V = foldConstant(Instruction::FRem, L, R, Name)) return V;
1525 Instruction *I = setFPAttrs(BinaryOperator::CreateFRem(L, R), nullptr,
1526 FMFSource->getFastMathFlags());
1527 return Insert(I, Name);
1528 }
1529
1530 Value *CreateBinOp(Instruction::BinaryOps Opc,
1531 Value *LHS, Value *RHS, const Twine &Name = "",
1532 MDNode *FPMathTag = nullptr) {
1533 if (Value *V = foldConstant(Opc, LHS, RHS, Name)) return V;
1534 Instruction *BinOp = BinaryOperator::Create(Opc, LHS, RHS);
1535 if (isa<FPMathOperator>(BinOp))
1536 setFPAttrs(BinOp, FPMathTag, FMF);
1537 return Insert(BinOp, Name);
1538 }
1539
1540 Value *CreateLogicalAnd(Value *Cond1, Value *Cond2, const Twine &Name = "") {
1541 assert(Cond2->getType()->isIntOrIntVectorTy(1))((Cond2->getType()->isIntOrIntVectorTy(1)) ? static_cast
<void> (0) : __assert_fail ("Cond2->getType()->isIntOrIntVectorTy(1)"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/IRBuilder.h"
, 1541, __PRETTY_FUNCTION__))
;
1542 return CreateSelect(Cond1, Cond2,
1543 ConstantInt::getNullValue(Cond2->getType()), Name);
1544 }
1545
1546 Value *CreateLogicalOr(Value *Cond1, Value *Cond2, const Twine &Name = "") {
1547 assert(Cond2->getType()->isIntOrIntVectorTy(1))((Cond2->getType()->isIntOrIntVectorTy(1)) ? static_cast
<void> (0) : __assert_fail ("Cond2->getType()->isIntOrIntVectorTy(1)"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/IRBuilder.h"
, 1547, __PRETTY_FUNCTION__))
;
1548 return CreateSelect(Cond1, ConstantInt::getAllOnesValue(Cond2->getType()),
1549 Cond2, Name);
1550 }
1551
  /// Create a call to an experimental.constrained.* binary FP intrinsic
  /// \p ID on \p L and \p R. \p Rounding / \p Except override the builder's
  /// defaults when set. Declaration only; defined out of line.
  CallInst *CreateConstrainedFPBinOp(
      Intrinsic::ID ID, Value *L, Value *R, Instruction *FMFSource = nullptr,
      const Twine &Name = "", MDNode *FPMathTag = nullptr,
      Optional<RoundingMode> Rounding = None,
      Optional<fp::ExceptionBehavior> Except = None);
1557
1558 Value *CreateNeg(Value *V, const Twine &Name = "",
1559 bool HasNUW = false, bool HasNSW = false) {
1560 if (auto *VC = dyn_cast<Constant>(V))
1561 return Insert(Folder.CreateNeg(VC, HasNUW, HasNSW), Name);
1562 BinaryOperator *BO = Insert(BinaryOperator::CreateNeg(V), Name);
1563 if (HasNUW) BO->setHasNoUnsignedWrap();
1564 if (HasNSW) BO->setHasNoSignedWrap();
1565 return BO;
1566 }
1567
  /// Create a negation of \p V flagged no-signed-wrap.
  Value *CreateNSWNeg(Value *V, const Twine &Name = "") {
    return CreateNeg(V, Name, false, true);
  }
1571
  /// Create a negation of \p V flagged no-unsigned-wrap.
  Value *CreateNUWNeg(Value *V, const Twine &Name = "") {
    return CreateNeg(V, Name, true, false);
  }
1575
  /// Create an fneg of \p V (constant-folded when \p V is a constant),
  /// attaching \p FPMathTag and the builder's default fast-math flags.
  Value *CreateFNeg(Value *V, const Twine &Name = "",
                    MDNode *FPMathTag = nullptr) {
    if (auto *VC = dyn_cast<Constant>(V))
      return Insert(Folder.CreateFNeg(VC), Name);
    return Insert(setFPAttrs(UnaryOperator::CreateFNeg(V), FPMathTag, FMF),
                  Name);
  }
1583
  /// Copy fast-math-flags from an instruction rather than using the builder's
  /// default FMF.
  Value *CreateFNegFMF(Value *V, Instruction *FMFSource,
                       const Twine &Name = "") {
    // Constant operand: fold instead of emitting an instruction (no flags
    // are attached to a folded constant).
    if (auto *VC = dyn_cast<Constant>(V))
      return Insert(Folder.CreateFNeg(VC), Name);
    return Insert(setFPAttrs(UnaryOperator::CreateFNeg(V), nullptr,
                             FMFSource->getFastMathFlags()),
                  Name);
  }
1594
  /// Create a bitwise NOT of \p V (xor with all-ones), constant-folded when
  /// \p V is a constant.
  Value *CreateNot(Value *V, const Twine &Name = "") {
    if (auto *VC = dyn_cast<Constant>(V))
      return Insert(Folder.CreateNot(VC), Name);
    return Insert(BinaryOperator::CreateNot(V), Name);
  }
1600
1601 Value *CreateUnOp(Instruction::UnaryOps Opc,
1602 Value *V, const Twine &Name = "",
1603 MDNode *FPMathTag = nullptr) {
1604 if (auto *VC = dyn_cast<Constant>(V))
1605 return Insert(Folder.CreateUnOp(Opc, VC), Name);
1606 Instruction *UnOp = UnaryOperator::Create(Opc, V);
1607 if (isa<FPMathOperator>(UnOp))
1608 setFPAttrs(UnOp, FPMathTag, FMF);
1609 return Insert(UnOp, Name);
1610 }
1611
  /// Create either a UnaryOperator or BinaryOperator depending on \p Opc.
  /// Correct number of operands must be passed accordingly.
  /// Declaration only; defined out of line.
  Value *CreateNAryOp(unsigned Opc, ArrayRef<Value *> Ops,
                      const Twine &Name = "", MDNode *FPMathTag = nullptr);
1616
1617 //===--------------------------------------------------------------------===//
1618 // Instruction creation methods: Memory Instructions
1619 //===--------------------------------------------------------------------===//
1620
1621 AllocaInst *CreateAlloca(Type *Ty, unsigned AddrSpace,
1622 Value *ArraySize = nullptr, const Twine &Name = "") {
1623 const DataLayout &DL = BB->getModule()->getDataLayout();
1624 Align AllocaAlign = DL.getPrefTypeAlign(Ty);
1625 return Insert(new AllocaInst(Ty, AddrSpace, ArraySize, AllocaAlign), Name);
1626 }
1627
1628 AllocaInst *CreateAlloca(Type *Ty, Value *ArraySize = nullptr,
1629 const Twine &Name = "") {
1630 const DataLayout &DL = BB->getModule()->getDataLayout();
1631 Align AllocaAlign = DL.getPrefTypeAlign(Ty);
1632 unsigned AddrSpace = DL.getAllocaAddrSpace();
1633 return Insert(new AllocaInst(Ty, AddrSpace, ArraySize, AllocaAlign), Name);
1634 }
1635
  /// Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of
  /// converting the string to 'bool' for the isVolatile parameter.
  LoadInst *CreateLoad(Type *Ty, Value *Ptr, const char *Name) {
    return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), Name);
  }
1641
  /// Create a non-volatile load of \p Ty from \p Ptr with default (ABI)
  /// alignment.
  LoadInst *CreateLoad(Type *Ty, Value *Ptr, const Twine &Name = "") {
    return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), Name);
  }
1645
  /// Create a load of \p Ty from \p Ptr with default (ABI) alignment and
  /// the given volatility.
  LoadInst *CreateLoad(Type *Ty, Value *Ptr, bool isVolatile,
                       const Twine &Name = "") {
    return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), isVolatile, Name);
  }
1650
1651 // Deprecated [opaque pointer types]
1652 LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateLoad(Value *Ptr,[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, const
char *Name)
1653 const char *Name),[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, const
char *Name)
1654 "Use the version that explicitly specifies the "[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, const
char *Name)
1655 "loaded type instead")[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, const
char *Name)
{
1656 return CreateLoad(Ptr->getType()->getPointerElementType(), Ptr, Name);
1657 }
1658
1659 // Deprecated [opaque pointer types]
1660 LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateLoad(Value *Ptr,[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, const
Twine &Name = "")
1661 const Twine &Name = ""),[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, const
Twine &Name = "")
1662 "Use the version that explicitly specifies the "[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, const
Twine &Name = "")
1663 "loaded type instead")[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, const
Twine &Name = "")
{
1664 return CreateLoad(Ptr->getType()->getPointerElementType(), Ptr, Name);
1665 }
1666
1667 // Deprecated [opaque pointer types]
1668 LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateLoad(Value *Ptr,[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, bool
isVolatile, const Twine &Name = "")
1669 bool isVolatile,[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, bool
isVolatile, const Twine &Name = "")
1670 const Twine &Name = ""),[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, bool
isVolatile, const Twine &Name = "")
1671 "Use the version that explicitly specifies the "[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, bool
isVolatile, const Twine &Name = "")
1672 "loaded type instead")[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, bool
isVolatile, const Twine &Name = "")
{
1673 return CreateLoad(Ptr->getType()->getPointerElementType(), Ptr, isVolatile,
1674 Name);
1675 }
1676
  /// Create a store of \p Val to \p Ptr with default (ABI) alignment.
  StoreInst *CreateStore(Value *Val, Value *Ptr, bool isVolatile = false) {
    return CreateAlignedStore(Val, Ptr, MaybeAlign(), isVolatile);
  }
1680
  /// Create a non-volatile aligned load; const char* overload keeps a string
  /// literal from binding to the isVolatile bool parameter.
  LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
                              const char *Name) {
    return CreateAlignedLoad(Ty, Ptr, Align, /*isVolatile*/false, Name);
  }
1685
  /// Create a non-volatile load of \p Ty from \p Ptr with the given
  /// alignment (ABI alignment of \p Ty if unset).
  LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
                              const Twine &Name = "") {
    return CreateAlignedLoad(Ty, Ptr, Align, /*isVolatile*/false, Name);
  }
1690
  /// Create a load of \p Ty from \p Ptr. When \p Align is unset it defaults
  /// to the ABI alignment of \p Ty from the module's data layout.
  LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
                              bool isVolatile, const Twine &Name = "") {
    if (!Align) {
      const DataLayout &DL = BB->getModule()->getDataLayout();
      Align = DL.getABITypeAlign(Ty);
    }
    return Insert(new LoadInst(Ty, Ptr, Twine(), isVolatile, *Align), Name);
  }
1699
1700 // Deprecated [opaque pointer types]
1701 LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Value *Ptr,[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateAlignedLoad(Value *
Ptr, MaybeAlign Align, const char *Name)
1702 MaybeAlign Align,[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateAlignedLoad(Value *
Ptr, MaybeAlign Align, const char *Name)
1703 const char *Name),[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateAlignedLoad(Value *
Ptr, MaybeAlign Align, const char *Name)
1704 "Use the version that explicitly specifies the "[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateAlignedLoad(Value *
Ptr, MaybeAlign Align, const char *Name)
1705 "loaded type instead")[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateAlignedLoad(Value *
Ptr, MaybeAlign Align, const char *Name)
{
1706 return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
1707 Align, Name);
1708 }
1709 // Deprecated [opaque pointer types]
1710 LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Value *Ptr,[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateAlignedLoad(Value *
Ptr, MaybeAlign Align, const Twine &Name = "")
1711 MaybeAlign Align,[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateAlignedLoad(Value *
Ptr, MaybeAlign Align, const Twine &Name = "")
1712 const Twine &Name = ""),[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateAlignedLoad(Value *
Ptr, MaybeAlign Align, const Twine &Name = "")
1713 "Use the version that explicitly specifies the "[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateAlignedLoad(Value *
Ptr, MaybeAlign Align, const Twine &Name = "")
1714 "loaded type instead")[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateAlignedLoad(Value *
Ptr, MaybeAlign Align, const Twine &Name = "")
{
1715 return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
1716 Align, Name);
1717 }
1718 // Deprecated [opaque pointer types]
1719 LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Value *Ptr,[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateAlignedLoad(Value *
Ptr, MaybeAlign Align, bool isVolatile, const Twine &Name
= "")
1720 MaybeAlign Align,[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateAlignedLoad(Value *
Ptr, MaybeAlign Align, bool isVolatile, const Twine &Name
= "")
1721 bool isVolatile,[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateAlignedLoad(Value *
Ptr, MaybeAlign Align, bool isVolatile, const Twine &Name
= "")
1722 const Twine &Name = ""),[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateAlignedLoad(Value *
Ptr, MaybeAlign Align, bool isVolatile, const Twine &Name
= "")
1723 "Use the version that explicitly specifies the "[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateAlignedLoad(Value *
Ptr, MaybeAlign Align, bool isVolatile, const Twine &Name
= "")
1724 "loaded type instead")[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateAlignedLoad(Value *
Ptr, MaybeAlign Align, bool isVolatile, const Twine &Name
= "")
{
1725 return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
1726 Align, isVolatile, Name);
1727 }
1728
  /// Create a store of \p Val to \p Ptr. When \p Align is unset it defaults
  /// to the ABI alignment of \p Val's type from the module's data layout.
  StoreInst *CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align,
                                bool isVolatile = false) {
    if (!Align) {
      const DataLayout &DL = BB->getModule()->getDataLayout();
      Align = DL.getABITypeAlign(Val->getType());
    }
    return Insert(new StoreInst(Val, Ptr, isVolatile, *Align));
  }
  /// Create a fence instruction with the given ordering and synchronization
  /// scope.
  FenceInst *CreateFence(AtomicOrdering Ordering,
                         SyncScope::ID SSID = SyncScope::System,
                         const Twine &Name = "") {
    return Insert(new FenceInst(Context, Ordering, SSID), Name);
  }
1742
  /// Create a cmpxchg of \p New against \p Cmp at \p Ptr. When \p Align is
  /// unset it defaults to the store size of \p New's type per the module's
  /// data layout.
  AtomicCmpXchgInst *
  CreateAtomicCmpXchg(Value *Ptr, Value *Cmp, Value *New, MaybeAlign Align,
                      AtomicOrdering SuccessOrdering,
                      AtomicOrdering FailureOrdering,
                      SyncScope::ID SSID = SyncScope::System) {
    if (!Align) {
      const DataLayout &DL = BB->getModule()->getDataLayout();
      Align = llvm::Align(DL.getTypeStoreSize(New->getType()));
    }

    return Insert(new AtomicCmpXchgInst(Ptr, Cmp, New, *Align, SuccessOrdering,
                                        FailureOrdering, SSID));
  }
1756
  /// Create an atomicrmw performing \p Op on \p Val at \p Ptr. When
  /// \p Align is unset it defaults to the store size of \p Val's type per
  /// the module's data layout.
  AtomicRMWInst *CreateAtomicRMW(AtomicRMWInst::BinOp Op, Value *Ptr,
                                 Value *Val, MaybeAlign Align,
                                 AtomicOrdering Ordering,
                                 SyncScope::ID SSID = SyncScope::System) {
    if (!Align) {
      const DataLayout &DL = BB->getModule()->getDataLayout();
      Align = llvm::Align(DL.getTypeStoreSize(Val->getType()));
    }

    return Insert(new AtomicRMWInst(Op, Ptr, Val, *Align, Ordering, SSID));
  }
1768
1769 Value *CreateGEP(Value *Ptr, ArrayRef<Value *> IdxList,
1770 const Twine &Name = "") {
1771 return CreateGEP(nullptr, Ptr, IdxList, Name);
1772 }
1773
1774 Value *CreateGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
1775 const Twine &Name = "") {
1776 if (auto *PC = dyn_cast<Constant>(Ptr)) {
1777 // Every index must be constant.
1778 size_t i, e;
1779 for (i = 0, e = IdxList.size(); i != e; ++i)
1780 if (!isa<Constant>(IdxList[i]))
1781 break;
1782 if (i == e)
1783 return Insert(Folder.CreateGetElementPtr(Ty, PC, IdxList), Name);
1784 }
1785 return Insert(GetElementPtrInst::Create(Ty, Ptr, IdxList), Name);
1786 }
1787
1788 Value *CreateInBoundsGEP(Value *Ptr, ArrayRef<Value *> IdxList,
1789 const Twine &Name = "") {
1790 return CreateInBoundsGEP(nullptr, Ptr, IdxList, Name);
1791 }
1792
1793 Value *CreateInBoundsGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
1794 const Twine &Name = "") {
1795 if (auto *PC = dyn_cast<Constant>(Ptr)) {
1796 // Every index must be constant.
1797 size_t i, e;
1798 for (i = 0, e = IdxList.size(); i != e; ++i)
1799 if (!isa<Constant>(IdxList[i]))
1800 break;
1801 if (i == e)
1802 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, IdxList),
1803 Name);
1804 }
1805 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, IdxList), Name);
1806 }
1807
1808 Value *CreateGEP(Value *Ptr, Value *Idx, const Twine &Name = "") {
1809 return CreateGEP(nullptr, Ptr, Idx, Name);
1810 }
1811
  /// Create a single-index GEP with explicit source element type \p Ty,
  /// folding to a constant expression when both pointer and index are
  /// constants.
  Value *CreateGEP(Type *Ty, Value *Ptr, Value *Idx, const Twine &Name = "") {
    if (auto *PC = dyn_cast<Constant>(Ptr))
      if (auto *IC = dyn_cast<Constant>(Idx))
        return Insert(Folder.CreateGetElementPtr(Ty, PC, IC), Name);
    return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
  }
1818
  /// Create a single-index inbounds GEP with explicit source element type
  /// \p Ty, folding to a constant expression when both pointer and index
  /// are constants.
  Value *CreateInBoundsGEP(Type *Ty, Value *Ptr, Value *Idx,
                           const Twine &Name = "") {
    if (auto *PC = dyn_cast<Constant>(Ptr))
      if (auto *IC = dyn_cast<Constant>(Idx))
        return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, IC), Name);
    return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
  }
1826
1827 Value *CreateConstGEP1_32(Value *Ptr, unsigned Idx0, const Twine &Name = "") {
1828 return CreateConstGEP1_32(nullptr, Ptr, Idx0, Name);
1829 }
1830
  /// Create a GEP with a single constant i32 index \p Idx0, folding to a
  /// constant expression when the base pointer is constant.
  Value *CreateConstGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0,
                            const Twine &Name = "") {
    Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0);

    if (auto *PC = dyn_cast<Constant>(Ptr))
      return Insert(Folder.CreateGetElementPtr(Ty, PC, Idx), Name);

    return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
  }
1840
  /// Create an inbounds GEP with a single constant i32 index \p Idx0,
  /// folding to a constant expression when the base pointer is constant.
  Value *CreateConstInBoundsGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0,
                                    const Twine &Name = "") {
    Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0);

    if (auto *PC = dyn_cast<Constant>(Ptr))
      return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idx), Name);

    return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
  }
1850
  /// Create a GEP with two constant i32 indices {\p Idx0, \p Idx1}, folding
  /// to a constant expression when the base pointer is constant.
  Value *CreateConstGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0, unsigned Idx1,
                            const Twine &Name = "") {
    Value *Idxs[] = {
      ConstantInt::get(Type::getInt32Ty(Context), Idx0),
      ConstantInt::get(Type::getInt32Ty(Context), Idx1)
    };

    if (auto *PC = dyn_cast<Constant>(Ptr))
      return Insert(Folder.CreateGetElementPtr(Ty, PC, Idxs), Name);

    return Insert(GetElementPtrInst::Create(Ty, Ptr, Idxs), Name);
  }
1863
  /// Create an inbounds GEP with two constant i32 indices {\p Idx0,
  /// \p Idx1}, folding to a constant expression when the base pointer is
  /// constant.
  Value *CreateConstInBoundsGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0,
                                    unsigned Idx1, const Twine &Name = "") {
    Value *Idxs[] = {
      ConstantInt::get(Type::getInt32Ty(Context), Idx0),
      ConstantInt::get(Type::getInt32Ty(Context), Idx1)
    };

    if (auto *PC = dyn_cast<Constant>(Ptr))
      return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idxs), Name);

    return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idxs), Name);
  }
1876
  /// Create a GEP with a single constant i64 index \p Idx0, folding to a
  /// constant expression when the base pointer is constant.
  Value *CreateConstGEP1_64(Type *Ty, Value *Ptr, uint64_t Idx0,
                            const Twine &Name = "") {
    Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);

    if (auto *PC = dyn_cast<Constant>(Ptr))
      return Insert(Folder.CreateGetElementPtr(Ty, PC, Idx), Name);

    return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
  }
1886
1887 Value *CreateConstGEP1_64(Value *Ptr, uint64_t Idx0, const Twine &Name = "") {
1888 return CreateConstGEP1_64(nullptr, Ptr, Idx0, Name);
1889 }
1890
  /// Create an inbounds GEP with a single constant i64 index \p Idx0,
  /// folding to a constant expression when the base pointer is constant.
  Value *CreateConstInBoundsGEP1_64(Type *Ty, Value *Ptr, uint64_t Idx0,
                                    const Twine &Name = "") {
    Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);

    if (auto *PC = dyn_cast<Constant>(Ptr))
      return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idx), Name);

    return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
  }
1900
1901 Value *CreateConstInBoundsGEP1_64(Value *Ptr, uint64_t Idx0,
1902 const Twine &Name = "") {
1903 return CreateConstInBoundsGEP1_64(nullptr, Ptr, Idx0, Name);
1904 }
1905
  /// Create a GEP with two constant i64 indices {\p Idx0, \p Idx1}, folding
  /// to a constant expression when the base pointer is constant.
  Value *CreateConstGEP2_64(Type *Ty, Value *Ptr, uint64_t Idx0, uint64_t Idx1,
                            const Twine &Name = "") {
    Value *Idxs[] = {
      ConstantInt::get(Type::getInt64Ty(Context), Idx0),
      ConstantInt::get(Type::getInt64Ty(Context), Idx1)
    };

    if (auto *PC = dyn_cast<Constant>(Ptr))
      return Insert(Folder.CreateGetElementPtr(Ty, PC, Idxs), Name);

    return Insert(GetElementPtrInst::Create(Ty, Ptr, Idxs), Name);
  }
1918
1919 Value *CreateConstGEP2_64(Value *Ptr, uint64_t Idx0, uint64_t Idx1,
1920 const Twine &Name = "") {
1921 return CreateConstGEP2_64(nullptr, Ptr, Idx0, Idx1, Name);
1922 }
1923
  /// Create an inbounds GEP with two constant i64 indices {\p Idx0,
  /// \p Idx1}, folding to a constant expression when the base pointer is
  /// constant.
  Value *CreateConstInBoundsGEP2_64(Type *Ty, Value *Ptr, uint64_t Idx0,
                                    uint64_t Idx1, const Twine &Name = "") {
    Value *Idxs[] = {
      ConstantInt::get(Type::getInt64Ty(Context), Idx0),
      ConstantInt::get(Type::getInt64Ty(Context), Idx1)
    };

    if (auto *PC = dyn_cast<Constant>(Ptr))
      return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idxs), Name);

    return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idxs), Name);
  }
1936
1937 Value *CreateConstInBoundsGEP2_64(Value *Ptr, uint64_t Idx0, uint64_t Idx1,
1938 const Twine &Name = "") {
1939 return CreateConstInBoundsGEP2_64(nullptr, Ptr, Idx0, Idx1, Name);
1940 }
1941
  /// Create a struct-member GEP: {0, Idx} into aggregate type \p Ty.
  Value *CreateStructGEP(Type *Ty, Value *Ptr, unsigned Idx,
                         const Twine &Name = "") {
    return CreateConstInBoundsGEP2_32(Ty, Ptr, 0, Idx, Name);
  }
1946
1947 Value *CreateStructGEP(Value *Ptr, unsigned Idx, const Twine &Name = "") {
1948 return CreateConstInBoundsGEP2_32(nullptr, Ptr, 0, Idx, Name);
1949 }
1950
  /// Same as CreateGlobalString, but return a pointer with "i8*" type
  /// instead of a pointer to array of i8.
  ///
  /// If no module is given via \p M, it is taken from the insertion point
  /// basic block.
  Constant *CreateGlobalStringPtr(StringRef Str, const Twine &Name = "",
                                  unsigned AddressSpace = 0,
                                  Module *M = nullptr) {
    GlobalVariable *GV = CreateGlobalString(Str, Name, AddressSpace, M);
    Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
    // GEP {0, 0} decays the [N x i8]* global to an i8* into its first
    // element, as a constant expression.
    Constant *Indices[] = {Zero, Zero};
    return ConstantExpr::getInBoundsGetElementPtr(GV->getValueType(), GV,
                                                  Indices);
  }
1965
1966 //===--------------------------------------------------------------------===//
1967 // Instruction creation methods: Cast/Conversion Operators
1968 //===--------------------------------------------------------------------===//
1969
  /// Create a trunc of \p V to \p DestTy.
  Value *CreateTrunc(Value *V, Type *DestTy, const Twine &Name = "") {
    return CreateCast(Instruction::Trunc, V, DestTy, Name);
  }
1973
  /// Create a zext of \p V to \p DestTy.
  Value *CreateZExt(Value *V, Type *DestTy, const Twine &Name = "") {
    return CreateCast(Instruction::ZExt, V, DestTy, Name);
  }
1977
  /// Create a sext of \p V to \p DestTy.
  Value *CreateSExt(Value *V, Type *DestTy, const Twine &Name = "") {
    return CreateCast(Instruction::SExt, V, DestTy, Name);
  }
1981
1982 /// Create a ZExt or Trunc from the integer value V to DestTy. Return
1983 /// the value untouched if the type of V is already DestTy.
1984 Value *CreateZExtOrTrunc(Value *V, Type *DestTy,
1985 const Twine &Name = "") {
1986 assert(V->getType()->isIntOrIntVectorTy() &&((V->getType()->isIntOrIntVectorTy() && DestTy->
isIntOrIntVectorTy() && "Can only zero extend/truncate integers!"
) ? static_cast<void> (0) : __assert_fail ("V->getType()->isIntOrIntVectorTy() && DestTy->isIntOrIntVectorTy() && \"Can only zero extend/truncate integers!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/IRBuilder.h"
, 1988, __PRETTY_FUNCTION__))
1987 DestTy->isIntOrIntVectorTy() &&((V->getType()->isIntOrIntVectorTy() && DestTy->
isIntOrIntVectorTy() && "Can only zero extend/truncate integers!"
) ? static_cast<void> (0) : __assert_fail ("V->getType()->isIntOrIntVectorTy() && DestTy->isIntOrIntVectorTy() && \"Can only zero extend/truncate integers!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/IRBuilder.h"
, 1988, __PRETTY_FUNCTION__))
1988 "Can only zero extend/truncate integers!")((V->getType()->isIntOrIntVectorTy() && DestTy->
isIntOrIntVectorTy() && "Can only zero extend/truncate integers!"
) ? static_cast<void> (0) : __assert_fail ("V->getType()->isIntOrIntVectorTy() && DestTy->isIntOrIntVectorTy() && \"Can only zero extend/truncate integers!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/IRBuilder.h"
, 1988, __PRETTY_FUNCTION__))
;
1989 Type *VTy = V->getType();
1990 if (VTy->getScalarSizeInBits() < DestTy->getScalarSizeInBits())
1991 return CreateZExt(V, DestTy, Name);
1992 if (VTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits())
1993 return CreateTrunc(V, DestTy, Name);
1994 return V;
1995 }
1996
1997 /// Create a SExt or Trunc from the integer value V to DestTy. Return
1998 /// the value untouched if the type of V is already DestTy.
1999 Value *CreateSExtOrTrunc(Value *V, Type *DestTy,
2000 const Twine &Name = "") {
2001 assert(V->getType()->isIntOrIntVectorTy() &&((V->getType()->isIntOrIntVectorTy() && DestTy->
isIntOrIntVectorTy() && "Can only sign extend/truncate integers!"
) ? static_cast<void> (0) : __assert_fail ("V->getType()->isIntOrIntVectorTy() && DestTy->isIntOrIntVectorTy() && \"Can only sign extend/truncate integers!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/IRBuilder.h"
, 2003, __PRETTY_FUNCTION__))
2002 DestTy->isIntOrIntVectorTy() &&((V->getType()->isIntOrIntVectorTy() && DestTy->
isIntOrIntVectorTy() && "Can only sign extend/truncate integers!"
) ? static_cast<void> (0) : __assert_fail ("V->getType()->isIntOrIntVectorTy() && DestTy->isIntOrIntVectorTy() && \"Can only sign extend/truncate integers!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/IRBuilder.h"
, 2003, __PRETTY_FUNCTION__))
2003 "Can only sign extend/truncate integers!")((V->getType()->isIntOrIntVectorTy() && DestTy->
isIntOrIntVectorTy() && "Can only sign extend/truncate integers!"
) ? static_cast<void> (0) : __assert_fail ("V->getType()->isIntOrIntVectorTy() && DestTy->isIntOrIntVectorTy() && \"Can only sign extend/truncate integers!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/IRBuilder.h"
, 2003, __PRETTY_FUNCTION__))
;
2004 Type *VTy = V->getType();
2005 if (VTy->getScalarSizeInBits() < DestTy->getScalarSizeInBits())
2006 return CreateSExt(V, DestTy, Name);
2007 if (VTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits())
2008 return CreateTrunc(V, DestTy, Name);
2009 return V;
2010 }
2011
  /// Create an fptoui of \p V to \p DestTy (constrained intrinsic in
  /// strict-FP mode).
  Value *CreateFPToUI(Value *V, Type *DestTy, const Twine &Name = "") {
    if (IsFPConstrained)
      return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fptoui,
                                     V, DestTy, nullptr, Name);
    return CreateCast(Instruction::FPToUI, V, DestTy, Name);
  }
2018
  /// Create an fptosi of \p V to \p DestTy (constrained intrinsic in
  /// strict-FP mode).
  Value *CreateFPToSI(Value *V, Type *DestTy, const Twine &Name = "") {
    if (IsFPConstrained)
      return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fptosi,
                                     V, DestTy, nullptr, Name);
    return CreateCast(Instruction::FPToSI, V, DestTy, Name);
  }
2025
  /// Create a uitofp of \p V to \p DestTy (constrained intrinsic in
  /// strict-FP mode).
  Value *CreateUIToFP(Value *V, Type *DestTy, const Twine &Name = ""){
    if (IsFPConstrained)
      return CreateConstrainedFPCast(Intrinsic::experimental_constrained_uitofp,
                                     V, DestTy, nullptr, Name);
    return CreateCast(Instruction::UIToFP, V, DestTy, Name);
  }
2032
  /// Create a sitofp of \p V to \p DestTy (constrained intrinsic in
  /// strict-FP mode).
  Value *CreateSIToFP(Value *V, Type *DestTy, const Twine &Name = ""){
    if (IsFPConstrained)
      return CreateConstrainedFPCast(Intrinsic::experimental_constrained_sitofp,
                                     V, DestTy, nullptr, Name);
    return CreateCast(Instruction::SIToFP, V, DestTy, Name);
  }
2039
  /// Create an fptrunc of \p V to \p DestTy (constrained intrinsic in
  /// strict-FP mode).
  Value *CreateFPTrunc(Value *V, Type *DestTy,
                       const Twine &Name = "") {
    if (IsFPConstrained)
      return CreateConstrainedFPCast(
          Intrinsic::experimental_constrained_fptrunc, V, DestTy, nullptr,
          Name);
    return CreateCast(Instruction::FPTrunc, V, DestTy, Name);
  }
2048
  /// Create an fpext of \p V to \p DestTy (constrained intrinsic in
  /// strict-FP mode).
  Value *CreateFPExt(Value *V, Type *DestTy, const Twine &Name = "") {
    if (IsFPConstrained)
      return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fpext,
                                     V, DestTy, nullptr, Name);
    return CreateCast(Instruction::FPExt, V, DestTy, Name);
  }
2055
  /// Create a ptrtoint of \p V to \p DestTy.
  Value *CreatePtrToInt(Value *V, Type *DestTy,
                        const Twine &Name = "") {
    return CreateCast(Instruction::PtrToInt, V, DestTy, Name);
  }
2060
  /// Create an inttoptr of \p V to \p DestTy.
  Value *CreateIntToPtr(Value *V, Type *DestTy,
                        const Twine &Name = "") {
    return CreateCast(Instruction::IntToPtr, V, DestTy, Name);
  }
2065
  /// Create a bitcast of \p V to \p DestTy.
  Value *CreateBitCast(Value *V, Type *DestTy,
                       const Twine &Name = "") {
    return CreateCast(Instruction::BitCast, V, DestTy, Name);
  }
2070
2071 Value *CreateAddrSpaceCast(Value *V, Type *DestTy,
2072 const Twine &Name = "") {
2073 return CreateCast(Instruction::AddrSpaceCast, V, DestTy, Name);
2074 }
2075
2076 Value *CreateZExtOrBitCast(Value *V, Type *DestTy,
2077 const Twine &Name = "") {
2078 if (V->getType() == DestTy)
2079 return V;
2080 if (auto *VC = dyn_cast<Constant>(V))
2081 return Insert(Folder.CreateZExtOrBitCast(VC, DestTy), Name);
2082 return Insert(CastInst::CreateZExtOrBitCast(V, DestTy), Name);
2083 }
2084
2085 Value *CreateSExtOrBitCast(Value *V, Type *DestTy,
2086 const Twine &Name = "") {
2087 if (V->getType() == DestTy)
2088 return V;
2089 if (auto *VC = dyn_cast<Constant>(V))
2090 return Insert(Folder.CreateSExtOrBitCast(VC, DestTy), Name);
2091 return Insert(CastInst::CreateSExtOrBitCast(V, DestTy), Name);
2092 }
2093
2094 Value *CreateTruncOrBitCast(Value *V, Type *DestTy,
2095 const Twine &Name = "") {
2096 if (V->getType() == DestTy)
2097 return V;
2098 if (auto *VC = dyn_cast<Constant>(V))
2099 return Insert(Folder.CreateTruncOrBitCast(VC, DestTy), Name);
2100 return Insert(CastInst::CreateTruncOrBitCast(V, DestTy), Name);
2101 }
2102
2103 Value *CreateCast(Instruction::CastOps Op, Value *V, Type *DestTy,
2104 const Twine &Name = "") {
2105 if (V->getType() == DestTy)
2106 return V;
2107 if (auto *VC = dyn_cast<Constant>(V))
2108 return Insert(Folder.CreateCast(Op, VC, DestTy), Name);
2109 return Insert(CastInst::Create(Op, V, DestTy), Name);
2110 }
2111
2112 Value *CreatePointerCast(Value *V, Type *DestTy,
2113 const Twine &Name = "") {
2114 if (V->getType() == DestTy)
2115 return V;
2116 if (auto *VC = dyn_cast<Constant>(V))
2117 return Insert(Folder.CreatePointerCast(VC, DestTy), Name);
2118 return Insert(CastInst::CreatePointerCast(V, DestTy), Name);
2119 }
2120
2121 Value *CreatePointerBitCastOrAddrSpaceCast(Value *V, Type *DestTy,
2122 const Twine &Name = "") {
2123 if (V->getType() == DestTy)
2124 return V;
2125
2126 if (auto *VC = dyn_cast<Constant>(V)) {
2127 return Insert(Folder.CreatePointerBitCastOrAddrSpaceCast(VC, DestTy),
2128 Name);
2129 }
2130
2131 return Insert(CastInst::CreatePointerBitCastOrAddrSpaceCast(V, DestTy),
2132 Name);
2133 }
2134
2135 Value *CreateIntCast(Value *V, Type *DestTy, bool isSigned,
2136 const Twine &Name = "") {
2137 if (V->getType() == DestTy)
2138 return V;
2139 if (auto *VC = dyn_cast<Constant>(V))
2140 return Insert(Folder.CreateIntCast(VC, DestTy, isSigned), Name);
2141 return Insert(CastInst::CreateIntegerCast(V, DestTy, isSigned), Name);
2142 }
2143
2144 Value *CreateBitOrPointerCast(Value *V, Type *DestTy,
2145 const Twine &Name = "") {
2146 if (V->getType() == DestTy)
2147 return V;
2148 if (V->getType()->isPtrOrPtrVectorTy() && DestTy->isIntOrIntVectorTy())
2149 return CreatePtrToInt(V, DestTy, Name);
2150 if (V->getType()->isIntOrIntVectorTy() && DestTy->isPtrOrPtrVectorTy())
2151 return CreateIntToPtr(V, DestTy, Name);
2152
2153 return CreateBitCast(V, DestTy, Name);
2154 }
2155
2156 Value *CreateFPCast(Value *V, Type *DestTy, const Twine &Name = "") {
2157 if (V->getType() == DestTy)
2158 return V;
2159 if (auto *VC = dyn_cast<Constant>(V))
2160 return Insert(Folder.CreateFPCast(VC, DestTy), Name);
2161 return Insert(CastInst::CreateFPCast(V, DestTy), Name);
2162 }
2163
2164 CallInst *CreateConstrainedFPCast(
2165 Intrinsic::ID ID, Value *V, Type *DestTy,
2166 Instruction *FMFSource = nullptr, const Twine &Name = "",
2167 MDNode *FPMathTag = nullptr,
2168 Optional<RoundingMode> Rounding = None,
2169 Optional<fp::ExceptionBehavior> Except = None);
2170
2171 // Provided to resolve 'CreateIntCast(Ptr, Ptr, "...")', giving a
2172 // compile time error, instead of converting the string to bool for the
2173 // isSigned parameter.
2174 Value *CreateIntCast(Value *, Type *, const char *) = delete;
2175
2176 //===--------------------------------------------------------------------===//
2177 // Instruction creation methods: Compare Instructions
2178 //===--------------------------------------------------------------------===//
2179
2180 Value *CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name = "") {
2181 return CreateICmp(ICmpInst::ICMP_EQ, LHS, RHS, Name);
2182 }
2183
2184 Value *CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name = "") {
2185 return CreateICmp(ICmpInst::ICMP_NE, LHS, RHS, Name);
2186 }
2187
2188 Value *CreateICmpUGT(Value *LHS, Value *RHS, const Twine &Name = "") {
2189 return CreateICmp(ICmpInst::ICMP_UGT, LHS, RHS, Name);
2190 }
2191
2192 Value *CreateICmpUGE(Value *LHS, Value *RHS, const Twine &Name = "") {
2193 return CreateICmp(ICmpInst::ICMP_UGE, LHS, RHS, Name);
2194 }
2195
2196 Value *CreateICmpULT(Value *LHS, Value *RHS, const Twine &Name = "") {
2197 return CreateICmp(ICmpInst::ICMP_ULT, LHS, RHS, Name);
2198 }
2199
2200 Value *CreateICmpULE(Value *LHS, Value *RHS, const Twine &Name = "") {
2201 return CreateICmp(ICmpInst::ICMP_ULE, LHS, RHS, Name);
2202 }
2203
2204 Value *CreateICmpSGT(Value *LHS, Value *RHS, const Twine &Name = "") {
2205 return CreateICmp(ICmpInst::ICMP_SGT, LHS, RHS, Name);
2206 }
2207
2208 Value *CreateICmpSGE(Value *LHS, Value *RHS, const Twine &Name = "") {
2209 return CreateICmp(ICmpInst::ICMP_SGE, LHS, RHS, Name);
46
Passing null pointer value via 2nd parameter 'LHS'
47
Calling 'IRBuilderBase::CreateICmp'
2210 }
2211
2212 Value *CreateICmpSLT(Value *LHS, Value *RHS, const Twine &Name = "") {
2213 return CreateICmp(ICmpInst::ICMP_SLT, LHS, RHS, Name);
2214 }
2215
2216 Value *CreateICmpSLE(Value *LHS, Value *RHS, const Twine &Name = "") {
2217 return CreateICmp(ICmpInst::ICMP_SLE, LHS, RHS, Name);
2218 }
2219
2220 Value *CreateFCmpOEQ(Value *LHS, Value *RHS, const Twine &Name = "",
2221 MDNode *FPMathTag = nullptr) {
2222 return CreateFCmp(FCmpInst::FCMP_OEQ, LHS, RHS, Name, FPMathTag);
2223 }
2224
2225 Value *CreateFCmpOGT(Value *LHS, Value *RHS, const Twine &Name = "",
2226 MDNode *FPMathTag = nullptr) {
2227 return CreateFCmp(FCmpInst::FCMP_OGT, LHS, RHS, Name, FPMathTag);
2228 }
2229
2230 Value *CreateFCmpOGE(Value *LHS, Value *RHS, const Twine &Name = "",
2231 MDNode *FPMathTag = nullptr) {
2232 return CreateFCmp(FCmpInst::FCMP_OGE, LHS, RHS, Name, FPMathTag);
2233 }
2234
2235 Value *CreateFCmpOLT(Value *LHS, Value *RHS, const Twine &Name = "",
2236 MDNode *FPMathTag = nullptr) {
2237 return CreateFCmp(FCmpInst::FCMP_OLT, LHS, RHS, Name, FPMathTag);
2238 }
2239
2240 Value *CreateFCmpOLE(Value *LHS, Value *RHS, const Twine &Name = "",
2241 MDNode *FPMathTag = nullptr) {
2242 return CreateFCmp(FCmpInst::FCMP_OLE, LHS, RHS, Name, FPMathTag);
2243 }
2244
2245 Value *CreateFCmpONE(Value *LHS, Value *RHS, const Twine &Name = "",
2246 MDNode *FPMathTag = nullptr) {
2247 return CreateFCmp(FCmpInst::FCMP_ONE, LHS, RHS, Name, FPMathTag);
2248 }
2249
2250 Value *CreateFCmpORD(Value *LHS, Value *RHS, const Twine &Name = "",
2251 MDNode *FPMathTag = nullptr) {
2252 return CreateFCmp(FCmpInst::FCMP_ORD, LHS, RHS, Name, FPMathTag);
2253 }
2254
2255 Value *CreateFCmpUNO(Value *LHS, Value *RHS, const Twine &Name = "",
2256 MDNode *FPMathTag = nullptr) {
2257 return CreateFCmp(FCmpInst::FCMP_UNO, LHS, RHS, Name, FPMathTag);
2258 }
2259
2260 Value *CreateFCmpUEQ(Value *LHS, Value *RHS, const Twine &Name = "",
2261 MDNode *FPMathTag = nullptr) {
2262 return CreateFCmp(FCmpInst::FCMP_UEQ, LHS, RHS, Name, FPMathTag);
2263 }
2264
2265 Value *CreateFCmpUGT(Value *LHS, Value *RHS, const Twine &Name = "",
2266 MDNode *FPMathTag = nullptr) {
2267 return CreateFCmp(FCmpInst::FCMP_UGT, LHS, RHS, Name, FPMathTag);
2268 }
2269
2270 Value *CreateFCmpUGE(Value *LHS, Value *RHS, const Twine &Name = "",
2271 MDNode *FPMathTag = nullptr) {
2272 return CreateFCmp(FCmpInst::FCMP_UGE, LHS, RHS, Name, FPMathTag);
2273 }
2274
2275 Value *CreateFCmpULT(Value *LHS, Value *RHS, const Twine &Name = "",
2276 MDNode *FPMathTag = nullptr) {
2277 return CreateFCmp(FCmpInst::FCMP_ULT, LHS, RHS, Name, FPMathTag);
2278 }
2279
2280 Value *CreateFCmpULE(Value *LHS, Value *RHS, const Twine &Name = "",
2281 MDNode *FPMathTag = nullptr) {
2282 return CreateFCmp(FCmpInst::FCMP_ULE, LHS, RHS, Name, FPMathTag);
2283 }
2284
2285 Value *CreateFCmpUNE(Value *LHS, Value *RHS, const Twine &Name = "",
2286 MDNode *FPMathTag = nullptr) {
2287 return CreateFCmp(FCmpInst::FCMP_UNE, LHS, RHS, Name, FPMathTag);
2288 }
2289
2290 Value *CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS,
2291 const Twine &Name = "") {
2292 if (auto *LC = dyn_cast<Constant>(LHS))
48
Assuming 'LC' is null
49
Taking false branch
2293 if (auto *RC = dyn_cast<Constant>(RHS))
2294 return Insert(Folder.CreateICmp(P, LC, RC), Name);
2295 return Insert(new ICmpInst(P, LHS, RHS), Name);
50
Passing null pointer value via 2nd parameter 'LHS'
51
Calling constructor for 'ICmpInst'
2296 }
2297
2298 // Create a quiet floating-point comparison (i.e. one that raises an FP
2299 // exception only in the case where an input is a signaling NaN).
2300 // Note that this differs from CreateFCmpS only if IsFPConstrained is true.
2301 Value *CreateFCmp(CmpInst::Predicate P, Value *LHS, Value *RHS,
2302 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2303 return CreateFCmpHelper(P, LHS, RHS, Name, FPMathTag, false);
2304 }
2305
2306 Value *CreateCmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
2307 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2308 return CmpInst::isFPPredicate(Pred)
2309 ? CreateFCmp(Pred, LHS, RHS, Name, FPMathTag)
2310 : CreateICmp(Pred, LHS, RHS, Name);
2311 }
2312
2313 // Create a signaling floating-point comparison (i.e. one that raises an FP
2314 // exception whenever an input is any NaN, signaling or quiet).
2315 // Note that this differs from CreateFCmp only if IsFPConstrained is true.
2316 Value *CreateFCmpS(CmpInst::Predicate P, Value *LHS, Value *RHS,
2317 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2318 return CreateFCmpHelper(P, LHS, RHS, Name, FPMathTag, true);
2319 }
2320
2321private:
2322 // Helper routine to create either a signaling or a quiet FP comparison.
2323 Value *CreateFCmpHelper(CmpInst::Predicate P, Value *LHS, Value *RHS,
2324 const Twine &Name, MDNode *FPMathTag,
2325 bool IsSignaling);
2326
2327public:
2328 CallInst *CreateConstrainedFPCmp(
2329 Intrinsic::ID ID, CmpInst::Predicate P, Value *L, Value *R,
2330 const Twine &Name = "", Optional<fp::ExceptionBehavior> Except = None);
2331
2332 //===--------------------------------------------------------------------===//
2333 // Instruction creation methods: Other Instructions
2334 //===--------------------------------------------------------------------===//
2335
2336 PHINode *CreatePHI(Type *Ty, unsigned NumReservedValues,
2337 const Twine &Name = "") {
2338 PHINode *Phi = PHINode::Create(Ty, NumReservedValues);
2339 if (isa<FPMathOperator>(Phi))
2340 setFPAttrs(Phi, nullptr /* MDNode* */, FMF);
2341 return Insert(Phi, Name);
2342 }
2343
2344 CallInst *CreateCall(FunctionType *FTy, Value *Callee,
2345 ArrayRef<Value *> Args = None, const Twine &Name = "",
2346 MDNode *FPMathTag = nullptr) {
2347 CallInst *CI = CallInst::Create(FTy, Callee, Args, DefaultOperandBundles);
2348 if (IsFPConstrained)
2349 setConstrainedFPCallAttr(CI);
2350 if (isa<FPMathOperator>(CI))
2351 setFPAttrs(CI, FPMathTag, FMF);
2352 return Insert(CI, Name);
2353 }
2354
2355 CallInst *CreateCall(FunctionType *FTy, Value *Callee, ArrayRef<Value *> Args,
2356 ArrayRef<OperandBundleDef> OpBundles,
2357 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2358 CallInst *CI = CallInst::Create(FTy, Callee, Args, OpBundles);
2359 if (IsFPConstrained)
2360 setConstrainedFPCallAttr(CI);
2361 if (isa<FPMathOperator>(CI))
2362 setFPAttrs(CI, FPMathTag, FMF);
2363 return Insert(CI, Name);
2364 }
2365
2366 CallInst *CreateCall(FunctionCallee Callee, ArrayRef<Value *> Args = None,
2367 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2368 return CreateCall(Callee.getFunctionType(), Callee.getCallee(), Args, Name,
2369 FPMathTag);
2370 }
2371
2372 CallInst *CreateCall(FunctionCallee Callee, ArrayRef<Value *> Args,
2373 ArrayRef<OperandBundleDef> OpBundles,
2374 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2375 return CreateCall(Callee.getFunctionType(), Callee.getCallee(), Args,
2376 OpBundles, Name, FPMathTag);
2377 }
2378
2379 CallInst *CreateConstrainedFPCall(
2380 Function *Callee, ArrayRef<Value *> Args, const Twine &Name = "",
2381 Optional<RoundingMode> Rounding = None,
2382 Optional<fp::ExceptionBehavior> Except = None);
2383
2384 Value *CreateSelect(Value *C, Value *True, Value *False,
2385 const Twine &Name = "", Instruction *MDFrom = nullptr);
2386
2387 VAArgInst *CreateVAArg(Value *List, Type *Ty, const Twine &Name = "") {
2388 return Insert(new VAArgInst(List, Ty), Name);
2389 }
2390
2391 Value *CreateExtractElement(Value *Vec, Value *Idx,
2392 const Twine &Name = "") {
2393 if (auto *VC = dyn_cast<Constant>(Vec))
2394 if (auto *IC = dyn_cast<Constant>(Idx))
2395 return Insert(Folder.CreateExtractElement(VC, IC), Name);
2396 return Insert(ExtractElementInst::Create(Vec, Idx), Name);
2397 }
2398
2399 Value *CreateExtractElement(Value *Vec, uint64_t Idx,
2400 const Twine &Name = "") {
2401 return CreateExtractElement(Vec, getInt64(Idx), Name);
2402 }
2403
2404 Value *CreateInsertElement(Value *Vec, Value *NewElt, Value *Idx,
2405 const Twine &Name = "") {
2406 if (auto *VC = dyn_cast<Constant>(Vec))
2407 if (auto *NC = dyn_cast<Constant>(NewElt))
2408 if (auto *IC = dyn_cast<Constant>(Idx))
2409 return Insert(Folder.CreateInsertElement(VC, NC, IC), Name);
2410 return Insert(InsertElementInst::Create(Vec, NewElt, Idx), Name);
2411 }
2412
2413 Value *CreateInsertElement(Value *Vec, Value *NewElt, uint64_t Idx,
2414 const Twine &Name = "") {
2415 return CreateInsertElement(Vec, NewElt, getInt64(Idx), Name);
2416 }
2417
2418 Value *CreateShuffleVector(Value *V1, Value *V2, Value *Mask,
2419 const Twine &Name = "") {
2420 SmallVector<int, 16> IntMask;
2421 ShuffleVectorInst::getShuffleMask(cast<Constant>(Mask), IntMask);
2422 return CreateShuffleVector(V1, V2, IntMask, Name);
2423 }
2424
2425 LLVM_ATTRIBUTE_DEPRECATED(Value *CreateShuffleVector(Value *V1, Value *V2,[[deprecated("Pass indices as 'int' instead")]] Value *CreateShuffleVector
(Value *V1, Value *V2, ArrayRef<uint32_t> Mask, const Twine
&Name = "")
2426 ArrayRef<uint32_t> Mask,[[deprecated("Pass indices as 'int' instead")]] Value *CreateShuffleVector
(Value *V1, Value *V2, ArrayRef<uint32_t> Mask, const Twine
&Name = "")
2427 const Twine &Name = ""),[[deprecated("Pass indices as 'int' instead")]] Value *CreateShuffleVector
(Value *V1, Value *V2, ArrayRef<uint32_t> Mask, const Twine
&Name = "")
2428 "Pass indices as 'int' instead")[[deprecated("Pass indices as 'int' instead")]] Value *CreateShuffleVector
(Value *V1, Value *V2, ArrayRef<uint32_t> Mask, const Twine
&Name = "")
{
2429 SmallVector<int, 16> IntMask;
2430 IntMask.assign(Mask.begin(), Mask.end());
2431 return CreateShuffleVector(V1, V2, IntMask, Name);
2432 }
2433
2434 /// See class ShuffleVectorInst for a description of the mask representation.
2435 Value *CreateShuffleVector(Value *V1, Value *V2, ArrayRef<int> Mask,
2436 const Twine &Name = "") {
2437 if (auto *V1C = dyn_cast<Constant>(V1))
2438 if (auto *V2C = dyn_cast<Constant>(V2))
2439 return Insert(Folder.CreateShuffleVector(V1C, V2C, Mask), Name);
2440 return Insert(new ShuffleVectorInst(V1, V2, Mask), Name);
2441 }
2442
2443 /// Create a unary shuffle. The second vector operand of the IR instruction
2444 /// is poison.
2445 Value *CreateShuffleVector(Value *V, ArrayRef<int> Mask,
2446 const Twine &Name = "") {
2447 return CreateShuffleVector(V, PoisonValue::get(V->getType()), Mask, Name);
2448 }
2449
2450 Value *CreateExtractValue(Value *Agg,
2451 ArrayRef<unsigned> Idxs,
2452 const Twine &Name = "") {
2453 if (auto *AggC = dyn_cast<Constant>(Agg))
2454 return Insert(Folder.CreateExtractValue(AggC, Idxs), Name);
2455 return Insert(ExtractValueInst::Create(Agg, Idxs), Name);
2456 }
2457
2458 Value *CreateInsertValue(Value *Agg, Value *Val,
2459 ArrayRef<unsigned> Idxs,
2460 const Twine &Name = "") {
2461 if (auto *AggC = dyn_cast<Constant>(Agg))
2462 if (auto *ValC = dyn_cast<Constant>(Val))
2463 return Insert(Folder.CreateInsertValue(AggC, ValC, Idxs), Name);
2464 return Insert(InsertValueInst::Create(Agg, Val, Idxs), Name);
2465 }
2466
2467 LandingPadInst *CreateLandingPad(Type *Ty, unsigned NumClauses,
2468 const Twine &Name = "") {
2469 return Insert(LandingPadInst::Create(Ty, NumClauses), Name);
2470 }
2471
2472 Value *CreateFreeze(Value *V, const Twine &Name = "") {
2473 return Insert(new FreezeInst(V), Name);
2474 }
2475
2476 //===--------------------------------------------------------------------===//
2477 // Utility creation methods
2478 //===--------------------------------------------------------------------===//
2479
2480 /// Return an i1 value testing if \p Arg is null.
2481 Value *CreateIsNull(Value *Arg, const Twine &Name = "") {
2482 return CreateICmpEQ(Arg, Constant::getNullValue(Arg->getType()),
2483 Name);
2484 }
2485
2486 /// Return an i1 value testing if \p Arg is not null.
2487 Value *CreateIsNotNull(Value *Arg, const Twine &Name = "") {
2488 return CreateICmpNE(Arg, Constant::getNullValue(Arg->getType()),
2489 Name);
2490 }
2491
2492 /// Return the i64 difference between two pointer values, dividing out
2493 /// the size of the pointed-to objects.
2494 ///
2495 /// This is intended to implement C-style pointer subtraction. As such, the
2496 /// pointers must be appropriately aligned for their element types and
2497 /// pointing into the same object.
2498 Value *CreatePtrDiff(Value *LHS, Value *RHS, const Twine &Name = "");
2499
2500 /// Create a launder.invariant.group intrinsic call. If Ptr type is
2501 /// different from pointer to i8, it's casted to pointer to i8 in the same
2502 /// address space before call and casted back to Ptr type after call.
2503 Value *CreateLaunderInvariantGroup(Value *Ptr);
2504
2505 /// \brief Create a strip.invariant.group intrinsic call. If Ptr type is
2506 /// different from pointer to i8, it's casted to pointer to i8 in the same
2507 /// address space before call and casted back to Ptr type after call.
2508 Value *CreateStripInvariantGroup(Value *Ptr);
2509
2510 /// Return a vector value that contains the vector V reversed
2511 Value *CreateVectorReverse(Value *V, const Twine &Name = "");
2512
2513 /// Return a vector value that contains \arg V broadcasted to \p
2514 /// NumElts elements.
2515 Value *CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name = "");
2516
2517 /// Return a vector value that contains \arg V broadcasted to \p
2518 /// EC elements.
2519 Value *CreateVectorSplat(ElementCount EC, Value *V, const Twine &Name = "");
2520
2521 /// Return a value that has been extracted from a larger integer type.
2522 Value *CreateExtractInteger(const DataLayout &DL, Value *From,
2523 IntegerType *ExtractedTy, uint64_t Offset,
2524 const Twine &Name);
2525
2526 Value *CreatePreserveArrayAccessIndex(Type *ElTy, Value *Base,
2527 unsigned Dimension, unsigned LastIndex,
2528 MDNode *DbgInfo);
2529
2530 Value *CreatePreserveUnionAccessIndex(Value *Base, unsigned FieldIndex,
2531 MDNode *DbgInfo);
2532
2533 Value *CreatePreserveStructAccessIndex(Type *ElTy, Value *Base,
2534 unsigned Index, unsigned FieldIndex,
2535 MDNode *DbgInfo);
2536
2537private:
2538 /// Helper function that creates an assume intrinsic call that
2539 /// represents an alignment assumption on the provided pointer \p PtrValue
2540 /// with offset \p OffsetValue and alignment value \p AlignValue.
2541 CallInst *CreateAlignmentAssumptionHelper(const DataLayout &DL,
2542 Value *PtrValue, Value *AlignValue,
2543 Value *OffsetValue);
2544
2545public:
2546 /// Create an assume intrinsic call that represents an alignment
2547 /// assumption on the provided pointer.
2548 ///
2549 /// An optional offset can be provided, and if it is provided, the offset
2550 /// must be subtracted from the provided pointer to get the pointer with the
2551 /// specified alignment.
2552 CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
2553 unsigned Alignment,
2554 Value *OffsetValue = nullptr);
2555
2556 /// Create an assume intrinsic call that represents an alignment
2557 /// assumption on the provided pointer.
2558 ///
2559 /// An optional offset can be provided, and if it is provided, the offset
2560 /// must be subtracted from the provided pointer to get the pointer with the
2561 /// specified alignment.
2562 ///
2563 /// This overload handles the condition where the Alignment is dependent
2564 /// on an existing value rather than a static value.
2565 CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
2566 Value *Alignment,
2567 Value *OffsetValue = nullptr);
2568};
2569
2570/// This provides a uniform API for creating instructions and inserting
2571/// them into a basic block: either at the end of a BasicBlock, or at a specific
2572/// iterator location in a block.
2573///
2574/// Note that the builder does not expose the full generality of LLVM
2575/// instructions. For access to extra instruction properties, use the mutators
2576/// (e.g. setVolatile) on the instructions after they have been
2577/// created. Convenience state exists to specify fast-math flags and fp-math
2578/// tags.
2579///
2580/// The first template argument specifies a class to use for creating constants.
2581/// This defaults to creating minimally folded constants. The second template
2582/// argument allows clients to specify custom insertion hooks that are called on
2583/// every newly created insertion.
2584template <typename FolderTy = ConstantFolder,
2585 typename InserterTy = IRBuilderDefaultInserter>
2586class IRBuilder : public IRBuilderBase {
2587private:
2588 FolderTy Folder;
2589 InserterTy Inserter;
2590
2591public:
2592 IRBuilder(LLVMContext &C, FolderTy Folder, InserterTy Inserter = InserterTy(),
2593 MDNode *FPMathTag = nullptr,
2594 ArrayRef<OperandBundleDef> OpBundles = None)
2595 : IRBuilderBase(C, this->Folder, this->Inserter, FPMathTag, OpBundles),
2596 Folder(Folder), Inserter(Inserter) {}
2597
2598 explicit IRBuilder(LLVMContext &C, MDNode *FPMathTag = nullptr,
2599 ArrayRef<OperandBundleDef> OpBundles = None)
2600 : IRBuilderBase(C, this->Folder, this->Inserter, FPMathTag, OpBundles) {}
2601
2602 explicit IRBuilder(BasicBlock *TheBB, FolderTy Folder,
2603 MDNode *FPMathTag = nullptr,
2604 ArrayRef<OperandBundleDef> OpBundles = None)
2605 : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
2606 FPMathTag, OpBundles), Folder(Folder) {
2607 SetInsertPoint(TheBB);
2608 }
2609
2610 explicit IRBuilder(BasicBlock *TheBB, MDNode *FPMathTag = nullptr,
2611 ArrayRef<OperandBundleDef> OpBundles = None)
2612 : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
2613 FPMathTag, OpBundles) {
2614 SetInsertPoint(TheBB);
2615 }
2616
2617 explicit IRBuilder(Instruction *IP, MDNode *FPMathTag = nullptr,
2618 ArrayRef<OperandBundleDef> OpBundles = None)
2619 : IRBuilderBase(IP->getContext(), this->Folder, this->Inserter,
2620 FPMathTag, OpBundles) {
2621 SetInsertPoint(IP);
2622 }
2623
2624 IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP, FolderTy Folder,
2625 MDNode *FPMathTag = nullptr,
2626 ArrayRef<OperandBundleDef> OpBundles = None)
2627 : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
2628 FPMathTag, OpBundles), Folder(Folder) {
2629 SetInsertPoint(TheBB, IP);
2630 }
2631
2632 IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP,
2633 MDNode *FPMathTag = nullptr,
2634 ArrayRef<OperandBundleDef> OpBundles = None)
2635 : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
2636 FPMathTag, OpBundles) {
2637 SetInsertPoint(TheBB, IP);
2638 }
2639
2640 /// Avoid copying the full IRBuilder. Prefer using InsertPointGuard
2641 /// or FastMathFlagGuard instead.
2642 IRBuilder(const IRBuilder &) = delete;
2643
2644 InserterTy &getInserter() { return Inserter; }
2645};
2646
2647// Create wrappers for C Binding types (see CBindingWrapping.h).
2648DEFINE_SIMPLE_CONVERSION_FUNCTIONS(IRBuilder<>, LLVMBuilderRef)inline IRBuilder<> *unwrap(LLVMBuilderRef P) { return reinterpret_cast
<IRBuilder<>*>(P); } inline LLVMBuilderRef wrap(const
IRBuilder<> *P) { return reinterpret_cast<LLVMBuilderRef
>(const_cast<IRBuilder<>*>(P)); }
2649
2650} // end namespace llvm
2651
2652#endif // LLVM_IR_IRBUILDER_H

/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h

1//===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file exposes the class definitions of all of the subclasses of the
10// Instruction class. This is meant to be an easy way to get access to all
11// instruction subclasses.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_IR_INSTRUCTIONS_H
16#define LLVM_IR_INSTRUCTIONS_H
17
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/Bitfields.h"
20#include "llvm/ADT/None.h"
21#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/SmallVector.h"
23#include "llvm/ADT/StringRef.h"
24#include "llvm/ADT/Twine.h"
25#include "llvm/ADT/iterator.h"
26#include "llvm/ADT/iterator_range.h"
27#include "llvm/IR/Attributes.h"
28#include "llvm/IR/BasicBlock.h"
29#include "llvm/IR/CallingConv.h"
30#include "llvm/IR/CFG.h"
31#include "llvm/IR/Constant.h"
32#include "llvm/IR/DerivedTypes.h"
33#include "llvm/IR/Function.h"
34#include "llvm/IR/InstrTypes.h"
35#include "llvm/IR/Instruction.h"
36#include "llvm/IR/OperandTraits.h"
37#include "llvm/IR/Type.h"
38#include "llvm/IR/Use.h"
39#include "llvm/IR/User.h"
40#include "llvm/IR/Value.h"
41#include "llvm/Support/AtomicOrdering.h"
42#include "llvm/Support/Casting.h"
43#include "llvm/Support/ErrorHandling.h"
44#include <cassert>
45#include <cstddef>
46#include <cstdint>
47#include <iterator>
48
49namespace llvm {
50
51class APInt;
52class ConstantInt;
53class DataLayout;
54class LLVMContext;
55
56//===----------------------------------------------------------------------===//
57// AllocaInst Class
58//===----------------------------------------------------------------------===//
59
60/// an instruction to allocate memory on the stack
61class AllocaInst : public UnaryInstruction {
62 Type *AllocatedType;
63
64 using AlignmentField = AlignmentBitfieldElementT<0>;
65 using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>;
66 using SwiftErrorField = BoolBitfieldElementT<UsedWithInAllocaField::NextBit>;
67 static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField,
68 SwiftErrorField>(),
69 "Bitfields must be contiguous");
70
71protected:
72 // Note: Instruction needs to be a friend here to call cloneImpl.
73 friend class Instruction;
74
75 AllocaInst *cloneImpl() const;
76
77public:
78 explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
79 const Twine &Name, Instruction *InsertBefore);
80 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
81 const Twine &Name, BasicBlock *InsertAtEnd);
82
83 AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
84 Instruction *InsertBefore);
85 AllocaInst(Type *Ty, unsigned AddrSpace,
86 const Twine &Name, BasicBlock *InsertAtEnd);
87
88 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
89 const Twine &Name = "", Instruction *InsertBefore = nullptr);
90 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
91 const Twine &Name, BasicBlock *InsertAtEnd);
92
93 /// Return true if there is an allocation size parameter to the allocation
94 /// instruction that is not 1.
95 bool isArrayAllocation() const;
96
97 /// Get the number of elements allocated. For a simple allocation of a single
98 /// element, this will return a constant 1 value.
99 const Value *getArraySize() const { return getOperand(0); }
100 Value *getArraySize() { return getOperand(0); }
101
102 /// Overload to return most specific pointer type.
103 PointerType *getType() const {
104 return cast<PointerType>(Instruction::getType());
105 }
106
107 /// Get allocation size in bits. Returns None if size can't be determined,
108 /// e.g. in case of a VLA.
109 Optional<TypeSize> getAllocationSizeInBits(const DataLayout &DL) const;
110
111 /// Return the type that is being allocated by the instruction.
112 Type *getAllocatedType() const { return AllocatedType; }
113 /// for use only in special circumstances that need to generically
114 /// transform a whole instruction (eg: IR linking and vectorization).
115 void setAllocatedType(Type *Ty) { AllocatedType = Ty; }
116
117 /// Return the alignment of the memory that is being allocated by the
118 /// instruction.
119 Align getAlign() const {
120 return Align(1ULL << getSubclassData<AlignmentField>());
121 }
122
123 void setAlignment(Align Align) {
124 setSubclassData<AlignmentField>(Log2(Align));
125 }
126
127 // FIXME: Remove this one transition to Align is over.
128 unsigned getAlignment() const { return getAlign().value(); }
129
130 /// Return true if this alloca is in the entry block of the function and is a
131 /// constant size. If so, the code generator will fold it into the
132 /// prolog/epilog code, so it is basically free.
133 bool isStaticAlloca() const;
134
135 /// Return true if this alloca is used as an inalloca argument to a call. Such
136 /// allocas are never considered static even if they are in the entry block.
137 bool isUsedWithInAlloca() const {
138 return getSubclassData<UsedWithInAllocaField>();
139 }
140
141 /// Specify whether this alloca is used to represent the arguments to a call.
142 void setUsedWithInAlloca(bool V) {
143 setSubclassData<UsedWithInAllocaField>(V);
144 }
145
146 /// Return true if this alloca is used as a swifterror argument to a call.
147 bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); }
148 /// Specify whether this alloca is used to represent a swifterror.
149 void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); }
150
151 // Methods for support type inquiry through isa, cast, and dyn_cast:
152 static bool classof(const Instruction *I) {
153 return (I->getOpcode() == Instruction::Alloca);
154 }
155 static bool classof(const Value *V) {
156 return isa<Instruction>(V) && classof(cast<Instruction>(V));
157 }
158
159private:
160 // Shadow Instruction::setInstructionSubclassData with a private forwarding
161 // method so that subclasses cannot accidentally use it.
162 template <typename Bitfield>
163 void setSubclassData(typename Bitfield::Type Value) {
164 Instruction::setSubclassData<Bitfield>(Value);
165 }
166};
167
168//===----------------------------------------------------------------------===//
169// LoadInst Class
170//===----------------------------------------------------------------------===//
171
/// An instruction for reading from memory. This uses the SubclassData field in
/// Value to store whether or not the load is volatile.
class LoadInst : public UnaryInstruction {
  // Packed bit layout inside Instruction's subclass-data word: the volatile
  // flag, then log2 of the alignment, then the atomic ordering.  The
  // static_assert guarantees the three fields are laid out back-to-back.
  using VolatileField = BoolBitfieldElementT<0>;
  using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
  using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
  static_assert(
      Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
      "Bitfields must be contiguous");

  // Operand/type sanity checks; defined out of line.
  void AssertOK();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  LoadInst *cloneImpl() const;

public:
  // Constructors: loaded type, pointer operand and name, plus progressively
  // more explicit volatility / alignment / atomic ordering & scope, and an
  // insertion point (before an instruction, or at the end of a block).
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
           Instruction *InsertBefore);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Instruction *InsertBefore);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           BasicBlock *InsertAtEnd);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Align Align, Instruction *InsertBefore = nullptr);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Align Align, BasicBlock *InsertAtEnd);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Align Align, AtomicOrdering Order,
           SyncScope::ID SSID = SyncScope::System,
           Instruction *InsertBefore = nullptr);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Align Align, AtomicOrdering Order, SyncScope::ID SSID,
           BasicBlock *InsertAtEnd);

  /// Return true if this is a load from a volatile memory location.
  bool isVolatile() const { return getSubclassData<VolatileField>(); }

  /// Specify whether this is a volatile load or not.
  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }

  /// Return the alignment of the access that is being performed.
  /// FIXME: Remove this function once transition to Align is over.
  /// Use getAlign() instead.
  unsigned getAlignment() const { return getAlign().value(); }

  /// Return the alignment of the access that is being performed.
  // The bitfield stores log2(alignment); decode by shifting.
  Align getAlign() const {
    return Align(1ULL << (getSubclassData<AlignmentField>()));
  }

  // Encode the alignment as its log2 into the packed bitfield.
  void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
  }

  /// Returns the ordering constraint of this load instruction.
  AtomicOrdering getOrdering() const {
    return getSubclassData<OrderingField>();
  }
  /// Sets the ordering constraint of this load instruction. May not be Release
  /// or AcquireRelease.
  void setOrdering(AtomicOrdering Ordering) {
    setSubclassData<OrderingField>(Ordering);
  }

  /// Returns the synchronization scope ID of this load instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this load instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  /// Sets the ordering constraint and the synchronization scope ID of this load
  /// instruction.
  void setAtomic(AtomicOrdering Ordering,
                 SyncScope::ID SSID = SyncScope::System) {
    setOrdering(Ordering);
    setSyncScopeID(SSID);
  }

  /// A "simple" load is neither atomic nor volatile.
  bool isSimple() const { return !isAtomic() && !isVolatile(); }

  /// An "unordered" load is non-volatile and at most Unordered atomic.
  bool isUnordered() const {
    return (getOrdering() == AtomicOrdering::NotAtomic ||
            getOrdering() == AtomicOrdering::Unordered) &&
           !isVolatile();
  }

  // The pointer being loaded from is the single operand (index 0).
  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }
  Type *getPointerOperandType() const { return getPointerOperand()->getType(); }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperandType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Load;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this load instruction. Not quite enough
  /// room in SubClassData for everything, so synchronization scope ID gets its
  /// own field.
  SyncScope::ID SSID;
};
297
298//===----------------------------------------------------------------------===//
299// StoreInst Class
300//===----------------------------------------------------------------------===//
301
/// An instruction for storing to memory.
class StoreInst : public Instruction {
  // Packed bit layout inside Instruction's subclass-data word: volatile flag,
  // log2(alignment), atomic ordering — verified contiguous below.
  using VolatileField = BoolBitfieldElementT<0>;
  using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
  using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
  static_assert(
      Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
      "Bitfields must be contiguous");

  // Operand/type sanity checks; defined out of line.
  void AssertOK();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  StoreInst *cloneImpl() const;

public:
  // Constructors: value to store, destination pointer, plus progressively
  // more explicit volatility / alignment / atomic ordering & scope, and an
  // insertion point (before an instruction, or at the end of a block).
  StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
  StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
            Instruction *InsertBefore = nullptr);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
            BasicBlock *InsertAtEnd);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
            AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
            Instruction *InsertBefore = nullptr);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
            AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd);

  // allocate space for exactly two operands
  void *operator new(size_t s) {
    return User::operator new(s, 2);
  }

  /// Return true if this is a store to a volatile memory location.
  bool isVolatile() const { return getSubclassData<VolatileField>(); }

  /// Specify whether this is a volatile store or not.
  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)

  /// Return the alignment of the access that is being performed
  /// FIXME: Remove this function once transition to Align is over.
  /// Use getAlign() instead.
  unsigned getAlignment() const { return getAlign().value(); }

  // The bitfield stores log2(alignment); decode by shifting.
  Align getAlign() const {
    return Align(1ULL << (getSubclassData<AlignmentField>()));
  }

  // Encode the alignment as its log2 into the packed bitfield.
  void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
  }

  /// Returns the ordering constraint of this store instruction.
  AtomicOrdering getOrdering() const {
    return getSubclassData<OrderingField>();
  }

  /// Sets the ordering constraint of this store instruction. May not be
  /// Acquire or AcquireRelease.
  void setOrdering(AtomicOrdering Ordering) {
    setSubclassData<OrderingField>(Ordering);
  }

  /// Returns the synchronization scope ID of this store instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this store instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  /// Sets the ordering constraint and the synchronization scope ID of this
  /// store instruction.
  void setAtomic(AtomicOrdering Ordering,
                 SyncScope::ID SSID = SyncScope::System) {
    setOrdering(Ordering);
    setSyncScopeID(SSID);
  }

  /// A "simple" store is neither atomic nor volatile.
  bool isSimple() const { return !isAtomic() && !isVolatile(); }

  /// An "unordered" store is non-volatile and at most Unordered atomic.
  bool isUnordered() const {
    return (getOrdering() == AtomicOrdering::NotAtomic ||
            getOrdering() == AtomicOrdering::Unordered) &&
           !isVolatile();
  }

  // Operand 0 is the stored value; operand 1 is the destination pointer.
  Value *getValueOperand() { return getOperand(0); }
  const Value *getValueOperand() const { return getOperand(0); }

  Value *getPointerOperand() { return getOperand(1); }
  const Value *getPointerOperand() const { return getOperand(1); }
  static unsigned getPointerOperandIndex() { return 1U; }
  Type *getPointerOperandType() const { return getPointerOperand()->getType(); }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperandType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Store;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this store instruction. Not quite enough
  /// room in SubClassData for everything, so synchronization scope ID gets its
  /// own field.
  SyncScope::ID SSID;
};
432
// StoreInst has exactly two operands (value, pointer); this trait powers the
// transparent operand accessors defined below.
template <>
struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
};
436
// Out-of-line definitions of the operand accessors declared inside StoreInst
// by DECLARE_TRANSPARENT_OPERAND_ACCESSORS (op_begin/op_end, range-checked
// getOperand/setOperand, getNumOperands, and the Op<Idx>() helpers).
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)
438
439//===----------------------------------------------------------------------===//
440// FenceInst Class
441//===----------------------------------------------------------------------===//
442
/// An instruction for ordering other memory operations.
class FenceInst : public Instruction {
  // Only the atomic ordering is packed into the subclass-data word.
  using OrderingField = AtomicOrderingBitfieldElementT<0>;

  // Shared constructor logic; defined out of line.
  void Init(AtomicOrdering Ordering, SyncScope::ID SSID);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  FenceInst *cloneImpl() const;

public:
  // Ordering may only be Acquire, Release, AcquireRelease, or
  // SequentiallyConsistent.
  FenceInst(LLVMContext &C, AtomicOrdering Ordering,
            SyncScope::ID SSID = SyncScope::System,
            Instruction *InsertBefore = nullptr);
  FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID,
            BasicBlock *InsertAtEnd);

  // allocate space for exactly zero operands
  void *operator new(size_t s) {
    return User::operator new(s, 0);
  }

  /// Returns the ordering constraint of this fence instruction.
  AtomicOrdering getOrdering() const {
    return getSubclassData<OrderingField>();
  }

  /// Sets the ordering constraint of this fence instruction. May only be
  /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
  void setOrdering(AtomicOrdering Ordering) {
    setSubclassData<OrderingField>(Ordering);
  }

  /// Returns the synchronization scope ID of this fence instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this fence instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Fence;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this fence instruction. Not quite enough
  /// room in SubClassData for everything, so synchronization scope ID gets its
  /// own field.
  SyncScope::ID SSID;
};
511
512//===----------------------------------------------------------------------===//
513// AtomicCmpXchgInst Class
514//===----------------------------------------------------------------------===//
515
/// An instruction that atomically checks whether a
/// specified value is in a memory location, and, if it is, stores a new value
/// there. The value returned by this instruction is a pair containing the
/// original value as first element, and an i1 indicating success (true) or
/// failure (false) as second element.
///
class AtomicCmpXchgInst : public Instruction {
  // Shared constructor logic; defined out of line.
  void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align,
            AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
            SyncScope::ID SSID);

  // 3-bit ordering element, wide enough for every AtomicOrdering value.
  template <unsigned Offset>
  using AtomicOrderingBitfieldElement =
      typename Bitfield::Element<AtomicOrdering, Offset, 3,
                                 AtomicOrdering::LAST>;

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  AtomicCmpXchgInst *cloneImpl() const;

public:
  AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
                    AtomicOrdering SuccessOrdering,
                    AtomicOrdering FailureOrdering, SyncScope::ID SSID,
                    Instruction *InsertBefore = nullptr);
  AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
                    AtomicOrdering SuccessOrdering,
                    AtomicOrdering FailureOrdering, SyncScope::ID SSID,
                    BasicBlock *InsertAtEnd);

  // allocate space for exactly three operands
  void *operator new(size_t s) {
    return User::operator new(s, 3);
  }

  // Packed bit layout: volatile flag, weak flag, success ordering, failure
  // ordering, log2(alignment) — verified contiguous below.
  using VolatileField = BoolBitfieldElementT<0>;
  using WeakField = BoolBitfieldElementT<VolatileField::NextBit>;
  using SuccessOrderingField =
      AtomicOrderingBitfieldElementT<WeakField::NextBit>;
  using FailureOrderingField =
      AtomicOrderingBitfieldElementT<SuccessOrderingField::NextBit>;
  using AlignmentField =
      AlignmentBitfieldElementT<FailureOrderingField::NextBit>;
  static_assert(
      Bitfield::areContiguous<VolatileField, WeakField, SuccessOrderingField,
                              FailureOrderingField, AlignmentField>(),
      "Bitfields must be contiguous");

  /// Return the alignment of the memory that is being allocated by the
  /// instruction.
  // The bitfield stores log2(alignment); decode by shifting.
  Align getAlign() const {
    return Align(1ULL << getSubclassData<AlignmentField>());
  }

  void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
  }

  /// Return true if this is a cmpxchg from a volatile memory
  /// location.
  ///
  bool isVolatile() const { return getSubclassData<VolatileField>(); }

  /// Specify whether this is a volatile cmpxchg.
  ///
  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }

  /// Return true if this cmpxchg may spuriously fail.
  bool isWeak() const { return getSubclassData<WeakField>(); }

  void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)

  /// Returns the success ordering constraint of this cmpxchg instruction.
  AtomicOrdering getSuccessOrdering() const {
    return getSubclassData<SuccessOrderingField>();
  }

  /// Sets the success ordering constraint of this cmpxchg instruction.
  void setSuccessOrdering(AtomicOrdering Ordering) {
    assert(Ordering != AtomicOrdering::NotAtomic &&
           "CmpXchg instructions can only be atomic.");
    setSubclassData<SuccessOrderingField>(Ordering);
  }

  /// Returns the failure ordering constraint of this cmpxchg instruction.
  AtomicOrdering getFailureOrdering() const {
    return getSubclassData<FailureOrderingField>();
  }

  /// Sets the failure ordering constraint of this cmpxchg instruction.
  void setFailureOrdering(AtomicOrdering Ordering) {
    assert(Ordering != AtomicOrdering::NotAtomic &&
           "CmpXchg instructions can only be atomic.");
    setSubclassData<FailureOrderingField>(Ordering);
  }

  /// Returns the synchronization scope ID of this cmpxchg instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this cmpxchg instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  // Operand 0 is the pointer, operand 1 the expected value, operand 2 the
  // replacement value.
  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }

  Value *getCompareOperand() { return getOperand(1); }
  const Value *getCompareOperand() const { return getOperand(1); }

  Value *getNewValOperand() { return getOperand(2); }
  const Value *getNewValOperand() const { return getOperand(2); }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperand()->getType()->getPointerAddressSpace();
  }

  /// Returns the strongest permitted ordering on failure, given the
  /// desired ordering on success.
  ///
  /// If the comparison in a cmpxchg operation fails, there is no atomic store
  /// so release semantics cannot be provided. So this function drops explicit
  /// Release requests from the AtomicOrdering. A SequentiallyConsistent
  /// operation would remain SequentiallyConsistent.
  static AtomicOrdering
  getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
    switch (SuccessOrdering) {
    default:
      llvm_unreachable("invalid cmpxchg success ordering");
    case AtomicOrdering::Release:
    case AtomicOrdering::Monotonic:
      return AtomicOrdering::Monotonic;
    case AtomicOrdering::AcquireRelease:
    case AtomicOrdering::Acquire:
      return AtomicOrdering::Acquire;
    case AtomicOrdering::SequentiallyConsistent:
      return AtomicOrdering::SequentiallyConsistent;
    }
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::AtomicCmpXchg;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this cmpxchg instruction. Not quite
  /// enough room in SubClassData for everything, so synchronization scope ID
  /// gets its own field.
  SyncScope::ID SSID;
};
686
// AtomicCmpXchgInst has exactly three operands (pointer, compare value,
// new value); this trait powers the transparent operand accessors below.
template <>
struct OperandTraits<AtomicCmpXchgInst> :
    public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
};
691
// Out-of-line definitions of the operand accessors declared inside
// AtomicCmpXchgInst by DECLARE_TRANSPARENT_OPERAND_ACCESSORS.
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)
693
694//===----------------------------------------------------------------------===//
695// AtomicRMWInst Class
696//===----------------------------------------------------------------------===//
697
/// an instruction that atomically reads a memory location,
/// combines it with another value, and then stores the result back. Returns
/// the old value.
///
class AtomicRMWInst : public Instruction {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  AtomicRMWInst *cloneImpl() const;

public:
  /// This enumeration lists the possible modifications atomicrmw can make. In
  /// the descriptions, 'p' is the pointer to the instruction's memory location,
  /// 'old' is the initial value of *p, and 'v' is the other value passed to the
  /// instruction. These instructions always return 'old'.
  enum BinOp : unsigned {
    /// *p = v
    Xchg,
    /// *p = old + v
    Add,
    /// *p = old - v
    Sub,
    /// *p = old & v
    And,
    /// *p = ~(old & v)
    Nand,
    /// *p = old | v
    Or,
    /// *p = old ^ v
    Xor,
    /// *p = old >signed v ? old : v
    Max,
    /// *p = old <signed v ? old : v
    Min,
    /// *p = old >unsigned v ? old : v
    UMax,
    /// *p = old <unsigned v ? old : v
    UMin,

    /// *p = old + v
    FAdd,

    /// *p = old - v
    FSub,

    FIRST_BINOP = Xchg,
    LAST_BINOP = FSub,
    BAD_BINOP
  };

private:
  // 3-bit ordering element, wide enough for every AtomicOrdering value.
  template <unsigned Offset>
  using AtomicOrderingBitfieldElement =
      typename Bitfield::Element<AtomicOrdering, Offset, 3,
                                 AtomicOrdering::LAST>;

  // 4-bit element holding the BinOp; sized by LAST_BINOP.
  template <unsigned Offset>
  using BinOpBitfieldElement =
      typename Bitfield::Element<BinOp, Offset, 4, BinOp::LAST_BINOP>;

public:
  AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
                AtomicOrdering Ordering, SyncScope::ID SSID,
                Instruction *InsertBefore = nullptr);
  AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
                AtomicOrdering Ordering, SyncScope::ID SSID,
                BasicBlock *InsertAtEnd);

  // allocate space for exactly two operands
  void *operator new(size_t s) {
    return User::operator new(s, 2);
  }

  // Packed bit layout: volatile flag, atomic ordering, binary operation,
  // log2(alignment) — verified contiguous below.
  using VolatileField = BoolBitfieldElementT<0>;
  using AtomicOrderingField =
      AtomicOrderingBitfieldElementT<VolatileField::NextBit>;
  using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>;
  using AlignmentField = AlignmentBitfieldElementT<OperationField::NextBit>;
  static_assert(Bitfield::areContiguous<VolatileField, AtomicOrderingField,
                                        OperationField, AlignmentField>(),
                "Bitfields must be contiguous");

  /// Return the binary modification this atomicrmw performs.
  BinOp getOperation() const { return getSubclassData<OperationField>(); }

  /// Return a human-readable name ("add", "xchg", ...) for the given BinOp;
  /// defined out of line.
  static StringRef getOperationName(BinOp Op);

  /// Return true if the given BinOp operates on floating-point values.
  static bool isFPOperation(BinOp Op) {
    switch (Op) {
    case AtomicRMWInst::FAdd:
    case AtomicRMWInst::FSub:
      return true;
    default:
      return false;
    }
  }

  void setOperation(BinOp Operation) {
    setSubclassData<OperationField>(Operation);
  }

  /// Return the alignment of the memory that is being allocated by the
  /// instruction.
  // The bitfield stores log2(alignment); decode by shifting.
  Align getAlign() const {
    return Align(1ULL << getSubclassData<AlignmentField>());
  }

  void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
  }

  /// Return true if this is a RMW on a volatile memory location.
  ///
  bool isVolatile() const { return getSubclassData<VolatileField>(); }

  /// Specify whether this is a volatile RMW or not.
  ///
  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)

  /// Returns the ordering constraint of this rmw instruction.
  AtomicOrdering getOrdering() const {
    return getSubclassData<AtomicOrderingField>();
  }

  /// Sets the ordering constraint of this rmw instruction.
  void setOrdering(AtomicOrdering Ordering) {
    assert(Ordering != AtomicOrdering::NotAtomic &&
           "atomicrmw instructions can only be atomic.");
    setSubclassData<AtomicOrderingField>(Ordering);
  }

  /// Returns the synchronization scope ID of this rmw instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this rmw instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  // Operand 0 is the pointer; operand 1 is the value combined with *p.
  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }

  Value *getValOperand() { return getOperand(1); }
  const Value *getValOperand() const { return getOperand(1); }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperand()->getType()->getPointerAddressSpace();
  }

  bool isFloatingPointOperation() const {
    return isFPOperation(getOperation());
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::AtomicRMW;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shared constructor logic; defined out of line.
  void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align,
            AtomicOrdering Ordering, SyncScope::ID SSID);

  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this rmw instruction. Not quite enough
  /// room in SubClassData for everything, so synchronization scope ID gets its
  /// own field.
  SyncScope::ID SSID;
};
882
883template <>
884struct OperandTraits<AtomicRMWInst>
885 : public FixedNumOperandTraits<AtomicRMWInst,2> {
886};
887
888DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)AtomicRMWInst::op_iterator AtomicRMWInst::op_begin() { return
OperandTraits<AtomicRMWInst>::op_begin(this); } AtomicRMWInst
::const_op_iterator AtomicRMWInst::op_begin() const { return OperandTraits
<AtomicRMWInst>::op_begin(const_cast<AtomicRMWInst*>
(this)); } AtomicRMWInst::op_iterator AtomicRMWInst::op_end()
{ return OperandTraits<AtomicRMWInst>::op_end(this); }
AtomicRMWInst::const_op_iterator AtomicRMWInst::op_end() const
{ return OperandTraits<AtomicRMWInst>::op_end(const_cast
<AtomicRMWInst*>(this)); } Value *AtomicRMWInst::getOperand
(unsigned i_nocapture) const { ((i_nocapture < OperandTraits
<AtomicRMWInst>::operands(this) && "getOperand() out of range!"
) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicRMWInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 888, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<AtomicRMWInst>::op_begin(const_cast<
AtomicRMWInst*>(this))[i_nocapture].get()); } void AtomicRMWInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { ((
i_nocapture < OperandTraits<AtomicRMWInst>::operands
(this) && "setOperand() out of range!") ? static_cast
<void> (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicRMWInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 888, __PRETTY_FUNCTION__)); OperandTraits<AtomicRMWInst>
::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned AtomicRMWInst
::getNumOperands() const { return OperandTraits<AtomicRMWInst
>::operands(this); } template <int Idx_nocapture> Use
&AtomicRMWInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
AtomicRMWInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
889
890//===----------------------------------------------------------------------===//
891// GetElementPtrInst Class
892//===----------------------------------------------------------------------===//
893
894// checkGEPType - Simple wrapper function to give a better assertion failure
895// message on bad indexes for a gep instruction.
896//
897inline Type *checkGEPType(Type *Ty) {
898 assert(Ty && "Invalid GetElementPtrInst indices for type!")((Ty && "Invalid GetElementPtrInst indices for type!"
) ? static_cast<void> (0) : __assert_fail ("Ty && \"Invalid GetElementPtrInst indices for type!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 898, __PRETTY_FUNCTION__))
;
899 return Ty;
900}
901
902/// an instruction for type-safe pointer arithmetic to
903/// access elements of arrays and structs
904///
905class GetElementPtrInst : public Instruction {
906 Type *SourceElementType;
907 Type *ResultElementType;
908
909 GetElementPtrInst(const GetElementPtrInst &GEPI);
910
911 /// Constructors - Create a getelementptr instruction with a base pointer an
912 /// list of indices. The first ctor can optionally insert before an existing
913 /// instruction, the second appends the new instruction to the specified
914 /// BasicBlock.
915 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
916 ArrayRef<Value *> IdxList, unsigned Values,
917 const Twine &NameStr, Instruction *InsertBefore);
918 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
919 ArrayRef<Value *> IdxList, unsigned Values,
920 const Twine &NameStr, BasicBlock *InsertAtEnd);
921
922 void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);
923
924protected:
925 // Note: Instruction needs to be a friend here to call cloneImpl.
926 friend class Instruction;
927
928 GetElementPtrInst *cloneImpl() const;
929
930public:
931 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
932 ArrayRef<Value *> IdxList,
933 const Twine &NameStr = "",
934 Instruction *InsertBefore = nullptr) {
935 unsigned Values = 1 + unsigned(IdxList.size());
936 if (!PointeeType)
937 PointeeType =
938 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType();
939 else
940 assert(((PointeeType == cast<PointerType>(Ptr->getType()->
getScalarType())->getElementType()) ? static_cast<void>
(0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 942, __PRETTY_FUNCTION__))
941 PointeeType ==((PointeeType == cast<PointerType>(Ptr->getType()->
getScalarType())->getElementType()) ? static_cast<void>
(0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 942, __PRETTY_FUNCTION__))
942 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType())((PointeeType == cast<PointerType>(Ptr->getType()->
getScalarType())->getElementType()) ? static_cast<void>
(0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 942, __PRETTY_FUNCTION__))
;
943 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
944 NameStr, InsertBefore);
945 }
946
947 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
948 ArrayRef<Value *> IdxList,
949 const Twine &NameStr,
950 BasicBlock *InsertAtEnd) {
951 unsigned Values = 1 + unsigned(IdxList.size());
952 if (!PointeeType)
953 PointeeType =
954 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType();
955 else
956 assert(((PointeeType == cast<PointerType>(Ptr->getType()->
getScalarType())->getElementType()) ? static_cast<void>
(0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 958, __PRETTY_FUNCTION__))
957 PointeeType ==((PointeeType == cast<PointerType>(Ptr->getType()->
getScalarType())->getElementType()) ? static_cast<void>
(0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 958, __PRETTY_FUNCTION__))
958 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType())((PointeeType == cast<PointerType>(Ptr->getType()->
getScalarType())->getElementType()) ? static_cast<void>
(0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 958, __PRETTY_FUNCTION__))
;
959 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
960 NameStr, InsertAtEnd);
961 }
962
963 /// Create an "inbounds" getelementptr. See the documentation for the
964 /// "inbounds" flag in LangRef.html for details.
965 static GetElementPtrInst *CreateInBounds(Value *Ptr,
966 ArrayRef<Value *> IdxList,
967 const Twine &NameStr = "",
968 Instruction *InsertBefore = nullptr){
969 return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertBefore);
970 }
971
972 static GetElementPtrInst *
973 CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
974 const Twine &NameStr = "",
975 Instruction *InsertBefore = nullptr) {
976 GetElementPtrInst *GEP =
977 Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
978 GEP->setIsInBounds(true);
979 return GEP;
980 }
981
982 static GetElementPtrInst *CreateInBounds(Value *Ptr,
983 ArrayRef<Value *> IdxList,
984 const Twine &NameStr,
985 BasicBlock *InsertAtEnd) {
986 return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertAtEnd);
987 }
988
989 static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
990 ArrayRef<Value *> IdxList,
991 const Twine &NameStr,
992 BasicBlock *InsertAtEnd) {
993 GetElementPtrInst *GEP =
994 Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
995 GEP->setIsInBounds(true);
996 return GEP;
997 }
998
999 /// Transparently provide more efficient getOperand methods.
1000 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
1001
1002 Type *getSourceElementType() const { return SourceElementType; }
1003
1004 void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
1005 void setResultElementType(Type *Ty) { ResultElementType = Ty; }
1006
1007 Type *getResultElementType() const {
1008 assert(ResultElementType ==((ResultElementType == cast<PointerType>(getType()->
getScalarType())->getElementType()) ? static_cast<void>
(0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 1009, __PRETTY_FUNCTION__))
1009 cast<PointerType>(getType()->getScalarType())->getElementType())((ResultElementType == cast<PointerType>(getType()->
getScalarType())->getElementType()) ? static_cast<void>
(0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 1009, __PRETTY_FUNCTION__))
;
1010 return ResultElementType;
1011 }
1012
1013 /// Returns the address space of this instruction's pointer type.
1014 unsigned getAddressSpace() const {
1015 // Note that this is always the same as the pointer operand's address space
1016 // and that is cheaper to compute, so cheat here.
1017 return getPointerAddressSpace();
1018 }
1019
1020 /// Returns the result type of a getelementptr with the given source
1021 /// element type and indexes.
1022 ///
1023 /// Null is returned if the indices are invalid for the specified
1024 /// source element type.
1025 static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
1026 static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
1027 static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);
1028
1029 /// Return the type of the element at the given index of an indexable
1030 /// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})".
1031 ///
1032 /// Returns null if the type can't be indexed, or the given index is not
1033 /// legal for the given type.
1034 static Type *getTypeAtIndex(Type *Ty, Value *Idx);
1035 static Type *getTypeAtIndex(Type *Ty, uint64_t Idx);
1036
1037 inline op_iterator idx_begin() { return op_begin()+1; }
1038 inline const_op_iterator idx_begin() const { return op_begin()+1; }
1039 inline op_iterator idx_end() { return op_end(); }
1040 inline const_op_iterator idx_end() const { return op_end(); }
1041
1042 inline iterator_range<op_iterator> indices() {
1043 return make_range(idx_begin(), idx_end());
1044 }
1045
1046 inline iterator_range<const_op_iterator> indices() const {
1047 return make_range(idx_begin(), idx_end());
1048 }
1049
1050 Value *getPointerOperand() {
1051 return getOperand(0);
1052 }
1053 const Value *getPointerOperand() const {
1054 return getOperand(0);
1055 }
1056 static unsigned getPointerOperandIndex() {
1057 return 0U; // get index for modifying correct operand.
1058 }
1059
1060 /// Method to return the pointer operand as a
1061 /// PointerType.
1062 Type *getPointerOperandType() const {
1063 return getPointerOperand()->getType();
1064 }
1065
1066 /// Returns the address space of the pointer operand.
1067 unsigned getPointerAddressSpace() const {
1068 return getPointerOperandType()->getPointerAddressSpace();
1069 }
1070
1071 /// Returns the pointer type returned by the GEP
1072 /// instruction, which may be a vector of pointers.
1073 static Type *getGEPReturnType(Type *ElTy, Value *Ptr,
1074 ArrayRef<Value *> IdxList) {
1075 Type *PtrTy = PointerType::get(checkGEPType(getIndexedType(ElTy, IdxList)),
1076 Ptr->getType()->getPointerAddressSpace());
1077 // Vector GEP
1078 if (auto *PtrVTy = dyn_cast<VectorType>(Ptr->getType())) {
1079 ElementCount EltCount = PtrVTy->getElementCount();
1080 return VectorType::get(PtrTy, EltCount);
1081 }
1082 for (Value *Index : IdxList)
1083 if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) {
1084 ElementCount EltCount = IndexVTy->getElementCount();
1085 return VectorType::get(PtrTy, EltCount);
1086 }
1087 // Scalar GEP
1088 return PtrTy;
1089 }
1090
1091 unsigned getNumIndices() const { // Note: always non-negative
1092 return getNumOperands() - 1;
1093 }
1094
1095 bool hasIndices() const {
1096 return getNumOperands() > 1;
1097 }
1098
1099 /// Return true if all of the indices of this GEP are
1100 /// zeros. If so, the result pointer and the first operand have the same
1101 /// value, just potentially different types.
1102 bool hasAllZeroIndices() const;
1103
1104 /// Return true if all of the indices of this GEP are
1105 /// constant integers. If so, the result pointer and the first operand have
1106 /// a constant offset between them.
1107 bool hasAllConstantIndices() const;
1108
1109 /// Set or clear the inbounds flag on this GEP instruction.
1110 /// See LangRef.html for the meaning of inbounds on a getelementptr.
1111 void setIsInBounds(bool b = true);
1112
1113 /// Determine whether the GEP has the inbounds flag.
1114 bool isInBounds() const;
1115
1116 /// Accumulate the constant address offset of this GEP if possible.
1117 ///
1118 /// This routine accepts an APInt into which it will accumulate the constant
1119 /// offset of this GEP if the GEP is in fact constant. If the GEP is not
1120 /// all-constant, it returns false and the value of the offset APInt is
1121 /// undefined (it is *not* preserved!). The APInt passed into this routine
1122 /// must be at least as wide as the IntPtr type for the address space of
1123 /// the base GEP pointer.
1124 bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
1125
1126 // Methods for support type inquiry through isa, cast, and dyn_cast:
1127 static bool classof(const Instruction *I) {
1128 return (I->getOpcode() == Instruction::GetElementPtr);
1129 }
1130 static bool classof(const Value *V) {
1131 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1132 }
1133};
1134
1135template <>
1136struct OperandTraits<GetElementPtrInst> :
1137 public VariadicOperandTraits<GetElementPtrInst, 1> {
1138};
1139
1140GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1141 ArrayRef<Value *> IdxList, unsigned Values,
1142 const Twine &NameStr,
1143 Instruction *InsertBefore)
1144 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1145 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1146 Values, InsertBefore),
1147 SourceElementType(PointeeType),
1148 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1149 assert(ResultElementType ==((ResultElementType == cast<PointerType>(getType()->
getScalarType())->getElementType()) ? static_cast<void>
(0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 1150, __PRETTY_FUNCTION__))
1150 cast<PointerType>(getType()->getScalarType())->getElementType())((ResultElementType == cast<PointerType>(getType()->
getScalarType())->getElementType()) ? static_cast<void>
(0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 1150, __PRETTY_FUNCTION__))
;
1151 init(Ptr, IdxList, NameStr);
1152}
1153
1154GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1155 ArrayRef<Value *> IdxList, unsigned Values,
1156 const Twine &NameStr,
1157 BasicBlock *InsertAtEnd)
1158 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1159 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1160 Values, InsertAtEnd),
1161 SourceElementType(PointeeType),
1162 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1163 assert(ResultElementType ==((ResultElementType == cast<PointerType>(getType()->
getScalarType())->getElementType()) ? static_cast<void>
(0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 1164, __PRETTY_FUNCTION__))
1164 cast<PointerType>(getType()->getScalarType())->getElementType())((ResultElementType == cast<PointerType>(getType()->
getScalarType())->getElementType()) ? static_cast<void>
(0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 1164, __PRETTY_FUNCTION__))
;
1165 init(Ptr, IdxList, NameStr);
1166}
1167
1168DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)GetElementPtrInst::op_iterator GetElementPtrInst::op_begin() {
return OperandTraits<GetElementPtrInst>::op_begin(this
); } GetElementPtrInst::const_op_iterator GetElementPtrInst::
op_begin() const { return OperandTraits<GetElementPtrInst>
::op_begin(const_cast<GetElementPtrInst*>(this)); } GetElementPtrInst
::op_iterator GetElementPtrInst::op_end() { return OperandTraits
<GetElementPtrInst>::op_end(this); } GetElementPtrInst::
const_op_iterator GetElementPtrInst::op_end() const { return OperandTraits
<GetElementPtrInst>::op_end(const_cast<GetElementPtrInst
*>(this)); } Value *GetElementPtrInst::getOperand(unsigned
i_nocapture) const { ((i_nocapture < OperandTraits<GetElementPtrInst
>::operands(this) && "getOperand() out of range!")
? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<GetElementPtrInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 1168, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<GetElementPtrInst>::op_begin(const_cast
<GetElementPtrInst*>(this))[i_nocapture].get()); } void
GetElementPtrInst::setOperand(unsigned i_nocapture, Value *Val_nocapture
) { ((i_nocapture < OperandTraits<GetElementPtrInst>
::operands(this) && "setOperand() out of range!") ? static_cast
<void> (0) : __assert_fail ("i_nocapture < OperandTraits<GetElementPtrInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 1168, __PRETTY_FUNCTION__)); OperandTraits<GetElementPtrInst
>::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned
GetElementPtrInst::getNumOperands() const { return OperandTraits
<GetElementPtrInst>::operands(this); } template <int
Idx_nocapture> Use &GetElementPtrInst::Op() { return this
->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture
> const Use &GetElementPtrInst::Op() const { return this
->OpFrom<Idx_nocapture>(this); }
1169
1170//===----------------------------------------------------------------------===//
1171// ICmpInst Class
1172//===----------------------------------------------------------------------===//
1173
1174/// This instruction compares its operands according to the predicate given
1175/// to the constructor. It only operates on integers or pointers. The operands
1176/// must be identical types.
1177/// Represent an integer comparison operator.
1178class ICmpInst: public CmpInst {
1179 void AssertOK() {
1180 assert(isIntPredicate() &&((isIntPredicate() && "Invalid ICmp predicate value")
? static_cast<void> (0) : __assert_fail ("isIntPredicate() && \"Invalid ICmp predicate value\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 1181, __PRETTY_FUNCTION__))
1181 "Invalid ICmp predicate value")((isIntPredicate() && "Invalid ICmp predicate value")
? static_cast<void> (0) : __assert_fail ("isIntPredicate() && \"Invalid ICmp predicate value\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 1181, __PRETTY_FUNCTION__))
;
1182 assert(getOperand(0)->getType() == getOperand(1)->getType() &&((getOperand(0)->getType() == getOperand(1)->getType() &&
"Both operands to ICmp instruction are not of the same type!"
) ? static_cast<void> (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to ICmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 1183, __PRETTY_FUNCTION__))
1183 "Both operands to ICmp instruction are not of the same type!")((getOperand(0)->getType() == getOperand(1)->getType() &&
"Both operands to ICmp instruction are not of the same type!"
) ? static_cast<void> (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to ICmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 1183, __PRETTY_FUNCTION__))
;
1184 // Check that the operands are the right type
1185 assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||(((getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand
(0)->getType()->isPtrOrPtrVectorTy()) && "Invalid operand types for ICmp instruction"
) ? static_cast<void> (0) : __assert_fail ("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 1187, __PRETTY_FUNCTION__))
1186 getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&(((getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand
(0)->getType()->isPtrOrPtrVectorTy()) && "Invalid operand types for ICmp instruction"
) ? static_cast<void> (0) : __assert_fail ("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 1187, __PRETTY_FUNCTION__))
1187 "Invalid operand types for ICmp instruction")(((getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand
(0)->getType()->isPtrOrPtrVectorTy()) && "Invalid operand types for ICmp instruction"
) ? static_cast<void> (0) : __assert_fail ("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 1187, __PRETTY_FUNCTION__))
;
1188 }
1189
1190protected:
1191 // Note: Instruction needs to be a friend here to call cloneImpl.
1192 friend class Instruction;
1193
1194 /// Clone an identical ICmpInst
1195 ICmpInst *cloneImpl() const;
1196
1197public:
1198 /// Constructor with insert-before-instruction semantics.
1199 ICmpInst(
1200 Instruction *InsertBefore, ///< Where to insert
1201 Predicate pred, ///< The predicate to use for the comparison
1202 Value *LHS, ///< The left-hand-side of the expression
1203 Value *RHS, ///< The right-hand-side of the expression
1204 const Twine &NameStr = "" ///< Name of the instruction
1205 ) : CmpInst(makeCmpResultType(LHS->getType()),
1206 Instruction::ICmp, pred, LHS, RHS, NameStr,
1207 InsertBefore) {
1208#ifndef NDEBUG
1209 AssertOK();
1210#endif
1211 }
1212
1213 /// Constructor with insert-at-end semantics.
1214 ICmpInst(
1215 BasicBlock &InsertAtEnd, ///< Block to insert into.
1216 Predicate pred, ///< The predicate to use for the comparison
1217 Value *LHS, ///< The left-hand-side of the expression
1218 Value *RHS, ///< The right-hand-side of the expression
1219 const Twine &NameStr = "" ///< Name of the instruction
1220 ) : CmpInst(makeCmpResultType(LHS->getType()),
1221 Instruction::ICmp, pred, LHS, RHS, NameStr,
1222 &InsertAtEnd) {
1223#ifndef NDEBUG
1224 AssertOK();
1225#endif
1226 }
1227
1228 /// Constructor with no-insertion semantics
1229 ICmpInst(
1230 Predicate pred, ///< The predicate to use for the comparison
1231 Value *LHS, ///< The left-hand-side of the expression
1232 Value *RHS, ///< The right-hand-side of the expression
1233 const Twine &NameStr = "" ///< Name of the instruction
1234 ) : CmpInst(makeCmpResultType(LHS->getType()),
52
Called C++ object pointer is null
1235 Instruction::ICmp, pred, LHS, RHS, NameStr) {
1236#ifndef NDEBUG
1237 AssertOK();
1238#endif
1239 }
1240
1241 /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
1242 /// @returns the predicate that would be the result if the operand were
1243 /// regarded as signed.
1244 /// Return the signed version of the predicate
1245 Predicate getSignedPredicate() const {
1246 return getSignedPredicate(getPredicate());
1247 }
1248
1249 /// This is a static version that you can use without an instruction.
1250 /// Return the signed version of the predicate.
1251 static Predicate getSignedPredicate(Predicate pred);
1252
1253 /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
1254 /// @returns the predicate that would be the result if the operand were
1255 /// regarded as unsigned.
1256 /// Return the unsigned version of the predicate
1257 Predicate getUnsignedPredicate() const {
1258 return getUnsignedPredicate(getPredicate());
1259 }
1260
1261 /// This is a static version that you can use without an instruction.
1262 /// Return the unsigned version of the predicate.
1263 static Predicate getUnsignedPredicate(Predicate pred);
1264
1265 /// Return true if this predicate is either EQ or NE. This also
1266 /// tests for commutativity.
1267 static bool isEquality(Predicate P) {
1268 return P == ICMP_EQ || P == ICMP_NE;
1269 }
1270
1271 /// Return true if this predicate is either EQ or NE. This also
1272 /// tests for commutativity.
1273 bool isEquality() const {
1274 return isEquality(getPredicate());
1275 }
1276
1277 /// @returns true if the predicate of this ICmpInst is commutative
1278 /// Determine if this relation is commutative.
1279 bool isCommutative() const { return isEquality(); }
1280
1281 /// Return true if the predicate is relational (not EQ or NE).
1282 ///
1283 bool isRelational() const {
1284 return !isEquality();
1285 }
1286
1287 /// Return true if the predicate is relational (not EQ or NE).
1288 ///
1289 static bool isRelational(Predicate P) {
1290 return !isEquality(P);
1291 }
1292
1293 /// Return true if the predicate is SGT or UGT.
1294 ///
1295 static bool isGT(Predicate P) {
1296 return P == ICMP_SGT || P == ICMP_UGT;
1297 }
1298
1299 /// Return true if the predicate is SLT or ULT.
1300 ///
1301 static bool isLT(Predicate P) {
1302 return P == ICMP_SLT || P == ICMP_ULT;
1303 }
1304
1305 /// Return true if the predicate is SGE or UGE.
1306 ///
1307 static bool isGE(Predicate P) {
1308 return P == ICMP_SGE || P == ICMP_UGE;
1309 }
1310
1311 /// Return true if the predicate is SLE or ULE.
1312 ///
1313 static bool isLE(Predicate P) {
1314 return P == ICMP_SLE || P == ICMP_ULE;
1315 }
1316
1317 /// Exchange the two operands to this instruction in such a way that it does
1318 /// not modify the semantics of the instruction. The predicate value may be
1319 /// changed to retain the same result if the predicate is order dependent
1320 /// (e.g. ult).
1321 /// Swap operands and adjust predicate.
1322 void swapOperands() {
1323 setPredicate(getSwappedPredicate());
1324 Op<0>().swap(Op<1>());
1325 }
1326
1327 // Methods for support type inquiry through isa, cast, and dyn_cast:
1328 static bool classof(const Instruction *I) {
1329 return I->getOpcode() == Instruction::ICmp;
1330 }
1331 static bool classof(const Value *V) {
1332 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1333 }
1334};
1335
1336//===----------------------------------------------------------------------===//
1337// FCmpInst Class
1338//===----------------------------------------------------------------------===//
1339
1340/// This instruction compares its operands according to the predicate given
1341/// to the constructor. It only operates on floating point values or packed
1342/// vectors of floating point values. The operands must be identical types.
1343/// Represents a floating point comparison operator.
1344class FCmpInst: public CmpInst {
1345 void AssertOK() {
1346 assert(isFPPredicate() && "Invalid FCmp predicate value")((isFPPredicate() && "Invalid FCmp predicate value") ?
static_cast<void> (0) : __assert_fail ("isFPPredicate() && \"Invalid FCmp predicate value\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 1346, __PRETTY_FUNCTION__))
;
1347 assert(getOperand(0)->getType() == getOperand(1)->getType() &&((getOperand(0)->getType() == getOperand(1)->getType() &&
"Both operands to FCmp instruction are not of the same type!"
) ? static_cast<void> (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to FCmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 1348, __PRETTY_FUNCTION__))
1348 "Both operands to FCmp instruction are not of the same type!")((getOperand(0)->getType() == getOperand(1)->getType() &&
"Both operands to FCmp instruction are not of the same type!"
) ? static_cast<void> (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to FCmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 1348, __PRETTY_FUNCTION__))
;
1349 // Check that the operands are the right type
1350 assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&((getOperand(0)->getType()->isFPOrFPVectorTy() &&
"Invalid operand types for FCmp instruction") ? static_cast<
void> (0) : __assert_fail ("getOperand(0)->getType()->isFPOrFPVectorTy() && \"Invalid operand types for FCmp instruction\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 1351, __PRETTY_FUNCTION__))
1351 "Invalid operand types for FCmp instruction")((getOperand(0)->getType()->isFPOrFPVectorTy() &&
"Invalid operand types for FCmp instruction") ? static_cast<
void> (0) : __assert_fail ("getOperand(0)->getType()->isFPOrFPVectorTy() && \"Invalid operand types for FCmp instruction\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 1351, __PRETTY_FUNCTION__))
;
1352 }
1353
1354protected:
1355 // Note: Instruction needs to be a friend here to call cloneImpl.
1356 friend class Instruction;
1357
1358 /// Clone an identical FCmpInst
1359 FCmpInst *cloneImpl() const;
1360
1361public:
1362 /// Constructor with insert-before-instruction semantics.
1363 FCmpInst(
1364 Instruction *InsertBefore, ///< Where to insert
1365 Predicate pred, ///< The predicate to use for the comparison
1366 Value *LHS, ///< The left-hand-side of the expression
1367 Value *RHS, ///< The right-hand-side of the expression
1368 const Twine &NameStr = "" ///< Name of the instruction
1369 ) : CmpInst(makeCmpResultType(LHS->getType()),
1370 Instruction::FCmp, pred, LHS, RHS, NameStr,
1371 InsertBefore) {
1372 AssertOK();
1373 }
1374
1375 /// Constructor with insert-at-end semantics.
1376 FCmpInst(
1377 BasicBlock &InsertAtEnd, ///< Block to insert into.
1378 Predicate pred, ///< The predicate to use for the comparison
1379 Value *LHS, ///< The left-hand-side of the expression
1380 Value *RHS, ///< The right-hand-side of the expression
1381 const Twine &NameStr = "" ///< Name of the instruction
1382 ) : CmpInst(makeCmpResultType(LHS->getType()),
1383 Instruction::FCmp, pred, LHS, RHS, NameStr,
1384 &InsertAtEnd) {
1385 AssertOK();
1386 }
1387
1388 /// Constructor with no-insertion semantics
1389 FCmpInst(
1390 Predicate Pred, ///< The predicate to use for the comparison
1391 Value *LHS, ///< The left-hand-side of the expression
1392 Value *RHS, ///< The right-hand-side of the expression
1393 const Twine &NameStr = "", ///< Name of the instruction
1394 Instruction *FlagsSource = nullptr
1395 ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS,
1396 RHS, NameStr, nullptr, FlagsSource) {
1397 AssertOK();
1398 }
1399
1400 /// @returns true if the predicate of this instruction is EQ or NE.
1401 /// Determine if this is an equality predicate.
1402 static bool isEquality(Predicate Pred) {
1403 return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
1404 Pred == FCMP_UNE;
1405 }
1406
1407 /// @returns true if the predicate of this instruction is EQ or NE.
1408 /// Determine if this is an equality predicate.
1409 bool isEquality() const { return isEquality(getPredicate()); }
1410
1411 /// @returns true if the predicate of this instruction is commutative.
1412 /// Determine if this is a commutative predicate.
1413 bool isCommutative() const {
1414 return isEquality() ||
1415 getPredicate() == FCMP_FALSE ||
1416 getPredicate() == FCMP_TRUE ||
1417 getPredicate() == FCMP_ORD ||
1418 getPredicate() == FCMP_UNO;
1419 }
1420
1421 /// @returns true if the predicate is relational (not EQ or NE).
1422 /// Determine if this a relational predicate.
1423 bool isRelational() const { return !isEquality(); }
1424
1425 /// Exchange the two operands to this instruction in such a way that it does
1426 /// not modify the semantics of the instruction. The predicate value may be
1427 /// changed to retain the same result if the predicate is order dependent
1428 /// (e.g. ult).
1429 /// Swap operands and adjust predicate.
1430 void swapOperands() {
1431 setPredicate(getSwappedPredicate());
1432 Op<0>().swap(Op<1>());
1433 }
1434
1435 /// Methods for support type inquiry through isa, cast, and dyn_cast:
1436 static bool classof(const Instruction *I) {
1437 return I->getOpcode() == Instruction::FCmp;
1438 }
1439 static bool classof(const Value *V) {
1440 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1441 }
1442};
1443
1444//===----------------------------------------------------------------------===//
1445/// This class represents a function call, abstracting a target
1446/// machine's calling convention. This class uses low bit of the SubClassData
1447/// field to indicate whether or not this is a tail call. The rest of the bits
1448/// hold the calling convention of the call.
1449///
1450class CallInst : public CallBase {
1451 CallInst(const CallInst &CI);
1452
1453 /// Construct a CallInst given a range of arguments.
1454 /// Construct a CallInst from a range of arguments
1455 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1456 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1457 Instruction *InsertBefore);
1458
1459 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1460 const Twine &NameStr, Instruction *InsertBefore)
1461 : CallInst(Ty, Func, Args, None, NameStr, InsertBefore) {}
1462
1463 /// Construct a CallInst given a range of arguments.
1464 /// Construct a CallInst from a range of arguments
1465 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1466 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1467 BasicBlock *InsertAtEnd);
1468
1469 explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
1470 Instruction *InsertBefore);
1471
1472 CallInst(FunctionType *ty, Value *F, const Twine &NameStr,
1473 BasicBlock *InsertAtEnd);
1474
1475 void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
1476 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
1477 void init(FunctionType *FTy, Value *Func, const Twine &NameStr);
1478
1479 /// Compute the number of operands to allocate.
1480 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
1481 // We need one operand for the called function, plus the input operand
1482 // counts provided.
1483 return 1 + NumArgs + NumBundleInputs;
1484 }
1485
1486protected:
1487 // Note: Instruction needs to be a friend here to call cloneImpl.
1488 friend class Instruction;
1489
1490 CallInst *cloneImpl() const;
1491
1492public:
1493 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "",
1494 Instruction *InsertBefore = nullptr) {
1495 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore);
1496 }
1497
1498 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1499 const Twine &NameStr,
1500 Instruction *InsertBefore = nullptr) {
1501 return new (ComputeNumOperands(Args.size()))
1502 CallInst(Ty, Func, Args, None, NameStr, InsertBefore);
1503 }
1504
1505 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1506 ArrayRef<OperandBundleDef> Bundles = None,
1507 const Twine &NameStr = "",
1508 Instruction *InsertBefore = nullptr) {
1509 const int NumOperands =
1510 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1511 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1512
1513 return new (NumOperands, DescriptorBytes)
1514 CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
1515 }
1516
1517 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr,
1518 BasicBlock *InsertAtEnd) {
1519 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd);
1520 }
1521
1522 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1523 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1524 return new (ComputeNumOperands(Args.size()))
1525 CallInst(Ty, Func, Args, None, NameStr, InsertAtEnd);
1526 }
1527
1528 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1529 ArrayRef<OperandBundleDef> Bundles,
1530 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1531 const int NumOperands =
1532 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1533 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1534
1535 return new (NumOperands, DescriptorBytes)
1536 CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd);
1537 }
1538
1539 static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "",
1540 Instruction *InsertBefore = nullptr) {
1541 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1542 InsertBefore);
1543 }
1544
1545 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1546 ArrayRef<OperandBundleDef> Bundles = None,
1547 const Twine &NameStr = "",
1548 Instruction *InsertBefore = nullptr) {
1549 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1550 NameStr, InsertBefore);
1551 }
1552
1553 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1554 const Twine &NameStr,
1555 Instruction *InsertBefore = nullptr) {
1556 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1557 InsertBefore);
1558 }
1559
1560 static CallInst *Create(FunctionCallee Func, const Twine &NameStr,
1561 BasicBlock *InsertAtEnd) {
1562 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1563 InsertAtEnd);
1564 }
1565
1566 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1567 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1568 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1569 InsertAtEnd);
1570 }
1571
1572 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1573 ArrayRef<OperandBundleDef> Bundles,
1574 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1575 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1576 NameStr, InsertAtEnd);
1577 }
1578
1579 /// Create a clone of \p CI with a different set of operand bundles and
1580 /// insert it before \p InsertPt.
1581 ///
1582 /// The returned call instruction is identical \p CI in every way except that
1583 /// the operand bundles for the new instruction are set to the operand bundles
1584 /// in \p Bundles.
1585 static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
1586 Instruction *InsertPt = nullptr);
1587
1588 /// Generate the IR for a call to malloc:
1589 /// 1. Compute the malloc call's argument as the specified type's size,
1590 /// possibly multiplied by the array size if the array size is not
1591 /// constant 1.
1592 /// 2. Call malloc with that argument.
1593 /// 3. Bitcast the result of the malloc call to the specified type.
1594 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
1595 Type *AllocTy, Value *AllocSize,
1596 Value *ArraySize = nullptr,
1597 Function *MallocF = nullptr,
1598 const Twine &Name = "");
1599 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
1600 Type *AllocTy, Value *AllocSize,
1601 Value *ArraySize = nullptr,
1602 Function *MallocF = nullptr,
1603 const Twine &Name = "");
1604 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
1605 Type *AllocTy, Value *AllocSize,
1606 Value *ArraySize = nullptr,
1607 ArrayRef<OperandBundleDef> Bundles = None,
1608 Function *MallocF = nullptr,
1609 const Twine &Name = "");
1610 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
1611 Type *AllocTy, Value *AllocSize,
1612 Value *ArraySize = nullptr,
1613 ArrayRef<OperandBundleDef> Bundles = None,
1614 Function *MallocF = nullptr,
1615 const Twine &Name = "");
1616 /// Generate the IR for a call to the builtin free function.
1617 static Instruction *CreateFree(Value *Source, Instruction *InsertBefore);
1618 static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd);
1619 static Instruction *CreateFree(Value *Source,
1620 ArrayRef<OperandBundleDef> Bundles,
1621 Instruction *InsertBefore);
1622 static Instruction *CreateFree(Value *Source,
1623 ArrayRef<OperandBundleDef> Bundles,
1624 BasicBlock *InsertAtEnd);
1625
1626 // Note that 'musttail' implies 'tail'.
1627 enum TailCallKind : unsigned {
1628 TCK_None = 0,
1629 TCK_Tail = 1,
1630 TCK_MustTail = 2,
1631 TCK_NoTail = 3,
1632 TCK_LAST = TCK_NoTail
1633 };
1634
1635 using TailCallKindField = Bitfield::Element<TailCallKind, 0, 2, TCK_LAST>;
1636 static_assert(
1637 Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(),
1638 "Bitfields must be contiguous");
1639
1640 TailCallKind getTailCallKind() const {
1641 return getSubclassData<TailCallKindField>();
1642 }
1643
1644 bool isTailCall() const {
1645 TailCallKind Kind = getTailCallKind();
1646 return Kind == TCK_Tail || Kind == TCK_MustTail;
1647 }
1648
1649 bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; }
1650
1651 bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; }
1652
1653 void setTailCallKind(TailCallKind TCK) {
1654 setSubclassData<TailCallKindField>(TCK);
1655 }
1656
1657 void setTailCall(bool IsTc = true) {
1658 setTailCallKind(IsTc ? TCK_Tail : TCK_None);
1659 }
1660
1661 /// Return true if the call can return twice
1662 bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
1663 void setCanReturnTwice() {
1664 addAttribute(AttributeList::FunctionIndex, Attribute::ReturnsTwice);
1665 }
1666
1667 // Methods for support type inquiry through isa, cast, and dyn_cast:
1668 static bool classof(const Instruction *I) {
1669 return I->getOpcode() == Instruction::Call;
1670 }
1671 static bool classof(const Value *V) {
1672 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1673 }
1674
1675 /// Updates profile metadata by scaling it by \p S / \p T.
1676 void updateProfWeight(uint64_t S, uint64_t T);
1677
1678private:
1679 // Shadow Instruction::setInstructionSubclassData with a private forwarding
1680 // method so that subclasses cannot accidentally use it.
1681 template <typename Bitfield>
1682 void setSubclassData(typename Bitfield::Type Value) {
1683 Instruction::setSubclassData<Bitfield>(Value);
1684 }
1685};
1686
1687CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1688 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1689 BasicBlock *InsertAtEnd)
1690 : CallBase(Ty->getReturnType(), Instruction::Call,
1691 OperandTraits<CallBase>::op_end(this) -
1692 (Args.size() + CountBundleInputs(Bundles) + 1),
1693 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1694 InsertAtEnd) {
1695 init(Ty, Func, Args, Bundles, NameStr);
1696}
1697
1698CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1699 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1700 Instruction *InsertBefore)
1701 : CallBase(Ty->getReturnType(), Instruction::Call,
1702 OperandTraits<CallBase>::op_end(this) -
1703 (Args.size() + CountBundleInputs(Bundles) + 1),
1704 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1705 InsertBefore) {
1706 init(Ty, Func, Args, Bundles, NameStr);
1707}
1708
1709//===----------------------------------------------------------------------===//
1710// SelectInst Class
1711//===----------------------------------------------------------------------===//
1712
1713/// This class represents the LLVM 'select' instruction.
1714///
1715class SelectInst : public Instruction {
1716 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1717 Instruction *InsertBefore)
1718 : Instruction(S1->getType(), Instruction::Select,
1719 &Op<0>(), 3, InsertBefore) {
1720 init(C, S1, S2);
1721 setName(NameStr);
1722 }
1723
1724 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1725 BasicBlock *InsertAtEnd)
1726 : Instruction(S1->getType(), Instruction::Select,
1727 &Op<0>(), 3, InsertAtEnd) {
1728 init(C, S1, S2);
1729 setName(NameStr);
1730 }
1731
1732 void init(Value *C, Value *S1, Value *S2) {
1733 assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select")((!areInvalidOperands(C, S1, S2) && "Invalid operands for select"
) ? static_cast<void> (0) : __assert_fail ("!areInvalidOperands(C, S1, S2) && \"Invalid operands for select\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 1733, __PRETTY_FUNCTION__))
;
1734 Op<0>() = C;
1735 Op<1>() = S1;
1736 Op<2>() = S2;
1737 }
1738
1739protected:
1740 // Note: Instruction needs to be a friend here to call cloneImpl.
1741 friend class Instruction;
1742
1743 SelectInst *cloneImpl() const;
1744
1745public:
1746 static SelectInst *Create(Value *C, Value *S1, Value *S2,
1747 const Twine &NameStr = "",
1748 Instruction *InsertBefore = nullptr,
1749 Instruction *MDFrom = nullptr) {
1750 SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
1751 if (MDFrom)
1752 Sel->copyMetadata(*MDFrom);
1753 return Sel;
1754 }
1755
1756 static SelectInst *Create(Value *C, Value *S1, Value *S2,
1757 const Twine &NameStr,
1758 BasicBlock *InsertAtEnd) {
1759 return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
1760 }
1761
1762 const Value *getCondition() const { return Op<0>(); }
1763 const Value *getTrueValue() const { return Op<1>(); }
1764 const Value *getFalseValue() const { return Op<2>(); }
1765 Value *getCondition() { return Op<0>(); }
1766 Value *getTrueValue() { return Op<1>(); }
1767 Value *getFalseValue() { return Op<2>(); }
1768
1769 void setCondition(Value *V) { Op<0>() = V; }
1770 void setTrueValue(Value *V) { Op<1>() = V; }
1771 void setFalseValue(Value *V) { Op<2>() = V; }
1772
1773 /// Swap the true and false values of the select instruction.
1774 /// This doesn't swap prof metadata.
1775 void swapValues() { Op<1>().swap(Op<2>()); }
1776
1777 /// Return a string if the specified operands are invalid
1778 /// for a select operation, otherwise return null.
1779 static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);
1780
1781 /// Transparently provide more efficient getOperand methods.
1782 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
1783
1784 OtherOps getOpcode() const {
1785 return static_cast<OtherOps>(Instruction::getOpcode());
1786 }
1787
1788 // Methods for support type inquiry through isa, cast, and dyn_cast:
1789 static bool classof(const Instruction *I) {
1790 return I->getOpcode() == Instruction::Select;
1791 }
1792 static bool classof(const Value *V) {
1793 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1794 }
1795};
1796
1797template <>
1798struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
1799};
1800
1801DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)SelectInst::op_iterator SelectInst::op_begin() { return OperandTraits
<SelectInst>::op_begin(this); } SelectInst::const_op_iterator
SelectInst::op_begin() const { return OperandTraits<SelectInst
>::op_begin(const_cast<SelectInst*>(this)); } SelectInst
::op_iterator SelectInst::op_end() { return OperandTraits<
SelectInst>::op_end(this); } SelectInst::const_op_iterator
SelectInst::op_end() const { return OperandTraits<SelectInst
>::op_end(const_cast<SelectInst*>(this)); } Value *SelectInst
::getOperand(unsigned i_nocapture) const { ((i_nocapture <
OperandTraits<SelectInst>::operands(this) && "getOperand() out of range!"
) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<SelectInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 1801, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<SelectInst>::op_begin(const_cast<SelectInst
*>(this))[i_nocapture].get()); } void SelectInst::setOperand
(unsigned i_nocapture, Value *Val_nocapture) { ((i_nocapture <
OperandTraits<SelectInst>::operands(this) && "setOperand() out of range!"
) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<SelectInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 1801, __PRETTY_FUNCTION__)); OperandTraits<SelectInst>
::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned SelectInst
::getNumOperands() const { return OperandTraits<SelectInst
>::operands(this); } template <int Idx_nocapture> Use
&SelectInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
SelectInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
1802
1803//===----------------------------------------------------------------------===//
1804// VAArgInst Class
1805//===----------------------------------------------------------------------===//
1806
1807/// This class represents the va_arg llvm instruction, which returns
1808/// an argument of the specified type given a va_list and increments that list
1809///
1810class VAArgInst : public UnaryInstruction {
1811protected:
1812 // Note: Instruction needs to be a friend here to call cloneImpl.
1813 friend class Instruction;
1814
1815 VAArgInst *cloneImpl() const;
1816
1817public:
1818 VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
1819 Instruction *InsertBefore = nullptr)
1820 : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
1821 setName(NameStr);
1822 }
1823
1824 VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
1825 BasicBlock *InsertAtEnd)
1826 : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
1827 setName(NameStr);
1828 }
1829
1830 Value *getPointerOperand() { return getOperand(0); }
1831 const Value *getPointerOperand() const { return getOperand(0); }
1832 static unsigned getPointerOperandIndex() { return 0U; }
1833
1834 // Methods for support type inquiry through isa, cast, and dyn_cast:
1835 static bool classof(const Instruction *I) {
1836 return I->getOpcode() == VAArg;
1837 }
1838 static bool classof(const Value *V) {
1839 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1840 }
1841};
1842
1843//===----------------------------------------------------------------------===//
1844// ExtractElementInst Class
1845//===----------------------------------------------------------------------===//
1846
1847/// This instruction extracts a single (scalar)
1848/// element from a VectorType value
1849///
1850class ExtractElementInst : public Instruction {
1851 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
1852 Instruction *InsertBefore = nullptr);
1853 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
1854 BasicBlock *InsertAtEnd);
1855
1856protected:
1857 // Note: Instruction needs to be a friend here to call cloneImpl.
1858 friend class Instruction;
1859
1860 ExtractElementInst *cloneImpl() const;
1861
1862public:
1863 static ExtractElementInst *Create(Value *Vec, Value *Idx,
1864 const Twine &NameStr = "",
1865 Instruction *InsertBefore = nullptr) {
1866 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
1867 }
1868
1869 static ExtractElementInst *Create(Value *Vec, Value *Idx,
1870 const Twine &NameStr,
1871 BasicBlock *InsertAtEnd) {
1872 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
1873 }
1874
1875 /// Return true if an extractelement instruction can be
1876 /// formed with the specified operands.
1877 static bool isValidOperands(const Value *Vec, const Value *Idx);
1878
1879 Value *getVectorOperand() { return Op<0>(); }
1880 Value *getIndexOperand() { return Op<1>(); }
1881 const Value *getVectorOperand() const { return Op<0>(); }
1882 const Value *getIndexOperand() const { return Op<1>(); }
1883
1884 VectorType *getVectorOperandType() const {
1885 return cast<VectorType>(getVectorOperand()->getType());
1886 }
1887
1888 /// Transparently provide more efficient getOperand methods.
1889 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
1890
1891 // Methods for support type inquiry through isa, cast, and dyn_cast:
1892 static bool classof(const Instruction *I) {
1893 return I->getOpcode() == Instruction::ExtractElement;
1894 }
1895 static bool classof(const Value *V) {
1896 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1897 }
1898};
1899
1900template <>
1901struct OperandTraits<ExtractElementInst> :
1902 public FixedNumOperandTraits<ExtractElementInst, 2> {
1903};
1904
1905DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)ExtractElementInst::op_iterator ExtractElementInst::op_begin(
) { return OperandTraits<ExtractElementInst>::op_begin(
this); } ExtractElementInst::const_op_iterator ExtractElementInst
::op_begin() const { return OperandTraits<ExtractElementInst
>::op_begin(const_cast<ExtractElementInst*>(this)); }
ExtractElementInst::op_iterator ExtractElementInst::op_end()
{ return OperandTraits<ExtractElementInst>::op_end(this
); } ExtractElementInst::const_op_iterator ExtractElementInst
::op_end() const { return OperandTraits<ExtractElementInst
>::op_end(const_cast<ExtractElementInst*>(this)); } Value
*ExtractElementInst::getOperand(unsigned i_nocapture) const {
((i_nocapture < OperandTraits<ExtractElementInst>::
operands(this) && "getOperand() out of range!") ? static_cast
<void> (0) : __assert_fail ("i_nocapture < OperandTraits<ExtractElementInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 1905, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<ExtractElementInst>::op_begin(const_cast
<ExtractElementInst*>(this))[i_nocapture].get()); } void
ExtractElementInst::setOperand(unsigned i_nocapture, Value *
Val_nocapture) { ((i_nocapture < OperandTraits<ExtractElementInst
>::operands(this) && "setOperand() out of range!")
? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<ExtractElementInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 1905, __PRETTY_FUNCTION__)); OperandTraits<ExtractElementInst
>::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned
ExtractElementInst::getNumOperands() const { return OperandTraits
<ExtractElementInst>::operands(this); } template <int
Idx_nocapture> Use &ExtractElementInst::Op() { return
this->OpFrom<Idx_nocapture>(this); } template <int
Idx_nocapture> const Use &ExtractElementInst::Op() const
{ return this->OpFrom<Idx_nocapture>(this); }
1906
1907//===----------------------------------------------------------------------===//
1908// InsertElementInst Class
1909//===----------------------------------------------------------------------===//
1910
1911/// This instruction inserts a single (scalar)
1912/// element into a VectorType value
1913///
1914class InsertElementInst : public Instruction {
1915 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
1916 const Twine &NameStr = "",
1917 Instruction *InsertBefore = nullptr);
1918 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
1919 BasicBlock *InsertAtEnd);
1920
1921protected:
1922 // Note: Instruction needs to be a friend here to call cloneImpl.
1923 friend class Instruction;
1924
1925 InsertElementInst *cloneImpl() const;
1926
1927public:
1928 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1929 const Twine &NameStr = "",
1930 Instruction *InsertBefore = nullptr) {
1931 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
1932 }
1933
1934 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1935 const Twine &NameStr,
1936 BasicBlock *InsertAtEnd) {
1937 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
1938 }
1939
1940 /// Return true if an insertelement instruction can be
1941 /// formed with the specified operands.
1942 static bool isValidOperands(const Value *Vec, const Value *NewElt,
1943 const Value *Idx);
1944
1945 /// Overload to return most specific vector type.
1946 ///
1947 VectorType *getType() const {
1948 return cast<VectorType>(Instruction::getType());
1949 }
1950
1951 /// Transparently provide more efficient getOperand methods.
1952 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
1953
1954 // Methods for support type inquiry through isa, cast, and dyn_cast:
1955 static bool classof(const Instruction *I) {
1956 return I->getOpcode() == Instruction::InsertElement;
1957 }
1958 static bool classof(const Value *V) {
1959 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1960 }
1961};
1962
1963template <>
1964struct OperandTraits<InsertElementInst> :
1965 public FixedNumOperandTraits<InsertElementInst, 3> {
1966};
1967
1968DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)InsertElementInst::op_iterator InsertElementInst::op_begin() {
return OperandTraits<InsertElementInst>::op_begin(this
); } InsertElementInst::const_op_iterator InsertElementInst::
op_begin() const { return OperandTraits<InsertElementInst>
::op_begin(const_cast<InsertElementInst*>(this)); } InsertElementInst
::op_iterator InsertElementInst::op_end() { return OperandTraits
<InsertElementInst>::op_end(this); } InsertElementInst::
const_op_iterator InsertElementInst::op_end() const { return OperandTraits
<InsertElementInst>::op_end(const_cast<InsertElementInst
*>(this)); } Value *InsertElementInst::getOperand(unsigned
i_nocapture) const { ((i_nocapture < OperandTraits<InsertElementInst
>::operands(this) && "getOperand() out of range!")
? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<InsertElementInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 1968, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<InsertElementInst>::op_begin(const_cast
<InsertElementInst*>(this))[i_nocapture].get()); } void
InsertElementInst::setOperand(unsigned i_nocapture, Value *Val_nocapture
) { ((i_nocapture < OperandTraits<InsertElementInst>
::operands(this) && "setOperand() out of range!") ? static_cast
<void> (0) : __assert_fail ("i_nocapture < OperandTraits<InsertElementInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 1968, __PRETTY_FUNCTION__)); OperandTraits<InsertElementInst
>::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned
InsertElementInst::getNumOperands() const { return OperandTraits
<InsertElementInst>::operands(this); } template <int
Idx_nocapture> Use &InsertElementInst::Op() { return this
->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture
> const Use &InsertElementInst::Op() const { return this
->OpFrom<Idx_nocapture>(this); }
1969
1970//===----------------------------------------------------------------------===//
1971// ShuffleVectorInst Class
1972//===----------------------------------------------------------------------===//
1973
1974constexpr int UndefMaskElem = -1;
1975
1976/// This instruction constructs a fixed permutation of two
1977/// input vectors.
1978///
1979/// For each element of the result vector, the shuffle mask selects an element
1980/// from one of the input vectors to copy to the result. Non-negative elements
1981/// in the mask represent an index into the concatenated pair of input vectors.
1982/// UndefMaskElem (-1) specifies that the result element is undefined.
1983///
1984/// For scalable vectors, all the elements of the mask must be 0 or -1. This
1985/// requirement may be relaxed in the future.
1986class ShuffleVectorInst : public Instruction {
// Implementation note: the shuffle mask is kept as plain instruction data
// (ShuffleMask, with UndefMaskElem == -1 for undef lanes) rather than as a
// third constant operand; ShuffleMaskForBitcode retains a Constant form of
// the same mask solely for bitcode emission (see getShuffleMaskForBitcode
// and convertShuffleMaskForBitcode below).
1987 SmallVector<int, 4> ShuffleMask;
1988 Constant *ShuffleMaskForBitcode;
1989
1990protected:
1991 // Note: Instruction needs to be a friend here to call cloneImpl.
1992 friend class Instruction;
1993
1994 ShuffleVectorInst *cloneImpl() const;
1995
1996public:
// NOTE(review): "InsertBefor" (sic) is the upstream parameter spelling; left
// untouched because it is part of the declared interface text.
1997 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
1998 const Twine &NameStr = "",
1999 Instruction *InsertBefor = nullptr);
2000 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2001 const Twine &NameStr, BasicBlock *InsertAtEnd);
2002 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2003 const Twine &NameStr = "",
2004 Instruction *InsertBefor = nullptr);
2005 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2006 const Twine &NameStr, BasicBlock *InsertAtEnd);
2007
// Allocate space for exactly two operands (the two source vectors); the
// mask itself is not an operand of this instruction.
2008 void *operator new(size_t s) { return User::operator new(s, 2); }
2009
2010 /// Swap the operands and adjust the mask to preserve the semantics
2011 /// of the instruction.
2012 void commute();
2013
2014 /// Return true if a shufflevector instruction can be
2015 /// formed with the specified operands.
2016 static bool isValidOperands(const Value *V1, const Value *V2,
2017 const Value *Mask);
2018 static bool isValidOperands(const Value *V1, const Value *V2,
2019 ArrayRef<int> Mask);
2020
2021 /// Overload to return most specific vector type.
2022 ///
2023 VectorType *getType() const {
2024 return cast<VectorType>(Instruction::getType());
2025 }
2026
2027 /// Transparently provide more efficient getOperand methods.
2028 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2029
2030 /// Return the shuffle mask value of this instruction for the given element
2031 /// index. Return UndefMaskElem if the element is undef.
2032 int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; }
2033
2034 /// Convert the input shuffle mask operand to a vector of integers. Undefined
2035 /// elements of the mask are returned as UndefMaskElem.
2036 static void getShuffleMask(const Constant *Mask,
2037 SmallVectorImpl<int> &Result);
2038
2039 /// Return the mask for this instruction as a vector of integers. Undefined
2040 /// elements of the mask are returned as UndefMaskElem.
2041 void getShuffleMask(SmallVectorImpl<int> &Result) const {
2042 Result.assign(ShuffleMask.begin(), ShuffleMask.end());
2043 }
2044
2045 /// Return the mask for this instruction, for use in bitcode.
2046 ///
2047 /// TODO: This is temporary until we decide a new bitcode encoding for
2048 /// shufflevector.
2049 Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; }
2050
2051 static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask,
2052 Type *ResultTy);
2053
2054 void setShuffleMask(ArrayRef<int> Mask);
2055
2056 ArrayRef<int> getShuffleMask() const { return ShuffleMask; }
2057
2058 /// Return true if this shuffle returns a vector with a different number of
2059 /// elements than its source vectors.
2060 /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3>
2061 /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
2062 bool changesLength() const {
// getKnownMinValue() is used so this works for both fixed and scalable
// vector operands.
2063 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2064 ->getElementCount()
2065 .getKnownMinValue();
2066 unsigned NumMaskElts = ShuffleMask.size();
2067 return NumSourceElts != NumMaskElts;
2068 }
2069
2070 /// Return true if this shuffle returns a vector with a greater number of
2071 /// elements than its source vectors.
2072 /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
2073 bool increasesLength() const {
2074 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2075 ->getElementCount()
2076 .getKnownMinValue();
2077 unsigned NumMaskElts = ShuffleMask.size();
2078 return NumSourceElts < NumMaskElts;
2079 }
2080
2081 /// Return true if this shuffle mask chooses elements from exactly one source
2082 /// vector.
2083 /// Example: <7,5,undef,7>
2084 /// This assumes that vector operands are the same length as the mask.
2085 static bool isSingleSourceMask(ArrayRef<int> Mask);
2086 static bool isSingleSourceMask(const Constant *Mask) {
2087 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")((Mask->getType()->isVectorTy() && "Shuffle needs vector constant."
) ? static_cast<void> (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 2087, __PRETTY_FUNCTION__))
;
2088 SmallVector<int, 16> MaskAsInts;
2089 getShuffleMask(Mask, MaskAsInts);
2090 return isSingleSourceMask(MaskAsInts);
2091 }
2092
2093 /// Return true if this shuffle chooses elements from exactly one source
2094 /// vector without changing the length of that vector.
2095 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3>
2096 /// TODO: Optionally allow length-changing shuffles.
2097 bool isSingleSource() const {
2098 return !changesLength() && isSingleSourceMask(ShuffleMask);
2099 }
2100
2101 /// Return true if this shuffle mask chooses elements from exactly one source
2102 /// vector without lane crossings. A shuffle using this mask is not
2103 /// necessarily a no-op because it may change the number of elements from its
2104 /// input vectors or it may provide demanded bits knowledge via undef lanes.
2105 /// Example: <undef,undef,2,3>
2106 static bool isIdentityMask(ArrayRef<int> Mask);
2107 static bool isIdentityMask(const Constant *Mask) {
2108 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")((Mask->getType()->isVectorTy() && "Shuffle needs vector constant."
) ? static_cast<void> (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 2108, __PRETTY_FUNCTION__))
;
2109 SmallVector<int, 16> MaskAsInts;
2110 getShuffleMask(Mask, MaskAsInts);
2111 return isIdentityMask(MaskAsInts);
2112 }
2113
2114 /// Return true if this shuffle chooses elements from exactly one source
2115 /// vector without lane crossings and does not change the number of elements
2116 /// from its input vectors.
2117 /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
2118 bool isIdentity() const {
2119 return !changesLength() && isIdentityMask(ShuffleMask);
2120 }
2121
2122 /// Return true if this shuffle lengthens exactly one source vector with
2123 /// undefs in the high elements.
2124 bool isIdentityWithPadding() const;
2125
2126 /// Return true if this shuffle extracts the first N elements of exactly one
2127 /// source vector.
2128 bool isIdentityWithExtract() const;
2129
2130 /// Return true if this shuffle concatenates its 2 source vectors. This
2131 /// returns false if either input is undefined. In that case, the shuffle
2132 /// is better classified as an identity with padding operation.
2133 bool isConcat() const;
2134
2135 /// Return true if this shuffle mask chooses elements from its source vectors
2136 /// without lane crossings. A shuffle using this mask would be
2137 /// equivalent to a vector select with a constant condition operand.
2138 /// Example: <4,1,6,undef>
2139 /// This returns false if the mask does not choose from both input vectors.
2140 /// In that case, the shuffle is better classified as an identity shuffle.
2141 /// This assumes that vector operands are the same length as the mask
2142 /// (a length-changing shuffle can never be equivalent to a vector select).
2143 static bool isSelectMask(ArrayRef<int> Mask);
2144 static bool isSelectMask(const Constant *Mask) {
2145 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")((Mask->getType()->isVectorTy() && "Shuffle needs vector constant."
) ? static_cast<void> (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 2145, __PRETTY_FUNCTION__))
;
2146 SmallVector<int, 16> MaskAsInts;
2147 getShuffleMask(Mask, MaskAsInts);
2148 return isSelectMask(MaskAsInts);
2149 }
2150
2151 /// Return true if this shuffle chooses elements from its source vectors
2152 /// without lane crossings and all operands have the same number of elements.
2153 /// In other words, this shuffle is equivalent to a vector select with a
2154 /// constant condition operand.
2155 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
2156 /// This returns false if the mask does not choose from both input vectors.
2157 /// In that case, the shuffle is better classified as an identity shuffle.
2158 /// TODO: Optionally allow length-changing shuffles.
2159 bool isSelect() const {
2160 return !changesLength() && isSelectMask(ShuffleMask);
2161 }
2162
2163 /// Return true if this shuffle mask swaps the order of elements from exactly
2164 /// one source vector.
2165 /// Example: <7,6,undef,4>
2166 /// This assumes that vector operands are the same length as the mask.
2167 static bool isReverseMask(ArrayRef<int> Mask);
2168 static bool isReverseMask(const Constant *Mask) {
2169 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")((Mask->getType()->isVectorTy() && "Shuffle needs vector constant."
) ? static_cast<void> (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 2169, __PRETTY_FUNCTION__))
;
2170 SmallVector<int, 16> MaskAsInts;
2171 getShuffleMask(Mask, MaskAsInts);
2172 return isReverseMask(MaskAsInts);
2173 }
2174
2175 /// Return true if this shuffle swaps the order of elements from exactly
2176 /// one source vector.
2177 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
2178 /// TODO: Optionally allow length-changing shuffles.
2179 bool isReverse() const {
2180 return !changesLength() && isReverseMask(ShuffleMask);
2181 }
2182
2183 /// Return true if this shuffle mask chooses all elements with the same value
2184 /// as the first element of exactly one source vector.
2185 /// Example: <4,undef,undef,4>
2186 /// This assumes that vector operands are the same length as the mask.
2187 static bool isZeroEltSplatMask(ArrayRef<int> Mask);
2188 static bool isZeroEltSplatMask(const Constant *Mask) {
2189 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")((Mask->getType()->isVectorTy() && "Shuffle needs vector constant."
) ? static_cast<void> (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 2189, __PRETTY_FUNCTION__))
;
2190 SmallVector<int, 16> MaskAsInts;
2191 getShuffleMask(Mask, MaskAsInts);
2192 return isZeroEltSplatMask(MaskAsInts);
2193 }
2194
2195 /// Return true if all elements of this shuffle are the same value as the
2196 /// first element of exactly one source vector without changing the length
2197 /// of that vector.
2198 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0>
2199 /// TODO: Optionally allow length-changing shuffles.
2200 /// TODO: Optionally allow splats from other elements.
2201 bool isZeroEltSplat() const {
2202 return !changesLength() && isZeroEltSplatMask(ShuffleMask);
2203 }
2204
2205 /// Return true if this shuffle mask is a transpose mask.
2206 /// Transpose vector masks transpose a 2xn matrix. They read corresponding
2207 /// even- or odd-numbered vector elements from two n-dimensional source
2208 /// vectors and write each result into consecutive elements of an
2209 /// n-dimensional destination vector. Two shuffles are necessary to complete
2210 /// the transpose, one for the even elements and another for the odd elements.
2211 /// This description closely follows how the TRN1 and TRN2 AArch64
2212 /// instructions operate.
2213 ///
2214 /// For example, a simple 2x2 matrix can be transposed with:
2215 ///
2216 /// ; Original matrix
2217 /// m0 = < a, b >
2218 /// m1 = < c, d >
2219 ///
2220 /// ; Transposed matrix
2221 /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 >
2222 /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 >
2223 ///
2224 /// For matrices having greater than n columns, the resulting nx2 transposed
2225 /// matrix is stored in two result vectors such that one vector contains
2226 /// interleaved elements from all the even-numbered rows and the other vector
2227 /// contains interleaved elements from all the odd-numbered rows. For example,
2228 /// a 2x4 matrix can be transposed with:
2229 ///
2230 /// ; Original matrix
2231 /// m0 = < a, b, c, d >
2232 /// m1 = < e, f, g, h >
2233 ///
2234 /// ; Transposed matrix
2235 /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 >
2236 /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 >
2237 static bool isTransposeMask(ArrayRef<int> Mask);
2238 static bool isTransposeMask(const Constant *Mask) {
2239 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")((Mask->getType()->isVectorTy() && "Shuffle needs vector constant."
) ? static_cast<void> (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 2239, __PRETTY_FUNCTION__))
;
2240 SmallVector<int, 16> MaskAsInts;
2241 getShuffleMask(Mask, MaskAsInts);
2242 return isTransposeMask(MaskAsInts);
2243 }
2244
2245 /// Return true if this shuffle transposes the elements of its inputs without
2246 /// changing the length of the vectors. This operation may also be known as a
2247 /// merge or interleave. See the description for isTransposeMask() for the
2248 /// exact specification.
2249 /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6>
2250 bool isTranspose() const {
2251 return !changesLength() && isTransposeMask(ShuffleMask);
2252 }
2253
2254 /// Return true if this shuffle mask is an extract subvector mask.
2255 /// A valid extract subvector mask returns a smaller vector from a single
2256 /// source operand. The base extraction index is returned as well.
2257 static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2258 int &Index);
2259 static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts,
2260 int &Index) {
2261 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")((Mask->getType()->isVectorTy() && "Shuffle needs vector constant."
) ? static_cast<void> (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 2261, __PRETTY_FUNCTION__))
;
2262 // Not possible to express a shuffle mask for a scalable vector for this
2263 // case.
2264 if (isa<ScalableVectorType>(Mask->getType()))
2265 return false;
2266 SmallVector<int, 16> MaskAsInts;
2267 getShuffleMask(Mask, MaskAsInts);
2268 return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index);
2269 }
2270
2271 /// Return true if this shuffle mask is an extract subvector mask.
2272 bool isExtractSubvectorMask(int &Index) const {
2273 // Not possible to express a shuffle mask for a scalable vector for this
2274 // case.
2275 if (isa<ScalableVectorType>(getType()))
2276 return false;
2277
2278 int NumSrcElts =
2279 cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2280 return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index);
2281 }
2282
2283 /// Change values in a shuffle permute mask assuming the two vector operands
2284 /// of length InVecNumElts have swapped position.
2285 static void commuteShuffleMask(MutableArrayRef<int> Mask,
2286 unsigned InVecNumElts) {
2287 for (int &Idx : Mask) {
// -1 (UndefMaskElem) lanes stay undef; all other indices flip between the
// [0, InVecNumElts) and [InVecNumElts, 2*InVecNumElts) halves.
2288 if (Idx == -1)
2289 continue;
2290 Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
2291 assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&((Idx >= 0 && Idx < (int)InVecNumElts * 2 &&
"shufflevector mask index out of range") ? static_cast<void
> (0) : __assert_fail ("Idx >= 0 && Idx < (int)InVecNumElts * 2 && \"shufflevector mask index out of range\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 2292, __PRETTY_FUNCTION__))
2292 "shufflevector mask index out of range")((Idx >= 0 && Idx < (int)InVecNumElts * 2 &&
"shufflevector mask index out of range") ? static_cast<void
> (0) : __assert_fail ("Idx >= 0 && Idx < (int)InVecNumElts * 2 && \"shufflevector mask index out of range\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 2292, __PRETTY_FUNCTION__))
;
2293 }
2294 }
2295
2296 // Methods for support type inquiry through isa, cast, and dyn_cast:
2297 static bool classof(const Instruction *I) {
2298 return I->getOpcode() == Instruction::ShuffleVector;
2299 }
2300 static bool classof(const Value *V) {
2301 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2302 }
2303};
2304
// Fixes ShuffleVectorInst's operand count at exactly 2 (the two source
// vectors); the shuffle mask is instruction data, not an operand.
2305template <>
2306struct OperandTraits<ShuffleVectorInst>
2307 : public FixedNumOperandTraits<ShuffleVectorInst, 2> {};
2308
// Macro-expanded DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst,
// Value): out-of-line bodies for the operand accessors declared inside the
// class via DECLARE_TRANSPARENT_OPERAND_ACCESSORS. getOperand/setOperand
// bounds-check against OperandTraits<ShuffleVectorInst>::operands (2).
2309DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value)ShuffleVectorInst::op_iterator ShuffleVectorInst::op_begin() {
return OperandTraits<ShuffleVectorInst>::op_begin(this
); } ShuffleVectorInst::const_op_iterator ShuffleVectorInst::
op_begin() const { return OperandTraits<ShuffleVectorInst>
::op_begin(const_cast<ShuffleVectorInst*>(this)); } ShuffleVectorInst
::op_iterator ShuffleVectorInst::op_end() { return OperandTraits
<ShuffleVectorInst>::op_end(this); } ShuffleVectorInst::
const_op_iterator ShuffleVectorInst::op_end() const { return OperandTraits
<ShuffleVectorInst>::op_end(const_cast<ShuffleVectorInst
*>(this)); } Value *ShuffleVectorInst::getOperand(unsigned
i_nocapture) const { ((i_nocapture < OperandTraits<ShuffleVectorInst
>::operands(this) && "getOperand() out of range!")
? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<ShuffleVectorInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 2309, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<ShuffleVectorInst>::op_begin(const_cast
<ShuffleVectorInst*>(this))[i_nocapture].get()); } void
ShuffleVectorInst::setOperand(unsigned i_nocapture, Value *Val_nocapture
) { ((i_nocapture < OperandTraits<ShuffleVectorInst>
::operands(this) && "setOperand() out of range!") ? static_cast
<void> (0) : __assert_fail ("i_nocapture < OperandTraits<ShuffleVectorInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 2309, __PRETTY_FUNCTION__)); OperandTraits<ShuffleVectorInst
>::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned
ShuffleVectorInst::getNumOperands() const { return OperandTraits
<ShuffleVectorInst>::operands(this); } template <int
Idx_nocapture> Use &ShuffleVectorInst::Op() { return this
->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture
> const Use &ShuffleVectorInst::Op() const { return this
->OpFrom<Idx_nocapture>(this); }
2310
2311//===----------------------------------------------------------------------===//
2312// ExtractValueInst Class
2313//===----------------------------------------------------------------------===//
2314
2315/// This instruction extracts a struct member or array
2316/// element value from an aggregate value.
2317///
2318class ExtractValueInst : public Instruction {
2403
// Inline out-of-line constructor. The result type is computed from the
// aggregate type and index path; NOTE(review): checkGEPType is defined
// elsewhere in this header — presumably it asserts the indexed type is
// non-null; confirm there.
2404ExtractValueInst::ExtractValueInst(Value *Agg,
2405 ArrayRef<unsigned> Idxs,
2406 const Twine &NameStr,
2407 Instruction *InsertBefore)
2408 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2409 ExtractValue, Agg, InsertBefore) {
2410 init(Idxs, NameStr);
2411}
2412
// Inline out-of-line constructor (append-to-block variant); same result-type
// computation as the InsertBefore form above.
2413ExtractValueInst::ExtractValueInst(Value *Agg,
2414 ArrayRef<unsigned> Idxs,
2415 const Twine &NameStr,
2416 BasicBlock *InsertAtEnd)
2417 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2418 ExtractValue, Agg, InsertAtEnd) {
2419 init(Idxs, NameStr);
2420}
2421
2422//===----------------------------------------------------------------------===//
2423// InsertValueInst Class
2424//===----------------------------------------------------------------------===//
2425
2426/// This instruction inserts a struct field or array element
2427/// value into an aggregate value.
2428///
2429class InsertValueInst : public Instruction {
// Operand 0 is the aggregate, operand 1 is the value being inserted (see
// getAggregateOperand / getInsertedValueOperand); the constant index path
// is stored inline in Indices rather than as operands.
2430 SmallVector<unsigned, 4> Indices;
2431
2432 InsertValueInst(const InsertValueInst &IVI);
2433
2434 /// Constructors - Create an insertvalue instruction with a base aggregate
2435 /// value, a value to insert, and a list of indices. The first ctor can
2436 /// optionally insert before an existing instruction, the second appends
2437 /// the new instruction to the specified BasicBlock.
2438 inline InsertValueInst(Value *Agg, Value *Val,
2439 ArrayRef<unsigned> Idxs,
2440 const Twine &NameStr,
2441 Instruction *InsertBefore);
2442 inline InsertValueInst(Value *Agg, Value *Val,
2443 ArrayRef<unsigned> Idxs,
2444 const Twine &NameStr, BasicBlock *InsertAtEnd);
2445
2446 /// Constructors - These two constructors are convenience methods because one
2447 /// and two index insertvalue instructions are so common.
2448 InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
2449 const Twine &NameStr = "",
2450 Instruction *InsertBefore = nullptr);
2451 InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
2452 BasicBlock *InsertAtEnd);
2453
2454 void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2455 const Twine &NameStr);
2456
2457protected:
2458 // Note: Instruction needs to be a friend here to call cloneImpl.
2459 friend class Instruction;
2460
2461 InsertValueInst *cloneImpl() const;
2462
2463public:
2464 // allocate space for exactly two operands
2465 void *operator new(size_t s) {
2466 return User::operator new(s, 2);
2467 }
2468
2469 static InsertValueInst *Create(Value *Agg, Value *Val,
2470 ArrayRef<unsigned> Idxs,
2471 const Twine &NameStr = "",
2472 Instruction *InsertBefore = nullptr) {
2473 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
2474 }
2475
2476 static InsertValueInst *Create(Value *Agg, Value *Val,
2477 ArrayRef<unsigned> Idxs,
2478 const Twine &NameStr,
2479 BasicBlock *InsertAtEnd) {
2480 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd);
2481 }
2482
2483 /// Transparently provide more efficient getOperand methods.
2484 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2485
2486 using idx_iterator = const unsigned*;
2487
2488 inline idx_iterator idx_begin() const { return Indices.begin(); }
2489 inline idx_iterator idx_end() const { return Indices.end(); }
2490 inline iterator_range<idx_iterator> indices() const {
2491 return make_range(idx_begin(), idx_end());
2492 }
2493
2494 Value *getAggregateOperand() {
2495 return getOperand(0);
2496 }
2497 const Value *getAggregateOperand() const {
2498 return getOperand(0);
2499 }
2500 static unsigned getAggregateOperandIndex() {
2501 return 0U; // get index for modifying correct operand
2502 }
2503
2504 Value *getInsertedValueOperand() {
2505 return getOperand(1);
2506 }
2507 const Value *getInsertedValueOperand() const {
2508 return getOperand(1);
2509 }
2510 static unsigned getInsertedValueOperandIndex() {
2511 return 1U; // get index for modifying correct operand
2512 }
2513
2514 ArrayRef<unsigned> getIndices() const {
2515 return Indices;
2516 }
2517
2518 unsigned getNumIndices() const {
2519 return (unsigned)Indices.size();
2520 }
2521
// Unconditionally true: an insertvalue instruction carries its index list
// inline.
2522 bool hasIndices() const {
2523 return true;
2524 }
2525
2526 // Methods for support type inquiry through isa, cast, and dyn_cast:
2527 static bool classof(const Instruction *I) {
2528 return I->getOpcode() == Instruction::InsertValue;
2529 }
2530 static bool classof(const Value *V) {
2531 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2532 }
2533};
2534
// Fixes InsertValueInst's operand count at exactly 2 (aggregate + inserted
// value); the index list is instruction data, not operands.
2535template <>
2536struct OperandTraits<InsertValueInst> :
2537 public FixedNumOperandTraits<InsertValueInst, 2> {
2538};
2539
// Inline out-of-line constructor: the result type is the aggregate's own
// type; operand storage comes from OperandTraits::op_begin(this) (2 slots),
// and all member setup is deferred to init().
2540InsertValueInst::InsertValueInst(Value *Agg,
2541 Value *Val,
2542 ArrayRef<unsigned> Idxs,
2543 const Twine &NameStr,
2544 Instruction *InsertBefore)
2545 : Instruction(Agg->getType(), InsertValue,
2546 OperandTraits<InsertValueInst>::op_begin(this),
2547 2, InsertBefore) {
2548 init(Agg, Val, Idxs, NameStr);
2549}
2550
// Inline out-of-line constructor (append-to-block variant); mirrors the
// InsertBefore form above.
2551InsertValueInst::InsertValueInst(Value *Agg,
2552 Value *Val,
2553 ArrayRef<unsigned> Idxs,
2554 const Twine &NameStr,
2555 BasicBlock *InsertAtEnd)
2556 : Instruction(Agg->getType(), InsertValue,
2557 OperandTraits<InsertValueInst>::op_begin(this),
2558 2, InsertAtEnd) {
2559 init(Agg, Val, Idxs, NameStr);
2560}
2561
// Macro-expanded DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst,
// Value): out-of-line bodies for the operand accessors declared in the class.
// getOperand/setOperand bounds-check against
// OperandTraits<InsertValueInst>::operands (2).
2562DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)InsertValueInst::op_iterator InsertValueInst::op_begin() { return
OperandTraits<InsertValueInst>::op_begin(this); } InsertValueInst
::const_op_iterator InsertValueInst::op_begin() const { return
OperandTraits<InsertValueInst>::op_begin(const_cast<
InsertValueInst*>(this)); } InsertValueInst::op_iterator InsertValueInst
::op_end() { return OperandTraits<InsertValueInst>::op_end
(this); } InsertValueInst::const_op_iterator InsertValueInst::
op_end() const { return OperandTraits<InsertValueInst>::
op_end(const_cast<InsertValueInst*>(this)); } Value *InsertValueInst
::getOperand(unsigned i_nocapture) const { ((i_nocapture <
OperandTraits<InsertValueInst>::operands(this) &&
"getOperand() out of range!") ? static_cast<void> (0) :
__assert_fail ("i_nocapture < OperandTraits<InsertValueInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 2562, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<InsertValueInst>::op_begin(const_cast<
InsertValueInst*>(this))[i_nocapture].get()); } void InsertValueInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { ((
i_nocapture < OperandTraits<InsertValueInst>::operands
(this) && "setOperand() out of range!") ? static_cast
<void> (0) : __assert_fail ("i_nocapture < OperandTraits<InsertValueInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 2562, __PRETTY_FUNCTION__)); OperandTraits<InsertValueInst
>::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned
InsertValueInst::getNumOperands() const { return OperandTraits
<InsertValueInst>::operands(this); } template <int Idx_nocapture
> Use &InsertValueInst::Op() { return this->OpFrom<
Idx_nocapture>(this); } template <int Idx_nocapture>
const Use &InsertValueInst::Op() const { return this->
OpFrom<Idx_nocapture>(this); }
2563
2564//===----------------------------------------------------------------------===//
2565// PHINode Class
2566//===----------------------------------------------------------------------===//
2567
2568// PHINode - The PHINode class is used to represent the magical mystical PHI
2569// node, that can not exist in nature, but can be synthesized in a computer
2570// scientist's overactive imagination.
2571//
2572class PHINode : public Instruction {
2573 /// The number of operands actually allocated. NumOperands is
2574 /// the number actually in use.
2575 unsigned ReservedSpace;
2576
2577 PHINode(const PHINode &PN);
2578
2579 explicit PHINode(Type *Ty, unsigned NumReservedValues,
2580 const Twine &NameStr = "",
2581 Instruction *InsertBefore = nullptr)
2582 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore),
2583 ReservedSpace(NumReservedValues) {
2584 setName(NameStr);
2585 allocHungoffUses(ReservedSpace);
2586 }
2587
2588 PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
2589 BasicBlock *InsertAtEnd)
2590 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd),
2591 ReservedSpace(NumReservedValues) {
2592 setName(NameStr);
2593 allocHungoffUses(ReservedSpace);
2594 }
2595
2596protected:
2597 // Note: Instruction needs to be a friend here to call cloneImpl.
2598 friend class Instruction;
2599
2600 PHINode *cloneImpl() const;
2601
2602 // allocHungoffUses - this is more complicated than the generic
2603 // User::allocHungoffUses, because we have to allocate Uses for the incoming
2604 // values and pointers to the incoming blocks, all in one allocation.
2605 void allocHungoffUses(unsigned N) {
2606 User::allocHungoffUses(N, /* IsPhi */ true);
2607 }
2608
2609public:
2610 /// Constructors - NumReservedValues is a hint for the number of incoming
2611 /// edges that this phi node will have (use 0 if you really have no idea).
2612 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2613 const Twine &NameStr = "",
2614 Instruction *InsertBefore = nullptr) {
2615 return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
2616 }
2617
2618 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2619 const Twine &NameStr, BasicBlock *InsertAtEnd) {
2620 return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd);
2621 }
2622
2623 /// Provide fast operand accessors
2624 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2625
2626 // Block iterator interface. This provides access to the list of incoming
2627 // basic blocks, which parallels the list of incoming values.
2628
2629 using block_iterator = BasicBlock **;
2630 using const_block_iterator = BasicBlock * const *;
2631
2632 block_iterator block_begin() {
2633 return reinterpret_cast<block_iterator>(op_begin() + ReservedSpace);
2634 }
2635
2636 const_block_iterator block_begin() const {
2637 return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace);
2638 }
2639
2640 block_iterator block_end() {
2641 return block_begin() + getNumOperands();
2642 }
2643
2644 const_block_iterator block_end() const {
2645 return block_begin() + getNumOperands();
2646 }
2647
2648 iterator_range<block_iterator> blocks() {
2649 return make_range(block_begin(), block_end());
2650 }
2651
2652 iterator_range<const_block_iterator> blocks() const {
2653 return make_range(block_begin(), block_end());
2654 }
2655
2656 op_range incoming_values() { return operands(); }
2657
2658 const_op_range incoming_values() const { return operands(); }
2659
2660 /// Return the number of incoming edges
2661 ///
2662 unsigned getNumIncomingValues() const { return getNumOperands(); }
2663
2664 /// Return incoming value number x
2665 ///
2666 Value *getIncomingValue(unsigned i) const {
2667 return getOperand(i);
2668 }
2669 void setIncomingValue(unsigned i, Value *V) {
2670 assert(V && "PHI node got a null value!")((V && "PHI node got a null value!") ? static_cast<
void> (0) : __assert_fail ("V && \"PHI node got a null value!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 2670, __PRETTY_FUNCTION__))
;
2671 assert(getType() == V->getType() &&((getType() == V->getType() && "All operands to PHI node must be the same type as the PHI node!"
) ? static_cast<void> (0) : __assert_fail ("getType() == V->getType() && \"All operands to PHI node must be the same type as the PHI node!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 2672, __PRETTY_FUNCTION__))
2672 "All operands to PHI node must be the same type as the PHI node!")((getType() == V->getType() && "All operands to PHI node must be the same type as the PHI node!"
) ? static_cast<void> (0) : __assert_fail ("getType() == V->getType() && \"All operands to PHI node must be the same type as the PHI node!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 2672, __PRETTY_FUNCTION__))
;
2673 setOperand(i, V);
2674 }
2675
2676 static unsigned getOperandNumForIncomingValue(unsigned i) {
2677 return i;
2678 }
2679
2680 static unsigned getIncomingValueNumForOperand(unsigned i) {
2681 return i;
2682 }
2683
2684 /// Return incoming basic block number @p i.
2685 ///
2686 BasicBlock *getIncomingBlock(unsigned i) const {
2687 return block_begin()[i];
2688 }
2689
2690 /// Return incoming basic block corresponding
2691 /// to an operand of the PHI.
2692 ///
2693 BasicBlock *getIncomingBlock(const Use &U) const {
2694 assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?")((this == U.getUser() && "Iterator doesn't point to PHI's Uses?"
) ? static_cast<void> (0) : __assert_fail ("this == U.getUser() && \"Iterator doesn't point to PHI's Uses?\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 2694, __PRETTY_FUNCTION__))
;
2695 return getIncomingBlock(unsigned(&U - op_begin()));
2696 }
2697
2698 /// Return incoming basic block corresponding
2699 /// to value use iterator.
2700 ///
2701 BasicBlock *getIncomingBlock(Value::const_user_iterator I) const {
2702 return getIncomingBlock(I.getUse());
2703 }
2704
2705 void setIncomingBlock(unsigned i, BasicBlock *BB) {
2706 assert(BB && "PHI node got a null basic block!")((BB && "PHI node got a null basic block!") ? static_cast
<void> (0) : __assert_fail ("BB && \"PHI node got a null basic block!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 2706, __PRETTY_FUNCTION__))
;
2707 block_begin()[i] = BB;
2708 }
2709
2710 /// Replace every incoming basic block \p Old to basic block \p New.
2711 void replaceIncomingBlockWith(const BasicBlock *Old, BasicBlock *New) {
2712 assert(New && Old && "PHI node got a null basic block!")((New && Old && "PHI node got a null basic block!"
) ? static_cast<void> (0) : __assert_fail ("New && Old && \"PHI node got a null basic block!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 2712, __PRETTY_FUNCTION__))
;
2713 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2714 if (getIncomingBlock(Op) == Old)
2715 setIncomingBlock(Op, New);
2716 }
2717
2718 /// Add an incoming value to the end of the PHI list
2719 ///
2720 void addIncoming(Value *V, BasicBlock *BB) {
2721 if (getNumOperands() == ReservedSpace)
2722 growOperands(); // Get more space!
2723 // Initialize some new operands.
2724 setNumHungOffUseOperands(getNumOperands() + 1);
2725 setIncomingValue(getNumOperands() - 1, V);
2726 setIncomingBlock(getNumOperands() - 1, BB);
2727 }
2728
2729 /// Remove an incoming value. This is useful if a
2730 /// predecessor basic block is deleted. The value removed is returned.
2731 ///
2732 /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty
2733 /// is true), the PHI node is destroyed and any uses of it are replaced with
2734 /// dummy values. The only time there should be zero incoming values to a PHI
2735 /// node is when the block is dead, so this strategy is sound.
2736 ///
2737 Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true);
2738
2739 Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) {
2740 int Idx = getBasicBlockIndex(BB);
2741 assert(Idx >= 0 && "Invalid basic block argument to remove!")((Idx >= 0 && "Invalid basic block argument to remove!"
) ? static_cast<void> (0) : __assert_fail ("Idx >= 0 && \"Invalid basic block argument to remove!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 2741, __PRETTY_FUNCTION__))
;
2742 return removeIncomingValue(Idx, DeletePHIIfEmpty);
2743 }
2744
2745 /// Return the first index of the specified basic
2746 /// block in the value list for this PHI. Returns -1 if no instance.
2747 ///
2748 int getBasicBlockIndex(const BasicBlock *BB) const {
2749 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
2750 if (block_begin()[i] == BB)
2751 return i;
2752 return -1;
2753 }
2754
2755 Value *getIncomingValueForBlock(const BasicBlock *BB) const {
2756 int Idx = getBasicBlockIndex(BB);
2757 assert(Idx >= 0 && "Invalid basic block argument!")((Idx >= 0 && "Invalid basic block argument!") ? static_cast
<void> (0) : __assert_fail ("Idx >= 0 && \"Invalid basic block argument!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 2757, __PRETTY_FUNCTION__))
;
2758 return getIncomingValue(Idx);
2759 }
2760
2761 /// Set every incoming value(s) for block \p BB to \p V.
2762 void setIncomingValueForBlock(const BasicBlock *BB, Value *V) {
2763 assert(BB && "PHI node got a null basic block!")((BB && "PHI node got a null basic block!") ? static_cast
<void> (0) : __assert_fail ("BB && \"PHI node got a null basic block!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 2763, __PRETTY_FUNCTION__))
;
2764 bool Found = false;
2765 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2766 if (getIncomingBlock(Op) == BB) {
2767 Found = true;
2768 setIncomingValue(Op, V);
2769 }
2770 (void)Found;
2771 assert(Found && "Invalid basic block argument to set!")((Found && "Invalid basic block argument to set!") ? static_cast
<void> (0) : __assert_fail ("Found && \"Invalid basic block argument to set!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 2771, __PRETTY_FUNCTION__))
;
2772 }
2773
2774 /// If the specified PHI node always merges together the
2775 /// same value, return the value, otherwise return null.
2776 Value *hasConstantValue() const;
2777
2778 /// Whether the specified PHI node always merges
2779 /// together the same value, assuming undefs are equal to a unique
2780 /// non-undef value.
2781 bool hasConstantOrUndefValue() const;
2782
2783 /// If the PHI node is complete which means all of its parent's predecessors
2784 /// have incoming value in this PHI, return true, otherwise return false.
2785 bool isComplete() const {
2786 return llvm::all_of(predecessors(getParent()),
2787 [this](const BasicBlock *Pred) {
2788 return getBasicBlockIndex(Pred) >= 0;
2789 });
2790 }
2791
2792 /// Methods for support type inquiry through isa, cast, and dyn_cast:
2793 static bool classof(const Instruction *I) {
2794 return I->getOpcode() == Instruction::PHI;
2795 }
2796 static bool classof(const Value *V) {
2797 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2798 }
2799
2800private:
2801 void growOperands();
2802};
2803
2804template <>
2805struct OperandTraits<PHINode> : public HungoffOperandTraits<2> {
2806};
2807
2808DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value)PHINode::op_iterator PHINode::op_begin() { return OperandTraits
<PHINode>::op_begin(this); } PHINode::const_op_iterator
PHINode::op_begin() const { return OperandTraits<PHINode>
::op_begin(const_cast<PHINode*>(this)); } PHINode::op_iterator
PHINode::op_end() { return OperandTraits<PHINode>::op_end
(this); } PHINode::const_op_iterator PHINode::op_end() const {
return OperandTraits<PHINode>::op_end(const_cast<PHINode
*>(this)); } Value *PHINode::getOperand(unsigned i_nocapture
) const { ((i_nocapture < OperandTraits<PHINode>::operands
(this) && "getOperand() out of range!") ? static_cast
<void> (0) : __assert_fail ("i_nocapture < OperandTraits<PHINode>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 2808, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<PHINode>::op_begin(const_cast<PHINode
*>(this))[i_nocapture].get()); } void PHINode::setOperand(
unsigned i_nocapture, Value *Val_nocapture) { ((i_nocapture <
OperandTraits<PHINode>::operands(this) && "setOperand() out of range!"
) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<PHINode>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 2808, __PRETTY_FUNCTION__)); OperandTraits<PHINode>::
op_begin(this)[i_nocapture] = Val_nocapture; } unsigned PHINode
::getNumOperands() const { return OperandTraits<PHINode>
::operands(this); } template <int Idx_nocapture> Use &
PHINode::Op() { return this->OpFrom<Idx_nocapture>(this
); } template <int Idx_nocapture> const Use &PHINode
::Op() const { return this->OpFrom<Idx_nocapture>(this
); }
2809
2810//===----------------------------------------------------------------------===//
2811// LandingPadInst Class
2812//===----------------------------------------------------------------------===//
2813
2814//===---------------------------------------------------------------------------
2815/// The landingpad instruction holds all of the information
2816/// necessary to generate correct exception handling. The landingpad instruction
2817/// cannot be moved from the top of a landing pad block, which itself is
2818/// accessible only from the 'unwind' edge of an invoke. This uses the
2819/// SubclassData field in Value to store whether or not the landingpad is a
2820/// cleanup.
2821///
2822class LandingPadInst : public Instruction {
2823 using CleanupField = BoolBitfieldElementT<0>;
2824
2825 /// The number of operands actually allocated. NumOperands is
2826 /// the number actually in use.
2827 unsigned ReservedSpace;
2828
2829 LandingPadInst(const LandingPadInst &LP);
2830
2831public:
2832 enum ClauseType { Catch, Filter };
2833
2834private:
2835 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2836 const Twine &NameStr, Instruction *InsertBefore);
2837 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2838 const Twine &NameStr, BasicBlock *InsertAtEnd);
2839
2840 // Allocate space for exactly zero operands.
2841 void *operator new(size_t s) {
2842 return User::operator new(s);
2843 }
2844
2845 void growOperands(unsigned Size);
2846 void init(unsigned NumReservedValues, const Twine &NameStr);
2847
2848protected:
2849 // Note: Instruction needs to be a friend here to call cloneImpl.
2850 friend class Instruction;
2851
2852 LandingPadInst *cloneImpl() const;
2853
2854public:
2855 /// Constructors - NumReservedClauses is a hint for the number of incoming
2856 /// clauses that this landingpad will have (use 0 if you really have no idea).
2857 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2858 const Twine &NameStr = "",
2859 Instruction *InsertBefore = nullptr);
2860 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2861 const Twine &NameStr, BasicBlock *InsertAtEnd);
2862
2863 /// Provide fast operand accessors
2864 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2865
2866 /// Return 'true' if this landingpad instruction is a
2867 /// cleanup. I.e., it should be run when unwinding even if its landing pad
2868 /// doesn't catch the exception.
2869 bool isCleanup() const { return getSubclassData<CleanupField>(); }
2870
2871 /// Indicate that this landingpad instruction is a cleanup.
2872 void setCleanup(bool V) { setSubclassData<CleanupField>(V); }
2873
2874 /// Add a catch or filter clause to the landing pad.
2875 void addClause(Constant *ClauseVal);
2876
2877 /// Get the value of the clause at index Idx. Use isCatch/isFilter to
2878 /// determine what type of clause this is.
2879 Constant *getClause(unsigned Idx) const {
2880 return cast<Constant>(getOperandList()[Idx]);
2881 }
2882
2883 /// Return 'true' if the clause and index Idx is a catch clause.
2884 bool isCatch(unsigned Idx) const {
2885 return !isa<ArrayType>(getOperandList()[Idx]->getType());
2886 }
2887
2888 /// Return 'true' if the clause and index Idx is a filter clause.
2889 bool isFilter(unsigned Idx) const {
2890 return isa<ArrayType>(getOperandList()[Idx]->getType());
2891 }
2892
2893 /// Get the number of clauses for this landing pad.
2894 unsigned getNumClauses() const { return getNumOperands(); }
2895
2896 /// Grow the size of the operand list to accommodate the new
2897 /// number of clauses.
2898 void reserveClauses(unsigned Size) { growOperands(Size); }
2899
2900 // Methods for support type inquiry through isa, cast, and dyn_cast:
2901 static bool classof(const Instruction *I) {
2902 return I->getOpcode() == Instruction::LandingPad;
2903 }
2904 static bool classof(const Value *V) {
2905 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2906 }
2907};
2908
2909template <>
2910struct OperandTraits<LandingPadInst> : public HungoffOperandTraits<1> {
2911};
2912
2913DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value)LandingPadInst::op_iterator LandingPadInst::op_begin() { return
OperandTraits<LandingPadInst>::op_begin(this); } LandingPadInst
::const_op_iterator LandingPadInst::op_begin() const { return
OperandTraits<LandingPadInst>::op_begin(const_cast<
LandingPadInst*>(this)); } LandingPadInst::op_iterator LandingPadInst
::op_end() { return OperandTraits<LandingPadInst>::op_end
(this); } LandingPadInst::const_op_iterator LandingPadInst::op_end
() const { return OperandTraits<LandingPadInst>::op_end
(const_cast<LandingPadInst*>(this)); } Value *LandingPadInst
::getOperand(unsigned i_nocapture) const { ((i_nocapture <
OperandTraits<LandingPadInst>::operands(this) &&
"getOperand() out of range!") ? static_cast<void> (0) :
__assert_fail ("i_nocapture < OperandTraits<LandingPadInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 2913, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<LandingPadInst>::op_begin(const_cast<
LandingPadInst*>(this))[i_nocapture].get()); } void LandingPadInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { ((
i_nocapture < OperandTraits<LandingPadInst>::operands
(this) && "setOperand() out of range!") ? static_cast
<void> (0) : __assert_fail ("i_nocapture < OperandTraits<LandingPadInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 2913, __PRETTY_FUNCTION__)); OperandTraits<LandingPadInst
>::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned
LandingPadInst::getNumOperands() const { return OperandTraits
<LandingPadInst>::operands(this); } template <int Idx_nocapture
> Use &LandingPadInst::Op() { return this->OpFrom<
Idx_nocapture>(this); } template <int Idx_nocapture>
const Use &LandingPadInst::Op() const { return this->
OpFrom<Idx_nocapture>(this); }
2914
2915//===----------------------------------------------------------------------===//
2916// ReturnInst Class
2917//===----------------------------------------------------------------------===//
2918
2919//===---------------------------------------------------------------------------
2920/// Return a value (possibly void), from a function. Execution
2921/// does not continue in this function any longer.
2922///
2923class ReturnInst : public Instruction {
2924 ReturnInst(const ReturnInst &RI);
2925
2926private:
2927 // ReturnInst constructors:
2928 // ReturnInst() - 'ret void' instruction
2929 // ReturnInst( null) - 'ret void' instruction
2930 // ReturnInst(Value* X) - 'ret X' instruction
2931 // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I
2932 // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I
2933 // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B
2934 // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B
2935 //
2936 // NOTE: If the Value* passed is of type void then the constructor behaves as
2937 // if it was passed NULL.
2938 explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr,
2939 Instruction *InsertBefore = nullptr);
2940 ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd);
2941 explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd);
2942
2943protected:
2944 // Note: Instruction needs to be a friend here to call cloneImpl.
2945 friend class Instruction;
2946
2947 ReturnInst *cloneImpl() const;
2948
2949public:
2950 static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr,
2951 Instruction *InsertBefore = nullptr) {
2952 return new(!!retVal) ReturnInst(C, retVal, InsertBefore);
2953 }
2954
2955 static ReturnInst* Create(LLVMContext &C, Value *retVal,
2956 BasicBlock *InsertAtEnd) {
2957 return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd);
2958 }
2959
2960 static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
2961 return new(0) ReturnInst(C, InsertAtEnd);
2962 }
2963
2964 /// Provide fast operand accessors
2965 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2966
2967 /// Convenience accessor. Returns null if there is no return value.
2968 Value *getReturnValue() const {
2969 return getNumOperands() != 0 ? getOperand(0) : nullptr;
2970 }
2971
2972 unsigned getNumSuccessors() const { return 0; }
2973
2974 // Methods for support type inquiry through isa, cast, and dyn_cast:
2975 static bool classof(const Instruction *I) {
2976 return (I->getOpcode() == Instruction::Ret);
2977 }
2978 static bool classof(const Value *V) {
2979 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2980 }
2981
2982private:
2983 BasicBlock *getSuccessor(unsigned idx) const {
2984 llvm_unreachable("ReturnInst has no successors!")::llvm::llvm_unreachable_internal("ReturnInst has no successors!"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 2984)
;
2985 }
2986
2987 void setSuccessor(unsigned idx, BasicBlock *B) {
2988 llvm_unreachable("ReturnInst has no successors!")::llvm::llvm_unreachable_internal("ReturnInst has no successors!"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 2988)
;
2989 }
2990};
2991
2992template <>
2993struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {
2994};
2995
2996DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value)ReturnInst::op_iterator ReturnInst::op_begin() { return OperandTraits
<ReturnInst>::op_begin(this); } ReturnInst::const_op_iterator
ReturnInst::op_begin() const { return OperandTraits<ReturnInst
>::op_begin(const_cast<ReturnInst*>(this)); } ReturnInst
::op_iterator ReturnInst::op_end() { return OperandTraits<
ReturnInst>::op_end(this); } ReturnInst::const_op_iterator
ReturnInst::op_end() const { return OperandTraits<ReturnInst
>::op_end(const_cast<ReturnInst*>(this)); } Value *ReturnInst
::getOperand(unsigned i_nocapture) const { ((i_nocapture <
OperandTraits<ReturnInst>::operands(this) && "getOperand() out of range!"
) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<ReturnInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 2996, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<ReturnInst>::op_begin(const_cast<ReturnInst
*>(this))[i_nocapture].get()); } void ReturnInst::setOperand
(unsigned i_nocapture, Value *Val_nocapture) { ((i_nocapture <
OperandTraits<ReturnInst>::operands(this) && "setOperand() out of range!"
) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<ReturnInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 2996, __PRETTY_FUNCTION__)); OperandTraits<ReturnInst>
::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned ReturnInst
::getNumOperands() const { return OperandTraits<ReturnInst
>::operands(this); } template <int Idx_nocapture> Use
&ReturnInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
ReturnInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
2997
2998//===----------------------------------------------------------------------===//
2999// BranchInst Class
3000//===----------------------------------------------------------------------===//
3001
3002//===---------------------------------------------------------------------------
3003/// Conditional or Unconditional Branch instruction.
3004///
3005class BranchInst : public Instruction {
3006 /// Ops list - Branches are strange. The operands are ordered:
3007 /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because
3008 /// they don't have to check for cond/uncond branchness. These are mostly
3009 /// accessed relative from op_end().
3010 BranchInst(const BranchInst &BI);
3011 // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
3012 // BranchInst(BB *B) - 'br B'
3013 // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F'
3014 // BranchInst(BB* B, Inst *I) - 'br B' insert before I
3015 // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
3016 // BranchInst(BB* B, BB *I) - 'br B' insert at end
3017 // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end
3018 explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr);
3019 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3020 Instruction *InsertBefore = nullptr);
3021 BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd);
3022 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3023 BasicBlock *InsertAtEnd);
3024
3025 void AssertOK();
3026
3027protected:
3028 // Note: Instruction needs to be a friend here to call cloneImpl.
3029 friend class Instruction;
3030
3031 BranchInst *cloneImpl() const;
3032
3033public:
3034 /// Iterator type that casts an operand to a basic block.
3035 ///
3036 /// This only makes sense because the successors are stored as adjacent
3037 /// operands for branch instructions.
3038 struct succ_op_iterator
3039 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3040 std::random_access_iterator_tag, BasicBlock *,
3041 ptrdiff_t, BasicBlock *, BasicBlock *> {
3042 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
3043
3044 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3045 BasicBlock *operator->() const { return operator*(); }
3046 };
3047
3048 /// The const version of `succ_op_iterator`.
3049 struct const_succ_op_iterator
3050 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3051 std::random_access_iterator_tag,
3052 const BasicBlock *, ptrdiff_t, const BasicBlock *,
3053 const BasicBlock *> {
3054 explicit const_succ_op_iterator(const_value_op_iterator I)
3055 : iterator_adaptor_base(I) {}
3056
3057 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3058 const BasicBlock *operator->() const { return operator*(); }
3059 };
3060
3061 static BranchInst *Create(BasicBlock *IfTrue,
3062 Instruction *InsertBefore = nullptr) {
3063 return new(1) BranchInst(IfTrue, InsertBefore);
3064 }
3065
3066 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3067 Value *Cond, Instruction *InsertBefore = nullptr) {
3068 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
3069 }
3070
3071 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) {
3072 return new(1) BranchInst(IfTrue, InsertAtEnd);
3073 }
3074
3075 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3076 Value *Cond, BasicBlock *InsertAtEnd) {
3077 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd);
3078 }
3079
3080 /// Transparently provide more efficient getOperand methods.
3081 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
3082
3083 bool isUnconditional() const { return getNumOperands() == 1; }
3084 bool isConditional() const { return getNumOperands() == 3; }
3085
3086 Value *getCondition() const {
3087 assert(isConditional() && "Cannot get condition of an uncond branch!")((isConditional() && "Cannot get condition of an uncond branch!"
) ? static_cast<void> (0) : __assert_fail ("isConditional() && \"Cannot get condition of an uncond branch!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 3087, __PRETTY_FUNCTION__))
;
3088 return Op<-3>();
3089 }
3090
3091 void setCondition(Value *V) {
3092 assert(isConditional() && "Cannot set condition of unconditional branch!")((isConditional() && "Cannot set condition of unconditional branch!"
) ? static_cast<void> (0) : __assert_fail ("isConditional() && \"Cannot set condition of unconditional branch!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 3092, __PRETTY_FUNCTION__))
;
3093 Op<-3>() = V;
3094 }
3095
3096 unsigned getNumSuccessors() const { return 1+isConditional(); }
3097
3098 BasicBlock *getSuccessor(unsigned i) const {
3099 assert(i < getNumSuccessors() && "Successor # out of range for Branch!")((i < getNumSuccessors() && "Successor # out of range for Branch!"
) ? static_cast<void> (0) : __assert_fail ("i < getNumSuccessors() && \"Successor # out of range for Branch!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 3099, __PRETTY_FUNCTION__))
;
3100 return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
3101 }
3102
3103 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3104 assert(idx < getNumSuccessors() && "Successor # out of range for Branch!")((idx < getNumSuccessors() && "Successor # out of range for Branch!"
) ? static_cast<void> (0) : __assert_fail ("idx < getNumSuccessors() && \"Successor # out of range for Branch!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 3104, __PRETTY_FUNCTION__))
;
3105 *(&Op<-1>() - idx) = NewSucc;
3106 }
3107
3108 /// Swap the successors of this branch instruction.
3109 ///
3110 /// Swaps the successors of the branch instruction. This also swaps any
3111 /// branch weight metadata associated with the instruction so that it
3112 /// continues to map correctly to each operand.
3113 void swapSuccessors();
3114
3115 iterator_range<succ_op_iterator> successors() {
3116 return make_range(
3117 succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)),
3118 succ_op_iterator(value_op_end()));
3119 }
3120
3121 iterator_range<const_succ_op_iterator> successors() const {
3122 return make_range(const_succ_op_iterator(
3123 std::next(value_op_begin(), isConditional() ? 1 : 0)),
3124 const_succ_op_iterator(value_op_end()));
3125 }
3126
3127 // Methods for support type inquiry through isa, cast, and dyn_cast:
3128 static bool classof(const Instruction *I) {
3129 return (I->getOpcode() == Instruction::Br);
3130 }
3131 static bool classof(const Value *V) {
3132 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3133 }
3134};
3135
3136template <>
3137struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> {
3138};
3139
// Expansion of DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value):
// out-of-line definitions for the operand accessors declared in-class by
// DECLARE_TRANSPARENT_OPERAND_ACCESSORS.
BranchInst::op_iterator BranchInst::op_begin() {
  return OperandTraits<BranchInst>::op_begin(this);
}
BranchInst::const_op_iterator BranchInst::op_begin() const {
  return OperandTraits<BranchInst>::op_begin(const_cast<BranchInst *>(this));
}
BranchInst::op_iterator BranchInst::op_end() {
  return OperandTraits<BranchInst>::op_end(this);
}
BranchInst::const_op_iterator BranchInst::op_end() const {
  return OperandTraits<BranchInst>::op_end(const_cast<BranchInst *>(this));
}
Value *BranchInst::getOperand(unsigned i_nocapture) const {
  assert(i_nocapture < OperandTraits<BranchInst>::operands(this) &&
         "getOperand() out of range!");
  return cast_or_null<Value>(
      OperandTraits<BranchInst>::op_begin(
          const_cast<BranchInst *>(this))[i_nocapture]
          .get());
}
void BranchInst::setOperand(unsigned i_nocapture, Value *Val_nocapture) {
  assert(i_nocapture < OperandTraits<BranchInst>::operands(this) &&
         "setOperand() out of range!");
  OperandTraits<BranchInst>::op_begin(this)[i_nocapture] = Val_nocapture;
}
unsigned BranchInst::getNumOperands() const {
  return OperandTraits<BranchInst>::operands(this);
}
template <int Idx_nocapture> Use &BranchInst::Op() {
  return this->OpFrom<Idx_nocapture>(this);
}
template <int Idx_nocapture> const Use &BranchInst::Op() const {
  return this->OpFrom<Idx_nocapture>(this);
}
3141
3142//===----------------------------------------------------------------------===//
3143// SwitchInst Class
3144//===----------------------------------------------------------------------===//
3145
3146//===---------------------------------------------------------------------------
3147/// Multiway switch
3148///
3149class SwitchInst : public Instruction {
3150 unsigned ReservedSpace;
3151
3152 // Operand[0] = Value to switch on
3153 // Operand[1] = Default basic block destination
3154 // Operand[2n ] = Value to match
3155 // Operand[2n+1] = BasicBlock to go to on match
3156 SwitchInst(const SwitchInst &SI);
3157
3158 /// Create a new switch instruction, specifying a value to switch on and a
3159 /// default destination. The number of additional cases can be specified here
3160 /// to make memory allocation more efficient. This constructor can also
3161 /// auto-insert before another instruction.
3162 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3163 Instruction *InsertBefore);
3164
3165 /// Create a new switch instruction, specifying a value to switch on and a
3166 /// default destination. The number of additional cases can be specified here
3167 /// to make memory allocation more efficient. This constructor also
3168 /// auto-inserts at the end of the specified BasicBlock.
3169 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3170 BasicBlock *InsertAtEnd);
3171
3172 // allocate space for exactly zero operands
3173 void *operator new(size_t s) {
3174 return User::operator new(s);
3175 }
3176
3177 void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
3178 void growOperands();
3179
3180protected:
3181 // Note: Instruction needs to be a friend here to call cloneImpl.
3182 friend class Instruction;
3183
3184 SwitchInst *cloneImpl() const;
3185
3186public:
3187 // -2
3188 static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);
3189
3190 template <typename CaseHandleT> class CaseIteratorImpl;
3191
3192 /// A handle to a particular switch case. It exposes a convenient interface
3193 /// to both the case value and the successor block.
3194 ///
3195 /// We define this as a template and instantiate it to form both a const and
3196 /// non-const handle.
3197 template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT>
3198 class CaseHandleImpl {
3199 // Directly befriend both const and non-const iterators.
3200 friend class SwitchInst::CaseIteratorImpl<
3201 CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>;
3202
3203 protected:
3204 // Expose the switch type we're parameterized with to the iterator.
3205 using SwitchInstType = SwitchInstT;
3206
3207 SwitchInstT *SI;
3208 ptrdiff_t Index;
3209
3210 CaseHandleImpl() = default;
3211 CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {}
3212
3213 public:
3214 /// Resolves case value for current case.
3215 ConstantIntT *getCaseValue() const {
3216 assert((unsigned)Index < SI->getNumCases() &&(((unsigned)Index < SI->getNumCases() && "Index out the number of cases."
) ? static_cast<void> (0) : __assert_fail ("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 3217, __PRETTY_FUNCTION__))
3217 "Index out the number of cases.")(((unsigned)Index < SI->getNumCases() && "Index out the number of cases."
) ? static_cast<void> (0) : __assert_fail ("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 3217, __PRETTY_FUNCTION__))
;
3218 return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2));
3219 }
3220
3221 /// Resolves successor for current case.
3222 BasicBlockT *getCaseSuccessor() const {
3223 assert(((unsigned)Index < SI->getNumCases() ||((((unsigned)Index < SI->getNumCases() || (unsigned)Index
== DefaultPseudoIndex) && "Index out the number of cases."
) ? static_cast<void> (0) : __assert_fail ("((unsigned)Index < SI->getNumCases() || (unsigned)Index == DefaultPseudoIndex) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 3225, __PRETTY_FUNCTION__))
3224 (unsigned)Index == DefaultPseudoIndex) &&((((unsigned)Index < SI->getNumCases() || (unsigned)Index
== DefaultPseudoIndex) && "Index out the number of cases."
) ? static_cast<void> (0) : __assert_fail ("((unsigned)Index < SI->getNumCases() || (unsigned)Index == DefaultPseudoIndex) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 3225, __PRETTY_FUNCTION__))
3225 "Index out the number of cases.")((((unsigned)Index < SI->getNumCases() || (unsigned)Index
== DefaultPseudoIndex) && "Index out the number of cases."
) ? static_cast<void> (0) : __assert_fail ("((unsigned)Index < SI->getNumCases() || (unsigned)Index == DefaultPseudoIndex) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 3225, __PRETTY_FUNCTION__))
;
3226 return SI->getSuccessor(getSuccessorIndex());
3227 }
3228
3229 /// Returns number of current case.
3230 unsigned getCaseIndex() const { return Index; }
3231
3232 /// Returns successor index for current case successor.
3233 unsigned getSuccessorIndex() const {
3234 assert(((unsigned)Index == DefaultPseudoIndex ||((((unsigned)Index == DefaultPseudoIndex || (unsigned)Index <
SI->getNumCases()) && "Index out the number of cases."
) ? static_cast<void> (0) : __assert_fail ("((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 3236, __PRETTY_FUNCTION__))
3235 (unsigned)Index < SI->getNumCases()) &&((((unsigned)Index == DefaultPseudoIndex || (unsigned)Index <
SI->getNumCases()) && "Index out the number of cases."
) ? static_cast<void> (0) : __assert_fail ("((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 3236, __PRETTY_FUNCTION__))
3236 "Index out the number of cases.")((((unsigned)Index == DefaultPseudoIndex || (unsigned)Index <
SI->getNumCases()) && "Index out the number of cases."
) ? static_cast<void> (0) : __assert_fail ("((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 3236, __PRETTY_FUNCTION__))
;
3237 return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0;
3238 }
3239
3240 bool operator==(const CaseHandleImpl &RHS) const {
3241 assert(SI == RHS.SI && "Incompatible operators.")((SI == RHS.SI && "Incompatible operators.") ? static_cast
<void> (0) : __assert_fail ("SI == RHS.SI && \"Incompatible operators.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 3241, __PRETTY_FUNCTION__))
;
3242 return Index == RHS.Index;
3243 }
3244 };
3245
3246 using ConstCaseHandle =
3247 CaseHandleImpl<const SwitchInst, const ConstantInt, const BasicBlock>;
3248
3249 class CaseHandle
3250 : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> {
3251 friend class SwitchInst::CaseIteratorImpl<CaseHandle>;
3252
3253 public:
3254 CaseHandle(SwitchInst *SI, ptrdiff_t Index) : CaseHandleImpl(SI, Index) {}
3255
3256 /// Sets the new value for current case.
3257 void setValue(ConstantInt *V) {
3258 assert((unsigned)Index < SI->getNumCases() &&(((unsigned)Index < SI->getNumCases() && "Index out the number of cases."
) ? static_cast<void> (0) : __assert_fail ("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 3259, __PRETTY_FUNCTION__))
3259 "Index out the number of cases.")(((unsigned)Index < SI->getNumCases() && "Index out the number of cases."
) ? static_cast<void> (0) : __assert_fail ("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 3259, __PRETTY_FUNCTION__))
;
3260 SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
3261 }
3262
3263 /// Sets the new successor for current case.
3264 void setSuccessor(BasicBlock *S) {
3265 SI->setSuccessor(getSuccessorIndex(), S);
3266 }
3267 };
3268
3269 template <typename CaseHandleT>
3270 class CaseIteratorImpl
3271 : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>,
3272 std::random_access_iterator_tag,
3273 CaseHandleT> {
3274 using SwitchInstT = typename CaseHandleT::SwitchInstType;
3275
3276 CaseHandleT Case;
3277
3278 public:
3279 /// Default constructed iterator is in an invalid state until assigned to
3280 /// a case for a particular switch.
3281 CaseIteratorImpl() = default;
3282
3283 /// Initializes case iterator for given SwitchInst and for given
3284 /// case number.
3285 CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {}
3286
3287 /// Initializes case iterator for given SwitchInst and for given
3288 /// successor index.
3289 static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI,
3290 unsigned SuccessorIndex) {
3291 assert(SuccessorIndex < SI->getNumSuccessors() &&((SuccessorIndex < SI->getNumSuccessors() && "Successor index # out of range!"
) ? static_cast<void> (0) : __assert_fail ("SuccessorIndex < SI->getNumSuccessors() && \"Successor index # out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 3292, __PRETTY_FUNCTION__))
3292 "Successor index # out of range!")((SuccessorIndex < SI->getNumSuccessors() && "Successor index # out of range!"
) ? static_cast<void> (0) : __assert_fail ("SuccessorIndex < SI->getNumSuccessors() && \"Successor index # out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 3292, __PRETTY_FUNCTION__))
;
3293 return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1)
3294 : CaseIteratorImpl(SI, DefaultPseudoIndex);
3295 }
3296
3297 /// Support converting to the const variant. This will be a no-op for const
3298 /// variant.
3299 operator CaseIteratorImpl<ConstCaseHandle>() const {
3300 return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index);
3301 }
3302
3303 CaseIteratorImpl &operator+=(ptrdiff_t N) {
3304 // Check index correctness after addition.
3305 // Note: Index == getNumCases() means end().
3306 assert(Case.Index + N >= 0 &&((Case.Index + N >= 0 && (unsigned)(Case.Index + N
) <= Case.SI->getNumCases() && "Case.Index out the number of cases."
) ? static_cast<void> (0) : __assert_fail ("Case.Index + N >= 0 && (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 3308, __PRETTY_FUNCTION__))
3307 (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&((Case.Index + N >= 0 && (unsigned)(Case.Index + N
) <= Case.SI->getNumCases() && "Case.Index out the number of cases."
) ? static_cast<void> (0) : __assert_fail ("Case.Index + N >= 0 && (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 3308, __PRETTY_FUNCTION__))
3308 "Case.Index out the number of cases.")((Case.Index + N >= 0 && (unsigned)(Case.Index + N
) <= Case.SI->getNumCases() && "Case.Index out the number of cases."
) ? static_cast<void> (0) : __assert_fail ("Case.Index + N >= 0 && (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 3308, __PRETTY_FUNCTION__))
;
3309 Case.Index += N;
3310 return *this;
3311 }
3312 CaseIteratorImpl &operator-=(ptrdiff_t N) {
3313 // Check index correctness after subtraction.
3314 // Note: Case.Index == getNumCases() means end().
3315 assert(Case.Index - N >= 0 &&((Case.Index - N >= 0 && (unsigned)(Case.Index - N
) <= Case.SI->getNumCases() && "Case.Index out the number of cases."
) ? static_cast<void> (0) : __assert_fail ("Case.Index - N >= 0 && (unsigned)(Case.Index - N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 3317, __PRETTY_FUNCTION__))
3316 (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&((Case.Index - N >= 0 && (unsigned)(Case.Index - N
) <= Case.SI->getNumCases() && "Case.Index out the number of cases."
) ? static_cast<void> (0) : __assert_fail ("Case.Index - N >= 0 && (unsigned)(Case.Index - N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 3317, __PRETTY_FUNCTION__))
3317 "Case.Index out the number of cases.")((Case.Index - N >= 0 && (unsigned)(Case.Index - N
) <= Case.SI->getNumCases() && "Case.Index out the number of cases."
) ? static_cast<void> (0) : __assert_fail ("Case.Index - N >= 0 && (unsigned)(Case.Index - N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 3317, __PRETTY_FUNCTION__))
;
3318 Case.Index -= N;
3319 return *this;
3320 }
3321 ptrdiff_t operator-(const CaseIteratorImpl &RHS) const {
3322 assert(Case.SI == RHS.Case.SI && "Incompatible operators.")((Case.SI == RHS.Case.SI && "Incompatible operators."
) ? static_cast<void> (0) : __assert_fail ("Case.SI == RHS.Case.SI && \"Incompatible operators.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 3322, __PRETTY_FUNCTION__))
;
3323 return Case.Index - RHS.Case.Index;
3324 }
3325 bool operator==(const CaseIteratorImpl &RHS) const {
3326 return Case == RHS.Case;
3327 }
3328 bool operator<(const CaseIteratorImpl &RHS) const {
3329 assert(Case.SI == RHS.Case.SI && "Incompatible operators.")((Case.SI == RHS.Case.SI && "Incompatible operators."
) ? static_cast<void> (0) : __assert_fail ("Case.SI == RHS.Case.SI && \"Incompatible operators.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 3329, __PRETTY_FUNCTION__))
;
3330 return Case.Index < RHS.Case.Index;
3331 }
3332 CaseHandleT &operator*() { return Case; }
3333 const CaseHandleT &operator*() const { return Case; }
3334 };
3335
3336 using CaseIt = CaseIteratorImpl<CaseHandle>;
3337 using ConstCaseIt = CaseIteratorImpl<ConstCaseHandle>;
3338
3339 static SwitchInst *Create(Value *Value, BasicBlock *Default,
3340 unsigned NumCases,
3341 Instruction *InsertBefore = nullptr) {
3342 return new SwitchInst(Value, Default, NumCases, InsertBefore);
3343 }
3344
3345 static SwitchInst *Create(Value *Value, BasicBlock *Default,
3346 unsigned NumCases, BasicBlock *InsertAtEnd) {
3347 return new SwitchInst(Value, Default, NumCases, InsertAtEnd);
3348 }
3349
3350 /// Provide fast operand accessors
3351 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
3352
3353 // Accessor Methods for Switch stmt
3354 Value *getCondition() const { return getOperand(0); }
3355 void setCondition(Value *V) { setOperand(0, V); }
3356
3357 BasicBlock *getDefaultDest() const {
3358 return cast<BasicBlock>(getOperand(1));
3359 }
3360
3361 void setDefaultDest(BasicBlock *DefaultCase) {
3362 setOperand(1, reinterpret_cast<Value*>(DefaultCase));
3363 }
3364
3365 /// Return the number of 'cases' in this switch instruction, excluding the
3366 /// default case.
3367 unsigned getNumCases() const {
3368 return getNumOperands()/2 - 1;
3369 }
3370
3371 /// Returns a read/write iterator that points to the first case in the
3372 /// SwitchInst.
3373 CaseIt case_begin() {
3374 return CaseIt(this, 0);
3375 }
3376
3377 /// Returns a read-only iterator that points to the first case in the
3378 /// SwitchInst.
3379 ConstCaseIt case_begin() const {
3380 return ConstCaseIt(this, 0);
3381 }
3382
3383 /// Returns a read/write iterator that points one past the last in the
3384 /// SwitchInst.
3385 CaseIt case_end() {
3386 return CaseIt(this, getNumCases());
3387 }
3388
3389 /// Returns a read-only iterator that points one past the last in the
3390 /// SwitchInst.
3391 ConstCaseIt case_end() const {
3392 return ConstCaseIt(this, getNumCases());
3393 }
3394
3395 /// Iteration adapter for range-for loops.
3396 iterator_range<CaseIt> cases() {
3397 return make_range(case_begin(), case_end());
3398 }
3399
3400 /// Constant iteration adapter for range-for loops.
3401 iterator_range<ConstCaseIt> cases() const {
3402 return make_range(case_begin(), case_end());
3403 }
3404
3405 /// Returns an iterator that points to the default case.
3406 /// Note: this iterator allows to resolve successor only. Attempt
3407 /// to resolve case value causes an assertion.
3408 /// Also note, that increment and decrement also causes an assertion and
3409 /// makes iterator invalid.
3410 CaseIt case_default() {
3411 return CaseIt(this, DefaultPseudoIndex);
3412 }
3413 ConstCaseIt case_default() const {
3414 return ConstCaseIt(this, DefaultPseudoIndex);
3415 }
3416
3417 /// Search all of the case values for the specified constant. If it is
3418 /// explicitly handled, return the case iterator of it, otherwise return
3419 /// default case iterator to indicate that it is handled by the default
3420 /// handler.
3421 CaseIt findCaseValue(const ConstantInt *C) {
3422 CaseIt I = llvm::find_if(
3423 cases(), [C](CaseHandle &Case) { return Case.getCaseValue() == C; });
3424 if (I != case_end())
3425 return I;
3426
3427 return case_default();
3428 }
3429 ConstCaseIt findCaseValue(const ConstantInt *C) const {
3430 ConstCaseIt I = llvm::find_if(cases(), [C](ConstCaseHandle &Case) {
3431 return Case.getCaseValue() == C;
3432 });
3433 if (I != case_end())
3434 return I;
3435
3436 return case_default();
3437 }
3438
3439 /// Finds the unique case value for a given successor. Returns null if the
3440 /// successor is not found, not unique, or is the default case.
3441 ConstantInt *findCaseDest(BasicBlock *BB) {
3442 if (BB == getDefaultDest())
3443 return nullptr;
3444
3445 ConstantInt *CI = nullptr;
3446 for (auto Case : cases()) {
3447 if (Case.getCaseSuccessor() != BB)
3448 continue;
3449
3450 if (CI)
3451 return nullptr; // Multiple cases lead to BB.
3452
3453 CI = Case.getCaseValue();
3454 }
3455
3456 return CI;
3457 }
3458
3459 /// Add an entry to the switch instruction.
3460 /// Note:
3461 /// This action invalidates case_end(). Old case_end() iterator will
3462 /// point to the added case.
3463 void addCase(ConstantInt *OnVal, BasicBlock *Dest);
3464
3465 /// This method removes the specified case and its successor from the switch
3466 /// instruction. Note that this operation may reorder the remaining cases at
3467 /// index idx and above.
3468 /// Note:
3469 /// This action invalidates iterators for all cases following the one removed,
3470 /// including the case_end() iterator. It returns an iterator for the next
3471 /// case.
3472 CaseIt removeCase(CaseIt I);
3473
3474 unsigned getNumSuccessors() const { return getNumOperands()/2; }
3475 BasicBlock *getSuccessor(unsigned idx) const {
3476 assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!")((idx < getNumSuccessors() &&"Successor idx out of range for switch!"
) ? static_cast<void> (0) : __assert_fail ("idx < getNumSuccessors() &&\"Successor idx out of range for switch!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 3476, __PRETTY_FUNCTION__))
;
3477 return cast<BasicBlock>(getOperand(idx*2+1));
3478 }
3479 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3480 assert(idx < getNumSuccessors() && "Successor # out of range for switch!")((idx < getNumSuccessors() && "Successor # out of range for switch!"
) ? static_cast<void> (0) : __assert_fail ("idx < getNumSuccessors() && \"Successor # out of range for switch!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 3480, __PRETTY_FUNCTION__))
;
3481 setOperand(idx * 2 + 1, NewSucc);
3482 }
3483
3484 // Methods for support type inquiry through isa, cast, and dyn_cast:
3485 static bool classof(const Instruction *I) {
3486 return I->getOpcode() == Instruction::Switch;
3487 }
3488 static bool classof(const Value *V) {
3489 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3490 }
3491};
3492
3493/// A wrapper class to simplify modification of SwitchInst cases along with
3494/// their prof branch_weights metadata.
3495class SwitchInstProfUpdateWrapper {
3496 SwitchInst &SI;
3497 Optional<SmallVector<uint32_t, 8> > Weights = None;
3498 bool Changed = false;
3499
3500protected:
3501 static MDNode *getProfBranchWeightsMD(const SwitchInst &SI);
3502
3503 MDNode *buildProfBranchWeightsMD();
3504
3505 void init();
3506
3507public:
3508 using CaseWeightOpt = Optional<uint32_t>;
3509 SwitchInst *operator->() { return &SI; }
3510 SwitchInst &operator*() { return SI; }
3511 operator SwitchInst *() { return &SI; }
3512
3513 SwitchInstProfUpdateWrapper(SwitchInst &SI) : SI(SI) { init(); }
3514
3515 ~SwitchInstProfUpdateWrapper() {
3516 if (Changed)
3517 SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD());
3518 }
3519
3520 /// Delegate the call to the underlying SwitchInst::removeCase() and remove
3521 /// correspondent branch weight.
3522 SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I);
3523
3524 /// Delegate the call to the underlying SwitchInst::addCase() and set the
3525 /// specified branch weight for the added case.
3526 void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W);
3527
3528 /// Delegate the call to the underlying SwitchInst::eraseFromParent() and mark
3529 /// this object to not touch the underlying SwitchInst in destructor.
3530 SymbolTableList<Instruction>::iterator eraseFromParent();
3531
3532 void setSuccessorWeight(unsigned idx, CaseWeightOpt W);
3533 CaseWeightOpt getSuccessorWeight(unsigned idx);
3534
3535 static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx);
3536};
3537
3538template <>
3539struct OperandTraits<SwitchInst> : public HungoffOperandTraits<2> {
3540};
3541
// Expansion of DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value):
// out-of-line definitions for the operand accessors declared in-class by
// DECLARE_TRANSPARENT_OPERAND_ACCESSORS.
SwitchInst::op_iterator SwitchInst::op_begin() {
  return OperandTraits<SwitchInst>::op_begin(this);
}
SwitchInst::const_op_iterator SwitchInst::op_begin() const {
  return OperandTraits<SwitchInst>::op_begin(const_cast<SwitchInst *>(this));
}
SwitchInst::op_iterator SwitchInst::op_end() {
  return OperandTraits<SwitchInst>::op_end(this);
}
SwitchInst::const_op_iterator SwitchInst::op_end() const {
  return OperandTraits<SwitchInst>::op_end(const_cast<SwitchInst *>(this));
}
Value *SwitchInst::getOperand(unsigned i_nocapture) const {
  assert(i_nocapture < OperandTraits<SwitchInst>::operands(this) &&
         "getOperand() out of range!");
  return cast_or_null<Value>(
      OperandTraits<SwitchInst>::op_begin(
          const_cast<SwitchInst *>(this))[i_nocapture]
          .get());
}
void SwitchInst::setOperand(unsigned i_nocapture, Value *Val_nocapture) {
  assert(i_nocapture < OperandTraits<SwitchInst>::operands(this) &&
         "setOperand() out of range!");
  OperandTraits<SwitchInst>::op_begin(this)[i_nocapture] = Val_nocapture;
}
unsigned SwitchInst::getNumOperands() const {
  return OperandTraits<SwitchInst>::operands(this);
}
template <int Idx_nocapture> Use &SwitchInst::Op() {
  return this->OpFrom<Idx_nocapture>(this);
}
template <int Idx_nocapture> const Use &SwitchInst::Op() const {
  return this->OpFrom<Idx_nocapture>(this);
}
3543
3544//===----------------------------------------------------------------------===//
3545// IndirectBrInst Class
3546//===----------------------------------------------------------------------===//
3547
3548//===---------------------------------------------------------------------------
3549/// Indirect Branch Instruction.
3550///
3551class IndirectBrInst : public Instruction {
3552 unsigned ReservedSpace;
3553
3554 // Operand[0] = Address to jump to
3555 // Operand[n+1] = n-th destination
3556 IndirectBrInst(const IndirectBrInst &IBI);
3557
3558 /// Create a new indirectbr instruction, specifying an
3559 /// Address to jump to. The number of expected destinations can be specified
3560 /// here to make memory allocation more efficient. This constructor can also
3561 /// autoinsert before another instruction.
3562 IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore);
3563
3564 /// Create a new indirectbr instruction, specifying an
3565 /// Address to jump to. The number of expected destinations can be specified
3566 /// here to make memory allocation more efficient. This constructor also
3567 /// autoinserts at the end of the specified BasicBlock.
3568 IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd);
3569
3570 // allocate space for exactly zero operands
3571 void *operator new(size_t s) {
3572 return User::operator new(s);
3573 }
3574
3575 void init(Value *Address, unsigned NumDests);
3576 void growOperands();
3577
3578protected:
3579 // Note: Instruction needs to be a friend here to call cloneImpl.
3580 friend class Instruction;
3581
3582 IndirectBrInst *cloneImpl() const;
3583
3584public:
3585 /// Iterator type that casts an operand to a basic block.
3586 ///
3587 /// This only makes sense because the successors are stored as adjacent
3588 /// operands for indirectbr instructions.
3589 struct succ_op_iterator
3590 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3591 std::random_access_iterator_tag, BasicBlock *,
3592 ptrdiff_t, BasicBlock *, BasicBlock *> {
3593 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
3594
3595 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3596 BasicBlock *operator->() const { return operator*(); }
3597 };
3598
3599 /// The const version of `succ_op_iterator`.
3600 struct const_succ_op_iterator
3601 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3602 std::random_access_iterator_tag,
3603 const BasicBlock *, ptrdiff_t, const BasicBlock *,
3604 const BasicBlock *> {
3605 explicit const_succ_op_iterator(const_value_op_iterator I)
3606 : iterator_adaptor_base(I) {}
3607
3608 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3609 const BasicBlock *operator->() const { return operator*(); }
3610 };
3611
3612 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3613 Instruction *InsertBefore = nullptr) {
3614 return new IndirectBrInst(Address, NumDests, InsertBefore);
3615 }
3616
3617 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3618 BasicBlock *InsertAtEnd) {
3619 return new IndirectBrInst(Address, NumDests, InsertAtEnd);
3620 }
3621
3622 /// Provide fast operand accessors.
3623 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
3624
3625 // Accessor Methods for IndirectBrInst instruction.
3626 Value *getAddress() { return getOperand(0); }
3627 const Value *getAddress() const { return getOperand(0); }
3628 void setAddress(Value *V) { setOperand(0, V); }
3629
3630 /// return the number of possible destinations in this
3631 /// indirectbr instruction.
3632 unsigned getNumDestinations() const { return getNumOperands()-1; }
3633
3634 /// Return the specified destination.
3635 BasicBlock *getDestination(unsigned i) { return getSuccessor(i); }
3636 const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); }
3637
3638 /// Add a destination.
3639 ///
3640 void addDestination(BasicBlock *Dest);
3641
3642 /// This method removes the specified successor from the
3643 /// indirectbr instruction.
3644 void removeDestination(unsigned i);
3645
3646 unsigned getNumSuccessors() const { return getNumOperands()-1; }
3647 BasicBlock *getSuccessor(unsigned i) const {
3648 return cast<BasicBlock>(getOperand(i+1));
3649 }
3650 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3651 setOperand(i + 1, NewSucc);
3652 }
3653
3654 iterator_range<succ_op_iterator> successors() {
3655 return make_range(succ_op_iterator(std::next(value_op_begin())),
3656 succ_op_iterator(value_op_end()));
3657 }
3658
3659 iterator_range<const_succ_op_iterator> successors() const {
3660 return make_range(const_succ_op_iterator(std::next(value_op_begin())),
3661 const_succ_op_iterator(value_op_end()));
3662 }
3663
3664 // Methods for support type inquiry through isa, cast, and dyn_cast:
3665 static bool classof(const Instruction *I) {
3666 return I->getOpcode() == Instruction::IndirectBr;
3667 }
3668 static bool classof(const Value *V) {
3669 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3670 }
3671};
3672
3673template <>
3674struct OperandTraits<IndirectBrInst> : public HungoffOperandTraits<1> {
3675};
3676
3677DEFINE_TRANSPARENT_OPERAND_ACCESSORS(IndirectBrInst, Value)IndirectBrInst::op_iterator IndirectBrInst::op_begin() { return
OperandTraits<IndirectBrInst>::op_begin(this); } IndirectBrInst
::const_op_iterator IndirectBrInst::op_begin() const { return
OperandTraits<IndirectBrInst>::op_begin(const_cast<
IndirectBrInst*>(this)); } IndirectBrInst::op_iterator IndirectBrInst
::op_end() { return OperandTraits<IndirectBrInst>::op_end
(this); } IndirectBrInst::const_op_iterator IndirectBrInst::op_end
() const { return OperandTraits<IndirectBrInst>::op_end
(const_cast<IndirectBrInst*>(this)); } Value *IndirectBrInst
::getOperand(unsigned i_nocapture) const { ((i_nocapture <
OperandTraits<IndirectBrInst>::operands(this) &&
"getOperand() out of range!") ? static_cast<void> (0) :
__assert_fail ("i_nocapture < OperandTraits<IndirectBrInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 3677, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<IndirectBrInst>::op_begin(const_cast<
IndirectBrInst*>(this))[i_nocapture].get()); } void IndirectBrInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { ((
i_nocapture < OperandTraits<IndirectBrInst>::operands
(this) && "setOperand() out of range!") ? static_cast
<void> (0) : __assert_fail ("i_nocapture < OperandTraits<IndirectBrInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 3677, __PRETTY_FUNCTION__)); OperandTraits<IndirectBrInst
>::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned
IndirectBrInst::getNumOperands() const { return OperandTraits
<IndirectBrInst>::operands(this); } template <int Idx_nocapture
> Use &IndirectBrInst::Op() { return this->OpFrom<
Idx_nocapture>(this); } template <int Idx_nocapture>
const Use &IndirectBrInst::Op() const { return this->
OpFrom<Idx_nocapture>(this); }
3678
3679//===----------------------------------------------------------------------===//
3680// InvokeInst Class
3681//===----------------------------------------------------------------------===//
3682
3683/// Invoke instruction. The SubclassData field is used to hold the
3684/// calling convention of the call.
3685///
3686class InvokeInst : public CallBase {
3687 /// The number of operands for this call beyond the called function,
3688 /// arguments, and operand bundles.
3689 static constexpr int NumExtraOperands = 2;
3690
3691 /// The index from the end of the operand array to the normal destination.
3692 static constexpr int NormalDestOpEndIdx = -3;
3693
3694 /// The index from the end of the operand array to the unwind destination.
3695 static constexpr int UnwindDestOpEndIdx = -2;
3696
3697 InvokeInst(const InvokeInst &BI);
3698
3699 /// Construct an InvokeInst given a range of arguments.
3700 ///
3701 /// Construct an InvokeInst from a range of arguments
3702 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3703 BasicBlock *IfException, ArrayRef<Value *> Args,
3704 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3705 const Twine &NameStr, Instruction *InsertBefore);
3706
3707 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3708 BasicBlock *IfException, ArrayRef<Value *> Args,
3709 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3710 const Twine &NameStr, BasicBlock *InsertAtEnd);
3711
3712 void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3713 BasicBlock *IfException, ArrayRef<Value *> Args,
3714 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
3715
3716 /// Compute the number of operands to allocate.
3717 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
3718 // We need one operand for the called function, plus our extra operands and
3719 // the input operand counts provided.
3720 return 1 + NumExtraOperands + NumArgs + NumBundleInputs;
3721 }
3722
3723protected:
3724 // Note: Instruction needs to be a friend here to call cloneImpl.
3725 friend class Instruction;
3726
3727 InvokeInst *cloneImpl() const;
3728
3729public:
3730 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3731 BasicBlock *IfException, ArrayRef<Value *> Args,
3732 const Twine &NameStr,
3733 Instruction *InsertBefore = nullptr) {
3734 int NumOperands = ComputeNumOperands(Args.size());
3735 return new (NumOperands)
3736 InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
3737 NameStr, InsertBefore);
3738 }
3739
3740 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3741 BasicBlock *IfException, ArrayRef<Value *> Args,
3742 ArrayRef<OperandBundleDef> Bundles = None,
3743 const Twine &NameStr = "",
3744 Instruction *InsertBefore = nullptr) {
3745 int NumOperands =
3746 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
3747 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3748
3749 return new (NumOperands, DescriptorBytes)
3750 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
3751 NameStr, InsertBefore);
3752 }
3753
3754 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3755 BasicBlock *IfException, ArrayRef<Value *> Args,
3756 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3757 int NumOperands = ComputeNumOperands(Args.size());
3758 return new (NumOperands)
3759 InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
3760 NameStr, InsertAtEnd);
3761 }
3762
3763 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3764 BasicBlock *IfException, ArrayRef<Value *> Args,
3765 ArrayRef<OperandBundleDef> Bundles,
3766 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3767 int NumOperands =
3768 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
3769 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3770
3771 return new (NumOperands, DescriptorBytes)
3772 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
3773 NameStr, InsertAtEnd);
3774 }
3775
3776 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3777 BasicBlock *IfException, ArrayRef<Value *> Args,
3778 const Twine &NameStr,
3779 Instruction *InsertBefore = nullptr) {
3780 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3781 IfException, Args, None, NameStr, InsertBefore);
3782 }
3783
3784 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3785 BasicBlock *IfException, ArrayRef<Value *> Args,
3786 ArrayRef<OperandBundleDef> Bundles = None,
3787 const Twine &NameStr = "",
3788 Instruction *InsertBefore = nullptr) {
3789 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3790 IfException, Args, Bundles, NameStr, InsertBefore);
3791 }
3792
3793 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3794 BasicBlock *IfException, ArrayRef<Value *> Args,
3795 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3796 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3797 IfException, Args, NameStr, InsertAtEnd);
3798 }
3799
3800 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3801 BasicBlock *IfException, ArrayRef<Value *> Args,
3802 ArrayRef<OperandBundleDef> Bundles,
3803 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3804 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3805 IfException, Args, Bundles, NameStr, InsertAtEnd);
3806 }
3807
3808 /// Create a clone of \p II with a different set of operand bundles and
3809 /// insert it before \p InsertPt.
3810 ///
3811 /// The returned invoke instruction is identical to \p II in every way except
3812 /// that the operand bundles for the new instruction are set to the operand
3813 /// bundles in \p Bundles.
3814 static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles,
3815 Instruction *InsertPt = nullptr);
3816
3817 // get*Dest - Return the destination basic blocks...
3818 BasicBlock *getNormalDest() const {
3819 return cast<BasicBlock>(Op<NormalDestOpEndIdx>());
3820 }
3821 BasicBlock *getUnwindDest() const {
3822 return cast<BasicBlock>(Op<UnwindDestOpEndIdx>());
3823 }
3824 void setNormalDest(BasicBlock *B) {
3825 Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3826 }
3827 void setUnwindDest(BasicBlock *B) {
3828 Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3829 }
3830
3831 /// Get the landingpad instruction from the landing pad
3832 /// block (the unwind destination).
3833 LandingPadInst *getLandingPadInst() const;
3834
3835 BasicBlock *getSuccessor(unsigned i) const {
3836 assert(i < 2 && "Successor # out of range for invoke!")((i < 2 && "Successor # out of range for invoke!")
? static_cast<void> (0) : __assert_fail ("i < 2 && \"Successor # out of range for invoke!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 3836, __PRETTY_FUNCTION__))
;
3837 return i == 0 ? getNormalDest() : getUnwindDest();
3838 }
3839
3840 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3841 assert(i < 2 && "Successor # out of range for invoke!")((i < 2 && "Successor # out of range for invoke!")
? static_cast<void> (0) : __assert_fail ("i < 2 && \"Successor # out of range for invoke!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 3841, __PRETTY_FUNCTION__))
;
3842 if (i == 0)
3843 setNormalDest(NewSucc);
3844 else
3845 setUnwindDest(NewSucc);
3846 }
3847
3848 unsigned getNumSuccessors() const { return 2; }
3849
3850 // Methods for support type inquiry through isa, cast, and dyn_cast:
3851 static bool classof(const Instruction *I) {
3852 return (I->getOpcode() == Instruction::Invoke);
3853 }
3854 static bool classof(const Value *V) {
3855 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3856 }
3857
3858private:
3859 // Shadow Instruction::setInstructionSubclassData with a private forwarding
3860 // method so that subclasses cannot accidentally use it.
3861 template <typename Bitfield>
3862 void setSubclassData(typename Bitfield::Type Value) {
3863 Instruction::setSubclassData<Bitfield>(Value);
3864 }
3865};
3866
3867InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3868 BasicBlock *IfException, ArrayRef<Value *> Args,
3869 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3870 const Twine &NameStr, Instruction *InsertBefore)
3871 : CallBase(Ty->getReturnType(), Instruction::Invoke,
3872 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
3873 InsertBefore) {
3874 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
3875}
3876
3877InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3878 BasicBlock *IfException, ArrayRef<Value *> Args,
3879 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3880 const Twine &NameStr, BasicBlock *InsertAtEnd)
3881 : CallBase(Ty->getReturnType(), Instruction::Invoke,
3882 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
3883 InsertAtEnd) {
3884 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
3885}
3886
3887//===----------------------------------------------------------------------===//
3888// CallBrInst Class
3889//===----------------------------------------------------------------------===//
3890
3891/// CallBr instruction, tracking function calls that may not return control but
3892/// instead transfer it to a third location. The SubclassData field is used to
3893/// hold the calling convention of the call.
3894///
3895class CallBrInst : public CallBase {
3896
3897 unsigned NumIndirectDests;
3898
3899 CallBrInst(const CallBrInst &BI);
3900
3901 /// Construct a CallBrInst given a range of arguments.
3902 ///
3903 /// Construct a CallBrInst from a range of arguments
3904 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
3905 ArrayRef<BasicBlock *> IndirectDests,
3906 ArrayRef<Value *> Args,
3907 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3908 const Twine &NameStr, Instruction *InsertBefore);
3909
3910 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
3911 ArrayRef<BasicBlock *> IndirectDests,
3912 ArrayRef<Value *> Args,
3913 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3914 const Twine &NameStr, BasicBlock *InsertAtEnd);
3915
3916 void init(FunctionType *FTy, Value *Func, BasicBlock *DefaultDest,
3917 ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args,
3918 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
3919
3920 /// Should the Indirect Destinations change, scan + update the Arg list.
3921 void updateArgBlockAddresses(unsigned i, BasicBlock *B);
3922
3923 /// Compute the number of operands to allocate.
3924 static int ComputeNumOperands(int NumArgs, int NumIndirectDests,
3925 int NumBundleInputs = 0) {
3926 // We need one operand for the called function, plus our extra operands and
3927 // the input operand counts provided.
3928 return 2 + NumIndirectDests + NumArgs + NumBundleInputs;
3929 }
3930
3931protected:
3932 // Note: Instruction needs to be a friend here to call cloneImpl.
3933 friend class Instruction;
3934
3935 CallBrInst *cloneImpl() const;
3936
3937public:
3938 static CallBrInst *Create(FunctionType *Ty, Value *Func,
3939 BasicBlock *DefaultDest,
3940 ArrayRef<BasicBlock *> IndirectDests,
3941 ArrayRef<Value *> Args, const Twine &NameStr,
3942 Instruction *InsertBefore = nullptr) {
3943 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
3944 return new (NumOperands)
3945 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None,
3946 NumOperands, NameStr, InsertBefore);
3947 }
3948
3949 static CallBrInst *Create(FunctionType *Ty, Value *Func,
3950 BasicBlock *DefaultDest,
3951 ArrayRef<BasicBlock *> IndirectDests,
3952 ArrayRef<Value *> Args,
3953 ArrayRef<OperandBundleDef> Bundles = None,
3954 const Twine &NameStr = "",
3955 Instruction *InsertBefore = nullptr) {
3956 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
3957 CountBundleInputs(Bundles));
3958 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3959
3960 return new (NumOperands, DescriptorBytes)
3961 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
3962 NumOperands, NameStr, InsertBefore);
3963 }
3964
3965 static CallBrInst *Create(FunctionType *Ty, Value *Func,
3966 BasicBlock *DefaultDest,
3967 ArrayRef<BasicBlock *> IndirectDests,
3968 ArrayRef<Value *> Args, const Twine &NameStr,
3969 BasicBlock *InsertAtEnd) {
3970 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
3971 return new (NumOperands)
3972 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None,
3973 NumOperands, NameStr, InsertAtEnd);
3974 }
3975
3976 static CallBrInst *Create(FunctionType *Ty, Value *Func,
3977 BasicBlock *DefaultDest,
3978 ArrayRef<BasicBlock *> IndirectDests,
3979 ArrayRef<Value *> Args,
3980 ArrayRef<OperandBundleDef> Bundles,
3981 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3982 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
3983 CountBundleInputs(Bundles));
3984 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3985
3986 return new (NumOperands, DescriptorBytes)
3987 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
3988 NumOperands, NameStr, InsertAtEnd);
3989 }
3990
3991 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
3992 ArrayRef<BasicBlock *> IndirectDests,
3993 ArrayRef<Value *> Args, const Twine &NameStr,
3994 Instruction *InsertBefore = nullptr) {
3995 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
3996 IndirectDests, Args, NameStr, InsertBefore);
3997 }
3998
3999 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4000 ArrayRef<BasicBlock *> IndirectDests,
4001 ArrayRef<Value *> Args,
4002 ArrayRef<OperandBundleDef> Bundles = None,
4003 const Twine &NameStr = "",
4004 Instruction *InsertBefore = nullptr) {
4005 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4006 IndirectDests, Args, Bundles, NameStr, InsertBefore);
4007 }
4008
4009 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4010 ArrayRef<BasicBlock *> IndirectDests,
4011 ArrayRef<Value *> Args, const Twine &NameStr,
4012 BasicBlock *InsertAtEnd) {
4013 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4014 IndirectDests, Args, NameStr, InsertAtEnd);
4015 }
4016
4017 static CallBrInst *Create(FunctionCallee Func,
4018 BasicBlock *DefaultDest,
4019 ArrayRef<BasicBlock *> IndirectDests,
4020 ArrayRef<Value *> Args,
4021 ArrayRef<OperandBundleDef> Bundles,
4022 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4023 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4024 IndirectDests, Args, Bundles, NameStr, InsertAtEnd);
4025 }
4026
4027 /// Create a clone of \p CBI with a different set of operand bundles and
4028 /// insert it before \p InsertPt.
4029 ///
4030 /// The returned callbr instruction is identical to \p CBI in every way
4031 /// except that the operand bundles for the new instruction are set to the
4032 /// operand bundles in \p Bundles.
4033 static CallBrInst *Create(CallBrInst *CBI,
4034 ArrayRef<OperandBundleDef> Bundles,
4035 Instruction *InsertPt = nullptr);
4036
4037 /// Return the number of callbr indirect dest labels.
4038 ///
4039 unsigned getNumIndirectDests() const { return NumIndirectDests; }
4040
4041 /// getIndirectDestLabel - Return the i-th indirect dest label.
4042 ///
4043 Value *getIndirectDestLabel(unsigned i) const {
4044 assert(i < getNumIndirectDests() && "Out of bounds!")((i < getNumIndirectDests() && "Out of bounds!") ?
static_cast<void> (0) : __assert_fail ("i < getNumIndirectDests() && \"Out of bounds!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 4044, __PRETTY_FUNCTION__))
;
4045 return getOperand(i + getNumArgOperands() + getNumTotalBundleOperands() +
4046 1);
4047 }
4048
4049 Value *getIndirectDestLabelUse(unsigned i) const {
4050 assert(i < getNumIndirectDests() && "Out of bounds!")((i < getNumIndirectDests() && "Out of bounds!") ?
static_cast<void> (0) : __assert_fail ("i < getNumIndirectDests() && \"Out of bounds!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 4050, __PRETTY_FUNCTION__))
;
4051 return getOperandUse(i + getNumArgOperands() + getNumTotalBundleOperands() +
4052 1);
4053 }
4054
4055 // Return the destination basic blocks...
4056 BasicBlock *getDefaultDest() const {
4057 return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() - 1));
4058 }
4059 BasicBlock *getIndirectDest(unsigned i) const {
4060 return cast_or_null<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i));
4061 }
4062 SmallVector<BasicBlock *, 16> getIndirectDests() const {
4063 SmallVector<BasicBlock *, 16> IndirectDests;
4064 for (unsigned i = 0, e = getNumIndirectDests(); i < e; ++i)
4065 IndirectDests.push_back(getIndirectDest(i));
4066 return IndirectDests;
4067 }
4068 void setDefaultDest(BasicBlock *B) {
4069 *(&Op<-1>() - getNumIndirectDests() - 1) = reinterpret_cast<Value *>(B);
4070 }
4071 void setIndirectDest(unsigned i, BasicBlock *B) {
4072 updateArgBlockAddresses(i, B);
4073 *(&Op<-1>() - getNumIndirectDests() + i) = reinterpret_cast<Value *>(B);
4074 }
4075
4076 BasicBlock *getSuccessor(unsigned i) const {
4077 assert(i < getNumSuccessors() + 1 &&((i < getNumSuccessors() + 1 && "Successor # out of range for callbr!"
) ? static_cast<void> (0) : __assert_fail ("i < getNumSuccessors() + 1 && \"Successor # out of range for callbr!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 4078, __PRETTY_FUNCTION__))
4078 "Successor # out of range for callbr!")((i < getNumSuccessors() + 1 && "Successor # out of range for callbr!"
) ? static_cast<void> (0) : __assert_fail ("i < getNumSuccessors() + 1 && \"Successor # out of range for callbr!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 4078, __PRETTY_FUNCTION__))
;
4079 return i == 0 ? getDefaultDest() : getIndirectDest(i - 1);
4080 }
4081
4082 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
4083 assert(i < getNumIndirectDests() + 1 &&((i < getNumIndirectDests() + 1 && "Successor # out of range for callbr!"
) ? static_cast<void> (0) : __assert_fail ("i < getNumIndirectDests() + 1 && \"Successor # out of range for callbr!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 4084, __PRETTY_FUNCTION__))
4084 "Successor # out of range for callbr!")((i < getNumIndirectDests() + 1 && "Successor # out of range for callbr!"
) ? static_cast<void> (0) : __assert_fail ("i < getNumIndirectDests() + 1 && \"Successor # out of range for callbr!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 4084, __PRETTY_FUNCTION__))
;
4085 return i == 0 ? setDefaultDest(NewSucc) : setIndirectDest(i - 1, NewSucc);
4086 }
4087
4088 unsigned getNumSuccessors() const { return getNumIndirectDests() + 1; }
4089
4090 // Methods for support type inquiry through isa, cast, and dyn_cast:
4091 static bool classof(const Instruction *I) {
4092 return (I->getOpcode() == Instruction::CallBr);
4093 }
4094 static bool classof(const Value *V) {
4095 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4096 }
4097
4098private:
4099 // Shadow Instruction::setInstructionSubclassData with a private forwarding
4100 // method so that subclasses cannot accidentally use it.
4101 template <typename Bitfield>
4102 void setSubclassData(typename Bitfield::Type Value) {
4103 Instruction::setSubclassData<Bitfield>(Value);
4104 }
4105};
4106
4107CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4108 ArrayRef<BasicBlock *> IndirectDests,
4109 ArrayRef<Value *> Args,
4110 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4111 const Twine &NameStr, Instruction *InsertBefore)
4112 : CallBase(Ty->getReturnType(), Instruction::CallBr,
4113 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4114 InsertBefore) {
4115 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4116}
4117
4118CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4119 ArrayRef<BasicBlock *> IndirectDests,
4120 ArrayRef<Value *> Args,
4121 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4122 const Twine &NameStr, BasicBlock *InsertAtEnd)
4123 : CallBase(Ty->getReturnType(), Instruction::CallBr,
4124 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4125 InsertAtEnd) {
4126 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4127}
4128
4129//===----------------------------------------------------------------------===//
4130// ResumeInst Class
4131//===----------------------------------------------------------------------===//
4132
4133//===---------------------------------------------------------------------------
4134/// Resume the propagation of an exception.
4135///
4136class ResumeInst : public Instruction {
4137 ResumeInst(const ResumeInst &RI);
4138
4139 explicit ResumeInst(Value *Exn, Instruction *InsertBefore=nullptr);
4140 ResumeInst(Value *Exn, BasicBlock *InsertAtEnd);
4141
4142protected:
4143 // Note: Instruction needs to be a friend here to call cloneImpl.
4144 friend class Instruction;
4145
4146 ResumeInst *cloneImpl() const;
4147
4148public:
4149 static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) {
4150 return new(1) ResumeInst(Exn, InsertBefore);
4151 }
4152
4153 static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) {
4154 return new(1) ResumeInst(Exn, InsertAtEnd);
4155 }
4156
4157 /// Provide fast operand accessors
4158 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
4159
4160 /// Convenience accessor.
4161 Value *getValue() const { return Op<0>(); }
4162
4163 unsigned getNumSuccessors() const { return 0; }
4164
4165 // Methods for support type inquiry through isa, cast, and dyn_cast:
4166 static bool classof(const Instruction *I) {
4167 return I->getOpcode() == Instruction::Resume;
4168 }
4169 static bool classof(const Value *V) {
4170 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4171 }
4172
4173private:
4174 BasicBlock *getSuccessor(unsigned idx) const {
4175 llvm_unreachable("ResumeInst has no successors!")::llvm::llvm_unreachable_internal("ResumeInst has no successors!"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 4175)
;
4176 }
4177
4178 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
4179 llvm_unreachable("ResumeInst has no successors!")::llvm::llvm_unreachable_internal("ResumeInst has no successors!"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 4179)
;
4180 }
4181};
4182
4183template <>
4184struct OperandTraits<ResumeInst> :
4185 public FixedNumOperandTraits<ResumeInst, 1> {
4186};
4187
4188DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value)ResumeInst::op_iterator ResumeInst::op_begin() { return OperandTraits
<ResumeInst>::op_begin(this); } ResumeInst::const_op_iterator
ResumeInst::op_begin() const { return OperandTraits<ResumeInst
>::op_begin(const_cast<ResumeInst*>(this)); } ResumeInst
::op_iterator ResumeInst::op_end() { return OperandTraits<
ResumeInst>::op_end(this); } ResumeInst::const_op_iterator
ResumeInst::op_end() const { return OperandTraits<ResumeInst
>::op_end(const_cast<ResumeInst*>(this)); } Value *ResumeInst
::getOperand(unsigned i_nocapture) const { ((i_nocapture <
OperandTraits<ResumeInst>::operands(this) && "getOperand() out of range!"
) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<ResumeInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 4188, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<ResumeInst>::op_begin(const_cast<ResumeInst
*>(this))[i_nocapture].get()); } void ResumeInst::setOperand
(unsigned i_nocapture, Value *Val_nocapture) { ((i_nocapture <
OperandTraits<ResumeInst>::operands(this) && "setOperand() out of range!"
) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<ResumeInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 4188, __PRETTY_FUNCTION__)); OperandTraits<ResumeInst>
::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned ResumeInst
::getNumOperands() const { return OperandTraits<ResumeInst
>::operands(this); } template <int Idx_nocapture> Use
&ResumeInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
ResumeInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
4189
4190//===----------------------------------------------------------------------===//
4191// CatchSwitchInst Class
4192//===----------------------------------------------------------------------===//
4193class CatchSwitchInst : public Instruction {
4194 using UnwindDestField = BoolBitfieldElementT<0>;
4195
4196 /// The number of operands actually allocated. NumOperands is
4197 /// the number actually in use.
4198 unsigned ReservedSpace;
4199
4200 // Operand[0] = Outer scope
4201 // Operand[1] = Unwind block destination
4202 // Operand[n] = BasicBlock to go to on match
4203 CatchSwitchInst(const CatchSwitchInst &CSI);
4204
4205 /// Create a new switch instruction, specifying a
4206 /// default destination. The number of additional handlers can be specified
4207 /// here to make memory allocation more efficient.
4208 /// This constructor can also autoinsert before another instruction.
4209 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4210 unsigned NumHandlers, const Twine &NameStr,
4211 Instruction *InsertBefore);
4212
4213 /// Create a new switch instruction, specifying a
4214 /// default destination. The number of additional handlers can be specified
4215 /// here to make memory allocation more efficient.
4216 /// This constructor also autoinserts at the end of the specified BasicBlock.
4217 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4218 unsigned NumHandlers, const Twine &NameStr,
4219 BasicBlock *InsertAtEnd);
4220
4221 // allocate space for exactly zero operands
4222 void *operator new(size_t s) { return User::operator new(s); }
4223
4224 void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved);
4225 void growOperands(unsigned Size);
4226
4227protected:
4228 // Note: Instruction needs to be a friend here to call cloneImpl.
4229 friend class Instruction;
4230
4231 CatchSwitchInst *cloneImpl() const;
4232
4233public:
4234 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4235 unsigned NumHandlers,
4236 const Twine &NameStr = "",
4237 Instruction *InsertBefore = nullptr) {
4238 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4239 InsertBefore);
4240 }
4241
4242 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4243 unsigned NumHandlers, const Twine &NameStr,
4244 BasicBlock *InsertAtEnd) {
4245 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4246 InsertAtEnd);
4247 }
4248
4249 /// Provide fast operand accessors
4250 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
4251
4252 // Accessor Methods for CatchSwitch stmt
4253 Value *getParentPad() const { return getOperand(0); }
4254 void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); }
4255
4256 // Accessor Methods for CatchSwitch stmt
4257 bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
4258 bool unwindsToCaller() const { return !hasUnwindDest(); }
4259 BasicBlock *getUnwindDest() const {
4260 if (hasUnwindDest())
4261 return cast<BasicBlock>(getOperand(1));
4262 return nullptr;
4263 }
4264 void setUnwindDest(BasicBlock *UnwindDest) {
4265 assert(UnwindDest)((UnwindDest) ? static_cast<void> (0) : __assert_fail (
"UnwindDest", "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 4265, __PRETTY_FUNCTION__))
;
4266 assert(hasUnwindDest())((hasUnwindDest()) ? static_cast<void> (0) : __assert_fail
("hasUnwindDest()", "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 4266, __PRETTY_FUNCTION__))
;
4267 setOperand(1, UnwindDest);
4268 }
4269
4270 /// return the number of 'handlers' in this catchswitch
4271 /// instruction, except the default handler
4272 unsigned getNumHandlers() const {
4273 if (hasUnwindDest())
4274 return getNumOperands() - 2;
4275 return getNumOperands() - 1;
4276 }
4277
4278private:
4279 static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); }
4280 static const BasicBlock *handler_helper(const Value *V) {
4281 return cast<BasicBlock>(V);
4282 }
4283
4284public:
4285 using DerefFnTy = BasicBlock *(*)(Value *);
4286 using handler_iterator = mapped_iterator<op_iterator, DerefFnTy>;
4287 using handler_range = iterator_range<handler_iterator>;
4288 using ConstDerefFnTy = const BasicBlock *(*)(const Value *);
4289 using const_handler_iterator =
4290 mapped_iterator<const_op_iterator, ConstDerefFnTy>;
4291 using const_handler_range = iterator_range<const_handler_iterator>;
4292
4293 /// Returns an iterator that points to the first handler in CatchSwitchInst.
4294 handler_iterator handler_begin() {
4295 op_iterator It = op_begin() + 1;
4296 if (hasUnwindDest())
4297 ++It;
4298 return handler_iterator(It, DerefFnTy(handler_helper));
4299 }
4300
4301 /// Returns an iterator that points to the first handler in the
4302 /// CatchSwitchInst.
4303 const_handler_iterator handler_begin() const {
4304 const_op_iterator It = op_begin() + 1;
4305 if (hasUnwindDest())
4306 ++It;
4307 return const_handler_iterator(It, ConstDerefFnTy(handler_helper));
4308 }
4309
4310 /// Returns a read-only iterator that points one past the last
4311 /// handler in the CatchSwitchInst.
4312 handler_iterator handler_end() {
4313 return handler_iterator(op_end(), DerefFnTy(handler_helper));
4314 }
4315
4316 /// Returns an iterator that points one past the last handler in the
4317 /// CatchSwitchInst.
4318 const_handler_iterator handler_end() const {
4319 return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper));
4320 }
4321
4322 /// iteration adapter for range-for loops.
4323 handler_range handlers() {
4324 return make_range(handler_begin(), handler_end());
4325 }
4326
4327 /// iteration adapter for range-for loops.
4328 const_handler_range handlers() const {
4329 return make_range(handler_begin(), handler_end());
4330 }
4331
4332 /// Add an entry to the switch instruction...
4333 /// Note:
4334 /// This action invalidates handler_end(). Old handler_end() iterator will
4335 /// point to the added handler.
4336 void addHandler(BasicBlock *Dest);
4337
4338 void removeHandler(handler_iterator HI);
4339
4340 unsigned getNumSuccessors() const { return getNumOperands() - 1; }
4341 BasicBlock *getSuccessor(unsigned Idx) const {
4342 assert(Idx < getNumSuccessors() &&((Idx < getNumSuccessors() && "Successor # out of range for catchswitch!"
) ? static_cast<void> (0) : __assert_fail ("Idx < getNumSuccessors() && \"Successor # out of range for catchswitch!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 4343, __PRETTY_FUNCTION__))
4343 "Successor # out of range for catchswitch!")((Idx < getNumSuccessors() && "Successor # out of range for catchswitch!"
) ? static_cast<void> (0) : __assert_fail ("Idx < getNumSuccessors() && \"Successor # out of range for catchswitch!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 4343, __PRETTY_FUNCTION__))
;
4344 return cast<BasicBlock>(getOperand(Idx + 1));
4345 }
4346 void setSuccessor(unsigned Idx, BasicBlock *NewSucc) {
4347 assert(Idx < getNumSuccessors() &&((Idx < getNumSuccessors() && "Successor # out of range for catchswitch!"
) ? static_cast<void> (0) : __assert_fail ("Idx < getNumSuccessors() && \"Successor # out of range for catchswitch!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 4348, __PRETTY_FUNCTION__))
4348 "Successor # out of range for catchswitch!")((Idx < getNumSuccessors() && "Successor # out of range for catchswitch!"
) ? static_cast<void> (0) : __assert_fail ("Idx < getNumSuccessors() && \"Successor # out of range for catchswitch!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 4348, __PRETTY_FUNCTION__))
;
4349 setOperand(Idx + 1, NewSucc);
4350 }
4351
4352 // Methods for support type inquiry through isa, cast, and dyn_cast:
4353 static bool classof(const Instruction *I) {
4354 return I->getOpcode() == Instruction::CatchSwitch;
4355 }
4356 static bool classof(const Value *V) {
4357 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4358 }
4359};
4360
4361template <>
4362struct OperandTraits<CatchSwitchInst> : public HungoffOperandTraits<2> {};
4363
4364DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchSwitchInst, Value)CatchSwitchInst::op_iterator CatchSwitchInst::op_begin() { return
OperandTraits<CatchSwitchInst>::op_begin(this); } CatchSwitchInst
::const_op_iterator CatchSwitchInst::op_begin() const { return
OperandTraits<CatchSwitchInst>::op_begin(const_cast<
CatchSwitchInst*>(this)); } CatchSwitchInst::op_iterator CatchSwitchInst
::op_end() { return OperandTraits<CatchSwitchInst>::op_end
(this); } CatchSwitchInst::const_op_iterator CatchSwitchInst::
op_end() const { return OperandTraits<CatchSwitchInst>::
op_end(const_cast<CatchSwitchInst*>(this)); } Value *CatchSwitchInst
::getOperand(unsigned i_nocapture) const { ((i_nocapture <
OperandTraits<CatchSwitchInst>::operands(this) &&
"getOperand() out of range!") ? static_cast<void> (0) :
__assert_fail ("i_nocapture < OperandTraits<CatchSwitchInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 4364, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<CatchSwitchInst>::op_begin(const_cast<
CatchSwitchInst*>(this))[i_nocapture].get()); } void CatchSwitchInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { ((
i_nocapture < OperandTraits<CatchSwitchInst>::operands
(this) && "setOperand() out of range!") ? static_cast
<void> (0) : __assert_fail ("i_nocapture < OperandTraits<CatchSwitchInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 4364, __PRETTY_FUNCTION__)); OperandTraits<CatchSwitchInst
>::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned
CatchSwitchInst::getNumOperands() const { return OperandTraits
<CatchSwitchInst>::operands(this); } template <int Idx_nocapture
> Use &CatchSwitchInst::Op() { return this->OpFrom<
Idx_nocapture>(this); } template <int Idx_nocapture>
const Use &CatchSwitchInst::Op() const { return this->
OpFrom<Idx_nocapture>(this); }
4365
4366//===----------------------------------------------------------------------===//
4367// CleanupPadInst Class
4368//===----------------------------------------------------------------------===//
4369class CleanupPadInst : public FuncletPadInst {
4370private:
4371 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4372 unsigned Values, const Twine &NameStr,
4373 Instruction *InsertBefore)
4374 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4375 NameStr, InsertBefore) {}
4376 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4377 unsigned Values, const Twine &NameStr,
4378 BasicBlock *InsertAtEnd)
4379 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4380 NameStr, InsertAtEnd) {}
4381
4382public:
4383 static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args = None,
4384 const Twine &NameStr = "",
4385 Instruction *InsertBefore = nullptr) {
4386 unsigned Values = 1 + Args.size();
4387 return new (Values)
4388 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertBefore);
4389 }
4390
4391 static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args,
4392 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4393 unsigned Values = 1 + Args.size();
4394 return new (Values)
4395 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertAtEnd);
4396 }
4397
4398 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4399 static bool classof(const Instruction *I) {
4400 return I->getOpcode() == Instruction::CleanupPad;
4401 }
4402 static bool classof(const Value *V) {
4403 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4404 }
4405};
4406
4407//===----------------------------------------------------------------------===//
4408// CatchPadInst Class
4409//===----------------------------------------------------------------------===//
4410class CatchPadInst : public FuncletPadInst {
4411private:
4412 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4413 unsigned Values, const Twine &NameStr,
4414 Instruction *InsertBefore)
4415 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
4416 NameStr, InsertBefore) {}
4417 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4418 unsigned Values, const Twine &NameStr,
4419 BasicBlock *InsertAtEnd)
4420 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
4421 NameStr, InsertAtEnd) {}
4422
4423public:
4424 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
4425 const Twine &NameStr = "",
4426 Instruction *InsertBefore = nullptr) {
4427 unsigned Values = 1 + Args.size();
4428 return new (Values)
4429 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertBefore);
4430 }
4431
4432 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
4433 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4434 unsigned Values = 1 + Args.size();
4435 return new (Values)
4436 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertAtEnd);
4437 }
4438
4439 /// Convenience accessors
4440 CatchSwitchInst *getCatchSwitch() const {
4441 return cast<CatchSwitchInst>(Op<-1>());
4442 }
4443 void setCatchSwitch(Value *CatchSwitch) {
4444 assert(CatchSwitch)((CatchSwitch) ? static_cast<void> (0) : __assert_fail (
"CatchSwitch", "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 4444, __PRETTY_FUNCTION__))
;
4445 Op<-1>() = CatchSwitch;
4446 }
4447
4448 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4449 static bool classof(const Instruction *I) {
4450 return I->getOpcode() == Instruction::CatchPad;
4451 }
4452 static bool classof(const Value *V) {
4453 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4454 }
4455};
4456
4457//===----------------------------------------------------------------------===//
4458// CatchReturnInst Class
4459//===----------------------------------------------------------------------===//
4460
4461class CatchReturnInst : public Instruction {
4462 CatchReturnInst(const CatchReturnInst &RI);
4463 CatchReturnInst(Value *CatchPad, BasicBlock *BB, Instruction *InsertBefore);
4464 CatchReturnInst(Value *CatchPad, BasicBlock *BB, BasicBlock *InsertAtEnd);
4465
4466 void init(Value *CatchPad, BasicBlock *BB);
4467
4468protected:
4469 // Note: Instruction needs to be a friend here to call cloneImpl.
4470 friend class Instruction;
4471
4472 CatchReturnInst *cloneImpl() const;
4473
4474public:
4475 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
4476 Instruction *InsertBefore = nullptr) {
4477 assert(CatchPad)((CatchPad) ? static_cast<void> (0) : __assert_fail ("CatchPad"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 4477, __PRETTY_FUNCTION__))
;
4478 assert(BB)((BB) ? static_cast<void> (0) : __assert_fail ("BB", "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 4478, __PRETTY_FUNCTION__))
;
4479 return new (2) CatchReturnInst(CatchPad, BB, InsertBefore);
4480 }
4481
4482 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
4483 BasicBlock *InsertAtEnd) {
4484 assert(CatchPad)((CatchPad) ? static_cast<void> (0) : __assert_fail ("CatchPad"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 4484, __PRETTY_FUNCTION__))
;
4485 assert(BB)((BB) ? static_cast<void> (0) : __assert_fail ("BB", "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 4485, __PRETTY_FUNCTION__))
;
4486 return new (2) CatchReturnInst(CatchPad, BB, InsertAtEnd);
4487 }
4488
4489 /// Provide fast operand accessors
4490 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
4491
4492 /// Convenience accessors.
4493 CatchPadInst *getCatchPad() const { return cast<CatchPadInst>(Op<0>()); }
4494 void setCatchPad(CatchPadInst *CatchPad) {
4495 assert(CatchPad)((CatchPad) ? static_cast<void> (0) : __assert_fail ("CatchPad"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 4495, __PRETTY_FUNCTION__))
;
4496 Op<0>() = CatchPad;
4497 }
4498
4499 BasicBlock *getSuccessor() const { return cast<BasicBlock>(Op<1>()); }
4500 void setSuccessor(BasicBlock *NewSucc) {
4501 assert(NewSucc)((NewSucc) ? static_cast<void> (0) : __assert_fail ("NewSucc"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 4501, __PRETTY_FUNCTION__))
;
4502 Op<1>() = NewSucc;
4503 }
4504 unsigned getNumSuccessors() const { return 1; }
4505
4506 /// Get the parentPad of this catchret's catchpad's catchswitch.
4507 /// The successor block is implicitly a member of this funclet.
4508 Value *getCatchSwitchParentPad() const {
4509 return getCatchPad()->getCatchSwitch()->getParentPad();
4510 }
4511
4512 // Methods for support type inquiry through isa, cast, and dyn_cast:
4513 static bool classof(const Instruction *I) {
4514 return (I->getOpcode() == Instruction::CatchRet);
4515 }
4516 static bool classof(const Value *V) {
4517 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4518 }
4519
4520private:
4521 BasicBlock *getSuccessor(unsigned Idx) const {
4522 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!")((Idx < getNumSuccessors() && "Successor # out of range for catchret!"
) ? static_cast<void> (0) : __assert_fail ("Idx < getNumSuccessors() && \"Successor # out of range for catchret!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 4522, __PRETTY_FUNCTION__))
;
4523 return getSuccessor();
4524 }
4525
4526 void setSuccessor(unsigned Idx, BasicBlock *B) {
4527 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!")((Idx < getNumSuccessors() && "Successor # out of range for catchret!"
) ? static_cast<void> (0) : __assert_fail ("Idx < getNumSuccessors() && \"Successor # out of range for catchret!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 4527, __PRETTY_FUNCTION__))
;
4528 setSuccessor(B);
4529 }
4530};
4531
4532template <>
4533struct OperandTraits<CatchReturnInst>
4534 : public FixedNumOperandTraits<CatchReturnInst, 2> {};
4535
4536DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchReturnInst, Value)CatchReturnInst::op_iterator CatchReturnInst::op_begin() { return
OperandTraits<CatchReturnInst>::op_begin(this); } CatchReturnInst
::const_op_iterator CatchReturnInst::op_begin() const { return
OperandTraits<CatchReturnInst>::op_begin(const_cast<
CatchReturnInst*>(this)); } CatchReturnInst::op_iterator CatchReturnInst
::op_end() { return OperandTraits<CatchReturnInst>::op_end
(this); } CatchReturnInst::const_op_iterator CatchReturnInst::
op_end() const { return OperandTraits<CatchReturnInst>::
op_end(const_cast<CatchReturnInst*>(this)); } Value *CatchReturnInst
::getOperand(unsigned i_nocapture) const { ((i_nocapture <
OperandTraits<CatchReturnInst>::operands(this) &&
"getOperand() out of range!") ? static_cast<void> (0) :
__assert_fail ("i_nocapture < OperandTraits<CatchReturnInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 4536, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<CatchReturnInst>::op_begin(const_cast<
CatchReturnInst*>(this))[i_nocapture].get()); } void CatchReturnInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { ((
i_nocapture < OperandTraits<CatchReturnInst>::operands
(this) && "setOperand() out of range!") ? static_cast
<void> (0) : __assert_fail ("i_nocapture < OperandTraits<CatchReturnInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 4536, __PRETTY_FUNCTION__)); OperandTraits<CatchReturnInst
>::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned
CatchReturnInst::getNumOperands() const { return OperandTraits
<CatchReturnInst>::operands(this); } template <int Idx_nocapture
> Use &CatchReturnInst::Op() { return this->OpFrom<
Idx_nocapture>(this); } template <int Idx_nocapture>
const Use &CatchReturnInst::Op() const { return this->
OpFrom<Idx_nocapture>(this); }
4537
4538//===----------------------------------------------------------------------===//
4539// CleanupReturnInst Class
4540//===----------------------------------------------------------------------===//
4541
4542class CleanupReturnInst : public Instruction {
4543 using UnwindDestField = BoolBitfieldElementT<0>;
4544
4545private:
4546 CleanupReturnInst(const CleanupReturnInst &RI);
4547 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
4548 Instruction *InsertBefore = nullptr);
4549 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
4550 BasicBlock *InsertAtEnd);
4551
4552 void init(Value *CleanupPad, BasicBlock *UnwindBB);
4553
4554protected:
4555 // Note: Instruction needs to be a friend here to call cloneImpl.
4556 friend class Instruction;
4557
4558 CleanupReturnInst *cloneImpl() const;
4559
4560public:
4561 static CleanupReturnInst *Create(Value *CleanupPad,
4562 BasicBlock *UnwindBB = nullptr,
4563 Instruction *InsertBefore = nullptr) {
4564 assert(CleanupPad)((CleanupPad) ? static_cast<void> (0) : __assert_fail (
"CleanupPad", "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 4564, __PRETTY_FUNCTION__))
;
4565 unsigned Values = 1;
4566 if (UnwindBB)
4567 ++Values;
4568 return new (Values)
4569 CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertBefore);
4570 }
4571
4572 static CleanupReturnInst *Create(Value *CleanupPad, BasicBlock *UnwindBB,
4573 BasicBlock *InsertAtEnd) {
4574 assert(CleanupPad)((CleanupPad) ? static_cast<void> (0) : __assert_fail (
"CleanupPad", "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 4574, __PRETTY_FUNCTION__))
;
4575 unsigned Values = 1;
4576 if (UnwindBB)
4577 ++Values;
4578 return new (Values)
4579 CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertAtEnd);
4580 }
4581
4582 /// Provide fast operand accessors
4583 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
4584
4585 bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
4586 bool unwindsToCaller() const { return !hasUnwindDest(); }
4587
4588 /// Convenience accessor.
4589 CleanupPadInst *getCleanupPad() const {
4590 return cast<CleanupPadInst>(Op<0>());
4591 }
4592 void setCleanupPad(CleanupPadInst *CleanupPad) {
4593 assert(CleanupPad)((CleanupPad) ? static_cast<void> (0) : __assert_fail (
"CleanupPad", "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 4593, __PRETTY_FUNCTION__))
;
4594 Op<0>() = CleanupPad;
4595 }
4596
4597 unsigned getNumSuccessors() const { return hasUnwindDest() ? 1 : 0; }
4598
4599 BasicBlock *getUnwindDest() const {
4600 return hasUnwindDest() ? cast<BasicBlock>(Op<1>()) : nullptr;
4601 }
4602 void setUnwindDest(BasicBlock *NewDest) {
4603 assert(NewDest)((NewDest) ? static_cast<void> (0) : __assert_fail ("NewDest"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 4603, __PRETTY_FUNCTION__))
;
4604 assert(hasUnwindDest())((hasUnwindDest()) ? static_cast<void> (0) : __assert_fail
("hasUnwindDest()", "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 4604, __PRETTY_FUNCTION__))
;
4605 Op<1>() = NewDest;
4606 }
4607
4608 // Methods for support type inquiry through isa, cast, and dyn_cast:
4609 static bool classof(const Instruction *I) {
4610 return (I->getOpcode() == Instruction::CleanupRet);
4611 }
4612 static bool classof(const Value *V) {
4613 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4614 }
4615
4616private:
4617 BasicBlock *getSuccessor(unsigned Idx) const {
4618 assert(Idx == 0)((Idx == 0) ? static_cast<void> (0) : __assert_fail ("Idx == 0"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 4618, __PRETTY_FUNCTION__))
;
4619 return getUnwindDest();
4620 }
4621
4622 void setSuccessor(unsigned Idx, BasicBlock *B) {
4623 assert(Idx == 0)((Idx == 0) ? static_cast<void> (0) : __assert_fail ("Idx == 0"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 4623, __PRETTY_FUNCTION__))
;
4624 setUnwindDest(B);
4625 }
4626
4627 // Shadow Instruction::setInstructionSubclassData with a private forwarding
4628 // method so that subclasses cannot accidentally use it.
4629 template <typename Bitfield>
4630 void setSubclassData(typename Bitfield::Type Value) {
4631 Instruction::setSubclassData<Bitfield>(Value);
4632 }
4633};
4634
4635template <>
4636struct OperandTraits<CleanupReturnInst>
4637 : public VariadicOperandTraits<CleanupReturnInst, /*MINARITY=*/1> {};
4638
4639DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CleanupReturnInst, Value)CleanupReturnInst::op_iterator CleanupReturnInst::op_begin() {
return OperandTraits<CleanupReturnInst>::op_begin(this
); } CleanupReturnInst::const_op_iterator CleanupReturnInst::
op_begin() const { return OperandTraits<CleanupReturnInst>
::op_begin(const_cast<CleanupReturnInst*>(this)); } CleanupReturnInst
::op_iterator CleanupReturnInst::op_end() { return OperandTraits
<CleanupReturnInst>::op_end(this); } CleanupReturnInst::
const_op_iterator CleanupReturnInst::op_end() const { return OperandTraits
<CleanupReturnInst>::op_end(const_cast<CleanupReturnInst
*>(this)); } Value *CleanupReturnInst::getOperand(unsigned
i_nocapture) const { ((i_nocapture < OperandTraits<CleanupReturnInst
>::operands(this) && "getOperand() out of range!")
? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<CleanupReturnInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 4639, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<CleanupReturnInst>::op_begin(const_cast
<CleanupReturnInst*>(this))[i_nocapture].get()); } void
CleanupReturnInst::setOperand(unsigned i_nocapture, Value *Val_nocapture
) { ((i_nocapture < OperandTraits<CleanupReturnInst>
::operands(this) && "setOperand() out of range!") ? static_cast
<void> (0) : __assert_fail ("i_nocapture < OperandTraits<CleanupReturnInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 4639, __PRETTY_FUNCTION__)); OperandTraits<CleanupReturnInst
>::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned
CleanupReturnInst::getNumOperands() const { return OperandTraits
<CleanupReturnInst>::operands(this); } template <int
Idx_nocapture> Use &CleanupReturnInst::Op() { return this
->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture
> const Use &CleanupReturnInst::Op() const { return this
->OpFrom<Idx_nocapture>(this); }
4640
4641//===----------------------------------------------------------------------===//
4642// UnreachableInst Class
4643//===----------------------------------------------------------------------===//
4644
4645//===---------------------------------------------------------------------------
4646/// This function has undefined behavior. In particular, the
4647/// presence of this instruction indicates some higher level knowledge that the
4648/// end of the block cannot be reached.
4649///
4650class UnreachableInst : public Instruction {
4651protected:
4652 // Note: Instruction needs to be a friend here to call cloneImpl.
4653 friend class Instruction;
4654
4655 UnreachableInst *cloneImpl() const;
4656
4657public:
4658 explicit UnreachableInst(LLVMContext &C, Instruction *InsertBefore = nullptr);
4659 explicit UnreachableInst(LLVMContext &C, BasicBlock *InsertAtEnd);
4660
4661 // allocate space for exactly zero operands
4662 void *operator new(size_t s) {
4663 return User::operator new(s, 0);
4664 }
4665
4666 unsigned getNumSuccessors() const { return 0; }
4667
4668 // Methods for support type inquiry through isa, cast, and dyn_cast:
4669 static bool classof(const Instruction *I) {
4670 return I->getOpcode() == Instruction::Unreachable;
4671 }
4672 static bool classof(const Value *V) {
4673 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4674 }
4675
4676private:
4677 BasicBlock *getSuccessor(unsigned idx) const {
4678 llvm_unreachable("UnreachableInst has no successors!")::llvm::llvm_unreachable_internal("UnreachableInst has no successors!"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 4678)
;
4679 }
4680
4681 void setSuccessor(unsigned idx, BasicBlock *B) {
4682 llvm_unreachable("UnreachableInst has no successors!")::llvm::llvm_unreachable_internal("UnreachableInst has no successors!"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 4682)
;
4683 }
4684};
4685
4686//===----------------------------------------------------------------------===//
4687// TruncInst Class
4688//===----------------------------------------------------------------------===//
4689
4690/// This class represents a truncation of integer types.
4691class TruncInst : public CastInst {
4692protected:
4693 // Note: Instruction needs to be a friend here to call cloneImpl.
4694 friend class Instruction;
4695
4696 /// Clone an identical TruncInst
4697 TruncInst *cloneImpl() const;
4698
4699public:
4700 /// Constructor with insert-before-instruction semantics
4701 TruncInst(
4702 Value *S, ///< The value to be truncated
4703 Type *Ty, ///< The (smaller) type to truncate to
4704 const Twine &NameStr = "", ///< A name for the new instruction
4705 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4706 );
4707
4708 /// Constructor with insert-at-end-of-block semantics
4709 TruncInst(
4710 Value *S, ///< The value to be truncated
4711 Type *Ty, ///< The (smaller) type to truncate to
4712 const Twine &NameStr, ///< A name for the new instruction
4713 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4714 );
4715
4716 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4717 static bool classof(const Instruction *I) {
4718 return I->getOpcode() == Trunc;
4719 }
4720 static bool classof(const Value *V) {
4721 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4722 }
4723};
4724
4725//===----------------------------------------------------------------------===//
4726// ZExtInst Class
4727//===----------------------------------------------------------------------===//
4728
4729/// This class represents zero extension of integer types.
4730class ZExtInst : public CastInst {
4731protected:
4732 // Note: Instruction needs to be a friend here to call cloneImpl.
4733 friend class Instruction;
4734
4735 /// Clone an identical ZExtInst
4736 ZExtInst *cloneImpl() const;
4737
4738public:
4739 /// Constructor with insert-before-instruction semantics
4740 ZExtInst(
4741 Value *S, ///< The value to be zero extended
4742 Type *Ty, ///< The type to zero extend to
4743 const Twine &NameStr = "", ///< A name for the new instruction
4744 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4745 );
4746
4747 /// Constructor with insert-at-end semantics.
4748 ZExtInst(
4749 Value *S, ///< The value to be zero extended
4750 Type *Ty, ///< The type to zero extend to
4751 const Twine &NameStr, ///< A name for the new instruction
4752 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4753 );
4754
4755 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4756 static bool classof(const Instruction *I) {
4757 return I->getOpcode() == ZExt;
4758 }
4759 static bool classof(const Value *V) {
4760 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4761 }
4762};
4763
4764//===----------------------------------------------------------------------===//
4765// SExtInst Class
4766//===----------------------------------------------------------------------===//
4767
/// This class represents a sign extension of integer types.
class SExtInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical SExtInst.
  SExtInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics.
  SExtInst(
    Value *S,                           ///< The value to be sign extended
    Type *Ty,                           ///< The type to sign extend to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics.
  SExtInst(
    Value *S,                ///< The value to be sign extended
    Type *Ty,                ///< The type to sign extend to
    const Twine &NameStr,    ///< A name for the new instruction
    BasicBlock *InsertAtEnd  ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == SExt;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
4802
4803//===----------------------------------------------------------------------===//
4804// FPTruncInst Class
4805//===----------------------------------------------------------------------===//
4806
/// This class represents a truncation of floating point types.
class FPTruncInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FPTruncInst.
  FPTruncInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics.
  FPTruncInst(
    Value *S,                           ///< The value to be truncated
    Type *Ty,                           ///< The type to truncate to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics.
  /// (This overload inserts at the end of \p InsertAtEnd; the previous
  /// comment saying "insert-before-instruction" was copy-paste error.)
  FPTruncInst(
    Value *S,                ///< The value to be truncated
    Type *Ty,                ///< The type to truncate to
    const Twine &NameStr,    ///< A name for the new instruction
    BasicBlock *InsertAtEnd  ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == FPTrunc;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
4841
4842//===----------------------------------------------------------------------===//
4843// FPExtInst Class
4844//===----------------------------------------------------------------------===//
4845
/// This class represents an extension of floating point types.
class FPExtInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FPExtInst.
  FPExtInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics.
  FPExtInst(
    Value *S,                           ///< The value to be extended
    Type *Ty,                           ///< The type to extend to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics.
  FPExtInst(
    Value *S,                ///< The value to be extended
    Type *Ty,                ///< The type to extend to
    const Twine &NameStr,    ///< A name for the new instruction
    BasicBlock *InsertAtEnd  ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == FPExt;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
4880
4881//===----------------------------------------------------------------------===//
4882// UIToFPInst Class
4883//===----------------------------------------------------------------------===//
4884
/// This class represents a cast unsigned integer to floating point.
class UIToFPInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical UIToFPInst.
  UIToFPInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics.
  UIToFPInst(
    Value *S,                           ///< The value to be converted
    Type *Ty,                           ///< The type to convert to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics.
  UIToFPInst(
    Value *S,                ///< The value to be converted
    Type *Ty,                ///< The type to convert to
    const Twine &NameStr,    ///< A name for the new instruction
    BasicBlock *InsertAtEnd  ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == UIToFP;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
4919
4920//===----------------------------------------------------------------------===//
4921// SIToFPInst Class
4922//===----------------------------------------------------------------------===//
4923
/// This class represents a cast from signed integer to floating point.
class SIToFPInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical SIToFPInst.
  SIToFPInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics.
  SIToFPInst(
    Value *S,                           ///< The value to be converted
    Type *Ty,                           ///< The type to convert to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics.
  SIToFPInst(
    Value *S,                ///< The value to be converted
    Type *Ty,                ///< The type to convert to
    const Twine &NameStr,    ///< A name for the new instruction
    BasicBlock *InsertAtEnd  ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == SIToFP;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
4958
4959//===----------------------------------------------------------------------===//
4960// FPToUIInst Class
4961//===----------------------------------------------------------------------===//
4962
/// This class represents a cast from floating point to unsigned integer.
class FPToUIInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FPToUIInst.
  FPToUIInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics.
  FPToUIInst(
    Value *S,                           ///< The value to be converted
    Type *Ty,                           ///< The type to convert to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics.
  FPToUIInst(
    Value *S,                ///< The value to be converted
    Type *Ty,                ///< The type to convert to
    const Twine &NameStr,    ///< A name for the new instruction
    BasicBlock *InsertAtEnd  ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == FPToUI;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
4997
4998//===----------------------------------------------------------------------===//
4999// FPToSIInst Class
5000//===----------------------------------------------------------------------===//
5001
/// This class represents a cast from floating point to signed integer.
class FPToSIInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FPToSIInst.
  FPToSIInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics.
  FPToSIInst(
    Value *S,                           ///< The value to be converted
    Type *Ty,                           ///< The type to convert to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics.
  FPToSIInst(
    Value *S,                ///< The value to be converted
    Type *Ty,                ///< The type to convert to
    const Twine &NameStr,    ///< A name for the new instruction
    BasicBlock *InsertAtEnd  ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == FPToSI;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
5036
5037//===----------------------------------------------------------------------===//
5038// IntToPtrInst Class
5039//===----------------------------------------------------------------------===//
5040
/// This class represents a cast from an integer to a pointer.
class IntToPtrInst : public CastInst {
public:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  // NOTE(review): the friend declaration and cloneImpl live in the public
  // section here, unlike every sibling CastInst subclass (which uses
  // `protected:`). Tightening it would narrow the interface — confirm no
  // external callers before changing.
  friend class Instruction;

  /// Constructor with insert-before-instruction semantics.
  IntToPtrInst(
    Value *S,                           ///< The value to be converted
    Type *Ty,                           ///< The type to convert to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics.
  IntToPtrInst(
    Value *S,                ///< The value to be converted
    Type *Ty,                ///< The type to convert to
    const Twine &NameStr,    ///< A name for the new instruction
    BasicBlock *InsertAtEnd  ///< The block to insert the instruction into
  );

  /// Clone an identical IntToPtrInst.
  IntToPtrInst *cloneImpl() const;

  /// Returns the address space of this instruction's pointer (result) type.
  unsigned getAddressSpace() const {
    return getType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == IntToPtr;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
5079
5080//===----------------------------------------------------------------------===//
5081// PtrToIntInst Class
5082//===----------------------------------------------------------------------===//
5083
/// This class represents a cast from a pointer to an integer.
class PtrToIntInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical PtrToIntInst.
  PtrToIntInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics.
  PtrToIntInst(
    Value *S,                           ///< The value to be converted
    Type *Ty,                           ///< The type to convert to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics.
  PtrToIntInst(
    Value *S,                ///< The value to be converted
    Type *Ty,                ///< The type to convert to
    const Twine &NameStr,    ///< A name for the new instruction
    BasicBlock *InsertAtEnd  ///< The block to insert the instruction into
  );

  /// Gets the pointer operand (operand 0).
  Value *getPointerOperand() { return getOperand(0); }
  /// Gets the pointer operand (operand 0).
  const Value *getPointerOperand() const { return getOperand(0); }
  /// Gets the operand index of the pointer operand.
  static unsigned getPointerOperandIndex() { return 0U; }

  /// Returns the address space of the pointer operand (the source, not the
  /// integer result).
  unsigned getPointerAddressSpace() const {
    return getPointerOperand()->getType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == PtrToInt;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
5130
5131//===----------------------------------------------------------------------===//
5132// BitCastInst Class
5133//===----------------------------------------------------------------------===//
5134
/// This class represents a no-op cast from one type to another.
class BitCastInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical BitCastInst.
  BitCastInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics.
  BitCastInst(
    Value *S,                           ///< The value to be cast
    Type *Ty,                           ///< The type to cast to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics.
  BitCastInst(
    Value *S,                ///< The value to be cast
    Type *Ty,                ///< The type to cast to
    const Twine &NameStr,    ///< A name for the new instruction
    BasicBlock *InsertAtEnd  ///< The block to insert the instruction into
  );

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == BitCast;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
5169
5170//===----------------------------------------------------------------------===//
5171// AddrSpaceCastInst Class
5172//===----------------------------------------------------------------------===//
5173
/// This class represents a conversion between pointers from one address space
/// to another.
class AddrSpaceCastInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical AddrSpaceCastInst.
  AddrSpaceCastInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics.
  AddrSpaceCastInst(
    Value *S,                           ///< The value to be cast
    Type *Ty,                           ///< The type to cast to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics.
  AddrSpaceCastInst(
    Value *S,                ///< The value to be cast
    Type *Ty,                ///< The type to cast to
    const Twine &NameStr,    ///< A name for the new instruction
    BasicBlock *InsertAtEnd  ///< The block to insert the instruction into
  );

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == AddrSpaceCast;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

  /// Gets the pointer operand (operand 0).
  Value *getPointerOperand() {
    return getOperand(0);
  }

  /// Gets the pointer operand (operand 0).
  const Value *getPointerOperand() const {
    return getOperand(0);
  }

  /// Gets the operand index of the pointer operand.
  static unsigned getPointerOperandIndex() {
    return 0U;
  }

  /// Returns the address space of the pointer operand (the source).
  unsigned getSrcAddressSpace() const {
    return getPointerOperand()->getType()->getPointerAddressSpace();
  }

  /// Returns the address space of the result type (the destination).
  unsigned getDestAddressSpace() const {
    return getType()->getPointerAddressSpace();
  }
};
5234
5235/// A helper function that returns the pointer operand of a load or store
5236/// instruction. Returns nullptr if not load or store.
5237inline const Value *getLoadStorePointerOperand(const Value *V) {
5238 if (auto *Load = dyn_cast<LoadInst>(V))
5239 return Load->getPointerOperand();
5240 if (auto *Store = dyn_cast<StoreInst>(V))
5241 return Store->getPointerOperand();
5242 return nullptr;
5243}
5244inline Value *getLoadStorePointerOperand(Value *V) {
5245 return const_cast<Value *>(
5246 getLoadStorePointerOperand(static_cast<const Value *>(V)));
5247}
5248
5249/// A helper function that returns the pointer operand of a load, store
5250/// or GEP instruction. Returns nullptr if not load, store, or GEP.
5251inline const Value *getPointerOperand(const Value *V) {
5252 if (auto *Ptr = getLoadStorePointerOperand(V))
5253 return Ptr;
5254 if (auto *Gep = dyn_cast<GetElementPtrInst>(V))
5255 return Gep->getPointerOperand();
5256 return nullptr;
5257}
5258inline Value *getPointerOperand(Value *V) {
5259 return const_cast<Value *>(getPointerOperand(static_cast<const Value *>(V)));
5260}
5261
5262/// A helper function that returns the alignment of load or store instruction.
5263inline Align getLoadStoreAlignment(Value *I) {
5264 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&(((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
"Expected Load or Store instruction") ? static_cast<void>
(0) : __assert_fail ("(isa<LoadInst>(I) || isa<StoreInst>(I)) && \"Expected Load or Store instruction\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 5265, __PRETTY_FUNCTION__))
5265 "Expected Load or Store instruction")(((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
"Expected Load or Store instruction") ? static_cast<void>
(0) : __assert_fail ("(isa<LoadInst>(I) || isa<StoreInst>(I)) && \"Expected Load or Store instruction\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 5265, __PRETTY_FUNCTION__))
;
5266 if (auto *LI = dyn_cast<LoadInst>(I))
5267 return LI->getAlign();
5268 return cast<StoreInst>(I)->getAlign();
5269}
5270
5271/// A helper function that returns the address space of the pointer operand of
5272/// load or store instruction.
5273inline unsigned getLoadStoreAddressSpace(Value *I) {
5274 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&(((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
"Expected Load or Store instruction") ? static_cast<void>
(0) : __assert_fail ("(isa<LoadInst>(I) || isa<StoreInst>(I)) && \"Expected Load or Store instruction\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 5275, __PRETTY_FUNCTION__))
5275 "Expected Load or Store instruction")(((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
"Expected Load or Store instruction") ? static_cast<void>
(0) : __assert_fail ("(isa<LoadInst>(I) || isa<StoreInst>(I)) && \"Expected Load or Store instruction\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/Instructions.h"
, 5275, __PRETTY_FUNCTION__))
;
5276 if (auto *LI = dyn_cast<LoadInst>(I))
5277 return LI->getPointerAddressSpace();
5278 return cast<StoreInst>(I)->getPointerAddressSpace();
5279}
5280
5281//===----------------------------------------------------------------------===//
5282// FreezeInst Class
5283//===----------------------------------------------------------------------===//
5284
5285/// This class represents a freeze function that returns random concrete
5286/// value if an operand is either a poison value or an undef value
5287class FreezeInst : public UnaryInstruction {
5288protected:
5289 // Note: Instruction needs to be a friend here to call cloneImpl.
5290 friend class Instruction;
5291
5292 /// Clone an identical FreezeInst
5293 FreezeInst *cloneImpl() const;
5294
5295public:
5296 explicit FreezeInst(Value *S,
5297 const Twine &NameStr = "",
5298 Instruction *InsertBefore = nullptr);
5299 FreezeInst(Value *S, const Twine &NameStr, BasicBlock *InsertAtEnd);
5300
5301 // Methods for support type inquiry through isa, cast, and dyn_cast:
5302 static inline bool classof(const Instruction *I) {
5303 return I->getOpcode() == Freeze;
5304 }
5305 static inline bool classof(const Value *V) {
5306 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5307 }
5308};
5309
5310} // end namespace llvm
5311
5312#endif // LLVM_IR_INSTRUCTIONS_H