clang 20.0.0git
CGStmt.cpp
//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Stmt nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGDebugInfo.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Expr.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/SaveAndRestore.h"
#include <optional>

using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                              Statement Emission
//===----------------------------------------------------------------------===//

namespace llvm {
extern cl::opt<bool> EnableSingleByteCoverage;
} // namespace llvm

void CodeGenFunction::EmitStopPoint(const Stmt *S) {
  if (CGDebugInfo *DI = getDebugInfo()) {
    SourceLocation Loc;
    Loc = S->getBeginLoc();
    DI->EmitLocation(Builder, Loc);

    LastStopPoint = Loc;
  }
}

void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
  assert(S && "Null statement?");
  PGO.setCurrentStmt(S);

  // These statements have their own debug info handling.
  if (EmitSimpleStmt(S, Attrs))
    return;

  // Check if we are generating unreachable code.
  if (!HaveInsertPoint()) {
    // If so, and the statement doesn't contain a label, then we do not need to
    // generate actual code. This is safe because (1) the current point is
    // unreachable, so we don't need to execute the code, and (2) we've already
    // handled the statements which update internal data structures (like the
    // local variable map) which could be used by subsequent statements.
    if (!ContainsLabel(S)) {
      // Verify that any decl statements were handled as simple, they may be in
      // scope of subsequent reachable statements.
      assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
      PGO.markStmtMaybeUsed(S);
      return;
    }

    // Otherwise, make a new block to hold the code.
    EnsureInsertPoint();
  }

  // Generate a stoppoint if we are emitting debug info.
  EmitStopPoint(S);

  // Ignore all OpenMP directives except for simd if OpenMP with Simd is
  // enabled.
  if (getLangOpts().OpenMP && getLangOpts().OpenMPSimd) {
    if (const auto *D = dyn_cast<OMPExecutableDirective>(S)) {
      EmitSimpleOMPExecutableDirective(*D);
      return;
    }
  }
  switch (S->getStmtClass()) {
  case Stmt::NoStmtClass:
  case Stmt::CXXCatchStmtClass:
  case Stmt::SEHExceptStmtClass:
  case Stmt::SEHFinallyStmtClass:
  case Stmt::MSDependentExistsStmtClass:
    llvm_unreachable("invalid statement class to emit generically");
  case Stmt::NullStmtClass:
  case Stmt::CompoundStmtClass:
  case Stmt::DeclStmtClass:
  case Stmt::LabelStmtClass:
  case Stmt::AttributedStmtClass:
  case Stmt::GotoStmtClass:
  case Stmt::BreakStmtClass:
  case Stmt::ContinueStmtClass:
  case Stmt::DefaultStmtClass:
  case Stmt::CaseStmtClass:
  case Stmt::SEHLeaveStmtClass:
  case Stmt::SYCLKernelCallStmtClass:
    llvm_unreachable("should have emitted these statements as simple");

#define STMT(Type, Base)
#define ABSTRACT_STMT(Op)
#define EXPR(Type, Base) \
  case Stmt::Type##Class:
#include "clang/AST/StmtNodes.inc"
  {
    // Remember the block we came in on.
    llvm::BasicBlock *incoming = Builder.GetInsertBlock();
    assert(incoming && "expression emission must have an insertion point");

    EmitIgnoredExpr(cast<Expr>(S));

    llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
    assert(outgoing && "expression emission cleared block!");

    // The expression emitters assume (reasonably!) that the insertion
    // point is always set. To maintain that, the call-emission code
    // for noreturn functions has to enter a new block with no
    // predecessors. We want to kill that block and mark the current
    // insertion point unreachable in the common case of a call like
    // "exit();". Since expression emission doesn't otherwise create
    // blocks with no predecessors, we can just test for that.
    // However, we must be careful not to do this to our incoming
    // block, because *statement* emission does sometimes create
    // reachable blocks which will have no predecessors until later in
    // the function. This occurs with, e.g., labels that are not
    // reachable by fallthrough.
    if (incoming != outgoing && outgoing->use_empty()) {
      outgoing->eraseFromParent();
      Builder.ClearInsertionPoint();
    }
    break;
  }

  case Stmt::IndirectGotoStmtClass:
    EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;

  case Stmt::IfStmtClass:      EmitIfStmt(cast<IfStmt>(*S));              break;
  case Stmt::WhileStmtClass:   EmitWhileStmt(cast<WhileStmt>(*S), Attrs); break;
  case Stmt::DoStmtClass:      EmitDoStmt(cast<DoStmt>(*S), Attrs);       break;
  case Stmt::ForStmtClass:     EmitForStmt(cast<ForStmt>(*S), Attrs);     break;

  case Stmt::ReturnStmtClass:  EmitReturnStmt(cast<ReturnStmt>(*S));      break;

  case Stmt::SwitchStmtClass:  EmitSwitchStmt(cast<SwitchStmt>(*S));      break;
  case Stmt::GCCAsmStmtClass:  // Intentional fall-through.
  case Stmt::MSAsmStmtClass:   EmitAsmStmt(cast<AsmStmt>(*S));            break;
  case Stmt::CoroutineBodyStmtClass:
    EmitCoroutineBody(cast<CoroutineBodyStmt>(*S));
    break;
  case Stmt::CoreturnStmtClass:
    EmitCoreturnStmt(cast<CoreturnStmt>(*S));
    break;
  case Stmt::CapturedStmtClass: {
    const CapturedStmt *CS = cast<CapturedStmt>(S);
    EmitCapturedStmt(*CS, CS->getCapturedRegionKind());
    }
    break;
  case Stmt::ObjCAtTryStmtClass:
    EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
    break;
  case Stmt::ObjCAtCatchStmtClass:
    llvm_unreachable(
        "@catch statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtFinallyStmtClass:
    llvm_unreachable(
        "@finally statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtThrowStmtClass:
    EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
    break;
  case Stmt::ObjCAtSynchronizedStmtClass:
    EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
    break;
  case Stmt::ObjCForCollectionStmtClass:
    EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
    break;
  case Stmt::ObjCAutoreleasePoolStmtClass:
    EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
    break;

  case Stmt::CXXTryStmtClass:
    EmitCXXTryStmt(cast<CXXTryStmt>(*S));
    break;
  case Stmt::CXXForRangeStmtClass:
    EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S), Attrs);
    break;
  case Stmt::SEHTryStmtClass:
    EmitSEHTryStmt(cast<SEHTryStmt>(*S));
    break;
  case Stmt::OMPMetaDirectiveClass:
    EmitOMPMetaDirective(cast<OMPMetaDirective>(*S));
    break;
  case Stmt::OMPCanonicalLoopClass:
    EmitOMPCanonicalLoop(cast<OMPCanonicalLoop>(S));
    break;
  case Stmt::OMPParallelDirectiveClass:
    EmitOMPParallelDirective(cast<OMPParallelDirective>(*S));
    break;
  case Stmt::OMPSimdDirectiveClass:
    EmitOMPSimdDirective(cast<OMPSimdDirective>(*S));
    break;
  case Stmt::OMPTileDirectiveClass:
    EmitOMPTileDirective(cast<OMPTileDirective>(*S));
    break;
  case Stmt::OMPUnrollDirectiveClass:
    EmitOMPUnrollDirective(cast<OMPUnrollDirective>(*S));
    break;
  case Stmt::OMPReverseDirectiveClass:
    EmitOMPReverseDirective(cast<OMPReverseDirective>(*S));
    break;
  case Stmt::OMPInterchangeDirectiveClass:
    EmitOMPInterchangeDirective(cast<OMPInterchangeDirective>(*S));
    break;
  case Stmt::OMPForDirectiveClass:
    EmitOMPForDirective(cast<OMPForDirective>(*S));
    break;
  case Stmt::OMPForSimdDirectiveClass:
    EmitOMPForSimdDirective(cast<OMPForSimdDirective>(*S));
    break;
  case Stmt::OMPSectionsDirectiveClass:
    EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S));
    break;
  case Stmt::OMPSectionDirectiveClass:
    EmitOMPSectionDirective(cast<OMPSectionDirective>(*S));
    break;
  case Stmt::OMPSingleDirectiveClass:
    EmitOMPSingleDirective(cast<OMPSingleDirective>(*S));
    break;
  case Stmt::OMPMasterDirectiveClass:
    EmitOMPMasterDirective(cast<OMPMasterDirective>(*S));
    break;
  case Stmt::OMPCriticalDirectiveClass:
    EmitOMPCriticalDirective(cast<OMPCriticalDirective>(*S));
    break;
  case Stmt::OMPParallelForDirectiveClass:
    EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S));
    break;
  case Stmt::OMPParallelForSimdDirectiveClass:
    EmitOMPParallelForSimdDirective(cast<OMPParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMasterDirectiveClass:
    EmitOMPParallelMasterDirective(cast<OMPParallelMasterDirective>(*S));
    break;
  case Stmt::OMPParallelSectionsDirectiveClass:
    EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S));
    break;
  case Stmt::OMPTaskDirectiveClass:
    EmitOMPTaskDirective(cast<OMPTaskDirective>(*S));
    break;
  case Stmt::OMPTaskyieldDirectiveClass:
    EmitOMPTaskyieldDirective(cast<OMPTaskyieldDirective>(*S));
    break;
  case Stmt::OMPErrorDirectiveClass:
    EmitOMPErrorDirective(cast<OMPErrorDirective>(*S));
    break;
  case Stmt::OMPBarrierDirectiveClass:
    EmitOMPBarrierDirective(cast<OMPBarrierDirective>(*S));
    break;
  case Stmt::OMPTaskwaitDirectiveClass:
    EmitOMPTaskwaitDirective(cast<OMPTaskwaitDirective>(*S));
    break;
  case Stmt::OMPTaskgroupDirectiveClass:
    EmitOMPTaskgroupDirective(cast<OMPTaskgroupDirective>(*S));
    break;
  case Stmt::OMPFlushDirectiveClass:
    EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
    break;
  case Stmt::OMPDepobjDirectiveClass:
    EmitOMPDepobjDirective(cast<OMPDepobjDirective>(*S));
    break;
  case Stmt::OMPScanDirectiveClass:
    EmitOMPScanDirective(cast<OMPScanDirective>(*S));
    break;
  case Stmt::OMPOrderedDirectiveClass:
    EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
    break;
  case Stmt::OMPAtomicDirectiveClass:
    EmitOMPAtomicDirective(cast<OMPAtomicDirective>(*S));
    break;
  case Stmt::OMPTargetDirectiveClass:
    EmitOMPTargetDirective(cast<OMPTargetDirective>(*S));
    break;
  case Stmt::OMPTeamsDirectiveClass:
    EmitOMPTeamsDirective(cast<OMPTeamsDirective>(*S));
    break;
  case Stmt::OMPCancellationPointDirectiveClass:
    EmitOMPCancellationPointDirective(cast<OMPCancellationPointDirective>(*S));
    break;
  case Stmt::OMPCancelDirectiveClass:
    EmitOMPCancelDirective(cast<OMPCancelDirective>(*S));
    break;
  case Stmt::OMPTargetDataDirectiveClass:
    EmitOMPTargetDataDirective(cast<OMPTargetDataDirective>(*S));
    break;
  case Stmt::OMPTargetEnterDataDirectiveClass:
    EmitOMPTargetEnterDataDirective(cast<OMPTargetEnterDataDirective>(*S));
    break;
  case Stmt::OMPTargetExitDataDirectiveClass:
    EmitOMPTargetExitDataDirective(cast<OMPTargetExitDataDirective>(*S));
    break;
  case Stmt::OMPTargetParallelDirectiveClass:
    EmitOMPTargetParallelDirective(cast<OMPTargetParallelDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForDirectiveClass:
    EmitOMPTargetParallelForDirective(cast<OMPTargetParallelForDirective>(*S));
    break;
  case Stmt::OMPTaskLoopDirectiveClass:
    EmitOMPTaskLoopDirective(cast<OMPTaskLoopDirective>(*S));
    break;
  case Stmt::OMPTaskLoopSimdDirectiveClass:
    EmitOMPTaskLoopSimdDirective(cast<OMPTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPMasterTaskLoopDirectiveClass:
    EmitOMPMasterTaskLoopDirective(cast<OMPMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPMaskedTaskLoopDirectiveClass:
    EmitOMPMaskedTaskLoopDirective(cast<OMPMaskedTaskLoopDirective>(*S));
    break;
  case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
    EmitOMPMasterTaskLoopSimdDirective(
        cast<OMPMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPMaskedTaskLoopSimdDirectiveClass:
    EmitOMPMaskedTaskLoopSimdDirective(
        cast<OMPMaskedTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
    EmitOMPParallelMasterTaskLoopDirective(
        cast<OMPParallelMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPParallelMaskedTaskLoopDirectiveClass:
    EmitOMPParallelMaskedTaskLoopDirective(
        cast<OMPParallelMaskedTaskLoopDirective>(*S));
    break;
  case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
    EmitOMPParallelMasterTaskLoopSimdDirective(
        cast<OMPParallelMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass:
    EmitOMPParallelMaskedTaskLoopSimdDirective(
        cast<OMPParallelMaskedTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeDirectiveClass:
    EmitOMPDistributeDirective(cast<OMPDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetUpdateDirectiveClass:
    EmitOMPTargetUpdateDirective(cast<OMPTargetUpdateDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForDirectiveClass:
    EmitOMPDistributeParallelForDirective(
        cast<OMPDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForSimdDirectiveClass:
    EmitOMPDistributeParallelForSimdDirective(
        cast<OMPDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeSimdDirectiveClass:
    EmitOMPDistributeSimdDirective(cast<OMPDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForSimdDirectiveClass:
    EmitOMPTargetParallelForSimdDirective(
        cast<OMPTargetParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetSimdDirectiveClass:
    EmitOMPTargetSimdDirective(cast<OMPTargetSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeDirectiveClass:
    EmitOMPTeamsDistributeDirective(cast<OMPTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeSimdDirectiveClass:
    EmitOMPTeamsDistributeSimdDirective(
        cast<OMPTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTeamsDistributeParallelForSimdDirective(
        cast<OMPTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
    EmitOMPTeamsDistributeParallelForDirective(
        cast<OMPTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDirectiveClass:
    EmitOMPTargetTeamsDirective(cast<OMPTargetTeamsDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeDirectiveClass:
    EmitOMPTargetTeamsDistributeDirective(
        cast<OMPTargetTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForDirective(
        cast<OMPTargetTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForSimdDirective(
        cast<OMPTargetTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeSimdDirective(
        cast<OMPTargetTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPInteropDirectiveClass:
    EmitOMPInteropDirective(cast<OMPInteropDirective>(*S));
    break;
  case Stmt::OMPDispatchDirectiveClass:
    CGM.ErrorUnsupported(S, "OpenMP dispatch directive");
    break;
  case Stmt::OMPScopeDirectiveClass:
    EmitOMPScopeDirective(cast<OMPScopeDirective>(*S));
    break;
  case Stmt::OMPMaskedDirectiveClass:
    EmitOMPMaskedDirective(cast<OMPMaskedDirective>(*S));
    break;
  case Stmt::OMPGenericLoopDirectiveClass:
    EmitOMPGenericLoopDirective(cast<OMPGenericLoopDirective>(*S));
    break;
  case Stmt::OMPTeamsGenericLoopDirectiveClass:
    EmitOMPTeamsGenericLoopDirective(cast<OMPTeamsGenericLoopDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
    EmitOMPTargetTeamsGenericLoopDirective(
        cast<OMPTargetTeamsGenericLoopDirective>(*S));
    break;
  case Stmt::OMPParallelGenericLoopDirectiveClass:
    EmitOMPParallelGenericLoopDirective(
        cast<OMPParallelGenericLoopDirective>(*S));
    break;
  case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
    EmitOMPTargetParallelGenericLoopDirective(
        cast<OMPTargetParallelGenericLoopDirective>(*S));
    break;
  case Stmt::OMPParallelMaskedDirectiveClass:
    EmitOMPParallelMaskedDirective(cast<OMPParallelMaskedDirective>(*S));
    break;
  case Stmt::OMPAssumeDirectiveClass:
    EmitOMPAssumeDirective(cast<OMPAssumeDirective>(*S));
    break;
  case Stmt::OpenACCComputeConstructClass:
    EmitOpenACCComputeConstruct(cast<OpenACCComputeConstruct>(*S));
    break;
  case Stmt::OpenACCLoopConstructClass:
    EmitOpenACCLoopConstruct(cast<OpenACCLoopConstruct>(*S));
    break;
  case Stmt::OpenACCCombinedConstructClass:
    EmitOpenACCCombinedConstruct(cast<OpenACCCombinedConstruct>(*S));
    break;
  case Stmt::OpenACCDataConstructClass:
    EmitOpenACCDataConstruct(cast<OpenACCDataConstruct>(*S));
    break;
  case Stmt::OpenACCEnterDataConstructClass:
    EmitOpenACCEnterDataConstruct(cast<OpenACCEnterDataConstruct>(*S));
    break;
  case Stmt::OpenACCExitDataConstructClass:
    EmitOpenACCExitDataConstruct(cast<OpenACCExitDataConstruct>(*S));
    break;
  case Stmt::OpenACCHostDataConstructClass:
    EmitOpenACCHostDataConstruct(cast<OpenACCHostDataConstruct>(*S));
    break;
  case Stmt::OpenACCWaitConstructClass:
    EmitOpenACCWaitConstruct(cast<OpenACCWaitConstruct>(*S));
    break;
  case Stmt::OpenACCInitConstructClass:
    EmitOpenACCInitConstruct(cast<OpenACCInitConstruct>(*S));
    break;
  case Stmt::OpenACCShutdownConstructClass:
    EmitOpenACCShutdownConstruct(cast<OpenACCShutdownConstruct>(*S));
    break;
  case Stmt::OpenACCSetConstructClass:
    EmitOpenACCSetConstruct(cast<OpenACCSetConstruct>(*S));
    break;
  case Stmt::OpenACCUpdateConstructClass:
    EmitOpenACCUpdateConstruct(cast<OpenACCUpdateConstruct>(*S));
    break;
  }
}

bool CodeGenFunction::EmitSimpleStmt(const Stmt *S,
                                     ArrayRef<const Attr *> Attrs) {
  switch (S->getStmtClass()) {
  default:
    return false;
  case Stmt::NullStmtClass:
    break;
  case Stmt::CompoundStmtClass:
    EmitCompoundStmt(cast<CompoundStmt>(*S));
    break;
  case Stmt::DeclStmtClass:
    EmitDeclStmt(cast<DeclStmt>(*S));
    break;
  case Stmt::LabelStmtClass:
    EmitLabelStmt(cast<LabelStmt>(*S));
    break;
  case Stmt::AttributedStmtClass:
    EmitAttributedStmt(cast<AttributedStmt>(*S));
    break;
  case Stmt::GotoStmtClass:
    EmitGotoStmt(cast<GotoStmt>(*S));
    break;
  case Stmt::BreakStmtClass:
    EmitBreakStmt(cast<BreakStmt>(*S));
    break;
  case Stmt::ContinueStmtClass:
    EmitContinueStmt(cast<ContinueStmt>(*S));
    break;
  case Stmt::DefaultStmtClass:
    EmitDefaultStmt(cast<DefaultStmt>(*S), Attrs);
    break;
  case Stmt::CaseStmtClass:
    EmitCaseStmt(cast<CaseStmt>(*S), Attrs);
    break;
  case Stmt::SEHLeaveStmtClass:
    EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S));
    break;
  case Stmt::SYCLKernelCallStmtClass:
    // SYCL kernel call statements are generated as wrappers around the body
    // of functions declared with the sycl_kernel_entry_point attribute. Such
    // functions are used to specify how a SYCL kernel (a function object) is
    // to be invoked; the SYCL kernel call statement contains a transformed
    // variation of the function body and is used to generate a SYCL kernel
    // caller function; a function that serves as the device side entry point
    // used to execute the SYCL kernel. The sycl_kernel_entry_point attributed
    // function is invoked by host code in order to trigger emission of the
    // device side SYCL kernel caller function and to generate metadata needed
    // by SYCL run-time library implementations; the function is otherwise
    // intended to have no effect. As such, the function body is not evaluated
    // as part of the invocation during host compilation (and the function
    // should not be called or emitted during device compilation); the SYCL
    // kernel call statement is thus handled as a null statement for the
    // purpose of code generation.
    break;
  }
  return true;
}

/// EmitCompoundStmt - Emit a compound statement {..} node. If GetLast is true,
/// this captures the expression result of the last sub-statement and returns it
/// (for use by the statement expression extension).
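/// For example, in the GNU statement expression  int x = ({ f(); g(); 42; });
/// the value of the last sub-statement, 42, becomes the value of the whole
/// expression.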
Address CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
                                          AggValueSlot AggSlot) {
  PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
                                "LLVM IR generation of compound statement ('{}')");

  // Keep track of the current cleanup stack depth, including debug scopes.
  LexicalScope Scope(*this, S.getSourceRange());

  return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
}

Address
CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
                                              bool GetLast,
                                              AggValueSlot AggSlot) {

  const Stmt *ExprResult = S.getStmtExprResult();
  assert((!GetLast || (GetLast && ExprResult)) &&
         "If GetLast is true then the CompoundStmt must have a StmtExprResult");

  Address RetAlloca = Address::invalid();

  for (auto *CurStmt : S.body()) {
    if (GetLast && ExprResult == CurStmt) {
      // We have to special case labels here. They are statements, but when put
      // at the end of a statement expression, they yield the value of their
      // subexpression. Handle this by walking through all labels we encounter,
      // emitting them before we evaluate the subexpr.
      // Similar issues arise for attributed statements.
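      // For example, in ({ L: 42; }) the LabelStmt 'L:' wraps the result
      // expression, so the label is emitted before '42' is evaluated.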
      while (!isa<Expr>(ExprResult)) {
        if (const auto *LS = dyn_cast<LabelStmt>(ExprResult)) {
          EmitLabel(LS->getDecl());
          ExprResult = LS->getSubStmt();
        } else if (const auto *AS = dyn_cast<AttributedStmt>(ExprResult)) {
          // FIXME: Update this if we ever have attributes that affect the
          // semantics of an expression.
          ExprResult = AS->getSubStmt();
        } else {
          llvm_unreachable("unknown value statement");
        }
      }

      EnsureInsertPoint();

      const Expr *E = cast<Expr>(ExprResult);
      QualType ExprTy = E->getType();
      if (hasAggregateEvaluationKind(ExprTy)) {
        EmitAggExpr(E, AggSlot);
      } else {
        // We can't return an RValue here because there might be cleanups at
        // the end of the StmtExpr. Because of that, we have to emit the result
        // here into a temporary alloca.
        RetAlloca = CreateMemTemp(ExprTy);
        EmitAnyExprToMem(E, RetAlloca, Qualifiers(),
                         /*IsInit*/ false);
      }
    } else {
      EmitStmt(CurStmt);
    }
  }

  return RetAlloca;
}

void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
  llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());

  // If there is a cleanup stack, then it isn't worth trying to
  // simplify this block (we would need to remove it from the scope map
  // and cleanup entry).
  if (!EHStack.empty())
    return;

  // Can only simplify direct branches.
  if (!BI || !BI->isUnconditional())
    return;

  // Can only simplify empty blocks.
  if (BI->getIterator() != BB->begin())
    return;

  BB->replaceAllUsesWith(BI->getSuccessor(0));
  BI->eraseFromParent();
  BB->eraseFromParent();
}

void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Fall out of the current block (if necessary).
  EmitBranch(BB);

  if (IsFinished && BB->use_empty()) {
    delete BB;
    return;
  }

  // Place the block after the current block, if possible, or else at
  // the end of the function.
  if (CurBB && CurBB->getParent())
    CurFn->insert(std::next(CurBB->getIterator()), BB);
  else
    CurFn->insert(CurFn->end(), BB);
  Builder.SetInsertPoint(BB);
}

void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
  // Emit a branch from the current block to the target one if this
  // was a real block. If this was just a fall-through block after a
  // terminator, don't emit it.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (!CurBB || CurBB->getTerminator()) {
    // If there is no insert point or the previous block is already
    // terminated, don't touch it.
  } else {
    // Otherwise, create a fall-through branch.
    Builder.CreateBr(Target);
  }

  Builder.ClearInsertionPoint();
}

void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
  bool inserted = false;
  for (llvm::User *u : block->users()) {
    if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
      CurFn->insert(std::next(insn->getParent()->getIterator()), block);
      inserted = true;
      break;
    }
  }

  if (!inserted)
    CurFn->insert(CurFn->end(), block);

  Builder.SetInsertPoint(block);
}

CodeGenFunction::JumpDest
CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
  JumpDest &Dest = LabelMap[D];
  if (Dest.isValid()) return Dest;

  // Create, but don't insert, the new block.
  Dest = JumpDest(createBasicBlock(D->getName()),
                  EHScopeStack::stable_iterator::invalid(),
                  NextCleanupDestIndex++);
  return Dest;
}

void CodeGenFunction::EmitLabel(const LabelDecl *D) {
  // Add this label to the current lexical scope if we're within any
  // normal cleanups. Jumps "in" to this label --- when permitted by
  // the language --- may need to be routed around such cleanups.
  if (EHStack.hasNormalCleanups() && CurLexicalScope)
    CurLexicalScope->addLabel(D);

  JumpDest &Dest = LabelMap[D];

  // If we didn't need a forward reference to this label, just go
  // ahead and create a destination at the current scope.
  if (!Dest.isValid()) {
    Dest = getJumpDestInCurrentScope(D->getName());

  // Otherwise, we need to give this label a target depth and remove
  // it from the branch-fixups list.
  } else {
    assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
    Dest.setScopeDepth(EHStack.stable_begin());
    ResolveBranchFixups(Dest.getBlock());
  }

  EmitBlock(Dest.getBlock());

  // Emit debug info for labels.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (CGM.getCodeGenOpts().hasReducedDebugInfo()) {
      DI->setLocation(D->getLocation());
      DI->EmitLabel(D, Builder);
    }
  }

  incrementProfileCounter(D->getStmt());
}

/// Change the cleanup scope of the labels in this lexical scope to
/// match the scope of the enclosing context.
void CodeGenFunction::LexicalScope::rescopeLabels() {
  assert(!Labels.empty());
  EHScopeStack::stable_iterator innermostScope
    = CGF.EHStack.getInnermostNormalCleanup();

  // Change the scope depth of all the labels.
  for (SmallVectorImpl<const LabelDecl*>::const_iterator
         i = Labels.begin(), e = Labels.end(); i != e; ++i) {
    assert(CGF.LabelMap.count(*i));
    JumpDest &dest = CGF.LabelMap.find(*i)->second;
    assert(dest.getScopeDepth().isValid());
    assert(innermostScope.encloses(dest.getScopeDepth()));
    dest.setScopeDepth(innermostScope);
  }

  // Reparent the labels if the new scope also has cleanups.
  if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
    ParentScope->Labels.append(Labels.begin(), Labels.end());
  }
}


void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
  EmitLabel(S.getDecl());

  // IsEHa - emit eha.scope.begin if it's a side entry of a scope
  if (getLangOpts().EHAsynch && S.isSideEntry())
    EmitSehCppScopeBegin();

  EmitStmt(S.getSubStmt());
}

void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
  bool nomerge = false;
  bool noinline = false;
  bool alwaysinline = false;
  bool noconvergent = false;
  HLSLControlFlowHintAttr::Spelling flattenOrBranch =
      HLSLControlFlowHintAttr::SpellingNotCalculated;
  const CallExpr *musttail = nullptr;

  for (const auto *A : S.getAttrs()) {
    switch (A->getKind()) {
    default:
      break;
    case attr::NoMerge:
      nomerge = true;
      break;
    case attr::NoInline:
      noinline = true;
      break;
    case attr::AlwaysInline:
      alwaysinline = true;
      break;
    case attr::NoConvergent:
      noconvergent = true;
      break;
    case attr::MustTail: {
      const Stmt *Sub = S.getSubStmt();
      const ReturnStmt *R = cast<ReturnStmt>(Sub);
      musttail = cast<CallExpr>(R->getRetValue()->IgnoreParens());
    } break;
    case attr::CXXAssume: {
      const Expr *Assumption = cast<CXXAssumeAttr>(A)->getAssumption();
      if (getLangOpts().CXXAssumptions && Builder.GetInsertBlock() &&
          !Assumption->HasSideEffects(getContext())) {
        llvm::Value *AssumptionVal = EmitCheckedArgForAssume(Assumption);
        Builder.CreateAssumption(AssumptionVal);
      }
    } break;
    case attr::HLSLControlFlowHint: {
      flattenOrBranch = cast<HLSLControlFlowHintAttr>(A)->getSemanticSpelling();
    } break;
    }
  }
  SaveAndRestore save_nomerge(InNoMergeAttributedStmt, nomerge);
  SaveAndRestore save_noinline(InNoInlineAttributedStmt, noinline);
  SaveAndRestore save_alwaysinline(InAlwaysInlineAttributedStmt, alwaysinline);
  SaveAndRestore save_noconvergent(InNoConvergentAttributedStmt, noconvergent);
  SaveAndRestore save_musttail(MustTailCall, musttail);
  SaveAndRestore save_flattenOrBranch(HLSLControlFlowAttr, flattenOrBranch);
  EmitStmt(S.getSubStmt(), S.getAttrs());
}

void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
}


void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
  if (const LabelDecl *Target = S.getConstantTarget()) {
    EmitBranchThroughCleanup(getJumpDestForLabel(Target));
    return;
  }

  // Ensure that we have an i8* for our PHI node.
  llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
                                         Int8PtrTy, "addr");
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Get the basic block for the indirect goto.
  llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();

  // The first instruction in the block has to be the PHI for the switch dest,
  // add an entry for this branch.
  cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);

  EmitBranch(IndGotoBB);
}

void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
  const Stmt *Else = S.getElse();

  // The else branch of a consteval if statement is always the only branch that
  // can be runtime evaluated.
  if (S.isConsteval()) {
    const Stmt *Executed = S.isNegatedConsteval() ? S.getThen() : Else;
    if (Executed) {
      RunCleanupsScope ExecutedScope(*this);
      EmitStmt(Executed);
    }
    return;
  }

  // C99 6.8.4.1: The first substatement is executed if the expression compares
  // unequal to 0. The condition must be a scalar type.
  LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());
  ApplyDebugLocation DL(*this, S.getCond());

  if (S.getInit())
    EmitStmt(S.getInit());

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm of the if/else.
  bool CondConstant;
  if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant,
                                   S.isConstexpr())) {
    // Figure out which block (then or else) is executed.
    const Stmt *Executed = S.getThen();
    const Stmt *Skipped = Else;
    if (!CondConstant) // Condition false?
      std::swap(Executed, Skipped);

    // If the skipped block has no labels in it, just emit the executed block.
    // This avoids emitting dead code and simplifies the CFG substantially.
    if (S.isConstexpr() || !ContainsLabel(Skipped)) {
      if (CondConstant)
        incrementProfileCounter(&S);
      if (Executed) {
        RunCleanupsScope ExecutedScope(*this);
        EmitStmt(Executed);
      }
      PGO.markStmtMaybeUsed(Skipped);
      return;
    }
  }

  // Otherwise, the condition did not fold, or we couldn't elide it. Just emit
  // the conditional branch.
  llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
  llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
  llvm::BasicBlock *ElseBlock = ContBlock;
  if (Else)
    ElseBlock = createBasicBlock("if.else");

  // Prefer the PGO based weights over the likelihood attribute.
  // When the build isn't optimized the metadata isn't used, so don't generate
  // it.
  // Also, differentiate between disabled PGO and a never executed branch with
  // PGO. Assuming PGO is in use:
  // - we want to ignore the [[likely]] attribute if the branch is never
  // executed,
  // - assuming the profile is poor, preserving the attribute may still be
  // beneficial.
  // As an approximation, preserve the attribute only if both the branch and the
  // parent context were not executed.
  Stmt::Likelihood LH = Stmt::LH_None;
  uint64_t ThenCount = getProfileCount(S.getThen());
  if (!ThenCount && !getCurrentProfileCount() &&
      CGM.getCodeGenOpts().OptimizationLevel)
    LH = Stmt::getLikelihood(S.getThen(), Else);

  // When measuring MC/DC, always fully evaluate the condition up front using
  // EvaluateExprAsBool() so that the test vector bitmap can be updated prior to
  // executing the body of the if.then or if.else. This is useful for when
  // there is a 'return' within the body, but this is particularly beneficial
  // when one if-stmt is nested within another if-stmt so that all of the MC/DC
  // updates are kept linear and consistent.
  if (!CGM.getCodeGenOpts().MCDCCoverage)
    EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock, ThenCount, LH);
  else {
    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
    Builder.CreateCondBr(BoolCondVal, ThenBlock, ElseBlock);
  }

  // Emit the 'then' code.
  EmitBlock(ThenBlock);
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getThen());
  else
    incrementProfileCounter(&S);
  {
    RunCleanupsScope ThenScope(*this);
    EmitStmt(S.getThen());
  }
  EmitBranch(ContBlock);

  // Emit the 'else' code if present.
  if (Else) {
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBlock(ElseBlock);
    }
    // When single byte coverage mode is enabled, add a counter to else block.
    if (llvm::EnableSingleByteCoverage)
      incrementProfileCounter(Else);
    {
      RunCleanupsScope ElseScope(*this);
      EmitStmt(Else);
    }
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBranch(ContBlock);
    }
  }

  // Emit the continuation block for code after the if.
  EmitBlock(ContBlock, true);

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);
}

bool CodeGenFunction::checkIfLoopMustProgress(const Expr *ControllingExpression,
                                              bool HasEmptyBody) {
  if (CGM.getCodeGenOpts().getFiniteLoops() ==
      CodeGenOptions::FiniteLoopsKind::Never)
    return false;

  // Now apply rules for plain C (see 6.8.5.6 in C11).
  // Loops with constant conditions do not have to make progress in any C
  // version.
  // As an extension, we consider loops whose constant expression
  // can be constant-folded.
  Expr::EvalResult Result;
  bool CondIsConstInt =
      !ControllingExpression ||
      (ControllingExpression->EvaluateAsInt(Result, getContext()) &&
       Result.Val.isInt());

  bool CondIsTrue = CondIsConstInt && (!ControllingExpression ||
                                       Result.Val.getInt().getBoolValue());

  // Loops with non-constant conditions must make progress in C11 and later.
  if (getLangOpts().C11 && !CondIsConstInt)
    return true;

  // [C++26][intro.progress] (DR)
  // The implementation may assume that any thread will eventually do one of the
  // following:
  // [...]
  // - continue execution of a trivial infinite loop ([stmt.iter.general]).
  if (CGM.getCodeGenOpts().getFiniteLoops() ==
          CodeGenOptions::FiniteLoopsKind::Always ||
      getLangOpts().CPlusPlus11) {
    if (HasEmptyBody && CondIsTrue) {
      CurFn->removeFnAttr(llvm::Attribute::MustProgress);
      return false;
    }
    return true;
  }
  return false;
}

// [C++26][stmt.iter.general] (DR)
// A trivially empty iteration statement is an iteration statement matching one
// of the following forms:
//  - while ( expression ) ;
//  - while ( expression ) { }
//  - do ; while ( expression ) ;
//  - do { } while ( expression ) ;
//  - for ( init-statement expression(opt); ) ;
//  - for ( init-statement expression(opt); ) { }
template <typename LoopStmt> static bool hasEmptyLoopBody(const LoopStmt &S) {
  if constexpr (std::is_same_v<LoopStmt, ForStmt>) {
    if (S.getInc())
      return false;
  }
  const Stmt *Body = S.getBody();
  if (!Body || isa<NullStmt>(Body))
    return true;
  if (const CompoundStmt *Compound = dyn_cast<CompoundStmt>(Body))
    return Compound->body_empty();
  return false;
}

void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
                                    ArrayRef<const Attr *> WhileAttrs) {
  // Emit the header for the loop, which will also become
  // the continue target.
  JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
  EmitBlock(LoopHeader.getBlock());

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.push_back(
        emitConvergenceLoopToken(LoopHeader.getBlock()));

  // Create an exit block for when the condition fails, which will
  // also become the break target.
  JumpDest LoopExit = getJumpDestInCurrentScope("while.end");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));

  // C++ [stmt.while]p2:
  //   When the condition of a while statement is a declaration, the
  //   scope of the variable that is declared extends from its point
  //   of declaration (3.3.2) to the end of the while statement.
  //   [...]
  //   The object created in a condition is destroyed and created
  //   with each iteration of the loop.
  RunCleanupsScope ConditionScope(*this);

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // Evaluate the conditional in the while header. C99 6.8.5.1: The
  // evaluation of the controlling expression takes place before each
  // execution of the loop body.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  // while(1) is common, avoid extra exit blocks. Be sure
  // to correctly handle break/continue though.
  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  bool EmitBoolCondBranch = !C || !C->isOne();
  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), CGM.getCodeGenOpts(),
                 WhileAttrs, SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(S.getCond(), hasEmptyLoopBody(S)));

  // When single byte coverage mode is enabled, add a counter to loop condition.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getCond());

  // As long as the condition is true, go to the loop body.
  llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
  if (EmitBoolCondBranch) {
    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    if (ConditionScope.requiresCleanups())
      ExitBlock = createBasicBlock("while.exit");
    llvm::MDNode *Weights =
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
          BoolCondVal, Stmt::getLikelihood(S.getBody()));
    Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock, Weights);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }
  } else if (const Attr *A = Stmt::getLikelihoodAttr(S.getBody())) {
    CGM.getDiags().Report(A->getLocation(),
                          diag::warn_attribute_has_no_effect_on_infinite_loop)
        << A << A->getRange();
    CGM.getDiags().Report(
        S.getWhileLoc(),
        diag::note_attribute_has_no_effect_on_infinite_loop_here)
        << SourceRange(S.getWhileLoc(), S.getRParenLoc());
  }

  // Emit the loop body. We have to emit this in a cleanup scope
  // because it might be a singleton DeclStmt.
  {
    RunCleanupsScope BodyScope(*this);
    EmitBlock(LoopBody);
    // When single byte coverage mode is enabled, add a counter to the body.
    if (llvm::EnableSingleByteCoverage)
      incrementProfileCounter(S.getBody());
    else
      incrementProfileCounter(&S);
    EmitStmt(S.getBody());
  }

  BreakContinueStack.pop_back();

  // Immediately force cleanup.
  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  // Branch to the loop header again.
  EmitBranch(LoopHeader.getBlock());

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock(), true);

  // The LoopHeader typically is just a branch if we skipped emitting
  // a branch, try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopHeader.getBlock());

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.pop_back();
}

void CodeGenFunction::EmitDoStmt(const DoStmt &S,
                                 ArrayRef<const Attr *> DoAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
  JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");

  uint64_t ParentCount = getCurrentProfileCount();

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));

  // Emit the body of the loop.
  llvm::BasicBlock *LoopBody = createBasicBlock("do.body");

  if (llvm::EnableSingleByteCoverage)
    EmitBlockWithFallThrough(LoopBody, S.getBody());
  else
    EmitBlockWithFallThrough(LoopBody, &S);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.push_back(emitConvergenceLoopToken(LoopBody));

  {
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  EmitBlock(LoopCond.getBlock());
  // When single byte coverage mode is enabled, add a counter to loop condition.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getCond());

  // C99 6.8.5.2: "The evaluation of the controlling expression takes place
  // after each execution of the loop body."

  // Evaluate the conditional in the while header.
  // C99 6.8.5p2/p4: The first substatement is executed if the expression
  // compares unequal to 0. The condition must be a scalar type.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  BreakContinueStack.pop_back();

  // "do {} while (0)" is common in macros, avoid extra blocks. Be sure
  // to correctly handle break/continue though.
  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  bool EmitBoolCondBranch = !C || !C->isZero();

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopBody, CGM.getContext(), CGM.getCodeGenOpts(), DoAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(S.getCond(), hasEmptyLoopBody(S)));

  // As long as the condition is true, iterate the loop.
  if (EmitBoolCondBranch) {
    uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
    Builder.CreateCondBr(
        BoolCondVal, LoopBody, LoopExit.getBlock(),
        createProfileWeightsForLoop(S.getCond(), BackedgeCount));
  }

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock());

  // The DoCond block typically is just a branch if we skipped
  // emitting a branch, try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopCond.getBlock());

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.pop_back();
}

void CodeGenFunction::EmitForStmt(const ForStmt &S,
                                  ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first part before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  JumpDest CondDest = getJumpDestInCurrentScope("for.cond");
  llvm::BasicBlock *CondBlock = CondDest.getBlock();
  EmitBlock(CondBlock);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.push_back(emitConvergenceLoopToken(CondBlock));

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(S.getCond(), hasEmptyLoopBody(S)));

  // Create a cleanup scope for the condition variable cleanups.
  LexicalScope ConditionScope(*this, S.getSourceRange());

  // If the for loop doesn't have an increment we can just use the condition as
  // the continue block. Otherwise, if there is no condition variable, we can
  // form the continue block now. If there is a condition variable, we can't
  // form the continue block until after we've emitted the condition, because
  // the condition is in scope in the increment, but Sema's jump diagnostics
  // ensure that there are no continues from the condition variable that jump
  // to the loop increment.
  JumpDest Continue;
  if (!S.getInc())
    Continue = CondDest;
  else if (!S.getConditionVariable())
    Continue = getJumpDestInCurrentScope("for.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  if (S.getCond()) {
    // If the for statement has a condition scope, emit the local variable
    // declaration.
    if (S.getConditionVariable()) {
      EmitDecl(*S.getConditionVariable());

      // We have entered the condition variable's scope, so we're now able to
      // jump to the continue block.
      Continue = S.getInc() ? getJumpDestInCurrentScope("for.inc") : CondDest;
      BreakContinueStack.back().ContinueBlock = Continue;
    }

    // When single byte coverage mode is enabled, add a counter to loop
    // condition.
    if (llvm::EnableSingleByteCoverage)
      incrementProfileCounter(S.getCond());

    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    // If there are any cleanups between here and the loop-exit scope,
    // create a block to stage a loop exit along.
    if (ForScope.requiresCleanups())
      ExitBlock = createBasicBlock("for.cond.cleanup");

    // As long as the condition is true, iterate the loop.
    llvm::BasicBlock *ForBody = createBasicBlock("for.body");

    // C99 6.8.5p2/p4: The first substatement is executed if the expression
    // compares unequal to 0. The condition must be a scalar type.
    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
    llvm::MDNode *Weights =
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
          BoolCondVal, Stmt::getLikelihood(S.getBody()));

    Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }

    EmitBlock(ForBody);
  } else {
    // Treat it as a non-zero constant. Don't even create a new block for the
    // body, just fall into it.
  }

  // When single byte coverage mode is enabled, add a counter to the body.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getBody());
  else
    incrementProfileCounter(&S);
  {
    // Create a separate cleanup scope for the body, in case it is not
    // a compound statement.
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  // If there is an increment, emit it next.
  if (S.getInc()) {
    EmitBlock(Continue.getBlock());
    EmitStmt(S.getInc());
    if (llvm::EnableSingleByteCoverage)
      incrementProfileCounter(S.getInc());
  }

  BreakContinueStack.pop_back();

  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.pop_back();
}

void
CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
                                     ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first pieces before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());
  EmitStmt(S.getRangeStmt());
  EmitStmt(S.getBeginStmt());
  EmitStmt(S.getEndStmt());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  EmitBlock(CondBlock);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.push_back(emitConvergenceLoopToken(CondBlock));

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  if (ForScope.requiresCleanups())
    ExitBlock = createBasicBlock("for.cond.cleanup");

  // The loop body, consisting of the specified body and the loop variable.
  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // The body is executed if the expression, contextually converted
  // to bool, is true.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
  llvm::MDNode *Weights =
      createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
  if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
    BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
        BoolCondVal, Stmt::getLikelihood(S.getBody()));
  Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);

  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(ForBody);
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getBody());
  else
    incrementProfileCounter(&S);

  // Create a block for the increment. In case of a 'continue', we jump there.
  JumpDest Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  {
    // Create a separate cleanup scope for the loop variable and body.
    LexicalScope BodyScope(*this, S.getSourceRange());
    EmitStmt(S.getLoopVarStmt());
    EmitStmt(S.getBody());
  }

  EmitStopPoint(&S);
  // If there is an increment, emit it next.
  EmitBlock(Continue.getBlock());
  EmitStmt(S.getInc());

  BreakContinueStack.pop_back();

  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.pop_back();
}

void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
  if (RV.isScalar()) {
    Builder.CreateStore(RV.getScalarVal(), ReturnValue);
  } else if (RV.isAggregate()) {
    LValue Dest = MakeAddrLValue(ReturnValue, Ty);
    LValue Src = MakeAddrLValue(RV.getAggregateAddress(), Ty);
    EmitAggregateCopy(Dest, Src, Ty, getOverlapForReturnValue());
  } else {
    EmitStoreOfComplex(RV.getComplexVal(), MakeAddrLValue(ReturnValue, Ty),
                       /*init*/ true);
  }
  EmitBranchThroughCleanup(ReturnBlock);
}

namespace {
// RAII struct used to save and restore a return statement's result expression.
struct SaveRetExprRAII {
  SaveRetExprRAII(const Expr *RetExpr, CodeGenFunction &CGF)
      : OldRetExpr(CGF.RetExpr), CGF(CGF) {
    CGF.RetExpr = RetExpr;
  }
  ~SaveRetExprRAII() { CGF.RetExpr = OldRetExpr; }
  const Expr *OldRetExpr;
  CodeGenFunction &CGF;
};
} // namespace

/// Determine if the given call uses the swiftasync calling convention.
static bool isSwiftAsyncCallee(const CallExpr *CE) {
  auto calleeQualType = CE->getCallee()->getType();
  const FunctionType *calleeType = nullptr;
  if (calleeQualType->isFunctionPointerType() ||
      calleeQualType->isFunctionReferenceType() ||
      calleeQualType->isBlockPointerType() ||
      calleeQualType->isMemberFunctionPointerType()) {
    calleeType = calleeQualType->getPointeeType()->castAs<FunctionType>();
  } else if (auto *ty = dyn_cast<FunctionType>(calleeQualType)) {
    calleeType = ty;
  } else if (auto CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
    if (auto methodDecl = CMCE->getMethodDecl()) {
      // getMethodDecl() doesn't handle member pointers at the moment.
      calleeType = methodDecl->getType()->castAs<FunctionType>();
    } else {
      return false;
    }
  } else {
    return false;
  }
  return calleeType->getCallConv() == CallingConv::CC_SwiftAsync;
}

/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
/// if the function returns void, or may be missing one if the function returns
/// non-void. Fun stuff :).
void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
  if (requiresReturnValueCheck()) {
    llvm::Constant *SLoc = EmitCheckSourceLocation(S.getBeginLoc());
    auto *SLocPtr =
        new llvm::GlobalVariable(CGM.getModule(), SLoc->getType(), false,
                                 llvm::GlobalVariable::PrivateLinkage, SLoc);
    SLocPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    CGM.getSanitizerMetadata()->disableSanitizerForGlobal(SLocPtr);
    assert(ReturnLocation.isValid() && "No valid return location");
    Builder.CreateStore(SLocPtr, ReturnLocation);
  }

  // Returning from an outlined SEH helper is UB, and we already warn on it.
  if (IsOutlinedSEHHelper) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
  }

  // Emit the result value, even if unused, to evaluate the side effects.
  const Expr *RV = S.getRetValue();

  // Record the result expression of the return statement. The recorded
  // expression is used to determine whether a block capture's lifetime should
  // end at the end of the full expression as opposed to the end of the scope
  // enclosing the block expression.
  //
  // This permits a small, easily-implemented exception to our over-conservative
  // rules about not jumping to statements following block literals with
  // non-trivial cleanups.
  SaveRetExprRAII SaveRetExpr(RV, *this);

  RunCleanupsScope cleanupScope(*this);
  if (const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(RV))
    RV = EWC->getSubExpr();

  // If we're in a swiftasynccall function, and the return expression is a
  // call to a swiftasynccall function, mark the call as the musttail call.
  std::optional<llvm::SaveAndRestore<const CallExpr *>> SaveMustTail;
  if (RV && CurFnInfo &&
      CurFnInfo->getASTCallingConvention() == CallingConv::CC_SwiftAsync) {
    if (auto CE = dyn_cast<CallExpr>(RV)) {
      if (isSwiftAsyncCallee(CE)) {
        SaveMustTail.emplace(MustTailCall, CE);
      }
    }
  }

  // FIXME: Clean this up by using an LValue for ReturnTemp,
  // EmitStoreThroughLValue, and EmitAnyExpr.
  // Check if the NRVO candidate was not globalized in OpenMP mode.
  if (getLangOpts().ElideConstructors && S.getNRVOCandidate() &&
      S.getNRVOCandidate()->isNRVOVariable() &&
      (!getLangOpts().OpenMP ||
       !CGM.getOpenMPRuntime()
            .getAddressOfLocalVariable(*this, S.getNRVOCandidate())
            .isValid())) {
    // Apply the named return value optimization for this return statement,
    // which means doing nothing: the appropriate result has already been
    // constructed into the NRVO variable.

    // If there is an NRVO flag for this variable, set it to 1 to indicate
    // that the cleanup code should not destroy the variable.
    if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
      Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
  } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
    // Make sure not to return anything, but evaluate the expression
    // for side effects.
    if (RV) {
      EmitAnyExpr(RV);
    }
  } else if (!RV) {
    // Do nothing (return value is left uninitialized)
  } else if (FnRetTy->isReferenceType()) {
    // If this function returns a reference, take the address of the expression
    // rather than the value.
    RValue Result = EmitReferenceBindingToExpr(RV);
    Builder.CreateStore(Result.getScalarVal(), ReturnValue);
  } else {
    switch (getEvaluationKind(RV->getType())) {
    case TEK_Scalar: {
      llvm::Value *Ret = EmitScalarExpr(RV);
      if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect)
        EmitStoreOfScalar(Ret, MakeAddrLValue(ReturnValue, RV->getType()),
                          /*isInit*/ true);
      else
        Builder.CreateStore(Ret, ReturnValue);
      break;
    }
    case TEK_Complex:
      EmitComplexExprIntoLValue(RV, MakeAddrLValue(ReturnValue, RV->getType()),
                                /*isInit*/ true);
      break;
    case TEK_Aggregate:
      EmitAggExpr(RV, AggValueSlot::forAddr(
                          ReturnValue, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased,
                          getOverlapForReturnValue()));
      break;
    }
  }

  ++NumReturnExprs;
  if (!RV || RV->isEvaluatable(getContext()))
    ++NumSimpleReturnExprs;

  cleanupScope.ForceCleanup();
  EmitBranchThroughCleanup(ReturnBlock);
}
1645 // As long as debug info is modeled with instructions, we have to ensure we
1646 // have a place to insert here and write the stop point here.
1647 if (HaveInsertPoint())
1648 EmitStopPoint(&S);
1649
1650 for (const auto *I : S.decls())
1651 EmitDecl(*I);
1652}

void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
  assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().BreakBlock);
}
1667 assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");
1668
1669 // If this code is reachable then emit a stop point (if generating
1670 // debug info). We have to do this ourselves because we are on the
1671 // "simple" statement path.
1672 if (HaveInsertPoint())
1673 EmitStopPoint(&S);
1674
1675 EmitBranchThroughCleanup(BreakContinueStack.back().ContinueBlock);
1676}
1677
1678/// EmitCaseStmtRange - If case statement range is not too big then
1679/// add multiple cases to switch instruction, one for each value within
1680/// the range. If range is too big then emit "if" condition check.
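/// For example (illustrative only): "case 1 ... 3:" is small enough to become
/// three separate switch cases, while "case 0 ... 999:" is lowered to a range
/// check roughly of the form "if ((unsigned)(Cond - Lo) <= 999)", chained in
/// front of the switch's default destination.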
1681void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S,
1682 ArrayRef<const Attr *> Attrs) {
1683 assert(S.getRHS() && "Expected RHS value in CaseStmt");
1684
1685 llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
1686 llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());
1687
1688 // Emit the code for this case. We do this first to make sure it is
1689 // properly chained from our predecessor before generating the
1690 // switch machinery to enter this block.
1691 llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1692 EmitBlockWithFallThrough(CaseDest, &S);
1693 EmitStmt(S.getSubStmt());
1694
1695 // If range is empty, do nothing.
1696 if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
1697 return;
1698
1699 Stmt::Likelihood LH = Stmt::getLikelihood(Attrs);
1700 llvm::APInt Range = RHS - LHS;
1701 // FIXME: parameters such as this should not be hardcoded.
1702 if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
1703 // Range is small enough to add multiple switch instruction cases.
1704 uint64_t Total = getProfileCount(&S);
1705 unsigned NCases = Range.getZExtValue() + 1;
1706 // We only have one region counter for the entire set of cases here, so we
1707 // need to divide the weights evenly between the generated cases, ensuring
1708 // that the total weight is preserved. E.g., a weight of 5 over three cases
1709 // will be distributed as weights of 2, 2, and 1.
1710 uint64_t Weight = Total / NCases, Rem = Total % NCases;
1711 for (unsigned I = 0; I != NCases; ++I) {
1712 if (SwitchWeights)
1713 SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
1714 else if (SwitchLikelihood)
1715 SwitchLikelihood->push_back(LH);
1716
1717 if (Rem)
1718 Rem--;
1719 SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
1720 ++LHS;
1721 }
1722 return;
1723 }
1724
1725 // The range is too big. Emit "if" condition into a new block,
1726 // making sure to save and restore the current insertion point.
1727 llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();
1728
1729 // Push this test onto the chain of range checks (which terminates
1730 // in the default basic block). The switch's default will be changed
1731 // to the top of this chain after switch emission is complete.
1732 llvm::BasicBlock *FalseDest = CaseRangeBlock;
1733 CaseRangeBlock = createBasicBlock("sw.caserange");
1734
1735 CurFn->insert(CurFn->end(), CaseRangeBlock);
1736 Builder.SetInsertPoint(CaseRangeBlock);
1737
1738 // Emit range check.
1739 llvm::Value *Diff =
1740 Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
1741 llvm::Value *Cond =
1742 Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");
1743
1744 llvm::MDNode *Weights = nullptr;
1745 if (SwitchWeights) {
1746 uint64_t ThisCount = getProfileCount(&S);
1747 uint64_t DefaultCount = (*SwitchWeights)[0];
1748 Weights = createProfileWeights(ThisCount, DefaultCount);
1749
1750 // Since we're chaining the switch default through each large case range, we
1751 // need to update the weight for the default, i.e., the first case, to include
1752 // this case.
1753 (*SwitchWeights)[0] += ThisCount;
1754 } else if (SwitchLikelihood)
1755 Cond = emitCondLikelihoodViaExpectIntrinsic(Cond, LH);
1756
1757 Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);
1758
1759 // Restore the appropriate insertion point.
1760 if (RestoreBB)
1761 Builder.SetInsertPoint(RestoreBB);
1762 else
1763 Builder.ClearInsertionPoint();
1764}
1765
1766void CodeGenFunction::EmitCaseStmt(const CaseStmt &S,
1767 ArrayRef<const Attr *> Attrs) {
1768 // If there is no enclosing switch instance that we're aware of, then this
1769 // case statement and its block can be elided. This situation only happens
1770 // when we've constant-folded the switch, are emitting the constant case,
1771 // and part of the constant case includes another case statement. For
1772 // instance: switch (4) { case 4: do { case 5: } while (1); }
1773 if (!SwitchInsn) {
1774 EmitStmt(S.getSubStmt());
1775 return;
1776 }
1777
1778 // Handle case ranges.
1779 if (S.getRHS()) {
1780 EmitCaseStmtRange(S, Attrs);
1781 return;
1782 }
1783
1784 llvm::ConstantInt *CaseVal =
1785 Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));
1786
1787 // Emit debuginfo for the case value if it is an enum value.
1788 const ConstantExpr *CE;
1789 if (auto ICE = dyn_cast<ImplicitCastExpr>(S.getLHS()))
1790 CE = dyn_cast<ConstantExpr>(ICE->getSubExpr());
1791 else
1792 CE = dyn_cast<ConstantExpr>(S.getLHS());
1793 if (CE) {
1794 if (auto DE = dyn_cast<DeclRefExpr>(CE->getSubExpr()))
1795 if (CGDebugInfo *Dbg = getDebugInfo())
1796 if (CGM.getCodeGenOpts().hasReducedDebugInfo())
1797 Dbg->EmitGlobalVariable(DE->getDecl(),
1798 APValue(llvm::APSInt(CaseVal->getValue())));
1799 }
1800
1801 if (SwitchLikelihood)
1802 SwitchLikelihood->push_back(Stmt::getLikelihood(Attrs));
1803
1804 // If the body of the case is just a 'break', try to not emit an empty block.
1805 // If we're profiling or we're not optimizing, leave the block in for better
1806 // debug and coverage analysis.
1807 if (!CGM.getCodeGenOpts().hasProfileClangInstr() &&
1808 CGM.getCodeGenOpts().OptimizationLevel > 0 &&
1809 isa<BreakStmt>(S.getSubStmt())) {
1810 JumpDest Block = BreakContinueStack.back().BreakBlock;
1811
1812 // Only do this optimization if there are no cleanups that need emitting.
1813 if (isObviouslyBranchWithoutCleanups(Block)) {
1814 if (SwitchWeights)
1815 SwitchWeights->push_back(getProfileCount(&S));
1816 SwitchInsn->addCase(CaseVal, Block.getBlock());
1817
1818 // If there was a fallthrough into this case, make sure to redirect it to
1819 // the end of the switch as well.
1820 if (Builder.GetInsertBlock()) {
1821 Builder.CreateBr(Block.getBlock());
1822 Builder.ClearInsertionPoint();
1823 }
1824 return;
1825 }
1826 }
1827
1828 llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1829 EmitBlockWithFallThrough(CaseDest, &S);
1830 if (SwitchWeights)
1831 SwitchWeights->push_back(getProfileCount(&S));
1832 SwitchInsn->addCase(CaseVal, CaseDest);
1833
1834 // Recursively emitting the statement is acceptable, but is not wonderful for
1835 // code where we have many case statements nested together, i.e.:
1836 // case 1:
1837 // case 2:
1838 // case 3: etc.
1839 // Handling this recursively will create a new block for each case statement
1840 // that falls through to the next case, which is IR-intensive. It also causes
1841 // deep recursion which can run into stack depth limitations. Handle
1842 // sequential non-range case statements specially.
1843 //
1844 // TODO When the next case has a likelihood attribute the code returns to the
1845 // recursive algorithm. Maybe improve this case if it becomes common practice
1846 // to use a lot of attributes.
1847 const CaseStmt *CurCase = &S;
1848 const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());
1849
1850 // Otherwise, iteratively add consecutive cases to this switch stmt.
1851 while (NextCase && NextCase->getRHS() == nullptr) {
1852 CurCase = NextCase;
1853 llvm::ConstantInt *CaseVal =
1854 Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));
1855
1856 if (SwitchWeights)
1857 SwitchWeights->push_back(getProfileCount(NextCase));
1858 if (CGM.getCodeGenOpts().hasProfileClangInstr()) {
1859 CaseDest = createBasicBlock("sw.bb");
1860 EmitBlockWithFallThrough(CaseDest, CurCase);
1861 }
1862 // Since this loop is only executed when the CaseStmt has no attributes,
1863 // use a hard-coded value.
1864 if (SwitchLikelihood)
1865 SwitchLikelihood->push_back(Stmt::LH_None);
1866
1867 SwitchInsn->addCase(CaseVal, CaseDest);
1868 NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
1869 }
1870
1871 // Generate a stop point for debug info if the case statement is
1872 // followed by a default statement. A fallthrough case before a
1873 // default case gets its own branch target.
1874 if (CurCase->getSubStmt()->getStmtClass() == Stmt::DefaultStmtClass)
1875 EmitStopPoint(CurCase);
1876
1877 // Normal default recursion for non-cases.
1878 EmitStmt(CurCase->getSubStmt());
1879}
1880
1881void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S,
1882 ArrayRef<const Attr *> Attrs) {
1883 // If there is no enclosing switch instance that we're aware of, then this
1884 // default statement can be elided. This situation only happens when we've
1885 // constant-folded the switch.
1886 if (!SwitchInsn) {
1887 EmitStmt(S.getSubStmt());
1888 return;
1889 }
1890
1891 llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
1892 assert(DefaultBlock->empty() &&
1893 "EmitDefaultStmt: Default block already defined?");
1894
1895 if (SwitchLikelihood)
1896 SwitchLikelihood->front() = Stmt::getLikelihood(Attrs);
1897
1898 EmitBlockWithFallThrough(DefaultBlock, &S);
1899
1900 EmitStmt(S.getSubStmt());
1901}
1902
1903/// CollectStatementsForCase - Given the body of a 'switch' statement and a
1904/// constant value that is being switched on, see if we can dead code eliminate
1905/// the body of the switch to a simple series of statements to emit. Basically,
1906/// on a switch (5) we want to find these statements:
1907/// case 5:
1908/// printf(...); <--
1909/// ++i; <--
1910/// break;
1911///
1912/// and add them to the ResultStmts vector. If it is unsafe to do this
1913/// transformation (for example, one of the elided statements contains a label
1914/// that might be jumped to), return CSFC_Failure. If we handled it and 'S'
1915/// should include statements after it (e.g. the printf() line is a substmt of
1916/// the case) then return CSFC_FallThrough. If we handled it and found a break
1917/// statement, then return CSFC_Success.
1918///
1919/// If Case is non-null, then we are looking for the specified case, checking
1920/// that nothing we jump over contains labels. If Case is null, then we found
1921/// the case and are looking for the break.
1922///
1923/// If the recursive walk actually finds our Case, then we set FoundCase to
1924/// true.
1925///
1926enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
1927static CSFC_Result CollectStatementsForCase(const Stmt *S,
1928 const SwitchCase *Case,
1929 bool &FoundCase,
1930 SmallVectorImpl<const Stmt*> &ResultStmts) {
1931 // If this is a null statement, just succeed.
1932 if (!S)
1933 return Case ? CSFC_Success : CSFC_FallThrough;
1934
1935 // If this is the switchcase (case 4: or default) that we're looking for, then
1936 // we're in business. Just add the substatement.
1937 if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
1938 if (S == Case) {
1939 FoundCase = true;
1940 return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
1941 ResultStmts);
1942 }
1943
1944 // Otherwise, this is some other case or default statement, just ignore it.
1945 return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
1946 ResultStmts);
1947 }
1948
1949 // If we are in the live part of the code and we found our break statement,
1950 // return a success!
1951 if (!Case && isa<BreakStmt>(S))
1952 return CSFC_Success;
1953
1954 // If this is a switch statement, then it might contain the SwitchCase, the
1955 // break, or neither.
1956 if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
1957 // Handle this as two cases: we might be looking for the SwitchCase (if so
1958 // the skipped statements must be skippable) or we might already have it.
1959 CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
1960 bool StartedInLiveCode = FoundCase;
1961 unsigned StartSize = ResultStmts.size();
1962
1963 // If we've not found the case yet, scan through looking for it.
1964 if (Case) {
1965 // Keep track of whether we see a skipped declaration. The code could be
1966 // using the declaration even if it is skipped, so we can't optimize out
1967 // the decl if the kept statements might refer to it.
1968 bool HadSkippedDecl = false;
1969
1970 // If we're looking for the case, just see if we can skip each of the
1971 // substatements.
1972 for (; Case && I != E; ++I) {
1973 HadSkippedDecl |= CodeGenFunction::mightAddDeclToScope(*I);
1974
1975 switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
1976 case CSFC_Failure: return CSFC_Failure;
1977 case CSFC_Success:
1978 // A successful result means that either 1) the statement doesn't
1979 // have the case and is skippable, or 2) it does contain the case value
1980 // and also contains the break to exit the switch. In the latter case,
1981 // we just verify that the rest of the statements are elidable.
1982 if (FoundCase) {
1983 // If we found the case and skipped declarations, we can't do the
1984 // optimization.
1985 if (HadSkippedDecl)
1986 return CSFC_Failure;
1987
1988 for (++I; I != E; ++I)
1989 if (CodeGenFunction::ContainsLabel(*I, true))
1990 return CSFC_Failure;
1991 return CSFC_Success;
1992 }
1993 break;
1994 case CSFC_FallThrough:
1995 // If we have a fallthrough condition, then we must have found the
1996 // case and started to include statements. Consider the rest of the
1997 // statements in the compound statement as candidates for inclusion.
1998 assert(FoundCase && "Didn't find case but returned fallthrough?");
1999 // We recursively found Case, so we're not looking for it anymore.
2000 Case = nullptr;
2001
2002 // If we found the case and skipped declarations, we can't do the
2003 // optimization.
2004 if (HadSkippedDecl)
2005 return CSFC_Failure;
2006 break;
2007 }
2008 }
2009
2010 if (!FoundCase)
2011 return CSFC_Success;
2012
2013 assert(!HadSkippedDecl && "fallthrough after skipping decl");
2014 }
2015
2016 // If we have statements in our range, then we know that the statements are
2017 // live and need to be added to the set of statements we're tracking.
2018 bool AnyDecls = false;
2019 for (; I != E; ++I) {
2020 AnyDecls |= CodeGenFunction::mightAddDeclToScope(*I);
2021
2022 switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
2023 case CSFC_Failure: return CSFC_Failure;
2024 case CSFC_FallThrough:
2025 // A fallthrough result means that the statement was simple and just
2026 // included in ResultStmt, keep adding them afterwards.
2027 break;
2028 case CSFC_Success:
2029 // A successful result means that we found the break statement and
2030 // stopped statement inclusion. We just ensure that any leftover stmts
2031 // are skippable and return success ourselves.
2032 for (++I; I != E; ++I)
2033 if (CodeGenFunction::ContainsLabel(*I, true))
2034 return CSFC_Failure;
2035 return CSFC_Success;
2036 }
2037 }
2038
2039 // If we're about to fall out of a scope without hitting a 'break;', we
2040 // can't perform the optimization if there were any decls in that scope
2041 // (we'd lose their end-of-lifetime).
2042 if (AnyDecls) {
2043 // If the entire compound statement was live, there's one more thing we
2044 // can try before giving up: emit the whole thing as a single statement.
2045 // We can do that unless the statement contains a 'break;'.
2046 // FIXME: Such a break must be at the end of a construct within this one.
2047 // We could emit this by just ignoring the BreakStmts entirely.
2048 if (StartedInLiveCode && !CodeGenFunction::containsBreak(S)) {
2049 ResultStmts.resize(StartSize);
2050 ResultStmts.push_back(S);
2051 } else {
2052 return CSFC_Failure;
2053 }
2054 }
2055
2056 return CSFC_FallThrough;
2057 }
2058
2059 // Okay, this is some other statement that we don't handle explicitly, like a
2060 // for statement or increment etc. If we are skipping over this statement,
2061 // just verify it doesn't have labels, which would make it invalid to elide.
2062 if (Case) {
2063 if (CodeGenFunction::ContainsLabel(S, true))
2064 return CSFC_Failure;
2065 return CSFC_Success;
2066 }
2067
2068 // Otherwise, we want to include this statement. Everything is cool with that
2069 // so long as it doesn't contain a break out of the switch we're in.
2070 if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;
2071
2072 // Otherwise, everything is great. Include the statement and tell the caller
2073 // that we fall through and include the next statement as well.
2074 ResultStmts.push_back(S);
2075 return CSFC_FallThrough;
2076}
2077
2078/// FindCaseStatementsForValue - Find the case statement being jumped to and
2079/// then invoke CollectStatementsForCase to find the list of statements to emit
2080/// for a switch on constant. See the comment above CollectStatementsForCase
2081/// for more details.
2082static bool FindCaseStatementsForValue(const SwitchStmt &S,
2083 const llvm::APSInt &ConstantCondValue,
2084 SmallVectorImpl<const Stmt*> &ResultStmts,
2085 ASTContext &C,
2086 const SwitchCase *&ResultCase) {
2087 // First step, find the switch case that is being branched to. We can do this
2088 // efficiently by scanning the SwitchCase list.
2089 const SwitchCase *Case = S.getSwitchCaseList();
2090 const DefaultStmt *DefaultCase = nullptr;
2091
2092 for (; Case; Case = Case->getNextSwitchCase()) {
2093 // It's either a default or case. Just remember the default statement in
2094 // case we're not jumping to any numbered cases.
2095 if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
2096 DefaultCase = DS;
2097 continue;
2098 }
2099
2100 // Check to see if this case is the one we're looking for.
2101 const CaseStmt *CS = cast<CaseStmt>(Case);
2102 // Don't handle case ranges yet.
2103 if (CS->getRHS()) return false;
2104
2105 // If we found our case, remember it as 'case'.
2106 if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
2107 break;
2108 }
2109
2110 // If we didn't find a matching case, we use a default if it exists, or we
2111 // elide the whole switch body!
2112 if (!Case) {
2113 // It is safe to elide the body of the switch if it doesn't contain labels
2114 // etc. If it is safe, return successfully with an empty ResultStmts list.
2115 if (!DefaultCase)
2116 return !CodeGenFunction::ContainsLabel(&S);
2117 Case = DefaultCase;
2118 }
2119
2120 // Ok, we know which case is being jumped to, try to collect all the
2121 // statements that follow it. This can fail for a variety of reasons. Also,
2122 // check to see that the recursive walk actually found our case statement.
2123 // Insane cases like this can fail to find it in the recursive walk since we
2124 // don't handle every stmt kind:
2125 // switch (4) {
2126 // while (1) {
2127 // case 4: ...
2128 bool FoundCase = false;
2129 ResultCase = Case;
2130 return CollectStatementsForCase(S.getBody(), Case, FoundCase,
2131 ResultStmts) != CSFC_Failure &&
2132 FoundCase;
2133}
2134
2135static std::optional<SmallVector<uint64_t, 16>>
2136getLikelihoodWeights(ArrayRef<Stmt::Likelihood> Likelihoods) {
2137 // Are there enough branches to weight them?
2138 if (Likelihoods.size() <= 1)
2139 return std::nullopt;
2140
2141 uint64_t NumUnlikely = 0;
2142 uint64_t NumNone = 0;
2143 uint64_t NumLikely = 0;
2144 for (const auto LH : Likelihoods) {
2145 switch (LH) {
2146 case Stmt::LH_Unlikely:
2147 ++NumUnlikely;
2148 break;
2149 case Stmt::LH_None:
2150 ++NumNone;
2151 break;
2152 case Stmt::LH_Likely:
2153 ++NumLikely;
2154 break;
2155 }
2156 }
2157
2158 // Is there a likelihood attribute used?
2159 if (NumUnlikely == 0 && NumLikely == 0)
2160 return std::nullopt;
2161
2162 // When multiple cases share the same code they can be combined during
2163 // optimization. In that case the weights of the branch will be the sum of
2164 // the individual weights. Make sure the combined sum of all neutral cases
2165 // doesn't exceed the value of a single likely attribute.
2166 // The additions both avoid divisions by 0 and make sure the weights of None
2167 // don't exceed the weight of Likely.
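// Worked example (illustrative only): with one likely case and two None
// entries (including the default), Likely = INT32_MAX / 3 = 715827882 and
// None = 715827882 / 3 = 238609294, so even the combined None weights
// (477218588) stay below a single Likely weight.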
2168 const uint64_t Likely = INT32_MAX / (NumLikely + 2);
2169 const uint64_t None = Likely / (NumNone + 1);
2170 const uint64_t Unlikely = 0;
2171
2172 SmallVector<uint64_t, 16> Result;
2173 Result.reserve(Likelihoods.size());
2174 for (const auto LH : Likelihoods) {
2175 switch (LH) {
2176 case Stmt::LH_Unlikely:
2177 Result.push_back(Unlikely);
2178 break;
2179 case Stmt::LH_None:
2180 Result.push_back(None);
2181 break;
2182 case Stmt::LH_Likely:
2183 Result.push_back(Likely);
2184 break;
2185 }
2186 }
2187
2188 return Result;
2189}
2190
2191void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
2192 // Handle nested switch statements.
2193 llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
2194 SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
2195 SmallVector<Stmt::Likelihood, 16> *SavedSwitchLikelihood = SwitchLikelihood;
2196 llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
2197
2198 // See if we can constant fold the condition of the switch and therefore only
2199 // emit the live case statement (if any) of the switch.
2200 llvm::APSInt ConstantCondValue;
2201 if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
2202 SmallVector<const Stmt *, 4> CaseStmts;
2203 const SwitchCase *Case = nullptr;
2204 if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
2205 getContext(), Case)) {
2206 if (Case)
2207 incrementProfileCounter(Case);
2208 RunCleanupsScope ExecutedScope(*this);
2209
2210 if (S.getInit())
2211 EmitStmt(S.getInit());
2212
2213 // Emit the condition variable if needed inside the entire cleanup scope
2214 // used by this special case for constant folded switches.
2215 if (S.getConditionVariable())
2216 EmitDecl(*S.getConditionVariable());
2217
2218 // At this point, we are no longer "within" a switch instance, so
2219 // we can temporarily enforce this to ensure that any embedded case
2220 // statements are not emitted.
2221 SwitchInsn = nullptr;
2222
2223 // Okay, we can dead code eliminate everything except this case. Emit the
2224 // specified series of statements and we're good.
2225 for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
2226 EmitStmt(CaseStmts[i]);
2227 incrementProfileCounter(&S);
2228 PGO.markStmtMaybeUsed(S.getBody());
2229
2230 // Now we want to restore the saved switch instance so that nested
2231 // switches continue to function properly
2232 SwitchInsn = SavedSwitchInsn;
2233
2234 return;
2235 }
2236 }
2237
2238 JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
2239
2240 RunCleanupsScope ConditionScope(*this);
2241
2242 if (S.getInit())
2243 EmitStmt(S.getInit());
2244
2245 if (S.getConditionVariable())
2246 EmitDecl(*S.getConditionVariable());
2247 llvm::Value *CondV = EmitScalarExpr(S.getCond());
2248
2249 // Create basic block to hold stuff that comes after switch
2250 // statement. We also need to create a default block now so that
2251 // explicit case ranges tests can have a place to jump to on
2252 // failure.
2253 llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
2254 SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
2255 if (PGO.haveRegionCounts()) {
2256 // Walk the SwitchCase list to find how many there are.
2257 uint64_t DefaultCount = 0;
2258 unsigned NumCases = 0;
2259 for (const SwitchCase *Case = S.getSwitchCaseList();
2260 Case;
2261 Case = Case->getNextSwitchCase()) {
2262 if (isa<DefaultStmt>(Case))
2263 DefaultCount = getProfileCount(Case);
2264 NumCases += 1;
2265 }
2266 SwitchWeights = new SmallVector<uint64_t, 16>();
2267 SwitchWeights->reserve(NumCases);
2268 // The default needs to be first. We store the edge count, so we already
2269 // know the right weight.
2270 SwitchWeights->push_back(DefaultCount);
2271 } else if (CGM.getCodeGenOpts().OptimizationLevel) {
2272 SwitchLikelihood = new SmallVector<Stmt::Likelihood, 16>();
2273 // Initialize the default case.
2274 SwitchLikelihood->push_back(Stmt::LH_None);
2275 }
2276
2277 CaseRangeBlock = DefaultBlock;
2278
2279 // Clear the insertion point to indicate we are in unreachable code.
2280 Builder.ClearInsertionPoint();
2281
2282 // All break statements jump to SwitchExit. If BreakContinueStack is non-empty
2283 // then reuse last ContinueBlock.
2284 JumpDest OuterContinue;
2285 if (!BreakContinueStack.empty())
2286 OuterContinue = BreakContinueStack.back().ContinueBlock;
2287
2288 BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));
2289
2290 // Emit switch body.
2291 EmitStmt(S.getBody());
2292
2293 BreakContinueStack.pop_back();
2294
2295 // Update the default block in case explicit case range tests have
2296 // been chained on top.
2297 SwitchInsn->setDefaultDest(CaseRangeBlock);
2298
2299 // If a default was never emitted:
2300 if (!DefaultBlock->getParent()) {
2301 // If we have cleanups, emit the default block so that there's a
2302 // place to jump through the cleanups from.
2303 if (ConditionScope.requiresCleanups()) {
2304 EmitBlock(DefaultBlock);
2305
2306 // Otherwise, just forward the default block to the switch end.
2307 } else {
2308 DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
2309 delete DefaultBlock;
2310 }
2311 }
2312
2313 ConditionScope.ForceCleanup();
2314
2315 // Emit continuation.
2316 EmitBlock(SwitchExit.getBlock(), true);
2317 incrementProfileCounter(&S);
2318
2319 // If the switch has a condition wrapped by __builtin_unpredictable,
2320 // create metadata that specifies that the switch is unpredictable.
2321 // Don't bother if not optimizing because that metadata would not be used.
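// For example (illustrative only): "switch (__builtin_unpredictable(x))"
// results in "!unpredictable" metadata being attached to the emitted switch
// instruction.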
2322 auto *Call = dyn_cast<CallExpr>(S.getCond());
2323 if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
2324 auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
2325 if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
2326 llvm::MDBuilder MDHelper(getLLVMContext());
2327 SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable,
2328 MDHelper.createUnpredictable());
2329 }
2330 }
2331
2332 if (SwitchWeights) {
2333 assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
2334 "switch weights do not match switch cases");
2335 // If there's only one jump destination there's no sense weighting it.
2336 if (SwitchWeights->size() > 1)
2337 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2338 createProfileWeights(*SwitchWeights));
2339 delete SwitchWeights;
2340 } else if (SwitchLikelihood) {
2341 assert(SwitchLikelihood->size() == 1 + SwitchInsn->getNumCases() &&
2342 "switch likelihoods do not match switch cases");
2343 std::optional<SmallVector<uint64_t, 16>> LHW =
2344 getLikelihoodWeights(*SwitchLikelihood);
2345 if (LHW) {
2346 llvm::MDBuilder MDHelper(CGM.getLLVMContext());
2347 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2348 createProfileWeights(*LHW));
2349 }
2350 delete SwitchLikelihood;
2351 }
2352 SwitchInsn = SavedSwitchInsn;
2353 SwitchWeights = SavedSwitchWeights;
2354 SwitchLikelihood = SavedSwitchLikelihood;
2355 CaseRangeBlock = SavedCRBlock;
2356}
2357
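// Convert a GCC inline asm constraint to LLVM's constraint syntax. For
// example (illustrative only): "g" expands to "imr", and the
// multi-alternative constraint "r,m" becomes "r|m".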
2358static std::string
2359SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
2360 SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons = nullptr) {
2361 std::string Result;
2362
2363 while (*Constraint) {
2364 switch (*Constraint) {
2365 default:
2366 Result += Target.convertConstraint(Constraint);
2367 break;
2368 // Ignore these
2369 case '*':
2370 case '?':
2371 case '!':
2372 case '=': // Will see this and the following in multi-alt constraints.
2373 case '+':
2374 break;
2375 case '#': // Ignore the rest of the constraint alternative.
2376 while (Constraint[1] && Constraint[1] != ',')
2377 Constraint++;
2378 break;
2379 case '&':
2380 case '%':
2381 Result += *Constraint;
2382 while (Constraint[1] && Constraint[1] == *Constraint)
2383 Constraint++;
2384 break;
2385 case ',':
2386 Result += "|";
2387 break;
2388 case 'g':
2389 Result += "imr";
2390 break;
2391 case '[': {
2392 assert(OutCons &&
2393 "Must pass output names to constraints with a symbolic name");
2394 unsigned Index;
2395 bool result = Target.resolveSymbolicName(Constraint, *OutCons, Index);
2396 assert(result && "Could not resolve symbolic name"); (void)result;
2397 Result += llvm::utostr(Index);
2398 break;
2399 }
2400 }
2401
2402 Constraint++;
2403 }
2404
2405 return Result;
2406}
2407
2408/// AddVariableConstraints - Look at AsmExpr and if it is a variable declared
2409/// as using a particular register add that as a constraint that will be used
2410/// in this asm stmt.
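/// For example (illustrative only): given "register int X asm("eax");" and
/// an output operand "=r"(X), the simplified "r" constraint is replaced by
/// "{eax}" (or "&{eax}" if the operand is an early-clobber).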
2411static std::string
2412AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
2413 const TargetInfo &Target, CodeGenModule &CGM,
2414 const AsmStmt &Stmt, const bool EarlyClobber,
2415 std::string *GCCReg = nullptr) {
2416 const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
2417 if (!AsmDeclRef)
2418 return Constraint;
2419 const ValueDecl &Value = *AsmDeclRef->getDecl();
2420 const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
2421 if (!Variable)
2422 return Constraint;
2423 if (Variable->getStorageClass() != SC_Register)
2424 return Constraint;
2425 AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
2426 if (!Attr)
2427 return Constraint;
2428 StringRef Register = Attr->getLabel();
2429 assert(Target.isValidGCCRegisterName(Register));
2430 // We're using validateOutputConstraint here because we only care if
2431 // this is a register constraint.
2432 TargetInfo::ConstraintInfo Info(Constraint, "");
2433 if (Target.validateOutputConstraint(Info) &&
2434 !Info.allowsRegister()) {
2435 CGM.ErrorUnsupported(&Stmt, "__asm__");
2436 return Constraint;
2437 }
2438 // Canonicalize the register here before returning it.
2439 Register = Target.getNormalizedGCCRegisterName(Register);
2440 if (GCCReg != nullptr)
2441 *GCCReg = Register.str();
2442 return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
2443}
2444
2445std::pair<llvm::Value*, llvm::Type *> CodeGenFunction::EmitAsmInputLValue(
2446 const TargetInfo::ConstraintInfo &Info, LValue InputValue,
2447 QualType InputType, std::string &ConstraintStr, SourceLocation Loc) {
2448 if (Info.allowsRegister() || !Info.allowsMemory()) {
2449 if (CodeGenFunction::hasScalarEvaluationKind(InputType))
2450 return {EmitLoadOfLValue(InputValue, Loc).getScalarVal(), nullptr};
2451
2452 llvm::Type *Ty = ConvertType(InputType);
2453 uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
2454 if ((Size <= 64 && llvm::isPowerOf2_64(Size)) ||
2455 getTargetHooks().isScalarizableAsmOperand(*this, Ty)) {
2456 Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2457
2458 return {Builder.CreateLoad(InputValue.getAddress().withElementType(Ty)),
2459 nullptr};
2460 }
2461 }
2462
2463 Address Addr = InputValue.getAddress();
2464 ConstraintStr += '*';
2465 return {InputValue.getPointer(*this), Addr.getElementType()};
2466}
2467
2468std::pair<llvm::Value *, llvm::Type *>
2469CodeGenFunction::EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
2470 const Expr *InputExpr,
2471 std::string &ConstraintStr) {
2472 // If this can't be a register or memory, i.e., has to be a constant
2473 // (immediate or symbolic), try to emit it as such.
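// For example (illustrative only): on x86, "asm volatile("..." :: "I"(4))"
// requires an immediate in a fixed range, so the operand is emitted
// directly as the constant i32 4.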
2474 if (!Info.allowsRegister() && !Info.allowsMemory()) {
2475 if (Info.requiresImmediateConstant()) {
2476 Expr::EvalResult EVResult;
2477 InputExpr->EvaluateAsRValue(EVResult, getContext(), true);
2478
2479 llvm::APSInt IntResult;
2480 if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
2481 getContext()))
2482 return {llvm::ConstantInt::get(getLLVMContext(), IntResult), nullptr};
2483 }
2484
2485 Expr::EvalResult Result;
2486 if (InputExpr->EvaluateAsInt(Result, getContext()))
2487 return {llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt()),
2488 nullptr};
2489 }
2490
2491 if (Info.allowsRegister() || !Info.allowsMemory())
2492 if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
2493 return {EmitScalarExpr(InputExpr), nullptr};
2494 if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
2495 return {EmitScalarExpr(InputExpr), nullptr};
2496 InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
2497 LValue Dest = EmitLValue(InputExpr);
2498 return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
2499 InputExpr->getExprLoc());
2500}
2501
2502/// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
2503/// asm call instruction. The !srcloc MDNode contains a list of constant
2504/// integers which are the source locations of the start of each line in the
2505/// asm.
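/// For example (illustrative only): asm("insn1\n\tinsn2") produces an MDNode
/// with two encoded locations, one per asm line, so backend diagnostics can
/// point at the offending line.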
2506static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
2507 CodeGenFunction &CGF) {
2508 SmallVector<llvm::Metadata *, 8> Locs;
2509 // Add the location of the first line to the MDNode.
2510 Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
2511 CGF.Int64Ty, Str->getBeginLoc().getRawEncoding())));
2512 StringRef StrVal = Str->getString();
2513 if (!StrVal.empty()) {
2514 const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
2515 const LangOptions &LangOpts = CGF.CGM.getLangOpts();
2516 unsigned StartToken = 0;
2517 unsigned ByteOffset = 0;
2518
2519 // Add the location of the start of each subsequent line of the asm to the
2520 // MDNode.
2521 for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
2522 if (StrVal[i] != '\n') continue;
2523 SourceLocation LineLoc = Str->getLocationOfByte(
2524 i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset);
2525 Locs.push_back(llvm::ConstantAsMetadata::get(
2526 llvm::ConstantInt::get(CGF.Int64Ty, LineLoc.getRawEncoding())));
2527 }
2528 }
2529
2530 return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
2531}
2532
2533static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
2534 bool HasUnwindClobber, bool ReadOnly,
2535 bool ReadNone, bool NoMerge, bool NoConvergent,
2536 const AsmStmt &S,
2537 const std::vector<llvm::Type *> &ResultRegTypes,
2538 const std::vector<llvm::Type *> &ArgElemTypes,
2539 CodeGenFunction &CGF,
2540 std::vector<llvm::Value *> &RegResults) {
2541 if (!HasUnwindClobber)
2542 Result.addFnAttr(llvm::Attribute::NoUnwind);
2543
2544 if (NoMerge)
2545 Result.addFnAttr(llvm::Attribute::NoMerge);
2546 // Attach readnone and readonly attributes.
2547 if (!HasSideEffect) {
2548 if (ReadNone)
2549 Result.setDoesNotAccessMemory();
2550 else if (ReadOnly)
2551 Result.setOnlyReadsMemory();
2552 }
2553
2554 // Add elementtype attribute for indirect constraints.
2555 for (auto Pair : llvm::enumerate(ArgElemTypes)) {
2556 if (Pair.value()) {
2557 auto Attr = llvm::Attribute::get(
2558 CGF.getLLVMContext(), llvm::Attribute::ElementType, Pair.value());
2559 Result.addParamAttr(Pair.index(), Attr);
2560 }
2561 }
2562
2563 // Slap the source location of the inline asm into a !srcloc metadata on the
2564 // call.
2565 if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S))
2566 Result.setMetadata("srcloc",
2567 getAsmSrcLocInfo(gccAsmStmt->getAsmString(), CGF));
2568 else {
2569 // At least put the line number on MS inline asm blobs.
2570 llvm::Constant *Loc =
2571 llvm::ConstantInt::get(CGF.Int64Ty, S.getAsmLoc().getRawEncoding());
2572 Result.setMetadata("srcloc",
2573 llvm::MDNode::get(CGF.getLLVMContext(),
2574 llvm::ConstantAsMetadata::get(Loc)));
2575 }
2576
2577 if (!NoConvergent && CGF.getLangOpts().assumeFunctionsAreConvergent())
2578 // Conservatively, mark all inline asm blocks in CUDA or OpenCL as
2579 // convergent (meaning, they may call an intrinsically convergent op, such
2580 // as bar.sync, and so can't have certain optimizations applied around
2581 // them) unless it's explicitly marked 'noconvergent'.
2582 Result.addFnAttr(llvm::Attribute::Convergent);
2583 // Extract all of the register value results from the asm.
2584 if (ResultRegTypes.size() == 1) {
2585 RegResults.push_back(&Result);
2586 } else {
2587 for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
2588 llvm::Value *Tmp = CGF.Builder.CreateExtractValue(&Result, i, "asmresult");
2589 RegResults.push_back(Tmp);
2590 }
2591 }
2592}
2593
2594static void
2595EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S,
2596 const llvm::ArrayRef<llvm::Value *> RegResults,
2597 const llvm::ArrayRef<llvm::Type *> ResultRegTypes,
2598 const llvm::ArrayRef<llvm::Type *> ResultTruncRegTypes,
2599 const llvm::ArrayRef<LValue> ResultRegDests,
2600 const llvm::ArrayRef<QualType> ResultRegQualTys,
2601 const llvm::BitVector &ResultTypeRequiresCast,
2602 const llvm::BitVector &ResultRegIsFlagReg) {
2603 CGBuilderTy &Builder = CGF.Builder;
2604 CodeGenModule &CGM = CGF.CGM;
2605 llvm::LLVMContext &CTX = CGF.getLLVMContext();
2606
2607 assert(RegResults.size() == ResultRegTypes.size());
2608 assert(RegResults.size() == ResultTruncRegTypes.size());
2609 assert(RegResults.size() == ResultRegDests.size());
2610 // ResultRegDests can be also populated by addReturnRegisterOutputs() above,
2611 // in which case its size may grow.
2612 assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
2613 assert(ResultRegIsFlagReg.size() <= ResultRegDests.size());
2614
2615 for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
2616 llvm::Value *Tmp = RegResults[i];
2617 llvm::Type *TruncTy = ResultTruncRegTypes[i];
2618
2619 if ((i < ResultRegIsFlagReg.size()) && ResultRegIsFlagReg[i]) {
2620 // Target must guarantee the Value `Tmp` here is lowered to a boolean
2621 // value.
2622 llvm::Constant *Two = llvm::ConstantInt::get(Tmp->getType(), 2);
2623 llvm::Value *IsBooleanValue =
2624 Builder.CreateCmp(llvm::CmpInst::ICMP_ULT, Tmp, Two);
2625 llvm::Function *FnAssume = CGM.getIntrinsic(llvm::Intrinsic::assume);
2626 Builder.CreateCall(FnAssume, IsBooleanValue);
2627 }
2628
2629 // If the result type of the LLVM IR asm doesn't match the result type of
2630 // the expression, do the conversion.
2631 if (ResultRegTypes[i] != TruncTy) {
2632
2633 // Truncate the integer result to the right size; note that TruncTy can be
2634 // a pointer.
2635 if (TruncTy->isFloatingPointTy())
2636 Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
2637 else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
2638 uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
2639 Tmp = Builder.CreateTrunc(
2640 Tmp, llvm::IntegerType::get(CTX, (unsigned)ResSize));
2641 Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
2642 } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
2643 uint64_t TmpSize =
2644 CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
2645 Tmp = Builder.CreatePtrToInt(
2646 Tmp, llvm::IntegerType::get(CTX, (unsigned)TmpSize));
2647 Tmp = Builder.CreateTrunc(Tmp, TruncTy);
2648 } else if (Tmp->getType()->isIntegerTy() && TruncTy->isIntegerTy()) {
2649 Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
2650 } else if (Tmp->getType()->isVectorTy() || TruncTy->isVectorTy()) {
2651 Tmp = Builder.CreateBitCast(Tmp, TruncTy);
2652 }
2653 }
2654
2655 LValue Dest = ResultRegDests[i];
2656 // ResultTypeRequiresCast elements correspond to the first
2657 // ResultTypeRequiresCast.size() elements of RegResults.
2658 if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
2659 unsigned Size = CGF.getContext().getTypeSize(ResultRegQualTys[i]);
2660 Address A = Dest.getAddress().withElementType(ResultRegTypes[i]);
2661 if (CGF.getTargetHooks().isScalarizableAsmOperand(CGF, TruncTy)) {
2662 Builder.CreateStore(Tmp, A);
2663 continue;
2664 }
2665
2666 QualType Ty =
2667 CGF.getContext().getIntTypeForBitwidth(Size, /*Signed=*/false);
2668 if (Ty.isNull()) {
2669 const Expr *OutExpr = S.getOutputExpr(i);
2670 CGM.getDiags().Report(OutExpr->getExprLoc(),
2671 diag::err_store_value_to_reg);
2672 return;
2673 }
2674 Dest = CGF.MakeAddrLValue(A, Ty);
2675 }
2676 CGF.EmitStoreThroughLValue(RValue::get(Tmp), Dest);
2677 }
2678}
2679
2680static void EmitHipStdParUnsupportedAsm(CodeGenFunction *CGF,
2681 const AsmStmt &S) {
2682 constexpr auto Name = "__ASM__hipstdpar_unsupported";
2683
2684 StringRef Asm;
2685 if (auto GCCAsm = dyn_cast<GCCAsmStmt>(&S))
2686 Asm = GCCAsm->getAsmString()->getString();
2687
2688 auto &Ctx = CGF->CGM.getLLVMContext();
2689
2690 auto StrTy = llvm::ConstantDataArray::getString(Ctx, Asm);
2691 auto FnTy = llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx),
2692 {StrTy->getType()}, false);
2693 auto UBF = CGF->CGM.getModule().getOrInsertFunction(Name, FnTy);
2694
2695 CGF->Builder.CreateCall(UBF, {StrTy});
2696}
2697
2698void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
2699 // Pop all cleanup blocks at the end of the asm statement.
2700 CodeGenFunction::RunCleanupsScope Cleanups(*this);
2701
2702 // Assemble the final asm string.
2703 std::string AsmString = S.generateAsmString(getContext());
2704
2705 // Get all the output and input constraints together.
2706 SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
2707 SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
2708
2709 bool IsHipStdPar = getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice;
2710 bool IsValidTargetAsm = true;
2711 for (unsigned i = 0, e = S.getNumOutputs(); i != e && IsValidTargetAsm; i++) {
2712 StringRef Name;
2713 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2714 Name = GAS->getOutputName(i);
2715 TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
2716 bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
2717 if (IsHipStdPar && !IsValid)
2718 IsValidTargetAsm = false;
2719 else
2720 assert(IsValid && "Failed to parse output constraint");
2721 OutputConstraintInfos.push_back(Info);
2722 }
2723
2724 for (unsigned i = 0, e = S.getNumInputs(); i != e && IsValidTargetAsm; i++) {
2725 StringRef Name;
2726 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2727 Name = GAS->getInputName(i);
2728 TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
2729 bool IsValid =
2730 getTarget().validateInputConstraint(OutputConstraintInfos, Info);
2731 if (IsHipStdPar && !IsValid)
2732 IsValidTargetAsm = false;
2733 else
2734 assert(IsValid && "Failed to parse input constraint");
2735 InputConstraintInfos.push_back(Info);
2736 }
2737
2738 if (!IsValidTargetAsm)
2739 return EmitHipStdParUnsupportedAsm(this, S);
2740
2741 std::string Constraints;
2742
2743 std::vector<LValue> ResultRegDests;
2744 std::vector<QualType> ResultRegQualTys;
2745 std::vector<llvm::Type *> ResultRegTypes;
2746 std::vector<llvm::Type *> ResultTruncRegTypes;
2747 std::vector<llvm::Type *> ArgTypes;
2748 std::vector<llvm::Type *> ArgElemTypes;
2749 std::vector<llvm::Value*> Args;
2750 llvm::BitVector ResultTypeRequiresCast;
2751 llvm::BitVector ResultRegIsFlagReg;
2752
2753 // Keep track of inout constraints.
2754 std::string InOutConstraints;
2755 std::vector<llvm::Value*> InOutArgs;
2756 std::vector<llvm::Type*> InOutArgTypes;
2757 std::vector<llvm::Type*> InOutArgElemTypes;
2758
2759 // Keep track of out constraints for tied input operand.
2760 std::vector<std::string> OutputConstraints;
2761
2762 // Keep track of defined physregs.
2763 llvm::SmallSet<std::string, 8> PhysRegOutputs;
2764
2765 // An inline asm can be marked readonly if it meets the following conditions:
2766 // - it doesn't have any sideeffects
2767 // - it doesn't clobber memory
2768 // - it doesn't return a value by-reference
2769 // It can be marked readnone if it doesn't have any input memory constraints
2770 // in addition to meeting the conditions listed above.
2771 bool ReadOnly = true, ReadNone = true;
2772
2773 for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
2774 TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
2775
2776 // Simplify the output constraint.
2777 std::string OutputConstraint(S.getOutputConstraint(i));
2778 OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1,
2779 getTarget(), &OutputConstraintInfos);
2780
2781 const Expr *OutExpr = S.getOutputExpr(i);
2782 OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
2783
2784 std::string GCCReg;
2785 OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
2786 getTarget(), CGM, S,
2787 Info.earlyClobber(),
2788 &GCCReg);
2789 // Give an error on multiple outputs to same physreg.
2790 if (!GCCReg.empty() && !PhysRegOutputs.insert(GCCReg).second)
2791 CGM.Error(S.getAsmLoc(), "multiple outputs to hard register: " + GCCReg);
2792
2793 OutputConstraints.push_back(OutputConstraint);
2794 LValue Dest = EmitLValue(OutExpr);
2795 if (!Constraints.empty())
2796 Constraints += ',';
2797
2798 // If this is a register output, then make the inline asm return it
2799 // by-value. If this is a memory result, return the value by-reference.
2800 QualType QTy = OutExpr->getType();
2801 const bool IsScalarOrAggregate = hasScalarEvaluationKind(QTy) ||
2802 hasAggregateEvaluationKind(QTy);
2803 if (!Info.allowsMemory() && IsScalarOrAggregate) {
2804
2805 Constraints += "=" + OutputConstraint;
2806 ResultRegQualTys.push_back(QTy);
2807 ResultRegDests.push_back(Dest);
2808
2809 bool IsFlagReg = llvm::StringRef(OutputConstraint).starts_with("{@cc");
2810 ResultRegIsFlagReg.push_back(IsFlagReg);
2811
2812 llvm::Type *Ty = ConvertTypeForMem(QTy);
2813 const bool RequiresCast = Info.allowsRegister() &&
2814 (getTargetHooks().isScalarizableAsmOperand(*this, Ty) ||
2815 Ty->isAggregateType());
2816
2817 ResultTruncRegTypes.push_back(Ty);
2818 ResultTypeRequiresCast.push_back(RequiresCast);
2819
2820 if (RequiresCast) {
2821 unsigned Size = getContext().getTypeSize(QTy);
2822 if (Size)
2823 Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2824 else
2825 CGM.Error(OutExpr->getExprLoc(), "output size should not be zero");
2826 }
2827 ResultRegTypes.push_back(Ty);
2828 // If this output is tied to an input, and if the input is larger, then
2829 // we need to set the actual result type of the inline asm node to be the
2830 // same as the input type.
2831 if (Info.hasMatchingInput()) {
2832 unsigned InputNo;
2833 for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
2834 TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
2835 if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
2836 break;
2837 }
2838 assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
2839
2840 QualType InputTy = S.getInputExpr(InputNo)->getType();
2841 QualType OutputType = OutExpr->getType();
2842
2843 uint64_t InputSize = getContext().getTypeSize(InputTy);
2844 if (getContext().getTypeSize(OutputType) < InputSize) {
2845 // Form the asm to return the value as a larger integer or fp type.
2846 ResultRegTypes.back() = ConvertType(InputTy);
2847 }
2848 }
2849 if (llvm::Type* AdjTy =
2850 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2851 ResultRegTypes.back()))
2852 ResultRegTypes.back() = AdjTy;
2853 else {
2854 CGM.getDiags().Report(S.getAsmLoc(),
2855 diag::err_asm_invalid_type_in_input)
2856 << OutExpr->getType() << OutputConstraint;
2857 }
2858
2859 // Update largest vector width for any vector types.
2860 if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
2861 LargestVectorWidth =
2862 std::max((uint64_t)LargestVectorWidth,
2863 VT->getPrimitiveSizeInBits().getKnownMinValue());
2864 } else {
2865 Address DestAddr = Dest.getAddress();
2866 // Matrix types in memory are represented by arrays, but accessed through
2867 // vector pointers, with the alignment specified on the access operation.
2868 // For inline assembly, update pointer arguments to use vector pointers.
2869 // Otherwise there will be a mismatch if the matrix is also an
2870 // input argument, which is represented as a vector.
2871 if (isa<MatrixType>(OutExpr->getType().getCanonicalType()))
2872 DestAddr = DestAddr.withElementType(ConvertType(OutExpr->getType()));
2873
2874 ArgTypes.push_back(DestAddr.getType());
2875 ArgElemTypes.push_back(DestAddr.getElementType());
2876 Args.push_back(DestAddr.emitRawPointer(*this));
2877 Constraints += "=*";
2878 Constraints += OutputConstraint;
2879 ReadOnly = ReadNone = false;
2880 }
2881
2882 if (Info.isReadWrite()) {
2883 InOutConstraints += ',';
2884
2885 const Expr *InputExpr = S.getOutputExpr(i);
2886 llvm::Value *Arg;
2887 llvm::Type *ArgElemType;
2888 std::tie(Arg, ArgElemType) = EmitAsmInputLValue(
2889 Info, Dest, InputExpr->getType(), InOutConstraints,
2890 InputExpr->getExprLoc());
2891
2892 if (llvm::Type* AdjTy =
2893 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2894 Arg->getType()))
2895 Arg = Builder.CreateBitCast(Arg, AdjTy);
2896
2897 // Update largest vector width for any vector types.
2898 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2899 LargestVectorWidth =
2900 std::max((uint64_t)LargestVectorWidth,
2901 VT->getPrimitiveSizeInBits().getKnownMinValue());
2902 // Only tie earlyclobber physregs.
2903 if (Info.allowsRegister() && (GCCReg.empty() || Info.earlyClobber()))
2904 InOutConstraints += llvm::utostr(i);
2905 else
2906 InOutConstraints += OutputConstraint;
2907
2908 InOutArgTypes.push_back(Arg->getType());
2909 InOutArgElemTypes.push_back(ArgElemType);
2910 InOutArgs.push_back(Arg);
2911 }
2912 }
2913
2914 // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX)
2915 // to the return value slot. Only do this when returning in registers.
2916 if (isa<MSAsmStmt>(&S)) {
2917 const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
2918 if (RetAI.isDirect() || RetAI.isExtend()) {
2919 // Make a fake lvalue for the return value slot.
2920 LValue ReturnSlot = MakeAddrLValue(ReturnValue, FnRetTy);
2921 CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
2922 *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
2923 ResultRegDests, AsmString, S.getNumOutputs());
2924 SawAsmBlock = true;
2925 }
2926 }
2927
2928 for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
2929 const Expr *InputExpr = S.getInputExpr(i);
2930
2931 TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
2932
2933 if (Info.allowsMemory())
2934 ReadNone = false;
2935
2936 if (!Constraints.empty())
2937 Constraints += ',';
2938
2939 // Simplify the input constraint.
2940 std::string InputConstraint(S.getInputConstraint(i));
2941 InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(),
2942 &OutputConstraintInfos);
2943
2944 InputConstraint = AddVariableConstraints(
2945 InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()),
2946 getTarget(), CGM, S, false /* No EarlyClobber */);
2947
2948 std::string ReplaceConstraint (InputConstraint);
2949 llvm::Value *Arg;
2950 llvm::Type *ArgElemType;
2951 std::tie(Arg, ArgElemType) = EmitAsmInput(Info, InputExpr, Constraints);
2952
2953 // If this input argument is tied to a larger output result, extend the
2954 // input to be the same size as the output. The LLVM backend wants to see
2955 // the input and output of a matching constraint be the same size. Note
2956 // that GCC does not define what the top bits are here. We use zext because
2957 // that is usually cheaper, but LLVM IR should really get an anyext someday.
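// For example (illustrative only): in "asm("..." : "=r"(i) : "0"(c))" with
// "int i" and "char c", the i8 input is zero-extended to i32 to match the
// tied output.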
2958 if (Info.hasTiedOperand()) {
2959 unsigned Output = Info.getTiedOperand();
2960 QualType OutputType = S.getOutputExpr(Output)->getType();
2961 QualType InputTy = InputExpr->getType();
2962
2963 if (getContext().getTypeSize(OutputType) >
2964 getContext().getTypeSize(InputTy)) {
2965 // Use ptrtoint as appropriate so that we can do our extension.
2966 if (isa<llvm::PointerType>(Arg->getType()))
2967 Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
2968 llvm::Type *OutputTy = ConvertType(OutputType);
2969 if (isa<llvm::IntegerType>(OutputTy))
2970 Arg = Builder.CreateZExt(Arg, OutputTy);
2971 else if (isa<llvm::PointerType>(OutputTy))
2972 Arg = Builder.CreateZExt(Arg, IntPtrTy);
2973 else if (OutputTy->isFloatingPointTy())
2974 Arg = Builder.CreateFPExt(Arg, OutputTy);
2975 }
2976 // Deal with the tied operands' constraint code in adjustInlineAsmType.
2977 ReplaceConstraint = OutputConstraints[Output];
2978 }
2979 if (llvm::Type* AdjTy =
2980 getTargetHooks().adjustInlineAsmType(*this, ReplaceConstraint,
2981 Arg->getType()))
2982 Arg = Builder.CreateBitCast(Arg, AdjTy);
2983 else
2984 CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
2985 << InputExpr->getType() << InputConstraint;
2986
2987 // Update largest vector width for any vector types.
2988 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2989 LargestVectorWidth =
2990 std::max((uint64_t)LargestVectorWidth,
2991 VT->getPrimitiveSizeInBits().getKnownMinValue());
2992
2993 ArgTypes.push_back(Arg->getType());
2994 ArgElemTypes.push_back(ArgElemType);
2995 Args.push_back(Arg);
2996 Constraints += InputConstraint;
2997 }
2998
2999 // Append the "input" part of inout constraints.
3000 for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
3001 ArgTypes.push_back(InOutArgTypes[i]);
3002 ArgElemTypes.push_back(InOutArgElemTypes[i]);
3003 Args.push_back(InOutArgs[i]);
3004 }
3005 Constraints += InOutConstraints;
3006
3007 // Labels
3008 SmallVector<llvm::BasicBlock *, 16> Transfer;
3009 llvm::BasicBlock *Fallthrough = nullptr;
3010 bool IsGCCAsmGoto = false;
3011 if (const auto *GS = dyn_cast<GCCAsmStmt>(&S)) {
3012 IsGCCAsmGoto = GS->isAsmGoto();
3013 if (IsGCCAsmGoto) {
3014 for (const auto *E : GS->labels()) {
3015 JumpDest Dest = getJumpDestForLabel(E->getLabel());
3016 Transfer.push_back(Dest.getBlock());
3017 if (!Constraints.empty())
3018 Constraints += ',';
3019 Constraints += "!i";
3020 }
3021 Fallthrough = createBasicBlock("asm.fallthrough");
3022 }
3023 }
3024
3025 bool HasUnwindClobber = false;
3026
3027 // Clobbers
3028 for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
3029 StringRef Clobber = S.getClobber(i);
3030
3031 if (Clobber == "memory")
3032 ReadOnly = ReadNone = false;
3033 else if (Clobber == "unwind") {
3034 HasUnwindClobber = true;
3035 continue;
3036 } else if (Clobber != "cc") {
3037 Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
3038 if (CGM.getCodeGenOpts().StackClashProtector &&
3039 getTarget().isSPRegName(Clobber)) {
3040 CGM.getDiags().Report(S.getAsmLoc(),
3041 diag::warn_stack_clash_protection_inline_asm);
3042 }
3043 }
3044
3045 if (isa<MSAsmStmt>(&S)) {
3046 if (Clobber == "eax" || Clobber == "edx") {
3047 if (Constraints.find("=&A") != std::string::npos)
3048 continue;
3049 std::string::size_type position1 =
3050 Constraints.find("={" + Clobber.str() + "}");
3051 if (position1 != std::string::npos) {
3052 Constraints.insert(position1 + 1, "&");
3053 continue;
3054 }
3055 std::string::size_type position2 = Constraints.find("=A");
3056 if (position2 != std::string::npos) {
3057 Constraints.insert(position2 + 1, "&");
3058 continue;
3059 }
3060 }
3061 }
3062 if (!Constraints.empty())
3063 Constraints += ',';
3064
3065 Constraints += "~{";
3066 Constraints += Clobber;
3067 Constraints += '}';
3068 }
3069
3070 assert(!(HasUnwindClobber && IsGCCAsmGoto) &&
3071 "unwind clobber can't be used with asm goto");
3072
3073 // Add machine specific clobbers
3074 std::string_view MachineClobbers = getTarget().getClobbers();
3075 if (!MachineClobbers.empty()) {
3076 if (!Constraints.empty())
3077 Constraints += ',';
3078 Constraints += MachineClobbers;
3079 }
3080
3081 llvm::Type *ResultType;
3082 if (ResultRegTypes.empty())
3083 ResultType = VoidTy;
3084 else if (ResultRegTypes.size() == 1)
3085 ResultType = ResultRegTypes[0];
3086 else
3087 ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
3088
3089 llvm::FunctionType *FTy =
3090 llvm::FunctionType::get(ResultType, ArgTypes, false);
3091
3092 bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
3093
3094 llvm::InlineAsm::AsmDialect GnuAsmDialect =
3095 CGM.getCodeGenOpts().getInlineAsmDialect() == CodeGenOptions::IAD_ATT
3096 ? llvm::InlineAsm::AD_ATT
3097 : llvm::InlineAsm::AD_Intel;
3098 llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
3099 llvm::InlineAsm::AD_Intel : GnuAsmDialect;
3100
3101 llvm::InlineAsm *IA = llvm::InlineAsm::get(
3102 FTy, AsmString, Constraints, HasSideEffect,
3103 /* IsAlignStack */ false, AsmDialect, HasUnwindClobber);
3104 std::vector<llvm::Value*> RegResults;
3105 llvm::CallBrInst *CBR;
3106 llvm::DenseMap<llvm::BasicBlock *, SmallVector<llvm::Value *, 4>>
3107 CBRRegResults;
3108 if (IsGCCAsmGoto) {
3109 CBR = Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
3110 EmitBlock(Fallthrough);
3111 UpdateAsmCallInst(*CBR, HasSideEffect, /*HasUnwindClobber=*/false, ReadOnly,
3112 ReadNone, InNoMergeAttributedStmt,
3113 InNoConvergentAttributedStmt, S, ResultRegTypes,
3114 ArgElemTypes, *this, RegResults);
3115 // Because we are emitting code top to bottom, we don't have enough
3116 // information at this point to know precisely whether we have a critical
3117 // edge. If we have outputs, split all indirect destinations.
3118 if (!RegResults.empty()) {
3119 unsigned i = 0;
3120 for (llvm::BasicBlock *Dest : CBR->getIndirectDests()) {
3121 llvm::Twine SynthName = Dest->getName() + ".split";
3122 llvm::BasicBlock *SynthBB = createBasicBlock(SynthName);
3123 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
3124 Builder.SetInsertPoint(SynthBB);
3125
3126 if (ResultRegTypes.size() == 1) {
3127 CBRRegResults[SynthBB].push_back(CBR);
3128 } else {
3129 for (unsigned j = 0, e = ResultRegTypes.size(); j != e; ++j) {
3130 llvm::Value *Tmp = Builder.CreateExtractValue(CBR, j, "asmresult");
3131 CBRRegResults[SynthBB].push_back(Tmp);
3132 }
3133 }
3134
3135 EmitBranch(Dest);
3136 EmitBlock(SynthBB);
3137 CBR->setIndirectDest(i++, SynthBB);
3138 }
3139 }
3140 } else if (HasUnwindClobber) {
3141 llvm::CallBase *Result = EmitCallOrInvoke(IA, Args, "");
3142 UpdateAsmCallInst(*Result, HasSideEffect, /*HasUnwindClobber=*/true,
3143 ReadOnly, ReadNone, InNoMergeAttributedStmt,
3144 InNoConvergentAttributedStmt, S, ResultRegTypes,
3145 ArgElemTypes, *this, RegResults);
3146 } else {
3147 llvm::CallInst *Result =
3148 Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
3149 UpdateAsmCallInst(*Result, HasSideEffect, /*HasUnwindClobber=*/false,
3150 ReadOnly, ReadNone, InNoMergeAttributedStmt,
3151 InNoConvergentAttributedStmt, S, ResultRegTypes,
3152 ArgElemTypes, *this, RegResults);
3153 }
3154
3155 EmitAsmStores(*this, S, RegResults, ResultRegTypes, ResultTruncRegTypes,
3156 ResultRegDests, ResultRegQualTys, ResultTypeRequiresCast,
3157 ResultRegIsFlagReg);
3158
3159 // If this is an asm goto with outputs, repeat EmitAsmStores, but with a
3160 // different insertion point; one for each indirect destination and with
3161 // CBRRegResults rather than RegResults.
3162 if (IsGCCAsmGoto && !CBRRegResults.empty()) {
3163 for (llvm::BasicBlock *Succ : CBR->getIndirectDests()) {
3164 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
3165 Builder.SetInsertPoint(Succ, --(Succ->end()));
3166 EmitAsmStores(*this, S, CBRRegResults[Succ], ResultRegTypes,
3167 ResultTruncRegTypes, ResultRegDests, ResultRegQualTys,
3168 ResultTypeRequiresCast, ResultRegIsFlagReg);
3169 }
3170 }
3171}
3172
3173LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
3174 const RecordDecl *RD = S.getCapturedRecordDecl();
3175 QualType RecordTy = getContext().getRecordType(RD);
3176
3177 // Initialize the captured struct.
3178 LValue SlotLV =
3179 MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy);
3180
3181 RecordDecl::field_iterator CurField = RD->field_begin();
3182 for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
3183 E = S.capture_init_end();
3184 I != E; ++I, ++CurField) {
3185 LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
3186 if (CurField->hasCapturedVLAType()) {
3187 EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
3188 } else {
3189 EmitInitializerForField(*CurField, LV, *I);
3190 }
3191 }
3192
3193 return SlotLV;
3194}
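// Illustrative source (not from this file; assumes -fopenmp): the parallel
// region below carries a CapturedStmt whose captures of 'n' and 'p' are
// written field by field into the "agg.captured" temporary built by
// InitCapturedStruct above.
void scale(int n, float *p) {
#pragma omp parallel
  {
    for (int i = 0; i < n; ++i) // 'n' and 'p' are read through the record
      p[i] *= 2.0f;
  }
}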
3195
3196/// Generate an outlined function for the body of a CapturedStmt, store any
3197/// captured variables into the captured struct, and call the outlined function.
3198llvm::Function *
3199CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
3200 LValue CapStruct = InitCapturedStruct(S);
3201
3202 // Emit the CapturedDecl
3203 CodeGenFunction CGF(CGM, true);
3204 CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K));
3205 llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
3206 delete CGF.CapturedStmtInfo;
3207
3208 // Emit call to the helper function.
3209 EmitCallOrInvoke(F, CapStruct.getPointer(*this));
3210
3211 return F;
3212}
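// Illustrative sketch (not from this file): the Clang testing pragma below
// yields a plain CapturedStmt that takes exactly the EmitCapturedStmt path
// above: build the capture record, outline the body, then emit a direct
// call to the helper with the record's address.
void bump(int n) {
#pragma clang __debug captured
  {
    n += 1; // rewritten to go through the captured-struct context parameter
  }
}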
3213
3214Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
3215 LValue CapStruct = InitCapturedStruct(S);
3216 return CapStruct.getAddress();
3217}
3218
3219/// Creates the outlined function for a CapturedStmt.
3220llvm::Function *
3221CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
3222 assert(CapturedStmtInfo &&
3223 "CapturedStmtInfo should be set when generating the captured function");
3224 const CapturedDecl *CD = S.getCapturedDecl();
3225 const RecordDecl *RD = S.getCapturedRecordDecl();
3226 SourceLocation Loc = S.getBeginLoc();
3227 assert(CD->hasBody() && "missing CapturedDecl body");
3228
3229 // Build the argument list.
3230 ASTContext &Ctx = CGM.getContext();
3231 FunctionArgList Args;
3232 Args.append(CD->param_begin(), CD->param_end());
3233
3234 // Create the function declaration.
3235 const CGFunctionInfo &FuncInfo =
3236 CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
3237 llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
3238
3239 llvm::Function *F =
3240 llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
3241 CapturedStmtInfo->getHelperName(), &CGM.getModule());
3242 CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
3243 if (CD->isNothrow())
3244 F->addFnAttr(llvm::Attribute::NoUnwind);
3245
3246 // Generate the function.
3247 StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
3248 CD->getBody()->getBeginLoc());
3249 // Set the context parameter in CapturedStmtInfo.
3250 Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
3251 CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));
3252
3253 // Initialize variable-length arrays.
3254 LValue Base = MakeNaturalAlignRawAddrLValue(
3255 CapturedStmtInfo->getContextValue(), Ctx.getTagDeclType(RD));
3256 for (auto *FD : RD->fields()) {
3257 if (FD->hasCapturedVLAType()) {
3258 auto *ExprArg =
3259 EmitLoadOfLValue(EmitLValueForField(Base, FD), S.getBeginLoc())
3260 .getScalarVal();
3261 auto VAT = FD->getCapturedVLAType();
3262 VLASizeMap[VAT->getSizeExpr()] = ExprArg;
3263 }
3264 }
3265
3266 // If 'this' is captured, load it into CXXThisValue.
3267 if (CapturedStmtInfo->isCXXThisExprCaptured()) {
3268 FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
3269 LValue ThisLValue = EmitLValueForField(Base, FD);
3270 CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
3271 }
3272
3273 PGO.assignRegionCounters(GlobalDecl(CD), F);
3274 CapturedStmtInfo->EmitBody(*this, CD->getBody());
3275 FinishFunction(CD->getBodyRBrace());
3276
3277 return F;
3278}
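// Illustrative sketch (not from this file; assumes -fopenmp and the GNU VLA
// extension): with a variably-modified array in the region, the VLA bound is
// captured as an extra field, and the fields loop above reloads it and seeds
// VLASizeMap so address arithmetic inside the outlined body can use it.
void init(int n) {
  double a[n][n];
#pragma omp parallel
  {
    a[0][0] = 1.0; // needs the captured bound 'n' to compute the address
  }
}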
3279
3280// Returns the first convergence entry/loop/anchor instruction found in |BB|,
3281// or nullptr otherwise.
3282static llvm::ConvergenceControlInst *getConvergenceToken(llvm::BasicBlock *BB) {
3283 for (auto &I : *BB) {
3284 if (auto *CI = dyn_cast<llvm::ConvergenceControlInst>(&I))
3285 return CI;
3286 }
3287 return nullptr;
3288}
3289
3290llvm::CallBase *
3291CodeGenFunction::addConvergenceControlToken(llvm::CallBase *Input) {
3292 llvm::ConvergenceControlInst *ParentToken = ConvergenceTokenStack.back();
3293 assert(ParentToken);
3294
3295 llvm::Value *bundleArgs[] = {ParentToken};
3296 llvm::OperandBundleDef OB("convergencectrl", bundleArgs);
3297 auto *Output = llvm::CallBase::addOperandBundle(
3298 Input, llvm::LLVMContext::OB_convergencectrl, OB, Input->getIterator());
3299 Input->replaceAllUsesWith(Output);
3300 Input->eraseFromParent();
3301 return Output;
3302}
3303
3304llvm::ConvergenceControlInst *
3305CodeGenFunction::emitConvergenceLoopToken(llvm::BasicBlock *BB) {
3306 CGBuilderTy::InsertPoint IP = Builder.saveIP();
3307 if (BB->empty())
3308 Builder.SetInsertPoint(BB);
3309 else
3310 Builder.SetInsertPoint(BB->getFirstInsertionPt());
3311
3312 llvm::CallBase *CB = Builder.CreateIntrinsic(
3313 llvm::Intrinsic::experimental_convergence_loop, {}, {});
3314 Builder.restoreIP(IP);
3315
3316 CB = addConvergenceControlToken(CB);
3317 return cast<llvm::ConvergenceControlInst>(CB);
3318}
3319
3320llvm::ConvergenceControlInst *
3321CodeGenFunction::getOrEmitConvergenceEntryToken(llvm::Function *F) {
3322 llvm::BasicBlock *BB = &F->getEntryBlock();
3323 llvm::ConvergenceControlInst *Token = getConvergenceToken(BB);
3324 if (Token)
3325 return Token;
3326
3327 // Adding a convergence token requires the function to be marked as
3328 // convergent.
3329 F->setConvergent();
3330
3331 CGBuilderTy::InsertPoint IP = Builder.saveIP();
3332 Builder.SetInsertPoint(&BB->front());
3333 llvm::CallBase *I = Builder.CreateIntrinsic(
3334 llvm::Intrinsic::experimental_convergence_entry, {}, {});
3335 assert(isa<llvm::IntrinsicInst>(I));
3336 Builder.restoreIP(IP);
3337
3338 return cast<llvm::ConvergenceControlInst>(I);
3339}
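// Illustrative, self-contained sketch (not from this file) of the token
// plumbing above, using the same LLVM APIs the three functions use: emit the
// entry and loop intrinsics, then tie the loop token to its parent by
// rebuilding the call with a "convergencectrl" operand bundle, exactly as
// addConvergenceControlToken does. Names like "kernel" are arbitrary.
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  llvm::LLVMContext Ctx;
  llvm::Module M("convergence-demo", Ctx);
  auto *FTy = llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx), false);
  auto *F = llvm::Function::Create(FTy, llvm::GlobalValue::ExternalLinkage,
                                   "kernel", M);
  F->setConvergent(); // required once convergence tokens are present

  auto *Entry = llvm::BasicBlock::Create(Ctx, "entry", F);
  auto *Loop = llvm::BasicBlock::Create(Ctx, "loop", F);
  llvm::IRBuilder<> B(Entry);

  // Function-wide anchor, as in getOrEmitConvergenceEntryToken.
  llvm::CallInst *EntryTok = B.CreateIntrinsic(
      llvm::Intrinsic::experimental_convergence_entry, {}, {});
  B.CreateBr(Loop);

  // Loop heart token at the top of the header, as in emitConvergenceLoopToken.
  B.SetInsertPoint(Loop);
  llvm::CallInst *LoopTok = B.CreateIntrinsic(
      llvm::Intrinsic::experimental_convergence_loop, {}, {});
  B.CreateBr(Loop);

  // Re-create the call with the parent token attached via an operand bundle,
  // then replace and erase the original (addConvergenceControlToken's steps).
  llvm::Value *BundleArgs[] = {EntryTok};
  llvm::OperandBundleDef OB("convergencectrl", BundleArgs);
  llvm::CallBase *Tied = llvm::CallBase::addOperandBundle(
      LoopTok, llvm::LLVMContext::OB_convergencectrl, OB,
      LoopTok->getIterator());
  LoopTok->replaceAllUsesWith(Tied);
  LoopTok->eraseFromParent();

  M.print(llvm::outs(), nullptr); // the loop call now carries the bundle
  return 0;
}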