rustc_codegen_ssa/mir/block.rs

use std::cmp;

use rustc_abi::{Align, BackendRepr, ExternAbi, HasDataLayout, Reg, Size, WrappingRange};
use rustc_ast as ast;
use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
use rustc_data_structures::packed::Pu128;
use rustc_hir::lang_items::LangItem;
use rustc_lint_defs::builtin::TAIL_CALL_TRACK_CALLER;
use rustc_middle::mir::{self, AssertKind, InlineAsmMacro, SwitchTargets, UnwindTerminateReason};
use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, ValidityRequirement};
use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths};
use rustc_middle::ty::{self, Instance, Ty};
use rustc_middle::{bug, span_bug};
use rustc_session::config::OptLevel;
use rustc_span::Span;
use rustc_span::source_map::Spanned;
use rustc_target::callconv::{ArgAbi, CastTarget, FnAbi, PassMode};
use tracing::{debug, info};

use super::operand::OperandRef;
use super::operand::OperandValue::{Immediate, Pair, Ref, ZeroSized};
use super::place::{PlaceRef, PlaceValue};
use super::{CachedLlbb, FunctionCx, LocalRef};
use crate::base::{self, is_call_from_compiler_builtins_to_upstream_monomorphization};
use crate::common::{self, IntPredicate};
use crate::errors::CompilerBuiltinsCannotCall;
use crate::traits::*;
use crate::{MemFlags, meth};

// Indicates if we are in the middle of merging a BB's successor into it. This
// can happen when BB jumps directly to its successor and the successor has no
// other predecessors.
#[derive(Debug, PartialEq)]
enum MergingSucc {
    False,
    True,
}
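
// `MergingSucc::True` is returned by the terminator codegen helpers when the
// successor block can be folded into the current backend block; `codegen_block`
// reacts to it by continuing to emit the successor's statements into the same
// backend basic block instead of starting a new one.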

/// Indicates to the call terminator codegen whether a call
/// is a normal call or an explicit tail call.
#[derive(Debug, PartialEq)]
enum CallKind {
    Normal,
    Tail,
}

/// Used by `FunctionCx::codegen_terminator` for emitting common patterns
/// e.g., creating a basic block, calling a function, etc.
struct TerminatorCodegenHelper<'tcx> {
    bb: mir::BasicBlock,
    terminator: &'tcx mir::Terminator<'tcx>,
}

impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
    /// Returns the appropriate `Funclet` for the current funclet, if on MSVC:
    /// either the one previously cached, or a new one created by `landing_pad_for`.
    fn funclet<'b, Bx: BuilderMethods<'a, 'tcx>>(
        &self,
        fx: &'b mut FunctionCx<'a, 'tcx, Bx>,
    ) -> Option<&'b Bx::Funclet> {
        let cleanup_kinds = fx.cleanup_kinds.as_ref()?;
        let funclet_bb = cleanup_kinds[self.bb].funclet_bb(self.bb)?;
        // If `landing_pad_for` hasn't been called yet to create the `Funclet`,
        // it has to be now. This may not seem necessary, as RPO should lead
        // to all the unwind edges being visited (and so to `landing_pad_for`
        // getting called for them), before building any of the blocks inside
        // the funclet itself - however, if MIR contains edges that end up not
        // being needed in the LLVM IR after monomorphization, the funclet may
        // be unreachable, and we don't yet have a way to skip building it in
        // such an eventuality (which may be a better solution than this).
        if fx.funclets[funclet_bb].is_none() {
            fx.landing_pad_for(funclet_bb);
        }
        Some(
            fx.funclets[funclet_bb]
                .as_ref()
                .expect("landing_pad_for didn't also create funclets entry"),
        )
    }

    /// Get a basic block (creating it if necessary), possibly with cleanup
    /// code in it or a cleanup trampoline next to it.
    fn llbb_with_cleanup<Bx: BuilderMethods<'a, 'tcx>>(
        &self,
        fx: &mut FunctionCx<'a, 'tcx, Bx>,
        target: mir::BasicBlock,
    ) -> Bx::BasicBlock {
        let (needs_landing_pad, is_cleanupret) = self.llbb_characteristics(fx, target);
        let mut lltarget = fx.llbb(target);
        if needs_landing_pad {
            lltarget = fx.landing_pad_for(target);
        }
        if is_cleanupret {
            // Cross-funclet jump - need a trampoline
            assert!(base::wants_new_eh_instructions(fx.cx.tcx().sess));
            debug!("llbb_with_cleanup: creating cleanup trampoline for {:?}", target);
            let name = &format!("{:?}_cleanup_trampoline_{:?}", self.bb, target);
            let trampoline_llbb = Bx::append_block(fx.cx, fx.llfn, name);
            let mut trampoline_bx = Bx::build(fx.cx, trampoline_llbb);
            trampoline_bx.cleanup_ret(self.funclet(fx).unwrap(), Some(lltarget));
            trampoline_llbb
        } else {
            lltarget
        }
    }

    fn llbb_characteristics<Bx: BuilderMethods<'a, 'tcx>>(
        &self,
        fx: &mut FunctionCx<'a, 'tcx, Bx>,
        target: mir::BasicBlock,
    ) -> (bool, bool) {
        if let Some(ref cleanup_kinds) = fx.cleanup_kinds {
            let funclet_bb = cleanup_kinds[self.bb].funclet_bb(self.bb);
            let target_funclet = cleanup_kinds[target].funclet_bb(target);
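            // `needs_landing_pad` means the target must be entered through its
            // landing pad / funclet entry block; `is_cleanupret` means we are
            // leaving the current funclet, which (with funclet-based EH) has to
            // be done via `cleanupret` rather than a plain branch.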
            let (needs_landing_pad, is_cleanupret) = match (funclet_bb, target_funclet) {
                (None, None) => (false, false),
                (None, Some(_)) => (true, false),
                (Some(f), Some(t_f)) => (f != t_f, f != t_f),
                (Some(_), None) => {
                    let span = self.terminator.source_info.span;
                    span_bug!(span, "{:?} - jump out of cleanup?", self.terminator);
                }
            };
            (needs_landing_pad, is_cleanupret)
        } else {
            let needs_landing_pad = !fx.mir[self.bb].is_cleanup && fx.mir[target].is_cleanup;
            let is_cleanupret = false;
            (needs_landing_pad, is_cleanupret)
        }
    }

    fn funclet_br<Bx: BuilderMethods<'a, 'tcx>>(
        &self,
        fx: &mut FunctionCx<'a, 'tcx, Bx>,
        bx: &mut Bx,
        target: mir::BasicBlock,
        mergeable_succ: bool,
    ) -> MergingSucc {
        let (needs_landing_pad, is_cleanupret) = self.llbb_characteristics(fx, target);
        if mergeable_succ && !needs_landing_pad && !is_cleanupret {
            // We can merge the successor into this bb, so no need for a `br`.
            MergingSucc::True
        } else {
            let mut lltarget = fx.llbb(target);
            if needs_landing_pad {
                lltarget = fx.landing_pad_for(target);
            }
            if is_cleanupret {
                // micro-optimization: generate a `ret` rather than a jump
                // to a trampoline.
                bx.cleanup_ret(self.funclet(fx).unwrap(), Some(lltarget));
            } else {
                bx.br(lltarget);
            }
            MergingSucc::False
        }
    }

    /// Call `fn_ptr` of `fn_abi` with the arguments `llargs`, the optional
    /// return destination `destination` and the unwind action `unwind`.
    fn do_call<Bx: BuilderMethods<'a, 'tcx>>(
        &self,
        fx: &mut FunctionCx<'a, 'tcx, Bx>,
        bx: &mut Bx,
        fn_abi: &'tcx FnAbi<'tcx, Ty<'tcx>>,
        fn_ptr: Bx::Value,
        llargs: &[Bx::Value],
        destination: Option<(ReturnDest<'tcx, Bx::Value>, mir::BasicBlock)>,
        mut unwind: mir::UnwindAction,
        lifetime_ends_after_call: &[(Bx::Value, Size)],
        instance: Option<Instance<'tcx>>,
        kind: CallKind,
        mergeable_succ: bool,
    ) -> MergingSucc {
        let tcx = bx.tcx();
        if let Some(instance) = instance
            && is_call_from_compiler_builtins_to_upstream_monomorphization(tcx, instance)
        {
            if destination.is_some() {
                let caller_def = fx.instance.def_id();
                let e = CompilerBuiltinsCannotCall {
                    span: tcx.def_span(caller_def),
                    caller: with_no_trimmed_paths!(tcx.def_path_str(caller_def)),
                    callee: with_no_trimmed_paths!(tcx.def_path_str(instance.def_id())),
                };
                tcx.dcx().emit_err(e);
            } else {
                info!(
                    "compiler_builtins call to diverging function {:?} replaced with abort",
                    instance.def_id()
                );
                bx.abort();
                bx.unreachable();
                return MergingSucc::False;
            }
        }

        // If there is a cleanup block and the function we're calling can unwind, then
        // do an invoke, otherwise do a call.
        let fn_ty = bx.fn_decl_backend_type(fn_abi);

        let fn_attrs = if bx.tcx().def_kind(fx.instance.def_id()).has_codegen_attrs() {
            Some(bx.tcx().codegen_fn_attrs(fx.instance.def_id()))
        } else {
            None
        };

        if !fn_abi.can_unwind {
            unwind = mir::UnwindAction::Unreachable;
        }

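        // Map the MIR `UnwindAction` to an optional backend unwind destination:
        // `Cleanup` lands in that block's (possibly funclet-wrapped) landing pad,
        // `Continue` and `Unreachable` need no edge, and `Terminate` goes to the
        // shared terminate block (except inside MSVC-style cleanup, see below).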
        let unwind_block = match unwind {
            mir::UnwindAction::Cleanup(cleanup) => Some(self.llbb_with_cleanup(fx, cleanup)),
            mir::UnwindAction::Continue => None,
            mir::UnwindAction::Unreachable => None,
            mir::UnwindAction::Terminate(reason) => {
                if fx.mir[self.bb].is_cleanup && base::wants_new_eh_instructions(fx.cx.tcx().sess) {
                    // MSVC SEH will abort automatically if an exception tries to
                    // propagate out from cleanup.

                    // FIXME(@mirkootter): For wasm, we currently do not support terminate during
                    // cleanup, because this requires a few more changes: the current code
                    // caches the `terminate_block` for each function, whereas funclet-based code
                    // requires a different `terminate_block` for each funclet.
                    // Until this is implemented, we just do not unwind inside cleanup blocks.

                    None
                } else {
                    Some(fx.terminate_block(reason))
                }
            }
        };

        if kind == CallKind::Tail {
            bx.tail_call(fn_ty, fn_attrs, fn_abi, fn_ptr, llargs, self.funclet(fx), instance);
            return MergingSucc::False;
        }

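        // With an unwind destination, the call is emitted as an `invoke` so that
        // unwinding transfers control to the landing pad; without one, a plain
        // `call` suffices. Roughly (illustrative LLVM IR only; the exact shape is
        // backend-dependent):
        //
        //   %r = invoke ... @callee(...) to label %normal_return unwind label %cleanup
        //
        // versus
        //
        //   %r = call ... @callee(...)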
        if let Some(unwind_block) = unwind_block {
            let ret_llbb = if let Some((_, target)) = destination {
                fx.llbb(target)
            } else {
                fx.unreachable_block()
            };
            let invokeret = bx.invoke(
                fn_ty,
                fn_attrs,
                Some(fn_abi),
                fn_ptr,
                llargs,
                ret_llbb,
                unwind_block,
                self.funclet(fx),
                instance,
            );
            if fx.mir[self.bb].is_cleanup {
                bx.apply_attrs_to_cleanup_callsite(invokeret);
            }

            if let Some((ret_dest, target)) = destination {
                bx.switch_to_block(fx.llbb(target));
                fx.set_debug_loc(bx, self.terminator.source_info);
                for &(tmp, size) in lifetime_ends_after_call {
                    bx.lifetime_end(tmp, size);
                }
                fx.store_return(bx, ret_dest, &fn_abi.ret, invokeret);
            }
            MergingSucc::False
        } else {
            let llret =
                bx.call(fn_ty, fn_attrs, Some(fn_abi), fn_ptr, llargs, self.funclet(fx), instance);
            if fx.mir[self.bb].is_cleanup {
                bx.apply_attrs_to_cleanup_callsite(llret);
            }

            if let Some((ret_dest, target)) = destination {
                for &(tmp, size) in lifetime_ends_after_call {
                    bx.lifetime_end(tmp, size);
                }
                fx.store_return(bx, ret_dest, &fn_abi.ret, llret);
                self.funclet_br(fx, bx, target, mergeable_succ)
            } else {
                bx.unreachable();
                MergingSucc::False
            }
        }
    }

    /// Generates inline assembly with optional `destination` and `unwind`.
    fn do_inlineasm<Bx: BuilderMethods<'a, 'tcx>>(
        &self,
        fx: &mut FunctionCx<'a, 'tcx, Bx>,
        bx: &mut Bx,
        template: &[InlineAsmTemplatePiece],
        operands: &[InlineAsmOperandRef<'tcx, Bx>],
        options: InlineAsmOptions,
        line_spans: &[Span],
        destination: Option<mir::BasicBlock>,
        unwind: mir::UnwindAction,
        instance: Instance<'_>,
        mergeable_succ: bool,
    ) -> MergingSucc {
        let unwind_target = match unwind {
            mir::UnwindAction::Cleanup(cleanup) => Some(self.llbb_with_cleanup(fx, cleanup)),
            mir::UnwindAction::Terminate(reason) => Some(fx.terminate_block(reason)),
            mir::UnwindAction::Continue => None,
            mir::UnwindAction::Unreachable => None,
        };

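        // Three emission paths: asm that uses `label` operands (goto-style asm,
        // which cannot also unwind here), asm with an unwind target, and plain
        // asm that falls through to `destination` or is diverging.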
        if operands.iter().any(|x| matches!(x, InlineAsmOperandRef::Label { .. })) {
            assert!(unwind_target.is_none());
            let ret_llbb = if let Some(target) = destination {
                fx.llbb(target)
            } else {
                fx.unreachable_block()
            };

            bx.codegen_inline_asm(
                template,
                operands,
                options,
                line_spans,
                instance,
                Some(ret_llbb),
                None,
            );
            MergingSucc::False
        } else if let Some(cleanup) = unwind_target {
            let ret_llbb = if let Some(target) = destination {
                fx.llbb(target)
            } else {
                fx.unreachable_block()
            };

            bx.codegen_inline_asm(
                template,
                operands,
                options,
                line_spans,
                instance,
                Some(ret_llbb),
                Some((cleanup, self.funclet(fx))),
            );
            MergingSucc::False
        } else {
            bx.codegen_inline_asm(template, operands, options, line_spans, instance, None, None);

            if let Some(target) = destination {
                self.funclet_br(fx, bx, target, mergeable_succ)
            } else {
                bx.unreachable();
                MergingSucc::False
            }
        }
    }
}

/// Codegen implementations for some terminator variants.
impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    /// Generates code for a `Resume` terminator.
    fn codegen_resume_terminator(&mut self, helper: TerminatorCodegenHelper<'tcx>, bx: &mut Bx) {
        if let Some(funclet) = helper.funclet(self) {
            bx.cleanup_ret(funclet, None);
        } else {
            let slot = self.get_personality_slot(bx);
            let exn0 = slot.project_field(bx, 0);
            let exn0 = bx.load_operand(exn0).immediate();
            let exn1 = slot.project_field(bx, 1);
            let exn1 = bx.load_operand(exn1).immediate();
            slot.storage_dead(bx);

            bx.resume(exn0, exn1);
        }
    }

    fn codegen_switchint_terminator(
        &mut self,
        helper: TerminatorCodegenHelper<'tcx>,
        bx: &mut Bx,
        discr: &mir::Operand<'tcx>,
        targets: &SwitchTargets,
    ) {
        let discr = self.codegen_operand(bx, discr);
        let discr_value = discr.immediate();
        let switch_ty = discr.layout.ty;
        // If our discriminant is a constant we can branch directly
        if let Some(const_discr) = bx.const_to_opt_u128(discr_value, false) {
            let target = targets.target_for_value(const_discr);
            bx.br(helper.llbb_with_cleanup(self, target));
            return;
        };

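        // Several lowering strategies follow, from most to least specialized:
        // a single non-otherwise target becomes a conditional branch; a 0/1
        // switch whose `otherwise` is unreachable becomes a `trunc nuw` to `i1`
        // plus a conditional branch; at -O0, a two-target switch with an
        // unreachable `otherwise` also becomes a conditional branch; everything
        // else becomes a `switch`, with branch weights when coldness differs.
        // For the bool-like case the emitted IR is roughly (illustrative only):
        //
        //   %c = trunc nuw i8 %discr to i1
        //   br i1 %c, label %bb_true, label %bb_false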
        let mut target_iter = targets.iter();
        if target_iter.len() == 1 {
            // If there are two targets (one conditional, one fallback), emit `br` instead of
            // `switch`.
            let (test_value, target) = target_iter.next().unwrap();
            let otherwise = targets.otherwise();
            let lltarget = helper.llbb_with_cleanup(self, target);
            let llotherwise = helper.llbb_with_cleanup(self, otherwise);
            let target_cold = self.cold_blocks[target];
            let otherwise_cold = self.cold_blocks[otherwise];
            // If `target_cold == otherwise_cold`, the branches have the same weight
            // so there is no expectation. If they differ, the `target` branch is expected
            // when the `otherwise` branch is cold.
            let expect = if target_cold == otherwise_cold { None } else { Some(otherwise_cold) };
            if switch_ty == bx.tcx().types.bool {
                // Don't generate trivial icmps when switching on bool.
                match test_value {
                    0 => {
                        let expect = expect.map(|e| !e);
                        bx.cond_br_with_expect(discr_value, llotherwise, lltarget, expect);
                    }
                    1 => {
                        bx.cond_br_with_expect(discr_value, lltarget, llotherwise, expect);
                    }
                    _ => bug!(),
                }
            } else {
                let switch_llty = bx.immediate_backend_type(bx.layout_of(switch_ty));
                let llval = bx.const_uint_big(switch_llty, test_value);
                let cmp = bx.icmp(IntPredicate::IntEQ, discr_value, llval);
                bx.cond_br_with_expect(cmp, lltarget, llotherwise, expect);
            }
        } else if target_iter.len() == 2
            && self.mir[targets.otherwise()].is_empty_unreachable()
            && targets.all_values().contains(&Pu128(0))
            && targets.all_values().contains(&Pu128(1))
        {
            // This is the really common case for `bool`, `Option`, etc.
            // By using `trunc nuw` we communicate that other values are
            // impossible without needing `switch` or `assume`s.
            let true_bb = targets.target_for_value(1);
            let false_bb = targets.target_for_value(0);
            let true_ll = helper.llbb_with_cleanup(self, true_bb);
            let false_ll = helper.llbb_with_cleanup(self, false_bb);

            let expected_cond_value = if self.cx.sess().opts.optimize == OptLevel::No {
                None
            } else {
                match (self.cold_blocks[true_bb], self.cold_blocks[false_bb]) {
                    // Same coldness, no expectation
                    (true, true) | (false, false) => None,
                    // Different coldness, expect the non-cold one
                    (true, false) => Some(false),
                    (false, true) => Some(true),
                }
            };

            let bool_ty = bx.tcx().types.bool;
            let cond = if switch_ty == bool_ty {
                discr_value
            } else {
                let bool_llty = bx.immediate_backend_type(bx.layout_of(bool_ty));
                bx.unchecked_utrunc(discr_value, bool_llty)
            };
            bx.cond_br_with_expect(cond, true_ll, false_ll, expected_cond_value);
        } else if self.cx.sess().opts.optimize == OptLevel::No
            && target_iter.len() == 2
            && self.mir[targets.otherwise()].is_empty_unreachable()
        {
            // In unoptimized builds, if there are two normal targets and the `otherwise` target is
            // an unreachable BB, emit `br` instead of `switch`. This leaves behind the unreachable
            // BB, which will usually (but not always) be dead code.
            //
            // Why only in unoptimized builds?
            // - In unoptimized builds LLVM uses FastISel which does not support switches, so it
            //   must fall back to the slower SelectionDAG isel. Therefore, using `br` gives
            //   significant compile time speedups for unoptimized builds.
            // - In optimized builds the above doesn't hold, and using `br` sometimes results in
            //   worse generated code because LLVM can no longer tell that the value being switched
            //   on can only have two values, e.g. 0 and 1.
            //
            let (test_value1, target1) = target_iter.next().unwrap();
            let (_test_value2, target2) = target_iter.next().unwrap();
            let ll1 = helper.llbb_with_cleanup(self, target1);
            let ll2 = helper.llbb_with_cleanup(self, target2);
            let switch_llty = bx.immediate_backend_type(bx.layout_of(switch_ty));
            let llval = bx.const_uint_big(switch_llty, test_value1);
            let cmp = bx.icmp(IntPredicate::IntEQ, discr_value, llval);
            bx.cond_br(cmp, ll1, ll2);
        } else {
            let otherwise = targets.otherwise();
            let otherwise_cold = self.cold_blocks[otherwise];
            let otherwise_unreachable = self.mir[otherwise].is_empty_unreachable();
            let cold_count = targets.iter().filter(|(_, target)| self.cold_blocks[*target]).count();
            let none_cold = cold_count == 0;
            let all_cold = cold_count == targets.iter().len();
            if (none_cold && (!otherwise_cold || otherwise_unreachable))
                || (all_cold && (otherwise_cold || otherwise_unreachable))
            {
                // All targets have the same weight,
                // or `otherwise` is unreachable and it's the only target with a different weight.
                bx.switch(
                    discr_value,
                    helper.llbb_with_cleanup(self, targets.otherwise()),
                    target_iter
                        .map(|(value, target)| (value, helper.llbb_with_cleanup(self, target))),
                );
            } else {
                // Targets have different weights
                bx.switch_with_weights(
                    discr_value,
                    helper.llbb_with_cleanup(self, targets.otherwise()),
                    otherwise_cold,
                    target_iter.map(|(value, target)| {
                        (value, helper.llbb_with_cleanup(self, target), self.cold_blocks[target])
                    }),
                );
            }
        }
    }

    fn codegen_return_terminator(&mut self, bx: &mut Bx) {
        // Call `va_end` if this is the definition of a C-variadic function.
        if self.fn_abi.c_variadic {
            // The `VaList` "spoofed" argument is just after all the real arguments.
            let va_list_arg_idx = self.fn_abi.args.len();
            match self.locals[mir::Local::from_usize(1 + va_list_arg_idx)] {
                LocalRef::Place(va_list) => {
                    bx.va_end(va_list.val.llval);
                }
                _ => bug!("C-variadic function must have a `VaList` place"),
            }
        }
        if self.fn_abi.ret.layout.is_uninhabited() {
            // Functions with uninhabited return values are marked `noreturn`,
            // so we should make sure that we never actually return.
            // We play it safe by using a well-defined `abort`, but we could go for immediate UB
            // if that turns out to be helpful.
            bx.abort();
            // `abort` does not terminate the block, so we still need to generate
            // an `unreachable` terminator after it.
            bx.unreachable();
            return;
        }
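        // How the return value is produced depends on the return `PassMode`:
        // `Ignore` and `Indirect` return nothing by value (the latter already
        // wrote through the out-pointer), `Direct`/`Pair` return the immediate(s),
        // and `Cast` loads the value through the cast type's representation.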
        let llval = match &self.fn_abi.ret.mode {
            PassMode::Ignore | PassMode::Indirect { .. } => {
                bx.ret_void();
                return;
            }

            PassMode::Direct(_) | PassMode::Pair(..) => {
                let op = self.codegen_consume(bx, mir::Place::return_place().as_ref());
                if let Ref(place_val) = op.val {
                    bx.load_from_place(bx.backend_type(op.layout), place_val)
                } else {
                    op.immediate_or_packed_pair(bx)
                }
            }

            PassMode::Cast { cast: cast_ty, pad_i32: _ } => {
                let op = match self.locals[mir::RETURN_PLACE] {
                    LocalRef::Operand(op) => op,
                    LocalRef::PendingOperand => bug!("use of return before def"),
                    LocalRef::Place(cg_place) => {
                        OperandRef { val: Ref(cg_place.val), layout: cg_place.layout }
                    }
                    LocalRef::UnsizedPlace(_) => bug!("return type must be sized"),
                };
                let llslot = match op.val {
                    Immediate(_) | Pair(..) => {
                        let scratch = PlaceRef::alloca(bx, self.fn_abi.ret.layout);
                        op.val.store(bx, scratch);
                        scratch.val.llval
                    }
                    Ref(place_val) => {
                        assert_eq!(
                            place_val.align, op.layout.align.abi,
                            "return place is unaligned!"
                        );
                        place_val.llval
                    }
                    ZeroSized => bug!("ZST return value shouldn't be in PassMode::Cast"),
                };
                load_cast(bx, cast_ty, llslot, self.fn_abi.ret.layout.align.abi)
            }
        };
        bx.ret(llval);
    }

    #[tracing::instrument(level = "trace", skip(self, helper, bx))]
    fn codegen_drop_terminator(
        &mut self,
        helper: TerminatorCodegenHelper<'tcx>,
        bx: &mut Bx,
        source_info: &mir::SourceInfo,
        location: mir::Place<'tcx>,
        target: mir::BasicBlock,
        unwind: mir::UnwindAction,
        mergeable_succ: bool,
    ) -> MergingSucc {
        let ty = location.ty(self.mir, bx.tcx()).ty;
        let ty = self.monomorphize(ty);
        let drop_fn = Instance::resolve_drop_in_place(bx.tcx(), ty);

        if let ty::InstanceKind::DropGlue(_, None) = drop_fn.def {
            // we don't actually need to drop anything.
            return helper.funclet_br(self, bx, target, mergeable_succ);
        }

        let place = self.codegen_place(bx, location.as_ref());
        let (args1, args2);
        let mut args = if let Some(llextra) = place.val.llextra {
            args2 = [place.val.llval, llextra];
            &args2[..]
        } else {
            args1 = [place.val.llval];
            &args1[..]
        };
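        // Resolve the concrete drop function: statically for non-trait-object
        // types, or by loading the `drop_in_place` slot from the vtable for
        // `dyn Trait` objects. The vtable entry may be a null pointer for no-op
        // drops, which is what `maybe_null` records and the null check below handles.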
        let (maybe_null, drop_fn, fn_abi, drop_instance) = match ty.kind() {
            // FIXME(eddyb) perhaps move some of this logic into
            // `Instance::resolve_drop_in_place`?
            ty::Dynamic(_, _, ty::Dyn) => {
                // IN THIS ARM, WE HAVE:
                // ty = *mut (dyn Trait)
                // which is: exists<T> ( *mut T,    Vtable<T: Trait> )
                //                       args[0]    args[1]
                //
                // args = ( Data, Vtable )
                //                  |
                //                  v
                //                /-------\
                //                | ...   |
                //                \-------/
                //
                let virtual_drop = Instance {
                    def: ty::InstanceKind::Virtual(drop_fn.def_id(), 0), // idx 0: the drop function
                    args: drop_fn.args,
                };
                debug!("ty = {:?}", ty);
                debug!("drop_fn = {:?}", drop_fn);
                debug!("args = {:?}", args);
                let fn_abi = bx.fn_abi_of_instance(virtual_drop, ty::List::empty());
                let vtable = args[1];
                // Truncate vtable off of args list
                args = &args[..1];
                (
                    true,
                    meth::VirtualIndex::from_index(ty::COMMON_VTABLE_ENTRIES_DROPINPLACE)
                        .get_optional_fn(bx, vtable, ty, fn_abi),
                    fn_abi,
                    virtual_drop,
                )
            }
            _ => (
                false,
                bx.get_fn_addr(drop_fn),
                bx.fn_abi_of_instance(drop_fn, ty::List::empty()),
                drop_fn,
            ),
        };

        // We generate a null check for the drop_fn. This saves a bunch of relocations being
        // generated for no-op drops.
        if maybe_null {
            let is_not_null = bx.append_sibling_block("is_not_null");
            let llty = bx.fn_ptr_backend_type(fn_abi);
            let null = bx.const_null(llty);
            let non_null =
                bx.icmp(base::bin_op_to_icmp_predicate(mir::BinOp::Ne, false), drop_fn, null);
            bx.cond_br(non_null, is_not_null, helper.llbb_with_cleanup(self, target));
            bx.switch_to_block(is_not_null);
            self.set_debug_loc(bx, *source_info);
        }

        helper.do_call(
            self,
            bx,
            fn_abi,
            drop_fn,
            args,
            Some((ReturnDest::Nothing, target)),
            unwind,
            &[],
            Some(drop_instance),
            CallKind::Normal,
            !maybe_null && mergeable_succ,
        )
    }

    fn codegen_assert_terminator(
        &mut self,
        helper: TerminatorCodegenHelper<'tcx>,
        bx: &mut Bx,
        terminator: &mir::Terminator<'tcx>,
        cond: &mir::Operand<'tcx>,
        expected: bool,
        msg: &mir::AssertMessage<'tcx>,
        target: mir::BasicBlock,
        unwind: mir::UnwindAction,
        mergeable_succ: bool,
    ) -> MergingSucc {
        let span = terminator.source_info.span;
        let cond = self.codegen_operand(bx, cond).immediate();
        let mut const_cond = bx.const_to_opt_u128(cond, false).map(|c| c == 1);

        // This case can currently arise only from functions marked
        // with #[rustc_inherit_overflow_checks] and inlined from
        // another crate (mostly core::num generic/#[inline] fns),
        // while the current crate doesn't use overflow checks.
        if !bx.sess().overflow_checks() && msg.is_optional_overflow_check() {
            const_cond = Some(expected);
        }

        // Don't codegen the panic block if success is known.
        if const_cond == Some(expected) {
            return helper.funclet_br(self, bx, target, mergeable_succ);
        }

        // Because we're branching to a panic block (either a `#[cold]` one
        // or an inlined abort), there's no need to `expect` it.

        // Create the failure block and the conditional branch to it.
        let lltarget = helper.llbb_with_cleanup(self, target);
        let panic_block = bx.append_sibling_block("panic");
        if expected {
            bx.cond_br(cond, lltarget, panic_block);
        } else {
            bx.cond_br(cond, panic_block, lltarget);
        }

        // After this point, bx is the block for the call to panic.
        bx.switch_to_block(panic_block);
        self.set_debug_loc(bx, terminator.source_info);

        // Get the location information.
        let location = self.get_caller_location(bx, terminator.source_info).immediate();

        // Put together the arguments to the panic entry point.
        let (lang_item, args) = match msg {
            AssertKind::BoundsCheck { len, index } => {
                let len = self.codegen_operand(bx, len).immediate();
                let index = self.codegen_operand(bx, index).immediate();
                // It's `fn panic_bounds_check(index: usize, len: usize)`,
                // and `#[track_caller]` adds an implicit third argument.
                (LangItem::PanicBoundsCheck, vec![index, len, location])
            }
            AssertKind::MisalignedPointerDereference { required, found } => {
                let required = self.codegen_operand(bx, required).immediate();
                let found = self.codegen_operand(bx, found).immediate();
                // It's `fn panic_misaligned_pointer_dereference(required: usize, found: usize)`,
                // and `#[track_caller]` adds an implicit third argument.
                (LangItem::PanicMisalignedPointerDereference, vec![required, found, location])
            }
            AssertKind::NullPointerDereference => {
                // It's `fn panic_null_pointer_dereference()`,
                // `#[track_caller]` adds an implicit argument.
                (LangItem::PanicNullPointerDereference, vec![location])
            }
            AssertKind::InvalidEnumConstruction(source) => {
                let source = self.codegen_operand(bx, source).immediate();
                // It's `fn panic_invalid_enum_construction(source: u128)`,
                // `#[track_caller]` adds an implicit argument.
                (LangItem::PanicInvalidEnumConstruction, vec![source, location])
            }
            _ => {
                // It's `pub fn panic_...()` and `#[track_caller]` adds an implicit argument.
                (msg.panic_function(), vec![location])
            }
        };

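        // All of these panic entry points are diverging (`fn(...) -> !`), so the
        // call below gets no return destination and never merges its successor.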
        let (fn_abi, llfn, instance) = common::build_langcall(bx, span, lang_item);

        // Codegen the actual panic invoke/call.
        let merging_succ = helper.do_call(
            self,
            bx,
            fn_abi,
            llfn,
            &args,
            None,
            unwind,
            &[],
            Some(instance),
            CallKind::Normal,
            false,
        );
        assert_eq!(merging_succ, MergingSucc::False);
        MergingSucc::False
    }

    fn codegen_terminate_terminator(
        &mut self,
        helper: TerminatorCodegenHelper<'tcx>,
        bx: &mut Bx,
        terminator: &mir::Terminator<'tcx>,
        reason: UnwindTerminateReason,
    ) {
        let span = terminator.source_info.span;
        self.set_debug_loc(bx, terminator.source_info);

        // Obtain the panic entry point.
        let (fn_abi, llfn, instance) = common::build_langcall(bx, span, reason.lang_item());

        // Codegen the actual panic invoke/call.
        let merging_succ = helper.do_call(
            self,
            bx,
            fn_abi,
            llfn,
            &[],
            None,
            mir::UnwindAction::Unreachable,
            &[],
            Some(instance),
            CallKind::Normal,
            false,
        );
        assert_eq!(merging_succ, MergingSucc::False);
    }

    /// Returns `Some` if this is indeed a panic intrinsic and codegen is done.
    fn codegen_panic_intrinsic(
        &mut self,
        helper: &TerminatorCodegenHelper<'tcx>,
        bx: &mut Bx,
        intrinsic: ty::IntrinsicDef,
        instance: Instance<'tcx>,
        source_info: mir::SourceInfo,
        target: Option<mir::BasicBlock>,
        unwind: mir::UnwindAction,
        mergeable_succ: bool,
    ) -> Option<MergingSucc> {
        // Emit a panic or a no-op for `assert_*` intrinsics.
        // These are intrinsics that compile to panics so that we can get a message
        // which mentions the offending type, even from a const context.
        let Some(requirement) = ValidityRequirement::from_intrinsic(intrinsic.name) else {
            return None;
        };

        let ty = instance.args.type_at(0);

        let is_valid = bx
            .tcx()
            .check_validity_requirement((requirement, bx.typing_env().as_query_input(ty)))
            .expect("expect to have layout during codegen");

        if is_valid {
            // a NOP
            let target = target.unwrap();
            return Some(helper.funclet_br(self, bx, target, mergeable_succ));
        }

        let layout = bx.layout_of(ty);

        let msg_str = with_no_visible_paths!({
            with_no_trimmed_paths!({
                if layout.is_uninhabited() {
                    // Use this error even for the other intrinsics as it is more precise.
                    format!("attempted to instantiate uninhabited type `{ty}`")
                } else if requirement == ValidityRequirement::Zero {
                    format!("attempted to zero-initialize type `{ty}`, which is invalid")
                } else {
                    format!("attempted to leave type `{ty}` uninitialized, which is invalid")
                }
            })
        });
        let msg = bx.const_str(&msg_str);

        // Obtain the panic entry point.
        let (fn_abi, llfn, instance) =
            common::build_langcall(bx, source_info.span, LangItem::PanicNounwind);

        // Codegen the actual panic invoke/call.
        Some(helper.do_call(
            self,
            bx,
            fn_abi,
            llfn,
            &[msg.0, msg.1],
            target.as_ref().map(|bb| (ReturnDest::Nothing, *bb)),
            unwind,
            &[],
            Some(instance),
            CallKind::Normal,
            mergeable_succ,
        ))
    }

    fn codegen_call_terminator(
        &mut self,
        helper: TerminatorCodegenHelper<'tcx>,
        bx: &mut Bx,
        terminator: &mir::Terminator<'tcx>,
        func: &mir::Operand<'tcx>,
        args: &[Spanned<mir::Operand<'tcx>>],
        destination: mir::Place<'tcx>,
        target: Option<mir::BasicBlock>,
        unwind: mir::UnwindAction,
        fn_span: Span,
        kind: CallKind,
        mergeable_succ: bool,
    ) -> MergingSucc {
        let source_info = mir::SourceInfo { span: fn_span, ..terminator.source_info };

        // Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar.
        let callee = self.codegen_operand(bx, func);

        let (instance, mut llfn) = match *callee.layout.ty.kind() {
            ty::FnDef(def_id, generic_args) => {
                let instance = ty::Instance::expect_resolve(
                    bx.tcx(),
                    bx.typing_env(),
                    def_id,
                    generic_args,
                    fn_span,
                );

                match instance.def {
                    // We don't need AsyncDropGlueCtorShim here because it is not a `noop func`;
                    // it is a `func returning noop future`.
                    ty::InstanceKind::DropGlue(_, None) => {
                        // Empty drop glue; a no-op.
                        let target = target.unwrap();
                        return helper.funclet_br(self, bx, target, mergeable_succ);
                    }
                    ty::InstanceKind::Intrinsic(def_id) => {
                        let intrinsic = bx.tcx().intrinsic(def_id).unwrap();
                        if let Some(merging_succ) = self.codegen_panic_intrinsic(
                            &helper,
                            bx,
                            intrinsic,
                            instance,
                            source_info,
                            target,
                            unwind,
                            mergeable_succ,
                        ) {
                            return merging_succ;
                        }

                        let result_layout =
                            self.cx.layout_of(self.monomorphized_place_ty(destination.as_ref()));

                        let (result, store_in_local) = if result_layout.is_zst() {
                            (
                                PlaceRef::new_sized(bx.const_undef(bx.type_ptr()), result_layout),
                                None,
                            )
                        } else if let Some(local) = destination.as_local() {
                            match self.locals[local] {
                                LocalRef::Place(dest) => (dest, None),
                                LocalRef::UnsizedPlace(_) => bug!("return type must be sized"),
                                LocalRef::PendingOperand => {
                                    // Currently, intrinsics always need a location to store
                                    // the result, so we create a temporary `alloca` for the
                                    // result.
                                    let tmp = PlaceRef::alloca(bx, result_layout);
                                    tmp.storage_live(bx);
                                    (tmp, Some(local))
                                }
                                LocalRef::Operand(_) => {
                                    bug!("place local already assigned to");
                                }
                            }
                        } else {
                            (self.codegen_place(bx, destination.as_ref()), None)
                        };

                        if result.val.align < result.layout.align.abi {
                            // Currently, MIR code generation does not create calls
                            // that store directly to fields of packed structs (in
                            // fact, the calls it creates write only to temps).
                            //
                            // If someone changes that, please update this code path
                            // to create a temporary.
                            span_bug!(self.mir.span, "can't directly store to unaligned value");
                        }

                        let args: Vec<_> =
                            args.iter().map(|arg| self.codegen_operand(bx, &arg.node)).collect();

                        match self.codegen_intrinsic_call(bx, instance, &args, result, source_info)
                        {
                            Ok(()) => {
                                if let Some(local) = store_in_local {
                                    let op = bx.load_operand(result);
                                    result.storage_dead(bx);
                                    self.overwrite_local(local, LocalRef::Operand(op));
                                    self.debug_introduce_local(bx, local);
                                }

                                return if let Some(target) = target {
                                    helper.funclet_br(self, bx, target, mergeable_succ)
                                } else {
                                    bx.unreachable();
                                    MergingSucc::False
                                };
                            }
                            Err(instance) => {
                                if intrinsic.must_be_overridden {
                                    span_bug!(
                                        fn_span,
                                        "intrinsic {} must be overridden by codegen backend, but isn't",
                                        intrinsic.name,
                                    );
                                }
                                (Some(instance), None)
                            }
                        }
                    }

                    _ if kind == CallKind::Tail
                        && instance.def.requires_caller_location(bx.tcx()) =>
                    {
                        if let Some(hir_id) =
                            terminator.source_info.scope.lint_root(&self.mir.source_scopes)
                        {
                            let msg = "tail calling a function marked with `#[track_caller]` has no special effect";
                            bx.tcx().node_lint(TAIL_CALL_TRACK_CALLER, hir_id, |d| {
                                _ = d.primary_message(msg).span(fn_span)
                            });
                        }

                        let instance = ty::Instance::resolve_for_fn_ptr(
                            bx.tcx(),
                            bx.typing_env(),
                            def_id,
                            generic_args,
                        )
                        .unwrap();

                        (None, Some(bx.get_fn_addr(instance)))
                    }
                    _ => (Some(instance), None),
                }
            }
            ty::FnPtr(..) => (None, Some(callee.immediate())),
            _ => bug!("{} is not callable", callee.layout.ty),
        };

        // FIXME(eddyb) avoid computing this if possible, when `instance` is
        // available - right now `sig` is only needed for getting the `abi`
        // and figuring out how many extra args were passed to a C-variadic `fn`.
        let sig = callee.layout.ty.fn_sig(bx.tcx());

        let extra_args = &args[sig.inputs().skip_binder().len()..];
        let extra_args = bx.tcx().mk_type_list_from_iter(extra_args.iter().map(|op_arg| {
            let op_ty = op_arg.node.ty(self.mir, bx.tcx());
            self.monomorphize(op_ty)
        }));

        let fn_abi = match instance {
            Some(instance) => bx.fn_abi_of_instance(instance, extra_args),
            None => bx.fn_abi_of_fn_ptr(sig, extra_args),
        };

        // The arguments we'll be passing. Plus one to account for outptr, if used.
        let arg_count = fn_abi.args.len() + fn_abi.ret.is_indirect() as usize;

        let mut llargs = Vec::with_capacity(arg_count);

        // We still need to call `make_return_dest` even if there's no `target`, since
        // `fn_abi.ret` could be `PassMode::Indirect`, even if it is uninhabited,
        // and `make_return_dest` adds the return-place indirect pointer to `llargs`.
        let destination = match kind {
            CallKind::Normal => {
                let return_dest = self.make_return_dest(bx, destination, &fn_abi.ret, &mut llargs);
                target.map(|target| (return_dest, target))
            }
            CallKind::Tail => None,
        };

        // Split the rust-call tupled arguments off.
        let (first_args, untuple) = if sig.abi() == ExternAbi::RustCall
            && let Some((tup, args)) = args.split_last()
        {
            (args, Some(tup))
        } else {
            (args, None)
        };

        // When generating arguments we sometimes introduce temporary allocations whose lifetime
        // extends for the duration of the call. Keep track of those allocations and their sizes
        // to generate `lifetime_end` when the call returns.
        let mut lifetime_ends_after_call: Vec<(Bx::Value, Size)> = Vec::new();
        'make_args: for (i, arg) in first_args.iter().enumerate() {
            if kind == CallKind::Tail && matches!(fn_abi.args[i].mode, PassMode::Indirect { .. }) {
                // FIXME: https://github.com/rust-lang/rust/pull/144232#discussion_r2218543841
                span_bug!(
                    fn_span,
                    "arguments using PassMode::Indirect are currently not supported for tail calls"
                );
            }

            let mut op = self.codegen_operand(bx, &arg.node);

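            // For virtual calls the first argument is the `dyn` receiver: split it
            // into its data pointer (passed as the real first argument) and its
            // vtable pointer (used to look up the method), peeling receiver
            // newtypes like `Rc<Self>` or `Pin<&mut Self>` along the way.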
            if let (0, Some(ty::InstanceKind::Virtual(_, idx))) = (i, instance.map(|i| i.def)) {
                match op.val {
                    Pair(data_ptr, meta) => {
                        // In the case of Rc<Self>, we need to explicitly pass a
                        // *mut RcInner<Self> with a Scalar (not ScalarPair) ABI. This is a hack
                        // that is understood elsewhere in the compiler as a method on
                        // `dyn Trait`.
                        // To get a `*mut RcInner<Self>`, we just keep unwrapping newtypes until
                        // we get a value of a built-in pointer type.
                        //
                        // This is also relevant for `Pin<&mut Self>`, where we need to peel the
                        // `Pin`.
                        while !op.layout.ty.is_raw_ptr() && !op.layout.ty.is_ref() {
                            let (idx, _) = op.layout.non_1zst_field(bx).expect(
                                "not exactly one non-1-ZST field in a `DispatchFromDyn` type",
                            );
                            op = op.extract_field(self, bx, idx.as_usize());
                        }

                        // Now that we have `*dyn Trait` or `&dyn Trait`, split it up into its
                        // data pointer and vtable. Look up the method in the vtable, and pass
                        // the data pointer as the first argument.
                        llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
                            bx,
                            meta,
                            op.layout.ty,
                            fn_abi,
                        ));
                        llargs.push(data_ptr);
                        continue 'make_args;
                    }
                    Ref(PlaceValue { llval: data_ptr, llextra: Some(meta), .. }) => {
                        // by-value dynamic dispatch
                        llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
                            bx,
                            meta,
                            op.layout.ty,
                            fn_abi,
                        ));
                        llargs.push(data_ptr);
                        continue;
                    }
                    _ => {
                        span_bug!(fn_span, "can't codegen a virtual call on {:#?}", op);
                    }
                }
            }

            // The callee needs to own the argument memory if we pass it
            // by-ref, so make a local copy of non-immediate constants.
            match (&arg.node, op.val) {
                (&mir::Operand::Copy(_), Ref(PlaceValue { llextra: None, .. }))
                | (&mir::Operand::Constant(_), Ref(PlaceValue { llextra: None, .. })) => {
                    let tmp = PlaceRef::alloca(bx, op.layout);
                    bx.lifetime_start(tmp.val.llval, tmp.layout.size);
                    op.val.store(bx, tmp);
                    op.val = Ref(tmp.val);
                    lifetime_ends_after_call.push((tmp.val.llval, tmp.layout.size));
                }
                _ => {}
            }

            self.codegen_argument(
                bx,
                op,
                &mut llargs,
                &fn_abi.args[i],
                &mut lifetime_ends_after_call,
            );
        }
        let num_untupled = untuple.map(|tup| {
            self.codegen_arguments_untupled(
                bx,
                &tup.node,
                &mut llargs,
                &fn_abi.args[first_args.len()..],
                &mut lifetime_ends_after_call,
            )
        });

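        // `#[track_caller]` callees take an implicit `&'static Location<'static>`
        // as their final ABI argument; it does not exist in the MIR argument list,
        // hence the `mir_args + 1` assertion below.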
        let needs_location =
            instance.is_some_and(|i| i.def.requires_caller_location(self.cx.tcx()));
        if needs_location {
            let mir_args = if let Some(num_untupled) = num_untupled {
                first_args.len() + num_untupled
            } else {
                args.len()
            };
            assert_eq!(
                fn_abi.args.len(),
                mir_args + 1,
                "#[track_caller] fn's must have 1 more argument in their ABI than in their MIR: {instance:?} {fn_span:?} {fn_abi:?}",
            );
            let location = self.get_caller_location(bx, source_info);
            debug!(
                "codegen_call_terminator({:?}): location={:?} (fn_span {:?})",
                terminator, location, fn_span
            );

            let last_arg = fn_abi.args.last().unwrap();
            self.codegen_argument(
                bx,
                location,
                &mut llargs,
                last_arg,
                &mut lifetime_ends_after_call,
            );
        }

        let fn_ptr = match (instance, llfn) {
            (Some(instance), None) => bx.get_fn_addr(instance),
            (_, Some(llfn)) => llfn,
            _ => span_bug!(fn_span, "no instance or llfn for call"),
        };
        self.set_debug_loc(bx, source_info);
        helper.do_call(
            self,
            bx,
            fn_abi,
            fn_ptr,
            &llargs,
            destination,
            unwind,
            &lifetime_ends_after_call,
            instance,
            kind,
            mergeable_succ,
        )
    }

    fn codegen_asm_terminator(
        &mut self,
        helper: TerminatorCodegenHelper<'tcx>,
        bx: &mut Bx,
        asm_macro: InlineAsmMacro,
        terminator: &mir::Terminator<'tcx>,
        template: &[ast::InlineAsmTemplatePiece],
        operands: &[mir::InlineAsmOperand<'tcx>],
        options: ast::InlineAsmOptions,
        line_spans: &[Span],
        targets: &[mir::BasicBlock],
        unwind: mir::UnwindAction,
        instance: Instance<'_>,
        mergeable_succ: bool,
    ) -> MergingSucc {
        let span = terminator.source_info.span;

        let operands: Vec<_> = operands
            .iter()
            .map(|op| match *op {
                mir::InlineAsmOperand::In { reg, ref value } => {
                    let value = self.codegen_operand(bx, value);
                    InlineAsmOperandRef::In { reg, value }
                }
                mir::InlineAsmOperand::Out { reg, late, ref place } => {
                    let place = place.map(|place| self.codegen_place(bx, place.as_ref()));
                    InlineAsmOperandRef::Out { reg, late, place }
                }
                mir::InlineAsmOperand::InOut { reg, late, ref in_value, ref out_place } => {
                    let in_value = self.codegen_operand(bx, in_value);
                    let out_place =
                        out_place.map(|out_place| self.codegen_place(bx, out_place.as_ref()));
                    InlineAsmOperandRef::InOut { reg, late, in_value, out_place }
                }
                mir::InlineAsmOperand::Const { ref value } => {
                    let const_value = self.eval_mir_constant(value);
                    let string = common::asm_const_to_str(
                        bx.tcx(),
                        span,
                        const_value,
                        bx.layout_of(value.ty()),
                    );
                    InlineAsmOperandRef::Const { string }
                }
                mir::InlineAsmOperand::SymFn { ref value } => {
                    let const_ = self.monomorphize(value.const_);
                    if let ty::FnDef(def_id, args) = *const_.ty().kind() {
                        let instance = ty::Instance::resolve_for_fn_ptr(
                            bx.tcx(),
                            bx.typing_env(),
                            def_id,
                            args,
                        )
                        .unwrap();
                        InlineAsmOperandRef::SymFn { instance }
                    } else {
                        span_bug!(span, "invalid type for asm sym (fn)");
                    }
                }
                mir::InlineAsmOperand::SymStatic { def_id } => {
                    InlineAsmOperandRef::SymStatic { def_id }
                }
                mir::InlineAsmOperand::Label { target_index } => {
                    InlineAsmOperandRef::Label { label: self.llbb(targets[target_index]) }
                }
            })
            .collect();

1287        helper.do_inlineasm(
1288            self,
1289            bx,
1290            template,
1291            &operands,
1292            options,
1293            line_spans,
1294            if asm_macro.diverges(options) { None } else { targets.first().copied() },
1295            unwind,
1296            instance,
1297            mergeable_succ,
1298        )
1299    }
1300
1301    pub(crate) fn codegen_block(&mut self, mut bb: mir::BasicBlock) {
1302        let llbb = match self.try_llbb(bb) {
1303            Some(llbb) => llbb,
1304            None => return,
1305        };
1306        let bx = &mut Bx::build(self.cx, llbb);
1307        let mir = self.mir;
1308
1309        // MIR basic blocks stop at any function call. This may not be the case
1310        // for the backend's basic blocks, in which case we might be able to
1311        // combine multiple MIR basic blocks into a single backend basic block.
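        //
        // A hypothetical MIR shape where this applies (illustration only):
        //
        //     bb0: { _1 = foo() -> [return: bb1, ...]; }
        //     bb1: { _2 = bar() -> [return: bb2, ...]; }
        //
        // If bb0 is bb1's only predecessor, the call to `bar` can be emitted
        // into the same backend block as the call to `foo`; bb1 is then marked
        // `CachedLlbb::Skip` at the bottom of the loop below.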
1312        loop {
1313            let data = &mir[bb];
1314
1315            debug!("codegen_block({:?}={:?})", bb, data);
1316
1317            for statement in &data.statements {
1318                self.codegen_statement(bx, statement);
1319            }
1320
1321            let merging_succ = self.codegen_terminator(bx, bb, data.terminator());
1322            if let MergingSucc::False = merging_succ {
1323                break;
1324            }
1325
1326            // We are merging the successor into the produced backend basic
1327            // block. Record that the successor should be skipped when it is
1328            // reached.
1329            //
1330            // Note: we must not have already generated code for the successor.
1331            // This is implicitly ensured by the reverse postorder traversal,
1332            // and the assertion below checks that explicitly.
1333            let mut successors = data.terminator().successors();
1334            let succ = successors.next().unwrap();
1335            assert!(matches!(self.cached_llbbs[succ], CachedLlbb::None));
1336            self.cached_llbbs[succ] = CachedLlbb::Skip;
1337            bb = succ;
1338        }
1339    }
1340
1341    pub(crate) fn codegen_block_as_unreachable(&mut self, bb: mir::BasicBlock) {
1342        let llbb = match self.try_llbb(bb) {
1343            Some(llbb) => llbb,
1344            None => return,
1345        };
1346        let bx = &mut Bx::build(self.cx, llbb);
1347        debug!("codegen_block_as_unreachable({:?})", bb);
1348        bx.unreachable();
1349    }
1350
1351    fn codegen_terminator(
1352        &mut self,
1353        bx: &mut Bx,
1354        bb: mir::BasicBlock,
1355        terminator: &'tcx mir::Terminator<'tcx>,
1356    ) -> MergingSucc {
1357        debug!("codegen_terminator: {:?}", terminator);
1358
1359        let helper = TerminatorCodegenHelper { bb, terminator };
1360
1361        let mergeable_succ = || {
1362            // Note: any call to `switch_to_block` will invalidate a `true` value
1363            // of `mergeable_succ`.
1364            let mut successors = terminator.successors();
1365            if let Some(succ) = successors.next()
1366                && successors.next().is_none()
1367                && let &[succ_pred] = self.mir.basic_blocks.predecessors()[succ].as_slice()
1368            {
1369                // bb has a single successor, and bb is its only predecessor. This
1370                // makes it a candidate for merging.
1371                assert_eq!(succ_pred, bb);
1372                true
1373            } else {
1374                false
1375            }
1376        };
1377
1378        self.set_debug_loc(bx, terminator.source_info);
1379        match terminator.kind {
1380            mir::TerminatorKind::UnwindResume => {
1381                self.codegen_resume_terminator(helper, bx);
1382                MergingSucc::False
1383            }
1384
1385            mir::TerminatorKind::UnwindTerminate(reason) => {
1386                self.codegen_terminate_terminator(helper, bx, terminator, reason);
1387                MergingSucc::False
1388            }
1389
1390            mir::TerminatorKind::Goto { target } => {
1391                helper.funclet_br(self, bx, target, mergeable_succ())
1392            }
1393
1394            mir::TerminatorKind::SwitchInt { ref discr, ref targets } => {
1395                self.codegen_switchint_terminator(helper, bx, discr, targets);
1396                MergingSucc::False
1397            }
1398
1399            mir::TerminatorKind::Return => {
1400                self.codegen_return_terminator(bx);
1401                MergingSucc::False
1402            }
1403
1404            mir::TerminatorKind::Unreachable => {
1405                bx.unreachable();
1406                MergingSucc::False
1407            }
1408
1409            mir::TerminatorKind::Drop { place, target, unwind, replace: _, drop, async_fut } => {
1410                assert!(
1411                    async_fut.is_none() && drop.is_none(),
1412                    "Async Drop must be expanded or reset to sync before codegen"
1413                );
1414                self.codegen_drop_terminator(
1415                    helper,
1416                    bx,
1417                    &terminator.source_info,
1418                    place,
1419                    target,
1420                    unwind,
1421                    mergeable_succ(),
1422                )
1423            }
1424
1425            mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, unwind } => self
1426                .codegen_assert_terminator(
1427                    helper,
1428                    bx,
1429                    terminator,
1430                    cond,
1431                    expected,
1432                    msg,
1433                    target,
1434                    unwind,
1435                    mergeable_succ(),
1436                ),
1437
1438            mir::TerminatorKind::Call {
1439                ref func,
1440                ref args,
1441                destination,
1442                target,
1443                unwind,
1444                call_source: _,
1445                fn_span,
1446            } => self.codegen_call_terminator(
1447                helper,
1448                bx,
1449                terminator,
1450                func,
1451                args,
1452                destination,
1453                target,
1454                unwind,
1455                fn_span,
1456                CallKind::Normal,
1457                mergeable_succ(),
1458            ),
1459            mir::TerminatorKind::TailCall { ref func, ref args, fn_span } => self
1460                .codegen_call_terminator(
1461                    helper,
1462                    bx,
1463                    terminator,
1464                    func,
1465                    args,
1466                    mir::Place::from(mir::RETURN_PLACE),
1467                    None,
1468                    mir::UnwindAction::Unreachable,
1469                    fn_span,
1470                    CallKind::Tail,
1471                    mergeable_succ(),
1472                ),
1473            mir::TerminatorKind::CoroutineDrop | mir::TerminatorKind::Yield { .. } => {
1474                bug!("coroutine ops in codegen")
1475            }
1476            mir::TerminatorKind::FalseEdge { .. } | mir::TerminatorKind::FalseUnwind { .. } => {
1477                bug!("borrowck false edges in codegen")
1478            }
1479
1480            mir::TerminatorKind::InlineAsm {
1481                asm_macro,
1482                template,
1483                ref operands,
1484                options,
1485                line_spans,
1486                ref targets,
1487                unwind,
1488            } => self.codegen_asm_terminator(
1489                helper,
1490                bx,
1491                asm_macro,
1492                terminator,
1493                template,
1494                operands,
1495                options,
1496                line_spans,
1497                targets,
1498                unwind,
1499                self.instance,
1500                mergeable_succ(),
1501            ),
1502        }
1503    }
1504
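    /// Lowers a single call argument `op` into zero or more backend values and
    /// pushes them onto `llargs`, according to the argument's `PassMode`:
    /// ignored arguments produce nothing, pairs are split into two values,
    /// and indirect or cast arguments may first be spilled to a temporary.
    /// Some of those temporaries have their `lifetime_end` deferred until
    /// after the call via `lifetime_ends_after_call`.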
1505    fn codegen_argument(
1506        &mut self,
1507        bx: &mut Bx,
1508        op: OperandRef<'tcx, Bx::Value>,
1509        llargs: &mut Vec<Bx::Value>,
1510        arg: &ArgAbi<'tcx, Ty<'tcx>>,
1511        lifetime_ends_after_call: &mut Vec<(Bx::Value, Size)>,
1512    ) {
1513        match arg.mode {
1514            PassMode::Ignore => return,
1515            PassMode::Cast { pad_i32: true, .. } => {
1516                // Fill padding with undef value, where applicable.
1517                llargs.push(bx.const_undef(bx.reg_backend_type(&Reg::i32())));
1518            }
1519            PassMode::Pair(..) => match op.val {
1520                Pair(a, b) => {
1521                    llargs.push(a);
1522                    llargs.push(b);
1523                    return;
1524                }
1525                _ => bug!("codegen_argument: {:?} invalid for pair argument", op),
1526            },
1527            PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => match op.val {
1528                Ref(PlaceValue { llval: a, llextra: Some(b), .. }) => {
1529                    llargs.push(a);
1530                    llargs.push(b);
1531                    return;
1532                }
1533                _ => bug!("codegen_argument: {:?} invalid for unsized indirect argument", op),
1534            },
1535            _ => {}
1536        }
1537
1538        // Force by-ref if we have to load through a cast pointer.
1539        let (mut llval, align, by_ref) = match op.val {
1540            Immediate(_) | Pair(..) => match arg.mode {
1541                PassMode::Indirect { attrs, .. } => {
1542                    // Indirect argument may have higher alignment requirements than the type's
1543                    // alignment. This can happen, e.g. when passing types with <4 byte alignment
1544                    // on the stack on x86.
1545                    let required_align = match attrs.pointee_align {
1546                        Some(pointee_align) => cmp::max(pointee_align, arg.layout.align.abi),
1547                        None => arg.layout.align.abi,
1548                    };
1549                    let scratch = PlaceValue::alloca(bx, arg.layout.size, required_align);
1550                    bx.lifetime_start(scratch.llval, arg.layout.size);
1551                    op.val.store(bx, scratch.with_type(arg.layout));
1552                    lifetime_ends_after_call.push((scratch.llval, arg.layout.size));
1553                    (scratch.llval, scratch.align, true)
1554                }
1555                PassMode::Cast { .. } => {
1556                    let scratch = PlaceRef::alloca(bx, arg.layout);
1557                    op.val.store(bx, scratch);
1558                    (scratch.val.llval, scratch.val.align, true)
1559                }
1560                _ => (op.immediate_or_packed_pair(bx), arg.layout.align.abi, false),
1561            },
1562            Ref(op_place_val) => match arg.mode {
1563                PassMode::Indirect { attrs, .. } => {
1564                    let required_align = match attrs.pointee_align {
1565                        Some(pointee_align) => cmp::max(pointee_align, arg.layout.align.abi),
1566                        None => arg.layout.align.abi,
1567                    };
1568                    if op_place_val.align < required_align {
1569                        // For `foo(packed.large_field)`, and types with <4 byte alignment on x86,
1570                        // alignment requirements may be higher than the type's alignment, so copy
1571                        // to a higher-aligned alloca.
1572                        let scratch = PlaceValue::alloca(bx, arg.layout.size, required_align);
1573                        bx.lifetime_start(scratch.llval, arg.layout.size);
1574                        bx.typed_place_copy(scratch, op_place_val, op.layout);
1575                        lifetime_ends_after_call.push((scratch.llval, arg.layout.size));
1576                        (scratch.llval, scratch.align, true)
1577                    } else {
1578                        (op_place_val.llval, op_place_val.align, true)
1579                    }
1580                }
1581                _ => (op_place_val.llval, op_place_val.align, true),
1582            },
1583            ZeroSized => match arg.mode {
1584                PassMode::Indirect { on_stack, .. } => {
1585                    if on_stack {
1586                        // It doesn't seem like any target can have `byval` ZSTs, so this `bug!`
1587                        // is here to replace a would-be untested codepath.
1588                        bug!("ZST {op:?} passed on stack with abi {arg:?}");
1589                    }
1590                    // Though `extern "Rust"` doesn't pass ZSTs, some ABIs pass
1591                    // a pointer for `repr(C)` structs even when empty, so get
1592                    // one from an `alloca` (which can be left uninitialized).
1593                    let scratch = PlaceRef::alloca(bx, arg.layout);
1594                    (scratch.val.llval, scratch.val.align, true)
1595                }
1596                _ => bug!("ZST {op:?} wasn't ignored, but was passed with abi {arg:?}"),
1597            },
1598        };
1599
1600        if by_ref && !arg.is_indirect() {
1601            // Have to load the argument, maybe while casting it.
1602            if let PassMode::Cast { cast, pad_i32: _ } = &arg.mode {
1603                // The ABI mandates that the value is passed as a different struct representation.
1604                // Spill and reload it from the stack to convert from the Rust representation to
1605                // the ABI representation.
1606                let scratch_size = cast.size(bx);
1607                let scratch_align = cast.align(bx);
1608                // Note that the ABI type may be either larger or smaller than the Rust type,
1609                // due to the presence or absence of trailing padding. For example:
1610                // - On some ABIs, the Rust layout { f64, f32, <f32 padding> } may omit padding
1611                //   when passed by value, making it smaller.
1612                // - On some ABIs, the Rust layout { u16, u16, u16 } may be padded up to 8 bytes
1613                //   when passed by value, making it larger.
1614                let copy_bytes = cmp::min(cast.unaligned_size(bx).bytes(), arg.layout.size.bytes());
1615                // Allocate some scratch space...
1616                let llscratch = bx.alloca(scratch_size, scratch_align);
1617                bx.lifetime_start(llscratch, scratch_size);
1618                // ...memcpy the value...
1619                bx.memcpy(
1620                    llscratch,
1621                    scratch_align,
1622                    llval,
1623                    align,
1624                    bx.const_usize(copy_bytes),
1625                    MemFlags::empty(),
1626                );
1627                // ...and then load it with the ABI type.
1628                llval = load_cast(bx, cast, llscratch, scratch_align);
1629                bx.lifetime_end(llscratch, scratch_size);
1630            } else {
1631                // We can't use `PlaceRef::load` here because the argument
1632                // may have a type we don't treat as immediate, but the ABI
1633                // used for this call is passing it by-value. In that case,
1634                // the load would just produce `OperandValue::Ref` instead
1635                // of the `OperandValue::Immediate` we need for the call.
1636                llval = bx.load(bx.backend_type(arg.layout), llval, align);
1637                if let BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
1638                    if scalar.is_bool() {
1639                        bx.range_metadata(llval, WrappingRange { start: 0, end: 1 });
1640                    }
1641                    // We store bools as `i8` so we need to truncate to `i1`.
1642                    llval = bx.to_immediate_scalar(llval, scalar);
1643                }
1644            }
1645        }
1646
1647        llargs.push(llval);
1648    }
1649
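    /// Spreads the fields of a trailing tuple argument into individual call
    /// arguments, as needed for `extern "rust-call"` functions such as closure
    /// shims, and returns the number of fields spread. As a rough sketch (an
    /// illustrative example, not code from this crate), a call like
    ///
    ///     let f = |a: u32, b: u32| a + b;
    ///     f(1, 2); // roughly lowered via Fn::call(&f, (1, 2))
    ///
    /// reaches codegen with `(1, 2)` as a single tuple operand, and this
    /// helper pushes one backend argument per tuple field.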
1650    fn codegen_arguments_untupled(
1651        &mut self,
1652        bx: &mut Bx,
1653        operand: &mir::Operand<'tcx>,
1654        llargs: &mut Vec<Bx::Value>,
1655        args: &[ArgAbi<'tcx, Ty<'tcx>>],
1656        lifetime_ends_after_call: &mut Vec<(Bx::Value, Size)>,
1657    ) -> usize {
1658        let tuple = self.codegen_operand(bx, operand);
1659
1660        // Handle both by-ref and immediate tuples.
1661        if let Ref(place_val) = tuple.val {
1662            if place_val.llextra.is_some() {
1663                bug!("closure arguments must be sized");
1664            }
1665            let tuple_ptr = place_val.with_type(tuple.layout);
1666            for i in 0..tuple.layout.fields.count() {
1667                let field_ptr = tuple_ptr.project_field(bx, i);
1668                let field = bx.load_operand(field_ptr);
1669                self.codegen_argument(bx, field, llargs, &args[i], lifetime_ends_after_call);
1670            }
1671        } else {
1672            // If the tuple is immediate, the elements are as well.
1673            for i in 0..tuple.layout.fields.count() {
1674                let op = tuple.extract_field(self, bx, i);
1675                self.codegen_argument(bx, op, llargs, &args[i], lifetime_ends_after_call);
1676            }
1677        }
1678        tuple.layout.fields.count()
1679    }
1680
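    /// Produces the `&'static Location` operand used for `#[track_caller]`
    /// calls: either forwarded from this function's own caller-location
    /// argument (when this function is itself `#[track_caller]`) or
    /// materialized as a constant location derived from `source_info`.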
1681    pub(super) fn get_caller_location(
1682        &mut self,
1683        bx: &mut Bx,
1684        source_info: mir::SourceInfo,
1685    ) -> OperandRef<'tcx, Bx::Value> {
1686        self.mir.caller_location_span(source_info, self.caller_location, bx.tcx(), |span: Span| {
1687            let const_loc = bx.tcx().span_as_caller_location(span);
1688            OperandRef::from_const(bx, const_loc, bx.tcx().caller_location_ty())
1689        })
1690    }
1691
1692    fn get_personality_slot(&mut self, bx: &mut Bx) -> PlaceRef<'tcx, Bx::Value> {
1693        let cx = bx.cx();
1694        if let Some(slot) = self.personality_slot {
1695            slot
1696        } else {
1697            let layout = cx.layout_of(Ty::new_tup(
1698                cx.tcx(),
1699                &[Ty::new_mut_ptr(cx.tcx(), cx.tcx().types.u8), cx.tcx().types.i32],
1700            ));
1701            let slot = PlaceRef::alloca(bx, layout);
1702            self.personality_slot = Some(slot);
1703            slot
1704        }
1705    }
1706
1707    /// Returns the landing/cleanup pad wrapper around the given basic block.
1708    // FIXME(eddyb) rename this to `eh_pad_for`.
1709    fn landing_pad_for(&mut self, bb: mir::BasicBlock) -> Bx::BasicBlock {
1710        if let Some(landing_pad) = self.landing_pads[bb] {
1711            return landing_pad;
1712        }
1713
1714        let landing_pad = self.landing_pad_for_uncached(bb);
1715        self.landing_pads[bb] = Some(landing_pad);
1716        landing_pad
1717    }
1718
1719    // FIXME(eddyb) rename this to `eh_pad_for_uncached`.
1720    fn landing_pad_for_uncached(&mut self, bb: mir::BasicBlock) -> Bx::BasicBlock {
1721        let llbb = self.llbb(bb);
1722        if base::wants_new_eh_instructions(self.cx.sess()) {
1723            let cleanup_bb = Bx::append_block(self.cx, self.llfn, &format!("funclet_{bb:?}"));
1724            let mut cleanup_bx = Bx::build(self.cx, cleanup_bb);
1725            let funclet = cleanup_bx.cleanup_pad(None, &[]);
1726            cleanup_bx.br(llbb);
1727            self.funclets[bb] = Some(funclet);
1728            cleanup_bb
1729        } else {
1730            let cleanup_llbb = Bx::append_block(self.cx, self.llfn, "cleanup");
1731            let mut cleanup_bx = Bx::build(self.cx, cleanup_llbb);
1732
1733            let llpersonality = self.cx.eh_personality();
1734            let (exn0, exn1) = cleanup_bx.cleanup_landing_pad(llpersonality);
1735
1736            let slot = self.get_personality_slot(&mut cleanup_bx);
1737            slot.storage_live(&mut cleanup_bx);
1738            Pair(exn0, exn1).store(&mut cleanup_bx, slot);
1739
1740            cleanup_bx.br(llbb);
1741            cleanup_llbb
1742        }
1743    }
1744
1745    fn unreachable_block(&mut self) -> Bx::BasicBlock {
1746        self.unreachable_block.unwrap_or_else(|| {
1747            let llbb = Bx::append_block(self.cx, self.llfn, "unreachable");
1748            let mut bx = Bx::build(self.cx, llbb);
1749            bx.unreachable();
1750            self.unreachable_block = Some(llbb);
1751            llbb
1752        })
1753    }
1754
1755    fn terminate_block(&mut self, reason: UnwindTerminateReason) -> Bx::BasicBlock {
1756        if let Some((cached_bb, cached_reason)) = self.terminate_block
1757            && reason == cached_reason
1758        {
1759            return cached_bb;
1760        }
1761
1762        let funclet;
1763        let llbb;
1764        let mut bx;
1765        if base::wants_new_eh_instructions(self.cx.sess()) {
1766            // This is a basic block that we're aborting the program for,
1767            // notably in an `extern` function. These basic blocks are inserted
1768            // so that we can assert that `extern` functions do not panic,
1769            // and if one does, we abort the process.
1770            //
1771            // On MSVC these are tricky though (where we're doing funclets). If
1772            // we were to do a cleanuppad (like below) the normal functions like
1773            // `longjmp` would trigger the abort logic, terminating the
1774            // program. Instead we insert the equivalent of `catch(...)` for C++
1775            // which magically doesn't trigger when `longjmp` flies over this
1776            // frame.
1777            //
1778            // Lots more discussion can be found on #48251 but this codegen is
1779            // modeled after clang's for:
1780            //
1781            //      try {
1782            //          foo();
1783            //      } catch (...) {
1784            //          bar();
1785            //      }
1786            //
1787            // which creates an IR snippet like
1788            //
1789            //      cs_terminate:
1790            //         %cs = catchswitch within none [%cp_terminate] unwind to caller
1791            //      cp_terminate:
1792            //         %cp = catchpad within %cs [null, i32 64, null]
1793            //         ...
1794
1795            llbb = Bx::append_block(self.cx, self.llfn, "cs_terminate");
1796            let cp_llbb = Bx::append_block(self.cx, self.llfn, "cp_terminate");
1797
1798            let mut cs_bx = Bx::build(self.cx, llbb);
1799            let cs = cs_bx.catch_switch(None, None, &[cp_llbb]);
1800
1801            bx = Bx::build(self.cx, cp_llbb);
1802            let null =
1803                bx.const_null(bx.type_ptr_ext(bx.cx().data_layout().instruction_address_space));
1804
1805            // The `null` in the first argument here is actually an RTTI type
1806            // descriptor for the C++ personality function, but `catch (...)`
1807            // has no type so it's null.
1808            let args = if base::wants_msvc_seh(self.cx.sess()) {
1809                // This bitmask is a single `HT_IsStdDotDot` flag, which
1810                // represents that this is a C++-style `catch (...)` block that
1811                // only captures programmatic exceptions, not all SEH
1812                // exceptions. The second `null` points to a non-existent
1813                // `alloca` instruction, which an LLVM pass would inline into
1814                // the initial SEH frame allocation.
1815                let adjectives = bx.const_i32(0x40);
1816                &[null, adjectives, null] as &[_]
1817            } else {
1818                // Specifying more arguments than necessary usually doesn't
1819                // hurt, but the `WasmEHPrepare` LLVM pass does not recognize
1820                // anything other than a single `null` as a `catch (...)` block,
1821                // leading to problems down the line during instruction
1822                // selection.
1823                &[null] as &[_]
1824            };
1825
1826            funclet = Some(bx.catch_pad(cs, args));
1827        } else {
1828            llbb = Bx::append_block(self.cx, self.llfn, "terminate");
1829            bx = Bx::build(self.cx, llbb);
1830
1831            let llpersonality = self.cx.eh_personality();
1832            bx.filter_landing_pad(llpersonality);
1833
1834            funclet = None;
1835        }
1836
1837        self.set_debug_loc(&mut bx, mir::SourceInfo::outermost(self.mir.span));
1838
1839        let (fn_abi, fn_ptr, instance) =
1840            common::build_langcall(&bx, self.mir.span, reason.lang_item());
1841        if is_call_from_compiler_builtins_to_upstream_monomorphization(bx.tcx(), instance) {
1842            bx.abort();
1843        } else {
1844            let fn_ty = bx.fn_decl_backend_type(fn_abi);
1845
1846            let llret = bx.call(fn_ty, None, Some(fn_abi), fn_ptr, &[], funclet.as_ref(), None);
1847            bx.apply_attrs_to_cleanup_callsite(llret);
1848        }
1849
1850        bx.unreachable();
1851
1852        self.terminate_block = Some((llbb, reason));
1853        llbb
1854    }
1855
1856    /// Get the backend `BasicBlock` for a MIR `BasicBlock`, either already
1857    /// cached in `self.cached_llbbs`, or created on demand (and cached).
1858    // FIXME(eddyb) rename `llbb` and other `ll`-prefixed things to use a
1859    // more backend-agnostic prefix such as `cg` (i.e. this would be `cgbb`).
1860    pub fn llbb(&mut self, bb: mir::BasicBlock) -> Bx::BasicBlock {
1861        self.try_llbb(bb).unwrap()
1862    }
1863
1864    /// Like `llbb`, but may fail if the basic block should be skipped.
1865    pub(crate) fn try_llbb(&mut self, bb: mir::BasicBlock) -> Option<Bx::BasicBlock> {
1866        match self.cached_llbbs[bb] {
1867            CachedLlbb::None => {
1868                let llbb = Bx::append_block(self.cx, self.llfn, &format!("{bb:?}"));
1869                self.cached_llbbs[bb] = CachedLlbb::Some(llbb);
1870                Some(llbb)
1871            }
1872            CachedLlbb::Some(llbb) => Some(llbb),
1873            CachedLlbb::Skip => None,
1874        }
1875    }
1876
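    /// Decides where the return value of a call should go: nowhere (ignored),
    /// stored into an existing place, or reconstructed as an operand local
    /// after the call (directly, or via a temporary for indirect returns).
    /// For indirect returns the destination pointer is also pushed onto
    /// `llargs`.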
1877    fn make_return_dest(
1878        &mut self,
1879        bx: &mut Bx,
1880        dest: mir::Place<'tcx>,
1881        fn_ret: &ArgAbi<'tcx, Ty<'tcx>>,
1882        llargs: &mut Vec<Bx::Value>,
1883    ) -> ReturnDest<'tcx, Bx::Value> {
1884        // If the return is ignored, we can just return a do-nothing `ReturnDest`.
1885        if fn_ret.is_ignore() {
1886            return ReturnDest::Nothing;
1887        }
1888        let dest = if let Some(index) = dest.as_local() {
1889            match self.locals[index] {
1890                LocalRef::Place(dest) => dest,
1891                LocalRef::UnsizedPlace(_) => bug!("return type must be sized"),
1892                LocalRef::PendingOperand => {
1893                    // Handle temporary places, specifically `Operand` ones, as
1894                    // they don't have `alloca`s.
1895                    return if fn_ret.is_indirect() {
1896                        // Odd, but possible, case: we have an operand temporary,
1897                        // but the calling convention has an indirect return.
1898                        let tmp = PlaceRef::alloca(bx, fn_ret.layout);
1899                        tmp.storage_live(bx);
1900                        llargs.push(tmp.val.llval);
1901                        ReturnDest::IndirectOperand(tmp, index)
1902                    } else {
1903                        ReturnDest::DirectOperand(index)
1904                    };
1905                }
1906                LocalRef::Operand(_) => {
1907                    bug!("place local already assigned to");
1908                }
1909            }
1910        } else {
1911            self.codegen_place(bx, dest.as_ref())
1912        };
1913        if fn_ret.is_indirect() {
1914            if dest.val.align < dest.layout.align.abi {
1915                // Currently, MIR code generation does not create calls
1916                // that store directly to fields of packed structs (in
1917                // fact, the calls it creates write only to temps).
1918                //
1919                // If someone changes that, please update this code path
1920                // to create a temporary.
1921                span_bug!(self.mir.span, "can't directly store to unaligned value");
1922            }
1923            llargs.push(dest.val.llval);
1924            ReturnDest::Nothing
1925        } else {
1926            ReturnDest::Store(dest)
1927        }
1928    }
1929
1930    // Stores the return value of a function call into its final location.
1931    fn store_return(
1932        &mut self,
1933        bx: &mut Bx,
1934        dest: ReturnDest<'tcx, Bx::Value>,
1935        ret_abi: &ArgAbi<'tcx, Ty<'tcx>>,
1936        llval: Bx::Value,
1937    ) {
1938        use self::ReturnDest::*;
1939
1940        match dest {
1941            Nothing => (),
1942            Store(dst) => bx.store_arg(ret_abi, llval, dst),
1943            IndirectOperand(tmp, index) => {
1944                let op = bx.load_operand(tmp);
1945                tmp.storage_dead(bx);
1946                self.overwrite_local(index, LocalRef::Operand(op));
1947                self.debug_introduce_local(bx, index);
1948            }
1949            DirectOperand(index) => {
1950                // If there is a cast, we have to store and reload.
1951                let op = if let PassMode::Cast { .. } = ret_abi.mode {
1952                    let tmp = PlaceRef::alloca(bx, ret_abi.layout);
1953                    tmp.storage_live(bx);
1954                    bx.store_arg(ret_abi, llval, tmp);
1955                    let op = bx.load_operand(tmp);
1956                    tmp.storage_dead(bx);
1957                    op
1958                } else {
1959                    OperandRef::from_immediate_or_packed_pair(bx, llval, ret_abi.layout)
1960                };
1961                self.overwrite_local(index, LocalRef::Operand(op));
1962                self.debug_introduce_local(bx, index);
1963            }
1964        }
1965    }
1966}
1967
1968enum ReturnDest<'tcx, V> {
1969    /// Do nothing; the return value is indirect or ignored.
1970    Nothing,
1971    /// Store the return value to the pointer.
1972    Store(PlaceRef<'tcx, V>),
1973    /// Store an indirect return value to an operand local place.
1974    IndirectOperand(PlaceRef<'tcx, V>, mir::Local),
1975    /// Store a direct return value to an operand local place.
1976    DirectOperand(mir::Local),
1977}
1978
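/// Loads the value at `ptr` as the backend type of the ABI `CastTarget`.
///
/// When `cast.rest_offset` is set, the cast describes a two-part aggregate
/// (a single prefix register plus a "rest" register at that offset), so the
/// two parts are loaded separately and reassembled with `insert_value`.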
1979fn load_cast<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
1980    bx: &mut Bx,
1981    cast: &CastTarget,
1982    ptr: Bx::Value,
1983    align: Align,
1984) -> Bx::Value {
1985    let cast_ty = bx.cast_backend_type(cast);
1986    if let Some(offset_from_start) = cast.rest_offset {
1987        assert!(cast.prefix[1..].iter().all(|p| p.is_none()));
1988        assert_eq!(cast.rest.unit.size, cast.rest.total);
1989        let first_ty = bx.reg_backend_type(&cast.prefix[0].unwrap());
1990        let second_ty = bx.reg_backend_type(&cast.rest.unit);
1991        let first = bx.load(first_ty, ptr, align);
1992        let second_ptr = bx.inbounds_ptradd(ptr, bx.const_usize(offset_from_start.bytes()));
1993        let second = bx.load(second_ty, second_ptr, align.restrict_for_offset(offset_from_start));
1994        let res = bx.cx().const_poison(cast_ty);
1995        let res = bx.insert_value(res, first, 0);
1996        bx.insert_value(res, second, 1)
1997    } else {
1998        bx.load(cast_ty, ptr, align)
1999    }
2000}
2001
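/// The inverse of `load_cast`: stores a value of the ABI `CastTarget`'s
/// backend type to `ptr`, splitting it into its two parts when
/// `cast.rest_offset` is set.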
2002pub fn store_cast<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
2003    bx: &mut Bx,
2004    cast: &CastTarget,
2005    value: Bx::Value,
2006    ptr: Bx::Value,
2007    align: Align,
2008) {
2009    if let Some(offset_from_start) = cast.rest_offset {
2010        assert!(cast.prefix[1..].iter().all(|p| p.is_none()));
2011        assert_eq!(cast.rest.unit.size, cast.rest.total);
2012        assert!(cast.prefix[0].is_some());
2013        let first = bx.extract_value(value, 0);
2014        let second = bx.extract_value(value, 1);
2015        bx.store(first, ptr, align);
2016        let second_ptr = bx.inbounds_ptradd(ptr, bx.const_usize(offset_from_start.bytes()));
2017        bx.store(second, second_ptr, align.restrict_for_offset(offset_from_start));
2018    } else {
2019        bx.store(value, ptr, align);
2020    };
2021}