Struct rustc_const_eval::const_eval::machine::CompileTimeInterpreter

pub struct CompileTimeInterpreter<'mir, 'tcx> {
pub(super) steps_remaining: usize,
pub(super) stack: Vec<Frame<'mir, 'tcx, AllocId, ()>>,
pub(super) can_access_statics: bool,
pub(super) check_alignment: bool,
}
Expand description
Extra machine state for CTFE, and the Machine instance
Fields
steps_remaining: usize
For now, the number of terminators that can be evaluated before we throw a resource exhaustion error. Setting this to 0 disables the limit and allows the interpreter to run forever.
stack: Vec<Frame<'mir, 'tcx, AllocId, ()>>
The virtual call stack.
can_access_statics: bool
We need to make sure consts never point to anything mutable, even recursively. That is relied on for pattern matching on consts with references. To achieve this, two pieces have to work together:
- Interning makes everything outside of statics immutable.
- Pointers to allocations inside of statics can never leak outside, to a non-static global. This boolean here controls the second part.
check_alignment: bool
Whether to check alignment during evaluation.
Implementations
Trait Implementations
sourceimpl<'mir, 'tcx> Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir, 'tcx>
impl<'mir, 'tcx> Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir, 'tcx>
type Provenance = AllocId
type Provenance = AllocId
Pointers are “tagged” with provenance information; typically the AllocId they belong to.
type ProvenanceExtra = ()
type ProvenanceExtra = ()
type ExtraFnVal = !
type ExtraFnVal = !
Machines can define extra (non-instance) things that represent values of function pointers. For example, Miri uses this to return a function pointer from dlsym that can later be called to execute the right thing. Read more
type MemoryMap = HashMap<AllocId, (MemoryKind<MemoryKind>, Allocation<AllocId, ()>), BuildHasherDefault<FxHasher>>
type MemoryMap = HashMap<AllocId, (MemoryKind<MemoryKind>, Allocation<AllocId, ()>), BuildHasherDefault<FxHasher>>
sourceconst GLOBAL_KIND: Option<Self::MemoryKind> = None
const GLOBAL_KIND: Option<Self::MemoryKind> = None
The memory kind to use for copied global memory (held in tcx) — or None if such memory should not be mutated and thus any such attempt will cause a ModifiedStatic error to be raised.
Statics are copied under two circumstances: When they are mutated, and when adjust_allocation (see below) returns an owned allocation that is added to the memory so that the work is not done twice. Read more
type AllocExtra = ()
type AllocExtra = ()
type FrameExtra = ()
type FrameExtra = ()
sourcefn use_addr_for_alignment_check(_ecx: &InterpCx<'mir, 'tcx, Self>) -> bool
fn use_addr_for_alignment_check(_ecx: &InterpCx<'mir, 'tcx, Self>) -> bool
sourcefn checked_binop_checks_overflow(_ecx: &InterpCx<'mir, 'tcx, Self>) -> bool
fn checked_binop_checks_overflow(_ecx: &InterpCx<'mir, 'tcx, Self>) -> bool
sourcefn call_extra_fn(
_ecx: &mut InterpCx<'mir, 'tcx, Self>,
fn_val: !,
_abi: CallAbi,
_args: &[OpTy<'tcx>],
_destination: &PlaceTy<'tcx, Self::Provenance>,
_target: Option<BasicBlock>,
_unwind: StackPopUnwind
) -> InterpResult<'tcx>
fn call_extra_fn(
_ecx: &mut InterpCx<'mir, 'tcx, Self>,
fn_val: !,
_abi: CallAbi,
_args: &[OpTy<'tcx>],
_destination: &PlaceTy<'tcx, Self::Provenance>,
_target: Option<BasicBlock>,
_unwind: StackPopUnwind
) -> InterpResult<'tcx>
Execute fn_val. It is the hook’s responsibility to advance the instruction pointer as appropriate. Read more
sourcefn adjust_allocation<'b>(
_ecx: &InterpCx<'mir, 'tcx, Self>,
_id: AllocId,
alloc: Cow<'b, Allocation>,
_kind: Option<MemoryKind<Self::MemoryKind>>
) -> InterpResult<'tcx, Cow<'b, Allocation<Self::Provenance>>>
fn adjust_allocation<'b>(
_ecx: &InterpCx<'mir, 'tcx, Self>,
_id: AllocId,
alloc: Cow<'b, Allocation>,
_kind: Option<MemoryKind<Self::MemoryKind>>
) -> InterpResult<'tcx, Cow<'b, Allocation<Self::Provenance>>>
sourcefn extern_static_base_pointer(
ecx: &InterpCx<'mir, 'tcx, Self>,
def_id: DefId
) -> InterpResult<'tcx, Pointer>
fn extern_static_base_pointer(
ecx: &InterpCx<'mir, 'tcx, Self>,
def_id: DefId
) -> InterpResult<'tcx, Pointer>
Return the root pointer for the given extern static.
sourcefn adjust_alloc_base_pointer(
_ecx: &InterpCx<'mir, 'tcx, Self>,
ptr: Pointer<AllocId>
) -> Pointer<AllocId>
fn adjust_alloc_base_pointer(
_ecx: &InterpCx<'mir, 'tcx, Self>,
ptr: Pointer<AllocId>
) -> Pointer<AllocId>
sourcefn ptr_from_addr_cast(
_ecx: &InterpCx<'mir, 'tcx, Self>,
addr: u64
) -> InterpResult<'tcx, Pointer<Option<AllocId>>>
fn ptr_from_addr_cast(
_ecx: &InterpCx<'mir, 'tcx, Self>,
addr: u64
) -> InterpResult<'tcx, Pointer<Option<AllocId>>>
sourcefn ptr_get_alloc(
_ecx: &InterpCx<'mir, 'tcx, Self>,
ptr: Pointer<AllocId>
) -> Option<(AllocId, Size, Self::ProvenanceExtra)>
fn ptr_get_alloc(
_ecx: &InterpCx<'mir, 'tcx, Self>,
ptr: Pointer<AllocId>
) -> Option<(AllocId, Size, Self::ProvenanceExtra)>
type MemoryKind = MemoryKind
type MemoryKind = MemoryKind
sourceconst PANIC_ON_ALLOC_FAIL: bool = false
const PANIC_ON_ALLOC_FAIL: bool = false
sourcefn enforce_alignment(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool
fn enforce_alignment(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool
sourcefn enforce_validity(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool
fn enforce_validity(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool
sourcefn load_mir(
ecx: &InterpCx<'mir, 'tcx, Self>,
instance: InstanceDef<'tcx>
) -> InterpResult<'tcx, &'tcx Body<'tcx>>
fn load_mir(
ecx: &InterpCx<'mir, 'tcx, Self>,
instance: InstanceDef<'tcx>
) -> InterpResult<'tcx, &'tcx Body<'tcx>>
sourcefn find_mir_or_eval_fn(
ecx: &mut InterpCx<'mir, 'tcx, Self>,
instance: Instance<'tcx>,
_abi: CallAbi,
args: &[OpTy<'tcx>],
_dest: &PlaceTy<'tcx>,
_ret: Option<BasicBlock>,
_unwind: StackPopUnwind
) -> InterpResult<'tcx, Option<(&'mir Body<'tcx>, Instance<'tcx>)>>
fn find_mir_or_eval_fn(
ecx: &mut InterpCx<'mir, 'tcx, Self>,
instance: Instance<'tcx>,
_abi: CallAbi,
args: &[OpTy<'tcx>],
_dest: &PlaceTy<'tcx>,
_ret: Option<BasicBlock>,
_unwind: StackPopUnwind
) -> InterpResult<'tcx, Option<(&'mir Body<'tcx>, Instance<'tcx>)>>
sourcefn call_intrinsic(
ecx: &mut InterpCx<'mir, 'tcx, Self>,
instance: Instance<'tcx>,
args: &[OpTy<'tcx>],
dest: &PlaceTy<'tcx, Self::Provenance>,
target: Option<BasicBlock>,
_unwind: StackPopUnwind
) -> InterpResult<'tcx>
fn call_intrinsic(
ecx: &mut InterpCx<'mir, 'tcx, Self>,
instance: Instance<'tcx>,
args: &[OpTy<'tcx>],
dest: &PlaceTy<'tcx, Self::Provenance>,
target: Option<BasicBlock>,
_unwind: StackPopUnwind
) -> InterpResult<'tcx>
sourcefn assert_panic(
ecx: &mut InterpCx<'mir, 'tcx, Self>,
msg: &AssertMessage<'tcx>,
_unwind: Option<BasicBlock>
) -> InterpResult<'tcx>
fn assert_panic(
ecx: &mut InterpCx<'mir, 'tcx, Self>,
msg: &AssertMessage<'tcx>,
_unwind: Option<BasicBlock>
) -> InterpResult<'tcx>
Called to evaluate Assert MIR terminators that trigger a panic.
sourcefn abort(
_ecx: &mut InterpCx<'mir, 'tcx, Self>,
msg: String
) -> InterpResult<'tcx, !>
fn abort(
_ecx: &mut InterpCx<'mir, 'tcx, Self>,
msg: String
) -> InterpResult<'tcx, !>
Called to evaluate the Abort MIR terminator.
sourcefn binary_ptr_op(
_ecx: &InterpCx<'mir, 'tcx, Self>,
_bin_op: BinOp,
_left: &ImmTy<'tcx>,
_right: &ImmTy<'tcx>
) -> InterpResult<'tcx, (Scalar, bool, Ty<'tcx>)>
fn binary_ptr_op(
_ecx: &InterpCx<'mir, 'tcx, Self>,
_bin_op: BinOp,
_left: &ImmTy<'tcx>,
_right: &ImmTy<'tcx>
) -> InterpResult<'tcx, (Scalar, bool, Ty<'tcx>)>
sourcefn before_terminator(ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx>
fn before_terminator(ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx>
sourcefn expose_ptr(
_ecx: &mut InterpCx<'mir, 'tcx, Self>,
_ptr: Pointer<AllocId>
) -> InterpResult<'tcx>
fn expose_ptr(
_ecx: &mut InterpCx<'mir, 'tcx, Self>,
_ptr: Pointer<AllocId>
) -> InterpResult<'tcx>
sourcefn init_frame_extra(
ecx: &mut InterpCx<'mir, 'tcx, Self>,
frame: Frame<'mir, 'tcx>
) -> InterpResult<'tcx, Frame<'mir, 'tcx>>
fn init_frame_extra(
ecx: &mut InterpCx<'mir, 'tcx, Self>,
frame: Frame<'mir, 'tcx>
) -> InterpResult<'tcx, Frame<'mir, 'tcx>>
sourcefn stack<'a>(
ecx: &'a InterpCx<'mir, 'tcx, Self>
) -> &'a [Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>]
fn stack<'a>(
ecx: &'a InterpCx<'mir, 'tcx, Self>
) -> &'a [Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>]
sourcefn stack_mut<'a>(
ecx: &'a mut InterpCx<'mir, 'tcx, Self>
) -> &'a mut Vec<Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>>
fn stack_mut<'a>(
ecx: &'a mut InterpCx<'mir, 'tcx, Self>
) -> &'a mut Vec<Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>>
sourcefn before_access_global(
_tcx: TyCtxt<'tcx>,
machine: &Self,
alloc_id: AllocId,
alloc: ConstAllocation<'tcx>,
static_def_id: Option<DefId>,
is_write: bool
) -> InterpResult<'tcx>
fn before_access_global(
_tcx: TyCtxt<'tcx>,
machine: &Self,
alloc_id: AllocId,
alloc: ConstAllocation<'tcx>,
static_def_id: Option<DefId>,
is_write: bool
) -> InterpResult<'tcx>
def_id is Some if this is the “lazy” allocation of a static. Read more
sourcefn enforce_abi(_ecx: &InterpCx<'mir, 'tcx, Self>) -> bool
fn enforce_abi(_ecx: &InterpCx<'mir, 'tcx, Self>) -> bool
sourcefn access_local_mut<'a>(
ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
frame: usize,
local: Local
) -> InterpResult<'tcx, &'a mut Operand<Self::Provenance>>
where
    'tcx: 'mir,
fn access_local_mut<'a>(
ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
frame: usize,
local: Local
) -> InterpResult<'tcx, &'a mut Operand<Self::Provenance>>
where
    'tcx: 'mir,
Called to write the specified local from the frame. Since writing a ZST is not actually accessing memory or locals, this is never invoked for ZST reads. Read more
sourcefn thread_local_static_base_pointer(
_ecx: &mut InterpCx<'mir, 'tcx, Self>,
def_id: DefId
) -> InterpResult<'tcx, Pointer<Self::Provenance>>
fn thread_local_static_base_pointer(
_ecx: &mut InterpCx<'mir, 'tcx, Self>,
def_id: DefId
) -> InterpResult<'tcx, Pointer<Self::Provenance>>
Return the AllocId for the given thread-local static in the current thread.
fn eval_inline_asm(
_ecx: &mut InterpCx<'mir, 'tcx, Self>,
_template: &'tcx [InlineAsmTemplatePiece],
_operands: &[InlineAsmOperand<'tcx>],
_options: InlineAsmOptions
) -> InterpResult<'tcx>
sourcefn before_memory_read(
_tcx: TyCtxt<'tcx>,
_machine: &Self,
_alloc_extra: &Self::AllocExtra,
_prov: (AllocId, Self::ProvenanceExtra),
_range: AllocRange
) -> InterpResult<'tcx>
fn before_memory_read(
_tcx: TyCtxt<'tcx>,
_machine: &Self,
_alloc_extra: &Self::AllocExtra,
_prov: (AllocId, Self::ProvenanceExtra),
_range: AllocRange
) -> InterpResult<'tcx>
sourcefn before_memory_write(
_tcx: TyCtxt<'tcx>,
_machine: &mut Self,
_alloc_extra: &mut Self::AllocExtra,
_prov: (AllocId, Self::ProvenanceExtra),
_range: AllocRange
) -> InterpResult<'tcx>
fn before_memory_write(
_tcx: TyCtxt<'tcx>,
_machine: &mut Self,
_alloc_extra: &mut Self::AllocExtra,
_prov: (AllocId, Self::ProvenanceExtra),
_range: AllocRange
) -> InterpResult<'tcx>
sourcefn before_memory_deallocation(
_tcx: TyCtxt<'tcx>,
_machine: &mut Self,
_alloc_extra: &mut Self::AllocExtra,
_prov: (AllocId, Self::ProvenanceExtra),
_range: AllocRange
) -> InterpResult<'tcx>
fn before_memory_deallocation(
_tcx: TyCtxt<'tcx>,
_machine: &mut Self,
_alloc_extra: &mut Self::AllocExtra,
_prov: (AllocId, Self::ProvenanceExtra),
_range: AllocRange
) -> InterpResult<'tcx>
sourcefn retag(
_ecx: &mut InterpCx<'mir, 'tcx, Self>,
_kind: RetagKind,
_place: &PlaceTy<'tcx, Self::Provenance>
) -> InterpResult<'tcx>
fn retag(
_ecx: &mut InterpCx<'mir, 'tcx, Self>,
_kind: RetagKind,
_place: &PlaceTy<'tcx, Self::Provenance>
) -> InterpResult<'tcx>
sourcefn after_stack_push(_ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx>
fn after_stack_push(_ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx>
sourcefn after_stack_pop(
_ecx: &mut InterpCx<'mir, 'tcx, Self>,
_frame: Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>,
unwinding: bool
) -> InterpResult<'tcx, StackPopJump>
fn after_stack_pop(
_ecx: &mut InterpCx<'mir, 'tcx, Self>,
_frame: Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>,
unwinding: bool
) -> InterpResult<'tcx, StackPopJump>
Called immediately after a stack frame got popped, but before jumping back to the caller. The locals have already been destroyed! Read more
Auto Trait Implementations
impl<'mir, 'tcx> !RefUnwindSafe for CompileTimeInterpreter<'mir, 'tcx>
impl<'mir, 'tcx> !Send for CompileTimeInterpreter<'mir, 'tcx>
impl<'mir, 'tcx> !Sync for CompileTimeInterpreter<'mir, 'tcx>
impl<'mir, 'tcx> Unpin for CompileTimeInterpreter<'mir, 'tcx>
where
    'tcx: 'mir,
impl<'mir, 'tcx> !UnwindSafe for CompileTimeInterpreter<'mir, 'tcx>
Blanket Implementations
sourceimpl<T> BorrowMut<T> for T
where
    T: ?Sized,
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
const: unstable · sourcefn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Layout
Note: Most layout information is completely unstable and may even differ between compilations. The only exception is types with certain repr(...)
attributes. Please see the Rust Reference’s “Type Layout” chapter for details on type layout guarantees.
Size: 40 bytes