Type Alias miri::machine::MiriInterpCx

pub type MiriInterpCx<'mir, 'tcx> = InterpCx<'mir, 'tcx, MiriMachine<'mir, 'tcx>>;

A rustc InterpCx for Miri.

Aliased Type§

struct MiriInterpCx<'mir, 'tcx> {
    pub machine: MiriMachine<'mir, 'tcx>,
    pub tcx: TyCtxtAt<'tcx>,
    pub(crate) param_env: ParamEnv<'tcx>,
    pub memory: Memory<'mir, 'tcx, MiriMachine<'mir, 'tcx>>,
    pub recursion_limit: Limit,
}

Fields§

§machine: MiriMachine<'mir, 'tcx>

Stores the Machine instance.

Note: the stack is provided by the machine.

§tcx: TyCtxtAt<'tcx>

The results of the type checker, from rustc. The span in this is the “root” of the evaluation, i.e., the const we are evaluating (if this is CTFE).

§param_env: ParamEnv<'tcx>

§memory: Memory<'mir, 'tcx, MiriMachine<'mir, 'tcx>>

The virtual memory system.

§recursion_limit: Limit

The recursion limit (cached from tcx.recursion_limit(()))
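
The methods listed under “Trait Implementations” below come from per-module EvalContextExt extension traits implemented for this alias. As a rough, self-contained sketch of that extension-trait pattern (the names and the recursion check here are illustrative, not Miri's actual definitions):

```rust
// Sketch of the extension-trait pattern used below: each module defines its
// own `EvalContextExt` trait and implements it for the shared context type,
// so the methods appear as inherent-looking methods on the context.
// All names here are illustrative, not Miri's actual definitions.
struct InterpCx {
    recursion_limit: usize,
}

trait EvalContextExt {
    fn check_recursion(&self, depth: usize) -> Result<(), String>;
}

impl EvalContextExt for InterpCx {
    fn check_recursion(&self, depth: usize) -> Result<(), String> {
        if depth > self.recursion_limit {
            Err(format!("recursion limit {} exceeded", self.recursion_limit))
        } else {
            Ok(())
        }
    }
}

fn main() {
    let cx = InterpCx { recursion_limit: 128 };
    // The trait must be in scope for the method call to resolve.
    assert!(cx.check_recursion(10).is_ok());
    assert!(cx.check_recursion(1000).is_err());
}
```
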

Trait Implementations§

source§

impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn emulate_x86_intrinsic( &mut self, link_name: Symbol, abi: Abi, args: &[OpTy<'tcx, Provenance>], dest: &PlaceTy<'tcx, Provenance> ) -> InterpResult<'tcx, EmulateByNameResult<'mir, 'tcx>>

source§

impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn CreateThread( &mut self, security_op: &OpTy<'tcx, Provenance>, stacksize_op: &OpTy<'tcx, Provenance>, start_op: &OpTy<'tcx, Provenance>, arg_op: &OpTy<'tcx, Provenance>, flags_op: &OpTy<'tcx, Provenance>, thread_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, ThreadId>

source§

fn WaitForSingleObject( &mut self, handle_op: &OpTy<'tcx, Provenance>, timeout_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, u32>

source§

impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn init_once_get_or_create_id( &mut self, lock_op: &OpTy<'tcx, Provenance>, lock_layout: TyAndLayout<'tcx>, offset: u64 ) -> InterpResult<'tcx, InitOnceId>

source§

fn init_once_get_or_create<F>( &mut self, existing: F ) -> InterpResult<'tcx, InitOnceId>where F: FnOnce(&mut MiriInterpCx<'mir, 'tcx>, InitOnceId) -> InterpResult<'tcx, Option<InitOnceId>>,

Provides the closure with the next InitOnceId. Creates that InitOnce if the closure returns None, otherwise returns the value from the closure.
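
The same “provide the next id to a closure” shape recurs below for mutexes, rwlocks, and condvars. A standalone model of that contract, with a placeholder Machine type and payload (assumptions, not Miri's code):

```rust
// Standalone model of the get-or-create contract described above: the closure
// sees the id that *would* be allocated next and can either return an existing
// id (Some) or ask for a new one to be created (None).
#[derive(Clone, Copy, Debug, PartialEq)]
struct InitOnceId(usize);

struct Machine {
    init_onces: Vec<String>, // placeholder payload per InitOnce
}

impl Machine {
    fn init_once_get_or_create<F>(&mut self, existing: F) -> InitOnceId
    where
        F: FnOnce(&mut Machine, InitOnceId) -> Option<InitOnceId>,
    {
        let next_id = InitOnceId(self.init_onces.len());
        let reused = existing(&mut *self, next_id);
        match reused {
            Some(id) => id, // the closure found an already-known id
            None => {
                self.init_onces.push(String::new()); // actually create it
                next_id
            }
        }
    }
}

fn main() {
    let mut m = Machine { init_onces: Vec::new() };
    let a = m.init_once_get_or_create(|_, _| None); // creates InitOnce 0
    let b = m.init_once_get_or_create(|_, _| Some(a)); // reuses the existing id
    assert_eq!(a, b);
    assert_eq!(m.init_onces.len(), 1);
}
```
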
source§

fn init_once_status(&mut self, id: InitOnceId) -> InitOnceStatus

source§

fn init_once_enqueue_and_block( &mut self, id: InitOnceId, thread: ThreadId, callback: Box<dyn MachineCallback<'mir, 'tcx> + 'tcx> )

Put the thread into the queue waiting for the initialization.
source§

fn init_once_begin(&mut self, id: InitOnceId)

Begin initializing this InitOnce. Must only be called after checking that it is currently uninitialized.
source§

fn init_once_complete(&mut self, id: InitOnceId) -> InterpResult<'tcx>

source§

fn init_once_fail(&mut self, id: InitOnceId) -> InterpResult<'tcx>

source§

fn init_once_observe_completed(&mut self, id: InitOnceId)

Synchronize with the previous completion of an InitOnce. Must only be called after checking that it is complete.
source§

impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn tb_retag_ptr_value( &mut self, kind: RetagKind, val: &ImmTy<'tcx, Provenance> ) -> InterpResult<'tcx, ImmTy<'tcx, Provenance>>

Retag a pointer. References are passed to from_ref_ty and raw pointers are never reborrowed.
source§

fn tb_retag_place_contents( &mut self, kind: RetagKind, place: &PlaceTy<'tcx, Provenance> ) -> InterpResult<'tcx>

Retag all pointers that are stored in this place.
source§

fn tb_protect_place( &mut self, place: &MPlaceTy<'tcx, Provenance> ) -> InterpResult<'tcx>

Protect a place so that it cannot be used any more for the duration of the current function call. Read more
source§

fn tb_expose_tag( &mut self, alloc_id: AllocId, tag: BorTag ) -> InterpResult<'tcx>

Mark the given tag as exposed. It was found on a pointer with the given AllocId.
source§

fn print_tree( &mut self, alloc_id: AllocId, show_unnamed: bool ) -> InterpResult<'tcx>

Display the tree.
source§

fn tb_give_pointer_debug_name( &mut self, ptr: Pointer<Option<Provenance>>, nth_parent: u8, name: &str ) -> InterpResult<'tcx>

Give a name to the pointer, usually the name it has in the source code (for debugging). The name given is name and the pointer that receives it is the nth_parent of ptr (with 0 representing ptr itself)
source§

impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn call_intrinsic( &mut self, instance: Instance<'tcx>, args: &[OpTy<'tcx, Provenance>], dest: &PlaceTy<'tcx, Provenance>, ret: Option<BasicBlock>, _unwind: UnwindAction ) -> InterpResult<'tcx>

source§

fn emulate_intrinsic_by_name( &mut self, intrinsic_name: &str, generic_args: GenericArgsRef<'tcx>, args: &[OpTy<'tcx, Provenance>], dest: &PlaceTy<'tcx, Provenance> ) -> InterpResult<'tcx>

Emulates a Miri-supported intrinsic (not supported by the core engine).
source§

impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn handle_miri_start_panic( &mut self, abi: Abi, link_name: Symbol, args: &[OpTy<'tcx, Provenance>], unwind: UnwindAction ) -> InterpResult<'tcx>

Handles the special miri_start_panic intrinsic, which is called by libpanic_unwind to delegate the actual unwinding process to Miri.
source§

fn handle_try( &mut self, args: &[OpTy<'tcx, Provenance>], dest: &PlaceTy<'tcx, Provenance>, ret: BasicBlock ) -> InterpResult<'tcx>

Handles the try intrinsic, the underlying implementation of std::panicking::try.
source§

fn handle_stack_pop_unwind( &mut self, extra: FrameExtra<'tcx>, unwinding: bool ) -> InterpResult<'tcx, StackPopJump>

source§

fn start_panic(&mut self, msg: &str, unwind: UnwindAction) -> InterpResult<'tcx>

Start a panic in the interpreter with the given message as payload.
source§

fn start_panic_nounwind(&mut self, msg: &str) -> InterpResult<'tcx>

Start a non-unwinding panic in the interpreter with the given message as payload.
source§

fn assert_panic( &mut self, msg: &AssertMessage<'tcx>, unwind: UnwindAction ) -> InterpResult<'tcx>

source§

impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn emulate_simd_intrinsic( &mut self, intrinsic_name: &str, generic_args: GenericArgsRef<'tcx>, args: &[OpTy<'tcx, Provenance>], dest: &PlaceTy<'tcx, Provenance> ) -> InterpResult<'tcx>

Calls the simd intrinsic intrinsic; the simd_ prefix has already been removed.
source§

impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn getenv( &mut self, name_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, Pointer<Option<Provenance>>>

source§

fn GetEnvironmentVariableW( &mut self, name_op: &OpTy<'tcx, Provenance>, buf_op: &OpTy<'tcx, Provenance>, size_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, Scalar<Provenance>>

source§

fn GetEnvironmentStringsW( &mut self ) -> InterpResult<'tcx, Pointer<Option<Provenance>>>

source§

fn FreeEnvironmentStringsW( &mut self, env_block_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, Scalar<Provenance>>

source§

fn setenv( &mut self, name_op: &OpTy<'tcx, Provenance>, value_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, i32>

source§

fn SetEnvironmentVariableW( &mut self, name_op: &OpTy<'tcx, Provenance>, value_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, Scalar<Provenance>>

source§

fn unsetenv( &mut self, name_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, i32>

source§

fn getcwd( &mut self, buf_op: &OpTy<'tcx, Provenance>, size_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, Pointer<Option<Provenance>>>

source§

fn GetCurrentDirectoryW( &mut self, size_op: &OpTy<'tcx, Provenance>, buf_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, Scalar<Provenance>>

source§

fn chdir(&mut self, path_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx, i32>

source§

fn SetCurrentDirectoryW( &mut self, path_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, Scalar<Provenance>>

source§

fn update_environ(&mut self) -> InterpResult<'tcx>

Updates the environ static. The first time it gets called, also initializes extra.environ.
source§

fn getpid(&mut self) -> InterpResult<'tcx, i32>

source§

fn GetCurrentProcessId(&mut self) -> InterpResult<'tcx, u32>

source§

impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn emulate_foreign_item_by_name( &mut self, link_name: Symbol, abi: Abi, args: &[OpTy<'tcx, Provenance>], dest: &PlaceTy<'tcx, Provenance> ) -> InterpResult<'tcx, EmulateByNameResult<'mir, 'tcx>>

source§

impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn emulate_foreign_item_by_name( &mut self, link_name: Symbol, _abi: Abi, _args: &[OpTy<'tcx, Provenance>], _dest: &PlaceTy<'tcx, Provenance> ) -> InterpResult<'tcx, EmulateByNameResult<'mir, 'tcx>>

source§

impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn emulate_foreign_item_by_name( &mut self, link_name: Symbol, abi: Abi, args: &[OpTy<'tcx, Provenance>], dest: &PlaceTy<'tcx, Provenance> ) -> InterpResult<'tcx, EmulateByNameResult<'mir, 'tcx>>

source§

impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn emulate_foreign_item_by_name( &mut self, link_name: Symbol, abi: Abi, args: &[OpTy<'tcx, Provenance>], dest: &PlaceTy<'tcx, Provenance> ) -> InterpResult<'tcx, EmulateByNameResult<'mir, 'tcx>>

source§

impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn emulate_x86_ssse3_intrinsic( &mut self, link_name: Symbol, abi: Abi, args: &[OpTy<'tcx, Provenance>], dest: &PlaceTy<'tcx, Provenance> ) -> InterpResult<'tcx, EmulateByNameResult<'mir, 'tcx>>

source§

impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn emulate_foreign_item_by_name( &mut self, link_name: Symbol, abi: Abi, args: &[OpTy<'tcx, Provenance>], dest: &PlaceTy<'tcx, Provenance> ) -> InterpResult<'tcx, EmulateByNameResult<'mir, 'tcx>>

source§

impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn call_dlsym( &mut self, dlsym: Dlsym, abi: Abi, args: &[OpTy<'tcx, Provenance>], dest: &PlaceTy<'tcx, Provenance>, ret: Option<BasicBlock> ) -> InterpResult<'tcx>

source§

impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn mremap( &mut self, old_address: &OpTy<'tcx, Provenance>, old_size: &OpTy<'tcx, Provenance>, new_size: &OpTy<'tcx, Provenance>, flags: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, Scalar<Provenance>>

source§

impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn mutex_get_or_create_id( &mut self, lock_op: &OpTy<'tcx, Provenance>, lock_layout: TyAndLayout<'tcx>, offset: u64 ) -> InterpResult<'tcx, MutexId>

source§

fn rwlock_get_or_create_id( &mut self, lock_op: &OpTy<'tcx, Provenance>, lock_layout: TyAndLayout<'tcx>, offset: u64 ) -> InterpResult<'tcx, RwLockId>

source§

fn condvar_get_or_create_id( &mut self, lock_op: &OpTy<'tcx, Provenance>, lock_layout: TyAndLayout<'tcx>, offset: u64 ) -> InterpResult<'tcx, CondvarId>

source§

fn mutex_get_or_create<F>(&mut self, existing: F) -> InterpResult<'tcx, MutexId>where F: FnOnce(&mut MiriInterpCx<'mir, 'tcx>, MutexId) -> InterpResult<'tcx, Option<MutexId>>,

Provides the closure with the next MutexId. Creates that mutex if the closure returns None, otherwise returns the value from the closure
source§

fn mutex_get_owner(&mut self, id: MutexId) -> ThreadId

Get the id of the thread that currently owns this lock.
source§

fn mutex_is_locked(&self, id: MutexId) -> bool

Check if locked.
source§

fn mutex_lock(&mut self, id: MutexId, thread: ThreadId)

Lock by setting the mutex owner and increasing the lock count.
source§

fn mutex_unlock( &mut self, id: MutexId, expected_owner: ThreadId ) -> Option<usize>

Try unlocking by decreasing the lock count and returning the old lock count. If the lock count reaches 0, release the lock and potentially give to a new owner. If the lock was not locked by expected_owner, return None.
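
A standalone model of the lock-count semantics described for mutex_lock and mutex_unlock, using placeholder Mutex and ThreadId types (illustrative only, not Miri's implementation):

```rust
// Rough standalone model of the recursive-mutex lock-count semantics
// described above (illustrative only, not Miri's implementation).
#[derive(Clone, Copy, PartialEq, Debug)]
struct ThreadId(u32);

#[derive(Default)]
struct Mutex {
    owner: Option<ThreadId>,
    lock_count: usize,
}

impl Mutex {
    /// Lock by setting the owner and increasing the lock count (recursive locking).
    fn lock(&mut self, thread: ThreadId) {
        assert!(self.owner.is_none() || self.owner == Some(thread));
        self.owner = Some(thread);
        self.lock_count += 1;
    }

    /// Try unlocking: returns the old lock count, or `None` if `expected_owner`
    /// does not hold the lock. When the count reaches 0 the lock is released.
    fn unlock(&mut self, expected_owner: ThreadId) -> Option<usize> {
        if self.owner != Some(expected_owner) {
            return None;
        }
        let old = self.lock_count;
        self.lock_count -= 1;
        if self.lock_count == 0 {
            self.owner = None; // a queued thread could acquire the mutex here
        }
        Some(old)
    }
}

fn main() {
    let t = ThreadId(1);
    let mut m = Mutex::default();
    m.lock(t);
    m.lock(t); // recursive lock
    assert_eq!(m.unlock(t), Some(2));
    assert_eq!(m.unlock(t), Some(1));
    assert_eq!(m.unlock(ThreadId(2)), None); // not the owner (already unlocked)
}
```
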
source§

fn mutex_enqueue_and_block(&mut self, id: MutexId, thread: ThreadId)

Put the thread into the queue waiting for the mutex.
source§

fn rwlock_get_or_create<F>( &mut self, existing: F ) -> InterpResult<'tcx, RwLockId>where F: FnOnce(&mut MiriInterpCx<'mir, 'tcx>, RwLockId) -> InterpResult<'tcx, Option<RwLockId>>,

Provides the closure with the next RwLockId. Creates that RwLock if the closure returns None, otherwise returns the value from the closure
source§

fn rwlock_is_locked(&self, id: RwLockId) -> bool

Check if locked.
source§

fn rwlock_is_write_locked(&self, id: RwLockId) -> bool

Check if write locked.
source§

fn rwlock_reader_lock(&mut self, id: RwLockId, reader: ThreadId)

Read-lock the lock by adding the reader to the list of threads that own this lock.
source§

fn rwlock_reader_unlock(&mut self, id: RwLockId, reader: ThreadId) -> bool

Try to read-unlock the lock for reader and potentially give the lock to a new owner. Returns true if it succeeded, false if this reader did not hold the lock.
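
A minimal standalone model of the reader bookkeeping described above, with placeholder types (illustrative only); each reader carries a count so the same thread may read-lock recursively:

```rust
// Minimal standalone model of the reader bookkeeping described above
// (illustrative only, not Miri's implementation).
use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct ThreadId(u32);

#[derive(Default)]
struct RwLock {
    writer: Option<ThreadId>,
    readers: HashMap<ThreadId, usize>, // per-reader lock count
}

impl RwLock {
    fn is_locked(&self) -> bool {
        self.writer.is_some() || !self.readers.is_empty()
    }

    fn reader_lock(&mut self, reader: ThreadId) {
        assert!(self.writer.is_none(), "cannot read-lock while write-locked");
        *self.readers.entry(reader).or_insert(0) += 1;
    }

    /// Returns `true` on success, `false` if `reader` did not hold the lock.
    fn reader_unlock(&mut self, reader: ThreadId) -> bool {
        match self.readers.get(&reader).copied() {
            None => false,
            Some(1) => {
                self.readers.remove(&reader); // last read-lock by this thread
                true
            }
            Some(n) => {
                self.readers.insert(reader, n - 1);
                true
            }
        }
    }
}

fn main() {
    let (a, b) = (ThreadId(1), ThreadId(2));
    let mut rw = RwLock::default();
    rw.reader_lock(a);
    rw.reader_lock(b);
    assert!(rw.is_locked());
    assert!(rw.reader_unlock(a));
    assert!(!rw.reader_unlock(a)); // `a` no longer holds the lock
    assert!(rw.reader_unlock(b));
    assert!(!rw.is_locked());
}
```
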
source§

fn rwlock_enqueue_and_block_reader(&mut self, id: RwLockId, reader: ThreadId)

Put the reader in the queue waiting for the lock and block it.
source§

fn rwlock_writer_lock(&mut self, id: RwLockId, writer: ThreadId)

Lock by setting the writer that owns the lock.
source§

fn rwlock_writer_unlock( &mut self, id: RwLockId, expected_writer: ThreadId ) -> bool

Try to unlock by removing the writer.
source§

fn rwlock_enqueue_and_block_writer(&mut self, id: RwLockId, writer: ThreadId)

Put the writer in the queue waiting for the lock.
source§

fn condvar_get_or_create<F>( &mut self, existing: F ) -> InterpResult<'tcx, CondvarId>where F: FnOnce(&mut MiriInterpCx<'mir, 'tcx>, CondvarId) -> InterpResult<'tcx, Option<CondvarId>>,

Provides the closure with the next CondvarId. Creates that Condvar if the closure returns None, otherwise returns the value from the closure
source§

fn condvar_is_awaited(&mut self, id: CondvarId) -> bool

Is the conditional variable awaited?
source§

fn condvar_wait(&mut self, id: CondvarId, thread: ThreadId, lock: CondvarLock)

Mark that the thread is waiting on the conditional variable.
source§

fn condvar_signal(&mut self, id: CondvarId) -> Option<(ThreadId, CondvarLock)>

Wake up some thread (if there is any) sleeping on the conditional variable.
source§

fn condvar_remove_waiter(&mut self, id: CondvarId, thread: ThreadId)

Remove the thread from the queue of threads waiting on this conditional variable.
source§

fn futex_wait(&mut self, addr: u64, thread: ThreadId, bitset: u32)

source§

fn futex_wake(&mut self, addr: u64, bitset: u32) -> Option<ThreadId>

source§

fn futex_remove_waiter(&mut self, addr: u64, thread: ThreadId)

source§

impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn pthread_mutexattr_init( &mut self, attr_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, i32>

source§

fn pthread_mutexattr_settype( &mut self, attr_op: &OpTy<'tcx, Provenance>, kind_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, i32>

source§

fn pthread_mutexattr_destroy( &mut self, attr_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, i32>

source§

fn pthread_mutex_init( &mut self, mutex_op: &OpTy<'tcx, Provenance>, attr_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, i32>

source§

fn pthread_mutex_lock( &mut self, mutex_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, i32>

source§

fn pthread_mutex_trylock( &mut self, mutex_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, i32>

source§

fn pthread_mutex_unlock( &mut self, mutex_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, i32>

source§

fn pthread_mutex_destroy( &mut self, mutex_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, i32>

source§

fn pthread_rwlock_rdlock( &mut self, rwlock_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, i32>

source§

fn pthread_rwlock_tryrdlock( &mut self, rwlock_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, i32>

source§

fn pthread_rwlock_wrlock( &mut self, rwlock_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, i32>

source§

fn pthread_rwlock_trywrlock( &mut self, rwlock_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, i32>

source§

fn pthread_rwlock_unlock( &mut self, rwlock_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, i32>

source§

fn pthread_rwlock_destroy( &mut self, rwlock_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, i32>

source§

fn pthread_condattr_init( &mut self, attr_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, i32>

source§

fn pthread_condattr_setclock( &mut self, attr_op: &OpTy<'tcx, Provenance>, clock_id_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, Scalar<Provenance>>

source§

fn pthread_condattr_getclock( &mut self, attr_op: &OpTy<'tcx, Provenance>, clk_id_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, Scalar<Provenance>>

source§

fn pthread_condattr_destroy( &mut self, attr_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, i32>

source§

fn pthread_cond_init( &mut self, cond_op: &OpTy<'tcx, Provenance>, attr_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, i32>

source§

fn pthread_cond_signal( &mut self, cond_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, i32>

source§

fn pthread_cond_broadcast( &mut self, cond_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, i32>

source§

fn pthread_cond_wait( &mut self, cond_op: &OpTy<'tcx, Provenance>, mutex_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, i32>

source§

fn pthread_cond_timedwait( &mut self, cond_op: &OpTy<'tcx, Provenance>, mutex_op: &OpTy<'tcx, Provenance>, abstime_op: &OpTy<'tcx, Provenance>, dest: &PlaceTy<'tcx, Provenance> ) -> InterpResult<'tcx>

source§

fn pthread_cond_destroy( &mut self, cond_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, i32>

source§

impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn sb_retag_ptr_value( &mut self, kind: RetagKind, val: &ImmTy<'tcx, Provenance> ) -> InterpResult<'tcx, ImmTy<'tcx, Provenance>>

source§

fn sb_retag_place_contents( &mut self, kind: RetagKind, place: &PlaceTy<'tcx, Provenance> ) -> InterpResult<'tcx>

source§

fn sb_protect_place( &mut self, place: &MPlaceTy<'tcx, Provenance> ) -> InterpResult<'tcx>

Protect a place so that it cannot be used any more for the duration of the current function call. Read more
source§

fn sb_expose_tag( &mut self, alloc_id: AllocId, tag: BorTag ) -> InterpResult<'tcx>

Mark the given tag as exposed. It was found on a pointer with the given AllocId.
source§

fn print_stacks(&mut self, alloc_id: AllocId) -> InterpResult<'tcx>

source§

impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn get_or_create_thread_local_alloc( &mut self, def_id: DefId ) -> InterpResult<'tcx, Pointer<Provenance>>

Get a thread-specific allocation id for the given thread-local static. If needed, allocate a new one.
source§

fn start_regular_thread( &mut self, thread: Option<MPlaceTy<'tcx, Provenance>>, start_routine: Pointer<Option<Provenance>>, start_abi: Abi, func_arg: ImmTy<'tcx, Provenance>, ret_layout: TyAndLayout<'tcx> ) -> InterpResult<'tcx, ThreadId>

Start a regular (non-main) thread.
source§

fn detach_thread( &mut self, thread_id: ThreadId, allow_terminated_joined: bool ) -> InterpResult<'tcx>

source§

fn join_thread(&mut self, joined_thread_id: ThreadId) -> InterpResult<'tcx>

source§

fn join_thread_exclusive( &mut self, joined_thread_id: ThreadId ) -> InterpResult<'tcx>

source§

fn set_active_thread(&mut self, thread_id: ThreadId) -> ThreadId

source§

fn get_active_thread(&self) -> ThreadId

source§

fn active_thread_mut(&mut self) -> &mut Thread<'mir, 'tcx>

source§

fn active_thread_ref(&self) -> &Thread<'mir, 'tcx>

source§

fn get_total_thread_count(&self) -> usize

source§

fn have_all_terminated(&self) -> bool

source§

fn enable_thread(&mut self, thread_id: ThreadId)

source§

fn active_thread_stack( &self ) -> &[Frame<'mir, 'tcx, Provenance, FrameExtra<'tcx>>]

source§

fn active_thread_stack_mut( &mut self ) -> &mut Vec<Frame<'mir, 'tcx, Provenance, FrameExtra<'tcx>>>

source§

fn set_thread_name(&mut self, thread: ThreadId, new_thread_name: Vec<u8>)

Set the name of the current thread. The buffer must not include the null terminator.
source§

fn set_thread_name_wide(&mut self, thread: ThreadId, new_thread_name: &[u16])

source§

fn get_thread_name<'c>(&'c self, thread: ThreadId) -> &'c [u8] where 'mir: 'c,

source§

fn block_thread(&mut self, thread: ThreadId)

source§

fn unblock_thread(&mut self, thread: ThreadId)

source§

fn yield_active_thread(&mut self)

source§

fn maybe_preempt_active_thread(&mut self)

source§

fn register_timeout_callback( &mut self, thread: ThreadId, call_time: Time, callback: Box<dyn MachineCallback<'mir, 'tcx> + 'tcx> )

source§

fn unregister_timeout_callback_if_exists(&mut self, thread: ThreadId)

source§

fn run_threads(&mut self) -> InterpResult<'tcx, !>

Run the core interpreter loop. Returns only when an interrupt occurs (an error or program termination).
source§

fn terminate_active_thread(&mut self) -> InterpResult<'tcx>

Handles thread termination of the active thread: wakes up threads joining on this one, and deallocates thread-local statics. Read more
source§

impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn garbage_collect_tags(&mut self) -> InterpResult<'tcx>

source§

fn remove_unreachable_tags(&mut self, tags: FxHashSet<BorTag>)

source§

impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn read_os_str_from_c_str<'a>( &'a self, ptr: Pointer<Option<Provenance>> ) -> InterpResult<'tcx, &'a OsStr>where 'tcx: 'a, 'mir: 'a,

Helper function to read an OsString from a null-terminated sequence of bytes, which is what the Unix APIs usually handle.
source§

fn read_os_str_from_wide_str<'a>( &'a self, ptr: Pointer<Option<Provenance>> ) -> InterpResult<'tcx, OsString>where 'tcx: 'a, 'mir: 'a,

Helper function to read an OsString from a 0x0000-terminated sequence of u16, which is what the Windows APIs usually handle.
source§

fn write_os_str_to_c_str( &mut self, os_str: &OsStr, ptr: Pointer<Option<Provenance>>, size: u64 ) -> InterpResult<'tcx, (bool, u64)>

Helper function to write an OsStr as a null-terminated sequence of bytes, which is what the Unix APIs usually handle. This function returns Ok((false, length)) without trying to write if size is not large enough to fit the contents of os_str plus a null terminator. It returns Ok((true, length)) if the writing process was successful. The string length returned does include the null terminator.
source§

fn write_os_str_to_wide_str( &mut self, os_str: &OsStr, ptr: Pointer<Option<Provenance>>, size: u64, truncate: bool ) -> InterpResult<'tcx, (bool, u64)>

Helper function to write an OsStr as a 0x0000-terminated u16-sequence, which is what the Windows APIs usually handle. Read more
source§

fn alloc_os_str_as_c_str( &mut self, os_str: &OsStr, memkind: MemoryKind<MiriMemoryKind> ) -> InterpResult<'tcx, Pointer<Option<Provenance>>>

Allocate enough memory to store the given OsStr as a null-terminated sequence of bytes.
source§

fn alloc_os_str_as_wide_str( &mut self, os_str: &OsStr, memkind: MemoryKind<MiriMemoryKind> ) -> InterpResult<'tcx, Pointer<Option<Provenance>>>

Allocate enough memory to store the given OsStr as a null-terminated sequence of u16.
source§

fn read_path_from_c_str<'a>( &'a self, ptr: Pointer<Option<Provenance>> ) -> InterpResult<'tcx, Cow<'a, Path>>where 'tcx: 'a, 'mir: 'a,

Read a null-terminated sequence of bytes, and perform path separator conversion if needed.
source§

fn read_path_from_wide_str( &self, ptr: Pointer<Option<Provenance>> ) -> InterpResult<'tcx, PathBuf>

Read a null-terminated sequence of u16s, and perform path separator conversion if needed.
source§

fn write_path_to_c_str( &mut self, path: &Path, ptr: Pointer<Option<Provenance>>, size: u64 ) -> InterpResult<'tcx, (bool, u64)>

Write a Path to the machine memory (as a null-terminated sequence of bytes), adjusting path separators if needed.
source§

fn write_path_to_wide_str( &mut self, path: &Path, ptr: Pointer<Option<Provenance>>, size: u64, truncate: bool ) -> InterpResult<'tcx, (bool, u64)>

Write a Path to the machine memory (as a null-terminated sequence of u16s), adjusting path separators if needed.
source§

fn alloc_path_as_c_str( &mut self, path: &Path, memkind: MemoryKind<MiriMemoryKind> ) -> InterpResult<'tcx, Pointer<Option<Provenance>>>

Allocate enough memory to store a Path as a null-terminated sequence of bytes, adjusting path separators if needed.
source§

fn alloc_path_as_wide_str( &mut self, path: &Path, memkind: MemoryKind<MiriMemoryKind> ) -> InterpResult<'tcx, Pointer<Option<Provenance>>>

Allocate enough memory to store a Path as a null-terminated sequence of u16s, adjusting path separators if needed.
source§

fn convert_path<'a>( &self, os_str: Cow<'a, OsStr>, direction: PathConversion ) -> Cow<'a, OsStr>

source§

impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn emit_diagnostic(&self, e: NonHaltingDiagnostic)

source§

fn handle_ice(&self)

We had a panic in Miri itself, try to print something useful.
source§

impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn emulate_x86_sse2_intrinsic( &mut self, link_name: Symbol, abi: Abi, args: &[OpTy<'tcx, Provenance>], dest: &PlaceTy<'tcx, Provenance> ) -> InterpResult<'tcx, EmulateByNameResult<'mir, 'tcx>>

source§

impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn call_dlsym( &mut self, dlsym: Dlsym, args: &[OpTy<'tcx, Provenance>], dest: &PlaceTy<'tcx, Provenance>, ret: Option<BasicBlock> ) -> InterpResult<'tcx>

source§

impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn emulate_foreign_item_by_name( &mut self, link_name: Symbol, abi: Abi, args: &[OpTy<'tcx, Provenance>], dest: &PlaceTy<'tcx, Provenance> ) -> InterpResult<'tcx, EmulateByNameResult<'mir, 'tcx>>

source§

impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn call_dlsym( &mut self, dlsym: Dlsym, args: &[OpTy<'tcx, Provenance>], dest: &PlaceTy<'tcx, Provenance>, ret: Option<BasicBlock> ) -> InterpResult<'tcx>

source§

impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn call_dlsym( &mut self, dlsym: Dlsym, _args: &[OpTy<'tcx, Provenance>], _dest: &PlaceTy<'tcx, Provenance>, ret: Option<BasicBlock> ) -> InterpResult<'tcx>

source§

impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn emulate_x86_sse3_intrinsic( &mut self, link_name: Symbol, abi: Abi, args: &[OpTy<'tcx, Provenance>], dest: &PlaceTy<'tcx, Provenance> ) -> InterpResult<'tcx, EmulateByNameResult<'mir, 'tcx>>

source§

impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn handle_miri_backtrace_size( &mut self, abi: Abi, link_name: Symbol, args: &[OpTy<'tcx, Provenance>], dest: &PlaceTy<'tcx, Provenance> ) -> InterpResult<'tcx>

source§

fn handle_miri_get_backtrace( &mut self, abi: Abi, link_name: Symbol, args: &[OpTy<'tcx, Provenance>], dest: &PlaceTy<'tcx, Provenance> ) -> InterpResult<'tcx>

source§

fn resolve_frame_pointer( &mut self, ptr: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, (Instance<'tcx>, Loc, String, String)>

source§

fn handle_miri_resolve_frame( &mut self, abi: Abi, link_name: Symbol, args: &[OpTy<'tcx, Provenance>], dest: &PlaceTy<'tcx, Provenance> ) -> InterpResult<'tcx>

source§

fn handle_miri_resolve_frame_names( &mut self, abi: Abi, link_name: Symbol, args: &[OpTy<'tcx, Provenance>] ) -> InterpResult<'tcx>

source§

impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn have_module(&self, path: &[&str]) -> bool

Checks if the given crate/module exists.
source§

fn try_resolve_path( &self, path: &[&str], namespace: Namespace ) -> Option<Instance<'tcx>>

Gets an instance for a path; fails gracefully if the path does not exist.
source§

fn resolve_path(&self, path: &[&str], namespace: Namespace) -> Instance<'tcx>

Gets an instance for a path.
source§

fn eval_path_scalar(&self, path: &[&str]) -> Scalar<Provenance>

Evaluates the scalar at the specified path.
source§

fn eval_libc(&self, name: &str) -> Scalar<Provenance>

Helper function to get a libc constant as a Scalar.
source§

fn eval_libc_i32(&self, name: &str) -> i32

Helper function to get a libc constant as an i32.
source§

fn eval_libc_u32(&self, name: &str) -> u32

Helper function to get a libc constant as a u32.
source§

fn eval_windows(&self, module: &str, name: &str) -> Scalar<Provenance>

Helper function to get a windows constant as a Scalar.
source§

fn eval_windows_u32(&self, module: &str, name: &str) -> u32

Helper function to get a windows constant as a u32.
source§

fn eval_windows_u64(&self, module: &str, name: &str) -> u64

Helper function to get a windows constant as a u64.
source§

fn libc_ty_layout(&self, name: &str) -> TyAndLayout<'tcx>

Helper function to get the TyAndLayout of a libc type
source§

fn windows_ty_layout(&self, name: &str) -> TyAndLayout<'tcx>

Helper function to get the TyAndLayout of a windows type
source§

fn project_field_named<P: Projectable<'tcx, Provenance>>( &self, base: &P, name: &str ) -> InterpResult<'tcx, P>

Project to the given named field (which must be a struct or union type).
source§

fn write_int( &mut self, i: impl Into<i128>, dest: &impl Writeable<'tcx, Provenance> ) -> InterpResult<'tcx>

Write an int of the appropriate size to dest. The target type may be signed or unsigned, we try to do the right thing anyway. i128 can fit all integer types except for u128 so this method is fine for almost all integer types.
source§

fn write_int_fields( &mut self, values: &[i128], dest: &impl Writeable<'tcx, Provenance> ) -> InterpResult<'tcx>

Write the first N fields of the given place.
source§

fn write_int_fields_named( &mut self, values: &[(&str, i128)], dest: &impl Writeable<'tcx, Provenance> ) -> InterpResult<'tcx>

Write the given fields of the given place.
source§

fn write_null( &mut self, dest: &impl Writeable<'tcx, Provenance> ) -> InterpResult<'tcx>

Write a 0 of the appropriate size to dest.
source§

fn ptr_is_null( &self, ptr: Pointer<Option<Provenance>> ) -> InterpResult<'tcx, bool>

Test if this pointer equals 0.
source§

fn gen_random( &mut self, ptr: Pointer<Option<Provenance>>, len: u64 ) -> InterpResult<'tcx>

Generate some random bytes, and write them to dest.
source§

fn call_function( &mut self, f: Instance<'tcx>, caller_abi: Abi, args: &[Immediate<Provenance>], dest: Option<&PlaceTy<'tcx, Provenance>>, stack_pop: StackPopCleanup ) -> InterpResult<'tcx>

Call a function: Push the stack frame and pass the arguments. For now, arguments must be scalars (so that the caller does not have to know the layout). Read more
source§

fn visit_freeze_sensitive( &self, place: &MPlaceTy<'tcx, Provenance>, size: Size, action: impl FnMut(AllocRange, bool) -> InterpResult<'tcx> ) -> InterpResult<'tcx>

Visits the memory covered by place, sensitive to freezing: the 2nd parameter of action will be true if this is frozen, false if this is in an UnsafeCell. The range is relative to place.
source§

fn check_no_isolation(&self, name: &str) -> InterpResult<'tcx>

Helper function used inside the shims of foreign functions to check that isolation is disabled. It returns an error using the name of the foreign function if this is not the case.
source§

fn reject_in_isolation( &self, op_name: &str, reject_with: RejectOpWith ) -> InterpResult<'tcx>

Helper function used inside the shims of foreign functions which reject the op when isolation is enabled. It is used to print a warning/backtrace about the rejection.
source§

fn assert_target_os(&self, target_os: &str, name: &str)

Helper function used inside the shims of foreign functions to assert that the target OS is target_os. It panics showing a message with the name of the foreign function if this is not the case.
source§

fn assert_target_os_is_unix(&self, name: &str)

Helper function used inside the shims of foreign functions to assert that the target OS is part of the UNIX family. It panics showing a message with the name of the foreign function if this is not the case.
source§

fn last_error_place(&mut self) -> InterpResult<'tcx, MPlaceTy<'tcx, Provenance>>

Get last error variable as a place, lazily allocating thread-local storage for it if necessary.
source§

fn set_last_error(&mut self, scalar: Scalar<Provenance>) -> InterpResult<'tcx>

Sets the last error variable.
source§

fn get_last_error(&mut self) -> InterpResult<'tcx, Scalar<Provenance>>

Gets the last error variable.
source§

fn io_error_to_errnum( &self, err_kind: ErrorKind ) -> InterpResult<'tcx, Scalar<Provenance>>

This function tries to produce the most similar OS error from the std::io::ErrorKind as a platform-specific errnum.
source§

fn try_errnum_to_io_error( &self, errnum: Scalar<Provenance> ) -> InterpResult<'tcx, Option<ErrorKind>>

The inverse of io_error_to_errnum.
source§

fn set_last_error_from_io_error( &mut self, err_kind: ErrorKind ) -> InterpResult<'tcx>

Sets the last OS error using a std::io::ErrorKind.
source§

fn try_unwrap_io_result<T: From<i32>>( &mut self, result: Result<T> ) -> InterpResult<'tcx, T>

Helper function that consumes an std::io::Result<T> and returns an InterpResult<'tcx,T>::Ok instead. In case the result is an error, this function returns Ok(-1) and sets the last OS error accordingly. Read more
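
A standalone sketch of that “errors become -1 plus the last OS error” convention, with a placeholder context type and a stand-in error mapping (assumptions, not Miri's code):

```rust
// Standalone sketch of the "unwrap an io::Result into -1 + last OS error"
// convention described above. The error-to-errno mapping is a stand-in.
use std::io;

struct Ctx {
    last_error: i32,
}

impl Ctx {
    fn try_unwrap_io_result<T: From<i32>>(&mut self, result: io::Result<T>) -> T {
        match result {
            Ok(v) => v,
            Err(err) => {
                // A real implementation maps `err.kind()` to the target's errno values.
                self.last_error = err.raw_os_error().unwrap_or(5 /* EIO, placeholder */);
                T::from(-1)
            }
        }
    }
}

fn main() {
    let mut cx = Ctx { last_error: 0 };
    let ok: i64 = cx.try_unwrap_io_result(Ok(42i64));
    assert_eq!(ok, 42);
    let err: i64 = cx.try_unwrap_io_result(Err(io::Error::from_raw_os_error(2)));
    assert_eq!(err, -1);
    assert_eq!(cx.last_error, 2);
}
```
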
source§

fn deref_pointer_as( &self, op: &impl Readable<'tcx, Provenance>, layout: TyAndLayout<'tcx> ) -> InterpResult<'tcx, MPlaceTy<'tcx, Provenance>>

Dereference a pointer operand to a place using layout instead of the pointer’s declared type
source§

fn deref_pointer_unchecked( &self, val: &ImmTy<'tcx, Provenance>, layout: TyAndLayout<'tcx> ) -> InterpResult<'tcx, MPlaceTy<'tcx, Provenance>>

Dereference a pointer without checking that the place is dereferenceable.
source§

fn deref_pointer_and_offset( &self, op: &impl Readable<'tcx, Provenance>, offset: u64, base_layout: TyAndLayout<'tcx>, value_layout: TyAndLayout<'tcx> ) -> InterpResult<'tcx, MPlaceTy<'tcx, Provenance>>

Calculates the MPlaceTy given the offset and layout of an access on an operand
source§

fn deref_pointer_and_read( &self, op: &impl Readable<'tcx, Provenance>, offset: u64, base_layout: TyAndLayout<'tcx>, value_layout: TyAndLayout<'tcx> ) -> InterpResult<'tcx, Scalar<Provenance>>

source§

fn deref_pointer_and_write( &mut self, op: &impl Readable<'tcx, Provenance>, offset: u64, value: impl Into<Scalar<Provenance>>, base_layout: TyAndLayout<'tcx>, value_layout: TyAndLayout<'tcx> ) -> InterpResult<'tcx, ()>

source§

fn read_timespec( &mut self, tp: &MPlaceTy<'tcx, Provenance> ) -> InterpResult<'tcx, Option<Duration>>

Parse a timespec struct and return it as a std::time::Duration. It returns None if the value in the timespec struct is invalid. Some libc functions will return EINVAL in this case.
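
A sketch of that validation, assuming the usual libc rules (tv_sec must be non-negative, tv_nsec must lie in 0..1_000_000_000):

```rust
// Sketch of the timespec validation described above, assuming the usual libc
// rules: tv_sec must be non-negative and tv_nsec must be in 0..1_000_000_000.
use std::time::Duration;

fn timespec_to_duration(tv_sec: i64, tv_nsec: i64) -> Option<Duration> {
    if tv_sec < 0 || !(0..1_000_000_000).contains(&tv_nsec) {
        return None; // callers would report EINVAL here
    }
    Some(Duration::new(tv_sec as u64, tv_nsec as u32))
}

fn main() {
    assert_eq!(timespec_to_duration(1, 500), Some(Duration::new(1, 500)));
    assert_eq!(timespec_to_duration(-1, 0), None);
    assert_eq!(timespec_to_duration(0, 1_000_000_000), None);
}
```
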
source§

fn read_c_str<'a>( &'a self, ptr: Pointer<Option<Provenance>> ) -> InterpResult<'tcx, &'a [u8]>where 'tcx: 'a, 'mir: 'a,

Read a sequence of bytes until the first null terminator.
source§

fn write_c_str( &mut self, c_str: &[u8], ptr: Pointer<Option<Provenance>>, size: u64 ) -> InterpResult<'tcx, (bool, u64)>

Helper function to write a sequence of bytes with an added null-terminator, which is what the Unix APIs usually handle. This function returns Ok((false, length)) without trying to write if size is not large enough to fit the contents of c_str plus a null terminator. It returns Ok((true, length)) if the writing process was successful. The string length returned does include the null terminator.
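
A standalone sketch of that contract: refuse to write if the buffer is too small, otherwise append the null terminator and report the length including it (a simplified model, not Miri's memory-writing code):

```rust
// Standalone sketch of the write_c_str contract described above: refuse to
// write when the buffer is too small, otherwise append a null terminator and
// report the length *including* that terminator.
fn write_c_str(c_str: &[u8], buf: &mut [u8]) -> (bool, u64) {
    let needed = c_str.len() + 1; // contents plus null terminator
    if buf.len() < needed {
        return (false, needed as u64); // nothing written
    }
    buf[..c_str.len()].copy_from_slice(c_str);
    buf[c_str.len()] = 0;
    (true, needed as u64)
}

fn main() {
    let mut buf = [0xffu8; 8];
    assert_eq!(write_c_str(b"hi", &mut buf), (true, 3));
    assert_eq!(&buf[..3], b"hi\0");
    let mut tiny = [0u8; 2];
    assert_eq!(write_c_str(b"hi", &mut tiny), (false, 3));
}
```
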
source§

fn read_wide_str( &self, ptr: Pointer<Option<Provenance>> ) -> InterpResult<'tcx, Vec<u16>>

Read a sequence of u16 until the first null terminator.
source§

fn write_wide_str( &mut self, wide_str: &[u16], ptr: Pointer<Option<Provenance>>, size: u64 ) -> InterpResult<'tcx, (bool, u64)>

Helper function to write a sequence of u16 with an added 0x0000-terminator, which is what the Windows APIs usually handle. This function returns Ok((false, length)) without trying to write if size is not large enough to fit the contents of wide_str plus a null terminator. It returns Ok((true, length)) if the writing process was successful. The string length returned does include the null terminator. Length is measured in units of u16.
source§

fn check_abi<'a>(&self, abi: Abi, exp_abi: Abi) -> InterpResult<'a, ()>

Check that the ABI is what we expect.
source§

fn frame_in_std(&self) -> bool

source§

fn handle_unsupported<S: AsRef<str>>( &mut self, error_msg: S ) -> InterpResult<'tcx, ()>

Handler that should be called when unsupported functionality is encountered. This function will either panic within the context of the emulated application or return an error in the Miri process context Read more
source§

fn check_abi_and_shim_symbol_clash( &mut self, abi: Abi, exp_abi: Abi, link_name: Symbol ) -> InterpResult<'tcx, ()>

source§

fn check_shim<'a, const N: usize>( &mut self, abi: Abi, exp_abi: Abi, link_name: Symbol, args: &'a [OpTy<'tcx, Provenance>] ) -> InterpResult<'tcx, &'a [OpTy<'tcx, Provenance>; N]>where &'a [OpTy<'tcx, Provenance>; N]: TryFrom<&'a [OpTy<'tcx, Provenance>]>,

source§

fn mark_immutable(&mut self, mplace: &MPlaceTy<'tcx, Provenance>)

Mark a machine allocation that was just created as immutable.
source§

fn float_to_int_checked<F>( &self, f: F, cast_to: TyAndLayout<'tcx>, round: Round ) -> Option<ImmTy<'tcx, Provenance>>where F: Float + Into<Scalar<Provenance>>,

Converts f to integer type cast_to after rounding with mode round. Returns None if f is NaN or out of range.
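
A rough standalone analogue of that checked conversion, specialized to f64 → i32 and ignoring the rounding-mode parameter (a sketch, not Miri's float handling):

```rust
// Rough standalone analogue of the checked float-to-int conversion described
// above, specialized to f64 -> i32: NaN and out-of-range values yield None.
fn f64_to_i32_checked(f: f64) -> Option<i32> {
    if f.is_nan() {
        return None;
    }
    let rounded = f.round(); // stand-in for the requested rounding mode
    if rounded < i32::MIN as f64 || rounded > i32::MAX as f64 {
        return None;
    }
    Some(rounded as i32)
}

fn main() {
    assert_eq!(f64_to_i32_checked(2.7), Some(3));
    assert_eq!(f64_to_i32_checked(f64::NAN), None);
    assert_eq!(f64_to_i32_checked(1e12), None);
}
```
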
source§

fn get_twice_wide_int_ty(&self, ty: Ty<'tcx>) -> Ty<'tcx>

Returns an integer type that is twice as wide as ty.
source§

impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn min_align(&self, size: u64, kind: MiriMemoryKind) -> Align

Returns the minimum alignment for the target architecture for allocations of the given size.
source§

fn malloc( &mut self, size: u64, zero_init: bool, kind: MiriMemoryKind ) -> InterpResult<'tcx, Pointer<Option<Provenance>>>

source§

fn free( &mut self, ptr: Pointer<Option<Provenance>>, kind: MiriMemoryKind ) -> InterpResult<'tcx>

source§

fn realloc( &mut self, old_ptr: Pointer<Option<Provenance>>, new_size: u64, kind: MiriMemoryKind ) -> InterpResult<'tcx, Pointer<Option<Provenance>>>

source§

fn lookup_exported_symbol( &mut self, link_name: Symbol ) -> InterpResult<'tcx, Option<(&'mir Body<'tcx>, Instance<'tcx>)>>

Lookup the body of a function that has link_name as the symbol name.
source§

fn read_byte_slice<'i>( &'i self, bytes: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, &'i [u8]>where 'mir: 'i,

Read bytes from a (ptr, len) argument
source§

fn emulate_foreign_item( &mut self, def_id: DefId, abi: Abi, args: &[OpTy<'tcx, Provenance>], dest: &PlaceTy<'tcx, Provenance>, ret: Option<BasicBlock>, unwind: UnwindAction ) -> InterpResult<'tcx, Option<(&'mir Body<'tcx>, Instance<'tcx>)>>

Emulates calling a foreign item, failing if the item is not supported. This function will handle goto_block if needed. Returns Ok(None) if the foreign item was completely handled by this function. Returns Ok(Some(body)) if processing the foreign item is delegated to another function.
source§

fn emulate_allocator( &mut self, default: impl FnOnce(&mut MiriInterpCx<'mir, 'tcx>) -> InterpResult<'tcx> ) -> InterpResult<'tcx, EmulateByNameResult<'mir, 'tcx>>

Emulates calling the internal __rust_* allocator functions.
source§

fn emulate_foreign_item_by_name( &mut self, link_name: Symbol, abi: Abi, args: &[OpTy<'tcx, Provenance>], dest: &PlaceTy<'tcx, Provenance> ) -> InterpResult<'tcx, EmulateByNameResult<'mir, 'tcx>>

Emulates calling a foreign item using its name.
source§

fn check_alloc_request(size: u64, align: u64) -> InterpResult<'tcx>

Check some basic requirements for this allocation request: non-zero size, power-of-two alignment.
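
A minimal standalone version of those two checks (a sketch):

```rust
// Minimal standalone version of the allocation-request checks described
// above: the size must be non-zero and the alignment a power of two.
fn check_alloc_request(size: u64, align: u64) -> Result<(), String> {
    if size == 0 {
        return Err("allocation size must be non-zero".to_string());
    }
    if !align.is_power_of_two() {
        return Err(format!("alignment {} is not a power of two", align));
    }
    Ok(())
}

fn main() {
    assert!(check_alloc_request(16, 8).is_ok());
    assert!(check_alloc_request(0, 8).is_err());
    assert!(check_alloc_request(16, 3).is_err());
}
```
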
source§

impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn call_dlsym( &mut self, dlsym: Dlsym, _args: &[OpTy<'tcx, Provenance>], _dest: &PlaceTy<'tcx, Provenance>, ret: Option<BasicBlock> ) -> InterpResult<'tcx>

source§

impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn emulate_atomic_intrinsic( &mut self, intrinsic_name: &str, args: &[OpTy<'tcx, Provenance>], dest: &PlaceTy<'tcx, Provenance> ) -> InterpResult<'tcx>

Calls the atomic intrinsic intrinsic; the atomic_ prefix has already been removed.
source§

impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn find_mir_or_eval_fn( &mut self, instance: Instance<'tcx>, abi: Abi, args: &[FnArg<'tcx, Provenance>], dest: &PlaceTy<'tcx, Provenance>, ret: Option<BasicBlock>, unwind: UnwindAction ) -> InterpResult<'tcx, Option<(&'mir Body<'tcx>, Instance<'tcx>)>>

source§

fn align_offset( &mut self, ptr_op: &OpTy<'tcx, Provenance>, align_op: &OpTy<'tcx, Provenance>, dest: &PlaceTy<'tcx, Provenance>, ret: Option<BasicBlock>, unwind: UnwindAction ) -> InterpResult<'tcx, bool>

Returns true if the computation was performed, and false if we should just evaluate the actual MIR of align_offset.
source§

impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn clock_gettime( &mut self, clk_id_op: &OpTy<'tcx, Provenance>, tp_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, Scalar<Provenance>>

source§

fn gettimeofday( &mut self, tv_op: &OpTy<'tcx, Provenance>, tz_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, i32>

source§

fn GetSystemTimeAsFileTime( &mut self, LPFILETIME_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx>

source§

fn QueryPerformanceCounter( &mut self, lpPerformanceCount_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, Scalar<Provenance>>

source§

fn QueryPerformanceFrequency( &mut self, lpFrequency_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, Scalar<Provenance>>

source§

fn mach_absolute_time(&self) -> InterpResult<'tcx, Scalar<Provenance>>

source§

fn mach_timebase_info( &mut self, info_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, Scalar<Provenance>>

source§

fn nanosleep( &mut self, req_op: &OpTy<'tcx, Provenance>, _rem: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, i32>

source§

fn Sleep(&mut self, timeout: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx>

source§

impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn validate_overlapping_atomic( &self, place: &MPlaceTy<'tcx, Provenance> ) -> InterpResult<'tcx>

source§

fn buffered_atomic_rmw( &mut self, new_val: Scalar<Provenance>, place: &MPlaceTy<'tcx, Provenance>, atomic: AtomicRwOrd, init: Scalar<Provenance> ) -> InterpResult<'tcx>

source§

fn buffered_atomic_read( &self, place: &MPlaceTy<'tcx, Provenance>, atomic: AtomicReadOrd, latest_in_mo: Scalar<Provenance>, validate: impl FnOnce() -> InterpResult<'tcx> ) -> InterpResult<'tcx, Scalar<Provenance>>

source§

fn buffered_atomic_write( &mut self, val: Scalar<Provenance>, dest: &MPlaceTy<'tcx, Provenance>, atomic: AtomicWriteOrd, init: Scalar<Provenance> ) -> InterpResult<'tcx>

source§

fn perform_read_on_buffered_latest( &self, place: &MPlaceTy<'tcx, Provenance>, atomic: AtomicReadOrd, init: Scalar<Provenance> ) -> InterpResult<'tcx>

The caller should never need to consult the store buffer for the latest value. This function is used exclusively for a failed atomic_compare_exchange_scalar to perform load_impl on the latest store element.
source§

impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn AcquireSRWLockExclusive( &mut self, lock_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx>

source§

fn TryAcquireSRWLockExclusive( &mut self, lock_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, Scalar<Provenance>>

source§

fn ReleaseSRWLockExclusive( &mut self, lock_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx>

source§

fn AcquireSRWLockShared( &mut self, lock_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx>

source§

fn TryAcquireSRWLockShared( &mut self, lock_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, Scalar<Provenance>>

source§

fn ReleaseSRWLockShared( &mut self, lock_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx>

source§

fn InitOnceBeginInitialize( &mut self, init_once_op: &OpTy<'tcx, Provenance>, flags_op: &OpTy<'tcx, Provenance>, pending_op: &OpTy<'tcx, Provenance>, context_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, Scalar<Provenance>>

source§

fn InitOnceComplete( &mut self, init_once_op: &OpTy<'tcx, Provenance>, flags_op: &OpTy<'tcx, Provenance>, context_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, Scalar<Provenance>>

source§

fn WaitOnAddress( &mut self, ptr_op: &OpTy<'tcx, Provenance>, compare_op: &OpTy<'tcx, Provenance>, size_op: &OpTy<'tcx, Provenance>, timeout_op: &OpTy<'tcx, Provenance>, dest: &PlaceTy<'tcx, Provenance> ) -> InterpResult<'tcx>

source§

fn WakeByAddressSingle( &mut self, ptr_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx>

source§

fn SleepConditionVariableSRW( &mut self, condvar_op: &OpTy<'tcx, Provenance>, lock_op: &OpTy<'tcx, Provenance>, timeout_op: &OpTy<'tcx, Provenance>, flags_op: &OpTy<'tcx, Provenance>, dest: &PlaceTy<'tcx, Provenance> ) -> InterpResult<'tcx, Scalar<Provenance>>

source§

fn WakeConditionVariable( &mut self, condvar_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx>

source§

fn WakeAllConditionVariable( &mut self, condvar_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx>

source§

impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

Evaluation context extensions.

source§

fn read_scalar_atomic( &self, place: &MPlaceTy<'tcx, Provenance>, atomic: AtomicReadOrd ) -> InterpResult<'tcx, Scalar<Provenance>>

Perform an atomic read operation at the memory location.
source§

fn write_scalar_atomic( &mut self, val: Scalar<Provenance>, dest: &MPlaceTy<'tcx, Provenance>, atomic: AtomicWriteOrd ) -> InterpResult<'tcx>

Perform an atomic write operation at the memory location.
source§

fn atomic_op_immediate( &mut self, place: &MPlaceTy<'tcx, Provenance>, rhs: &ImmTy<'tcx, Provenance>, op: BinOp, neg: bool, atomic: AtomicRwOrd ) -> InterpResult<'tcx, ImmTy<'tcx, Provenance>>

Perform an atomic operation on a memory location.
source§

fn atomic_exchange_scalar( &mut self, place: &MPlaceTy<'tcx, Provenance>, new: Scalar<Provenance>, atomic: AtomicRwOrd ) -> InterpResult<'tcx, Scalar<Provenance>>

Perform an atomic exchange with a memory place and a new scalar value, the old value is returned.
source§

fn atomic_min_max_scalar( &mut self, place: &MPlaceTy<'tcx, Provenance>, rhs: ImmTy<'tcx, Provenance>, min: bool, atomic: AtomicRwOrd ) -> InterpResult<'tcx, ImmTy<'tcx, Provenance>>

Perform a conditional atomic exchange with a memory place and a new scalar value, the old value is returned.
source§

fn atomic_compare_exchange_scalar( &mut self, place: &MPlaceTy<'tcx, Provenance>, expect_old: &ImmTy<'tcx, Provenance>, new: Scalar<Provenance>, success: AtomicRwOrd, fail: AtomicReadOrd, can_fail_spuriously: bool ) -> InterpResult<'tcx, Immediate<Provenance>>

Perform an atomic compare and exchange at a given memory location. On success an atomic RMW operation is performed and on failure only an atomic read occurs. If can_fail_spuriously is true, then we treat it as a “compare_exchange_weak” operation, and some portion of the time fail even when the values are actually identical.
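
The can_fail_spuriously case corresponds to the standard library's compare_exchange_weak, which is normally used inside a retry loop. An illustration using std's atomics rather than Miri internals:

```rust
// Illustration of the compare-exchange semantics described above, using the
// standard library's atomics (not Miri internals): the weak variant may fail
// spuriously, so it is normally used inside a retry loop.
use std::sync::atomic::{AtomicUsize, Ordering};

fn main() {
    let value = AtomicUsize::new(5);

    // Strong compare-exchange: fails only if the current value differs.
    assert_eq!(
        value.compare_exchange(5, 10, Ordering::AcqRel, Ordering::Acquire),
        Ok(5)
    );

    // Weak compare-exchange in a loop, the usual pattern for spurious failures.
    let mut current = value.load(Ordering::Relaxed);
    loop {
        match value.compare_exchange_weak(current, current + 1, Ordering::AcqRel, Ordering::Acquire) {
            Ok(_) => break,
            Err(actual) => current = actual,
        }
    }
    assert_eq!(value.load(Ordering::Relaxed), 11);
}
```
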
source§

fn atomic_fence(&mut self, atomic: AtomicFenceOrd) -> InterpResult<'tcx>

Update the data-race detector for an atomic fence on the current thread.
source§

fn allow_data_races_all_threads_done(&mut self)

After all threads are done running, this allows data races to occur for subsequent ‘administrative’ machine accesses (that logically happen outside of the Abstract Machine).
source§

impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn epoll_create1( &mut self, flags: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, Scalar<Provenance>>

This function returns a file descriptor referring to the new Epoll instance. This file descriptor is used for all subsequent calls to the epoll interface. If the flags argument is 0, then this function is the same as epoll_create(). Read more
source§

fn epoll_ctl( &mut self, epfd: &OpTy<'tcx, Provenance>, op: &OpTy<'tcx, Provenance>, fd: &OpTy<'tcx, Provenance>, event: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, Scalar<Provenance>>

This function performs control operations on the Epoll instance referred to by the file descriptor epfd. It requests that the operation op be performed for the target file descriptor, fd. Read more
source§

fn epoll_wait( &mut self, epfd: &OpTy<'tcx, Provenance>, events: &OpTy<'tcx, Provenance>, maxevents: &OpTy<'tcx, Provenance>, timeout: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, Scalar<Provenance>>

The epoll_wait() system call waits for events on the Epoll instance referred to by the file descriptor epfd. The buffer pointed to by events is used to return information from the ready list about file descriptors in the interest list that have some events available. Up to maxevents are returned by epoll_wait(); the maxevents argument must be greater than zero. The timeout argument specifies the number of milliseconds that epoll_wait() will block. Time is measured against the CLOCK_MONOTONIC clock. A call to epoll_wait() will block until either:

• a file descriptor delivers an event;
• the call is interrupted by a signal handler; or
• the timeout expires.

Note that the timeout interval will be rounded up to the system clock granularity, and kernel scheduling delays mean that the blocking interval may overrun by a small amount. Specifying a timeout of -1 causes epoll_wait() to block indefinitely, while specifying a timeout equal to zero causes epoll_wait() to return immediately, even if no events are available. Read more
source§

fn eventfd( &mut self, val: &OpTy<'tcx, Provenance>, flags: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, Scalar<Provenance>>

This function creates an Event that is used as an event wait/notify mechanism by user-space applications, and by the kernel to notify user-space applications of events. The Event contains an u64 counter maintained by the kernel. The counter is initialized with the value specified in the initval argument. Read more
source§

fn socketpair( &mut self, domain: &OpTy<'tcx, Provenance>, type_: &OpTy<'tcx, Provenance>, protocol: &OpTy<'tcx, Provenance>, sv: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, Scalar<Provenance>>

Currently this function creates new SocketPairs without specifying the domain, type, or protocol of the new socket and these are stored in the socket values sv argument. Read more
source§

impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn retag_ptr_value( &mut self, kind: RetagKind, val: &ImmTy<'tcx, Provenance> ) -> InterpResult<'tcx, ImmTy<'tcx, Provenance>>

source§

fn retag_place_contents( &mut self, kind: RetagKind, place: &PlaceTy<'tcx, Provenance> ) -> InterpResult<'tcx>

source§

fn protect_place( &mut self, place: &MPlaceTy<'tcx, Provenance> ) -> InterpResult<'tcx>

source§

fn expose_tag(&mut self, alloc_id: AllocId, tag: BorTag) -> InterpResult<'tcx>

source§

fn give_pointer_debug_name( &mut self, ptr: Pointer<Option<Provenance>>, nth_parent: u8, name: &str ) -> InterpResult<'tcx>

source§

fn print_borrow_state( &mut self, alloc_id: AllocId, show_unnamed: bool ) -> InterpResult<'tcx>

source§

impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn invalid_handle(&mut self, function_name: &str) -> InterpResult<'tcx, !>

source§

fn CloseHandle( &mut self, handle_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx>

source§

impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn pthread_create( &mut self, thread: &OpTy<'tcx, Provenance>, _attr: &OpTy<'tcx, Provenance>, start_routine: &OpTy<'tcx, Provenance>, arg: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, i32>

source§

fn pthread_join( &mut self, thread: &OpTy<'tcx, Provenance>, retval: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, i32>

source§

fn pthread_detach( &mut self, thread: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, i32>

source§

fn pthread_self(&mut self) -> InterpResult<'tcx, Scalar<Provenance>>

source§

fn pthread_setname_np( &mut self, thread: Scalar<Provenance>, name: Scalar<Provenance>, max_name_len: usize ) -> InterpResult<'tcx, Scalar<Provenance>>

Set the name of the current thread. max_name_len is the maximal length of the name including the null terminator.
source§

fn pthread_getname_np( &mut self, thread: Scalar<Provenance>, name_out: Scalar<Provenance>, len: Scalar<Provenance> ) -> InterpResult<'tcx, Scalar<Provenance>>

source§

fn sched_yield(&mut self) -> InterpResult<'tcx, i32>

source§

impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn call_dlsym( &mut self, dlsym: Dlsym, abi: Abi, args: &[OpTy<'tcx, Provenance>], dest: &PlaceTy<'tcx, Provenance>, ret: Option<BasicBlock> ) -> InterpResult<'tcx>

source§

impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn mmap( &mut self, addr: &OpTy<'tcx, Provenance>, length: &OpTy<'tcx, Provenance>, prot: &OpTy<'tcx, Provenance>, flags: &OpTy<'tcx, Provenance>, fd: &OpTy<'tcx, Provenance>, offset: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, Scalar<Provenance>>

source§

fn munmap( &mut self, addr: &OpTy<'tcx, Provenance>, length: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, Scalar<Provenance>>

source§

impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn call_dlsym( &mut self, dlsym: Dlsym, abi: Abi, args: &[OpTy<'tcx, Provenance>], dest: &PlaceTy<'tcx, Provenance>, ret: Option<BasicBlock> ) -> InterpResult<'tcx>

source§

impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn open(&mut self, args: &[OpTy<'tcx, Provenance>]) -> InterpResult<'tcx, i32>

source§

fn fcntl(&mut self, args: &[OpTy<'tcx, Provenance>]) -> InterpResult<'tcx, i32>

source§

fn close( &mut self, fd_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, Scalar<Provenance>>

source§

fn handle_not_found<T: From<i32>>(&mut self) -> InterpResult<'tcx, T>

Function used when a handle is not found inside FileHandler. It returns Ok(-1) and sets the last OS error to libc::EBADF (invalid file descriptor). This function uses T: From<i32> instead of i32 directly because some fs functions return different integer types (like read, which returns an i64).
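
A standalone sketch of that T: From<i32> trick, with a placeholder context and EBADF hard-coded to 9 purely for illustration:

```rust
// Standalone sketch of the `T: From<i32>` trick described above: the same
// helper can produce -1 as an i32, i64, etc. EBADF is hard-coded to 9 here
// purely for illustration; the real value comes from the target's libc.
struct Ctx {
    last_error: i32,
}

impl Ctx {
    fn handle_not_found<T: From<i32>>(&mut self) -> T {
        self.last_error = 9; // EBADF (illustrative constant)
        T::from(-1)
    }
}

fn main() {
    let mut cx = Ctx { last_error: 0 };
    let as_i32: i32 = cx.handle_not_found();
    let as_i64: i64 = cx.handle_not_found();
    assert_eq!((as_i32, as_i64, cx.last_error), (-1, -1, 9));
}
```
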
source§

fn read( &mut self, fd: i32, buf: Pointer<Option<Provenance>>, count: u64 ) -> InterpResult<'tcx, i64>

source§

fn write( &mut self, fd: i32, buf: Pointer<Option<Provenance>>, count: u64 ) -> InterpResult<'tcx, i64>

source§

fn lseek64( &mut self, fd_op: &OpTy<'tcx, Provenance>, offset_op: &OpTy<'tcx, Provenance>, whence_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, Scalar<Provenance>>

source§

fn macos_stat( &mut self, path_op: &OpTy<'tcx, Provenance>, buf_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, Scalar<Provenance>>

source§

fn macos_lstat( &mut self, path_op: &OpTy<'tcx, Provenance>, buf_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, Scalar<Provenance>>

source§

fn macos_fstat( &mut self, fd_op: &OpTy<'tcx, Provenance>, buf_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, Scalar<Provenance>>

source§

fn linux_statx( &mut self, dirfd_op: &OpTy<'tcx, Provenance>, pathname_op: &OpTy<'tcx, Provenance>, flags_op: &OpTy<'tcx, Provenance>, mask_op: &OpTy<'tcx, Provenance>, statxbuf_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, i32>

source§

fn rename( &mut self, oldpath_op: &OpTy<'tcx, Provenance>, newpath_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, i32>

source§

fn mkdir( &mut self, path_op: &OpTy<'tcx, Provenance>, mode_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, i32>

source§

fn rmdir(&mut self, path_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx, i32>

source§

fn opendir( &mut self, name_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, Scalar<Provenance>>

source§

fn linux_readdir64( &mut self, dirp_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, Scalar<Provenance>>

source§

fn macos_readdir_r( &mut self, dirp_op: &OpTy<'tcx, Provenance>, entry_op: &OpTy<'tcx, Provenance>, result_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, Scalar<Provenance>>

source§

fn closedir( &mut self, dirp_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, i32>

source§

fn ftruncate64( &mut self, fd_op: &OpTy<'tcx, Provenance>, length_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, Scalar<Provenance>>

source§

fn fsync(&mut self, fd_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx, i32>

source§

fn fdatasync( &mut self, fd_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, i32>

source§

fn sync_file_range( &mut self, fd_op: &OpTy<'tcx, Provenance>, offset_op: &OpTy<'tcx, Provenance>, nbytes_op: &OpTy<'tcx, Provenance>, flags_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, Scalar<Provenance>>

source§

fn isatty( &mut self, miri_fd: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, Scalar<Provenance>>

source§

fn realpath( &mut self, path_op: &OpTy<'tcx, Provenance>, processed_path_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, Scalar<Provenance>>

source§

fn mkstemp( &mut self, template_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, i32>
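Taken together, these shims back the standard library's file APIs when the interpreted program is allowed to touch the host file system. A hedged example of guest code that exercises several of them (it needs host file access, e.g. cargo miri run with -Zmiri-disable-isolation); the shim names in the comments are approximate mappings, not a guaranteed call trace:

```rust
// Hypothetical guest program exercising the file-related shims listed above.
use std::fs::File;
use std::io::{Read, Seek, SeekFrom, Write};

fn main() -> std::io::Result<()> {
    let path = std::env::temp_dir().join("miri_demo.txt");

    let mut f = File::create(&path)?; // roughly: open(..., O_CREAT | O_WRONLY | O_TRUNC, ...)
    f.write_all(b"hello")?;           // roughly: write(fd, buf, 5)
    f.sync_all()?;                    // roughly: fsync(fd) (platform-dependent)
    drop(f);                          // roughly: close(fd)

    let mut f = File::open(&path)?;   // roughly: open(..., O_RDONLY)
    f.seek(SeekFrom::Start(1))?;      // roughly: lseek64(fd, 1, SEEK_SET)
    let mut s = String::new();
    f.read_to_string(&mut s)?;        // roughly: read(fd, buf, count)
    assert_eq!(s, "ello");

    std::fs::remove_file(&path)?;     // removes the temporary file again
    Ok(())
}
```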

source§

impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn emulate_x86_sse_intrinsic( &mut self, link_name: Symbol, abi: Abi, args: &[OpTy<'tcx, Provenance>], dest: &PlaceTy<'tcx, Provenance> ) -> InterpResult<'tcx, EmulateByNameResult<'mir, 'tcx>>

source§

impl<'mir, 'tcx> EvalContextExt<'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn binary_ptr_op( &self, bin_op: BinOp, left: &ImmTy<'tcx, Provenance>, right: &ImmTy<'tcx, Provenance> ) -> InterpResult<'tcx, (ImmTy<'tcx, Provenance>, bool)>

source§

impl<'mir, 'tcx: 'mir> EvalContextExtPriv<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn init_once_observe_attempt(&mut self, id: InitOnceId)

Synchronize with the previous initialization attempt of an InitOnce.
source§

fn init_once_wake_waiter( &mut self, id: InitOnceId, waiter: InitOnceWaiter<'mir, 'tcx> ) -> InterpResult<'tcx>

source§

impl<'mir, 'tcx: 'mir> EvalContextExtPriv<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn get_or_create_id<Id: SyncId>( &mut self, next_id: Id, lock_op: &OpTy<'tcx, Provenance>, lock_layout: TyAndLayout<'tcx>, offset: u64 ) -> InterpResult<'tcx, Option<Id>>

Lazily initialize the ID of this Miri sync structure (a stored value of 0 indicates that no ID has been assigned yet).
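A standalone sketch of that lazy-initialization pattern, with made-up types: a zero stored in the guest object means no ID has been assigned, and the first access allocates a fresh non-zero ID and writes it back so later lookups find the same one.

```rust
// Sketch only: "0 means uninitialized" lazy ID assignment.
struct SyncObject {
    id_field: u32, // 0 = not yet assigned
}

fn get_or_create_id(obj: &mut SyncObject, next_id: &mut u32) -> u32 {
    if obj.id_field == 0 {
        // First use: hand out a fresh, non-zero ID and store it in the object.
        *next_id += 1;
        obj.id_field = *next_id;
    }
    obj.id_field
}

fn main() {
    let mut counter = 0;
    let mut lock = SyncObject { id_field: 0 };
    let a = get_or_create_id(&mut lock, &mut counter);
    let b = get_or_create_id(&mut lock, &mut counter);
    assert_eq!(a, b); // the same object keeps its ID across calls
}
```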
source§

fn rwlock_dequeue_and_lock_reader(&mut self, id: RwLockId) -> bool

Take a reader out of the queue waiting for the lock. Returns true if some thread got the rwlock.
source§

fn rwlock_dequeue_and_lock_writer(&mut self, id: RwLockId) -> bool

Take the writer out of the queue waiting for the lock. Returns true if some thread got the rwlock.
source§

fn mutex_dequeue_and_lock(&mut self, id: MutexId) -> bool

Take a thread out of the queue waiting for the mutex, and lock the mutex for it. Returns true if some thread has the mutex now.
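A standalone sketch of the dequeue-and-lock step these three methods describe, shown here for a mutex and using illustrative types rather than Miri's internal structures: pop the next waiter, if any, and hand it the lock directly.

```rust
// Sketch only: wake the next queued thread by transferring ownership to it.
use std::collections::VecDeque;

type ThreadId = u32;

struct Mutex {
    owner: Option<ThreadId>,
    wait_queue: VecDeque<ThreadId>,
}

fn mutex_dequeue_and_lock(mutex: &mut Mutex) -> bool {
    if let Some(thread) = mutex.wait_queue.pop_front() {
        mutex.owner = Some(thread); // hand ownership directly to the woken thread
        true
    } else {
        false // nobody was waiting
    }
}

fn main() {
    let mut m = Mutex { owner: None, wait_queue: VecDeque::from([7, 8]) };
    assert!(mutex_dequeue_and_lock(&mut m));
    assert_eq!(m.owner, Some(7));
}
```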
source§

impl<'mir, 'tcx> EvalContextExtPriv<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn reacquire_cond_lock( &mut self, thread: ThreadId, lock: RwLockId, mode: RwLockMode ) -> InterpResult<'tcx>

Try to reacquire the lock associated with the condition variable after we were signaled.
source§

fn srwlock_get_id( &mut self, rwlock_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, RwLockId>

source§

fn init_once_get_id( &mut self, init_once_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, InitOnceId>

source§

fn condvar_get_id( &mut self, condvar_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, CondvarId>

source§

impl<'mir, 'tcx: 'mir> EvalContextExtPrivate<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn macos_stat_write_buf( &mut self, metadata: FileMetadata, buf_op: &OpTy<'tcx, Provenance> ) -> InterpResult<'tcx, i32>

source§

fn file_type_to_d_type( &mut self, file_type: Result<FileType> ) -> InterpResult<'tcx, i32>

source§

impl<'mir, 'tcx: 'mir> EvalContextPrivExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn run_timeout_callback(&mut self) -> InterpResult<'tcx>

Execute a timeout callback on the callback’s thread.
source§

fn run_on_stack_empty(&mut self) -> InterpResult<'tcx, Poll<()>>

source§

impl<'mir, 'tcx: 'mir> EvalContextPrivExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn atomic_load( &mut self, args: &[OpTy<'tcx, Provenance>], dest: &PlaceTy<'tcx, Provenance>, atomic: AtomicReadOrd ) -> InterpResult<'tcx>

source§

fn atomic_store( &mut self, args: &[OpTy<'tcx, Provenance>], atomic: AtomicWriteOrd ) -> InterpResult<'tcx>

source§

fn compiler_fence_intrinsic( &mut self, args: &[OpTy<'tcx, Provenance>], atomic: AtomicFenceOrd ) -> InterpResult<'tcx>

source§

fn atomic_fence_intrinsic( &mut self, args: &[OpTy<'tcx, Provenance>], atomic: AtomicFenceOrd ) -> InterpResult<'tcx>

source§

fn atomic_op( &mut self, args: &[OpTy<'tcx, Provenance>], dest: &PlaceTy<'tcx, Provenance>, atomic_op: AtomicOp, atomic: AtomicRwOrd ) -> InterpResult<'tcx>

source§

fn atomic_exchange( &mut self, args: &[OpTy<'tcx, Provenance>], dest: &PlaceTy<'tcx, Provenance>, atomic: AtomicRwOrd ) -> InterpResult<'tcx>

source§

fn atomic_compare_exchange_impl( &mut self, args: &[OpTy<'tcx, Provenance>], dest: &PlaceTy<'tcx, Provenance>, success: AtomicRwOrd, fail: AtomicReadOrd, can_fail_spuriously: bool ) -> InterpResult<'tcx>

source§

fn atomic_compare_exchange( &mut self, args: &[OpTy<'tcx, Provenance>], dest: &PlaceTy<'tcx, Provenance>, success: AtomicRwOrd, fail: AtomicReadOrd ) -> InterpResult<'tcx>

source§

fn atomic_compare_exchange_weak( &mut self, args: &[OpTy<'tcx, Provenance>], dest: &PlaceTy<'tcx, Provenance>, success: AtomicRwOrd, fail: AtomicReadOrd ) -> InterpResult<'tcx>
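The three compare-exchange entry points suggest a shared implementation that the strong and weak variants delegate to, differing only in whether spurious failure is permitted. A standalone sketch of that delegation pattern, using plain integers and a stand-in failure predicate instead of interpreter operands:

```rust
// Sketch only: one implementation, two wrappers, a `can_fail_spuriously` flag.
fn compare_exchange_impl(
    cell: &mut u64,
    expected: u64,
    new: u64,
    can_fail_spuriously: bool,
    spurious_failure: impl Fn() -> bool,
) -> Result<u64, u64> {
    let old = *cell;
    // A weak CAS may fail even when the comparison succeeds.
    if old == expected && !(can_fail_spuriously && spurious_failure()) {
        *cell = new;
        Ok(old)
    } else {
        Err(old)
    }
}

fn compare_exchange(cell: &mut u64, expected: u64, new: u64) -> Result<u64, u64> {
    compare_exchange_impl(cell, expected, new, false, || false)
}

fn compare_exchange_weak(cell: &mut u64, expected: u64, new: u64) -> Result<u64, u64> {
    // A fixed closure stands in for a configurable spurious-failure source.
    compare_exchange_impl(cell, expected, new, true, || false)
}

fn main() {
    let mut cell = 5u64;
    assert_eq!(compare_exchange(&mut cell, 5, 6), Ok(5));
    assert_eq!(compare_exchange_weak(&mut cell, 5, 7), Err(6));
}
```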

source§

impl<'mir, 'tcx: 'mir> EvalContextPrivExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn allow_data_races_ref<R>( &self, op: impl FnOnce(&MiriInterpCx<'mir, 'tcx>) -> R ) -> R

Temporarily allow data races to occur. Like allow_data_races_mut below, this should only be used for atomic operations or internal state that the program itself cannot access.
source§

fn allow_data_races_mut<R>( &mut self, op: impl FnOnce(&mut MiriInterpCx<'mir, 'tcx>) -> R ) -> R

Like allow_data_races_ref, this temporarily disables any data-race detection, and so should only be used for atomic operations or internal state that the program cannot access.
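A standalone sketch of the "run a closure with detection suppressed" pattern these two helpers describe; the ongoing_action_allowed flag is an illustrative stand-in for the detector's real state.

```rust
// Sketch only: flip a suppression flag around a closure, then restore it.
struct Detector {
    ongoing_action_allowed: bool,
}

impl Detector {
    fn allow_data_races_mut<R>(&mut self, op: impl FnOnce(&mut Detector) -> R) -> R {
        let previous = self.ongoing_action_allowed;
        self.ongoing_action_allowed = true; // detection is off while `op` runs
        let result = op(self);
        self.ongoing_action_allowed = previous; // restore on the way out
        result
    }
}

fn main() {
    let mut d = Detector { ongoing_action_allowed: false };
    let value = d.allow_data_races_mut(|det| {
        assert!(det.ongoing_action_allowed); // suppressed inside the closure
        42
    });
    assert_eq!(value, 42);
    assert!(!d.ongoing_action_allowed); // and back to normal afterwards
}
```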
source§

fn atomic_access_check( &self, place: &MPlaceTy<'tcx, Provenance> ) -> InterpResult<'tcx>

Checks that an atomic access is legal at the given place.
source§

fn validate_atomic_load( &self, place: &MPlaceTy<'tcx, Provenance>, atomic: AtomicReadOrd ) -> InterpResult<'tcx>

Update the data-race detector for an atomic read occurring at the associated memory place and on the current thread.
source§

fn validate_atomic_store( &mut self, place: &MPlaceTy<'tcx, Provenance>, atomic: AtomicWriteOrd ) -> InterpResult<'tcx>

Update the data-race detector for an atomic write occurring at the associated memory place and on the current thread.
source§

fn validate_atomic_rmw( &mut self, place: &MPlaceTy<'tcx, Provenance>, atomic: AtomicRwOrd ) -> InterpResult<'tcx>

Update the data-race detector for an atomic read-modify-write occurring at the associated memory place and on the current thread.
source§

fn validate_atomic_op<A: Debug + Copy>( &self, place: &MPlaceTy<'tcx, Provenance>, atomic: A, description: &str, op: impl FnMut(&mut MemoryCellClocks, &mut ThreadClockSet, VectorIdx, A) -> Result<(), DataRace> ) -> InterpResult<'tcx>

Generic atomic operation implementation used by the validate_atomic_* functions above.
source§

impl<'mir, 'tcx: 'mir> EvalContextPrivExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn schedule_windows_tls_dtors(&mut self) -> InterpResult<'tcx>

Schedule TLS destructors for Windows. On Windows, TLS destructors are managed by std.
source§

fn schedule_macos_tls_dtor(&mut self) -> InterpResult<'tcx>

Schedule the macOS destructor of the thread-local storage to be executed.
source§

fn schedule_next_pthread_tls_dtor( &mut self, state: &mut RunningDtorState ) -> InterpResult<'tcx, Poll<()>>

Schedule a pthread TLS destructor. Returns true if a destructor was found to schedule, and false otherwise.
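The Poll<()> return type suggests a stepping protocol: each call schedules at most one destructor, and the caller keeps going until Ready. A standalone sketch of that shape, with a made-up state type and direct calls where Miri would instead push an interpreter frame:

```rust
// Sketch only: run one destructor per step until none remain.
use std::task::Poll;

struct DtorState {
    remaining: Vec<fn()>,
}

fn schedule_next_dtor(state: &mut DtorState) -> Poll<()> {
    match state.remaining.pop() {
        Some(dtor) => {
            // Miri would schedule this as a call frame rather than calling directly.
            dtor();
            Poll::Pending // more destructors may remain; step again later
        }
        None => Poll::Ready(()), // all destructors have run
    }
}

fn dtor_one() { println!("dtor 1"); }
fn dtor_two() { println!("dtor 2"); }

fn main() {
    let dtors: Vec<fn()> = vec![dtor_two, dtor_one];
    let mut state = DtorState { remaining: dtors };
    while schedule_next_dtor(&mut state) == Poll::Pending {}
}
```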
source§

impl<'mir: 'ecx, 'tcx: 'mir, 'ecx> EvalContextPrivExt<'mir, 'tcx, 'ecx> for MiriInterpCx<'mir, 'tcx>

Retagging/reborrowing. The policy for which permission to grant to each pointer is left to the implementation of NewPermission.

source§

fn tb_reborrow( &mut self, place: &MPlaceTy<'tcx, Provenance>, ptr_size: Size, new_perm: NewPermission, new_tag: BorTag ) -> InterpResult<'tcx, Option<Provenance>>

Returns the provenance that should be used henceforth.
source§

fn tb_retag_reference( &mut self, val: &ImmTy<'tcx, Provenance>, new_perm: NewPermission ) -> InterpResult<'tcx, ImmTy<'tcx, Provenance>>

Retags an individual pointer, returning the retagged version.
source§

impl<'mir: 'ecx, 'tcx: 'mir, 'ecx> EvalContextPrivExt<'mir, 'tcx, 'ecx> for MiriInterpCx<'mir, 'tcx>

Retagging/reborrowing. There is some policy in here, such as which permissions to grant for which references, and when to add protectors.

source§

fn sb_reborrow( &mut self, place: &MPlaceTy<'tcx, Provenance>, size: Size, new_perm: NewPermission, new_tag: BorTag, retag_info: RetagInfo ) -> InterpResult<'tcx, Option<Provenance>>

Returns the provenance that should be used henceforth.
source§

fn sb_retag_reference( &mut self, val: &ImmTy<'tcx, Provenance>, new_perm: NewPermission, info: RetagInfo ) -> InterpResult<'tcx, ImmTy<'tcx, Provenance>>

Retags an individual pointer, returning the retagged version. The remaining arguments describe what kind of reference is being created.
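At the guest level, retagging is what lets Miri detect aliasing violations: every reference the program creates receives a fresh tag, and later accesses are checked against it. A small, hedged example (under the default Stacked Borrows model) of code whose references get retagged as described above:

```rust
// Guest-level illustration: the reborrow through a raw pointer gets a fresh
// tag, and the later write through the parent reference invalidates it.
fn main() {
    let mut data = 0u8;
    let x_ref = &mut data;
    let raw = x_ref as *mut u8;
    let y_ref = unsafe { &mut *raw }; // reborrow through a raw pointer
    *y_ref = 1;  // fine: `y_ref` is the most recent borrow
    *x_ref = 2;  // also fine, but this invalidates `y_ref`'s tag
    // *y_ref = 3; // uncommenting this makes `cargo miri run` report undefined behavior
    assert_eq!(data, 2);
}
```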
source§

impl<'mir, 'tcx> MiriInterpCxExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>

source§

fn eval_context_ref(&self) -> &MiriInterpCx<'mir, 'tcx>

source§

fn eval_context_mut(&mut self) -> &mut MiriInterpCx<'mir, 'tcx>

source§

impl VisitTags for MiriInterpCx<'_, '_>

source§

fn visit_tags(&self, visit: &mut dyn FnMut(BorTag))

Layout§

Note: Most layout information is completely unstable and may even differ between compilations. The only exception is types with certain repr(...) attributes. Please see the Rust Reference's “Type Layout” chapter for details on type layout guarantees.

Size: 2352 bytes