Type Definition miri::machine::MiriInterpCx
pub type MiriInterpCx<'mir, 'tcx> = InterpCx<'mir, 'tcx, MiriMachine<'mir, 'tcx>>;
A rustc InterpCx for Miri.
Trait Implementations
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
fn sb_retag_ptr_value(
&mut self,
kind: RetagKind,
val: &ImmTy<'tcx, Provenance>
) -> InterpResult<'tcx, ImmTy<'tcx, Provenance>>
fn sb_retag_place_contents(
&mut self,
kind: RetagKind,
place: &PlaceTy<'tcx, Provenance>
) -> InterpResult<'tcx>
fn sb_retag_return_place(&mut self) -> InterpResult<'tcx>
After a stack frame has been pushed, retag the return place so that we are sure
it does not alias with anything.
fn sb_expose_tag(&mut self, alloc_id: AllocId, tag: BorTag) -> InterpResult<'tcx>
Mark the given tag as exposed. It was found on a pointer with the given AllocId.
fn print_stacks(&mut self, alloc_id: AllocId) -> InterpResult<'tcx>
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
fn retag_ptr_value(
&mut self,
kind: RetagKind,
val: &ImmTy<'tcx, Provenance>
) -> InterpResult<'tcx, ImmTy<'tcx, Provenance>>
fn retag_place_contents(
&mut self,
kind: RetagKind,
place: &PlaceTy<'tcx, Provenance>
) -> InterpResult<'tcx>
fn retag_return_place(&mut self) -> InterpResult<'tcx>
fn expose_tag(&mut self, alloc_id: AllocId, tag: BorTag) -> InterpResult<'tcx>
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
fn scalar_to_carg(
k: Scalar<Provenance>,
arg_type: Ty<'tcx>,
cx: &impl HasDataLayout
) -> InterpResult<'tcx, CArg>
Extract the scalar value from the result of reading a scalar from the machine, and convert it to a CArg.
fn call_external_c_and_store_return<'a>(
&mut self,
link_name: Symbol,
dest: &PlaceTy<'tcx, Provenance>,
ptr: CodePtr,
libffi_args: Vec<Arg<'a>>
) -> InterpResult<'tcx, ()>
Call an external C function and store the output, depending on the return type in the function signature.
fn get_func_ptr_explicitly_from_lib(
&mut self,
link_name: Symbol
) -> Option<CodePtr>
Get the pointer to the function of the specified name in the shared object file,
if it exists. The function must be in the shared object file specified: we do not
return pointers to functions in dependencies of the library.
fn call_external_c_fct(
&mut self,
link_name: Symbol,
dest: &PlaceTy<'tcx, Provenance>,
args: &[OpTy<'tcx, Provenance>]
) -> InterpResult<'tcx, bool>
Call the specified external C function with the supplied arguments.
All arguments need to be converted from their hir representations to a form compatible with C
(through a libffi call). Then, the return value from the C call is converted into a
corresponding form that can be stored in Miri's internal memory.
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
fn min_align(&self, size: u64, kind: MiriMemoryKind) -> Align
Returns the minimum alignment for the target architecture for allocations of the given size.
fn malloc(
&mut self,
size: u64,
zero_init: bool,
kind: MiriMemoryKind
) -> InterpResult<'tcx, Pointer<Option<Provenance>>>
fn free(
&mut self,
ptr: Pointer<Option<Provenance>>,
kind: MiriMemoryKind
) -> InterpResult<'tcx>
fn realloc(
&mut self,
old_ptr: Pointer<Option<Provenance>>,
new_size: u64,
kind: MiriMemoryKind
) -> InterpResult<'tcx, Pointer<Option<Provenance>>>
fn lookup_exported_symbol(
&mut self,
link_name: Symbol
) -> InterpResult<'tcx, Option<(&'mir Body<'tcx>, Instance<'tcx>)>>
Lookup the body of a function that has link_name as the symbol name.
fn emulate_foreign_item(
&mut self,
def_id: DefId,
abi: Abi,
args: &[OpTy<'tcx, Provenance>],
dest: &PlaceTy<'tcx, Provenance>,
ret: Option<BasicBlock>,
unwind: StackPopUnwind
) -> InterpResult<'tcx, Option<(&'mir Body<'tcx>, Instance<'tcx>)>>
Emulates calling a foreign item, failing if the item is not supported.
This function will handle goto_block if needed.
Returns Ok(None) if the foreign item was completely handled by this function.
Returns Ok(Some(body)) if processing the foreign item is delegated to another function.
fn emulate_allocator(
&mut self,
symbol: Symbol,
default: impl FnOnce(&mut MiriInterpCx<'mir, 'tcx>) -> InterpResult<'tcx>
) -> InterpResult<'tcx, EmulateByNameResult<'mir, 'tcx>>
Emulates calling the internal _rust* allocator functions
fn emulate_foreign_item_by_name(
&mut self,
link_name: Symbol,
abi: Abi,
args: &[OpTy<'tcx, Provenance>],
dest: &PlaceTy<'tcx, Provenance>
) -> InterpResult<'tcx, EmulateByNameResult<'mir, 'tcx>>
Emulates calling a foreign item using its name.
fn check_alloc_request(size: u64, align: u64) -> InterpResult<'tcx>
Check some basic requirements for this allocation request:
non-zero size, power-of-two alignment.
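A minimal standalone sketch of the check described above (illustrative only, not Miri's actual code; the function name and error strings are made up for the example):

```rust
/// Illustrative sketch: reject an allocation request with zero size or a
/// non-power-of-two alignment, as described above.
fn check_alloc_request_sketch(size: u64, align: u64) -> Result<(), String> {
    if size == 0 {
        return Err("allocation request with size 0".to_string());
    }
    if !align.is_power_of_two() {
        return Err(format!("allocation request with non-power-of-two alignment {align}"));
    }
    Ok(())
}
```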
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
fn emulate_atomic_intrinsic(
&mut self,
intrinsic_name: &str,
args: &[OpTy<'tcx, Provenance>],
dest: &PlaceTy<'tcx, Provenance>
) -> InterpResult<'tcx>
Calls the atomic intrinsic intrinsic; the atomic_ prefix has already been removed.
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
fn emulate_simd_intrinsic(
&mut self,
intrinsic_name: &str,
args: &[OpTy<'tcx, Provenance>],
dest: &PlaceTy<'tcx, Provenance>
) -> InterpResult<'tcx>
Calls the simd intrinsic intrinsic; the simd_ prefix has already been removed.
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
fn call_intrinsic(
&mut self,
instance: Instance<'tcx>,
args: &[OpTy<'tcx, Provenance>],
dest: &PlaceTy<'tcx, Provenance>,
ret: Option<BasicBlock>,
_unwind: StackPopUnwind
) -> InterpResult<'tcx>
fn emulate_intrinsic_by_name(
&mut self,
intrinsic_name: &str,
args: &[OpTy<'tcx, Provenance>],
dest: &PlaceTy<'tcx, Provenance>
) -> InterpResult<'tcx>
Emulates a Miri-supported intrinsic (not supported by the core engine).
fn float_to_int_unchecked<F>(
&self,
f: F,
dest_ty: Ty<'tcx>
) -> InterpResult<'tcx, Scalar<Provenance>>
where
F: Float + Into<Scalar<Provenance>>,
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
fn call_dlsym(
&mut self,
dlsym: Dlsym,
abi: Abi,
args: &[OpTy<'tcx, Provenance>],
dest: &PlaceTy<'tcx, Provenance>,
ret: Option<BasicBlock>
) -> InterpResult<'tcx>
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
fn emulate_foreign_item_by_name(
&mut self,
link_name: Symbol,
abi: Abi,
args: &[OpTy<'tcx, Provenance>],
dest: &PlaceTy<'tcx, Provenance>
) -> InterpResult<'tcx, EmulateByNameResult<'mir, 'tcx>>
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
fn open(&mut self, args: &[OpTy<'tcx, Provenance>]) -> InterpResult<'tcx, i32>
fn fcntl(&mut self, args: &[OpTy<'tcx, Provenance>]) -> InterpResult<'tcx, i32>
fn close(
&mut self,
fd_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, Scalar<Provenance>>
fn read(
&mut self,
fd: i32,
buf: Pointer<Option<Provenance>>,
count: u64
) -> InterpResult<'tcx, i64>
fn write(
&mut self,
fd: i32,
buf: Pointer<Option<Provenance>>,
count: u64
) -> InterpResult<'tcx, i64>
fn lseek64(
&mut self,
fd_op: &OpTy<'tcx, Provenance>,
offset_op: &OpTy<'tcx, Provenance>,
whence_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, Scalar<Provenance>>
fn unlink(&mut self, path_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx, i32>
fn symlink(
&mut self,
target_op: &OpTy<'tcx, Provenance>,
linkpath_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, i32>
fn macos_stat(
&mut self,
path_op: &OpTy<'tcx, Provenance>,
buf_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, Scalar<Provenance>>
fn macos_lstat(
&mut self,
path_op: &OpTy<'tcx, Provenance>,
buf_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, Scalar<Provenance>>
fn macos_fstat(
&mut self,
fd_op: &OpTy<'tcx, Provenance>,
buf_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, Scalar<Provenance>>
fn linux_statx(
&mut self,
dirfd_op: &OpTy<'tcx, Provenance>,
pathname_op: &OpTy<'tcx, Provenance>,
flags_op: &OpTy<'tcx, Provenance>,
mask_op: &OpTy<'tcx, Provenance>,
statxbuf_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, i32>
fn rename(
&mut self,
oldpath_op: &OpTy<'tcx, Provenance>,
newpath_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, i32>
fn mkdir(
&mut self,
path_op: &OpTy<'tcx, Provenance>,
mode_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, i32>
fn rmdir(&mut self, path_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx, i32>
fn opendir(
&mut self,
name_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, Scalar<Provenance>>
fn linux_readdir64(
&mut self,
dirp_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, Scalar<Provenance>>
fn macos_readdir_r(
&mut self,
dirp_op: &OpTy<'tcx, Provenance>,
entry_op: &OpTy<'tcx, Provenance>,
result_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, Scalar<Provenance>>
fn closedir(
&mut self,
dirp_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, i32>
fn ftruncate64(
&mut self,
fd_op: &OpTy<'tcx, Provenance>,
length_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, Scalar<Provenance>>
fn fsync(&mut self, fd_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx, i32>
fn fdatasync(
&mut self,
fd_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, i32>
fn sync_file_range(
&mut self,
fd_op: &OpTy<'tcx, Provenance>,
offset_op: &OpTy<'tcx, Provenance>,
nbytes_op: &OpTy<'tcx, Provenance>,
flags_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, Scalar<Provenance>>
fn readlink(
&mut self,
pathname_op: &OpTy<'tcx, Provenance>,
buf_op: &OpTy<'tcx, Provenance>,
bufsize_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, i64>
fn isatty(
&mut self,
miri_fd: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, Scalar<Provenance>>
fn realpath(
&mut self,
path_op: &OpTy<'tcx, Provenance>,
processed_path_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, Scalar<Provenance>>
fn mkstemp(
&mut self,
template_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, i32>
impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
fn pthread_mutexattr_init(
&mut self,
attr_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, i32>
fn pthread_mutexattr_settype(
&mut self,
attr_op: &OpTy<'tcx, Provenance>,
kind_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, i32>
fn pthread_mutexattr_destroy(
&mut self,
attr_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, i32>
fn pthread_mutex_init(
&mut self,
mutex_op: &OpTy<'tcx, Provenance>,
attr_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, i32>
fn pthread_mutex_lock(
&mut self,
mutex_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, i32>
fn pthread_mutex_trylock(
&mut self,
mutex_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, i32>
fn pthread_mutex_unlock(
&mut self,
mutex_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, i32>
fn pthread_mutex_destroy(
&mut self,
mutex_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, i32>
fn pthread_rwlock_rdlock(
&mut self,
rwlock_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, i32>
fn pthread_rwlock_tryrdlock(
&mut self,
rwlock_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, i32>
fn pthread_rwlock_wrlock(
&mut self,
rwlock_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, i32>
fn pthread_rwlock_trywrlock(
&mut self,
rwlock_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, i32>
fn pthread_rwlock_unlock(
&mut self,
rwlock_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, i32>
fn pthread_rwlock_destroy(
&mut self,
rwlock_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, i32>
fn pthread_condattr_init(
&mut self,
attr_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, i32>
fn pthread_condattr_setclock(
&mut self,
attr_op: &OpTy<'tcx, Provenance>,
clock_id_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, Scalar<Provenance>>
fn pthread_condattr_getclock(
&mut self,
attr_op: &OpTy<'tcx, Provenance>,
clk_id_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, Scalar<Provenance>>
fn pthread_condattr_destroy(
&mut self,
attr_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, i32>
fn pthread_cond_init(
&mut self,
cond_op: &OpTy<'tcx, Provenance>,
attr_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, i32>
fn pthread_cond_signal(
&mut self,
cond_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, i32>
fn pthread_cond_broadcast(
&mut self,
cond_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, i32>
fn pthread_cond_wait(
&mut self,
cond_op: &OpTy<'tcx, Provenance>,
mutex_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, i32>
fn pthread_cond_timedwait(
&mut self,
cond_op: &OpTy<'tcx, Provenance>,
mutex_op: &OpTy<'tcx, Provenance>,
abstime_op: &OpTy<'tcx, Provenance>,
dest: &PlaceTy<'tcx, Provenance>
) -> InterpResult<'tcx>
fn pthread_cond_destroy(
&mut self,
cond_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, i32>
impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
fn pthread_create(
&mut self,
thread: &OpTy<'tcx, Provenance>,
_attr: &OpTy<'tcx, Provenance>,
start_routine: &OpTy<'tcx, Provenance>,
arg: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, i32>
fn pthread_join(
&mut self,
thread: &OpTy<'tcx, Provenance>,
retval: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, i32>
fn pthread_detach(
&mut self,
thread: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, i32>
fn pthread_self(&mut self) -> InterpResult<'tcx, Scalar<Provenance>>
fn pthread_setname_np(
&mut self,
thread: Scalar<Provenance>,
name: Scalar<Provenance>,
max_name_len: usize
) -> InterpResult<'tcx, Scalar<Provenance>>
Set the name of the current thread. max_name_len is the maximal length of the name
including the null terminator.
fn pthread_getname_np(
&mut self,
thread: Scalar<Provenance>,
name_out: Scalar<Provenance>,
len: Scalar<Provenance>
) -> InterpResult<'tcx, Scalar<Provenance>>
fn sched_yield(&mut self) -> InterpResult<'tcx, i32>
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
Evaluation context extensions.
fn read_scalar_atomic(
&self,
place: &MPlaceTy<'tcx, Provenance>,
atomic: AtomicReadOrd
) -> InterpResult<'tcx, Scalar<Provenance>>
Perform an atomic read operation at the memory location.
fn write_scalar_atomic(
&mut self,
val: Scalar<Provenance>,
dest: &MPlaceTy<'tcx, Provenance>,
atomic: AtomicWriteOrd
) -> InterpResult<'tcx>
Perform an atomic write operation at the memory location.
fn atomic_op_immediate(
&mut self,
place: &MPlaceTy<'tcx, Provenance>,
rhs: &ImmTy<'tcx, Provenance>,
op: BinOp,
neg: bool,
atomic: AtomicRwOrd
) -> InterpResult<'tcx, ImmTy<'tcx, Provenance>>
Perform an atomic operation on a memory location.
fn atomic_exchange_scalar(
&mut self,
place: &MPlaceTy<'tcx, Provenance>,
new: Scalar<Provenance>,
atomic: AtomicRwOrd
) -> InterpResult<'tcx, Scalar<Provenance>>
Perform an atomic exchange with a memory place and a new scalar value; the old value is returned.
fn atomic_min_max_scalar(
&mut self,
place: &MPlaceTy<'tcx, Provenance>,
rhs: ImmTy<'tcx, Provenance>,
min: bool,
atomic: AtomicRwOrd
) -> InterpResult<'tcx, ImmTy<'tcx, Provenance>>
Perform a conditional atomic exchange with a memory place and a new scalar value; the old value is returned.
fn atomic_compare_exchange_scalar(
&mut self,
place: &MPlaceTy<'tcx, Provenance>,
expect_old: &ImmTy<'tcx, Provenance>,
new: Scalar<Provenance>,
success: AtomicRwOrd,
fail: AtomicReadOrd,
can_fail_spuriously: bool
) -> InterpResult<'tcx, Immediate<Provenance>>
Perform an atomic compare and exchange at a given memory location.
On success an atomic RMW operation is performed and on failure only an atomic read occurs.
If can_fail_spuriously is true, then we treat it as a “compare_exchange_weak” operation,
and some portion of the time fail even when the values are actually identical.
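To make that contract concrete, here is a rough sketch of the described behaviour on a plain integer cell; this is illustrative only (the spurious_failure closure stands in for whatever source of randomness decides weak-CAS failures), not Miri's implementation:

```rust
/// Illustrative sketch of the compare-exchange behaviour described above:
/// the old value is always read, the write only happens on success, and a
/// "weak" CAS may fail spuriously even when the comparison succeeded.
fn compare_exchange_sketch(
    cell: &mut u64,
    expect_old: u64,
    new: u64,
    can_fail_spuriously: bool,
    spurious_failure: impl FnOnce() -> bool,
) -> (u64, bool) {
    let old = *cell; // the read part happens in every case
    let success = old == expect_old && !(can_fail_spuriously && spurious_failure());
    if success {
        *cell = new; // the read-modify-write part only happens on success
    }
    (old, success)
}
```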
fn atomic_fence(&mut self, atomic: AtomicFenceOrd) -> InterpResult<'tcx>
Update the data-race detector for an atomic fence on the current thread.
fn allow_data_races_all_threads_done(&mut self)
After all threads are done running, this allows data races to occur for subsequent
‘administrative’ machine accesses (that logically happen outside of the Abstract Machine).
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
fn call_dlsym(
&mut self,
dlsym: Dlsym,
args: &[OpTy<'tcx, Provenance>],
dest: &PlaceTy<'tcx, Provenance>,
ret: Option<BasicBlock>
) -> InterpResult<'tcx>
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
fn emulate_foreign_item_by_name(
&mut self,
link_name: Symbol,
_abi: Abi,
_args: &[OpTy<'tcx, Provenance>],
_dest: &PlaceTy<'tcx, Provenance>
) -> InterpResult<'tcx, EmulateByNameResult<'mir, 'tcx>>
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
fn call_dlsym(
&mut self,
dlsym: Dlsym,
_args: &[OpTy<'tcx, Provenance>],
_dest: &PlaceTy<'tcx, Provenance>,
ret: Option<BasicBlock>
) -> InterpResult<'tcx>
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
fn emulate_foreign_item_by_name(
&mut self,
link_name: Symbol,
abi: Abi,
args: &[OpTy<'tcx, Provenance>],
dest: &PlaceTy<'tcx, Provenance>
) -> InterpResult<'tcx, EmulateByNameResult<'mir, 'tcx>>
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
fn call_dlsym(
&mut self,
dlsym: Dlsym,
_args: &[OpTy<'tcx, Provenance>],
_dest: &PlaceTy<'tcx, Provenance>,
ret: Option<BasicBlock>
) -> InterpResult<'tcx>
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
fn emulate_foreign_item_by_name(
&mut self,
link_name: Symbol,
abi: Abi,
args: &[OpTy<'tcx, Provenance>],
dest: &PlaceTy<'tcx, Provenance>
) -> InterpResult<'tcx, EmulateByNameResult<'mir, 'tcx>>
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
fn call_dlsym(
&mut self,
dlsym: Dlsym,
args: &[OpTy<'tcx, Provenance>],
dest: &PlaceTy<'tcx, Provenance>,
ret: Option<BasicBlock>
) -> InterpResult<'tcx>
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
fn emulate_foreign_item_by_name(
&mut self,
link_name: Symbol,
abi: Abi,
args: &[OpTy<'tcx, Provenance>],
dest: &PlaceTy<'tcx, Provenance>
) -> InterpResult<'tcx, EmulateByNameResult<'mir, 'tcx>>
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
fn call_dlsym(
&mut self,
dlsym: Dlsym,
abi: Abi,
args: &[OpTy<'tcx, Provenance>],
dest: &PlaceTy<'tcx, Provenance>,
ret: Option<BasicBlock>
) -> InterpResult<'tcx>
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
fn emulate_foreign_item_by_name(
&mut self,
link_name: Symbol,
abi: Abi,
args: &[OpTy<'tcx, Provenance>],
dest: &PlaceTy<'tcx, Provenance>
) -> InterpResult<'tcx, EmulateByNameResult<'mir, 'tcx>>
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
fn mutex_get_or_create_id(
&mut self,
lock_op: &OpTy<'tcx, Provenance>,
offset: u64
) -> InterpResult<'tcx, MutexId>
fn rwlock_get_or_create_id(
&mut self,
lock_op: &OpTy<'tcx, Provenance>,
offset: u64
) -> InterpResult<'tcx, RwLockId>
fn condvar_get_or_create_id(
&mut self,
lock_op: &OpTy<'tcx, Provenance>,
offset: u64
) -> InterpResult<'tcx, CondvarId>
fn mutex_get_or_create<F>(&mut self, existing: F) -> InterpResult<'tcx, MutexId>
where
F: FnOnce(&mut MiriInterpCx<'mir, 'tcx>, MutexId) -> InterpResult<'tcx, Option<MutexId>>,
Provides the closure with the next MutexId. Creates that mutex if the closure returns None,
otherwise returns the value from the closure.
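The same get-or-create pattern recurs below for RwLockId, CondvarId and InitOnceId; a generic sketch of it (illustrative only, using a Vec-backed store and usize ids rather than Miri's types) looks like this:

```rust
/// Illustrative sketch of the "get or create" pattern: hand the closure the
/// next id; if it returns an existing id use that, otherwise create a new
/// entry and return the fresh id.
fn get_or_create_sketch<T: Default>(
    store: &mut Vec<T>,
    existing: impl FnOnce(&mut Vec<T>, usize) -> Option<usize>,
) -> usize {
    let next_id = store.len();
    match existing(store, next_id) {
        Some(old_id) => old_id,
        None => {
            store.push(T::default());
            next_id
        }
    }
}
```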
fn mutex_get_owner(&mut self, id: MutexId) -> ThreadId
Get the id of the thread that currently owns this lock.
fn mutex_is_locked(&self, id: MutexId) -> bool
Check if locked.
fn mutex_lock(&mut self, id: MutexId, thread: ThreadId)
Lock by setting the mutex owner and increasing the lock count.
fn mutex_unlock(&mut self, id: MutexId, expected_owner: ThreadId) -> Option<usize>
Try unlocking by decreasing the lock count and returning the old lock count.
If the lock count reaches 0, release the lock and potentially give it to a new owner.
If the lock was not locked by expected_owner, return None.
fn mutex_enqueue_and_block(&mut self, id: MutexId, thread: ThreadId)
Put the thread into the queue waiting for the mutex.
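A minimal sketch of the owner/lock-count bookkeeping that the mutex methods above describe; MutexSketch and its usize thread ids are illustrative stand-ins, not Miri's data structures:

```rust
/// Illustrative reentrant-mutex state: an owner plus a lock count.
#[derive(Default)]
struct MutexSketch {
    owner: Option<usize>, // stand-in for ThreadId
    lock_count: usize,
}

impl MutexSketch {
    /// Lock by setting the mutex owner and increasing the lock count.
    fn lock(&mut self, thread: usize) {
        assert!(self.owner.is_none() || self.owner == Some(thread));
        self.owner = Some(thread);
        self.lock_count += 1;
    }

    /// Try unlocking by decreasing the lock count and returning the old count;
    /// returns None if `expected_owner` does not hold the lock.
    fn unlock(&mut self, expected_owner: usize) -> Option<usize> {
        if self.owner != Some(expected_owner) {
            return None;
        }
        let old_count = self.lock_count;
        self.lock_count -= 1;
        if self.lock_count == 0 {
            // Fully released: a queued thread could acquire the mutex next.
            self.owner = None;
        }
        Some(old_count)
    }
}
```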
fn rwlock_get_or_create<F>(
&mut self,
existing: F
) -> InterpResult<'tcx, RwLockId>
where
F: FnOnce(&mut MiriInterpCx<'mir, 'tcx>, RwLockId) -> InterpResult<'tcx, Option<RwLockId>>,
Provides the closure with the next RwLockId. Creates that RwLock if the closure returns None,
otherwise returns the value from the closure.
fn rwlock_is_locked(&self, id: RwLockId) -> bool
Check if locked.
fn rwlock_is_write_locked(&self, id: RwLockId) -> bool
Check if write locked.
fn rwlock_reader_lock(&mut self, id: RwLockId, reader: ThreadId)
Read-lock the lock by adding the reader to the list of threads that own this lock.
fn rwlock_reader_unlock(&mut self, id: RwLockId, reader: ThreadId) -> bool
Try to read-unlock the lock for reader and potentially give the lock to a new owner.
Returns true if this succeeded, false if this reader did not hold the lock.
fn rwlock_enqueue_and_block_reader(&mut self, id: RwLockId, reader: ThreadId)
Put the reader in the queue waiting for the lock and block it.
fn rwlock_writer_lock(&mut self, id: RwLockId, writer: ThreadId)
Lock by setting the writer that owns the lock.
fn rwlock_writer_unlock(
&mut self,
id: RwLockId,
expected_writer: ThreadId
) -> bool
Try to unlock by removing the writer.
fn rwlock_enqueue_and_block_writer(&mut self, id: RwLockId, writer: ThreadId)
Put the writer in the queue waiting for the lock.
fn condvar_get_or_create<F>(
&mut self,
existing: F
) -> InterpResult<'tcx, CondvarId>
where
F: FnOnce(&mut MiriInterpCx<'mir, 'tcx>, CondvarId) -> InterpResult<'tcx, Option<CondvarId>>,
Provides the closure with the next CondvarId. Creates that Condvar if the closure returns None,
otherwise returns the value from the closure.
fn condvar_is_awaited(&mut self, id: CondvarId) -> bool
Is the condition variable awaited?
fn condvar_wait(&mut self, id: CondvarId, thread: ThreadId, lock: CondvarLock)
Mark that the thread is waiting on the condition variable.
fn condvar_signal(&mut self, id: CondvarId) -> Option<(ThreadId, CondvarLock)>
Wake up some thread (if there is any) sleeping on the condition variable.
fn condvar_remove_waiter(&mut self, id: CondvarId, thread: ThreadId)
Remove the thread from the queue of threads waiting on this condition variable.
fn futex_wait(&mut self, addr: u64, thread: ThreadId, bitset: u32)
fn futex_wake(&mut self, addr: u64, bitset: u32) -> Option<ThreadId>
fn futex_remove_waiter(&mut self, addr: u64, thread: ThreadId)
impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
fn invalid_handle(&mut self, function_name: &str) -> InterpResult<'tcx, !>
fn CloseHandle(
&mut self,
handle_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx>
impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
fn AcquireSRWLockExclusive(
&mut self,
lock_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx>
fn TryAcquireSRWLockExclusive(
&mut self,
lock_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, Scalar<Provenance>>
fn ReleaseSRWLockExclusive(
&mut self,
lock_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx>
fn InitOnceBeginInitialize(
&mut self,
init_once_op: &OpTy<'tcx, Provenance>,
flags_op: &OpTy<'tcx, Provenance>,
pending_op: &OpTy<'tcx, Provenance>,
context_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, Scalar<Provenance>>
fn InitOnceComplete(
&mut self,
init_once_op: &OpTy<'tcx, Provenance>,
flags_op: &OpTy<'tcx, Provenance>,
context_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, Scalar<Provenance>>
fn WaitOnAddress(
&mut self,
ptr_op: &OpTy<'tcx, Provenance>,
compare_op: &OpTy<'tcx, Provenance>,
size_op: &OpTy<'tcx, Provenance>,
timeout_op: &OpTy<'tcx, Provenance>,
dest: &PlaceTy<'tcx, Provenance>
) -> InterpResult<'tcx>
fn WakeByAddressSingle(
&mut self,
ptr_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx>
fn SleepConditionVariableSRW(
&mut self,
condvar_op: &OpTy<'tcx, Provenance>,
lock_op: &OpTy<'tcx, Provenance>,
timeout_op: &OpTy<'tcx, Provenance>,
flags_op: &OpTy<'tcx, Provenance>,
dest: &PlaceTy<'tcx, Provenance>
) -> InterpResult<'tcx, Scalar<Provenance>>
fn WakeConditionVariable(
&mut self,
condvar_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx>
fn WakeAllConditionVariable(
&mut self,
condvar_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx>
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
fn CreateThread(
&mut self,
security_op: &OpTy<'tcx, Provenance>,
stacksize_op: &OpTy<'tcx, Provenance>,
start_op: &OpTy<'tcx, Provenance>,
arg_op: &OpTy<'tcx, Provenance>,
flags_op: &OpTy<'tcx, Provenance>,
thread_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, ThreadId>
fn WaitForSingleObject(
&mut self,
handle_op: &OpTy<'tcx, Provenance>,
timeout_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, u32>
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
fn call_dlsym(
&mut self,
dlsym: Dlsym,
abi: Abi,
args: &[OpTy<'tcx, Provenance>],
dest: &PlaceTy<'tcx, Provenance>,
ret: Option<BasicBlock>
) -> InterpResult<'tcx>
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
fn getenv(
&mut self,
name_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, Pointer<Option<Provenance>>>
fn GetEnvironmentVariableW(
&mut self,
name_op: &OpTy<'tcx, Provenance>,
buf_op: &OpTy<'tcx, Provenance>,
size_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, Scalar<Provenance>>
fn GetEnvironmentStringsW(
&mut self
) -> InterpResult<'tcx, Pointer<Option<Provenance>>>
fn FreeEnvironmentStringsW(
&mut self,
env_block_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, Scalar<Provenance>>
fn setenv(
&mut self,
name_op: &OpTy<'tcx, Provenance>,
value_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, i32>
fn SetEnvironmentVariableW(
&mut self,
name_op: &OpTy<'tcx, Provenance>,
value_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, Scalar<Provenance>>
fn unsetenv(
&mut self,
name_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, i32>
fn getcwd(
&mut self,
buf_op: &OpTy<'tcx, Provenance>,
size_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, Pointer<Option<Provenance>>>
fn GetCurrentDirectoryW(
&mut self,
size_op: &OpTy<'tcx, Provenance>,
buf_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, Scalar<Provenance>>
fn chdir(&mut self, path_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx, i32>
fn SetCurrentDirectoryW(
&mut self,
path_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, Scalar<Provenance>>
fn update_environ(&mut self) -> InterpResult<'tcx>
Updates the environ static. The first time it gets called, it also initializes extra.environ.
fn getpid(&mut self) -> InterpResult<'tcx, i32>
fn GetCurrentProcessId(&mut self) -> InterpResult<'tcx, u32>
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
fn read_os_str_from_c_str<'a>(
&'a self,
ptr: Pointer<Option<Provenance>>
) -> InterpResult<'tcx, &'a OsStr>
where
'tcx: 'a,
'mir: 'a,
Helper function to read an OsString from a null-terminated sequence of bytes, which is what
the Unix APIs usually handle.
fn read_os_str_from_wide_str<'a>(
&'a self,
ptr: Pointer<Option<Provenance>>
) -> InterpResult<'tcx, OsString>
where
'tcx: 'a,
'mir: 'a,
Helper function to read an OsString from a 0x0000-terminated sequence of u16,
which is what the Windows APIs usually handle.
fn write_os_str_to_c_str(
&mut self,
os_str: &OsStr,
ptr: Pointer<Option<Provenance>>,
size: u64
) -> InterpResult<'tcx, (bool, u64)>
Helper function to write an OsStr as a null-terminated sequence of bytes, which is what
the Unix APIs usually handle. This function returns Ok((false, length)) without trying
to write if size is not large enough to fit the contents of os_string plus a null
terminator. It returns Ok((true, length)) if the writing process was successful. The
string length returned does include the null terminator.
fn write_os_str_to_wide_str(
&mut self,
os_str: &OsStr,
ptr: Pointer<Option<Provenance>>,
size: u64
) -> InterpResult<'tcx, (bool, u64)>
Helper function to write an OsStr as a 0x0000-terminated u16-sequence, which is what
the Windows APIs usually handle. This function returns Ok((false, length)) without trying
to write if size is not large enough to fit the contents of os_string plus a null
terminator. It returns Ok((true, length)) if the writing process was successful. The
string length returned does include the null terminator. Length is measured in units of u16.
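As a rough illustration of the (success, length) contract both helpers describe, here is a standalone sketch on a plain byte buffer (illustrative only; the function name and buffer handling are assumptions for the example):

```rust
/// Illustrative sketch: write `contents` plus a null terminator into `buf`,
/// returning (false, needed_length) without writing if the buffer is too
/// small, and (true, written_length) on success. The returned length
/// includes the null terminator, as described above.
fn write_c_str_sketch(contents: &[u8], buf: &mut [u8]) -> (bool, u64) {
    let needed = contents.len() as u64 + 1; // string plus null terminator
    if (buf.len() as u64) < needed {
        return (false, needed);
    }
    buf[..contents.len()].copy_from_slice(contents);
    buf[contents.len()] = 0;
    (true, needed)
}
```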
fn alloc_os_str_as_c_str(
&mut self,
os_str: &OsStr,
memkind: MemoryKind<MiriMemoryKind>
) -> InterpResult<'tcx, Pointer<Option<Provenance>>>
Allocate enough memory to store the given OsStr as a null-terminated sequence of bytes.
fn alloc_os_str_as_wide_str(
&mut self,
os_str: &OsStr,
memkind: MemoryKind<MiriMemoryKind>
) -> InterpResult<'tcx, Pointer<Option<Provenance>>>
Allocate enough memory to store the given OsStr as a null-terminated sequence of u16.
fn read_path_from_c_str<'a>(
&'a self,
ptr: Pointer<Option<Provenance>>
) -> InterpResult<'tcx, Cow<'a, Path>>
where
'tcx: 'a,
'mir: 'a,
Read a null-terminated sequence of bytes, and perform path separator conversion if needed.
fn read_path_from_wide_str(
&self,
ptr: Pointer<Option<Provenance>>
) -> InterpResult<'tcx, PathBuf>
Read a null-terminated sequence of u16s, and perform path separator conversion if needed.
fn write_path_to_c_str(
&mut self,
path: &Path,
ptr: Pointer<Option<Provenance>>,
size: u64
) -> InterpResult<'tcx, (bool, u64)>
Write a Path to the machine memory (as a null-terminated sequence of bytes),
adjusting path separators if needed.
fn write_path_to_wide_str(
&mut self,
path: &Path,
ptr: Pointer<Option<Provenance>>,
size: u64
) -> InterpResult<'tcx, (bool, u64)>
Write a Path to the machine memory (as a null-terminated sequence of u16s),
adjusting path separators if needed.
fn alloc_path_as_c_str(
&mut self,
path: &Path,
memkind: MemoryKind<MiriMemoryKind>
) -> InterpResult<'tcx, Pointer<Option<Provenance>>>
Allocate enough memory to store a Path as a null-terminated sequence of bytes,
adjusting path separators if needed.
fn convert_path_separator<'a>(
&self,
os_str: Cow<'a, OsStr>,
direction: PathConversion
) -> Cow<'a, OsStr>
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
fn handle_miri_start_panic(
&mut self,
abi: Abi,
link_name: Symbol,
args: &[OpTy<'tcx, Provenance>],
unwind: StackPopUnwind
) -> InterpResult<'tcx>
Handles the special miri_start_panic intrinsic, which is called
by libpanic_unwind to delegate the actual unwinding process to Miri.
fn handle_try(
&mut self,
args: &[OpTy<'tcx, Provenance>],
dest: &PlaceTy<'tcx, Provenance>,
ret: BasicBlock
) -> InterpResult<'tcx>
Handles the try intrinsic, the underlying implementation of std::panicking::try.
fn handle_stack_pop_unwind(
&mut self,
extra: FrameExtra<'tcx>,
unwinding: bool
) -> InterpResult<'tcx, StackPopJump>
fn start_panic(&mut self, msg: &str, unwind: StackPopUnwind) -> InterpResult<'tcx>
Start a panic in the interpreter with the given message as payload.
fn assert_panic(
&mut self,
msg: &AssertMessage<'tcx>,
unwind: Option<BasicBlock>
) -> InterpResult<'tcx>
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
fn clock_gettime(
&mut self,
clk_id_op: &OpTy<'tcx, Provenance>,
tp_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, Scalar<Provenance>>
fn gettimeofday(
&mut self,
tv_op: &OpTy<'tcx, Provenance>,
tz_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, i32>
fn GetSystemTimeAsFileTime(
&mut self,
LPFILETIME_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx>
fn QueryPerformanceCounter(
&mut self,
lpPerformanceCount_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, Scalar<Provenance>>
fn QueryPerformanceFrequency(
&mut self,
lpFrequency_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, Scalar<Provenance>>
fn mach_absolute_time(&self) -> InterpResult<'tcx, Scalar<Provenance>>
fn mach_timebase_info(
&mut self,
info_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, Scalar<Provenance>>
fn nanosleep(
&mut self,
req_op: &OpTy<'tcx, Provenance>,
_rem: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, i32>
fn Sleep(&mut self, timeout: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx>
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
fn find_mir_or_eval_fn(
&mut self,
instance: Instance<'tcx>,
abi: Abi,
args: &[OpTy<'tcx, Provenance>],
dest: &PlaceTy<'tcx, Provenance>,
ret: Option<BasicBlock>,
unwind: StackPopUnwind
) -> InterpResult<'tcx, Option<(&'mir Body<'tcx>, Instance<'tcx>)>>
fn align_offset(
&mut self,
ptr_op: &OpTy<'tcx, Provenance>,
align_op: &OpTy<'tcx, Provenance>,
dest: &PlaceTy<'tcx, Provenance>,
ret: Option<BasicBlock>,
unwind: StackPopUnwind
) -> InterpResult<'tcx, bool>
Returns true if the computation was performed, and false if we should just evaluate
the actual MIR of align_offset.
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
fn init_once_get_or_create_id(
&mut self,
lock_op: &OpTy<'tcx, Provenance>,
offset: u64
) -> InterpResult<'tcx, InitOnceId>
fn init_once_get_or_create<F>(
&mut self,
existing: F
) -> InterpResult<'tcx, InitOnceId>
where
F: FnOnce(&mut MiriInterpCx<'mir, 'tcx>, InitOnceId) -> InterpResult<'tcx, Option<InitOnceId>>,
Provides the closure with the next InitOnceId. Creates that InitOnce if the closure returns None,
otherwise returns the value from the closure.
fn init_once_status(&mut self, id: InitOnceId) -> InitOnceStatus
fn init_once_enqueue_and_block(
&mut self,
id: InitOnceId,
thread: ThreadId,
callback: Box<dyn MachineCallback<'mir, 'tcx> + 'tcx>
)
Put the thread into the queue waiting for the initialization.
fn init_once_begin(&mut self, id: InitOnceId)
Begin initializing this InitOnce. Must only be called after checking that it is currently
uninitialized.
fn init_once_complete(&mut self, id: InitOnceId) -> InterpResult<'tcx>
fn init_once_fail(&mut self, id: InitOnceId) -> InterpResult<'tcx>
fn init_once_observe_completed(&mut self, id: InitOnceId)
Synchronize with the previous completion of an InitOnce.
Must only be called after checking that it is complete.
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
fn get_or_create_thread_local_alloc(
&mut self,
def_id: DefId
) -> InterpResult<'tcx, Pointer<Provenance>>
Get a thread-specific allocation id for the given thread-local static.
If needed, allocate a new one.
fn start_regular_thread(
&mut self,
thread: Option<MPlaceTy<'tcx, Provenance>>,
start_routine: Pointer<Option<Provenance>>,
start_abi: Abi,
func_arg: ImmTy<'tcx, Provenance>,
ret_layout: TyAndLayout<'tcx>
) -> InterpResult<'tcx, ThreadId>
Start a regular (non-main) thread.
fn detach_thread(
&mut self,
thread_id: ThreadId,
allow_terminated_joined: bool
) -> InterpResult<'tcx>
fn join_thread(&mut self, joined_thread_id: ThreadId) -> InterpResult<'tcx>
fn join_thread_exclusive(
&mut self,
joined_thread_id: ThreadId
) -> InterpResult<'tcx>
fn set_active_thread(&mut self, thread_id: ThreadId) -> ThreadId
fn get_active_thread(&self) -> ThreadId
fn active_thread_mut(&mut self) -> &mut Thread<'mir, 'tcx>
fn active_thread_ref(&self) -> &Thread<'mir, 'tcx>
fn get_total_thread_count(&self) -> usize
fn have_all_terminated(&self) -> bool
fn enable_thread(&mut self, thread_id: ThreadId)
fn active_thread_stack(
&self
) -> &[Frame<'mir, 'tcx, Provenance, FrameExtra<'tcx>>]
fn active_thread_stack_mut(
&mut self
) -> &mut Vec<Frame<'mir, 'tcx, Provenance, FrameExtra<'tcx>>>
fn set_thread_name(&mut self, thread: ThreadId, new_thread_name: Vec<u8>)
Set the name of the current thread. The buffer must not include the null terminator.
fn set_thread_name_wide(&mut self, thread: ThreadId, new_thread_name: &[u16])
fn get_thread_name<'c>(&'c self, thread: ThreadId) -> &'c [u8]
where
'mir: 'c,
fn block_thread(&mut self, thread: ThreadId)
fn unblock_thread(&mut self, thread: ThreadId)
fn yield_active_thread(&mut self)
fn maybe_preempt_active_thread(&mut self)
fn register_timeout_callback(
&mut self,
thread: ThreadId,
call_time: Time,
callback: Box<dyn MachineCallback<'mir, 'tcx> + 'tcx>
)
fn unregister_timeout_callback_if_exists(&mut self, thread: ThreadId)
fn run_threads(&mut self) -> InterpResult<'tcx, !>
Run the core interpreter loop. Returns only when an interrupt occurs (an error or program
termination).
fn terminate_active_thread(&mut self) -> InterpResult<'tcx>
Handles thread termination of the active thread: wakes up threads joining on this one,
and deallocates thread-local statics.
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
fn validate_overlapping_atomic(
&self,
place: &MPlaceTy<'tcx, Provenance>
) -> InterpResult<'tcx>
fn buffered_atomic_rmw(
&mut self,
new_val: Scalar<Provenance>,
place: &MPlaceTy<'tcx, Provenance>,
atomic: AtomicRwOrd,
init: Scalar<Provenance>
) -> InterpResult<'tcx>
fn buffered_atomic_read(
&self,
place: &MPlaceTy<'tcx, Provenance>,
atomic: AtomicReadOrd,
latest_in_mo: Scalar<Provenance>,
validate: impl FnOnce() -> InterpResult<'tcx>
) -> InterpResult<'tcx, Scalar<Provenance>>
fn buffered_atomic_write(
&mut self,
val: Scalar<Provenance>,
dest: &MPlaceTy<'tcx, Provenance>,
atomic: AtomicWriteOrd,
init: Scalar<Provenance>
) -> InterpResult<'tcx>
fn perform_read_on_buffered_latest(
&self,
place: &MPlaceTy<'tcx, Provenance>,
atomic: AtomicReadOrd,
init: Scalar<Provenance>
) -> InterpResult<'tcx>
Caller should never need to consult the store buffer for the latest value.
This function is used exclusively for failed atomic_compare_exchange_scalar
to perform load_impl on the latest store element.
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
fn emit_diagnostic(&self, e: NonHaltingDiagnostic)
fn handle_ice(&self)
We had a panic in Miri itself; try to print something useful.
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
fn have_module(&self, path: &[&str]) -> bool
Checks if the given crate/module exists.
fn try_resolve_path(
&self,
path: &[&str],
namespace: Namespace
) -> Option<Instance<'tcx>>
Gets an instance for a path; fails gracefully if the path does not exist.
fn resolve_path(&self, path: &[&str], namespace: Namespace) -> Instance<'tcx>
Gets an instance for a path.
fn eval_path_scalar(
&self,
path: &[&str]
) -> InterpResult<'tcx, Scalar<Provenance>>
Evaluates the scalar at the specified path. Returns Some(val)
if the path could be resolved, and None otherwise.
fn eval_libc(&self, name: &str) -> InterpResult<'tcx, Scalar<Provenance>>
Helper function to get a libc constant as a Scalar.
fn eval_libc_i32(&self, name: &str) -> InterpResult<'tcx, i32>
Helper function to get a libc constant as an i32.
fn eval_windows(
&self,
module: &str,
name: &str
) -> InterpResult<'tcx, Scalar<Provenance>>
Helper function to get a windows constant as a Scalar.
fn eval_windows_u64(&self, module: &str, name: &str) -> InterpResult<'tcx, u64>
Helper function to get a windows constant as a u64.
fn libc_ty_layout(&self, name: &str) -> InterpResult<'tcx, TyAndLayout<'tcx>>
Helper function to get the TyAndLayout of a libc type.
fn windows_ty_layout(&self, name: &str) -> InterpResult<'tcx, TyAndLayout<'tcx>>
Helper function to get the TyAndLayout of a windows type.
fn mplace_field_named(
&self,
mplace: &MPlaceTy<'tcx, Provenance>,
name: &str
) -> InterpResult<'tcx, MPlaceTy<'tcx, Provenance>>
Project to the given named field of the mplace (which must be a struct or union type).
fn write_int(
&mut self,
i: impl Into<i128>,
dest: &PlaceTy<'tcx, Provenance>
) -> InterpResult<'tcx>
Write an int of the appropriate size to dest. The target type may be signed or unsigned;
we try to do the right thing anyway. i128 can fit all integer types except for u128, so
this method is fine for almost all integer types.
fn write_int_fields(
&mut self,
values: &[i128],
dest: &MPlaceTy<'tcx, Provenance>
) -> InterpResult<'tcx>
Write the first N fields of the given place.
fn write_int_fields_named(
&mut self,
values: &[(&str, i128)],
dest: &MPlaceTy<'tcx, Provenance>
) -> InterpResult<'tcx>
Write the given fields of the given place.
fn write_null(&mut self, dest: &PlaceTy<'tcx, Provenance>) -> InterpResult<'tcx>
Write a 0 of the appropriate size to dest.
fn ptr_is_null(
&self,
ptr: Pointer<Option<Provenance>>
) -> InterpResult<'tcx, bool>
Test if this pointer equals 0.
fn local_place(
&mut self,
local: Local
) -> InterpResult<'tcx, PlaceTy<'tcx, Provenance>>
Get the Place for a local.
fn gen_random(
&mut self,
ptr: Pointer<Option<Provenance>>,
len: u64
) -> InterpResult<'tcx>
Generate some random bytes, and write them to dest.
fn call_function(
&mut self,
f: Instance<'tcx>,
caller_abi: Abi,
args: &[Immediate<Provenance>],
dest: Option<&PlaceTy<'tcx, Provenance>>,
stack_pop: StackPopCleanup
) -> InterpResult<'tcx>
Call a function: Push the stack frame and pass the arguments.
For now, arguments must be scalars (so that the caller does not have to know the layout).
fn visit_freeze_sensitive(
&self,
place: &MPlaceTy<'tcx, Provenance>,
size: Size,
action: impl FnMut(AllocRange, bool) -> InterpResult<'tcx>
) -> InterpResult<'tcx>
Visits the memory covered by place, sensitive to freezing: the 2nd parameter of action
will be true if this is frozen, false if this is in an UnsafeCell.
The range is relative to place.
fn check_no_isolation(&self, name: &str) -> InterpResult<'tcx>
Helper function used inside the shims of foreign functions to check that isolation is
disabled. It returns an error using the name of the foreign function if this is not the case.
fn reject_in_isolation(
&self,
op_name: &str,
reject_with: RejectOpWith
) -> InterpResult<'tcx>
Helper function used inside the shims of foreign functions which reject the op
when isolation is enabled. It is used to print a warning/backtrace about the rejection.
fn assert_target_os(&self, target_os: &str, name: &str)
Helper function used inside the shims of foreign functions to assert that the target OS
is target_os. It panics showing a message with the name of the foreign function
if this is not the case.
fn assert_target_os_is_unix(&self, name: &str)
Helper function used inside the shims of foreign functions to assert that the target OS
is part of the UNIX family. It panics showing a message with the name of the foreign function
if this is not the case.
fn last_error_place(&mut self) -> InterpResult<'tcx, MPlaceTy<'tcx, Provenance>>
Get last error variable as a place, lazily allocating thread-local storage for it if
necessary. Read more
source§fn set_last_error(&mut self, scalar: Scalar<Provenance>) -> InterpResult<'tcx>
fn set_last_error(&mut self, scalar: Scalar<Provenance>) -> InterpResult<'tcx>
Sets the last error variable.
source§fn get_last_error(&mut self) -> InterpResult<'tcx, Scalar<Provenance>>
fn get_last_error(&mut self) -> InterpResult<'tcx, Scalar<Provenance>>
Gets the last error variable.
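Purely illustrative (assuming an errno value has already been computed as a Scalar), the last-error helpers compose like this:
fn example_errno_roundtrip<'tcx>(
this: &mut MiriInterpCx<'_, 'tcx>,
errno: Scalar<Provenance>,
) -> InterpResult<'tcx, Scalar<Provenance>> {
// Store the given errno in the emulated thread-local "last error" slot ...
this.set_last_error(errno)?;
// ... and read it back, as a shim for errno()/GetLastError() might.
this.get_last_error()
}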
fn io_error_to_errnum(
&self,
err_kind: ErrorKind
) -> InterpResult<'tcx, Scalar<Provenance>>
This function tries to produce the most similar OS error from the std::io::ErrorKind as a platform-specific errnum. Read more
fn try_errnum_to_io_error(
&self,
errnum: Scalar<Provenance>
) -> InterpResult<'tcx, Option<ErrorKind>>
The inverse of io_error_to_errnum.
fn set_last_error_from_io_error(
&mut self,
err_kind: ErrorKind
) -> InterpResult<'tcx>
Sets the last OS error using a std::io::ErrorKind.
fn try_unwrap_io_result<T: From<i32>>(
&mut self,
result: Result<T>
) -> InterpResult<'tcx, T>
Helper function that consumes a std::io::Result<T> and returns an InterpResult<'tcx, T>::Ok instead. In case the result is an error, this function returns Ok(-1) and sets the last OS error accordingly. Read more
fn deref_operand_and_offset(
&self,
op: &OpTy<'tcx, Provenance>,
offset: u64,
layout: TyAndLayout<'tcx>
) -> InterpResult<'tcx, MPlaceTy<'tcx, Provenance>>
Calculates the MPlaceTy given the offset and layout of an access on an operand.
fn read_scalar_at_offset(
&self,
op: &OpTy<'tcx, Provenance>,
offset: u64,
layout: TyAndLayout<'tcx>
) -> InterpResult<'tcx, Scalar<Provenance>>
fn write_scalar_at_offset(
&mut self,
op: &OpTy<'tcx, Provenance>,
offset: u64,
value: impl Into<Scalar<Provenance>>,
layout: TyAndLayout<'tcx>
) -> InterpResult<'tcx, ()>
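A hedged sketch of how the offset helpers above might be used to access a field of a struct reachable through a pointer operand; the 8-byte offset, the layout argument, and the zero write are made-up values for illustration, not anything a real shim does.
fn example_field_access<'tcx>(
this: &mut MiriInterpCx<'_, 'tcx>,
op: &OpTy<'tcx, Provenance>,
field_layout: TyAndLayout<'tcx>,
) -> InterpResult<'tcx, Scalar<Provenance>> {
// Read a scalar located 8 bytes past the start of *op, using the given layout.
let old = this.read_scalar_at_offset(op, 8, field_layout)?;
// Overwrite the same field with zero (illustrative only).
this.write_scalar_at_offset(op, 8, Scalar::from_i32(0), field_layout)?;
Ok(old)
}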
fn read_timespec(
&mut self,
tp: &MPlaceTy<'tcx, Provenance>
) -> InterpResult<'tcx, Option<Duration>>
Parse a timespec struct and return it as a std::time::Duration. It returns None if the value in the timespec struct is invalid. Some libc functions will return EINVAL in this case. Read more
fn read_c_str<'a>(
&'a self,
ptr: Pointer<Option<Provenance>>
) -> InterpResult<'tcx, &'a [u8]>
where
'tcx: 'a,
'mir: 'a,
Read a sequence of bytes until the first null terminator.
fn write_c_str(
&mut self,
c_str: &[u8],
ptr: Pointer<Option<Provenance>>,
size: u64
) -> InterpResult<'tcx, (bool, u64)>
Helper function to write a sequence of bytes with an added null terminator, which is what the Unix APIs usually handle. This function returns Ok((false, length)) without trying to write if size is not large enough to fit the contents of c_str plus a null terminator. It returns Ok((true, length)) if the writing process was successful. The string length returned does include the null terminator. Read more
fn read_wide_str(
&self,
ptr: Pointer<Option<Provenance>>
) -> InterpResult<'tcx, Vec<u16>>
Read a sequence of u16 until the first null terminator.
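To make the (bool, u64) return value of write_c_str above concrete, a caller could hypothetically translate it into "buffer too small" handling like this (the handling shown is a stand-in, not what any particular shim returns):
fn example_copy_string<'tcx>(
this: &mut MiriInterpCx<'_, 'tcx>,
bytes: &[u8],
buf: Pointer<Option<Provenance>>,
buf_size: u64,
) -> InterpResult<'tcx, u64> {
// Attempt to write `bytes` plus a null terminator into the guest buffer.
let (written, length) = this.write_c_str(bytes, buf, buf_size)?;
if !written {
// Nothing was written: the buffer cannot hold `length` bytes
// (the length already includes the null terminator).
// A real shim would report this via its own error convention.
}
Ok(length)
}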
fn write_wide_str(
&mut self,
wide_str: &[u16],
ptr: Pointer<Option<Provenance>>,
size: u64
) -> InterpResult<'tcx, (bool, u64)>
Helper function to write a sequence of u16 with an added 0x0000 terminator, which is what the Windows APIs usually handle. This function returns Ok((false, length)) without trying to write if size is not large enough to fit the contents of wide_str plus a null terminator. It returns Ok((true, length)) if the writing process was successful. The string length returned does include the null terminator. Length is measured in units of u16. Read more
fn check_abi<'a>(&self, abi: Abi, exp_abi: Abi) -> InterpResult<'a, ()>
Check that the ABI is what we expect.
fn frame_in_std(&self) -> bool
fn handle_unsupported<S: AsRef<str>>(
&mut self,
error_msg: S
) -> InterpResult<'tcx, ()>
Handler that should be called when unsupported functionality is encountered. This function will either panic within the context of the emulated application or return an error in the Miri process context. Read more
fn check_abi_and_shim_symbol_clash(
&mut self,
abi: Abi,
exp_abi: Abi,
link_name: Symbol
) -> InterpResult<'tcx, ()>
fn check_shim<'a, const N: usize>(
&mut self,
abi: Abi,
exp_abi: Abi,
link_name: Symbol,
args: &'a [OpTy<'tcx, Provenance>]
) -> InterpResult<'tcx, &'a [OpTy<'tcx, Provenance>; N]>where
&'a [OpTy<'tcx, Provenance>; N]: TryFrom<&'a [OpTy<'tcx, Provenance>]>,
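A minimal sketch of the usual shim prologue built on check_shim: it verifies the declared ABI against the expected one and destructures a fixed number of arguments. The shim name and the expected ABI below are placeholders.
fn example_two_arg_shim<'tcx>(
this: &mut MiriInterpCx<'_, 'tcx>,
abi: Abi,
link_name: Symbol,
args: &[OpTy<'tcx, Provenance>],
) -> InterpResult<'tcx> {
// Check the ABI and that exactly two arguments were passed;
// on success we get a fixed-size view of the operands.
let [_first, _second] = this.check_shim(abi, Abi::C { unwind: false }, link_name, args)?;
// ... the shim body would use the two operands here ...
Ok(())
}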
fn mark_immutable(&mut self, mplace: &MemPlace<Provenance>)
Mark a machine allocation that was just created as immutable.
fn item_link_name(&self, def_id: DefId) -> Symbol
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
fn handle_miri_backtrace_size(
&mut self,
abi: Abi,
link_name: Symbol,
args: &[OpTy<'tcx, Provenance>],
dest: &PlaceTy<'tcx, Provenance>
) -> InterpResult<'tcx>
fn handle_miri_get_backtrace(
&mut self,
abi: Abi,
link_name: Symbol,
args: &[OpTy<'tcx, Provenance>],
dest: &PlaceTy<'tcx, Provenance>
) -> InterpResult<'tcx>
fn resolve_frame_pointer(
&mut self,
ptr: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, (Instance<'tcx>, Loc, String, String)>
fn handle_miri_resolve_frame(
&mut self,
abi: Abi,
link_name: Symbol,
args: &[OpTy<'tcx, Provenance>],
dest: &PlaceTy<'tcx, Provenance>
) -> InterpResult<'tcx>
fn handle_miri_resolve_frame_names(
&mut self,
abi: Abi,
link_name: Symbol,
args: &[OpTy<'tcx, Provenance>]
) -> InterpResult<'tcx>
impl<'mir, 'tcx> EvalContextExt<'tcx> for MiriInterpCx<'mir, 'tcx>
fn binary_ptr_op(
&self,
bin_op: BinOp,
left: &ImmTy<'tcx, Provenance>,
right: &ImmTy<'tcx, Provenance>
) -> InterpResult<'tcx, (Scalar<Provenance>, bool, Ty<'tcx>)>
impl<'mir, 'tcx: 'mir> EvalContextExtPriv<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
fn get_or_create_id<Id: SyncId>(
&mut self,
next_id: Id,
lock_op: &OpTy<'tcx, Provenance>,
offset: u64
) -> InterpResult<'tcx, Option<Id>>
Lazily initialize the ID of this Miri sync structure. (‘0’ indicates uninit.) Read more
fn rwlock_dequeue_and_lock_reader(&mut self, id: RwLockId) -> bool
Take a reader out of the queue waiting for the lock. Returns true if some thread got the rwlock. Read more
impl<'mir, 'tcx: 'mir> EvalContextExtPriv<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
fn init_once_observe_attempt(&mut self, id: InitOnceId)
Synchronize with the previous initialization attempt of an InitOnce.
fn init_once_wake_waiter(
&mut self,
id: InitOnceId,
waiter: InitOnceWaiter<'mir, 'tcx>
) -> InterpResult<'tcx>
impl<'mir, 'tcx> EvalContextExtPriv<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
fn reacquire_cond_lock(
&mut self,
thread: ThreadId,
lock: RwLockId,
mode: RwLockMode
) -> InterpResult<'tcx>
Try to reacquire the lock associated with the condition variable after we were signaled. Read more
impl<'mir, 'tcx: 'mir> EvalContextExtPrivate<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
fn macos_stat_write_buf(
&mut self,
metadata: FileMetadata,
buf_op: &OpTy<'tcx, Provenance>
) -> InterpResult<'tcx, i32>
fn handle_not_found<T: From<i32>>(&mut self) -> InterpResult<'tcx, T>
Function used when a handle is not found inside FileHandler. It returns Ok(-1) and sets the last OS error to libc::EBADF (invalid file descriptor). This function uses T: From<i32> instead of i32 directly because some fs functions return different integer types (like read, which returns an i64). Read more
fn file_type_to_d_type(
&mut self,
file_type: Result<FileType>
) -> InterpResult<'tcx, i32>
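A hedged sketch of the documented EBADF behaviour of handle_not_found above: a file-descriptor shim that cannot find the handle falls back to it, which yields -1 (converted into the shim's integer return type) and sets the last OS error. The lookup itself is faked with an Option parameter here.
fn example_fd_shim<'tcx>(
this: &mut MiriInterpCx<'_, 'tcx>,
looked_up_fd: Option<i32>, // stand-in for a real FileHandler lookup
) -> InterpResult<'tcx, i64> {
match looked_up_fd {
Some(fd) => Ok(i64::from(fd)), // pretend the operation succeeded
// Unknown handle: return -1 and set the last error to EBADF.
None => this.handle_not_found(),
}
}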
impl<'mir: 'ecx, 'tcx: 'mir, 'ecx> EvalContextPrivExt<'mir, 'tcx, 'ecx> for MiriInterpCx<'mir, 'tcx>
Retagging/reborrowing. There is some policy in here, such as which permissions to grant for which references, and when to add protectors.
fn sb_reborrow(
&mut self,
place: &MPlaceTy<'tcx, Provenance>,
size: Size,
new_perm: NewPermission,
new_tag: BorTag,
retag_cause: RetagCause
) -> InterpResult<'tcx, Option<AllocId>>
Returns the AllocId the reborrow was done in, if some actual borrow stack manipulation happened. Read more
fn sb_retag_reference(
&mut self,
val: &ImmTy<'tcx, Provenance>,
new_perm: NewPermission,
cause: RetagCause
) -> InterpResult<'tcx, ImmTy<'tcx, Provenance>>
Retags an individual pointer, returning the retagged version. kind indicates what kind of reference is being created. Read more
impl<'mir, 'tcx: 'mir> EvalContextPrivExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
fn allow_data_races_ref<R>(
&self,
op: impl FnOnce(&MiriInterpCx<'mir, 'tcx>) -> R
) -> R
Temporarily allow data-races to occur. This should only be used in one of these cases: Read more
fn allow_data_races_mut<R>(
&mut self,
op: impl FnOnce(&mut MiriInterpCx<'mir, 'tcx>) -> R
) -> R
Same as allow_data_races_ref: this temporarily disables any data-race detection and so should only be used for atomic operations or internal state that the program cannot access. Read more
fn atomic_access_check(
&self,
place: &MPlaceTy<'tcx, Provenance>
) -> InterpResult<'tcx>
Checks that an atomic access is legal at the given place.
fn validate_atomic_load(
&self,
place: &MPlaceTy<'tcx, Provenance>,
atomic: AtomicReadOrd
) -> InterpResult<'tcx>
Update the data-race detector for an atomic read occurring at the associated memory place and on the current thread. Read more
fn validate_atomic_store(
&mut self,
place: &MPlaceTy<'tcx, Provenance>,
atomic: AtomicWriteOrd
) -> InterpResult<'tcx>
Update the data-race detector for an atomic write occurring at the associated memory place and on the current thread. Read more
fn validate_atomic_rmw(
&mut self,
place: &MPlaceTy<'tcx, Provenance>,
atomic: AtomicRwOrd
) -> InterpResult<'tcx>
Update the data-race detector for an atomic read-modify-write occurring at the associated memory place and on the current thread. Read more
fn validate_atomic_op<A: Debug + Copy>(
&self,
place: &MPlaceTy<'tcx, Provenance>,
atomic: A,
description: &str,
op: impl FnMut(&mut MemoryCellClocks, &mut ThreadClockSet, VectorIdx, A) -> Result<(), DataRace>
) -> InterpResult<'tcx>
Generic atomic operation implementation.
impl<'mir, 'tcx: 'mir> EvalContextPrivExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
fn run_timeout_callback(&mut self) -> InterpResult<'tcx>
Execute a timeout callback on the callback’s thread.
fn run_on_stack_empty(&mut self) -> InterpResult<'tcx, Poll<()>>
impl<'mir, 'tcx: 'mir> EvalContextPrivExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
fn atomic_load(
&mut self,
args: &[OpTy<'tcx, Provenance>],
dest: &PlaceTy<'tcx, Provenance>,
atomic: AtomicReadOrd
) -> InterpResult<'tcx>
fn atomic_store(
&mut self,
args: &[OpTy<'tcx, Provenance>],
atomic: AtomicWriteOrd
) -> InterpResult<'tcx>
fn compiler_fence_intrinsic(
&mut self,
args: &[OpTy<'tcx, Provenance>],
atomic: AtomicFenceOrd
) -> InterpResult<'tcx>
fn atomic_fence_intrinsic(
&mut self,
args: &[OpTy<'tcx, Provenance>],
atomic: AtomicFenceOrd
) -> InterpResult<'tcx>
fn atomic_op(
&mut self,
args: &[OpTy<'tcx, Provenance>],
dest: &PlaceTy<'tcx, Provenance>,
atomic_op: AtomicOp,
atomic: AtomicRwOrd
) -> InterpResult<'tcx>
fn atomic_exchange(
&mut self,
args: &[OpTy<'tcx, Provenance>],
dest: &PlaceTy<'tcx, Provenance>,
atomic: AtomicRwOrd
) -> InterpResult<'tcx>
fn atomic_compare_exchange_impl(
&mut self,
args: &[OpTy<'tcx, Provenance>],
dest: &PlaceTy<'tcx, Provenance>,
success: AtomicRwOrd,
fail: AtomicReadOrd,
can_fail_spuriously: bool
) -> InterpResult<'tcx>
fn atomic_compare_exchange(
&mut self,
args: &[OpTy<'tcx, Provenance>],
dest: &PlaceTy<'tcx, Provenance>,
success: AtomicRwOrd,
fail: AtomicReadOrd
) -> InterpResult<'tcx>
fn atomic_compare_exchange_weak(
&mut self,
args: &[OpTy<'tcx, Provenance>],
dest: &PlaceTy<'tcx, Provenance>,
success: AtomicRwOrd,
fail: AtomicReadOrd
) -> InterpResult<'tcx>
impl<'mir, 'tcx: 'mir> EvalContextPrivExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
fn schedule_windows_tls_dtors(&mut self) -> InterpResult<'tcx>
Schedule TLS destructors for Windows. On Windows, TLS destructors are managed by std. Read more
fn schedule_macos_tls_dtor(&mut self) -> InterpResult<'tcx>
Schedule the macOS thread destructor of the thread-local storage to be executed. Read more
fn schedule_next_pthread_tls_dtor(
&mut self,
state: &mut RunningDtorState
) -> InterpResult<'tcx, Poll<()>>
Schedule a pthread TLS destructor. Returns true if it found a destructor to schedule, and false otherwise. Read more
impl<'mir, 'tcx> MiriInterpCxExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx>
fn eval_context_ref(&self) -> &MiriInterpCx<'mir, 'tcx>
fn eval_context_mut(&mut self) -> &mut MiriInterpCx<'mir, 'tcx>
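These two accessors are what the EvalContextExt-style traits above build on: each extension method typically begins by converting self into the concrete interpreter context. A schematic sketch, with the trait and method names invented for illustration, assuming the relevant Miri helper traits are in scope:
trait ExampleEvalContextExt<'mir, 'tcx: 'mir>: MiriInterpCxExt<'mir, 'tcx> {
fn example_helper(&mut self, dest: &PlaceTy<'tcx, Provenance>) -> InterpResult<'tcx> {
// Recover the concrete MiriInterpCx from the extension trait; the helper
// methods documented on this page are then available on `this`.
let this = self.eval_context_mut();
this.write_null(dest)
}
}
impl<'mir, 'tcx: 'mir> ExampleEvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx> {}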
impl VisitTags for MiriInterpCx<'_, '_>
Layout
Note: Most layout information is completely unstable and may even differ between compilations. The only exception is types with certain repr(...) attributes. Please see the Rust Reference’s “Type Layout” chapter for details on type layout guarantees.
Size: 2400 bytes