pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
    fn read_scalar_at_offset_atomic(
        &self,
        op: &OpTy<'tcx, Provenance>,
        offset: u64,
        layout: TyAndLayout<'tcx>,
        atomic: AtomicReadOrd
    ) -> InterpResult<'tcx, Scalar<Provenance>> { ... }

    fn write_scalar_at_offset_atomic(
        &mut self,
        op: &OpTy<'tcx, Provenance>,
        offset: u64,
        value: impl Into<Scalar<Provenance>>,
        layout: TyAndLayout<'tcx>,
        atomic: AtomicWriteOrd
    ) -> InterpResult<'tcx> { ... }

    fn read_scalar_atomic(
        &self,
        place: &MPlaceTy<'tcx, Provenance>,
        atomic: AtomicReadOrd
    ) -> InterpResult<'tcx, Scalar<Provenance>> { ... }

    fn write_scalar_atomic(
        &mut self,
        val: Scalar<Provenance>,
        dest: &MPlaceTy<'tcx, Provenance>,
        atomic: AtomicWriteOrd
    ) -> InterpResult<'tcx> { ... }

    fn atomic_op_immediate(
        &mut self,
        place: &MPlaceTy<'tcx, Provenance>,
        rhs: &ImmTy<'tcx, Provenance>,
        op: BinOp,
        neg: bool,
        atomic: AtomicRwOrd
    ) -> InterpResult<'tcx, ImmTy<'tcx, Provenance>> { ... }

    fn atomic_exchange_scalar(
        &mut self,
        place: &MPlaceTy<'tcx, Provenance>,
        new: Scalar<Provenance>,
        atomic: AtomicRwOrd
    ) -> InterpResult<'tcx, Scalar<Provenance>> { ... }

    fn atomic_min_max_scalar(
        &mut self,
        place: &MPlaceTy<'tcx, Provenance>,
        rhs: ImmTy<'tcx, Provenance>,
        min: bool,
        atomic: AtomicRwOrd
    ) -> InterpResult<'tcx, ImmTy<'tcx, Provenance>> { ... }

    fn atomic_compare_exchange_scalar(
        &mut self,
        place: &MPlaceTy<'tcx, Provenance>,
        expect_old: &ImmTy<'tcx, Provenance>,
        new: Scalar<Provenance>,
        success: AtomicRwOrd,
        fail: AtomicReadOrd,
        can_fail_spuriously: bool
    ) -> InterpResult<'tcx, Immediate<Provenance>> { ... }

    fn atomic_fence(&mut self, atomic: AtomicFenceOrd) -> InterpResult<'tcx> { ... }

    fn allow_data_races_all_threads_done(&mut self) { ... }
}

Provided Methods

read_scalar_at_offset_atomic: Atomic variant of read_scalar_at_offset.

write_scalar_at_offset_atomic: Atomic variant of write_scalar_at_offset.
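
These offset variants are intended for shims that need to access one field of a larger object atomically. A minimal caller sketch, assuming Miri's usual crate-internal imports and that MiriEvalContext is the interpreter context implementing this trait; the helper name, the offset, and the way the field layout is obtained are illustrative, not taken from this page:

// Inside Miri, `use crate::*;` conventionally brings the interpreter types
// (OpTy, TyAndLayout, Scalar, InterpResult, AtomicReadOrd, ...) into scope.
// Hypothetical helper: atomically read the field stored at byte offset 16
// of `obj` with acquire ordering; offset 16 is an arbitrary example.
fn read_field_atomic<'mir, 'tcx>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    obj: &OpTy<'tcx, Provenance>,
    field_layout: TyAndLayout<'tcx>,
) -> InterpResult<'tcx, Scalar<Provenance>> {
    ecx.read_scalar_at_offset_atomic(obj, 16, field_layout, AtomicReadOrd::Acquire)
}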

read_scalar_atomic: Perform an atomic read operation at the memory location.

write_scalar_atomic: Perform an atomic write operation at the memory location.
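
For orientation, these two methods provide the data-race handling for plain atomic loads and stores performed by the interpreted program. A source-level sketch of the operations being modelled; the orderings are only examples, and the routing through Miri's atomic intrinsic shims is not shown:

use std::sync::atomic::{AtomicU32, Ordering};

static FLAG: AtomicU32 = AtomicU32::new(0);

fn interpreted_program() {
    // An atomic store is handled by write_scalar_atomic with a release ordering.
    FLAG.store(1, Ordering::Release);
    // An atomic load is handled by read_scalar_atomic with an acquire ordering.
    let _seen = FLAG.load(Ordering::Acquire);
}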

atomic_op_immediate: Perform an atomic read-modify-write operation on a memory location.
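
Here `op` is the binary operation combining the old value with `rhs`, and `neg` asks for the result to be negated, which is how NAND-style updates are typically expressed; the old value is returned. A source-level sketch of this fetch-and-modify family, with example orderings:

use std::sync::atomic::{AtomicU32, Ordering};

fn rmw_examples(x: &AtomicU32) {
    // Add semantics: the old value is returned.
    let _old = x.fetch_add(1, Ordering::AcqRel);
    // AND with negation of the result, i.e. the NAND case.
    let _old = x.fetch_nand(0b1111, Ordering::AcqRel);
}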

atomic_exchange_scalar: Perform an atomic exchange with a memory place and a new scalar value; the old value is returned.
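
Its source-level counterpart, as a sketch (the ordering is just an example):

use std::sync::atomic::{AtomicU32, Ordering};

fn exchange_example(x: &AtomicU32) {
    // Unconditionally store a new value and get the previous one back.
    let _old = x.swap(7, Ordering::AcqRel);
}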

atomic_min_max_scalar: Perform a conditional atomic exchange (minimum or maximum) with a memory place and a new scalar value; the old value is returned.
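
Concretely, the stored value only changes when `rhs` is smaller (with `min` set) or larger (with `min` unset) than the current value, and the old value is returned either way. A source-level sketch:

use std::sync::atomic::{AtomicI32, Ordering};

fn min_max_examples(x: &AtomicI32) {
    // Keep the smaller of the current value and 10; the old value is returned.
    let _old = x.fetch_min(10, Ordering::AcqRel);
    // Keep the larger of the current value and 10; the old value is returned.
    let _old = x.fetch_max(10, Ordering::AcqRel);
}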

atomic_compare_exchange_scalar: Perform an atomic compare-and-exchange at a given memory location. On success an atomic RMW operation is performed, and on failure only an atomic read occurs. If can_fail_spuriously is true, it is treated as a “compare_exchange_weak” operation and may fail some portion of the time even when the values are actually identical.
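
At the source level this is the difference between compare_exchange and compare_exchange_weak, each with separate success and failure orderings; a sketch:

use std::sync::atomic::{AtomicU32, Ordering};

fn cas_examples(x: &AtomicU32) {
    // Strong CAS: fails only if the current value differs from the expected one.
    let _ = x.compare_exchange(0, 1, Ordering::AcqRel, Ordering::Acquire);

    // Weak CAS: may additionally fail spuriously, so it is normally retried.
    let mut cur = x.load(Ordering::Relaxed);
    loop {
        match x.compare_exchange_weak(cur, cur + 1, Ordering::AcqRel, Ordering::Acquire) {
            Ok(_) => break,
            Err(actual) => cur = actual,
        }
    }
}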

atomic_fence: Update the data-race detector for an atomic fence on the current thread.
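
The source-level operation being modelled is a standalone fence, for example:

use std::sync::atomic::{fence, Ordering};

fn fence_example() {
    // A release fence on the current thread, handled by atomic_fence.
    fence(Ordering::Release);
}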

allow_data_races_all_threads_done: After all threads are done running, this allows data races to occur for subsequent ‘administrative’ machine accesses (that logically happen outside of the Abstract Machine).

Implementors

Evaluation context extensions.