// SPDX-License-Identifier: GPL-2.0

// Copyright (C) 2024 Google LLC.

//! Memory management.
//!
//! This module deals with managing the address space of userspace processes. Each process has an
//! instance of [`Mm`], which keeps track of multiple VMAs (virtual memory areas). Each VMA
//! corresponds to a region of memory that the userspace process can access, and the VMA lets you
//! control what happens when userspace reads or writes to that region of memory.
//!
//! C header: [`include/linux/mm.h`](srctree/include/linux/mm.h)
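//!
//! # Examples
//!
//! A minimal sketch of how the pieces fit together: pin the address space with
//! [`Mm::mmget_not_zero`], take the mmap read lock, and check whether a userspace address is
//! covered by any VMA. The function and its parameters are purely illustrative; the caller is
//! assumed to already hold a `&Mm`.
//!
//! ```
//! use kernel::mm::Mm;
//!
//! fn is_mapped(mm: &Mm, addr: usize) -> bool {
//!     // Pin the address space; this fails once `mm_users` has already dropped to zero.
//!     let mm = match mm.mmget_not_zero() {
//!         Some(mm) => mm,
//!         None => return false,
//!     };
//!
//!     // Hold the mmap read lock for the duration of the lookup.
//!     let guard = mm.mmap_read_lock();
//!
//!     // The returned `VmaRef` borrows from `guard`, so it cannot outlive the lock.
//!     guard.vma_lookup(addr).is_some()
//! }
//! ```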

use crate::{
    bindings,
    sync::aref::{ARef, AlwaysRefCounted},
    types::{NotThreadSafe, Opaque},
};
use core::{ops::Deref, ptr::NonNull};

pub mod virt;
use virt::VmaRef;

#[cfg(CONFIG_MMU)]
pub use mmput_async::MmWithUserAsync;
mod mmput_async;

/// A wrapper for the kernel's `struct mm_struct`.
///
/// This represents the address space of a userspace process, so each process has one `Mm`
/// instance. It may hold many VMAs internally.
///
/// There is a counter called `mm_users` that counts the users of the address space; this includes
/// the userspace process itself, but can also include kernel threads accessing the address space.
/// Once `mm_users` reaches zero, this indicates that the address space can be destroyed. To access
/// the address space, you must prevent `mm_users` from reaching zero while you are accessing it.
/// The [`MmWithUser`] type represents an address space where this is guaranteed, and you can
/// create one using [`mmget_not_zero`].
///
/// The `ARef<Mm>` smart pointer holds an `mmgrab` refcount. Its destructor may sleep.
///
/// # Invariants
///
/// Values of this type are always refcounted using `mmgrab`.
///
/// [`mmget_not_zero`]: Mm::mmget_not_zero
#[repr(transparent)]
pub struct Mm {
    mm: Opaque<bindings::mm_struct>,
}

// SAFETY: It is safe to call `mmdrop` on another thread than where `mmgrab` was called.
unsafe impl Send for Mm {}
// SAFETY: All methods on `Mm` can be called in parallel from several threads.
unsafe impl Sync for Mm {}

// SAFETY: By the type invariants, this type is always refcounted.
unsafe impl AlwaysRefCounted for Mm {
    #[inline]
    fn inc_ref(&self) {
        // SAFETY: The pointer is valid since self is a reference.
        unsafe { bindings::mmgrab(self.as_raw()) };
    }

    #[inline]
    unsafe fn dec_ref(obj: NonNull<Self>) {
        // SAFETY: The caller is giving up their refcount.
        unsafe { bindings::mmdrop(obj.cast().as_ptr()) };
    }
}

/// A wrapper for the kernel's `struct mm_struct`.
///
/// This type is like [`Mm`], but with non-zero `mm_users`. It can only be used when `mm_users` can
/// be proven to be non-zero at compile-time, usually because the relevant code holds an `mmget`
/// refcount. It can be used to access the associated address space.
///
/// The `ARef<MmWithUser>` smart pointer holds an `mmget` refcount. Its destructor may sleep.
///
/// # Invariants
///
/// Values of this type are always refcounted using `mmget`. The value of `mm_users` is non-zero.
#[repr(transparent)]
pub struct MmWithUser {
    mm: Mm,
}

// SAFETY: It is safe to call `mmput` on another thread than where `mmget` was called.
unsafe impl Send for MmWithUser {}
// SAFETY: All methods on `MmWithUser` can be called in parallel from several threads.
unsafe impl Sync for MmWithUser {}

// SAFETY: By the type invariants, this type is always refcounted.
unsafe impl AlwaysRefCounted for MmWithUser {
    #[inline]
    fn inc_ref(&self) {
        // SAFETY: The pointer is valid since self is a reference.
        unsafe { bindings::mmget(self.as_raw()) };
    }

    #[inline]
    unsafe fn dec_ref(obj: NonNull<Self>) {
        // SAFETY: The caller is giving up their refcount.
        unsafe { bindings::mmput(obj.cast().as_ptr()) };
    }
}

// Make all `Mm` methods available on `MmWithUser`.
impl Deref for MmWithUser {
    type Target = Mm;

    #[inline]
    fn deref(&self) -> &Mm {
        &self.mm
    }
}

// These methods are safe to call even if `mm_users` is zero.
impl Mm {
    /// Returns a raw pointer to the inner `mm_struct`.
    #[inline]
    pub fn as_raw(&self) -> *mut bindings::mm_struct {
        self.mm.get()
    }

    /// Obtain a reference from a raw pointer.
    ///
    /// # Safety
    ///
    /// The caller must ensure that `ptr` points at an `mm_struct`, and that it is not deallocated
    /// during the lifetime 'a.
    #[inline]
    pub unsafe fn from_raw<'a>(ptr: *const bindings::mm_struct) -> &'a Mm {
        // SAFETY: Caller promises that the pointer is valid for 'a. Layouts are compatible due to
        // repr(transparent).
        unsafe { &*ptr.cast() }
    }

    /// Calls `mmget_not_zero` and returns a handle if it succeeds.
    #[inline]
    pub fn mmget_not_zero(&self) -> Option<ARef<MmWithUser>> {
        // SAFETY: The pointer is valid since self is a reference.
        let success = unsafe { bindings::mmget_not_zero(self.as_raw()) };

        if success {
            // SAFETY: We just created an `mmget` refcount.
            Some(unsafe { ARef::from_raw(NonNull::new_unchecked(self.as_raw().cast())) })
        } else {
            None
        }
    }
}

// These methods require `mm_users` to be non-zero.
impl MmWithUser {
    /// Obtain a reference from a raw pointer.
    ///
    /// # Safety
    ///
    /// The caller must ensure that `ptr` points at an `mm_struct`, and that `mm_users` remains
    /// non-zero for the duration of the lifetime 'a.
    #[inline]
    pub unsafe fn from_raw<'a>(ptr: *const bindings::mm_struct) -> &'a MmWithUser {
        // SAFETY: Caller promises that the pointer is valid for 'a. The layout is compatible due
        // to repr(transparent).
        unsafe { &*ptr.cast() }
    }

    /// Attempt to access a vma using the vma read lock.
    ///
    /// This is an optimistic trylock operation, so it may fail if there is contention. In that
    /// case, you should fall back to taking the mmap read lock.
    ///
    /// When per-vma locks are disabled, this always returns `None`.
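    ///
    /// # Examples
    ///
    /// A sketch of the fallback pattern described above: try the per-vma read lock first, and
    /// take the mmap read lock only if that fails. The function itself is purely illustrative,
    /// and the `start`/`end` accessors on the returned VMA are the ones provided by
    /// [`virt::VmaRef`].
    ///
    /// ```
    /// use kernel::mm::MmWithUser;
    ///
    /// fn vma_size(mm: &MmWithUser, vma_addr: usize) -> Option<usize> {
    ///     // Fast path: the optimistic per-vma read lock.
    ///     if let Some(vma) = mm.lock_vma_under_rcu(vma_addr) {
    ///         return Some(vma.end() - vma.start());
    ///     }
    ///
    ///     // Slow path: contention, or per-vma locks are disabled, so fall back to the mmap
    ///     // read lock.
    ///     let guard = mm.mmap_read_lock();
    ///     let vma = guard.vma_lookup(vma_addr)?;
    ///     Some(vma.end() - vma.start())
    /// }
    /// ```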
    #[inline]
    pub fn lock_vma_under_rcu(&self, vma_addr: usize) -> Option<VmaReadGuard<'_>> {
        #[cfg(CONFIG_PER_VMA_LOCK)]
        {
            // SAFETY: Calling `bindings::lock_vma_under_rcu` is always okay given an mm where
            // `mm_users` is non-zero.
            let vma = unsafe { bindings::lock_vma_under_rcu(self.as_raw(), vma_addr) };
            if !vma.is_null() {
                return Some(VmaReadGuard {
                    // SAFETY: If `lock_vma_under_rcu` returns a non-null ptr, then it points at a
                    // valid vma. The vma is stable for as long as the vma read lock is held.
                    vma: unsafe { VmaRef::from_raw(vma) },
                    _nts: NotThreadSafe,
                });
            }
        }

        // Silence warnings about unused variables.
        #[cfg(not(CONFIG_PER_VMA_LOCK))]
        let _ = vma_addr;

        None
    }

    /// Lock the mmap read lock.
    #[inline]
    pub fn mmap_read_lock(&self) -> MmapReadGuard<'_> {
        // SAFETY: The pointer is valid since self is a reference.
        unsafe { bindings::mmap_read_lock(self.as_raw()) };

        // INVARIANT: We just acquired the read lock.
        MmapReadGuard {
            mm: self,
            _nts: NotThreadSafe,
        }
    }

    /// Try to lock the mmap read lock.
    #[inline]
    pub fn mmap_read_trylock(&self) -> Option<MmapReadGuard<'_>> {
        // SAFETY: The pointer is valid since self is a reference.
        let success = unsafe { bindings::mmap_read_trylock(self.as_raw()) };

        if success {
            // INVARIANT: We just acquired the read lock.
            Some(MmapReadGuard {
                mm: self,
                _nts: NotThreadSafe,
            })
        } else {
            None
        }
    }
}

/// A guard for the mmap read lock.
///
/// # Invariants
///
/// This `MmapReadGuard` guard owns the mmap read lock.
pub struct MmapReadGuard<'a> {
    mm: &'a MmWithUser,
    // `mmap_read_lock` and `mmap_read_unlock` must be called on the same thread
    _nts: NotThreadSafe,
}

impl<'a> MmapReadGuard<'a> {
    /// Look up a vma at the given address.
    #[inline]
    pub fn vma_lookup(&self, vma_addr: usize) -> Option<&virt::VmaRef> {
        // SAFETY: By the type invariants we hold the mmap read guard, so we can safely call this
        // method. Any value is okay for `vma_addr`.
        let vma = unsafe { bindings::vma_lookup(self.mm.as_raw(), vma_addr) };

        if vma.is_null() {
            None
        } else {
            // SAFETY: We just checked that a vma was found, so the pointer references a valid vma.
            //
            // Furthermore, the returned vma is still under the protection of the read lock guard
            // and can be used while the mmap read lock is still held. That the vma is not used
            // after the MmapReadGuard gets dropped is enforced by the borrow-checker.
            unsafe { Some(virt::VmaRef::from_raw(vma)) }
        }
    }
}

impl Drop for MmapReadGuard<'_> {
    #[inline]
    fn drop(&mut self) {
        // SAFETY: We hold the read lock by the type invariants.
        unsafe { bindings::mmap_read_unlock(self.mm.as_raw()) };
    }
}

/// A guard for the vma read lock.
///
/// # Invariants
///
/// This `VmaReadGuard` guard owns the vma read lock.
pub struct VmaReadGuard<'a> {
    vma: &'a VmaRef,
    // `vma_end_read` must be called on the same thread as where the lock was taken
    _nts: NotThreadSafe,
}

// Make all `VmaRef` methods available on `VmaReadGuard`.
impl Deref for VmaReadGuard<'_> {
    type Target = VmaRef;

    #[inline]
    fn deref(&self) -> &VmaRef {
        self.vma
    }
}

impl Drop for VmaReadGuard<'_> {
    #[inline]
    fn drop(&mut self) {
        // SAFETY: We hold the read lock by the type invariants.
        unsafe { bindings::vma_end_read(self.vma.as_ptr()) };
    }
}