// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2024 Google LLC.

//! Memory management.
//!
//! This module deals with managing the address space of userspace processes. Each process has an
//! instance of [`Mm`], which keeps track of multiple VMAs (virtual memory areas). Each VMA
//! corresponds to a region of memory that the userspace process can access, and the VMA lets you
//! control what happens when userspace reads or writes to that region of memory.
//!
//! C header: [`include/linux/mm.h`](srctree/include/linux/mm.h)
use crate::{
    bindings,
    sync::aref::{ARef, AlwaysRefCounted},
    types::{NotThreadSafe, Opaque},
};
use core::{ops::Deref, ptr::NonNull};

pub mod virt;
use virt::VmaRef;

#[cfg(CONFIG_MMU)]
pub use mmput_async::MmWithUserAsync;
mod mmput_async;
/// A wrapper for the kernel's `struct mm_struct`.
///
/// This represents the address space of a userspace process, so each process has one `Mm`
/// instance. It may hold many VMAs internally.
///
/// There is a counter called `mm_users` that counts the users of the address space; this includes
/// the userspace process itself, but can also include kernel threads accessing the address space.
/// Once `mm_users` reaches zero, this indicates that the address space can be destroyed. To access
/// the address space, you must prevent `mm_users` from reaching zero while you are accessing it.
/// The [`MmWithUser`] type represents an address space where this is guaranteed, and you can
/// create one using [`mmget_not_zero`].
///
/// The `ARef<Mm>` smart pointer holds an `mmgrab` refcount. Its destructor may sleep.
///
/// # Invariants
///
/// Values of this type are always refcounted using `mmgrab`.
///
/// [`mmget_not_zero`]: Mm::mmget_not_zero
#[repr(transparent)]
pub struct Mm {
    // Wrapped in `Opaque` since the inner `mm_struct` is owned and mutated by C code; access it
    // only through the raw pointer returned by `as_raw`.
    mm: Opaque<bindings::mm_struct>,
}
// SAFETY: It is safe to call `mmdrop` on another thread than where `mmgrab` was called.
unsafe impl Send for Mm {}
// SAFETY: All methods on `Mm` can be called in parallel from several threads.
unsafe impl Sync for Mm {}
// SAFETY: By the type invariants, this type is always refcounted.
unsafe impl AlwaysRefCounted for Mm {
    #[inline]
    fn inc_ref(&self) {
        // SAFETY: The pointer is valid since self is a reference.
        unsafe { bindings::mmgrab(self.as_raw()) };
    }

    #[inline]
    unsafe fn dec_ref(obj: NonNull<Self>) {
        // SAFETY: The caller is giving up their refcount. This releases the `mmgrab` refcount
        // matching the one taken by `inc_ref` (or by whoever created the `ARef`).
        unsafe { bindings::mmdrop(obj.cast().as_ptr()) };
    }
}
/// A wrapper for the kernel's `struct mm_struct`.
///
/// This type is like [`Mm`], but with non-zero `mm_users`. It can only be used when `mm_users` can
/// be proven to be non-zero at compile-time, usually because the relevant code holds an `mmget`
/// refcount. It can be used to access the associated address space.
///
/// The `ARef<MmWithUser>` smart pointer holds an `mmget` refcount. Its destructor may sleep.
///
/// # Invariants
///
/// Values of this type are always refcounted using `mmget`. The value of `mm_users` is non-zero.
#[repr(transparent)]
pub struct MmWithUser {
    // Layout-compatible with `Mm` (both are `repr(transparent)` over `mm_struct`), which is what
    // makes the pointer casts in `from_raw`/`mmget_not_zero` sound.
    mm: Mm,
}
// SAFETY: It is safe to call `mmput` on another thread than where `mmget` was called.
unsafe impl Send for MmWithUser {}
// SAFETY: All methods on `MmWithUser` can be called in parallel from several threads.
unsafe impl Sync for MmWithUser {}
// SAFETY: By the type invariants, this type is always refcounted.
unsafe impl AlwaysRefCounted for MmWithUser {
    #[inline]
    fn inc_ref(&self) {
        // SAFETY: The pointer is valid since self is a reference. `mm_users` is non-zero by the
        // type invariant, so `mmget` (as opposed to `mmget_not_zero`) is appropriate here.
        unsafe { bindings::mmget(self.as_raw()) };
    }

    #[inline]
    unsafe fn dec_ref(obj: NonNull<Self>) {
        // SAFETY: The caller is giving up their refcount.
        unsafe { bindings::mmput(obj.cast().as_ptr()) };
    }
}
// Make all `Mm` methods available on `MmWithUser`.
impl Deref for MmWithUser {
    type Target = Mm;

    #[inline]
    fn deref(&self) -> &Mm {
        &self.mm
    }
}
// These methods are safe to call even if `mm_users` is zero.
impl Mm {
    /// Returns a raw pointer to the inner `mm_struct`.
    #[inline]
    pub fn as_raw(&self) -> *mut bindings::mm_struct {
        self.mm.get()
    }

    /// Obtain a reference from a raw pointer.
    ///
    /// # Safety
    ///
    /// The caller must ensure that `ptr` points at an `mm_struct`, and that it is not deallocated
    /// during the lifetime 'a.
    #[inline]
    pub unsafe fn from_raw<'a>(ptr: *const bindings::mm_struct) -> &'a Mm {
        // SAFETY: Caller promises that the pointer is valid for 'a. Layouts are compatible due to
        // repr(transparent).
        unsafe { &*ptr.cast() }
    }

    /// Calls `mmget_not_zero` and returns a handle if it succeeds.
    ///
    /// Returns `None` once `mm_users` has already dropped to zero, i.e. once the address space is
    /// being torn down and may no longer be accessed.
    #[inline]
    pub fn mmget_not_zero(&self) -> Option<ARef<MmWithUser>> {
        // SAFETY: The pointer is valid since self is a reference.
        let success = unsafe { bindings::mmget_not_zero(self.as_raw()) };

        if success {
            // SAFETY: We just created an `mmget` refcount. The cast to `MmWithUser` is okay
            // because the two types are layout-compatible via repr(transparent).
            Some(unsafe { ARef::from_raw(NonNull::new_unchecked(self.as_raw().cast())) })
        } else {
            None
        }
    }
}
// These methods require `mm_users` to be non-zero.
impl MmWithUser {
    /// Obtain a reference from a raw pointer.
    ///
    /// # Safety
    ///
    /// The caller must ensure that `ptr` points at an `mm_struct`, and that `mm_users` remains
    /// non-zero for the duration of the lifetime 'a.
    #[inline]
    pub unsafe fn from_raw<'a>(ptr: *const bindings::mm_struct) -> &'a MmWithUser {
        // SAFETY: Caller promises that the pointer is valid for 'a. The layout is compatible due
        // to repr(transparent).
        unsafe { &*ptr.cast() }
    }

    /// Attempt to access a vma using the vma read lock.
    ///
    /// This is an optimistic trylock operation, so it may fail if there is contention. In that
    /// case, you should fall back to taking the mmap read lock.
    ///
    /// When per-vma locks are disabled, this always returns `None`.
    #[inline]
    pub fn lock_vma_under_rcu(&self, vma_addr: usize) -> Option<VmaReadGuard<'_>> {
        #[cfg(CONFIG_PER_VMA_LOCK)]
        {
            // SAFETY: Calling `bindings::lock_vma_under_rcu` is always okay given an mm where
            // `mm_users` is non-zero.
            let vma = unsafe { bindings::lock_vma_under_rcu(self.as_raw(), vma_addr) };
            if !vma.is_null() {
                return Some(VmaReadGuard {
                    // SAFETY: If `lock_vma_under_rcu` returns a non-null ptr, then it points at a
                    // valid vma. The vma is stable for as long as the vma read lock is held.
                    vma: unsafe { VmaRef::from_raw(vma) },
                    _nts: NotThreadSafe,
                });
            }
        }

        // Silence warnings about unused variables.
        #[cfg(not(CONFIG_PER_VMA_LOCK))]
        let _ = vma_addr;

        None
    }

    /// Lock the mmap read lock.
    #[inline]
    pub fn mmap_read_lock(&self) -> MmapReadGuard<'_> {
        // SAFETY: The pointer is valid since self is a reference.
        unsafe { bindings::mmap_read_lock(self.as_raw()) };

        // INVARIANT: We just acquired the read lock.
        MmapReadGuard {
            mm: self,
            _nts: NotThreadSafe,
        }
    }

    /// Try to lock the mmap read lock.
    ///
    /// Returns `None` if the lock could not be acquired without blocking.
    #[inline]
    pub fn mmap_read_trylock(&self) -> Option<MmapReadGuard<'_>> {
        // SAFETY: The pointer is valid since self is a reference.
        let success = unsafe { bindings::mmap_read_trylock(self.as_raw()) };

        if success {
            // INVARIANT: We just acquired the read lock.
            Some(MmapReadGuard {
                mm: self,
                _nts: NotThreadSafe,
            })
        } else {
            None
        }
    }
}
/// A guard for the mmap read lock.
///
/// The lock is released when the guard is dropped.
///
/// # Invariants
///
/// This `MmapReadGuard` guard owns the mmap read lock.
pub struct MmapReadGuard<'a> {
    mm: &'a MmWithUser,
    // `mmap_read_lock` and `mmap_read_unlock` must be called on the same thread
    _nts: NotThreadSafe,
}
impl<'a> MmapReadGuard<'a> {
    /// Look up a vma at the given address.
    ///
    /// Returns `None` if no vma contains the address.
    #[inline]
    pub fn vma_lookup(&self, vma_addr: usize) -> Option<&virt::VmaRef> {
        // SAFETY: By the type invariants we hold the mmap read guard, so we can safely call this
        // method. Any value is okay for `vma_addr`.
        let vma = unsafe { bindings::vma_lookup(self.mm.as_raw(), vma_addr) };

        if vma.is_null() {
            None
        } else {
            // SAFETY: We just checked that a vma was found, so the pointer references a valid vma.
            //
            // Furthermore, the returned vma is still under the protection of the read lock guard
            // and can be used while the mmap read lock is still held. That the vma is not used
            // after the MmapReadGuard gets dropped is enforced by the borrow-checker.
            unsafe { Some(virt::VmaRef::from_raw(vma)) }
        }
    }
}
impl Drop for MmapReadGuard<'_> {
    #[inline]
    fn drop(&mut self) {
        // SAFETY: We hold the read lock by the type invariants.
        unsafe { bindings::mmap_read_unlock(self.mm.as_raw()) };
    }
}
/// A guard for the vma read lock.
///
/// The lock is released when the guard is dropped.
///
/// # Invariants
///
/// This `VmaReadGuard` guard owns the vma read lock.
pub struct VmaReadGuard<'a> {
    vma: &'a VmaRef,
    // `vma_end_read` must be called on the same thread as where the lock was taken
    _nts: NotThreadSafe,
}
// Make all `VmaRef` methods available on `VmaReadGuard`.
impl Deref for VmaReadGuard<'_> {
    type Target = VmaRef;

    #[inline]
    fn deref(&self) -> &VmaRef {
        self.vma
    }
}
impl Drop for VmaReadGuard<'_> {
    #[inline]
    fn drop(&mut self) {
        // SAFETY: We hold the read lock by the type invariants.
        unsafe { bindings::vma_end_read(self.vma.as_ptr()) };
    }
}