GitHub Repository: torvalds/linux
Path: blob/master/mm/damon/paddr.c
// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Code for The Physical Address Space
 *
 * Author: SeongJae Park <[email protected]>
 */

#define pr_fmt(fmt) "damon-pa: " fmt

#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/memory-tiers.h>
#include <linux/mm_inline.h>

#include "../internal.h"
#include "ops-common.h"

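/*
 * Convert a DAMON core-layer address into a physical address by scaling it
 * with the given address unit.  The scaling lets 'unsigned long' core
 * addresses cover physical address spaces larger than the type can directly
 * represent, e.g., more than 4 GiB on 32-bit systems.
 */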
static phys_addr_t damon_pa_phys_addr(
		unsigned long addr, unsigned long addr_unit)
{
	return (phys_addr_t)addr * addr_unit;
}

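/*
 * Convert a physical address back into a DAMON core-layer address; the
 * inverse of damon_pa_phys_addr().
 */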
static unsigned long damon_pa_core_addr(
		phys_addr_t pa, unsigned long addr_unit)
{
	/*
	 * Use div_u64() to avoid link errors involving __udivdi3,
	 * __aeabi_uldivmod, or similar helpers for 64-bit division on 32-bit
	 * systems.  It can also be faster (see the div_u64() comment for
	 * details).
	 */
	if (sizeof(pa) == 8 && sizeof(addr_unit) == 4)
		return div_u64(pa, addr_unit);
	return pa / addr_unit;
}

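/*
 * Clear the accessed state of the folio containing @paddr, so that a
 * following damon_pa_young() call can tell whether it was accessed since.
 */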
static void damon_pa_mkold(phys_addr_t paddr)
{
	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));

	if (!folio)
		return;

	damon_folio_mkold(folio);
	folio_put(folio);
}

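/*
 * Pick a random sampling address within the region and mark the folio at
 * that address as old (not accessed).
 */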
static void __damon_pa_prepare_access_check(struct damon_region *r,
		unsigned long addr_unit)
{
	r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

	damon_pa_mkold(damon_pa_phys_addr(r->sampling_addr, addr_unit));
}

static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			__damon_pa_prepare_access_check(r, ctx->addr_unit);
	}
}

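/*
 * Check whether the folio containing @paddr was accessed since the last call
 * to damon_pa_mkold() for it, and report the folio size via @folio_sz.
 */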
static bool damon_pa_young(phys_addr_t paddr, unsigned long *folio_sz)
{
	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
	bool accessed;

	if (!folio)
		return false;

	accessed = damon_folio_young(folio);
	*folio_sz = folio_size(folio);
	folio_put(folio);
	return accessed;
}

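/*
 * Check access to the sampling address of the region and update the region's
 * access rate.  The last result is cached in static variables, so regions
 * whose sampling addresses fall in the same folio as the previously checked
 * one reuse that result instead of re-checking.
 */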
static void __damon_pa_check_access(struct damon_region *r,
		struct damon_attrs *attrs, unsigned long addr_unit)
{
	static phys_addr_t last_addr;
	static unsigned long last_folio_sz = PAGE_SIZE;
	static bool last_accessed;
	phys_addr_t sampling_addr = damon_pa_phys_addr(
			r->sampling_addr, addr_unit);

	/* If the region is in the last checked page, reuse the result */
	if (ALIGN_DOWN(last_addr, last_folio_sz) ==
			ALIGN_DOWN(sampling_addr, last_folio_sz)) {
		damon_update_region_access_rate(r, last_accessed, attrs);
		return;
	}

	last_accessed = damon_pa_young(sampling_addr, &last_folio_sz);
	damon_update_region_access_rate(r, last_accessed, attrs);

	last_addr = sampling_addr;
}

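/*
 * Check access to every region of every target, and return the maximum
 * nr_accesses among all regions of the context.
 */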
static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned int max_nr_accesses = 0;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t) {
			__damon_pa_check_access(
					r, &ctx->attrs, ctx->addr_unit);
			max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
		}
	}

	return max_nr_accesses;
}

/*
 * damos_pa_filter_out - Return true if the folio should be filtered out.
 */
static bool damos_pa_filter_out(struct damos *scheme, struct folio *folio)
{
	struct damos_filter *filter;

	if (scheme->core_filters_allowed)
		return false;

	damos_for_each_ops_filter(filter, scheme) {
		if (damos_folio_filter_match(filter, folio))
			return !filter->allow;
	}
	return scheme->ops_filters_default_reject;
}

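/*
 * Return true if the folio is invalid as a DAMOS apply target: it is NULL,
 * or it is the folio that the scheme was last applied to, which would
 * otherwise be processed twice.  Drops the folio reference in the latter
 * case.
 */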
static bool damon_pa_invalid_damos_folio(struct folio *folio, struct damos *s)
{
	if (!folio)
		return true;
	if (folio == s->last_applied) {
		folio_put(folio);
		return true;
	}
	return false;
}

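/*
 * Apply the DAMOS_PAGEOUT action to the region: isolate filter-passing
 * folios and reclaim them via reclaim_pages().  A young-pages filter is
 * temporarily installed unless the scheme already has one, so that recently
 * accessed folios are not paged out.  Returns the reclaimed size in
 * core-layer address units.
 */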
static unsigned long damon_pa_pageout(struct damon_region *r,
		unsigned long addr_unit, struct damos *s,
		unsigned long *sz_filter_passed)
{
	phys_addr_t addr, applied;
	LIST_HEAD(folio_list);
	bool install_young_filter = true;
	struct damos_filter *filter;
	struct folio *folio;

	/* Re-check access at page granularity by default */
	damos_for_each_ops_filter(filter, s) {
		if (filter->type == DAMOS_FILTER_TYPE_YOUNG) {
			install_young_filter = false;
			break;
		}
	}
	if (install_young_filter) {
		filter = damos_new_filter(
				DAMOS_FILTER_TYPE_YOUNG, true, false);
		if (!filter)
			return 0;
		damos_add_filter(s, filter);
	}

	addr = damon_pa_phys_addr(r->ar.start, addr_unit);
	while (addr < damon_pa_phys_addr(r->ar.end, addr_unit)) {
		folio = damon_get_folio(PHYS_PFN(addr));
		if (damon_pa_invalid_damos_folio(folio, s)) {
			addr += PAGE_SIZE;
			continue;
		}

		if (damos_pa_filter_out(s, folio))
			goto put_folio;
		else
			*sz_filter_passed += folio_size(folio) / addr_unit;

		folio_clear_referenced(folio);
		folio_test_clear_young(folio);
		if (!folio_isolate_lru(folio))
			goto put_folio;
		if (folio_test_unevictable(folio))
			folio_putback_lru(folio);
		else
			list_add(&folio->lru, &folio_list);
put_folio:
		addr += folio_size(folio);
		folio_put(folio);
	}
	if (install_young_filter)
		damos_destroy_filter(filter);
	applied = reclaim_pages(&folio_list);
	cond_resched();
	s->last_applied = folio;
	return damon_pa_core_addr(applied * PAGE_SIZE, addr_unit);
}

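/*
 * Common implementation of the DAMOS_LRU_PRIO and DAMOS_LRU_DEPRIO actions:
 * mark filter-passing folios in the region as accessed, or deactivate them,
 * depending on @mark_accessed.  Returns the size of the handled folios in
 * core-layer address units.
 */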
static inline unsigned long damon_pa_mark_accessed_or_deactivate(
		struct damon_region *r, unsigned long addr_unit,
		struct damos *s, bool mark_accessed,
		unsigned long *sz_filter_passed)
{
	phys_addr_t addr, applied = 0;
	struct folio *folio;

	addr = damon_pa_phys_addr(r->ar.start, addr_unit);
	while (addr < damon_pa_phys_addr(r->ar.end, addr_unit)) {
		folio = damon_get_folio(PHYS_PFN(addr));
		if (damon_pa_invalid_damos_folio(folio, s)) {
			addr += PAGE_SIZE;
			continue;
		}

		if (damos_pa_filter_out(s, folio))
			goto put_folio;
		else
			*sz_filter_passed += folio_size(folio) / addr_unit;

		if (mark_accessed)
			folio_mark_accessed(folio);
		else
			folio_deactivate(folio);
		applied += folio_nr_pages(folio);
put_folio:
		addr += folio_size(folio);
		folio_put(folio);
	}
	s->last_applied = folio;
	return damon_pa_core_addr(applied * PAGE_SIZE, addr_unit);
}

static unsigned long damon_pa_mark_accessed(struct damon_region *r,
		unsigned long addr_unit, struct damos *s,
		unsigned long *sz_filter_passed)
{
	return damon_pa_mark_accessed_or_deactivate(r, addr_unit, s, true,
			sz_filter_passed);
}

static unsigned long damon_pa_deactivate_pages(struct damon_region *r,
		unsigned long addr_unit, struct damos *s,
		unsigned long *sz_filter_passed)
{
	return damon_pa_mark_accessed_or_deactivate(r, addr_unit, s, false,
			sz_filter_passed);
}

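/*
 * Apply the DAMOS_MIGRATE_HOT or DAMOS_MIGRATE_COLD action: isolate
 * filter-passing folios in the region and migrate them to the scheme's
 * target NUMA node.  Returns the migrated size in core-layer address units.
 */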
static unsigned long damon_pa_migrate(struct damon_region *r,
		unsigned long addr_unit, struct damos *s,
		unsigned long *sz_filter_passed)
{
	phys_addr_t addr, applied;
	LIST_HEAD(folio_list);
	struct folio *folio;

	addr = damon_pa_phys_addr(r->ar.start, addr_unit);
	while (addr < damon_pa_phys_addr(r->ar.end, addr_unit)) {
		folio = damon_get_folio(PHYS_PFN(addr));
		if (damon_pa_invalid_damos_folio(folio, s)) {
			addr += PAGE_SIZE;
			continue;
		}

		if (damos_pa_filter_out(s, folio))
			goto put_folio;
		else
			*sz_filter_passed += folio_size(folio) / addr_unit;

		if (!folio_isolate_lru(folio))
			goto put_folio;
		list_add(&folio->lru, &folio_list);
put_folio:
		addr += folio_size(folio);
		folio_put(folio);
	}
	applied = damon_migrate_pages(&folio_list, s->target_nid);
	cond_resched();
	s->last_applied = folio;
	return damon_pa_core_addr(applied * PAGE_SIZE, addr_unit);
}

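/*
 * Apply the DAMOS_STAT action, which only accounts the size of the
 * filter-passing folios in the region.  Does nothing if the scheme has no
 * ops-layer filter.
 */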
static unsigned long damon_pa_stat(struct damon_region *r,
		unsigned long addr_unit, struct damos *s,
		unsigned long *sz_filter_passed)
{
	phys_addr_t addr;
	struct folio *folio;

	if (!damos_ops_has_filter(s))
		return 0;

	addr = damon_pa_phys_addr(r->ar.start, addr_unit);
	while (addr < damon_pa_phys_addr(r->ar.end, addr_unit)) {
		folio = damon_get_folio(PHYS_PFN(addr));
		if (damon_pa_invalid_damos_folio(folio, s)) {
			addr += PAGE_SIZE;
			continue;
		}

		if (!damos_pa_filter_out(s, folio))
			*sz_filter_passed += folio_size(folio) / addr_unit;
		addr += folio_size(folio);
		folio_put(folio);
	}
	s->last_applied = folio;
	return 0;
}

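/*
 * Apply the action of the scheme to the region, and return the size of the
 * memory the action was applied to, in core-layer address units.
 */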
static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme, unsigned long *sz_filter_passed)
{
	unsigned long aunit = ctx->addr_unit;

	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_pa_pageout(r, aunit, scheme, sz_filter_passed);
	case DAMOS_LRU_PRIO:
		return damon_pa_mark_accessed(r, aunit, scheme,
				sz_filter_passed);
	case DAMOS_LRU_DEPRIO:
		return damon_pa_deactivate_pages(r, aunit, scheme,
				sz_filter_passed);
	case DAMOS_MIGRATE_HOT:
	case DAMOS_MIGRATE_COLD:
		return damon_pa_migrate(r, aunit, scheme, sz_filter_passed);
	case DAMOS_STAT:
		return damon_pa_stat(r, aunit, scheme, sz_filter_passed);
	default:
		/* DAMOS actions that are not yet supported by 'paddr'. */
		break;
	}
	return 0;
}

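/*
 * Return the priority score of the region for the scheme.  Colder regions
 * score higher for cold-targeting actions (pageout, LRU-deprioritization,
 * cold migration), while hotter regions score higher for hot-targeting ones
 * (LRU-prioritization, hot migration).
 */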
static int damon_pa_scheme_score(struct damon_ctx *context,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_cold_score(context, r, scheme);
	case DAMOS_LRU_PRIO:
		return damon_hot_score(context, r, scheme);
	case DAMOS_LRU_DEPRIO:
		return damon_cold_score(context, r, scheme);
	case DAMOS_MIGRATE_HOT:
		return damon_hot_score(context, r, scheme);
	case DAMOS_MIGRATE_COLD:
		return damon_cold_score(context, r, scheme);
	default:
		break;
	}

	return DAMOS_MAX_SCORE;
}

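/*
 * Register the monitoring operations for the physical address space.  The
 * callbacks that 'paddr' does not need, including init, update, target
 * validation, and cleanup, are left NULL.
 */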
static int __init damon_pa_initcall(void)
{
	struct damon_operations ops = {
		.id = DAMON_OPS_PADDR,
		.init = NULL,
		.update = NULL,
		.prepare_access_checks = damon_pa_prepare_access_checks,
		.check_accesses = damon_pa_check_accesses,
		.target_valid = NULL,
		.cleanup = NULL,
		.apply_scheme = damon_pa_apply_scheme,
		.get_scheme_score = damon_pa_scheme_score,
	};

	return damon_register_ops(&ops);
}

subsys_initcall(damon_pa_initcall);