GitHub Repository: torvalds/linux
Path: blob/master/mm/damon/core.c
1
// SPDX-License-Identifier: GPL-2.0
2
/*
3
* Data Access Monitor
4
*
5
* Author: SeongJae Park <[email protected]>
6
*/
7
8
#define pr_fmt(fmt) "damon: " fmt
9
10
#include <linux/damon.h>
11
#include <linux/delay.h>
12
#include <linux/kthread.h>
13
#include <linux/memcontrol.h>
14
#include <linux/mm.h>
15
#include <linux/psi.h>
16
#include <linux/slab.h>
17
#include <linux/string.h>
18
#include <linux/string_choices.h>
19
20
#define CREATE_TRACE_POINTS
21
#include <trace/events/damon.h>
22
23
static DEFINE_MUTEX(damon_lock);
24
static int nr_running_ctxs;
25
static bool running_exclusive_ctxs;
26
27
static DEFINE_MUTEX(damon_ops_lock);
28
static struct damon_operations damon_registered_ops[NR_DAMON_OPS];
29
30
static struct kmem_cache *damon_region_cache __ro_after_init;
31
32
/* Should be called under damon_ops_lock with id smaller than NR_DAMON_OPS */
33
static bool __damon_is_registered_ops(enum damon_ops_id id)
34
{
35
struct damon_operations empty_ops = {};
36
37
if (!memcmp(&empty_ops, &damon_registered_ops[id], sizeof(empty_ops)))
38
return false;
39
return true;
40
}
41
42
/**
43
* damon_is_registered_ops() - Check if a given damon_operations is registered.
44
* @id: Id of the damon_operations to check if registered.
45
*
46
* Return: true if the ops is set, false otherwise.
47
*/
48
bool damon_is_registered_ops(enum damon_ops_id id)
49
{
50
bool registered;
51
52
if (id >= NR_DAMON_OPS)
53
return false;
54
mutex_lock(&damon_ops_lock);
55
registered = __damon_is_registered_ops(id);
56
mutex_unlock(&damon_ops_lock);
57
return registered;
58
}
59
60
/**
61
* damon_register_ops() - Register a monitoring operations set to DAMON.
62
* @ops: monitoring operations set to register.
63
*
64
* This function registers a monitoring operations set of valid &struct
65
* damon_operations->id so that others can find and use it later.
66
*
67
* Return: 0 on success, negative error code otherwise.
68
*/
69
int damon_register_ops(struct damon_operations *ops)
70
{
71
int err = 0;
72
73
if (ops->id >= NR_DAMON_OPS)
74
return -EINVAL;
75
76
mutex_lock(&damon_ops_lock);
77
/* Fail for already registered ops */
78
if (__damon_is_registered_ops(ops->id))
79
err = -EINVAL;
80
else
81
damon_registered_ops[ops->id] = *ops;
82
mutex_unlock(&damon_ops_lock);
83
return err;
84
}
85
86
/**
87
* damon_select_ops() - Select a monitoring operations to use with the context.
88
* @ctx: monitoring context to use the operations.
89
* @id: id of the registered monitoring operations to select.
90
*
91
* This function finds the registered monitoring operations set of @id and makes
92
* @ctx use it.
93
*
94
* Return: 0 on success, negative error code otherwise.
95
*/
96
int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id)
97
{
98
int err = 0;
99
100
if (id >= NR_DAMON_OPS)
101
return -EINVAL;
102
103
mutex_lock(&damon_ops_lock);
104
if (!__damon_is_registered_ops(id))
105
err = -EINVAL;
106
else
107
ctx->ops = damon_registered_ops[id];
108
mutex_unlock(&damon_ops_lock);
109
return err;
110
}
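/*
 * Example usage (an illustrative sketch): registering a hypothetical
 * monitoring operations set and selecting it for a context.  The 'my_*'
 * callbacks are placeholders, and the callback field names and the
 * DAMON_OPS_PADDR id are assumed from struct damon_operations and
 * enum damon_ops_id in <linux/damon.h>.
 *
 *	static struct damon_operations my_ops = {
 *		.id = DAMON_OPS_PADDR,
 *		.prepare_access_checks = my_prepare_access_checks,
 *		.check_accesses = my_check_accesses,
 *	};
 *
 *	static int my_setup_ops(struct damon_ctx *ctx)
 *	{
 *		int err = damon_register_ops(&my_ops);
 *
 *		if (err)
 *			return err;
 *		return damon_select_ops(ctx, DAMON_OPS_PADDR);
 *	}
 */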
111
112
/*
113
* Construct a damon_region struct
114
*
115
* Returns the pointer to the new struct on success, or NULL otherwise
116
*/
117
struct damon_region *damon_new_region(unsigned long start, unsigned long end)
118
{
119
struct damon_region *region;
120
121
region = kmem_cache_alloc(damon_region_cache, GFP_KERNEL);
122
if (!region)
123
return NULL;
124
125
region->ar.start = start;
126
region->ar.end = end;
127
region->nr_accesses = 0;
128
region->nr_accesses_bp = 0;
129
INIT_LIST_HEAD(&region->list);
130
131
region->age = 0;
132
region->last_nr_accesses = 0;
133
134
return region;
135
}
136
137
void damon_add_region(struct damon_region *r, struct damon_target *t)
138
{
139
list_add_tail(&r->list, &t->regions_list);
140
t->nr_regions++;
141
}
142
143
static void damon_del_region(struct damon_region *r, struct damon_target *t)
144
{
145
list_del(&r->list);
146
t->nr_regions--;
147
}
148
149
static void damon_free_region(struct damon_region *r)
150
{
151
kmem_cache_free(damon_region_cache, r);
152
}
153
154
void damon_destroy_region(struct damon_region *r, struct damon_target *t)
155
{
156
damon_del_region(r, t);
157
damon_free_region(r);
158
}
159
160
static bool damon_is_last_region(struct damon_region *r,
161
struct damon_target *t)
162
{
163
return list_is_last(&r->list, &t->regions_list);
164
}
165
166
/*
167
* Check whether a region is intersecting an address range
168
*
169
* Returns true if it is.
170
*/
171
static bool damon_intersect(struct damon_region *r,
172
struct damon_addr_range *re)
173
{
174
return !(r->ar.end <= re->start || re->end <= r->ar.start);
175
}
176
177
/*
178
* Fill holes in regions with new regions.
179
*/
180
static int damon_fill_regions_holes(struct damon_region *first,
181
struct damon_region *last, struct damon_target *t)
182
{
183
struct damon_region *r = first;
184
185
damon_for_each_region_from(r, t) {
186
struct damon_region *next, *newr;
187
188
if (r == last)
189
break;
190
next = damon_next_region(r);
191
if (r->ar.end != next->ar.start) {
192
newr = damon_new_region(r->ar.end, next->ar.start);
193
if (!newr)
194
return -ENOMEM;
195
damon_insert_region(newr, r, next, t);
196
}
197
}
198
return 0;
199
}
200
201
/*
202
* damon_set_regions() - Set regions of a target for given address ranges.
203
* @t: the given target.
204
* @ranges: array of new monitoring target ranges.
205
* @nr_ranges: length of @ranges.
206
* @min_region_sz: minimum region size.
207
*
208
* This function adds new regions to, or modifies existing regions of, a
209
* monitoring target to fit in specific ranges.
210
*
211
* Return: 0 on success, or negative error code otherwise.
212
*/
213
int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
214
unsigned int nr_ranges, unsigned long min_region_sz)
215
{
216
struct damon_region *r, *next;
217
unsigned int i;
218
int err;
219
220
/* Remove regions which are not in the new ranges */
221
damon_for_each_region_safe(r, next, t) {
222
for (i = 0; i < nr_ranges; i++) {
223
if (damon_intersect(r, &ranges[i]))
224
break;
225
}
226
if (i == nr_ranges)
227
damon_destroy_region(r, t);
228
}
229
230
r = damon_first_region(t);
231
/* Add new regions or resize existing regions to fit in the ranges */
232
for (i = 0; i < nr_ranges; i++) {
233
struct damon_region *first = NULL, *last, *newr;
234
struct damon_addr_range *range;
235
236
range = &ranges[i];
237
/* Get the first/last regions intersecting with the range */
238
damon_for_each_region_from(r, t) {
239
if (damon_intersect(r, range)) {
240
if (!first)
241
first = r;
242
last = r;
243
}
244
if (r->ar.start >= range->end)
245
break;
246
}
247
if (!first) {
248
/* no region intersects with this range */
249
newr = damon_new_region(
250
ALIGN_DOWN(range->start,
251
min_region_sz),
252
ALIGN(range->end, min_region_sz));
253
if (!newr)
254
return -ENOMEM;
255
damon_insert_region(newr, damon_prev_region(r), r, t);
256
} else {
257
/* resize intersecting regions to fit in this range */
258
first->ar.start = ALIGN_DOWN(range->start,
259
min_region_sz);
260
last->ar.end = ALIGN(range->end, min_region_sz);
261
262
/* fill possible holes in the range */
263
err = damon_fill_regions_holes(first, last, t);
264
if (err)
265
return err;
266
}
267
}
268
return 0;
269
}
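/*
 * Example usage (an illustrative sketch): initializing a target's monitoring
 * regions from two address ranges.  The addresses are arbitrary illustrations.
 *
 *	struct damon_addr_range ranges[] = {
 *		{ .start = 0x100000000UL, .end = 0x140000000UL, },
 *		{ .start = 0x200000000UL, .end = 0x220000000UL, },
 *	};
 *	struct damon_target *t = damon_new_target();
 *	int err;
 *
 *	if (!t)
 *		return -ENOMEM;
 *	err = damon_set_regions(t, ranges, ARRAY_SIZE(ranges),
 *			DAMON_MIN_REGION_SZ);
 */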
270
271
struct damos_filter *damos_new_filter(enum damos_filter_type type,
272
bool matching, bool allow)
273
{
274
struct damos_filter *filter;
275
276
filter = kmalloc(sizeof(*filter), GFP_KERNEL);
277
if (!filter)
278
return NULL;
279
filter->type = type;
280
filter->matching = matching;
281
filter->allow = allow;
282
INIT_LIST_HEAD(&filter->list);
283
return filter;
284
}
285
286
/**
287
* damos_filter_for_ops() - Return if the filter is ops-handled one.
288
* @type: type of the filter.
289
*
290
* Return: true if the filter of @type needs to be handled by ops layer, false
291
* otherwise.
292
*/
293
bool damos_filter_for_ops(enum damos_filter_type type)
294
{
295
switch (type) {
296
case DAMOS_FILTER_TYPE_ADDR:
297
case DAMOS_FILTER_TYPE_TARGET:
298
return false;
299
default:
300
break;
301
}
302
return true;
303
}
304
305
void damos_add_filter(struct damos *s, struct damos_filter *f)
306
{
307
if (damos_filter_for_ops(f->type))
308
list_add_tail(&f->list, &s->ops_filters);
309
else
310
list_add_tail(&f->list, &s->core_filters);
311
}
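/*
 * Example usage (an illustrative sketch): adding an address range filter that
 * rejects matching regions.  'scheme' stands for an already constructed
 * struct damos, and the range is an arbitrary illustration.  Since
 * DAMOS_FILTER_TYPE_ADDR is not ops-handled, damos_add_filter() links the
 * filter to the scheme's core_filters list.
 *
 *	struct damos_filter *f;
 *
 *	f = damos_new_filter(DAMOS_FILTER_TYPE_ADDR, true, false);
 *	if (!f)
 *		return -ENOMEM;
 *	f->addr_range.start = 0x100000000UL;
 *	f->addr_range.end = 0x110000000UL;
 *	damos_add_filter(scheme, f);
 */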
312
313
static void damos_del_filter(struct damos_filter *f)
314
{
315
list_del(&f->list);
316
}
317
318
static void damos_free_filter(struct damos_filter *f)
319
{
320
kfree(f);
321
}
322
323
void damos_destroy_filter(struct damos_filter *f)
324
{
325
damos_del_filter(f);
326
damos_free_filter(f);
327
}
328
329
struct damos_quota_goal *damos_new_quota_goal(
330
enum damos_quota_goal_metric metric,
331
unsigned long target_value)
332
{
333
struct damos_quota_goal *goal;
334
335
goal = kmalloc(sizeof(*goal), GFP_KERNEL);
336
if (!goal)
337
return NULL;
338
goal->metric = metric;
339
goal->target_value = target_value;
340
INIT_LIST_HEAD(&goal->list);
341
return goal;
342
}
343
344
void damos_add_quota_goal(struct damos_quota *q, struct damos_quota_goal *g)
345
{
346
list_add_tail(&g->list, &q->goals);
347
}
348
349
static void damos_del_quota_goal(struct damos_quota_goal *g)
350
{
351
list_del(&g->list);
352
}
353
354
static void damos_free_quota_goal(struct damos_quota_goal *g)
355
{
356
kfree(g);
357
}
358
359
void damos_destroy_quota_goal(struct damos_quota_goal *g)
360
{
361
damos_del_quota_goal(g);
362
damos_free_quota_goal(g);
363
}
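/*
 * Example usage (an illustrative sketch): adding a user-input feedback goal to
 * a scheme's quota.  'scheme' stands for an already constructed struct damos,
 * and the target value is an arbitrary illustration.
 *
 *	struct damos_quota_goal *goal;
 *
 *	goal = damos_new_quota_goal(DAMOS_QUOTA_USER_INPUT, 10000);
 *	if (!goal)
 *		return -ENOMEM;
 *	damos_add_quota_goal(&scheme->quota, goal);
 */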
364
365
/* initialize fields of @quota that normally API users wouldn't set */
366
static struct damos_quota *damos_quota_init(struct damos_quota *quota)
367
{
368
quota->esz = 0;
369
quota->total_charged_sz = 0;
370
quota->total_charged_ns = 0;
371
quota->charged_sz = 0;
372
quota->charged_from = 0;
373
quota->charge_target_from = NULL;
374
quota->charge_addr_from = 0;
375
quota->esz_bp = 0;
376
return quota;
377
}
378
379
struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
380
enum damos_action action,
381
unsigned long apply_interval_us,
382
struct damos_quota *quota,
383
struct damos_watermarks *wmarks,
384
int target_nid)
385
{
386
struct damos *scheme;
387
388
scheme = kmalloc(sizeof(*scheme), GFP_KERNEL);
389
if (!scheme)
390
return NULL;
391
scheme->pattern = *pattern;
392
scheme->action = action;
393
scheme->apply_interval_us = apply_interval_us;
394
/*
395
* next_apply_sis will be set when kdamond starts. While kdamond is
396
* running, it will also be updated when the scheme is added to the DAMON context,
397
* or damon_attrs are updated.
398
*/
399
scheme->next_apply_sis = 0;
400
scheme->walk_completed = false;
401
INIT_LIST_HEAD(&scheme->core_filters);
402
INIT_LIST_HEAD(&scheme->ops_filters);
403
scheme->stat = (struct damos_stat){};
404
scheme->max_nr_snapshots = 0;
405
INIT_LIST_HEAD(&scheme->list);
406
407
scheme->quota = *(damos_quota_init(quota));
408
/* quota.goals should be separately set by caller */
409
INIT_LIST_HEAD(&scheme->quota.goals);
410
411
scheme->wmarks = *wmarks;
412
scheme->wmarks.activated = true;
413
414
scheme->migrate_dests = (struct damos_migrate_dests){};
415
scheme->target_nid = target_nid;
416
417
return scheme;
418
}
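/*
 * Example usage (an illustrative sketch): constructing a statistics-only
 * scheme that matches every access pattern and adding it to a context 'ctx'
 * that the caller already holds.  Passing 0 for apply_interval_us makes the
 * scheme use the aggregation interval (see damos_set_next_apply_sis() below);
 * the zeroed quota and watermarks are assumed to leave those features
 * effectively unused.
 *
 *	struct damos_access_pattern pattern = {
 *		.min_sz_region = DAMON_MIN_REGION_SZ,
 *		.max_sz_region = ULONG_MAX,
 *		.min_nr_accesses = 0,
 *		.max_nr_accesses = UINT_MAX,
 *		.min_age_region = 0,
 *		.max_age_region = UINT_MAX,
 *	};
 *	struct damos_quota quota = {};
 *	struct damos_watermarks wmarks = {};
 *	struct damos *s;
 *
 *	s = damon_new_scheme(&pattern, DAMOS_STAT, 0, &quota, &wmarks,
 *			NUMA_NO_NODE);
 *	if (!s)
 *		return -ENOMEM;
 *	damon_add_scheme(ctx, s);
 */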
419
420
static void damos_set_next_apply_sis(struct damos *s, struct damon_ctx *ctx)
421
{
422
unsigned long sample_interval = ctx->attrs.sample_interval ?
423
ctx->attrs.sample_interval : 1;
424
unsigned long apply_interval = s->apply_interval_us ?
425
s->apply_interval_us : ctx->attrs.aggr_interval;
426
427
s->next_apply_sis = ctx->passed_sample_intervals +
428
apply_interval / sample_interval;
429
}
430
431
void damon_add_scheme(struct damon_ctx *ctx, struct damos *s)
432
{
433
list_add_tail(&s->list, &ctx->schemes);
434
damos_set_next_apply_sis(s, ctx);
435
}
436
437
static void damon_del_scheme(struct damos *s)
438
{
439
list_del(&s->list);
440
}
441
442
static void damon_free_scheme(struct damos *s)
443
{
444
kfree(s);
445
}
446
447
void damon_destroy_scheme(struct damos *s)
448
{
449
struct damos_quota_goal *g, *g_next;
450
struct damos_filter *f, *next;
451
452
damos_for_each_quota_goal_safe(g, g_next, &s->quota)
453
damos_destroy_quota_goal(g);
454
455
damos_for_each_core_filter_safe(f, next, s)
456
damos_destroy_filter(f);
457
458
damos_for_each_ops_filter_safe(f, next, s)
459
damos_destroy_filter(f);
460
461
kfree(s->migrate_dests.node_id_arr);
462
kfree(s->migrate_dests.weight_arr);
463
damon_del_scheme(s);
464
damon_free_scheme(s);
465
}
466
467
/*
468
* Construct a damon_target struct
469
*
470
* Returns the pointer to the new struct on success, or NULL otherwise
471
*/
472
struct damon_target *damon_new_target(void)
473
{
474
struct damon_target *t;
475
476
t = kmalloc(sizeof(*t), GFP_KERNEL);
477
if (!t)
478
return NULL;
479
480
t->pid = NULL;
481
t->nr_regions = 0;
482
INIT_LIST_HEAD(&t->regions_list);
483
INIT_LIST_HEAD(&t->list);
484
t->obsolete = false;
485
486
return t;
487
}
488
489
void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
490
{
491
list_add_tail(&t->list, &ctx->adaptive_targets);
492
}
493
494
bool damon_targets_empty(struct damon_ctx *ctx)
495
{
496
return list_empty(&ctx->adaptive_targets);
497
}
498
499
static void damon_del_target(struct damon_target *t)
500
{
501
list_del(&t->list);
502
}
503
504
void damon_free_target(struct damon_target *t)
505
{
506
struct damon_region *r, *next;
507
508
damon_for_each_region_safe(r, next, t)
509
damon_free_region(r);
510
kfree(t);
511
}
512
513
void damon_destroy_target(struct damon_target *t, struct damon_ctx *ctx)
514
{
515
516
if (ctx && ctx->ops.cleanup_target)
517
ctx->ops.cleanup_target(t);
518
519
damon_del_target(t);
520
damon_free_target(t);
521
}
522
523
unsigned int damon_nr_regions(struct damon_target *t)
524
{
525
return t->nr_regions;
526
}
527
528
struct damon_ctx *damon_new_ctx(void)
529
{
530
struct damon_ctx *ctx;
531
532
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
533
if (!ctx)
534
return NULL;
535
536
init_completion(&ctx->kdamond_started);
537
538
ctx->attrs.sample_interval = 5 * 1000;
539
ctx->attrs.aggr_interval = 100 * 1000;
540
ctx->attrs.ops_update_interval = 60 * 1000 * 1000;
541
542
ctx->passed_sample_intervals = 0;
543
/* These will be set from kdamond_init_ctx() */
544
ctx->next_aggregation_sis = 0;
545
ctx->next_ops_update_sis = 0;
546
547
mutex_init(&ctx->kdamond_lock);
548
INIT_LIST_HEAD(&ctx->call_controls);
549
mutex_init(&ctx->call_controls_lock);
550
mutex_init(&ctx->walk_control_lock);
551
552
ctx->attrs.min_nr_regions = 10;
553
ctx->attrs.max_nr_regions = 1000;
554
555
ctx->addr_unit = 1;
556
ctx->min_region_sz = DAMON_MIN_REGION_SZ;
557
558
INIT_LIST_HEAD(&ctx->adaptive_targets);
559
INIT_LIST_HEAD(&ctx->schemes);
560
561
return ctx;
562
}
563
564
static void damon_destroy_targets(struct damon_ctx *ctx)
565
{
566
struct damon_target *t, *next_t;
567
568
damon_for_each_target_safe(t, next_t, ctx)
569
damon_destroy_target(t, ctx);
570
}
571
572
void damon_destroy_ctx(struct damon_ctx *ctx)
573
{
574
struct damos *s, *next_s;
575
576
damon_destroy_targets(ctx);
577
578
damon_for_each_scheme_safe(s, next_s, ctx)
579
damon_destroy_scheme(s);
580
581
kfree(ctx);
582
}
583
584
static bool damon_attrs_equals(const struct damon_attrs *attrs1,
585
const struct damon_attrs *attrs2)
586
{
587
const struct damon_intervals_goal *ig1 = &attrs1->intervals_goal;
588
const struct damon_intervals_goal *ig2 = &attrs2->intervals_goal;
589
590
return attrs1->sample_interval == attrs2->sample_interval &&
591
attrs1->aggr_interval == attrs2->aggr_interval &&
592
attrs1->ops_update_interval == attrs2->ops_update_interval &&
593
attrs1->min_nr_regions == attrs2->min_nr_regions &&
594
attrs1->max_nr_regions == attrs2->max_nr_regions &&
595
ig1->access_bp == ig2->access_bp &&
596
ig1->aggrs == ig2->aggrs &&
597
ig1->min_sample_us == ig2->min_sample_us &&
598
ig1->max_sample_us == ig2->max_sample_us;
599
}
600
601
static unsigned int damon_age_for_new_attrs(unsigned int age,
602
struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
603
{
604
return age * old_attrs->aggr_interval / new_attrs->aggr_interval;
605
}
606
607
/* convert access ratio in bp (per 10,000) to nr_accesses */
608
static unsigned int damon_accesses_bp_to_nr_accesses(
609
unsigned int accesses_bp, struct damon_attrs *attrs)
610
{
611
return accesses_bp * damon_max_nr_accesses(attrs) / 10000;
612
}
613
614
/*
615
* Convert nr_accesses to access ratio in bp (per 10,000).
616
*
617
* Callers should ensure attrs.aggr_interval is not zero, like
618
* damon_update_monitoring_results() does. Otherwise, divide-by-zero would
619
* happen.
620
*/
621
static unsigned int damon_nr_accesses_to_accesses_bp(
622
unsigned int nr_accesses, struct damon_attrs *attrs)
623
{
624
return nr_accesses * 10000 / damon_max_nr_accesses(attrs);
625
}
626
627
static unsigned int damon_nr_accesses_for_new_attrs(unsigned int nr_accesses,
628
struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
629
{
630
return damon_accesses_bp_to_nr_accesses(
631
damon_nr_accesses_to_accesses_bp(
632
nr_accesses, old_attrs),
633
new_attrs);
634
}
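/*
 * Worked example (an illustration, assuming damon_max_nr_accesses() in
 * <linux/damon.h> is aggr_interval / sample_interval): with old attrs of 5 ms
 * sampling and 100 ms aggregation, nr_accesses of 10 out of at most 20 samples
 * converts to 5,000 bp.  If the aggregation interval grows to 200 ms with the
 * same sampling interval, the maximum becomes 40 samples, so the same 5,000 bp
 * converts back to nr_accesses of 20.
 */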
635
636
static void damon_update_monitoring_result(struct damon_region *r,
637
struct damon_attrs *old_attrs, struct damon_attrs *new_attrs,
638
bool aggregating)
639
{
640
if (!aggregating) {
641
r->nr_accesses = damon_nr_accesses_for_new_attrs(
642
r->nr_accesses, old_attrs, new_attrs);
643
r->nr_accesses_bp = r->nr_accesses * 10000;
644
} else {
645
/*
646
* if this is called in the middle of the aggregation, reset
647
* the aggregations we made so far for this aggregation
648
* interval. In other words, make the status like
649
* kdamond_reset_aggregated() is called.
650
*/
651
r->last_nr_accesses = damon_nr_accesses_for_new_attrs(
652
r->last_nr_accesses, old_attrs, new_attrs);
653
r->nr_accesses_bp = r->last_nr_accesses * 10000;
654
r->nr_accesses = 0;
655
}
656
r->age = damon_age_for_new_attrs(r->age, old_attrs, new_attrs);
657
}
658
659
/*
660
* region->nr_accesses is the number of sampling intervals in the last
661
* aggregation interval in which access to the region was found, and region->age is
662
* the number of aggregation intervals that its access pattern has maintained.
663
* For that reason, the real meaning of the two fields depends on the current
664
* sampling interval and aggregation interval. This function updates
665
* ->nr_accesses and ->age of given damon_ctx's regions for new damon_attrs.
666
*/
667
static void damon_update_monitoring_results(struct damon_ctx *ctx,
668
struct damon_attrs *new_attrs, bool aggregating)
669
{
670
struct damon_attrs *old_attrs = &ctx->attrs;
671
struct damon_target *t;
672
struct damon_region *r;
673
674
/* if any interval is zero, simply skip the conversion */
675
if (!old_attrs->sample_interval || !old_attrs->aggr_interval ||
676
!new_attrs->sample_interval ||
677
!new_attrs->aggr_interval)
678
return;
679
680
damon_for_each_target(t, ctx)
681
damon_for_each_region(r, t)
682
damon_update_monitoring_result(
683
r, old_attrs, new_attrs, aggregating);
684
}
685
686
/*
687
* damon_valid_intervals_goal() - return if the intervals goal of @attrs is
688
* valid.
689
*/
690
static bool damon_valid_intervals_goal(struct damon_attrs *attrs)
691
{
692
struct damon_intervals_goal *goal = &attrs->intervals_goal;
693
694
/* tuning is disabled */
695
if (!goal->aggrs)
696
return true;
697
if (goal->min_sample_us > goal->max_sample_us)
698
return false;
699
if (attrs->sample_interval < goal->min_sample_us ||
700
goal->max_sample_us < attrs->sample_interval)
701
return false;
702
return true;
703
}
704
705
/**
706
* damon_set_attrs() - Set attributes for the monitoring.
707
* @ctx: monitoring context
708
* @attrs: monitoring attributes
709
*
710
* This function should be called while the kdamond is not running, or while an
711
* access check results aggregation is not ongoing (e.g., from damon_call()).
712
*
713
* Every time interval is in micro-seconds.
714
*
715
* Return: 0 on success, negative error code otherwise.
716
*/
717
int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
718
{
719
unsigned long sample_interval = attrs->sample_interval ?
720
attrs->sample_interval : 1;
721
struct damos *s;
722
bool aggregating = ctx->passed_sample_intervals <
723
ctx->next_aggregation_sis;
724
725
if (!damon_valid_intervals_goal(attrs))
726
return -EINVAL;
727
728
if (attrs->min_nr_regions < 3)
729
return -EINVAL;
730
if (attrs->min_nr_regions > attrs->max_nr_regions)
731
return -EINVAL;
732
if (attrs->sample_interval > attrs->aggr_interval)
733
return -EINVAL;
734
735
/* callers from outside of the core don't set this. */
736
if (!attrs->aggr_samples)
737
attrs->aggr_samples = attrs->aggr_interval / sample_interval;
738
739
ctx->next_aggregation_sis = ctx->passed_sample_intervals +
740
attrs->aggr_interval / sample_interval;
741
ctx->next_ops_update_sis = ctx->passed_sample_intervals +
742
attrs->ops_update_interval / sample_interval;
743
744
damon_update_monitoring_results(ctx, attrs, aggregating);
745
ctx->attrs = *attrs;
746
747
damon_for_each_scheme(s, ctx)
748
damos_set_next_apply_sis(s, ctx);
749
750
return 0;
751
}
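/*
 * Example usage (an illustrative sketch): updating the monitoring attributes
 * of a context 'ctx' that the caller already holds, using the same
 * microsecond values that damon_new_ctx() sets as defaults (5 ms sampling,
 * 100 ms aggregation, 60 s ops update).  Note the validations above:
 * min_nr_regions must be at least three and not larger than max_nr_regions,
 * and the sampling interval must not be larger than the aggregation interval.
 *
 *	struct damon_attrs attrs = {
 *		.sample_interval = 5 * 1000,
 *		.aggr_interval = 100 * 1000,
 *		.ops_update_interval = 60 * 1000 * 1000,
 *		.min_nr_regions = 10,
 *		.max_nr_regions = 1000,
 *	};
 *	int err = damon_set_attrs(ctx, &attrs);
 */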
752
753
/**
754
* damon_set_schemes() - Set data access monitoring based operation schemes.
755
* @ctx: monitoring context
756
* @schemes: array of the schemes
757
* @nr_schemes: number of entries in @schemes
758
*
759
* This function should not be called while the kdamond of the context is
760
* running.
761
*/
762
void damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes,
763
ssize_t nr_schemes)
764
{
765
struct damos *s, *next;
766
ssize_t i;
767
768
damon_for_each_scheme_safe(s, next, ctx)
769
damon_destroy_scheme(s);
770
for (i = 0; i < nr_schemes; i++)
771
damon_add_scheme(ctx, schemes[i]);
772
}
773
774
static struct damos_quota_goal *damos_nth_quota_goal(
775
int n, struct damos_quota *q)
776
{
777
struct damos_quota_goal *goal;
778
int i = 0;
779
780
damos_for_each_quota_goal(goal, q) {
781
if (i++ == n)
782
return goal;
783
}
784
return NULL;
785
}
786
787
static void damos_commit_quota_goal_union(
788
struct damos_quota_goal *dst, struct damos_quota_goal *src)
789
{
790
switch (dst->metric) {
791
case DAMOS_QUOTA_NODE_MEM_USED_BP:
792
case DAMOS_QUOTA_NODE_MEM_FREE_BP:
793
dst->nid = src->nid;
794
break;
795
case DAMOS_QUOTA_NODE_MEMCG_USED_BP:
796
case DAMOS_QUOTA_NODE_MEMCG_FREE_BP:
797
dst->nid = src->nid;
798
dst->memcg_id = src->memcg_id;
799
break;
800
default:
801
break;
802
}
803
}
804
805
static void damos_commit_quota_goal(
806
struct damos_quota_goal *dst, struct damos_quota_goal *src)
807
{
808
dst->metric = src->metric;
809
dst->target_value = src->target_value;
810
if (dst->metric == DAMOS_QUOTA_USER_INPUT)
811
dst->current_value = src->current_value;
812
/* keep last_psi_total as is, since it will be updated in next cycle */
813
damos_commit_quota_goal_union(dst, src);
814
}
815
816
/**
817
* damos_commit_quota_goals() - Commit DAMOS quota goals to another quota.
818
* @dst: The commit destination DAMOS quota.
819
* @src: The commit source DAMOS quota.
820
*
821
* Copies user-specified parameters for quota goals from @src to @dst. Users
822
* should use this function for quota goals-level parameters update of running
823
* DAMON contexts, instead of manual in-place updates.
824
*
825
* This function should be called from parameters-update safe context, like
826
* damon_call().
827
*/
828
int damos_commit_quota_goals(struct damos_quota *dst, struct damos_quota *src)
829
{
830
struct damos_quota_goal *dst_goal, *next, *src_goal, *new_goal;
831
int i = 0, j = 0;
832
833
damos_for_each_quota_goal_safe(dst_goal, next, dst) {
834
src_goal = damos_nth_quota_goal(i++, src);
835
if (src_goal)
836
damos_commit_quota_goal(dst_goal, src_goal);
837
else
838
damos_destroy_quota_goal(dst_goal);
839
}
840
damos_for_each_quota_goal_safe(src_goal, next, src) {
841
if (j++ < i)
842
continue;
843
new_goal = damos_new_quota_goal(
844
src_goal->metric, src_goal->target_value);
845
if (!new_goal)
846
return -ENOMEM;
847
damos_commit_quota_goal(new_goal, src_goal);
848
damos_add_quota_goal(dst, new_goal);
849
}
850
return 0;
851
}
852
853
static int damos_commit_quota(struct damos_quota *dst, struct damos_quota *src)
854
{
855
int err;
856
857
dst->reset_interval = src->reset_interval;
858
dst->ms = src->ms;
859
dst->sz = src->sz;
860
err = damos_commit_quota_goals(dst, src);
861
if (err)
862
return err;
863
dst->weight_sz = src->weight_sz;
864
dst->weight_nr_accesses = src->weight_nr_accesses;
865
dst->weight_age = src->weight_age;
866
return 0;
867
}
868
869
static struct damos_filter *damos_nth_core_filter(int n, struct damos *s)
870
{
871
struct damos_filter *filter;
872
int i = 0;
873
874
damos_for_each_core_filter(filter, s) {
875
if (i++ == n)
876
return filter;
877
}
878
return NULL;
879
}
880
881
static struct damos_filter *damos_nth_ops_filter(int n, struct damos *s)
882
{
883
struct damos_filter *filter;
884
int i = 0;
885
886
damos_for_each_ops_filter(filter, s) {
887
if (i++ == n)
888
return filter;
889
}
890
return NULL;
891
}
892
893
static void damos_commit_filter_arg(
894
struct damos_filter *dst, struct damos_filter *src)
895
{
896
switch (dst->type) {
897
case DAMOS_FILTER_TYPE_MEMCG:
898
dst->memcg_id = src->memcg_id;
899
break;
900
case DAMOS_FILTER_TYPE_ADDR:
901
dst->addr_range = src->addr_range;
902
break;
903
case DAMOS_FILTER_TYPE_TARGET:
904
dst->target_idx = src->target_idx;
905
break;
906
case DAMOS_FILTER_TYPE_HUGEPAGE_SIZE:
907
dst->sz_range = src->sz_range;
908
break;
909
default:
910
break;
911
}
912
}
913
914
static void damos_commit_filter(
915
struct damos_filter *dst, struct damos_filter *src)
916
{
917
dst->type = src->type;
918
dst->matching = src->matching;
919
dst->allow = src->allow;
920
damos_commit_filter_arg(dst, src);
921
}
922
923
static int damos_commit_core_filters(struct damos *dst, struct damos *src)
924
{
925
struct damos_filter *dst_filter, *next, *src_filter, *new_filter;
926
int i = 0, j = 0;
927
928
damos_for_each_core_filter_safe(dst_filter, next, dst) {
929
src_filter = damos_nth_core_filter(i++, src);
930
if (src_filter)
931
damos_commit_filter(dst_filter, src_filter);
932
else
933
damos_destroy_filter(dst_filter);
934
}
935
936
damos_for_each_core_filter_safe(src_filter, next, src) {
937
if (j++ < i)
938
continue;
939
940
new_filter = damos_new_filter(
941
src_filter->type, src_filter->matching,
942
src_filter->allow);
943
if (!new_filter)
944
return -ENOMEM;
945
damos_commit_filter_arg(new_filter, src_filter);
946
damos_add_filter(dst, new_filter);
947
}
948
return 0;
949
}
950
951
static int damos_commit_ops_filters(struct damos *dst, struct damos *src)
952
{
953
struct damos_filter *dst_filter, *next, *src_filter, *new_filter;
954
int i = 0, j = 0;
955
956
damos_for_each_ops_filter_safe(dst_filter, next, dst) {
957
src_filter = damos_nth_ops_filter(i++, src);
958
if (src_filter)
959
damos_commit_filter(dst_filter, src_filter);
960
else
961
damos_destroy_filter(dst_filter);
962
}
963
964
damos_for_each_ops_filter_safe(src_filter, next, src) {
965
if (j++ < i)
966
continue;
967
968
new_filter = damos_new_filter(
969
src_filter->type, src_filter->matching,
970
src_filter->allow);
971
if (!new_filter)
972
return -ENOMEM;
973
damos_commit_filter_arg(new_filter, src_filter);
974
damos_add_filter(dst, new_filter);
975
}
976
return 0;
977
}
978
979
/**
980
* damos_filters_default_reject() - decide whether to reject memory that didn't
981
* match with any given filter.
982
* @filters: Given DAMOS filters of a group.
983
*/
984
static bool damos_filters_default_reject(struct list_head *filters)
985
{
986
struct damos_filter *last_filter;
987
988
if (list_empty(filters))
989
return false;
990
last_filter = list_last_entry(filters, struct damos_filter, list);
991
return last_filter->allow;
992
}
993
994
static void damos_set_filters_default_reject(struct damos *s)
995
{
996
if (!list_empty(&s->ops_filters))
997
s->core_filters_default_reject = false;
998
else
999
s->core_filters_default_reject =
1000
damos_filters_default_reject(&s->core_filters);
1001
s->ops_filters_default_reject =
1002
damos_filters_default_reject(&s->ops_filters);
1003
}
1004
1005
static int damos_commit_dests(struct damos_migrate_dests *dst,
1006
struct damos_migrate_dests *src)
1007
{
1008
if (dst->nr_dests != src->nr_dests) {
1009
kfree(dst->node_id_arr);
1010
kfree(dst->weight_arr);
1011
1012
dst->node_id_arr = kmalloc_array(src->nr_dests,
1013
sizeof(*dst->node_id_arr), GFP_KERNEL);
1014
if (!dst->node_id_arr) {
1015
dst->weight_arr = NULL;
1016
return -ENOMEM;
1017
}
1018
1019
dst->weight_arr = kmalloc_array(src->nr_dests,
1020
sizeof(*dst->weight_arr), GFP_KERNEL);
1021
if (!dst->weight_arr) {
1022
/* ->node_id_arr will be freed by scheme destruction */
1023
return -ENOMEM;
1024
}
1025
}
1026
1027
dst->nr_dests = src->nr_dests;
1028
for (int i = 0; i < src->nr_dests; i++) {
1029
dst->node_id_arr[i] = src->node_id_arr[i];
1030
dst->weight_arr[i] = src->weight_arr[i];
1031
}
1032
1033
return 0;
1034
}
1035
1036
static int damos_commit_filters(struct damos *dst, struct damos *src)
1037
{
1038
int err;
1039
1040
err = damos_commit_core_filters(dst, src);
1041
if (err)
1042
return err;
1043
err = damos_commit_ops_filters(dst, src);
1044
if (err)
1045
return err;
1046
damos_set_filters_default_reject(dst);
1047
return 0;
1048
}
1049
1050
static struct damos *damon_nth_scheme(int n, struct damon_ctx *ctx)
1051
{
1052
struct damos *s;
1053
int i = 0;
1054
1055
damon_for_each_scheme(s, ctx) {
1056
if (i++ == n)
1057
return s;
1058
}
1059
return NULL;
1060
}
1061
1062
static int damos_commit(struct damos *dst, struct damos *src)
1063
{
1064
int err;
1065
1066
dst->pattern = src->pattern;
1067
dst->action = src->action;
1068
dst->apply_interval_us = src->apply_interval_us;
1069
1070
err = damos_commit_quota(&dst->quota, &src->quota);
1071
if (err)
1072
return err;
1073
1074
dst->wmarks = src->wmarks;
1075
dst->target_nid = src->target_nid;
1076
1077
err = damos_commit_dests(&dst->migrate_dests, &src->migrate_dests);
1078
if (err)
1079
return err;
1080
1081
err = damos_commit_filters(dst, src);
1082
if (err)
1083
return err;
1084
1085
dst->max_nr_snapshots = src->max_nr_snapshots;
1086
return 0;
1087
}
1088
1089
static int damon_commit_schemes(struct damon_ctx *dst, struct damon_ctx *src)
1090
{
1091
struct damos *dst_scheme, *next, *src_scheme, *new_scheme;
1092
int i = 0, j = 0, err;
1093
1094
damon_for_each_scheme_safe(dst_scheme, next, dst) {
1095
src_scheme = damon_nth_scheme(i++, src);
1096
if (src_scheme) {
1097
err = damos_commit(dst_scheme, src_scheme);
1098
if (err)
1099
return err;
1100
} else {
1101
damon_destroy_scheme(dst_scheme);
1102
}
1103
}
1104
1105
damon_for_each_scheme_safe(src_scheme, next, src) {
1106
if (j++ < i)
1107
continue;
1108
new_scheme = damon_new_scheme(&src_scheme->pattern,
1109
src_scheme->action,
1110
src_scheme->apply_interval_us,
1111
&src_scheme->quota, &src_scheme->wmarks,
1112
NUMA_NO_NODE);
1113
if (!new_scheme)
1114
return -ENOMEM;
1115
err = damos_commit(new_scheme, src_scheme);
1116
if (err) {
1117
damon_destroy_scheme(new_scheme);
1118
return err;
1119
}
1120
damon_add_scheme(dst, new_scheme);
1121
}
1122
return 0;
1123
}
1124
1125
static struct damon_target *damon_nth_target(int n, struct damon_ctx *ctx)
1126
{
1127
struct damon_target *t;
1128
int i = 0;
1129
1130
damon_for_each_target(t, ctx) {
1131
if (i++ == n)
1132
return t;
1133
}
1134
return NULL;
1135
}
1136
1137
/*
1138
* The caller should ensure the regions of @src are
1139
* 1. valid (end >= start) and
1140
* 2. sorted by starting address.
1141
*
1142
* If @src has no region, @dst keeps current regions.
1143
*/
1144
static int damon_commit_target_regions(struct damon_target *dst,
1145
struct damon_target *src, unsigned long src_min_region_sz)
1146
{
1147
struct damon_region *src_region;
1148
struct damon_addr_range *ranges;
1149
int i = 0, err;
1150
1151
damon_for_each_region(src_region, src)
1152
i++;
1153
if (!i)
1154
return 0;
1155
1156
ranges = kmalloc_array(i, sizeof(*ranges), GFP_KERNEL | __GFP_NOWARN);
1157
if (!ranges)
1158
return -ENOMEM;
1159
i = 0;
1160
damon_for_each_region(src_region, src)
1161
ranges[i++] = src_region->ar;
1162
err = damon_set_regions(dst, ranges, i, src_min_region_sz);
1163
kfree(ranges);
1164
return err;
1165
}
1166
1167
static int damon_commit_target(
1168
struct damon_target *dst, bool dst_has_pid,
1169
struct damon_target *src, bool src_has_pid,
1170
unsigned long src_min_region_sz)
1171
{
1172
int err;
1173
1174
err = damon_commit_target_regions(dst, src, src_min_region_sz);
1175
if (err)
1176
return err;
1177
if (dst_has_pid)
1178
put_pid(dst->pid);
1179
if (src_has_pid)
1180
get_pid(src->pid);
1181
dst->pid = src->pid;
1182
return 0;
1183
}
1184
1185
static int damon_commit_targets(
1186
struct damon_ctx *dst, struct damon_ctx *src)
1187
{
1188
struct damon_target *dst_target, *next, *src_target, *new_target;
1189
int i = 0, j = 0, err;
1190
1191
damon_for_each_target_safe(dst_target, next, dst) {
1192
src_target = damon_nth_target(i++, src);
1193
/*
1194
* If src target is obsolete, do not commit the parameters to
1195
* the dst target, and further remove the dst target.
1196
*/
1197
if (src_target && !src_target->obsolete) {
1198
err = damon_commit_target(
1199
dst_target, damon_target_has_pid(dst),
1200
src_target, damon_target_has_pid(src),
1201
src->min_region_sz);
1202
if (err)
1203
return err;
1204
} else {
1205
struct damos *s;
1206
1207
damon_destroy_target(dst_target, dst);
1208
damon_for_each_scheme(s, dst) {
1209
if (s->quota.charge_target_from == dst_target) {
1210
s->quota.charge_target_from = NULL;
1211
s->quota.charge_addr_from = 0;
1212
}
1213
}
1214
}
1215
}
1216
1217
damon_for_each_target_safe(src_target, next, src) {
1218
if (j++ < i)
1219
continue;
1220
/* target to remove has no matching dst */
1221
if (src_target->obsolete)
1222
return -EINVAL;
1223
new_target = damon_new_target();
1224
if (!new_target)
1225
return -ENOMEM;
1226
err = damon_commit_target(new_target, false,
1227
src_target, damon_target_has_pid(src),
1228
src->min_region_sz);
1229
if (err) {
1230
damon_destroy_target(new_target, NULL);
1231
return err;
1232
}
1233
damon_add_target(dst, new_target);
1234
}
1235
return 0;
1236
}
1237
1238
/**
1239
* damon_commit_ctx() - Commit parameters of a DAMON context to another.
1240
* @dst: The commit destination DAMON context.
1241
* @src: The commit source DAMON context.
1242
*
1243
* This function copies user-specified parameters from @src to @dst and updates
1244
* the internal status and results accordingly. Users should use this function
1245
* for context-level parameters update of a running context, instead of manual
1246
* in-place updates.
1247
*
1248
* This function should be called from parameters-update safe context, like
1249
* damon_call().
1250
*/
1251
int damon_commit_ctx(struct damon_ctx *dst, struct damon_ctx *src)
1252
{
1253
int err;
1254
1255
err = damon_commit_schemes(dst, src);
1256
if (err)
1257
return err;
1258
err = damon_commit_targets(dst, src);
1259
if (err)
1260
return err;
1261
/*
1262
* schemes and targets should be updated first, since
1263
* 1. damon_set_attrs() updates monitoring results of targets and
1264
* next_apply_sis of schemes, and
1265
* 2. ops update should be done after pid handling is done (target
1266
* committing require putting pids).
1267
*/
1268
if (!damon_attrs_equals(&dst->attrs, &src->attrs)) {
1269
err = damon_set_attrs(dst, &src->attrs);
1270
if (err)
1271
return err;
1272
}
1273
dst->ops = src->ops;
1274
dst->addr_unit = src->addr_unit;
1275
dst->min_region_sz = src->min_region_sz;
1276
1277
return 0;
1278
}
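/*
 * Example usage (an illustrative sketch): committing new parameters to a
 * running context from a damon_call() callback.  The int (*fn)(void *) shape
 * of the callback is an assumption based on struct damon_call_control in
 * <linux/damon.h>; 'req->src' is a separately prepared, not-running context
 * that carries the new parameters and 'req->dst' is the running one.
 *
 *	struct commit_request {
 *		struct damon_ctx *dst;
 *		struct damon_ctx *src;
 *	};
 *
 *	static int commit_params(void *data)
 *	{
 *		struct commit_request *req = data;
 *
 *		return damon_commit_ctx(req->dst, req->src);
 *	}
 */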
1279
1280
/**
1281
* damon_nr_running_ctxs() - Return number of currently running contexts.
1282
*/
1283
int damon_nr_running_ctxs(void)
1284
{
1285
int nr_ctxs;
1286
1287
mutex_lock(&damon_lock);
1288
nr_ctxs = nr_running_ctxs;
1289
mutex_unlock(&damon_lock);
1290
1291
return nr_ctxs;
1292
}
1293
1294
/* Returns the size upper limit for each monitoring region */
1295
static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
1296
{
1297
struct damon_target *t;
1298
struct damon_region *r;
1299
unsigned long sz = 0;
1300
1301
damon_for_each_target(t, ctx) {
1302
damon_for_each_region(r, t)
1303
sz += damon_sz_region(r);
1304
}
1305
1306
if (ctx->attrs.min_nr_regions)
1307
sz /= ctx->attrs.min_nr_regions;
1308
if (sz < ctx->min_region_sz)
1309
sz = ctx->min_region_sz;
1310
1311
return sz;
1312
}
1313
1314
static int kdamond_fn(void *data);
1315
1316
/*
1317
* __damon_start() - Starts monitoring with given context.
1318
* @ctx: monitoring context
1319
*
1320
* This function should be called while damon_lock is held.
1321
*
1322
* Return: 0 on success, negative error code otherwise.
1323
*/
1324
static int __damon_start(struct damon_ctx *ctx)
1325
{
1326
int err = -EBUSY;
1327
1328
mutex_lock(&ctx->kdamond_lock);
1329
if (!ctx->kdamond) {
1330
err = 0;
1331
reinit_completion(&ctx->kdamond_started);
1332
ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
1333
nr_running_ctxs);
1334
if (IS_ERR(ctx->kdamond)) {
1335
err = PTR_ERR(ctx->kdamond);
1336
ctx->kdamond = NULL;
1337
} else {
1338
wait_for_completion(&ctx->kdamond_started);
1339
}
1340
}
1341
mutex_unlock(&ctx->kdamond_lock);
1342
1343
return err;
1344
}
1345
1346
/**
1347
* damon_start() - Starts monitoring for a given group of contexts.
1348
* @ctxs: an array of the pointers for contexts to start monitoring
1349
* @nr_ctxs: size of @ctxs
1350
* @exclusive: exclusiveness of this contexts group
1351
*
1352
* This function starts a group of monitoring threads for a group of monitoring
1353
* contexts. One thread per context is created and run in parallel. The
1354
* caller should handle synchronization between the threads by itself. If
1355
* @exclusive is true and a group of threads that was created by another
1356
* 'damon_start()' call is currently running, this function does nothing but
1357
* return -EBUSY.
1358
*
1359
* Return: 0 on success, negative error code otherwise.
1360
*/
1361
int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive)
1362
{
1363
int i;
1364
int err = 0;
1365
1366
mutex_lock(&damon_lock);
1367
if ((exclusive && nr_running_ctxs) ||
1368
(!exclusive && running_exclusive_ctxs)) {
1369
mutex_unlock(&damon_lock);
1370
return -EBUSY;
1371
}
1372
1373
for (i = 0; i < nr_ctxs; i++) {
1374
err = __damon_start(ctxs[i]);
1375
if (err)
1376
break;
1377
nr_running_ctxs++;
1378
}
1379
if (exclusive && nr_running_ctxs)
1380
running_exclusive_ctxs = true;
1381
mutex_unlock(&damon_lock);
1382
1383
return err;
1384
}
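/*
 * Example usage (an illustrative sketch): starting a single context
 * exclusively.  DAMON_OPS_PADDR is assumed to name an already registered
 * operations set, and error handling is kept minimal.
 *
 *	struct damon_ctx *ctx = damon_new_ctx();
 *	struct damon_target *t;
 *	int err;
 *
 *	if (!ctx)
 *		return -ENOMEM;
 *	t = damon_new_target();
 *	if (!t) {
 *		damon_destroy_ctx(ctx);
 *		return -ENOMEM;
 *	}
 *	damon_add_target(ctx, t);
 *	err = damon_select_ops(ctx, DAMON_OPS_PADDR);
 *	if (!err)
 *		err = damon_start(&ctx, 1, true);
 *
 * When the monitoring should end, the same pointer array can be passed to
 * damon_stop():
 *
 *	damon_stop(&ctx, 1);
 *	damon_destroy_ctx(ctx);
 */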
1385
1386
/*
1387
* __damon_stop() - Stops monitoring of a given context.
1388
* @ctx: monitoring context
1389
*
1390
* Return: 0 on success, negative error code otherwise.
1391
*/
1392
static int __damon_stop(struct damon_ctx *ctx)
1393
{
1394
struct task_struct *tsk;
1395
1396
mutex_lock(&ctx->kdamond_lock);
1397
tsk = ctx->kdamond;
1398
if (tsk) {
1399
get_task_struct(tsk);
1400
mutex_unlock(&ctx->kdamond_lock);
1401
kthread_stop_put(tsk);
1402
return 0;
1403
}
1404
mutex_unlock(&ctx->kdamond_lock);
1405
1406
return -EPERM;
1407
}
1408
1409
/**
1410
* damon_stop() - Stops monitoring for a given group of contexts.
1411
* @ctxs: an array of the pointers for contexts to stop monitoring
1412
* @nr_ctxs: size of @ctxs
1413
*
1414
* Return: 0 on success, negative error code otherwise.
1415
*/
1416
int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
1417
{
1418
int i, err = 0;
1419
1420
for (i = 0; i < nr_ctxs; i++) {
1421
/* nr_running_ctxs is decremented in kdamond_fn */
1422
err = __damon_stop(ctxs[i]);
1423
if (err)
1424
break;
1425
}
1426
return err;
1427
}
1428
1429
/**
1430
* damon_is_running() - Returns whether a given DAMON context is running.
1431
* @ctx: The DAMON context to see if running.
1432
*
1433
* Return: true if @ctx is running, false otherwise.
1434
*/
1435
bool damon_is_running(struct damon_ctx *ctx)
1436
{
1437
bool running;
1438
1439
mutex_lock(&ctx->kdamond_lock);
1440
running = ctx->kdamond != NULL;
1441
mutex_unlock(&ctx->kdamond_lock);
1442
return running;
1443
}
1444
1445
/**
1446
* damon_kdamond_pid() - Return pid of a given DAMON context's worker thread.
1447
* @ctx: The DAMON context of the question.
1448
*
1449
* Return: pid if @ctx is running, negative error code otherwise.
1450
*/
1451
int damon_kdamond_pid(struct damon_ctx *ctx)
1452
{
1453
int pid = -EINVAL;
1454
1455
mutex_lock(&ctx->kdamond_lock);
1456
if (ctx->kdamond)
1457
pid = ctx->kdamond->pid;
1458
mutex_unlock(&ctx->kdamond_lock);
1459
return pid;
1460
}
1461
1462
/*
1463
* damon_call_handle_inactive_ctx() - handle a DAMON call request that was added to
1464
* an inactive context.
1465
* @ctx: The inactive DAMON context.
1466
* @control: Control variable of the call request.
1467
*
1468
* This function is called in the case where @control was added to @ctx but @ctx is
1469
* not running (inactive). See if @ctx handled @control or not, and clean up
1470
* @control if it was not handled.
1471
*
1472
* Returns 0 if @control was handled by @ctx, negative error code otherwise.
1473
*/
1474
static int damon_call_handle_inactive_ctx(
1475
struct damon_ctx *ctx, struct damon_call_control *control)
1476
{
1477
struct damon_call_control *c;
1478
1479
mutex_lock(&ctx->call_controls_lock);
1480
list_for_each_entry(c, &ctx->call_controls, list) {
1481
if (c == control) {
1482
list_del(&control->list);
1483
mutex_unlock(&ctx->call_controls_lock);
1484
return -EINVAL;
1485
}
1486
}
1487
mutex_unlock(&ctx->call_controls_lock);
1488
return 0;
1489
}
1490
1491
/**
1492
* damon_call() - Invoke a given function on DAMON worker thread (kdamond).
1493
* @ctx: DAMON context to call the function for.
1494
* @control: Control variable of the call request.
1495
*
1496
* Ask DAMON worker thread (kdamond) of @ctx to call a function with an
1497
* argument data that respectively passed via &damon_call_control->fn and
1498
* &damon_call_control->data of @control. If &damon_call_control->repeat of
1499
* @control is unset, further wait until the kdamond finishes handling of the
1500
* request. Otherwise, return as soon as the request is made.
1501
*
1502
* The kdamond executes the function with the argument in the main loop, just
1503
* after a sampling of the iteration is finished. The function can hence
1504
* safely access the internal data of the &struct damon_ctx without additional
1505
* synchronization. The return value of the function will be saved in
1506
* &damon_call_control->return_code.
1507
*
1508
* Return: 0 on success, negative error code otherwise.
1509
*/
1510
int damon_call(struct damon_ctx *ctx, struct damon_call_control *control)
1511
{
1512
if (!control->repeat)
1513
init_completion(&control->completion);
1514
control->canceled = false;
1515
INIT_LIST_HEAD(&control->list);
1516
1517
mutex_lock(&ctx->call_controls_lock);
1518
list_add_tail(&control->list, &ctx->call_controls);
1519
mutex_unlock(&ctx->call_controls_lock);
1520
if (!damon_is_running(ctx))
1521
return damon_call_handle_inactive_ctx(ctx, control);
1522
if (control->repeat)
1523
return 0;
1524
wait_for_completion(&control->completion);
1525
if (control->canceled)
1526
return -ECANCELED;
1527
return 0;
1528
}
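/*
 * Example usage (an illustrative sketch): running a function on the kdamond of
 * a context and waiting for its completion.  The int (*fn)(void *) shape of
 * the callback is an assumption based on struct damon_call_control in
 * <linux/damon.h>; the 'fn' and 'data' fields are the ones described in the
 * kernel-doc above.
 *
 *	static int print_nr_regions(void *data)
 *	{
 *		struct damon_ctx *ctx = data;
 *		struct damon_target *t;
 *
 *		damon_for_each_target(t, ctx)
 *			pr_info("target has %u regions\n", damon_nr_regions(t));
 *		return 0;
 *	}
 *
 *	static int do_print(struct damon_ctx *ctx)
 *	{
 *		struct damon_call_control control = {
 *			.fn = print_nr_regions,
 *			.data = ctx,
 *		};
 *
 *		return damon_call(ctx, &control);
 *	}
 */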
1529
1530
/**
1531
* damos_walk() - Invoke a given function while DAMOS walks regions.
1532
* @ctx: DAMON context to call the functions for.
1533
* @control: Control variable of the walk request.
1534
*
1535
* Ask DAMON worker thread (kdamond) of @ctx to call a function for each region
1536
* that the kdamond will apply DAMOS action to, and wait until the kdamond
1537
* finishes handling of the request.
1538
*
1539
* The kdamond executes the given function in the main loop, for each region
1540
* just after it applied any DAMOS actions of @ctx to it. The invocation is
1541
* made only within one &damos->apply_interval_us since damos_walk()
1542
* invocation, for each scheme. The given callback function can hence safely
1543
* access the internal data of &struct damon_ctx and &struct damon_region that
1544
* each of the schemes will apply the action for the next interval, without
1545
* additional synchronizations against the kdamond. If every scheme of @ctx
1546
* passed at least one &damos->apply_interval_us, kdamond marks the request as
1547
* completed so that damos_walk() can wake up and return.
1548
*
1549
* Return: 0 on success, negative error code otherwise.
1550
*/
1551
int damos_walk(struct damon_ctx *ctx, struct damos_walk_control *control)
1552
{
1553
init_completion(&control->completion);
1554
control->canceled = false;
1555
mutex_lock(&ctx->walk_control_lock);
1556
if (ctx->walk_control) {
1557
mutex_unlock(&ctx->walk_control_lock);
1558
return -EBUSY;
1559
}
1560
ctx->walk_control = control;
1561
mutex_unlock(&ctx->walk_control_lock);
1562
if (!damon_is_running(ctx))
1563
return -EINVAL;
1564
wait_for_completion(&control->completion);
1565
if (control->canceled)
1566
return -ECANCELED;
1567
return 0;
1568
}
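/*
 * Example usage (an illustrative sketch): summing the sizes of the regions
 * that the schemes of a running context apply their actions to.  The walk_fn
 * parameter list follows the invocation in damos_walk_call_walk() below; the
 * void return type is an assumption based on struct damos_walk_control in
 * <linux/damon.h>.
 *
 *	static void sum_region_sz(void *data, struct damon_ctx *ctx,
 *			struct damon_target *t, struct damon_region *r,
 *			struct damos *s, unsigned long sz_filter_passed)
 *	{
 *		unsigned long *total = data;
 *
 *		*total += damon_sz_region(r);
 *	}
 *
 *	static int sum_walked_regions(struct damon_ctx *ctx, unsigned long *total)
 *	{
 *		struct damos_walk_control control = {
 *			.walk_fn = sum_region_sz,
 *			.data = total,
 *		};
 *
 *		return damos_walk(ctx, &control);
 *	}
 */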
1569
1570
/*
1571
* Warn and fix corrupted ->nr_accesses[_bp] for investigations and preventing
1572
* the problem being propagated.
1573
*/
1574
static void damon_warn_fix_nr_accesses_corruption(struct damon_region *r)
1575
{
1576
if (r->nr_accesses_bp == r->nr_accesses * 10000)
1577
return;
1578
WARN_ONCE(true, "invalid nr_accesses_bp at reset: %u %u\n",
1579
r->nr_accesses_bp, r->nr_accesses);
1580
r->nr_accesses_bp = r->nr_accesses * 10000;
1581
}
1582
1583
/*
1584
* Reset the aggregated monitoring results ('nr_accesses' of each region).
1585
*/
1586
static void kdamond_reset_aggregated(struct damon_ctx *c)
1587
{
1588
struct damon_target *t;
1589
unsigned int ti = 0; /* target's index */
1590
1591
damon_for_each_target(t, c) {
1592
struct damon_region *r;
1593
1594
damon_for_each_region(r, t) {
1595
trace_damon_aggregated(ti, r, damon_nr_regions(t));
1596
damon_warn_fix_nr_accesses_corruption(r);
1597
r->last_nr_accesses = r->nr_accesses;
1598
r->nr_accesses = 0;
1599
}
1600
ti++;
1601
}
1602
}
1603
1604
static unsigned long damon_get_intervals_score(struct damon_ctx *c)
1605
{
1606
struct damon_target *t;
1607
struct damon_region *r;
1608
unsigned long sz_region, max_access_events = 0, access_events = 0;
1609
unsigned long target_access_events;
1610
unsigned long goal_bp = c->attrs.intervals_goal.access_bp;
1611
1612
damon_for_each_target(t, c) {
1613
damon_for_each_region(r, t) {
1614
sz_region = damon_sz_region(r);
1615
max_access_events += sz_region * c->attrs.aggr_samples;
1616
access_events += sz_region * r->nr_accesses;
1617
}
1618
}
1619
target_access_events = max_access_events * goal_bp / 10000;
1620
target_access_events = target_access_events ? : 1;
1621
return access_events * 10000 / target_access_events;
1622
}
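/*
 * Worked example (an illustration): for a single 100 MiB region with
 * aggr_samples of 20, nr_accesses of 5, and an access_bp goal of 400 (4%),
 * max_access_events is 100 MiB * 20 and access_events is 100 MiB * 5.  The
 * target is 100 MiB * 20 * 400 / 10000, i.e. 100 MiB * 0.8, so the returned
 * score is 62500: observed access events are 6.25 times the goal, and the
 * tuning below will therefore shrink the sampling interval (by at most half
 * per step, due to the rescaling in damon_get_intervals_adaptation_bp()).
 */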
1623
1624
static unsigned long damon_feed_loop_next_input(unsigned long last_input,
1625
unsigned long score);
1626
1627
static unsigned long damon_get_intervals_adaptation_bp(struct damon_ctx *c)
1628
{
1629
unsigned long score_bp, adaptation_bp;
1630
1631
score_bp = damon_get_intervals_score(c);
1632
adaptation_bp = damon_feed_loop_next_input(100000000, score_bp) /
1633
10000;
1634
/*
1635
* adaptation_bp ranges from 1 to 20,000. Avoid too rapid reduction of
1636
* the intervals by rescaling [1,10,000] to [5000, 10,000].
1637
*/
1638
if (adaptation_bp <= 10000)
1639
adaptation_bp = 5000 + adaptation_bp / 2;
1640
return adaptation_bp;
1641
}
1642
1643
static void kdamond_tune_intervals(struct damon_ctx *c)
1644
{
1645
unsigned long adaptation_bp;
1646
struct damon_attrs new_attrs;
1647
struct damon_intervals_goal *goal;
1648
1649
adaptation_bp = damon_get_intervals_adaptation_bp(c);
1650
if (adaptation_bp == 10000)
1651
return;
1652
1653
new_attrs = c->attrs;
1654
goal = &c->attrs.intervals_goal;
1655
new_attrs.sample_interval = min(goal->max_sample_us,
1656
c->attrs.sample_interval * adaptation_bp / 10000);
1657
new_attrs.sample_interval = max(goal->min_sample_us,
1658
new_attrs.sample_interval);
1659
new_attrs.aggr_interval = new_attrs.sample_interval *
1660
c->attrs.aggr_samples;
1661
trace_damon_monitor_intervals_tune(new_attrs.sample_interval);
1662
damon_set_attrs(c, &new_attrs);
1663
}
1664
1665
static void damon_split_region_at(struct damon_target *t,
1666
struct damon_region *r, unsigned long sz_r);
1667
1668
static bool __damos_valid_target(struct damon_region *r, struct damos *s)
1669
{
1670
unsigned long sz;
1671
unsigned int nr_accesses = r->nr_accesses_bp / 10000;
1672
1673
sz = damon_sz_region(r);
1674
return s->pattern.min_sz_region <= sz &&
1675
sz <= s->pattern.max_sz_region &&
1676
s->pattern.min_nr_accesses <= nr_accesses &&
1677
nr_accesses <= s->pattern.max_nr_accesses &&
1678
s->pattern.min_age_region <= r->age &&
1679
r->age <= s->pattern.max_age_region;
1680
}
1681
1682
static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t,
1683
struct damon_region *r, struct damos *s)
1684
{
1685
bool ret = __damos_valid_target(r, s);
1686
1687
if (!ret || !s->quota.esz || !c->ops.get_scheme_score)
1688
return ret;
1689
1690
return c->ops.get_scheme_score(c, t, r, s) >= s->quota.min_score;
1691
}
1692
1693
/*
1694
* damos_skip_charged_region() - Check if the given region or starting part of
1695
* it is already charged for the DAMOS quota.
1696
* @t: The target of the region.
1697
* @rp: The pointer to the region.
1698
* @s: The scheme to be applied.
1699
* @min_region_sz: minimum region size.
1700
*
1701
* If a quota of a scheme has been exceeded in a quota charge window, the scheme's
1702
* action would be applied to only a part of the target access pattern fulfilling
1703
* regions. To avoid applying the scheme action to only already applied
1704
* regions, DAMON skips applying the scheme action to the regions that were charged
1705
* in the previous charge window.
1706
*
1707
* This function checks if a given region should be skipped or not for the
1708
* reason. If only the starting part of the region has previously been charged,
1709
* this function splits the region into two so that the second one covers the
1710
* area that was not charged in the previous charge window and saves the second
1711
* region in *rp and returns false, so that the caller can apply DAMON action
1712
* to the second one.
1713
*
1714
* Return: true if the region should be entirely skipped, false otherwise.
1715
*/
1716
static bool damos_skip_charged_region(struct damon_target *t,
1717
struct damon_region **rp, struct damos *s,
1718
unsigned long min_region_sz)
1719
{
1720
struct damon_region *r = *rp;
1721
struct damos_quota *quota = &s->quota;
1722
unsigned long sz_to_skip;
1723
1724
/* Skip previously charged regions */
1725
if (quota->charge_target_from) {
1726
if (t != quota->charge_target_from)
1727
return true;
1728
if (r == damon_last_region(t)) {
1729
quota->charge_target_from = NULL;
1730
quota->charge_addr_from = 0;
1731
return true;
1732
}
1733
if (quota->charge_addr_from &&
1734
r->ar.end <= quota->charge_addr_from)
1735
return true;
1736
1737
if (quota->charge_addr_from && r->ar.start <
1738
quota->charge_addr_from) {
1739
sz_to_skip = ALIGN_DOWN(quota->charge_addr_from -
1740
r->ar.start, min_region_sz);
1741
if (!sz_to_skip) {
1742
if (damon_sz_region(r) <= min_region_sz)
1743
return true;
1744
sz_to_skip = min_region_sz;
1745
}
1746
damon_split_region_at(t, r, sz_to_skip);
1747
r = damon_next_region(r);
1748
*rp = r;
1749
}
1750
quota->charge_target_from = NULL;
1751
quota->charge_addr_from = 0;
1752
}
1753
return false;
1754
}
1755
1756
static void damos_update_stat(struct damos *s,
1757
unsigned long sz_tried, unsigned long sz_applied,
1758
unsigned long sz_ops_filter_passed)
1759
{
1760
s->stat.nr_tried++;
1761
s->stat.sz_tried += sz_tried;
1762
if (sz_applied)
1763
s->stat.nr_applied++;
1764
s->stat.sz_applied += sz_applied;
1765
s->stat.sz_ops_filter_passed += sz_ops_filter_passed;
1766
}
1767
1768
static bool damos_filter_match(struct damon_ctx *ctx, struct damon_target *t,
1769
struct damon_region *r, struct damos_filter *filter,
1770
unsigned long min_region_sz)
1771
{
1772
bool matched = false;
1773
struct damon_target *ti;
1774
int target_idx = 0;
1775
unsigned long start, end;
1776
1777
switch (filter->type) {
1778
case DAMOS_FILTER_TYPE_TARGET:
1779
damon_for_each_target(ti, ctx) {
1780
if (ti == t)
1781
break;
1782
target_idx++;
1783
}
1784
matched = target_idx == filter->target_idx;
1785
break;
1786
case DAMOS_FILTER_TYPE_ADDR:
1787
start = ALIGN_DOWN(filter->addr_range.start, min_region_sz);
1788
end = ALIGN_DOWN(filter->addr_range.end, min_region_sz);
1789
1790
/* inside the range */
1791
if (start <= r->ar.start && r->ar.end <= end) {
1792
matched = true;
1793
break;
1794
}
1795
/* outside of the range */
1796
if (r->ar.end <= start || end <= r->ar.start) {
1797
matched = false;
1798
break;
1799
}
1800
/* start before the range and overlap */
1801
if (r->ar.start < start) {
1802
damon_split_region_at(t, r, start - r->ar.start);
1803
matched = false;
1804
break;
1805
}
1806
/* start inside the range */
1807
damon_split_region_at(t, r, end - r->ar.start);
1808
matched = true;
1809
break;
1810
default:
1811
return false;
1812
}
1813
1814
return matched == filter->matching;
1815
}
1816
1817
static bool damos_core_filter_out(struct damon_ctx *ctx, struct damon_target *t,
1818
struct damon_region *r, struct damos *s)
1819
{
1820
struct damos_filter *filter;
1821
1822
s->core_filters_allowed = false;
1823
damos_for_each_core_filter(filter, s) {
1824
if (damos_filter_match(ctx, t, r, filter, ctx->min_region_sz)) {
1825
if (filter->allow)
1826
s->core_filters_allowed = true;
1827
return !filter->allow;
1828
}
1829
}
1830
return s->core_filters_default_reject;
1831
}
1832
1833
/*
1834
* damos_walk_call_walk() - Call &damos_walk_control->walk_fn.
1835
* @ctx: The context of &damon_ctx->walk_control.
1836
* @t: The monitoring target of @r that @s will be applied.
1837
* @r: The region of @t that @s will be applied.
1838
* @s: The scheme of @ctx that will be applied to @r.
1839
*
1840
* This function is called from kdamond whenever it asked the operation set to
1841
* apply a DAMOS scheme action to a region. If a DAMOS walk request is
1842
* installed by damos_walk() and not yet uninstalled, invoke it.
1843
*/
1844
static void damos_walk_call_walk(struct damon_ctx *ctx, struct damon_target *t,
1845
struct damon_region *r, struct damos *s,
1846
unsigned long sz_filter_passed)
1847
{
1848
struct damos_walk_control *control;
1849
1850
if (s->walk_completed)
1851
return;
1852
1853
control = ctx->walk_control;
1854
if (!control)
1855
return;
1856
1857
control->walk_fn(control->data, ctx, t, r, s, sz_filter_passed);
1858
}
1859
1860
/*
1861
* damos_walk_complete() - Complete DAMOS walk request if all walks are done.
1862
* @ctx: The context of &damon_ctx->walk_control.
1863
* @s: A scheme of @ctx that all walks are now done.
1864
*
1865
* This function is called when kdamond finished applying the action of a DAMOS
1866
* scheme to all regions that are eligible for the given &damos->apply_interval_us.
1867
* If every scheme of @ctx including @s now finished walking for at least one
1868
* &damos->apply_interval_us, this function marks the handling of the given
1869
* DAMOS walk request is done, so that damos_walk() can wake up and return.
1870
*/
1871
static void damos_walk_complete(struct damon_ctx *ctx, struct damos *s)
1872
{
1873
struct damos *siter;
1874
struct damos_walk_control *control;
1875
1876
control = ctx->walk_control;
1877
if (!control)
1878
return;
1879
1880
s->walk_completed = true;
1881
/* if all schemes completed, signal completion to walker */
1882
damon_for_each_scheme(siter, ctx) {
1883
if (!siter->walk_completed)
1884
return;
1885
}
1886
damon_for_each_scheme(siter, ctx)
1887
siter->walk_completed = false;
1888
1889
complete(&control->completion);
1890
ctx->walk_control = NULL;
1891
}
1892
1893
/*
1894
* damos_walk_cancel() - Cancel the current DAMOS walk request.
1895
* @ctx: The context of &damon_ctx->walk_control.
1896
*
1897
* This function is called when @ctx is deactivated by DAMOS watermarks, DAMOS
1898
* walk is requested but there is no DAMOS scheme to walk for, or the kdamond
1899
* is already out of the main loop and is therefore going to be terminated, and hence
1900
* cannot continue the walks. This function therefore marks the walk request
1901
* as canceled, so that damos_walk() can wake up and return.
1902
*/
1903
static void damos_walk_cancel(struct damon_ctx *ctx)
1904
{
1905
struct damos_walk_control *control;
1906
1907
mutex_lock(&ctx->walk_control_lock);
1908
control = ctx->walk_control;
1909
mutex_unlock(&ctx->walk_control_lock);
1910
1911
if (!control)
1912
return;
1913
control->canceled = true;
1914
complete(&control->completion);
1915
mutex_lock(&ctx->walk_control_lock);
1916
ctx->walk_control = NULL;
1917
mutex_unlock(&ctx->walk_control_lock);
1918
}
1919
1920
static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
		struct damon_region *r, struct damos *s)
{
	struct damos_quota *quota = &s->quota;
	unsigned long sz = damon_sz_region(r);
	struct timespec64 begin, end;
	unsigned long sz_applied = 0;
	unsigned long sz_ops_filter_passed = 0;
	/*
	 * We plan to support multiple contexts per kdamond, as the DAMON
	 * sysfs interface implies with its 'nr_contexts' file.  Nevertheless,
	 * only a single context per kdamond is supported for now.  So, we can
	 * simply use the '0' context index here.
	 */
	unsigned int cidx = 0;
	struct damos *siter;		/* schemes iterator */
	unsigned int sidx = 0;
	struct damon_target *titer;	/* targets iterator */
	unsigned int tidx = 0;
	bool do_trace = false;

	/* get indices for trace_damos_before_apply() */
	if (trace_damos_before_apply_enabled()) {
		damon_for_each_scheme(siter, c) {
			if (siter == s)
				break;
			sidx++;
		}
		damon_for_each_target(titer, c) {
			if (titer == t)
				break;
			tidx++;
		}
		do_trace = true;
	}

	if (c->ops.apply_scheme) {
		if (quota->esz && quota->charged_sz + sz > quota->esz) {
			sz = ALIGN_DOWN(quota->esz - quota->charged_sz,
					c->min_region_sz);
			if (!sz)
				goto update_stat;
			damon_split_region_at(t, r, sz);
		}
		if (damos_core_filter_out(c, t, r, s))
			return;
		ktime_get_coarse_ts64(&begin);
		trace_damos_before_apply(cidx, sidx, tidx, r,
				damon_nr_regions(t), do_trace);
		sz_applied = c->ops.apply_scheme(c, t, r, s,
				&sz_ops_filter_passed);
		damos_walk_call_walk(c, t, r, s, sz_ops_filter_passed);
		ktime_get_coarse_ts64(&end);
		quota->total_charged_ns += timespec64_to_ns(&end) -
			timespec64_to_ns(&begin);
		quota->charged_sz += sz;
		if (quota->esz && quota->charged_sz >= quota->esz) {
			quota->charge_target_from = t;
			quota->charge_addr_from = r->ar.end + 1;
		}
	}
	if (s->action != DAMOS_STAT)
		r->age = 0;

update_stat:
	damos_update_stat(s, sz, sz_applied, sz_ops_filter_passed);
}

static void damon_do_apply_schemes(struct damon_ctx *c,
				   struct damon_target *t,
				   struct damon_region *r)
{
	struct damos *s;

	damon_for_each_scheme(s, c) {
		struct damos_quota *quota = &s->quota;

		if (c->passed_sample_intervals < s->next_apply_sis)
			continue;

		if (!s->wmarks.activated)
			continue;

		/* Check the quota */
		if (quota->esz && quota->charged_sz >= quota->esz)
			continue;

		if (damos_skip_charged_region(t, &r, s, c->min_region_sz))
			continue;

		if (s->max_nr_snapshots &&
				s->max_nr_snapshots <= s->stat.nr_snapshots)
			continue;

		if (damos_valid_target(c, t, r, s))
			damos_apply_scheme(c, t, r, s);

		if (damon_is_last_region(r, t))
			s->stat.nr_snapshots++;
	}
}

/*
 * damon_feed_loop_next_input() - get next input to achieve a target score.
 * @last_input:	The last input.
 * @score:	Current score that was made with @last_input.
 *
 * Calculate the next input to achieve the target score, based on the last
 * input and the current score.  Assuming the input and the score are
 * positively proportional, calculate how much compensation should be added
 * to or subtracted from the last input as a proportion of the last input.
 * Avoid the next input always being zero by keeping it non-zero.  In short
 * form (assuming support of float and signed calculations), the algorithm is
 * as below.
 *
 * next_input = max(last_input * ((goal - current) / goal + 1), 1)
 *
 * For simple implementation, we assume the target score is always 10,000.
 * The caller should adjust @score for this.
 *
 * Returns the next input that is assumed to achieve the target score.
 */
static unsigned long damon_feed_loop_next_input(unsigned long last_input,
		unsigned long score)
{
	const unsigned long goal = 10000;
	/* Set the minimum input as 10000 to avoid the compensation being zero */
	const unsigned long min_input = 10000;
	unsigned long score_goal_diff, compensation;
	bool over_achieving = score > goal;

	if (score == goal)
		return last_input;
	if (score >= goal * 2)
		return min_input;

	if (over_achieving)
		score_goal_diff = score - goal;
	else
		score_goal_diff = goal - score;

	if (last_input < ULONG_MAX / score_goal_diff)
		compensation = last_input * score_goal_diff / goal;
	else
		compensation = last_input / goal * score_goal_diff;

	if (over_achieving)
		return max(last_input - compensation, min_input);
	if (last_input < ULONG_MAX - compensation)
		return last_input + compensation;
	return ULONG_MAX;
}

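/*
 * A worked example of the feedback loop above, with hypothetical numbers for
 * illustration only (none of these values come from the code): let the last
 * input be 100000 and the target score be the fixed 10000.  If the measured
 * score is 5000 (half of the goal), score_goal_diff is 5000, the compensation
 * is 100000 * 5000 / 10000 = 50000, and the next input grows to 150000.  If
 * the score is instead 15000 (1.5x over-achieving), the same compensation is
 * subtracted and the next input shrinks to 50000.  A score of 20000 or more
 * immediately returns min_input (10000).
 */
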
#ifdef CONFIG_PSI

static u64 damos_get_some_mem_psi_total(void)
{
	if (static_branch_likely(&psi_disabled))
		return 0;
	return div_u64(psi_system.total[PSI_AVGS][PSI_MEM * 2],
			NSEC_PER_USEC);
}

#else	/* CONFIG_PSI */

static inline u64 damos_get_some_mem_psi_total(void)
{
	return 0;
}

#endif	/* CONFIG_PSI */

#ifdef CONFIG_NUMA
static __kernel_ulong_t damos_get_node_mem_bp(
		struct damos_quota_goal *goal)
{
	struct sysinfo i;
	__kernel_ulong_t numerator;

	si_meminfo_node(&i, goal->nid);
	if (goal->metric == DAMOS_QUOTA_NODE_MEM_USED_BP)
		numerator = i.totalram - i.freeram;
	else	/* DAMOS_QUOTA_NODE_MEM_FREE_BP */
		numerator = i.freeram;
	return numerator * 10000 / i.totalram;
}

static unsigned long damos_get_node_memcg_used_bp(
		struct damos_quota_goal *goal)
{
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;
	unsigned long used_pages, numerator;
	struct sysinfo i;

	memcg = mem_cgroup_get_from_id(goal->memcg_id);
	if (!memcg) {
		if (goal->metric == DAMOS_QUOTA_NODE_MEMCG_USED_BP)
			return 0;
		else	/* DAMOS_QUOTA_NODE_MEMCG_FREE_BP */
			return 10000;
	}

	mem_cgroup_flush_stats(memcg);
	lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(goal->nid));
	used_pages = lruvec_page_state(lruvec, NR_ACTIVE_ANON);
	used_pages += lruvec_page_state(lruvec, NR_INACTIVE_ANON);
	used_pages += lruvec_page_state(lruvec, NR_ACTIVE_FILE);
	used_pages += lruvec_page_state(lruvec, NR_INACTIVE_FILE);

	mem_cgroup_put(memcg);

	si_meminfo_node(&i, goal->nid);
	if (goal->metric == DAMOS_QUOTA_NODE_MEMCG_USED_BP)
		numerator = used_pages;
	else	/* DAMOS_QUOTA_NODE_MEMCG_FREE_BP */
		numerator = i.totalram - used_pages;
	return numerator * 10000 / i.totalram;
}
#else
static __kernel_ulong_t damos_get_node_mem_bp(
		struct damos_quota_goal *goal)
{
	return 0;
}

static unsigned long damos_get_node_memcg_used_bp(
		struct damos_quota_goal *goal)
{
	return 0;
}
#endif

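/*
 * Example of the basis point (bp, 0.01%) conversion above, with hypothetical
 * numbers for illustration only: if si_meminfo_node() reports totalram of
 * 4,000,000 pages and freeram of 1,000,000 pages for the node,
 * DAMOS_QUOTA_NODE_MEM_USED_BP evaluates to
 * (4,000,000 - 1,000,000) * 10000 / 4,000,000 = 7500 (75%), while
 * DAMOS_QUOTA_NODE_MEM_FREE_BP evaluates to 2500 (25%).
 */
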
/*
 * Return the LRU-active or -inactive memory to total LRU memory size ratio,
 * in basis points (bp, or 0.01%).
 */
static unsigned int damos_get_in_active_mem_bp(bool active_ratio)
{
	unsigned long active, inactive, total;

	/* This should align with /proc/meminfo output */
	active = global_node_page_state(NR_LRU_BASE + LRU_ACTIVE_ANON) +
		global_node_page_state(NR_LRU_BASE + LRU_ACTIVE_FILE);
	inactive = global_node_page_state(NR_LRU_BASE + LRU_INACTIVE_ANON) +
		global_node_page_state(NR_LRU_BASE + LRU_INACTIVE_FILE);
	total = active + inactive;
	if (active_ratio)
		return active * 10000 / total;
	return inactive * 10000 / total;
}

static void damos_set_quota_goal_current_value(struct damos_quota_goal *goal)
{
	u64 now_psi_total;

	switch (goal->metric) {
	case DAMOS_QUOTA_USER_INPUT:
		/* User should already set goal->current_value */
		break;
	case DAMOS_QUOTA_SOME_MEM_PSI_US:
		now_psi_total = damos_get_some_mem_psi_total();
		goal->current_value = now_psi_total - goal->last_psi_total;
		goal->last_psi_total = now_psi_total;
		break;
	case DAMOS_QUOTA_NODE_MEM_USED_BP:
	case DAMOS_QUOTA_NODE_MEM_FREE_BP:
		goal->current_value = damos_get_node_mem_bp(goal);
		break;
	case DAMOS_QUOTA_NODE_MEMCG_USED_BP:
	case DAMOS_QUOTA_NODE_MEMCG_FREE_BP:
		goal->current_value = damos_get_node_memcg_used_bp(goal);
		break;
	case DAMOS_QUOTA_ACTIVE_MEM_BP:
	case DAMOS_QUOTA_INACTIVE_MEM_BP:
		goal->current_value = damos_get_in_active_mem_bp(
				goal->metric == DAMOS_QUOTA_ACTIVE_MEM_BP);
		break;
	default:
		break;
	}
}

/* Return the highest score since it makes schemes least aggressive */
static unsigned long damos_quota_score(struct damos_quota *quota)
{
	struct damos_quota_goal *goal;
	unsigned long highest_score = 0;

	damos_for_each_quota_goal(goal, quota) {
		damos_set_quota_goal_current_value(goal);
		highest_score = max(highest_score,
				goal->current_value * 10000 /
				goal->target_value);
	}

	return highest_score;
}

/*
 * Called only if quota->ms or quota->sz is set, or quota->goals is not empty.
 */
static void damos_set_effective_quota(struct damos_quota *quota)
{
	unsigned long throughput;
	unsigned long esz = ULONG_MAX;

	if (!quota->ms && list_empty(&quota->goals)) {
		quota->esz = quota->sz;
		return;
	}

	if (!list_empty(&quota->goals)) {
		unsigned long score = damos_quota_score(quota);

		quota->esz_bp = damon_feed_loop_next_input(
				max(quota->esz_bp, 10000UL),
				score);
		esz = quota->esz_bp / 10000;
	}

	if (quota->ms) {
		if (quota->total_charged_ns)
			throughput = mult_frac(quota->total_charged_sz, 1000000,
					quota->total_charged_ns);
		else
			throughput = PAGE_SIZE * 1024;
		esz = min(throughput * quota->ms, esz);
	}

	if (quota->sz && quota->sz < esz)
		esz = quota->sz;

	quota->esz = esz;
}

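/*
 * A rough example of the effective quota calculation above, using
 * hypothetical numbers for illustration only: suppose quota->ms is 100,
 * quota->sz is 16 MiB, there are no goals, and previous windows charged about
 * 100 MiB (total_charged_sz) over one second (total_charged_ns of 10^9).  The
 * estimated throughput is then roughly 100 KiB per millisecond, so the time
 * quota alone allows about 10 MiB per charge window.  Since that is smaller
 * than the 16 MiB size quota, quota->esz ends up at roughly 10 MiB.
 */
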
static void damos_trace_esz(struct damon_ctx *c, struct damos *s,
		struct damos_quota *quota)
{
	unsigned int cidx = 0, sidx = 0;
	struct damos *siter;

	damon_for_each_scheme(siter, c) {
		if (siter == s)
			break;
		sidx++;
	}
	trace_damos_esz(cidx, sidx, quota->esz);
}

static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
{
	struct damos_quota *quota = &s->quota;
	struct damon_target *t;
	struct damon_region *r;
	unsigned long cumulated_sz, cached_esz;
	unsigned int score, max_score = 0;

	if (!quota->ms && !quota->sz && list_empty(&quota->goals))
		return;

	/* First charge window */
	if (!quota->total_charged_sz && !quota->charged_from) {
		quota->charged_from = jiffies;
		damos_set_effective_quota(quota);
	}

	/* New charge window starts */
	if (time_after_eq(jiffies, quota->charged_from +
				msecs_to_jiffies(quota->reset_interval))) {
		if (quota->esz && quota->charged_sz >= quota->esz)
			s->stat.qt_exceeds++;
		quota->total_charged_sz += quota->charged_sz;
		quota->charged_from = jiffies;
		quota->charged_sz = 0;
		if (trace_damos_esz_enabled())
			cached_esz = quota->esz;
		damos_set_effective_quota(quota);
		if (trace_damos_esz_enabled() && quota->esz != cached_esz)
			damos_trace_esz(c, s, quota);
	}

	if (!c->ops.get_scheme_score)
		return;

	/* Fill up the score histogram */
	memset(c->regions_score_histogram, 0,
			sizeof(*c->regions_score_histogram) *
			(DAMOS_MAX_SCORE + 1));
	damon_for_each_target(t, c) {
		damon_for_each_region(r, t) {
			if (!__damos_valid_target(r, s))
				continue;
			score = c->ops.get_scheme_score(c, t, r, s);
			c->regions_score_histogram[score] +=
				damon_sz_region(r);
			if (score > max_score)
				max_score = score;
		}
	}

	/* Set the min score limit */
	for (cumulated_sz = 0, score = max_score; ; score--) {
		cumulated_sz += c->regions_score_histogram[score];
		if (cumulated_sz >= quota->esz || !score)
			break;
	}
	quota->min_score = score;
}

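/*
 * Example of the min score selection above, with hypothetical numbers for
 * illustration only: suppose quota->esz is 3 MiB and the histogram holds
 * 1 MiB of regions at score 90, 1 MiB at score 80, and 2 MiB at score 70.
 * Walking down from the maximum score, the cumulated size reaches 4 MiB
 * (>= esz) at score 70, so quota->min_score becomes 70 and only regions whose
 * prioritization score is 70 or higher receive the action in this charge
 * window.
 */
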
static void damos_trace_stat(struct damon_ctx *c, struct damos *s)
{
	unsigned int cidx = 0, sidx = 0;
	struct damos *siter;

	if (!trace_damos_stat_after_apply_interval_enabled())
		return;

	damon_for_each_scheme(siter, c) {
		if (siter == s)
			break;
		sidx++;
	}
	trace_damos_stat_after_apply_interval(cidx, sidx, &s->stat);
}

static void kdamond_apply_schemes(struct damon_ctx *c)
{
	struct damon_target *t;
	struct damon_region *r, *next_r;
	struct damos *s;
	unsigned long sample_interval = c->attrs.sample_interval ?
		c->attrs.sample_interval : 1;
	bool has_schemes_to_apply = false;

	damon_for_each_scheme(s, c) {
		if (c->passed_sample_intervals < s->next_apply_sis)
			continue;

		if (!s->wmarks.activated)
			continue;

		has_schemes_to_apply = true;

		damos_adjust_quota(c, s);
	}

	if (!has_schemes_to_apply)
		return;

	mutex_lock(&c->walk_control_lock);
	damon_for_each_target(t, c) {
		if (c->ops.target_valid && c->ops.target_valid(t) == false)
			continue;

		damon_for_each_region_safe(r, next_r, t)
			damon_do_apply_schemes(c, t, r);
	}

	damon_for_each_scheme(s, c) {
		if (c->passed_sample_intervals < s->next_apply_sis)
			continue;
		damos_walk_complete(c, s);
		s->next_apply_sis = c->passed_sample_intervals +
			(s->apply_interval_us ? s->apply_interval_us :
			 c->attrs.aggr_interval) / sample_interval;
		s->last_applied = NULL;
		damos_trace_stat(c, s);
	}
	mutex_unlock(&c->walk_control_lock);
}

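/*
 * Example of the next_apply_sis arithmetic above, with hypothetical numbers
 * for illustration only: with a sampling interval of 5000 us and a scheme
 * whose apply_interval_us is 200000, the scheme is applied once every
 * 200000 / 5000 = 40 sample intervals.  If apply_interval_us is zero, the
 * aggregation interval (say 100000 us) is used instead, i.e., once every 20
 * sample intervals.
 */
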
/*
 * Merge two adjacent regions into one region
 */
static void damon_merge_two_regions(struct damon_target *t,
		struct damon_region *l, struct damon_region *r)
{
	unsigned long sz_l = damon_sz_region(l), sz_r = damon_sz_region(r);

	l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
			(sz_l + sz_r);
	l->nr_accesses_bp = l->nr_accesses * 10000;
	l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r);
	l->ar.end = r->ar.end;
	damon_destroy_region(r, t);
}

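/*
 * Example of the size-weighted averaging above, with hypothetical numbers for
 * illustration only: merging an 8 KiB region with nr_accesses of 4 and an
 * adjacent 4 KiB region with nr_accesses of 1 yields
 * (4 * 8192 + 1 * 4096) / 12288 = 3, and nr_accesses_bp of the merged region
 * is reset to 3 * 10000 = 30000.
 */
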
/*
 * Merge adjacent regions having similar access frequencies
 *
 * t		target affected by this merge operation
 * thres	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 */
static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
				   unsigned long sz_limit)
{
	struct damon_region *r, *prev = NULL, *next;

	damon_for_each_region_safe(r, next, t) {
		if (abs(r->nr_accesses - r->last_nr_accesses) > thres)
			r->age = 0;
		else if ((r->nr_accesses == 0) != (r->last_nr_accesses == 0))
			r->age = 0;
		else
			r->age++;

		if (prev && prev->ar.end == r->ar.start &&
		    abs(prev->nr_accesses - r->nr_accesses) <= thres &&
		    damon_sz_region(prev) + damon_sz_region(r) <= sz_limit)
			damon_merge_two_regions(t, prev, r);
		else
			prev = r;
	}
}

/*
 * Merge adjacent regions having similar access frequencies
 *
 * threshold	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 *
 * This function merges monitoring target regions which are adjacent and their
 * access frequencies are similar.  This is for minimizing the monitoring
 * overhead under the dynamically changeable access pattern.  If a merge was
 * unnecessarily made, later 'kdamond_split_regions()' will revert it.
 *
 * The total number of regions could be higher than the user-defined limit,
 * max_nr_regions, for some cases.  For example, the user can update
 * max_nr_regions to a number lower than the current number of regions while
 * DAMON is running.  For such a case, repeat merging until the limit is met
 * while increasing @threshold up to the possible maximum level.
 */
static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
				  unsigned long sz_limit)
{
	struct damon_target *t;
	unsigned int nr_regions;
	unsigned int max_thres;

	max_thres = c->attrs.aggr_interval /
		(c->attrs.sample_interval ? c->attrs.sample_interval : 1);
	do {
		nr_regions = 0;
		damon_for_each_target(t, c) {
			damon_merge_regions_of(t, threshold, sz_limit);
			nr_regions += damon_nr_regions(t);
		}
		threshold = max(1, threshold * 2);
	} while (nr_regions > c->attrs.max_nr_regions &&
			threshold / 2 < max_thres);
}

/*
 * Split a region in two
 *
 * r		the region to be split
 * sz_r		size of the first sub-region that will be made
 */
static void damon_split_region_at(struct damon_target *t,
		struct damon_region *r, unsigned long sz_r)
{
	struct damon_region *new;

	new = damon_new_region(r->ar.start + sz_r, r->ar.end);
	if (!new)
		return;

	r->ar.end = new->ar.start;

	new->age = r->age;
	new->last_nr_accesses = r->last_nr_accesses;
	new->nr_accesses_bp = r->nr_accesses_bp;
	new->nr_accesses = r->nr_accesses;

	damon_insert_region(new, r, damon_next_region(r), t);
}

/* Split every region in the given target into 'nr_subs' regions */
static void damon_split_regions_of(struct damon_target *t, int nr_subs,
		unsigned long min_region_sz)
{
	struct damon_region *r, *next;
	unsigned long sz_region, sz_sub = 0;
	int i;

	damon_for_each_region_safe(r, next, t) {
		sz_region = damon_sz_region(r);

		for (i = 0; i < nr_subs - 1 &&
				sz_region > 2 * min_region_sz; i++) {
			/*
			 * Randomly select the size of the left sub-region to
			 * be at least 10% and at most 90% of the original
			 * region
			 */
			sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
					sz_region / 10, min_region_sz);
			/* Do not allow blank region */
			if (sz_sub == 0 || sz_sub >= sz_region)
				continue;

			damon_split_region_at(t, r, sz_sub);
			sz_region = sz_sub;
		}
	}
}

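/*
 * Example of the sub-region size selection above, with hypothetical numbers
 * for illustration only: for a 10 MiB region, min_region_sz of 4096, and
 * damon_rand(1, 10) returning 3, sz_sub becomes
 * ALIGN_DOWN(3 * 10 MiB / 10, 4096) = 3 MiB, so the region is split into a
 * 3 MiB left part and a 7 MiB right part, and the next iteration works on the
 * 3 MiB part.
 */
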
/*
 * Split every target region into randomly-sized small regions
 *
 * This function splits every target region into random-sized small regions if
 * the current total number of the regions is equal to or smaller than half of
 * the user-specified maximum number of regions.  This is for maximizing the
 * monitoring accuracy under the dynamically changeable access patterns.  If a
 * split was unnecessarily made, later 'kdamond_merge_regions()' will revert
 * it.
 */
static void kdamond_split_regions(struct damon_ctx *ctx)
{
	struct damon_target *t;
	unsigned int nr_regions = 0;
	static unsigned int last_nr_regions;
	int nr_subregions = 2;

	damon_for_each_target(t, ctx)
		nr_regions += damon_nr_regions(t);

	if (nr_regions > ctx->attrs.max_nr_regions / 2)
		return;

	/* Maybe the middle of the region has different access frequency */
	if (last_nr_regions == nr_regions &&
			nr_regions < ctx->attrs.max_nr_regions / 3)
		nr_subregions = 3;

	damon_for_each_target(t, ctx)
		damon_split_regions_of(t, nr_subregions, ctx->min_region_sz);

	last_nr_regions = nr_regions;
}

/*
 * Check whether current monitoring should be stopped
 *
 * The monitoring is stopped when either the user requested to stop, or all
 * monitoring targets are invalid.
 *
 * Returns true if the current monitoring should be stopped.
 */
static bool kdamond_need_stop(struct damon_ctx *ctx)
{
	struct damon_target *t;

	if (kthread_should_stop())
		return true;

	if (!ctx->ops.target_valid)
		return false;

	damon_for_each_target(t, ctx) {
		if (ctx->ops.target_valid(t))
			return false;
	}

	return true;
}

static int damos_get_wmark_metric_value(enum damos_wmark_metric metric,
		unsigned long *metric_value)
{
	switch (metric) {
	case DAMOS_WMARK_FREE_MEM_RATE:
		*metric_value = global_zone_page_state(NR_FREE_PAGES) * 1000 /
			totalram_pages();
		return 0;
	default:
		break;
	}
	return -EINVAL;
}

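/*
 * Example of the DAMOS_WMARK_FREE_MEM_RATE metric above, with hypothetical
 * numbers for illustration only: with totalram_pages() of 4,000,000 and
 * 1,000,000 free pages, the metric value is
 * 1,000,000 * 1000 / 4,000,000 = 250, i.e., 25.0% of memory is free, in
 * per-thousand units.
 */
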
/*
 * Returns zero if the scheme is active.  Else, returns time to wait for the
 * next watermark check in microseconds.
 */
static unsigned long damos_wmark_wait_us(struct damos *scheme)
{
	unsigned long metric;

	if (damos_get_wmark_metric_value(scheme->wmarks.metric, &metric))
		return 0;

	/* higher than high watermark or lower than low watermark */
	if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) {
		if (scheme->wmarks.activated)
			pr_debug("deactivate a scheme (%d) for %s wmark\n",
					scheme->action,
					str_high_low(metric > scheme->wmarks.high));
		scheme->wmarks.activated = false;
		return scheme->wmarks.interval;
	}

	/* inactive and higher than middle watermark */
	if ((scheme->wmarks.high >= metric && metric >= scheme->wmarks.mid) &&
			!scheme->wmarks.activated)
		return scheme->wmarks.interval;

	if (!scheme->wmarks.activated)
		pr_debug("activate a scheme (%d)\n", scheme->action);
	scheme->wmarks.activated = true;
	return 0;
}

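/*
 * A worked example of the watermark checks above, with hypothetical
 * per-thousand watermarks for illustration only: assume high = 500, mid = 300
 * and low = 200.  A metric of 600 (or 100) deactivates the scheme and returns
 * the check interval.  A metric of 400 keeps an already inactive scheme
 * inactive, while an active scheme stays active.  A metric of 250, between
 * low and mid, activates the scheme and returns zero.
 */
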
static void kdamond_usleep(unsigned long usecs)
{
	if (usecs >= USLEEP_RANGE_UPPER_BOUND)
		schedule_timeout_idle(usecs_to_jiffies(usecs));
	else
		usleep_range_idle(usecs, usecs + 1);
}

/*
 * kdamond_call() - handle damon_call_control objects.
 * @ctx:	The &struct damon_ctx of the kdamond.
 * @cancel:	Whether to cancel the invocation of the function.
 *
 * If there are &struct damon_call_control requests that are registered via
 * &damon_call() on @ctx, do or cancel the invocation of the function
 * depending on @cancel.  @cancel is set when the kdamond is already out of
 * the main loop and therefore will be terminated.
 */
static void kdamond_call(struct damon_ctx *ctx, bool cancel)
{
	struct damon_call_control *control, *next;
	LIST_HEAD(controls);

	mutex_lock(&ctx->call_controls_lock);
	list_splice_tail_init(&ctx->call_controls, &controls);
	mutex_unlock(&ctx->call_controls_lock);

	list_for_each_entry_safe(control, next, &controls, list) {
		if (!control->repeat || cancel)
			list_del(&control->list);

		if (cancel)
			control->canceled = true;
		else
			control->return_code = control->fn(control->data);

		if (!control->repeat)
			complete(&control->completion);
		else if (control->canceled && control->dealloc_on_cancel)
			kfree(control);
	}

	mutex_lock(&ctx->call_controls_lock);
	list_splice_tail(&controls, &ctx->call_controls);
	mutex_unlock(&ctx->call_controls_lock);
}

/* Returns negative error code if it's not activated but should return */
static int kdamond_wait_activation(struct damon_ctx *ctx)
{
	struct damos *s;
	unsigned long wait_time;
	unsigned long min_wait_time = 0;
	bool init_wait_time = false;

	while (!kdamond_need_stop(ctx)) {
		damon_for_each_scheme(s, ctx) {
			wait_time = damos_wmark_wait_us(s);
			if (!init_wait_time || wait_time < min_wait_time) {
				init_wait_time = true;
				min_wait_time = wait_time;
			}
		}
		if (!min_wait_time)
			return 0;

		kdamond_usleep(min_wait_time);

		kdamond_call(ctx, false);
		damos_walk_cancel(ctx);
	}
	return -EBUSY;
}

static void kdamond_init_ctx(struct damon_ctx *ctx)
{
	unsigned long sample_interval = ctx->attrs.sample_interval ?
		ctx->attrs.sample_interval : 1;
	unsigned long apply_interval;
	struct damos *scheme;

	ctx->passed_sample_intervals = 0;
	ctx->next_aggregation_sis = ctx->attrs.aggr_interval / sample_interval;
	ctx->next_ops_update_sis = ctx->attrs.ops_update_interval /
		sample_interval;
	ctx->next_intervals_tune_sis = ctx->next_aggregation_sis *
		ctx->attrs.intervals_goal.aggrs;

	damon_for_each_scheme(scheme, ctx) {
		apply_interval = scheme->apply_interval_us ?
			scheme->apply_interval_us : ctx->attrs.aggr_interval;
		scheme->next_apply_sis = apply_interval / sample_interval;
		damos_set_filters_default_reject(scheme);
	}
}

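/*
 * Example of the initial sample interval stamp ("sis") arithmetic above, with
 * hypothetical numbers for illustration only: with sample_interval of
 * 5000 us, aggr_interval of 100000 us and ops_update_interval of 1000000 us,
 * the first aggregation happens after 20 passed sample intervals and the
 * first operations update after 200.  A scheme whose apply_interval_us is
 * zero inherits the aggregation interval, so its next_apply_sis also starts
 * at 20.
 */
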
/*
 * The monitoring daemon that runs as a kernel thread
 */
static int kdamond_fn(void *data)
{
	struct damon_ctx *ctx = data;
	unsigned int max_nr_accesses = 0;
	unsigned long sz_limit = 0;

	pr_debug("kdamond (%d) starts\n", current->pid);

	complete(&ctx->kdamond_started);
	kdamond_init_ctx(ctx);

	if (ctx->ops.init)
		ctx->ops.init(ctx);
	ctx->regions_score_histogram = kmalloc_array(DAMOS_MAX_SCORE + 1,
			sizeof(*ctx->regions_score_histogram), GFP_KERNEL);
	if (!ctx->regions_score_histogram)
		goto done;

	sz_limit = damon_region_sz_limit(ctx);

	while (!kdamond_need_stop(ctx)) {
		/*
		 * ctx->attrs and ctx->next_{aggregation,ops_update}_sis could
		 * be changed from kdamond_call().  Read the values here, and
		 * use those for this iteration.  That is, the new values that
		 * damon_set_attrs() updated are respected from the next
		 * iteration.
		 */
		unsigned long next_aggregation_sis = ctx->next_aggregation_sis;
		unsigned long next_ops_update_sis = ctx->next_ops_update_sis;
		unsigned long sample_interval = ctx->attrs.sample_interval;

		if (kdamond_wait_activation(ctx))
			break;

		if (ctx->ops.prepare_access_checks)
			ctx->ops.prepare_access_checks(ctx);

		kdamond_usleep(sample_interval);
		ctx->passed_sample_intervals++;

		if (ctx->ops.check_accesses)
			max_nr_accesses = ctx->ops.check_accesses(ctx);

		if (ctx->passed_sample_intervals >= next_aggregation_sis)
			kdamond_merge_regions(ctx,
					max_nr_accesses / 10,
					sz_limit);

		/*
		 * do kdamond_call() and kdamond_apply_schemes() after
		 * kdamond_merge_regions() if possible, to reduce overhead
		 */
		kdamond_call(ctx, false);
		if (!list_empty(&ctx->schemes))
			kdamond_apply_schemes(ctx);
		else
			damos_walk_cancel(ctx);

		sample_interval = ctx->attrs.sample_interval ?
			ctx->attrs.sample_interval : 1;
		if (ctx->passed_sample_intervals >= next_aggregation_sis) {
			if (ctx->attrs.intervals_goal.aggrs &&
					ctx->passed_sample_intervals >=
					ctx->next_intervals_tune_sis) {
				/*
				 * ctx->next_aggregation_sis might be updated
				 * from kdamond_call().  In that case,
				 * damon_set_attrs(), which will be called
				 * from kdamond_tune_intervals(), may wrongly
				 * think this is in the middle of the current
				 * aggregation, and make the aggregation
				 * information reset for all regions.  Then,
				 * the following kdamond_reset_aggregated()
				 * call will make the region information
				 * invalid, particularly for ->nr_accesses_bp.
				 *
				 * Reset ->next_aggregation_sis to avoid that.
				 * It will anyway be correctly updated after
				 * this if clause.
				 */
				ctx->next_aggregation_sis =
					next_aggregation_sis;
				ctx->next_intervals_tune_sis +=
					ctx->attrs.aggr_samples *
					ctx->attrs.intervals_goal.aggrs;
				kdamond_tune_intervals(ctx);
				sample_interval = ctx->attrs.sample_interval ?
					ctx->attrs.sample_interval : 1;
			}
			ctx->next_aggregation_sis = next_aggregation_sis +
				ctx->attrs.aggr_interval / sample_interval;

			kdamond_reset_aggregated(ctx);
			kdamond_split_regions(ctx);
		}

		if (ctx->passed_sample_intervals >= next_ops_update_sis) {
			ctx->next_ops_update_sis = next_ops_update_sis +
				ctx->attrs.ops_update_interval /
				sample_interval;
			if (ctx->ops.update)
				ctx->ops.update(ctx);
			sz_limit = damon_region_sz_limit(ctx);
		}
	}
done:
	damon_destroy_targets(ctx);

	kfree(ctx->regions_score_histogram);
	kdamond_call(ctx, true);
	damos_walk_cancel(ctx);

	pr_debug("kdamond (%d) finishes\n", current->pid);
	mutex_lock(&ctx->kdamond_lock);
	ctx->kdamond = NULL;
	mutex_unlock(&ctx->kdamond_lock);

	mutex_lock(&damon_lock);
	nr_running_ctxs--;
	if (!nr_running_ctxs && running_exclusive_ctxs)
		running_exclusive_ctxs = false;
	mutex_unlock(&damon_lock);

	return 0;
}

static int walk_system_ram(struct resource *res, void *arg)
{
	struct damon_addr_range *a = arg;

	if (a->end - a->start < resource_size(res)) {
		a->start = res->start;
		a->end = res->end;
	}
	return 0;
}

/*
 * Find the biggest 'System RAM' resource and store its start and end address
 * in @start and @end, respectively.  If no System RAM is found, returns
 * false.
 */
static bool damon_find_biggest_system_ram(unsigned long *start,
						unsigned long *end)
{
	struct damon_addr_range arg = {};

	walk_system_ram_res(0, ULONG_MAX, &arg, walk_system_ram);
	if (arg.end <= arg.start)
		return false;

	*start = arg.start;
	*end = arg.end;
	return true;
}

/**
 * damon_set_region_biggest_system_ram_default() - Set the region of the given
 *	monitoring target as requested, or biggest 'System RAM'.
 * @t:		The monitoring target to set the region.
 * @start:	The pointer to the start address of the region.
 * @end:	The pointer to the end address of the region.
 * @min_region_sz:	Minimum region size.
 *
 * This function sets the region of @t as requested by @start and @end.  If
 * the values of @start and @end are zero, however, this function finds the
 * biggest 'System RAM' resource and sets the region to cover the resource.
 * In the latter case, this function saves the start and end addresses of the
 * resource in @start and @end, respectively.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_set_region_biggest_system_ram_default(struct damon_target *t,
			unsigned long *start, unsigned long *end,
			unsigned long min_region_sz)
{
	struct damon_addr_range addr_range;

	if (*start > *end)
		return -EINVAL;

	if (!*start && !*end &&
	    !damon_find_biggest_system_ram(start, end))
		return -EINVAL;

	addr_range.start = *start;
	addr_range.end = *end;
	return damon_set_regions(t, &addr_range, 1, min_region_sz);
}

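/*
 * A minimal usage sketch of the function above, for illustration only (the
 * target 't' and the minimum region size value are hypothetical and not part
 * of this file):
 *
 *	unsigned long start = 0, end = 0;
 *	int err;
 *
 *	err = damon_set_region_biggest_system_ram_default(t, &start, &end,
 *			4096);
 *	if (!err)
 *		pr_debug("monitoring [%lu, %lu)\n", start, end);
 *
 * Passing zeroed @start and @end lets the function pick the biggest 'System
 * RAM' resource and report the chosen range back through the pointers.
 */
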
/*
 * damon_moving_sum() - Calculate an inferred moving sum value.
 * @mvsum:	Inferred sum of the last @len_window values.
 * @nomvsum:	Non-moving sum of the last discrete @len_window window values.
 * @len_window:	The number of last values to take care of.
 * @new_value:	New value that will be added to the pseudo moving sum.
 *
 * Moving sum (moving average * window size) is good for handling noise, but
 * the cost of keeping past values can be high for arbitrary window size.
 * This function implements a lightweight pseudo moving sum function that
 * doesn't keep the past window values.
 *
 * It simply assumes there was no noise in the past, and gets the
 * no-noise-assumed past value to drop from @nomvsum and @len_window.
 * @nomvsum is a non-moving sum of the last window.  For example, if
 * @len_window is 10 and we have 25 values, @nomvsum is the sum of the 11th to
 * 20th values of the 25 values.  Hence, this function simply drops
 * @nomvsum / @len_window from the given @mvsum and adds @new_value.
 *
 * For example, if @len_window is 10 and @nomvsum is 50, the last 10 values of
 * the last window could vary, e.g., 0, 10, 0, 10, 0, 10, 0, 0, 0, 20.  For
 * calculating the next moving sum with a new value, we should drop 0 from 50
 * and add the new value.  However, this function assumes it got the value 5
 * for each of the last ten times.  Based on the assumption, when the next
 * value is measured, it drops the assumed past value, 5, from the current
 * sum, and adds the new value to get the updated pseudo-moving sum.
 *
 * This means the value could have errors, but the errors disappear for every
 * @len_window-aligned call.  For example, if @len_window is 10, the pseudo
 * moving sum with the 11th value to the 19th value would have an error, but
 * the sum with the 20th value will not have the error.
 *
 * Return: Pseudo-moving sum after getting the @new_value.
 */
static unsigned int damon_moving_sum(unsigned int mvsum, unsigned int nomvsum,
		unsigned int len_window, unsigned int new_value)
{
	return mvsum - nomvsum / len_window + new_value;
}

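/*
 * A worked example of the pseudo moving sum above, with hypothetical numbers
 * for illustration only: with mvsum of 50, nomvsum of 50, len_window of 10
 * and a new_value of 20, the assumed past value 50 / 10 = 5 is dropped and
 * the new value is added, so the function returns 50 - 5 + 20 = 65.
 */
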
/**
 * damon_update_region_access_rate() - Update the access rate of a region.
 * @r:		The DAMON region to update for its access check result.
 * @accessed:	Whether the region has been accessed during the last sampling
 *		interval.
 * @attrs:	The damon_attrs of the DAMON context.
 *
 * Update the access rate of a region with the region's last sampling interval
 * access check result.
 *
 * Usually this will be called by &damon_operations->check_accesses callback.
 */
void damon_update_region_access_rate(struct damon_region *r, bool accessed,
		struct damon_attrs *attrs)
{
	unsigned int len_window = 1;

	/*
	 * sample_interval can be zero, but cannot be larger than
	 * aggr_interval, owing to validation of damon_set_attrs().
	 */
	if (attrs->sample_interval)
		len_window = damon_max_nr_accesses(attrs);
	r->nr_accesses_bp = damon_moving_sum(r->nr_accesses_bp,
			r->last_nr_accesses * 10000, len_window,
			accessed ? 10000 : 0);

	if (accessed)
		r->nr_accesses++;
}

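/*
 * Example of the nr_accesses_bp update above, with hypothetical numbers for
 * illustration only: with sample_interval of 5000 us and aggr_interval of
 * 100000 us, len_window is 20.  For a region whose last_nr_accesses is 4
 * (nomvsum of 40000) and whose current nr_accesses_bp is 40000, a sampling
 * interval with an access moves nr_accesses_bp to 40000 - 2000 + 10000 =
 * 48000, while one without an access moves it to 38000.
 */
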
/**
 * damon_initialized() - Return if DAMON is ready to be used.
 *
 * Return: true if DAMON is ready to be used, false otherwise.
 */
bool damon_initialized(void)
{
	return damon_region_cache != NULL;
}

static int __init damon_init(void)
{
	damon_region_cache = KMEM_CACHE(damon_region, 0);
	if (unlikely(!damon_region_cache)) {
		pr_err("creating damon_region_cache fails\n");
		return -ENOMEM;
	}

	return 0;
}

subsys_initcall(damon_init);

#include "tests/core-kunit.h"