GitHub Repository: torvalds/linux
Path: blob/master/mm/khugepaged.c
1
// SPDX-License-Identifier: GPL-2.0
2
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
3
4
#include <linux/mm.h>
5
#include <linux/sched.h>
6
#include <linux/sched/mm.h>
7
#include <linux/mmu_notifier.h>
8
#include <linux/rmap.h>
9
#include <linux/swap.h>
10
#include <linux/mm_inline.h>
11
#include <linux/kthread.h>
12
#include <linux/khugepaged.h>
13
#include <linux/freezer.h>
14
#include <linux/mman.h>
15
#include <linux/hashtable.h>
16
#include <linux/userfaultfd_k.h>
17
#include <linux/page_idle.h>
18
#include <linux/page_table_check.h>
19
#include <linux/rcupdate_wait.h>
20
#include <linux/swapops.h>
21
#include <linux/shmem_fs.h>
22
#include <linux/dax.h>
23
#include <linux/ksm.h>
24
25
#include <asm/tlb.h>
26
#include <asm/pgalloc.h>
27
#include "internal.h"
28
#include "mm_slot.h"
29
30
enum scan_result {
31
SCAN_FAIL,
32
SCAN_SUCCEED,
33
SCAN_PMD_NULL,
34
SCAN_PMD_NONE,
35
SCAN_PMD_MAPPED,
36
SCAN_EXCEED_NONE_PTE,
37
SCAN_EXCEED_SWAP_PTE,
38
SCAN_EXCEED_SHARED_PTE,
39
SCAN_PTE_NON_PRESENT,
40
SCAN_PTE_UFFD_WP,
41
SCAN_PTE_MAPPED_HUGEPAGE,
42
SCAN_LACK_REFERENCED_PAGE,
43
SCAN_PAGE_NULL,
44
SCAN_SCAN_ABORT,
45
SCAN_PAGE_COUNT,
46
SCAN_PAGE_LRU,
47
SCAN_PAGE_LOCK,
48
SCAN_PAGE_ANON,
49
SCAN_PAGE_COMPOUND,
50
SCAN_ANY_PROCESS,
51
SCAN_VMA_NULL,
52
SCAN_VMA_CHECK,
53
SCAN_ADDRESS_RANGE,
54
SCAN_DEL_PAGE_LRU,
55
SCAN_ALLOC_HUGE_PAGE_FAIL,
56
SCAN_CGROUP_CHARGE_FAIL,
57
SCAN_TRUNCATED,
58
SCAN_PAGE_HAS_PRIVATE,
59
SCAN_STORE_FAILED,
60
SCAN_COPY_MC,
61
SCAN_PAGE_FILLED,
62
};
63
64
#define CREATE_TRACE_POINTS
65
#include <trace/events/huge_memory.h>
66
67
static struct task_struct *khugepaged_thread __read_mostly;
68
static DEFINE_MUTEX(khugepaged_mutex);
69
70
/* default: scan 8*512 ptes (or vmas) every 10 seconds */
71
static unsigned int khugepaged_pages_to_scan __read_mostly;
72
static unsigned int khugepaged_pages_collapsed;
73
static unsigned int khugepaged_full_scans;
74
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
75
/* during fragmentation poll the hugepage allocator once every minute */
76
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
77
static unsigned long khugepaged_sleep_expire;
78
static DEFINE_SPINLOCK(khugepaged_mm_lock);
79
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
80
/*
81
* By default, collapse hugepages if at least one pte is mapped the way
82
* it would have been had the vma been large enough during page
83
* fault.
84
*
85
* Note that these are only respected if collapse was initiated by khugepaged.
86
*/
87
unsigned int khugepaged_max_ptes_none __read_mostly;
88
static unsigned int khugepaged_max_ptes_swap __read_mostly;
89
static unsigned int khugepaged_max_ptes_shared __read_mostly;
90
91
#define MM_SLOTS_HASH_BITS 10
92
static DEFINE_READ_MOSTLY_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
93
94
static struct kmem_cache *mm_slot_cache __ro_after_init;
95
96
struct collapse_control {
97
bool is_khugepaged;
98
99
/* Num pages scanned per node */
100
u32 node_load[MAX_NUMNODES];
101
102
/* nodemask for allocation fallback */
103
nodemask_t alloc_nmask;
104
};
105
106
/**
107
* struct khugepaged_scan - cursor for scanning
108
* @mm_head: the head of the mm list to scan
109
* @mm_slot: the current mm_slot we are scanning
110
* @address: the next address inside that to be scanned
111
*
112
* There is only the one khugepaged_scan instance of this cursor structure.
113
*/
114
struct khugepaged_scan {
115
struct list_head mm_head;
116
struct mm_slot *mm_slot;
117
unsigned long address;
118
};
119
120
static struct khugepaged_scan khugepaged_scan = {
121
.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
122
};
123
124
#ifdef CONFIG_SYSFS
125
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
126
struct kobj_attribute *attr,
127
char *buf)
128
{
129
return sysfs_emit(buf, "%u\n", khugepaged_scan_sleep_millisecs);
130
}
131
132
static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
133
struct kobj_attribute *attr,
134
const char *buf, size_t count)
135
{
136
unsigned int msecs;
137
int err;
138
139
err = kstrtouint(buf, 10, &msecs);
140
if (err)
141
return -EINVAL;
142
143
khugepaged_scan_sleep_millisecs = msecs;
144
khugepaged_sleep_expire = 0;
145
wake_up_interruptible(&khugepaged_wait);
146
147
return count;
148
}
149
static struct kobj_attribute scan_sleep_millisecs_attr =
150
__ATTR_RW(scan_sleep_millisecs);
151
152
static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
153
struct kobj_attribute *attr,
154
char *buf)
155
{
156
return sysfs_emit(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
157
}
158
159
static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
160
struct kobj_attribute *attr,
161
const char *buf, size_t count)
162
{
163
unsigned int msecs;
164
int err;
165
166
err = kstrtouint(buf, 10, &msecs);
167
if (err)
168
return -EINVAL;
169
170
khugepaged_alloc_sleep_millisecs = msecs;
171
khugepaged_sleep_expire = 0;
172
wake_up_interruptible(&khugepaged_wait);
173
174
return count;
175
}
176
static struct kobj_attribute alloc_sleep_millisecs_attr =
177
__ATTR_RW(alloc_sleep_millisecs);
178
179
static ssize_t pages_to_scan_show(struct kobject *kobj,
180
struct kobj_attribute *attr,
181
char *buf)
182
{
183
return sysfs_emit(buf, "%u\n", khugepaged_pages_to_scan);
184
}
185
static ssize_t pages_to_scan_store(struct kobject *kobj,
186
struct kobj_attribute *attr,
187
const char *buf, size_t count)
188
{
189
unsigned int pages;
190
int err;
191
192
err = kstrtouint(buf, 10, &pages);
193
if (err || !pages)
194
return -EINVAL;
195
196
khugepaged_pages_to_scan = pages;
197
198
return count;
199
}
200
static struct kobj_attribute pages_to_scan_attr =
201
__ATTR_RW(pages_to_scan);
202
203
static ssize_t pages_collapsed_show(struct kobject *kobj,
204
struct kobj_attribute *attr,
205
char *buf)
206
{
207
return sysfs_emit(buf, "%u\n", khugepaged_pages_collapsed);
208
}
209
static struct kobj_attribute pages_collapsed_attr =
210
__ATTR_RO(pages_collapsed);
211
212
static ssize_t full_scans_show(struct kobject *kobj,
213
struct kobj_attribute *attr,
214
char *buf)
215
{
216
return sysfs_emit(buf, "%u\n", khugepaged_full_scans);
217
}
218
static struct kobj_attribute full_scans_attr =
219
__ATTR_RO(full_scans);
220
221
static ssize_t defrag_show(struct kobject *kobj,
222
struct kobj_attribute *attr, char *buf)
223
{
224
return single_hugepage_flag_show(kobj, attr, buf,
225
TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
226
}
227
static ssize_t defrag_store(struct kobject *kobj,
228
struct kobj_attribute *attr,
229
const char *buf, size_t count)
230
{
231
return single_hugepage_flag_store(kobj, attr, buf, count,
232
TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
233
}
234
static struct kobj_attribute khugepaged_defrag_attr =
235
__ATTR_RW(defrag);
236
237
/*
238
* max_ptes_none controls if khugepaged should collapse hugepages over
239
* any unmapped ptes in turn potentially increasing the memory
240
* footprint of the vmas. When max_ptes_none is 0 khugepaged will not
241
* reduce the available free memory in the system as it
242
* runs. Increasing max_ptes_none will instead potentially reduce the
243
* free memory in the system during the khugepaged scan.
244
*/
245
static ssize_t max_ptes_none_show(struct kobject *kobj,
246
struct kobj_attribute *attr,
247
char *buf)
248
{
249
return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none);
250
}
251
static ssize_t max_ptes_none_store(struct kobject *kobj,
252
struct kobj_attribute *attr,
253
const char *buf, size_t count)
254
{
255
int err;
256
unsigned long max_ptes_none;
257
258
err = kstrtoul(buf, 10, &max_ptes_none);
259
if (err || max_ptes_none > HPAGE_PMD_NR - 1)
260
return -EINVAL;
261
262
khugepaged_max_ptes_none = max_ptes_none;
263
264
return count;
265
}
266
static struct kobj_attribute khugepaged_max_ptes_none_attr =
267
__ATTR_RW(max_ptes_none);
268
269
static ssize_t max_ptes_swap_show(struct kobject *kobj,
270
struct kobj_attribute *attr,
271
char *buf)
272
{
273
return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap);
274
}
275
276
static ssize_t max_ptes_swap_store(struct kobject *kobj,
277
struct kobj_attribute *attr,
278
const char *buf, size_t count)
279
{
280
int err;
281
unsigned long max_ptes_swap;
282
283
err = kstrtoul(buf, 10, &max_ptes_swap);
284
if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
285
return -EINVAL;
286
287
khugepaged_max_ptes_swap = max_ptes_swap;
288
289
return count;
290
}
291
292
static struct kobj_attribute khugepaged_max_ptes_swap_attr =
293
__ATTR_RW(max_ptes_swap);
294
295
static ssize_t max_ptes_shared_show(struct kobject *kobj,
296
struct kobj_attribute *attr,
297
char *buf)
298
{
299
return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared);
300
}
301
302
static ssize_t max_ptes_shared_store(struct kobject *kobj,
303
struct kobj_attribute *attr,
304
const char *buf, size_t count)
305
{
306
int err;
307
unsigned long max_ptes_shared;
308
309
err = kstrtoul(buf, 10, &max_ptes_shared);
310
if (err || max_ptes_shared > HPAGE_PMD_NR - 1)
311
return -EINVAL;
312
313
khugepaged_max_ptes_shared = max_ptes_shared;
314
315
return count;
316
}
317
318
static struct kobj_attribute khugepaged_max_ptes_shared_attr =
319
__ATTR_RW(max_ptes_shared);
320
321
static struct attribute *khugepaged_attr[] = {
322
&khugepaged_defrag_attr.attr,
323
&khugepaged_max_ptes_none_attr.attr,
324
&khugepaged_max_ptes_swap_attr.attr,
325
&khugepaged_max_ptes_shared_attr.attr,
326
&pages_to_scan_attr.attr,
327
&pages_collapsed_attr.attr,
328
&full_scans_attr.attr,
329
&scan_sleep_millisecs_attr.attr,
330
&alloc_sleep_millisecs_attr.attr,
331
NULL,
332
};
333
334
struct attribute_group khugepaged_attr_group = {
335
.attrs = khugepaged_attr,
336
.name = "khugepaged",
337
};
338
#endif /* CONFIG_SYSFS */
339
340
int hugepage_madvise(struct vm_area_struct *vma,
341
vm_flags_t *vm_flags, int advice)
342
{
343
switch (advice) {
344
case MADV_HUGEPAGE:
345
#ifdef CONFIG_S390
346
/*
347
* qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
348
* can't handle this properly after s390_enable_sie, so we simply
349
* ignore the madvise to prevent qemu from causing a SIGSEGV.
350
*/
351
if (mm_has_pgste(vma->vm_mm))
352
return 0;
353
#endif
354
*vm_flags &= ~VM_NOHUGEPAGE;
355
*vm_flags |= VM_HUGEPAGE;
356
/*
357
* If the vma becomes good for khugepaged to scan,
358
* register it here without waiting for a page fault that
359
* may not happen any time soon.
360
*/
361
khugepaged_enter_vma(vma, *vm_flags);
362
break;
363
case MADV_NOHUGEPAGE:
364
*vm_flags &= ~VM_HUGEPAGE;
365
*vm_flags |= VM_NOHUGEPAGE;
366
/*
367
* Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
368
* this vma even if we leave the mm registered in khugepaged if
369
* it got registered before VM_NOHUGEPAGE was set.
370
*/
371
break;
372
}
373
374
return 0;
375
}
376
377
int __init khugepaged_init(void)
378
{
379
mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
380
sizeof(struct mm_slot),
381
__alignof__(struct mm_slot),
382
0, NULL);
383
if (!mm_slot_cache)
384
return -ENOMEM;
385
386
khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
387
khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
388
khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
389
khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;
390
391
return 0;
392
}
393
394
void __init khugepaged_destroy(void)
395
{
396
kmem_cache_destroy(mm_slot_cache);
397
}
398
399
static inline int hpage_collapse_test_exit(struct mm_struct *mm)
400
{
401
return atomic_read(&mm->mm_users) == 0;
402
}
403
404
static inline int hpage_collapse_test_exit_or_disable(struct mm_struct *mm)
405
{
406
return hpage_collapse_test_exit(mm) ||
407
mm_flags_test(MMF_DISABLE_THP_COMPLETELY, mm);
408
}
409
410
static bool hugepage_pmd_enabled(void)
411
{
412
/*
413
* We cover the anon, shmem and the file-backed case here; file-backed
414
* hugepages, when configured in, are determined by the global control.
415
* Anon pmd-sized hugepages are determined by the pmd-size control.
416
* Shmem pmd-sized hugepages are also determined by its pmd-size control,
417
* except when the global shmem_huge is set to SHMEM_HUGE_DENY.
418
*/
419
if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
420
hugepage_global_enabled())
421
return true;
422
if (test_bit(PMD_ORDER, &huge_anon_orders_always))
423
return true;
424
if (test_bit(PMD_ORDER, &huge_anon_orders_madvise))
425
return true;
426
if (test_bit(PMD_ORDER, &huge_anon_orders_inherit) &&
427
hugepage_global_enabled())
428
return true;
429
if (IS_ENABLED(CONFIG_SHMEM) && shmem_hpage_pmd_enabled())
430
return true;
431
return false;
432
}
433
434
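/*
 * Register @mm for khugepaged scanning: allocate an mm_slot, queue it on
 * the scan list, take a reference on the mm, and wake khugepaged if the
 * scan list was previously empty.
 */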
void __khugepaged_enter(struct mm_struct *mm)
435
{
436
struct mm_slot *slot;
437
int wakeup;
438
439
/* __khugepaged_exit() must not run from under us */
440
VM_BUG_ON_MM(hpage_collapse_test_exit(mm), mm);
441
if (unlikely(mm_flags_test_and_set(MMF_VM_HUGEPAGE, mm)))
442
return;
443
444
slot = mm_slot_alloc(mm_slot_cache);
445
if (!slot)
446
return;
447
448
spin_lock(&khugepaged_mm_lock);
449
mm_slot_insert(mm_slots_hash, mm, slot);
450
/*
451
* Insert just behind the scanning cursor, to let the area settle
452
* down a little.
453
*/
454
wakeup = list_empty(&khugepaged_scan.mm_head);
455
list_add_tail(&slot->mm_node, &khugepaged_scan.mm_head);
456
spin_unlock(&khugepaged_mm_lock);
457
458
mmgrab(mm);
459
if (wakeup)
460
wake_up_interruptible(&khugepaged_wait);
461
}
462
463
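/*
 * Register the mm with khugepaged if this VMA is now eligible for
 * PMD-sized collapse and the mm has not been registered yet.
 */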
void khugepaged_enter_vma(struct vm_area_struct *vma,
464
vm_flags_t vm_flags)
465
{
466
if (!mm_flags_test(MMF_VM_HUGEPAGE, vma->vm_mm) &&
467
hugepage_pmd_enabled()) {
468
if (thp_vma_allowable_order(vma, vm_flags, TVA_KHUGEPAGED, PMD_ORDER))
469
__khugepaged_enter(vma->vm_mm);
470
}
471
}
472
473
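/*
 * Unregister @mm on exit. If khugepaged is currently scanning this mm the
 * slot is freed later by collect_mm_slot(); the write lock/unlock below
 * only waits for khugepaged to finish with this mm's page tables.
 */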
void __khugepaged_exit(struct mm_struct *mm)
474
{
475
struct mm_slot *slot;
476
int free = 0;
477
478
spin_lock(&khugepaged_mm_lock);
479
slot = mm_slot_lookup(mm_slots_hash, mm);
480
if (slot && khugepaged_scan.mm_slot != slot) {
481
hash_del(&slot->hash);
482
list_del(&slot->mm_node);
483
free = 1;
484
}
485
spin_unlock(&khugepaged_mm_lock);
486
487
if (free) {
488
mm_flags_clear(MMF_VM_HUGEPAGE, mm);
489
mm_slot_free(mm_slot_cache, slot);
490
mmdrop(mm);
491
} else if (slot) {
492
/*
493
* This is required to serialize against
494
* hpage_collapse_test_exit() (which is guaranteed to run
495
* under mmap_lock read mode). Stop here (after we return, all
496
* pagetables will be destroyed) until khugepaged has finished
497
* working on the pagetables under the mmap_lock.
498
*/
499
mmap_write_lock(mm);
500
mmap_write_unlock(mm);
501
}
502
}
503
504
static void release_pte_folio(struct folio *folio)
505
{
506
node_stat_mod_folio(folio,
507
NR_ISOLATED_ANON + folio_is_file_lru(folio),
508
-folio_nr_pages(folio));
509
folio_unlock(folio);
510
folio_putback_lru(folio);
511
}
512
513
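/*
 * Undo isolation after a failed collapse: unlock and put back every small
 * folio isolated between @pte and @_pte, then the large folios queued on
 * @compound_pagelist.
 */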
static void release_pte_pages(pte_t *pte, pte_t *_pte,
514
struct list_head *compound_pagelist)
515
{
516
struct folio *folio, *tmp;
517
518
while (--_pte >= pte) {
519
pte_t pteval = ptep_get(_pte);
520
unsigned long pfn;
521
522
if (pte_none(pteval))
523
continue;
524
pfn = pte_pfn(pteval);
525
if (is_zero_pfn(pfn))
526
continue;
527
folio = pfn_folio(pfn);
528
if (folio_test_large(folio))
529
continue;
530
release_pte_folio(folio);
531
}
532
533
list_for_each_entry_safe(folio, tmp, compound_pagelist, lru) {
534
list_del(&folio->lru);
535
release_pte_folio(folio);
536
}
537
}
538
539
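/*
 * Lock and isolate from the LRU each page mapped by the PTE range being
 * collapsed, enforcing the max_ptes_none/shared limits for khugepaged.
 * Returns SCAN_SUCCEED or a scan_result explaining why isolation failed.
 */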
static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
540
unsigned long start_addr,
541
pte_t *pte,
542
struct collapse_control *cc,
543
struct list_head *compound_pagelist)
544
{
545
struct page *page = NULL;
546
struct folio *folio = NULL;
547
unsigned long addr = start_addr;
548
pte_t *_pte;
549
int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0;
550
551
for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
552
_pte++, addr += PAGE_SIZE) {
553
pte_t pteval = ptep_get(_pte);
554
if (pte_none(pteval) || (pte_present(pteval) &&
555
is_zero_pfn(pte_pfn(pteval)))) {
556
++none_or_zero;
557
if (!userfaultfd_armed(vma) &&
558
(!cc->is_khugepaged ||
559
none_or_zero <= khugepaged_max_ptes_none)) {
560
continue;
561
} else {
562
result = SCAN_EXCEED_NONE_PTE;
563
count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
564
goto out;
565
}
566
}
567
if (!pte_present(pteval)) {
568
result = SCAN_PTE_NON_PRESENT;
569
goto out;
570
}
571
if (pte_uffd_wp(pteval)) {
572
result = SCAN_PTE_UFFD_WP;
573
goto out;
574
}
575
page = vm_normal_page(vma, addr, pteval);
576
if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
577
result = SCAN_PAGE_NULL;
578
goto out;
579
}
580
581
folio = page_folio(page);
582
VM_BUG_ON_FOLIO(!folio_test_anon(folio), folio);
583
584
/* See hpage_collapse_scan_pmd(). */
585
if (folio_maybe_mapped_shared(folio)) {
586
++shared;
587
if (cc->is_khugepaged &&
588
shared > khugepaged_max_ptes_shared) {
589
result = SCAN_EXCEED_SHARED_PTE;
590
count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
591
goto out;
592
}
593
}
594
595
if (folio_test_large(folio)) {
596
struct folio *f;
597
598
/*
599
* Check if we have dealt with the compound page
600
* already
601
*/
602
list_for_each_entry(f, compound_pagelist, lru) {
603
if (folio == f)
604
goto next;
605
}
606
}
607
608
/*
609
* We can do it before folio_isolate_lru because the
610
* folio can't be freed from under us. NOTE: PG_lock
611
* is needed to serialize against split_huge_page
612
* when invoked from the VM.
613
*/
614
if (!folio_trylock(folio)) {
615
result = SCAN_PAGE_LOCK;
616
goto out;
617
}
618
619
/*
620
* Check if the page has any GUP (or other external) pins.
621
*
622
* The page table that maps the page has been already unlinked
623
* from the page table tree and this process cannot get
624
* an additional pin on the page.
625
*
626
* New pins can come later if the page is shared across fork,
627
* but not from this process. The other process cannot write to
628
* the page, only trigger CoW.
629
*/
630
if (folio_expected_ref_count(folio) != folio_ref_count(folio)) {
631
folio_unlock(folio);
632
result = SCAN_PAGE_COUNT;
633
goto out;
634
}
635
636
/*
637
* Isolate the page to avoid collapsing a hugepage
638
* currently in use by the VM.
639
*/
640
if (!folio_isolate_lru(folio)) {
641
folio_unlock(folio);
642
result = SCAN_DEL_PAGE_LRU;
643
goto out;
644
}
645
node_stat_mod_folio(folio,
646
NR_ISOLATED_ANON + folio_is_file_lru(folio),
647
folio_nr_pages(folio));
648
VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
649
VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
650
651
if (folio_test_large(folio))
652
list_add_tail(&folio->lru, compound_pagelist);
653
next:
654
/*
655
* If collapse was initiated by khugepaged, check that there is
656
* enough young pte to justify collapsing the page
657
*/
658
if (cc->is_khugepaged &&
659
(pte_young(pteval) || folio_test_young(folio) ||
660
folio_test_referenced(folio) ||
661
mmu_notifier_test_young(vma->vm_mm, addr)))
662
referenced++;
663
}
664
665
if (unlikely(cc->is_khugepaged && !referenced)) {
666
result = SCAN_LACK_REFERENCED_PAGE;
667
} else {
668
result = SCAN_SUCCEED;
669
trace_mm_collapse_huge_page_isolate(folio, none_or_zero,
670
referenced, result);
671
return result;
672
}
673
out:
674
release_pte_pages(pte, _pte, compound_pagelist);
675
trace_mm_collapse_huge_page_isolate(folio, none_or_zero,
676
referenced, result);
677
return result;
678
}
679
680
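/*
 * After a successful copy: clear the old PTEs, drop the rmap entries and
 * references on the source folios, and put the isolated folios back on
 * the LRU.
 */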
static void __collapse_huge_page_copy_succeeded(pte_t *pte,
681
struct vm_area_struct *vma,
682
unsigned long address,
683
spinlock_t *ptl,
684
struct list_head *compound_pagelist)
685
{
686
unsigned long end = address + HPAGE_PMD_SIZE;
687
struct folio *src, *tmp;
688
pte_t pteval;
689
pte_t *_pte;
690
unsigned int nr_ptes;
691
692
for (_pte = pte; _pte < pte + HPAGE_PMD_NR; _pte += nr_ptes,
693
address += nr_ptes * PAGE_SIZE) {
694
nr_ptes = 1;
695
pteval = ptep_get(_pte);
696
if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
697
add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
698
if (is_zero_pfn(pte_pfn(pteval))) {
699
/*
700
* ptl mostly unnecessary.
701
*/
702
spin_lock(ptl);
703
ptep_clear(vma->vm_mm, address, _pte);
704
spin_unlock(ptl);
705
ksm_might_unmap_zero_page(vma->vm_mm, pteval);
706
}
707
} else {
708
struct page *src_page = pte_page(pteval);
709
710
src = page_folio(src_page);
711
712
if (folio_test_large(src)) {
713
unsigned int max_nr_ptes = (end - address) >> PAGE_SHIFT;
714
715
nr_ptes = folio_pte_batch(src, _pte, pteval, max_nr_ptes);
716
} else {
717
release_pte_folio(src);
718
}
719
720
/*
721
* ptl mostly unnecessary, but preempt has to
722
* be disabled to update the per-cpu stats
723
* inside folio_remove_rmap_pte().
724
*/
725
spin_lock(ptl);
726
clear_ptes(vma->vm_mm, address, _pte, nr_ptes);
727
folio_remove_rmap_ptes(src, src_page, nr_ptes, vma);
728
spin_unlock(ptl);
729
free_swap_cache(src);
730
folio_put_refs(src, nr_ptes);
731
}
732
}
733
734
list_for_each_entry_safe(src, tmp, compound_pagelist, lru) {
735
list_del(&src->lru);
736
node_stat_sub_folio(src, NR_ISOLATED_ANON +
737
folio_is_file_lru(src));
738
folio_unlock(src);
739
free_swap_cache(src);
740
folio_putback_lru(src);
741
}
742
}
743
744
static void __collapse_huge_page_copy_failed(pte_t *pte,
745
pmd_t *pmd,
746
pmd_t orig_pmd,
747
struct vm_area_struct *vma,
748
struct list_head *compound_pagelist)
749
{
750
spinlock_t *pmd_ptl;
751
752
/*
753
* Re-establish the PMD to point to the original page table
754
* entry. Restoring PMD needs to be done prior to releasing
755
* pages. Since pages are still isolated and locked here,
756
* acquiring anon_vma_lock_write is unnecessary.
757
*/
758
pmd_ptl = pmd_lock(vma->vm_mm, pmd);
759
pmd_populate(vma->vm_mm, pmd, pmd_pgtable(orig_pmd));
760
spin_unlock(pmd_ptl);
761
/*
762
* Release both raw and compound pages isolated
763
* in __collapse_huge_page_isolate.
764
*/
765
release_pte_pages(pte, pte + HPAGE_PMD_NR, compound_pagelist);
766
}
767
768
/*
769
* __collapse_huge_page_copy - attempts to copy memory contents from raw
770
* pages to a hugepage. Cleans up the raw pages if copying succeeds;
771
* otherwise restores the original page table and releases isolated raw pages.
772
* Returns SCAN_SUCCEED if copying succeeds, otherwise returns SCAN_COPY_MC.
773
*
774
* @pte: starting of the PTEs to copy from
775
* @folio: the new hugepage to copy contents to
776
* @pmd: pointer to the new hugepage's PMD
777
* @orig_pmd: the original raw pages' PMD
778
* @vma: the original raw pages' virtual memory area
779
* @address: starting address to copy
780
* @ptl: lock on raw pages' PTEs
781
* @compound_pagelist: list that stores compound pages
782
*/
783
static int __collapse_huge_page_copy(pte_t *pte, struct folio *folio,
784
pmd_t *pmd, pmd_t orig_pmd, struct vm_area_struct *vma,
785
unsigned long address, spinlock_t *ptl,
786
struct list_head *compound_pagelist)
787
{
788
unsigned int i;
789
int result = SCAN_SUCCEED;
790
791
/*
792
* Copying pages' contents is subject to memory poison at any iteration.
793
*/
794
for (i = 0; i < HPAGE_PMD_NR; i++) {
795
pte_t pteval = ptep_get(pte + i);
796
struct page *page = folio_page(folio, i);
797
unsigned long src_addr = address + i * PAGE_SIZE;
798
struct page *src_page;
799
800
if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
801
clear_user_highpage(page, src_addr);
802
continue;
803
}
804
src_page = pte_page(pteval);
805
if (copy_mc_user_highpage(page, src_page, src_addr, vma) > 0) {
806
result = SCAN_COPY_MC;
807
break;
808
}
809
}
810
811
if (likely(result == SCAN_SUCCEED))
812
__collapse_huge_page_copy_succeeded(pte, vma, address, ptl,
813
compound_pagelist);
814
else
815
__collapse_huge_page_copy_failed(pte, pmd, orig_pmd, vma,
816
compound_pagelist);
817
818
return result;
819
}
820
821
static void khugepaged_alloc_sleep(void)
822
{
823
DEFINE_WAIT(wait);
824
825
add_wait_queue(&khugepaged_wait, &wait);
826
__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
827
schedule_timeout(msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
828
remove_wait_queue(&khugepaged_wait, &wait);
829
}
830
831
struct collapse_control khugepaged_collapse_control = {
832
.is_khugepaged = true,
833
};
834
835
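/*
 * With node reclaim enabled, abort the scan when @nid is further away
 * from a node already recorded in cc->node_load[] than
 * node_reclaim_distance allows.
 */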
static bool hpage_collapse_scan_abort(int nid, struct collapse_control *cc)
836
{
837
int i;
838
839
/*
840
* If node_reclaim_mode is disabled, then no extra effort is made to
841
* allocate memory locally.
842
*/
843
if (!node_reclaim_enabled())
844
return false;
845
846
/* If there is a count for this node already, it must be acceptable */
847
if (cc->node_load[nid])
848
return false;
849
850
for (i = 0; i < MAX_NUMNODES; i++) {
851
if (!cc->node_load[i])
852
continue;
853
if (node_distance(nid, i) > node_reclaim_distance)
854
return true;
855
}
856
return false;
857
}
858
859
#define khugepaged_defrag() \
860
(transparent_hugepage_flags & \
861
(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))
862
863
/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
864
static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
865
{
866
return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
867
}
868
869
#ifdef CONFIG_NUMA
870
static int hpage_collapse_find_target_node(struct collapse_control *cc)
871
{
872
int nid, target_node = 0, max_value = 0;
873
874
/* find first node with max normal pages hit */
875
for (nid = 0; nid < MAX_NUMNODES; nid++)
876
if (cc->node_load[nid] > max_value) {
877
max_value = cc->node_load[nid];
878
target_node = nid;
879
}
880
881
for_each_online_node(nid) {
882
if (max_value == cc->node_load[nid])
883
node_set(nid, cc->alloc_nmask);
884
}
885
886
return target_node;
887
}
888
#else
889
static int hpage_collapse_find_target_node(struct collapse_control *cc)
890
{
891
return 0;
892
}
893
#endif
894
895
/*
896
* If mmap_lock was temporarily dropped, revalidate vma
897
* before taking mmap_lock.
898
* Returns enum scan_result value.
899
*/
900
901
static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
902
bool expect_anon,
903
struct vm_area_struct **vmap,
904
struct collapse_control *cc)
905
{
906
struct vm_area_struct *vma;
907
enum tva_type type = cc->is_khugepaged ? TVA_KHUGEPAGED :
908
TVA_FORCED_COLLAPSE;
909
910
if (unlikely(hpage_collapse_test_exit_or_disable(mm)))
911
return SCAN_ANY_PROCESS;
912
913
*vmap = vma = find_vma(mm, address);
914
if (!vma)
915
return SCAN_VMA_NULL;
916
917
if (!thp_vma_suitable_order(vma, address, PMD_ORDER))
918
return SCAN_ADDRESS_RANGE;
919
if (!thp_vma_allowable_order(vma, vma->vm_flags, type, PMD_ORDER))
920
return SCAN_VMA_CHECK;
921
/*
922
* Anon VMA expected, the address may be unmapped then
923
* remapped to file after khugepaged reacquired the mmap_lock.
924
*
925
* thp_vma_allowable_order may return true for qualified file
926
* vmas.
927
*/
928
if (expect_anon && (!(*vmap)->anon_vma || !vma_is_anonymous(*vmap)))
929
return SCAN_PAGE_ANON;
930
return SCAN_SUCCEED;
931
}
932
933
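/*
 * Classify the state of @pmd: SCAN_SUCCEED means it points to a normal
 * page table that may be collapsed; any other value reports why not.
 */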
static inline int check_pmd_state(pmd_t *pmd)
934
{
935
pmd_t pmde = pmdp_get_lockless(pmd);
936
937
if (pmd_none(pmde))
938
return SCAN_PMD_NONE;
939
940
/*
941
* The folio may be under migration when khugepaged is trying to
942
* collapse it. Migration success or failure will eventually end
943
* up with a present PMD mapping a folio again.
944
*/
945
if (is_pmd_migration_entry(pmde))
946
return SCAN_PMD_MAPPED;
947
if (!pmd_present(pmde))
948
return SCAN_PMD_NULL;
949
if (pmd_trans_huge(pmde))
950
return SCAN_PMD_MAPPED;
951
if (pmd_bad(pmde))
952
return SCAN_PMD_NULL;
953
return SCAN_SUCCEED;
954
}
955
956
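/* Look up the PMD covering @address and classify it via check_pmd_state(). */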
static int find_pmd_or_thp_or_none(struct mm_struct *mm,
957
unsigned long address,
958
pmd_t **pmd)
959
{
960
*pmd = mm_find_pmd(mm, address);
961
if (!*pmd)
962
return SCAN_PMD_NULL;
963
964
return check_pmd_state(*pmd);
965
}
966
967
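/* Recheck that @address still resolves to the same, still-collapsible @pmd. */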
static int check_pmd_still_valid(struct mm_struct *mm,
968
unsigned long address,
969
pmd_t *pmd)
970
{
971
pmd_t *new_pmd;
972
int result = find_pmd_or_thp_or_none(mm, address, &new_pmd);
973
974
if (result != SCAN_SUCCEED)
975
return result;
976
if (new_pmd != pmd)
977
return SCAN_FAIL;
978
return SCAN_SUCCEED;
979
}
980
981
/*
982
* Bring missing pages in from swap, to complete THP collapse.
983
* Only done if hpage_collapse_scan_pmd believes it is worthwhile.
984
*
985
* Called and returns without pte mapped or spinlocks held.
986
* Returns result: if not SCAN_SUCCEED, mmap_lock has been released.
987
*/
988
static int __collapse_huge_page_swapin(struct mm_struct *mm,
989
struct vm_area_struct *vma,
990
unsigned long start_addr, pmd_t *pmd,
991
int referenced)
992
{
993
int swapped_in = 0;
994
vm_fault_t ret = 0;
995
unsigned long addr, end = start_addr + (HPAGE_PMD_NR * PAGE_SIZE);
996
int result;
997
pte_t *pte = NULL;
998
spinlock_t *ptl;
999
1000
for (addr = start_addr; addr < end; addr += PAGE_SIZE) {
1001
struct vm_fault vmf = {
1002
.vma = vma,
1003
.address = addr,
1004
.pgoff = linear_page_index(vma, addr),
1005
.flags = FAULT_FLAG_ALLOW_RETRY,
1006
.pmd = pmd,
1007
};
1008
1009
if (!pte++) {
1010
/*
1011
* Here the ptl is only used to check pte_same() in
1012
* do_swap_page(), so readonly version is enough.
1013
*/
1014
pte = pte_offset_map_ro_nolock(mm, pmd, addr, &ptl);
1015
if (!pte) {
1016
mmap_read_unlock(mm);
1017
result = SCAN_PMD_NULL;
1018
goto out;
1019
}
1020
}
1021
1022
vmf.orig_pte = ptep_get_lockless(pte);
1023
if (!is_swap_pte(vmf.orig_pte))
1024
continue;
1025
1026
vmf.pte = pte;
1027
vmf.ptl = ptl;
1028
ret = do_swap_page(&vmf);
1029
/* Which unmaps pte (after perhaps re-checking the entry) */
1030
pte = NULL;
1031
1032
/*
1033
* do_swap_page returns VM_FAULT_RETRY with released mmap_lock.
1034
* Note we treat VM_FAULT_RETRY as VM_FAULT_ERROR here because
1035
* we do not retry here and swap entry will remain in pagetable
1036
* resulting in later failure.
1037
*/
1038
if (ret & VM_FAULT_RETRY) {
1039
/* Likely, but not guaranteed, that page lock failed */
1040
result = SCAN_PAGE_LOCK;
1041
goto out;
1042
}
1043
if (ret & VM_FAULT_ERROR) {
1044
mmap_read_unlock(mm);
1045
result = SCAN_FAIL;
1046
goto out;
1047
}
1048
swapped_in++;
1049
}
1050
1051
if (pte)
1052
pte_unmap(pte);
1053
1054
/* Drain LRU cache to remove extra pin on the swapped in pages */
1055
if (swapped_in)
1056
lru_add_drain();
1057
1058
result = SCAN_SUCCEED;
1059
out:
1060
trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, result);
1061
return result;
1062
}
1063
1064
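/*
 * Allocate a PMD-order folio on the preferred node and charge it to the
 * mm's memcg. Returns SCAN_SUCCEED, or the reason allocation or charging
 * failed.
 */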
static int alloc_charge_folio(struct folio **foliop, struct mm_struct *mm,
1065
struct collapse_control *cc)
1066
{
1067
gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
1068
GFP_TRANSHUGE);
1069
int node = hpage_collapse_find_target_node(cc);
1070
struct folio *folio;
1071
1072
folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, &cc->alloc_nmask);
1073
if (!folio) {
1074
*foliop = NULL;
1075
count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
1076
return SCAN_ALLOC_HUGE_PAGE_FAIL;
1077
}
1078
1079
count_vm_event(THP_COLLAPSE_ALLOC);
1080
if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
1081
folio_put(folio);
1082
*foliop = NULL;
1083
return SCAN_CGROUP_CHARGE_FAIL;
1084
}
1085
1086
count_memcg_folio_events(folio, THP_COLLAPSE_ALLOC, 1);
1087
1088
*foliop = folio;
1089
return SCAN_SUCCEED;
1090
}
1091
1092
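/*
 * Collapse the anonymous range at @address into a newly allocated huge
 * folio. Entered with mmap_lock held for read; always returns with the
 * mmap_lock released.
 */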
static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
1093
int referenced, int unmapped,
1094
struct collapse_control *cc)
1095
{
1096
LIST_HEAD(compound_pagelist);
1097
pmd_t *pmd, _pmd;
1098
pte_t *pte;
1099
pgtable_t pgtable;
1100
struct folio *folio;
1101
spinlock_t *pmd_ptl, *pte_ptl;
1102
int result = SCAN_FAIL;
1103
struct vm_area_struct *vma;
1104
struct mmu_notifier_range range;
1105
1106
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1107
1108
/*
1109
* Before allocating the hugepage, release the mmap_lock read lock.
1110
* The allocation can take potentially a long time if it involves
1111
* sync compaction, and we do not need to hold the mmap_lock during
1112
* that. We will recheck the vma after taking it again in write mode.
1113
*/
1114
mmap_read_unlock(mm);
1115
1116
result = alloc_charge_folio(&folio, mm, cc);
1117
if (result != SCAN_SUCCEED)
1118
goto out_nolock;
1119
1120
mmap_read_lock(mm);
1121
result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
1122
if (result != SCAN_SUCCEED) {
1123
mmap_read_unlock(mm);
1124
goto out_nolock;
1125
}
1126
1127
result = find_pmd_or_thp_or_none(mm, address, &pmd);
1128
if (result != SCAN_SUCCEED) {
1129
mmap_read_unlock(mm);
1130
goto out_nolock;
1131
}
1132
1133
if (unmapped) {
1134
/*
1135
* __collapse_huge_page_swapin will return with mmap_lock
1136
* released when it fails. So we jump out_nolock directly in
1137
* that case. Continuing to collapse causes inconsistency.
1138
*/
1139
result = __collapse_huge_page_swapin(mm, vma, address, pmd,
1140
referenced);
1141
if (result != SCAN_SUCCEED)
1142
goto out_nolock;
1143
}
1144
1145
mmap_read_unlock(mm);
1146
/*
1147
* Prevent all access to pagetables with the exception of
1148
* gup_fast later handled by the ptep_clear_flush and the VM
1149
* handled by the anon_vma lock + PG_lock.
1150
*
1151
* UFFDIO_MOVE is prevented to race as well thanks to the
1152
* mmap_lock.
1153
*/
1154
mmap_write_lock(mm);
1155
result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
1156
if (result != SCAN_SUCCEED)
1157
goto out_up_write;
1158
/* check if the pmd is still valid */
1159
vma_start_write(vma);
1160
result = check_pmd_still_valid(mm, address, pmd);
1161
if (result != SCAN_SUCCEED)
1162
goto out_up_write;
1163
1164
anon_vma_lock_write(vma->anon_vma);
1165
1166
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, address,
1167
address + HPAGE_PMD_SIZE);
1168
mmu_notifier_invalidate_range_start(&range);
1169
1170
pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1171
/*
1172
* This removes any huge TLB entry from the CPU so we won't allow
1173
* huge and small TLB entries for the same virtual address to
1174
* avoid the risk of CPU bugs in that area.
1175
*
1176
* Parallel GUP-fast is fine since GUP-fast will back off when
1177
* it detects PMD is changed.
1178
*/
1179
_pmd = pmdp_collapse_flush(vma, address, pmd);
1180
spin_unlock(pmd_ptl);
1181
mmu_notifier_invalidate_range_end(&range);
1182
tlb_remove_table_sync_one();
1183
1184
pte = pte_offset_map_lock(mm, &_pmd, address, &pte_ptl);
1185
if (pte) {
1186
result = __collapse_huge_page_isolate(vma, address, pte, cc,
1187
&compound_pagelist);
1188
spin_unlock(pte_ptl);
1189
} else {
1190
result = SCAN_PMD_NULL;
1191
}
1192
1193
if (unlikely(result != SCAN_SUCCEED)) {
1194
if (pte)
1195
pte_unmap(pte);
1196
spin_lock(pmd_ptl);
1197
BUG_ON(!pmd_none(*pmd));
1198
/*
1199
* We can only use set_pmd_at when establishing
1200
* hugepmds and never for establishing regular pmds that
1201
* points to regular pagetables. Use pmd_populate for that
1202
*/
1203
pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1204
spin_unlock(pmd_ptl);
1205
anon_vma_unlock_write(vma->anon_vma);
1206
goto out_up_write;
1207
}
1208
1209
/*
1210
* All pages are isolated and locked so anon_vma rmap
1211
* can't run anymore.
1212
*/
1213
anon_vma_unlock_write(vma->anon_vma);
1214
1215
result = __collapse_huge_page_copy(pte, folio, pmd, _pmd,
1216
vma, address, pte_ptl,
1217
&compound_pagelist);
1218
pte_unmap(pte);
1219
if (unlikely(result != SCAN_SUCCEED))
1220
goto out_up_write;
1221
1222
/*
1223
* The smp_wmb() inside __folio_mark_uptodate() ensures the
1224
* copy_huge_page writes become visible before the set_pmd_at()
1225
* write.
1226
*/
1227
__folio_mark_uptodate(folio);
1228
pgtable = pmd_pgtable(_pmd);
1229
1230
_pmd = folio_mk_pmd(folio, vma->vm_page_prot);
1231
_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1232
1233
spin_lock(pmd_ptl);
1234
BUG_ON(!pmd_none(*pmd));
1235
folio_add_new_anon_rmap(folio, vma, address, RMAP_EXCLUSIVE);
1236
folio_add_lru_vma(folio, vma);
1237
pgtable_trans_huge_deposit(mm, pmd, pgtable);
1238
set_pmd_at(mm, address, pmd, _pmd);
1239
update_mmu_cache_pmd(vma, address, pmd);
1240
deferred_split_folio(folio, false);
1241
spin_unlock(pmd_ptl);
1242
1243
folio = NULL;
1244
1245
result = SCAN_SUCCEED;
1246
out_up_write:
1247
mmap_write_unlock(mm);
1248
out_nolock:
1249
if (folio)
1250
folio_put(folio);
1251
trace_mm_collapse_huge_page(mm, result == SCAN_SUCCEED, result);
1252
return result;
1253
}
1254
1255
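/*
 * Scan the PTEs mapped under one PMD to decide whether the range is a
 * collapse candidate; on success call collapse_huge_page(), which
 * releases the mmap_lock (reported back via *mmap_locked).
 */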
static int hpage_collapse_scan_pmd(struct mm_struct *mm,
1256
struct vm_area_struct *vma,
1257
unsigned long start_addr, bool *mmap_locked,
1258
struct collapse_control *cc)
1259
{
1260
pmd_t *pmd;
1261
pte_t *pte, *_pte;
1262
int result = SCAN_FAIL, referenced = 0;
1263
int none_or_zero = 0, shared = 0;
1264
struct page *page = NULL;
1265
struct folio *folio = NULL;
1266
unsigned long addr;
1267
spinlock_t *ptl;
1268
int node = NUMA_NO_NODE, unmapped = 0;
1269
1270
VM_BUG_ON(start_addr & ~HPAGE_PMD_MASK);
1271
1272
result = find_pmd_or_thp_or_none(mm, start_addr, &pmd);
1273
if (result != SCAN_SUCCEED)
1274
goto out;
1275
1276
memset(cc->node_load, 0, sizeof(cc->node_load));
1277
nodes_clear(cc->alloc_nmask);
1278
pte = pte_offset_map_lock(mm, pmd, start_addr, &ptl);
1279
if (!pte) {
1280
result = SCAN_PMD_NULL;
1281
goto out;
1282
}
1283
1284
for (addr = start_addr, _pte = pte; _pte < pte + HPAGE_PMD_NR;
1285
_pte++, addr += PAGE_SIZE) {
1286
pte_t pteval = ptep_get(_pte);
1287
if (is_swap_pte(pteval)) {
1288
++unmapped;
1289
if (!cc->is_khugepaged ||
1290
unmapped <= khugepaged_max_ptes_swap) {
1291
/*
1292
* Always be strict with uffd-wp
1293
* enabled swap entries. Please see
1294
* comment below for pte_uffd_wp().
1295
*/
1296
if (pte_swp_uffd_wp_any(pteval)) {
1297
result = SCAN_PTE_UFFD_WP;
1298
goto out_unmap;
1299
}
1300
continue;
1301
} else {
1302
result = SCAN_EXCEED_SWAP_PTE;
1303
count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
1304
goto out_unmap;
1305
}
1306
}
1307
if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1308
++none_or_zero;
1309
if (!userfaultfd_armed(vma) &&
1310
(!cc->is_khugepaged ||
1311
none_or_zero <= khugepaged_max_ptes_none)) {
1312
continue;
1313
} else {
1314
result = SCAN_EXCEED_NONE_PTE;
1315
count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
1316
goto out_unmap;
1317
}
1318
}
1319
if (pte_uffd_wp(pteval)) {
1320
/*
1321
* Don't collapse the page if any of the small
1322
* PTEs are armed with uffd write protection.
1323
* Here we can also mark the new huge pmd as
1324
* write protected if any of the small ones is
1325
* marked but that could bring unknown
1326
* userfault messages that fall outside of
1327
* the registered range. So, just be simple.
1328
*/
1329
result = SCAN_PTE_UFFD_WP;
1330
goto out_unmap;
1331
}
1332
1333
page = vm_normal_page(vma, addr, pteval);
1334
if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
1335
result = SCAN_PAGE_NULL;
1336
goto out_unmap;
1337
}
1338
folio = page_folio(page);
1339
1340
if (!folio_test_anon(folio)) {
1341
result = SCAN_PAGE_ANON;
1342
goto out_unmap;
1343
}
1344
1345
/*
1346
* We treat a single page as shared if any part of the THP
1347
* is shared.
1348
*/
1349
if (folio_maybe_mapped_shared(folio)) {
1350
++shared;
1351
if (cc->is_khugepaged &&
1352
shared > khugepaged_max_ptes_shared) {
1353
result = SCAN_EXCEED_SHARED_PTE;
1354
count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
1355
goto out_unmap;
1356
}
1357
}
1358
1359
/*
1360
* Record which node the original page is from and save this
1361
* information to cc->node_load[].
1362
* Khugepaged will allocate the hugepage from the node that has the max
1363
* hit record.
1364
*/
1365
node = folio_nid(folio);
1366
if (hpage_collapse_scan_abort(node, cc)) {
1367
result = SCAN_SCAN_ABORT;
1368
goto out_unmap;
1369
}
1370
cc->node_load[node]++;
1371
if (!folio_test_lru(folio)) {
1372
result = SCAN_PAGE_LRU;
1373
goto out_unmap;
1374
}
1375
if (folio_test_locked(folio)) {
1376
result = SCAN_PAGE_LOCK;
1377
goto out_unmap;
1378
}
1379
1380
/*
1381
* Check if the page has any GUP (or other external) pins.
1382
*
1383
* Here the check may be racy:
1384
* it may see folio_mapcount() > folio_ref_count().
1385
* But such case is ephemeral we could always retry collapse
1386
* later. However it may report false positive if the page
1387
* has excessive GUP pins (i.e. 512). Anyway the same check
1388
* will be done again later the risk seems low.
1389
*/
1390
if (folio_expected_ref_count(folio) != folio_ref_count(folio)) {
1391
result = SCAN_PAGE_COUNT;
1392
goto out_unmap;
1393
}
1394
1395
/*
1396
* If collapse was initiated by khugepaged, check that there is
1397
* enough young pte to justify collapsing the page
1398
*/
1399
if (cc->is_khugepaged &&
1400
(pte_young(pteval) || folio_test_young(folio) ||
1401
folio_test_referenced(folio) ||
1402
mmu_notifier_test_young(vma->vm_mm, addr)))
1403
referenced++;
1404
}
1405
if (cc->is_khugepaged &&
1406
(!referenced ||
1407
(unmapped && referenced < HPAGE_PMD_NR / 2))) {
1408
result = SCAN_LACK_REFERENCED_PAGE;
1409
} else {
1410
result = SCAN_SUCCEED;
1411
}
1412
out_unmap:
1413
pte_unmap_unlock(pte, ptl);
1414
if (result == SCAN_SUCCEED) {
1415
result = collapse_huge_page(mm, start_addr, referenced,
1416
unmapped, cc);
1417
/* collapse_huge_page will return with the mmap_lock released */
1418
*mmap_locked = false;
1419
}
1420
out:
1421
trace_mm_khugepaged_scan_pmd(mm, folio, referenced,
1422
none_or_zero, result, unmapped);
1423
return result;
1424
}
1425
1426
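/*
 * Free the mm_slot (and drop the mm reference it holds) once the mm has
 * exited. Caller must hold khugepaged_mm_lock.
 */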
static void collect_mm_slot(struct mm_slot *slot)
1427
{
1428
struct mm_struct *mm = slot->mm;
1429
1430
lockdep_assert_held(&khugepaged_mm_lock);
1431
1432
if (hpage_collapse_test_exit(mm)) {
1433
/* free mm_slot */
1434
hash_del(&slot->hash);
1435
list_del(&slot->mm_node);
1436
1437
/*
1438
* Not strictly needed because the mm exited already.
1439
*
1440
* mm_flags_clear(MMF_VM_HUGEPAGE, mm);
1441
*/
1442
1443
/* khugepaged_mm_lock actually not necessary for the below */
1444
mm_slot_free(mm_slot_cache, slot);
1445
mmdrop(mm);
1446
}
1447
}
1448
1449
/* folio must be locked, and mmap_lock must be held */
1450
static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
1451
pmd_t *pmdp, struct folio *folio, struct page *page)
1452
{
1453
struct mm_struct *mm = vma->vm_mm;
1454
struct vm_fault vmf = {
1455
.vma = vma,
1456
.address = addr,
1457
.flags = 0,
1458
};
1459
pgd_t *pgdp;
1460
p4d_t *p4dp;
1461
pud_t *pudp;
1462
1463
mmap_assert_locked(vma->vm_mm);
1464
1465
if (!pmdp) {
1466
pgdp = pgd_offset(mm, addr);
1467
p4dp = p4d_alloc(mm, pgdp, addr);
1468
if (!p4dp)
1469
return SCAN_FAIL;
1470
pudp = pud_alloc(mm, p4dp, addr);
1471
if (!pudp)
1472
return SCAN_FAIL;
1473
pmdp = pmd_alloc(mm, pudp, addr);
1474
if (!pmdp)
1475
return SCAN_FAIL;
1476
}
1477
1478
vmf.pmd = pmdp;
1479
if (do_set_pmd(&vmf, folio, page))
1480
return SCAN_FAIL;
1481
1482
folio_get(folio);
1483
return SCAN_SUCCEED;
1484
}
1485
1486
/**
1487
* collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at
1488
* address @addr.
1489
*
1490
* @mm: process address space where collapse happens
1491
* @addr: THP collapse address
1492
* @install_pmd: If a huge PMD should be installed
1493
*
1494
* This function checks whether all the PTEs in the PMD are pointing to the
1495
* right THP. If so, retract the page table so the THP can refault in with
1496
* as pmd-mapped. Possibly install a huge PMD mapping the THP.
1497
*/
1498
int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
1499
bool install_pmd)
1500
{
1501
int nr_mapped_ptes = 0, result = SCAN_FAIL;
1502
unsigned int nr_batch_ptes;
1503
struct mmu_notifier_range range;
1504
bool notified = false;
1505
unsigned long haddr = addr & HPAGE_PMD_MASK;
1506
unsigned long end = haddr + HPAGE_PMD_SIZE;
1507
struct vm_area_struct *vma = vma_lookup(mm, haddr);
1508
struct folio *folio;
1509
pte_t *start_pte, *pte;
1510
pmd_t *pmd, pgt_pmd;
1511
spinlock_t *pml = NULL, *ptl;
1512
int i;
1513
1514
mmap_assert_locked(mm);
1515
1516
/* First check VMA found, in case page tables are being torn down */
1517
if (!vma || !vma->vm_file ||
1518
!range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE))
1519
return SCAN_VMA_CHECK;
1520
1521
/* Fast check before locking page if already PMD-mapped */
1522
result = find_pmd_or_thp_or_none(mm, haddr, &pmd);
1523
if (result == SCAN_PMD_MAPPED)
1524
return result;
1525
1526
/*
1527
* If we are here, we've succeeded in replacing all the native pages
1528
* in the page cache with a single hugepage. If a mm were to fault-in
1529
* this memory (mapped by a suitably aligned VMA), we'd get the hugepage
1530
* and map it by a PMD, regardless of sysfs THP settings. As such, let's
1531
* analogously elide sysfs THP settings here and force collapse.
1532
*/
1533
if (!thp_vma_allowable_order(vma, vma->vm_flags, TVA_FORCED_COLLAPSE, PMD_ORDER))
1534
return SCAN_VMA_CHECK;
1535
1536
/* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
1537
if (userfaultfd_wp(vma))
1538
return SCAN_PTE_UFFD_WP;
1539
1540
folio = filemap_lock_folio(vma->vm_file->f_mapping,
1541
linear_page_index(vma, haddr));
1542
if (IS_ERR(folio))
1543
return SCAN_PAGE_NULL;
1544
1545
if (folio_order(folio) != HPAGE_PMD_ORDER) {
1546
result = SCAN_PAGE_COMPOUND;
1547
goto drop_folio;
1548
}
1549
1550
result = find_pmd_or_thp_or_none(mm, haddr, &pmd);
1551
switch (result) {
1552
case SCAN_SUCCEED:
1553
break;
1554
case SCAN_PMD_NULL:
1555
case SCAN_PMD_NONE:
1556
/*
1557
* All pte entries have been removed and pmd cleared.
1558
* Skip all the pte checks and just update the pmd mapping.
1559
*/
1560
goto maybe_install_pmd;
1561
default:
1562
goto drop_folio;
1563
}
1564
1565
result = SCAN_FAIL;
1566
start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
1567
if (!start_pte) /* mmap_lock + page lock should prevent this */
1568
goto drop_folio;
1569
1570
/* step 1: check all mapped PTEs are to the right huge page */
1571
for (i = 0, addr = haddr, pte = start_pte;
1572
i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1573
struct page *page;
1574
pte_t ptent = ptep_get(pte);
1575
1576
/* empty pte, skip */
1577
if (pte_none(ptent))
1578
continue;
1579
1580
/* page swapped out, abort */
1581
if (!pte_present(ptent)) {
1582
result = SCAN_PTE_NON_PRESENT;
1583
goto abort;
1584
}
1585
1586
page = vm_normal_page(vma, addr, ptent);
1587
if (WARN_ON_ONCE(page && is_zone_device_page(page)))
1588
page = NULL;
1589
/*
1590
* Note that uprobe, debugger, or MAP_PRIVATE may change the
1591
* page table, but the new page will not be a subpage of hpage.
1592
*/
1593
if (folio_page(folio, i) != page)
1594
goto abort;
1595
}
1596
1597
pte_unmap_unlock(start_pte, ptl);
1598
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
1599
haddr, haddr + HPAGE_PMD_SIZE);
1600
mmu_notifier_invalidate_range_start(&range);
1601
notified = true;
1602
1603
/*
1604
* pmd_lock covers a wider range than ptl, and (if split from mm's
1605
* page_table_lock) ptl nests inside pml. The less time we hold pml,
1606
* the better; but userfaultfd's mfill_atomic_pte() on a private VMA
1607
* inserts a valid as-if-COWed PTE without even looking up page cache.
1608
* So page lock of folio does not protect from it, so we must not drop
1609
* ptl before pgt_pmd is removed, so uffd private needs pml taken now.
1610
*/
1611
if (userfaultfd_armed(vma) && !(vma->vm_flags & VM_SHARED))
1612
pml = pmd_lock(mm, pmd);
1613
1614
start_pte = pte_offset_map_rw_nolock(mm, pmd, haddr, &pgt_pmd, &ptl);
1615
if (!start_pte) /* mmap_lock + page lock should prevent this */
1616
goto abort;
1617
if (!pml)
1618
spin_lock(ptl);
1619
else if (ptl != pml)
1620
spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
1621
1622
if (unlikely(!pmd_same(pgt_pmd, pmdp_get_lockless(pmd))))
1623
goto abort;
1624
1625
/* step 2: clear page table and adjust rmap */
1626
for (i = 0, addr = haddr, pte = start_pte; i < HPAGE_PMD_NR;
1627
i += nr_batch_ptes, addr += nr_batch_ptes * PAGE_SIZE,
1628
pte += nr_batch_ptes) {
1629
unsigned int max_nr_batch_ptes = (end - addr) >> PAGE_SHIFT;
1630
struct page *page;
1631
pte_t ptent = ptep_get(pte);
1632
1633
nr_batch_ptes = 1;
1634
1635
if (pte_none(ptent))
1636
continue;
1637
/*
1638
* We dropped ptl after the first scan, to do the mmu_notifier:
1639
* page lock stops more PTEs of the folio being faulted in, but
1640
* does not stop write faults COWing anon copies from existing
1641
* PTEs; and does not stop those being swapped out or migrated.
1642
*/
1643
if (!pte_present(ptent)) {
1644
result = SCAN_PTE_NON_PRESENT;
1645
goto abort;
1646
}
1647
page = vm_normal_page(vma, addr, ptent);
1648
1649
if (folio_page(folio, i) != page)
1650
goto abort;
1651
1652
nr_batch_ptes = folio_pte_batch(folio, pte, ptent, max_nr_batch_ptes);
1653
1654
/*
1655
* Must clear entry, or a racing truncate may re-remove it.
1656
* TLB flush can be left until pmdp_collapse_flush() does it.
1657
* PTE dirty? Shmem page is already dirty; file is read-only.
1658
*/
1659
clear_ptes(mm, addr, pte, nr_batch_ptes);
1660
folio_remove_rmap_ptes(folio, page, nr_batch_ptes, vma);
1661
nr_mapped_ptes += nr_batch_ptes;
1662
}
1663
1664
if (!pml)
1665
spin_unlock(ptl);
1666
1667
/* step 3: set proper refcount and mm_counters. */
1668
if (nr_mapped_ptes) {
1669
folio_ref_sub(folio, nr_mapped_ptes);
1670
add_mm_counter(mm, mm_counter_file(folio), -nr_mapped_ptes);
1671
}
1672
1673
/* step 4: remove empty page table */
1674
if (!pml) {
1675
pml = pmd_lock(mm, pmd);
1676
if (ptl != pml) {
1677
spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
1678
if (unlikely(!pmd_same(pgt_pmd, pmdp_get_lockless(pmd)))) {
1679
flush_tlb_mm(mm);
1680
goto unlock;
1681
}
1682
}
1683
}
1684
pgt_pmd = pmdp_collapse_flush(vma, haddr, pmd);
1685
pmdp_get_lockless_sync();
1686
pte_unmap_unlock(start_pte, ptl);
1687
if (ptl != pml)
1688
spin_unlock(pml);
1689
1690
mmu_notifier_invalidate_range_end(&range);
1691
1692
mm_dec_nr_ptes(mm);
1693
page_table_check_pte_clear_range(mm, haddr, pgt_pmd);
1694
pte_free_defer(mm, pmd_pgtable(pgt_pmd));
1695
1696
maybe_install_pmd:
1697
/* step 5: install pmd entry */
1698
result = install_pmd
1699
? set_huge_pmd(vma, haddr, pmd, folio, &folio->page)
1700
: SCAN_SUCCEED;
1701
goto drop_folio;
1702
abort:
1703
if (nr_mapped_ptes) {
1704
flush_tlb_mm(mm);
1705
folio_ref_sub(folio, nr_mapped_ptes);
1706
add_mm_counter(mm, mm_counter_file(folio), -nr_mapped_ptes);
1707
}
1708
unlock:
1709
if (start_pte)
1710
pte_unmap_unlock(start_pte, ptl);
1711
if (pml && pml != ptl)
1712
spin_unlock(pml);
1713
if (notified)
1714
mmu_notifier_invalidate_range_end(&range);
1715
drop_folio:
1716
folio_unlock(folio);
1717
folio_put(folio);
1718
return result;
1719
}
1720
1721
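/*
 * For each VMA mapping @pgoff of @mapping, try to free the now-empty page
 * table covering that range so a later fault can map the new huge folio
 * with a PMD.
 */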
static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1722
{
1723
struct vm_area_struct *vma;
1724
1725
i_mmap_lock_read(mapping);
1726
vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1727
struct mmu_notifier_range range;
1728
struct mm_struct *mm;
1729
unsigned long addr;
1730
pmd_t *pmd, pgt_pmd;
1731
spinlock_t *pml;
1732
spinlock_t *ptl;
1733
bool success = false;
1734
1735
/*
1736
* Check vma->anon_vma to exclude MAP_PRIVATE mappings that
1737
* got written to. These VMAs are likely not worth removing
1738
* page tables from, as PMD-mapping is likely to be split later.
1739
*/
1740
if (READ_ONCE(vma->anon_vma))
1741
continue;
1742
1743
addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1744
if (addr & ~HPAGE_PMD_MASK ||
1745
vma->vm_end < addr + HPAGE_PMD_SIZE)
1746
continue;
1747
1748
mm = vma->vm_mm;
1749
if (find_pmd_or_thp_or_none(mm, addr, &pmd) != SCAN_SUCCEED)
1750
continue;
1751
1752
if (hpage_collapse_test_exit(mm))
1753
continue;
1754
/*
1755
* When a vma is registered with uffd-wp, we cannot recycle
1756
* the page table because there may be pte markers installed.
1757
* Other vmas can still have the same file mapped hugely, but
1758
* skip this one: it will always be mapped in small page size
1759
* for uffd-wp registered ranges.
1760
*/
1761
if (userfaultfd_wp(vma))
1762
continue;
1763
1764
/* PTEs were notified when unmapped; but now for the PMD? */
1765
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
1766
addr, addr + HPAGE_PMD_SIZE);
1767
mmu_notifier_invalidate_range_start(&range);
1768
1769
pml = pmd_lock(mm, pmd);
1770
/*
1771
* The lock of new_folio is still held, so we will be blocked in
1772
* the page fault path, which prevents the pte entries from
1773
* being set again. So even though the old empty PTE page may be
1774
* concurrently freed and a new PTE page is filled into the pmd
1775
* entry, it is still empty and can be removed.
1776
*
1777
* So here we only need to recheck if the state of pmd entry
1778
* still meets our requirements, rather than checking pmd_same()
1779
* like elsewhere.
1780
*/
1781
if (check_pmd_state(pmd) != SCAN_SUCCEED)
1782
goto drop_pml;
1783
ptl = pte_lockptr(mm, pmd);
1784
if (ptl != pml)
1785
spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
1786
1787
/*
1788
* Huge page lock is still held, so normally the page table
1789
* must remain empty; and we have already skipped anon_vma
1790
* and userfaultfd_wp() vmas. But since the mmap_lock is not
1791
* held, it is still possible for a racing userfaultfd_ioctl()
1792
* to have inserted ptes or markers. Now that we hold ptlock,
1793
* repeating the anon_vma check protects from one category,
1794
* and repeating the userfaultfd_wp() check from another.
1795
*/
1796
if (likely(!vma->anon_vma && !userfaultfd_wp(vma))) {
1797
pgt_pmd = pmdp_collapse_flush(vma, addr, pmd);
1798
pmdp_get_lockless_sync();
1799
success = true;
1800
}
1801
1802
if (ptl != pml)
1803
spin_unlock(ptl);
1804
drop_pml:
1805
spin_unlock(pml);
1806
1807
mmu_notifier_invalidate_range_end(&range);
1808
1809
if (success) {
1810
mm_dec_nr_ptes(mm);
1811
page_table_check_pte_clear_range(mm, addr, pgt_pmd);
1812
pte_free_defer(mm, pmd_pgtable(pgt_pmd));
1813
}
1814
}
1815
i_mmap_unlock_read(mapping);
1816
}
1817
1818
/**
1819
* collapse_file - collapse filemap/tmpfs/shmem pages into huge one.
1820
*
1821
* @mm: process address space where collapse happens
1822
* @addr: virtual collapse start address
1823
* @file: file that collapse on
1824
* @start: collapse start address
1825
* @cc: collapse context and scratchpad
1826
*
1827
* Basic scheme is simple, details are more complex:
1828
* - allocate and lock a new huge page;
1829
* - scan page cache, locking old pages
1830
* + swap/gup in pages if necessary;
1831
* - copy data to new page
1832
* - handle shmem holes
1833
* + re-validate that holes weren't filled by someone else
1834
* + check for userfaultfd
1835
* - finalize updates to the page cache;
1836
* - if replacing succeeds:
1837
* + unlock huge page;
1838
* + free old pages;
1839
* - if replacing failed;
1840
* + unlock old pages
1841
* + unlock and free huge page;
1842
*/
static int collapse_file(struct mm_struct *mm, unsigned long addr,
			 struct file *file, pgoff_t start,
			 struct collapse_control *cc)
{
	struct address_space *mapping = file->f_mapping;
	struct page *dst;
	struct folio *folio, *tmp, *new_folio;
	pgoff_t index = 0, end = start + HPAGE_PMD_NR;
	LIST_HEAD(pagelist);
	XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
	int nr_none = 0, result = SCAN_SUCCEED;
	bool is_shmem = shmem_file(file);

	VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
	VM_BUG_ON(start & (HPAGE_PMD_NR - 1));

	result = alloc_charge_folio(&new_folio, mm, cc);
	if (result != SCAN_SUCCEED)
		goto out;

	mapping_set_update(&xas, mapping);

	__folio_set_locked(new_folio);
	if (is_shmem)
		__folio_set_swapbacked(new_folio);
	new_folio->index = start;
	new_folio->mapping = mapping;

	/*
	 * Ensure we have slots for all the pages in the range. This is
	 * almost certainly a no-op because most of the pages must be present
	 */
	do {
		xas_lock_irq(&xas);
		xas_create_range(&xas);
		if (!xas_error(&xas))
			break;
		xas_unlock_irq(&xas);
		if (!xas_nomem(&xas, GFP_KERNEL)) {
			result = SCAN_FAIL;
			goto rollback;
		}
	} while (1);

	for (index = start; index < end;) {
		xas_set(&xas, index);
		folio = xas_load(&xas);

		VM_BUG_ON(index != xas.xa_index);
		if (is_shmem) {
			if (!folio) {
				/*
				 * Stop if extent has been truncated or
				 * hole-punched, and is now completely
				 * empty.
				 */
				if (index == start) {
					if (!xas_next_entry(&xas, end - 1)) {
						result = SCAN_TRUNCATED;
						goto xa_locked;
					}
				}
				nr_none++;
				index++;
				continue;
			}

			if (xa_is_value(folio) || !folio_test_uptodate(folio)) {
				xas_unlock_irq(&xas);
				/* swap in or instantiate fallocated page */
				if (shmem_get_folio(mapping->host, index, 0,
						    &folio, SGP_NOALLOC)) {
					result = SCAN_FAIL;
					goto xa_unlocked;
				}
				/* drain lru cache to help folio_isolate_lru() */
				lru_add_drain();
			} else if (folio_trylock(folio)) {
				folio_get(folio);
				xas_unlock_irq(&xas);
			} else {
				result = SCAN_PAGE_LOCK;
				goto xa_locked;
			}
		} else {	/* !is_shmem */
			if (!folio || xa_is_value(folio)) {
				xas_unlock_irq(&xas);
				page_cache_sync_readahead(mapping, &file->f_ra,
							  file, index,
							  end - index);
				/* drain lru cache to help folio_isolate_lru() */
				lru_add_drain();
				folio = filemap_lock_folio(mapping, index);
				if (IS_ERR(folio)) {
					result = SCAN_FAIL;
					goto xa_unlocked;
				}
			} else if (folio_test_dirty(folio)) {
				/*
				 * khugepaged only works on read-only fd,
				 * so this page is dirty because it hasn't
				 * been flushed since first write. There
				 * won't be new dirty pages.
				 *
				 * Trigger async flush here and hope the
				 * writeback is done when khugepaged
				 * revisits this page.
				 *
				 * This is a one-off situation. We are not
				 * forcing writeback in loop.
				 */
				xas_unlock_irq(&xas);
				filemap_flush(mapping);
				result = SCAN_FAIL;
				goto xa_unlocked;
			} else if (folio_test_writeback(folio)) {
				xas_unlock_irq(&xas);
				result = SCAN_FAIL;
				goto xa_unlocked;
			} else if (folio_trylock(folio)) {
				folio_get(folio);
				xas_unlock_irq(&xas);
			} else {
				result = SCAN_PAGE_LOCK;
				goto xa_locked;
			}
		}

		/*
		 * The folio must be locked, so we can drop the i_pages lock
		 * without racing with truncate.
		 */
		VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

		/* make sure the folio is up to date */
		if (unlikely(!folio_test_uptodate(folio))) {
			result = SCAN_FAIL;
			goto out_unlock;
		}

		/*
		 * If file was truncated then extended, or hole-punched, before
		 * we locked the first folio, then a THP might be there already.
		 * This will be discovered on the first iteration.
		 */
		if (folio_order(folio) == HPAGE_PMD_ORDER &&
		    folio->index == start) {
			/* Maybe PMD-mapped */
			result = SCAN_PTE_MAPPED_HUGEPAGE;
			goto out_unlock;
		}

		if (folio_mapping(folio) != mapping) {
			result = SCAN_TRUNCATED;
			goto out_unlock;
		}

		if (!is_shmem && (folio_test_dirty(folio) ||
				  folio_test_writeback(folio))) {
			/*
			 * khugepaged only works on read-only fd, so this
			 * folio is dirty because it hasn't been flushed
			 * since first write.
			 */
			result = SCAN_FAIL;
			goto out_unlock;
		}

		if (!folio_isolate_lru(folio)) {
			result = SCAN_DEL_PAGE_LRU;
			goto out_unlock;
		}

		if (!filemap_release_folio(folio, GFP_KERNEL)) {
			result = SCAN_PAGE_HAS_PRIVATE;
			folio_putback_lru(folio);
			goto out_unlock;
		}
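
		/*
		 * Unmap the folio from any process still mapping it; the TLB
		 * flush is batched (TTU_BATCH_FLUSH) and issued later via
		 * try_to_unmap_flush().
		 */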
2021
2022
if (folio_mapped(folio))
2023
try_to_unmap(folio,
2024
TTU_IGNORE_MLOCK | TTU_BATCH_FLUSH);
2025
2026
xas_lock_irq(&xas);
2027
2028
VM_BUG_ON_FOLIO(folio != xa_load(xas.xa, index), folio);
2029
2030

		/*
		 * We control 2 + nr_pages references to the folio:
		 * - we hold a pin on it;
		 * - nr_pages reference from page cache;
		 * - one from folio_isolate_lru();
		 * If those are the only references, then any new usage
		 * of the folio will have to fetch it from the page
		 * cache. That requires locking the folio to handle
		 * truncate, so any new usage will be blocked until we
		 * unlock folio after collapse/during rollback.
		 */
		if (folio_ref_count(folio) != 2 + folio_nr_pages(folio)) {
			result = SCAN_PAGE_COUNT;
			xas_unlock_irq(&xas);
			folio_putback_lru(folio);
			goto out_unlock;
		}

		/*
		 * Accumulate the folios that are being collapsed.
		 */
		list_add_tail(&folio->lru, &pagelist);
		index += folio_nr_pages(folio);
		continue;
out_unlock:
		folio_unlock(folio);
		folio_put(folio);
		goto xa_unlocked;
	}

	if (!is_shmem) {
		filemap_nr_thps_inc(mapping);
		/*
		 * Paired with the fence in do_dentry_open() -> get_write_access()
		 * to ensure i_writecount is up to date and the update to nr_thps
		 * is visible. Ensures the page cache will be truncated if the
		 * file is opened writable.
		 */
		smp_mb();
		if (inode_is_open_for_write(mapping->host)) {
			result = SCAN_FAIL;
			filemap_nr_thps_dec(mapping);
		}
	}

xa_locked:
	xas_unlock_irq(&xas);
xa_unlocked:

	/*
	 * If collapse is successful, flush must be done now before copying.
	 * If collapse is unsuccessful, does flush actually need to be done?
	 * Do it anyway, to clear the state.
	 */
	try_to_unmap_flush();

	if (result == SCAN_SUCCEED && nr_none &&
	    !shmem_charge(mapping->host, nr_none))
		result = SCAN_FAIL;
	if (result != SCAN_SUCCEED) {
		nr_none = 0;
		goto rollback;
	}

	/*
	 * The old folios are locked, so they won't change anymore.
	 */
	index = start;
	dst = folio_page(new_folio, 0);
	list_for_each_entry(folio, &pagelist, lru) {
		int i, nr_pages = folio_nr_pages(folio);
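
		/* Zero-fill any hole in the range before this folio. */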
		while (index < folio->index) {
			clear_highpage(dst);
			index++;
			dst++;
		}

		for (i = 0; i < nr_pages; i++) {
			if (copy_mc_highpage(dst, folio_page(folio, i)) > 0) {
				result = SCAN_COPY_MC;
				goto rollback;
			}
			index++;
			dst++;
		}
	}
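	/* Zero-fill the tail of the range beyond the last folio. */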
	while (index < end) {
		clear_highpage(dst);
		index++;
		dst++;
	}

	if (nr_none) {
		struct vm_area_struct *vma;
		int nr_none_check = 0;

		i_mmap_lock_read(mapping);
		xas_lock_irq(&xas);

		xas_set(&xas, start);
		for (index = start; index < end; index++) {
			if (!xas_next(&xas)) {
				xas_store(&xas, XA_RETRY_ENTRY);
				if (xas_error(&xas)) {
					result = SCAN_STORE_FAILED;
					goto immap_locked;
				}
				nr_none_check++;
			}
		}

		if (nr_none != nr_none_check) {
			result = SCAN_PAGE_FILLED;
			goto immap_locked;
		}

		/*
		 * If userspace observed a missing page in a VMA with
		 * a MODE_MISSING userfaultfd, then it might expect a
		 * UFFD_EVENT_PAGEFAULT for that page. If so, we need to
		 * roll back to avoid suppressing such an event. Wp/minor
		 * userfaultfds don't give userspace any guarantees that
		 * the kernel won't fill a missing page with a zero page,
		 * so they don't matter here.
		 *
		 * Any userfaultfds registered after this point will
		 * not be able to observe any missing pages due to the
		 * previously inserted retry entries.
		 */
		vma_interval_tree_foreach(vma, &mapping->i_mmap, start, end) {
			if (userfaultfd_missing(vma)) {
				result = SCAN_EXCEED_NONE_PTE;
				goto immap_locked;
			}
		}

immap_locked:
		i_mmap_unlock_read(mapping);
		if (result != SCAN_SUCCEED) {
			xas_set(&xas, start);
			for (index = start; index < end; index++) {
				if (xas_next(&xas) == XA_RETRY_ENTRY)
					xas_store(&xas, NULL);
			}

			xas_unlock_irq(&xas);
			goto rollback;
		}
	} else {
		xas_lock_irq(&xas);
	}

	if (is_shmem)
		__lruvec_stat_mod_folio(new_folio, NR_SHMEM_THPS, HPAGE_PMD_NR);
	else
		__lruvec_stat_mod_folio(new_folio, NR_FILE_THPS, HPAGE_PMD_NR);

	if (nr_none) {
		__lruvec_stat_mod_folio(new_folio, NR_FILE_PAGES, nr_none);
		/* nr_none is always 0 for non-shmem. */
		__lruvec_stat_mod_folio(new_folio, NR_SHMEM, nr_none);
	}

	/*
	 * Mark new_folio as uptodate before inserting it into the
	 * page cache so that it isn't mistaken for a fallocated but
	 * unwritten page.
	 */
	folio_mark_uptodate(new_folio);
	folio_ref_add(new_folio, HPAGE_PMD_NR - 1);

	if (is_shmem)
		folio_mark_dirty(new_folio);
	folio_add_lru(new_folio);

	/* Join all the small entries into a single multi-index entry. */
	xas_set_order(&xas, start, HPAGE_PMD_ORDER);
	xas_store(&xas, new_folio);
	WARN_ON_ONCE(xas_error(&xas));
	xas_unlock_irq(&xas);

	/*
	 * Remove pte page tables, so we can re-fault the page as huge.
	 * If MADV_COLLAPSE, adjust result to call collapse_pte_mapped_thp().
	 */
	retract_page_tables(mapping, start);
	if (cc && !cc->is_khugepaged)
		result = SCAN_PTE_MAPPED_HUGEPAGE;
	folio_unlock(new_folio);

	/*
	 * The collapse has succeeded, so free the old folios.
	 */
	list_for_each_entry_safe(folio, tmp, &pagelist, lru) {
		list_del(&folio->lru);
		folio->mapping = NULL;
		folio_clear_active(folio);
		folio_clear_unevictable(folio);
		folio_unlock(folio);
		folio_put_refs(folio, 2 + folio_nr_pages(folio));
	}

	goto out;

rollback:
	/* Something went wrong: roll back page cache changes */
	if (nr_none) {
		xas_lock_irq(&xas);
		mapping->nrpages -= nr_none;
		xas_unlock_irq(&xas);
		shmem_uncharge(mapping->host, nr_none);
	}

	list_for_each_entry_safe(folio, tmp, &pagelist, lru) {
		list_del(&folio->lru);
		folio_unlock(folio);
		folio_putback_lru(folio);
		folio_put(folio);
	}
	/*
	 * Undo the updates of filemap_nr_thps_inc for non-SHMEM
	 * file only. This undo is not needed unless failure is
	 * due to SCAN_COPY_MC.
	 */
	if (!is_shmem && result == SCAN_COPY_MC) {
		filemap_nr_thps_dec(mapping);
		/*
		 * Paired with the fence in do_dentry_open() -> get_write_access()
		 * to ensure the update to nr_thps is visible.
		 */
		smp_mb();
	}

	new_folio->mapping = NULL;

	folio_unlock(new_folio);
	folio_put(new_folio);
out:
	VM_BUG_ON(!list_empty(&pagelist));
	trace_mm_khugepaged_collapse_file(mm, new_folio, index, addr, is_shmem, file, HPAGE_PMD_NR, result);
	return result;
}

static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
				    struct file *file, pgoff_t start,
				    struct collapse_control *cc)
{
	struct folio *folio = NULL;
	struct address_space *mapping = file->f_mapping;
	XA_STATE(xas, &mapping->i_pages, start);
	int present, swap;
	int node = NUMA_NO_NODE;
	int result = SCAN_SUCCEED;

	present = 0;
	swap = 0;
	memset(cc->node_load, 0, sizeof(cc->node_load));
	nodes_clear(cc->alloc_nmask);
	rcu_read_lock();
	xas_for_each(&xas, folio, start + HPAGE_PMD_NR - 1) {
		if (xas_retry(&xas, folio))
			continue;
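
		/* Value entry (e.g. a swapped-out page): count its pages against max_ptes_swap. */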
2293
2294
if (xa_is_value(folio)) {
2295
swap += 1 << xas_get_order(&xas);
2296
if (cc->is_khugepaged &&
2297
swap > khugepaged_max_ptes_swap) {
2298
result = SCAN_EXCEED_SWAP_PTE;
2299
count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
2300
break;
2301
}
2302
continue;
2303
}
2304
2305
if (!folio_try_get(folio)) {
2306
xas_reset(&xas);
2307
continue;
2308
}
2309
2310
if (unlikely(folio != xas_reload(&xas))) {
2311
folio_put(folio);
2312
xas_reset(&xas);
2313
continue;
2314
}
2315
2316
if (folio_order(folio) == HPAGE_PMD_ORDER &&
2317
folio->index == start) {
2318
/* Maybe PMD-mapped */
2319
result = SCAN_PTE_MAPPED_HUGEPAGE;
2320
/*
2321
* For SCAN_PTE_MAPPED_HUGEPAGE, further processing
2322
* by the caller won't touch the page cache, and so
2323
* it's safe to skip LRU and refcount checks before
2324
* returning.
2325
*/
2326
folio_put(folio);
2327
break;
2328
}
2329
2330
node = folio_nid(folio);
2331
if (hpage_collapse_scan_abort(node, cc)) {
2332
result = SCAN_SCAN_ABORT;
2333
folio_put(folio);
2334
break;
2335
}
2336
cc->node_load[node]++;
2337
2338
if (!folio_test_lru(folio)) {
2339
result = SCAN_PAGE_LRU;
2340
folio_put(folio);
2341
break;
2342
}
2343
2344
if (folio_expected_ref_count(folio) + 1 != folio_ref_count(folio)) {
2345
result = SCAN_PAGE_COUNT;
2346
folio_put(folio);
2347
break;
2348
}
2349
2350
/*
2351
* We probably should check if the folio is referenced
2352
* here, but nobody would transfer pte_young() to
2353
* folio_test_referenced() for us. And rmap walk here
2354
* is just too costly...
2355
*/
2356
2357
present += folio_nr_pages(folio);
2358
folio_put(folio);
2359
2360
if (need_resched()) {
2361
xas_pause(&xas);
2362
cond_resched_rcu();
2363
}
2364
}
2365
rcu_read_unlock();
2366
2367
if (result == SCAN_SUCCEED) {
2368
if (cc->is_khugepaged &&
2369
present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
2370
result = SCAN_EXCEED_NONE_PTE;
2371
count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
2372
} else {
2373
result = collapse_file(mm, addr, file, start, cc);
2374
}
2375
}
2376
2377
trace_mm_khugepaged_scan_file(mm, folio, file, present, swap, result);
2378
return result;
2379
}
2380
2381
static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
2382
struct collapse_control *cc)
2383
__releases(&khugepaged_mm_lock)
2384
__acquires(&khugepaged_mm_lock)
2385
{
2386
struct vma_iterator vmi;
2387
struct mm_slot *slot;
2388
struct mm_struct *mm;
2389
struct vm_area_struct *vma;
2390
int progress = 0;
2391
2392
VM_BUG_ON(!pages);
2393
lockdep_assert_held(&khugepaged_mm_lock);
2394
*result = SCAN_FAIL;
2395
2396
if (khugepaged_scan.mm_slot) {
2397
slot = khugepaged_scan.mm_slot;
2398
} else {
2399
slot = list_first_entry(&khugepaged_scan.mm_head,
2400
struct mm_slot, mm_node);
2401
khugepaged_scan.address = 0;
2402
khugepaged_scan.mm_slot = slot;
2403
}
2404
spin_unlock(&khugepaged_mm_lock);
2405
2406
mm = slot->mm;
2407
/*
2408
* Don't wait for semaphore (to avoid long wait times). Just move to
2409
* the next mm on the list.
2410
*/
2411
vma = NULL;
2412
if (unlikely(!mmap_read_trylock(mm)))
2413
goto breakouterloop_mmap_lock;
2414
2415
progress++;
2416
if (unlikely(hpage_collapse_test_exit_or_disable(mm)))
2417
goto breakouterloop;
2418
2419
vma_iter_init(&vmi, mm, khugepaged_scan.address);
2420
for_each_vma(vmi, vma) {
2421
unsigned long hstart, hend;
2422
2423
cond_resched();
2424
if (unlikely(hpage_collapse_test_exit_or_disable(mm))) {
2425
progress++;
2426
break;
2427
}
2428
if (!thp_vma_allowable_order(vma, vma->vm_flags, TVA_KHUGEPAGED, PMD_ORDER)) {
2429
skip:
2430
progress++;
2431
continue;
2432
}
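		/* Restrict the scan to the PMD-aligned portion of this VMA. */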
		hstart = round_up(vma->vm_start, HPAGE_PMD_SIZE);
		hend = round_down(vma->vm_end, HPAGE_PMD_SIZE);
		if (khugepaged_scan.address > hend)
			goto skip;
		if (khugepaged_scan.address < hstart)
			khugepaged_scan.address = hstart;
		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);

		while (khugepaged_scan.address < hend) {
			bool mmap_locked = true;

			cond_resched();
			if (unlikely(hpage_collapse_test_exit_or_disable(mm)))
				goto breakouterloop;

			VM_BUG_ON(khugepaged_scan.address < hstart ||
				  khugepaged_scan.address + HPAGE_PMD_SIZE >
				  hend);
			if (!vma_is_anonymous(vma)) {
				struct file *file = get_file(vma->vm_file);
				pgoff_t pgoff = linear_page_index(vma,
						khugepaged_scan.address);

				mmap_read_unlock(mm);
				mmap_locked = false;
				*result = hpage_collapse_scan_file(mm,
					khugepaged_scan.address, file, pgoff, cc);
				fput(file);
				if (*result == SCAN_PTE_MAPPED_HUGEPAGE) {
					mmap_read_lock(mm);
					if (hpage_collapse_test_exit_or_disable(mm))
						goto breakouterloop;
					*result = collapse_pte_mapped_thp(mm,
						khugepaged_scan.address, false);
					if (*result == SCAN_PMD_MAPPED)
						*result = SCAN_SUCCEED;
					mmap_read_unlock(mm);
				}
			} else {
				*result = hpage_collapse_scan_pmd(mm, vma,
					khugepaged_scan.address, &mmap_locked, cc);
			}

			if (*result == SCAN_SUCCEED)
				++khugepaged_pages_collapsed;

			/* move to next address */
			khugepaged_scan.address += HPAGE_PMD_SIZE;
			progress += HPAGE_PMD_NR;
			if (!mmap_locked)
				/*
				 * We released mmap_lock so break loop. Note
				 * that we drop mmap_lock before all hugepage
				 * allocations, so if allocation fails, we are
				 * guaranteed to break here and report the
				 * correct result back to caller.
				 */
				goto breakouterloop_mmap_lock;
			if (progress >= pages)
				goto breakouterloop;
		}
	}
breakouterloop:
	mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
breakouterloop_mmap_lock:

	spin_lock(&khugepaged_mm_lock);
	VM_BUG_ON(khugepaged_scan.mm_slot != slot);
	/*
	 * Release the current mm_slot if this mm is about to die, or
	 * if we scanned all vmas of this mm.
	 */
	if (hpage_collapse_test_exit(mm) || !vma) {
		/*
		 * Make sure that if mm_users is reaching zero while
		 * khugepaged runs here, khugepaged_exit will find
		 * mm_slot not pointing to the exiting mm.
		 */
		if (!list_is_last(&slot->mm_node, &khugepaged_scan.mm_head)) {
			khugepaged_scan.mm_slot = list_next_entry(slot, mm_node);
			khugepaged_scan.address = 0;
		} else {
			khugepaged_scan.mm_slot = NULL;
			khugepaged_full_scans++;
		}

		collect_mm_slot(slot);
	}

	return progress;
}

static int khugepaged_has_work(void)
{
	return !list_empty(&khugepaged_scan.mm_head) && hugepage_pmd_enabled();
}

static int khugepaged_wait_event(void)
{
	return !list_empty(&khugepaged_scan.mm_head) ||
		kthread_should_stop();
}

static void khugepaged_do_scan(struct collapse_control *cc)
{
	unsigned int progress = 0, pass_through_head = 0;
	unsigned int pages = READ_ONCE(khugepaged_pages_to_scan);
	bool wait = true;
	int result = SCAN_SUCCEED;

	lru_add_drain_all();

	while (true) {
		cond_resched();

		if (unlikely(kthread_should_stop()))
			break;

		spin_lock(&khugepaged_mm_lock);
		if (!khugepaged_scan.mm_slot)
			pass_through_head++;
		if (khugepaged_has_work() &&
		    pass_through_head < 2)
			progress += khugepaged_scan_mm_slot(pages - progress,
							    &result, cc);
		else
			progress = pages;
		spin_unlock(&khugepaged_mm_lock);

		if (progress >= pages)
			break;

		if (result == SCAN_ALLOC_HUGE_PAGE_FAIL) {
			/*
			 * If allocation failed the first time, sleep for a
			 * while; if it fails again, cancel the scan.
			 */
			if (!wait)
				break;
			wait = false;
			khugepaged_alloc_sleep();
		}
	}
}

static bool khugepaged_should_wakeup(void)
{
	return kthread_should_stop() ||
	       time_after_eq(jiffies, khugepaged_sleep_expire);
}

static void khugepaged_wait_work(void)
{
	if (khugepaged_has_work()) {
		const unsigned long scan_sleep_jiffies =
			msecs_to_jiffies(khugepaged_scan_sleep_millisecs);

		if (!scan_sleep_jiffies)
			return;

		khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
		wait_event_freezable_timeout(khugepaged_wait,
					     khugepaged_should_wakeup(),
					     scan_sleep_jiffies);
		return;
	}

	if (hugepage_pmd_enabled())
		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
}

static int khugepaged(void *none)
{
	struct mm_slot *slot;

	set_freezable();
	set_user_nice(current, MAX_NICE);

	while (!kthread_should_stop()) {
		khugepaged_do_scan(&khugepaged_collapse_control);
		khugepaged_wait_work();
	}
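
	/* The kthread is stopping: detach from any mm_slot still being scanned. */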
2615
2616
spin_lock(&khugepaged_mm_lock);
2617
slot = khugepaged_scan.mm_slot;
2618
khugepaged_scan.mm_slot = NULL;
2619
if (slot)
2620
collect_mm_slot(slot);
2621
spin_unlock(&khugepaged_mm_lock);
2622
return 0;
2623
}
2624
2625
static void set_recommended_min_free_kbytes(void)
2626
{
2627
struct zone *zone;
2628
int nr_zones = 0;
2629
unsigned long recommended_min;
2630
2631
if (!hugepage_pmd_enabled()) {
2632
calculate_min_free_kbytes();
2633
goto update_wmarks;
2634
}
2635
2636
for_each_populated_zone(zone) {
2637
/*
2638
* We don't need to worry about fragmentation of
2639
* ZONE_MOVABLE since it only has movable pages.
2640
*/
2641
if (zone_idx(zone) > gfp_zone(GFP_USER))
2642
continue;
2643
2644
nr_zones++;
2645
}
2646
2647
/* Ensure 2 pageblocks are free to assist fragmentation avoidance */
2648
recommended_min = pageblock_nr_pages * nr_zones * 2;
2649
2650

	/*
	 * Make sure that on average at least two pageblocks are almost free
	 * of another type, one for a migratetype to fall back to and a
	 * second to avoid subsequent fallbacks of other types. There are 3
	 * MIGRATE_TYPES we care about.
	 */
	recommended_min += pageblock_nr_pages * nr_zones *
			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;

	/* don't ever allow reserving more than 5% of the lowmem */
	recommended_min = min(recommended_min,
			      (unsigned long) nr_free_buffer_pages() / 20);
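	/* Convert from pages to kilobytes for min_free_kbytes. */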
	recommended_min <<= (PAGE_SHIFT-10);

	if (recommended_min > min_free_kbytes) {
		if (user_min_free_kbytes >= 0)
			pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
				min_free_kbytes, recommended_min);

		min_free_kbytes = recommended_min;
	}

update_wmarks:
	setup_per_zone_wmarks();
}

int start_stop_khugepaged(void)
{
	int err = 0;

	mutex_lock(&khugepaged_mutex);
	if (hugepage_pmd_enabled()) {
		if (!khugepaged_thread)
			khugepaged_thread = kthread_run(khugepaged, NULL,
							"khugepaged");
		if (IS_ERR(khugepaged_thread)) {
			pr_err("khugepaged: kthread_run(khugepaged) failed\n");
			err = PTR_ERR(khugepaged_thread);
			khugepaged_thread = NULL;
			goto fail;
		}

		if (!list_empty(&khugepaged_scan.mm_head))
			wake_up_interruptible(&khugepaged_wait);
	} else if (khugepaged_thread) {
		kthread_stop(khugepaged_thread);
		khugepaged_thread = NULL;
	}
	set_recommended_min_free_kbytes();
fail:
	mutex_unlock(&khugepaged_mutex);
	return err;
}

void khugepaged_min_free_kbytes_update(void)
{
	mutex_lock(&khugepaged_mutex);
	if (hugepage_pmd_enabled() && khugepaged_thread)
		set_recommended_min_free_kbytes();
	mutex_unlock(&khugepaged_mutex);
}

bool current_is_khugepaged(void)
{
	return kthread_func(current) == khugepaged;
}

static int madvise_collapse_errno(enum scan_result r)
{
	/*
	 * MADV_COLLAPSE breaks from existing madvise(2) conventions to provide
	 * actionable feedback to caller, so they may take an appropriate
	 * fallback measure depending on the nature of the failure.
	 */
	switch (r) {
	case SCAN_ALLOC_HUGE_PAGE_FAIL:
		return -ENOMEM;
	case SCAN_CGROUP_CHARGE_FAIL:
	case SCAN_EXCEED_NONE_PTE:
		return -EBUSY;
	/* Resource temporarily unavailable - trying again might succeed */
	case SCAN_PAGE_COUNT:
	case SCAN_PAGE_LOCK:
	case SCAN_PAGE_LRU:
	case SCAN_DEL_PAGE_LRU:
	case SCAN_PAGE_FILLED:
		return -EAGAIN;
	/*
	 * Other: Trying again likely not to succeed / error intrinsic to
	 * specified memory range. khugepaged likely won't be able to collapse
	 * either.
	 */
	default:
		return -EINVAL;
	}
}

int madvise_collapse(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end, bool *lock_dropped)
{
	struct collapse_control *cc;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long hstart, hend, addr;
	int thps = 0, last_fail = SCAN_FAIL;
	bool mmap_locked = true;

	BUG_ON(vma->vm_start > start);
	BUG_ON(vma->vm_end < end);

	if (!thp_vma_allowable_order(vma, vma->vm_flags, TVA_FORCED_COLLAPSE, PMD_ORDER))
		return -EINVAL;

	cc = kmalloc(sizeof(*cc), GFP_KERNEL);
	if (!cc)
		return -ENOMEM;
	cc->is_khugepaged = false;

	mmgrab(mm);
	lru_add_drain_all();
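
	/* Round the requested range inward to PMD-aligned boundaries. */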
	hstart = (start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = end & HPAGE_PMD_MASK;

	for (addr = hstart; addr < hend; addr += HPAGE_PMD_SIZE) {
		int result = SCAN_FAIL;

		if (!mmap_locked) {
			cond_resched();
			mmap_read_lock(mm);
			mmap_locked = true;
			result = hugepage_vma_revalidate(mm, addr, false, &vma,
							 cc);
			if (result != SCAN_SUCCEED) {
				last_fail = result;
				goto out_nolock;
			}

			hend = min(hend, vma->vm_end & HPAGE_PMD_MASK);
		}
		mmap_assert_locked(mm);
		memset(cc->node_load, 0, sizeof(cc->node_load));
		nodes_clear(cc->alloc_nmask);
		if (!vma_is_anonymous(vma)) {
			struct file *file = get_file(vma->vm_file);
			pgoff_t pgoff = linear_page_index(vma, addr);

			mmap_read_unlock(mm);
			mmap_locked = false;
			result = hpage_collapse_scan_file(mm, addr, file, pgoff,
							  cc);
			fput(file);
		} else {
			result = hpage_collapse_scan_pmd(mm, vma, addr,
							 &mmap_locked, cc);
		}
		if (!mmap_locked)
			*lock_dropped = true;

handle_result:
		switch (result) {
		case SCAN_SUCCEED:
		case SCAN_PMD_MAPPED:
			++thps;
			break;
		case SCAN_PTE_MAPPED_HUGEPAGE:
			BUG_ON(mmap_locked);
			mmap_read_lock(mm);
			result = collapse_pte_mapped_thp(mm, addr, true);
			mmap_read_unlock(mm);
			goto handle_result;
		/* Whitelisted set of results where continuing is OK */
		case SCAN_PMD_NULL:
		case SCAN_PTE_NON_PRESENT:
		case SCAN_PTE_UFFD_WP:
		case SCAN_LACK_REFERENCED_PAGE:
		case SCAN_PAGE_NULL:
		case SCAN_PAGE_COUNT:
		case SCAN_PAGE_LOCK:
		case SCAN_PAGE_COMPOUND:
		case SCAN_PAGE_LRU:
		case SCAN_DEL_PAGE_LRU:
			last_fail = result;
			break;
		default:
			last_fail = result;
			/* Other error, exit */
			goto out_maybelock;
		}
	}

out_maybelock:
	/* Caller expects us to hold mmap_lock on return */
	if (!mmap_locked)
		mmap_read_lock(mm);
out_nolock:
	mmap_assert_locked(mm);
	mmdrop(mm);
	kfree(cc);

	return thps == ((hend - hstart) >> HPAGE_PMD_SHIFT) ? 0
			: madvise_collapse_errno(last_fail);
}