GitHub Repository: torvalds/linux
Path: blob/master/mm/hugetlb.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic hugetlb support.
 * (C) Nadia Yvette Chambers, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/compiler.h>
#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/memblock.h>
#include <linux/minmax.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/mmdebug.h>
#include <linux/sched/signal.h>
#include <linux/rmap.h>
#include <linux/string_choices.h>
#include <linux/string_helpers.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/jhash.h>
#include <linux/numa.h>
#include <linux/llist.h>
#include <linux/cma.h>
#include <linux/migrate.h>
#include <linux/nospec.h>
#include <linux/delayacct.h>
#include <linux/memory.h>
#include <linux/mm_inline.h>
#include <linux/padata.h>

#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/setup.h>

#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
#include <linux/page_owner.h>
#include "internal.h"
#include "hugetlb_vmemmap.h"
#include "hugetlb_cma.h"
#include <linux/page-isolation.h>

int hugetlb_max_hstate __read_mostly;
59
unsigned int default_hstate_idx;
60
struct hstate hstates[HUGE_MAX_HSTATE];
61
62
__initdata nodemask_t hugetlb_bootmem_nodes;
63
__initdata struct list_head huge_boot_pages[MAX_NUMNODES];
64
static unsigned long hstate_boot_nrinvalid[HUGE_MAX_HSTATE] __initdata;
65
66
/*
67
* Due to ordering constraints across the init code for various
68
* architectures, hugetlb hstate cmdline parameters can't simply
69
* be early_param. early_param might call the setup function
70
* before valid hugetlb page sizes are determined, leading to
71
* incorrect rejection of valid hugepagesz= options.
72
*
73
* So, record the parameters early and consume them whenever the
74
* init code is ready for them, by calling hugetlb_parse_params().
75
*/
76
77
/* one (hugepagesz=,hugepages=) pair per hstate, one default_hugepagesz */
78
#define HUGE_MAX_CMDLINE_ARGS (2 * HUGE_MAX_HSTATE + 1)
79
struct hugetlb_cmdline {
80
char *val;
81
int (*setup)(char *val);
82
};
83
84
/* for command line parsing */
85
static struct hstate * __initdata parsed_hstate;
86
static unsigned long __initdata default_hstate_max_huge_pages;
87
static bool __initdata parsed_valid_hugepagesz = true;
88
static bool __initdata parsed_default_hugepagesz;
89
static unsigned int default_hugepages_in_node[MAX_NUMNODES] __initdata;
90
static unsigned long hugepage_allocation_threads __initdata;
91
92
static char hstate_cmdline_buf[COMMAND_LINE_SIZE] __initdata;
93
static int hstate_cmdline_index __initdata;
94
static struct hugetlb_cmdline hugetlb_params[HUGE_MAX_CMDLINE_ARGS] __initdata;
95
static int hugetlb_param_index __initdata;
96
static __init int hugetlb_add_param(char *s, int (*setup)(char *val));
97
static __init void hugetlb_parse_params(void);
98
99
#define hugetlb_early_param(str, func) \
100
static __init int func##args(char *s) \
101
{ \
102
return hugetlb_add_param(s, func); \
103
} \
104
early_param(str, func##args)
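/*
 * For example, hugetlb_early_param("hugepagesz", hugepagesz_setup) would
 * generate a hugepagesz_setupargs() early_param handler that only records
 * the option string via hugetlb_add_param(); the recorded setup callback is
 * invoked later from hugetlb_parse_params(), once valid huge page sizes are
 * known.
 */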
105
106
/*
107
* Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
108
* free_huge_pages, and surplus_huge_pages.
109
*/
110
__cacheline_aligned_in_smp DEFINE_SPINLOCK(hugetlb_lock);
111
112
/*
113
* Serializes faults on the same logical page. This is used to
114
* prevent spurious OOMs when the hugepage pool is fully utilized.
115
*/
116
static int num_fault_mutexes __ro_after_init;
117
struct mutex *hugetlb_fault_mutex_table __ro_after_init;
118
119
/* Forward declaration */
120
static int hugetlb_acct_memory(struct hstate *h, long delta);
121
static void hugetlb_vma_lock_free(struct vm_area_struct *vma);
122
static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
123
static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
124
static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
125
unsigned long start, unsigned long end, bool take_locks);
126
static struct resv_map *vma_resv_map(struct vm_area_struct *vma);
127
128
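/*
 * Release a folio that is no longer tracked as a hugetlb page: hand it back
 * to the hugetlb CMA area it was allocated from, or drop the final reference
 * so it is returned to the page allocator.
 */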
static void hugetlb_free_folio(struct folio *folio)
129
{
130
if (folio_test_hugetlb_cma(folio)) {
131
hugetlb_cma_free_folio(folio);
132
return;
133
}
134
135
folio_put(folio);
136
}
137
138
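/*
 * A subpool can be released once no references remain and, depending on the
 * accounting mode in use, it either has no pages in use (maximum size
 * accounting) or all of its minimum reservation is back in the subpool
 * (minimum size accounting).
 */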
static inline bool subpool_is_free(struct hugepage_subpool *spool)
139
{
140
if (spool->count)
141
return false;
142
if (spool->max_hpages != -1)
143
return spool->used_hpages == 0;
144
if (spool->min_hpages != -1)
145
return spool->rsv_hpages == spool->min_hpages;
146
147
return true;
148
}
149
150
static inline void unlock_or_release_subpool(struct hugepage_subpool *spool,
151
unsigned long irq_flags)
152
{
153
spin_unlock_irqrestore(&spool->lock, irq_flags);
154
155
/*
 * If no pages are used, and no other handles to the subpool
 * remain, give up any reservations based on minimum size and
 * free the subpool.
 */
158
if (subpool_is_free(spool)) {
159
if (spool->min_hpages != -1)
160
hugetlb_acct_memory(spool->hstate,
161
-spool->min_hpages);
162
kfree(spool);
163
}
164
}
165
166
struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
167
long min_hpages)
168
{
169
struct hugepage_subpool *spool;
170
171
spool = kzalloc(sizeof(*spool), GFP_KERNEL);
172
if (!spool)
173
return NULL;
174
175
spin_lock_init(&spool->lock);
176
spool->count = 1;
177
spool->max_hpages = max_hpages;
178
spool->hstate = h;
179
spool->min_hpages = min_hpages;
180
181
if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
182
kfree(spool);
183
return NULL;
184
}
185
spool->rsv_hpages = min_hpages;
186
187
return spool;
188
}
189
190
void hugepage_put_subpool(struct hugepage_subpool *spool)
191
{
192
unsigned long flags;
193
194
spin_lock_irqsave(&spool->lock, flags);
195
BUG_ON(!spool->count);
196
spool->count--;
197
unlock_or_release_subpool(spool, flags);
198
}
199
200
/*
201
* Subpool accounting for allocating and reserving pages.
202
* Return -ENOMEM if there are not enough resources to satisfy the
203
* request. Otherwise, return the number of pages by which the
204
* global pools must be adjusted (upward). The returned value may
205
* only be different than the passed value (delta) in the case where
206
* a subpool minimum size must be maintained.
207
*/
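/*
 * Illustrative example: with min_hpages = 10 and rsv_hpages = 10, a request
 * of delta = 3 is satisfied entirely from the subpool's reserve, so
 * rsv_hpages drops to 7 and 0 is returned (no global adjustment needed).
 * A later request of delta = 12 consumes the remaining 7 reserved pages and
 * returns 5, the number of pages that must still be taken from the global
 * pool.
 */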
208
static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
209
long delta)
210
{
211
long ret = delta;
212
213
if (!spool)
214
return ret;
215
216
spin_lock_irq(&spool->lock);
217
218
if (spool->max_hpages != -1) { /* maximum size accounting */
219
if ((spool->used_hpages + delta) <= spool->max_hpages)
220
spool->used_hpages += delta;
221
else {
222
ret = -ENOMEM;
223
goto unlock_ret;
224
}
225
}
226
227
/* minimum size accounting */
228
if (spool->min_hpages != -1 && spool->rsv_hpages) {
229
if (delta > spool->rsv_hpages) {
230
/*
231
* Asking for more reserves than those already taken on
232
* behalf of subpool. Return difference.
233
*/
234
ret = delta - spool->rsv_hpages;
235
spool->rsv_hpages = 0;
236
} else {
237
ret = 0; /* reserves already accounted for */
238
spool->rsv_hpages -= delta;
239
}
240
}
241
242
unlock_ret:
243
spin_unlock_irq(&spool->lock);
244
return ret;
245
}
246
247
/*
248
* Subpool accounting for freeing and unreserving pages.
249
* Return the number of global page reservations that must be dropped.
250
* The return value may only be different than the passed value (delta)
251
* in the case where a subpool minimum size must be maintained.
252
*/
253
static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
254
long delta)
255
{
256
long ret = delta;
257
unsigned long flags;
258
259
if (!spool)
260
return delta;
261
262
spin_lock_irqsave(&spool->lock, flags);
263
264
if (spool->max_hpages != -1) /* maximum size accounting */
265
spool->used_hpages -= delta;
266
267
/* minimum size accounting */
268
if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
269
if (spool->rsv_hpages + delta <= spool->min_hpages)
270
ret = 0;
271
else
272
ret = spool->rsv_hpages + delta - spool->min_hpages;
273
274
spool->rsv_hpages += delta;
275
if (spool->rsv_hpages > spool->min_hpages)
276
spool->rsv_hpages = spool->min_hpages;
277
}
278
279
/*
280
* If hugetlbfs_put_super couldn't free spool due to an outstanding
281
* quota reference, free it now.
282
*/
283
unlock_or_release_subpool(spool, flags);
284
285
return ret;
286
}
287
288
static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
289
{
290
return subpool_inode(file_inode(vma->vm_file));
291
}
292
293
/*
294
* hugetlb vma_lock helper routines
295
*/
296
void hugetlb_vma_lock_read(struct vm_area_struct *vma)
297
{
298
if (__vma_shareable_lock(vma)) {
299
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
300
301
down_read(&vma_lock->rw_sema);
302
} else if (__vma_private_lock(vma)) {
303
struct resv_map *resv_map = vma_resv_map(vma);
304
305
down_read(&resv_map->rw_sema);
306
}
307
}
308
309
void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
310
{
311
if (__vma_shareable_lock(vma)) {
312
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
313
314
up_read(&vma_lock->rw_sema);
315
} else if (__vma_private_lock(vma)) {
316
struct resv_map *resv_map = vma_resv_map(vma);
317
318
up_read(&resv_map->rw_sema);
319
}
320
}
321
322
void hugetlb_vma_lock_write(struct vm_area_struct *vma)
323
{
324
if (__vma_shareable_lock(vma)) {
325
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
326
327
down_write(&vma_lock->rw_sema);
328
} else if (__vma_private_lock(vma)) {
329
struct resv_map *resv_map = vma_resv_map(vma);
330
331
down_write(&resv_map->rw_sema);
332
}
333
}
334
335
void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
336
{
337
if (__vma_shareable_lock(vma)) {
338
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
339
340
up_write(&vma_lock->rw_sema);
341
} else if (__vma_private_lock(vma)) {
342
struct resv_map *resv_map = vma_resv_map(vma);
343
344
up_write(&resv_map->rw_sema);
345
}
346
}
347
348
int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
349
{
350
351
if (__vma_shareable_lock(vma)) {
352
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
353
354
return down_write_trylock(&vma_lock->rw_sema);
355
} else if (__vma_private_lock(vma)) {
356
struct resv_map *resv_map = vma_resv_map(vma);
357
358
return down_write_trylock(&resv_map->rw_sema);
359
}
360
361
return 1;
362
}
363
364
void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
365
{
366
if (__vma_shareable_lock(vma)) {
367
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
368
369
lockdep_assert_held(&vma_lock->rw_sema);
370
} else if (__vma_private_lock(vma)) {
371
struct resv_map *resv_map = vma_resv_map(vma);
372
373
lockdep_assert_held(&resv_map->rw_sema);
374
}
375
}
376
377
void hugetlb_vma_lock_release(struct kref *kref)
378
{
379
struct hugetlb_vma_lock *vma_lock = container_of(kref,
380
struct hugetlb_vma_lock, refs);
381
382
kfree(vma_lock);
383
}
384
385
static void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock)
386
{
387
struct vm_area_struct *vma = vma_lock->vma;
388
389
/*
 * The vma_lock structure may or may not be released as a result of the
 * put; it certainly will no longer be attached to the vma, so clear the
 * pointer. The semaphore synchronizes access to the vma_lock->vma field.
 */
394
vma_lock->vma = NULL;
395
vma->vm_private_data = NULL;
396
up_write(&vma_lock->rw_sema);
397
kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
398
}
399
400
static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
401
{
402
if (__vma_shareable_lock(vma)) {
403
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
404
405
__hugetlb_vma_unlock_write_put(vma_lock);
406
} else if (__vma_private_lock(vma)) {
407
struct resv_map *resv_map = vma_resv_map(vma);
408
409
/* no free for anon vmas, but still need to unlock */
410
up_write(&resv_map->rw_sema);
411
}
412
}
413
414
static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
415
{
416
/*
417
* Only present in sharable vmas.
418
*/
419
if (!vma || !__vma_shareable_lock(vma))
420
return;
421
422
if (vma->vm_private_data) {
423
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
424
425
down_write(&vma_lock->rw_sema);
426
__hugetlb_vma_unlock_write_put(vma_lock);
427
}
428
}
429
430
static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
431
{
432
struct hugetlb_vma_lock *vma_lock;
433
434
/* Only establish in (flags) sharable vmas */
435
if (!vma || !(vma->vm_flags & VM_MAYSHARE))
436
return;
437
438
/* Should never get here with non-NULL vm_private_data */
439
if (vma->vm_private_data)
440
return;
441
442
vma_lock = kmalloc(sizeof(*vma_lock), GFP_KERNEL);
443
if (!vma_lock) {
444
/*
 * If we cannot allocate the structure, the vma cannot
 * participate in pmd sharing. That is only a potential
 * performance enhancement and memory-saving issue.
 * However, the lock is also used to synchronize page
 * faults with truncation. If the lock is not present,
 * unlikely races could leave pages in a file past i_size
 * until the file is removed. Warn in the unlikely case of
 * allocation failure.
 */
454
pr_warn_once("HugeTLB: unable to allocate vma specific lock\n");
455
return;
456
}
457
458
kref_init(&vma_lock->refs);
459
init_rwsem(&vma_lock->rw_sema);
460
vma_lock->vma = vma;
461
vma->vm_private_data = vma_lock;
462
}
463
464
/* Helper that removes a struct file_region from the resv_map cache and returns
465
* it for use.
466
*/
467
static struct file_region *
468
get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)
469
{
470
struct file_region *nrg;
471
472
VM_BUG_ON(resv->region_cache_count <= 0);
473
474
resv->region_cache_count--;
475
nrg = list_first_entry(&resv->region_cache, struct file_region, link);
476
list_del(&nrg->link);
477
478
nrg->from = from;
479
nrg->to = to;
480
481
return nrg;
482
}
483
484
static void copy_hugetlb_cgroup_uncharge_info(struct file_region *nrg,
485
struct file_region *rg)
486
{
487
#ifdef CONFIG_CGROUP_HUGETLB
488
nrg->reservation_counter = rg->reservation_counter;
489
nrg->css = rg->css;
490
if (rg->css)
491
css_get(rg->css);
492
#endif
493
}
494
495
/* Helper that records hugetlb_cgroup uncharge info. */
496
static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
497
struct hstate *h,
498
struct resv_map *resv,
499
struct file_region *nrg)
500
{
501
#ifdef CONFIG_CGROUP_HUGETLB
502
if (h_cg) {
503
nrg->reservation_counter =
504
&h_cg->rsvd_hugepage[hstate_index(h)];
505
nrg->css = &h_cg->css;
506
/*
507
* The caller will hold exactly one h_cg->css reference for the
508
* whole contiguous reservation region. But this area might be
* scattered when some file_regions already reside in
* it. As a result, many file_regions may share only one css
511
* reference. In order to ensure that one file_region must hold
512
* exactly one h_cg->css reference, we should do css_get for
513
* each file_region and leave the reference held by caller
514
* untouched.
515
*/
516
css_get(&h_cg->css);
517
if (!resv->pages_per_hpage)
518
resv->pages_per_hpage = pages_per_huge_page(h);
519
/* pages_per_hpage should be the same for all entries in
520
* a resv_map.
521
*/
522
VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));
523
} else {
524
nrg->reservation_counter = NULL;
525
nrg->css = NULL;
526
}
527
#endif
528
}
529
530
static void put_uncharge_info(struct file_region *rg)
531
{
532
#ifdef CONFIG_CGROUP_HUGETLB
533
if (rg->css)
534
css_put(rg->css);
535
#endif
536
}
537
538
static bool has_same_uncharge_info(struct file_region *rg,
539
struct file_region *org)
540
{
541
#ifdef CONFIG_CGROUP_HUGETLB
542
return rg->reservation_counter == org->reservation_counter &&
543
rg->css == org->css;
544
545
#else
546
return true;
547
#endif
548
}
549
550
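/*
 * Merge the given region with an adjacent predecessor and/or successor that
 * covers a contiguous range and carries the same cgroup uncharge info,
 * freeing any entries absorbed by the merge.
 */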
static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
551
{
552
struct file_region *nrg, *prg;
553
554
prg = list_prev_entry(rg, link);
555
if (&prg->link != &resv->regions && prg->to == rg->from &&
556
has_same_uncharge_info(prg, rg)) {
557
prg->to = rg->to;
558
559
list_del(&rg->link);
560
put_uncharge_info(rg);
561
kfree(rg);
562
563
rg = prg;
564
}
565
566
nrg = list_next_entry(rg, link);
567
if (&nrg->link != &resv->regions && nrg->from == rg->to &&
568
has_same_uncharge_info(nrg, rg)) {
569
nrg->from = rg->from;
570
571
list_del(&rg->link);
572
put_uncharge_info(rg);
573
kfree(rg);
574
}
575
}
576
577
static inline long
578
hugetlb_resv_map_add(struct resv_map *map, struct list_head *rg, long from,
579
long to, struct hstate *h, struct hugetlb_cgroup *cg,
580
long *regions_needed)
581
{
582
struct file_region *nrg;
583
584
if (!regions_needed) {
585
nrg = get_file_region_entry_from_cache(map, from, to);
586
record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg);
587
list_add(&nrg->link, rg);
588
coalesce_file_region(map, nrg);
589
} else
590
*regions_needed += 1;
591
592
return to - from;
593
}
594
595
/*
596
* Must be called with resv->lock held.
597
*
598
* Calling this with regions_needed != NULL will count the number of pages
599
* to be added but will not modify the linked list. And regions_needed will
600
* indicate the number of file_regions needed in the cache to carry out
* the addition of regions for this range.
602
*/
603
static long add_reservation_in_range(struct resv_map *resv, long f, long t,
604
struct hugetlb_cgroup *h_cg,
605
struct hstate *h, long *regions_needed)
606
{
607
long add = 0;
608
struct list_head *head = &resv->regions;
609
long last_accounted_offset = f;
610
struct file_region *iter, *trg = NULL;
611
struct list_head *rg = NULL;
612
613
if (regions_needed)
614
*regions_needed = 0;
615
616
/* In this loop, we essentially handle an entry for the range
617
* [last_accounted_offset, iter->from), at every iteration, with some
618
* bounds checking.
619
*/
620
list_for_each_entry_safe(iter, trg, head, link) {
621
/* Skip irrelevant regions that start before our range. */
622
if (iter->from < f) {
623
/* If this region ends after the last accounted offset,
624
* then we need to update last_accounted_offset.
625
*/
626
if (iter->to > last_accounted_offset)
627
last_accounted_offset = iter->to;
628
continue;
629
}
630
631
/* When we find a region that starts beyond our range, we've
632
* finished.
633
*/
634
if (iter->from >= t) {
635
rg = iter->link.prev;
636
break;
637
}
638
639
/* Add an entry for last_accounted_offset -> iter->from, and
640
* update last_accounted_offset.
641
*/
642
if (iter->from > last_accounted_offset)
643
add += hugetlb_resv_map_add(resv, iter->link.prev,
644
last_accounted_offset,
645
iter->from, h, h_cg,
646
regions_needed);
647
648
last_accounted_offset = iter->to;
649
}
650
651
/* Handle the case where our range extends beyond
652
* last_accounted_offset.
653
*/
654
if (!rg)
655
rg = head->prev;
656
if (last_accounted_offset < t)
657
add += hugetlb_resv_map_add(resv, rg, last_accounted_offset,
658
t, h, h_cg, regions_needed);
659
660
return add;
661
}
662
663
/* Must be called with resv->lock acquired. Will drop lock to allocate entries.
664
*/
665
static int allocate_file_region_entries(struct resv_map *resv,
666
int regions_needed)
667
__must_hold(&resv->lock)
668
{
669
LIST_HEAD(allocated_regions);
670
int to_allocate = 0, i = 0;
671
struct file_region *trg = NULL, *rg = NULL;
672
673
VM_BUG_ON(regions_needed < 0);
674
675
/*
676
* Check for sufficient descriptors in the cache to accommodate
677
* the number of in progress add operations plus regions_needed.
678
*
679
* This is a while loop because when we drop the lock, some other call
680
* to region_add or region_del may have consumed some region_entries,
681
* so we keep looping here until we finally have enough entries for
682
* (adds_in_progress + regions_needed).
683
*/
684
while (resv->region_cache_count <
685
(resv->adds_in_progress + regions_needed)) {
686
to_allocate = resv->adds_in_progress + regions_needed -
687
resv->region_cache_count;
688
689
/* At this point, we should have enough entries in the cache
690
* for all the existing adds_in_progress. We should only be
691
* needing to allocate for regions_needed.
692
*/
693
VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress);
694
695
spin_unlock(&resv->lock);
696
for (i = 0; i < to_allocate; i++) {
697
trg = kmalloc(sizeof(*trg), GFP_KERNEL);
698
if (!trg)
699
goto out_of_memory;
700
list_add(&trg->link, &allocated_regions);
701
}
702
703
spin_lock(&resv->lock);
704
705
list_splice(&allocated_regions, &resv->region_cache);
706
resv->region_cache_count += to_allocate;
707
}
708
709
return 0;
710
711
out_of_memory:
712
list_for_each_entry_safe(rg, trg, &allocated_regions, link) {
713
list_del(&rg->link);
714
kfree(rg);
715
}
716
return -ENOMEM;
717
}
718
719
/*
720
* Add the huge page range represented by [f, t) to the reserve
721
* map. Regions will be taken from the cache to fill in this range.
722
* Sufficient regions should exist in the cache due to the previous
723
* call to region_chg with the same range, but in some cases the cache will not
724
* have sufficient entries due to races with other code doing region_add or
725
* region_del. The extra needed entries will be allocated.
726
*
727
* regions_needed is the out value provided by a previous call to region_chg.
728
*
729
* Return the number of new huge pages added to the map. This number is greater
730
* than or equal to zero. If file_region entries needed to be allocated for
731
* this operation and we were not able to allocate, it returns -ENOMEM.
732
* region_add of regions of length 1 never allocates file_regions and cannot
733
* fail; region_chg will always allocate at least 1 entry and a region_add for
734
* 1 page will only require at most 1 entry.
735
*/
736
static long region_add(struct resv_map *resv, long f, long t,
737
long in_regions_needed, struct hstate *h,
738
struct hugetlb_cgroup *h_cg)
739
{
740
long add = 0, actual_regions_needed = 0;
741
742
spin_lock(&resv->lock);
743
retry:
744
745
/* Count how many regions are actually needed to execute this add. */
746
add_reservation_in_range(resv, f, t, NULL, NULL,
747
&actual_regions_needed);
748
749
/*
750
* Check for sufficient descriptors in the cache to accommodate
751
* this add operation. Note that actual_regions_needed may be greater
752
* than in_regions_needed, as the resv_map may have been modified since
753
* the region_chg call. In this case, we need to make sure that we
754
* allocate extra entries, such that we have enough for all the
755
* existing adds_in_progress, plus the excess needed for this
756
* operation.
757
*/
758
if (actual_regions_needed > in_regions_needed &&
759
resv->region_cache_count <
760
resv->adds_in_progress +
761
(actual_regions_needed - in_regions_needed)) {
762
/* region_add operation of range 1 should never need to
763
* allocate file_region entries.
764
*/
765
VM_BUG_ON(t - f <= 1);
766
767
if (allocate_file_region_entries(
768
resv, actual_regions_needed - in_regions_needed)) {
769
return -ENOMEM;
770
}
771
772
goto retry;
773
}
774
775
add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);
776
777
resv->adds_in_progress -= in_regions_needed;
778
779
spin_unlock(&resv->lock);
780
return add;
781
}
782
783
/*
784
* Examine the existing reserve map and determine how many
785
* huge pages in the specified range [f, t) are NOT currently
786
* represented. This routine is called before a subsequent
787
* call to region_add that will actually modify the reserve
788
* map to add the specified range [f, t). region_chg does
789
* not change the number of huge pages represented by the
790
* map. A number of new file_region structures are added to the cache as
* placeholders for the subsequent region_add call to use. At least 1
* file_region structure is added.
793
*
794
* out_regions_needed is the number of regions added to the
795
* resv->adds_in_progress. This value needs to be provided to a follow up call
796
* to region_add or region_abort for proper accounting.
797
*
798
* Returns the number of huge pages that need to be added to the existing
799
* reservation map for the range [f, t). This number is greater than or
* equal to zero. -ENOMEM is returned if a new file_region structure or
* cache entry is needed and cannot be allocated.
802
*/
803
static long region_chg(struct resv_map *resv, long f, long t,
804
long *out_regions_needed)
805
{
806
long chg = 0;
807
808
spin_lock(&resv->lock);
809
810
/* Count how many hugepages in this range are NOT represented. */
811
chg = add_reservation_in_range(resv, f, t, NULL, NULL,
812
out_regions_needed);
813
814
if (*out_regions_needed == 0)
815
*out_regions_needed = 1;
816
817
if (allocate_file_region_entries(resv, *out_regions_needed))
818
return -ENOMEM;
819
820
resv->adds_in_progress += *out_regions_needed;
821
822
spin_unlock(&resv->lock);
823
return chg;
824
}
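/*
 * Illustrative calling pattern for the reservation map primitives:
 *
 *	chg = region_chg(resv, f, t, &regions_needed);
 *	if (chg < 0)
 *		return chg;
 *	...perform cgroup / global reservation accounting...
 *	if (that accounting fails)
 *		region_abort(resv, f, t, regions_needed);
 *	else
 *		region_add(resv, f, t, regions_needed, h, h_cg);
 */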
825
826
/*
827
* Abort the in progress add operation. The adds_in_progress field
828
* of the resv_map keeps track of the operations in progress between
829
* calls to region_chg and region_add. Operations are sometimes
830
* aborted after the call to region_chg. In such cases, region_abort
831
* is called to decrement the adds_in_progress counter. regions_needed
832
* is the value returned by the region_chg call; it is used to decrement
833
* the adds_in_progress counter.
834
*
835
* NOTE: The range arguments [f, t) are not needed or used in this
836
* routine. They are kept to make reading the calling code easier as
837
* arguments will match the associated region_chg call.
838
*/
839
static void region_abort(struct resv_map *resv, long f, long t,
840
long regions_needed)
841
{
842
spin_lock(&resv->lock);
843
VM_BUG_ON(!resv->region_cache_count);
844
resv->adds_in_progress -= regions_needed;
845
spin_unlock(&resv->lock);
846
}
847
848
/*
849
* Delete the specified range [f, t) from the reserve map. If the
850
* t parameter is LONG_MAX, this indicates that ALL regions after f
851
* should be deleted. Locate the regions which intersect [f, t)
852
* and either trim, delete or split the existing regions.
853
*
854
* Returns the number of huge pages deleted from the reserve map.
855
* In the normal case, the return value is zero or more. In the
856
* case where a region must be split, a new region descriptor must
857
* be allocated. If the allocation fails, -ENOMEM will be returned.
858
* NOTE: If the parameter t == LONG_MAX, then we will never split
* a region and thus can never return -ENOMEM. Callers specifying
* t == LONG_MAX do not need to check for -ENOMEM errors.
861
*/
862
static long region_del(struct resv_map *resv, long f, long t)
863
{
864
struct list_head *head = &resv->regions;
865
struct file_region *rg, *trg;
866
struct file_region *nrg = NULL;
867
long del = 0;
868
869
retry:
870
spin_lock(&resv->lock);
871
list_for_each_entry_safe(rg, trg, head, link) {
872
/*
873
* Skip regions before the range to be deleted. file_region
874
* ranges are normally of the form [from, to). However, there
875
* may be a "placeholder" entry in the map which is of the form
876
* (from, to) with from == to. Check for placeholder entries
877
* at the beginning of the range to be deleted.
878
*/
879
if (rg->to <= f && (rg->to != rg->from || rg->to != f))
880
continue;
881
882
if (rg->from >= t)
883
break;
884
885
if (f > rg->from && t < rg->to) { /* Must split region */
886
/*
887
* Check for an entry in the cache before dropping
888
* lock and attempting allocation.
889
*/
890
if (!nrg &&
891
resv->region_cache_count > resv->adds_in_progress) {
892
nrg = list_first_entry(&resv->region_cache,
893
struct file_region,
894
link);
895
list_del(&nrg->link);
896
resv->region_cache_count--;
897
}
898
899
if (!nrg) {
900
spin_unlock(&resv->lock);
901
nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
902
if (!nrg)
903
return -ENOMEM;
904
goto retry;
905
}
906
907
del += t - f;
908
hugetlb_cgroup_uncharge_file_region(
909
resv, rg, t - f, false);
910
911
/* New entry for end of split region */
912
nrg->from = t;
913
nrg->to = rg->to;
914
915
copy_hugetlb_cgroup_uncharge_info(nrg, rg);
916
917
INIT_LIST_HEAD(&nrg->link);
918
919
/* Original entry is trimmed */
920
rg->to = f;
921
922
list_add(&nrg->link, &rg->link);
923
nrg = NULL;
924
break;
925
}
926
927
if (f <= rg->from && t >= rg->to) { /* Remove entire region */
928
del += rg->to - rg->from;
929
hugetlb_cgroup_uncharge_file_region(resv, rg,
930
rg->to - rg->from, true);
931
list_del(&rg->link);
932
kfree(rg);
933
continue;
934
}
935
936
if (f <= rg->from) { /* Trim beginning of region */
937
hugetlb_cgroup_uncharge_file_region(resv, rg,
938
t - rg->from, false);
939
940
del += t - rg->from;
941
rg->from = t;
942
} else { /* Trim end of region */
943
hugetlb_cgroup_uncharge_file_region(resv, rg,
944
rg->to - f, false);
945
946
del += rg->to - f;
947
rg->to = f;
948
}
949
}
950
951
spin_unlock(&resv->lock);
952
kfree(nrg);
953
return del;
954
}
955
956
/*
957
* A rare out of memory error was encountered which prevented removal of
958
* the reserve map region for a page. The huge page itself was freed
959
* and removed from the page cache. This routine will adjust the subpool
960
* usage count, and the global reserve count if needed. By incrementing
961
* these counts, the reserve map entry which could not be deleted will
962
* appear as a "reserved" entry instead of simply dangling with incorrect
963
* counts.
964
*/
965
void hugetlb_fix_reserve_counts(struct inode *inode)
966
{
967
struct hugepage_subpool *spool = subpool_inode(inode);
968
long rsv_adjust;
969
bool reserved = false;
970
971
rsv_adjust = hugepage_subpool_get_pages(spool, 1);
972
if (rsv_adjust > 0) {
973
struct hstate *h = hstate_inode(inode);
974
975
if (!hugetlb_acct_memory(h, 1))
976
reserved = true;
977
} else if (!rsv_adjust) {
978
reserved = true;
979
}
980
981
if (!reserved)
982
pr_warn("hugetlb: Huge Page Reserved count may go negative.\n");
983
}
984
985
/*
986
* Count and return the number of huge pages in the reserve map
987
* that intersect with the range [f, t).
988
*/
989
static long region_count(struct resv_map *resv, long f, long t)
990
{
991
struct list_head *head = &resv->regions;
992
struct file_region *rg;
993
long chg = 0;
994
995
spin_lock(&resv->lock);
996
/* Locate each segment we overlap with, and count that overlap. */
997
list_for_each_entry(rg, head, link) {
998
long seg_from;
999
long seg_to;
1000
1001
if (rg->to <= f)
1002
continue;
1003
if (rg->from >= t)
1004
break;
1005
1006
seg_from = max(rg->from, f);
1007
seg_to = min(rg->to, t);
1008
1009
chg += seg_to - seg_from;
1010
}
1011
spin_unlock(&resv->lock);
1012
1013
return chg;
1014
}
1015
1016
/*
1017
* Convert the address within this vma to the page offset within
1018
* the mapping, in huge page units.
1019
*/
1020
static pgoff_t vma_hugecache_offset(struct hstate *h,
1021
struct vm_area_struct *vma, unsigned long address)
1022
{
1023
return ((address - vma->vm_start) >> huge_page_shift(h)) +
1024
(vma->vm_pgoff >> huge_page_order(h));
1025
}
1026
1027
/**
1028
* vma_kernel_pagesize - Page size granularity for this VMA.
1029
* @vma: The user mapping.
1030
*
1031
* Folios in this VMA will be aligned to, and at least the size of, the
* number of bytes returned by this function.
1033
*
1034
* Return: The default size of the folios allocated when backing a VMA.
1035
*/
1036
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
1037
{
1038
if (vma->vm_ops && vma->vm_ops->pagesize)
1039
return vma->vm_ops->pagesize(vma);
1040
return PAGE_SIZE;
1041
}
1042
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
1043
1044
/*
1045
* Return the page size being used by the MMU to back a VMA. In the majority
1046
* of cases, the page size used by the kernel matches the MMU size. On
1047
* architectures where it differs, an architecture-specific 'strong'
1048
* version of this symbol is required.
1049
*/
1050
__weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
1051
{
1052
return vma_kernel_pagesize(vma);
1053
}
1054
1055
/*
1056
* Flags for MAP_PRIVATE reservations. These are stored in the bottom
1057
* bits of the reservation map pointer, which are always clear due to
1058
* alignment.
1059
*/
1060
#define HPAGE_RESV_OWNER (1UL << 0)
1061
#define HPAGE_RESV_UNMAPPED (1UL << 1)
1062
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
1063
1064
/*
1065
* These helpers are used to track how many pages are reserved for
1066
* faults in a MAP_PRIVATE mapping. Only the process that called mmap()
1067
* is guaranteed to have its future faults succeed.
1068
*
1069
* With the exception of hugetlb_dup_vma_private() which is called at fork(),
1070
* the reserve counters are updated with the hugetlb_lock held. It is safe
1071
* to reset the VMA at fork() time as it is not in use yet and there is no
1072
* chance of the global counters getting corrupted as a result of the values.
1073
*
1074
* The private mapping reservation is represented in a subtly different
1075
* manner to a shared mapping. A shared mapping has a region map associated
1076
* with the underlying file, this region map represents the backing file
1077
* pages which have ever had a reservation assigned which this persists even
1078
* after the page is instantiated. A private mapping has a region map
1079
* associated with the original mmap which is attached to all VMAs which
1080
* reference it, this region map represents those offsets which have consumed
1081
* reservation ie. where pages have been instantiated.
1082
*/
1083
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
1084
{
1085
return (unsigned long)vma->vm_private_data;
1086
}
1087
1088
static void set_vma_private_data(struct vm_area_struct *vma,
1089
unsigned long value)
1090
{
1091
vma->vm_private_data = (void *)value;
1092
}
1093
1094
static void
1095
resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map,
1096
struct hugetlb_cgroup *h_cg,
1097
struct hstate *h)
1098
{
1099
#ifdef CONFIG_CGROUP_HUGETLB
1100
if (!h_cg || !h) {
1101
resv_map->reservation_counter = NULL;
1102
resv_map->pages_per_hpage = 0;
1103
resv_map->css = NULL;
1104
} else {
1105
resv_map->reservation_counter =
1106
&h_cg->rsvd_hugepage[hstate_index(h)];
1107
resv_map->pages_per_hpage = pages_per_huge_page(h);
1108
resv_map->css = &h_cg->css;
1109
}
1110
#endif
1111
}
1112
1113
struct resv_map *resv_map_alloc(void)
1114
{
1115
struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
1116
struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);
1117
1118
if (!resv_map || !rg) {
1119
kfree(resv_map);
1120
kfree(rg);
1121
return NULL;
1122
}
1123
1124
kref_init(&resv_map->refs);
1125
spin_lock_init(&resv_map->lock);
1126
INIT_LIST_HEAD(&resv_map->regions);
1127
init_rwsem(&resv_map->rw_sema);
1128
1129
resv_map->adds_in_progress = 0;
1130
/*
1131
* Initialize these to 0. On shared mappings, 0's here indicate these
1132
* fields don't do cgroup accounting. On private mappings, these will be
1133
* re-initialized to the proper values, to indicate that hugetlb cgroup
1134
* reservations are to be un-charged from here.
1135
*/
1136
resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, NULL, NULL);
1137
1138
INIT_LIST_HEAD(&resv_map->region_cache);
1139
list_add(&rg->link, &resv_map->region_cache);
1140
resv_map->region_cache_count = 1;
1141
1142
return resv_map;
1143
}
1144
1145
void resv_map_release(struct kref *ref)
1146
{
1147
struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
1148
struct list_head *head = &resv_map->region_cache;
1149
struct file_region *rg, *trg;
1150
1151
/* Clear out any active regions before we release the map. */
1152
region_del(resv_map, 0, LONG_MAX);
1153
1154
/* ... and any entries left in the cache */
1155
list_for_each_entry_safe(rg, trg, head, link) {
1156
list_del(&rg->link);
1157
kfree(rg);
1158
}
1159
1160
VM_BUG_ON(resv_map->adds_in_progress);
1161
1162
kfree(resv_map);
1163
}
1164
1165
static inline struct resv_map *inode_resv_map(struct inode *inode)
1166
{
1167
/*
1168
* At inode evict time, i_mapping may not point to the original
1169
* address space within the inode. This original address space
1170
* contains the pointer to the resv_map. So, always use the
1171
* address space embedded within the inode.
1172
* The VERY common case is inode->mapping == &inode->i_data, but
* this may not be true for device special inodes.
1174
*/
1175
return (struct resv_map *)(&inode->i_data)->i_private_data;
1176
}
1177
1178
static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
1179
{
1180
VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
1181
if (vma->vm_flags & VM_MAYSHARE) {
1182
struct address_space *mapping = vma->vm_file->f_mapping;
1183
struct inode *inode = mapping->host;
1184
1185
return inode_resv_map(inode);
1186
1187
} else {
1188
return (struct resv_map *)(get_vma_private_data(vma) &
1189
~HPAGE_RESV_MASK);
1190
}
1191
}
1192
1193
static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
1194
{
1195
VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
1196
VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
1197
1198
set_vma_private_data(vma, (unsigned long)map);
1199
}
1200
1201
static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
1202
{
1203
VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
1204
VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
1205
1206
set_vma_private_data(vma, get_vma_private_data(vma) | flags);
1207
}
1208
1209
static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
1210
{
1211
VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
1212
1213
return (get_vma_private_data(vma) & flag) != 0;
1214
}
1215
1216
bool __vma_private_lock(struct vm_area_struct *vma)
1217
{
1218
return !(vma->vm_flags & VM_MAYSHARE) &&
1219
get_vma_private_data(vma) & ~HPAGE_RESV_MASK &&
1220
is_vma_resv_set(vma, HPAGE_RESV_OWNER);
1221
}
1222
1223
void hugetlb_dup_vma_private(struct vm_area_struct *vma)
1224
{
1225
VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
1226
/*
1227
* Clear vm_private_data
1228
* - For shared mappings this is a per-vma semaphore that may be
1229
* allocated in a subsequent call to hugetlb_vm_op_open.
1230
* Before clearing, make sure pointer is not associated with vma
1231
* as this will leak the structure. This is the case when called
1232
* via clear_vma_resv_huge_pages() and hugetlb_vm_op_open has already
1233
* been called to allocate a new structure.
1234
* - For MAP_PRIVATE mappings, this is the reserve map which does
1235
* not apply to children. Faults generated by the children are
1236
* not guaranteed to succeed, even if read-only.
1237
*/
1238
if (vma->vm_flags & VM_MAYSHARE) {
1239
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
1240
1241
if (vma_lock && vma_lock->vma != vma)
1242
vma->vm_private_data = NULL;
1243
} else
1244
vma->vm_private_data = NULL;
1245
}
1246
1247
/*
1248
* Reset and decrement one ref on hugepage private reservation.
1249
* Called with mm->mmap_lock writer semaphore held.
1250
* This function should be only used by mremap and operate on
1251
* same sized vma. It should never come here with last ref on the
1252
* reservation.
1253
*/
1254
void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
1255
{
1256
/*
1257
* Clear the old hugetlb private page reservation.
1258
* It has already been transferred to new_vma.
1259
*
1260
* During a mremap() operation of a hugetlb vma we call move_vma()
1261
* which copies vma into new_vma and unmaps vma. After the copy
1262
* operation both new_vma and vma share a reference to the resv_map
1263
* struct, and at that point vma is about to be unmapped. We don't
1264
* want to return the reservation to the pool at unmap of vma because
1265
* the reservation still lives on in new_vma, so simply decrement the
1266
* ref here and remove the resv_map reference from this vma.
1267
*/
1268
struct resv_map *reservations = vma_resv_map(vma);
1269
1270
if (reservations && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1271
resv_map_put_hugetlb_cgroup_uncharge_info(reservations);
1272
kref_put(&reservations->refs, resv_map_release);
1273
}
1274
1275
hugetlb_dup_vma_private(vma);
1276
}
1277
1278
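/*
 * Place an unused hugetlb folio on its node's free list and bump the free
 * page counters. Caller must hold hugetlb_lock and the folio must have a
 * zero reference count.
 */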
static void enqueue_hugetlb_folio(struct hstate *h, struct folio *folio)
1279
{
1280
int nid = folio_nid(folio);
1281
1282
lockdep_assert_held(&hugetlb_lock);
1283
VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
1284
1285
list_move(&folio->lru, &h->hugepage_freelists[nid]);
1286
h->free_huge_pages++;
1287
h->free_huge_pages_node[nid]++;
1288
folio_set_hugetlb_freed(folio);
1289
}
1290
1291
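/*
 * Take a free hugetlb folio from the given node's free list, skipping
 * folios that are hwpoisoned, sit on isolated pageblocks, or are not
 * long-term pinnable when the caller is in a PF_MEMALLOC_PIN context.
 * Returns NULL if the node has no suitable free folio.
 */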
static struct folio *dequeue_hugetlb_folio_node_exact(struct hstate *h,
1292
int nid)
1293
{
1294
struct folio *folio;
1295
bool pin = !!(current->flags & PF_MEMALLOC_PIN);
1296
1297
lockdep_assert_held(&hugetlb_lock);
1298
list_for_each_entry(folio, &h->hugepage_freelists[nid], lru) {
1299
if (pin && !folio_is_longterm_pinnable(folio))
1300
continue;
1301
1302
if (folio_test_hwpoison(folio))
1303
continue;
1304
1305
if (is_migrate_isolate_page(&folio->page))
1306
continue;
1307
1308
list_move(&folio->lru, &h->hugepage_activelist);
1309
folio_ref_unfreeze(folio, 1);
1310
folio_clear_hugetlb_freed(folio);
1311
h->free_huge_pages--;
1312
h->free_huge_pages_node[nid]--;
1313
return folio;
1314
}
1315
1316
return NULL;
1317
}
1318
1319
static struct folio *dequeue_hugetlb_folio_nodemask(struct hstate *h, gfp_t gfp_mask,
1320
int nid, nodemask_t *nmask)
1321
{
1322
unsigned int cpuset_mems_cookie;
1323
struct zonelist *zonelist;
1324
struct zone *zone;
1325
struct zoneref *z;
1326
int node = NUMA_NO_NODE;
1327
1328
/* 'nid' should not be NUMA_NO_NODE. Try to catch any misuse of it and rectify. */
1329
if (nid == NUMA_NO_NODE)
1330
nid = numa_node_id();
1331
1332
zonelist = node_zonelist(nid, gfp_mask);
1333
1334
retry_cpuset:
1335
cpuset_mems_cookie = read_mems_allowed_begin();
1336
for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
1337
struct folio *folio;
1338
1339
if (!cpuset_zone_allowed(zone, gfp_mask))
1340
continue;
1341
/*
1342
* No need to ask again on the same node. The pool is node rather than
* zone aware.
1344
*/
1345
if (zone_to_nid(zone) == node)
1346
continue;
1347
node = zone_to_nid(zone);
1348
1349
folio = dequeue_hugetlb_folio_node_exact(h, node);
1350
if (folio)
1351
return folio;
1352
}
1353
if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
1354
goto retry_cpuset;
1355
1356
return NULL;
1357
}
1358
1359
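/* Number of free huge pages in the pool that are not set aside by reservations. */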
static unsigned long available_huge_pages(struct hstate *h)
1360
{
1361
return h->free_huge_pages - h->resv_huge_pages;
1362
}
1363
1364
static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
1365
struct vm_area_struct *vma,
1366
unsigned long address, long gbl_chg)
1367
{
1368
struct folio *folio = NULL;
1369
struct mempolicy *mpol;
1370
gfp_t gfp_mask;
1371
nodemask_t *nodemask;
1372
int nid;
1373
1374
/*
1375
* gbl_chg==1 means the allocation requires a new page that was not
1376
* reserved before, so make sure there is at least one free page.
1377
*/
1378
if (gbl_chg && !available_huge_pages(h))
1379
goto err;
1380
1381
gfp_mask = htlb_alloc_mask(h);
1382
nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
1383
1384
if (mpol_is_preferred_many(mpol)) {
1385
folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
1386
nid, nodemask);
1387
1388
/* Fallback to all nodes if page==NULL */
1389
nodemask = NULL;
1390
}
1391
1392
if (!folio)
1393
folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
1394
nid, nodemask);
1395
1396
mpol_cond_put(mpol);
1397
return folio;
1398
1399
err:
1400
return NULL;
1401
}
1402
1403
/*
1404
* common helper functions for hstate_next_node_to_{alloc|free}.
1405
* We may have allocated or freed a huge page based on a different
1406
* nodes_allowed previously, so h->next_node_to_{alloc|free} might
1407
* be outside of *nodes_allowed. Ensure that we use an allowed
1408
* node for alloc or free.
1409
*/
1410
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
1411
{
1412
nid = next_node_in(nid, *nodes_allowed);
1413
VM_BUG_ON(nid >= MAX_NUMNODES);
1414
1415
return nid;
1416
}
1417
1418
static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
1419
{
1420
if (!node_isset(nid, *nodes_allowed))
1421
nid = next_node_allowed(nid, nodes_allowed);
1422
return nid;
1423
}
1424
1425
/*
1426
* Returns the previously saved node ["this node"] from which to
* allocate a persistent huge page for the pool and advances the
1428
* next node from which to allocate, handling wrap at end of node
1429
* mask.
1430
*/
1431
static int hstate_next_node_to_alloc(int *next_node,
1432
nodemask_t *nodes_allowed)
1433
{
1434
int nid;
1435
1436
VM_BUG_ON(!nodes_allowed);
1437
1438
nid = get_valid_node_allowed(*next_node, nodes_allowed);
1439
*next_node = next_node_allowed(nid, nodes_allowed);
1440
1441
return nid;
1442
}
1443
1444
/*
1445
* helper for remove_pool_hugetlb_folio() - return the previously saved
1446
* node ["this node"] from which to free a huge page. Advance the
1447
* next node id whether or not we find a free huge page to free so
1448
* that the next attempt to free addresses the next node.
1449
*/
1450
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
1451
{
1452
int nid;
1453
1454
VM_BUG_ON(!nodes_allowed);
1455
1456
nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
1457
h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
1458
1459
return nid;
1460
}
1461
1462
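/*
 * Iterate over each node in *mask at most once, round-robin, starting from
 * and advancing the saved "next node to alloc/free" position.
 */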
#define for_each_node_mask_to_alloc(next_node, nr_nodes, node, mask) \
1463
for (nr_nodes = nodes_weight(*mask); \
1464
nr_nodes > 0 && \
1465
((node = hstate_next_node_to_alloc(next_node, mask)) || 1); \
1466
nr_nodes--)
1467
1468
#define for_each_node_mask_to_free(hs, nr_nodes, node, mask) \
1469
for (nr_nodes = nodes_weight(*mask); \
1470
nr_nodes > 0 && \
1471
((node = hstate_next_node_to_free(hs, mask)) || 1); \
1472
nr_nodes--)
1473
1474
#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
1475
#ifdef CONFIG_CONTIG_ALLOC
1476
static struct folio *alloc_gigantic_folio(int order, gfp_t gfp_mask,
1477
int nid, nodemask_t *nodemask)
1478
{
1479
struct folio *folio;
1480
bool retried = false;
1481
1482
retry:
1483
folio = hugetlb_cma_alloc_folio(order, gfp_mask, nid, nodemask);
1484
if (!folio) {
1485
if (hugetlb_cma_exclusive_alloc())
1486
return NULL;
1487
1488
folio = folio_alloc_gigantic(order, gfp_mask, nid, nodemask);
1489
if (!folio)
1490
return NULL;
1491
}
1492
1493
if (folio_ref_freeze(folio, 1))
1494
return folio;
1495
1496
pr_warn("HugeTLB: unexpected refcount on PFN %lu\n", folio_pfn(folio));
1497
hugetlb_free_folio(folio);
1498
if (!retried) {
1499
retried = true;
1500
goto retry;
1501
}
1502
return NULL;
1503
}
1504
1505
#else /* !CONFIG_CONTIG_ALLOC */
1506
static struct folio *alloc_gigantic_folio(int order, gfp_t gfp_mask, int nid,
1507
nodemask_t *nodemask)
1508
{
1509
return NULL;
1510
}
1511
#endif /* CONFIG_CONTIG_ALLOC */
1512
1513
#else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
1514
static struct folio *alloc_gigantic_folio(int order, gfp_t gfp_mask, int nid,
1515
nodemask_t *nodemask)
1516
{
1517
return NULL;
1518
}
1519
#endif
1520
1521
/*
1522
* Remove hugetlb folio from lists.
1523
* If vmemmap exists for the folio, clear the hugetlb flag so that the
1524
* folio appears as just a compound page. Otherwise, wait until after
1525
* allocating vmemmap to clear the flag.
1526
*
1527
* Must be called with hugetlb lock held.
1528
*/
1529
static void remove_hugetlb_folio(struct hstate *h, struct folio *folio,
1530
bool adjust_surplus)
1531
{
1532
int nid = folio_nid(folio);
1533
1534
VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio(folio), folio);
1535
VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio_rsvd(folio), folio);
1536
1537
lockdep_assert_held(&hugetlb_lock);
1538
if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
1539
return;
1540
1541
list_del(&folio->lru);
1542
1543
if (folio_test_hugetlb_freed(folio)) {
1544
folio_clear_hugetlb_freed(folio);
1545
h->free_huge_pages--;
1546
h->free_huge_pages_node[nid]--;
1547
}
1548
if (adjust_surplus) {
1549
h->surplus_huge_pages--;
1550
h->surplus_huge_pages_node[nid]--;
1551
}
1552
1553
/*
1554
* We can only clear the hugetlb flag after allocating vmemmap
1555
* pages. Otherwise, someone (memory error handling) may try to write
1556
* to tail struct pages.
1557
*/
1558
if (!folio_test_hugetlb_vmemmap_optimized(folio))
1559
__folio_clear_hugetlb(folio);
1560
1561
h->nr_huge_pages--;
1562
h->nr_huge_pages_node[nid]--;
1563
}
1564
1565
static void add_hugetlb_folio(struct hstate *h, struct folio *folio,
1566
bool adjust_surplus)
1567
{
1568
int nid = folio_nid(folio);
1569
1570
VM_BUG_ON_FOLIO(!folio_test_hugetlb_vmemmap_optimized(folio), folio);
1571
1572
lockdep_assert_held(&hugetlb_lock);
1573
1574
INIT_LIST_HEAD(&folio->lru);
1575
h->nr_huge_pages++;
1576
h->nr_huge_pages_node[nid]++;
1577
1578
if (adjust_surplus) {
1579
h->surplus_huge_pages++;
1580
h->surplus_huge_pages_node[nid]++;
1581
}
1582
1583
__folio_set_hugetlb(folio);
1584
folio_change_private(folio, NULL);
1585
/*
1586
* We have to set hugetlb_vmemmap_optimized again as above
1587
* folio_change_private(folio, NULL) cleared it.
1588
*/
1589
folio_set_hugetlb_vmemmap_optimized(folio);
1590
1591
arch_clear_hugetlb_flags(folio);
1592
enqueue_hugetlb_folio(h, folio);
1593
}
1594
1595
static void __update_and_free_hugetlb_folio(struct hstate *h,
1596
struct folio *folio)
1597
{
1598
bool clear_flag = folio_test_hugetlb_vmemmap_optimized(folio);
1599
1600
if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
1601
return;
1602
1603
/*
1604
* If we don't know which subpages are hwpoisoned, we can't free
1605
* the hugepage, so it's leaked intentionally.
1606
*/
1607
if (folio_test_hugetlb_raw_hwp_unreliable(folio))
1608
return;
1609
1610
/*
1611
* If folio is not vmemmap optimized (!clear_flag), then the folio
1612
* is no longer identified as a hugetlb page. hugetlb_vmemmap_restore_folio
1613
* can only be passed hugetlb pages and will BUG otherwise.
1614
*/
1615
if (clear_flag && hugetlb_vmemmap_restore_folio(h, folio)) {
1616
spin_lock_irq(&hugetlb_lock);
1617
/*
1618
* If we cannot allocate vmemmap pages, just refuse to free the
1619
* page and put the page back on the hugetlb free list and treat
1620
* as a surplus page.
1621
*/
1622
add_hugetlb_folio(h, folio, true);
1623
spin_unlock_irq(&hugetlb_lock);
1624
return;
1625
}
1626
1627
/*
1628
* If vmemmap pages were allocated above, then we need to clear the
1629
* hugetlb flag under the hugetlb lock.
1630
*/
1631
if (folio_test_hugetlb(folio)) {
1632
spin_lock_irq(&hugetlb_lock);
1633
__folio_clear_hugetlb(folio);
1634
spin_unlock_irq(&hugetlb_lock);
1635
}
1636
1637
/*
1638
* Move PageHWPoison flag from head page to the raw error pages,
1639
* which makes any healthy subpages reusable.
1640
*/
1641
if (unlikely(folio_test_hwpoison(folio)))
1642
folio_clear_hugetlb_hwpoison(folio);
1643
1644
folio_ref_unfreeze(folio, 1);
1645
1646
hugetlb_free_folio(folio);
1647
}
1648
1649
/*
1650
* Since update_and_free_hugetlb_folio() can be called under any context, we
* cannot use GFP_KERNEL to allocate vmemmap pages. However, we can defer the
* actual freeing to a workqueue so that GFP_ATOMIC is not needed to allocate
* the vmemmap pages.
1654
*
1655
* free_hpage_workfn() locklessly retrieves the linked list of pages to be
1656
* freed and frees them one-by-one. As the page->mapping pointer is going
1657
* to be cleared in free_hpage_workfn() anyway, it is reused as the llist_node
1658
* structure of a lockless linked list of huge pages to be freed.
1659
*/
1660
static LLIST_HEAD(hpage_freelist);
1661
1662
static void free_hpage_workfn(struct work_struct *work)
1663
{
1664
struct llist_node *node;
1665
1666
node = llist_del_all(&hpage_freelist);
1667
1668
while (node) {
1669
struct folio *folio;
1670
struct hstate *h;
1671
1672
folio = container_of((struct address_space **)node,
1673
struct folio, mapping);
1674
node = node->next;
1675
folio->mapping = NULL;
1676
/*
1677
* The VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio) in
1678
* folio_hstate() is going to trigger because a previous call to
1679
* remove_hugetlb_folio() will clear the hugetlb bit, so do
1680
* not use folio_hstate() directly.
1681
*/
1682
h = size_to_hstate(folio_size(folio));
1683
1684
__update_and_free_hugetlb_folio(h, folio);
1685
1686
cond_resched();
1687
}
1688
}
1689
static DECLARE_WORK(free_hpage_work, free_hpage_workfn);
1690
1691
static inline void flush_free_hpage_work(struct hstate *h)
1692
{
1693
if (hugetlb_vmemmap_optimizable(h))
1694
flush_work(&free_hpage_work);
1695
}
1696
1697
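/*
 * Free one hugetlb folio. If the folio's vmemmap has been optimized away and
 * the caller is in atomic context, defer the work to free_hpage_workfn() so
 * vmemmap pages can be allocated with GFP_KERNEL; otherwise free it directly.
 */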
static void update_and_free_hugetlb_folio(struct hstate *h, struct folio *folio,
1698
bool atomic)
1699
{
1700
if (!folio_test_hugetlb_vmemmap_optimized(folio) || !atomic) {
1701
__update_and_free_hugetlb_folio(h, folio);
1702
return;
1703
}
1704
1705
/*
1706
* Defer freeing to avoid using GFP_ATOMIC to allocate vmemmap pages.
1707
*
1708
* Only call schedule_work() if hpage_freelist is previously
1709
* empty. Otherwise, schedule_work() had been called but the workfn
1710
* hasn't retrieved the list yet.
1711
*/
1712
if (llist_add((struct llist_node *)&folio->mapping, &hpage_freelist))
1713
schedule_work(&free_hpage_work);
1714
}
1715
1716
static void bulk_vmemmap_restore_error(struct hstate *h,
1717
struct list_head *folio_list,
1718
struct list_head *non_hvo_folios)
1719
{
1720
struct folio *folio, *t_folio;
1721
1722
if (!list_empty(non_hvo_folios)) {
1723
/*
1724
* Free any restored hugetlb pages so that restore of the
1725
* entire list can be retried.
1726
* The idea is that in the common case of ENOMEM errors freeing
1727
* hugetlb pages with vmemmap we will free up memory so that we
1728
* can allocate vmemmap for more hugetlb pages.
1729
*/
1730
list_for_each_entry_safe(folio, t_folio, non_hvo_folios, lru) {
1731
list_del(&folio->lru);
1732
spin_lock_irq(&hugetlb_lock);
1733
__folio_clear_hugetlb(folio);
1734
spin_unlock_irq(&hugetlb_lock);
1735
update_and_free_hugetlb_folio(h, folio, false);
1736
cond_resched();
1737
}
1738
} else {
1739
/*
1740
* In the case where there are no folios which can be
1741
* immediately freed, we loop through the list trying to restore
1742
* vmemmap individually in the hope that someone elsewhere may
1743
* have done something to cause success (such as freeing some
1744
* memory). If unable to restore a hugetlb page, the hugetlb
1745
* page is made a surplus page and removed from the list.
1746
* If we are able to restore vmemmap and free one hugetlb page, we
1747
* quit processing the list to retry the bulk operation.
1748
*/
1749
list_for_each_entry_safe(folio, t_folio, folio_list, lru)
1750
if (hugetlb_vmemmap_restore_folio(h, folio)) {
1751
list_del(&folio->lru);
1752
spin_lock_irq(&hugetlb_lock);
1753
add_hugetlb_folio(h, folio, true);
1754
spin_unlock_irq(&hugetlb_lock);
1755
} else {
1756
list_del(&folio->lru);
1757
spin_lock_irq(&hugetlb_lock);
1758
__folio_clear_hugetlb(folio);
1759
spin_unlock_irq(&hugetlb_lock);
1760
update_and_free_hugetlb_folio(h, folio, false);
1761
cond_resched();
1762
break;
1763
}
1764
}
1765
}
1766
1767
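/*
 * Free a list of hugetlb folios: restore vmemmap for the whole list in bulk,
 * retrying after any error handling, then clear the hugetlb flag and free
 * each folio that no longer needs vmemmap restoration.
 */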
static void update_and_free_pages_bulk(struct hstate *h,
1768
struct list_head *folio_list)
1769
{
1770
long ret;
1771
struct folio *folio, *t_folio;
1772
LIST_HEAD(non_hvo_folios);
1773
1774
/*
1775
* First allocate the required vmemmap (if necessary) for all folios.
1776
* Carefully handle errors and free up any available hugetlb pages
1777
* in an effort to make forward progress.
1778
*/
1779
retry:
1780
ret = hugetlb_vmemmap_restore_folios(h, folio_list, &non_hvo_folios);
1781
if (ret < 0) {
1782
bulk_vmemmap_restore_error(h, folio_list, &non_hvo_folios);
1783
goto retry;
1784
}
1785
1786
/*
1787
* At this point, list should be empty, ret should be >= 0 and there
1788
* should only be pages on the non_hvo_folios list.
1789
* Do note that the non_hvo_folios list could be empty.
1790
* Without HVO enabled, ret will be 0 and there is no need to call
1791
* __folio_clear_hugetlb as this was done previously.
1792
*/
1793
VM_WARN_ON(!list_empty(folio_list));
1794
VM_WARN_ON(ret < 0);
1795
if (!list_empty(&non_hvo_folios) && ret) {
1796
spin_lock_irq(&hugetlb_lock);
1797
list_for_each_entry(folio, &non_hvo_folios, lru)
1798
__folio_clear_hugetlb(folio);
1799
spin_unlock_irq(&hugetlb_lock);
1800
}
1801
1802
list_for_each_entry_safe(folio, t_folio, &non_hvo_folios, lru) {
1803
update_and_free_hugetlb_folio(h, folio, false);
1804
cond_resched();
1805
}
1806
}
1807
1808
struct hstate *size_to_hstate(unsigned long size)
1809
{
1810
struct hstate *h;
1811
1812
for_each_hstate(h) {
1813
if (huge_page_size(h) == size)
1814
return h;
1815
}
1816
return NULL;
1817
}
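/*
 * Illustrative example (not in the original source): look up the hstate
 * for a given size, e.g. 2 MiB; NULL is returned if no such pool exists.
 *
 *	struct hstate *h = size_to_hstate(SZ_2M);
 *	if (h)
 *		pr_debug("2MiB pool has %lu free pages\n", h->free_huge_pages);
 */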
1818
1819
void free_huge_folio(struct folio *folio)
1820
{
1821
/*
1822
* Can't pass hstate in here because it is called from the
1823
* generic mm code.
1824
*/
1825
struct hstate *h = folio_hstate(folio);
1826
int nid = folio_nid(folio);
1827
struct hugepage_subpool *spool = hugetlb_folio_subpool(folio);
1828
bool restore_reserve;
1829
unsigned long flags;
1830
1831
VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
1832
VM_BUG_ON_FOLIO(folio_mapcount(folio), folio);
1833
1834
hugetlb_set_folio_subpool(folio, NULL);
1835
if (folio_test_anon(folio))
1836
__ClearPageAnonExclusive(&folio->page);
1837
folio->mapping = NULL;
1838
restore_reserve = folio_test_hugetlb_restore_reserve(folio);
1839
folio_clear_hugetlb_restore_reserve(folio);
1840
1841
/*
1842
* If HPageRestoreReserve was set on page, page allocation consumed a
1843
* reservation. If the page was associated with a subpool, there
1844
* would have been a page reserved in the subpool before allocation
1845
* via hugepage_subpool_get_pages(). Since we are 'restoring' the
1846
* reservation, do not call hugepage_subpool_put_pages() as this will
1847
* remove the reserved page from the subpool.
1848
*/
1849
if (!restore_reserve) {
1850
/*
1851
* A return code of zero implies that the subpool will be
1852
* under its minimum size if the reservation is not restored
1853
* after page is free. Therefore, force restore_reserve
1854
* operation.
1855
*/
1856
if (hugepage_subpool_put_pages(spool, 1) == 0)
1857
restore_reserve = true;
1858
}
1859
1860
spin_lock_irqsave(&hugetlb_lock, flags);
1861
folio_clear_hugetlb_migratable(folio);
1862
hugetlb_cgroup_uncharge_folio(hstate_index(h),
1863
pages_per_huge_page(h), folio);
1864
hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
1865
pages_per_huge_page(h), folio);
1866
lruvec_stat_mod_folio(folio, NR_HUGETLB, -pages_per_huge_page(h));
1867
mem_cgroup_uncharge(folio);
1868
if (restore_reserve)
1869
h->resv_huge_pages++;
1870
1871
if (folio_test_hugetlb_temporary(folio)) {
1872
remove_hugetlb_folio(h, folio, false);
1873
spin_unlock_irqrestore(&hugetlb_lock, flags);
1874
update_and_free_hugetlb_folio(h, folio, true);
1875
} else if (h->surplus_huge_pages_node[nid]) {
1876
/* remove the page from active list */
1877
remove_hugetlb_folio(h, folio, true);
1878
spin_unlock_irqrestore(&hugetlb_lock, flags);
1879
update_and_free_hugetlb_folio(h, folio, true);
1880
} else {
1881
arch_clear_hugetlb_flags(folio);
1882
enqueue_hugetlb_folio(h, folio);
1883
spin_unlock_irqrestore(&hugetlb_lock, flags);
1884
}
1885
}
1886
1887
/*
1888
* Must be called with the hugetlb lock held
1889
*/
1890
static void account_new_hugetlb_folio(struct hstate *h, struct folio *folio)
1891
{
1892
lockdep_assert_held(&hugetlb_lock);
1893
h->nr_huge_pages++;
1894
h->nr_huge_pages_node[folio_nid(folio)]++;
1895
}
1896
1897
static void init_new_hugetlb_folio(struct folio *folio)
1898
{
1899
__folio_set_hugetlb(folio);
1900
INIT_LIST_HEAD(&folio->lru);
1901
hugetlb_set_folio_subpool(folio, NULL);
1902
set_hugetlb_cgroup(folio, NULL);
1903
set_hugetlb_cgroup_rsvd(folio, NULL);
1904
}
1905
1906
/*
1907
* Find and lock address space (mapping) in write mode.
1908
*
1909
* Upon entry, the folio is locked which means that folio_mapping() is
1910
* stable. Due to locking order, we can only trylock_write. If we can
1911
* not get the lock, simply return NULL to caller.
1912
*/
1913
struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio)
1914
{
1915
struct address_space *mapping = folio_mapping(folio);
1916
1917
if (!mapping)
1918
return mapping;
1919
1920
if (i_mmap_trylock_write(mapping))
1921
return mapping;
1922
1923
return NULL;
1924
}
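/*
 * Illustrative sketch (not in the original source): the expected caller
 * pattern for hugetlb_folio_mapping_lock_write(); the matching unlock is
 * i_mmap_unlock_write().
 *
 *	mapping = hugetlb_folio_mapping_lock_write(folio);
 *	if (mapping) {
 *		... walk the rmap with i_mmap_rwsem held for write ...
 *		i_mmap_unlock_write(mapping);
 *	}
 */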
1925
1926
static struct folio *alloc_buddy_hugetlb_folio(int order, gfp_t gfp_mask,
1927
int nid, nodemask_t *nmask, nodemask_t *node_alloc_noretry)
1928
{
1929
struct folio *folio;
1930
bool alloc_try_hard = true;
1931
1932
/*
1933
* By default we always try hard to allocate the folio with
1934
* __GFP_RETRY_MAYFAIL flag. However, if we are allocating folios in
1935
* a loop (to adjust global huge page counts) and previous allocation
1936
* failed, do not continue to try hard on the same node. Use the
1937
* node_alloc_noretry bitmap to manage this state information.
1938
*/
1939
if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry))
1940
alloc_try_hard = false;
1941
if (alloc_try_hard)
1942
gfp_mask |= __GFP_RETRY_MAYFAIL;
1943
1944
folio = (struct folio *)__alloc_frozen_pages(gfp_mask, order, nid, nmask);
1945
1946
/*
1947
* If we did not specify __GFP_RETRY_MAYFAIL, but still got a
1948
* folio this indicates an overall state change. Clear bit so
1949
* that we resume normal 'try hard' allocations.
1950
*/
1951
if (node_alloc_noretry && folio && !alloc_try_hard)
1952
node_clear(nid, *node_alloc_noretry);
1953
1954
/*
1955
* If we tried hard to get a folio but failed, set bit so that
1956
* subsequent attempts will not try as hard until there is an
1957
* overall state change.
1958
*/
1959
if (node_alloc_noretry && !folio && alloc_try_hard)
1960
node_set(nid, *node_alloc_noretry);
1961
1962
if (!folio) {
1963
__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1964
return NULL;
1965
}
1966
1967
__count_vm_event(HTLB_BUDDY_PGALLOC);
1968
return folio;
1969
}
1970
1971
static struct folio *only_alloc_fresh_hugetlb_folio(struct hstate *h,
1972
gfp_t gfp_mask, int nid, nodemask_t *nmask,
1973
nodemask_t *node_alloc_noretry)
1974
{
1975
struct folio *folio;
1976
int order = huge_page_order(h);
1977
1978
if (nid == NUMA_NO_NODE)
1979
nid = numa_mem_id();
1980
1981
if (order_is_gigantic(order))
1982
folio = alloc_gigantic_folio(order, gfp_mask, nid, nmask);
1983
else
1984
folio = alloc_buddy_hugetlb_folio(order, gfp_mask, nid, nmask,
1985
node_alloc_noretry);
1986
if (folio)
1987
init_new_hugetlb_folio(folio);
1988
return folio;
1989
}
1990
1991
/*
1992
* Common helper to allocate a fresh hugetlb folio. All specific allocators
1993
* should use this function to get a new hugetlb folio.
1994
*
1995
* Note that returned folio is 'frozen': ref count of head page and all tail
1996
* pages is zero, and the accounting must be done in the caller.
1997
*/
1998
static struct folio *alloc_fresh_hugetlb_folio(struct hstate *h,
1999
gfp_t gfp_mask, int nid, nodemask_t *nmask)
2000
{
2001
struct folio *folio;
2002
2003
folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
2004
if (folio)
2005
hugetlb_vmemmap_optimize_folio(h, folio);
2006
return folio;
2007
}
2008
2009
static void prep_and_add_allocated_folios(struct hstate *h,
2010
struct list_head *folio_list)
2011
{
2012
unsigned long flags;
2013
struct folio *folio, *tmp_f;
2014
2015
/* Send list for bulk vmemmap optimization processing */
2016
hugetlb_vmemmap_optimize_folios(h, folio_list);
2017
2018
/* Add all new pool pages to free lists in one lock cycle */
2019
spin_lock_irqsave(&hugetlb_lock, flags);
2020
list_for_each_entry_safe(folio, tmp_f, folio_list, lru) {
2021
account_new_hugetlb_folio(h, folio);
2022
enqueue_hugetlb_folio(h, folio);
2023
}
2024
spin_unlock_irqrestore(&hugetlb_lock, flags);
2025
}
2026
2027
/*
2028
* Allocates a fresh hugetlb page in a node interleaved manner. The page
2029
* will later be added to the appropriate hugetlb pool.
2030
*/
2031
static struct folio *alloc_pool_huge_folio(struct hstate *h,
2032
nodemask_t *nodes_allowed,
2033
nodemask_t *node_alloc_noretry,
2034
int *next_node)
2035
{
2036
gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
2037
int nr_nodes, node;
2038
2039
for_each_node_mask_to_alloc(next_node, nr_nodes, node, nodes_allowed) {
2040
struct folio *folio;
2041
2042
folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, node,
2043
nodes_allowed, node_alloc_noretry);
2044
if (folio)
2045
return folio;
2046
}
2047
2048
return NULL;
2049
}
2050
2051
/*
2052
* Remove huge page from pool from next node to free. Attempt to keep
2053
* persistent huge pages more or less balanced over allowed nodes.
2054
* This routine only 'removes' the hugetlb page. The caller must make
2055
* an additional call to free the page to low level allocators.
2056
* Called with hugetlb_lock locked.
2057
*/
2058
static struct folio *remove_pool_hugetlb_folio(struct hstate *h,
2059
nodemask_t *nodes_allowed, bool acct_surplus)
2060
{
2061
int nr_nodes, node;
2062
struct folio *folio = NULL;
2063
2064
lockdep_assert_held(&hugetlb_lock);
2065
for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2066
/*
2067
* If we're returning unused surplus pages, only examine
2068
* nodes with surplus pages.
2069
*/
2070
if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
2071
!list_empty(&h->hugepage_freelists[node])) {
2072
folio = list_entry(h->hugepage_freelists[node].next,
2073
struct folio, lru);
2074
remove_hugetlb_folio(h, folio, acct_surplus);
2075
break;
2076
}
2077
}
2078
2079
return folio;
2080
}
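/*
 * Illustrative sketch (not part of the original file): how the two helpers
 * above are typically combined - pages are pulled off the free lists under
 * hugetlb_lock and then released in bulk with the lock dropped, because
 * vmemmap restoration may sleep. This mirrors return_unused_surplus_pages()
 * further down; example_shrink_pool() is a hypothetical name.
 */
#if 0	/* example only, not compiled */
static void example_shrink_pool(struct hstate *h, unsigned long count)
{
	LIST_HEAD(page_list);
	struct folio *folio;

	spin_lock_irq(&hugetlb_lock);
	while (count--) {
		folio = remove_pool_hugetlb_folio(h, &node_states[N_MEMORY],
						  false);
		if (!folio)
			break;
		list_add(&folio->lru, &page_list);
	}
	spin_unlock_irq(&hugetlb_lock);

	update_and_free_pages_bulk(h, &page_list);
}
#endif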
2081
2082
/*
2083
* Dissolve a given free hugetlb folio into free buddy pages. This function
2084
* does nothing for in-use hugetlb folios and non-hugetlb folios.
2085
* This function returns values like below:
2086
*
2087
* -ENOMEM: failed to allocate vmemmap pages to free the freed hugepages
2088
* when the system is under memory pressure and the feature of
2089
* freeing unused vmemmap pages associated with each hugetlb page
2090
* is enabled.
2091
* -EBUSY: failed to dissolve free hugepages or the hugepage is in-use
2092
* (allocated or reserved.)
2093
* 0: successfully dissolved free hugepages or the page is not a
2094
* hugepage (considered as already dissolved)
2095
*/
2096
int dissolve_free_hugetlb_folio(struct folio *folio)
2097
{
2098
int rc = -EBUSY;
2099
2100
retry:
2101
/* Not to disrupt normal path by vainly holding hugetlb_lock */
2102
if (!folio_test_hugetlb(folio))
2103
return 0;
2104
2105
spin_lock_irq(&hugetlb_lock);
2106
if (!folio_test_hugetlb(folio)) {
2107
rc = 0;
2108
goto out;
2109
}
2110
2111
if (!folio_ref_count(folio)) {
2112
struct hstate *h = folio_hstate(folio);
2113
bool adjust_surplus = false;
2114
2115
if (!available_huge_pages(h))
2116
goto out;
2117
2118
/*
2119
* We should make sure that the page is already on the free list
2120
* when it is dissolved.
2121
*/
2122
if (unlikely(!folio_test_hugetlb_freed(folio))) {
2123
spin_unlock_irq(&hugetlb_lock);
2124
cond_resched();
2125
2126
/*
2127
* Theoretically, we should return -EBUSY when we
2128
* encounter this race. In fact, we have a chance
2129
* to successfully dissolve the page if we do a
2130
* retry, because the race window is quite small.
2131
* If we seize this opportunity, it is an optimization
2132
* for increasing the success rate of dissolving the page.
2133
*/
2134
goto retry;
2135
}
2136
2137
if (h->surplus_huge_pages_node[folio_nid(folio)])
2138
adjust_surplus = true;
2139
remove_hugetlb_folio(h, folio, adjust_surplus);
2140
h->max_huge_pages--;
2141
spin_unlock_irq(&hugetlb_lock);
2142
2143
/*
2144
* Normally update_and_free_hugetlb_folio will allocate required vmemmap
2145
* before freeing the page. update_and_free_hugetlb_folio will fail to
2146
* free the page if it can not allocate required vmemmap. We
2147
* need to adjust max_huge_pages if the page is not freed.
2148
* Attempt to allocate vmemmap here so that we can take
2149
* appropriate action on failure.
2150
*
2151
* The folio_test_hugetlb check here is because
2152
* remove_hugetlb_folio will clear hugetlb folio flag for
2153
* non-vmemmap optimized hugetlb folios.
2154
*/
2155
if (folio_test_hugetlb(folio)) {
2156
rc = hugetlb_vmemmap_restore_folio(h, folio);
2157
if (rc) {
2158
spin_lock_irq(&hugetlb_lock);
2159
add_hugetlb_folio(h, folio, adjust_surplus);
2160
h->max_huge_pages++;
2161
goto out;
2162
}
2163
} else
2164
rc = 0;
2165
2166
update_and_free_hugetlb_folio(h, folio, false);
2167
return rc;
2168
}
2169
out:
2170
spin_unlock_irq(&hugetlb_lock);
2171
return rc;
2172
}
2173
2174
/*
2175
* Dissolve free hugepages in a given pfn range. Used by memory hotplug to
2176
* make specified memory blocks removable from the system.
2177
* Note that this will dissolve a free gigantic hugepage completely, if any
2178
* part of it lies within the given range.
2179
* Also note that if dissolve_free_hugetlb_folio() returns with an error, all
2180
* free hugetlb folios that were dissolved before that error are lost.
2181
*/
2182
int dissolve_free_hugetlb_folios(unsigned long start_pfn, unsigned long end_pfn)
2183
{
2184
unsigned long pfn;
2185
struct folio *folio;
2186
int rc = 0;
2187
unsigned int order;
2188
struct hstate *h;
2189
2190
if (!hugepages_supported())
2191
return rc;
2192
2193
order = huge_page_order(&default_hstate);
2194
for_each_hstate(h)
2195
order = min(order, huge_page_order(h));
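/*
 * Editor's note (worked example, not in the original source): with both
 * 2 MiB (order 9) and 1 GiB (order 18) hstates registered, the minimum
 * order is 9, so the loop below steps through the range in 512-pfn
 * increments; since huge pages are naturally aligned, every huge page
 * overlapping the range is still probed at least once.
 */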
2196
2197
for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order) {
2198
folio = pfn_folio(pfn);
2199
rc = dissolve_free_hugetlb_folio(folio);
2200
if (rc)
2201
break;
2202
}
2203
2204
return rc;
2205
}
2206
2207
/*
2208
* Allocates a fresh surplus page from the page allocator.
2209
*/
2210
static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
2211
gfp_t gfp_mask, int nid, nodemask_t *nmask)
2212
{
2213
struct folio *folio = NULL;
2214
2215
if (hstate_is_gigantic(h))
2216
return NULL;
2217
2218
spin_lock_irq(&hugetlb_lock);
2219
if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
2220
goto out_unlock;
2221
spin_unlock_irq(&hugetlb_lock);
2222
2223
folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask);
2224
if (!folio)
2225
return NULL;
2226
2227
spin_lock_irq(&hugetlb_lock);
2228
/*
2229
* nr_huge_pages needs to be adjusted within the same lock cycle
2230
* as surplus_pages, otherwise it might confuse
2231
* persistent_huge_pages() momentarily.
2232
*/
2233
account_new_hugetlb_folio(h, folio);
2234
2235
/*
2236
* We could have raced with the pool size change.
2237
* Double check that and simply deallocate the new page
2238
* if we would end up overcommiting the surpluses. Abuse
2239
* temporary page to workaround the nasty free_huge_folio
2240
* codeflow
2241
*/
2242
if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
2243
folio_set_hugetlb_temporary(folio);
2244
spin_unlock_irq(&hugetlb_lock);
2245
free_huge_folio(folio);
2246
return NULL;
2247
}
2248
2249
h->surplus_huge_pages++;
2250
h->surplus_huge_pages_node[folio_nid(folio)]++;
2251
2252
out_unlock:
2253
spin_unlock_irq(&hugetlb_lock);
2254
2255
return folio;
2256
}
2257
2258
static struct folio *alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mask,
2259
int nid, nodemask_t *nmask)
2260
{
2261
struct folio *folio;
2262
2263
if (hstate_is_gigantic(h))
2264
return NULL;
2265
2266
folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask);
2267
if (!folio)
2268
return NULL;
2269
2270
spin_lock_irq(&hugetlb_lock);
2271
account_new_hugetlb_folio(h, folio);
2272
spin_unlock_irq(&hugetlb_lock);
2273
2274
/* fresh huge pages are frozen */
2275
folio_ref_unfreeze(folio, 1);
2276
/*
2277
* We do not account these pages as surplus because they are only
2278
* temporary and will be released properly on the last reference
2279
*/
2280
folio_set_hugetlb_temporary(folio);
2281
2282
return folio;
2283
}
2284
2285
/*
2286
* Use the VMA's mpolicy to allocate a huge page from the buddy.
2287
*/
2288
static
2289
struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h,
2290
struct vm_area_struct *vma, unsigned long addr)
2291
{
2292
struct folio *folio = NULL;
2293
struct mempolicy *mpol;
2294
gfp_t gfp_mask = htlb_alloc_mask(h);
2295
int nid;
2296
nodemask_t *nodemask;
2297
2298
nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
2299
if (mpol_is_preferred_many(mpol)) {
2300
gfp_t gfp = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2301
2302
folio = alloc_surplus_hugetlb_folio(h, gfp, nid, nodemask);
2303
2304
/* Fallback to all nodes if page==NULL */
2305
nodemask = NULL;
2306
}
2307
2308
if (!folio)
2309
folio = alloc_surplus_hugetlb_folio(h, gfp_mask, nid, nodemask);
2310
mpol_cond_put(mpol);
2311
return folio;
2312
}
2313
2314
struct folio *alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
2315
nodemask_t *nmask, gfp_t gfp_mask)
2316
{
2317
struct folio *folio;
2318
2319
spin_lock_irq(&hugetlb_lock);
2320
if (!h->resv_huge_pages) {
2321
spin_unlock_irq(&hugetlb_lock);
2322
return NULL;
2323
}
2324
2325
folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, preferred_nid,
2326
nmask);
2327
if (folio)
2328
h->resv_huge_pages--;
2329
2330
spin_unlock_irq(&hugetlb_lock);
2331
return folio;
2332
}
2333
2334
/* folio migration callback function */
2335
struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
2336
nodemask_t *nmask, gfp_t gfp_mask, bool allow_alloc_fallback)
2337
{
2338
spin_lock_irq(&hugetlb_lock);
2339
if (available_huge_pages(h)) {
2340
struct folio *folio;
2341
2342
folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
2343
preferred_nid, nmask);
2344
if (folio) {
2345
spin_unlock_irq(&hugetlb_lock);
2346
return folio;
2347
}
2348
}
2349
spin_unlock_irq(&hugetlb_lock);
2350
2351
/* We cannot fallback to other nodes, as we could break the per-node pool. */
2352
if (!allow_alloc_fallback)
2353
gfp_mask |= __GFP_THISNODE;
2354
2355
return alloc_migrate_hugetlb_folio(h, gfp_mask, preferred_nid, nmask);
2356
}
2357
2358
static nodemask_t *policy_mbind_nodemask(gfp_t gfp)
2359
{
2360
#ifdef CONFIG_NUMA
2361
struct mempolicy *mpol = get_task_policy(current);
2362
2363
/*
2364
* Only enforce MPOL_BIND policy which overlaps with cpuset policy
2365
* (from policy_nodemask) specifically for hugetlb case
2366
*/
2367
if (mpol->mode == MPOL_BIND &&
2368
(apply_policy_zone(mpol, gfp_zone(gfp)) &&
2369
cpuset_nodemask_valid_mems_allowed(&mpol->nodes)))
2370
return &mpol->nodes;
2371
#endif
2372
return NULL;
2373
}
2374
2375
/*
2376
* Increase the hugetlb pool such that it can accommodate a reservation
2377
* of size 'delta'.
2378
*/
2379
static int gather_surplus_pages(struct hstate *h, long delta)
2380
__must_hold(&hugetlb_lock)
2381
{
2382
LIST_HEAD(surplus_list);
2383
struct folio *folio, *tmp;
2384
int ret;
2385
long i;
2386
long needed, allocated;
2387
bool alloc_ok = true;
2388
nodemask_t *mbind_nodemask, alloc_nodemask;
2389
2390
mbind_nodemask = policy_mbind_nodemask(htlb_alloc_mask(h));
2391
if (mbind_nodemask)
2392
nodes_and(alloc_nodemask, *mbind_nodemask, cpuset_current_mems_allowed);
2393
else
2394
alloc_nodemask = cpuset_current_mems_allowed;
2395
2396
lockdep_assert_held(&hugetlb_lock);
2397
needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
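/*
 * Editor's note (worked example, not in the original source): with
 * resv_huge_pages = 10, delta = 5 and free_huge_pages = 12, the pool
 * needs (10 + 5) - 12 = 3 additional surplus pages before the
 * reservation can be committed; needed <= 0 means the free pool
 * already covers the new reservation.
 */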
2398
if (needed <= 0) {
2399
h->resv_huge_pages += delta;
2400
return 0;
2401
}
2402
2403
allocated = 0;
2404
2405
ret = -ENOMEM;
2406
retry:
2407
spin_unlock_irq(&hugetlb_lock);
2408
for (i = 0; i < needed; i++) {
2409
folio = NULL;
2410
2411
/*
2412
* It is okay to use NUMA_NO_NODE because we use numa_mem_id()
2413
* down the road to pick the current node if that is the case.
2414
*/
2415
folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h),
2416
NUMA_NO_NODE, &alloc_nodemask);
2417
if (!folio) {
2418
alloc_ok = false;
2419
break;
2420
}
2421
list_add(&folio->lru, &surplus_list);
2422
cond_resched();
2423
}
2424
allocated += i;
2425
2426
/*
2427
* After retaking hugetlb_lock, we need to recalculate 'needed'
2428
* because either resv_huge_pages or free_huge_pages may have changed.
2429
*/
2430
spin_lock_irq(&hugetlb_lock);
2431
needed = (h->resv_huge_pages + delta) -
2432
(h->free_huge_pages + allocated);
2433
if (needed > 0) {
2434
if (alloc_ok)
2435
goto retry;
2436
/*
2437
* We were not able to allocate enough pages to
2438
* satisfy the entire reservation so we free what
2439
* we've allocated so far.
2440
*/
2441
goto free;
2442
}
2443
/*
2444
* The surplus_list now contains _at_least_ the number of extra pages
2445
* needed to accommodate the reservation. Add the appropriate number
2446
* of pages to the hugetlb pool and free the extras back to the buddy
2447
* allocator. Commit the entire reservation here to prevent another
2448
* process from stealing the pages as they are added to the pool but
2449
* before they are reserved.
2450
*/
2451
needed += allocated;
2452
h->resv_huge_pages += delta;
2453
ret = 0;
2454
2455
/* Free the needed pages to the hugetlb pool */
2456
list_for_each_entry_safe(folio, tmp, &surplus_list, lru) {
2457
if ((--needed) < 0)
2458
break;
2459
/* Add the page to the hugetlb allocator */
2460
enqueue_hugetlb_folio(h, folio);
2461
}
2462
free:
2463
spin_unlock_irq(&hugetlb_lock);
2464
2465
/*
2466
* Free unnecessary surplus pages to the buddy allocator.
2467
* Pages have no ref count, call free_huge_folio directly.
2468
*/
2469
list_for_each_entry_safe(folio, tmp, &surplus_list, lru)
2470
free_huge_folio(folio);
2471
spin_lock_irq(&hugetlb_lock);
2472
2473
return ret;
2474
}
2475
2476
/*
2477
* This routine has two main purposes:
2478
* 1) Decrement the reservation count (resv_huge_pages) by the value passed
2479
* in unused_resv_pages. This corresponds to the prior adjustments made
2480
* to the associated reservation map.
2481
* 2) Free any unused surplus pages that may have been allocated to satisfy
2482
* the reservation. As many as unused_resv_pages may be freed.
2483
*/
2484
static void return_unused_surplus_pages(struct hstate *h,
2485
unsigned long unused_resv_pages)
2486
{
2487
unsigned long nr_pages;
2488
LIST_HEAD(page_list);
2489
2490
lockdep_assert_held(&hugetlb_lock);
2491
/* Uncommit the reservation */
2492
h->resv_huge_pages -= unused_resv_pages;
2493
2494
if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
2495
goto out;
2496
2497
/*
2498
* Part (or even all) of the reservation could have been backed
2499
* by pre-allocated pages. Only free surplus pages.
2500
*/
2501
nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
2502
2503
/*
2504
* We want to release as many surplus pages as possible, spread
2505
* evenly across all nodes with memory. Iterate across these nodes
2506
* until we can no longer free unreserved surplus pages. This occurs
2507
* when the nodes with surplus pages have no free pages.
2508
* remove_pool_hugetlb_folio() will balance the freed pages across the
2509
* on-line nodes with memory and will handle the hstate accounting.
2510
*/
2511
while (nr_pages--) {
2512
struct folio *folio;
2513
2514
folio = remove_pool_hugetlb_folio(h, &node_states[N_MEMORY], 1);
2515
if (!folio)
2516
goto out;
2517
2518
list_add(&folio->lru, &page_list);
2519
}
2520
2521
out:
2522
spin_unlock_irq(&hugetlb_lock);
2523
update_and_free_pages_bulk(h, &page_list);
2524
spin_lock_irq(&hugetlb_lock);
2525
}
2526
2527
2528
/*
2529
* vma_needs_reservation, vma_commit_reservation and vma_end_reservation
2530
* are used by the huge page allocation routines to manage reservations.
2531
*
2532
* vma_needs_reservation is called to determine if the huge page at addr
2533
* within the vma has an associated reservation. If a reservation is
2534
* needed, the value 1 is returned. The caller is then responsible for
2535
* managing the global reservation and subpool usage counts. After
2536
* the huge page has been allocated, vma_commit_reservation is called
2537
* to add the page to the reservation map. If the page allocation fails,
2538
* the reservation must be ended instead of committed. vma_end_reservation
2539
* is called in such cases.
2540
*
2541
* In the normal case, vma_commit_reservation returns the same value
2542
* as the preceding vma_needs_reservation call. The only time this
2543
* is not the case is if a reserve map was changed between calls. It
2544
* is the responsibility of the caller to notice the difference and
2545
* take appropriate action.
2546
*
2547
* vma_add_reservation is used in error paths where a reservation must
2548
* be restored when a newly allocated huge page must be freed. It is
2549
* to be called after calling vma_needs_reservation to determine if a
2550
* reservation exists.
2551
*
2552
* vma_del_reservation is used in error paths where an entry in the reserve
2553
* map was created during huge page allocation and must be removed. It is to
2554
* be called after calling vma_needs_reservation to determine if a reservation
2555
* exists.
2556
*/
2557
enum vma_resv_mode {
2558
VMA_NEEDS_RESV,
2559
VMA_COMMIT_RESV,
2560
VMA_END_RESV,
2561
VMA_ADD_RESV,
2562
VMA_DEL_RESV,
2563
};
2564
static long __vma_reservation_common(struct hstate *h,
2565
struct vm_area_struct *vma, unsigned long addr,
2566
enum vma_resv_mode mode)
2567
{
2568
struct resv_map *resv;
2569
pgoff_t idx;
2570
long ret;
2571
long dummy_out_regions_needed;
2572
2573
resv = vma_resv_map(vma);
2574
if (!resv)
2575
return 1;
2576
2577
idx = vma_hugecache_offset(h, vma, addr);
2578
switch (mode) {
2579
case VMA_NEEDS_RESV:
2580
ret = region_chg(resv, idx, idx + 1, &dummy_out_regions_needed);
2581
/* We assume that vma_reservation_* routines always operate on
2582
* 1 page, and that adding to resv map a 1 page entry can only
2583
* ever require 1 region.
2584
*/
2585
VM_BUG_ON(dummy_out_regions_needed != 1);
2586
break;
2587
case VMA_COMMIT_RESV:
2588
ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2589
/* region_add calls of range 1 should never fail. */
2590
VM_BUG_ON(ret < 0);
2591
break;
2592
case VMA_END_RESV:
2593
region_abort(resv, idx, idx + 1, 1);
2594
ret = 0;
2595
break;
2596
case VMA_ADD_RESV:
2597
if (vma->vm_flags & VM_MAYSHARE) {
2598
ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2599
/* region_add calls of range 1 should never fail. */
2600
VM_BUG_ON(ret < 0);
2601
} else {
2602
region_abort(resv, idx, idx + 1, 1);
2603
ret = region_del(resv, idx, idx + 1);
2604
}
2605
break;
2606
case VMA_DEL_RESV:
2607
if (vma->vm_flags & VM_MAYSHARE) {
2608
region_abort(resv, idx, idx + 1, 1);
2609
ret = region_del(resv, idx, idx + 1);
2610
} else {
2611
ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2612
/* region_add calls of range 1 should never fail. */
2613
VM_BUG_ON(ret < 0);
2614
}
2615
break;
2616
default:
2617
BUG();
2618
}
2619
2620
if (vma->vm_flags & VM_MAYSHARE || mode == VMA_DEL_RESV)
2621
return ret;
2622
/*
2623
* We know private mapping must have HPAGE_RESV_OWNER set.
2624
*
2625
* In most cases, reserves always exist for private mappings.
2626
* However, a file associated with mapping could have been
2627
* hole punched or truncated after reserves were consumed.
2628
* A subsequent fault on such a range will not use reserves.
2629
* Subtle - The reserve map for private mappings has the
2630
* opposite meaning from that of shared mappings. If NO
2631
* entry is in the reserve map, it means a reservation exists.
2632
* If an entry exists in the reserve map, it means the
2633
* reservation has already been consumed. As a result, the
2634
* return value of this routine is the opposite of the
2635
* value returned from reserve map manipulation routines above.
2636
*/
2637
if (ret > 0)
2638
return 0;
2639
if (ret == 0)
2640
return 1;
2641
return ret;
2642
}
2643
2644
static long vma_needs_reservation(struct hstate *h,
2645
struct vm_area_struct *vma, unsigned long addr)
2646
{
2647
return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
2648
}
2649
2650
static long vma_commit_reservation(struct hstate *h,
2651
struct vm_area_struct *vma, unsigned long addr)
2652
{
2653
return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
2654
}
2655
2656
static void vma_end_reservation(struct hstate *h,
2657
struct vm_area_struct *vma, unsigned long addr)
2658
{
2659
(void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
2660
}
2661
2662
static long vma_add_reservation(struct hstate *h,
2663
struct vm_area_struct *vma, unsigned long addr)
2664
{
2665
return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
2666
}
2667
2668
static long vma_del_reservation(struct hstate *h,
2669
struct vm_area_struct *vma, unsigned long addr)
2670
{
2671
return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV);
2672
}
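/*
 * Illustrative sketch (not part of the original file): the canonical
 * needs/commit/end sequence implemented by alloc_hugetlb_folio() further
 * down. The allocation step is elided; example_reserve_and_alloc() is a
 * hypothetical name.
 */
#if 0	/* example only, not compiled */
static int example_reserve_and_alloc(struct hstate *h,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct folio *folio;
	long chg = vma_needs_reservation(h, vma, addr);

	if (chg < 0)
		return -ENOMEM;		/* resv map manipulation failed */

	folio = NULL;			/* ... allocate the huge folio here ... */
	if (!folio) {
		vma_end_reservation(h, vma, addr);	/* abort the pending change */
		return -ENOSPC;
	}

	vma_commit_reservation(h, vma, addr);	/* record the consumed entry */
	return 0;
}
#endif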
2673
2674
/*
2675
* This routine is called to restore reservation information on error paths.
2676
* It should ONLY be called for folios allocated via alloc_hugetlb_folio(),
2677
* and the hugetlb mutex should remain held when calling this routine.
2678
*
2679
* It handles two specific cases:
2680
* 1) A reservation was in place and the folio consumed the reservation.
2681
* hugetlb_restore_reserve is set in the folio.
2682
* 2) No reservation was in place for the page, so hugetlb_restore_reserve is
2683
* not set. However, alloc_hugetlb_folio always updates the reserve map.
2684
*
2685
* In case 1, free_huge_folio later in the error path will increment the
2686
* global reserve count. But, free_huge_folio does not have enough context
2687
* to adjust the reservation map. This case deals primarily with private
2688
* mappings. Adjust the reserve map here to be consistent with global
2689
* reserve count adjustments to be made by free_huge_folio. Make sure the
2690
* reserve map indicates there is a reservation present.
2691
*
2692
* In case 2, simply undo reserve map modifications done by alloc_hugetlb_folio.
2693
*/
2694
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
2695
unsigned long address, struct folio *folio)
2696
{
2697
long rc = vma_needs_reservation(h, vma, address);
2698
2699
if (folio_test_hugetlb_restore_reserve(folio)) {
2700
if (unlikely(rc < 0))
2701
/*
2702
* Rare out of memory condition in reserve map
2703
* manipulation. Clear hugetlb_restore_reserve so
2704
* that global reserve count will not be incremented
2705
* by free_huge_folio. This will make it appear
2706
* as though the reservation for this folio was
2707
* consumed. This may prevent the task from
2708
* faulting in the folio at a later time. This
2709
* is better than inconsistent global huge page
2710
* accounting of reserve counts.
2711
*/
2712
folio_clear_hugetlb_restore_reserve(folio);
2713
else if (rc)
2714
(void)vma_add_reservation(h, vma, address);
2715
else
2716
vma_end_reservation(h, vma, address);
2717
} else {
2718
if (!rc) {
2719
/*
2720
* This indicates there is an entry in the reserve map
2721
* not added by alloc_hugetlb_folio. We know it was added
2722
* before the alloc_hugetlb_folio call, otherwise
2723
* hugetlb_restore_reserve would be set on the folio.
2724
* Remove the entry so that a subsequent allocation
2725
* does not consume a reservation.
2726
*/
2727
rc = vma_del_reservation(h, vma, address);
2728
if (rc < 0)
2729
/*
2730
* VERY rare out of memory condition. Since
2731
* we can not delete the entry, set
2732
* hugetlb_restore_reserve so that the reserve
2733
* count will be incremented when the folio
2734
* is freed. This reserve will be consumed
2735
* on a subsequent allocation.
2736
*/
2737
folio_set_hugetlb_restore_reserve(folio);
2738
} else if (rc < 0) {
2739
/*
2740
* Rare out of memory condition from
2741
* vma_needs_reservation call. Memory allocation is
2742
* only attempted if a new entry is needed. Therefore,
2743
* this implies there is not an entry in the
2744
* reserve map.
2745
*
2746
* For shared mappings, no entry in the map indicates
2747
* no reservation. We are done.
2748
*/
2749
if (!(vma->vm_flags & VM_MAYSHARE))
2750
/*
2751
* For private mappings, no entry indicates
2752
* a reservation is present. Since we can
2753
* not add an entry, set hugetlb_restore_reserve
2754
* on the folio so reserve count will be
2755
* incremented when freed. This reserve will
2756
* be consumed on a subsequent allocation.
2757
*/
2758
folio_set_hugetlb_restore_reserve(folio);
2759
} else
2760
/*
2761
* No reservation present, do nothing
2762
*/
2763
vma_end_reservation(h, vma, address);
2764
}
2765
}
2766
2767
/*
2768
* alloc_and_dissolve_hugetlb_folio - Allocate a new folio and dissolve
2769
* the old one
2770
* @old_folio: Old folio to dissolve
2771
* @list: List to isolate the page in case we need to
2772
* Returns 0 on success, otherwise negated error.
2773
*/
2774
static int alloc_and_dissolve_hugetlb_folio(struct folio *old_folio,
2775
struct list_head *list)
2776
{
2777
gfp_t gfp_mask;
2778
struct hstate *h;
2779
int nid = folio_nid(old_folio);
2780
struct folio *new_folio = NULL;
2781
int ret = 0;
2782
2783
retry:
2784
/*
2785
* The old_folio might have been dissolved from under our feet, so make sure
2786
* to carefully check the state under the lock.
2787
*/
2788
spin_lock_irq(&hugetlb_lock);
2789
if (!folio_test_hugetlb(old_folio)) {
2790
/*
2791
* Freed from under us. Drop new_folio too.
2792
*/
2793
goto free_new;
2794
} else if (folio_ref_count(old_folio)) {
2795
bool isolated;
2796
2797
/*
2798
* Someone has grabbed the folio, try to isolate it here.
2799
* Fail with -EBUSY if not possible.
2800
*/
2801
spin_unlock_irq(&hugetlb_lock);
2802
isolated = folio_isolate_hugetlb(old_folio, list);
2803
ret = isolated ? 0 : -EBUSY;
2804
spin_lock_irq(&hugetlb_lock);
2805
goto free_new;
2806
} else if (!folio_test_hugetlb_freed(old_folio)) {
2807
/*
2808
* Folio's refcount is 0 but it has not been enqueued in the
2809
* freelist yet. Race window is small, so we can succeed here if
2810
* we retry.
2811
*/
2812
spin_unlock_irq(&hugetlb_lock);
2813
cond_resched();
2814
goto retry;
2815
} else {
2816
h = folio_hstate(old_folio);
2817
if (!new_folio) {
2818
spin_unlock_irq(&hugetlb_lock);
2819
gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
2820
new_folio = alloc_fresh_hugetlb_folio(h, gfp_mask,
2821
nid, NULL);
2822
if (!new_folio)
2823
return -ENOMEM;
2824
goto retry;
2825
}
2826
2827
/*
2828
* Ok, old_folio is still a genuine free hugepage. Remove it from
2829
* the freelist and decrease the counters. These will be
2830
* incremented again when calling account_new_hugetlb_folio()
2831
* and enqueue_hugetlb_folio() for new_folio. The counters will
2832
* remain stable since this happens under the lock.
2833
*/
2834
remove_hugetlb_folio(h, old_folio, false);
2835
2836
/*
2837
* Ref count on new_folio is already zero as it was dropped
2838
* earlier. It can be directly added to the pool free list.
2839
*/
2840
account_new_hugetlb_folio(h, new_folio);
2841
enqueue_hugetlb_folio(h, new_folio);
2842
2843
/*
2844
* Folio has been replaced, we can safely free the old one.
2845
*/
2846
spin_unlock_irq(&hugetlb_lock);
2847
update_and_free_hugetlb_folio(h, old_folio, false);
2848
}
2849
2850
return ret;
2851
2852
free_new:
2853
spin_unlock_irq(&hugetlb_lock);
2854
if (new_folio)
2855
update_and_free_hugetlb_folio(h, new_folio, false);
2856
2857
return ret;
2858
}
2859
2860
int isolate_or_dissolve_huge_folio(struct folio *folio, struct list_head *list)
2861
{
2862
int ret = -EBUSY;
2863
2864
/* Not to disrupt normal path by vainly holding hugetlb_lock */
2865
if (!folio_test_hugetlb(folio))
2866
return 0;
2867
2868
/*
2869
* Fence off gigantic pages as there is a cyclic dependency between
2870
* alloc_contig_range and them. Return -ENOMEM as this has the effect
2871
* of bailing out right away without further retrying.
2872
*/
2873
if (order_is_gigantic(folio_order(folio)))
2874
return -ENOMEM;
2875
2876
if (folio_ref_count(folio) && folio_isolate_hugetlb(folio, list))
2877
ret = 0;
2878
else if (!folio_ref_count(folio))
2879
ret = alloc_and_dissolve_hugetlb_folio(folio, list);
2880
2881
return ret;
2882
}
2883
2884
/*
2885
* replace_free_hugepage_folios - Replace free hugepage folios in a given pfn
2886
* range with new folios.
2887
* @start_pfn: start pfn of the given pfn range
2888
* @end_pfn: end pfn of the given pfn range
2889
* Returns 0 on success, otherwise negated error.
2890
*/
2891
int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn)
2892
{
2893
struct folio *folio;
2894
int ret = 0;
2895
2896
LIST_HEAD(isolate_list);
2897
2898
while (start_pfn < end_pfn) {
2899
folio = pfn_folio(start_pfn);
2900
2901
/* Not to disrupt normal path by vainly holding hugetlb_lock */
2902
if (folio_test_hugetlb(folio) && !folio_ref_count(folio)) {
2903
ret = alloc_and_dissolve_hugetlb_folio(folio, &isolate_list);
2904
if (ret)
2905
break;
2906
2907
putback_movable_pages(&isolate_list);
2908
}
2909
start_pfn++;
2910
}
2911
2912
return ret;
2913
}
2914
2915
void wait_for_freed_hugetlb_folios(void)
2916
{
2917
if (llist_empty(&hpage_freelist))
2918
return;
2919
2920
flush_work(&free_hpage_work);
2921
}
2922
2923
typedef enum {
2924
/*
2925
* For either 0/1: we checked the per-vma resv map, and one resv
2926
* count either can be reused (0), or an extra one is needed (1).
2927
*/
2928
MAP_CHG_REUSE = 0,
2929
MAP_CHG_NEEDED = 1,
2930
/*
2931
* The per-vma resv count cannot be used, hence a new resv
2932
* count is enforced.
2933
*
2934
* NOTE: This is mostly identical to MAP_CHG_NEEDED, except
2935
* that currently vma_needs_reservation() has an unwanted side
2936
* effect to either use end() or commit() to complete the
2937
* transaction. Hence it needs to be differentiated from NEEDED.
2938
*/
2939
MAP_CHG_ENFORCED = 2,
2940
} map_chg_state;
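/*
 * Editor's summary (not in the original source): how alloc_hugetlb_folio()
 * below derives the state:
 *
 *	cow_from_owner				-> MAP_CHG_ENFORCED
 *	vma_needs_reservation() == 0		-> MAP_CHG_REUSE
 *	vma_needs_reservation() > 0		-> MAP_CHG_NEEDED
 */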
2941
2942
/*
2943
* NOTE! "cow_from_owner" represents a very hacky usage only used in CoW
2944
* faults of hugetlb private mappings on top of a non-page-cache folio (in
2945
* which case even if there's a private vma resv map it won't cover such
2946
* allocation). New call sites should (probably) never set it to true!!
2947
* When it's set, the allocation will bypass all vma level reservations.
2948
*/
2949
struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
2950
unsigned long addr, bool cow_from_owner)
2951
{
2952
struct hugepage_subpool *spool = subpool_vma(vma);
2953
struct hstate *h = hstate_vma(vma);
2954
struct folio *folio;
2955
long retval, gbl_chg, gbl_reserve;
2956
map_chg_state map_chg;
2957
int ret, idx;
2958
struct hugetlb_cgroup *h_cg = NULL;
2959
gfp_t gfp = htlb_alloc_mask(h) | __GFP_RETRY_MAYFAIL;
2960
2961
idx = hstate_index(h);
2962
2963
/* Do we need a separate per-vma reservation? */
2964
if (cow_from_owner) {
2965
/*
2966
* Special case! Since it's a CoW on top of a reserved
2967
* page, the private resv map doesn't count. So it cannot
2968
* consume the per-vma resv map even if it's reserved.
2969
*/
2970
map_chg = MAP_CHG_ENFORCED;
2971
} else {
2972
/*
2973
* Examine the region/reserve map to determine if the process
2974
* has a reservation for the page to be allocated. A return
2975
* code of zero indicates a reservation exists (no change).
2976
*/
2977
retval = vma_needs_reservation(h, vma, addr);
2978
if (retval < 0)
2979
return ERR_PTR(-ENOMEM);
2980
map_chg = retval ? MAP_CHG_NEEDED : MAP_CHG_REUSE;
2981
}
2982
2983
/*
2984
* Do we need a separate global reservation?
2985
*
2986
* Processes that did not create the mapping will have no
2987
* reserves as indicated by the region/reserve map. Check
2988
* that the allocation will not exceed the subpool limit.
2989
* Or if it can get one from the pool reservation directly.
2990
*/
2991
if (map_chg) {
2992
gbl_chg = hugepage_subpool_get_pages(spool, 1);
2993
if (gbl_chg < 0)
2994
goto out_end_reservation;
2995
} else {
2996
/*
2997
* If we have the vma reservation ready, no need for extra
2998
* global reservation.
2999
*/
3000
gbl_chg = 0;
3001
}
3002
3003
/*
3004
* If this allocation is not consuming a per-vma reservation,
3005
* charge the hugetlb cgroup now.
3006
*/
3007
if (map_chg) {
3008
ret = hugetlb_cgroup_charge_cgroup_rsvd(
3009
idx, pages_per_huge_page(h), &h_cg);
3010
if (ret)
3011
goto out_subpool_put;
3012
}
3013
3014
ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
3015
if (ret)
3016
goto out_uncharge_cgroup_reservation;
3017
3018
spin_lock_irq(&hugetlb_lock);
3019
/*
3020
* gbl_chg is passed to indicate whether or not a page must be taken
3021
* from the global free pool (global change). gbl_chg == 0 indicates
3022
* a reservation exists for the allocation.
3023
*/
3024
folio = dequeue_hugetlb_folio_vma(h, vma, addr, gbl_chg);
3025
if (!folio) {
3026
spin_unlock_irq(&hugetlb_lock);
3027
folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr);
3028
if (!folio)
3029
goto out_uncharge_cgroup;
3030
spin_lock_irq(&hugetlb_lock);
3031
list_add(&folio->lru, &h->hugepage_activelist);
3032
folio_ref_unfreeze(folio, 1);
3033
/* Fall through */
3034
}
3035
3036
/*
3037
* Either dequeued or buddy-allocated folio needs to add special
3038
* mark to the folio when it consumes a global reservation.
3039
*/
3040
if (!gbl_chg) {
3041
folio_set_hugetlb_restore_reserve(folio);
3042
h->resv_huge_pages--;
3043
}
3044
3045
hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, folio);
3046
/* If allocation is not consuming a reservation, also store the
3047
* hugetlb_cgroup pointer on the page.
3048
*/
3049
if (map_chg) {
3050
hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h),
3051
h_cg, folio);
3052
}
3053
3054
spin_unlock_irq(&hugetlb_lock);
3055
3056
hugetlb_set_folio_subpool(folio, spool);
3057
3058
if (map_chg != MAP_CHG_ENFORCED) {
3059
/* commit() is only needed if the map_chg is not enforced */
3060
retval = vma_commit_reservation(h, vma, addr);
3061
/*
3062
* Check for possible race conditions. When a race happens:
3063
* The page was added to the reservation map between
3064
* vma_needs_reservation and vma_commit_reservation.
3065
* This indicates a race with hugetlb_reserve_pages.
3066
* Adjust for the subpool count incremented above AND
3067
* in hugetlb_reserve_pages for the same page. Also,
3068
* the reservation count added in hugetlb_reserve_pages
3069
* no longer applies.
3070
*/
3071
if (unlikely(map_chg == MAP_CHG_NEEDED && retval == 0)) {
3072
long rsv_adjust;
3073
3074
rsv_adjust = hugepage_subpool_put_pages(spool, 1);
3075
hugetlb_acct_memory(h, -rsv_adjust);
3076
if (map_chg) {
3077
spin_lock_irq(&hugetlb_lock);
3078
hugetlb_cgroup_uncharge_folio_rsvd(
3079
hstate_index(h), pages_per_huge_page(h),
3080
folio);
3081
spin_unlock_irq(&hugetlb_lock);
3082
}
3083
}
3084
}
3085
3086
ret = mem_cgroup_charge_hugetlb(folio, gfp);
3087
/*
3088
* Unconditionally increment NR_HUGETLB here. If it turns out that
3089
* mem_cgroup_charge_hugetlb failed, then immediately free the page and
3090
* decrement NR_HUGETLB.
3091
*/
3092
lruvec_stat_mod_folio(folio, NR_HUGETLB, pages_per_huge_page(h));
3093
3094
if (ret == -ENOMEM) {
3095
free_huge_folio(folio);
3096
return ERR_PTR(-ENOMEM);
3097
}
3098
3099
return folio;
3100
3101
out_uncharge_cgroup:
3102
hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
3103
out_uncharge_cgroup_reservation:
3104
if (map_chg)
3105
hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h),
3106
h_cg);
3107
out_subpool_put:
3108
/*
3109
* put page to subpool iff the quota of subpool's rsv_hpages is used
3110
* during hugepage_subpool_get_pages.
3111
*/
3112
if (map_chg && !gbl_chg) {
3113
gbl_reserve = hugepage_subpool_put_pages(spool, 1);
3114
hugetlb_acct_memory(h, -gbl_reserve);
3115
}
3116
3117
3118
out_end_reservation:
3119
if (map_chg != MAP_CHG_ENFORCED)
3120
vma_end_reservation(h, vma, addr);
3121
return ERR_PTR(-ENOSPC);
3122
}
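/*
 * Editor's note (not in the original source): callers must check the
 * return value with IS_ERR()/PTR_ERR(). ERR_PTR(-ENOMEM) indicates a
 * reserve-map or memcg charge failure, while ERR_PTR(-ENOSPC) means the
 * pool, subpool or hugetlb cgroup limits prevented the allocation.
 */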
3123
3124
static __init void *alloc_bootmem(struct hstate *h, int nid, bool node_exact)
3125
{
3126
struct huge_bootmem_page *m;
3127
int listnode = nid;
3128
3129
if (hugetlb_early_cma(h))
3130
m = hugetlb_cma_alloc_bootmem(h, &listnode, node_exact);
3131
else {
3132
if (node_exact)
3133
m = memblock_alloc_exact_nid_raw(huge_page_size(h),
3134
huge_page_size(h), 0,
3135
MEMBLOCK_ALLOC_ACCESSIBLE, nid);
3136
else {
3137
m = memblock_alloc_try_nid_raw(huge_page_size(h),
3138
huge_page_size(h), 0,
3139
MEMBLOCK_ALLOC_ACCESSIBLE, nid);
3140
/*
3141
* For pre-HVO to work correctly, pages need to be on
3142
* the list for the node they were actually allocated
3143
* from. That node may be different in the case of
3144
* fallback by memblock_alloc_try_nid_raw. So,
3145
* extract the actual node first.
3146
*/
3147
if (m)
3148
listnode = early_pfn_to_nid(PHYS_PFN(virt_to_phys(m)));
3149
}
3150
3151
if (m) {
3152
m->flags = 0;
3153
m->cma = NULL;
3154
}
3155
}
3156
3157
if (m) {
3158
/*
3159
* Use the beginning of the huge page to store the
3160
* huge_bootmem_page struct (until gather_bootmem
3161
* puts them into the mem_map).
3162
*
3163
* Put them into a private list first because mem_map
3164
* is not up yet.
3165
*/
3166
INIT_LIST_HEAD(&m->list);
3167
list_add(&m->list, &huge_boot_pages[listnode]);
3168
m->hstate = h;
3169
}
3170
3171
return m;
3172
}
3173
3174
int alloc_bootmem_huge_page(struct hstate *h, int nid)
3175
__attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
3176
int __alloc_bootmem_huge_page(struct hstate *h, int nid)
3177
{
3178
struct huge_bootmem_page *m = NULL; /* initialize for clang */
3179
int nr_nodes, node = nid;
3180
3181
/* do node specific alloc */
3182
if (nid != NUMA_NO_NODE) {
3183
m = alloc_bootmem(h, node, true);
3184
if (!m)
3185
return 0;
3186
goto found;
3187
}
3188
3189
/* allocate from next node when distributing huge pages */
3190
for_each_node_mask_to_alloc(&h->next_nid_to_alloc, nr_nodes, node,
3191
&hugetlb_bootmem_nodes) {
3192
m = alloc_bootmem(h, node, false);
3193
if (!m)
3194
return 0;
3195
goto found;
3196
}
3197
3198
found:
3199
3200
/*
3201
* Only initialize the head struct page in memmap_init_reserved_pages,
3202
* the rest of the struct pages will be initialized by the HugeTLB
3203
* subsystem itself.
3204
* The head struct page is used to get folio information by the HugeTLB
3205
* subsystem like zone id and node id.
3206
*/
3207
memblock_reserved_mark_noinit(virt_to_phys((void *)m + PAGE_SIZE),
3208
huge_page_size(h) - PAGE_SIZE);
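/*
 * Editor's note (worked example, not in the original source): for a
 * 2 MiB huge page only the head struct page is initialized by
 * memmap_init_reserved_pages(); the struct pages of the remaining 511
 * base pages are marked 'noinit' above and are initialized later by
 * hugetlb_folio_init_tail_vmemmap().
 */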
3209
3210
return 1;
3211
}
3212
3213
/* Initialize [start_page:end_page_number] tail struct pages of a hugepage */
3214
static void __init hugetlb_folio_init_tail_vmemmap(struct folio *folio,
3215
unsigned long start_page_number,
3216
unsigned long end_page_number)
3217
{
3218
enum zone_type zone = zone_idx(folio_zone(folio));
3219
int nid = folio_nid(folio);
3220
struct page *page = folio_page(folio, start_page_number);
3221
unsigned long head_pfn = folio_pfn(folio);
3222
unsigned long pfn, end_pfn = head_pfn + end_page_number;
3223
3224
/*
3225
* As we marked all tail pages with memblock_reserved_mark_noinit(),
3226
* we must initialize them ourselves here.
3227
*/
3228
for (pfn = head_pfn + start_page_number; pfn < end_pfn; page++, pfn++) {
3229
__init_single_page(page, pfn, zone, nid);
3230
prep_compound_tail((struct page *)folio, pfn - head_pfn);
3231
set_page_count(page, 0);
3232
}
3233
}
3234
3235
static void __init hugetlb_folio_init_vmemmap(struct folio *folio,
3236
struct hstate *h,
3237
unsigned long nr_pages)
3238
{
3239
int ret;
3240
3241
/*
3242
* This is an open-coded prep_compound_page() whereby we avoid
3243
* walking pages twice by initializing/preparing+freezing them in the
3244
* same go.
3245
*/
3246
__folio_clear_reserved(folio);
3247
__folio_set_head(folio);
3248
ret = folio_ref_freeze(folio, 1);
3249
VM_BUG_ON(!ret);
3250
hugetlb_folio_init_tail_vmemmap(folio, 1, nr_pages);
3251
prep_compound_head((struct page *)folio, huge_page_order(h));
3252
}
3253
3254
static bool __init hugetlb_bootmem_page_prehvo(struct huge_bootmem_page *m)
3255
{
3256
return m->flags & HUGE_BOOTMEM_HVO;
3257
}
3258
3259
static bool __init hugetlb_bootmem_page_earlycma(struct huge_bootmem_page *m)
3260
{
3261
return m->flags & HUGE_BOOTMEM_CMA;
3262
}
3263
3264
/*
3265
* memblock-allocated pageblocks might not have the migrate type set
3266
* if marked with the 'noinit' flag. Set it to the default (MIGRATE_MOVABLE)
3267
* here, or MIGRATE_CMA if this was a page allocated through an early CMA
3268
* reservation.
3269
*
3270
* In case of vmemmap optimized folios, the tail vmemmap pages are mapped
3271
* read-only, but that's ok - for sparse vmemmap this does not write to
3272
* the page structure.
3273
*/
3274
static void __init hugetlb_bootmem_init_migratetype(struct folio *folio,
3275
struct hstate *h)
3276
{
3277
unsigned long nr_pages = pages_per_huge_page(h), i;
3278
3279
WARN_ON_ONCE(!pageblock_aligned(folio_pfn(folio)));
3280
3281
for (i = 0; i < nr_pages; i += pageblock_nr_pages) {
3282
if (folio_test_hugetlb_cma(folio))
3283
init_cma_pageblock(folio_page(folio, i));
3284
else
3285
init_pageblock_migratetype(folio_page(folio, i),
3286
MIGRATE_MOVABLE, false);
3287
}
3288
}
3289
3290
static void __init prep_and_add_bootmem_folios(struct hstate *h,
3291
struct list_head *folio_list)
3292
{
3293
unsigned long flags;
3294
struct folio *folio, *tmp_f;
3295
3296
/* Send list for bulk vmemmap optimization processing */
3297
hugetlb_vmemmap_optimize_bootmem_folios(h, folio_list);
3298
3299
list_for_each_entry_safe(folio, tmp_f, folio_list, lru) {
3300
if (!folio_test_hugetlb_vmemmap_optimized(folio)) {
3301
/*
3302
* If HVO fails, initialize all tail struct pages.
3303
* We do not worry about potential long lock hold
3304
* time as this is early in boot and there should
3305
* be no contention.
3306
*/
3307
hugetlb_folio_init_tail_vmemmap(folio,
3308
HUGETLB_VMEMMAP_RESERVE_PAGES,
3309
pages_per_huge_page(h));
3310
}
3311
hugetlb_bootmem_init_migratetype(folio, h);
3312
/* Subdivide locks to achieve better parallel performance */
3313
spin_lock_irqsave(&hugetlb_lock, flags);
3314
account_new_hugetlb_folio(h, folio);
3315
enqueue_hugetlb_folio(h, folio);
3316
spin_unlock_irqrestore(&hugetlb_lock, flags);
3317
}
3318
}
3319
3320
bool __init hugetlb_bootmem_page_zones_valid(int nid,
3321
struct huge_bootmem_page *m)
3322
{
3323
unsigned long start_pfn;
3324
bool valid;
3325
3326
if (m->flags & HUGE_BOOTMEM_ZONES_VALID) {
3327
/*
3328
* Already validated, skip check.
3329
*/
3330
return true;
3331
}
3332
3333
if (hugetlb_bootmem_page_earlycma(m)) {
3334
valid = cma_validate_zones(m->cma);
3335
goto out;
3336
}
3337
3338
start_pfn = virt_to_phys(m) >> PAGE_SHIFT;
3339
3340
valid = !pfn_range_intersects_zones(nid, start_pfn,
3341
pages_per_huge_page(m->hstate));
3342
out:
3343
if (!valid)
3344
hstate_boot_nrinvalid[hstate_index(m->hstate)]++;
3345
3346
return valid;
3347
}
3348
3349
/*
3350
* Free a bootmem page that was found to be invalid (intersecting with
3351
* multiple zones).
3352
*
3353
* Since it intersects with multiple zones, we can't just do a free
3354
* operation on all pages at once, but instead have to walk all
3355
* pages, freeing them one by one.
3356
*/
3357
static void __init hugetlb_bootmem_free_invalid_page(int nid, struct page *page,
3358
struct hstate *h)
3359
{
3360
unsigned long npages = pages_per_huge_page(h);
3361
unsigned long pfn;
3362
3363
while (npages--) {
3364
pfn = page_to_pfn(page);
3365
__init_page_from_nid(pfn, nid);
3366
free_reserved_page(page);
3367
page++;
3368
}
3369
}
3370
3371
/*
3372
* Put bootmem huge pages into the standard lists after mem_map is up.
3373
* Note: This only applies to gigantic (order > MAX_PAGE_ORDER) pages.
3374
*/
3375
static void __init gather_bootmem_prealloc_node(unsigned long nid)
3376
{
3377
LIST_HEAD(folio_list);
3378
struct huge_bootmem_page *m, *tm;
3379
struct hstate *h = NULL, *prev_h = NULL;
3380
3381
list_for_each_entry_safe(m, tm, &huge_boot_pages[nid], list) {
3382
struct page *page = virt_to_page(m);
3383
struct folio *folio = (void *)page;
3384
3385
h = m->hstate;
3386
if (!hugetlb_bootmem_page_zones_valid(nid, m)) {
3387
/*
3388
* Can't use this page. Initialize the
3389
* page structures if that hasn't already
3390
* been done, and give them to the page
3391
* allocator.
3392
*/
3393
hugetlb_bootmem_free_invalid_page(nid, page, h);
3394
continue;
3395
}
3396
3397
/*
3398
* It is possible to have multiple huge page sizes (hstates)
3399
* in this list. If so, process each size separately.
3400
*/
3401
if (h != prev_h && prev_h != NULL)
3402
prep_and_add_bootmem_folios(prev_h, &folio_list);
3403
prev_h = h;
3404
3405
VM_BUG_ON(!hstate_is_gigantic(h));
3406
WARN_ON(folio_ref_count(folio) != 1);
3407
3408
hugetlb_folio_init_vmemmap(folio, h,
3409
HUGETLB_VMEMMAP_RESERVE_PAGES);
3410
init_new_hugetlb_folio(folio);
3411
3412
if (hugetlb_bootmem_page_prehvo(m))
3413
/*
3414
* If pre-HVO was done, just set the
3415
* flag, the HVO code will then skip
3416
* this folio.
3417
*/
3418
folio_set_hugetlb_vmemmap_optimized(folio);
3419
3420
if (hugetlb_bootmem_page_earlycma(m))
3421
folio_set_hugetlb_cma(folio);
3422
3423
list_add(&folio->lru, &folio_list);
3424
3425
/*
3426
* We need to restore the 'stolen' pages to totalram_pages
3427
* in order to fix confusing memory reports from free(1) and
3428
* other side-effects, like CommitLimit going negative.
3429
*
3430
* For CMA pages, this is done in init_cma_pageblock
3431
* (via hugetlb_bootmem_init_migratetype), so skip it here.
3432
*/
3433
if (!folio_test_hugetlb_cma(folio))
3434
adjust_managed_page_count(page, pages_per_huge_page(h));
3435
cond_resched();
3436
}
3437
3438
prep_and_add_bootmem_folios(h, &folio_list);
3439
}
3440
3441
static void __init gather_bootmem_prealloc_parallel(unsigned long start,
3442
unsigned long end, void *arg)
3443
{
3444
int nid;
3445
3446
for (nid = start; nid < end; nid++)
3447
gather_bootmem_prealloc_node(nid);
3448
}
3449
3450
static void __init gather_bootmem_prealloc(void)
3451
{
3452
struct padata_mt_job job = {
3453
.thread_fn = gather_bootmem_prealloc_parallel,
3454
.fn_arg = NULL,
3455
.start = 0,
3456
.size = nr_node_ids,
3457
.align = 1,
3458
.min_chunk = 1,
3459
.max_threads = num_node_state(N_MEMORY),
3460
.numa_aware = true,
3461
};
3462
3463
padata_do_multithreaded(&job);
3464
}
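/*
 * Editor's note (not in the original source): the padata job above runs
 * gather_bootmem_prealloc_node() once per node id (size = nr_node_ids,
 * min_chunk = 1), using at most one worker thread per node that has
 * memory, so e.g. a 4-node machine gathers its boot-time huge pages with
 * up to 4 threads in parallel.
 */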
3465
3466
static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid)
3467
{
3468
unsigned long i;
3469
char buf[32];
3470
LIST_HEAD(folio_list);
3471
3472
for (i = 0; i < h->max_huge_pages_node[nid]; ++i) {
3473
if (hstate_is_gigantic(h)) {
3474
if (!alloc_bootmem_huge_page(h, nid))
3475
break;
3476
} else {
3477
struct folio *folio;
3478
gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
3479
3480
folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, nid,
3481
&node_states[N_MEMORY], NULL);
3482
if (!folio)
3483
break;
3484
list_add(&folio->lru, &folio_list);
3485
}
3486
cond_resched();
3487
}
3488
3489
if (!list_empty(&folio_list))
3490
prep_and_add_allocated_folios(h, &folio_list);
3491
3492
if (i == h->max_huge_pages_node[nid])
3493
return;
3494
3495
string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3496
pr_warn("HugeTLB: allocating %u of page size %s failed node%d. Only allocated %lu hugepages.\n",
3497
h->max_huge_pages_node[nid], buf, nid, i);
3498
h->max_huge_pages -= (h->max_huge_pages_node[nid] - i);
3499
h->max_huge_pages_node[nid] = i;
3500
}
3501
3502
static bool __init hugetlb_hstate_alloc_pages_specific_nodes(struct hstate *h)
3503
{
3504
int i;
3505
bool node_specific_alloc = false;
3506
3507
for_each_online_node(i) {
3508
if (h->max_huge_pages_node[i] > 0) {
3509
hugetlb_hstate_alloc_pages_onenode(h, i);
3510
node_specific_alloc = true;
3511
}
3512
}
3513
3514
return node_specific_alloc;
3515
}
3516
3517
static void __init hugetlb_hstate_alloc_pages_errcheck(unsigned long allocated, struct hstate *h)
3518
{
3519
if (allocated < h->max_huge_pages) {
3520
char buf[32];
3521
3522
string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3523
pr_warn("HugeTLB: allocating %lu of page size %s failed. Only allocated %lu hugepages.\n",
3524
h->max_huge_pages, buf, allocated);
3525
h->max_huge_pages = allocated;
3526
}
3527
}
3528
3529
static void __init hugetlb_pages_alloc_boot_node(unsigned long start, unsigned long end, void *arg)
3530
{
3531
struct hstate *h = (struct hstate *)arg;
3532
int i, num = end - start;
3533
nodemask_t node_alloc_noretry;
3534
LIST_HEAD(folio_list);
3535
int next_node = first_online_node;
3536
3537
/* Bit mask controlling how hard we retry per-node allocations.*/
3538
nodes_clear(node_alloc_noretry);
3539
3540
for (i = 0; i < num; ++i) {
3541
struct folio *folio;
3542
3543
if (hugetlb_vmemmap_optimizable_size(h) &&
3544
(si_mem_available() == 0) && !list_empty(&folio_list)) {
3545
prep_and_add_allocated_folios(h, &folio_list);
3546
INIT_LIST_HEAD(&folio_list);
3547
}
3548
folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY],
3549
&node_alloc_noretry, &next_node);
3550
if (!folio)
3551
break;
3552
3553
list_move(&folio->lru, &folio_list);
3554
cond_resched();
3555
}
3556
3557
prep_and_add_allocated_folios(h, &folio_list);
3558
}
3559
3560
static unsigned long __init hugetlb_gigantic_pages_alloc_boot(struct hstate *h)
3561
{
3562
unsigned long i;
3563
3564
for (i = 0; i < h->max_huge_pages; ++i) {
3565
if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE))
3566
break;
3567
cond_resched();
3568
}
3569
3570
return i;
3571
}
3572
3573
static unsigned long __init hugetlb_pages_alloc_boot(struct hstate *h)
3574
{
3575
struct padata_mt_job job = {
3576
.fn_arg = h,
3577
.align = 1,
3578
.numa_aware = true
3579
};
3580
3581
unsigned long jiffies_start;
3582
unsigned long jiffies_end;
3583
unsigned long remaining;
3584
3585
job.thread_fn = hugetlb_pages_alloc_boot_node;
3586
3587
/*
3588
* job.max_threads is 25% of the available cpu threads by default.
3589
*
3590
* On large servers with terabytes of memory, huge page allocation
3591
* can consume a considerable amount of time.
3592
*
3593
* Tests below show how long it takes to allocate 1 TiB of memory with 2MiB huge pages.
3594
* Using more threads can significantly improve allocation time.
3595
*
3596
* +-----------------------+-------+-------+-------+-------+-------+
3597
* | threads | 8 | 16 | 32 | 64 | 128 |
3598
* +-----------------------+-------+-------+-------+-------+-------+
3599
* | skylake 144 cpus | 44s | 22s | 16s | 19s | 20s |
3600
* | cascade lake 192 cpus | 39s | 20s | 11s | 10s | 9s |
3601
* +-----------------------+-------+-------+-------+-------+-------+
3602
*/
3603
if (hugepage_allocation_threads == 0) {
3604
hugepage_allocation_threads = num_online_cpus() / 4;
3605
hugepage_allocation_threads = max(hugepage_allocation_threads, 1);
3606
}
3607
3608
job.max_threads = hugepage_allocation_threads;
3609
3610
jiffies_start = jiffies;
3611
do {
3612
remaining = h->max_huge_pages - h->nr_huge_pages;
3613
3614
job.start = h->nr_huge_pages;
3615
job.size = remaining;
3616
job.min_chunk = remaining / hugepage_allocation_threads;
3617
padata_do_multithreaded(&job);
3618
3619
if (h->nr_huge_pages == h->max_huge_pages)
3620
break;
3621
3622
/*
3623
* Retry only if the vmemmap optimization might have been able to free
3624
* some memory back to the system.
3625
*/
3626
if (!hugetlb_vmemmap_optimizable(h))
3627
break;
3628
3629
/* Continue if progress was made in last iteration */
3630
} while (remaining != (h->max_huge_pages - h->nr_huge_pages));
3631
3632
jiffies_end = jiffies;
3633
3634
pr_info("HugeTLB: allocation took %dms with hugepage_allocation_threads=%ld\n",
3635
jiffies_to_msecs(jiffies_end - jiffies_start),
3636
hugepage_allocation_threads);
3637
3638
return h->nr_huge_pages;
3639
}
3640
3641
/*
3642
* NOTE: this routine is called in different contexts for gigantic and
3643
* non-gigantic pages.
3644
* - For gigantic pages, this is called early in the boot process and
3645
* pages are allocated from the memblock allocator or something similar.
3646
* Gigantic pages are actually added to pools later with the routine
3647
* gather_bootmem_prealloc.
3648
* - For non-gigantic pages, this is called later in the boot process after
3649
* all of mm is up and functional. Pages are allocated from buddy and
3650
* then added to hugetlb pools.
3651
*/
3652
static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
3653
{
3654
unsigned long allocated;
3655
3656
/*
3657
* Skip gigantic hugepages allocation if early CMA
3658
* reservations are not available.
3659
*/
3660
if (hstate_is_gigantic(h) && hugetlb_cma_total_size() &&
3661
!hugetlb_early_cma(h)) {
3662
pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
3663
return;
3664
}
3665
3666
if (!h->max_huge_pages)
3667
return;
3668
3669
/* do node specific alloc */
3670
if (hugetlb_hstate_alloc_pages_specific_nodes(h))
3671
return;
3672
3673
/* below will do all node balanced alloc */
3674
if (hstate_is_gigantic(h))
3675
allocated = hugetlb_gigantic_pages_alloc_boot(h);
3676
else
3677
allocated = hugetlb_pages_alloc_boot(h);
3678
3679
hugetlb_hstate_alloc_pages_errcheck(allocated, h);
3680
}
3681
3682
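/*
 * Finish hstate setup once the core allocators are usable: reset the
 * NUMA round-robin cursors, allocate boot-time pools for non-gigantic
 * sizes and choose a default demote order for each hstate.
 */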
static void __init hugetlb_init_hstates(void)
3683
{
3684
struct hstate *h, *h2;
3685
3686
for_each_hstate(h) {
3687
/*
3688
* Always reset to first_memory_node here, even if
3689
* next_nid_to_alloc was set before - we can't
3690
* reference hugetlb_bootmem_nodes after init, and
3691
* first_memory_node is right for all further allocations.
3692
*/
3693
h->next_nid_to_alloc = first_memory_node;
3694
h->next_nid_to_free = first_memory_node;
3695
3696
/* oversize hugepages were init'ed in early boot */
3697
if (!hstate_is_gigantic(h))
3698
hugetlb_hstate_alloc_pages(h);
3699
3700
/*
3701
* Set demote order for each hstate. Note that
3702
* h->demote_order is initially 0.
3703
* - We can not demote gigantic pages if runtime freeing
3704
* is not supported, so skip this.
3705
* - If CMA allocation is possible, we can not demote
3706
* HUGETLB_PAGE_ORDER or smaller size pages.
3707
*/
3708
if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
3709
continue;
3710
if (hugetlb_cma_total_size() && h->order <= HUGETLB_PAGE_ORDER)
3711
continue;
3712
for_each_hstate(h2) {
3713
if (h2 == h)
3714
continue;
3715
if (h2->order < h->order &&
3716
h2->order > h->demote_order)
3717
h->demote_order = h2->order;
3718
}
3719
}
3720
}
3721
3722
static void __init report_hugepages(void)
3723
{
3724
struct hstate *h;
3725
unsigned long nrinvalid;
3726
3727
for_each_hstate(h) {
3728
char buf[32];
3729
3730
nrinvalid = hstate_boot_nrinvalid[hstate_index(h)];
3731
h->max_huge_pages -= nrinvalid;
3732
3733
string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3734
pr_info("HugeTLB: registered %s page size, pre-allocated %ld pages\n",
3735
buf, h->nr_huge_pages);
3736
if (nrinvalid)
3737
pr_info("HugeTLB: %s page size: %lu invalid page%s discarded\n",
3738
buf, nrinvalid, str_plural(nrinvalid));
3739
pr_info("HugeTLB: %d KiB vmemmap can be freed for a %s page\n",
3740
hugetlb_vmemmap_optimizable_size(h) / SZ_1K, buf);
3741
}
3742
}
3743
3744
#ifdef CONFIG_HIGHMEM
3745
static void try_to_free_low(struct hstate *h, unsigned long count,
3746
nodemask_t *nodes_allowed)
3747
{
3748
int i;
3749
LIST_HEAD(page_list);
3750
3751
lockdep_assert_held(&hugetlb_lock);
3752
if (hstate_is_gigantic(h))
3753
return;
3754
3755
/*
3756
* Collect pages to be freed on a list, and free after dropping lock
3757
*/
3758
for_each_node_mask(i, *nodes_allowed) {
3759
struct folio *folio, *next;
3760
struct list_head *freel = &h->hugepage_freelists[i];
3761
list_for_each_entry_safe(folio, next, freel, lru) {
3762
if (count >= h->nr_huge_pages)
3763
goto out;
3764
if (folio_test_highmem(folio))
3765
continue;
3766
remove_hugetlb_folio(h, folio, false);
3767
list_add(&folio->lru, &page_list);
3768
}
3769
}
3770
3771
out:
3772
spin_unlock_irq(&hugetlb_lock);
3773
update_and_free_pages_bulk(h, &page_list);
3774
spin_lock_irq(&hugetlb_lock);
3775
}
3776
#else
3777
static inline void try_to_free_low(struct hstate *h, unsigned long count,
3778
nodemask_t *nodes_allowed)
3779
{
3780
}
3781
#endif
3782
3783
/*
3784
* Increment or decrement surplus_huge_pages. Keep node-specific counters
3785
* balanced by operating on them in a round-robin fashion.
3786
* Returns 1 if an adjustment was made.
3787
*/
3788
static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
3789
int delta)
3790
{
3791
int nr_nodes, node;
3792
3793
lockdep_assert_held(&hugetlb_lock);
3794
VM_BUG_ON(delta != -1 && delta != 1);
3795
3796
if (delta < 0) {
3797
for_each_node_mask_to_alloc(&h->next_nid_to_alloc, nr_nodes, node, nodes_allowed) {
3798
if (h->surplus_huge_pages_node[node])
3799
goto found;
3800
}
3801
} else {
3802
for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
3803
if (h->surplus_huge_pages_node[node] <
3804
h->nr_huge_pages_node[node])
3805
goto found;
3806
}
3807
}
3808
return 0;
3809
3810
found:
3811
h->surplus_huge_pages += delta;
3812
h->surplus_huge_pages_node[node] += delta;
3813
return 1;
3814
}
3815
3816
#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
3817
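/*
 * Resize the persistent huge page pool of @h to @count, restricted to
 * the nodes in @nodes_allowed. For a node-specific request (@nid !=
 * NUMA_NO_NODE) @count is first rebased to the equivalent global
 * target. Growing converts surplus pages and then allocates fresh
 * ones; shrinking frees surplus/free pages while keeping reservations.
 */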
static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
3818
nodemask_t *nodes_allowed)
3819
{
3820
unsigned long persistent_free_count;
3821
unsigned long min_count;
3822
unsigned long allocated;
3823
struct folio *folio;
3824
LIST_HEAD(page_list);
3825
NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL);
3826
3827
/*
3828
* Bit mask controlling how hard we retry per-node allocations.
3829
* If we can not allocate the bit mask, do not attempt to allocate
3830
* the requested huge pages.
3831
*/
3832
if (node_alloc_noretry)
3833
nodes_clear(*node_alloc_noretry);
3834
else
3835
return -ENOMEM;
3836
3837
/*
3838
* resize_lock mutex prevents concurrent adjustments to number of
3839
* pages in hstate via the proc/sysfs interfaces.
3840
*/
3841
mutex_lock(&h->resize_lock);
3842
flush_free_hpage_work(h);
3843
spin_lock_irq(&hugetlb_lock);
3844
3845
/*
3846
* Check for a node specific request.
3847
* Changing node specific huge page count may require a corresponding
3848
* change to the global count. In any case, the passed node mask
3849
* (nodes_allowed) will restrict alloc/free to the specified node.
3850
*/
3851
if (nid != NUMA_NO_NODE) {
3852
unsigned long old_count = count;
3853
3854
count += persistent_huge_pages(h) -
3855
(h->nr_huge_pages_node[nid] -
3856
h->surplus_huge_pages_node[nid]);
3857
/*
3858
* User may have specified a large count value which caused the
3859
* above calculation to overflow. In this case, they wanted
3860
* to allocate as many huge pages as possible. Set count to
3861
* largest possible value to align with their intention.
3862
*/
3863
if (count < old_count)
3864
count = ULONG_MAX;
3865
}
3866
3867
/*
3868
* Gigantic pages runtime allocation depend on the capability for large
3869
* page range allocation.
3870
* If the system does not provide this feature, return an error when
3871
* the user tries to allocate gigantic pages but let the user free the
3872
* boottime allocated gigantic pages.
3873
*/
3874
if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) {
3875
if (count > persistent_huge_pages(h)) {
3876
spin_unlock_irq(&hugetlb_lock);
3877
mutex_unlock(&h->resize_lock);
3878
NODEMASK_FREE(node_alloc_noretry);
3879
return -EINVAL;
3880
}
3881
/* Fall through to decrease pool */
3882
}
3883
3884
/*
3885
* Increase the pool size
3886
* First take pages out of surplus state. Then make up the
3887
* remaining difference by allocating fresh huge pages.
3888
*
3889
* We might race with alloc_surplus_hugetlb_folio() here and be unable
3890
* to convert a surplus huge page to a normal huge page. That is
3891
* not critical, though, it just means the overall size of the
3892
* pool might be one hugepage larger than it needs to be, but
3893
* within all the constraints specified by the sysctls.
3894
*/
3895
while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
3896
if (!adjust_pool_surplus(h, nodes_allowed, -1))
3897
break;
3898
}
3899
3900
allocated = 0;
3901
while (count > (persistent_huge_pages(h) + allocated)) {
3902
/*
3903
* If this allocation races such that we no longer need the
3904
* page, free_huge_folio will handle it by freeing the page
3905
* and reducing the surplus.
3906
*/
3907
spin_unlock_irq(&hugetlb_lock);
3908
3909
/* yield cpu to avoid soft lockup */
3910
cond_resched();
3911
3912
folio = alloc_pool_huge_folio(h, nodes_allowed,
3913
node_alloc_noretry,
3914
&h->next_nid_to_alloc);
3915
if (!folio) {
3916
prep_and_add_allocated_folios(h, &page_list);
3917
spin_lock_irq(&hugetlb_lock);
3918
goto out;
3919
}
3920
3921
list_add(&folio->lru, &page_list);
3922
allocated++;
3923
3924
/* Bail for signals. Probably ctrl-c from user */
3925
if (signal_pending(current)) {
3926
prep_and_add_allocated_folios(h, &page_list);
3927
spin_lock_irq(&hugetlb_lock);
3928
goto out;
3929
}
3930
3931
spin_lock_irq(&hugetlb_lock);
3932
}
3933
3934
/* Add allocated pages to the pool */
3935
if (!list_empty(&page_list)) {
3936
spin_unlock_irq(&hugetlb_lock);
3937
prep_and_add_allocated_folios(h, &page_list);
3938
spin_lock_irq(&hugetlb_lock);
3939
}
3940
3941
/*
3942
* Decrease the pool size
3943
* First return free pages to the buddy allocator (being careful
3944
* to keep enough around to satisfy reservations). Then place
3945
* pages into surplus state as needed so the pool will shrink
3946
* to the desired size as pages become free.
3947
*
3948
* By placing pages into the surplus state independent of the
3949
* overcommit value, we are allowing the surplus pool size to
3950
* exceed overcommit. There are few sane options here. Since
3951
* alloc_surplus_hugetlb_folio() is checking the global counter,
3952
* though, we'll note that we're not allowed to exceed surplus
3953
* and won't grow the pool anywhere else. Not until one of the
3954
* sysctls are changed, or the surplus pages go out of use.
3955
*
3956
* min_count is the expected number of persistent pages, we
3957
* shouldn't calculate min_count by using
3958
* resv_huge_pages + persistent_huge_pages() - free_huge_pages,
3959
* because there may exist free surplus huge pages, and this will
3960
* lead to subtracting twice. Free surplus huge pages come from HVO
3961
* failing to restore vmemmap, see comments in the callers of
3962
* hugetlb_vmemmap_restore_folio(). Thus, we should calculate
3963
* persistent free count first.
3964
*/
3965
persistent_free_count = h->free_huge_pages;
3966
if (h->free_huge_pages > persistent_huge_pages(h)) {
3967
if (h->free_huge_pages > h->surplus_huge_pages)
3968
persistent_free_count -= h->surplus_huge_pages;
3969
else
3970
persistent_free_count = 0;
3971
}
3972
min_count = h->resv_huge_pages + persistent_huge_pages(h) - persistent_free_count;
3973
min_count = max(count, min_count);
3974
try_to_free_low(h, min_count, nodes_allowed);
3975
3976
/*
3977
* Collect pages to be removed on list without dropping lock
3978
*/
3979
while (min_count < persistent_huge_pages(h)) {
3980
folio = remove_pool_hugetlb_folio(h, nodes_allowed, 0);
3981
if (!folio)
3982
break;
3983
3984
list_add(&folio->lru, &page_list);
3985
}
3986
/* free the pages after dropping lock */
3987
spin_unlock_irq(&hugetlb_lock);
3988
update_and_free_pages_bulk(h, &page_list);
3989
flush_free_hpage_work(h);
3990
spin_lock_irq(&hugetlb_lock);
3991
3992
while (count < persistent_huge_pages(h)) {
3993
if (!adjust_pool_surplus(h, nodes_allowed, 1))
3994
break;
3995
}
3996
out:
3997
h->max_huge_pages = persistent_huge_pages(h);
3998
spin_unlock_irq(&hugetlb_lock);
3999
mutex_unlock(&h->resize_lock);
4000
4001
NODEMASK_FREE(node_alloc_noretry);
4002
4003
return 0;
4004
}
4005
4006
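/*
 * Split each folio on @src_list into dst-order folios and add them to
 * the @dst pool. Vmemmap is restored first; folios whose vmemmap could
 * not be restored are left on @src_list for the caller to return to
 * the @src pool.
 */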
static long demote_free_hugetlb_folios(struct hstate *src, struct hstate *dst,
4007
struct list_head *src_list)
4008
{
4009
long rc;
4010
struct folio *folio, *next;
4011
LIST_HEAD(dst_list);
4012
LIST_HEAD(ret_list);
4013
4014
rc = hugetlb_vmemmap_restore_folios(src, src_list, &ret_list);
4015
list_splice_init(&ret_list, src_list);
4016
4017
/*
4018
* Taking target hstate mutex synchronizes with set_max_huge_pages.
4019
* Without the mutex, pages added to target hstate could be marked
4020
* as surplus.
4021
*
4022
* Note that we already hold src->resize_lock. To prevent deadlock,
4023
* use the convention of always taking larger size hstate mutex first.
4024
*/
4025
mutex_lock(&dst->resize_lock);
4026
4027
list_for_each_entry_safe(folio, next, src_list, lru) {
4028
int i;
4029
bool cma;
4030
4031
if (folio_test_hugetlb_vmemmap_optimized(folio))
4032
continue;
4033
4034
cma = folio_test_hugetlb_cma(folio);
4035
4036
list_del(&folio->lru);
4037
4038
split_page_owner(&folio->page, huge_page_order(src), huge_page_order(dst));
4039
pgalloc_tag_split(folio, huge_page_order(src), huge_page_order(dst));
4040
4041
for (i = 0; i < pages_per_huge_page(src); i += pages_per_huge_page(dst)) {
4042
struct page *page = folio_page(folio, i);
4043
/* Careful: see __split_huge_page_tail() */
4044
struct folio *new_folio = (struct folio *)page;
4045
4046
clear_compound_head(page);
4047
prep_compound_page(page, dst->order);
4048
4049
new_folio->mapping = NULL;
4050
init_new_hugetlb_folio(new_folio);
4051
/* Copy the CMA flag so that it is freed correctly */
4052
if (cma)
4053
folio_set_hugetlb_cma(new_folio);
4054
list_add(&new_folio->lru, &dst_list);
4055
}
4056
}
4057
4058
prep_and_add_allocated_folios(dst, &dst_list);
4059
4060
mutex_unlock(&dst->resize_lock);
4061
4062
return rc;
4063
}
4064
4065
static long demote_pool_huge_page(struct hstate *src, nodemask_t *nodes_allowed,
4066
unsigned long nr_to_demote)
4067
__must_hold(&hugetlb_lock)
4068
{
4069
int nr_nodes, node;
4070
struct hstate *dst;
4071
long rc = 0;
4072
long nr_demoted = 0;
4073
4074
lockdep_assert_held(&hugetlb_lock);
4075
4076
/* We should never get here if no demote order */
4077
if (!src->demote_order) {
4078
pr_warn("HugeTLB: NULL demote order passed to demote_pool_huge_page.\n");
4079
return -EINVAL; /* internal error */
4080
}
4081
dst = size_to_hstate(PAGE_SIZE << src->demote_order);
4082
4083
for_each_node_mask_to_free(src, nr_nodes, node, nodes_allowed) {
4084
LIST_HEAD(list);
4085
struct folio *folio, *next;
4086
4087
list_for_each_entry_safe(folio, next, &src->hugepage_freelists[node], lru) {
4088
if (folio_test_hwpoison(folio))
4089
continue;
4090
4091
remove_hugetlb_folio(src, folio, false);
4092
list_add(&folio->lru, &list);
4093
4094
if (++nr_demoted == nr_to_demote)
4095
break;
4096
}
4097
4098
spin_unlock_irq(&hugetlb_lock);
4099
4100
rc = demote_free_hugetlb_folios(src, dst, &list);
4101
4102
spin_lock_irq(&hugetlb_lock);
4103
4104
list_for_each_entry_safe(folio, next, &list, lru) {
4105
list_del(&folio->lru);
4106
add_hugetlb_folio(src, folio, false);
4107
4108
nr_demoted--;
4109
}
4110
4111
if (rc < 0 || nr_demoted == nr_to_demote)
4112
break;
4113
}
4114
4115
/*
4116
* Not absolutely necessary, but for consistency update max_huge_pages
4117
* based on pool changes for the demoted page.
4118
*/
4119
src->max_huge_pages -= nr_demoted;
4120
dst->max_huge_pages += nr_demoted << (huge_page_order(src) - huge_page_order(dst));
4121
4122
if (rc < 0)
4123
return rc;
4124
4125
if (nr_demoted)
4126
return nr_demoted;
4127
/*
4128
* Only way to get here is if all pages on free lists are poisoned.
4129
* Return -EBUSY so that caller will not retry.
4130
*/
4131
return -EBUSY;
4132
}
4133
4134
#define HSTATE_ATTR_RO(_name) \
4135
static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
4136
4137
#define HSTATE_ATTR_WO(_name) \
4138
static struct kobj_attribute _name##_attr = __ATTR_WO(_name)
4139
4140
#define HSTATE_ATTR(_name) \
4141
static struct kobj_attribute _name##_attr = __ATTR_RW(_name)
4142
4143
static struct kobject *hugepages_kobj;
4144
static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
4145
4146
static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
4147
4148
static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
4149
{
4150
int i;
4151
4152
for (i = 0; i < HUGE_MAX_HSTATE; i++)
4153
if (hstate_kobjs[i] == kobj) {
4154
if (nidp)
4155
*nidp = NUMA_NO_NODE;
4156
return &hstates[i];
4157
}
4158
4159
return kobj_to_node_hstate(kobj, nidp);
4160
}
4161
4162
static ssize_t nr_hugepages_show_common(struct kobject *kobj,
4163
struct kobj_attribute *attr, char *buf)
4164
{
4165
struct hstate *h;
4166
unsigned long nr_huge_pages;
4167
int nid;
4168
4169
h = kobj_to_hstate(kobj, &nid);
4170
if (nid == NUMA_NO_NODE)
4171
nr_huge_pages = h->nr_huge_pages;
4172
else
4173
nr_huge_pages = h->nr_huge_pages_node[nid];
4174
4175
return sysfs_emit(buf, "%lu\n", nr_huge_pages);
4176
}
4177
4178
static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
4179
struct hstate *h, int nid,
4180
unsigned long count, size_t len)
4181
{
4182
int err;
4183
nodemask_t nodes_allowed, *n_mask;
4184
4185
if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
4186
return -EINVAL;
4187
4188
if (nid == NUMA_NO_NODE) {
4189
/*
4190
* global hstate attribute
4191
*/
4192
if (!(obey_mempolicy &&
4193
init_nodemask_of_mempolicy(&nodes_allowed)))
4194
n_mask = &node_states[N_MEMORY];
4195
else
4196
n_mask = &nodes_allowed;
4197
} else {
4198
/*
4199
* Node specific request. count adjustment happens in
4200
* set_max_huge_pages() after acquiring hugetlb_lock.
4201
*/
4202
init_nodemask_of_node(&nodes_allowed, nid);
4203
n_mask = &nodes_allowed;
4204
}
4205
4206
err = set_max_huge_pages(h, count, nid, n_mask);
4207
4208
return err ? err : len;
4209
}
4210
4211
static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
4212
struct kobject *kobj, const char *buf,
4213
size_t len)
4214
{
4215
struct hstate *h;
4216
unsigned long count;
4217
int nid;
4218
int err;
4219
4220
err = kstrtoul(buf, 10, &count);
4221
if (err)
4222
return err;
4223
4224
h = kobj_to_hstate(kobj, &nid);
4225
return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
4226
}
4227
4228
static ssize_t nr_hugepages_show(struct kobject *kobj,
4229
struct kobj_attribute *attr, char *buf)
4230
{
4231
return nr_hugepages_show_common(kobj, attr, buf);
4232
}
4233
4234
static ssize_t nr_hugepages_store(struct kobject *kobj,
4235
struct kobj_attribute *attr, const char *buf, size_t len)
4236
{
4237
return nr_hugepages_store_common(false, kobj, buf, len);
4238
}
4239
HSTATE_ATTR(nr_hugepages);
4240
4241
#ifdef CONFIG_NUMA
4242
4243
/*
4244
* hstate attribute for optionally mempolicy-based constraint on persistent
4245
* huge page alloc/free.
4246
*/
4247
static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
4248
struct kobj_attribute *attr,
4249
char *buf)
4250
{
4251
return nr_hugepages_show_common(kobj, attr, buf);
4252
}
4253
4254
static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
4255
struct kobj_attribute *attr, const char *buf, size_t len)
4256
{
4257
return nr_hugepages_store_common(true, kobj, buf, len);
4258
}
4259
HSTATE_ATTR(nr_hugepages_mempolicy);
4260
#endif
4261
4262
4263
static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
4264
struct kobj_attribute *attr, char *buf)
4265
{
4266
struct hstate *h = kobj_to_hstate(kobj, NULL);
4267
return sysfs_emit(buf, "%lu\n", h->nr_overcommit_huge_pages);
4268
}
4269
4270
static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
4271
struct kobj_attribute *attr, const char *buf, size_t count)
4272
{
4273
int err;
4274
unsigned long input;
4275
struct hstate *h = kobj_to_hstate(kobj, NULL);
4276
4277
if (hstate_is_gigantic(h))
4278
return -EINVAL;
4279
4280
err = kstrtoul(buf, 10, &input);
4281
if (err)
4282
return err;
4283
4284
spin_lock_irq(&hugetlb_lock);
4285
h->nr_overcommit_huge_pages = input;
4286
spin_unlock_irq(&hugetlb_lock);
4287
4288
return count;
4289
}
4290
HSTATE_ATTR(nr_overcommit_hugepages);
4291
4292
static ssize_t free_hugepages_show(struct kobject *kobj,
4293
struct kobj_attribute *attr, char *buf)
4294
{
4295
struct hstate *h;
4296
unsigned long free_huge_pages;
4297
int nid;
4298
4299
h = kobj_to_hstate(kobj, &nid);
4300
if (nid == NUMA_NO_NODE)
4301
free_huge_pages = h->free_huge_pages;
4302
else
4303
free_huge_pages = h->free_huge_pages_node[nid];
4304
4305
return sysfs_emit(buf, "%lu\n", free_huge_pages);
4306
}
4307
HSTATE_ATTR_RO(free_hugepages);
4308
4309
static ssize_t resv_hugepages_show(struct kobject *kobj,
4310
struct kobj_attribute *attr, char *buf)
4311
{
4312
struct hstate *h = kobj_to_hstate(kobj, NULL);
4313
return sysfs_emit(buf, "%lu\n", h->resv_huge_pages);
4314
}
4315
HSTATE_ATTR_RO(resv_hugepages);
4316
4317
static ssize_t surplus_hugepages_show(struct kobject *kobj,
4318
struct kobj_attribute *attr, char *buf)
4319
{
4320
struct hstate *h;
4321
unsigned long surplus_huge_pages;
4322
int nid;
4323
4324
h = kobj_to_hstate(kobj, &nid);
4325
if (nid == NUMA_NO_NODE)
4326
surplus_huge_pages = h->surplus_huge_pages;
4327
else
4328
surplus_huge_pages = h->surplus_huge_pages_node[nid];
4329
4330
return sysfs_emit(buf, "%lu\n", surplus_huge_pages);
4331
}
4332
HSTATE_ATTR_RO(surplus_hugepages);
4333
4334
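/*
 * sysfs "demote" attribute: writing N demotes up to N free huge pages
 * of this size (optionally per node) to the size set via demote_size.
 */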
static ssize_t demote_store(struct kobject *kobj,
4335
struct kobj_attribute *attr, const char *buf, size_t len)
4336
{
4337
unsigned long nr_demote;
4338
unsigned long nr_available;
4339
nodemask_t nodes_allowed, *n_mask;
4340
struct hstate *h;
4341
int err;
4342
int nid;
4343
4344
err = kstrtoul(buf, 10, &nr_demote);
4345
if (err)
4346
return err;
4347
h = kobj_to_hstate(kobj, &nid);
4348
4349
if (nid != NUMA_NO_NODE) {
4350
init_nodemask_of_node(&nodes_allowed, nid);
4351
n_mask = &nodes_allowed;
4352
} else {
4353
n_mask = &node_states[N_MEMORY];
4354
}
4355
4356
/* Synchronize with other sysfs operations modifying huge pages */
4357
mutex_lock(&h->resize_lock);
4358
spin_lock_irq(&hugetlb_lock);
4359
4360
while (nr_demote) {
4361
long rc;
4362
4363
/*
4364
* Check for available pages to demote each time through the
4365
* loop as demote_pool_huge_page will drop hugetlb_lock.
4366
*/
4367
if (nid != NUMA_NO_NODE)
4368
nr_available = h->free_huge_pages_node[nid];
4369
else
4370
nr_available = h->free_huge_pages;
4371
nr_available -= h->resv_huge_pages;
4372
if (!nr_available)
4373
break;
4374
4375
rc = demote_pool_huge_page(h, n_mask, nr_demote);
4376
if (rc < 0) {
4377
err = rc;
4378
break;
4379
}
4380
4381
nr_demote -= rc;
4382
}
4383
4384
spin_unlock_irq(&hugetlb_lock);
4385
mutex_unlock(&h->resize_lock);
4386
4387
if (err)
4388
return err;
4389
return len;
4390
}
4391
HSTATE_ATTR_WO(demote);
4392
4393
static ssize_t demote_size_show(struct kobject *kobj,
4394
struct kobj_attribute *attr, char *buf)
4395
{
4396
struct hstate *h = kobj_to_hstate(kobj, NULL);
4397
unsigned long demote_size = (PAGE_SIZE << h->demote_order) / SZ_1K;
4398
4399
return sysfs_emit(buf, "%lukB\n", demote_size);
4400
}
4401
4402
static ssize_t demote_size_store(struct kobject *kobj,
4403
struct kobj_attribute *attr,
4404
const char *buf, size_t count)
4405
{
4406
struct hstate *h, *demote_hstate;
4407
unsigned long demote_size;
4408
unsigned int demote_order;
4409
4410
demote_size = (unsigned long)memparse(buf, NULL);
4411
4412
demote_hstate = size_to_hstate(demote_size);
4413
if (!demote_hstate)
4414
return -EINVAL;
4415
demote_order = demote_hstate->order;
4416
if (demote_order < HUGETLB_PAGE_ORDER)
4417
return -EINVAL;
4418
4419
/* demote order must be smaller than hstate order */
4420
h = kobj_to_hstate(kobj, NULL);
4421
if (demote_order >= h->order)
4422
return -EINVAL;
4423
4424
/* resize_lock synchronizes access to demote size and writes */
4425
mutex_lock(&h->resize_lock);
4426
h->demote_order = demote_order;
4427
mutex_unlock(&h->resize_lock);
4428
4429
return count;
4430
}
4431
HSTATE_ATTR(demote_size);
4432
4433
static struct attribute *hstate_attrs[] = {
4434
&nr_hugepages_attr.attr,
4435
&nr_overcommit_hugepages_attr.attr,
4436
&free_hugepages_attr.attr,
4437
&resv_hugepages_attr.attr,
4438
&surplus_hugepages_attr.attr,
4439
#ifdef CONFIG_NUMA
4440
&nr_hugepages_mempolicy_attr.attr,
4441
#endif
4442
NULL,
4443
};
4444
4445
static const struct attribute_group hstate_attr_group = {
4446
.attrs = hstate_attrs,
4447
};
4448
4449
static struct attribute *hstate_demote_attrs[] = {
4450
&demote_size_attr.attr,
4451
&demote_attr.attr,
4452
NULL,
4453
};
4454
4455
static const struct attribute_group hstate_demote_attr_group = {
4456
.attrs = hstate_demote_attrs,
4457
};
4458
4459
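/*
 * Create the "hugepages-<size>kB" kobject for @h under @parent and
 * populate it with @hstate_attr_group, adding the demote attributes
 * when the hstate has a demote order.
 */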
static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
4460
struct kobject **hstate_kobjs,
4461
const struct attribute_group *hstate_attr_group)
4462
{
4463
int retval;
4464
int hi = hstate_index(h);
4465
4466
hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
4467
if (!hstate_kobjs[hi])
4468
return -ENOMEM;
4469
4470
retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
4471
if (retval) {
4472
kobject_put(hstate_kobjs[hi]);
4473
hstate_kobjs[hi] = NULL;
4474
return retval;
4475
}
4476
4477
if (h->demote_order) {
4478
retval = sysfs_create_group(hstate_kobjs[hi],
4479
&hstate_demote_attr_group);
4480
if (retval) {
4481
pr_warn("HugeTLB unable to create demote interfaces for %s\n", h->name);
4482
sysfs_remove_group(hstate_kobjs[hi], hstate_attr_group);
4483
kobject_put(hstate_kobjs[hi]);
4484
hstate_kobjs[hi] = NULL;
4485
return retval;
4486
}
4487
}
4488
4489
return 0;
4490
}
4491
4492
#ifdef CONFIG_NUMA
4493
static bool hugetlb_sysfs_initialized __ro_after_init;
4494
4495
/*
4496
* node_hstate/s - associate per node hstate attributes, via their kobjects,
4497
* with node devices in node_devices[] using a parallel array. The array
4498
* index of a node device or _hstate == node id.
4499
* This is here to avoid any static dependency of the node device driver, in
4500
* the base kernel, on the hugetlb module.
4501
*/
4502
struct node_hstate {
4503
struct kobject *hugepages_kobj;
4504
struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
4505
};
4506
static struct node_hstate node_hstates[MAX_NUMNODES];
4507
4508
/*
4509
* A subset of global hstate attributes for node devices
4510
*/
4511
static struct attribute *per_node_hstate_attrs[] = {
4512
&nr_hugepages_attr.attr,
4513
&free_hugepages_attr.attr,
4514
&surplus_hugepages_attr.attr,
4515
NULL,
4516
};
4517
4518
static const struct attribute_group per_node_hstate_attr_group = {
4519
.attrs = per_node_hstate_attrs,
4520
};
4521
4522
/*
4523
* kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
4524
* Returns node id via non-NULL nidp.
4525
*/
4526
static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
4527
{
4528
int nid;
4529
4530
for (nid = 0; nid < nr_node_ids; nid++) {
4531
struct node_hstate *nhs = &node_hstates[nid];
4532
int i;
4533
for (i = 0; i < HUGE_MAX_HSTATE; i++)
4534
if (nhs->hstate_kobjs[i] == kobj) {
4535
if (nidp)
4536
*nidp = nid;
4537
return &hstates[i];
4538
}
4539
}
4540
4541
BUG();
4542
return NULL;
4543
}
4544
4545
/*
4546
* Unregister hstate attributes from a single node device.
4547
* No-op if no hstate attributes attached.
4548
*/
4549
void hugetlb_unregister_node(struct node *node)
4550
{
4551
struct hstate *h;
4552
struct node_hstate *nhs = &node_hstates[node->dev.id];
4553
4554
if (!nhs->hugepages_kobj)
4555
return; /* no hstate attributes */
4556
4557
for_each_hstate(h) {
4558
int idx = hstate_index(h);
4559
struct kobject *hstate_kobj = nhs->hstate_kobjs[idx];
4560
4561
if (!hstate_kobj)
4562
continue;
4563
if (h->demote_order)
4564
sysfs_remove_group(hstate_kobj, &hstate_demote_attr_group);
4565
sysfs_remove_group(hstate_kobj, &per_node_hstate_attr_group);
4566
kobject_put(hstate_kobj);
4567
nhs->hstate_kobjs[idx] = NULL;
4568
}
4569
4570
kobject_put(nhs->hugepages_kobj);
4571
nhs->hugepages_kobj = NULL;
4572
}
4573
4574
4575
/*
4576
* Register hstate attributes for a single node device.
4577
* No-op if attributes already registered.
4578
*/
4579
void hugetlb_register_node(struct node *node)
4580
{
4581
struct hstate *h;
4582
struct node_hstate *nhs = &node_hstates[node->dev.id];
4583
int err;
4584
4585
if (!hugetlb_sysfs_initialized)
4586
return;
4587
4588
if (nhs->hugepages_kobj)
4589
return; /* already allocated */
4590
4591
nhs->hugepages_kobj = kobject_create_and_add("hugepages",
4592
&node->dev.kobj);
4593
if (!nhs->hugepages_kobj)
4594
return;
4595
4596
for_each_hstate(h) {
4597
err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
4598
nhs->hstate_kobjs,
4599
&per_node_hstate_attr_group);
4600
if (err) {
4601
pr_err("HugeTLB: Unable to add hstate %s for node %d\n",
4602
h->name, node->dev.id);
4603
hugetlb_unregister_node(node);
4604
break;
4605
}
4606
}
4607
}
4608
4609
/*
4610
* hugetlb init time: register hstate attributes for all registered node
4611
* devices of nodes that have memory. All on-line nodes should have
4612
* registered their associated device by this time.
4613
*/
4614
static void __init hugetlb_register_all_nodes(void)
4615
{
4616
int nid;
4617
4618
for_each_online_node(nid)
4619
hugetlb_register_node(node_devices[nid]);
4620
}
4621
#else /* !CONFIG_NUMA */
4622
4623
static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
4624
{
4625
BUG();
4626
if (nidp)
4627
*nidp = -1;
4628
return NULL;
4629
}
4630
4631
static void hugetlb_register_all_nodes(void) { }
4632
4633
#endif
4634
4635
static void __init hugetlb_sysfs_init(void)
4636
{
4637
struct hstate *h;
4638
int err;
4639
4640
hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
4641
if (!hugepages_kobj)
4642
return;
4643
4644
for_each_hstate(h) {
4645
err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
4646
hstate_kobjs, &hstate_attr_group);
4647
if (err)
4648
pr_err("HugeTLB: Unable to add hstate %s\n", h->name);
4649
}
4650
4651
#ifdef CONFIG_NUMA
4652
hugetlb_sysfs_initialized = true;
4653
#endif
4654
hugetlb_register_all_nodes();
4655
}
4656
4657
#ifdef CONFIG_SYSCTL
4658
static void hugetlb_sysctl_init(void);
4659
#else
4660
static inline void hugetlb_sysctl_init(void) { }
4661
#endif
4662
4663
static int __init hugetlb_init(void)
4664
{
4665
int i;
4666
4667
BUILD_BUG_ON(sizeof_field(struct page, private) * BITS_PER_BYTE <
4668
__NR_HPAGEFLAGS);
4669
BUILD_BUG_ON_INVALID(HUGETLB_PAGE_ORDER > MAX_FOLIO_ORDER);
4670
4671
if (!hugepages_supported()) {
4672
if (hugetlb_max_hstate || default_hstate_max_huge_pages)
4673
pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n");
4674
return 0;
4675
}
4676
4677
/*
4678
* Make sure HPAGE_SIZE (HUGETLB_PAGE_ORDER) hstate exists. Some
4679
* architectures depend on setup being done here.
4680
*/
4681
hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
4682
if (!parsed_default_hugepagesz) {
4683
/*
4684
* If we did not parse a default huge page size, set
4685
* default_hstate_idx to HPAGE_SIZE hstate. And, if the
4686
* number of huge pages for this default size was implicitly
4687
* specified, set that here as well.
4688
* Note that the implicit setting will overwrite an explicit
4689
* setting. A warning will be printed in this case.
4690
*/
4691
default_hstate_idx = hstate_index(size_to_hstate(HPAGE_SIZE));
4692
if (default_hstate_max_huge_pages) {
4693
if (default_hstate.max_huge_pages) {
4694
char buf[32];
4695
4696
string_get_size(huge_page_size(&default_hstate),
4697
1, STRING_UNITS_2, buf, 32);
4698
pr_warn("HugeTLB: Ignoring hugepages=%lu associated with %s page size\n",
4699
default_hstate.max_huge_pages, buf);
4700
pr_warn("HugeTLB: Using hugepages=%lu for number of default huge pages\n",
4701
default_hstate_max_huge_pages);
4702
}
4703
default_hstate.max_huge_pages =
4704
default_hstate_max_huge_pages;
4705
4706
for_each_online_node(i)
4707
default_hstate.max_huge_pages_node[i] =
4708
default_hugepages_in_node[i];
4709
}
4710
}
4711
4712
hugetlb_cma_check();
4713
hugetlb_init_hstates();
4714
gather_bootmem_prealloc();
4715
report_hugepages();
4716
4717
hugetlb_sysfs_init();
4718
hugetlb_cgroup_file_init();
4719
hugetlb_sysctl_init();
4720
4721
#ifdef CONFIG_SMP
4722
num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
4723
#else
4724
num_fault_mutexes = 1;
4725
#endif
4726
hugetlb_fault_mutex_table =
4727
kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
4728
GFP_KERNEL);
4729
BUG_ON(!hugetlb_fault_mutex_table);
4730
4731
for (i = 0; i < num_fault_mutexes; i++)
4732
mutex_init(&hugetlb_fault_mutex_table[i]);
4733
return 0;
4734
}
4735
subsys_initcall(hugetlb_init);
4736
4737
/* Overwritten by architectures with more huge page sizes */
4738
bool __init __attribute((weak)) arch_hugetlb_valid_size(unsigned long size)
4739
{
4740
return size == HPAGE_SIZE;
4741
}
4742
4743
void __init hugetlb_add_hstate(unsigned int order)
4744
{
4745
struct hstate *h;
4746
unsigned long i;
4747
4748
if (size_to_hstate(PAGE_SIZE << order)) {
4749
return;
4750
}
4751
BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
4752
BUG_ON(order < order_base_2(__NR_USED_SUBPAGE));
4753
WARN_ON(order > MAX_FOLIO_ORDER);
4754
h = &hstates[hugetlb_max_hstate++];
4755
__mutex_init(&h->resize_lock, "resize mutex", &h->resize_key);
4756
h->order = order;
4757
h->mask = ~(huge_page_size(h) - 1);
4758
for (i = 0; i < MAX_NUMNODES; ++i)
4759
INIT_LIST_HEAD(&h->hugepage_freelists[i]);
4760
INIT_LIST_HEAD(&h->hugepage_activelist);
4761
snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
4762
huge_page_size(h)/SZ_1K);
4763
4764
parsed_hstate = h;
4765
}
4766
4767
bool __init __weak hugetlb_node_alloc_supported(void)
4768
{
4769
return true;
4770
}
4771
4772
static void __init hugepages_clear_pages_in_node(void)
4773
{
4774
if (!hugetlb_max_hstate) {
4775
default_hstate_max_huge_pages = 0;
4776
memset(default_hugepages_in_node, 0,
4777
sizeof(default_hugepages_in_node));
4778
} else {
4779
parsed_hstate->max_huge_pages = 0;
4780
memset(parsed_hstate->max_huge_pages_node, 0,
4781
sizeof(parsed_hstate->max_huge_pages_node));
4782
}
4783
}
4784
4785
static __init int hugetlb_add_param(char *s, int (*setup)(char *))
4786
{
4787
size_t len;
4788
char *p;
4789
4790
if (hugetlb_param_index >= HUGE_MAX_CMDLINE_ARGS)
4791
return -EINVAL;
4792
4793
len = strlen(s) + 1;
4794
if (len + hstate_cmdline_index > sizeof(hstate_cmdline_buf))
4795
return -EINVAL;
4796
4797
p = &hstate_cmdline_buf[hstate_cmdline_index];
4798
memcpy(p, s, len);
4799
hstate_cmdline_index += len;
4800
4801
hugetlb_params[hugetlb_param_index].val = p;
4802
hugetlb_params[hugetlb_param_index].setup = setup;
4803
4804
hugetlb_param_index++;
4805
4806
return 0;
4807
}
4808
4809
static __init void hugetlb_parse_params(void)
4810
{
4811
int i;
4812
struct hugetlb_cmdline *hcp;
4813
4814
for (i = 0; i < hugetlb_param_index; i++) {
4815
hcp = &hugetlb_params[i];
4816
4817
hcp->setup(hcp->val);
4818
}
4819
4820
hugetlb_cma_validate_params();
4821
}
4822
4823
/*
4824
* hugepages command line processing
4825
* hugepages normally follows a valid hugepagesz or default_hugepagesz
4826
* specification. If not, ignore the hugepages value. hugepages can also
4827
* be the first huge page command line option in which case it implicitly
4828
* specifies the number of huge pages for the default size.
4829
*/
4830
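/*
 * For example (kernel command line):
 *   hugepagesz=2M hugepages=512       512 pages of the preceding size
 *   hugepagesz=1G hugepages=0:2,1:2   per-node counts, <node>:<count>
 */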
static int __init hugepages_setup(char *s)
4831
{
4832
unsigned long *mhp;
4833
static unsigned long *last_mhp;
4834
int node = NUMA_NO_NODE;
4835
int count;
4836
unsigned long tmp;
4837
char *p = s;
4838
4839
if (!parsed_valid_hugepagesz) {
4840
pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s);
4841
parsed_valid_hugepagesz = true;
4842
return -EINVAL;
4843
}
4844
4845
/*
4846
* !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter
4847
* yet, so this hugepages= parameter goes to the "default hstate".
4848
* Otherwise, it goes with the previously parsed hugepagesz or
4849
* default_hugepagesz.
4850
*/
4851
else if (!hugetlb_max_hstate)
4852
mhp = &default_hstate_max_huge_pages;
4853
else
4854
mhp = &parsed_hstate->max_huge_pages;
4855
4856
if (mhp == last_mhp) {
4857
pr_warn("HugeTLB: hugepages= specified twice without interleaving hugepagesz=, ignoring hugepages=%s\n", s);
4858
return 1;
4859
}
4860
4861
while (*p) {
4862
count = 0;
4863
if (sscanf(p, "%lu%n", &tmp, &count) != 1)
4864
goto invalid;
4865
/* Parameter is node format */
4866
if (p[count] == ':') {
4867
if (!hugetlb_node_alloc_supported()) {
4868
pr_warn("HugeTLB: architecture can't support node specific alloc, ignoring!\n");
4869
return 1;
4870
}
4871
if (tmp >= MAX_NUMNODES || !node_online(tmp))
4872
goto invalid;
4873
node = array_index_nospec(tmp, MAX_NUMNODES);
4874
p += count + 1;
4875
/* Parse hugepages */
4876
if (sscanf(p, "%lu%n", &tmp, &count) != 1)
4877
goto invalid;
4878
if (!hugetlb_max_hstate)
4879
default_hugepages_in_node[node] = tmp;
4880
else
4881
parsed_hstate->max_huge_pages_node[node] = tmp;
4882
*mhp += tmp;
4883
/* Go to parse the next node */
4884
if (p[count] == ',')
4885
p += count + 1;
4886
else
4887
break;
4888
} else {
4889
if (p != s)
4890
goto invalid;
4891
*mhp = tmp;
4892
break;
4893
}
4894
}
4895
4896
last_mhp = mhp;
4897
4898
return 0;
4899
4900
invalid:
4901
pr_warn("HugeTLB: Invalid hugepages parameter %s\n", p);
4902
hugepages_clear_pages_in_node();
4903
return -EINVAL;
4904
}
4905
hugetlb_early_param("hugepages", hugepages_setup);
4906
4907
/*
4908
* hugepagesz command line processing
4909
* A specific huge page size can only be specified once with hugepagesz.
4910
* hugepagesz is followed by hugepages on the command line. The global
4911
* variable 'parsed_valid_hugepagesz' is used to determine if the prior
4912
* hugepagesz argument was valid.
4913
*/
4914
static int __init hugepagesz_setup(char *s)
4915
{
4916
unsigned long size;
4917
struct hstate *h;
4918
4919
parsed_valid_hugepagesz = false;
4920
size = (unsigned long)memparse(s, NULL);
4921
4922
if (!arch_hugetlb_valid_size(size)) {
4923
pr_err("HugeTLB: unsupported hugepagesz=%s\n", s);
4924
return -EINVAL;
4925
}
4926
4927
h = size_to_hstate(size);
4928
if (h) {
4929
/*
4930
* hstate for this size already exists. This is normally
4931
* an error, but is allowed if the existing hstate is the
4932
* default hstate. More specifically, it is only allowed if
4933
* the number of huge pages for the default hstate was not
4934
* previously specified.
4935
*/
4936
if (!parsed_default_hugepagesz || h != &default_hstate ||
4937
default_hstate.max_huge_pages) {
4938
pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s);
4939
return -EINVAL;
4940
}
4941
4942
/*
4943
* No need to call hugetlb_add_hstate() as hstate already
4944
* exists. But, do set parsed_hstate so that a following
4945
* hugepages= parameter will be applied to this hstate.
4946
*/
4947
parsed_hstate = h;
4948
parsed_valid_hugepagesz = true;
4949
return 0;
4950
}
4951
4952
hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
4953
parsed_valid_hugepagesz = true;
4954
return 0;
4955
}
4956
hugetlb_early_param("hugepagesz", hugepagesz_setup);
4957
4958
/*
4959
* default_hugepagesz command line input
4960
* Only one instance of default_hugepagesz allowed on command line.
4961
*/
4962
static int __init default_hugepagesz_setup(char *s)
4963
{
4964
unsigned long size;
4965
int i;
4966
4967
parsed_valid_hugepagesz = false;
4968
if (parsed_default_hugepagesz) {
4969
pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s);
4970
return -EINVAL;
4971
}
4972
4973
size = (unsigned long)memparse(s, NULL);
4974
4975
if (!arch_hugetlb_valid_size(size)) {
4976
pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s);
4977
return -EINVAL;
4978
}
4979
4980
hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
4981
parsed_valid_hugepagesz = true;
4982
parsed_default_hugepagesz = true;
4983
default_hstate_idx = hstate_index(size_to_hstate(size));
4984
4985
/*
4986
* The number of default huge pages (for this size) could have been
4987
* specified as the first hugetlb parameter: hugepages=X. If so,
4988
* then default_hstate_max_huge_pages is set. If the default huge
4989
* page size is gigantic (> MAX_PAGE_ORDER), then the pages must be
4990
* allocated here from the bootmem allocator.
4991
*/
4992
if (default_hstate_max_huge_pages) {
4993
default_hstate.max_huge_pages = default_hstate_max_huge_pages;
4994
/*
4995
* Since this is an early parameter, we can't check
4996
* NUMA node state yet, so loop through MAX_NUMNODES.
4997
*/
4998
for (i = 0; i < MAX_NUMNODES; i++) {
4999
if (default_hugepages_in_node[i] != 0)
5000
default_hstate.max_huge_pages_node[i] =
5001
default_hugepages_in_node[i];
5002
}
5003
default_hstate_max_huge_pages = 0;
5004
}
5005
5006
return 0;
5007
}
5008
hugetlb_early_param("default_hugepagesz", default_hugepagesz_setup);
5009
5010
void __init hugetlb_bootmem_set_nodes(void)
5011
{
5012
int i, nid;
5013
unsigned long start_pfn, end_pfn;
5014
5015
if (!nodes_empty(hugetlb_bootmem_nodes))
5016
return;
5017
5018
for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
5019
if (end_pfn > start_pfn)
5020
node_set(nid, hugetlb_bootmem_nodes);
5021
}
5022
}
5023
5024
static bool __hugetlb_bootmem_allocated __initdata;
5025
5026
bool __init hugetlb_bootmem_allocated(void)
5027
{
5028
return __hugetlb_bootmem_allocated;
5029
}
5030
5031
void __init hugetlb_bootmem_alloc(void)
5032
{
5033
struct hstate *h;
5034
int i;
5035
5036
if (__hugetlb_bootmem_allocated)
5037
return;
5038
5039
hugetlb_bootmem_set_nodes();
5040
5041
for (i = 0; i < MAX_NUMNODES; i++)
5042
INIT_LIST_HEAD(&huge_boot_pages[i]);
5043
5044
hugetlb_parse_params();
5045
5046
for_each_hstate(h) {
5047
h->next_nid_to_alloc = first_online_node;
5048
5049
if (hstate_is_gigantic(h))
5050
hugetlb_hstate_alloc_pages(h);
5051
}
5052
5053
__hugetlb_bootmem_allocated = true;
5054
}
5055
5056
/*
5057
* hugepage_alloc_threads command line parsing.
5058
*
5059
* When set, use this specific number of threads for the boot
5060
* allocation of hugepages.
5061
*/
5062
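/* For example, booting with hugepage_alloc_threads=16 uses 16 threads. */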
static int __init hugepage_alloc_threads_setup(char *s)
5063
{
5064
unsigned long allocation_threads;
5065
5066
if (kstrtoul(s, 0, &allocation_threads) != 0)
5067
return 1;
5068
5069
if (allocation_threads == 0)
5070
return 1;
5071
5072
hugepage_allocation_threads = allocation_threads;
5073
5074
return 1;
5075
}
5076
__setup("hugepage_alloc_threads=", hugepage_alloc_threads_setup);
5077
5078
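/*
 * Count the free huge pages of @h on the nodes allowed by the current
 * task's cpuset, further restricted by its mempolicy nodemask, if any.
 */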
static unsigned int allowed_mems_nr(struct hstate *h)
5079
{
5080
int node;
5081
unsigned int nr = 0;
5082
nodemask_t *mbind_nodemask;
5083
unsigned int *array = h->free_huge_pages_node;
5084
gfp_t gfp_mask = htlb_alloc_mask(h);
5085
5086
mbind_nodemask = policy_mbind_nodemask(gfp_mask);
5087
for_each_node_mask(node, cpuset_current_mems_allowed) {
5088
if (!mbind_nodemask || node_isset(node, *mbind_nodemask))
5089
nr += array[node];
5090
}
5091
5092
return nr;
5093
}
5094
5095
#ifdef CONFIG_SYSCTL
5096
static int proc_hugetlb_doulongvec_minmax(const struct ctl_table *table, int write,
5097
void *buffer, size_t *length,
5098
loff_t *ppos, unsigned long *out)
5099
{
5100
struct ctl_table dup_table;
5101
5102
/*
5103
* In order to avoid races with __do_proc_doulongvec_minmax(), we
5104
* can duplicate the @table and alter the duplicate of it.
5105
*/
5106
dup_table = *table;
5107
dup_table.data = out;
5108
5109
return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos);
5110
}
5111
5112
static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
5113
const struct ctl_table *table, int write,
5114
void *buffer, size_t *length, loff_t *ppos)
5115
{
5116
struct hstate *h = &default_hstate;
5117
unsigned long tmp = h->max_huge_pages;
5118
int ret;
5119
5120
if (!hugepages_supported())
5121
return -EOPNOTSUPP;
5122
5123
ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
5124
&tmp);
5125
if (ret)
5126
goto out;
5127
5128
if (write)
5129
ret = __nr_hugepages_store_common(obey_mempolicy, h,
5130
NUMA_NO_NODE, tmp, *length);
5131
out:
5132
return ret;
5133
}
5134
5135
static int hugetlb_sysctl_handler(const struct ctl_table *table, int write,
5136
void *buffer, size_t *length, loff_t *ppos)
5137
{
5138
5139
return hugetlb_sysctl_handler_common(false, table, write,
5140
buffer, length, ppos);
5141
}
5142
5143
#ifdef CONFIG_NUMA
5144
static int hugetlb_mempolicy_sysctl_handler(const struct ctl_table *table, int write,
5145
void *buffer, size_t *length, loff_t *ppos)
5146
{
5147
return hugetlb_sysctl_handler_common(true, table, write,
5148
buffer, length, ppos);
5149
}
5150
#endif /* CONFIG_NUMA */
5151
5152
static int hugetlb_overcommit_handler(const struct ctl_table *table, int write,
5153
void *buffer, size_t *length, loff_t *ppos)
5154
{
5155
struct hstate *h = &default_hstate;
5156
unsigned long tmp;
5157
int ret;
5158
5159
if (!hugepages_supported())
5160
return -EOPNOTSUPP;
5161
5162
tmp = h->nr_overcommit_huge_pages;
5163
5164
if (write && hstate_is_gigantic(h))
5165
return -EINVAL;
5166
5167
ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
5168
&tmp);
5169
if (ret)
5170
goto out;
5171
5172
if (write) {
5173
spin_lock_irq(&hugetlb_lock);
5174
h->nr_overcommit_huge_pages = tmp;
5175
spin_unlock_irq(&hugetlb_lock);
5176
}
5177
out:
5178
return ret;
5179
}
5180
5181
static const struct ctl_table hugetlb_table[] = {
5182
{
5183
.procname = "nr_hugepages",
5184
.data = NULL,
5185
.maxlen = sizeof(unsigned long),
5186
.mode = 0644,
5187
.proc_handler = hugetlb_sysctl_handler,
5188
},
5189
#ifdef CONFIG_NUMA
5190
{
5191
.procname = "nr_hugepages_mempolicy",
5192
.data = NULL,
5193
.maxlen = sizeof(unsigned long),
5194
.mode = 0644,
5195
.proc_handler = &hugetlb_mempolicy_sysctl_handler,
5196
},
5197
#endif
5198
{
5199
.procname = "hugetlb_shm_group",
5200
.data = &sysctl_hugetlb_shm_group,
5201
.maxlen = sizeof(gid_t),
5202
.mode = 0644,
5203
.proc_handler = proc_dointvec,
5204
},
5205
{
5206
.procname = "nr_overcommit_hugepages",
5207
.data = NULL,
5208
.maxlen = sizeof(unsigned long),
5209
.mode = 0644,
5210
.proc_handler = hugetlb_overcommit_handler,
5211
},
5212
};
5213
5214
static void __init hugetlb_sysctl_init(void)
5215
{
5216
register_sysctl_init("vm", hugetlb_table);
5217
}
5218
#endif /* CONFIG_SYSCTL */
5219
5220
void hugetlb_report_meminfo(struct seq_file *m)
5221
{
5222
struct hstate *h;
5223
unsigned long total = 0;
5224
5225
if (!hugepages_supported())
5226
return;
5227
5228
for_each_hstate(h) {
5229
unsigned long count = h->nr_huge_pages;
5230
5231
total += huge_page_size(h) * count;
5232
5233
if (h == &default_hstate)
5234
seq_printf(m,
5235
"HugePages_Total: %5lu\n"
5236
"HugePages_Free: %5lu\n"
5237
"HugePages_Rsvd: %5lu\n"
5238
"HugePages_Surp: %5lu\n"
5239
"Hugepagesize: %8lu kB\n",
5240
count,
5241
h->free_huge_pages,
5242
h->resv_huge_pages,
5243
h->surplus_huge_pages,
5244
huge_page_size(h) / SZ_1K);
5245
}
5246
5247
seq_printf(m, "Hugetlb: %8lu kB\n", total / SZ_1K);
5248
}
5249
5250
int hugetlb_report_node_meminfo(char *buf, int len, int nid)
5251
{
5252
struct hstate *h = &default_hstate;
5253
5254
if (!hugepages_supported())
5255
return 0;
5256
5257
return sysfs_emit_at(buf, len,
5258
"Node %d HugePages_Total: %5u\n"
5259
"Node %d HugePages_Free: %5u\n"
5260
"Node %d HugePages_Surp: %5u\n",
5261
nid, h->nr_huge_pages_node[nid],
5262
nid, h->free_huge_pages_node[nid],
5263
nid, h->surplus_huge_pages_node[nid]);
5264
}
5265
5266
void hugetlb_show_meminfo_node(int nid)
5267
{
5268
struct hstate *h;
5269
5270
if (!hugepages_supported())
5271
return;
5272
5273
for_each_hstate(h)
5274
printk("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
5275
nid,
5276
h->nr_huge_pages_node[nid],
5277
h->free_huge_pages_node[nid],
5278
h->surplus_huge_pages_node[nid],
5279
huge_page_size(h) / SZ_1K);
5280
}
5281
5282
void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
5283
{
5284
seq_printf(m, "HugetlbPages:\t%8lu kB\n",
5285
K(atomic_long_read(&mm->hugetlb_usage)));
5286
}
5287
5288
/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
5289
unsigned long hugetlb_total_pages(void)
5290
{
5291
struct hstate *h;
5292
unsigned long nr_total_pages = 0;
5293
5294
for_each_hstate(h)
5295
nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
5296
return nr_total_pages;
5297
}
5298
5299
static int hugetlb_acct_memory(struct hstate *h, long delta)
5300
{
5301
int ret = -ENOMEM;
5302
5303
if (!delta)
5304
return 0;
5305
5306
spin_lock_irq(&hugetlb_lock);
5307
/*
5308
* When cpuset is configured, it breaks the strict hugetlb page
5309
* reservation as the accounting is done on a global variable. Such
5310
* reservation is completely rubbish in the presence of cpuset because
5311
* the reservation is not checked against page availability for the
5312
* current cpuset. Applications can still potentially be OOM'ed by the
5313
* kernel for lack of free htlb pages in the cpuset that the task is in.
5314
* Attempting to enforce strict accounting with cpuset is almost
5315
* impossible (or too ugly) because cpuset is so fluid that a
5316
* task or memory node can be dynamically moved between cpusets.
5317
*
5318
* The change of semantics for shared hugetlb mapping with cpuset is
5319
* undesirable. However, in order to preserve some of the semantics,
5320
* we fall back to check against current free page availability as
5321
* a best attempt and hopefully to minimize the impact of changing
5322
* semantics that cpuset has.
5323
*
5324
* Apart from cpuset, we also have memory policy mechanism that
5325
* also determines from which node the kernel will allocate memory
5326
* in a NUMA system. So similar to cpuset, we also should consider
5327
* the memory policy of the current task. Similar to the description
5328
* above.
5329
*/
5330
if (delta > 0) {
5331
if (gather_surplus_pages(h, delta) < 0)
5332
goto out;
5333
5334
if (delta > allowed_mems_nr(h)) {
5335
return_unused_surplus_pages(h, delta);
5336
goto out;
5337
}
5338
}
5339
5340
ret = 0;
5341
if (delta < 0)
5342
return_unused_surplus_pages(h, (unsigned long) -delta);
5343
5344
out:
5345
spin_unlock_irq(&hugetlb_lock);
5346
return ret;
5347
}
5348
5349
static void hugetlb_vm_op_open(struct vm_area_struct *vma)
5350
{
5351
struct resv_map *resv = vma_resv_map(vma);
5352
5353
/*
5354
* HPAGE_RESV_OWNER indicates a private mapping.
5355
* This new VMA should share its sibling's reservation map if present.
5356
* The VMA will only ever have a valid reservation map pointer where
5357
* it is being copied for another still existing VMA. As that VMA
5358
* has a reference to the reservation map it cannot disappear until
5359
* after this open call completes. It is therefore safe to take a
5360
* new reference here without additional locking.
5361
*/
5362
if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
5363
resv_map_dup_hugetlb_cgroup_uncharge_info(resv);
5364
kref_get(&resv->refs);
5365
}
5366
5367
/*
5368
* vma_lock structure for sharable mappings is vma specific.
5369
* Clear old pointer (if copied via vm_area_dup) and allocate
5370
* new structure. Before clearing, make sure vma_lock is not
5371
* for this vma.
5372
*/
5373
if (vma->vm_flags & VM_MAYSHARE) {
5374
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
5375
5376
if (vma_lock) {
5377
if (vma_lock->vma != vma) {
5378
vma->vm_private_data = NULL;
5379
hugetlb_vma_lock_alloc(vma);
5380
} else
5381
pr_warn("HugeTLB: vma_lock already exists in %s.\n", __func__);
5382
} else
5383
hugetlb_vma_lock_alloc(vma);
5384
}
5385
}
5386
5387
static void hugetlb_vm_op_close(struct vm_area_struct *vma)
5388
{
5389
struct hstate *h = hstate_vma(vma);
5390
struct resv_map *resv;
5391
struct hugepage_subpool *spool = subpool_vma(vma);
5392
unsigned long reserve, start, end;
5393
long gbl_reserve;
5394
5395
hugetlb_vma_lock_free(vma);
5396
5397
resv = vma_resv_map(vma);
5398
if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
5399
return;
5400
5401
start = vma_hugecache_offset(h, vma, vma->vm_start);
5402
end = vma_hugecache_offset(h, vma, vma->vm_end);
5403
5404
reserve = (end - start) - region_count(resv, start, end);
5405
hugetlb_cgroup_uncharge_counter(resv, start, end);
5406
if (reserve) {
5407
/*
5408
* Decrement reserve counts. The global reserve count may be
5409
* adjusted if the subpool has a minimum size.
5410
*/
5411
gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
5412
hugetlb_acct_memory(h, -gbl_reserve);
5413
}
5414
5415
kref_put(&resv->refs, resv_map_release);
5416
}
5417
5418
static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
5419
{
5420
if (addr & ~(huge_page_mask(hstate_vma(vma))))
5421
return -EINVAL;
5422
return 0;
5423
}
5424
5425
void hugetlb_split(struct vm_area_struct *vma, unsigned long addr)
5426
{
5427
/*
5428
* PMD sharing is only possible for PUD_SIZE-aligned address ranges
5429
* in HugeTLB VMAs. If we will lose PUD_SIZE alignment due to this
5430
* split, unshare PMDs in the PUD_SIZE interval surrounding addr now.
5431
* This function is called in the middle of a VMA split operation, with
5432
* MM, VMA and rmap all write-locked to prevent concurrent page table
5433
* walks (except hardware and gup_fast()).
5434
*/
5435
vma_assert_write_locked(vma);
5436
i_mmap_assert_write_locked(vma->vm_file->f_mapping);
5437
5438
if (addr & ~PUD_MASK) {
5439
unsigned long floor = addr & PUD_MASK;
5440
unsigned long ceil = floor + PUD_SIZE;
5441
5442
if (floor >= vma->vm_start && ceil <= vma->vm_end) {
5443
/*
5444
* Locking:
5445
* Use take_locks=false here.
5446
* The file rmap lock is already held.
5447
* The hugetlb VMA lock can't be taken when we already
5448
* hold the file rmap lock, and we don't need it because
5449
* its purpose is to synchronize against concurrent page
5450
* table walks, which are not possible thanks to the
5451
* locks held by our caller.
5452
*/
5453
hugetlb_unshare_pmds(vma, floor, ceil, /* take_locks = */ false);
5454
}
5455
}
5456
}
5457
5458
static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
5459
{
5460
return huge_page_size(hstate_vma(vma));
5461
}
5462
5463
/*
5464
* We cannot handle pagefaults against hugetlb pages at all. They cause
5465
* handle_mm_fault() to try to instantiate regular-sized pages in the
5466
* hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
5467
* this far.
5468
*/
5469
static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
5470
{
5471
BUG();
5472
return 0;
5473
}
5474
5475
/*
5476
* When a new function is introduced to vm_operations_struct and added
5477
* to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
5478
* This is because under System V memory model, mappings created via
5479
* shmget/shmat with "huge page" specified are backed by hugetlbfs files,
5480
* and their original vm_ops are overwritten with shm_vm_ops.
5481
*/
5482
const struct vm_operations_struct hugetlb_vm_ops = {
5483
.fault = hugetlb_vm_op_fault,
5484
.open = hugetlb_vm_op_open,
5485
.close = hugetlb_vm_op_close,
5486
.may_split = hugetlb_vm_op_split,
5487
.pagesize = hugetlb_vm_op_pagesize,
5488
};
5489
5490
static pte_t make_huge_pte(struct vm_area_struct *vma, struct folio *folio,
5491
bool try_mkwrite)
5492
{
5493
pte_t entry = folio_mk_pte(folio, vma->vm_page_prot);
5494
unsigned int shift = huge_page_shift(hstate_vma(vma));
5495
5496
if (try_mkwrite && (vma->vm_flags & VM_WRITE)) {
5497
entry = pte_mkwrite_novma(pte_mkdirty(entry));
5498
} else {
5499
entry = pte_wrprotect(entry);
5500
}
5501
entry = pte_mkyoung(entry);
5502
entry = arch_make_huge_pte(entry, shift, vma->vm_flags);
5503
5504
return entry;
5505
}
5506
5507
static void set_huge_ptep_writable(struct vm_area_struct *vma,
5508
unsigned long address, pte_t *ptep)
5509
{
5510
pte_t entry;
5511
5512
entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(vma->vm_mm, address, ptep)));
5513
if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
5514
update_mmu_cache(vma, address, ptep);
5515
}
5516
5517
static void set_huge_ptep_maybe_writable(struct vm_area_struct *vma,
5518
unsigned long address, pte_t *ptep)
5519
{
5520
if (vma->vm_flags & VM_WRITE)
5521
set_huge_ptep_writable(vma, address, ptep);
5522
}
5523
5524
bool is_hugetlb_entry_migration(pte_t pte)
5525
{
5526
swp_entry_t swp;
5527
5528
if (huge_pte_none(pte) || pte_present(pte))
5529
return false;
5530
swp = pte_to_swp_entry(pte);
5531
if (is_migration_entry(swp))
5532
return true;
5533
else
5534
return false;
5535
}
5536
5537
bool is_hugetlb_entry_hwpoisoned(pte_t pte)
5538
{
5539
swp_entry_t swp;
5540
5541
if (huge_pte_none(pte) || pte_present(pte))
5542
return false;
5543
swp = pte_to_swp_entry(pte);
5544
if (is_hwpoison_entry(swp))
5545
return true;
5546
else
5547
return false;
5548
}
5549
5550
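/*
 * Illustrative sketch only (hypothetical helper, not used elsewhere): a
 * non-present, non-none hugetlb pte encodes a swp_entry_t, and the two
 * helpers above just classify that entry. A combined check would be:
 */
static inline bool is_hugetlb_entry_migration_or_hwpoisoned(pte_t pte)
{
	swp_entry_t swp;

	if (huge_pte_none(pte) || pte_present(pte))
		return false;
	swp = pte_to_swp_entry(pte);
	return is_migration_entry(swp) || is_hwpoison_entry(swp);
}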
static void
5551
hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr,
5552
struct folio *new_folio, pte_t old, unsigned long sz)
5553
{
5554
pte_t newpte = make_huge_pte(vma, new_folio, true);
5555
5556
__folio_mark_uptodate(new_folio);
5557
hugetlb_add_new_anon_rmap(new_folio, vma, addr);
5558
if (userfaultfd_wp(vma) && huge_pte_uffd_wp(old))
5559
newpte = huge_pte_mkuffd_wp(newpte);
5560
set_huge_pte_at(vma->vm_mm, addr, ptep, newpte, sz);
5561
hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm);
5562
folio_set_hugetlb_migratable(new_folio);
5563
}
5564
5565
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
5566
struct vm_area_struct *dst_vma,
5567
struct vm_area_struct *src_vma)
5568
{
5569
pte_t *src_pte, *dst_pte, entry;
5570
struct folio *pte_folio;
5571
unsigned long addr;
5572
bool cow = is_cow_mapping(src_vma->vm_flags);
5573
struct hstate *h = hstate_vma(src_vma);
5574
unsigned long sz = huge_page_size(h);
5575
unsigned long npages = pages_per_huge_page(h);
5576
struct mmu_notifier_range range;
5577
unsigned long last_addr_mask;
5578
int ret = 0;
5579
5580
if (cow) {
5581
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, src,
5582
src_vma->vm_start,
5583
src_vma->vm_end);
5584
mmu_notifier_invalidate_range_start(&range);
5585
vma_assert_write_locked(src_vma);
5586
raw_write_seqcount_begin(&src->write_protect_seq);
5587
} else {
5588
/*
5589
* For shared mappings the vma lock must be held before
5590
* calling hugetlb_walk() in the src vma. Otherwise, the
5591
* returned ptep could go away if part of a shared pmd and
5592
* another thread calls huge_pmd_unshare.
5593
*/
5594
hugetlb_vma_lock_read(src_vma);
5595
}
5596
5597
last_addr_mask = hugetlb_mask_last_page(h);
5598
for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) {
5599
spinlock_t *src_ptl, *dst_ptl;
5600
src_pte = hugetlb_walk(src_vma, addr, sz);
5601
if (!src_pte) {
5602
addr |= last_addr_mask;
5603
continue;
5604
}
5605
dst_pte = huge_pte_alloc(dst, dst_vma, addr, sz);
5606
if (!dst_pte) {
5607
ret = -ENOMEM;
5608
break;
5609
}
5610
5611
#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
5612
/* If the pagetables are shared, there is nothing to do */
5613
if (ptdesc_pmd_is_shared(virt_to_ptdesc(dst_pte))) {
5614
addr |= last_addr_mask;
5615
continue;
5616
}
5617
#endif
5618
5619
dst_ptl = huge_pte_lock(h, dst, dst_pte);
5620
src_ptl = huge_pte_lockptr(h, src, src_pte);
5621
spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
5622
entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte);
5623
again:
5624
if (huge_pte_none(entry)) {
5625
/*
5626
* Skip if the src entry is none.
5627
*/
5628
;
5629
} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) {
5630
if (!userfaultfd_wp(dst_vma))
5631
entry = huge_pte_clear_uffd_wp(entry);
5632
set_huge_pte_at(dst, addr, dst_pte, entry, sz);
5633
} else if (unlikely(is_hugetlb_entry_migration(entry))) {
5634
swp_entry_t swp_entry = pte_to_swp_entry(entry);
5635
bool uffd_wp = pte_swp_uffd_wp(entry);
5636
5637
if (!is_readable_migration_entry(swp_entry) && cow) {
5638
/*
5639
* COW mappings require pages in both
5640
* parent and child to be set to read.
5641
*/
5642
swp_entry = make_readable_migration_entry(
5643
swp_offset(swp_entry));
5644
entry = swp_entry_to_pte(swp_entry);
5645
if (userfaultfd_wp(src_vma) && uffd_wp)
5646
entry = pte_swp_mkuffd_wp(entry);
5647
set_huge_pte_at(src, addr, src_pte, entry, sz);
5648
}
5649
if (!userfaultfd_wp(dst_vma))
5650
entry = huge_pte_clear_uffd_wp(entry);
5651
set_huge_pte_at(dst, addr, dst_pte, entry, sz);
5652
} else if (unlikely(is_pte_marker(entry))) {
5653
pte_marker marker = copy_pte_marker(
5654
pte_to_swp_entry(entry), dst_vma);
5655
5656
if (marker)
5657
set_huge_pte_at(dst, addr, dst_pte,
5658
make_pte_marker(marker), sz);
5659
} else {
5660
entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte);
5661
pte_folio = page_folio(pte_page(entry));
5662
folio_get(pte_folio);
5663
5664
/*
5665
* Failing to duplicate the anon rmap is a rare case
5666
* where we see pinned hugetlb pages while they're
5667
* prone to COW. We need to do the COW earlier during
5668
* fork.
5669
*
5670
* When pre-allocating the page or copying data, we
5671
* need to be without the pgtable locks since we could
5672
* sleep during the process.
5673
*/
5674
if (!folio_test_anon(pte_folio)) {
5675
hugetlb_add_file_rmap(pte_folio);
5676
} else if (hugetlb_try_dup_anon_rmap(pte_folio, src_vma)) {
5677
pte_t src_pte_old = entry;
5678
struct folio *new_folio;
5679
5680
spin_unlock(src_ptl);
5681
spin_unlock(dst_ptl);
5682
/* Do not use the reserve as it's privately owned */
5683
new_folio = alloc_hugetlb_folio(dst_vma, addr, false);
5684
if (IS_ERR(new_folio)) {
5685
folio_put(pte_folio);
5686
ret = PTR_ERR(new_folio);
5687
break;
5688
}
5689
ret = copy_user_large_folio(new_folio, pte_folio,
5690
addr, dst_vma);
5691
folio_put(pte_folio);
5692
if (ret) {
5693
folio_put(new_folio);
5694
break;
5695
}
5696
5697
/* Install the new hugetlb folio if src pte stable */
5698
dst_ptl = huge_pte_lock(h, dst, dst_pte);
5699
src_ptl = huge_pte_lockptr(h, src, src_pte);
5700
spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
5701
entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte);
5702
if (!pte_same(src_pte_old, entry)) {
5703
restore_reserve_on_error(h, dst_vma, addr,
5704
new_folio);
5705
folio_put(new_folio);
5706
/* huge_ptep of dst_pte won't change as in child */
5707
goto again;
5708
}
5709
hugetlb_install_folio(dst_vma, dst_pte, addr,
5710
new_folio, src_pte_old, sz);
5711
spin_unlock(src_ptl);
5712
spin_unlock(dst_ptl);
5713
continue;
5714
}
5715
5716
if (cow) {
5717
/*
5718
* No need to notify as we are downgrading page
5719
* table protection not changing it to point
5720
* to a new page.
5721
*
5722
* See Documentation/mm/mmu_notifier.rst
5723
*/
5724
huge_ptep_set_wrprotect(src, addr, src_pte);
5725
entry = huge_pte_wrprotect(entry);
5726
}
5727
5728
if (!userfaultfd_wp(dst_vma))
5729
entry = huge_pte_clear_uffd_wp(entry);
5730
5731
set_huge_pte_at(dst, addr, dst_pte, entry, sz);
5732
hugetlb_count_add(npages, dst);
5733
}
5734
spin_unlock(src_ptl);
5735
spin_unlock(dst_ptl);
5736
}
5737
5738
if (cow) {
5739
raw_write_seqcount_end(&src->write_protect_seq);
5740
mmu_notifier_invalidate_range_end(&range);
5741
} else {
5742
hugetlb_vma_unlock_read(src_vma);
5743
}
5744
5745
return ret;
5746
}
5747
5748
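/*
 * Illustrative sketch only (hypothetical helper): the "addr |= last_addr_mask"
 * pattern in the copy loop above rounds addr up to the last huge page covered
 * by the current page-table page, so the loop's "addr += sz" then jumps
 * straight to the next page-table page instead of probing every huge page in
 * between. The combined step looks like this:
 */
static inline unsigned long hugetlb_skip_to_next_pt_page(unsigned long addr,
							 unsigned long last_addr_mask,
							 unsigned long sz)
{
	return (addr | last_addr_mask) + sz;
}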
static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
5749
unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte,
5750
unsigned long sz)
5751
{
5752
bool need_clear_uffd_wp = vma_has_uffd_without_event_remap(vma);
5753
struct hstate *h = hstate_vma(vma);
5754
struct mm_struct *mm = vma->vm_mm;
5755
spinlock_t *src_ptl, *dst_ptl;
5756
pte_t pte;
5757
5758
dst_ptl = huge_pte_lock(h, mm, dst_pte);
5759
src_ptl = huge_pte_lockptr(h, mm, src_pte);
5760
5761
/*
5762
* We don't have to worry about the ordering of src and dst ptlocks
5763
* because exclusive mmap_lock (or the i_mmap_lock) prevents deadlock.
5764
*/
5765
if (src_ptl != dst_ptl)
5766
spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
5767
5768
pte = huge_ptep_get_and_clear(mm, old_addr, src_pte, sz);
5769
5770
if (need_clear_uffd_wp && pte_marker_uffd_wp(pte))
5771
huge_pte_clear(mm, new_addr, dst_pte, sz);
5772
else {
5773
if (need_clear_uffd_wp) {
5774
if (pte_present(pte))
5775
pte = huge_pte_clear_uffd_wp(pte);
5776
else if (is_swap_pte(pte))
5777
pte = pte_swp_clear_uffd_wp(pte);
5778
}
5779
set_huge_pte_at(mm, new_addr, dst_pte, pte, sz);
5780
}
5781
5782
if (src_ptl != dst_ptl)
5783
spin_unlock(src_ptl);
5784
spin_unlock(dst_ptl);
5785
}
5786
5787
int move_hugetlb_page_tables(struct vm_area_struct *vma,
5788
struct vm_area_struct *new_vma,
5789
unsigned long old_addr, unsigned long new_addr,
5790
unsigned long len)
5791
{
5792
struct hstate *h = hstate_vma(vma);
5793
struct address_space *mapping = vma->vm_file->f_mapping;
5794
unsigned long sz = huge_page_size(h);
5795
struct mm_struct *mm = vma->vm_mm;
5796
unsigned long old_end = old_addr + len;
5797
unsigned long last_addr_mask;
5798
pte_t *src_pte, *dst_pte;
5799
struct mmu_notifier_range range;
5800
bool shared_pmd = false;
5801
5802
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, old_addr,
5803
old_end);
5804
adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
5805
/*
5806
* In case of shared PMDs, we should cover the maximum possible
5807
* range.
5808
*/
5809
flush_cache_range(vma, range.start, range.end);
5810
5811
mmu_notifier_invalidate_range_start(&range);
5812
last_addr_mask = hugetlb_mask_last_page(h);
5813
/* Prevent race with file truncation */
5814
hugetlb_vma_lock_write(vma);
5815
i_mmap_lock_write(mapping);
5816
for (; old_addr < old_end; old_addr += sz, new_addr += sz) {
5817
src_pte = hugetlb_walk(vma, old_addr, sz);
5818
if (!src_pte) {
5819
old_addr |= last_addr_mask;
5820
new_addr |= last_addr_mask;
5821
continue;
5822
}
5823
if (huge_pte_none(huge_ptep_get(mm, old_addr, src_pte)))
5824
continue;
5825
5826
if (huge_pmd_unshare(mm, vma, old_addr, src_pte)) {
5827
shared_pmd = true;
5828
old_addr |= last_addr_mask;
5829
new_addr |= last_addr_mask;
5830
continue;
5831
}
5832
5833
dst_pte = huge_pte_alloc(mm, new_vma, new_addr, sz);
5834
if (!dst_pte)
5835
break;
5836
5837
move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte, sz);
5838
}
5839
5840
if (shared_pmd)
5841
flush_hugetlb_tlb_range(vma, range.start, range.end);
5842
else
5843
flush_hugetlb_tlb_range(vma, old_end - len, old_end);
5844
mmu_notifier_invalidate_range_end(&range);
5845
i_mmap_unlock_write(mapping);
5846
hugetlb_vma_unlock_write(vma);
5847
5848
return len + old_addr - old_end;
5849
}
5850
5851
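/*
 * Illustrative sketch only (hypothetical helper): if the loop above stops
 * early (e.g. huge_pte_alloc() fails), old_addr is left at the first byte
 * that was not moved, so the "len + old_addr - old_end" returned above is
 * simply the number of bytes that were successfully relocated.
 */
static inline unsigned long hugetlb_moved_bytes_sketch(unsigned long len,
						       unsigned long old_addr,
						       unsigned long old_end)
{
	return len - (old_end - old_addr);	/* == len + old_addr - old_end */
}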
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
5852
unsigned long start, unsigned long end,
5853
struct folio *folio, zap_flags_t zap_flags)
5854
{
5855
struct mm_struct *mm = vma->vm_mm;
5856
const bool folio_provided = !!folio;
5857
unsigned long address;
5858
pte_t *ptep;
5859
pte_t pte;
5860
spinlock_t *ptl;
5861
struct hstate *h = hstate_vma(vma);
5862
unsigned long sz = huge_page_size(h);
5863
bool adjust_reservation;
5864
unsigned long last_addr_mask;
5865
bool force_flush = false;
5866
5867
WARN_ON(!is_vm_hugetlb_page(vma));
5868
BUG_ON(start & ~huge_page_mask(h));
5869
BUG_ON(end & ~huge_page_mask(h));
5870
5871
/*
5872
* This is a hugetlb vma, all the pte entries should point
5873
* to huge page.
5874
*/
5875
tlb_change_page_size(tlb, sz);
5876
tlb_start_vma(tlb, vma);
5877
5878
last_addr_mask = hugetlb_mask_last_page(h);
5879
address = start;
5880
for (; address < end; address += sz) {
5881
ptep = hugetlb_walk(vma, address, sz);
5882
if (!ptep) {
5883
address |= last_addr_mask;
5884
continue;
5885
}
5886
5887
ptl = huge_pte_lock(h, mm, ptep);
5888
if (huge_pmd_unshare(mm, vma, address, ptep)) {
5889
spin_unlock(ptl);
5890
tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
5891
force_flush = true;
5892
address |= last_addr_mask;
5893
continue;
5894
}
5895
5896
pte = huge_ptep_get(mm, address, ptep);
5897
if (huge_pte_none(pte)) {
5898
spin_unlock(ptl);
5899
continue;
5900
}
5901
5902
/*
5903
* Migrating hugepage or HWPoisoned hugepage is already
5904
* unmapped and its refcount is dropped, so just clear pte here.
5905
*/
5906
if (unlikely(!pte_present(pte))) {
5907
/*
5908
* If the pte was wr-protected by uffd-wp in any of the
5909
* swap forms, meanwhile the caller does not want to
5910
* drop the uffd-wp bit in this zap, then replace the
5911
* pte with a marker.
5912
*/
5913
if (pte_swp_uffd_wp_any(pte) &&
5914
!(zap_flags & ZAP_FLAG_DROP_MARKER))
5915
set_huge_pte_at(mm, address, ptep,
5916
make_pte_marker(PTE_MARKER_UFFD_WP),
5917
sz);
5918
else
5919
huge_pte_clear(mm, address, ptep, sz);
5920
spin_unlock(ptl);
5921
continue;
5922
}
5923
5924
/*
5925
* If a folio is supplied, it is because a specific
5926
* folio is being unmapped, not a range. Ensure the folio we
5927
* are about to unmap is the actual folio of interest.
5928
*/
5929
if (folio_provided) {
5930
if (folio != page_folio(pte_page(pte))) {
5931
spin_unlock(ptl);
5932
continue;
5933
}
5934
/*
5935
* Mark the VMA as having unmapped its page so that
5936
* future faults in this VMA will fail rather than
5937
* looking like data was lost
5938
*/
5939
set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
5940
} else {
5941
folio = page_folio(pte_page(pte));
5942
}
5943
5944
pte = huge_ptep_get_and_clear(mm, address, ptep, sz);
5945
tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
5946
if (huge_pte_dirty(pte))
5947
folio_mark_dirty(folio);
5948
/* Leave a uffd-wp pte marker if needed */
5949
if (huge_pte_uffd_wp(pte) &&
5950
!(zap_flags & ZAP_FLAG_DROP_MARKER))
5951
set_huge_pte_at(mm, address, ptep,
5952
make_pte_marker(PTE_MARKER_UFFD_WP),
5953
sz);
5954
hugetlb_count_sub(pages_per_huge_page(h), mm);
5955
hugetlb_remove_rmap(folio);
5956
spin_unlock(ptl);
5957
5958
/*
5959
* Restore the reservation for anonymous page, otherwise the
5960
* backing page could be stolen by someone.
5961
* If we are freeing a surplus page, do not set the restore
5962
* reservation bit.
5963
*/
5964
adjust_reservation = false;
5965
5966
spin_lock_irq(&hugetlb_lock);
5967
if (!h->surplus_huge_pages && __vma_private_lock(vma) &&
5968
folio_test_anon(folio)) {
5969
folio_set_hugetlb_restore_reserve(folio);
5970
/* Reservation to be adjusted after the spin lock */
5971
adjust_reservation = true;
5972
}
5973
spin_unlock_irq(&hugetlb_lock);
5974
5975
/*
5976
* Adjust the reservation for the region that will have the
5977
* reserve restored. Keep in mind that vma_needs_reservation() changes
5978
* resv->adds_in_progress if it succeeds. If this is not done,
5979
* do_exit() will not see it, and will keep the reservation
5980
* forever.
5981
*/
5982
if (adjust_reservation) {
5983
int rc = vma_needs_reservation(h, vma, address);
5984
5985
if (rc < 0)
5986
/* Presumably allocate_file_region_entries failed
5987
* to allocate a file_region struct. Clear
5988
* hugetlb_restore_reserve so that global reserve
5989
* count will not be incremented by free_huge_folio.
5990
* Act as if we consumed the reservation.
5991
*/
5992
folio_clear_hugetlb_restore_reserve(folio);
5993
else if (rc)
5994
vma_add_reservation(h, vma, address);
5995
}
5996
5997
tlb_remove_page_size(tlb, folio_page(folio, 0),
5998
folio_size(folio));
5999
/*
6000
* If we were instructed to unmap a specific folio, we're done.
6001
*/
6002
if (folio_provided)
6003
break;
6004
}
6005
tlb_end_vma(tlb, vma);
6006
6007
/*
6008
* If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
6009
* could defer the flush until now, since by holding i_mmap_rwsem we
6010
* guaranteed that the last reference would not be dropped. But we must
6011
* do the flushing before we return, as otherwise i_mmap_rwsem will be
6012
* dropped and the last reference to the shared PMDs page might be
6013
* dropped as well.
6014
*
6015
* In theory we could defer the freeing of the PMD pages as well, but
6016
* huge_pmd_unshare() relies on the exact page_count for the PMD page to
6017
* detect sharing, so we cannot defer the release of the page either.
6018
* Instead, do flush now.
6019
*/
6020
if (force_flush)
6021
tlb_flush_mmu_tlbonly(tlb);
6022
}
6023
6024
void __hugetlb_zap_begin(struct vm_area_struct *vma,
6025
unsigned long *start, unsigned long *end)
6026
{
6027
if (!vma->vm_file) /* hugetlbfs_file_mmap error */
6028
return;
6029
6030
adjust_range_if_pmd_sharing_possible(vma, start, end);
6031
hugetlb_vma_lock_write(vma);
6032
if (vma->vm_file)
6033
i_mmap_lock_write(vma->vm_file->f_mapping);
6034
}
6035
6036
void __hugetlb_zap_end(struct vm_area_struct *vma,
6037
struct zap_details *details)
6038
{
6039
zap_flags_t zap_flags = details ? details->zap_flags : 0;
6040
6041
if (!vma->vm_file) /* hugetlbfs_file_mmap error */
6042
return;
6043
6044
if (zap_flags & ZAP_FLAG_UNMAP) { /* final unmap */
6045
/*
6046
* Unlock and free the vma lock before releasing i_mmap_rwsem.
6047
* When the vma_lock is freed, this makes the vma ineligible
6048
* for pmd sharing. And, i_mmap_rwsem is required to set up
6049
* pmd sharing. This is important as page tables for this
6050
* unmapped range will be asynchronously deleted. If the page
6051
* tables are shared, there will be issues when accessed by
6052
* someone else.
6053
*/
6054
__hugetlb_vma_unlock_write_free(vma);
6055
} else {
6056
hugetlb_vma_unlock_write(vma);
6057
}
6058
6059
if (vma->vm_file)
6060
i_mmap_unlock_write(vma->vm_file->f_mapping);
6061
}
6062
6063
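/*
 * Illustrative pairing sketch only (the caller shown is an assumption; the
 * real callers sit in the generic zap path outside this file):
 * __hugetlb_zap_begin() widens the range for possible PMD sharing and takes
 * the vma lock plus i_mmap_rwsem, and __hugetlb_zap_end() drops them again,
 * freeing the vma lock entirely on the final unmap.
 */
static inline void hugetlb_zap_pairing_sketch(struct vm_area_struct *vma,
					      unsigned long start,
					      unsigned long end,
					      struct zap_details *details)
{
	__hugetlb_zap_begin(vma, &start, &end);
	/* ... the actual unmap of [start, end) would happen here ... */
	__hugetlb_zap_end(vma, details);
}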
void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
6064
unsigned long end, struct folio *folio,
6065
zap_flags_t zap_flags)
6066
{
6067
struct mmu_notifier_range range;
6068
struct mmu_gather tlb;
6069
6070
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
6071
start, end);
6072
adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
6073
mmu_notifier_invalidate_range_start(&range);
6074
tlb_gather_mmu(&tlb, vma->vm_mm);
6075
6076
__unmap_hugepage_range(&tlb, vma, start, end,
6077
folio, zap_flags);
6078
6079
mmu_notifier_invalidate_range_end(&range);
6080
tlb_finish_mmu(&tlb);
6081
}
6082
6083
/*
6084
* This is called when the original mapper is failing to COW a MAP_PRIVATE
6085
* mapping it owns the reserve page for. The intention is to unmap the page
6086
* from other VMAs and let the children be SIGKILLed if they are faulting the
6087
* same region.
6088
*/
6089
static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
6090
struct folio *folio, unsigned long address)
6091
{
6092
struct hstate *h = hstate_vma(vma);
6093
struct vm_area_struct *iter_vma;
6094
struct address_space *mapping;
6095
pgoff_t pgoff;
6096
6097
/*
6098
* vm_pgoff is in PAGE_SIZE units, hence the different calculation
6099
* from page cache lookup which is in HPAGE_SIZE units.
6100
*/
6101
address = address & huge_page_mask(h);
6102
pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
6103
vma->vm_pgoff;
6104
mapping = vma->vm_file->f_mapping;
6105
6106
/*
6107
* Take the mapping lock for the duration of the table walk. As
6108
* this mapping should be shared between all the VMAs,
6109
* __unmap_hugepage_range() is called as the lock is already held
6110
*/
6111
i_mmap_lock_write(mapping);
6112
vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
6113
/* Do not unmap the current VMA */
6114
if (iter_vma == vma)
6115
continue;
6116
6117
/*
6118
* Shared VMAs have their own reserves and do not affect
6119
* MAP_PRIVATE accounting but it is possible that a shared
6120
* VMA is using the same page so check and skip such VMAs.
6121
*/
6122
if (iter_vma->vm_flags & VM_MAYSHARE)
6123
continue;
6124
6125
/*
6126
* Unmap the page from other VMAs without their own reserves.
6127
* They get marked to be SIGKILLed if they fault in these
6128
* areas. This is because a future no-page fault on this VMA
6129
* could insert a zeroed page instead of the data existing
6130
* from the time of fork. This would look like data corruption
6131
*/
6132
if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
6133
unmap_hugepage_range(iter_vma, address,
6134
address + huge_page_size(h),
6135
folio, 0);
6136
}
6137
i_mmap_unlock_write(mapping);
6138
}
6139
6140
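/*
 * Illustrative sketch only (hypothetical helpers): unmap_ref_private() above
 * keys the rmap interval tree in PAGE_SIZE units (the units of vm_pgoff),
 * whereas hugetlb cache offsets such as vma_hugecache_offset() are in
 * huge-page-sized units; the two index spaces differ by huge_page_order().
 */
static inline pgoff_t hugetlb_rmap_index_sketch(struct vm_area_struct *vma,
						unsigned long address)
{
	return ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
}

static inline pgoff_t hugetlb_cache_index_sketch(struct hstate *h,
						 pgoff_t rmap_index)
{
	return rmap_index >> huge_page_order(h);
}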
/*
6141
* hugetlb_wp() should be called with page lock of the original hugepage held.
6142
* Called with hugetlb_fault_mutex_table held and pte_page locked so we
6143
* cannot race with other handlers or page migration.
6144
* Keep the pte_same checks anyway to make transition from the mutex easier.
6145
*/
6146
static vm_fault_t hugetlb_wp(struct vm_fault *vmf)
6147
{
6148
struct vm_area_struct *vma = vmf->vma;
6149
struct mm_struct *mm = vma->vm_mm;
6150
const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
6151
pte_t pte = huge_ptep_get(mm, vmf->address, vmf->pte);
6152
struct hstate *h = hstate_vma(vma);
6153
struct folio *old_folio;
6154
struct folio *new_folio;
6155
bool cow_from_owner = 0;
6156
vm_fault_t ret = 0;
6157
struct mmu_notifier_range range;
6158
6159
/*
6160
* Never handle CoW for uffd-wp protected pages. It should be only
6161
* handled when the uffd-wp protection is removed.
6162
*
6163
* Note that only the CoW optimization path (in hugetlb_no_page())
6164
* can trigger this, because hugetlb_fault() will always resolve
6165
* uffd-wp bit first.
6166
*/
6167
if (!unshare && huge_pte_uffd_wp(pte))
6168
return 0;
6169
6170
/* Let's take out MAP_SHARED mappings first. */
6171
if (vma->vm_flags & VM_MAYSHARE) {
6172
set_huge_ptep_writable(vma, vmf->address, vmf->pte);
6173
return 0;
6174
}
6175
6176
old_folio = page_folio(pte_page(pte));
6177
6178
delayacct_wpcopy_start();
6179
6180
retry_avoidcopy:
6181
/*
6182
* If no-one else is actually using this page, we're the exclusive
6183
* owner and can reuse this page.
6184
*
6185
* Note that we don't rely on the (safer) folio refcount here, because
6186
* copying the hugetlb folio when there are unexpected (temporary)
6187
* folio references could harm simple fork()+exit() users when
6188
* we run out of free hugetlb folios: we would have to kill processes
6189
* in scenarios that used to work. As a side effect, there can still
6190
* be leaks between processes, for example, with FOLL_GET users.
6191
*/
6192
if (folio_mapcount(old_folio) == 1 && folio_test_anon(old_folio)) {
6193
if (!PageAnonExclusive(&old_folio->page)) {
6194
folio_move_anon_rmap(old_folio, vma);
6195
SetPageAnonExclusive(&old_folio->page);
6196
}
6197
if (likely(!unshare))
6198
set_huge_ptep_maybe_writable(vma, vmf->address,
6199
vmf->pte);
6200
6201
delayacct_wpcopy_end();
6202
return 0;
6203
}
6204
VM_BUG_ON_PAGE(folio_test_anon(old_folio) &&
6205
PageAnonExclusive(&old_folio->page), &old_folio->page);
6206
6207
/*
6208
* If the process that created a MAP_PRIVATE mapping is about to perform
6209
* a COW due to a shared page count, attempt to satisfy the allocation
6210
* without using the existing reserves.
6211
* In order to determine whether this is a COW on a MAP_PRIVATE mapping it
6212
* is enough to check whether the old_folio is anonymous. This means that
6213
* the reserve for this address was consumed. If reserves were used, a
6214
* partially faulted mapping at the time of fork() could consume its reserves
6215
* on COW instead of the full address range.
6216
*/
6217
if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
6218
folio_test_anon(old_folio))
6219
cow_from_owner = true;
6220
6221
folio_get(old_folio);
6222
6223
/*
6224
* Drop page table lock as buddy allocator may be called. It will
6225
* be acquired again before returning to the caller, as expected.
6226
*/
6227
spin_unlock(vmf->ptl);
6228
new_folio = alloc_hugetlb_folio(vma, vmf->address, cow_from_owner);
6229
6230
if (IS_ERR(new_folio)) {
6231
/*
6232
* If a process owning a MAP_PRIVATE mapping fails to COW,
6233
* it is due to references held by a child and an insufficient
6234
* huge page pool. To guarantee the original mapper's
6235
* reliability, unmap the page from child processes. The child
6236
* may get SIGKILLed if it later faults.
6237
*/
6238
if (cow_from_owner) {
6239
struct address_space *mapping = vma->vm_file->f_mapping;
6240
pgoff_t idx;
6241
u32 hash;
6242
6243
folio_put(old_folio);
6244
/*
6245
* Drop hugetlb_fault_mutex and vma_lock before
6246
* unmapping. unmapping needs to hold vma_lock
6247
* in write mode. Dropping vma_lock in read mode
6248
* here is OK as COW mappings do not interact with
6249
* PMD sharing.
6250
*
6251
* Reacquire both after unmap operation.
6252
*/
6253
idx = vma_hugecache_offset(h, vma, vmf->address);
6254
hash = hugetlb_fault_mutex_hash(mapping, idx);
6255
hugetlb_vma_unlock_read(vma);
6256
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6257
6258
unmap_ref_private(mm, vma, old_folio, vmf->address);
6259
6260
mutex_lock(&hugetlb_fault_mutex_table[hash]);
6261
hugetlb_vma_lock_read(vma);
6262
spin_lock(vmf->ptl);
6263
vmf->pte = hugetlb_walk(vma, vmf->address,
6264
huge_page_size(h));
6265
if (likely(vmf->pte &&
6266
pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), pte)))
6267
goto retry_avoidcopy;
6268
/*
6269
* race occurs while re-acquiring page table
6270
* lock, and our job is done.
6271
*/
6272
delayacct_wpcopy_end();
6273
return 0;
6274
}
6275
6276
ret = vmf_error(PTR_ERR(new_folio));
6277
goto out_release_old;
6278
}
6279
6280
/*
6281
* When the original hugepage is shared one, it does not have
6282
* anon_vma prepared.
6283
*/
6284
ret = __vmf_anon_prepare(vmf);
6285
if (unlikely(ret))
6286
goto out_release_all;
6287
6288
if (copy_user_large_folio(new_folio, old_folio, vmf->real_address, vma)) {
6289
ret = VM_FAULT_HWPOISON_LARGE | VM_FAULT_SET_HINDEX(hstate_index(h));
6290
goto out_release_all;
6291
}
6292
__folio_mark_uptodate(new_folio);
6293
6294
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, vmf->address,
6295
vmf->address + huge_page_size(h));
6296
mmu_notifier_invalidate_range_start(&range);
6297
6298
/*
6299
* Retake the page table lock to check for racing updates
6300
* before the page tables are altered
6301
*/
6302
spin_lock(vmf->ptl);
6303
vmf->pte = hugetlb_walk(vma, vmf->address, huge_page_size(h));
6304
if (likely(vmf->pte && pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), pte))) {
6305
pte_t newpte = make_huge_pte(vma, new_folio, !unshare);
6306
6307
/* Break COW or unshare */
6308
huge_ptep_clear_flush(vma, vmf->address, vmf->pte);
6309
hugetlb_remove_rmap(old_folio);
6310
hugetlb_add_new_anon_rmap(new_folio, vma, vmf->address);
6311
if (huge_pte_uffd_wp(pte))
6312
newpte = huge_pte_mkuffd_wp(newpte);
6313
set_huge_pte_at(mm, vmf->address, vmf->pte, newpte,
6314
huge_page_size(h));
6315
folio_set_hugetlb_migratable(new_folio);
6316
/* Make the old page be freed below */
6317
new_folio = old_folio;
6318
}
6319
spin_unlock(vmf->ptl);
6320
mmu_notifier_invalidate_range_end(&range);
6321
out_release_all:
6322
/*
6323
* No restore in case of successful pagetable update (Break COW or
6324
* unshare)
6325
*/
6326
if (new_folio != old_folio)
6327
restore_reserve_on_error(h, vma, vmf->address, new_folio);
6328
folio_put(new_folio);
6329
out_release_old:
6330
folio_put(old_folio);
6331
6332
spin_lock(vmf->ptl); /* Caller expects lock to be held */
6333
6334
delayacct_wpcopy_end();
6335
return ret;
6336
}
6337
6338
/*
6339
* Return whether there is a pagecache page to back given address within VMA.
6340
*/
6341
bool hugetlbfs_pagecache_present(struct hstate *h,
6342
struct vm_area_struct *vma, unsigned long address)
6343
{
6344
struct address_space *mapping = vma->vm_file->f_mapping;
6345
pgoff_t idx = linear_page_index(vma, address);
6346
struct folio *folio;
6347
6348
folio = filemap_get_folio(mapping, idx);
6349
if (IS_ERR(folio))
6350
return false;
6351
folio_put(folio);
6352
return true;
6353
}
6354
6355
int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
6356
pgoff_t idx)
6357
{
6358
struct inode *inode = mapping->host;
6359
struct hstate *h = hstate_inode(inode);
6360
int err;
6361
6362
idx <<= huge_page_order(h);
6363
__folio_set_locked(folio);
6364
err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL);
6365
6366
if (unlikely(err)) {
6367
__folio_clear_locked(folio);
6368
return err;
6369
}
6370
folio_clear_hugetlb_restore_reserve(folio);
6371
6372
/*
6373
* mark folio dirty so that it will not be removed from cache/file
6374
* by non-hugetlbfs specific code paths.
6375
*/
6376
folio_mark_dirty(folio);
6377
6378
spin_lock(&inode->i_lock);
6379
inode->i_blocks += blocks_per_huge_page(h);
6380
spin_unlock(&inode->i_lock);
6381
return 0;
6382
}
6383
6384
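/*
 * Illustrative sketch only (hypothetical helper): callers of
 * hugetlb_add_to_page_cache() pass idx in huge-page-sized units (as produced
 * by vma_hugecache_offset()), while the page cache itself is indexed in base
 * pages, hence the "idx <<= huge_page_order(h)" before __filemap_add_folio().
 */
static inline pgoff_t hugetlb_basepage_index_sketch(struct hstate *h,
						    pgoff_t huge_idx)
{
	return huge_idx << huge_page_order(h);	/* huge-page index -> base-page index */
}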
static inline vm_fault_t hugetlb_handle_userfault(struct vm_fault *vmf,
6385
struct address_space *mapping,
6386
unsigned long reason)
6387
{
6388
u32 hash;
6389
6390
/*
6391
* vma_lock and hugetlb_fault_mutex must be dropped before handling
6392
* userfault. Also mmap_lock could be dropped due to handling
6393
* userfault, any vma operation should be careful from here.
6394
*/
6395
hugetlb_vma_unlock_read(vmf->vma);
6396
hash = hugetlb_fault_mutex_hash(mapping, vmf->pgoff);
6397
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6398
return handle_userfault(vmf, reason);
6399
}
6400
6401
/*
6402
* Recheck pte with pgtable lock. Returns true if pte didn't change, or
6403
* false if pte changed or is changing.
6404
*/
6405
static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm, unsigned long addr,
6406
pte_t *ptep, pte_t old_pte)
6407
{
6408
spinlock_t *ptl;
6409
bool same;
6410
6411
ptl = huge_pte_lock(h, mm, ptep);
6412
same = pte_same(huge_ptep_get(mm, addr, ptep), old_pte);
6413
spin_unlock(ptl);
6414
6415
return same;
6416
}
6417
6418
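/*
 * Illustrative usage sketch only (the caller shown is an assumption): a
 * typical user samples the pte without the page-table lock, performs a
 * blocking operation such as an allocation, and then rechecks with
 * hugetlb_pte_stable() to decide whether the fault must be retried.
 */
static inline bool hugetlb_must_retry_sketch(struct hstate *h,
					     struct mm_struct *mm,
					     unsigned long addr, pte_t *ptep,
					     pte_t old_pte)
{
	/* If the pte changed while we slept, the caller should restart. */
	return !hugetlb_pte_stable(h, mm, addr, ptep, old_pte);
}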
static vm_fault_t hugetlb_no_page(struct address_space *mapping,
6419
struct vm_fault *vmf)
6420
{
6421
u32 hash = hugetlb_fault_mutex_hash(mapping, vmf->pgoff);
6422
bool new_folio, new_anon_folio = false;
6423
struct vm_area_struct *vma = vmf->vma;
6424
struct mm_struct *mm = vma->vm_mm;
6425
struct hstate *h = hstate_vma(vma);
6426
vm_fault_t ret = VM_FAULT_SIGBUS;
6427
bool folio_locked = true;
6428
struct folio *folio;
6429
unsigned long size;
6430
pte_t new_pte;
6431
6432
/*
6433
* Currently, we are forced to kill the process in the event the
6434
* original mapper has unmapped pages from the child due to a failed
6435
* COW/unsharing. Warn that such a situation has occurred as it may not
6436
* be obvious.
6437
*/
6438
if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
6439
pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
6440
current->pid);
6441
goto out;
6442
}
6443
6444
/*
6445
* Use page lock to guard against racing truncation
6446
* before we get page_table_lock.
6447
*/
6448
new_folio = false;
6449
folio = filemap_lock_hugetlb_folio(h, mapping, vmf->pgoff);
6450
if (IS_ERR(folio)) {
6451
size = i_size_read(mapping->host) >> huge_page_shift(h);
6452
if (vmf->pgoff >= size)
6453
goto out;
6454
/* Check for page in userfault range */
6455
if (userfaultfd_missing(vma)) {
6456
/*
6457
* Since hugetlb_no_page() was examining pte
6458
* without pgtable lock, we need to re-test under
6459
* lock because the pte may not be stable and could
6460
* have changed from under us. Try to detect
6461
* ptes that changed or are still changing, and retry
6462
* properly when needed.
6463
*
6464
* Note that userfaultfd is actually fine with
6465
* false positives (e.g. caused by pte changed),
6466
* but not wrong logical events (e.g. caused by
6467
* reading a pte while it is changing). The latter can
6468
* confuse the userspace, so the strictness is very
6469
* much preferred. E.g., MISSING event should
6470
* never happen on the page after UFFDIO_COPY has
6471
* correctly installed the page and returned.
6472
*/
6473
if (!hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte)) {
6474
ret = 0;
6475
goto out;
6476
}
6477
6478
return hugetlb_handle_userfault(vmf, mapping,
6479
VM_UFFD_MISSING);
6480
}
6481
6482
if (!(vma->vm_flags & VM_MAYSHARE)) {
6483
ret = __vmf_anon_prepare(vmf);
6484
if (unlikely(ret))
6485
goto out;
6486
}
6487
6488
folio = alloc_hugetlb_folio(vma, vmf->address, false);
6489
if (IS_ERR(folio)) {
6490
/*
6491
* Returning error will result in faulting task being
6492
* sent SIGBUS. The hugetlb fault mutex prevents two
6493
* tasks from racing to fault in the same page which
6494
* could result in spurious "unable to allocate" errors.
6495
* Page migration does not take the fault mutex, but
6496
* does a clear then write of pte's under page table
6497
* lock. Page fault code could race with migration,
6498
* notice the clear pte and try to allocate a page
6499
* here. Before returning error, get ptl and make
6500
* sure there really is no pte entry.
6501
*/
6502
if (hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte))
6503
ret = vmf_error(PTR_ERR(folio));
6504
else
6505
ret = 0;
6506
goto out;
6507
}
6508
folio_zero_user(folio, vmf->real_address);
6509
__folio_mark_uptodate(folio);
6510
new_folio = true;
6511
6512
if (vma->vm_flags & VM_MAYSHARE) {
6513
int err = hugetlb_add_to_page_cache(folio, mapping,
6514
vmf->pgoff);
6515
if (err) {
6516
/*
6517
* err can't be -EEXIST which implies someone
6518
* else consumed the reservation since hugetlb
6519
* fault mutex is held when adding a hugetlb page
6520
* to the page cache. So it's safe to call
6521
* restore_reserve_on_error() here.
6522
*/
6523
restore_reserve_on_error(h, vma, vmf->address,
6524
folio);
6525
folio_put(folio);
6526
ret = VM_FAULT_SIGBUS;
6527
goto out;
6528
}
6529
} else {
6530
new_anon_folio = true;
6531
folio_lock(folio);
6532
}
6533
} else {
6534
/*
6535
* If a memory error occurs between mmap() and fault, some processes
6536
* don't have a hwpoisoned swap entry for the errored virtual address.
6537
* So we need to block hugepage fault by PG_hwpoison bit check.
6538
*/
6539
if (unlikely(folio_test_hwpoison(folio))) {
6540
ret = VM_FAULT_HWPOISON_LARGE |
6541
VM_FAULT_SET_HINDEX(hstate_index(h));
6542
goto backout_unlocked;
6543
}
6544
6545
/* Check for page in userfault range. */
6546
if (userfaultfd_minor(vma)) {
6547
folio_unlock(folio);
6548
folio_put(folio);
6549
/* See comment in userfaultfd_missing() block above */
6550
if (!hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte)) {
6551
ret = 0;
6552
goto out;
6553
}
6554
return hugetlb_handle_userfault(vmf, mapping,
6555
VM_UFFD_MINOR);
6556
}
6557
}
6558
6559
/*
6560
* If we are going to COW a private mapping later, we examine the
6561
* pending reservations for this page now. This will ensure that
6562
* any allocations necessary to record that reservation occur outside
6563
* the spinlock.
6564
*/
6565
if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
6566
if (vma_needs_reservation(h, vma, vmf->address) < 0) {
6567
ret = VM_FAULT_OOM;
6568
goto backout_unlocked;
6569
}
6570
/* Just decrements count, does not deallocate */
6571
vma_end_reservation(h, vma, vmf->address);
6572
}
6573
6574
vmf->ptl = huge_pte_lock(h, mm, vmf->pte);
6575
ret = 0;
6576
/* If pte changed from under us, retry */
6577
if (!pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), vmf->orig_pte))
6578
goto backout;
6579
6580
if (new_anon_folio)
6581
hugetlb_add_new_anon_rmap(folio, vma, vmf->address);
6582
else
6583
hugetlb_add_file_rmap(folio);
6584
new_pte = make_huge_pte(vma, folio, vma->vm_flags & VM_SHARED);
6585
/*
6586
* If this pte was previously wr-protected, keep it wr-protected even
6587
* if populated.
6588
*/
6589
if (unlikely(pte_marker_uffd_wp(vmf->orig_pte)))
6590
new_pte = huge_pte_mkuffd_wp(new_pte);
6591
set_huge_pte_at(mm, vmf->address, vmf->pte, new_pte, huge_page_size(h));
6592
6593
hugetlb_count_add(pages_per_huge_page(h), mm);
6594
if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
6595
/*
6596
* No need to keep file folios locked. See comment in
6597
* hugetlb_fault().
6598
*/
6599
if (!new_anon_folio) {
6600
folio_locked = false;
6601
folio_unlock(folio);
6602
}
6603
/* Optimization, do the COW without a second fault */
6604
ret = hugetlb_wp(vmf);
6605
}
6606
6607
spin_unlock(vmf->ptl);
6608
6609
/*
6610
* Only set hugetlb_migratable in newly allocated pages. Existing pages
6611
* found in the pagecache may not have hugetlb_migratable if they have
6612
* been isolated for migration.
6613
*/
6614
if (new_folio)
6615
folio_set_hugetlb_migratable(folio);
6616
6617
if (folio_locked)
6618
folio_unlock(folio);
6619
out:
6620
hugetlb_vma_unlock_read(vma);
6621
6622
/*
6623
* We must check to release the per-VMA lock. __vmf_anon_prepare() is
6624
* the only way ret can be set to VM_FAULT_RETRY.
6625
*/
6626
if (unlikely(ret & VM_FAULT_RETRY))
6627
vma_end_read(vma);
6628
6629
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6630
return ret;
6631
6632
backout:
6633
spin_unlock(vmf->ptl);
6634
backout_unlocked:
6635
/* We only need to restore reservations for private mappings */
6636
if (new_anon_folio)
6637
restore_reserve_on_error(h, vma, vmf->address, folio);
6638
6639
folio_unlock(folio);
6640
folio_put(folio);
6641
goto out;
6642
}
6643
6644
#ifdef CONFIG_SMP
6645
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
6646
{
6647
unsigned long key[2];
6648
u32 hash;
6649
6650
key[0] = (unsigned long) mapping;
6651
key[1] = idx;
6652
6653
hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0);
6654
6655
return hash & (num_fault_mutexes - 1);
6656
}
6657
#else
6658
/*
6659
* For uniprocessor systems we always use a single mutex, so just
6660
* return 0 and avoid the hashing overhead.
6661
*/
6662
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
6663
{
6664
return 0;
6665
}
6666
#endif
6667
6668
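/*
 * Illustrative sketch only (hypothetical helper): two tasks faulting
 * addresses that resolve to the same (mapping, huge-page index) pair compute
 * the same hash above and therefore serialize on the same mutex, which is
 * what prevents duplicate allocations for a single page-cache page.
 */
static inline u32 hugetlb_fault_hash_for_addr_sketch(struct vm_area_struct *vma,
						     unsigned long addr)
{
	struct hstate *h = hstate_vma(vma);
	pgoff_t idx = vma_hugecache_offset(h, vma, addr & huge_page_mask(h));

	return hugetlb_fault_mutex_hash(vma->vm_file->f_mapping, idx);
}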
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
6669
unsigned long address, unsigned int flags)
6670
{
6671
vm_fault_t ret;
6672
u32 hash;
6673
struct folio *folio = NULL;
6674
struct hstate *h = hstate_vma(vma);
6675
struct address_space *mapping;
6676
bool need_wait_lock = false;
6677
struct vm_fault vmf = {
6678
.vma = vma,
6679
.address = address & huge_page_mask(h),
6680
.real_address = address,
6681
.flags = flags,
6682
.pgoff = vma_hugecache_offset(h, vma,
6683
address & huge_page_mask(h)),
6684
/* TODO: Track hugetlb faults using vm_fault */
6685
6686
/*
6687
* Some fields may not be initialized; be careful, as it may
6688
* be hard to debug if called functions make assumptions
6689
*/
6690
};
6691
6692
/*
6693
* Serialize hugepage allocation and instantiation, so that we don't
6694
* get spurious allocation failures if two CPUs race to instantiate
6695
* the same page in the page cache.
6696
*/
6697
mapping = vma->vm_file->f_mapping;
6698
hash = hugetlb_fault_mutex_hash(mapping, vmf.pgoff);
6699
mutex_lock(&hugetlb_fault_mutex_table[hash]);
6700
6701
/*
6702
* Acquire vma lock before calling huge_pte_alloc and hold
6703
* until finished with vmf.pte. This prevents huge_pmd_unshare from
6704
* being called elsewhere and making the vmf.pte no longer valid.
6705
*/
6706
hugetlb_vma_lock_read(vma);
6707
vmf.pte = huge_pte_alloc(mm, vma, vmf.address, huge_page_size(h));
6708
if (!vmf.pte) {
6709
hugetlb_vma_unlock_read(vma);
6710
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6711
return VM_FAULT_OOM;
6712
}
6713
6714
vmf.orig_pte = huge_ptep_get(mm, vmf.address, vmf.pte);
6715
if (huge_pte_none_mostly(vmf.orig_pte)) {
6716
if (is_pte_marker(vmf.orig_pte)) {
6717
pte_marker marker =
6718
pte_marker_get(pte_to_swp_entry(vmf.orig_pte));
6719
6720
if (marker & PTE_MARKER_POISONED) {
6721
ret = VM_FAULT_HWPOISON_LARGE |
6722
VM_FAULT_SET_HINDEX(hstate_index(h));
6723
goto out_mutex;
6724
} else if (WARN_ON_ONCE(marker & PTE_MARKER_GUARD)) {
6725
/* This isn't supported in hugetlb. */
6726
ret = VM_FAULT_SIGSEGV;
6727
goto out_mutex;
6728
}
6729
}
6730
6731
/*
6732
* Other PTE markers should be handled the same way as a none PTE.
6733
*
6734
* hugetlb_no_page will drop vma lock and hugetlb fault
6735
* mutex internally, which makes us return immediately.
6736
*/
6737
return hugetlb_no_page(mapping, &vmf);
6738
}
6739
6740
ret = 0;
6741
6742
/* Not present, either a migration or a hwpoisoned entry */
6743
if (!pte_present(vmf.orig_pte)) {
6744
if (is_hugetlb_entry_migration(vmf.orig_pte)) {
6745
/*
6746
* Release the hugetlb fault lock now, but retain
6747
* the vma lock, because it is needed to guard the
6748
* huge_pte_lockptr() later in
6749
* migration_entry_wait_huge(). The vma lock will
6750
* be released there.
6751
*/
6752
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6753
migration_entry_wait_huge(vma, vmf.address, vmf.pte);
6754
return 0;
6755
} else if (is_hugetlb_entry_hwpoisoned(vmf.orig_pte))
6756
ret = VM_FAULT_HWPOISON_LARGE |
6757
VM_FAULT_SET_HINDEX(hstate_index(h));
6758
goto out_mutex;
6759
}
6760
6761
/*
6762
* If we are going to COW/unshare the mapping later, we examine the
6763
* pending reservations for this page now. This will ensure that any
6764
* allocations necessary to record that reservation occur outside the
6765
* spinlock.
6766
*/
6767
if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) &&
6768
!(vma->vm_flags & VM_MAYSHARE) && !huge_pte_write(vmf.orig_pte)) {
6769
if (vma_needs_reservation(h, vma, vmf.address) < 0) {
6770
ret = VM_FAULT_OOM;
6771
goto out_mutex;
6772
}
6773
/* Just decrements count, does not deallocate */
6774
vma_end_reservation(h, vma, vmf.address);
6775
}
6776
6777
vmf.ptl = huge_pte_lock(h, mm, vmf.pte);
6778
6779
/* Check for a racing update before calling hugetlb_wp() */
6780
if (unlikely(!pte_same(vmf.orig_pte, huge_ptep_get(mm, vmf.address, vmf.pte))))
6781
goto out_ptl;
6782
6783
/* Handle userfault-wp first, before trying to lock more pages */
6784
if (userfaultfd_wp(vma) && huge_pte_uffd_wp(huge_ptep_get(mm, vmf.address, vmf.pte)) &&
6785
(flags & FAULT_FLAG_WRITE) && !huge_pte_write(vmf.orig_pte)) {
6786
if (!userfaultfd_wp_async(vma)) {
6787
spin_unlock(vmf.ptl);
6788
hugetlb_vma_unlock_read(vma);
6789
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6790
return handle_userfault(&vmf, VM_UFFD_WP);
6791
}
6792
6793
vmf.orig_pte = huge_pte_clear_uffd_wp(vmf.orig_pte);
6794
set_huge_pte_at(mm, vmf.address, vmf.pte, vmf.orig_pte,
6795
huge_page_size(hstate_vma(vma)));
6796
/* Fallthrough to CoW */
6797
}
6798
6799
if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
6800
if (!huge_pte_write(vmf.orig_pte)) {
6801
/*
6802
* Anonymous folios need to be locked since hugetlb_wp()
6803
* checks whether we can re-use the folio exclusively
6804
* for us in case we are the only user of it.
6805
*/
6806
folio = page_folio(pte_page(vmf.orig_pte));
6807
if (folio_test_anon(folio) && !folio_trylock(folio)) {
6808
need_wait_lock = true;
6809
goto out_ptl;
6810
}
6811
folio_get(folio);
6812
ret = hugetlb_wp(&vmf);
6813
if (folio_test_anon(folio))
6814
folio_unlock(folio);
6815
folio_put(folio);
6816
goto out_ptl;
6817
} else if (likely(flags & FAULT_FLAG_WRITE)) {
6818
vmf.orig_pte = huge_pte_mkdirty(vmf.orig_pte);
6819
}
6820
}
6821
vmf.orig_pte = pte_mkyoung(vmf.orig_pte);
6822
if (huge_ptep_set_access_flags(vma, vmf.address, vmf.pte, vmf.orig_pte,
6823
flags & FAULT_FLAG_WRITE))
6824
update_mmu_cache(vma, vmf.address, vmf.pte);
6825
out_ptl:
6826
spin_unlock(vmf.ptl);
6827
out_mutex:
6828
hugetlb_vma_unlock_read(vma);
6829
6830
/*
6831
* We must check to release the per-VMA lock. __vmf_anon_prepare() in
6832
* hugetlb_wp() is the only way ret can be set to VM_FAULT_RETRY.
6833
*/
6834
if (unlikely(ret & VM_FAULT_RETRY))
6835
vma_end_read(vma);
6836
6837
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6838
/*
6839
* hugetlb_wp drops all the locks, but the folio lock, before trying to
6840
* unmap the folio from other processes. During that window, if another
6841
* process mapping that folio faults in, it will take the mutex and then
6842
* it will wait on folio_lock, causing an ABBA deadlock.
6843
* Use trylock instead and bail out if we fail.
6844
*
6845
* Ideally, we should hold a refcount on the folio we wait for, but we do
6846
* not want to use the folio after it becomes unlocked, but rather just
6847
* wait for it to become unlocked, so hopefully the next fault succeeds on
6848
* the trylock.
6849
*/
6850
if (need_wait_lock)
6851
folio_wait_locked(folio);
6852
return ret;
6853
}
6854
6855
#ifdef CONFIG_USERFAULTFD
6856
/*
6857
* Can probably be eliminated, but still used by hugetlb_mfill_atomic_pte().
6858
*/
6859
static struct folio *alloc_hugetlb_folio_vma(struct hstate *h,
6860
struct vm_area_struct *vma, unsigned long address)
6861
{
6862
struct mempolicy *mpol;
6863
nodemask_t *nodemask;
6864
struct folio *folio;
6865
gfp_t gfp_mask;
6866
int node;
6867
6868
gfp_mask = htlb_alloc_mask(h);
6869
node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
6870
/*
6871
* This is used to allocate a temporary hugetlb folio to hold the copied
6872
* content, which will then be copied again to the final hugetlb
6873
* consuming a reservation. Set the alloc_fallback to false to indicate
6874
* that breaking the per-node hugetlb pool is not allowed in this case.
6875
*/
6876
folio = alloc_hugetlb_folio_nodemask(h, node, nodemask, gfp_mask, false);
6877
mpol_cond_put(mpol);
6878
6879
return folio;
6880
}
6881
6882
/*
6883
* Used by userfaultfd UFFDIO_* ioctls. Based on userfaultfd's mfill_atomic_pte
6884
* with modifications for hugetlb pages.
6885
*/
6886
int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
6887
struct vm_area_struct *dst_vma,
6888
unsigned long dst_addr,
6889
unsigned long src_addr,
6890
uffd_flags_t flags,
6891
struct folio **foliop)
6892
{
6893
struct mm_struct *dst_mm = dst_vma->vm_mm;
6894
bool is_continue = uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE);
6895
bool wp_enabled = (flags & MFILL_ATOMIC_WP);
6896
struct hstate *h = hstate_vma(dst_vma);
6897
struct address_space *mapping = dst_vma->vm_file->f_mapping;
6898
pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr);
6899
unsigned long size = huge_page_size(h);
6900
int vm_shared = dst_vma->vm_flags & VM_SHARED;
6901
pte_t _dst_pte;
6902
spinlock_t *ptl;
6903
int ret = -ENOMEM;
6904
struct folio *folio;
6905
bool folio_in_pagecache = false;
6906
6907
if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
6908
ptl = huge_pte_lock(h, dst_mm, dst_pte);
6909
6910
/* Don't overwrite any existing PTEs (even markers) */
6911
if (!huge_pte_none(huge_ptep_get(dst_mm, dst_addr, dst_pte))) {
6912
spin_unlock(ptl);
6913
return -EEXIST;
6914
}
6915
6916
_dst_pte = make_pte_marker(PTE_MARKER_POISONED);
6917
set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, size);
6918
6919
/* No need to invalidate - it was non-present before */
6920
update_mmu_cache(dst_vma, dst_addr, dst_pte);
6921
6922
spin_unlock(ptl);
6923
return 0;
6924
}
6925
6926
if (is_continue) {
6927
ret = -EFAULT;
6928
folio = filemap_lock_hugetlb_folio(h, mapping, idx);
6929
if (IS_ERR(folio))
6930
goto out;
6931
folio_in_pagecache = true;
6932
} else if (!*foliop) {
6933
/* If a folio already exists, then it's UFFDIO_COPY for
6934
* a non-missing case. Return -EEXIST.
6935
*/
6936
if (vm_shared &&
6937
hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
6938
ret = -EEXIST;
6939
goto out;
6940
}
6941
6942
folio = alloc_hugetlb_folio(dst_vma, dst_addr, false);
6943
if (IS_ERR(folio)) {
6944
pte_t *actual_pte = hugetlb_walk(dst_vma, dst_addr, PMD_SIZE);
6945
if (actual_pte) {
6946
ret = -EEXIST;
6947
goto out;
6948
}
6949
ret = -ENOMEM;
6950
goto out;
6951
}
6952
6953
ret = copy_folio_from_user(folio, (const void __user *) src_addr,
6954
false);
6955
6956
/* fallback to copy_from_user outside mmap_lock */
6957
if (unlikely(ret)) {
6958
ret = -ENOENT;
6959
/* Free the allocated folio which may have
6960
* consumed a reservation.
6961
*/
6962
restore_reserve_on_error(h, dst_vma, dst_addr, folio);
6963
folio_put(folio);
6964
6965
/* Allocate a temporary folio to hold the copied
6966
* contents.
6967
*/
6968
folio = alloc_hugetlb_folio_vma(h, dst_vma, dst_addr);
6969
if (!folio) {
6970
ret = -ENOMEM;
6971
goto out;
6972
}
6973
*foliop = folio;
6974
/* Set the outparam foliop and return to the caller to
6975
* copy the contents outside the lock. Don't free the
6976
* folio.
6977
*/
6978
goto out;
6979
}
6980
} else {
6981
if (vm_shared &&
6982
hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
6983
folio_put(*foliop);
6984
ret = -EEXIST;
6985
*foliop = NULL;
6986
goto out;
6987
}
6988
6989
folio = alloc_hugetlb_folio(dst_vma, dst_addr, false);
6990
if (IS_ERR(folio)) {
6991
folio_put(*foliop);
6992
ret = -ENOMEM;
6993
*foliop = NULL;
6994
goto out;
6995
}
6996
ret = copy_user_large_folio(folio, *foliop, dst_addr, dst_vma);
6997
folio_put(*foliop);
6998
*foliop = NULL;
6999
if (ret) {
7000
folio_put(folio);
7001
goto out;
7002
}
7003
}
7004
7005
/*
7006
* If we just allocated a new page, we need a memory barrier to ensure
7007
* that preceding stores to the page become visible before the
7008
* set_pte_at() write. The memory barrier inside __folio_mark_uptodate
7009
* is what we need.
7010
*
7011
* In the case where we have not allocated a new page (is_continue),
7012
* the page must already be uptodate. UFFDIO_CONTINUE already includes
7013
* an earlier smp_wmb() to ensure that prior stores will be visible
7014
* before the set_pte_at() write.
7015
*/
7016
if (!is_continue)
7017
__folio_mark_uptodate(folio);
7018
else
7019
WARN_ON_ONCE(!folio_test_uptodate(folio));
7020
7021
/* Add shared, newly allocated pages to the page cache. */
7022
if (vm_shared && !is_continue) {
7023
ret = -EFAULT;
7024
if (idx >= (i_size_read(mapping->host) >> huge_page_shift(h)))
7025
goto out_release_nounlock;
7026
7027
/*
7028
* Serialization between remove_inode_hugepages() and
7029
* hugetlb_add_to_page_cache() below happens through the
7030
* hugetlb_fault_mutex_table that here must be held by
7031
* the caller.
7032
*/
7033
ret = hugetlb_add_to_page_cache(folio, mapping, idx);
7034
if (ret)
7035
goto out_release_nounlock;
7036
folio_in_pagecache = true;
7037
}
7038
7039
ptl = huge_pte_lock(h, dst_mm, dst_pte);
7040
7041
ret = -EIO;
7042
if (folio_test_hwpoison(folio))
7043
goto out_release_unlock;
7044
7045
/*
7046
* We allow overwriting a pte marker: consider when both MISSING|WP are
7047
* registered, we first wr-protect a none pte which has no page cache
7048
* page backing it, then access the page.
7049
*/
7050
ret = -EEXIST;
7051
if (!huge_pte_none_mostly(huge_ptep_get(dst_mm, dst_addr, dst_pte)))
7052
goto out_release_unlock;
7053
7054
if (folio_in_pagecache)
7055
hugetlb_add_file_rmap(folio);
7056
else
7057
hugetlb_add_new_anon_rmap(folio, dst_vma, dst_addr);
7058
7059
/*
7060
* For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY
7061
* with wp flag set, don't set pte write bit.
7062
*/
7063
_dst_pte = make_huge_pte(dst_vma, folio,
7064
!wp_enabled && !(is_continue && !vm_shared));
7065
/*
7066
* Always mark UFFDIO_COPY page dirty; note that this may not be
7067
* extremely important for hugetlbfs for now since swapping is not
7068
* supported, but we should still be clear in that this page cannot be
7069
* thrown away at will, even if the write bit is not set.
7070
*/
7071
_dst_pte = huge_pte_mkdirty(_dst_pte);
7072
_dst_pte = pte_mkyoung(_dst_pte);
7073
7074
if (wp_enabled)
7075
_dst_pte = huge_pte_mkuffd_wp(_dst_pte);
7076
7077
set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, size);
7078
7079
hugetlb_count_add(pages_per_huge_page(h), dst_mm);
7080
7081
/* No need to invalidate - it was non-present before */
7082
update_mmu_cache(dst_vma, dst_addr, dst_pte);
7083
7084
spin_unlock(ptl);
7085
if (!is_continue)
7086
folio_set_hugetlb_migratable(folio);
7087
if (vm_shared || is_continue)
7088
folio_unlock(folio);
7089
ret = 0;
7090
out:
7091
return ret;
7092
out_release_unlock:
7093
spin_unlock(ptl);
7094
if (vm_shared || is_continue)
7095
folio_unlock(folio);
7096
out_release_nounlock:
7097
if (!folio_in_pagecache)
7098
restore_reserve_on_error(h, dst_vma, dst_addr, folio);
7099
folio_put(folio);
7100
goto out;
7101
}
7102
#endif /* CONFIG_USERFAULTFD */
7103
7104
long hugetlb_change_protection(struct vm_area_struct *vma,
7105
unsigned long address, unsigned long end,
7106
pgprot_t newprot, unsigned long cp_flags)
7107
{
7108
struct mm_struct *mm = vma->vm_mm;
7109
unsigned long start = address;
7110
pte_t *ptep;
7111
pte_t pte;
7112
struct hstate *h = hstate_vma(vma);
7113
long pages = 0, psize = huge_page_size(h);
7114
bool shared_pmd = false;
7115
struct mmu_notifier_range range;
7116
unsigned long last_addr_mask;
7117
bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
7118
bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
7119
7120
/*
7121
* In the case of shared PMDs, the area to flush could be beyond
7122
* start/end. Set range.start/range.end to cover the maximum possible
7123
* range if PMD sharing is possible.
7124
*/
7125
mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA,
7126
0, mm, start, end);
7127
adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
7128
7129
BUG_ON(address >= end);
7130
flush_cache_range(vma, range.start, range.end);
7131
7132
mmu_notifier_invalidate_range_start(&range);
7133
hugetlb_vma_lock_write(vma);
7134
i_mmap_lock_write(vma->vm_file->f_mapping);
7135
last_addr_mask = hugetlb_mask_last_page(h);
7136
for (; address < end; address += psize) {
7137
spinlock_t *ptl;
7138
ptep = hugetlb_walk(vma, address, psize);
7139
if (!ptep) {
7140
if (!uffd_wp) {
7141
address |= last_addr_mask;
7142
continue;
7143
}
7144
/*
7145
* Userfaultfd wr-protect requires pgtable
7146
* pre-allocations to install pte markers.
7147
*/
7148
ptep = huge_pte_alloc(mm, vma, address, psize);
7149
if (!ptep) {
7150
pages = -ENOMEM;
7151
break;
7152
}
7153
}
7154
ptl = huge_pte_lock(h, mm, ptep);
7155
if (huge_pmd_unshare(mm, vma, address, ptep)) {
7156
/*
7157
* When uffd-wp is enabled on the vma, unshare
7158
* shouldn't happen at all. Warn about it if it
7159
* happened due to some reason.
7160
*/
7161
WARN_ON_ONCE(uffd_wp || uffd_wp_resolve);
7162
pages++;
7163
spin_unlock(ptl);
7164
shared_pmd = true;
7165
address |= last_addr_mask;
7166
continue;
7167
}
7168
pte = huge_ptep_get(mm, address, ptep);
7169
if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
7170
/* Nothing to do. */
7171
} else if (unlikely(is_hugetlb_entry_migration(pte))) {
7172
swp_entry_t entry = pte_to_swp_entry(pte);
7173
struct folio *folio = pfn_swap_entry_folio(entry);
7174
pte_t newpte = pte;
7175
7176
if (is_writable_migration_entry(entry)) {
7177
if (folio_test_anon(folio))
7178
entry = make_readable_exclusive_migration_entry(
7179
swp_offset(entry));
7180
else
7181
entry = make_readable_migration_entry(
7182
swp_offset(entry));
7183
newpte = swp_entry_to_pte(entry);
7184
pages++;
7185
}
7186
7187
if (uffd_wp)
7188
newpte = pte_swp_mkuffd_wp(newpte);
7189
else if (uffd_wp_resolve)
7190
newpte = pte_swp_clear_uffd_wp(newpte);
7191
if (!pte_same(pte, newpte))
7192
set_huge_pte_at(mm, address, ptep, newpte, psize);
7193
} else if (unlikely(is_pte_marker(pte))) {
7194
/*
7195
* Do nothing on a poison marker; page is
7196
* corrupted, permissions do not apply. Here
7197
* pte_marker_uffd_wp()==true implies !poison
7198
* because they're mutually exclusive.
7199
*/
7200
if (pte_marker_uffd_wp(pte) && uffd_wp_resolve)
7201
/* Safe to modify directly (non-present->none). */
7202
huge_pte_clear(mm, address, ptep, psize);
7203
} else if (!huge_pte_none(pte)) {
7204
pte_t old_pte;
7205
unsigned int shift = huge_page_shift(hstate_vma(vma));
7206
7207
old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
7208
pte = huge_pte_modify(old_pte, newprot);
7209
pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
7210
if (uffd_wp)
7211
pte = huge_pte_mkuffd_wp(pte);
7212
else if (uffd_wp_resolve)
7213
pte = huge_pte_clear_uffd_wp(pte);
7214
huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
7215
pages++;
7216
} else {
7217
/* None pte */
7218
if (unlikely(uffd_wp))
7219
/* Safe to modify directly (none->non-present). */
7220
set_huge_pte_at(mm, address, ptep,
7221
make_pte_marker(PTE_MARKER_UFFD_WP),
7222
psize);
7223
}
7224
spin_unlock(ptl);
7225
}
7226
/*
7227
* Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
7228
* may have cleared our pud entry and done put_page on the page table:
7229
* once we release i_mmap_rwsem, another task can do the final put_page
7230
* and that page table be reused and filled with junk. If we actually
7231
* did unshare a page of pmds, flush the range corresponding to the pud.
7232
*/
7233
if (shared_pmd)
7234
flush_hugetlb_tlb_range(vma, range.start, range.end);
7235
else
7236
flush_hugetlb_tlb_range(vma, start, end);
7237
/*
7238
* No need to call mmu_notifier_arch_invalidate_secondary_tlbs() as we are
7239
* downgrading page table protection, not changing it to point to a new
7240
* page.
7241
*
7242
* See Documentation/mm/mmu_notifier.rst
7243
*/
7244
i_mmap_unlock_write(vma->vm_file->f_mapping);
7245
hugetlb_vma_unlock_write(vma);
7246
mmu_notifier_invalidate_range_end(&range);
7247
7248
return pages > 0 ? (pages << h->order) : pages;
7249
}
7250
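/*
 * Editorial note (not part of the upstream file): the "pages" counter in
 * hugetlb_change_protection() is kept in units of huge pages, and the
 * return value converts it to base pages via "pages << h->order".  For
 * example, with 4 KiB base pages and a 2 MiB hstate (order 9), changing
 * the protection of 3 huge PTEs returns 3 << 9 == 1536 base pages.
 */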
7251
/*
7252
* Update the reservation map for the range [from, to].
7253
*
7254
* Returns the number of entries that would be added to the reservation map
7255
* associated with the range [from, to]. This number is greater than or equal to
7256
* zero. -EINVAL or -ENOMEM is returned in case of any errors.
7257
*/
7258
7259
long hugetlb_reserve_pages(struct inode *inode,
7260
long from, long to,
7261
struct vm_area_struct *vma,
7262
vm_flags_t vm_flags)
7263
{
7264
long chg = -1, add = -1, spool_resv, gbl_resv;
7265
struct hstate *h = hstate_inode(inode);
7266
struct hugepage_subpool *spool = subpool_inode(inode);
7267
struct resv_map *resv_map;
7268
struct hugetlb_cgroup *h_cg = NULL;
7269
long gbl_reserve, regions_needed = 0;
7270
7271
/* This should never happen */
7272
if (from > to) {
7273
VM_WARN(1, "%s called with a negative range\n", __func__);
7274
return -EINVAL;
7275
}
7276
7277
/*
7278
* vma specific semaphore used for pmd sharing and fault/truncation
7279
* synchronization
7280
*/
7281
hugetlb_vma_lock_alloc(vma);
7282
7283
/*
7284
* Only apply hugepage reservation if asked. At fault time, an
7285
* attempt will be made for VM_NORESERVE to allocate a page
7286
* without using reserves
7287
*/
7288
if (vm_flags & VM_NORESERVE)
7289
return 0;
7290
7291
/*
7292
* Shared mappings base their reservation on the number of pages that
7293
* are already allocated on behalf of the file. Private mappings need
7294
* to reserve the full area even if read-only as mprotect() may be
7295
* called to make the mapping read-write. Assume !vma is a shm mapping
7296
*/
7297
if (!vma || vma->vm_flags & VM_MAYSHARE) {
7298
/*
7299
* resv_map can not be NULL as hugetlb_reserve_pages is only
7300
* called for inodes for which resv_maps were created (see
7301
* hugetlbfs_get_inode).
7302
*/
7303
resv_map = inode_resv_map(inode);
7304
7305
chg = region_chg(resv_map, from, to, &regions_needed);
7306
} else {
7307
/* Private mapping. */
7308
resv_map = resv_map_alloc();
7309
if (!resv_map)
7310
goto out_err;
7311
7312
chg = to - from;
7313
7314
set_vma_resv_map(vma, resv_map);
7315
set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
7316
}
7317
7318
if (chg < 0)
7319
goto out_err;
7320
7321
if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h),
7322
chg * pages_per_huge_page(h), &h_cg) < 0)
7323
goto out_err;
7324
7325
if (vma && !(vma->vm_flags & VM_MAYSHARE) && h_cg) {
7326
/* For private mappings, the hugetlb_cgroup uncharge info hangs
7327
* off the resv_map.
7328
*/
7329
resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h);
7330
}
7331
7332
/*
7333
* There must be enough pages in the subpool for the mapping. If
7334
* the subpool has a minimum size, there may be some global
7335
* reservations already in place (gbl_reserve).
7336
*/
7337
gbl_reserve = hugepage_subpool_get_pages(spool, chg);
7338
if (gbl_reserve < 0)
7339
goto out_uncharge_cgroup;
7340
7341
/*
7342
* Check enough hugepages are available for the reservation.
7343
* Hand the pages back to the subpool if there are not enough.
7344
*/
7345
if (hugetlb_acct_memory(h, gbl_reserve) < 0)
7346
goto out_put_pages;
7347
7348
/*
7349
* Account for the reservations made. Shared mappings record regions
7350
* that have reservations as they are shared by multiple VMAs.
7351
* When the last VMA disappears, the region map says how much
7352
* the reservation was and the page cache tells how much of
7353
* the reservation was consumed. Private mappings are per-VMA and
7354
* only the consumed reservations are tracked. When the VMA
7355
* disappears, the original reservation is the VMA size and the
7356
* consumed reservations are stored in the map. Hence, nothing
7357
* else has to be done for private mappings here
7358
*/
7359
if (!vma || vma->vm_flags & VM_MAYSHARE) {
7360
add = region_add(resv_map, from, to, regions_needed, h, h_cg);
7361
7362
if (unlikely(add < 0)) {
7363
hugetlb_acct_memory(h, -gbl_reserve);
7364
goto out_put_pages;
7365
} else if (unlikely(chg > add)) {
7366
/*
7367
* pages in this range were added to the reserve
7368
* map between region_chg and region_add. This
7369
* indicates a race with alloc_hugetlb_folio. Adjust
7370
* the subpool and reserve counts modified above
7371
* based on the difference.
7372
*/
7373
long rsv_adjust;
7374
7375
/*
7376
* hugetlb_cgroup_uncharge_cgroup_rsvd() will put the
7377
* reference to h_cg->css. See comment below for detail.
7378
*/
7379
hugetlb_cgroup_uncharge_cgroup_rsvd(
7380
hstate_index(h),
7381
(chg - add) * pages_per_huge_page(h), h_cg);
7382
7383
rsv_adjust = hugepage_subpool_put_pages(spool,
7384
chg - add);
7385
hugetlb_acct_memory(h, -rsv_adjust);
7386
} else if (h_cg) {
7387
/*
7388
* The file_regions will hold their own reference to
7389
* h_cg->css. So we should release the reference held
7390
* via hugetlb_cgroup_charge_cgroup_rsvd() when we are
7391
* done.
7392
*/
7393
hugetlb_cgroup_put_rsvd_cgroup(h_cg);
7394
}
7395
}
7396
return chg;
7397
7398
out_put_pages:
7399
spool_resv = chg - gbl_reserve;
7400
if (spool_resv) {
7401
/* Put the subpool's reservation back, chg - gbl_reserve */
7402
gbl_resv = hugepage_subpool_put_pages(spool, spool_resv);
7403
/*
7404
* The subpool's reserved pages cannot be put back due to a race;
7405
* return them to the hstate.
7406
*/
7407
hugetlb_acct_memory(h, -gbl_resv);
7408
}
7409
out_uncharge_cgroup:
7410
hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h),
7411
chg * pages_per_huge_page(h), h_cg);
7412
out_err:
7413
hugetlb_vma_lock_free(vma);
7414
if (!vma || vma->vm_flags & VM_MAYSHARE)
7415
/* Only call region_abort if the region_chg succeeded but the
7416
* region_add failed or didn't run.
7417
*/
7418
if (chg >= 0 && add < 0)
7419
region_abort(resv_map, from, to, regions_needed);
7420
if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
7421
kref_put(&resv_map->refs, resv_map_release);
7422
set_vma_resv_map(vma, NULL);
7423
}
7424
return chg < 0 ? chg : add < 0 ? add : -EINVAL;
7425
}
7426
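/*
 * Editorial example (not part of the upstream file): a minimal sketch of
 * how an in-kernel caller might reserve huge pages for an inode range
 * along the "!vma || VM_MAYSHARE" path above.  The function name and the
 * byte-offset to huge-page-index conversion are illustrative only; real
 * callers live in fs/hugetlbfs.
 */
static long example_reserve_inode_range(struct inode *inode, loff_t offset,
					loff_t len)
{
	struct hstate *h = hstate_inode(inode);
	/* hugetlb_reserve_pages() works in huge-page index units. */
	long from = offset >> huge_page_shift(h);
	long to = (offset + len) >> huge_page_shift(h);

	/* A negative return means the reservation could not be made. */
	return hugetlb_reserve_pages(inode, from, to, NULL, 0);
}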
7427
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
7428
long freed)
7429
{
7430
struct hstate *h = hstate_inode(inode);
7431
struct resv_map *resv_map = inode_resv_map(inode);
7432
long chg = 0;
7433
struct hugepage_subpool *spool = subpool_inode(inode);
7434
long gbl_reserve;
7435
7436
/*
7437
* Since this routine can be called in the evict inode path for all
7438
* hugetlbfs inodes, resv_map could be NULL.
7439
*/
7440
if (resv_map) {
7441
chg = region_del(resv_map, start, end);
7442
/*
7443
* region_del() can fail in the rare case where a region
7444
* must be split and another region descriptor can not be
7445
* allocated. If end == LONG_MAX, it will not fail.
7446
*/
7447
if (chg < 0)
7448
return chg;
7449
}
7450
7451
spin_lock(&inode->i_lock);
7452
inode->i_blocks -= (blocks_per_huge_page(h) * freed);
7453
spin_unlock(&inode->i_lock);
7454
7455
/*
7456
* If the subpool has a minimum size, the number of global
7457
* reservations to be released may be adjusted.
7458
*
7459
* Note that !resv_map implies freed == 0. So (chg - freed)
7460
* won't go negative.
7461
*/
7462
gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
7463
hugetlb_acct_memory(h, -gbl_reserve);
7464
7465
return 0;
7466
}
7467
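/*
 * Editorial note (not part of the upstream file): a worked example of the
 * (chg - freed) accounting above.  If truncation removes a range whose
 * reserve map held 5 entries (chg == 5) but only 3 folios had actually
 * been faulted in and freed (freed == 3), the 2 reservations that were
 * never consumed are handed back to the subpool, and whatever the subpool
 * does not absorb is released from the global pool via
 * hugetlb_acct_memory().
 */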
7468
#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
7469
static unsigned long page_table_shareable(struct vm_area_struct *svma,
7470
struct vm_area_struct *vma,
7471
unsigned long addr, pgoff_t idx)
7472
{
7473
unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
7474
svma->vm_start;
7475
unsigned long sbase = saddr & PUD_MASK;
7476
unsigned long s_end = sbase + PUD_SIZE;
7477
7478
/* Allow segments to share if only one is marked locked */
7479
vm_flags_t vm_flags = vma->vm_flags & ~VM_LOCKED_MASK;
7480
vm_flags_t svm_flags = svma->vm_flags & ~VM_LOCKED_MASK;
7481
7482
/*
7483
* Match the virtual addresses, permissions and the alignment of the
7484
* page table page.
7485
*
7486
* Also, vma_lock (vm_private_data) is required for sharing.
7487
*/
7488
if (pmd_index(addr) != pmd_index(saddr) ||
7489
vm_flags != svm_flags ||
7490
!range_in_vma(svma, sbase, s_end) ||
7491
!svma->vm_private_data)
7492
return 0;
7493
7494
return saddr;
7495
}
7496
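/*
 * Editorial note (not part of the upstream file): a worked example of the
 * saddr computation above, assuming 4 KiB base pages.  If @idx is the file
 * page index that @addr maps in @vma, and @svma maps the same file from
 * offset 0 (svma->vm_pgoff == 0) starting at svma->vm_start, then
 * saddr == svma->vm_start + (idx << PAGE_SHIFT) is simply the address at
 * which @svma maps that same file page; sharing is then only allowed if
 * the whole surrounding PUD_SIZE region [sbase, s_end) lies inside @svma.
 */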
7497
bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
7498
{
7499
unsigned long start = addr & PUD_MASK;
7500
unsigned long end = start + PUD_SIZE;
7501
7502
#ifdef CONFIG_USERFAULTFD
7503
if (uffd_disable_huge_pmd_share(vma))
7504
return false;
7505
#endif
7506
/*
7507
* Check for proper vm_flags and page table alignment.
7508
*/
7509
if (!(vma->vm_flags & VM_MAYSHARE))
7510
return false;
7511
if (!vma->vm_private_data) /* vma lock required for sharing */
7512
return false;
7513
if (!range_in_vma(vma, start, end))
7514
return false;
7515
return true;
7516
}
7517
7518
/*
7519
* Determine if start,end range within vma could be mapped by shared pmd.
7520
* If yes, adjust start and end to cover range associated with possible
7521
* shared pmd mappings.
7522
*/
7523
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
7524
unsigned long *start, unsigned long *end)
7525
{
7526
unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE),
7527
v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
7528
7529
/*
7530
* vma needs to span at least one aligned PUD size, and the range
7531
* must be at least partially within it.
7532
*/
7533
if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) ||
7534
(*end <= v_start) || (*start >= v_end))
7535
return;
7536
7537
/* Extend the range to be PUD aligned for a worst case scenario */
7538
if (*start > v_start)
7539
*start = ALIGN_DOWN(*start, PUD_SIZE);
7540
7541
if (*end < v_end)
7542
*end = ALIGN(*end, PUD_SIZE);
7543
}
7544
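/*
 * Editorial note (not part of the upstream file): a worked example, on a
 * configuration where PUD_SIZE is 1 GiB.  For a VM_MAYSHARE vma spanning
 * [1 GiB, 4 GiB), a caller range of [1 GiB + 6 MiB, 1 GiB + 8 MiB) is
 * widened to [1 GiB, 2 GiB): *start is rounded down and *end is rounded
 * up to PUD_SIZE, since a shared PMD page covers the whole 1 GiB region.
 */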
7545
/*
7546
* Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
7547
* and returns the corresponding pte. While this is not necessary for the
7548
* !shared pmd case because we can allocate the pmd later as well, it makes the
7549
* code much cleaner. pmd allocation is essential for the shared case because
7550
* pud has to be populated inside the same i_mmap_rwsem section - otherwise
7551
* racing tasks could either miss the sharing (see huge_pte_offset) or select a
7552
* bad pmd for sharing.
7553
*/
7554
pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
7555
unsigned long addr, pud_t *pud)
7556
{
7557
struct address_space *mapping = vma->vm_file->f_mapping;
7558
pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
7559
vma->vm_pgoff;
7560
struct vm_area_struct *svma;
7561
unsigned long saddr;
7562
pte_t *spte = NULL;
7563
pte_t *pte;
7564
7565
i_mmap_lock_read(mapping);
7566
vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
7567
if (svma == vma)
7568
continue;
7569
7570
saddr = page_table_shareable(svma, vma, addr, idx);
7571
if (saddr) {
7572
spte = hugetlb_walk(svma, saddr,
7573
vma_mmu_pagesize(svma));
7574
if (spte) {
7575
ptdesc_pmd_pts_inc(virt_to_ptdesc(spte));
7576
break;
7577
}
7578
}
7579
}
7580
7581
if (!spte)
7582
goto out;
7583
7584
spin_lock(&mm->page_table_lock);
7585
if (pud_none(*pud)) {
7586
pud_populate(mm, pud,
7587
(pmd_t *)((unsigned long)spte & PAGE_MASK));
7588
mm_inc_nr_pmds(mm);
7589
} else {
7590
ptdesc_pmd_pts_dec(virt_to_ptdesc(spte));
7591
}
7592
spin_unlock(&mm->page_table_lock);
7593
out:
7594
pte = (pte_t *)pmd_alloc(mm, pud, addr);
7595
i_mmap_unlock_read(mapping);
7596
return pte;
7597
}
7598
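/*
 * Editorial note (not part of the upstream file): in the typical sharing
 * case, two processes that map the same hugetlbfs file with VM_MAYSHARE
 * over the same PUD-aligned region end up with their PUD entries pointing
 * at the same PMD page table page; ptdesc_pmd_pts_inc() above records the
 * extra sharer so that huge_pmd_unshare() below can tell a shared PMD
 * page apart from an exclusively owned one.
 */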
7599
/*
7600
* unmap huge page backed by shared pte.
7601
*
7602
* Called with page table lock held.
7603
*
7604
* returns: 1 successfully unmapped a shared pte page
7605
* 0 the underlying pte page is not shared, or it is the last user
7606
*/
7607
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
7608
unsigned long addr, pte_t *ptep)
7609
{
7610
unsigned long sz = huge_page_size(hstate_vma(vma));
7611
pgd_t *pgd = pgd_offset(mm, addr);
7612
p4d_t *p4d = p4d_offset(pgd, addr);
7613
pud_t *pud = pud_offset(p4d, addr);
7614
7615
i_mmap_assert_write_locked(vma->vm_file->f_mapping);
7616
hugetlb_vma_assert_locked(vma);
7617
if (sz != PMD_SIZE)
7618
return 0;
7619
if (!ptdesc_pmd_is_shared(virt_to_ptdesc(ptep)))
7620
return 0;
7621
7622
pud_clear(pud);
7623
/*
7624
* Once our caller drops the rmap lock, some other process might be
7625
* using this page table as a normal, non-hugetlb page table.
7626
* Wait for pending gup_fast() in other threads to finish before letting
7627
* that happen.
7628
*/
7629
tlb_remove_table_sync_one();
7630
ptdesc_pmd_pts_dec(virt_to_ptdesc(ptep));
7631
mm_dec_nr_pmds(mm);
7632
return 1;
7633
}
7634
7635
#else /* !CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING */
7636
7637
pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
7638
unsigned long addr, pud_t *pud)
7639
{
7640
return NULL;
7641
}
7642
7643
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
7644
unsigned long addr, pte_t *ptep)
7645
{
7646
return 0;
7647
}
7648
7649
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
7650
unsigned long *start, unsigned long *end)
7651
{
7652
}
7653
7654
bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
7655
{
7656
return false;
7657
}
7658
#endif /* CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING */
7659
7660
#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
7661
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
7662
unsigned long addr, unsigned long sz)
7663
{
7664
pgd_t *pgd;
7665
p4d_t *p4d;
7666
pud_t *pud;
7667
pte_t *pte = NULL;
7668
7669
pgd = pgd_offset(mm, addr);
7670
p4d = p4d_alloc(mm, pgd, addr);
7671
if (!p4d)
7672
return NULL;
7673
pud = pud_alloc(mm, p4d, addr);
7674
if (pud) {
7675
if (sz == PUD_SIZE) {
7676
pte = (pte_t *)pud;
7677
} else {
7678
BUG_ON(sz != PMD_SIZE);
7679
if (want_pmd_share(vma, addr) && pud_none(*pud))
7680
pte = huge_pmd_share(mm, vma, addr, pud);
7681
else
7682
pte = (pte_t *)pmd_alloc(mm, pud, addr);
7683
}
7684
}
7685
7686
if (pte) {
7687
pte_t pteval = ptep_get_lockless(pte);
7688
7689
BUG_ON(pte_present(pteval) && !pte_huge(pteval));
7690
}
7691
7692
return pte;
7693
}
7694
7695
/*
7696
* huge_pte_offset() - Walk the page table to resolve the hugepage
7697
* entry at address @addr
7698
*
7699
* Return: Pointer to page table entry (PUD or PMD) for
7700
* address @addr, or NULL if a !p*d_present() entry is encountered and the
7701
* size @sz doesn't match the hugepage size at this level of the page
7702
* table.
7703
*/
7704
pte_t *huge_pte_offset(struct mm_struct *mm,
7705
unsigned long addr, unsigned long sz)
7706
{
7707
pgd_t *pgd;
7708
p4d_t *p4d;
7709
pud_t *pud;
7710
pmd_t *pmd;
7711
7712
pgd = pgd_offset(mm, addr);
7713
if (!pgd_present(*pgd))
7714
return NULL;
7715
p4d = p4d_offset(pgd, addr);
7716
if (!p4d_present(*p4d))
7717
return NULL;
7718
7719
pud = pud_offset(p4d, addr);
7720
if (sz == PUD_SIZE)
7721
/* must be pud huge, non-present or none */
7722
return (pte_t *)pud;
7723
if (!pud_present(*pud))
7724
return NULL;
7725
/* must have a valid entry and size to go further */
7726
7727
pmd = pmd_offset(pud, addr);
7728
/* must be pmd huge, non-present or none */
7729
return (pte_t *)pmd;
7730
}
7731
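/*
 * Editorial note (not part of the upstream file): per the comment above,
 * for a PUD-sized hugepage the pud_t slot is returned even when the entry
 * is none or non-present (e.g. a migration or hwpoison swap entry), so
 * the caller can inspect it under the page table lock.  For a PMD-sized
 * hugepage only the PUD must be present; the pmd_t slot is then returned
 * in whatever state it happens to be.
 */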
7732
/*
7733
* Return a mask that can be used to update an address to the last huge
7734
* page in the range mapped by a single page table page. Used to skip non-present
7735
* page table entries when linearly scanning address ranges. Architectures
7736
* with unique huge page to page table relationships can define their own
7737
* version of this routine.
7738
*/
7739
unsigned long hugetlb_mask_last_page(struct hstate *h)
7740
{
7741
unsigned long hp_size = huge_page_size(h);
7742
7743
if (hp_size == PUD_SIZE)
7744
return P4D_SIZE - PUD_SIZE;
7745
else if (hp_size == PMD_SIZE)
7746
return PUD_SIZE - PMD_SIZE;
7747
else
7748
return 0UL;
7749
}
7750
7751
#else
7752
7753
/* See description above. Architectures can provide their own version. */
7754
__weak unsigned long hugetlb_mask_last_page(struct hstate *h)
7755
{
7756
#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
7757
if (huge_page_size(h) == PMD_SIZE)
7758
return PUD_SIZE - PMD_SIZE;
7759
#endif
7760
return 0UL;
7761
}
7762
7763
#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
7764
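/*
 * Editorial note (not part of the upstream file): a worked example of how
 * hugetlb_mask_last_page() is used, with typical x86-64 sizes (PMD_SIZE =
 * 2 MiB, PUD_SIZE = 1 GiB).  For a 2 MiB hstate the mask is
 * PUD_SIZE - PMD_SIZE == 0x3fe00000.  In a scan loop such as the one in
 * hugetlb_change_protection() above, hitting a missing PMD at address
 * 0x40a00000 does "address |= mask" (giving 0x7fe00000), and the loop's
 * "address += psize" then lands exactly on 0x80000000, the next PUD
 * boundary, skipping the rest of the empty 1 GiB region in one step.
 */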
7765
/**
7766
* folio_isolate_hugetlb - try to isolate an allocated hugetlb folio
7767
* @folio: the folio to isolate
7768
* @list: the list to add the folio to on success
7769
*
7770
* Isolate an allocated (refcount > 0) hugetlb folio, marking it as
7771
* isolated/non-migratable, and moving it from the active list to the
7772
* given list.
7773
*
7774
* Isolation will fail if @folio is not an allocated hugetlb folio, or if
7775
* it is already isolated/non-migratable.
7776
*
7777
* On success, an additional folio reference is taken that must be dropped
7778
* using folio_putback_hugetlb() to undo the isolation.
7779
*
7780
* Return: True if isolation worked, otherwise False.
7781
*/
7782
bool folio_isolate_hugetlb(struct folio *folio, struct list_head *list)
7783
{
7784
bool ret = true;
7785
7786
spin_lock_irq(&hugetlb_lock);
7787
if (!folio_test_hugetlb(folio) ||
7788
!folio_test_hugetlb_migratable(folio) ||
7789
!folio_try_get(folio)) {
7790
ret = false;
7791
goto unlock;
7792
}
7793
folio_clear_hugetlb_migratable(folio);
7794
list_move_tail(&folio->lru, list);
7795
unlock:
7796
spin_unlock_irq(&hugetlb_lock);
7797
return ret;
7798
}
7799
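/*
 * Editorial example (not part of the upstream file): a minimal sketch of
 * how a hypothetical caller might pair folio_isolate_hugetlb() with
 * folio_putback_hugetlb().  The function name and the decision to give
 * the folio straight back are illustrative only.
 */
static void example_isolate_and_putback(struct folio *folio)
{
	LIST_HEAD(pagelist);

	/* Fails if this is not an allocated hugetlb folio or it is already isolated. */
	if (!folio_isolate_hugetlb(folio, &pagelist))
		return;

	/*
	 * The folio now sits on our private list with an extra reference.
	 * A real caller would hand the list to the migration code; here we
	 * simply undo the isolation, which also drops that reference.
	 */
	folio_putback_hugetlb(folio);
}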
7800
int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison)
7801
{
7802
int ret = 0;
7803
7804
*hugetlb = false;
7805
spin_lock_irq(&hugetlb_lock);
7806
if (folio_test_hugetlb(folio)) {
7807
*hugetlb = true;
7808
if (folio_test_hugetlb_freed(folio))
7809
ret = 0;
7810
else if (folio_test_hugetlb_migratable(folio) || unpoison)
7811
ret = folio_try_get(folio);
7812
else
7813
ret = -EBUSY;
7814
}
7815
spin_unlock_irq(&hugetlb_lock);
7816
return ret;
7817
}
7818
7819
int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
7820
bool *migratable_cleared)
7821
{
7822
int ret;
7823
7824
spin_lock_irq(&hugetlb_lock);
7825
ret = __get_huge_page_for_hwpoison(pfn, flags, migratable_cleared);
7826
spin_unlock_irq(&hugetlb_lock);
7827
return ret;
7828
}
7829
7830
/**
7831
* folio_putback_hugetlb - unisolate a hugetlb folio
7832
* @folio: the isolated hugetlb folio
7833
*
7834
* Putback/un-isolate the hugetlb folio that was previously isolated using
7835
* folio_isolate_hugetlb(): marking it non-isolated/migratable and putting it
7836
* back onto the active list.
7837
*
7838
* Will drop the additional folio reference obtained through
7839
* folio_isolate_hugetlb().
7840
*/
7841
void folio_putback_hugetlb(struct folio *folio)
7842
{
7843
spin_lock_irq(&hugetlb_lock);
7844
folio_set_hugetlb_migratable(folio);
7845
list_move_tail(&folio->lru, &(folio_hstate(folio))->hugepage_activelist);
7846
spin_unlock_irq(&hugetlb_lock);
7847
folio_put(folio);
7848
}
7849
7850
void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason)
7851
{
7852
struct hstate *h = folio_hstate(old_folio);
7853
7854
hugetlb_cgroup_migrate(old_folio, new_folio);
7855
folio_set_owner_migrate_reason(new_folio, reason);
7856
7857
/*
7858
* Transfer the temporary state of the new hugetlb folio. This is
7859
* the reverse of other transitions because the new folio is going to
7860
* be final while the old one will be freed, so it takes over
7861
* the temporary status.
7862
*
7863
* Also note that we have to transfer the per-node surplus state
7864
* here as well otherwise the global surplus count will not match
7865
* the per-node's.
7866
*/
7867
if (folio_test_hugetlb_temporary(new_folio)) {
7868
int old_nid = folio_nid(old_folio);
7869
int new_nid = folio_nid(new_folio);
7870
7871
folio_set_hugetlb_temporary(old_folio);
7872
folio_clear_hugetlb_temporary(new_folio);
7873
7874
7875
/*
7876
* There is no need to transfer the per-node surplus state
7877
* when we do not cross the node.
7878
*/
7879
if (new_nid == old_nid)
7880
return;
7881
spin_lock_irq(&hugetlb_lock);
7882
if (h->surplus_huge_pages_node[old_nid]) {
7883
h->surplus_huge_pages_node[old_nid]--;
7884
h->surplus_huge_pages_node[new_nid]++;
7885
}
7886
spin_unlock_irq(&hugetlb_lock);
7887
}
7888
7889
/*
7890
* Our old folio is isolated and has "migratable" cleared until it
7891
* is put back. As migration succeeded, set the new folio "migratable"
7892
* and add it to the active list.
7893
*/
7894
spin_lock_irq(&hugetlb_lock);
7895
folio_set_hugetlb_migratable(new_folio);
7896
list_move_tail(&new_folio->lru, &(folio_hstate(new_folio))->hugepage_activelist);
7897
spin_unlock_irq(&hugetlb_lock);
7898
}
7899
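/*
 * Editorial note (not part of the upstream file): a concrete case of the
 * surplus transfer above.  If the new (temporary) folio was allocated on
 * node 1 while the old folio lived on node 0, and node 0 has surplus
 * pages accounted, one unit of surplus accounting is moved from node 0 to
 * node 1 under hugetlb_lock, keeping the global surplus count equal to
 * the sum of the per-node counts.
 */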
7900
/*
7901
* If @take_locks is false, the caller must ensure that no concurrent page table
7902
* access can happen (except for gup_fast() and hardware page walks).
7903
* If @take_locks is true, we take the hugetlb VMA lock (to lock out things like
7904
* concurrent page fault handling) and the file rmap lock.
7905
*/
7906
static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
7907
unsigned long start,
7908
unsigned long end,
7909
bool take_locks)
7910
{
7911
struct hstate *h = hstate_vma(vma);
7912
unsigned long sz = huge_page_size(h);
7913
struct mm_struct *mm = vma->vm_mm;
7914
struct mmu_notifier_range range;
7915
unsigned long address;
7916
spinlock_t *ptl;
7917
pte_t *ptep;
7918
7919
if (!(vma->vm_flags & VM_MAYSHARE))
7920
return;
7921
7922
if (start >= end)
7923
return;
7924
7925
flush_cache_range(vma, start, end);
7926
/*
7927
* No need to call adjust_range_if_pmd_sharing_possible(), because
7928
* we have already done the PUD_SIZE alignment.
7929
*/
7930
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
7931
start, end);
7932
mmu_notifier_invalidate_range_start(&range);
7933
if (take_locks) {
7934
hugetlb_vma_lock_write(vma);
7935
i_mmap_lock_write(vma->vm_file->f_mapping);
7936
} else {
7937
i_mmap_assert_write_locked(vma->vm_file->f_mapping);
7938
}
7939
for (address = start; address < end; address += PUD_SIZE) {
7940
ptep = hugetlb_walk(vma, address, sz);
7941
if (!ptep)
7942
continue;
7943
ptl = huge_pte_lock(h, mm, ptep);
7944
huge_pmd_unshare(mm, vma, address, ptep);
7945
spin_unlock(ptl);
7946
}
7947
flush_hugetlb_tlb_range(vma, start, end);
7948
if (take_locks) {
7949
i_mmap_unlock_write(vma->vm_file->f_mapping);
7950
hugetlb_vma_unlock_write(vma);
7951
}
7952
/*
7953
* No need to call mmu_notifier_arch_invalidate_secondary_tlbs(), see
7954
* Documentation/mm/mmu_notifier.rst.
7955
*/
7956
mmu_notifier_invalidate_range_end(&range);
7957
}
7958
7959
/*
7960
* This function will unconditionally remove all the shared pmd pgtable entries
7961
* within the specific vma for a hugetlbfs memory range.
7962
*/
7963
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
7964
{
7965
hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE),
7966
ALIGN_DOWN(vma->vm_end, PUD_SIZE),
7967
/* take_locks = */ true);
7968
}
7969
7970
/*
7971
* For hugetlb, mremap() is an odd edge case - while the VMA copying is
7972
* performed, we permit both the old and new VMAs to reference the same
7973
* reservation.
7974
*
7975
* We fix this up after the operation succeeds, or if a newly allocated VMA
7976
* is closed as a result of a failure to allocate memory.
7977
*/
7978
void fixup_hugetlb_reservations(struct vm_area_struct *vma)
7979
{
7980
if (is_vm_hugetlb_page(vma))
7981
clear_vma_resv_huge_pages(vma);
7982
}
7983
7984