GitHub Repository: torvalds/linux
Path: blob/master/mm/hmm.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <[email protected]>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/pagewalk.h>
#include <linux/hmm.h>
#include <linux/hmm-dma.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/pci-p2pdma.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

#include "internal.h"

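/*
 * Per-walk private state passed to the page table walker through
 * mm_walk->private: @range is the caller's hmm_range being filled in, and
 * @last tracks the first address whose result has not been stored in the
 * pfn array yet, so a walk interrupted by -EBUSY can restart from there.
 */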
struct hmm_vma_walk {
        struct hmm_range        *range;
        unsigned long           last;
};

enum {
        HMM_NEED_FAULT = 1 << 0,
        HMM_NEED_WRITE_FAULT = 1 << 1,
        HMM_NEED_ALL_BITS = HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT,
};

enum {
        /* These flags are carried from input-to-output */
        HMM_PFN_INOUT_FLAGS = HMM_PFN_DMA_MAPPED | HMM_PFN_P2PDMA |
                              HMM_PFN_P2PDMA_BUS,
};

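/*
 * Fill the pfn array entries covering [addr, end) with @cpu_flags (e.g.
 * HMM_PFN_ERROR or 0), preserving only the HMM_PFN_INOUT_FLAGS bits that are
 * carried from input to output.
 */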
static int hmm_pfns_fill(unsigned long addr, unsigned long end,
                         struct hmm_range *range, unsigned long cpu_flags)
{
        unsigned long i = (addr - range->start) >> PAGE_SHIFT;

        for (; addr < end; addr += PAGE_SIZE, i++) {
                range->hmm_pfns[i] &= HMM_PFN_INOUT_FLAGS;
                range->hmm_pfns[i] |= cpu_flags;
        }
        return 0;
}

/*
 * hmm_vma_fault() - fault in a range lacking valid pmd or pte(s)
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @required_fault: HMM_NEED_* flags
 * @walk: mm_walk structure
 * Return: -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_fault(unsigned long addr, unsigned long end,
                         unsigned int required_fault, struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct vm_area_struct *vma = walk->vma;
        unsigned int fault_flags = FAULT_FLAG_REMOTE;

        WARN_ON_ONCE(!required_fault);
        hmm_vma_walk->last = addr;

        if (required_fault & HMM_NEED_WRITE_FAULT) {
                if (!(vma->vm_flags & VM_WRITE))
                        return -EPERM;
                fault_flags |= FAULT_FLAG_WRITE;
        }

        for (; addr < end; addr += PAGE_SIZE)
                if (handle_mm_fault(vma, addr, fault_flags, NULL) &
                    VM_FAULT_ERROR)
                        return -EFAULT;
        return -EBUSY;
}

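/*
 * Combine the per-pfn request flags with the range's default flags and decide
 * whether this entry needs faulting: returns a mask of HMM_NEED_FAULT and
 * HMM_NEED_WRITE_FAULT, or 0 when the current CPU flags already satisfy the
 * request.
 */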
static unsigned int hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
                                       unsigned long pfn_req_flags,
                                       unsigned long cpu_flags)
{
        struct hmm_range *range = hmm_vma_walk->range;

        /*
         * Consider not only the individual per-page request but also the
         * default flags requested for the range. The API can be used in two
         * ways: either the HMM user coalesces multiple page faults into one
         * request and sets flags per pfn for those faults, or the HMM user
         * wants to pre-fault a range with specific flags. For the latter it
         * would be a waste to have the user pre-fill the pfn array with a
         * default flags value.
         */
        pfn_req_flags &= range->pfn_flags_mask;
        pfn_req_flags |= range->default_flags;

        /* We aren't asked to do anything ... */
        if (!(pfn_req_flags & HMM_PFN_REQ_FAULT))
                return 0;

        /* Do we need a write fault? */
        if ((pfn_req_flags & HMM_PFN_REQ_WRITE) &&
            !(cpu_flags & HMM_PFN_WRITE))
                return HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT;

        /* If the CPU page table is not valid then we need to fault */
        if (!(cpu_flags & HMM_PFN_VALID))
                return HMM_NEED_FAULT;
        return 0;
}

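/*
 * Aggregate hmm_pte_need_fault() over @npages entries, returning early once
 * both HMM_NEED_* bits are set since the result cannot grow any further.
 */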
static unsigned int
hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
                     const unsigned long hmm_pfns[], unsigned long npages,
                     unsigned long cpu_flags)
{
        struct hmm_range *range = hmm_vma_walk->range;
        unsigned int required_fault = 0;
        unsigned long i;

        /*
         * If the default flags do not request to fault pages, and the mask does
         * not allow for individual pages to be faulted, then
         * hmm_pte_need_fault() will always return 0.
         */
        if (!((range->default_flags | range->pfn_flags_mask) &
              HMM_PFN_REQ_FAULT))
                return 0;

        for (i = 0; i < npages; ++i) {
                required_fault |= hmm_pte_need_fault(hmm_vma_walk, hmm_pfns[i],
                                                     cpu_flags);
                if (required_fault == HMM_NEED_ALL_BITS)
                        return required_fault;
        }
        return required_fault;
}

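/*
 * pte_hole callback: called for address ranges with no page table entries.
 * Without a backing vma, a requested fault becomes -EFAULT and the entries
 * are otherwise marked HMM_PFN_ERROR; with a vma, the range is faulted in
 * when requested or reported as empty (0).
 */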
static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
                             __always_unused int depth, struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        unsigned int required_fault;
        unsigned long i, npages;
        unsigned long *hmm_pfns;

        i = (addr - range->start) >> PAGE_SHIFT;
        npages = (end - addr) >> PAGE_SHIFT;
        hmm_pfns = &range->hmm_pfns[i];
        required_fault =
                hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0);
        if (!walk->vma) {
                if (required_fault)
                        return -EFAULT;
                return hmm_pfns_fill(addr, end, range, HMM_PFN_ERROR);
        }
        if (required_fault)
                return hmm_vma_fault(addr, end, required_fault, walk);
        return hmm_pfns_fill(addr, end, range, 0);
}

static inline unsigned long hmm_pfn_flags_order(unsigned long order)
{
        return order << HMM_PFN_ORDER_SHIFT;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline unsigned long pmd_to_hmm_pfn_flags(struct hmm_range *range,
                                                 pmd_t pmd)
{
        if (pmd_protnone(pmd))
                return 0;
        return (pmd_write(pmd) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
                                 HMM_PFN_VALID) |
               hmm_pfn_flags_order(PMD_SHIFT - PAGE_SHIFT);
}

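/*
 * Fill the pfn array for a range covered by a single transparent huge pmd:
 * fault if the mapping does not satisfy the request (e.g. write requested on
 * a read-only pmd), otherwise expand the pmd into per-page pfns carrying the
 * huge page order in the flags.
 */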
static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
                              unsigned long end, unsigned long hmm_pfns[],
                              pmd_t pmd)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        unsigned long pfn, npages, i;
        unsigned int required_fault;
        unsigned long cpu_flags;

        npages = (end - addr) >> PAGE_SHIFT;
        cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
        required_fault =
                hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, cpu_flags);
        if (required_fault)
                return hmm_vma_fault(addr, end, required_fault, walk);

        pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
        for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
                hmm_pfns[i] &= HMM_PFN_INOUT_FLAGS;
                hmm_pfns[i] |= pfn | cpu_flags;
        }
        return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
/* stub to allow the code below to compile */
int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
                       unsigned long end, unsigned long hmm_pfns[], pmd_t pmd);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline unsigned long pte_to_hmm_pfn_flags(struct hmm_range *range,
                                                 pte_t pte)
{
        if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
                return 0;
        return pte_write(pte) ? (HMM_PFN_VALID | HMM_PFN_WRITE) : HMM_PFN_VALID;
}

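/*
 * Convert a single pte into an hmm_pfn entry. Handles empty ptes, device
 * private and other swap entries, migration entries (wait, then retry with
 * -EBUSY), and present ptes, faulting whenever the caller's request cannot
 * be satisfied. The error paths drop the pte mapping before returning.
 */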
static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
                              unsigned long end, pmd_t *pmdp, pte_t *ptep,
                              unsigned long *hmm_pfn)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        unsigned int required_fault;
        unsigned long cpu_flags;
        pte_t pte = ptep_get(ptep);
        uint64_t pfn_req_flags = *hmm_pfn;
        uint64_t new_pfn_flags = 0;

        if (pte_none_mostly(pte)) {
                required_fault =
                        hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
                if (required_fault)
                        goto fault;
                goto out;
        }

        if (!pte_present(pte)) {
                swp_entry_t entry = pte_to_swp_entry(pte);

                /*
                 * Don't fault in device private pages owned by the caller,
                 * just report the PFN.
                 */
                if (is_device_private_entry(entry) &&
                    page_pgmap(pfn_swap_entry_to_page(entry))->owner ==
                    range->dev_private_owner) {
                        cpu_flags = HMM_PFN_VALID;
                        if (is_writable_device_private_entry(entry))
                                cpu_flags |= HMM_PFN_WRITE;
                        new_pfn_flags = swp_offset_pfn(entry) | cpu_flags;
                        goto out;
                }

                required_fault =
                        hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
                if (!required_fault)
                        goto out;

                if (!non_swap_entry(entry))
                        goto fault;

                if (is_device_private_entry(entry))
                        goto fault;

                if (is_device_exclusive_entry(entry))
                        goto fault;

                if (is_migration_entry(entry)) {
                        pte_unmap(ptep);
                        hmm_vma_walk->last = addr;
                        migration_entry_wait(walk->mm, pmdp, addr);
                        return -EBUSY;
                }

                /* Report error for everything else */
                pte_unmap(ptep);
                return -EFAULT;
        }

        cpu_flags = pte_to_hmm_pfn_flags(range, pte);
        required_fault =
                hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
        if (required_fault)
                goto fault;

        /*
         * Since each architecture defines a struct page for the zero page, just
         * fall through and treat it like a normal page.
         */
        if (!vm_normal_page(walk->vma, addr, pte) &&
            !is_zero_pfn(pte_pfn(pte))) {
                if (hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0)) {
                        pte_unmap(ptep);
                        return -EFAULT;
                }
                new_pfn_flags = HMM_PFN_ERROR;
                goto out;
        }

        new_pfn_flags = pte_pfn(pte) | cpu_flags;
out:
        *hmm_pfn = (*hmm_pfn & HMM_PFN_INOUT_FLAGS) | new_pfn_flags;
        return 0;

fault:
        pte_unmap(ptep);
        /* Fault any virtual address we were asked to fault */
        return hmm_vma_fault(addr, end, required_fault, walk);
}

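/*
 * Handle a pmd that is not present. A device private THP entry owned by the
 * caller is reported directly as pfns; anything else either triggers a fault
 * (when one was requested) or is reported as HMM_PFN_ERROR.
 */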
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static int hmm_vma_handle_absent_pmd(struct mm_walk *walk, unsigned long start,
                                     unsigned long end, unsigned long *hmm_pfns,
                                     pmd_t pmd)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        unsigned long npages = (end - start) >> PAGE_SHIFT;
        unsigned long addr = start;
        swp_entry_t entry = pmd_to_swp_entry(pmd);
        unsigned int required_fault;

        if (is_device_private_entry(entry) &&
            pfn_swap_entry_folio(entry)->pgmap->owner ==
            range->dev_private_owner) {
                unsigned long cpu_flags = HMM_PFN_VALID |
                        hmm_pfn_flags_order(PMD_SHIFT - PAGE_SHIFT);
                unsigned long pfn = swp_offset_pfn(entry);
                unsigned long i;

                if (is_writable_device_private_entry(entry))
                        cpu_flags |= HMM_PFN_WRITE;

                /*
                 * Fully populate the PFN list though subsequent PFNs could be
                 * inferred, because drivers which are not yet aware of large
                 * folios probably do not support sparsely populated PFN lists.
                 */
                for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
                        hmm_pfns[i] &= HMM_PFN_INOUT_FLAGS;
                        hmm_pfns[i] |= pfn | cpu_flags;
                }

                return 0;
        }

        required_fault = hmm_range_need_fault(hmm_vma_walk, hmm_pfns,
                                              npages, 0);
        if (required_fault) {
                if (is_device_private_entry(entry))
                        return hmm_vma_fault(addr, end, required_fault, walk);
                else
                        return -EFAULT;
        }

        return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
}
#else
static int hmm_vma_handle_absent_pmd(struct mm_walk *walk, unsigned long start,
                                     unsigned long end, unsigned long *hmm_pfns,
                                     pmd_t pmd)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        unsigned long npages = (end - start) >> PAGE_SHIFT;

        if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
                return -EFAULT;
        return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
}
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

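/*
 * pmd_entry callback: dispatch on the pmd state - none, migration entry, not
 * present, transparent huge, bad, or a regular pmd pointing to a pte table
 * that is then scanned one pte at a time.
 */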
static int hmm_vma_walk_pmd(pmd_t *pmdp,
                            unsigned long start,
                            unsigned long end,
                            struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        unsigned long *hmm_pfns =
                &range->hmm_pfns[(start - range->start) >> PAGE_SHIFT];
        unsigned long npages = (end - start) >> PAGE_SHIFT;
        unsigned long addr = start;
        pte_t *ptep;
        pmd_t pmd;

again:
        pmd = pmdp_get_lockless(pmdp);
        if (pmd_none(pmd))
                return hmm_vma_walk_hole(start, end, -1, walk);

        if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
                if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0)) {
                        hmm_vma_walk->last = addr;
                        pmd_migration_entry_wait(walk->mm, pmdp);
                        return -EBUSY;
                }
                return hmm_pfns_fill(start, end, range, 0);
        }

        if (!pmd_present(pmd))
                return hmm_vma_handle_absent_pmd(walk, start, end, hmm_pfns,
                                                 pmd);

        if (pmd_trans_huge(pmd)) {
                /*
                 * No need to take the pmd_lock here; even if some other thread
                 * is splitting the huge pmd, we will get that event through
                 * the mmu_notifier callback.
                 *
                 * So just read the pmd value again, check that it is still a
                 * transparent huge or device mapping, and compute the
                 * corresponding pfn values.
                 */
                pmd = pmdp_get_lockless(pmdp);
                if (!pmd_trans_huge(pmd))
                        goto again;

                return hmm_vma_handle_pmd(walk, addr, end, hmm_pfns, pmd);
        }

        /*
         * We have handled all the valid cases above, i.e. none, migration,
         * huge or transparent huge. At this point the pmd is either a valid
         * entry pointing to a pte directory or a bad pmd that will not
         * recover.
         */
        if (pmd_bad(pmd)) {
                if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
                        return -EFAULT;
                return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
        }

        ptep = pte_offset_map(pmdp, addr);
        if (!ptep)
                goto again;
        for (; addr < end; addr += PAGE_SIZE, ptep++, hmm_pfns++) {
                int r;

                r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, hmm_pfns);
                if (r) {
                        /* hmm_vma_handle_pte() did pte_unmap() */
                        return r;
                }
        }
        pte_unmap(ptep - 1);
        return 0;
}

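/*
 * PUD handling: for a huge (leaf) pud, hmm_vma_walk_pud() either faults or
 * expands it into per-page pfns; otherwise it asks the walker to descend
 * into the underlying pmd/pte tables.
 */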
#if defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static inline unsigned long pud_to_hmm_pfn_flags(struct hmm_range *range,
                                                 pud_t pud)
{
        if (!pud_present(pud))
                return 0;
        return (pud_write(pud) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
                                 HMM_PFN_VALID) |
               hmm_pfn_flags_order(PUD_SHIFT - PAGE_SHIFT);
}

static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
                            struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        unsigned long addr = start;
        pud_t pud;
        spinlock_t *ptl = pud_trans_huge_lock(pudp, walk->vma);

        if (!ptl)
                return 0;

        /* Normally we don't want to split the huge page */
        walk->action = ACTION_CONTINUE;

        pud = READ_ONCE(*pudp);
        if (!pud_present(pud)) {
                spin_unlock(ptl);
                return hmm_vma_walk_hole(start, end, -1, walk);
        }

        if (pud_leaf(pud)) {
                unsigned long i, npages, pfn;
                unsigned int required_fault;
                unsigned long *hmm_pfns;
                unsigned long cpu_flags;

                i = (addr - range->start) >> PAGE_SHIFT;
                npages = (end - addr) >> PAGE_SHIFT;
                hmm_pfns = &range->hmm_pfns[i];

                cpu_flags = pud_to_hmm_pfn_flags(range, pud);
                required_fault = hmm_range_need_fault(hmm_vma_walk, hmm_pfns,
                                                      npages, cpu_flags);
                if (required_fault) {
                        spin_unlock(ptl);
                        return hmm_vma_fault(addr, end, required_fault, walk);
                }

                pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
                for (i = 0; i < npages; ++i, ++pfn) {
                        hmm_pfns[i] &= HMM_PFN_INOUT_FLAGS;
                        hmm_pfns[i] |= pfn | cpu_flags;
                }
                goto out_unlock;
        }

        /* Ask for the PUD to be split */
        walk->action = ACTION_SUBTREE;

out_unlock:
        spin_unlock(ptl);
        return 0;
}
#else
#define hmm_vma_walk_pud        NULL
#endif

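/*
 * hugetlb support: report a hugetlb mapping as a run of per-page pfns
 * carrying the huge page order, faulting first (with the vma lock dropped)
 * when the request is not satisfied by the current entry.
 */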
#ifdef CONFIG_HUGETLB_PAGE
static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
                                      unsigned long start, unsigned long end,
                                      struct mm_walk *walk)
{
        unsigned long addr = start, i, pfn;
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        struct vm_area_struct *vma = walk->vma;
        unsigned int required_fault;
        unsigned long pfn_req_flags;
        unsigned long cpu_flags;
        spinlock_t *ptl;
        pte_t entry;

        ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
        entry = huge_ptep_get(walk->mm, addr, pte);

        i = (start - range->start) >> PAGE_SHIFT;
        pfn_req_flags = range->hmm_pfns[i];
        cpu_flags = pte_to_hmm_pfn_flags(range, entry) |
                    hmm_pfn_flags_order(huge_page_order(hstate_vma(vma)));
        required_fault =
                hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
        if (required_fault) {
                int ret;

                spin_unlock(ptl);
                hugetlb_vma_unlock_read(vma);
                /*
                 * Avoid deadlock: drop the vma lock before calling
                 * hmm_vma_fault(), which will itself potentially take and
                 * drop the vma lock. This is also correct from a
                 * protection point of view, because there is no further
                 * use here of either pte or ptl after dropping the vma
                 * lock.
                 */
                ret = hmm_vma_fault(addr, end, required_fault, walk);
                hugetlb_vma_lock_read(vma);
                return ret;
        }

        pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
        for (; addr < end; addr += PAGE_SIZE, i++, pfn++) {
                range->hmm_pfns[i] &= HMM_PFN_INOUT_FLAGS;
                range->hmm_pfns[i] |= pfn | cpu_flags;
        }

        spin_unlock(ptl);
        return 0;
}
#else
#define hmm_vma_walk_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

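/*
 * test_walk callback: reject vmas that hmm_range_fault() cannot handle
 * (VM_IO/VM_PFNMAP or no read permission). For those, a requested fault is a
 * hard -EFAULT; otherwise the entries are marked HMM_PFN_ERROR and the vma
 * is skipped.
 */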
static int hmm_vma_walk_test(unsigned long start, unsigned long end,
                             struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        struct vm_area_struct *vma = walk->vma;

        if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)) &&
            vma->vm_flags & VM_READ)
                return 0;

        /*
         * vma ranges that don't have struct page backing them or map I/O
         * devices directly cannot be handled by hmm_range_fault().
         *
         * If the vma does not allow read access, then assume that it does not
         * allow write access either. HMM does not support architectures that
         * allow write without read.
         *
         * If a fault is requested for an unsupported range then it is a hard
         * failure.
         */
        if (hmm_range_need_fault(hmm_vma_walk,
                                 range->hmm_pfns +
                                 ((start - range->start) >> PAGE_SHIFT),
                                 (end - start) >> PAGE_SHIFT, 0))
                return -EFAULT;

        hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);

        /* Skip this vma and continue processing the next vma. */
        return 1;
}

static const struct mm_walk_ops hmm_walk_ops = {
        .pud_entry      = hmm_vma_walk_pud,
        .pmd_entry      = hmm_vma_walk_pmd,
        .pte_hole       = hmm_vma_walk_hole,
        .hugetlb_entry  = hmm_vma_walk_hugetlb_entry,
        .test_walk      = hmm_vma_walk_test,
        .walk_lock      = PGWALK_RDLOCK,
};

/**
 * hmm_range_fault - try to fault some address in a virtual address range
 * @range: argument structure
 *
 * Returns 0 on success or one of the following error codes:
 *
 * -EINVAL:     Invalid arguments or mm or virtual address is in an invalid vma
 *              (e.g., device file vma).
 * -ENOMEM:     Out of memory.
 * -EPERM:      Invalid permission (e.g., asking for write and range is read
 *              only).
 * -EBUSY:      The range has been invalidated and the caller needs to wait for
 *              the invalidation to finish.
 * -EFAULT:     A page was requested to be valid and could not be made valid,
 *              i.e. it has no backing VMA or it is illegal to access.
 *
 * This is similar to get_user_pages(), except that it can read the page tables
 * without mutating them (i.e. causing faults).
 */
int hmm_range_fault(struct hmm_range *range)
{
        struct hmm_vma_walk hmm_vma_walk = {
                .range = range,
                .last = range->start,
        };
        struct mm_struct *mm = range->notifier->mm;
        int ret;

        mmap_assert_locked(mm);

        do {
                /* If range is no longer valid force retry. */
                if (mmu_interval_check_retry(range->notifier,
                                             range->notifier_seq))
                        return -EBUSY;
                ret = walk_page_range(mm, hmm_vma_walk.last, range->end,
                                      &hmm_walk_ops, &hmm_vma_walk);
                /*
                 * When -EBUSY is returned the loop restarts with
                 * hmm_vma_walk.last set to an address that has not been stored
                 * in pfns. All entries < last in the pfn array are set to their
                 * output, and all >= are still at their input values.
                 */
        } while (ret == -EBUSY);
        return ret;
}
EXPORT_SYMBOL(hmm_range_fault);

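/*
 * Illustrative sketch (not part of this file) of the retry loop a caller is
 * expected to build around hmm_range_fault(), following the pattern described
 * in Documentation/mm/hmm.rst; driver_lock()/driver_unlock() and the device
 * page table update are hypothetical placeholders:
 *
 *      int driver_populate_range(struct driver *drv, struct hmm_range *range)
 *      {
 *              int ret;
 *
 *      again:
 *              range->notifier_seq = mmu_interval_read_begin(range->notifier);
 *              mmap_read_lock(range->notifier->mm);
 *              ret = hmm_range_fault(range);
 *              mmap_read_unlock(range->notifier->mm);
 *              if (ret) {
 *                      if (ret == -EBUSY)
 *                              goto again;
 *                      return ret;
 *              }
 *
 *              driver_lock(drv);
 *              if (mmu_interval_read_retry(range->notifier,
 *                                          range->notifier_seq)) {
 *                      driver_unlock(drv);
 *                      goto again;
 *              }
 *              ... program the device page tables from range->hmm_pfns ...
 *              driver_unlock(drv);
 *              return 0;
 *      }
 */
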
/**
 * hmm_dma_map_alloc - Allocate HMM map structure
 * @dev: device to allocate structure for
 * @map: HMM map to allocate
 * @nr_entries: number of entries in the map
 * @dma_entry_size: size of the DMA entry in the map
 *
 * Allocate the HMM map structure and all the lists it contains.
 * Return 0 on success, -ENOMEM on failure.
 */
int hmm_dma_map_alloc(struct device *dev, struct hmm_dma_map *map,
                      size_t nr_entries, size_t dma_entry_size)
{
        bool dma_need_sync = false;
        bool use_iova;

        WARN_ON_ONCE(!(nr_entries * PAGE_SIZE / dma_entry_size));

        /*
         * The HMM API violates our normal DMA buffer ownership rules and can't
         * transfer buffer ownership. The dma_addressing_limited() check is a
         * best approximation to ensure no swiotlb buffering happens.
         */
#ifdef CONFIG_DMA_NEED_SYNC
        dma_need_sync = !dev->dma_skip_sync;
#endif /* CONFIG_DMA_NEED_SYNC */
        if (dma_need_sync || dma_addressing_limited(dev))
                return -EOPNOTSUPP;

        map->dma_entry_size = dma_entry_size;
        map->pfn_list = kvcalloc(nr_entries, sizeof(*map->pfn_list),
                                 GFP_KERNEL | __GFP_NOWARN);
        if (!map->pfn_list)
                return -ENOMEM;

        use_iova = dma_iova_try_alloc(dev, &map->state, 0,
                                      nr_entries * PAGE_SIZE);
        if (!use_iova && dma_need_unmap(dev)) {
                map->dma_list = kvcalloc(nr_entries, sizeof(*map->dma_list),
                                         GFP_KERNEL | __GFP_NOWARN);
                if (!map->dma_list)
                        goto err_dma;
        }
        return 0;

err_dma:
        kvfree(map->pfn_list);
        return -ENOMEM;
}
EXPORT_SYMBOL_GPL(hmm_dma_map_alloc);

/**
 * hmm_dma_map_free - Free HMM map structure
 * @dev: device to free structure from
 * @map: HMM map containing the various lists and state
 *
 * Free the HMM map structure and all the lists it contains.
 */
void hmm_dma_map_free(struct device *dev, struct hmm_dma_map *map)
{
        if (dma_use_iova(&map->state))
                dma_iova_free(dev, &map->state);
        kvfree(map->pfn_list);
        kvfree(map->dma_list);
}
EXPORT_SYMBOL_GPL(hmm_dma_map_free);

/**
 * hmm_dma_map_pfn - Map a physical HMM page to DMA address
 * @dev: Device to map the page for
 * @map: HMM map
 * @idx: Index into the PFN and dma address arrays
 * @p2pdma_state: PCI P2P state.
 *
 * Call this function once the PFN list has been populated (for example by
 * hmm_range_fault()) to obtain the DMA address for the entry at @idx. When
 * the IOVA space allocated by hmm_dma_map_alloc() is in use, the page is
 * linked into that space at offset @idx * dma_entry_size; otherwise it is
 * mapped with dma_map_page().
 *
 * Return: the DMA address on success, or DMA_MAPPING_ERROR on failure.
 */
dma_addr_t hmm_dma_map_pfn(struct device *dev, struct hmm_dma_map *map,
                           size_t idx,
                           struct pci_p2pdma_map_state *p2pdma_state)
{
        struct dma_iova_state *state = &map->state;
        dma_addr_t *dma_addrs = map->dma_list;
        unsigned long *pfns = map->pfn_list;
        struct page *page = hmm_pfn_to_page(pfns[idx]);
        phys_addr_t paddr = hmm_pfn_to_phys(pfns[idx]);
        size_t offset = idx * map->dma_entry_size;
        unsigned long attrs = 0;
        dma_addr_t dma_addr;
        int ret;

        if ((pfns[idx] & HMM_PFN_DMA_MAPPED) &&
            !(pfns[idx] & HMM_PFN_P2PDMA_BUS)) {
                /*
                 * We are in this flow when there is a need to resync flags,
                 * for example when the page was already linked in a prefetch
                 * call with the READ flag and now we need to add the WRITE
                 * flag.
                 *
                 * This page was already programmed to HW and we don't want or
                 * need to unlink and link it again just to resync flags.
                 */
                if (dma_use_iova(state))
                        return state->addr + offset;

                /*
                 * Without dma_need_unmap, the dma_addrs array is NULL, thus we
                 * need to regenerate the address below even if there already
                 * was a mapping. But !dma_need_unmap implies that the
                 * mapping is stateless, so this is fine.
                 */
                if (dma_need_unmap(dev))
                        return dma_addrs[idx];

                /* Continue to remapping */
        }

        switch (pci_p2pdma_state(p2pdma_state, dev, page)) {
        case PCI_P2PDMA_MAP_NONE:
                break;
        case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
                attrs |= DMA_ATTR_SKIP_CPU_SYNC;
                pfns[idx] |= HMM_PFN_P2PDMA;
                break;
        case PCI_P2PDMA_MAP_BUS_ADDR:
                pfns[idx] |= HMM_PFN_P2PDMA_BUS | HMM_PFN_DMA_MAPPED;
                return pci_p2pdma_bus_addr_map(p2pdma_state, paddr);
        default:
                return DMA_MAPPING_ERROR;
        }

        if (dma_use_iova(state)) {
                ret = dma_iova_link(dev, state, paddr, offset,
                                    map->dma_entry_size, DMA_BIDIRECTIONAL,
                                    attrs);
                if (ret)
                        goto error;

                ret = dma_iova_sync(dev, state, offset, map->dma_entry_size);
                if (ret) {
                        dma_iova_unlink(dev, state, offset, map->dma_entry_size,
                                        DMA_BIDIRECTIONAL, attrs);
                        goto error;
                }

                dma_addr = state->addr + offset;
        } else {
                if (WARN_ON_ONCE(dma_need_unmap(dev) && !dma_addrs))
                        goto error;

                dma_addr = dma_map_page(dev, page, 0, map->dma_entry_size,
                                        DMA_BIDIRECTIONAL);
                if (dma_mapping_error(dev, dma_addr))
                        goto error;

                if (dma_need_unmap(dev))
                        dma_addrs[idx] = dma_addr;
        }
        pfns[idx] |= HMM_PFN_DMA_MAPPED;
        return dma_addr;
error:
        pfns[idx] &= ~HMM_PFN_P2PDMA;
        return DMA_MAPPING_ERROR;

}
EXPORT_SYMBOL_GPL(hmm_dma_map_pfn);

/**
 * hmm_dma_unmap_pfn - Unmap a physical HMM page from DMA address
 * @dev: Device to unmap the page from
 * @map: HMM map
 * @idx: Index of the PFN to unmap
 *
 * Returns true if the PFN was mapped and has been unmapped, false otherwise.
 */
bool hmm_dma_unmap_pfn(struct device *dev, struct hmm_dma_map *map, size_t idx)
{
        const unsigned long valid_dma = HMM_PFN_VALID | HMM_PFN_DMA_MAPPED;
        struct dma_iova_state *state = &map->state;
        dma_addr_t *dma_addrs = map->dma_list;
        unsigned long *pfns = map->pfn_list;
        unsigned long attrs = 0;

        if ((pfns[idx] & valid_dma) != valid_dma)
                return false;

        if (pfns[idx] & HMM_PFN_P2PDMA_BUS)
                ; /* no need to unmap bus address P2P mappings */
        else if (dma_use_iova(state)) {
                if (pfns[idx] & HMM_PFN_P2PDMA)
                        attrs |= DMA_ATTR_SKIP_CPU_SYNC;
                dma_iova_unlink(dev, state, idx * map->dma_entry_size,
                                map->dma_entry_size, DMA_BIDIRECTIONAL, attrs);
        } else if (dma_need_unmap(dev))
                dma_unmap_page(dev, dma_addrs[idx], map->dma_entry_size,
                               DMA_BIDIRECTIONAL);

        pfns[idx] &=
                ~(HMM_PFN_DMA_MAPPED | HMM_PFN_P2PDMA | HMM_PFN_P2PDMA_BUS);
        return true;
}
EXPORT_SYMBOL_GPL(hmm_dma_unmap_pfn);
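
/*
 * Illustrative sketch (not part of this file) of how the hmm_dma_* helpers
 * above fit together; "drv", "nr_pages" and the surrounding driver structure
 * are hypothetical and error handling is omitted:
 *
 *      struct hmm_dma_map map;
 *      struct pci_p2pdma_map_state p2pdma_state = {};
 *      dma_addr_t dma;
 *      size_t i;
 *
 *      hmm_dma_map_alloc(drv->dev, &map, nr_pages, PAGE_SIZE);
 *      range->hmm_pfns = map.pfn_list;
 *      ... run the hmm_range_fault() retry loop shown above ...
 *      for (i = 0; i < nr_pages; i++)
 *              dma = hmm_dma_map_pfn(drv->dev, &map, i, &p2pdma_state);
 *
 * and on invalidation or teardown:
 *
 *      for (i = 0; i < nr_pages; i++)
 *              hmm_dma_unmap_pfn(drv->dev, &map, i);
 *      hmm_dma_map_free(drv->dev, &map);
 */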