GitHub Repository: torvalds/linux
Path: blob/master/drivers/accel/amdxdna/amdxdna_ubuf.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2025, Advanced Micro Devices, Inc.
 */

#include <drm/amdxdna_accel.h>
#include <drm/drm_device.h>
#include <drm/drm_print.h>
#include <linux/dma-buf.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>

#include "amdxdna_pci_drv.h"
#include "amdxdna_ubuf.h"

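/*
 * Per-export private data. @pages holds every pinned user page backing
 * the buffer; @mm is the owning address space, grabbed for the lifetime
 * of the export so the pinned_vm accounting can be undone at release.
 */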
struct amdxdna_ubuf_priv {
	struct page **pages;
	u64 nr_pages;
	enum amdxdna_ubuf_flag flags;
	struct mm_struct *mm;
};

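/*
 * dma-buf .map_dma_buf: build a scatter/gather table over the pinned
 * pages. The table is DMA-mapped for the attaching device only when the
 * buffer was created with AMDXDNA_UBUF_FLAG_MAP_DMA.
 */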
static struct sg_table *amdxdna_ubuf_map(struct dma_buf_attachment *attach,
					 enum dma_data_direction direction)
{
	struct amdxdna_ubuf_priv *ubuf = attach->dmabuf->priv;
	struct sg_table *sg;
	int ret;

	sg = kzalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return ERR_PTR(-ENOMEM);

	ret = sg_alloc_table_from_pages(sg, ubuf->pages, ubuf->nr_pages, 0,
					ubuf->nr_pages << PAGE_SHIFT, GFP_KERNEL);
	if (ret)
		goto free_sg;

	if (ubuf->flags & AMDXDNA_UBUF_FLAG_MAP_DMA) {
		ret = dma_map_sgtable(attach->dev, sg, direction, 0);
		if (ret)
			goto free_table;
	}

	return sg;

free_table:
	sg_free_table(sg);
free_sg:
	/* Don't leak the sg_table allocation on either error path. */
	kfree(sg);
	return ERR_PTR(ret);
}

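/* dma-buf .unmap_dma_buf: undo amdxdna_ubuf_map() for one attachment. */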
static void amdxdna_ubuf_unmap(struct dma_buf_attachment *attach,
			       struct sg_table *sg,
			       enum dma_data_direction direction)
{
	struct amdxdna_ubuf_priv *ubuf = attach->dmabuf->priv;

	if (ubuf->flags & AMDXDNA_UBUF_FLAG_MAP_DMA)
		dma_unmap_sgtable(attach->dev, sg, direction, 0);

	sg_free_table(sg);
	kfree(sg);
}

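/*
 * dma-buf .release: runs when the last reference to the dma-buf is
 * dropped. Unpin the pages, return them to the pinned_vm quota of the
 * owning mm, then drop the mm reference taken at export time.
 */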
static void amdxdna_ubuf_release(struct dma_buf *dbuf)
{
	struct amdxdna_ubuf_priv *ubuf = dbuf->priv;

	unpin_user_pages(ubuf->pages, ubuf->nr_pages);
	kvfree(ubuf->pages);
	atomic64_sub(ubuf->nr_pages, &ubuf->mm->pinned_vm);
	mmdrop(ubuf->mm);
	kfree(ubuf);
}

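/*
 * Fault handler for userspace mappings: translate the faulting offset
 * into the corresponding pinned page and insert its PFN. The dma-buf
 * core validates the mapping size at mmap time, so pgoff stays in range.
 */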
static vm_fault_t amdxdna_ubuf_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct amdxdna_ubuf_priv *ubuf;
	unsigned long pfn;
	pgoff_t pgoff;

	ubuf = vma->vm_private_data;
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(ubuf->pages[pgoff]);
	return vmf_insert_pfn(vma, vmf->address, pfn);
}

static const struct vm_operations_struct amdxdna_ubuf_vm_ops = {
	.fault = amdxdna_ubuf_vm_fault,
};

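/* dma-buf .mmap: PFN-map the buffer; faults populate it page by page. */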
static int amdxdna_ubuf_mmap(struct dma_buf *dbuf, struct vm_area_struct *vma)
{
	struct amdxdna_ubuf_priv *ubuf = dbuf->priv;

	vma->vm_ops = &amdxdna_ubuf_vm_ops;
	vma->vm_private_data = ubuf;
	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);

	return 0;
}

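/*
 * dma-buf .vmap: stitch the pinned pages into one contiguous kernel
 * virtual mapping.
 */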
static int amdxdna_ubuf_vmap(struct dma_buf *dbuf, struct iosys_map *map)
{
	struct amdxdna_ubuf_priv *ubuf = dbuf->priv;
	void *kva;

	kva = vmap(ubuf->pages, ubuf->nr_pages, VM_MAP, PAGE_KERNEL);
	if (!kva)
		return -EINVAL;

	iosys_map_set_vaddr(map, kva);
	return 0;
}

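/* dma-buf .vunmap: tear down the mapping created by amdxdna_ubuf_vmap(). */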
static void amdxdna_ubuf_vunmap(struct dma_buf *dbuf, struct iosys_map *map)
{
	vunmap(map->vaddr);
}

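/* Exporter callbacks wired into the dma-buf created below. */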
static const struct dma_buf_ops amdxdna_ubuf_dmabuf_ops = {
	.map_dma_buf = amdxdna_ubuf_map,
	.unmap_dma_buf = amdxdna_ubuf_unmap,
	.release = amdxdna_ubuf_release,
	.mmap = amdxdna_ubuf_mmap,
	.vmap = amdxdna_ubuf_vmap,
	.vunmap = amdxdna_ubuf_vunmap,
};

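/**
 * amdxdna_get_ubuf() - export user VA ranges as a dma-buf
 * @dev: DRM device
 * @flags: AMDXDNA_UBUF_FLAG_* behavior flags
 * @num_entries: number of elements in @va_entries
 * @va_entries: user pointer to an array of page-aligned VA ranges
 *
 * Pins the user pages long-term (charged against RLIMIT_MEMLOCK) and
 * wraps them in a dma-buf using the ops above.
 *
 * Return: the exported dma-buf on success, ERR_PTR() on failure.
 */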
struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
				 enum amdxdna_ubuf_flag flags,
				 u32 num_entries, void __user *va_entries)
{
	struct amdxdna_dev *xdna = to_xdna_dev(dev);
	unsigned long lock_limit, new_pinned;
	struct amdxdna_drm_va_entry *va_ent;
	struct amdxdna_ubuf_priv *ubuf;
	u32 npages, start = 0;
	struct dma_buf *dbuf;
	int i, ret;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	if (!can_do_mlock())
		return ERR_PTR(-EPERM);

	ubuf = kzalloc(sizeof(*ubuf), GFP_KERNEL);
	if (!ubuf)
		return ERR_PTR(-ENOMEM);

	ubuf->flags = flags;
	ubuf->mm = current->mm;
	mmgrab(ubuf->mm);

	va_ent = kvcalloc(num_entries, sizeof(*va_ent), GFP_KERNEL);
	if (!va_ent) {
		ret = -ENOMEM;
		goto free_ubuf;
	}

	if (copy_from_user(va_ent, va_entries, sizeof(*va_ent) * num_entries)) {
		XDNA_DBG(xdna, "Access va entries failed");
		ret = -EINVAL;
		goto free_ent;
	}

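	/* Validate alignment and accumulate the total export size. */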
	for (i = 0, exp_info.size = 0; i < num_entries; i++) {
		if (!IS_ALIGNED(va_ent[i].vaddr, PAGE_SIZE) ||
		    !IS_ALIGNED(va_ent[i].len, PAGE_SIZE)) {
			XDNA_ERR(xdna, "Invalid address or len %llx, %llx",
				 va_ent[i].vaddr, va_ent[i].len);
			ret = -EINVAL;
			goto free_ent;
		}

		exp_info.size += va_ent[i].len;
	}

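	/*
	 * Charge the pages against the process's RLIMIT_MEMLOCK quota
	 * before pinning; CAP_IPC_LOCK bypasses the limit.
	 */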
	ubuf->nr_pages = exp_info.size >> PAGE_SHIFT;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	new_pinned = atomic64_add_return(ubuf->nr_pages, &ubuf->mm->pinned_vm);
	if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
		XDNA_DBG(xdna, "New pin %ld, limit %ld, cap %d",
			 new_pinned, lock_limit, capable(CAP_IPC_LOCK));
		ret = -ENOMEM;
		goto sub_pin_cnt;
	}

	ubuf->pages = kvmalloc_array(ubuf->nr_pages, sizeof(*ubuf->pages), GFP_KERNEL);
	if (!ubuf->pages) {
		ret = -ENOMEM;
		goto sub_pin_cnt;
	}

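	/* Long-term pin each range; start tracks pages pinned so far. */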
	for (i = 0; i < num_entries; i++) {
		npages = va_ent[i].len >> PAGE_SHIFT;

		ret = pin_user_pages_fast(va_ent[i].vaddr, npages,
					  FOLL_WRITE | FOLL_LONGTERM,
					  &ubuf->pages[start]);
		if (ret < 0) {
			/* Log the real error before it is overwritten. */
			XDNA_ERR(xdna, "Failed to pin pages ret %d", ret);
			goto destroy_pages;
		}

		/* Count partial pins so the unwind path unpins them too. */
		start += ret;
		if (ret != npages) {
			XDNA_ERR(xdna, "Pinned %d pages, expected %u", ret, npages);
			ret = -ENOMEM;
			goto destroy_pages;
		}
	}

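	/* All pages are pinned; hand them to the dma-buf framework. */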
	exp_info.ops = &amdxdna_ubuf_dmabuf_ops;
	exp_info.priv = ubuf;
	exp_info.flags = O_RDWR | O_CLOEXEC;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf)) {
		ret = PTR_ERR(dbuf);
		goto destroy_pages;
	}
	kvfree(va_ent);

	return dbuf;

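	/*
	 * Error unwind: each label releases only what was set up before
	 * the corresponding failure point.
	 */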
destroy_pages:
	if (start)
		unpin_user_pages(ubuf->pages, start);
	kvfree(ubuf->pages);
sub_pin_cnt:
	atomic64_sub(ubuf->nr_pages, &ubuf->mm->pinned_vm);
free_ent:
	kvfree(va_ent);
free_ubuf:
	mmdrop(ubuf->mm);
	kfree(ubuf);
	return ERR_PTR(ret);
}