GitHub Repository: torvalds/linux
Path: blob/master/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
1
// SPDX-License-Identifier: GPL-2.0 OR MIT
2
/*
3
* Copyright 2014-2022 Advanced Micro Devices, Inc.
4
*
5
* Permission is hereby granted, free of charge, to any person obtaining a
6
* copy of this software and associated documentation files (the "Software"),
7
* to deal in the Software without restriction, including without limitation
8
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
9
* and/or sell copies of the Software, and to permit persons to whom the
10
* Software is furnished to do so, subject to the following conditions:
11
*
12
* The above copyright notice and this permission notice shall be included in
13
* all copies or substantial portions of the Software.
14
*
15
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21
* OTHER DEALINGS IN THE SOFTWARE.
22
*/
23
24
#include <linux/device.h>
25
#include <linux/err.h>
26
#include <linux/fs.h>
27
#include <linux/file.h>
28
#include <linux/sched.h>
29
#include <linux/slab.h>
30
#include <linux/uaccess.h>
31
#include <linux/compat.h>
32
#include <uapi/linux/kfd_ioctl.h>
33
#include <linux/time.h>
34
#include <linux/mm.h>
35
#include <linux/mman.h>
36
#include <linux/ptrace.h>
37
#include <linux/dma-buf.h>
38
#include <linux/processor.h>
39
#include "kfd_priv.h"
40
#include "kfd_device_queue_manager.h"
41
#include "kfd_svm.h"
42
#include "amdgpu_amdkfd.h"
43
#include "kfd_smi_events.h"
44
#include "amdgpu_dma_buf.h"
45
#include "kfd_debug.h"
46
47
static long kfd_ioctl(struct file *, unsigned int, unsigned long);
48
static int kfd_open(struct inode *, struct file *);
49
static int kfd_release(struct inode *, struct file *);
50
static int kfd_mmap(struct file *, struct vm_area_struct *);
51
52
static const char kfd_dev_name[] = "kfd";
53
54
static const struct file_operations kfd_fops = {
55
.owner = THIS_MODULE,
56
.unlocked_ioctl = kfd_ioctl,
57
.compat_ioctl = compat_ptr_ioctl,
58
.open = kfd_open,
59
.release = kfd_release,
60
.mmap = kfd_mmap,
61
};
62
63
static int kfd_char_dev_major = -1;
64
struct device *kfd_device;
65
static const struct class kfd_class = {
66
.name = kfd_dev_name,
67
};
68
69
static inline struct kfd_process_device *kfd_lock_pdd_by_id(struct kfd_process *p, __u32 gpu_id)
70
{
71
struct kfd_process_device *pdd;
72
73
mutex_lock(&p->mutex);
74
pdd = kfd_process_device_data_by_id(p, gpu_id);
75
76
if (pdd)
77
return pdd;
78
79
mutex_unlock(&p->mutex);
80
return NULL;
81
}
82
83
static inline void kfd_unlock_pdd(struct kfd_process_device *pdd)
84
{
85
mutex_unlock(&pdd->process->mutex);
86
}
87
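/*
 * Calling pattern (a minimal sketch): on success kfd_lock_pdd_by_id()
 * returns with p->mutex still held, so any caller that receives a non-NULL
 * pdd must release the lock with kfd_unlock_pdd(), e.g.
 *
 *	pdd = kfd_lock_pdd_by_id(p, args->gpu_id);
 *	if (!pdd)
 *		return -EINVAL;
 *	...use pdd->dev while the process mutex is held...
 *	kfd_unlock_pdd(pdd);
 *
 * kfd_ioctl_get_available_memory() below follows exactly this pattern.
 */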
88
int kfd_chardev_init(void)
89
{
90
int err = 0;
91
92
kfd_char_dev_major = register_chrdev(0, kfd_dev_name, &kfd_fops);
93
err = kfd_char_dev_major;
94
if (err < 0)
95
goto err_register_chrdev;
96
97
err = class_register(&kfd_class);
98
if (err)
99
goto err_class_create;
100
101
kfd_device = device_create(&kfd_class, NULL,
102
MKDEV(kfd_char_dev_major, 0),
103
NULL, kfd_dev_name);
104
err = PTR_ERR(kfd_device);
105
if (IS_ERR(kfd_device))
106
goto err_device_create;
107
108
return 0;
109
110
err_device_create:
111
class_unregister(&kfd_class);
112
err_class_create:
113
unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
114
err_register_chrdev:
115
return err;
116
}
117
118
void kfd_chardev_exit(void)
119
{
120
device_destroy(&kfd_class, MKDEV(kfd_char_dev_major, 0));
121
class_unregister(&kfd_class);
122
unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
123
kfd_device = NULL;
124
}
125
126
127
static int kfd_open(struct inode *inode, struct file *filep)
128
{
129
struct kfd_process *process;
130
bool is_32bit_user_mode;
131
132
if (iminor(inode) != 0)
133
return -ENODEV;
134
135
is_32bit_user_mode = in_compat_syscall();
136
137
if (is_32bit_user_mode) {
138
dev_warn(kfd_device,
139
"Process %d (32-bit) failed to open /dev/kfd\n"
140
"32-bit processes are not supported by amdkfd\n",
141
current->pid);
142
return -EPERM;
143
}
144
145
process = kfd_create_process(current);
146
if (IS_ERR(process))
147
return PTR_ERR(process);
148
149
if (kfd_process_init_cwsr_apu(process, filep)) {
150
kfd_unref_process(process);
151
return -EFAULT;
152
}
153
154
/* filep now owns the reference returned by kfd_create_process */
155
filep->private_data = process;
156
157
dev_dbg(kfd_device, "process pid %d opened kfd node, compat mode (32 bit) - %d\n",
158
process->lead_thread->pid, process->is_32bit_user_mode);
159
160
return 0;
161
}
162
163
static int kfd_release(struct inode *inode, struct file *filep)
164
{
165
struct kfd_process *process = filep->private_data;
166
167
if (!process)
168
return 0;
169
170
if (process->context_id != KFD_CONTEXT_ID_PRIMARY)
171
kfd_process_notifier_release_internal(process);
172
173
kfd_unref_process(process);
174
175
return 0;
176
}
177
178
static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
179
void *data)
180
{
181
struct kfd_ioctl_get_version_args *args = data;
182
183
args->major_version = KFD_IOCTL_MAJOR_VERSION;
184
args->minor_version = KFD_IOCTL_MINOR_VERSION;
185
186
return 0;
187
}
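/*
 * Illustrative user-space counterpart (a sketch, assuming the
 * AMDKFD_IOC_GET_VERSION macro and struct kfd_ioctl_get_version_args from
 * uapi/linux/kfd_ioctl.h; this is how the handler above is normally reached
 * through kfd_ioctl()):
 *
 *	struct kfd_ioctl_get_version_args args = {0};
 *	int kfd_fd = open("/dev/kfd", O_RDWR | O_CLOEXEC);
 *
 *	if (kfd_fd >= 0 && ioctl(kfd_fd, AMDKFD_IOC_GET_VERSION, &args) == 0)
 *		printf("KFD ioctl interface %u.%u\n",
 *		       args.major_version, args.minor_version);
 */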
188
189
static int set_queue_properties_from_user(struct queue_properties *q_properties,
190
struct kfd_ioctl_create_queue_args *args)
191
{
192
/*
193
* Repurpose queue percentage to accommodate new features:
194
* bit 0-7: queue percentage
195
* bit 8-15: pm4_target_xcc
196
*/
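/*
 * For example (illustrative only): a request for 100% queue percentage on
 * PM4 target XCC 1 would be encoded by user space as
 * queue_percentage = (1 << 8) | 100; the checks and assignments below split
 * that back into the two byte-wide fields.
 */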
197
if ((args->queue_percentage & 0xFF) > KFD_MAX_QUEUE_PERCENTAGE) {
198
pr_err("Queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
199
return -EINVAL;
200
}
201
202
if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
203
pr_err("Queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
204
return -EINVAL;
205
}
206
207
if ((args->ring_base_address) &&
208
(!access_ok((const void __user *) args->ring_base_address,
209
sizeof(uint64_t)))) {
210
pr_err("Can't access ring base address\n");
211
return -EFAULT;
212
}
213
214
if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
215
pr_err("Ring size must be a power of 2 or 0\n");
216
return -EINVAL;
217
}
218
219
if (args->ring_size < KFD_MIN_QUEUE_RING_SIZE) {
220
args->ring_size = KFD_MIN_QUEUE_RING_SIZE;
221
pr_debug("Size lower. clamped to KFD_MIN_QUEUE_RING_SIZE");
222
}
223
224
if ((args->metadata_ring_size != 0) && !is_power_of_2(args->metadata_ring_size)) {
225
pr_err("Metadata ring size must be a power of 2 or 0\n");
226
return -EINVAL;
227
}
228
229
if (!access_ok((const void __user *) args->read_pointer_address,
230
sizeof(uint32_t))) {
231
pr_err("Can't access read pointer\n");
232
return -EFAULT;
233
}
234
235
if (!access_ok((const void __user *) args->write_pointer_address,
236
sizeof(uint32_t))) {
237
pr_err("Can't access write pointer\n");
238
return -EFAULT;
239
}
240
241
if (args->eop_buffer_address &&
242
!access_ok((const void __user *) args->eop_buffer_address,
243
sizeof(uint32_t))) {
244
pr_debug("Can't access eop buffer");
245
return -EFAULT;
246
}
247
248
if (args->ctx_save_restore_address &&
249
!access_ok((const void __user *) args->ctx_save_restore_address,
250
sizeof(uint32_t))) {
251
pr_debug("Can't access ctx save restore buffer");
252
return -EFAULT;
253
}
254
255
q_properties->is_interop = false;
256
q_properties->is_gws = false;
257
q_properties->queue_percent = args->queue_percentage & 0xFF;
258
/* bit 8-15 are repurposed to be PM4 target XCC */
259
q_properties->pm4_target_xcc = (args->queue_percentage >> 8) & 0xFF;
260
q_properties->priority = args->queue_priority;
261
q_properties->queue_address = args->ring_base_address;
262
q_properties->queue_size = args->ring_size;
263
if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
264
q_properties->metadata_queue_size = args->metadata_ring_size;
265
266
q_properties->read_ptr = (void __user *)args->read_pointer_address;
267
q_properties->write_ptr = (void __user *)args->write_pointer_address;
268
q_properties->eop_ring_buffer_address = args->eop_buffer_address;
269
q_properties->eop_ring_buffer_size = args->eop_buffer_size;
270
q_properties->ctx_save_restore_area_address =
271
args->ctx_save_restore_address;
272
q_properties->ctx_save_restore_area_size = args->ctx_save_restore_size;
273
q_properties->ctl_stack_size = args->ctl_stack_size;
274
q_properties->sdma_engine_id = args->sdma_engine_id;
275
if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE ||
276
args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
277
q_properties->type = KFD_QUEUE_TYPE_COMPUTE;
278
else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA)
279
q_properties->type = KFD_QUEUE_TYPE_SDMA;
280
else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA_XGMI)
281
q_properties->type = KFD_QUEUE_TYPE_SDMA_XGMI;
282
else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA_BY_ENG_ID)
283
q_properties->type = KFD_QUEUE_TYPE_SDMA_BY_ENG_ID;
284
else
285
return -ENOTSUPP;
286
287
if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
288
q_properties->format = KFD_QUEUE_FORMAT_AQL;
289
else
290
q_properties->format = KFD_QUEUE_FORMAT_PM4;
291
292
pr_debug("Queue Percentage: %d, %d\n",
293
q_properties->queue_percent, args->queue_percentage);
294
295
pr_debug("Queue Priority: %d, %d\n",
296
q_properties->priority, args->queue_priority);
297
298
pr_debug("Queue Address: 0x%llX, 0x%llX\n",
299
q_properties->queue_address, args->ring_base_address);
300
301
pr_debug("Queue Size: 0x%llX, %u\n",
302
q_properties->queue_size, args->ring_size);
303
304
pr_debug("Queue r/w Pointers: %px, %px\n",
305
q_properties->read_ptr,
306
q_properties->write_ptr);
307
308
pr_debug("Queue Format: %d\n", q_properties->format);
309
310
pr_debug("Queue EOP: 0x%llX\n", q_properties->eop_ring_buffer_address);
311
312
pr_debug("Queue CTX save area: 0x%llX\n",
313
q_properties->ctx_save_restore_area_address);
314
315
return 0;
316
}
317
318
static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
319
void *data)
320
{
321
struct kfd_ioctl_create_queue_args *args = data;
322
struct kfd_node *dev;
323
int err = 0;
324
unsigned int queue_id;
325
struct kfd_process_device *pdd;
326
struct queue_properties q_properties;
327
uint32_t doorbell_offset_in_process = 0;
328
329
memset(&q_properties, 0, sizeof(struct queue_properties));
330
331
pr_debug("Creating queue ioctl\n");
332
333
err = set_queue_properties_from_user(&q_properties, args);
334
if (err)
335
return err;
336
337
pr_debug("Looking for gpu id 0x%x\n", args->gpu_id);
338
339
mutex_lock(&p->mutex);
340
341
pdd = kfd_process_device_data_by_id(p, args->gpu_id);
342
if (!pdd) {
343
pr_debug("Could not find gpu id 0x%x\n", args->gpu_id);
344
err = -EINVAL;
345
goto err_pdd;
346
}
347
dev = pdd->dev;
348
349
pdd = kfd_bind_process_to_device(dev, p);
350
if (IS_ERR(pdd)) {
351
err = -ESRCH;
352
goto err_bind_process;
353
}
354
355
if (q_properties.type == KFD_QUEUE_TYPE_SDMA_BY_ENG_ID) {
356
int max_sdma_eng_id = kfd_get_num_sdma_engines(dev) +
357
kfd_get_num_xgmi_sdma_engines(dev) - 1;
358
359
if (q_properties.sdma_engine_id > max_sdma_eng_id) {
360
err = -EINVAL;
361
pr_err("sdma_engine_id %i exceeds maximum id of %i\n",
362
q_properties.sdma_engine_id, max_sdma_eng_id);
363
goto err_sdma_engine_id;
364
}
365
}
366
367
if (!pdd->qpd.proc_doorbells) {
368
err = kfd_alloc_process_doorbells(dev->kfd, pdd);
369
if (err) {
370
pr_debug("failed to allocate process doorbells\n");
371
goto err_bind_process;
372
}
373
}
374
375
err = kfd_queue_acquire_buffers(pdd, &q_properties);
376
if (err) {
377
pr_debug("failed to acquire user queue buffers\n");
378
goto err_acquire_queue_buf;
379
}
380
381
pr_debug("Creating queue for process pid %d on gpu 0x%x\n",
382
p->lead_thread->pid,
383
dev->id);
384
385
err = pqm_create_queue(&p->pqm, dev, &q_properties, &queue_id,
386
NULL, NULL, NULL, &doorbell_offset_in_process);
387
if (err != 0)
388
goto err_create_queue;
389
390
args->queue_id = queue_id;
391
392
393
/* Return gpu_id as doorbell offset for mmap usage */
394
args->doorbell_offset = KFD_MMAP_TYPE_DOORBELL;
395
args->doorbell_offset |= KFD_MMAP_GPU_ID(args->gpu_id);
396
if (KFD_IS_SOC15(dev))
397
/* On SOC15 ASICs, include the doorbell offset within the
398
* process doorbell frame, which is 2 pages.
399
*/
400
args->doorbell_offset |= doorbell_offset_in_process;
401
402
mutex_unlock(&p->mutex);
403
404
pr_debug("Queue id %d was created successfully\n", args->queue_id);
405
406
pr_debug("Ring buffer address == 0x%016llX\n",
407
args->ring_base_address);
408
409
pr_debug("Read ptr address == 0x%016llX\n",
410
args->read_pointer_address);
411
412
pr_debug("Write ptr address == 0x%016llX\n",
413
args->write_pointer_address);
414
415
kfd_dbg_ev_raise(KFD_EC_MASK(EC_QUEUE_NEW), p, dev, queue_id, false, NULL, 0);
416
return 0;
417
418
err_create_queue:
419
kfd_queue_unref_bo_vas(pdd, &q_properties);
420
kfd_queue_release_buffers(pdd, &q_properties);
421
err_acquire_queue_buf:
422
err_sdma_engine_id:
423
err_bind_process:
424
err_pdd:
425
mutex_unlock(&p->mutex);
426
return err;
427
}
428
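/*
 * Illustrative follow-up (a sketch, not a prescribed flow): once
 * AMDKFD_IOC_CREATE_QUEUE returns, user space typically maps the queue's
 * doorbell page through the same /dev/kfd file descriptor using the
 * doorbell_offset produced above, which kfd_mmap() decodes from the
 * KFD_MMAP_TYPE_DOORBELL and gpu_id bits:
 *
 *	void *doorbells = mmap(NULL, doorbell_slice_size,
 *			       PROT_READ | PROT_WRITE, MAP_SHARED,
 *			       kfd_fd, args.doorbell_offset);
 *
 * doorbell_slice_size is an assumed placeholder for the per-process doorbell
 * size reported to user space, not a symbol defined in this file.
 */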
429
static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
430
void *data)
431
{
432
int retval;
433
struct kfd_ioctl_destroy_queue_args *args = data;
434
435
pr_debug("Destroying queue id %d for process pid %d\n",
436
args->queue_id,
437
p->lead_thread->pid);
438
439
mutex_lock(&p->mutex);
440
441
retval = pqm_destroy_queue(&p->pqm, args->queue_id);
442
443
mutex_unlock(&p->mutex);
444
return retval;
445
}
446
447
static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
448
void *data)
449
{
450
int retval;
451
struct kfd_ioctl_update_queue_args *args = data;
452
struct queue_properties properties;
453
454
/*
455
* Repurpose queue percentage to accommodate new features:
456
* bit 0-7: queue percentage
457
* bit 8-15: pm4_target_xcc
458
*/
459
if ((args->queue_percentage & 0xFF) > KFD_MAX_QUEUE_PERCENTAGE) {
460
pr_err("Queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
461
return -EINVAL;
462
}
463
464
if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
465
pr_err("Queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
466
return -EINVAL;
467
}
468
469
if ((args->ring_base_address) &&
470
(!access_ok((const void __user *) args->ring_base_address,
471
sizeof(uint64_t)))) {
472
pr_err("Can't access ring base address\n");
473
return -EFAULT;
474
}
475
476
if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
477
pr_err("Ring size must be a power of 2 or 0\n");
478
return -EINVAL;
479
}
480
481
if (args->ring_size < KFD_MIN_QUEUE_RING_SIZE) {
482
args->ring_size = KFD_MIN_QUEUE_RING_SIZE;
483
pr_debug("Size lower. clamped to KFD_MIN_QUEUE_RING_SIZE");
484
}
485
486
properties.queue_address = args->ring_base_address;
487
properties.queue_size = args->ring_size;
488
properties.queue_percent = args->queue_percentage & 0xFF;
489
/* bit 8-15 are repurposed to be PM4 target XCC */
490
properties.pm4_target_xcc = (args->queue_percentage >> 8) & 0xFF;
491
properties.priority = args->queue_priority;
492
493
pr_debug("Updating queue id %d for process pid %d\n",
494
args->queue_id, p->lead_thread->pid);
495
496
mutex_lock(&p->mutex);
497
498
retval = pqm_update_queue_properties(&p->pqm, args->queue_id, &properties);
499
500
mutex_unlock(&p->mutex);
501
502
return retval;
503
}
504
505
static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
506
void *data)
507
{
508
int retval;
509
const int max_num_cus = 1024;
510
struct kfd_ioctl_set_cu_mask_args *args = data;
511
struct mqd_update_info minfo = {0};
512
uint32_t __user *cu_mask_ptr = (uint32_t __user *)args->cu_mask_ptr;
513
size_t cu_mask_size = sizeof(uint32_t) * (args->num_cu_mask / 32);
514
515
if ((args->num_cu_mask % 32) != 0) {
516
pr_debug("num_cu_mask 0x%x must be a multiple of 32",
517
args->num_cu_mask);
518
return -EINVAL;
519
}
520
521
minfo.cu_mask.count = args->num_cu_mask;
522
if (minfo.cu_mask.count == 0) {
523
pr_debug("CU mask cannot be 0");
524
return -EINVAL;
525
}
526
527
/* To prevent an unreasonably large CU mask size, set an arbitrary
528
* limit of max_num_cus bits. We can then just drop any CU mask bits
529
* past max_num_cus bits and just use the first max_num_cus bits.
530
*/
531
if (minfo.cu_mask.count > max_num_cus) {
532
pr_debug("CU mask cannot be greater than 1024 bits");
533
minfo.cu_mask.count = max_num_cus;
534
cu_mask_size = sizeof(uint32_t) * (max_num_cus/32);
535
}
536
537
minfo.cu_mask.ptr = memdup_user(cu_mask_ptr, cu_mask_size);
538
if (IS_ERR(minfo.cu_mask.ptr)) {
539
pr_debug("Could not copy CU mask from userspace");
540
return PTR_ERR(minfo.cu_mask.ptr);
541
}
542
543
mutex_lock(&p->mutex);
544
545
retval = pqm_update_mqd(&p->pqm, args->queue_id, &minfo);
546
547
mutex_unlock(&p->mutex);
548
549
kfree(minfo.cu_mask.ptr);
550
return retval;
551
}
552
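/*
 * Worked example (illustrative): for a queue on a GPU with 64 CUs, user
 * space would pass num_cu_mask = 64 and point cu_mask_ptr at two uint32_t
 * words (all bits set enables every CU), which satisfies the
 * multiple-of-32 check above:
 *
 *	uint32_t mask[2] = { 0xffffffff, 0xffffffff };
 *	struct kfd_ioctl_set_cu_mask_args args = {
 *		.queue_id = queue_id,
 *		.num_cu_mask = 64,
 *		.cu_mask_ptr = (uintptr_t)mask,
 *	};
 *
 * memdup_user() above then copies num_cu_mask / 32 words into the kernel.
 */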
553
static int kfd_ioctl_get_queue_wave_state(struct file *filep,
554
struct kfd_process *p, void *data)
555
{
556
struct kfd_ioctl_get_queue_wave_state_args *args = data;
557
int r;
558
559
mutex_lock(&p->mutex);
560
561
r = pqm_get_wave_state(&p->pqm, args->queue_id,
562
(void __user *)args->ctl_stack_address,
563
&args->ctl_stack_used_size,
564
&args->save_area_used_size);
565
566
mutex_unlock(&p->mutex);
567
568
return r;
569
}
570
571
static int kfd_ioctl_set_memory_policy(struct file *filep,
572
struct kfd_process *p, void *data)
573
{
574
struct kfd_ioctl_set_memory_policy_args *args = data;
575
int err = 0;
576
struct kfd_process_device *pdd;
577
enum cache_policy default_policy, alternate_policy;
578
579
if (args->default_policy != KFD_IOC_CACHE_POLICY_COHERENT
580
&& args->default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
581
return -EINVAL;
582
}
583
584
if (args->alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT
585
&& args->alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
586
return -EINVAL;
587
}
588
589
mutex_lock(&p->mutex);
590
pdd = kfd_process_device_data_by_id(p, args->gpu_id);
591
if (!pdd) {
592
pr_debug("Could not find gpu id 0x%x\n", args->gpu_id);
593
err = -EINVAL;
594
goto err_pdd;
595
}
596
597
pdd = kfd_bind_process_to_device(pdd->dev, p);
598
if (IS_ERR(pdd)) {
599
err = -ESRCH;
600
goto out;
601
}
602
603
default_policy = (args->default_policy == KFD_IOC_CACHE_POLICY_COHERENT)
604
? cache_policy_coherent : cache_policy_noncoherent;
605
606
alternate_policy =
607
(args->alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
608
? cache_policy_coherent : cache_policy_noncoherent;
609
610
if (!pdd->dev->dqm->ops.set_cache_memory_policy(pdd->dev->dqm,
611
&pdd->qpd,
612
default_policy,
613
alternate_policy,
614
(void __user *)args->alternate_aperture_base,
615
args->alternate_aperture_size,
616
args->misc_process_flag))
617
err = -EINVAL;
618
619
out:
620
err_pdd:
621
mutex_unlock(&p->mutex);
622
623
return err;
624
}
625
626
static int kfd_ioctl_set_trap_handler(struct file *filep,
627
struct kfd_process *p, void *data)
628
{
629
struct kfd_ioctl_set_trap_handler_args *args = data;
630
int err = 0;
631
struct kfd_process_device *pdd;
632
633
mutex_lock(&p->mutex);
634
635
pdd = kfd_process_device_data_by_id(p, args->gpu_id);
636
if (!pdd) {
637
err = -EINVAL;
638
goto err_pdd;
639
}
640
641
pdd = kfd_bind_process_to_device(pdd->dev, p);
642
if (IS_ERR(pdd)) {
643
err = -ESRCH;
644
goto out;
645
}
646
647
kfd_process_set_trap_handler(&pdd->qpd, args->tba_addr, args->tma_addr);
648
649
out:
650
err_pdd:
651
mutex_unlock(&p->mutex);
652
653
return err;
654
}
655
656
static int kfd_ioctl_dbg_register(struct file *filep,
657
struct kfd_process *p, void *data)
658
{
659
return -EPERM;
660
}
661
662
static int kfd_ioctl_dbg_unregister(struct file *filep,
663
struct kfd_process *p, void *data)
664
{
665
return -EPERM;
666
}
667
668
static int kfd_ioctl_dbg_address_watch(struct file *filep,
669
struct kfd_process *p, void *data)
670
{
671
return -EPERM;
672
}
673
674
/* Parse and generate fixed size data structure for wave control */
675
static int kfd_ioctl_dbg_wave_control(struct file *filep,
676
struct kfd_process *p, void *data)
677
{
678
return -EPERM;
679
}
680
681
static int kfd_ioctl_get_clock_counters(struct file *filep,
682
struct kfd_process *p, void *data)
683
{
684
struct kfd_ioctl_get_clock_counters_args *args = data;
685
struct kfd_process_device *pdd;
686
687
mutex_lock(&p->mutex);
688
pdd = kfd_process_device_data_by_id(p, args->gpu_id);
689
mutex_unlock(&p->mutex);
690
if (pdd)
691
/* Reading GPU clock counter from KGD */
692
args->gpu_clock_counter = amdgpu_amdkfd_get_gpu_clock_counter(pdd->dev->adev);
693
else
694
/* Node without GPU resource */
695
args->gpu_clock_counter = 0;
696
697
/* No access to rdtsc. Using raw monotonic time */
698
args->cpu_clock_counter = ktime_get_raw_ns();
699
args->system_clock_counter = ktime_get_boottime_ns();
700
701
/* Since the counter is in nano-seconds we use 1GHz frequency */
702
args->system_clock_freq = 1000000000;
703
704
return 0;
705
}
706
707
708
static int kfd_ioctl_get_process_apertures(struct file *filp,
709
struct kfd_process *p, void *data)
710
{
711
struct kfd_ioctl_get_process_apertures_args *args = data;
712
struct kfd_process_device_apertures *pAperture;
713
int i;
714
715
dev_dbg(kfd_device, "get apertures for process pid %d", p->lead_thread->pid);
716
717
args->num_of_nodes = 0;
718
719
mutex_lock(&p->mutex);
720
/* Run over all pdds of the process */
721
for (i = 0; i < p->n_pdds; i++) {
722
struct kfd_process_device *pdd = p->pdds[i];
723
724
pAperture =
725
&args->process_apertures[args->num_of_nodes];
726
pAperture->gpu_id = pdd->dev->id;
727
pAperture->lds_base = pdd->lds_base;
728
pAperture->lds_limit = pdd->lds_limit;
729
pAperture->gpuvm_base = pdd->gpuvm_base;
730
pAperture->gpuvm_limit = pdd->gpuvm_limit;
731
pAperture->scratch_base = pdd->scratch_base;
732
pAperture->scratch_limit = pdd->scratch_limit;
733
734
dev_dbg(kfd_device,
735
"node id %u\n", args->num_of_nodes);
736
dev_dbg(kfd_device,
737
"gpu id %u\n", pdd->dev->id);
738
dev_dbg(kfd_device,
739
"lds_base %llX\n", pdd->lds_base);
740
dev_dbg(kfd_device,
741
"lds_limit %llX\n", pdd->lds_limit);
742
dev_dbg(kfd_device,
743
"gpuvm_base %llX\n", pdd->gpuvm_base);
744
dev_dbg(kfd_device,
745
"gpuvm_limit %llX\n", pdd->gpuvm_limit);
746
dev_dbg(kfd_device,
747
"scratch_base %llX\n", pdd->scratch_base);
748
dev_dbg(kfd_device,
749
"scratch_limit %llX\n", pdd->scratch_limit);
750
751
if (++args->num_of_nodes >= NUM_OF_SUPPORTED_GPUS)
752
break;
753
}
754
mutex_unlock(&p->mutex);
755
756
return 0;
757
}
758
759
static int kfd_ioctl_get_process_apertures_new(struct file *filp,
760
struct kfd_process *p, void *data)
761
{
762
struct kfd_ioctl_get_process_apertures_new_args *args = data;
763
struct kfd_process_device_apertures *pa;
764
int ret;
765
int i;
766
767
dev_dbg(kfd_device, "get apertures for process pid %d",
768
p->lead_thread->pid);
769
770
if (args->num_of_nodes == 0) {
771
/* Return number of nodes, so that user space can allocate
772
* sufficient memory
773
*/
774
mutex_lock(&p->mutex);
775
args->num_of_nodes = p->n_pdds;
776
goto out_unlock;
777
}
778
779
/* Fill in process-aperture information for all available
780
* nodes, but not more than args->num_of_nodes as that is
781
* the amount of memory allocated by the user
782
*/
783
pa = kcalloc(args->num_of_nodes, sizeof(struct kfd_process_device_apertures),
784
GFP_KERNEL);
785
if (!pa)
786
return -ENOMEM;
787
788
mutex_lock(&p->mutex);
789
790
if (!p->n_pdds) {
791
args->num_of_nodes = 0;
792
kfree(pa);
793
goto out_unlock;
794
}
795
796
/* Run over all pdds of the process */
797
for (i = 0; i < min(p->n_pdds, args->num_of_nodes); i++) {
798
struct kfd_process_device *pdd = p->pdds[i];
799
800
pa[i].gpu_id = pdd->dev->id;
801
pa[i].lds_base = pdd->lds_base;
802
pa[i].lds_limit = pdd->lds_limit;
803
pa[i].gpuvm_base = pdd->gpuvm_base;
804
pa[i].gpuvm_limit = pdd->gpuvm_limit;
805
pa[i].scratch_base = pdd->scratch_base;
806
pa[i].scratch_limit = pdd->scratch_limit;
807
808
dev_dbg(kfd_device,
809
"gpu id %u\n", pdd->dev->id);
810
dev_dbg(kfd_device,
811
"lds_base %llX\n", pdd->lds_base);
812
dev_dbg(kfd_device,
813
"lds_limit %llX\n", pdd->lds_limit);
814
dev_dbg(kfd_device,
815
"gpuvm_base %llX\n", pdd->gpuvm_base);
816
dev_dbg(kfd_device,
817
"gpuvm_limit %llX\n", pdd->gpuvm_limit);
818
dev_dbg(kfd_device,
819
"scratch_base %llX\n", pdd->scratch_base);
820
dev_dbg(kfd_device,
821
"scratch_limit %llX\n", pdd->scratch_limit);
822
}
823
mutex_unlock(&p->mutex);
824
825
args->num_of_nodes = i;
826
ret = copy_to_user(
827
(void __user *)args->kfd_process_device_apertures_ptr,
828
pa,
829
(i * sizeof(struct kfd_process_device_apertures)));
830
kfree(pa);
831
return ret ? -EFAULT : 0;
832
833
out_unlock:
834
mutex_unlock(&p->mutex);
835
return 0;
836
}
837
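/*
 * Calling convention seen from user space (an illustrative sketch, assuming
 * AMDKFD_IOC_GET_PROCESS_APERTURES_NEW from uapi/linux/kfd_ioctl.h): call
 * once with num_of_nodes = 0 to learn how many nodes exist, allocate that
 * many struct kfd_process_device_apertures entries, then call again:
 *
 *	args.num_of_nodes = 0;
 *	ioctl(kfd_fd, AMDKFD_IOC_GET_PROCESS_APERTURES_NEW, &args);
 *	bufs = calloc(args.num_of_nodes, sizeof(struct kfd_process_device_apertures));
 *	args.kfd_process_device_apertures_ptr = (uintptr_t)bufs;
 *	ioctl(kfd_fd, AMDKFD_IOC_GET_PROCESS_APERTURES_NEW, &args);
 */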
838
static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
839
void *data)
840
{
841
struct kfd_ioctl_create_event_args *args = data;
842
int err;
843
844
/* For dGPUs the event page is allocated in user mode. The
845
* handle is passed to KFD with the first call to this IOCTL
846
* through the event_page_offset field.
847
*/
848
if (args->event_page_offset) {
849
mutex_lock(&p->mutex);
850
err = kfd_kmap_event_page(p, args->event_page_offset);
851
mutex_unlock(&p->mutex);
852
if (err)
853
return err;
854
}
855
856
err = kfd_event_create(filp, p, args->event_type,
857
args->auto_reset != 0, args->node_id,
858
&args->event_id, &args->event_trigger_data,
859
&args->event_page_offset,
860
&args->event_slot_index);
861
862
pr_debug("Created event (id:0x%08x) (%s)\n", args->event_id, __func__);
863
return err;
864
}
865
866
static int kfd_ioctl_destroy_event(struct file *filp, struct kfd_process *p,
867
void *data)
868
{
869
struct kfd_ioctl_destroy_event_args *args = data;
870
871
return kfd_event_destroy(p, args->event_id);
872
}
873
874
static int kfd_ioctl_set_event(struct file *filp, struct kfd_process *p,
875
void *data)
876
{
877
struct kfd_ioctl_set_event_args *args = data;
878
879
return kfd_set_event(p, args->event_id);
880
}
881
882
static int kfd_ioctl_reset_event(struct file *filp, struct kfd_process *p,
883
void *data)
884
{
885
struct kfd_ioctl_reset_event_args *args = data;
886
887
return kfd_reset_event(p, args->event_id);
888
}
889
890
static int kfd_ioctl_wait_events(struct file *filp, struct kfd_process *p,
891
void *data)
892
{
893
struct kfd_ioctl_wait_events_args *args = data;
894
895
return kfd_wait_on_events(p, args->num_events,
896
(void __user *)args->events_ptr,
897
(args->wait_for_all != 0),
898
&args->timeout, &args->wait_result);
899
}
900
static int kfd_ioctl_set_scratch_backing_va(struct file *filep,
901
struct kfd_process *p, void *data)
902
{
903
struct kfd_ioctl_set_scratch_backing_va_args *args = data;
904
struct kfd_process_device *pdd;
905
struct kfd_node *dev;
906
long err;
907
908
mutex_lock(&p->mutex);
909
pdd = kfd_process_device_data_by_id(p, args->gpu_id);
910
if (!pdd) {
911
err = -EINVAL;
912
goto err_pdd;
913
}
914
dev = pdd->dev;
915
916
pdd = kfd_bind_process_to_device(dev, p);
917
if (IS_ERR(pdd)) {
918
err = PTR_ERR(pdd);
919
goto bind_process_to_device_fail;
920
}
921
922
pdd->qpd.sh_hidden_private_base = args->va_addr;
923
924
mutex_unlock(&p->mutex);
925
926
if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS &&
927
pdd->qpd.vmid != 0 && dev->kfd2kgd->set_scratch_backing_va)
928
dev->kfd2kgd->set_scratch_backing_va(
929
dev->adev, args->va_addr, pdd->qpd.vmid);
930
931
return 0;
932
933
bind_process_to_device_fail:
934
err_pdd:
935
mutex_unlock(&p->mutex);
936
return err;
937
}
938
939
static int kfd_ioctl_get_tile_config(struct file *filep,
940
struct kfd_process *p, void *data)
941
{
942
struct kfd_ioctl_get_tile_config_args *args = data;
943
struct kfd_process_device *pdd;
944
struct tile_config config;
945
int err = 0;
946
947
mutex_lock(&p->mutex);
948
pdd = kfd_process_device_data_by_id(p, args->gpu_id);
949
mutex_unlock(&p->mutex);
950
if (!pdd)
951
return -EINVAL;
952
953
amdgpu_amdkfd_get_tile_config(pdd->dev->adev, &config);
954
955
args->gb_addr_config = config.gb_addr_config;
956
args->num_banks = config.num_banks;
957
args->num_ranks = config.num_ranks;
958
959
if (args->num_tile_configs > config.num_tile_configs)
960
args->num_tile_configs = config.num_tile_configs;
961
err = copy_to_user((void __user *)args->tile_config_ptr,
962
config.tile_config_ptr,
963
args->num_tile_configs * sizeof(uint32_t));
964
if (err) {
965
args->num_tile_configs = 0;
966
return -EFAULT;
967
}
968
969
if (args->num_macro_tile_configs > config.num_macro_tile_configs)
970
args->num_macro_tile_configs =
971
config.num_macro_tile_configs;
972
err = copy_to_user((void __user *)args->macro_tile_config_ptr,
973
config.macro_tile_config_ptr,
974
args->num_macro_tile_configs * sizeof(uint32_t));
975
if (err) {
976
args->num_macro_tile_configs = 0;
977
return -EFAULT;
978
}
979
980
return 0;
981
}
982
983
static int kfd_ioctl_acquire_vm(struct file *filep, struct kfd_process *p,
984
void *data)
985
{
986
struct kfd_ioctl_acquire_vm_args *args = data;
987
struct kfd_process_device *pdd;
988
struct file *drm_file;
989
int ret;
990
991
drm_file = fget(args->drm_fd);
992
if (!drm_file)
993
return -EINVAL;
994
995
mutex_lock(&p->mutex);
996
pdd = kfd_process_device_data_by_id(p, args->gpu_id);
997
if (!pdd) {
998
ret = -EINVAL;
999
goto err_pdd;
1000
}
1001
1002
if (pdd->drm_file) {
1003
ret = pdd->drm_file == drm_file ? 0 : -EBUSY;
1004
goto err_drm_file;
1005
}
1006
1007
ret = kfd_process_device_init_vm(pdd, drm_file);
1008
if (ret)
1009
goto err_unlock;
1010
1011
/* On success, the PDD keeps the drm_file reference */
1012
mutex_unlock(&p->mutex);
1013
1014
return 0;
1015
1016
err_unlock:
1017
err_pdd:
1018
err_drm_file:
1019
mutex_unlock(&p->mutex);
1020
fput(drm_file);
1021
return ret;
1022
}
1023
1024
bool kfd_dev_is_large_bar(struct kfd_node *dev)
1025
{
1026
if (dev->kfd->adev->debug_largebar) {
1027
pr_debug("Simulate large-bar allocation on non large-bar machine\n");
1028
return true;
1029
}
1030
1031
if (dev->local_mem_info.local_mem_size_private == 0 &&
1032
dev->local_mem_info.local_mem_size_public > 0)
1033
return true;
1034
1035
if (dev->local_mem_info.local_mem_size_public == 0 &&
1036
dev->kfd->adev->gmc.is_app_apu) {
1037
pr_debug("APP APU, Consider like a large bar system\n");
1038
return true;
1039
}
1040
1041
return false;
1042
}
1043
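/*
 * Descriptive note: a node counts as "large BAR" when all of its local
 * memory is CPU-visible -- either genuinely (no private VRAM, only public
 * VRAM), on an APP APU where the GPU memory is carved out of system memory,
 * or when the debug_largebar option forces the behaviour for testing.
 * kfd_ioctl_alloc_memory_of_gpu() below relies on this to reject
 * host-visible VRAM allocations on small-BAR systems.
 */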
1044
static int kfd_ioctl_get_available_memory(struct file *filep,
1045
struct kfd_process *p, void *data)
1046
{
1047
struct kfd_ioctl_get_available_memory_args *args = data;
1048
struct kfd_process_device *pdd = kfd_lock_pdd_by_id(p, args->gpu_id);
1049
1050
if (!pdd)
1051
return -EINVAL;
1052
args->available = amdgpu_amdkfd_get_available_memory(pdd->dev->adev,
1053
pdd->dev->node_id);
1054
kfd_unlock_pdd(pdd);
1055
return 0;
1056
}
1057
1058
static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
1059
struct kfd_process *p, void *data)
1060
{
1061
struct kfd_ioctl_alloc_memory_of_gpu_args *args = data;
1062
struct kfd_process_device *pdd;
1063
void *mem;
1064
struct kfd_node *dev;
1065
int idr_handle;
1066
long err;
1067
uint64_t offset = args->mmap_offset;
1068
uint32_t flags = args->flags;
1069
1070
if (args->size == 0)
1071
return -EINVAL;
1072
1073
if (p->context_id != KFD_CONTEXT_ID_PRIMARY && (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR)) {
1074
pr_debug("USERPTR is not supported on non-primary kfd_process\n");
1075
1076
return -EOPNOTSUPP;
1077
}
1078
1079
#if IS_ENABLED(CONFIG_HSA_AMD_SVM)
1080
/* Flush pending deferred work to avoid racing with deferred actions
1081
* from previous memory map changes (e.g. munmap).
1082
*/
1083
svm_range_list_lock_and_flush_work(&p->svms, current->mm);
1084
mutex_lock(&p->svms.lock);
1085
mmap_write_unlock(current->mm);
1086
1087
/* Skip a special case that allocates VRAM without VA,
1088
* in which case the VA is 0 and treated as invalid.
1089
*/
1090
if (!(!args->va_addr && (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)) &&
1091
interval_tree_iter_first(&p->svms.objects,
1092
args->va_addr >> PAGE_SHIFT,
1093
(args->va_addr + args->size - 1) >> PAGE_SHIFT)) {
1094
pr_err("Address: 0x%llx already allocated by SVM\n",
1095
args->va_addr);
1096
mutex_unlock(&p->svms.lock);
1097
return -EADDRINUSE;
1098
}
1099
1100
/* When registering a user buffer, check whether it has already been
1101
* registered by SVM via the buffer's CPU virtual address.
1102
*/
1103
if ((flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) &&
1104
interval_tree_iter_first(&p->svms.objects,
1105
args->mmap_offset >> PAGE_SHIFT,
1106
(args->mmap_offset + args->size - 1) >> PAGE_SHIFT)) {
1107
pr_err("User Buffer Address: 0x%llx already allocated by SVM\n",
1108
args->mmap_offset);
1109
mutex_unlock(&p->svms.lock);
1110
return -EADDRINUSE;
1111
}
1112
1113
mutex_unlock(&p->svms.lock);
1114
#endif
1115
mutex_lock(&p->mutex);
1116
pdd = kfd_process_device_data_by_id(p, args->gpu_id);
1117
if (!pdd) {
1118
err = -EINVAL;
1119
goto err_pdd;
1120
}
1121
1122
dev = pdd->dev;
1123
1124
if ((flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) &&
1125
(flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) &&
1126
!kfd_dev_is_large_bar(dev)) {
1127
pr_err("Alloc host visible vram on small bar is not allowed\n");
1128
err = -EINVAL;
1129
goto err_large_bar;
1130
}
1131
1132
pdd = kfd_bind_process_to_device(dev, p);
1133
if (IS_ERR(pdd)) {
1134
err = PTR_ERR(pdd);
1135
goto err_unlock;
1136
}
1137
1138
if (flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
1139
if (args->size != kfd_doorbell_process_slice(dev->kfd)) {
1140
err = -EINVAL;
1141
goto err_unlock;
1142
}
1143
offset = kfd_get_process_doorbells(pdd);
1144
if (!offset) {
1145
err = -ENOMEM;
1146
goto err_unlock;
1147
}
1148
} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
1149
if (args->size != PAGE_SIZE) {
1150
err = -EINVAL;
1151
goto err_unlock;
1152
}
1153
offset = dev->adev->rmmio_remap.bus_addr;
1154
if (!offset || (PAGE_SIZE > 4096)) {
1155
err = -ENOMEM;
1156
goto err_unlock;
1157
}
1158
}
1159
1160
err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1161
dev->adev, args->va_addr, args->size,
1162
pdd->drm_priv, (struct kgd_mem **) &mem, &offset,
1163
flags, false);
1164
1165
if (err)
1166
goto err_unlock;
1167
1168
idr_handle = kfd_process_device_create_obj_handle(pdd, mem);
1169
if (idr_handle < 0) {
1170
err = -EFAULT;
1171
goto err_free;
1172
}
1173
1174
/* Update the VRAM usage count */
1175
if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
1176
uint64_t size = args->size;
1177
1178
if (flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM)
1179
size >>= 1;
1180
atomic64_add(PAGE_ALIGN(size), &pdd->vram_usage);
1181
}
1182
1183
mutex_unlock(&p->mutex);
1184
1185
args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
1186
args->mmap_offset = offset;
1187
1188
/* MMIO is mapped through kfd device
1189
* Generate a kfd mmap offset
1190
*/
1191
if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)
1192
args->mmap_offset = KFD_MMAP_TYPE_MMIO
1193
| KFD_MMAP_GPU_ID(args->gpu_id);
1194
1195
return 0;
1196
1197
err_free:
1198
amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, (struct kgd_mem *)mem,
1199
pdd->drm_priv, NULL);
1200
err_unlock:
1201
err_pdd:
1202
err_large_bar:
1203
mutex_unlock(&p->mutex);
1204
return err;
1205
}
1206
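/*
 * Typical life cycle from user space (illustrative, assuming the AMDKFD_IOC_*
 * macros from uapi/linux/kfd_ioctl.h): allocate with
 * AMDKFD_IOC_ALLOC_MEMORY_OF_GPU, make the buffer GPU-accessible with
 * AMDKFD_IOC_MAP_MEMORY_TO_GPU, optionally mmap() the returned mmap_offset
 * for CPU access (through /dev/kfd for doorbell/MMIO allocations), and
 * finally tear down with AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU and
 * AMDKFD_IOC_FREE_MEMORY_OF_GPU, which the handlers below implement.
 */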
1207
static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
1208
struct kfd_process *p, void *data)
1209
{
1210
struct kfd_ioctl_free_memory_of_gpu_args *args = data;
1211
struct kfd_process_device *pdd;
1212
void *mem;
1213
int ret;
1214
uint64_t size = 0;
1215
1216
mutex_lock(&p->mutex);
1217
/*
1218
* Safeguard to prevent user space from freeing signal BO.
1219
* It will be freed at process termination.
1220
*/
1221
if (p->signal_handle && (p->signal_handle == args->handle)) {
1222
pr_err("Free signal BO is not allowed\n");
1223
ret = -EPERM;
1224
goto err_unlock;
1225
}
1226
1227
pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle));
1228
if (!pdd) {
1229
pr_err("Process device data doesn't exist\n");
1230
ret = -EINVAL;
1231
goto err_pdd;
1232
}
1233
1234
mem = kfd_process_device_translate_handle(
1235
pdd, GET_IDR_HANDLE(args->handle));
1236
if (!mem) {
1237
ret = -EINVAL;
1238
goto err_unlock;
1239
}
1240
1241
ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev,
1242
(struct kgd_mem *)mem, pdd->drm_priv, &size);
1243
1244
/* If freeing the buffer failed, leave the handle in place for
1245
* clean-up during process tear-down.
1246
*/
1247
if (!ret)
1248
kfd_process_device_remove_obj_handle(
1249
pdd, GET_IDR_HANDLE(args->handle));
1250
1251
atomic64_sub(size, &pdd->vram_usage);
1252
1253
err_unlock:
1254
err_pdd:
1255
mutex_unlock(&p->mutex);
1256
return ret;
1257
}
1258
1259
static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
1260
struct kfd_process *p, void *data)
1261
{
1262
struct kfd_ioctl_map_memory_to_gpu_args *args = data;
1263
struct kfd_process_device *pdd, *peer_pdd;
1264
void *mem;
1265
struct kfd_node *dev;
1266
long err = 0;
1267
int i;
1268
uint32_t *devices_arr = NULL;
1269
1270
if (!args->n_devices) {
1271
pr_debug("Device IDs array empty\n");
1272
return -EINVAL;
1273
}
1274
if (args->n_success > args->n_devices) {
1275
pr_debug("n_success exceeds n_devices\n");
1276
return -EINVAL;
1277
}
1278
1279
devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr),
1280
GFP_KERNEL);
1281
if (!devices_arr)
1282
return -ENOMEM;
1283
1284
err = copy_from_user(devices_arr,
1285
(void __user *)args->device_ids_array_ptr,
1286
args->n_devices * sizeof(*devices_arr));
1287
if (err != 0) {
1288
err = -EFAULT;
1289
goto copy_from_user_failed;
1290
}
1291
1292
mutex_lock(&p->mutex);
1293
pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle));
1294
if (!pdd) {
1295
err = -EINVAL;
1296
goto get_process_device_data_failed;
1297
}
1298
dev = pdd->dev;
1299
1300
pdd = kfd_bind_process_to_device(dev, p);
1301
if (IS_ERR(pdd)) {
1302
err = PTR_ERR(pdd);
1303
goto bind_process_to_device_failed;
1304
}
1305
1306
mem = kfd_process_device_translate_handle(pdd,
1307
GET_IDR_HANDLE(args->handle));
1308
if (!mem) {
1309
err = -ENOMEM;
1310
goto get_mem_obj_from_handle_failed;
1311
}
1312
1313
for (i = args->n_success; i < args->n_devices; i++) {
1314
peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
1315
if (!peer_pdd) {
1316
pr_debug("Getting device by id failed for 0x%x\n",
1317
devices_arr[i]);
1318
err = -EINVAL;
1319
goto get_mem_obj_from_handle_failed;
1320
}
1321
1322
peer_pdd = kfd_bind_process_to_device(peer_pdd->dev, p);
1323
if (IS_ERR(peer_pdd)) {
1324
err = PTR_ERR(peer_pdd);
1325
goto get_mem_obj_from_handle_failed;
1326
}
1327
1328
err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
1329
peer_pdd->dev->adev, (struct kgd_mem *)mem,
1330
peer_pdd->drm_priv);
1331
if (err) {
1332
struct pci_dev *pdev = peer_pdd->dev->adev->pdev;
1333
1334
dev_err(dev->adev->dev,
1335
"Failed to map peer:%04x:%02x:%02x.%d mem_domain:%d\n",
1336
pci_domain_nr(pdev->bus),
1337
pdev->bus->number,
1338
PCI_SLOT(pdev->devfn),
1339
PCI_FUNC(pdev->devfn),
1340
((struct kgd_mem *)mem)->domain);
1341
goto map_memory_to_gpu_failed;
1342
}
1343
args->n_success = i+1;
1344
}
1345
1346
err = amdgpu_amdkfd_gpuvm_sync_memory(dev->adev, (struct kgd_mem *) mem, true);
1347
if (err) {
1348
pr_debug("Sync memory failed, wait interrupted by user signal\n");
1349
goto sync_memory_failed;
1350
}
1351
1352
mutex_unlock(&p->mutex);
1353
1354
/* Flush TLBs after waiting for the page table updates to complete */
1355
for (i = 0; i < args->n_devices; i++) {
1356
peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
1357
if (WARN_ON_ONCE(!peer_pdd))
1358
continue;
1359
kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY);
1360
}
1361
kfree(devices_arr);
1362
1363
return err;
1364
1365
get_process_device_data_failed:
1366
bind_process_to_device_failed:
1367
get_mem_obj_from_handle_failed:
1368
map_memory_to_gpu_failed:
1369
sync_memory_failed:
1370
mutex_unlock(&p->mutex);
1371
copy_from_user_failed:
1372
kfree(devices_arr);
1373
1374
return err;
1375
}
1376
1377
static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
1378
struct kfd_process *p, void *data)
1379
{
1380
struct kfd_ioctl_unmap_memory_from_gpu_args *args = data;
1381
struct kfd_process_device *pdd, *peer_pdd;
1382
void *mem;
1383
long err = 0;
1384
uint32_t *devices_arr = NULL, i;
1385
bool flush_tlb;
1386
1387
if (!args->n_devices) {
1388
pr_debug("Device IDs array empty\n");
1389
return -EINVAL;
1390
}
1391
if (args->n_success > args->n_devices) {
1392
pr_debug("n_success exceeds n_devices\n");
1393
return -EINVAL;
1394
}
1395
1396
devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr),
1397
GFP_KERNEL);
1398
if (!devices_arr)
1399
return -ENOMEM;
1400
1401
err = copy_from_user(devices_arr,
1402
(void __user *)args->device_ids_array_ptr,
1403
args->n_devices * sizeof(*devices_arr));
1404
if (err != 0) {
1405
err = -EFAULT;
1406
goto copy_from_user_failed;
1407
}
1408
1409
mutex_lock(&p->mutex);
1410
pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle));
1411
if (!pdd) {
1412
err = -EINVAL;
1413
goto bind_process_to_device_failed;
1414
}
1415
1416
mem = kfd_process_device_translate_handle(pdd,
1417
GET_IDR_HANDLE(args->handle));
1418
if (!mem) {
1419
err = -ENOMEM;
1420
goto get_mem_obj_from_handle_failed;
1421
}
1422
1423
for (i = args->n_success; i < args->n_devices; i++) {
1424
peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
1425
if (!peer_pdd) {
1426
err = -EINVAL;
1427
goto get_mem_obj_from_handle_failed;
1428
}
1429
err = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1430
peer_pdd->dev->adev, (struct kgd_mem *)mem, peer_pdd->drm_priv);
1431
if (err) {
1432
pr_debug("Failed to unmap from gpu %d/%d\n", i, args->n_devices);
1433
goto unmap_memory_from_gpu_failed;
1434
}
1435
args->n_success = i+1;
1436
}
1437
1438
flush_tlb = kfd_flush_tlb_after_unmap(pdd->dev->kfd);
1439
if (flush_tlb) {
1440
err = amdgpu_amdkfd_gpuvm_sync_memory(pdd->dev->adev,
1441
(struct kgd_mem *) mem, true);
1442
if (err) {
1443
pr_debug("Sync memory failed, wait interrupted by user signal\n");
1444
goto sync_memory_failed;
1445
}
1446
}
1447
1448
/* Flush TLBs after waiting for the page table updates to complete */
1449
for (i = 0; i < args->n_devices; i++) {
1450
peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
1451
if (WARN_ON_ONCE(!peer_pdd))
1452
continue;
1453
if (flush_tlb)
1454
kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT);
1455
1456
/* Remove dma mapping after tlb flush to avoid IO_PAGE_FAULT */
1457
err = amdgpu_amdkfd_gpuvm_dmaunmap_mem(mem, peer_pdd->drm_priv);
1458
if (err)
1459
goto sync_memory_failed;
1460
}
1461
1462
mutex_unlock(&p->mutex);
1463
1464
kfree(devices_arr);
1465
1466
return 0;
1467
1468
bind_process_to_device_failed:
1469
get_mem_obj_from_handle_failed:
1470
unmap_memory_from_gpu_failed:
1471
sync_memory_failed:
1472
mutex_unlock(&p->mutex);
1473
copy_from_user_failed:
1474
kfree(devices_arr);
1475
return err;
1476
}
1477
1478
static int kfd_ioctl_alloc_queue_gws(struct file *filep,
1479
struct kfd_process *p, void *data)
1480
{
1481
int retval;
1482
struct kfd_ioctl_alloc_queue_gws_args *args = data;
1483
struct queue *q;
1484
struct kfd_node *dev;
1485
1486
mutex_lock(&p->mutex);
1487
q = pqm_get_user_queue(&p->pqm, args->queue_id);
1488
1489
if (q) {
1490
dev = q->device;
1491
} else {
1492
retval = -EINVAL;
1493
goto out_unlock;
1494
}
1495
1496
if (!dev->gws) {
1497
retval = -ENODEV;
1498
goto out_unlock;
1499
}
1500
1501
if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
1502
retval = -ENODEV;
1503
goto out_unlock;
1504
}
1505
1506
if (p->debug_trap_enabled && (!kfd_dbg_has_gws_support(dev) ||
1507
kfd_dbg_has_cwsr_workaround(dev))) {
1508
retval = -EBUSY;
1509
goto out_unlock;
1510
}
1511
1512
retval = pqm_set_gws(&p->pqm, args->queue_id, args->num_gws ? dev->gws : NULL);
1513
mutex_unlock(&p->mutex);
1514
1515
args->first_gws = 0;
1516
return retval;
1517
1518
out_unlock:
1519
mutex_unlock(&p->mutex);
1520
return retval;
1521
}
1522
1523
static int kfd_ioctl_get_dmabuf_info(struct file *filep,
1524
struct kfd_process *p, void *data)
1525
{
1526
struct kfd_ioctl_get_dmabuf_info_args *args = data;
1527
struct kfd_node *dev = NULL;
1528
struct amdgpu_device *dmabuf_adev;
1529
void *metadata_buffer = NULL;
1530
uint32_t flags;
1531
int8_t xcp_id;
1532
unsigned int i;
1533
int r;
1534
1535
/* Find a KFD GPU device that supports the get_dmabuf_info query */
1536
for (i = 0; kfd_topology_enum_kfd_devices(i, &dev) == 0; i++)
1537
if (dev && !kfd_devcgroup_check_permission(dev))
1538
break;
1539
if (!dev)
1540
return -EINVAL;
1541
1542
if (args->metadata_ptr) {
1543
metadata_buffer = kzalloc(args->metadata_size, GFP_KERNEL);
1544
if (!metadata_buffer)
1545
return -ENOMEM;
1546
}
1547
1548
/* Get dmabuf info from KGD */
1549
r = amdgpu_amdkfd_get_dmabuf_info(dev->adev, args->dmabuf_fd,
1550
&dmabuf_adev, &args->size,
1551
metadata_buffer, args->metadata_size,
1552
&args->metadata_size, &flags, &xcp_id);
1553
if (r)
1554
goto exit;
1555
1556
if (xcp_id >= 0)
1557
args->gpu_id = dmabuf_adev->kfd.dev->nodes[xcp_id]->id;
1558
else
1559
args->gpu_id = dev->id;
1560
args->flags = flags;
1561
1562
/* Copy metadata buffer to user mode */
1563
if (metadata_buffer) {
1564
r = copy_to_user((void __user *)args->metadata_ptr,
1565
metadata_buffer, args->metadata_size);
1566
if (r != 0)
1567
r = -EFAULT;
1568
}
1569
1570
exit:
1571
kfree(metadata_buffer);
1572
1573
return r;
1574
}
1575
1576
static int kfd_ioctl_import_dmabuf(struct file *filep,
1577
struct kfd_process *p, void *data)
1578
{
1579
struct kfd_ioctl_import_dmabuf_args *args = data;
1580
struct kfd_process_device *pdd;
1581
int idr_handle;
1582
uint64_t size;
1583
void *mem;
1584
int r;
1585
1586
mutex_lock(&p->mutex);
1587
pdd = kfd_process_device_data_by_id(p, args->gpu_id);
1588
if (!pdd) {
1589
r = -EINVAL;
1590
goto err_unlock;
1591
}
1592
1593
pdd = kfd_bind_process_to_device(pdd->dev, p);
1594
if (IS_ERR(pdd)) {
1595
r = PTR_ERR(pdd);
1596
goto err_unlock;
1597
}
1598
1599
r = amdgpu_amdkfd_gpuvm_import_dmabuf_fd(pdd->dev->adev, args->dmabuf_fd,
1600
args->va_addr, pdd->drm_priv,
1601
(struct kgd_mem **)&mem, &size,
1602
NULL);
1603
if (r)
1604
goto err_unlock;
1605
1606
idr_handle = kfd_process_device_create_obj_handle(pdd, mem);
1607
if (idr_handle < 0) {
1608
r = -EFAULT;
1609
goto err_free;
1610
}
1611
1612
mutex_unlock(&p->mutex);
1613
1614
args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
1615
1616
return 0;
1617
1618
err_free:
1619
amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, (struct kgd_mem *)mem,
1620
pdd->drm_priv, NULL);
1621
err_unlock:
1622
mutex_unlock(&p->mutex);
1623
return r;
1624
}
1625
1626
static int kfd_ioctl_export_dmabuf(struct file *filep,
1627
struct kfd_process *p, void *data)
1628
{
1629
struct kfd_ioctl_export_dmabuf_args *args = data;
1630
struct kfd_process_device *pdd;
1631
struct dma_buf *dmabuf;
1632
struct kfd_node *dev;
1633
void *mem;
1634
int ret = 0;
1635
1636
dev = kfd_device_by_id(GET_GPU_ID(args->handle));
1637
if (!dev)
1638
return -EINVAL;
1639
1640
mutex_lock(&p->mutex);
1641
1642
pdd = kfd_get_process_device_data(dev, p);
1643
if (!pdd) {
1644
ret = -EINVAL;
1645
goto err_unlock;
1646
}
1647
1648
mem = kfd_process_device_translate_handle(pdd,
1649
GET_IDR_HANDLE(args->handle));
1650
if (!mem) {
1651
ret = -EINVAL;
1652
goto err_unlock;
1653
}
1654
1655
ret = amdgpu_amdkfd_gpuvm_export_dmabuf(mem, &dmabuf);
1656
mutex_unlock(&p->mutex);
1657
if (ret)
1658
goto err_out;
1659
1660
ret = dma_buf_fd(dmabuf, args->flags);
1661
if (ret < 0) {
1662
dma_buf_put(dmabuf);
1663
goto err_out;
1664
}
1665
/* dma_buf_fd assigns the reference count to the fd, no need to
1666
* put the reference here.
1667
*/
1668
args->dmabuf_fd = ret;
1669
1670
return 0;
1671
1672
err_unlock:
1673
mutex_unlock(&p->mutex);
1674
err_out:
1675
return ret;
1676
}
1677
1678
/* Handle requests for watching SMI events */
1679
static int kfd_ioctl_smi_events(struct file *filep,
1680
struct kfd_process *p, void *data)
1681
{
1682
struct kfd_ioctl_smi_events_args *args = data;
1683
struct kfd_process_device *pdd;
1684
1685
mutex_lock(&p->mutex);
1686
1687
pdd = kfd_process_device_data_by_id(p, args->gpuid);
1688
mutex_unlock(&p->mutex);
1689
if (!pdd)
1690
return -EINVAL;
1691
1692
return kfd_smi_event_open(pdd->dev, &args->anon_fd);
1693
}
1694
1695
#if IS_ENABLED(CONFIG_HSA_AMD_SVM)
1696
1697
static int kfd_ioctl_set_xnack_mode(struct file *filep,
1698
struct kfd_process *p, void *data)
1699
{
1700
struct kfd_ioctl_set_xnack_mode_args *args = data;
1701
int r = 0;
1702
1703
mutex_lock(&p->mutex);
1704
if (args->xnack_enabled >= 0) {
1705
if (!list_empty(&p->pqm.queues)) {
1706
pr_debug("Process has user queues running\n");
1707
r = -EBUSY;
1708
goto out_unlock;
1709
}
1710
1711
if (p->xnack_enabled == args->xnack_enabled)
1712
goto out_unlock;
1713
1714
if (args->xnack_enabled && !kfd_process_xnack_mode(p, true)) {
1715
r = -EPERM;
1716
goto out_unlock;
1717
}
1718
1719
r = svm_range_switch_xnack_reserve_mem(p, args->xnack_enabled);
1720
} else {
1721
args->xnack_enabled = p->xnack_enabled;
1722
}
1723
1724
out_unlock:
1725
mutex_unlock(&p->mutex);
1726
1727
return r;
1728
}
1729
1730
static int kfd_ioctl_svm(struct file *filep, struct kfd_process *p, void *data)
1731
{
1732
struct kfd_ioctl_svm_args *args = data;
1733
int r = 0;
1734
1735
if (p->context_id != KFD_CONTEXT_ID_PRIMARY) {
1736
pr_debug("SVM ioctl not supported on non-primary kfd process\n");
1737
1738
return -EOPNOTSUPP;
1739
}
1740
1741
pr_debug("start 0x%llx size 0x%llx op 0x%x nattr 0x%x\n",
1742
args->start_addr, args->size, args->op, args->nattr);
1743
1744
if ((args->start_addr & ~PAGE_MASK) || (args->size & ~PAGE_MASK))
1745
return -EINVAL;
1746
if (!args->start_addr || !args->size)
1747
return -EINVAL;
1748
1749
r = svm_ioctl(p, args->op, args->start_addr, args->size, args->nattr,
1750
args->attrs);
1751
1752
return r;
1753
}
1754
#else
1755
static int kfd_ioctl_set_xnack_mode(struct file *filep,
1756
struct kfd_process *p, void *data)
1757
{
1758
return -EPERM;
1759
}
1760
static int kfd_ioctl_svm(struct file *filep, struct kfd_process *p, void *data)
1761
{
1762
return -EPERM;
1763
}
1764
#endif
1765
1766
static int criu_checkpoint_process(struct kfd_process *p,
1767
uint8_t __user *user_priv_data,
1768
uint64_t *priv_offset)
1769
{
1770
struct kfd_criu_process_priv_data process_priv;
1771
int ret;
1772
1773
memset(&process_priv, 0, sizeof(process_priv));
1774
1775
process_priv.version = KFD_CRIU_PRIV_VERSION;
1776
/* For C/R we don't consider the negative xnack mode, which is only used
1777
* for querying without changing it; here 0 simply means disabled and 1
1778
* means enabled, i.e. retry to find a valid PTE.
1779
*/
1780
process_priv.xnack_mode = p->xnack_enabled ? 1 : 0;
1781
1782
ret = copy_to_user(user_priv_data + *priv_offset,
1783
&process_priv, sizeof(process_priv));
1784
1785
if (ret) {
1786
pr_err("Failed to copy process information to user\n");
1787
ret = -EFAULT;
1788
}
1789
1790
*priv_offset += sizeof(process_priv);
1791
return ret;
1792
}
1793
1794
static int criu_checkpoint_devices(struct kfd_process *p,
1795
uint32_t num_devices,
1796
uint8_t __user *user_addr,
1797
uint8_t __user *user_priv_data,
1798
uint64_t *priv_offset)
1799
{
1800
struct kfd_criu_device_priv_data *device_priv = NULL;
1801
struct kfd_criu_device_bucket *device_buckets = NULL;
1802
int ret = 0, i;
1803
1804
device_buckets = kvzalloc(num_devices * sizeof(*device_buckets), GFP_KERNEL);
1805
if (!device_buckets) {
1806
ret = -ENOMEM;
1807
goto exit;
1808
}
1809
1810
device_priv = kvzalloc(num_devices * sizeof(*device_priv), GFP_KERNEL);
1811
if (!device_priv) {
1812
ret = -ENOMEM;
1813
goto exit;
1814
}
1815
1816
for (i = 0; i < num_devices; i++) {
1817
struct kfd_process_device *pdd = p->pdds[i];
1818
1819
device_buckets[i].user_gpu_id = pdd->user_gpu_id;
1820
device_buckets[i].actual_gpu_id = pdd->dev->id;
1821
1822
/*
1823
* priv_data does not contain useful information for now and is reserved for
1824
* future use, so we do not set its contents.
1825
*/
1826
}
1827
1828
ret = copy_to_user(user_addr, device_buckets, num_devices * sizeof(*device_buckets));
1829
if (ret) {
1830
pr_err("Failed to copy device information to user\n");
1831
ret = -EFAULT;
1832
goto exit;
1833
}
1834
1835
ret = copy_to_user(user_priv_data + *priv_offset,
1836
device_priv,
1837
num_devices * sizeof(*device_priv));
1838
if (ret) {
1839
pr_err("Failed to copy device information to user\n");
1840
ret = -EFAULT;
1841
}
1842
*priv_offset += num_devices * sizeof(*device_priv);
1843
1844
exit:
1845
kvfree(device_buckets);
1846
kvfree(device_priv);
1847
return ret;
1848
}
1849
1850
static uint32_t get_process_num_bos(struct kfd_process *p)
1851
{
1852
uint32_t num_of_bos = 0;
1853
int i;
1854
1855
/* Run over all PDDs of the process */
1856
for (i = 0; i < p->n_pdds; i++) {
1857
struct kfd_process_device *pdd = p->pdds[i];
1858
void *mem;
1859
int id;
1860
1861
idr_for_each_entry(&pdd->alloc_idr, mem, id) {
1862
struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
1863
1864
if (!kgd_mem->va || kgd_mem->va > pdd->gpuvm_base)
1865
num_of_bos++;
1866
}
1867
}
1868
return num_of_bos;
1869
}
1870
1871
static int criu_get_prime_handle(struct kgd_mem *mem,
1872
int flags, u32 *shared_fd,
1873
struct file **file)
1874
{
1875
struct dma_buf *dmabuf;
1876
int ret;
1877
1878
ret = amdgpu_amdkfd_gpuvm_export_dmabuf(mem, &dmabuf);
1879
if (ret) {
1880
pr_err("dmabuf export failed for the BO\n");
1881
return ret;
1882
}
1883
1884
ret = get_unused_fd_flags(flags);
1885
if (ret < 0) {
1886
pr_err("dmabuf create fd failed, ret:%d\n", ret);
1887
goto out_free_dmabuf;
1888
}
1889
1890
*shared_fd = ret;
1891
*file = dmabuf->file;
1892
return 0;
1893
1894
out_free_dmabuf:
1895
dma_buf_put(dmabuf);
1896
return ret;
1897
}
1898
1899
static void commit_files(struct file **files,
1900
struct kfd_criu_bo_bucket *bo_buckets,
1901
unsigned int count,
1902
int err)
1903
{
1904
while (count--) {
1905
struct file *file = files[count];
1906
1907
if (!file)
1908
continue;
1909
if (err) {
1910
fput(file);
1911
put_unused_fd(bo_buckets[count].dmabuf_fd);
1912
} else {
1913
fd_install(bo_buckets[count].dmabuf_fd, file);
1914
}
1915
}
1916
}
1917
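/*
 * Descriptive note: this is the usual two-phase fd publication pattern.
 * criu_get_prime_handle() only reserves a descriptor number with
 * get_unused_fd_flags(); commit_files() then either publishes it with
 * fd_install() once the whole checkpoint step succeeded, or releases the
 * reservation (fput() on the dma-buf file plus put_unused_fd()) on error,
 * so user space never observes a half-initialised dmabuf fd.
 */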
1918
static int criu_checkpoint_bos(struct kfd_process *p,
1919
uint32_t num_bos,
1920
uint8_t __user *user_bos,
1921
uint8_t __user *user_priv_data,
1922
uint64_t *priv_offset)
1923
{
1924
struct kfd_criu_bo_bucket *bo_buckets;
1925
struct kfd_criu_bo_priv_data *bo_privs;
1926
struct file **files = NULL;
1927
int ret = 0, pdd_index, bo_index = 0, id;
1928
void *mem;
1929
1930
bo_buckets = kvzalloc(num_bos * sizeof(*bo_buckets), GFP_KERNEL);
1931
if (!bo_buckets)
1932
return -ENOMEM;
1933
1934
bo_privs = kvzalloc(num_bos * sizeof(*bo_privs), GFP_KERNEL);
1935
if (!bo_privs) {
1936
ret = -ENOMEM;
1937
goto exit;
1938
}
1939
1940
files = kvzalloc(num_bos * sizeof(struct file *), GFP_KERNEL);
1941
if (!files) {
1942
ret = -ENOMEM;
1943
goto exit;
1944
}
1945
1946
for (pdd_index = 0; pdd_index < p->n_pdds; pdd_index++) {
1947
struct kfd_process_device *pdd = p->pdds[pdd_index];
1948
struct amdgpu_bo *dumper_bo;
1949
struct kgd_mem *kgd_mem;
1950
1951
idr_for_each_entry(&pdd->alloc_idr, mem, id) {
1952
struct kfd_criu_bo_bucket *bo_bucket;
1953
struct kfd_criu_bo_priv_data *bo_priv;
1954
int i, dev_idx = 0;
1955
1956
kgd_mem = (struct kgd_mem *)mem;
1957
dumper_bo = kgd_mem->bo;
1958
1959
/* Skip checkpointing BOs that are used for Trap handler
1960
* code and state. Currently, these BOs have a VA that
1961
* is less than the GPUVM base.
1962
*/
1963
if (kgd_mem->va && kgd_mem->va <= pdd->gpuvm_base)
1964
continue;
1965
1966
bo_bucket = &bo_buckets[bo_index];
1967
bo_priv = &bo_privs[bo_index];
1968
1969
bo_bucket->gpu_id = pdd->user_gpu_id;
1970
bo_bucket->addr = (uint64_t)kgd_mem->va;
1971
bo_bucket->size = amdgpu_bo_size(dumper_bo);
1972
bo_bucket->alloc_flags = (uint32_t)kgd_mem->alloc_flags;
1973
bo_priv->idr_handle = id;
1974
1975
if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
1976
ret = amdgpu_ttm_tt_get_userptr(&dumper_bo->tbo,
1977
&bo_priv->user_addr);
1978
if (ret) {
1979
pr_err("Failed to obtain user address for user-pointer bo\n");
1980
goto exit;
1981
}
1982
}
1983
if (bo_bucket->alloc_flags
1984
& (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT)) {
1985
ret = criu_get_prime_handle(kgd_mem,
1986
bo_bucket->alloc_flags &
1987
KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? DRM_RDWR : 0,
1988
&bo_bucket->dmabuf_fd, &files[bo_index]);
1989
if (ret)
1990
goto exit;
1991
} else {
1992
bo_bucket->dmabuf_fd = KFD_INVALID_FD;
1993
}
1994
1995
if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL)
1996
bo_bucket->offset = KFD_MMAP_TYPE_DOORBELL |
1997
KFD_MMAP_GPU_ID(pdd->dev->id);
1998
else if (bo_bucket->alloc_flags &
1999
KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)
2000
bo_bucket->offset = KFD_MMAP_TYPE_MMIO |
2001
KFD_MMAP_GPU_ID(pdd->dev->id);
2002
else
2003
bo_bucket->offset = amdgpu_bo_mmap_offset(dumper_bo);
2004
2005
for (i = 0; i < p->n_pdds; i++) {
2006
if (amdgpu_amdkfd_bo_mapped_to_dev(p->pdds[i]->drm_priv, kgd_mem))
2007
bo_priv->mapped_gpuids[dev_idx++] = p->pdds[i]->user_gpu_id;
2008
}
2009
2010
pr_debug("bo_size = 0x%llx, bo_addr = 0x%llx bo_offset = 0x%llx\n"
2011
"gpu_id = 0x%x alloc_flags = 0x%x idr_handle = 0x%x",
2012
bo_bucket->size,
2013
bo_bucket->addr,
2014
bo_bucket->offset,
2015
bo_bucket->gpu_id,
2016
bo_bucket->alloc_flags,
2017
bo_priv->idr_handle);
2018
bo_index++;
2019
}
2020
}
2021
2022
ret = copy_to_user(user_bos, bo_buckets, num_bos * sizeof(*bo_buckets));
2023
if (ret) {
2024
pr_err("Failed to copy BO information to user\n");
2025
ret = -EFAULT;
2026
goto exit;
2027
}
2028
2029
ret = copy_to_user(user_priv_data + *priv_offset, bo_privs, num_bos * sizeof(*bo_privs));
2030
if (ret) {
2031
pr_err("Failed to copy BO priv information to user\n");
2032
ret = -EFAULT;
2033
goto exit;
2034
}
2035
2036
*priv_offset += num_bos * sizeof(*bo_privs);
2037
2038
exit:
2039
commit_files(files, bo_buckets, bo_index, ret);
2040
kvfree(files);
2041
kvfree(bo_buckets);
2042
kvfree(bo_privs);
2043
return ret;
2044
}
2045
2046
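/* Count the devices, BOs, queues, events and SVM ranges of the process and,
* optionally, the total private data size a checkpoint of it will need.
*/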
static int criu_get_process_object_info(struct kfd_process *p,
uint32_t *num_devices,
uint32_t *num_bos,
uint32_t *num_objects,
uint64_t *objs_priv_size)
{
uint64_t queues_priv_data_size, svm_priv_data_size, priv_size;
uint32_t num_queues, num_events, num_svm_ranges;
int ret;

*num_devices = p->n_pdds;
*num_bos = get_process_num_bos(p);

ret = kfd_process_get_queue_info(p, &num_queues, &queues_priv_data_size);
if (ret)
return ret;

num_events = kfd_get_num_events(p);

svm_range_get_info(p, &num_svm_ranges, &svm_priv_data_size);

*num_objects = num_queues + num_events + num_svm_ranges;

if (objs_priv_size) {
priv_size = sizeof(struct kfd_criu_process_priv_data);
priv_size += *num_devices * sizeof(struct kfd_criu_device_priv_data);
priv_size += *num_bos * sizeof(struct kfd_criu_bo_priv_data);
priv_size += queues_priv_data_size;
priv_size += num_events * sizeof(struct kfd_criu_event_priv_data);
priv_size += svm_priv_data_size;
*objs_priv_size = priv_size;
}
return 0;
}
2080
2081
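/* CRIU checkpoint entry point: requires the queues to already be evicted by
* the PROCESS_INFO stage, then dumps process, device, queue, event and SVM
* state. BOs are checkpointed last, into space reserved earlier, so that a
* late failure cannot leak dma-buf fds.
*/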
static int criu_checkpoint(struct file *filep,
2082
struct kfd_process *p,
2083
struct kfd_ioctl_criu_args *args)
2084
{
2085
int ret;
2086
uint32_t num_devices, num_bos, num_objects;
2087
uint64_t priv_size, priv_offset = 0, bo_priv_offset;
2088
2089
if (!args->devices || !args->bos || !args->priv_data)
2090
return -EINVAL;
2091
2092
mutex_lock(&p->mutex);
2093
2094
if (!p->n_pdds) {
2095
pr_err("No pdd for given process\n");
2096
ret = -ENODEV;
2097
goto exit_unlock;
2098
}
2099
2100
/* Confirm all process queues are evicted */
2101
if (!p->queues_paused) {
2102
pr_err("Cannot dump process when queues are not in evicted state\n");
2103
/* CRIU plugin did not call op PROCESS_INFO before checkpointing */
2104
ret = -EINVAL;
2105
goto exit_unlock;
2106
}
2107
2108
ret = criu_get_process_object_info(p, &num_devices, &num_bos, &num_objects, &priv_size);
2109
if (ret)
2110
goto exit_unlock;
2111
2112
if (num_devices != args->num_devices ||
2113
num_bos != args->num_bos ||
2114
num_objects != args->num_objects ||
2115
priv_size != args->priv_data_size) {
2116
2117
ret = -EINVAL;
2118
goto exit_unlock;
2119
}
2120
2121
/* each function will store private data inside priv_data and adjust priv_offset */
2122
ret = criu_checkpoint_process(p, (uint8_t __user *)args->priv_data, &priv_offset);
2123
if (ret)
2124
goto exit_unlock;
2125
2126
ret = criu_checkpoint_devices(p, num_devices, (uint8_t __user *)args->devices,
2127
(uint8_t __user *)args->priv_data, &priv_offset);
2128
if (ret)
2129
goto exit_unlock;
2130
2131
/* Leave room for BOs in the private data. They need to be restored
2132
* before events, but we checkpoint them last to simplify the error
2133
* handling.
2134
*/
2135
bo_priv_offset = priv_offset;
2136
priv_offset += num_bos * sizeof(struct kfd_criu_bo_priv_data);
2137
2138
if (num_objects) {
2139
ret = kfd_criu_checkpoint_queues(p, (uint8_t __user *)args->priv_data,
2140
&priv_offset);
2141
if (ret)
2142
goto exit_unlock;
2143
2144
ret = kfd_criu_checkpoint_events(p, (uint8_t __user *)args->priv_data,
2145
&priv_offset);
2146
if (ret)
2147
goto exit_unlock;
2148
2149
ret = kfd_criu_checkpoint_svm(p, (uint8_t __user *)args->priv_data, &priv_offset);
2150
if (ret)
2151
goto exit_unlock;
2152
}
2153
2154
/* This must be the last thing in this function that can fail.
2155
* Otherwise we leak dmabuf file descriptors.
2156
*/
2157
ret = criu_checkpoint_bos(p, num_bos, (uint8_t __user *)args->bos,
2158
(uint8_t __user *)args->priv_data, &bo_priv_offset);
2159
2160
exit_unlock:
2161
mutex_unlock(&p->mutex);
2162
if (ret)
2163
pr_err("Failed to dump CRIU ret:%d\n", ret);
2164
else
2165
pr_debug("CRIU dump ret:%d\n", ret);
2166
2167
return ret;
2168
}
2169
2170
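/* Restore per-process state from the private data: check the CRIU private
* data version and re-apply the checkpointed XNACK mode.
*/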
static int criu_restore_process(struct kfd_process *p,
2171
struct kfd_ioctl_criu_args *args,
2172
uint64_t *priv_offset,
2173
uint64_t max_priv_data_size)
2174
{
2175
int ret = 0;
2176
struct kfd_criu_process_priv_data process_priv;
2177
2178
if (*priv_offset + sizeof(process_priv) > max_priv_data_size)
2179
return -EINVAL;
2180
2181
ret = copy_from_user(&process_priv,
2182
(void __user *)(args->priv_data + *priv_offset),
2183
sizeof(process_priv));
2184
if (ret) {
2185
pr_err("Failed to copy process private information from user\n");
2186
ret = -EFAULT;
2187
goto exit;
2188
}
2189
*priv_offset += sizeof(process_priv);
2190
2191
if (process_priv.version != KFD_CRIU_PRIV_VERSION) {
2192
pr_err("Invalid CRIU API version (checkpointed:%d current:%d)\n",
2193
process_priv.version, KFD_CRIU_PRIV_VERSION);
2194
return -EINVAL;
2195
}
2196
2197
pr_debug("Setting XNACK mode\n");
2198
if (process_priv.xnack_mode && !kfd_process_xnack_mode(p, true)) {
2199
pr_err("xnack mode cannot be set\n");
2200
ret = -EPERM;
2201
goto exit;
2202
} else {
2203
pr_debug("set xnack mode: %d\n", process_priv.xnack_mode);
2204
p->xnack_enabled = process_priv.xnack_mode;
2205
}
2206
2207
exit:
2208
return ret;
2209
}
2210
2211
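/* Match every checkpointed device bucket to a present GPU, adopt the
* render-node fd passed in by the CRIU plugin, and set up the VM and
* doorbells of the corresponding pdd.
*/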
static int criu_restore_devices(struct kfd_process *p,
2212
struct kfd_ioctl_criu_args *args,
2213
uint64_t *priv_offset,
2214
uint64_t max_priv_data_size)
2215
{
2216
struct kfd_criu_device_bucket *device_buckets;
2217
struct kfd_criu_device_priv_data *device_privs;
2218
int ret = 0;
2219
uint32_t i;
2220
2221
if (args->num_devices != p->n_pdds)
2222
return -EINVAL;
2223
2224
if (*priv_offset + (args->num_devices * sizeof(*device_privs)) > max_priv_data_size)
2225
return -EINVAL;
2226
2227
device_buckets = kmalloc_array(args->num_devices, sizeof(*device_buckets), GFP_KERNEL);
2228
if (!device_buckets)
2229
return -ENOMEM;
2230
2231
ret = copy_from_user(device_buckets, (void __user *)args->devices,
2232
args->num_devices * sizeof(*device_buckets));
2233
if (ret) {
2234
pr_err("Failed to copy devices buckets from user\n");
2235
ret = -EFAULT;
2236
goto exit;
2237
}
2238
2239
for (i = 0; i < args->num_devices; i++) {
2240
struct kfd_node *dev;
2241
struct kfd_process_device *pdd;
2242
struct file *drm_file;
2243
2244
/* device private data is not currently used */
2245
2246
if (!device_buckets[i].user_gpu_id) {
2247
pr_err("Invalid user gpu_id\n");
2248
ret = -EINVAL;
2249
goto exit;
2250
}
2251
2252
dev = kfd_device_by_id(device_buckets[i].actual_gpu_id);
2253
if (!dev) {
2254
pr_err("Failed to find device with gpu_id = %x\n",
2255
device_buckets[i].actual_gpu_id);
2256
ret = -EINVAL;
2257
goto exit;
2258
}
2259
2260
pdd = kfd_get_process_device_data(dev, p);
2261
if (!pdd) {
2262
pr_err("Failed to get pdd for gpu_id = %x\n",
2263
device_buckets[i].actual_gpu_id);
2264
ret = -EINVAL;
2265
goto exit;
2266
}
2267
pdd->user_gpu_id = device_buckets[i].user_gpu_id;
2268
2269
drm_file = fget(device_buckets[i].drm_fd);
2270
if (!drm_file) {
2271
pr_err("Invalid render node file descriptor sent from plugin (%d)\n",
2272
device_buckets[i].drm_fd);
2273
ret = -EINVAL;
2274
goto exit;
2275
}
2276
2277
if (pdd->drm_file) {
2278
ret = -EINVAL;
2279
goto exit;
2280
}
2281
2282
/* create the vm using render nodes for kfd pdd */
2283
if (kfd_process_device_init_vm(pdd, drm_file)) {
2284
pr_err("could not init vm for given pdd\n");
2285
/* On success, the PDD keeps the drm_file reference */
2286
fput(drm_file);
2287
ret = -EINVAL;
2288
goto exit;
2289
}
2290
/*
* pdd already has the VM bound to its render node, so the call below will
* not create a new exclusive KFD mapping but reuse the existing one with
* renderDXXX. It is still needed for iommu v2 binding and runtime pm.
*/
2295
pdd = kfd_bind_process_to_device(dev, p);
2296
if (IS_ERR(pdd)) {
2297
ret = PTR_ERR(pdd);
2298
goto exit;
2299
}
2300
2301
if (!pdd->qpd.proc_doorbells) {
2302
ret = kfd_alloc_process_doorbells(dev->kfd, pdd);
2303
if (ret)
2304
goto exit;
2305
}
2306
}
2307
2308
/*
2309
* We are not copying device private data from user as we are not using the data for now,
2310
* but we still adjust for its private data.
2311
*/
2312
*priv_offset += args->num_devices * sizeof(*device_privs);
2313
2314
exit:
2315
kfree(device_buckets);
2316
return ret;
2317
}
2318
2319
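/* Re-create one BO from its checkpointed bucket, restore its original IDR
* handle and report the mmap offset user space must use after restore.
*/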
static int criu_restore_memory_of_gpu(struct kfd_process_device *pdd,
2320
struct kfd_criu_bo_bucket *bo_bucket,
2321
struct kfd_criu_bo_priv_data *bo_priv,
2322
struct kgd_mem **kgd_mem)
2323
{
2324
int idr_handle;
2325
int ret;
2326
const bool criu_resume = true;
2327
u64 offset;
2328
2329
if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
2330
if (bo_bucket->size !=
2331
kfd_doorbell_process_slice(pdd->dev->kfd))
2332
return -EINVAL;
2333
2334
offset = kfd_get_process_doorbells(pdd);
2335
if (!offset)
2336
return -ENOMEM;
2337
} else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
2338
/* MMIO BOs need remapped bus address */
2339
if (bo_bucket->size != PAGE_SIZE) {
2340
pr_err("Invalid page size\n");
2341
return -EINVAL;
2342
}
2343
offset = pdd->dev->adev->rmmio_remap.bus_addr;
2344
if (!offset || (PAGE_SIZE > 4096)) {
2345
pr_err("amdgpu_amdkfd_get_mmio_remap_phys_addr failed\n");
2346
return -ENOMEM;
2347
}
2348
} else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
2349
offset = bo_priv->user_addr;
2350
}
2351
/* Create the BO */
2352
ret = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(pdd->dev->adev, bo_bucket->addr,
2353
bo_bucket->size, pdd->drm_priv, kgd_mem,
2354
&offset, bo_bucket->alloc_flags, criu_resume);
2355
if (ret) {
2356
pr_err("Could not create the BO\n");
2357
return ret;
2358
}
2359
pr_debug("New BO created: size:0x%llx addr:0x%llx offset:0x%llx\n",
2360
bo_bucket->size, bo_bucket->addr, offset);
2361
2362
/* Restore previous IDR handle */
2363
pr_debug("Restoring old IDR handle for the BO");
2364
idr_handle = idr_alloc(&pdd->alloc_idr, *kgd_mem, bo_priv->idr_handle,
2365
bo_priv->idr_handle + 1, GFP_KERNEL);
2366
2367
if (idr_handle < 0) {
2368
pr_err("Could not allocate idr\n");
2369
amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, *kgd_mem, pdd->drm_priv,
2370
NULL);
2371
return -ENOMEM;
2372
}
2373
2374
if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL)
2375
bo_bucket->restored_offset = KFD_MMAP_TYPE_DOORBELL | KFD_MMAP_GPU_ID(pdd->dev->id);
2376
if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
2377
bo_bucket->restored_offset = KFD_MMAP_TYPE_MMIO | KFD_MMAP_GPU_ID(pdd->dev->id);
2378
} else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
2379
bo_bucket->restored_offset = offset;
2380
} else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
2381
bo_bucket->restored_offset = offset;
2382
/* Update the VRAM usage count */
2383
atomic64_add(bo_bucket->size, &pdd->vram_usage);
2384
}
2385
return 0;
2386
}
2387
2388
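/* Restore a single BO: allocate it on its GPU, map it on every GPU it was
* mapped on at checkpoint time and, for VRAM/GTT BOs, export a dma-buf fd
* so the plugin can restore the contents.
*/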
static int criu_restore_bo(struct kfd_process *p,
2389
struct kfd_criu_bo_bucket *bo_bucket,
2390
struct kfd_criu_bo_priv_data *bo_priv,
2391
struct file **file)
2392
{
2393
struct kfd_process_device *pdd;
2394
struct kgd_mem *kgd_mem;
2395
int ret;
2396
int j;
2397
2398
pr_debug("Restoring BO size:0x%llx addr:0x%llx gpu_id:0x%x flags:0x%x idr_handle:0x%x\n",
2399
bo_bucket->size, bo_bucket->addr, bo_bucket->gpu_id, bo_bucket->alloc_flags,
2400
bo_priv->idr_handle);
2401
2402
pdd = kfd_process_device_data_by_id(p, bo_bucket->gpu_id);
2403
if (!pdd) {
2404
pr_err("Failed to get pdd\n");
2405
return -ENODEV;
2406
}
2407
2408
ret = criu_restore_memory_of_gpu(pdd, bo_bucket, bo_priv, &kgd_mem);
2409
if (ret)
2410
return ret;
2411
2412
/* now map these BOs to GPU/s */
2413
for (j = 0; j < p->n_pdds; j++) {
2414
struct kfd_node *peer;
2415
struct kfd_process_device *peer_pdd;
2416
2417
if (!bo_priv->mapped_gpuids[j])
2418
break;
2419
2420
peer_pdd = kfd_process_device_data_by_id(p, bo_priv->mapped_gpuids[j]);
2421
if (!peer_pdd)
2422
return -EINVAL;
2423
2424
peer = peer_pdd->dev;
2425
2426
peer_pdd = kfd_bind_process_to_device(peer, p);
2427
if (IS_ERR(peer_pdd))
2428
return PTR_ERR(peer_pdd);
2429
2430
ret = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(peer->adev, kgd_mem,
2431
peer_pdd->drm_priv);
2432
if (ret) {
2433
pr_err("Failed to map to gpu %d/%d\n", j, p->n_pdds);
2434
return ret;
2435
}
2436
}
2437
2438
pr_debug("map memory was successful for the BO\n");
2439
/* create the dmabuf object and export the bo */
2440
if (bo_bucket->alloc_flags
2441
& (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT)) {
2442
ret = criu_get_prime_handle(kgd_mem, DRM_RDWR,
2443
&bo_bucket->dmabuf_fd, file);
2444
if (ret)
2445
return ret;
2446
} else {
2447
bo_bucket->dmabuf_fd = KFD_INVALID_FD;
2448
}
2449
2450
return 0;
2451
}
2452
2453
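/* Restore all BOs described in args->bos and copy the buckets back so user
* space can read the restored mmap offsets. MMU notifications stay blocked
* until the CRIU_RESUME stage.
*/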
static int criu_restore_bos(struct kfd_process *p,
2454
struct kfd_ioctl_criu_args *args,
2455
uint64_t *priv_offset,
2456
uint64_t max_priv_data_size)
2457
{
2458
struct kfd_criu_bo_bucket *bo_buckets = NULL;
2459
struct kfd_criu_bo_priv_data *bo_privs = NULL;
2460
struct file **files = NULL;
2461
int ret = 0;
2462
uint32_t i = 0;
2463
2464
if (*priv_offset + (args->num_bos * sizeof(*bo_privs)) > max_priv_data_size)
2465
return -EINVAL;
2466
2467
/* Prevent MMU notifications until stage-4 IOCTL (CRIU_RESUME) is received */
2468
amdgpu_amdkfd_block_mmu_notifications(p->kgd_process_info);
2469
2470
bo_buckets = kvmalloc_array(args->num_bos, sizeof(*bo_buckets), GFP_KERNEL);
2471
if (!bo_buckets)
2472
return -ENOMEM;
2473
2474
files = kvzalloc(args->num_bos * sizeof(struct file *), GFP_KERNEL);
2475
if (!files) {
2476
ret = -ENOMEM;
2477
goto exit;
2478
}
2479
2480
ret = copy_from_user(bo_buckets, (void __user *)args->bos,
2481
args->num_bos * sizeof(*bo_buckets));
2482
if (ret) {
2483
pr_err("Failed to copy BOs information from user\n");
2484
ret = -EFAULT;
2485
goto exit;
2486
}
2487
2488
bo_privs = kvmalloc_array(args->num_bos, sizeof(*bo_privs), GFP_KERNEL);
2489
if (!bo_privs) {
2490
ret = -ENOMEM;
2491
goto exit;
2492
}
2493
2494
ret = copy_from_user(bo_privs, (void __user *)args->priv_data + *priv_offset,
2495
args->num_bos * sizeof(*bo_privs));
2496
if (ret) {
2497
pr_err("Failed to copy BOs information from user\n");
2498
ret = -EFAULT;
2499
goto exit;
2500
}
2501
*priv_offset += args->num_bos * sizeof(*bo_privs);
2502
2503
/* Create and map new BOs */
2504
for (; i < args->num_bos; i++) {
2505
ret = criu_restore_bo(p, &bo_buckets[i], &bo_privs[i], &files[i]);
2506
if (ret) {
2507
pr_debug("Failed to restore BO[%d] ret%d\n", i, ret);
2508
goto exit;
2509
}
2510
} /* done */
2511
2512
/* Copy only the buckets back so user can read bo_buckets[N].restored_offset */
2513
ret = copy_to_user((void __user *)args->bos,
2514
bo_buckets,
2515
(args->num_bos * sizeof(*bo_buckets)));
2516
if (ret)
2517
ret = -EFAULT;
2518
2519
exit:
2520
commit_files(files, bo_buckets, i, ret);
2521
kvfree(files);
2522
kvfree(bo_buckets);
2523
kvfree(bo_privs);
2524
return ret;
2525
}
2526
2527
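/* Restore the remaining checkpointed objects; each entry in the private
* data is dispatched on its object_type (queue, event or SVM range).
*/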
static int criu_restore_objects(struct file *filep,
2528
struct kfd_process *p,
2529
struct kfd_ioctl_criu_args *args,
2530
uint64_t *priv_offset,
2531
uint64_t max_priv_data_size)
2532
{
2533
int ret = 0;
2534
uint32_t i;
2535
2536
BUILD_BUG_ON(offsetof(struct kfd_criu_queue_priv_data, object_type));
2537
BUILD_BUG_ON(offsetof(struct kfd_criu_event_priv_data, object_type));
2538
BUILD_BUG_ON(offsetof(struct kfd_criu_svm_range_priv_data, object_type));
2539
2540
for (i = 0; i < args->num_objects; i++) {
2541
uint32_t object_type;
2542
2543
if (*priv_offset + sizeof(object_type) > max_priv_data_size) {
2544
pr_err("Invalid private data size\n");
2545
return -EINVAL;
2546
}
2547
2548
ret = get_user(object_type, (uint32_t __user *)(args->priv_data + *priv_offset));
2549
if (ret) {
2550
pr_err("Failed to copy private information from user\n");
2551
goto exit;
2552
}
2553
2554
switch (object_type) {
2555
case KFD_CRIU_OBJECT_TYPE_QUEUE:
2556
ret = kfd_criu_restore_queue(p, (uint8_t __user *)args->priv_data,
2557
priv_offset, max_priv_data_size);
2558
if (ret)
2559
goto exit;
2560
break;
2561
case KFD_CRIU_OBJECT_TYPE_EVENT:
2562
ret = kfd_criu_restore_event(filep, p, (uint8_t __user *)args->priv_data,
2563
priv_offset, max_priv_data_size);
2564
if (ret)
2565
goto exit;
2566
break;
2567
case KFD_CRIU_OBJECT_TYPE_SVM_RANGE:
2568
ret = kfd_criu_restore_svm(p, (uint8_t __user *)args->priv_data,
2569
priv_offset, max_priv_data_size);
2570
if (ret)
2571
goto exit;
2572
break;
2573
default:
2574
pr_err("Invalid object type:%u at index:%d\n", object_type, i);
2575
ret = -EINVAL;
2576
goto exit;
2577
}
2578
}
2579
exit:
2580
return ret;
2581
}
2582
2583
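/* CRIU restore entry point: evict the process queues, then restore process
* state, devices, BOs and the remaining objects in that order, and verify
* that the whole private data blob was consumed.
*/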
static int criu_restore(struct file *filep,
2584
struct kfd_process *p,
2585
struct kfd_ioctl_criu_args *args)
2586
{
2587
uint64_t priv_offset = 0;
2588
int ret = 0;
2589
2590
pr_debug("CRIU restore (num_devices:%u num_bos:%u num_objects:%u priv_data_size:%llu)\n",
2591
args->num_devices, args->num_bos, args->num_objects, args->priv_data_size);
2592
2593
if ((args->num_bos > 0 && !args->bos) || !args->devices || !args->priv_data ||
2594
!args->priv_data_size || !args->num_devices)
2595
return -EINVAL;
2596
2597
mutex_lock(&p->mutex);
2598
2599
/*
2600
* Set the process to evicted state to avoid running any new queues before all the memory
2601
* mappings are ready.
2602
*/
2603
ret = kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_CRIU_RESTORE);
2604
if (ret)
2605
goto exit_unlock;
2606
2607
/* Each function will adjust priv_offset based on how many bytes they consumed */
2608
ret = criu_restore_process(p, args, &priv_offset, args->priv_data_size);
2609
if (ret)
2610
goto exit_unlock;
2611
2612
ret = criu_restore_devices(p, args, &priv_offset, args->priv_data_size);
2613
if (ret)
2614
goto exit_unlock;
2615
2616
ret = criu_restore_bos(p, args, &priv_offset, args->priv_data_size);
2617
if (ret)
2618
goto exit_unlock;
2619
2620
ret = criu_restore_objects(filep, p, args, &priv_offset, args->priv_data_size);
2621
if (ret)
2622
goto exit_unlock;
2623
2624
if (priv_offset != args->priv_data_size) {
2625
pr_err("Invalid private data size\n");
2626
ret = -EINVAL;
2627
}
2628
2629
exit_unlock:
2630
mutex_unlock(&p->mutex);
2631
if (ret)
2632
pr_err("Failed to restore CRIU ret:%d\n", ret);
2633
else
2634
pr_debug("CRIU restore successful\n");
2635
2636
return ret;
2637
}
2638
2639
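/* CRIU_OP_UNPAUSE: restart the queues that PROCESS_INFO evicted, used on
* the checkpoint side once the dump has been written.
*/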
static int criu_unpause(struct file *filep,
struct kfd_process *p,
struct kfd_ioctl_criu_args *args)
{
int ret;

mutex_lock(&p->mutex);

if (!p->queues_paused) {
mutex_unlock(&p->mutex);
return -EINVAL;
}

ret = kfd_process_restore_queues(p);
if (ret)
pr_err("Failed to unpause queues ret:%d\n", ret);
else
p->queues_paused = false;

mutex_unlock(&p->mutex);

return ret;
}
2662
2663
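/* CRIU_OP_RESUME: final restore stage, addressed by target pid. Resumes SVM
* handling and lets amdgpu finish the restore of the target process.
*/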
static int criu_resume(struct file *filep,
2664
struct kfd_process *p,
2665
struct kfd_ioctl_criu_args *args)
2666
{
2667
struct kfd_process *target = NULL;
2668
struct pid *pid = NULL;
2669
int ret = 0;
2670
2671
pr_debug("Inside %s, target pid for criu restore: %d\n", __func__,
2672
args->pid);
2673
2674
pid = find_get_pid(args->pid);
2675
if (!pid) {
2676
pr_err("Cannot find pid info for %i\n", args->pid);
2677
return -ESRCH;
2678
}
2679
2680
pr_debug("calling kfd_lookup_process_by_pid\n");
2681
target = kfd_lookup_process_by_pid(pid);
2682
2683
put_pid(pid);
2684
2685
if (!target) {
2686
pr_debug("Cannot find process info for %i\n", args->pid);
2687
return -ESRCH;
2688
}
2689
2690
mutex_lock(&target->mutex);
2691
ret = kfd_criu_resume_svm(target);
2692
if (ret) {
2693
pr_err("kfd_criu_resume_svm failed for %i\n", args->pid);
2694
goto exit;
2695
}
2696
2697
ret = amdgpu_amdkfd_criu_resume(target->kgd_process_info);
2698
if (ret)
2699
pr_err("amdgpu_amdkfd_criu_resume failed for %i\n", args->pid);
2700
2701
exit:
2702
mutex_unlock(&target->mutex);
2703
2704
kfd_unref_process(target);
2705
return ret;
2706
}
2707
2708
static int criu_process_info(struct file *filep,
2709
struct kfd_process *p,
2710
struct kfd_ioctl_criu_args *args)
2711
{
2712
int ret = 0;
2713
2714
mutex_lock(&p->mutex);
2715
2716
if (!p->n_pdds) {
2717
pr_err("No pdd for given process\n");
2718
ret = -ENODEV;
2719
goto err_unlock;
2720
}
2721
2722
ret = kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_CRIU_CHECKPOINT);
2723
if (ret)
2724
goto err_unlock;
2725
2726
p->queues_paused = true;
2727
2728
args->pid = task_pid_nr_ns(p->lead_thread,
2729
task_active_pid_ns(p->lead_thread));
2730
2731
ret = criu_get_process_object_info(p, &args->num_devices, &args->num_bos,
2732
&args->num_objects, &args->priv_data_size);
2733
if (ret)
2734
goto err_unlock;
2735
2736
dev_dbg(kfd_device, "Num of devices:%u bos:%u objects:%u priv_data_size:%lld\n",
2737
args->num_devices, args->num_bos, args->num_objects,
2738
args->priv_data_size);
2739
2740
err_unlock:
2741
if (ret) {
2742
kfd_process_restore_queues(p);
2743
p->queues_paused = false;
2744
}
2745
mutex_unlock(&p->mutex);
2746
return ret;
2747
}
2748
2749
static int kfd_ioctl_criu(struct file *filep, struct kfd_process *p, void *data)
2750
{
2751
struct kfd_ioctl_criu_args *args = data;
2752
int ret;
2753
2754
dev_dbg(kfd_device, "CRIU operation: %d\n", args->op);
2755
switch (args->op) {
2756
case KFD_CRIU_OP_PROCESS_INFO:
2757
ret = criu_process_info(filep, p, args);
2758
break;
2759
case KFD_CRIU_OP_CHECKPOINT:
2760
ret = criu_checkpoint(filep, p, args);
2761
break;
2762
case KFD_CRIU_OP_UNPAUSE:
2763
ret = criu_unpause(filep, p, args);
2764
break;
2765
case KFD_CRIU_OP_RESTORE:
2766
ret = criu_restore(filep, p, args);
2767
break;
2768
case KFD_CRIU_OP_RESUME:
2769
ret = criu_resume(filep, p, args);
2770
break;
2771
default:
2772
dev_dbg(kfd_device, "Unsupported CRIU operation:%d\n", args->op);
2773
ret = -EINVAL;
2774
break;
2775
}
2776
2777
if (ret)
2778
dev_dbg(kfd_device, "CRIU operation:%d err:%d\n", args->op, ret);
2779
2780
return ret;
2781
}
2782
2783
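/* Runtime enable requested by the user-mode runtime: record the r_debug
* pointer, optionally set up TTMPs, and if a debugger is attached wait for
* it to acknowledge the runtime-enable event.
*/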
static int runtime_enable(struct kfd_process *p, uint64_t r_debug,
2784
bool enable_ttmp_setup)
2785
{
2786
int i = 0, ret = 0;
2787
2788
if (p->is_runtime_retry)
2789
goto retry;
2790
2791
if (p->runtime_info.runtime_state != DEBUG_RUNTIME_STATE_DISABLED)
2792
return -EBUSY;
2793
2794
for (i = 0; i < p->n_pdds; i++) {
2795
struct kfd_process_device *pdd = p->pdds[i];
2796
2797
if (pdd->qpd.queue_count)
2798
return -EEXIST;
2799
2800
/*
2801
* Setup TTMPs by default.
2802
* Note that this call must remain here for MES ADD QUEUE to
2803
* skip_process_ctx_clear unconditionally as the first call to
2804
* SET_SHADER_DEBUGGER clears any stale process context data
2805
* saved in MES.
2806
*/
2807
if (pdd->dev->kfd->shared_resources.enable_mes)
2808
kfd_dbg_set_mes_debug_mode(pdd, !kfd_dbg_has_cwsr_workaround(pdd->dev));
2809
}
2810
2811
p->runtime_info.runtime_state = DEBUG_RUNTIME_STATE_ENABLED;
2812
p->runtime_info.r_debug = r_debug;
2813
p->runtime_info.ttmp_setup = enable_ttmp_setup;
2814
2815
if (p->runtime_info.ttmp_setup) {
2816
for (i = 0; i < p->n_pdds; i++) {
2817
struct kfd_process_device *pdd = p->pdds[i];
2818
2819
if (!kfd_dbg_is_rlc_restore_supported(pdd->dev)) {
2820
amdgpu_gfx_off_ctrl(pdd->dev->adev, false);
2821
pdd->dev->kfd2kgd->enable_debug_trap(
2822
pdd->dev->adev,
2823
true,
2824
pdd->dev->vm_info.last_vmid_kfd);
2825
} else if (kfd_dbg_is_per_vmid_supported(pdd->dev)) {
2826
pdd->spi_dbg_override = pdd->dev->kfd2kgd->enable_debug_trap(
2827
pdd->dev->adev,
2828
false,
2829
0);
2830
}
2831
}
2832
}
2833
2834
retry:
2835
if (p->debug_trap_enabled) {
2836
if (!p->is_runtime_retry) {
2837
kfd_dbg_trap_activate(p);
2838
kfd_dbg_ev_raise(KFD_EC_MASK(EC_PROCESS_RUNTIME),
2839
p, NULL, 0, false, NULL, 0);
2840
}
2841
2842
mutex_unlock(&p->mutex);
2843
ret = down_interruptible(&p->runtime_enable_sema);
2844
mutex_lock(&p->mutex);
2845
2846
p->is_runtime_retry = !!ret;
2847
}
2848
2849
return ret;
2850
}
2851
2852
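/* Inverse of runtime_enable(): notify an attached debugger, undo the TTMP
* setup and restore the per-VMID debug trap settings.
*/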
static int runtime_disable(struct kfd_process *p)
2853
{
2854
int i = 0, ret = 0;
2855
bool was_enabled = p->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED;
2856
2857
p->runtime_info.runtime_state = DEBUG_RUNTIME_STATE_DISABLED;
2858
p->runtime_info.r_debug = 0;
2859
2860
if (p->debug_trap_enabled) {
2861
if (was_enabled)
2862
kfd_dbg_trap_deactivate(p, false, 0);
2863
2864
if (!p->is_runtime_retry)
2865
kfd_dbg_ev_raise(KFD_EC_MASK(EC_PROCESS_RUNTIME),
2866
p, NULL, 0, false, NULL, 0);
2867
2868
mutex_unlock(&p->mutex);
2869
ret = down_interruptible(&p->runtime_enable_sema);
2870
mutex_lock(&p->mutex);
2871
2872
p->is_runtime_retry = !!ret;
2873
if (ret)
2874
return ret;
2875
}
2876
2877
if (was_enabled && p->runtime_info.ttmp_setup) {
2878
for (i = 0; i < p->n_pdds; i++) {
2879
struct kfd_process_device *pdd = p->pdds[i];
2880
2881
if (!kfd_dbg_is_rlc_restore_supported(pdd->dev))
2882
amdgpu_gfx_off_ctrl(pdd->dev->adev, true);
2883
}
2884
}
2885
2886
p->runtime_info.ttmp_setup = false;
2887
2888
/* disable ttmp setup */
2889
for (i = 0; i < p->n_pdds; i++) {
2890
struct kfd_process_device *pdd = p->pdds[i];
2891
int last_err = 0;
2892
2893
if (kfd_dbg_is_per_vmid_supported(pdd->dev)) {
2894
pdd->spi_dbg_override =
2895
pdd->dev->kfd2kgd->disable_debug_trap(
2896
pdd->dev->adev,
2897
false,
2898
pdd->dev->vm_info.last_vmid_kfd);
2899
2900
if (!pdd->dev->kfd->shared_resources.enable_mes)
2901
last_err = debug_refresh_runlist(pdd->dev->dqm);
2902
else
2903
last_err = kfd_dbg_set_mes_debug_mode(pdd,
2904
!kfd_dbg_has_cwsr_workaround(pdd->dev));
2905
2906
if (last_err)
2907
ret = last_err;
2908
}
2909
}
2910
2911
return ret;
2912
}
2913
2914
static int kfd_ioctl_runtime_enable(struct file *filep, struct kfd_process *p, void *data)
{
struct kfd_ioctl_runtime_enable_args *args = data;
int r;

mutex_lock(&p->mutex);

if (args->mode_mask & KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK)
r = runtime_enable(p, args->r_debug,
!!(args->mode_mask & KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK));
else
r = runtime_disable(p);

mutex_unlock(&p->mutex);

return r;
}
2931
2932
static int kfd_ioctl_set_debug_trap(struct file *filep, struct kfd_process *p, void *data)
2933
{
2934
struct kfd_ioctl_dbg_trap_args *args = data;
2935
struct task_struct *thread = NULL;
2936
struct mm_struct *mm = NULL;
2937
struct pid *pid = NULL;
2938
struct kfd_process *target = NULL;
2939
struct kfd_process_device *pdd = NULL;
2940
int r = 0;
2941
2942
if (p->context_id != KFD_CONTEXT_ID_PRIMARY) {
pr_debug("Set debug trap ioctl cannot be invoked on a non-primary kfd process\n");

return -EOPNOTSUPP;
}

if (sched_policy == KFD_SCHED_POLICY_NO_HWS) {
pr_err("Debugging does not support sched_policy %i\n", sched_policy);
return -EINVAL;
}
2952
2953
pid = find_get_pid(args->pid);
2954
if (!pid) {
2955
pr_debug("Cannot find pid info for %i\n", args->pid);
2956
r = -ESRCH;
2957
goto out;
2958
}
2959
2960
thread = get_pid_task(pid, PIDTYPE_PID);
2961
if (!thread) {
2962
r = -ESRCH;
2963
goto out;
2964
}
2965
2966
mm = get_task_mm(thread);
2967
if (!mm) {
2968
r = -ESRCH;
2969
goto out;
2970
}
2971
2972
if (args->op == KFD_IOC_DBG_TRAP_ENABLE) {
2973
bool create_process;
2974
2975
rcu_read_lock();
2976
create_process = thread && thread != current && ptrace_parent(thread) == current;
2977
rcu_read_unlock();
2978
2979
target = create_process ? kfd_create_process(thread) :
2980
kfd_lookup_process_by_pid(pid);
2981
} else {
2982
target = kfd_lookup_process_by_pid(pid);
2983
}
2984
2985
if (IS_ERR_OR_NULL(target)) {
2986
pr_debug("Cannot find process PID %i to debug\n", args->pid);
2987
r = target ? PTR_ERR(target) : -ESRCH;
2988
target = NULL;
2989
goto out;
2990
}
2991
2992
if (target->context_id != KFD_CONTEXT_ID_PRIMARY) {
2993
pr_debug("Set debug trap ioctl not supported on non-primary kfd process\n");
2994
r = -EOPNOTSUPP;
2995
goto out;
2996
}
2997
2998
/* Check if target is still PTRACED. */
2999
rcu_read_lock();
3000
if (target != p && args->op != KFD_IOC_DBG_TRAP_DISABLE
3001
&& ptrace_parent(target->lead_thread) != current) {
3002
pr_err("PID %i is not PTRACED and cannot be debugged\n", args->pid);
3003
r = -EPERM;
3004
}
3005
rcu_read_unlock();
3006
3007
if (r)
3008
goto out;
3009
3010
mutex_lock(&target->mutex);
3011
3012
if (args->op != KFD_IOC_DBG_TRAP_ENABLE && !target->debug_trap_enabled) {
3013
pr_err("PID %i not debug enabled for op %i\n", args->pid, args->op);
3014
r = -EINVAL;
3015
goto unlock_out;
3016
}
3017
3018
if (target->runtime_info.runtime_state != DEBUG_RUNTIME_STATE_ENABLED &&
3019
(args->op == KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE ||
3020
args->op == KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE ||
3021
args->op == KFD_IOC_DBG_TRAP_SUSPEND_QUEUES ||
3022
args->op == KFD_IOC_DBG_TRAP_RESUME_QUEUES ||
3023
args->op == KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH ||
3024
args->op == KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH ||
3025
args->op == KFD_IOC_DBG_TRAP_SET_FLAGS)) {
3026
r = -EPERM;
3027
goto unlock_out;
3028
}
3029
3030
if (args->op == KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH ||
3031
args->op == KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH) {
3032
int user_gpu_id = kfd_process_get_user_gpu_id(target,
3033
args->op == KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH ?
3034
args->set_node_address_watch.gpu_id :
3035
args->clear_node_address_watch.gpu_id);
3036
3037
pdd = kfd_process_device_data_by_id(target, user_gpu_id);
3038
if (user_gpu_id == -EINVAL || !pdd) {
3039
r = -ENODEV;
3040
goto unlock_out;
3041
}
3042
}
3043
3044
switch (args->op) {
3045
case KFD_IOC_DBG_TRAP_ENABLE:
3046
if (target != p)
3047
target->debugger_process = p;
3048
3049
r = kfd_dbg_trap_enable(target,
3050
args->enable.dbg_fd,
3051
(void __user *)args->enable.rinfo_ptr,
3052
&args->enable.rinfo_size);
3053
if (!r)
3054
target->exception_enable_mask = args->enable.exception_mask;
3055
3056
break;
3057
case KFD_IOC_DBG_TRAP_DISABLE:
3058
r = kfd_dbg_trap_disable(target);
3059
break;
3060
case KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT:
3061
r = kfd_dbg_send_exception_to_runtime(target,
3062
args->send_runtime_event.gpu_id,
3063
args->send_runtime_event.queue_id,
3064
args->send_runtime_event.exception_mask);
3065
break;
3066
case KFD_IOC_DBG_TRAP_SET_EXCEPTIONS_ENABLED:
3067
kfd_dbg_set_enabled_debug_exception_mask(target,
3068
args->set_exceptions_enabled.exception_mask);
3069
break;
3070
case KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE:
3071
r = kfd_dbg_trap_set_wave_launch_override(target,
3072
args->launch_override.override_mode,
3073
args->launch_override.enable_mask,
3074
args->launch_override.support_request_mask,
3075
&args->launch_override.enable_mask,
3076
&args->launch_override.support_request_mask);
3077
break;
3078
case KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE:
3079
r = kfd_dbg_trap_set_wave_launch_mode(target,
3080
args->launch_mode.launch_mode);
3081
break;
3082
case KFD_IOC_DBG_TRAP_SUSPEND_QUEUES:
3083
r = suspend_queues(target,
3084
args->suspend_queues.num_queues,
3085
args->suspend_queues.grace_period,
3086
args->suspend_queues.exception_mask,
3087
(uint32_t *)args->suspend_queues.queue_array_ptr);
3088
3089
break;
3090
case KFD_IOC_DBG_TRAP_RESUME_QUEUES:
3091
r = resume_queues(target, args->resume_queues.num_queues,
3092
(uint32_t *)args->resume_queues.queue_array_ptr);
3093
break;
3094
case KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH:
3095
r = kfd_dbg_trap_set_dev_address_watch(pdd,
3096
args->set_node_address_watch.address,
3097
args->set_node_address_watch.mask,
3098
&args->set_node_address_watch.id,
3099
args->set_node_address_watch.mode);
3100
break;
3101
case KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH:
3102
r = kfd_dbg_trap_clear_dev_address_watch(pdd,
3103
args->clear_node_address_watch.id);
3104
break;
3105
case KFD_IOC_DBG_TRAP_SET_FLAGS:
3106
r = kfd_dbg_trap_set_flags(target, &args->set_flags.flags);
3107
break;
3108
case KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT:
3109
r = kfd_dbg_ev_query_debug_event(target,
3110
&args->query_debug_event.queue_id,
3111
&args->query_debug_event.gpu_id,
3112
args->query_debug_event.exception_mask,
3113
&args->query_debug_event.exception_mask);
3114
break;
3115
case KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO:
3116
r = kfd_dbg_trap_query_exception_info(target,
3117
args->query_exception_info.source_id,
3118
args->query_exception_info.exception_code,
3119
args->query_exception_info.clear_exception,
3120
(void __user *)args->query_exception_info.info_ptr,
3121
&args->query_exception_info.info_size);
3122
break;
3123
case KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT:
3124
r = pqm_get_queue_snapshot(&target->pqm,
3125
args->queue_snapshot.exception_mask,
3126
(void __user *)args->queue_snapshot.snapshot_buf_ptr,
3127
&args->queue_snapshot.num_queues,
3128
&args->queue_snapshot.entry_size);
3129
break;
3130
case KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT:
3131
r = kfd_dbg_trap_device_snapshot(target,
3132
args->device_snapshot.exception_mask,
3133
(void __user *)args->device_snapshot.snapshot_buf_ptr,
3134
&args->device_snapshot.num_devices,
3135
&args->device_snapshot.entry_size);
3136
break;
3137
default:
3138
pr_err("Invalid option: %i\n", args->op);
3139
r = -EINVAL;
3140
}
3141
3142
unlock_out:
3143
mutex_unlock(&target->mutex);
3144
3145
out:
3146
if (thread)
3147
put_task_struct(thread);
3148
3149
if (mm)
3150
mmput(mm);
3151
3152
if (pid)
3153
put_pid(pid);
3154
3155
if (target)
3156
kfd_unref_process(target);
3157
3158
return r;
3159
}
3160
3161
/* Userspace programs need to invoke this ioctl explicitly on an FD to
* create a secondary kfd_process that replaces its primary kfd_process.
*/
3164
static int kfd_ioctl_create_process(struct file *filep, struct kfd_process *p, void *data)
3165
{
3166
struct kfd_process *process;
3167
int ret;
3168
3169
/* Each FD owns only one kfd_process */
3170
if (p->context_id != KFD_CONTEXT_ID_PRIMARY)
3171
return -EINVAL;
3172
3173
if (!filep->private_data || !p)
3174
return -EINVAL;
3175
3176
mutex_lock(&kfd_processes_mutex);
3177
if (p != filep->private_data) {
3178
mutex_unlock(&kfd_processes_mutex);
3179
return -EINVAL;
3180
}
3181
3182
process = create_process(current, false);
3183
if (IS_ERR(process)) {
3184
mutex_unlock(&kfd_processes_mutex);
3185
return PTR_ERR(process);
3186
}
3187
3188
filep->private_data = process;
3189
mutex_unlock(&kfd_processes_mutex);
3190
3191
ret = kfd_create_process_sysfs(process);
3192
if (ret)
3193
pr_warn("Failed to create sysfs entry for the kfd_process");
3194
3195
/* Each open() increases kref of the primary kfd_process,
3196
* so we need to reduce it here when we create a new secondary process replacing it
3197
*/
3198
kfd_unref_process(p);
3199
3200
return 0;
3201
}
3202
3203
#define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
3204
[_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, \
3205
.cmd_drv = 0, .name = #ioctl}
3206
3207
/** Ioctl table */
3208
static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
3209
AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_VERSION,
3210
kfd_ioctl_get_version, 0),
3211
3212
AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_QUEUE,
3213
kfd_ioctl_create_queue, 0),
3214
3215
AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_QUEUE,
3216
kfd_ioctl_destroy_queue, 0),
3217
3218
AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_MEMORY_POLICY,
3219
kfd_ioctl_set_memory_policy, 0),
3220
3221
AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_CLOCK_COUNTERS,
3222
kfd_ioctl_get_clock_counters, 0),
3223
3224
AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES,
3225
kfd_ioctl_get_process_apertures, 0),
3226
3227
AMDKFD_IOCTL_DEF(AMDKFD_IOC_UPDATE_QUEUE,
3228
kfd_ioctl_update_queue, 0),
3229
3230
AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_EVENT,
3231
kfd_ioctl_create_event, 0),
3232
3233
AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_EVENT,
3234
kfd_ioctl_destroy_event, 0),
3235
3236
AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_EVENT,
3237
kfd_ioctl_set_event, 0),
3238
3239
AMDKFD_IOCTL_DEF(AMDKFD_IOC_RESET_EVENT,
3240
kfd_ioctl_reset_event, 0),
3241
3242
AMDKFD_IOCTL_DEF(AMDKFD_IOC_WAIT_EVENTS,
3243
kfd_ioctl_wait_events, 0),
3244
3245
AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_REGISTER_DEPRECATED,
3246
kfd_ioctl_dbg_register, 0),
3247
3248
AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_UNREGISTER_DEPRECATED,
3249
kfd_ioctl_dbg_unregister, 0),
3250
3251
AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_ADDRESS_WATCH_DEPRECATED,
3252
kfd_ioctl_dbg_address_watch, 0),
3253
3254
AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_WAVE_CONTROL_DEPRECATED,
3255
kfd_ioctl_dbg_wave_control, 0),
3256
3257
AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_SCRATCH_BACKING_VA,
3258
kfd_ioctl_set_scratch_backing_va, 0),
3259
3260
AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_TILE_CONFIG,
3261
kfd_ioctl_get_tile_config, 0),
3262
3263
AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_TRAP_HANDLER,
3264
kfd_ioctl_set_trap_handler, 0),
3265
3266
AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES_NEW,
3267
kfd_ioctl_get_process_apertures_new, 0),
3268
3269
AMDKFD_IOCTL_DEF(AMDKFD_IOC_ACQUIRE_VM,
3270
kfd_ioctl_acquire_vm, 0),
3271
3272
AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_MEMORY_OF_GPU,
3273
kfd_ioctl_alloc_memory_of_gpu, 0),
3274
3275
AMDKFD_IOCTL_DEF(AMDKFD_IOC_FREE_MEMORY_OF_GPU,
3276
kfd_ioctl_free_memory_of_gpu, 0),
3277
3278
AMDKFD_IOCTL_DEF(AMDKFD_IOC_MAP_MEMORY_TO_GPU,
3279
kfd_ioctl_map_memory_to_gpu, 0),
3280
3281
AMDKFD_IOCTL_DEF(AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU,
3282
kfd_ioctl_unmap_memory_from_gpu, 0),
3283
3284
AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_CU_MASK,
3285
kfd_ioctl_set_cu_mask, 0),
3286
3287
AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_QUEUE_WAVE_STATE,
3288
kfd_ioctl_get_queue_wave_state, 0),
3289
3290
AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_DMABUF_INFO,
3291
kfd_ioctl_get_dmabuf_info, 0),
3292
3293
AMDKFD_IOCTL_DEF(AMDKFD_IOC_IMPORT_DMABUF,
3294
kfd_ioctl_import_dmabuf, 0),
3295
3296
AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_QUEUE_GWS,
3297
kfd_ioctl_alloc_queue_gws, 0),
3298
3299
AMDKFD_IOCTL_DEF(AMDKFD_IOC_SMI_EVENTS,
3300
kfd_ioctl_smi_events, 0),
3301
3302
AMDKFD_IOCTL_DEF(AMDKFD_IOC_SVM, kfd_ioctl_svm, 0),
3303
3304
AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_XNACK_MODE,
3305
kfd_ioctl_set_xnack_mode, 0),
3306
3307
AMDKFD_IOCTL_DEF(AMDKFD_IOC_CRIU_OP,
3308
kfd_ioctl_criu, KFD_IOC_FLAG_CHECKPOINT_RESTORE),
3309
3310
AMDKFD_IOCTL_DEF(AMDKFD_IOC_AVAILABLE_MEMORY,
3311
kfd_ioctl_get_available_memory, 0),
3312
3313
AMDKFD_IOCTL_DEF(AMDKFD_IOC_EXPORT_DMABUF,
3314
kfd_ioctl_export_dmabuf, 0),
3315
3316
AMDKFD_IOCTL_DEF(AMDKFD_IOC_RUNTIME_ENABLE,
3317
kfd_ioctl_runtime_enable, 0),
3318
3319
AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_TRAP,
3320
kfd_ioctl_set_debug_trap, 0),
3321
3322
AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_PROCESS,
3323
kfd_ioctl_create_process, 0),
3324
};
3325
3326
#define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls)
3327
3328
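/* Main ioctl dispatcher: look up the handler in amdkfd_ioctls[], check that
* the caller is the process that opened the fd (or a ptrace-attached
* checkpoint tool with CAP_CHECKPOINT_RESTORE or CAP_SYS_ADMIN), and copy
* the argument buffer in and out around the handler call.
*/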
static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
3329
{
3330
struct kfd_process *process;
3331
amdkfd_ioctl_t *func;
3332
const struct amdkfd_ioctl_desc *ioctl = NULL;
3333
unsigned int nr = _IOC_NR(cmd);
3334
char stack_kdata[128];
3335
char *kdata = NULL;
3336
unsigned int usize, asize;
3337
int retcode = -EINVAL;
3338
bool ptrace_attached = false;
3339
3340
if (nr >= AMDKFD_CORE_IOCTL_COUNT) {
3341
retcode = -ENOTTY;
3342
goto err_i1;
3343
}
3344
3345
if ((nr >= AMDKFD_COMMAND_START) && (nr < AMDKFD_COMMAND_END)) {
3346
u32 amdkfd_size;
3347
3348
ioctl = &amdkfd_ioctls[nr];
3349
3350
amdkfd_size = _IOC_SIZE(ioctl->cmd);
3351
usize = asize = _IOC_SIZE(cmd);
3352
if (amdkfd_size > asize)
3353
asize = amdkfd_size;
3354
3355
cmd = ioctl->cmd;
3356
} else {
3357
retcode = -ENOTTY;
3358
goto err_i1;
3359
}
3360
3361
dev_dbg(kfd_device, "ioctl cmd 0x%x (#0x%x), arg 0x%lx\n", cmd, nr, arg);
3362
3363
/* Get the process struct from the filep. Only the process
3364
* that opened /dev/kfd can use the file descriptor. Child
3365
* processes need to create their own KFD device context.
3366
*/
3367
process = filep->private_data;
3368
3369
rcu_read_lock();
3370
if ((ioctl->flags & KFD_IOC_FLAG_CHECKPOINT_RESTORE) &&
3371
ptrace_parent(process->lead_thread) == current)
3372
ptrace_attached = true;
3373
rcu_read_unlock();
3374
3375
if (process->lead_thread != current->group_leader
3376
&& !ptrace_attached) {
3377
dev_dbg(kfd_device, "Using KFD FD in wrong process\n");
3378
retcode = -EBADF;
3379
goto err_i1;
3380
}
3381
3382
/* Do not trust userspace, use our own definition */
3383
func = ioctl->func;
3384
3385
if (unlikely(!func)) {
3386
dev_dbg(kfd_device, "no function\n");
3387
retcode = -EINVAL;
3388
goto err_i1;
3389
}
3390
3391
/*
* Versions of docker shipped in Ubuntu 18.xx and 20.xx do not support
* CAP_CHECKPOINT_RESTORE, so we also allow access if CAP_SYS_ADMIN is set,
* as CAP_SYS_ADMIN is a more privileged capability.
*/
3396
if (unlikely(ioctl->flags & KFD_IOC_FLAG_CHECKPOINT_RESTORE)) {
3397
if (!capable(CAP_CHECKPOINT_RESTORE) &&
3398
!capable(CAP_SYS_ADMIN)) {
3399
retcode = -EACCES;
3400
goto err_i1;
3401
}
3402
}
3403
3404
if (cmd & (IOC_IN | IOC_OUT)) {
3405
if (asize <= sizeof(stack_kdata)) {
3406
kdata = stack_kdata;
3407
} else {
3408
kdata = kmalloc(asize, GFP_KERNEL);
3409
if (!kdata) {
3410
retcode = -ENOMEM;
3411
goto err_i1;
3412
}
3413
}
3414
if (asize > usize)
3415
memset(kdata + usize, 0, asize - usize);
3416
}
3417
3418
if (cmd & IOC_IN) {
3419
if (copy_from_user(kdata, (void __user *)arg, usize) != 0) {
3420
retcode = -EFAULT;
3421
goto err_i1;
3422
}
3423
} else if (cmd & IOC_OUT) {
3424
memset(kdata, 0, usize);
3425
}
3426
3427
retcode = func(filep, process, kdata);
3428
3429
if (cmd & IOC_OUT)
3430
if (copy_to_user((void __user *)arg, kdata, usize) != 0)
3431
retcode = -EFAULT;
3432
3433
err_i1:
3434
if (!ioctl)
3435
dev_dbg(kfd_device, "invalid ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
3436
task_pid_nr(current), cmd, nr);
3437
3438
if (kdata != stack_kdata)
3439
kfree(kdata);
3440
3441
if (retcode)
3442
dev_dbg(kfd_device, "ioctl cmd (#0x%x), arg 0x%lx, ret = %d\n",
3443
nr, arg, retcode);
3444
3445
return retcode;
3446
}
3447
3448
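/* Map the device's remapped MMIO page into the process as uncached IO
* memory; exactly one page of PAGE_SIZE is allowed.
*/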
static int kfd_mmio_mmap(struct kfd_node *dev, struct kfd_process *process,
struct vm_area_struct *vma)
{
phys_addr_t address;

if (vma->vm_end - vma->vm_start != PAGE_SIZE)
return -EINVAL;

if (PAGE_SIZE > 4096)
return -EINVAL;

address = dev->adev->rmmio_remap.bus_addr;

vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
VM_DONTDUMP | VM_PFNMAP);

vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

pr_debug("process pid %d mapping mmio page\n"
" target user address == 0x%08llX\n"
" physical address == 0x%08llX\n"
" vm_flags == 0x%04lX\n"
" size == 0x%04lX\n",
process->lead_thread->pid, (unsigned long long) vma->vm_start,
address, vma->vm_flags, PAGE_SIZE);

return io_remap_pfn_range(vma,
vma->vm_start,
address >> PAGE_SHIFT,
PAGE_SIZE,
vma->vm_page_prot);
}
3480
3481
3482
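/* mmap handler for /dev/kfd: the mmap offset encodes the mapping type and
* GPU id and is dispatched to the doorbell, event, reserved-memory or MMIO
* mmap helpers.
*/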
static int kfd_mmap(struct file *filep, struct vm_area_struct *vma)
{
struct kfd_process *process;
struct kfd_node *dev = NULL;
unsigned long mmap_offset;
unsigned int gpu_id;

process = filep->private_data;
if (!process)
return -ESRCH;

if (process->lead_thread != current->group_leader)
return -EBADF;

mmap_offset = vma->vm_pgoff << PAGE_SHIFT;
gpu_id = KFD_MMAP_GET_GPU_ID(mmap_offset);
if (gpu_id)
dev = kfd_device_by_id(gpu_id);

switch (mmap_offset & KFD_MMAP_TYPE_MASK) {
case KFD_MMAP_TYPE_DOORBELL:
if (!dev)
return -ENODEV;
return kfd_doorbell_mmap(dev, process, vma);

case KFD_MMAP_TYPE_EVENTS:
return kfd_event_mmap(process, vma);

case KFD_MMAP_TYPE_RESERVED_MEM:
if (!dev)
return -ENODEV;
return kfd_reserved_mem_mmap(dev, process, vma);
case KFD_MMAP_TYPE_MMIO:
if (!dev)
return -ENODEV;
return kfd_mmio_mmap(dev, process, vma);
}

return -EFAULT;
}