// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <uapi/linux/kfd_ioctl.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/ptrace.h>
#include <linux/dma-buf.h>
#include <linux/fdtable.h>
#include <linux/processor.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_svm.h"
#include "amdgpu_amdkfd.h"
#include "kfd_smi_events.h"
#include "amdgpu_dma_buf.h"
#include "kfd_debug.h"

static long kfd_ioctl(struct file *, unsigned int, unsigned long);
static int kfd_open(struct inode *, struct file *);
static int kfd_release(struct inode *, struct file *);
static int kfd_mmap(struct file *, struct vm_area_struct *);

static const char kfd_dev_name[] = "kfd";

static const struct file_operations kfd_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = kfd_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.open = kfd_open,
	.release = kfd_release,
	.mmap = kfd_mmap,
};

static int kfd_char_dev_major = -1;
struct device *kfd_device;
static const struct class kfd_class = {
	.name = kfd_dev_name,
};

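/* Helper pair: look up a per-process device struct by GPU ID and return it
 * with p->mutex held on success; on failure the mutex is dropped and NULL
 * is returned. Callers must release the lock with kfd_unlock_pdd().
 */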
static inline struct kfd_process_device *kfd_lock_pdd_by_id(struct kfd_process *p, __u32 gpu_id)
{
	struct kfd_process_device *pdd;

	mutex_lock(&p->mutex);
	pdd = kfd_process_device_data_by_id(p, gpu_id);

	if (pdd)
		return pdd;

	mutex_unlock(&p->mutex);
	return NULL;
}

static inline void kfd_unlock_pdd(struct kfd_process_device *pdd)
{
	mutex_unlock(&pdd->process->mutex);
}

int kfd_chardev_init(void)
{
	int err = 0;

	kfd_char_dev_major = register_chrdev(0, kfd_dev_name, &kfd_fops);
	err = kfd_char_dev_major;
	if (err < 0)
		goto err_register_chrdev;

	err = class_register(&kfd_class);
	if (err)
		goto err_class_create;

	kfd_device = device_create(&kfd_class, NULL,
				   MKDEV(kfd_char_dev_major, 0),
				   NULL, kfd_dev_name);
	err = PTR_ERR(kfd_device);
	if (IS_ERR(kfd_device))
		goto err_device_create;

	return 0;

err_device_create:
	class_unregister(&kfd_class);
err_class_create:
	unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
err_register_chrdev:
	return err;
}

void kfd_chardev_exit(void)
{
	device_destroy(&kfd_class, MKDEV(kfd_char_dev_major, 0));
	class_unregister(&kfd_class);
	unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
	kfd_device = NULL;
}

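/* Opening /dev/kfd creates (or takes a reference on) the kfd_process for the
 * calling task and stores it in filep->private_data. An illustrative
 * userspace sketch (not part of this driver):
 *
 *	int kfd_fd = open("/dev/kfd", O_RDWR | O_CLOEXEC);
 *	if (kfd_fd < 0)
 *		err(1, "open /dev/kfd");
 *
 * 32-bit processes are rejected below, so only 64-bit clients can open it.
 */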
static int kfd_open(struct inode *inode, struct file *filep)
{
	struct kfd_process *process;
	bool is_32bit_user_mode;

	if (iminor(inode) != 0)
		return -ENODEV;

	is_32bit_user_mode = in_compat_syscall();

	if (is_32bit_user_mode) {
		dev_warn(kfd_device,
			 "Process %d (32-bit) failed to open /dev/kfd\n"
			 "32-bit processes are not supported by amdkfd\n",
			 current->pid);
		return -EPERM;
	}

	process = kfd_create_process(current);
	if (IS_ERR(process))
		return PTR_ERR(process);

	if (kfd_process_init_cwsr_apu(process, filep)) {
		kfd_unref_process(process);
		return -EFAULT;
	}

	/* filep now owns the reference returned by kfd_create_process */
	filep->private_data = process;

	dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
		process->pasid, process->is_32bit_user_mode);

	return 0;
}

static int kfd_release(struct inode *inode, struct file *filep)
{
	struct kfd_process *process = filep->private_data;

	if (process)
		kfd_unref_process(process);

	return 0;
}

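/* Illustrative AMDKFD_IOC_GET_VERSION call from userspace (sketch only; the
 * ioctl number and args struct come from uapi/linux/kfd_ioctl.h):
 *
 *	struct kfd_ioctl_get_version_args va = {0};
 *	if (ioctl(kfd_fd, AMDKFD_IOC_GET_VERSION, &va) == 0)
 *		printf("KFD ioctl interface %u.%u\n",
 *		       va.major_version, va.minor_version);
 */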
static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
				 void *data)
{
	struct kfd_ioctl_get_version_args *args = data;

	args->major_version = KFD_IOCTL_MAJOR_VERSION;
	args->minor_version = KFD_IOCTL_MINOR_VERSION;

	return 0;
}

static int set_queue_properties_from_user(struct queue_properties *q_properties,
					  struct kfd_ioctl_create_queue_args *args)
{
	/*
	 * Repurpose queue percentage to accommodate new features:
	 * bit 0-7: queue percentage
	 * bit 8-15: pm4_target_xcc
	 */
	if ((args->queue_percentage & 0xFF) > KFD_MAX_QUEUE_PERCENTAGE) {
		pr_err("Queue percentage must be between 0 and KFD_MAX_QUEUE_PERCENTAGE\n");
		return -EINVAL;
	}

	if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
		pr_err("Queue priority must be between 0 and KFD_MAX_QUEUE_PRIORITY\n");
		return -EINVAL;
	}

	if ((args->ring_base_address) &&
	    (!access_ok((const void __user *) args->ring_base_address,
			sizeof(uint64_t)))) {
		pr_err("Can't access ring base address\n");
		return -EFAULT;
	}

	if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
		pr_err("Ring size must be a power of 2 or 0\n");
		return -EINVAL;
	}

	if (!access_ok((const void __user *) args->read_pointer_address,
		       sizeof(uint32_t))) {
		pr_err("Can't access read pointer\n");
		return -EFAULT;
	}

	if (!access_ok((const void __user *) args->write_pointer_address,
		       sizeof(uint32_t))) {
		pr_err("Can't access write pointer\n");
		return -EFAULT;
	}

	if (args->eop_buffer_address &&
	    !access_ok((const void __user *) args->eop_buffer_address,
		       sizeof(uint32_t))) {
		pr_debug("Can't access eop buffer");
		return -EFAULT;
	}

	if (args->ctx_save_restore_address &&
	    !access_ok((const void __user *) args->ctx_save_restore_address,
		       sizeof(uint32_t))) {
		pr_debug("Can't access ctx save restore buffer");
		return -EFAULT;
	}

	q_properties->is_interop = false;
	q_properties->is_gws = false;
	q_properties->queue_percent = args->queue_percentage & 0xFF;
	/* bit 8-15 are repurposed to be PM4 target XCC */
	q_properties->pm4_target_xcc = (args->queue_percentage >> 8) & 0xFF;
	q_properties->priority = args->queue_priority;
	q_properties->queue_address = args->ring_base_address;
	q_properties->queue_size = args->ring_size;
	q_properties->read_ptr = (uint32_t *) args->read_pointer_address;
	q_properties->write_ptr = (uint32_t *) args->write_pointer_address;
	q_properties->eop_ring_buffer_address = args->eop_buffer_address;
	q_properties->eop_ring_buffer_size = args->eop_buffer_size;
	q_properties->ctx_save_restore_area_address =
			args->ctx_save_restore_address;
	q_properties->ctx_save_restore_area_size = args->ctx_save_restore_size;
	q_properties->ctl_stack_size = args->ctl_stack_size;
	if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE ||
	    args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
		q_properties->type = KFD_QUEUE_TYPE_COMPUTE;
	else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA)
		q_properties->type = KFD_QUEUE_TYPE_SDMA;
	else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA_XGMI)
		q_properties->type = KFD_QUEUE_TYPE_SDMA_XGMI;
	else
		return -ENOTSUPP;

	if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
		q_properties->format = KFD_QUEUE_FORMAT_AQL;
	else
		q_properties->format = KFD_QUEUE_FORMAT_PM4;

	pr_debug("Queue Percentage: %d, %d\n",
		 q_properties->queue_percent, args->queue_percentage);

	pr_debug("Queue Priority: %d, %d\n",
		 q_properties->priority, args->queue_priority);

	pr_debug("Queue Address: 0x%llX, 0x%llX\n",
		 q_properties->queue_address, args->ring_base_address);

	pr_debug("Queue Size: 0x%llX, %u\n",
		 q_properties->queue_size, args->ring_size);

	pr_debug("Queue r/w Pointers: %px, %px\n",
		 q_properties->read_ptr,
		 q_properties->write_ptr);

	pr_debug("Queue Format: %d\n", q_properties->format);

	pr_debug("Queue EOP: 0x%llX\n", q_properties->eop_ring_buffer_address);

	pr_debug("Queue CTX save area: 0x%llX\n",
		 q_properties->ctx_save_restore_area_address);

	return 0;
}

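/* Illustrative userspace queue creation (sketch only; the structs and the
 * AMDKFD_IOC_CREATE_QUEUE number are from uapi/linux/kfd_ioctl.h, and the
 * ring/rptr/wptr buffers are assumed to be valid allocations made by the
 * caller beforehand):
 *
 *	struct kfd_ioctl_create_queue_args qa = {0};
 *	qa.gpu_id = gpu_id;
 *	qa.queue_type = KFD_IOC_QUEUE_TYPE_COMPUTE_AQL;
 *	qa.queue_percentage = 100;
 *	qa.queue_priority = 7;
 *	qa.ring_base_address = (uintptr_t)ring_buf;
 *	qa.ring_size = ring_bytes;		// must be a power of two
 *	qa.read_pointer_address = (uintptr_t)rptr;
 *	qa.write_pointer_address = (uintptr_t)wptr;
 *	if (ioctl(kfd_fd, AMDKFD_IOC_CREATE_QUEUE, &qa) == 0)
 *		;	// qa.queue_id and qa.doorbell_offset are now valid
 */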
static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
				  void *data)
{
	struct kfd_ioctl_create_queue_args *args = data;
	struct kfd_node *dev;
	int err = 0;
	unsigned int queue_id;
	struct kfd_process_device *pdd;
	struct queue_properties q_properties;
	uint32_t doorbell_offset_in_process = 0;
	struct amdgpu_bo *wptr_bo = NULL;

	memset(&q_properties, 0, sizeof(struct queue_properties));

	pr_debug("Creating queue ioctl\n");

	err = set_queue_properties_from_user(&q_properties, args);
	if (err)
		return err;

	pr_debug("Looking for gpu id 0x%x\n", args->gpu_id);

	mutex_lock(&p->mutex);

	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
	if (!pdd) {
		pr_debug("Could not find gpu id 0x%x\n", args->gpu_id);
		err = -EINVAL;
		goto err_pdd;
	}
	dev = pdd->dev;

	pdd = kfd_bind_process_to_device(dev, p);
	if (IS_ERR(pdd)) {
		err = -ESRCH;
		goto err_bind_process;
	}

	if (!pdd->qpd.proc_doorbells) {
		err = kfd_alloc_process_doorbells(dev->kfd, pdd);
		if (err) {
			pr_debug("failed to allocate process doorbells\n");
			goto err_bind_process;
		}
	}

	/* Starting with GFX11, wptr BOs must be mapped to GART so that MES can
	 * detect work on unmapped queues when usermode queues are
	 * oversubscribed (no aggregated doorbell).
	 */
	if (dev->kfd->shared_resources.enable_mes &&
	    ((dev->adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK)
	     >> AMDGPU_MES_API_VERSION_SHIFT) >= 2) {
		struct amdgpu_bo_va_mapping *wptr_mapping;
		struct amdgpu_vm *wptr_vm;

		wptr_vm = drm_priv_to_vm(pdd->drm_priv);
		err = amdgpu_bo_reserve(wptr_vm->root.bo, false);
		if (err)
			goto err_wptr_map_gart;

		wptr_mapping = amdgpu_vm_bo_lookup_mapping(
				wptr_vm, args->write_pointer_address >> PAGE_SHIFT);
		amdgpu_bo_unreserve(wptr_vm->root.bo);
		if (!wptr_mapping) {
			pr_err("Failed to lookup wptr bo\n");
			err = -EINVAL;
			goto err_wptr_map_gart;
		}

		wptr_bo = wptr_mapping->bo_va->base.bo;
		if (wptr_bo->tbo.base.size > PAGE_SIZE) {
			pr_err("Requested GART mapping for wptr bo larger than one page\n");
			err = -EINVAL;
			goto err_wptr_map_gart;
		}
		if (dev->adev != amdgpu_ttm_adev(wptr_bo->tbo.bdev)) {
			pr_err("Queue memory allocated to wrong device\n");
			err = -EINVAL;
			goto err_wptr_map_gart;
		}

		err = amdgpu_amdkfd_map_gtt_bo_to_gart(wptr_bo);
		if (err) {
			pr_err("Failed to map wptr bo to GART\n");
			goto err_wptr_map_gart;
		}
	}

	pr_debug("Creating queue for PASID 0x%x on gpu 0x%x\n",
		 p->pasid,
		 dev->id);

	err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &queue_id, wptr_bo,
			       NULL, NULL, NULL, &doorbell_offset_in_process);
	if (err != 0)
		goto err_create_queue;

	args->queue_id = queue_id;

	/* Return gpu_id as doorbell offset for mmap usage */
	args->doorbell_offset = KFD_MMAP_TYPE_DOORBELL;
	args->doorbell_offset |= KFD_MMAP_GPU_ID(args->gpu_id);
	if (KFD_IS_SOC15(dev))
		/* On SOC15 ASICs, include the doorbell offset within the
		 * process doorbell frame, which is 2 pages.
		 */
		args->doorbell_offset |= doorbell_offset_in_process;

	mutex_unlock(&p->mutex);

	pr_debug("Queue id %d was created successfully\n", args->queue_id);

	pr_debug("Ring buffer address == 0x%016llX\n",
		 args->ring_base_address);

	pr_debug("Read ptr address == 0x%016llX\n",
		 args->read_pointer_address);

	pr_debug("Write ptr address == 0x%016llX\n",
		 args->write_pointer_address);

	kfd_dbg_ev_raise(KFD_EC_MASK(EC_QUEUE_NEW), p, dev, queue_id, false, NULL, 0);
	return 0;

err_create_queue:
	if (wptr_bo)
		amdgpu_amdkfd_free_gtt_mem(dev->adev, wptr_bo);
err_wptr_map_gart:
err_bind_process:
err_pdd:
	mutex_unlock(&p->mutex);
	return err;
}

static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
				   void *data)
{
	int retval;
	struct kfd_ioctl_destroy_queue_args *args = data;

	pr_debug("Destroying queue id %d for pasid 0x%x\n",
		 args->queue_id,
		 p->pasid);

	mutex_lock(&p->mutex);

	retval = pqm_destroy_queue(&p->pqm, args->queue_id);

	mutex_unlock(&p->mutex);
	return retval;
}

static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
				  void *data)
{
	int retval;
	struct kfd_ioctl_update_queue_args *args = data;
	struct queue_properties properties;

	/*
	 * Repurpose queue percentage to accommodate new features:
	 * bit 0-7: queue percentage
	 * bit 8-15: pm4_target_xcc
	 */
	if ((args->queue_percentage & 0xFF) > KFD_MAX_QUEUE_PERCENTAGE) {
		pr_err("Queue percentage must be between 0 and KFD_MAX_QUEUE_PERCENTAGE\n");
		return -EINVAL;
	}

	if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
		pr_err("Queue priority must be between 0 and KFD_MAX_QUEUE_PRIORITY\n");
		return -EINVAL;
	}

	if ((args->ring_base_address) &&
	    (!access_ok((const void __user *) args->ring_base_address,
			sizeof(uint64_t)))) {
		pr_err("Can't access ring base address\n");
		return -EFAULT;
	}

	if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
		pr_err("Ring size must be a power of 2 or 0\n");
		return -EINVAL;
	}

	properties.queue_address = args->ring_base_address;
	properties.queue_size = args->ring_size;
	properties.queue_percent = args->queue_percentage & 0xFF;
	/* bit 8-15 are repurposed to be PM4 target XCC */
	properties.pm4_target_xcc = (args->queue_percentage >> 8) & 0xFF;
	properties.priority = args->queue_priority;

	pr_debug("Updating queue id %d for pasid 0x%x\n",
		 args->queue_id, p->pasid);

	mutex_lock(&p->mutex);

	retval = pqm_update_queue_properties(&p->pqm, args->queue_id, &properties);

	mutex_unlock(&p->mutex);

	return retval;
}

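/* The CU mask is passed as an array of 32-bit words, one bit per compute
 * unit, so num_cu_mask must be a multiple of 32 and is capped at 1024 bits
 * below. Illustrative userspace sketch (assumed buffer; names are from
 * uapi/linux/kfd_ioctl.h):
 *
 *	uint32_t mask[2] = { 0xffffffff, 0x0000ffff };	// first 48 CUs on
 *	struct kfd_ioctl_set_cu_mask_args ca = {0};
 *	ca.queue_id = queue_id;
 *	ca.num_cu_mask = 64;
 *	ca.cu_mask_ptr = (uintptr_t)mask;
 *	ioctl(kfd_fd, AMDKFD_IOC_SET_CU_MASK, &ca);
 */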
static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
				 void *data)
{
	int retval;
	const int max_num_cus = 1024;
	struct kfd_ioctl_set_cu_mask_args *args = data;
	struct mqd_update_info minfo = {0};
	uint32_t __user *cu_mask_ptr = (uint32_t __user *)args->cu_mask_ptr;
	size_t cu_mask_size = sizeof(uint32_t) * (args->num_cu_mask / 32);

	if ((args->num_cu_mask % 32) != 0) {
		pr_debug("num_cu_mask 0x%x must be a multiple of 32",
			 args->num_cu_mask);
		return -EINVAL;
	}

	minfo.cu_mask.count = args->num_cu_mask;
	if (minfo.cu_mask.count == 0) {
		pr_debug("CU mask cannot be 0");
		return -EINVAL;
	}

	/* To prevent an unreasonably large CU mask size, set an arbitrary
	 * limit of max_num_cus bits. We can then just drop any CU mask bits
	 * past max_num_cus bits and just use the first max_num_cus bits.
	 */
	if (minfo.cu_mask.count > max_num_cus) {
		pr_debug("CU mask cannot be greater than 1024 bits");
		minfo.cu_mask.count = max_num_cus;
		cu_mask_size = sizeof(uint32_t) * (max_num_cus/32);
	}

	minfo.cu_mask.ptr = kzalloc(cu_mask_size, GFP_KERNEL);
	if (!minfo.cu_mask.ptr)
		return -ENOMEM;

	retval = copy_from_user(minfo.cu_mask.ptr, cu_mask_ptr, cu_mask_size);
	if (retval) {
		pr_debug("Could not copy CU mask from userspace");
		retval = -EFAULT;
		goto out;
	}

	mutex_lock(&p->mutex);

	retval = pqm_update_mqd(&p->pqm, args->queue_id, &minfo);

	mutex_unlock(&p->mutex);

out:
	kfree(minfo.cu_mask.ptr);
	return retval;
}

static int kfd_ioctl_get_queue_wave_state(struct file *filep,
					  struct kfd_process *p, void *data)
{
	struct kfd_ioctl_get_queue_wave_state_args *args = data;
	int r;

	mutex_lock(&p->mutex);

	r = pqm_get_wave_state(&p->pqm, args->queue_id,
			       (void __user *)args->ctl_stack_address,
			       &args->ctl_stack_used_size,
			       &args->save_area_used_size);

	mutex_unlock(&p->mutex);

	return r;
}

static int kfd_ioctl_set_memory_policy(struct file *filep,
				       struct kfd_process *p, void *data)
{
	struct kfd_ioctl_set_memory_policy_args *args = data;
	int err = 0;
	struct kfd_process_device *pdd;
	enum cache_policy default_policy, alternate_policy;

	if (args->default_policy != KFD_IOC_CACHE_POLICY_COHERENT
	    && args->default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
		return -EINVAL;
	}

	if (args->alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT
	    && args->alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
		return -EINVAL;
	}

	mutex_lock(&p->mutex);
	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
	if (!pdd) {
		pr_debug("Could not find gpu id 0x%x\n", args->gpu_id);
		err = -EINVAL;
		goto err_pdd;
	}

	pdd = kfd_bind_process_to_device(pdd->dev, p);
	if (IS_ERR(pdd)) {
		err = -ESRCH;
		goto out;
	}

	default_policy = (args->default_policy == KFD_IOC_CACHE_POLICY_COHERENT)
			 ? cache_policy_coherent : cache_policy_noncoherent;

	alternate_policy =
		(args->alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
		? cache_policy_coherent : cache_policy_noncoherent;

	if (!pdd->dev->dqm->ops.set_cache_memory_policy(pdd->dev->dqm,
				&pdd->qpd,
				default_policy,
				alternate_policy,
				(void __user *)args->alternate_aperture_base,
				args->alternate_aperture_size))
		err = -EINVAL;

out:
err_pdd:
	mutex_unlock(&p->mutex);

	return err;
}

static int kfd_ioctl_set_trap_handler(struct file *filep,
				      struct kfd_process *p, void *data)
{
	struct kfd_ioctl_set_trap_handler_args *args = data;
	int err = 0;
	struct kfd_process_device *pdd;

	mutex_lock(&p->mutex);

	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
	if (!pdd) {
		err = -EINVAL;
		goto err_pdd;
	}

	pdd = kfd_bind_process_to_device(pdd->dev, p);
	if (IS_ERR(pdd)) {
		err = -ESRCH;
		goto out;
	}

	kfd_process_set_trap_handler(&pdd->qpd, args->tba_addr, args->tma_addr);

out:
err_pdd:
	mutex_unlock(&p->mutex);

	return err;
}

static int kfd_ioctl_dbg_register(struct file *filep,
				  struct kfd_process *p, void *data)
{
	return -EPERM;
}

static int kfd_ioctl_dbg_unregister(struct file *filep,
				    struct kfd_process *p, void *data)
{
	return -EPERM;
}

static int kfd_ioctl_dbg_address_watch(struct file *filep,
				       struct kfd_process *p, void *data)
{
	return -EPERM;
}

/* Parse and generate fixed size data structure for wave control */
static int kfd_ioctl_dbg_wave_control(struct file *filep,
				      struct kfd_process *p, void *data)
{
	return -EPERM;
}

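/* AMDKFD_IOC_GET_CLOCK_COUNTERS returns a correlated snapshot of clocks:
 * the GPU clock counter read through KGD, the raw monotonic CPU time, and
 * the boottime-based system time. The time counters are in nanoseconds,
 * which is why a fixed 1 GHz system_clock_freq is reported.
 */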
static int kfd_ioctl_get_clock_counters(struct file *filep,
					struct kfd_process *p, void *data)
{
	struct kfd_ioctl_get_clock_counters_args *args = data;
	struct kfd_process_device *pdd;

	mutex_lock(&p->mutex);
	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
	mutex_unlock(&p->mutex);
	if (pdd)
		/* Reading GPU clock counter from KGD */
		args->gpu_clock_counter = amdgpu_amdkfd_get_gpu_clock_counter(pdd->dev->adev);
	else
		/* Node without GPU resource */
		args->gpu_clock_counter = 0;

	/* No access to rdtsc. Using raw monotonic time */
	args->cpu_clock_counter = ktime_get_raw_ns();
	args->system_clock_counter = ktime_get_boottime_ns();

	/* Since the counters are in nanoseconds, report a 1 GHz frequency */
	args->system_clock_freq = 1000000000;

	return 0;
}

static int kfd_ioctl_get_process_apertures(struct file *filp,
					   struct kfd_process *p, void *data)
{
	struct kfd_ioctl_get_process_apertures_args *args = data;
	struct kfd_process_device_apertures *pAperture;
	int i;

	dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid);

	args->num_of_nodes = 0;

	mutex_lock(&p->mutex);
	/* Run over all pdd of the process */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		pAperture =
			&args->process_apertures[args->num_of_nodes];
		pAperture->gpu_id = pdd->dev->id;
		pAperture->lds_base = pdd->lds_base;
		pAperture->lds_limit = pdd->lds_limit;
		pAperture->gpuvm_base = pdd->gpuvm_base;
		pAperture->gpuvm_limit = pdd->gpuvm_limit;
		pAperture->scratch_base = pdd->scratch_base;
		pAperture->scratch_limit = pdd->scratch_limit;

		dev_dbg(kfd_device,
			"node id %u\n", args->num_of_nodes);
		dev_dbg(kfd_device,
			"gpu id %u\n", pdd->dev->id);
		dev_dbg(kfd_device,
			"lds_base %llX\n", pdd->lds_base);
		dev_dbg(kfd_device,
			"lds_limit %llX\n", pdd->lds_limit);
		dev_dbg(kfd_device,
			"gpuvm_base %llX\n", pdd->gpuvm_base);
		dev_dbg(kfd_device,
			"gpuvm_limit %llX\n", pdd->gpuvm_limit);
		dev_dbg(kfd_device,
			"scratch_base %llX\n", pdd->scratch_base);
		dev_dbg(kfd_device,
			"scratch_limit %llX\n", pdd->scratch_limit);

		if (++args->num_of_nodes >= NUM_OF_SUPPORTED_GPUS)
			break;
	}
	mutex_unlock(&p->mutex);

	return 0;
}

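/* The "new" variant uses the usual two-call pattern: a first call with
 * num_of_nodes == 0 returns the node count, then the caller allocates that
 * many aperture structs and calls again. Illustrative sketch (assumed
 * userspace buffer; names are from uapi/linux/kfd_ioctl.h):
 *
 *	struct kfd_ioctl_get_process_apertures_new_args aa = {0};
 *	ioctl(kfd_fd, AMDKFD_IOC_GET_PROCESS_APERTURES_NEW, &aa);
 *	struct kfd_process_device_apertures *pa =
 *		calloc(aa.num_of_nodes, sizeof(*pa));
 *	aa.kfd_process_device_apertures_ptr = (uintptr_t)pa;
 *	ioctl(kfd_fd, AMDKFD_IOC_GET_PROCESS_APERTURES_NEW, &aa);
 */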
static int kfd_ioctl_get_process_apertures_new(struct file *filp,
					       struct kfd_process *p, void *data)
{
	struct kfd_ioctl_get_process_apertures_new_args *args = data;
	struct kfd_process_device_apertures *pa;
	int ret;
	int i;

	dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid);

	if (args->num_of_nodes == 0) {
		/* Return number of nodes, so that user space can allocate
		 * sufficient memory
		 */
		mutex_lock(&p->mutex);
		args->num_of_nodes = p->n_pdds;
		goto out_unlock;
	}

	/* Fill in process-aperture information for all available
	 * nodes, but not more than args->num_of_nodes as that is
	 * the amount of memory allocated by user
	 */
	pa = kcalloc(args->num_of_nodes, sizeof(struct kfd_process_device_apertures),
		     GFP_KERNEL);
	if (!pa)
		return -ENOMEM;

	mutex_lock(&p->mutex);

	if (!p->n_pdds) {
		args->num_of_nodes = 0;
		kfree(pa);
		goto out_unlock;
	}

	/* Run over all pdd of the process */
	for (i = 0; i < min(p->n_pdds, args->num_of_nodes); i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		pa[i].gpu_id = pdd->dev->id;
		pa[i].lds_base = pdd->lds_base;
		pa[i].lds_limit = pdd->lds_limit;
		pa[i].gpuvm_base = pdd->gpuvm_base;
		pa[i].gpuvm_limit = pdd->gpuvm_limit;
		pa[i].scratch_base = pdd->scratch_base;
		pa[i].scratch_limit = pdd->scratch_limit;

		dev_dbg(kfd_device,
			"gpu id %u\n", pdd->dev->id);
		dev_dbg(kfd_device,
			"lds_base %llX\n", pdd->lds_base);
		dev_dbg(kfd_device,
			"lds_limit %llX\n", pdd->lds_limit);
		dev_dbg(kfd_device,
			"gpuvm_base %llX\n", pdd->gpuvm_base);
		dev_dbg(kfd_device,
			"gpuvm_limit %llX\n", pdd->gpuvm_limit);
		dev_dbg(kfd_device,
			"scratch_base %llX\n", pdd->scratch_base);
		dev_dbg(kfd_device,
			"scratch_limit %llX\n", pdd->scratch_limit);
	}
	mutex_unlock(&p->mutex);

	args->num_of_nodes = i;
	ret = copy_to_user(
			(void __user *)args->kfd_process_device_apertures_ptr,
			pa,
			(i * sizeof(struct kfd_process_device_apertures)));
	kfree(pa);
	return ret ? -EFAULT : 0;

out_unlock:
	mutex_unlock(&p->mutex);
	return 0;
}

static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
				  void *data)
{
	struct kfd_ioctl_create_event_args *args = data;
	int err;

	/* For dGPUs the event page is allocated in user mode. The
	 * handle is passed to KFD with the first call to this IOCTL
	 * through the event_page_offset field.
	 */
	if (args->event_page_offset) {
		mutex_lock(&p->mutex);
		err = kfd_kmap_event_page(p, args->event_page_offset);
		mutex_unlock(&p->mutex);
		if (err)
			return err;
	}

	err = kfd_event_create(filp, p, args->event_type,
			       args->auto_reset != 0, args->node_id,
			       &args->event_id, &args->event_trigger_data,
			       &args->event_page_offset,
			       &args->event_slot_index);

	pr_debug("Created event (id:0x%08x) (%s)\n", args->event_id, __func__);
	return err;
}

static int kfd_ioctl_destroy_event(struct file *filp, struct kfd_process *p,
				   void *data)
{
	struct kfd_ioctl_destroy_event_args *args = data;

	return kfd_event_destroy(p, args->event_id);
}

static int kfd_ioctl_set_event(struct file *filp, struct kfd_process *p,
			       void *data)
{
	struct kfd_ioctl_set_event_args *args = data;

	return kfd_set_event(p, args->event_id);
}

static int kfd_ioctl_reset_event(struct file *filp, struct kfd_process *p,
				 void *data)
{
	struct kfd_ioctl_reset_event_args *args = data;

	return kfd_reset_event(p, args->event_id);
}

static int kfd_ioctl_wait_events(struct file *filp, struct kfd_process *p,
				 void *data)
{
	struct kfd_ioctl_wait_events_args *args = data;

	return kfd_wait_on_events(p, args->num_events,
			(void __user *)args->events_ptr,
			(args->wait_for_all != 0),
			&args->timeout, &args->wait_result);
}

static int kfd_ioctl_set_scratch_backing_va(struct file *filep,
					    struct kfd_process *p, void *data)
{
	struct kfd_ioctl_set_scratch_backing_va_args *args = data;
	struct kfd_process_device *pdd;
	struct kfd_node *dev;
	long err;

	mutex_lock(&p->mutex);
	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
	if (!pdd) {
		err = -EINVAL;
		goto err_pdd;
	}
	dev = pdd->dev;

	pdd = kfd_bind_process_to_device(dev, p);
	if (IS_ERR(pdd)) {
		err = PTR_ERR(pdd);
		goto bind_process_to_device_fail;
	}

	pdd->qpd.sh_hidden_private_base = args->va_addr;

	mutex_unlock(&p->mutex);

	if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS &&
	    pdd->qpd.vmid != 0 && dev->kfd2kgd->set_scratch_backing_va)
		dev->kfd2kgd->set_scratch_backing_va(
			dev->adev, args->va_addr, pdd->qpd.vmid);

	return 0;

bind_process_to_device_fail:
err_pdd:
	mutex_unlock(&p->mutex);
	return err;
}

static int kfd_ioctl_get_tile_config(struct file *filep,
				     struct kfd_process *p, void *data)
{
	struct kfd_ioctl_get_tile_config_args *args = data;
	struct kfd_process_device *pdd;
	struct tile_config config;
	int err = 0;

	mutex_lock(&p->mutex);
	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
	mutex_unlock(&p->mutex);
	if (!pdd)
		return -EINVAL;

	amdgpu_amdkfd_get_tile_config(pdd->dev->adev, &config);

	args->gb_addr_config = config.gb_addr_config;
	args->num_banks = config.num_banks;
	args->num_ranks = config.num_ranks;

	if (args->num_tile_configs > config.num_tile_configs)
		args->num_tile_configs = config.num_tile_configs;
	err = copy_to_user((void __user *)args->tile_config_ptr,
			   config.tile_config_ptr,
			   args->num_tile_configs * sizeof(uint32_t));
	if (err) {
		args->num_tile_configs = 0;
		return -EFAULT;
	}

	if (args->num_macro_tile_configs > config.num_macro_tile_configs)
		args->num_macro_tile_configs =
				config.num_macro_tile_configs;
	err = copy_to_user((void __user *)args->macro_tile_config_ptr,
			   config.macro_tile_config_ptr,
			   args->num_macro_tile_configs * sizeof(uint32_t));
	if (err) {
		args->num_macro_tile_configs = 0;
		return -EFAULT;
	}

	return 0;
}

static int kfd_ioctl_acquire_vm(struct file *filep, struct kfd_process *p,
				void *data)
{
	struct kfd_ioctl_acquire_vm_args *args = data;
	struct kfd_process_device *pdd;
	struct file *drm_file;
	int ret;

	drm_file = fget(args->drm_fd);
	if (!drm_file)
		return -EINVAL;

	mutex_lock(&p->mutex);
	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
	if (!pdd) {
		ret = -EINVAL;
		goto err_pdd;
	}

	if (pdd->drm_file) {
		ret = pdd->drm_file == drm_file ? 0 : -EBUSY;
		goto err_drm_file;
	}

	ret = kfd_process_device_init_vm(pdd, drm_file);
	if (ret)
		goto err_unlock;

	/* On success, the PDD keeps the drm_file reference */
	mutex_unlock(&p->mutex);

	return 0;

err_unlock:
err_pdd:
err_drm_file:
	mutex_unlock(&p->mutex);
	fput(drm_file);
	return ret;
}

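/* A device counts as "large BAR" when all of its local memory is host
 * visible (public VRAM with no private portion), when it is an APP APU
 * with no dedicated public VRAM, or when the debug_largebar option forces
 * it. Large BAR is a precondition for the host-visible VRAM allocations
 * checked in kfd_ioctl_alloc_memory_of_gpu() below.
 */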
bool kfd_dev_is_large_bar(struct kfd_node *dev)
{
	if (dev->kfd->adev->debug_largebar) {
		pr_debug("Simulate large-bar allocation on non large-bar machine\n");
		return true;
	}

	if (dev->local_mem_info.local_mem_size_private == 0 &&
	    dev->local_mem_info.local_mem_size_public > 0)
		return true;

	if (dev->local_mem_info.local_mem_size_public == 0 &&
	    dev->kfd->adev->gmc.is_app_apu) {
		pr_debug("APP APU, consider it like a large-bar system\n");
		return true;
	}

	return false;
}

static int kfd_ioctl_get_available_memory(struct file *filep,
					  struct kfd_process *p, void *data)
{
	struct kfd_ioctl_get_available_memory_args *args = data;
	struct kfd_process_device *pdd = kfd_lock_pdd_by_id(p, args->gpu_id);

	if (!pdd)
		return -EINVAL;
	args->available = amdgpu_amdkfd_get_available_memory(pdd->dev->adev,
							     pdd->dev->node_id);
	kfd_unlock_pdd(pdd);
	return 0;
}

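/* Illustrative VRAM allocation flow from userspace (sketch only; the flags
 * and struct fields are from uapi/linux/kfd_ioctl.h, and va_addr is assumed
 * to be a GPU VA the caller has reserved in its own address space):
 *
 *	struct kfd_ioctl_alloc_memory_of_gpu_args ma = {0};
 *	ma.gpu_id = gpu_id;
 *	ma.va_addr = va;
 *	ma.size = size;
 *	ma.flags = KFD_IOC_ALLOC_MEM_FLAGS_VRAM |
 *		   KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE;
 *	ioctl(kfd_fd, AMDKFD_IOC_ALLOC_MEMORY_OF_GPU, &ma);
 *	// ma.handle is then passed to AMDKFD_IOC_MAP_MEMORY_TO_GPU
 */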
static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
					 struct kfd_process *p, void *data)
{
	struct kfd_ioctl_alloc_memory_of_gpu_args *args = data;
	struct kfd_process_device *pdd;
	void *mem;
	struct kfd_node *dev;
	int idr_handle;
	long err;
	uint64_t offset = args->mmap_offset;
	uint32_t flags = args->flags;

	if (args->size == 0)
		return -EINVAL;

#if IS_ENABLED(CONFIG_HSA_AMD_SVM)
	/* Flush pending deferred work to avoid racing with deferred actions
	 * from previous memory map changes (e.g. munmap).
	 */
	svm_range_list_lock_and_flush_work(&p->svms, current->mm);
	mutex_lock(&p->svms.lock);
	mmap_write_unlock(current->mm);
	if (interval_tree_iter_first(&p->svms.objects,
				     args->va_addr >> PAGE_SHIFT,
				     (args->va_addr + args->size - 1) >> PAGE_SHIFT)) {
		pr_err("Address: 0x%llx already allocated by SVM\n",
		       args->va_addr);
		mutex_unlock(&p->svms.lock);
		return -EADDRINUSE;
	}

	/* When registering a user buffer, check whether it has already been
	 * registered by SVM, using the buffer's CPU virtual address.
	 */
	if ((flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) &&
	    interval_tree_iter_first(&p->svms.objects,
				     args->mmap_offset >> PAGE_SHIFT,
				     (args->mmap_offset + args->size - 1) >> PAGE_SHIFT)) {
		pr_err("User Buffer Address: 0x%llx already allocated by SVM\n",
		       args->mmap_offset);
		mutex_unlock(&p->svms.lock);
		return -EADDRINUSE;
	}

	mutex_unlock(&p->svms.lock);
#endif
	mutex_lock(&p->mutex);
	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
	if (!pdd) {
		err = -EINVAL;
		goto err_pdd;
	}

	dev = pdd->dev;

	if ((flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) &&
	    (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) &&
	    !kfd_dev_is_large_bar(dev)) {
		pr_err("Alloc host visible vram on small bar is not allowed\n");
		err = -EINVAL;
		goto err_large_bar;
	}

	pdd = kfd_bind_process_to_device(dev, p);
	if (IS_ERR(pdd)) {
		err = PTR_ERR(pdd);
		goto err_unlock;
	}

	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
		if (args->size != kfd_doorbell_process_slice(dev->kfd)) {
			err = -EINVAL;
			goto err_unlock;
		}
		offset = kfd_get_process_doorbells(pdd);
		if (!offset) {
			err = -ENOMEM;
			goto err_unlock;
		}
	} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
		if (args->size != PAGE_SIZE) {
			err = -EINVAL;
			goto err_unlock;
		}
		offset = dev->adev->rmmio_remap.bus_addr;
		if (!offset || (PAGE_SIZE > 4096)) {
			err = -ENOMEM;
			goto err_unlock;
		}
	}

	err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
		dev->adev, args->va_addr, args->size,
		pdd->drm_priv, (struct kgd_mem **) &mem, &offset,
		flags, false);

	if (err)
		goto err_unlock;

	idr_handle = kfd_process_device_create_obj_handle(pdd, mem);
	if (idr_handle < 0) {
		err = -EFAULT;
		goto err_free;
	}

	/* Update the VRAM usage count */
	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
		uint64_t size = args->size;

		if (flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM)
			size >>= 1;
		WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + PAGE_ALIGN(size));
	}

	mutex_unlock(&p->mutex);

	args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
	args->mmap_offset = offset;

	/* MMIO is mapped through kfd device
	 * Generate a kfd mmap offset
	 */
	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)
		args->mmap_offset = KFD_MMAP_TYPE_MMIO
					| KFD_MMAP_GPU_ID(args->gpu_id);

	return 0;

err_free:
	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, (struct kgd_mem *)mem,
					       pdd->drm_priv, NULL);
err_unlock:
err_pdd:
err_large_bar:
	mutex_unlock(&p->mutex);
	return err;
}

static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
					struct kfd_process *p, void *data)
{
	struct kfd_ioctl_free_memory_of_gpu_args *args = data;
	struct kfd_process_device *pdd;
	void *mem;
	int ret;
	uint64_t size = 0;

	mutex_lock(&p->mutex);
	/*
	 * Safeguard to prevent user space from freeing signal BO.
	 * It will be freed at process termination.
	 */
	if (p->signal_handle && (p->signal_handle == args->handle)) {
		pr_err("Free signal BO is not allowed\n");
		ret = -EPERM;
		goto err_unlock;
	}

	pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle));
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		ret = -EINVAL;
		goto err_pdd;
	}

	mem = kfd_process_device_translate_handle(
		pdd, GET_IDR_HANDLE(args->handle));
	if (!mem) {
		ret = -EINVAL;
		goto err_unlock;
	}

	ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev,
				(struct kgd_mem *)mem, pdd->drm_priv, &size);

	/* If freeing the buffer failed, leave the handle in place for
	 * clean-up during process tear-down.
	 */
	if (!ret)
		kfd_process_device_remove_obj_handle(
			pdd, GET_IDR_HANDLE(args->handle));

	WRITE_ONCE(pdd->vram_usage, pdd->vram_usage - size);

err_unlock:
err_pdd:
	mutex_unlock(&p->mutex);
	return ret;
}

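/* args->n_success implements restartable mapping: the loop below starts at
 * n_success and advances it after each successfully mapped device, so if
 * the ioctl fails part-way userspace can fix the problem and reissue the
 * same args without remapping the devices that already succeeded.
 */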
static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
				       struct kfd_process *p, void *data)
{
	struct kfd_ioctl_map_memory_to_gpu_args *args = data;
	struct kfd_process_device *pdd, *peer_pdd;
	void *mem;
	struct kfd_node *dev;
	long err = 0;
	int i;
	uint32_t *devices_arr = NULL;

	if (!args->n_devices) {
		pr_debug("Device IDs array empty\n");
		return -EINVAL;
	}
	if (args->n_success > args->n_devices) {
		pr_debug("n_success exceeds n_devices\n");
		return -EINVAL;
	}

	devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr),
				    GFP_KERNEL);
	if (!devices_arr)
		return -ENOMEM;

	err = copy_from_user(devices_arr,
			     (void __user *)args->device_ids_array_ptr,
			     args->n_devices * sizeof(*devices_arr));
	if (err != 0) {
		err = -EFAULT;
		goto copy_from_user_failed;
	}

	mutex_lock(&p->mutex);
	pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle));
	if (!pdd) {
		err = -EINVAL;
		goto get_process_device_data_failed;
	}
	dev = pdd->dev;

	pdd = kfd_bind_process_to_device(dev, p);
	if (IS_ERR(pdd)) {
		err = PTR_ERR(pdd);
		goto bind_process_to_device_failed;
	}

	mem = kfd_process_device_translate_handle(pdd,
						  GET_IDR_HANDLE(args->handle));
	if (!mem) {
		err = -ENOMEM;
		goto get_mem_obj_from_handle_failed;
	}

	for (i = args->n_success; i < args->n_devices; i++) {
		peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
		if (!peer_pdd) {
			pr_debug("Getting device by id failed for 0x%x\n",
				 devices_arr[i]);
			err = -EINVAL;
			goto get_mem_obj_from_handle_failed;
		}

		peer_pdd = kfd_bind_process_to_device(peer_pdd->dev, p);
		if (IS_ERR(peer_pdd)) {
			err = PTR_ERR(peer_pdd);
			goto get_mem_obj_from_handle_failed;
		}

		err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
			peer_pdd->dev->adev, (struct kgd_mem *)mem,
			peer_pdd->drm_priv);
		if (err) {
			struct pci_dev *pdev = peer_pdd->dev->adev->pdev;

			dev_err(dev->adev->dev,
				"Failed to map peer:%04x:%02x:%02x.%d mem_domain:%d\n",
				pci_domain_nr(pdev->bus),
				pdev->bus->number,
				PCI_SLOT(pdev->devfn),
				PCI_FUNC(pdev->devfn),
				((struct kgd_mem *)mem)->domain);
			goto map_memory_to_gpu_failed;
		}
		args->n_success = i+1;
	}

	err = amdgpu_amdkfd_gpuvm_sync_memory(dev->adev, (struct kgd_mem *) mem, true);
	if (err) {
		pr_debug("Sync memory failed, wait interrupted by user signal\n");
		goto sync_memory_failed;
	}

	mutex_unlock(&p->mutex);

	/* Flush TLBs after waiting for the page table updates to complete */
	for (i = 0; i < args->n_devices; i++) {
		peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
		if (WARN_ON_ONCE(!peer_pdd))
			continue;
		kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY);
	}
	kfree(devices_arr);

	return err;

get_process_device_data_failed:
bind_process_to_device_failed:
get_mem_obj_from_handle_failed:
map_memory_to_gpu_failed:
sync_memory_failed:
	mutex_unlock(&p->mutex);
copy_from_user_failed:
	kfree(devices_arr);

	return err;
}

static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
					   struct kfd_process *p, void *data)
{
	struct kfd_ioctl_unmap_memory_from_gpu_args *args = data;
	struct kfd_process_device *pdd, *peer_pdd;
	void *mem;
	long err = 0;
	uint32_t *devices_arr = NULL, i;
	bool flush_tlb;

	if (!args->n_devices) {
		pr_debug("Device IDs array empty\n");
		return -EINVAL;
	}
	if (args->n_success > args->n_devices) {
		pr_debug("n_success exceeds n_devices\n");
		return -EINVAL;
	}

	devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr),
				    GFP_KERNEL);
	if (!devices_arr)
		return -ENOMEM;

	err = copy_from_user(devices_arr,
			     (void __user *)args->device_ids_array_ptr,
			     args->n_devices * sizeof(*devices_arr));
	if (err != 0) {
		err = -EFAULT;
		goto copy_from_user_failed;
	}

	mutex_lock(&p->mutex);
	pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle));
	if (!pdd) {
		err = -EINVAL;
		goto bind_process_to_device_failed;
	}

	mem = kfd_process_device_translate_handle(pdd,
						  GET_IDR_HANDLE(args->handle));
	if (!mem) {
		err = -ENOMEM;
		goto get_mem_obj_from_handle_failed;
	}

	for (i = args->n_success; i < args->n_devices; i++) {
		peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
		if (!peer_pdd) {
			err = -EINVAL;
			goto get_mem_obj_from_handle_failed;
		}
		err = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
			peer_pdd->dev->adev, (struct kgd_mem *)mem, peer_pdd->drm_priv);
		if (err) {
			pr_err("Failed to unmap from gpu %d/%d\n",
			       i, args->n_devices);
			goto unmap_memory_from_gpu_failed;
		}
		args->n_success = i+1;
	}

	flush_tlb = kfd_flush_tlb_after_unmap(pdd->dev->kfd);
	if (flush_tlb) {
		err = amdgpu_amdkfd_gpuvm_sync_memory(pdd->dev->adev,
				(struct kgd_mem *) mem, true);
		if (err) {
			pr_debug("Sync memory failed, wait interrupted by user signal\n");
			goto sync_memory_failed;
		}
	}

	/* Flush TLBs after waiting for the page table updates to complete */
	for (i = 0; i < args->n_devices; i++) {
		peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
		if (WARN_ON_ONCE(!peer_pdd))
			continue;
		if (flush_tlb)
			kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT);

		/* Remove dma mapping after tlb flush to avoid IO_PAGE_FAULT */
		err = amdgpu_amdkfd_gpuvm_dmaunmap_mem(mem, peer_pdd->drm_priv);
		if (err)
			goto sync_memory_failed;
	}

	mutex_unlock(&p->mutex);

	kfree(devices_arr);

	return 0;

bind_process_to_device_failed:
get_mem_obj_from_handle_failed:
unmap_memory_from_gpu_failed:
sync_memory_failed:
	mutex_unlock(&p->mutex);
copy_from_user_failed:
	kfree(devices_arr);
	return err;
}

static int kfd_ioctl_alloc_queue_gws(struct file *filep,
				     struct kfd_process *p, void *data)
{
	int retval;
	struct kfd_ioctl_alloc_queue_gws_args *args = data;
	struct queue *q;
	struct kfd_node *dev;

	mutex_lock(&p->mutex);
	q = pqm_get_user_queue(&p->pqm, args->queue_id);

	if (q) {
		dev = q->device;
	} else {
		retval = -EINVAL;
		goto out_unlock;
	}

	if (!dev->gws) {
		retval = -ENODEV;
		goto out_unlock;
	}

	if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
		retval = -ENODEV;
		goto out_unlock;
	}

	if (p->debug_trap_enabled && (!kfd_dbg_has_gws_support(dev) ||
				      kfd_dbg_has_cwsr_workaround(dev))) {
		retval = -EBUSY;
		goto out_unlock;
	}

	retval = pqm_set_gws(&p->pqm, args->queue_id, args->num_gws ? dev->gws : NULL);
	mutex_unlock(&p->mutex);

	args->first_gws = 0;
	return retval;

out_unlock:
	mutex_unlock(&p->mutex);
	return retval;
}

static int kfd_ioctl_get_dmabuf_info(struct file *filep,
				     struct kfd_process *p, void *data)
{
	struct kfd_ioctl_get_dmabuf_info_args *args = data;
	struct kfd_node *dev = NULL;
	struct amdgpu_device *dmabuf_adev;
	void *metadata_buffer = NULL;
	uint32_t flags;
	int8_t xcp_id;
	unsigned int i;
	int r;

	/* Find a KFD GPU device that supports the get_dmabuf_info query */
	for (i = 0; kfd_topology_enum_kfd_devices(i, &dev) == 0; i++)
		if (dev && !kfd_devcgroup_check_permission(dev))
			break;
	if (!dev)
		return -EINVAL;

	if (args->metadata_ptr) {
		metadata_buffer = kzalloc(args->metadata_size, GFP_KERNEL);
		if (!metadata_buffer)
			return -ENOMEM;
	}

	/* Get dmabuf info from KGD */
	r = amdgpu_amdkfd_get_dmabuf_info(dev->adev, args->dmabuf_fd,
					  &dmabuf_adev, &args->size,
					  metadata_buffer, args->metadata_size,
					  &args->metadata_size, &flags, &xcp_id);
	if (r)
		goto exit;

	if (xcp_id >= 0)
		args->gpu_id = dmabuf_adev->kfd.dev->nodes[xcp_id]->id;
	else
		args->gpu_id = dev->id;
	args->flags = flags;

	/* Copy metadata buffer to user mode */
	if (metadata_buffer) {
		r = copy_to_user((void __user *)args->metadata_ptr,
				 metadata_buffer, args->metadata_size);
		if (r != 0)
			r = -EFAULT;
	}

exit:
	kfree(metadata_buffer);

	return r;
}

static int kfd_ioctl_import_dmabuf(struct file *filep,
				   struct kfd_process *p, void *data)
{
	struct kfd_ioctl_import_dmabuf_args *args = data;
	struct kfd_process_device *pdd;
	int idr_handle;
	uint64_t size;
	void *mem;
	int r;

	mutex_lock(&p->mutex);
	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
	if (!pdd) {
		r = -EINVAL;
		goto err_unlock;
	}

	pdd = kfd_bind_process_to_device(pdd->dev, p);
	if (IS_ERR(pdd)) {
		r = PTR_ERR(pdd);
		goto err_unlock;
	}

	r = amdgpu_amdkfd_gpuvm_import_dmabuf_fd(pdd->dev->adev, args->dmabuf_fd,
						 args->va_addr, pdd->drm_priv,
						 (struct kgd_mem **)&mem, &size,
						 NULL);
	if (r)
		goto err_unlock;

	idr_handle = kfd_process_device_create_obj_handle(pdd, mem);
	if (idr_handle < 0) {
		r = -EFAULT;
		goto err_free;
	}

	mutex_unlock(&p->mutex);

	args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);

	return 0;

err_free:
	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, (struct kgd_mem *)mem,
					       pdd->drm_priv, NULL);
err_unlock:
	mutex_unlock(&p->mutex);
	return r;
}

static int kfd_ioctl_export_dmabuf(struct file *filep,
				   struct kfd_process *p, void *data)
{
	struct kfd_ioctl_export_dmabuf_args *args = data;
	struct kfd_process_device *pdd;
	struct dma_buf *dmabuf;
	struct kfd_node *dev;
	void *mem;
	int ret = 0;

	dev = kfd_device_by_id(GET_GPU_ID(args->handle));
	if (!dev)
		return -EINVAL;

	mutex_lock(&p->mutex);

	pdd = kfd_get_process_device_data(dev, p);
	if (!pdd) {
		ret = -EINVAL;
		goto err_unlock;
	}

	mem = kfd_process_device_translate_handle(pdd,
						  GET_IDR_HANDLE(args->handle));
	if (!mem) {
		ret = -EINVAL;
		goto err_unlock;
	}

	ret = amdgpu_amdkfd_gpuvm_export_dmabuf(mem, &dmabuf);
	mutex_unlock(&p->mutex);
	if (ret)
		goto err_out;

	ret = dma_buf_fd(dmabuf, args->flags);
	if (ret < 0) {
		dma_buf_put(dmabuf);
		goto err_out;
	}
	/* dma_buf_fd transfers the dmabuf reference to the returned fd, so
	 * there is no need to put the reference here.
	 */
	args->dmabuf_fd = ret;

	return 0;

err_unlock:
	mutex_unlock(&p->mutex);
err_out:
	return ret;
}

/* Handle requests for watching SMI events */
static int kfd_ioctl_smi_events(struct file *filep,
				struct kfd_process *p, void *data)
{
	struct kfd_ioctl_smi_events_args *args = data;
	struct kfd_process_device *pdd;

	mutex_lock(&p->mutex);

	pdd = kfd_process_device_data_by_id(p, args->gpuid);
	mutex_unlock(&p->mutex);
	if (!pdd)
		return -EINVAL;

	return kfd_smi_event_open(pdd->dev, &args->anon_fd);
}

#if IS_ENABLED(CONFIG_HSA_AMD_SVM)

static int kfd_ioctl_set_xnack_mode(struct file *filep,
				    struct kfd_process *p, void *data)
{
	struct kfd_ioctl_set_xnack_mode_args *args = data;
	int r = 0;

	mutex_lock(&p->mutex);
	if (args->xnack_enabled >= 0) {
		if (!list_empty(&p->pqm.queues)) {
			pr_debug("Process has user queues running\n");
			r = -EBUSY;
			goto out_unlock;
		}

		if (p->xnack_enabled == args->xnack_enabled)
			goto out_unlock;

		if (args->xnack_enabled && !kfd_process_xnack_mode(p, true)) {
			r = -EPERM;
			goto out_unlock;
		}

		r = svm_range_switch_xnack_reserve_mem(p, args->xnack_enabled);
	} else {
		args->xnack_enabled = p->xnack_enabled;
	}

out_unlock:
	mutex_unlock(&p->mutex);

	return r;
}

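/* Illustrative SVM attribute call (sketch only; the op codes, attribute
 * types and structs are from uapi/linux/kfd_ioctl.h, and addr/size must be
 * page aligned):
 *
 *	struct kfd_ioctl_svm_args *sa =
 *		calloc(1, sizeof(*sa) + sizeof(struct kfd_ioctl_svm_attribute));
 *	sa->start_addr = addr;
 *	sa->size = size;
 *	sa->op = KFD_IOCTL_SVM_OP_SET_ATTR;
 *	sa->nattr = 1;
 *	sa->attrs[0].type = KFD_IOCTL_SVM_ATTR_PREFETCH_LOC;
 *	sa->attrs[0].value = gpu_id;
 *	ioctl(kfd_fd, AMDKFD_IOC_SVM, sa);
 */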
static int kfd_ioctl_svm(struct file *filep, struct kfd_process *p, void *data)
{
	struct kfd_ioctl_svm_args *args = data;
	int r = 0;

	pr_debug("start 0x%llx size 0x%llx op 0x%x nattr 0x%x\n",
		 args->start_addr, args->size, args->op, args->nattr);

	if ((args->start_addr & ~PAGE_MASK) || (args->size & ~PAGE_MASK))
		return -EINVAL;
	if (!args->start_addr || !args->size)
		return -EINVAL;

	r = svm_ioctl(p, args->op, args->start_addr, args->size, args->nattr,
		      args->attrs);

	return r;
}
#else
static int kfd_ioctl_set_xnack_mode(struct file *filep,
				    struct kfd_process *p, void *data)
{
	return -EPERM;
}
static int kfd_ioctl_svm(struct file *filep, struct kfd_process *p, void *data)
{
	return -EPERM;
}
#endif

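/* CRIU checkpoint helpers. Each criu_checkpoint_* routine serializes one
 * category of state (process, devices, BOs) into the user-supplied
 * private-data blob, advancing *priv_offset so the records are laid out
 * back to back in the order they will be consumed on restore.
 */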
static int criu_checkpoint_process(struct kfd_process *p,
				   uint8_t __user *user_priv_data,
				   uint64_t *priv_offset)
{
	struct kfd_criu_process_priv_data process_priv;
	int ret;

	memset(&process_priv, 0, sizeof(process_priv));

	process_priv.version = KFD_CRIU_PRIV_VERSION;
	/* For CR, we don't consider negative xnack mode, which is used only
	 * for querying without changing it. Here 0 simply means disabled and
	 * 1 means enabled (retry faults to find a valid PTE).
	 */
	process_priv.xnack_mode = p->xnack_enabled ? 1 : 0;

	ret = copy_to_user(user_priv_data + *priv_offset,
			   &process_priv, sizeof(process_priv));

	if (ret) {
		pr_err("Failed to copy process information to user\n");
		ret = -EFAULT;
	}

	*priv_offset += sizeof(process_priv);
	return ret;
}

static int criu_checkpoint_devices(struct kfd_process *p,
				   uint32_t num_devices,
				   uint8_t __user *user_addr,
				   uint8_t __user *user_priv_data,
				   uint64_t *priv_offset)
{
	struct kfd_criu_device_priv_data *device_priv = NULL;
	struct kfd_criu_device_bucket *device_buckets = NULL;
	int ret = 0, i;

	device_buckets = kvzalloc(num_devices * sizeof(*device_buckets), GFP_KERNEL);
	if (!device_buckets) {
		ret = -ENOMEM;
		goto exit;
	}

	device_priv = kvzalloc(num_devices * sizeof(*device_priv), GFP_KERNEL);
	if (!device_priv) {
		ret = -ENOMEM;
		goto exit;
	}

	for (i = 0; i < num_devices; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		device_buckets[i].user_gpu_id = pdd->user_gpu_id;
		device_buckets[i].actual_gpu_id = pdd->dev->id;

		/*
		 * priv_data does not contain useful information for now and is
		 * reserved for future use, so we do not set its contents.
		 */
	}

	ret = copy_to_user(user_addr, device_buckets, num_devices * sizeof(*device_buckets));
	if (ret) {
		pr_err("Failed to copy device information to user\n");
		ret = -EFAULT;
		goto exit;
	}

	ret = copy_to_user(user_priv_data + *priv_offset,
			   device_priv,
			   num_devices * sizeof(*device_priv));
	if (ret) {
		pr_err("Failed to copy device information to user\n");
		ret = -EFAULT;
	}
	*priv_offset += num_devices * sizeof(*device_priv);

exit:
	kvfree(device_buckets);
	kvfree(device_priv);
	return ret;
}

static uint32_t get_process_num_bos(struct kfd_process *p)
{
	uint32_t num_of_bos = 0;
	int i;

	/* Run over all PDDs of the process */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];
		void *mem;
		int id;

		idr_for_each_entry(&pdd->alloc_idr, mem, id) {
			struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;

			if (!kgd_mem->va || kgd_mem->va > pdd->gpuvm_base)
				num_of_bos++;
		}
	}
	return num_of_bos;
}

static int criu_get_prime_handle(struct kgd_mem *mem,
				 int flags, u32 *shared_fd)
{
	struct dma_buf *dmabuf;
	int ret;

	ret = amdgpu_amdkfd_gpuvm_export_dmabuf(mem, &dmabuf);
	if (ret) {
		pr_err("dmabuf export failed for the BO\n");
		return ret;
	}

	ret = dma_buf_fd(dmabuf, flags);
	if (ret < 0) {
		pr_err("dmabuf create fd failed, ret:%d\n", ret);
		goto out_free_dmabuf;
	}

	*shared_fd = ret;
	return 0;

out_free_dmabuf:
	dma_buf_put(dmabuf);
	return ret;
}

1885 static int criu_checkpoint_bos(struct kfd_process *p,
1886 uint32_t num_bos,
1887 uint8_t __user *user_bos,
1888 uint8_t __user *user_priv_data,
1889 uint64_t *priv_offset)
1890 {
1891 struct kfd_criu_bo_bucket *bo_buckets;
1892 struct kfd_criu_bo_priv_data *bo_privs;
1893 int ret = 0, pdd_index, bo_index = 0, id;
1894 void *mem;
1895
1896 bo_buckets = kvzalloc(num_bos * sizeof(*bo_buckets), GFP_KERNEL);
1897 if (!bo_buckets)
1898 return -ENOMEM;
1899
1900 bo_privs = kvzalloc(num_bos * sizeof(*bo_privs), GFP_KERNEL);
1901 if (!bo_privs) {
1902 ret = -ENOMEM;
1903 goto exit;
1904 }
1905
1906 for (pdd_index = 0; pdd_index < p->n_pdds; pdd_index++) {
1907 struct kfd_process_device *pdd = p->pdds[pdd_index];
1908 struct amdgpu_bo *dumper_bo;
1909 struct kgd_mem *kgd_mem;
1910
1911 idr_for_each_entry(&pdd->alloc_idr, mem, id) {
1912 struct kfd_criu_bo_bucket *bo_bucket;
1913 struct kfd_criu_bo_priv_data *bo_priv;
1914 int i, dev_idx = 0;
1915
1916 if (!mem) {
1917 ret = -ENOMEM;
1918 goto exit;
1919 }
1920
1921 kgd_mem = (struct kgd_mem *)mem;
1922 dumper_bo = kgd_mem->bo;
1923
1924 /* Skip checkpointing BOs that are used for Trap handler
1925 * code and state. Currently, these BOs have a VA that
1926 * is less than the GPUVM base.
1927 */
1928 if (kgd_mem->va && kgd_mem->va <= pdd->gpuvm_base)
1929 continue;
1930
1931 bo_bucket = &bo_buckets[bo_index];
1932 bo_priv = &bo_privs[bo_index];
1933
1934 bo_bucket->gpu_id = pdd->user_gpu_id;
1935 bo_bucket->addr = (uint64_t)kgd_mem->va;
1936 bo_bucket->size = amdgpu_bo_size(dumper_bo);
1937 bo_bucket->alloc_flags = (uint32_t)kgd_mem->alloc_flags;
1938 bo_priv->idr_handle = id;
1939
1940 if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
1941 ret = amdgpu_ttm_tt_get_userptr(&dumper_bo->tbo,
1942 &bo_priv->user_addr);
1943 if (ret) {
1944 pr_err("Failed to obtain user address for user-pointer bo\n");
1945 goto exit;
1946 }
1947 }
1948 if (bo_bucket->alloc_flags
1949 & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT)) {
1950 ret = criu_get_prime_handle(kgd_mem,
1951 bo_bucket->alloc_flags &
1952 KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? DRM_RDWR : 0,
1953 &bo_bucket->dmabuf_fd);
1954 if (ret)
1955 goto exit;
1956 } else {
1957 bo_bucket->dmabuf_fd = KFD_INVALID_FD;
1958 }
1959
1960 if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL)
1961 bo_bucket->offset = KFD_MMAP_TYPE_DOORBELL |
1962 KFD_MMAP_GPU_ID(pdd->dev->id);
1963 else if (bo_bucket->alloc_flags &
1964 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)
1965 bo_bucket->offset = KFD_MMAP_TYPE_MMIO |
1966 KFD_MMAP_GPU_ID(pdd->dev->id);
1967 else
1968 bo_bucket->offset = amdgpu_bo_mmap_offset(dumper_bo);
1969
1970 for (i = 0; i < p->n_pdds; i++) {
1971 if (amdgpu_amdkfd_bo_mapped_to_dev(p->pdds[i]->dev->adev, kgd_mem))
1972 bo_priv->mapped_gpuids[dev_idx++] = p->pdds[i]->user_gpu_id;
1973 }
1974
1975 pr_debug("bo_size = 0x%llx, bo_addr = 0x%llx bo_offset = 0x%llx\n"
1976 "gpu_id = 0x%x alloc_flags = 0x%x idr_handle = 0x%x",
1977 bo_bucket->size,
1978 bo_bucket->addr,
1979 bo_bucket->offset,
1980 bo_bucket->gpu_id,
1981 bo_bucket->alloc_flags,
1982 bo_priv->idr_handle);
1983 bo_index++;
1984 }
1985 }
1986
1987 ret = copy_to_user(user_bos, bo_buckets, num_bos * sizeof(*bo_buckets));
1988 if (ret) {
1989 pr_err("Failed to copy BO information to user\n");
1990 ret = -EFAULT;
1991 goto exit;
1992 }
1993
1994 ret = copy_to_user(user_priv_data + *priv_offset, bo_privs, num_bos * sizeof(*bo_privs));
1995 if (ret) {
1996 pr_err("Failed to copy BO priv information to user\n");
1997 ret = -EFAULT;
1998 goto exit;
1999 }
2000
2001 *priv_offset += num_bos * sizeof(*bo_privs);
2002
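/* On failure, close every dmabuf fd installed so far so the references
 * taken in criu_get_prime_handle() are dropped.
 */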
2003 exit:
2004 while (ret && bo_index--) {
2005 if (bo_buckets[bo_index].alloc_flags
2006 & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT))
2007 close_fd(bo_buckets[bo_index].dmabuf_fd);
2008 }
2009
2010 kvfree(bo_buckets);
2011 kvfree(bo_privs);
2012 return ret;
2013 }
2014
2015 static int criu_get_process_object_info(struct kfd_process *p,
2016 uint32_t *num_devices,
2017 uint32_t *num_bos,
2018 uint32_t *num_objects,
2019 uint64_t *objs_priv_size)
2020 {
2021 uint64_t queues_priv_data_size, svm_priv_data_size, priv_size;
2022 uint32_t num_queues, num_events, num_svm_ranges;
2023 int ret;
2024
2025 *num_devices = p->n_pdds;
2026 *num_bos = get_process_num_bos(p);
2027
2028 ret = kfd_process_get_queue_info(p, &num_queues, &queues_priv_data_size);
2029 if (ret)
2030 return ret;
2031
2032 num_events = kfd_get_num_events(p);
2033
2034 ret = svm_range_get_info(p, &num_svm_ranges, &svm_priv_data_size);
2035 if (ret)
2036 return ret;
2037
2038 *num_objects = num_queues + num_events + num_svm_ranges;
2039
2040 if (objs_priv_size) {
2041 priv_size = sizeof(struct kfd_criu_process_priv_data);
2042 priv_size += *num_devices * sizeof(struct kfd_criu_device_priv_data);
2043 priv_size += *num_bos * sizeof(struct kfd_criu_bo_priv_data);
2044 priv_size += queues_priv_data_size;
2045 priv_size += num_events * sizeof(struct kfd_criu_event_priv_data);
2046 priv_size += svm_priv_data_size;
2047 *objs_priv_size = priv_size;
2048 }
2049 return 0;
2050 }
2051
2052 static int criu_checkpoint(struct file *filep,
2053 struct kfd_process *p,
2054 struct kfd_ioctl_criu_args *args)
2055 {
2056 int ret;
2057 uint32_t num_devices, num_bos, num_objects;
2058 uint64_t priv_size, priv_offset = 0, bo_priv_offset;
2059
2060 if (!args->devices || !args->bos || !args->priv_data)
2061 return -EINVAL;
2062
2063 mutex_lock(&p->mutex);
2064
2065 if (!p->n_pdds) {
2066 pr_err("No pdd for given process\n");
2067 ret = -ENODEV;
2068 goto exit_unlock;
2069 }
2070
2071 /* Confirm all process queues are evicted */
2072 if (!p->queues_paused) {
2073 pr_err("Cannot dump process when queues are not in evicted state\n");
2074 /* CRIU plugin did not call op PROCESS_INFO before checkpointing */
2075 ret = -EINVAL;
2076 goto exit_unlock;
2077 }
2078
2079 ret = criu_get_process_object_info(p, &num_devices, &num_bos, &num_objects, &priv_size);
2080 if (ret)
2081 goto exit_unlock;
2082
2083 if (num_devices != args->num_devices ||
2084 num_bos != args->num_bos ||
2085 num_objects != args->num_objects ||
2086 priv_size != args->priv_data_size) {
2087
2088 ret = -EINVAL;
2089 goto exit_unlock;
2090 }
2091
2092 /* each function will store private data inside priv_data and adjust priv_offset */
2093 ret = criu_checkpoint_process(p, (uint8_t __user *)args->priv_data, &priv_offset);
2094 if (ret)
2095 goto exit_unlock;
2096
2097 ret = criu_checkpoint_devices(p, num_devices, (uint8_t __user *)args->devices,
2098 (uint8_t __user *)args->priv_data, &priv_offset);
2099 if (ret)
2100 goto exit_unlock;
2101
2102 /* Leave room for BOs in the private data. They need to be restored
2103 * before events, but we checkpoint them last to simplify the error
2104 * handling.
2105 */
2106 bo_priv_offset = priv_offset;
2107 priv_offset += num_bos * sizeof(struct kfd_criu_bo_priv_data);
2108
2109 if (num_objects) {
2110 ret = kfd_criu_checkpoint_queues(p, (uint8_t __user *)args->priv_data,
2111 &priv_offset);
2112 if (ret)
2113 goto exit_unlock;
2114
2115 ret = kfd_criu_checkpoint_events(p, (uint8_t __user *)args->priv_data,
2116 &priv_offset);
2117 if (ret)
2118 goto exit_unlock;
2119
2120 ret = kfd_criu_checkpoint_svm(p, (uint8_t __user *)args->priv_data, &priv_offset);
2121 if (ret)
2122 goto exit_unlock;
2123 }
2124
2125 /* This must be the last thing in this function that can fail.
2126 * Otherwise we leak dmabuf file descriptors.
2127 */
2128 ret = criu_checkpoint_bos(p, num_bos, (uint8_t __user *)args->bos,
2129 (uint8_t __user *)args->priv_data, &bo_priv_offset);
2130
2131 exit_unlock:
2132 mutex_unlock(&p->mutex);
2133 if (ret)
2134 pr_err("Failed to dump CRIU ret:%d\n", ret);
2135 else
2136 pr_debug("CRIU dump ret:%d\n", ret);
2137
2138 return ret;
2139 }
2140
2141 static int criu_restore_process(struct kfd_process *p,
2142 struct kfd_ioctl_criu_args *args,
2143 uint64_t *priv_offset,
2144 uint64_t max_priv_data_size)
2145 {
2146 int ret = 0;
2147 struct kfd_criu_process_priv_data process_priv;
2148
2149 if (*priv_offset + sizeof(process_priv) > max_priv_data_size)
2150 return -EINVAL;
2151
2152 ret = copy_from_user(&process_priv,
2153 (void __user *)(args->priv_data + *priv_offset),
2154 sizeof(process_priv));
2155 if (ret) {
2156 pr_err("Failed to copy process private information from user\n");
2157 ret = -EFAULT;
2158 goto exit;
2159 }
2160 *priv_offset += sizeof(process_priv);
2161
2162 if (process_priv.version != KFD_CRIU_PRIV_VERSION) {
2163 pr_err("Invalid CRIU API version (checkpointed:%d current:%d)\n",
2164 process_priv.version, KFD_CRIU_PRIV_VERSION);
2165 return -EINVAL;
2166 }
2167
2168 pr_debug("Setting XNACK mode\n");
2169 if (process_priv.xnack_mode && !kfd_process_xnack_mode(p, true)) {
2170 pr_err("xnack mode cannot be set\n");
2171 ret = -EPERM;
2172 goto exit;
2173 } else {
2174 pr_debug("set xnack mode: %d\n", process_priv.xnack_mode);
2175 p->xnack_enabled = process_priv.xnack_mode;
2176 }
2177
2178 exit:
2179 return ret;
2180 }
2181
2182 static int criu_restore_devices(struct kfd_process *p,
2183 struct kfd_ioctl_criu_args *args,
2184 uint64_t *priv_offset,
2185 uint64_t max_priv_data_size)
2186 {
2187 struct kfd_criu_device_bucket *device_buckets;
2188 struct kfd_criu_device_priv_data *device_privs;
2189 int ret = 0;
2190 uint32_t i;
2191
2192 if (args->num_devices != p->n_pdds)
2193 return -EINVAL;
2194
2195 if (*priv_offset + (args->num_devices * sizeof(*device_privs)) > max_priv_data_size)
2196 return -EINVAL;
2197
2198 device_buckets = kmalloc_array(args->num_devices, sizeof(*device_buckets), GFP_KERNEL);
2199 if (!device_buckets)
2200 return -ENOMEM;
2201
2202 ret = copy_from_user(device_buckets, (void __user *)args->devices,
2203 args->num_devices * sizeof(*device_buckets));
2204 if (ret) {
2205 pr_err("Failed to copy devices buckets from user\n");
2206 ret = -EFAULT;
2207 goto exit;
2208 }
2209
2210 for (i = 0; i < args->num_devices; i++) {
2211 struct kfd_node *dev;
2212 struct kfd_process_device *pdd;
2213 struct file *drm_file;
2214
2215 /* device private data is not currently used */
2216
2217 if (!device_buckets[i].user_gpu_id) {
2218 pr_err("Invalid user gpu_id\n");
2219 ret = -EINVAL;
2220 goto exit;
2221 }
2222
2223 dev = kfd_device_by_id(device_buckets[i].actual_gpu_id);
2224 if (!dev) {
2225 pr_err("Failed to find device with gpu_id = %x\n",
2226 device_buckets[i].actual_gpu_id);
2227 ret = -EINVAL;
2228 goto exit;
2229 }
2230
2231 pdd = kfd_get_process_device_data(dev, p);
2232 if (!pdd) {
2233 pr_err("Failed to get pdd for gpu_id = %x\n",
2234 device_buckets[i].actual_gpu_id);
2235 ret = -EINVAL;
2236 goto exit;
2237 }
2238 pdd->user_gpu_id = device_buckets[i].user_gpu_id;
2239
2240 drm_file = fget(device_buckets[i].drm_fd);
2241 if (!drm_file) {
2242 pr_err("Invalid render node file descriptor sent from plugin (%d)\n",
2243 device_buckets[i].drm_fd);
2244 ret = -EINVAL;
2245 goto exit;
2246 }
2247
2248 if (pdd->drm_file) {
2249 ret = -EINVAL;
2250 goto exit;
2251 }
2252
2253 /* create the vm using render nodes for kfd pdd */
2254 if (kfd_process_device_init_vm(pdd, drm_file)) {
2255 pr_err("could not init vm for given pdd\n");
2256 /* On success, the PDD keeps the drm_file reference */
2257 fput(drm_file);
2258 ret = -EINVAL;
2259 goto exit;
2260 }
2261 /*
2262 * pdd now already has the vm bound to the render node, so the call below
2263 * won't create a new exclusive kfd mapping but reuse the existing one with
2264 * renderDXXX. It is still needed for iommu v2 binding and runtime pm.
2265 */
2266 pdd = kfd_bind_process_to_device(dev, p);
2267 if (IS_ERR(pdd)) {
2268 ret = PTR_ERR(pdd);
2269 goto exit;
2270 }
2271
2272 if (!pdd->qpd.proc_doorbells) {
2273 ret = kfd_alloc_process_doorbells(dev->kfd, pdd);
2274 if (ret)
2275 goto exit;
2276 }
2277 }
2278
2279 /*
2280 * We are not copying device private data from user as we are not using it
2281 * for now, but we still advance the offset past its private data.
2282 */
2283 *priv_offset += args->num_devices * sizeof(*device_privs);
2284
2285 exit:
2286 kfree(device_buckets);
2287 return ret;
2288 }
2289
2290 static int criu_restore_memory_of_gpu(struct kfd_process_device *pdd,
2291 struct kfd_criu_bo_bucket *bo_bucket,
2292 struct kfd_criu_bo_priv_data *bo_priv,
2293 struct kgd_mem **kgd_mem)
2294 {
2295 int idr_handle;
2296 int ret;
2297 const bool criu_resume = true;
2298 u64 offset;
2299
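/*
 * "offset" is an in/out parameter of the allocator below: for doorbell,
 * MMIO and userptr BOs it must be pre-seeded here with the right backing
 * address, while for GTT/VRAM BOs the allocator returns the mmap offset
 * that userspace will need.
 */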
2300 if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
2301 if (bo_bucket->size !=
2302 kfd_doorbell_process_slice(pdd->dev->kfd))
2303 return -EINVAL;
2304
2305 offset = kfd_get_process_doorbells(pdd);
2306 if (!offset)
2307 return -ENOMEM;
2308 } else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
2309 /* MMIO BOs need remapped bus address */
2310 if (bo_bucket->size != PAGE_SIZE) {
2311 pr_err("Invalid page size\n");
2312 return -EINVAL;
2313 }
2314 offset = pdd->dev->adev->rmmio_remap.bus_addr;
2315 if (!offset || (PAGE_SIZE > 4096)) {
2316 pr_err("amdgpu_amdkfd_get_mmio_remap_phys_addr failed\n");
2317 return -ENOMEM;
2318 }
2319 } else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
2320 offset = bo_priv->user_addr;
2321 }
2322 /* Create the BO */
2323 ret = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(pdd->dev->adev, bo_bucket->addr,
2324 bo_bucket->size, pdd->drm_priv, kgd_mem,
2325 &offset, bo_bucket->alloc_flags, criu_resume);
2326 if (ret) {
2327 pr_err("Could not create the BO\n");
2328 return ret;
2329 }
2330 pr_debug("New BO created: size:0x%llx addr:0x%llx offset:0x%llx\n",
2331 bo_bucket->size, bo_bucket->addr, offset);
2332
2333 /* Restore previous IDR handle */
2334 pr_debug("Restoring old IDR handle for the BO");
2335 idr_handle = idr_alloc(&pdd->alloc_idr, *kgd_mem, bo_priv->idr_handle,
2336 bo_priv->idr_handle + 1, GFP_KERNEL);
2337
2338 if (idr_handle < 0) {
2339 pr_err("Could not allocate idr\n");
2340 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, *kgd_mem, pdd->drm_priv,
2341 NULL);
2342 return -ENOMEM;
2343 }
2344
2345 if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL)
2346 bo_bucket->restored_offset = KFD_MMAP_TYPE_DOORBELL | KFD_MMAP_GPU_ID(pdd->dev->id);
2347 if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
2348 bo_bucket->restored_offset = KFD_MMAP_TYPE_MMIO | KFD_MMAP_GPU_ID(pdd->dev->id);
2349 } else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
2350 bo_bucket->restored_offset = offset;
2351 } else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
2352 bo_bucket->restored_offset = offset;
2353 /* Update the VRAM usage count */
2354 WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + bo_bucket->size);
2355 }
2356 return 0;
2357 }
2358
2359 static int criu_restore_bo(struct kfd_process *p,
2360 struct kfd_criu_bo_bucket *bo_bucket,
2361 struct kfd_criu_bo_priv_data *bo_priv)
2362 {
2363 struct kfd_process_device *pdd;
2364 struct kgd_mem *kgd_mem;
2365 int ret;
2366 int j;
2367
2368 pr_debug("Restoring BO size:0x%llx addr:0x%llx gpu_id:0x%x flags:0x%x idr_handle:0x%x\n",
2369 bo_bucket->size, bo_bucket->addr, bo_bucket->gpu_id, bo_bucket->alloc_flags,
2370 bo_priv->idr_handle);
2371
2372 pdd = kfd_process_device_data_by_id(p, bo_bucket->gpu_id);
2373 if (!pdd) {
2374 pr_err("Failed to get pdd\n");
2375 return -ENODEV;
2376 }
2377
2378 ret = criu_restore_memory_of_gpu(pdd, bo_bucket, bo_priv, &kgd_mem);
2379 if (ret)
2380 return ret;
2381
2382 /* now map these BOs to GPU/s */
2383 for (j = 0; j < p->n_pdds; j++) {
2384 struct kfd_node *peer;
2385 struct kfd_process_device *peer_pdd;
2386
2387 if (!bo_priv->mapped_gpuids[j])
2388 break;
2389
2390 peer_pdd = kfd_process_device_data_by_id(p, bo_priv->mapped_gpuids[j]);
2391 if (!peer_pdd)
2392 return -EINVAL;
2393
2394 peer = peer_pdd->dev;
2395
2396 peer_pdd = kfd_bind_process_to_device(peer, p);
2397 if (IS_ERR(peer_pdd))
2398 return PTR_ERR(peer_pdd);
2399
2400 ret = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(peer->adev, kgd_mem,
2401 peer_pdd->drm_priv);
2402 if (ret) {
2403 pr_err("Failed to map to gpu %d/%d\n", j, p->n_pdds);
2404 return ret;
2405 }
2406 }
2407
2408 pr_debug("map memory was successful for the BO\n");
2409 /* create the dmabuf object and export the bo */
2410 if (bo_bucket->alloc_flags
2411 & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT)) {
2412 ret = criu_get_prime_handle(kgd_mem, DRM_RDWR,
2413 &bo_bucket->dmabuf_fd);
2414 if (ret)
2415 return ret;
2416 } else {
2417 bo_bucket->dmabuf_fd = KFD_INVALID_FD;
2418 }
2419
2420 return 0;
2421 }
2422
2423 static int criu_restore_bos(struct kfd_process *p,
2424 struct kfd_ioctl_criu_args *args,
2425 uint64_t *priv_offset,
2426 uint64_t max_priv_data_size)
2427 {
2428 struct kfd_criu_bo_bucket *bo_buckets = NULL;
2429 struct kfd_criu_bo_priv_data *bo_privs = NULL;
2430 int ret = 0;
2431 uint32_t i = 0;
2432
2433 if (*priv_offset + (args->num_bos * sizeof(*bo_privs)) > max_priv_data_size)
2434 return -EINVAL;
2435
2436 /* Prevent MMU notifications until stage-4 IOCTL (CRIU_RESUME) is received */
2437 amdgpu_amdkfd_block_mmu_notifications(p->kgd_process_info);
2438
2439 bo_buckets = kvmalloc_array(args->num_bos, sizeof(*bo_buckets), GFP_KERNEL);
2440 if (!bo_buckets)
2441 return -ENOMEM;
2442
2443 ret = copy_from_user(bo_buckets, (void __user *)args->bos,
2444 args->num_bos * sizeof(*bo_buckets));
2445 if (ret) {
2446 pr_err("Failed to copy BOs information from user\n");
2447 ret = -EFAULT;
2448 goto exit;
2449 }
2450
2451 bo_privs = kvmalloc_array(args->num_bos, sizeof(*bo_privs), GFP_KERNEL);
2452 if (!bo_privs) {
2453 ret = -ENOMEM;
2454 goto exit;
2455 }
2456
2457 ret = copy_from_user(bo_privs, (void __user *)args->priv_data + *priv_offset,
2458 args->num_bos * sizeof(*bo_privs));
2459 if (ret) {
2460 pr_err("Failed to copy BOs information from user\n");
2461 ret = -EFAULT;
2462 goto exit;
2463 }
2464 *priv_offset += args->num_bos * sizeof(*bo_privs);
2465
2466 /* Create and map new BOs */
2467 for (; i < args->num_bos; i++) {
2468 ret = criu_restore_bo(p, &bo_buckets[i], &bo_privs[i]);
2469 if (ret) {
2470 pr_debug("Failed to restore BO[%d] ret%d\n", i, ret);
2471 goto exit;
2472 }
2473 } /* done */
2474
2475 /* Copy only the buckets back so user can read bo_buckets[N].restored_offset */
2476 ret = copy_to_user((void __user *)args->bos,
2477 bo_buckets,
2478 (args->num_bos * sizeof(*bo_buckets)));
2479 if (ret)
2480 ret = -EFAULT;
2481
2482 exit:
2483 while (ret && i--) {
2484 if (bo_buckets[i].alloc_flags
2485 & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT))
2486 close_fd(bo_buckets[i].dmabuf_fd);
2487 }
2488 kvfree(bo_buckets);
2489 kvfree(bo_privs);
2490 return ret;
2491 }
2492
2493 static int criu_restore_objects(struct file *filep,
2494 struct kfd_process *p,
2495 struct kfd_ioctl_criu_args *args,
2496 uint64_t *priv_offset,
2497 uint64_t max_priv_data_size)
2498 {
2499 int ret = 0;
2500 uint32_t i;
2501
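/* Every object-type private struct must start with object_type so the
 * dispatch below can peek at it before knowing the full record type.
 */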
2502 BUILD_BUG_ON(offsetof(struct kfd_criu_queue_priv_data, object_type));
2503 BUILD_BUG_ON(offsetof(struct kfd_criu_event_priv_data, object_type));
2504 BUILD_BUG_ON(offsetof(struct kfd_criu_svm_range_priv_data, object_type));
2505
2506 for (i = 0; i < args->num_objects; i++) {
2507 uint32_t object_type;
2508
2509 if (*priv_offset + sizeof(object_type) > max_priv_data_size) {
2510 pr_err("Invalid private data size\n");
2511 return -EINVAL;
2512 }
2513
2514 ret = get_user(object_type, (uint32_t __user *)(args->priv_data + *priv_offset));
2515 if (ret) {
2516 pr_err("Failed to copy private information from user\n");
2517 goto exit;
2518 }
2519
2520 switch (object_type) {
2521 case KFD_CRIU_OBJECT_TYPE_QUEUE:
2522 ret = kfd_criu_restore_queue(p, (uint8_t __user *)args->priv_data,
2523 priv_offset, max_priv_data_size);
2524 if (ret)
2525 goto exit;
2526 break;
2527 case KFD_CRIU_OBJECT_TYPE_EVENT:
2528 ret = kfd_criu_restore_event(filep, p, (uint8_t __user *)args->priv_data,
2529 priv_offset, max_priv_data_size);
2530 if (ret)
2531 goto exit;
2532 break;
2533 case KFD_CRIU_OBJECT_TYPE_SVM_RANGE:
2534 ret = kfd_criu_restore_svm(p, (uint8_t __user *)args->priv_data,
2535 priv_offset, max_priv_data_size);
2536 if (ret)
2537 goto exit;
2538 break;
2539 default:
2540 pr_err("Invalid object type:%u at index:%d\n", object_type, i);
2541 ret = -EINVAL;
2542 goto exit;
2543 }
2544 }
2545 exit:
2546 return ret;
2547 }
2548
2549 static int criu_restore(struct file *filep,
2550 struct kfd_process *p,
2551 struct kfd_ioctl_criu_args *args)
2552 {
2553 uint64_t priv_offset = 0;
2554 int ret = 0;
2555
2556 pr_debug("CRIU restore (num_devices:%u num_bos:%u num_objects:%u priv_data_size:%llu)\n",
2557 args->num_devices, args->num_bos, args->num_objects, args->priv_data_size);
2558
2559 if (!args->bos || !args->devices || !args->priv_data || !args->priv_data_size ||
2560 !args->num_devices || !args->num_bos)
2561 return -EINVAL;
2562
2563 mutex_lock(&p->mutex);
2564
2565 /*
2566 * Set the process to evicted state to avoid running any new queues before all the memory
2567 * mappings are ready.
2568 */
2569 ret = kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_CRIU_RESTORE);
2570 if (ret)
2571 goto exit_unlock;
2572
2573 /* Each function will adjust priv_offset based on how many bytes they consumed */
2574 ret = criu_restore_process(p, args, &priv_offset, args->priv_data_size);
2575 if (ret)
2576 goto exit_unlock;
2577
2578 ret = criu_restore_devices(p, args, &priv_offset, args->priv_data_size);
2579 if (ret)
2580 goto exit_unlock;
2581
2582 ret = criu_restore_bos(p, args, &priv_offset, args->priv_data_size);
2583 if (ret)
2584 goto exit_unlock;
2585
2586 ret = criu_restore_objects(filep, p, args, &priv_offset, args->priv_data_size);
2587 if (ret)
2588 goto exit_unlock;
2589
2590 if (priv_offset != args->priv_data_size) {
2591 pr_err("Invalid private data size\n");
2592 ret = -EINVAL;
2593 }
2594
2595 exit_unlock:
2596 mutex_unlock(&p->mutex);
2597 if (ret)
2598 pr_err("Failed to restore CRIU ret:%d\n", ret);
2599 else
2600 pr_debug("CRIU restore successful\n");
2601
2602 return ret;
2603 }
2604
2605 static int criu_unpause(struct file *filep,
2606 struct kfd_process *p,
2607 struct kfd_ioctl_criu_args *args)
2608 {
2609 int ret;
2610
2611 mutex_lock(&p->mutex);
2612
2613 if (!p->queues_paused) {
2614 mutex_unlock(&p->mutex);
2615 return -EINVAL;
2616 }
2617
2618 ret = kfd_process_restore_queues(p);
2619 if (ret)
2620 pr_err("Failed to unpause queues ret:%d\n", ret);
2621 else
2622 p->queues_paused = false;
2623
2624 mutex_unlock(&p->mutex);
2625
2626 return ret;
2627 }
2628
2629 static int criu_resume(struct file *filep,
2630 struct kfd_process *p,
2631 struct kfd_ioctl_criu_args *args)
2632 {
2633 struct kfd_process *target = NULL;
2634 struct pid *pid = NULL;
2635 int ret = 0;
2636
2637 pr_debug("Inside %s, target pid for criu restore: %d\n", __func__,
2638 args->pid);
2639
2640 pid = find_get_pid(args->pid);
2641 if (!pid) {
2642 pr_err("Cannot find pid info for %i\n", args->pid);
2643 return -ESRCH;
2644 }
2645
2646 pr_debug("calling kfd_lookup_process_by_pid\n");
2647 target = kfd_lookup_process_by_pid(pid);
2648
2649 put_pid(pid);
2650
2651 if (!target) {
2652 pr_debug("Cannot find process info for %i\n", args->pid);
2653 return -ESRCH;
2654 }
2655
2656 mutex_lock(&target->mutex);
2657 ret = kfd_criu_resume_svm(target);
2658 if (ret) {
2659 pr_err("kfd_criu_resume_svm failed for %i\n", args->pid);
2660 goto exit;
2661 }
2662
2663 ret = amdgpu_amdkfd_criu_resume(target->kgd_process_info);
2664 if (ret)
2665 pr_err("amdgpu_amdkfd_criu_resume failed for %i\n", args->pid);
2666
2667 exit:
2668 mutex_unlock(&target->mutex);
2669
2670 kfd_unref_process(target);
2671 return ret;
2672 }
2673
2674 static int criu_process_info(struct file *filep,
2675 struct kfd_process *p,
2676 struct kfd_ioctl_criu_args *args)
2677 {
2678 int ret = 0;
2679
2680 mutex_lock(&p->mutex);
2681
2682 if (!p->n_pdds) {
2683 pr_err("No pdd for given process\n");
2684 ret = -ENODEV;
2685 goto err_unlock;
2686 }
2687
2688 ret = kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_CRIU_CHECKPOINT);
2689 if (ret)
2690 goto err_unlock;
2691
2692 p->queues_paused = true;
2693
2694 args->pid = task_pid_nr_ns(p->lead_thread,
2695 task_active_pid_ns(p->lead_thread));
2696
2697 ret = criu_get_process_object_info(p, &args->num_devices, &args->num_bos,
2698 &args->num_objects, &args->priv_data_size);
2699 if (ret)
2700 goto err_unlock;
2701
2702 dev_dbg(kfd_device, "Num of devices:%u bos:%u objects:%u priv_data_size:%lld\n",
2703 args->num_devices, args->num_bos, args->num_objects,
2704 args->priv_data_size);
2705
2706 err_unlock:
2707 if (ret) {
2708 kfd_process_restore_queues(p);
2709 p->queues_paused = false;
2710 }
2711 mutex_unlock(&p->mutex);
2712 return ret;
2713 }
2714
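/*
 * Dispatch for the multiplexed CRIU ioctl. A minimal sketch of the flow a
 * checkpoint/restore tool would follow (userspace, illustrative only --
 * buffer management and error handling omitted):
 *
 *   struct kfd_ioctl_criu_args args = { .op = KFD_CRIU_OP_PROCESS_INFO };
 *   ioctl(kfd_fd, AMDKFD_IOC_CRIU_OP, &args);  // pauses queues, returns sizes
 *   // allocate args.devices/bos/priv_data from the returned counts, then:
 *   args.op = KFD_CRIU_OP_CHECKPOINT;
 *   ioctl(kfd_fd, AMDKFD_IOC_CRIU_OP, &args);
 *   args.op = KFD_CRIU_OP_UNPAUSE;             // let the dumped process run
 *   ioctl(kfd_fd, AMDKFD_IOC_CRIU_OP, &args);
 *
 * Restore is the mirror image: KFD_CRIU_OP_RESTORE with the saved buffers,
 * then KFD_CRIU_OP_RESUME once CRIU has re-created all memory mappings.
 */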
2715 static int kfd_ioctl_criu(struct file *filep, struct kfd_process *p, void *data)
2716 {
2717 struct kfd_ioctl_criu_args *args = data;
2718 int ret;
2719
2720 dev_dbg(kfd_device, "CRIU operation: %d\n", args->op);
2721 switch (args->op) {
2722 case KFD_CRIU_OP_PROCESS_INFO:
2723 ret = criu_process_info(filep, p, args);
2724 break;
2725 case KFD_CRIU_OP_CHECKPOINT:
2726 ret = criu_checkpoint(filep, p, args);
2727 break;
2728 case KFD_CRIU_OP_UNPAUSE:
2729 ret = criu_unpause(filep, p, args);
2730 break;
2731 case KFD_CRIU_OP_RESTORE:
2732 ret = criu_restore(filep, p, args);
2733 break;
2734 case KFD_CRIU_OP_RESUME:
2735 ret = criu_resume(filep, p, args);
2736 break;
2737 default:
2738 dev_dbg(kfd_device, "Unsupported CRIU operation:%d\n", args->op);
2739 ret = -EINVAL;
2740 break;
2741 }
2742
2743 if (ret)
2744 dev_dbg(kfd_device, "CRIU operation:%d err:%d\n", args->op, ret);
2745
2746 return ret;
2747 }
2748
2749 static int runtime_enable(struct kfd_process *p, uint64_t r_debug,
2750 bool enable_ttmp_setup)
2751 {
2752 int i = 0, ret = 0;
2753
2754 if (p->is_runtime_retry)
2755 goto retry;
2756
2757 if (p->runtime_info.runtime_state != DEBUG_RUNTIME_STATE_DISABLED)
2758 return -EBUSY;
2759
2760 for (i = 0; i < p->n_pdds; i++) {
2761 struct kfd_process_device *pdd = p->pdds[i];
2762
2763 if (pdd->qpd.queue_count)
2764 return -EEXIST;
2765
2766 /*
2767 * Setup TTMPs by default.
2768 * Note that this call must remain here for MES ADD QUEUE to
2769 * skip_process_ctx_clear unconditionally as the first call to
2770 * SET_SHADER_DEBUGGER clears any stale process context data
2771 * saved in MES.
2772 */
2773 if (pdd->dev->kfd->shared_resources.enable_mes)
2774 kfd_dbg_set_mes_debug_mode(pdd, !kfd_dbg_has_cwsr_workaround(pdd->dev));
2775 }
2776
2777 p->runtime_info.runtime_state = DEBUG_RUNTIME_STATE_ENABLED;
2778 p->runtime_info.r_debug = r_debug;
2779 p->runtime_info.ttmp_setup = enable_ttmp_setup;
2780
2781 if (p->runtime_info.ttmp_setup) {
2782 for (i = 0; i < p->n_pdds; i++) {
2783 struct kfd_process_device *pdd = p->pdds[i];
2784
2785 if (!kfd_dbg_is_rlc_restore_supported(pdd->dev)) {
2786 amdgpu_gfx_off_ctrl(pdd->dev->adev, false);
2787 pdd->dev->kfd2kgd->enable_debug_trap(
2788 pdd->dev->adev,
2789 true,
2790 pdd->dev->vm_info.last_vmid_kfd);
2791 } else if (kfd_dbg_is_per_vmid_supported(pdd->dev)) {
2792 pdd->spi_dbg_override = pdd->dev->kfd2kgd->enable_debug_trap(
2793 pdd->dev->adev,
2794 false,
2795 0);
2796 }
2797 }
2798 }
2799
2800 retry:
2801 if (p->debug_trap_enabled) {
2802 if (!p->is_runtime_retry) {
2803 kfd_dbg_trap_activate(p);
2804 kfd_dbg_ev_raise(KFD_EC_MASK(EC_PROCESS_RUNTIME),
2805 p, NULL, 0, false, NULL, 0);
2806 }
2807
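/* Drop the process mutex while waiting so the attached debugger can
 * handle the EC_PROCESS_RUNTIME event and post runtime_enable_sema; an
 * interrupted wait is remembered so the ioctl can be retried.
 */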
2808 mutex_unlock(&p->mutex);
2809 ret = down_interruptible(&p->runtime_enable_sema);
2810 mutex_lock(&p->mutex);
2811
2812 p->is_runtime_retry = !!ret;
2813 }
2814
2815 return ret;
2816 }
2817
2818 static int runtime_disable(struct kfd_process *p)
2819 {
2820 int i = 0, ret;
2821 bool was_enabled = p->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED;
2822
2823 p->runtime_info.runtime_state = DEBUG_RUNTIME_STATE_DISABLED;
2824 p->runtime_info.r_debug = 0;
2825
2826 if (p->debug_trap_enabled) {
2827 if (was_enabled)
2828 kfd_dbg_trap_deactivate(p, false, 0);
2829
2830 if (!p->is_runtime_retry)
2831 kfd_dbg_ev_raise(KFD_EC_MASK(EC_PROCESS_RUNTIME),
2832 p, NULL, 0, false, NULL, 0);
2833
2834 mutex_unlock(&p->mutex);
2835 ret = down_interruptible(&p->runtime_enable_sema);
2836 mutex_lock(&p->mutex);
2837
2838 p->is_runtime_retry = !!ret;
2839 if (ret)
2840 return ret;
2841 }
2842
2843 if (was_enabled && p->runtime_info.ttmp_setup) {
2844 for (i = 0; i < p->n_pdds; i++) {
2845 struct kfd_process_device *pdd = p->pdds[i];
2846
2847 if (!kfd_dbg_is_rlc_restore_supported(pdd->dev))
2848 amdgpu_gfx_off_ctrl(pdd->dev->adev, true);
2849 }
2850 }
2851
2852 p->runtime_info.ttmp_setup = false;
2853
2854 /* disable ttmp setup */
2855 for (i = 0; i < p->n_pdds; i++) {
2856 struct kfd_process_device *pdd = p->pdds[i];
2857
2858 if (kfd_dbg_is_per_vmid_supported(pdd->dev)) {
2859 pdd->spi_dbg_override =
2860 pdd->dev->kfd2kgd->disable_debug_trap(
2861 pdd->dev->adev,
2862 false,
2863 pdd->dev->vm_info.last_vmid_kfd);
2864
2865 if (!pdd->dev->kfd->shared_resources.enable_mes)
2866 debug_refresh_runlist(pdd->dev->dqm);
2867 else
2868 kfd_dbg_set_mes_debug_mode(pdd,
2869 !kfd_dbg_has_cwsr_workaround(pdd->dev));
2870 }
2871 }
2872
2873 return 0;
2874 }
2875
2876 static int kfd_ioctl_runtime_enable(struct file *filep, struct kfd_process *p, void *data)
2877 {
2878 struct kfd_ioctl_runtime_enable_args *args = data;
2879 int r;
2880
2881 mutex_lock(&p->mutex);
2882
2883 if (args->mode_mask & KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK)
2884 r = runtime_enable(p, args->r_debug,
2885 !!(args->mode_mask & KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK));
2886 else
2887 r = runtime_disable(p);
2888
2889 mutex_unlock(&p->mutex);
2890
2891 return r;
2892 }
2893
2894 static int kfd_ioctl_set_debug_trap(struct file *filep, struct kfd_process *p, void *data)
2895 {
2896 struct kfd_ioctl_dbg_trap_args *args = data;
2897 struct task_struct *thread = NULL;
2898 struct mm_struct *mm = NULL;
2899 struct pid *pid = NULL;
2900 struct kfd_process *target = NULL;
2901 struct kfd_process_device *pdd = NULL;
2902 int r = 0;
2903
2904 if (sched_policy == KFD_SCHED_POLICY_NO_HWS) {
2905 pr_err("Debugging does not support sched_policy %i", sched_policy);
2906 return -EINVAL;
2907 }
2908
2909 pid = find_get_pid(args->pid);
2910 if (!pid) {
2911 pr_debug("Cannot find pid info for %i\n", args->pid);
2912 r = -ESRCH;
2913 goto out;
2914 }
2915
2916 thread = get_pid_task(pid, PIDTYPE_PID);
2917 if (!thread) {
2918 r = -ESRCH;
2919 goto out;
2920 }
2921
2922 mm = get_task_mm(thread);
2923 if (!mm) {
2924 r = -ESRCH;
2925 goto out;
2926 }
2927
2928 if (args->op == KFD_IOC_DBG_TRAP_ENABLE) {
2929 bool create_process;
2930
2931 rcu_read_lock();
2932 create_process = thread && thread != current && ptrace_parent(thread) == current;
2933 rcu_read_unlock();
2934
2935 target = create_process ? kfd_create_process(thread) :
2936 kfd_lookup_process_by_pid(pid);
2937 } else {
2938 target = kfd_lookup_process_by_pid(pid);
2939 }
2940
2941 if (IS_ERR_OR_NULL(target)) {
2942 pr_debug("Cannot find process PID %i to debug\n", args->pid);
2943 r = target ? PTR_ERR(target) : -ESRCH;
2944 target = NULL;
2945 goto out;
2946 }
2947
2948 /* Check if target is still PTRACED. */
2949 rcu_read_lock();
2950 if (target != p && args->op != KFD_IOC_DBG_TRAP_DISABLE
2951 && ptrace_parent(target->lead_thread) != current) {
2952 pr_err("PID %i is not PTRACED and cannot be debugged\n", args->pid);
2953 r = -EPERM;
2954 }
2955 rcu_read_unlock();
2956
2957 if (r)
2958 goto out;
2959
2960 mutex_lock(&target->mutex);
2961
2962 if (args->op != KFD_IOC_DBG_TRAP_ENABLE && !target->debug_trap_enabled) {
2963 pr_err("PID %i not debug enabled for op %i\n", args->pid, args->op);
2964 r = -EINVAL;
2965 goto unlock_out;
2966 }
2967
2968 if (target->runtime_info.runtime_state != DEBUG_RUNTIME_STATE_ENABLED &&
2969 (args->op == KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE ||
2970 args->op == KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE ||
2971 args->op == KFD_IOC_DBG_TRAP_SUSPEND_QUEUES ||
2972 args->op == KFD_IOC_DBG_TRAP_RESUME_QUEUES ||
2973 args->op == KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH ||
2974 args->op == KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH ||
2975 args->op == KFD_IOC_DBG_TRAP_SET_FLAGS)) {
2976 r = -EPERM;
2977 goto unlock_out;
2978 }
2979
2980 if (args->op == KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH ||
2981 args->op == KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH) {
2982 int user_gpu_id = kfd_process_get_user_gpu_id(target,
2983 args->op == KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH ?
2984 args->set_node_address_watch.gpu_id :
2985 args->clear_node_address_watch.gpu_id);
2986
2987 pdd = kfd_process_device_data_by_id(target, user_gpu_id);
2988 if (user_gpu_id == -EINVAL || !pdd) {
2989 r = -ENODEV;
2990 goto unlock_out;
2991 }
2992 }
2993
2994 switch (args->op) {
2995 case KFD_IOC_DBG_TRAP_ENABLE:
2996 if (target != p)
2997 target->debugger_process = p;
2998
2999 r = kfd_dbg_trap_enable(target,
3000 args->enable.dbg_fd,
3001 (void __user *)args->enable.rinfo_ptr,
3002 &args->enable.rinfo_size);
3003 if (!r)
3004 target->exception_enable_mask = args->enable.exception_mask;
3005
3006 break;
3007 case KFD_IOC_DBG_TRAP_DISABLE:
3008 r = kfd_dbg_trap_disable(target);
3009 break;
3010 case KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT:
3011 r = kfd_dbg_send_exception_to_runtime(target,
3012 args->send_runtime_event.gpu_id,
3013 args->send_runtime_event.queue_id,
3014 args->send_runtime_event.exception_mask);
3015 break;
3016 case KFD_IOC_DBG_TRAP_SET_EXCEPTIONS_ENABLED:
3017 kfd_dbg_set_enabled_debug_exception_mask(target,
3018 args->set_exceptions_enabled.exception_mask);
3019 break;
3020 case KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE:
3021 r = kfd_dbg_trap_set_wave_launch_override(target,
3022 args->launch_override.override_mode,
3023 args->launch_override.enable_mask,
3024 args->launch_override.support_request_mask,
3025 &args->launch_override.enable_mask,
3026 &args->launch_override.support_request_mask);
3027 break;
3028 case KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE:
3029 r = kfd_dbg_trap_set_wave_launch_mode(target,
3030 args->launch_mode.launch_mode);
3031 break;
3032 case KFD_IOC_DBG_TRAP_SUSPEND_QUEUES:
3033 r = suspend_queues(target,
3034 args->suspend_queues.num_queues,
3035 args->suspend_queues.grace_period,
3036 args->suspend_queues.exception_mask,
3037 (uint32_t *)args->suspend_queues.queue_array_ptr);
3038
3039 break;
3040 case KFD_IOC_DBG_TRAP_RESUME_QUEUES:
3041 r = resume_queues(target, args->resume_queues.num_queues,
3042 (uint32_t *)args->resume_queues.queue_array_ptr);
3043 break;
3044 case KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH:
3045 r = kfd_dbg_trap_set_dev_address_watch(pdd,
3046 args->set_node_address_watch.address,
3047 args->set_node_address_watch.mask,
3048 &args->set_node_address_watch.id,
3049 args->set_node_address_watch.mode);
3050 break;
3051 case KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH:
3052 r = kfd_dbg_trap_clear_dev_address_watch(pdd,
3053 args->clear_node_address_watch.id);
3054 break;
3055 case KFD_IOC_DBG_TRAP_SET_FLAGS:
3056 r = kfd_dbg_trap_set_flags(target, &args->set_flags.flags);
3057 break;
3058 case KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT:
3059 r = kfd_dbg_ev_query_debug_event(target,
3060 &args->query_debug_event.queue_id,
3061 &args->query_debug_event.gpu_id,
3062 args->query_debug_event.exception_mask,
3063 &args->query_debug_event.exception_mask);
3064 break;
3065 case KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO:
3066 r = kfd_dbg_trap_query_exception_info(target,
3067 args->query_exception_info.source_id,
3068 args->query_exception_info.exception_code,
3069 args->query_exception_info.clear_exception,
3070 (void __user *)args->query_exception_info.info_ptr,
3071 &args->query_exception_info.info_size);
3072 break;
3073 case KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT:
3074 r = pqm_get_queue_snapshot(&target->pqm,
3075 args->queue_snapshot.exception_mask,
3076 (void __user *)args->queue_snapshot.snapshot_buf_ptr,
3077 &args->queue_snapshot.num_queues,
3078 &args->queue_snapshot.entry_size);
3079 break;
3080 case KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT:
3081 r = kfd_dbg_trap_device_snapshot(target,
3082 args->device_snapshot.exception_mask,
3083 (void __user *)args->device_snapshot.snapshot_buf_ptr,
3084 &args->device_snapshot.num_devices,
3085 &args->device_snapshot.entry_size);
3086 break;
3087 default:
3088 pr_err("Invalid option: %i\n", args->op);
3089 r = -EINVAL;
3090 }
3091
3092 unlock_out:
3093 mutex_unlock(&target->mutex);
3094
3095 out:
3096 if (thread)
3097 put_task_struct(thread);
3098
3099 if (mm)
3100 mmput(mm);
3101
3102 if (pid)
3103 put_pid(pid);
3104
3105 if (target)
3106 kfd_unref_process(target);
3107
3108 return r;
3109 }
3110
3111 #define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
3112 [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, \
3113 .cmd_drv = 0, .name = #ioctl}
3114
3115 /** Ioctl table */
3116 static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
3117 AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_VERSION,
3118 kfd_ioctl_get_version, 0),
3119
3120 AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_QUEUE,
3121 kfd_ioctl_create_queue, 0),
3122
3123 AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_QUEUE,
3124 kfd_ioctl_destroy_queue, 0),
3125
3126 AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_MEMORY_POLICY,
3127 kfd_ioctl_set_memory_policy, 0),
3128
3129 AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_CLOCK_COUNTERS,
3130 kfd_ioctl_get_clock_counters, 0),
3131
3132 AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES,
3133 kfd_ioctl_get_process_apertures, 0),
3134
3135 AMDKFD_IOCTL_DEF(AMDKFD_IOC_UPDATE_QUEUE,
3136 kfd_ioctl_update_queue, 0),
3137
3138 AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_EVENT,
3139 kfd_ioctl_create_event, 0),
3140
3141 AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_EVENT,
3142 kfd_ioctl_destroy_event, 0),
3143
3144 AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_EVENT,
3145 kfd_ioctl_set_event, 0),
3146
3147 AMDKFD_IOCTL_DEF(AMDKFD_IOC_RESET_EVENT,
3148 kfd_ioctl_reset_event, 0),
3149
3150 AMDKFD_IOCTL_DEF(AMDKFD_IOC_WAIT_EVENTS,
3151 kfd_ioctl_wait_events, 0),
3152
3153 AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_REGISTER_DEPRECATED,
3154 kfd_ioctl_dbg_register, 0),
3155
3156 AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_UNREGISTER_DEPRECATED,
3157 kfd_ioctl_dbg_unregister, 0),
3158
3159 AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_ADDRESS_WATCH_DEPRECATED,
3160 kfd_ioctl_dbg_address_watch, 0),
3161
3162 AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_WAVE_CONTROL_DEPRECATED,
3163 kfd_ioctl_dbg_wave_control, 0),
3164
3165 AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_SCRATCH_BACKING_VA,
3166 kfd_ioctl_set_scratch_backing_va, 0),
3167
3168 AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_TILE_CONFIG,
3169 kfd_ioctl_get_tile_config, 0),
3170
3171 AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_TRAP_HANDLER,
3172 kfd_ioctl_set_trap_handler, 0),
3173
3174 AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES_NEW,
3175 kfd_ioctl_get_process_apertures_new, 0),
3176
3177 AMDKFD_IOCTL_DEF(AMDKFD_IOC_ACQUIRE_VM,
3178 kfd_ioctl_acquire_vm, 0),
3179
3180 AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_MEMORY_OF_GPU,
3181 kfd_ioctl_alloc_memory_of_gpu, 0),
3182
3183 AMDKFD_IOCTL_DEF(AMDKFD_IOC_FREE_MEMORY_OF_GPU,
3184 kfd_ioctl_free_memory_of_gpu, 0),
3185
3186 AMDKFD_IOCTL_DEF(AMDKFD_IOC_MAP_MEMORY_TO_GPU,
3187 kfd_ioctl_map_memory_to_gpu, 0),
3188
3189 AMDKFD_IOCTL_DEF(AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU,
3190 kfd_ioctl_unmap_memory_from_gpu, 0),
3191
3192 AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_CU_MASK,
3193 kfd_ioctl_set_cu_mask, 0),
3194
3195 AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_QUEUE_WAVE_STATE,
3196 kfd_ioctl_get_queue_wave_state, 0),
3197
3198 AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_DMABUF_INFO,
3199 kfd_ioctl_get_dmabuf_info, 0),
3200
3201 AMDKFD_IOCTL_DEF(AMDKFD_IOC_IMPORT_DMABUF,
3202 kfd_ioctl_import_dmabuf, 0),
3203
3204 AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_QUEUE_GWS,
3205 kfd_ioctl_alloc_queue_gws, 0),
3206
3207 AMDKFD_IOCTL_DEF(AMDKFD_IOC_SMI_EVENTS,
3208 kfd_ioctl_smi_events, 0),
3209
3210 AMDKFD_IOCTL_DEF(AMDKFD_IOC_SVM, kfd_ioctl_svm, 0),
3211
3212 AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_XNACK_MODE,
3213 kfd_ioctl_set_xnack_mode, 0),
3214
3215 AMDKFD_IOCTL_DEF(AMDKFD_IOC_CRIU_OP,
3216 kfd_ioctl_criu, KFD_IOC_FLAG_CHECKPOINT_RESTORE),
3217
3218 AMDKFD_IOCTL_DEF(AMDKFD_IOC_AVAILABLE_MEMORY,
3219 kfd_ioctl_get_available_memory, 0),
3220
3221 AMDKFD_IOCTL_DEF(AMDKFD_IOC_EXPORT_DMABUF,
3222 kfd_ioctl_export_dmabuf, 0),
3223
3224 AMDKFD_IOCTL_DEF(AMDKFD_IOC_RUNTIME_ENABLE,
3225 kfd_ioctl_runtime_enable, 0),
3226
3227 AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_TRAP,
3228 kfd_ioctl_set_debug_trap, 0),
3229 };
3230
3231 #define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls)
3232
3233 static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
3234 {
3235 struct kfd_process *process;
3236 amdkfd_ioctl_t *func;
3237 const struct amdkfd_ioctl_desc *ioctl = NULL;
3238 unsigned int nr = _IOC_NR(cmd);
3239 char stack_kdata[128];
3240 char *kdata = NULL;
3241 unsigned int usize, asize;
3242 int retcode = -EINVAL;
3243 bool ptrace_attached = false;
3244
3245 if (nr >= AMDKFD_CORE_IOCTL_COUNT)
3246 goto err_i1;
3247
3248 if ((nr >= AMDKFD_COMMAND_START) && (nr < AMDKFD_COMMAND_END)) {
3249 u32 amdkfd_size;
3250
3251 ioctl = &amdkfd_ioctls[nr];
3252
3253 amdkfd_size = _IOC_SIZE(ioctl->cmd);
3254 usize = asize = _IOC_SIZE(cmd);
3255 if (amdkfd_size > asize)
3256 asize = amdkfd_size;
3257
3258 cmd = ioctl->cmd;
3259 } else
3260 goto err_i1;
3261
3262 dev_dbg(kfd_device, "ioctl cmd 0x%x (#0x%x), arg 0x%lx\n", cmd, nr, arg);
3263
3264 /* Get the process struct from the filep. Only the process
3265 * that opened /dev/kfd can use the file descriptor. Child
3266 * processes need to create their own KFD device context.
3267 */
3268 process = filep->private_data;
3269
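/* Checkpoint/restore ioctls may legitimately come from a CRIU task that
 * has ptrace-attached to the process that opened /dev/kfd.
 */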
3270 rcu_read_lock();
3271 if ((ioctl->flags & KFD_IOC_FLAG_CHECKPOINT_RESTORE) &&
3272 ptrace_parent(process->lead_thread) == current)
3273 ptrace_attached = true;
3274 rcu_read_unlock();
3275
3276 if (process->lead_thread != current->group_leader
3277 && !ptrace_attached) {
3278 dev_dbg(kfd_device, "Using KFD FD in wrong process\n");
3279 retcode = -EBADF;
3280 goto err_i1;
3281 }
3282
3283 /* Do not trust userspace, use our own definition */
3284 func = ioctl->func;
3285
3286 if (unlikely(!func)) {
3287 dev_dbg(kfd_device, "no function\n");
3288 retcode = -EINVAL;
3289 goto err_i1;
3290 }
3291
3292 /*
3293 * Versions of docker shipped in Ubuntu 18.xx and 20.xx do not support
3294 * CAP_CHECKPOINT_RESTORE, so we also allow access if CAP_SYS_ADMIN is set,
3295 * as CAP_SYS_ADMIN grants more privileged access.
3296 */
3297 if (unlikely(ioctl->flags & KFD_IOC_FLAG_CHECKPOINT_RESTORE)) {
3298 if (!capable(CAP_CHECKPOINT_RESTORE) &&
3299 !capable(CAP_SYS_ADMIN)) {
3300 retcode = -EACCES;
3301 goto err_i1;
3302 }
3303 }
3304
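/* usize is the size this userspace passed in, asize what the kernel
 * expects; when the kernel struct is larger (older userspace), the tail
 * of kdata is zero-filled below so new fields read as zero.
 */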
3305 if (cmd & (IOC_IN | IOC_OUT)) {
3306 if (asize <= sizeof(stack_kdata)) {
3307 kdata = stack_kdata;
3308 } else {
3309 kdata = kmalloc(asize, GFP_KERNEL);
3310 if (!kdata) {
3311 retcode = -ENOMEM;
3312 goto err_i1;
3313 }
3314 }
3315 if (asize > usize)
3316 memset(kdata + usize, 0, asize - usize);
3317 }
3318
3319 if (cmd & IOC_IN) {
3320 if (copy_from_user(kdata, (void __user *)arg, usize) != 0) {
3321 retcode = -EFAULT;
3322 goto err_i1;
3323 }
3324 } else if (cmd & IOC_OUT) {
3325 memset(kdata, 0, usize);
3326 }
3327
3328 retcode = func(filep, process, kdata);
3329
3330 if (cmd & IOC_OUT)
3331 if (copy_to_user((void __user *)arg, kdata, usize) != 0)
3332 retcode = -EFAULT;
3333
3334 err_i1:
3335 if (!ioctl)
3336 dev_dbg(kfd_device, "invalid ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
3337 task_pid_nr(current), cmd, nr);
3338
3339 if (kdata != stack_kdata)
3340 kfree(kdata);
3341
3342 if (retcode)
3343 dev_dbg(kfd_device, "ioctl cmd (#0x%x), arg 0x%lx, ret = %d\n",
3344 nr, arg, retcode);
3345
3346 return retcode;
3347 }
3348
3349 static int kfd_mmio_mmap(struct kfd_node *dev, struct kfd_process *process,
3350 struct vm_area_struct *vma)
3351 {
3352 phys_addr_t address;
3353
3354 if (vma->vm_end - vma->vm_start != PAGE_SIZE)
3355 return -EINVAL;
3356
3357 if (PAGE_SIZE > 4096)
3358 return -EINVAL;
3359
3360 address = dev->adev->rmmio_remap.bus_addr;
3361
3362 vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
3363 VM_DONTDUMP | VM_PFNMAP);
3364
3365 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
3366
3367 pr_debug("pasid 0x%x mapping mmio page\n"
3368 " target user address == 0x%08llX\n"
3369 " physical address == 0x%08llX\n"
3370 " vm_flags == 0x%04lX\n"
3371 " size == 0x%04lX\n",
3372 process->pasid, (unsigned long long) vma->vm_start,
3373 address, vma->vm_flags, PAGE_SIZE);
3374
3375 return io_remap_pfn_range(vma,
3376 vma->vm_start,
3377 address >> PAGE_SHIFT,
3378 PAGE_SIZE,
3379 vma->vm_page_prot);
3380 }
3381
3382
3383 static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
3384 {
3385 struct kfd_process *process;
3386 struct kfd_node *dev = NULL;
3387 unsigned long mmap_offset;
3388 unsigned int gpu_id;
3389
3390 process = kfd_get_process(current);
3391 if (IS_ERR(process))
3392 return PTR_ERR(process);
3393
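/* The mmap offset is an encoded cookie, not a file offset: the high bits
 * select the mapping type (KFD_MMAP_TYPE_*) and, where relevant, the GPU ID.
 */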
3394 mmap_offset = vma->vm_pgoff << PAGE_SHIFT;
3395 gpu_id = KFD_MMAP_GET_GPU_ID(mmap_offset);
3396 if (gpu_id)
3397 dev = kfd_device_by_id(gpu_id);
3398
3399 switch (mmap_offset & KFD_MMAP_TYPE_MASK) {
3400 case KFD_MMAP_TYPE_DOORBELL:
3401 if (!dev)
3402 return -ENODEV;
3403 return kfd_doorbell_mmap(dev, process, vma);
3404
3405 case KFD_MMAP_TYPE_EVENTS:
3406 return kfd_event_mmap(process, vma);
3407
3408 case KFD_MMAP_TYPE_RESERVED_MEM:
3409 if (!dev)
3410 return -ENODEV;
3411 return kfd_reserved_mem_mmap(dev, process, vma);
3412 case KFD_MMAP_TYPE_MMIO:
3413 if (!dev)
3414 return -ENODEV;
3415 return kfd_mmio_mmap(dev, process, vma);
3416 }
3417
3418 return -EFAULT;
3419 }
3420