/*	$NetBSD: radeon_gem.c,v 1.9 2021/12/18 23:45:43 riastradh Exp $	*/

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: radeon_gem.c,v 1.9 2021/12/18 23:45:43 riastradh Exp $");

#include <linux/pci.h>

#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/radeon_drm.h>

#include "radeon.h"

#include <linux/nbsd-namespace.h>

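/**
 * radeon_gem_object_free - free the radeon_bo backing a GEM object
 *
 * @gobj: GEM object being destroyed
 *
 * Called when the last reference to the GEM object is dropped.  Unregisters
 * any MMU notifier attached to the buffer and drops the radeon_bo reference.
 */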
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		radeon_mn_unregister(robj);
		radeon_bo_unref(&robj);
	}
}

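/**
 * radeon_gem_object_create - allocate a radeon BO and wrap it in a GEM object
 *
 * @rdev: radeon_device pointer
 * @size: buffer size in bytes
 * @alignment: requested alignment in bytes; anything below PAGE_SIZE is
 *	bumped up to PAGE_SIZE
 * @initial_domain: RADEON_GEM_DOMAIN_* mask to validate the BO into first
 * @flags: RADEON_GEM_* creation flags
 * @kernel: true for kernel-internal objects, false for userspace objects
 * @obj: return location for the new GEM object, NULL on failure
 *
 * On VRAM exhaustion the allocation is retried with GTT added to the domain
 * mask.  Returns 0 on success or a negative error code.
 */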
int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
				int alignment, int initial_domain,
				u32 flags, bool kernel,
				struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* Maximum bo size is the unpinned gtt size since we use the gtt to
	 * handle vram to system pool migrations.
	 */
	max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
	if (size > max_size) {
		DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
			  size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->tbo.base;
#ifndef __NetBSD__
	robj->pid = task_pid_nr(current);
#endif

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

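/**
 * radeon_gem_set_domain - handle a userspace domain change request
 *
 * @gobj: GEM object affected
 * @rdomain: requested read domains
 * @wdomain: requested write domain
 *
 * For now this only waits for the object to become idle when the CPU
 * domain is requested, and rejects migration to VRAM for BOs that are
 * shared through dma-buf.
 */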
static int radeon_gem_set_domain(struct drm_gem_object *gobj,
			  uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	long r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		pr_warn("Set domain without domain!\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for cpu access, wait for the object to become idle */
		r = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
		if (!r)
			r = -EBUSY;

		if (r < 0 && r != -EINTR) {
			pr_err("Failed to wait for object: %li\n", r);
			return r;
		}
	}
	if (domain == RADEON_GEM_DOMAIN_VRAM && robj->prime_shared_count) {
		/* A BO that is associated with a dma-buf cannot be sensibly migrated to VRAM */
		return -EINVAL;
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create, which appears in both the new and
 * open ioctl cases.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}

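/*
 * Called when a GEM handle is closed or the file is released.  Drops the
 * per-VM mapping reference taken in radeon_gem_object_open() and removes
 * the bo_va once its reference count reaches zero.
 */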
void radeon_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because "
			"we fail to reserve bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}

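/*
 * Translate a -EDEADLK result (GPU lockup detected) into a GPU reset and
 * ask the caller to retry by returning -EAGAIN.
 */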
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

/*
 * GEM ioctls.
 */
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_mem_type_manager *man;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];

	args->vram_size = (u64)man->size << PAGE_SHIFT;
	args->vram_visible = rdev->mc.visible_vram_size;
	args->vram_visible -= rdev->vram_pin_size;
	args->gart_size = rdev->mc.gtt_size;
	args->gart_size -= rdev->gart_pin_size;

	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

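/*
 * radeon_gem_create_ioctl - DRM_IOCTL_RADEON_GEM_CREATE handler
 *
 * Rounds the requested size up to a whole number of pages, creates a GEM
 * object in the requested domain and returns a handle to userspace.  A
 * userspace caller would do, roughly (illustrative sketch only):
 *
 *	struct drm_radeon_gem_create req = {
 *		.size = 4096,
 *		.initial_domain = RADEON_GEM_DOMAIN_GTT,
 *	};
 *	ioctl(fd, DRM_IOCTL_RADEON_GEM_CREATE, &req);
 *	// req.handle now names the new buffer
 *
 * Lockups detected during allocation trigger a GPU reset via
 * radeon_gem_handle_lockup() and return -EAGAIN to the caller.
 */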
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	down_read(&rdev->exclusive_lock);
	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, args->flags,
				     false, &gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}

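/*
 * radeon_gem_userptr_ioctl - create a GEM object backed by user memory
 *
 * Validates the flags (read-only mappings are only allowed on R600 and
 * newer; writable mappings must be anonymous memory and registered with
 * an MMU notifier), binds the user pages to a CPU-domain BO and, when
 * RADEON_GEM_USERPTR_VALIDATE is set, validates it into GTT right away.
 */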
int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *bo;
	uint32_t handle;
	int r;

	args->addr = untagged_addr(args->addr);

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
	    RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
	    RADEON_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (args->flags & RADEON_GEM_USERPTR_READONLY) {
		/* readonly pages not tested on older hardware */
		if (rdev->family < CHIP_R600)
			return -EINVAL;

	} else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
		   !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must require anonymous
		   memory and install an MMU notifier */
		return -EACCES;
	}

	down_read(&rdev->exclusive_lock);

	/* create a gem object to contain this object in */
	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_CPU, 0,
				     false, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_radeon_bo(gobj);
	r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
		r = radeon_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
#ifdef __NetBSD__
		vm_map_lock_read(&curproc->p_vmspace->vm_map);
#else
		down_read(&current->mm->mmap_sem);
#endif
		r = radeon_bo_reserve(bo, true);
		if (r) {
#ifdef __NetBSD__
			vm_map_unlock_read(&curproc->p_vmspace->vm_map);
#else
			up_read(&current->mm->mmap_sem);
#endif
			goto release_object;
		}

		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		radeon_bo_unreserve(bo);
#ifdef __NetBSD__
		vm_map_unlock_read(&curproc->p_vmspace->vm_map);
#else
		up_read(&current->mm->mmap_sem);
#endif
		if (r)
			goto release_object;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;

release_object:
	drm_gem_object_put_unlocked(gobj);

handle_lockup:
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);

	return r;
}

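/*
 * DRM_IOCTL_RADEON_GEM_SET_DOMAIN handler: looks up the handle and
 * delegates to radeon_gem_set_domain(), which currently just waits for
 * the BO to become idle when the CPU domain is requested.
 */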
int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	down_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_put_unlocked(gobj);
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(robj->rdev, r);
	return r;
}

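/*
 * Return the fake mmap offset for a BO so userspace can map it through
 * the DRM file.  Userptr BOs cannot be mapped this way and yield -EPERM.
 */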
int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
		drm_gem_object_put_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_put_unlocked(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

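/*
 * DRM_IOCTL_RADEON_GEM_BUSY handler: reports -EBUSY while any fence on
 * the BO's reservation object is still unsignaled and returns the BO's
 * current memory domain.
 */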
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = dma_resv_test_signaled_rcu(robj->tbo.base.resv, true);
	if (r == 0)
		r = -EBUSY;
	else
		r = 0;

	cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
	args->domain = radeon_mem_type_to_domain(cur_placement);
	drm_gem_object_put_unlocked(gobj);
	return r;
}

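/*
 * DRM_IOCTL_RADEON_GEM_WAIT_IDLE handler: waits up to 30 seconds for all
 * fences on the BO, then flushes the HDP cache through MMIO when the BO
 * lives in VRAM so CPU reads see coherent data.
 */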
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;
	uint32_t cur_placement = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
	if (ret == 0)
		r = -EBUSY;
	else if (ret < 0)
		r = ret;

	/* Flush HDP cache via MMIO if necessary */
	cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
	if (rdev->asic->mmio_hdp_flush &&
	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
		robj->rdev->asic->mmio_hdp_flush(rdev);
	drm_gem_object_put_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d \n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_put_unlocked(gobj);
	return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

/**
 * radeon_gem_va_update_vm - update the bo_va in its VM
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void radeon_gem_va_update_vm(struct radeon_device *rdev,
				    struct radeon_bo_va *bo_va)
{
	struct ttm_validate_buffer tv, *entry;
	struct radeon_bo_list *vm_bos;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);

	tv.bo = &bo_va->bo->tbo;
	tv.num_shared = 1;
	list_add(&tv.head, &list);

	vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
	if (!vm_bos)
		return;

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r)
		goto error_free;

	list_for_each_entry(entry, &list, head) {
		domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type);
		/* if anything is swapped out don't swap it in here,
		   just abort and wait for the next CS */
		if (domain == RADEON_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	mutex_lock(&bo_va->vm->mutex);
	r = radeon_vm_clear_freed(rdev, bo_va->vm);
	if (r)
		goto error_unlock;

	if (bo_va->it.start)
		r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem);

error_unlock:
	mutex_unlock(&bo_va->vm->mutex);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_free:
	kvfree(vm_bos);

	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

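/*
 * DRM_IOCTL_RADEON_GEM_VA handler: maps or unmaps a BO at a GPU virtual
 * address inside the file's per-process VM (only when the VM manager is
 * enabled).  The reserved low range and the VALID/SYSTEM page flags are
 * owned by the kernel and rejected here.  Roughly, a userspace caller
 * would do (illustrative sketch only):
 *
 *	struct drm_radeon_gem_va va = {
 *		.handle = handle,
 *		.operation = RADEON_VA_MAP,
 *		.flags = RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_WRITEABLE,
 *		.offset = gpu_address,
 *	};
 *	ioctl(fd, DRM_IOCTL_RADEON_GEM_VA, &va);
 */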
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DONT REMOVE !!
	 * We don't support vm_id yet. To be sure we don't have broken
	 * userspace, reject anyone trying to use a non-zero value; that way
	 * we can use those fields later without breaking existing userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(dev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to enforce that userspace sets the snooped
	 * flag, otherwise we will end up with broken userspace and we won't
	 * be able to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(dev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(dev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_put_unlocked(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		radeon_bo_unreserve(rbo);
		drm_gem_object_put_unlocked(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->it.start) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
			radeon_bo_unreserve(rbo);
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	if (!r)
		radeon_gem_va_update_vm(rdev, bo_va);
	args->operation = RADEON_VA_RESULT_OK;
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

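/*
 * DRM_IOCTL_RADEON_GEM_OP handler: gets or sets the initial placement
 * domain of a BO.  Userptr BOs are rejected with -EPERM.
 */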
int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = -EPERM;
	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm))
		goto out;

	r = radeon_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
		args->value = robj->initial_domain;
		break;
	case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
		robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
						      RADEON_GEM_DOMAIN_GTT |
						      RADEON_GEM_DOMAIN_CPU);
		break;
	default:
		r = -EINVAL;
	}

	radeon_bo_unreserve(robj);
out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

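/*
 * Create a dumb display buffer in VRAM: the pitch is aligned for the
 * ASIC, the size is rounded up to whole pages and a GEM handle is
 * returned to the caller.
 */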
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM, 0,
				     false, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_bo *rbo;
	unsigned i = 0;

	mutex_lock(&rdev->gem.mutex);
	list_for_each_entry(rbo, &rdev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
		switch (domain) {
		case RADEON_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case RADEON_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case RADEON_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&rdev->gem.mutex);
	return 0;
}

static struct drm_info_list radeon_debugfs_gem_list[] = {
	{"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
};
#endif

int radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
#endif
	return 0;
}