/*	$OpenBSD: radeon_object.c,v 1.4 2014/07/06 08:16:36 jsg Exp $	*/
/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <dev/pci/drm/drmP.h>
#include <dev/pci/drm/radeon_drm.h>
#include "radeon.h"
#ifdef notyet
#include "radeon_trace.h"
#endif


int	 radeon_ttm_init(struct radeon_device *);
void	 radeon_ttm_fini(struct radeon_device *);
void	 radeon_bo_clear_surface_reg(struct radeon_bo *);
void	 radeon_bo_clear_va(struct radeon_bo *);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions call it.
 */
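/*
 * Illustrative sketch (not called anywhere in this file) of the pattern the
 * comment above describes; "bo" is a hypothetical, already-created object.
 *
 *	r = radeon_bo_reserve(bo, false);
 *	if (r == 0) {
 *		... operate on the BO (pin, kmap, tiling helpers, ...) ...
 *		radeon_bo_unreserve(bo);
 *	}
 */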

void radeon_bo_clear_va(struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va, *tmp;

	list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
		/* remove from all vm address space */
		radeon_vm_bo_rmv(bo->rdev, bo_va);
	}
}

static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct radeon_bo *bo;

	bo = container_of(tbo, struct radeon_bo, tbo);
	rw_enter_write(&bo->rdev->gem.rwlock);
	list_del_init(&bo->list);
	rw_exit_write(&bo->rdev->gem.rwlock);
	radeon_bo_clear_surface_reg(bo);
	radeon_bo_clear_va(bo);
	drm_gem_object_release(&bo->gem_base);
	pool_put(&bo->rdev->ddev->objpl, bo);
}

bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &radeon_ttm_bo_destroy)
		return true;
	return false;
}

void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
	u32 c = 0;

	rbo->placement.fpfn = 0;
	rbo->placement.lpfn = 0;
	rbo->placement.placement = rbo->placements;
	rbo->placement.busy_placement = rbo->placements;
	if (domain & RADEON_GEM_DOMAIN_VRAM)
		rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
					TTM_PL_FLAG_VRAM;
	if (domain & RADEON_GEM_DOMAIN_GTT) {
		if (rbo->rdev->flags & RADEON_IS_AGP) {
			rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT;
		} else {
			rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
		}
	}
	if (domain & RADEON_GEM_DOMAIN_CPU) {
		if (rbo->rdev->flags & RADEON_IS_AGP) {
			rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM;
		} else {
			rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
		}
	}
	if (!c)
		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	rbo->placement.num_placement = c;
	rbo->placement.num_busy_placement = c;
}
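
/*
 * Illustrative sketch: a caller that wants a buffer in VRAM with a GTT
 * fallback fills the placement list and then lets TTM validate it; "rbo"
 * is a hypothetical, already-reserved buffer object.
 *
 *	radeon_ttm_placement_from_domain(rbo,
 *	    RADEON_GEM_DOMAIN_VRAM | RADEON_GEM_DOMAIN_GTT);
 *	r = ttm_bo_validate(&rbo->tbo, &rbo->placement, true, false);
 */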

int radeon_bo_create(struct radeon_device *rdev,
		     unsigned long size, int byte_align, bool kernel, u32 domain,
		     struct sg_table *sg, struct radeon_bo **bo_ptr)
{
	struct radeon_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size_t acc_size;
	int r;

	size = PAGE_ALIGN(size);

#ifdef notyet
	rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
#endif
	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
				       sizeof(struct radeon_bo));

	bo = pool_get(&rdev->ddev->objpl, PR_WAITOK | PR_ZERO);
	if (bo == NULL)
		return -ENOMEM;
	r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		pool_put(&rdev->ddev->objpl, bo);
		return r;
	}
	bo->rdev = rdev;
	bo->surface_reg = -1;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	radeon_ttm_placement_from_domain(bo, domain);
	/* Kernel allocations are uninterruptible */
	rw_enter_read(&rdev->pm.mclk_lock);
	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, !kernel, NULL,
			acc_size, sg, &radeon_ttm_bo_destroy);
	rw_exit_read(&rdev->pm.mclk_lock);
	if (unlikely(r != 0)) {
		return r;
	}
	*bo_ptr = bo;

#ifdef notyet
	trace_radeon_bo_create(bo);
#endif

	return 0;
}

int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr) {
			*ptr = bo->kptr;
		}
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r) {
		return r;
	}
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr) {
		*ptr = bo->kptr;
	}
	radeon_bo_check_tiling(bo, 0, 0);
	return 0;
}

void radeon_bo_kunmap(struct radeon_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	radeon_bo_check_tiling(bo, 0, 0);
	ttm_bo_kunmap(&bo->kmap);
}
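
/*
 * Illustrative sketch: typical bring-up of a kernel-owned BO, roughly how
 * ring buffers are set up elsewhere in the driver.  All names are
 * hypothetical and error handling is elided.
 *
 *	r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
 *	    RADEON_GEM_DOMAIN_GTT, NULL, &bo);
 *	r = radeon_bo_reserve(bo, false);
 *	r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
 *	r = radeon_bo_kmap(bo, &cpu_ptr);
 *	radeon_bo_unreserve(bo);
 */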

void radeon_bo_unref(struct radeon_bo **bo)
{
	struct ttm_buffer_object *tbo;
	struct radeon_device *rdev;

	if ((*bo) == NULL)
		return;
	rdev = (*bo)->rdev;
	tbo = &((*bo)->tbo);
	rw_enter_read(&rdev->pm.mclk_lock);
	ttm_bo_unref(&tbo);
	rw_exit_read(&rdev->pm.mclk_lock);
	if (tbo == NULL)
		*bo = NULL;
}

int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
			     u64 *gpu_addr)
{
	int r, i;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = radeon_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start;

			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain_start = bo->rdev->mc.vram_start;
			else
				domain_start = bo->rdev->mc.gtt_start;
			WARN_ON_ONCE(max_offset <
				     (radeon_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}
	radeon_ttm_placement_from_domain(bo, domain);
	if (domain == RADEON_GEM_DOMAIN_VRAM) {
		/* force to pin into visible video ram */
		bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
	}
	if (max_offset) {
		u64 lpfn = max_offset >> PAGE_SHIFT;

		if (!bo->placement.lpfn)
			bo->placement.lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT;

		if (lpfn < bo->placement.lpfn)
			bo->placement.lpfn = lpfn;
	}
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = radeon_bo_gpu_offset(bo);
	}
	if (unlikely(r != 0))
		DRM_ERROR("%p pin failed\n", bo);
	return r;
}

int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
	return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}

int radeon_bo_unpin(struct radeon_bo *bo)
{
	int r, i;

	if (!bo->pin_count) {
		DRM_ERROR("%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (unlikely(r != 0))
		DRM_ERROR("%p validate failed for unpin\n", bo);
	return r;
}
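
/*
 * Illustrative sketch: the matching teardown for a pinned, mapped kernel BO
 * ("bo" hypothetical); each call undoes one step of the setup above.
 *
 *	if (radeon_bo_reserve(bo, false) == 0) {
 *		radeon_bo_kunmap(bo);
 *		radeon_bo_unpin(bo);
 *		radeon_bo_unreserve(bo);
 *	}
 *	radeon_bo_unref(&bo);
 */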

int radeon_bo_evict_vram(struct radeon_device *rdev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
	if (0 && (rdev->flags & RADEON_IS_IGP)) {
		if (rdev->mc.igp_sideport_enabled == false)
			/* Useless to evict on IGP chips */
			return 0;
	}
	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

void radeon_bo_force_delete(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects)) {
		return;
	}
	DRM_ERROR("Userspace still has active objects !\n");
	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		DRM_LOCK();
#ifdef notyet
		DRM_ERROR("%p %p %lu %lu force free\n",
			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
			*((unsigned long *)&bo->gem_base.refcount));
#endif
		rw_enter_write(&bo->rdev->gem.rwlock);
		list_del_init(&bo->list);
		rw_exit_write(&bo->rdev->gem.rwlock);
		/* this should unref the ttm bo */
		drm_gem_object_unreference(&bo->gem_base);
		DRM_UNLOCK();
	}
}

int radeon_bo_init(struct radeon_device *rdev)
{
	paddr_t start, end;

	/* Add an MTRR for the VRAM */
	drm_mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size, DRM_MTRR_WC);
	/* fake a 'cookie', seems to be unused? */
	rdev->mc.vram_mtrr = 1;

	start = atop(bus_space_mmap(rdev->memt, rdev->mc.aper_base, 0, 0, 0));
	end = start + atop(rdev->mc.aper_size);
	uvm_page_physload(start, end, start, end, PHYSLOAD_DEVICE);

#ifdef DRMDEBUG
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		rdev->mc.mc_vram_size >> 20,
		(unsigned long long)rdev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %cDR\n",
			rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
#endif
	return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
	radeon_ttm_fini(rdev);
}

void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
				struct list_head *head)
{
	if (lobj->wdomain) {
		list_add(&lobj->tv.head, head);
	} else {
		list_add_tail(&lobj->tv.head, head);
	}
}

int radeon_bo_list_validate(struct list_head *head)
{
	struct radeon_bo_list *lobj;
	struct radeon_bo *bo;
	u32 domain;
	int r;

	r = ttm_eu_reserve_buffers(head);
	if (unlikely(r != 0)) {
		return r;
	}
	list_for_each_entry(lobj, head, tv.head) {
		bo = lobj->bo;
		if (!bo->pin_count) {
			domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;

		retry:
			radeon_ttm_placement_from_domain(bo, domain);
			r = ttm_bo_validate(&bo->tbo, &bo->placement,
						true, false);
			if (unlikely(r)) {
				if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) {
					domain |= RADEON_GEM_DOMAIN_GTT;
					goto retry;
				}
				return r;
			}
		}
		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
		lobj->tiling_flags = bo->tiling_flags;
	}
	return 0;
}

#ifdef notyet
int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
			     struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}
#endif

int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;
	struct radeon_bo *old_object;
	int steal;
	int i;

	BUG_ON(!radeon_bo_is_reserved(bo));

	if (!bo->tiling_flags)
		return 0;

	if (bo->surface_reg >= 0) {
		reg = &rdev->surface_regs[bo->surface_reg];
		i = bo->surface_reg;
		goto out;
	}

	steal = -1;
	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {

		reg = &rdev->surface_regs[i];
		if (!reg->bo)
			break;

		old_object = reg->bo;
		if (old_object->pin_count == 0)
			steal = i;
	}

	/* if we are all out */
	if (i == RADEON_GEM_MAX_SURFACES) {
		if (steal == -1)
			return -ENOMEM;
		/* find someone with a surface reg and nuke their BO */
		reg = &rdev->surface_regs[steal];
		old_object = reg->bo;
		/* blow away the mapping */
		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
		ttm_bo_unmap_virtual(&old_object->tbo);
		old_object->surface_reg = -1;
		i = steal;
	}

	bo->surface_reg = i;
	reg->bo = bo;

out:
	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
			       bo->tbo.mem.start << PAGE_SHIFT,
			       bo->tbo.num_pages << PAGE_SHIFT);
	return 0;
}

void
radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;

	if (bo->surface_reg == -1)
		return;

	reg = &rdev->surface_regs[bo->surface_reg];
	radeon_clear_surface_reg(rdev, bo->surface_reg);

	reg->bo = NULL;
	bo->surface_reg = -1;
}

int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
				uint32_t tiling_flags, uint32_t pitch)
{
	struct radeon_device *rdev = bo->rdev;
	int r;

	if (rdev->family >= CHIP_CEDAR) {
		unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;

		bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
		bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
		mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
		tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
		stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
		switch (bankw) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (bankh) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (mtaspect) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		if (tilesplit > 6) {
			return -EINVAL;
		}
		if (stilesplit > 6) {
			return -EINVAL;
		}
	}
	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	bo->tiling_flags = tiling_flags;
	bo->pitch = pitch;
	radeon_bo_unreserve(bo);
	return 0;
}

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
				uint32_t *tiling_flags,
				uint32_t *pitch)
{
	BUG_ON(!radeon_bo_is_reserved(bo));
	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
	if (pitch)
		*pitch = bo->pitch;
}
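
/*
 * Illustrative sketch: setting and reading back tiling state with
 * hypothetical values.  The getter expects the BO to already be reserved,
 * while the setter reserves it internally.
 *
 *	r = radeon_bo_set_tiling_flags(bo, RADEON_TILING_MACRO, pitch);
 *
 *	r = radeon_bo_reserve(bo, false);
 *	if (r == 0) {
 *		radeon_bo_get_tiling_flags(bo, &flags, &pitch);
 *		radeon_bo_unreserve(bo);
 *	}
 */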

int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
				bool force_drop)
{
	BUG_ON(!radeon_bo_is_reserved(bo) && !force_drop);

	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
		return 0;

	if (force_drop) {
		radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
		if (!has_moved)
			return 0;

		if (bo->surface_reg >= 0)
			radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if ((bo->surface_reg >= 0) && !has_moved)
		return 0;

	return radeon_bo_get_surface_reg(bo);
}

void radeon_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem)
{
	struct radeon_bo *rbo;
	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 1);
	radeon_vm_bo_invalidate(rbo->rdev, rbo);
}

int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	unsigned long offset, size;
	int r;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return 0;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 0);
	rdev = rbo->rdev;
	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= rdev->mc.visible_vram_size)
		return 0;

	/* hurrah, the memory is not visible! */
	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
	rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
	r = ttm_bo_validate(bo, &rbo->placement, false, false);
	if (unlikely(r == -ENOMEM)) {
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		return ttm_bo_validate(bo, &rbo->placement, false, false);
	} else if (unlikely(r != 0)) {
		return r;
	}

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if ((offset + size) > rdev->mc.visible_vram_size)
		return -EINVAL;

	return 0;
}

int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
	if (unlikely(r != 0))
		return r;
	mtx_enter(&bo->tbo.bdev->fence_lock);
	if (mem_type)
		*mem_type = bo->tbo.mem.mem_type;
	if (bo->tbo.sync_obj)
		r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
	mtx_leave(&bo->tbo.bdev->fence_lock);
	ttm_bo_unreserve(&bo->tbo);
	return r;
}


/**
 * radeon_bo_reserve - reserve bo
 * @bo:		bo structure
 * @no_intr:	don't return -ERESTARTSYS on pending signal
 *
 * Returns:
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, 0);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("%p reserve failed\n", bo);
		return r;
	}
	return 0;
}
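
/*
 * Illustrative sketch of the calling convention documented above ("bo"
 * hypothetical): an interruptible caller must treat -ERESTARTSYS as "drop
 * every reservation you hold and return to user-space".
 *
 *	r = radeon_bo_reserve(bo, false);
 *	if (r) {
 *		... on -ERESTARTSYS, release any other held reservations ...
 *		return r;
 *	}
 */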
668