xref: /dragonfly/sys/dev/drm/radeon/radeon_vm.c (revision 62dc643e)
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <drm/drmP.h>
29 #include <uapi_drm/radeon_drm.h>
30 #include "radeon.h"
31 #ifdef TRACE_TODO
32 #include "radeon_trace.h"
33 #endif
34 
35 /*
36  * GPUVM
37  * GPUVM is similar to the legacy gart on older asics, however
38  * rather than there being a single global gart table
39  * for the entire GPU, there are multiple VM page tables active
40  * at any given time.  The VM page tables can contain a mix of
41  * vram pages and system memory pages, and system memory pages
42  * can be mapped as snooped (cached system pages) or unsnooped
43  * (uncached system pages).
44  * Each VM has an ID associated with it and there is a page table
45  * associated with each VMID.  When executing a command buffer,
46  * the kernel tells the ring what VMID to use for that command
47  * buffer.  VMIDs are allocated dynamically as commands are submitted.
48  * The userspace drivers maintain their own address space and the kernel
49  * sets up their page tables accordingly when they submit their
50  * command buffers and a VMID is assigned.
51  * Cayman/Trinity support up to 8 active VMs at any given time;
52  * SI supports 16.
53  */
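/*
 * Illustrative address decomposition (a sketch added for clarity, not part
 * of the original driver; it assumes the common defaults of 4KB GPU pages
 * and a radeon_vm_block_size of 9, i.e. 512 PTEs / 2MB per page table):
 *
 *	GPU VA 0x00345678
 *	pfn    = 0x00345678 >> 12                = 0x345
 *	pt_idx = pfn >> radeon_vm_block_size     = 0x1    (PDE index)
 *	pte    = pfn & (RADEON_VM_PTE_COUNT - 1) = 0x145  (index inside the PT)
 *
 * This is the same index math used by radeon_vm_bo_set_addr() and
 * radeon_vm_update_ptes() further down in this file.
 */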
54 
55 /**
56  * radeon_vm_num_pdes - return the number of page directory entries
57  *
58  * @rdev: radeon_device pointer
59  *
60  * Calculate the number of page directory entries (cayman+).
61  */
62 static unsigned radeon_vm_num_pdes(struct radeon_device *rdev)
63 {
64 	return rdev->vm_manager.max_pfn >> radeon_vm_block_size;
65 }
66 
67 /**
68  * radeon_vm_directory_size - returns the size of the page directory in bytes
69  *
70  * @rdev: radeon_device pointer
71  *
72  * Calculate the size of the page directory in bytes (cayman+).
73  */
74 static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
75 {
76 	return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8);
77 }
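/*
 * Worked example (hedged; assumes a 1GB VM address space and a block size
 * of 9, neither of which is guaranteed by this file): max_pfn would be
 * 1GB / 4KB = 0x40000, so radeon_vm_num_pdes() returns 0x40000 >> 9 = 512
 * and radeon_vm_directory_size() returns RADEON_GPU_PAGE_ALIGN(512 * 8) =
 * 4096 bytes, i.e. a single GPU page for the whole page directory.
 */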
78 
79 /**
80  * radeon_vm_manager_init - init the vm manager
81  *
82  * @rdev: radeon_device pointer
83  *
84  * Init the vm manager (cayman+).
85  * Returns 0 for success, error for failure.
86  */
87 int radeon_vm_manager_init(struct radeon_device *rdev)
88 {
89 	int r;
90 
91 	if (!rdev->vm_manager.enabled) {
92 		r = radeon_asic_vm_init(rdev);
93 		if (r)
94 			return r;
95 
96 		rdev->vm_manager.enabled = true;
97 	}
98 	return 0;
99 }
100 
101 /**
102  * radeon_vm_manager_fini - tear down the vm manager
103  *
104  * @rdev: radeon_device pointer
105  *
106  * Tear down the VM manager (cayman+).
107  */
108 void radeon_vm_manager_fini(struct radeon_device *rdev)
109 {
110 	int i;
111 
112 	if (!rdev->vm_manager.enabled)
113 		return;
114 
115 	for (i = 0; i < RADEON_NUM_VM; ++i)
116 		radeon_fence_unref(&rdev->vm_manager.active[i]);
117 	radeon_asic_vm_fini(rdev);
118 	rdev->vm_manager.enabled = false;
119 }
120 
121 /**
122  * radeon_vm_get_bos - add the vm BOs to a validation list
123  *
124  * @vm: vm providing the BOs
125  * @head: head of validation list
126  *
127  * Add the page directory to the list of BOs to
128  * validate for command submission (cayman+).
129  */
130 struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
131 					  struct radeon_vm *vm,
132 					  struct list_head *head)
133 {
134 	struct radeon_bo_list *list;
135 	unsigned i, idx;
136 
137 	list = drm_malloc_ab(vm->max_pde_used + 2,
138 			     sizeof(struct radeon_bo_list));
139 	if (!list)
140 		return NULL;
141 
142 	/* add the vm page table to the list */
143 	list[0].robj = vm->page_directory;
144 	list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
145 	list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
146 	list[0].tv.bo = &vm->page_directory->tbo;
147 	list[0].tiling_flags = 0;
148 	list_add(&list[0].tv.head, head);
149 
150 	for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
151 		if (!vm->page_tables[i].bo)
152 			continue;
153 
154 		list[idx].robj = vm->page_tables[i].bo;
155 		list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
156 		list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
157 		list[idx].tv.bo = &list[idx].robj->tbo;
158 		list[idx].tiling_flags = 0;
159 		list_add(&list[idx++].tv.head, head);
160 	}
161 
162 	return list;
163 }
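/*
 * Usage sketch (an assumption about how a caller such as the CS ioctl path
 * consumes the returned array; the real call site lives outside this file):
 *
 *	struct list_head head;
 *	struct radeon_bo_list *vm_bos;
 *
 *	INIT_LIST_HEAD(&head);
 *	vm_bos = radeon_vm_get_bos(rdev, vm, &head);
 *	if (!vm_bos)
 *		return -ENOMEM;
 *	// reserve and validate everything on "head", submit the command
 *	// stream, then release the array (drm_malloc_ab() allocations are
 *	// paired with drm_free_large() in this kernel era)
 */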
164 
165 /**
166  * radeon_vm_grab_id - allocate the next free VMID
167  *
168  * @rdev: radeon_device pointer
169  * @vm: vm to allocate id for
170  * @ring: ring we want to submit job to
171  *
172  * Allocate an id for the vm (cayman+).
173  * Returns the fence we need to sync to (if any).
174  *
175  * Global and local mutex must be locked!
176  */
177 struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
178 				       struct radeon_vm *vm, int ring)
179 {
180 	struct radeon_fence *best[RADEON_NUM_RINGS] = {};
181 
182 	unsigned choices[2] = {};
183 	unsigned i;
184 
185 	/* check if the id is still valid */
186 	if (vm->last_id_use && vm->last_id_use == rdev->vm_manager.active[vm->id])
187 		return NULL;
188 
189 	/* we definitely need to flush */
190 	radeon_fence_unref(&vm->last_flush);
191 
192 	/* skip over VMID 0, since it is the system VM */
193 	for (i = 1; i < rdev->vm_manager.nvm; ++i) {
194 		struct radeon_fence *fence = rdev->vm_manager.active[i];
195 
196 		if (fence == NULL) {
197 			/* found a free one */
198 			vm->id = i;
199 #ifdef TRACE_TODO
200 			trace_radeon_vm_grab_id(vm->id, ring);
201 #endif
202 			return NULL;
203 		}
204 
205 		if (radeon_fence_is_earlier(fence, best[fence->ring])) {
206 			best[fence->ring] = fence;
207 			choices[fence->ring == ring ? 0 : 1] = i;
208 		}
209 	}
210 
211 	for (i = 0; i < 2; ++i) {
212 		if (choices[i]) {
213 			vm->id = choices[i];
214 #ifdef TRACE_TODO
215 			trace_radeon_vm_grab_id(vm->id, ring);
216 #endif
217 			return rdev->vm_manager.active[choices[i]];
218 		}
219 	}
220 
221 	/* should never happen */
222 	BUG();
223 	return NULL;
224 }
225 
226 /**
227  * radeon_vm_flush - hardware flush the vm
228  *
229  * @rdev: radeon_device pointer
230  * @vm: vm we want to flush
231  * @ring: ring to use for flush
232  *
233  * Flush the vm (cayman+).
234  *
235  * Global and local mutex must be locked!
236  */
237 void radeon_vm_flush(struct radeon_device *rdev,
238 		     struct radeon_vm *vm,
239 		     int ring)
240 {
241 	uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);
242 
243 	/* if we can't remember our last VM flush then flush now! */
244 	if (!vm->last_flush || pd_addr != vm->pd_gpu_addr) {
245 #ifdef TRACE_TODO
246 		trace_radeon_vm_flush(pd_addr, ring, vm->id);
247 #endif
248 		vm->pd_gpu_addr = pd_addr;
249 		radeon_ring_vm_flush(rdev, &rdev->ring[ring],
250 				     vm->id, vm->pd_gpu_addr);
251 
252 	}
253 }
254 
255 /**
256  * radeon_vm_fence - remember fence for vm
257  *
258  * @rdev: radeon_device pointer
259  * @vm: vm we want to fence
260  * @fence: fence to remember
261  *
262  * Fence the vm (cayman+).
263  * Set the fence used to protect page table and id.
264  *
265  * Global and local mutex must be locked!
266  */
267 void radeon_vm_fence(struct radeon_device *rdev,
268 		     struct radeon_vm *vm,
269 		     struct radeon_fence *fence)
270 {
271 	radeon_fence_unref(&vm->fence);
272 	vm->fence = radeon_fence_ref(fence);
273 
274 	radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
275 	rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);
276 
277 	radeon_fence_unref(&vm->last_id_use);
278 	vm->last_id_use = radeon_fence_ref(fence);
279 
280 	/* we just flushed the VM, remember that */
281 	if (!vm->last_flush)
282 		vm->last_flush = radeon_fence_ref(fence);
283 }
284 
285 /**
286  * radeon_vm_bo_find - find the bo_va for a specific vm & bo
287  *
288  * @vm: requested vm
289  * @bo: requested buffer object
290  *
291  * Find @bo inside the requested vm (cayman+).
292  * Search inside the @bo's vm list for the requested vm.
293  * Returns the found bo_va or NULL if none is found
294  *
295  * Object has to be reserved!
296  */
297 struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
298 				       struct radeon_bo *bo)
299 {
300 	struct radeon_bo_va *bo_va;
301 
302 	list_for_each_entry(bo_va, &bo->va, bo_list) {
303 		if (bo_va->vm == vm) {
304 			return bo_va;
305 		}
306 	}
307 	return NULL;
308 }
309 
310 /**
311  * radeon_vm_bo_add - add a bo to a specific vm
312  *
313  * @rdev: radeon_device pointer
314  * @vm: requested vm
315  * @bo: radeon buffer object
316  *
317  * Add @bo into the requested vm (cayman+).
318  * Add @bo to the list of bos associated with the vm
319  * Returns newly added bo_va or NULL for failure
320  *
321  * Object has to be reserved!
322  */
323 struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
324 				      struct radeon_vm *vm,
325 				      struct radeon_bo *bo)
326 {
327 	struct radeon_bo_va *bo_va;
328 
329 	bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
330 	if (bo_va == NULL) {
331 		return NULL;
332 	}
333 	bo_va->vm = vm;
334 	bo_va->bo = bo;
335 	bo_va->soffset = 0;
336 	bo_va->eoffset = 0;
337 	bo_va->flags = 0;
338 	bo_va->addr = 0;
339 	bo_va->ref_count = 1;
340 	INIT_LIST_HEAD(&bo_va->bo_list);
341 	INIT_LIST_HEAD(&bo_va->vm_list);
342 	INIT_LIST_HEAD(&bo_va->vm_status);
343 
344 	mutex_lock(&vm->mutex);
345 	list_add(&bo_va->vm_list, &vm->va);
346 	list_add_tail(&bo_va->bo_list, &bo->va);
347 	mutex_unlock(&vm->mutex);
348 
349 	return bo_va;
350 }
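/*
 * Typical bo_va lifecycle, sketched from the functions in this file (the
 * actual callers are the GEM VA ioctl and CS paths elsewhere in the driver):
 *
 *	bo_va = radeon_vm_bo_find(vm, bo);
 *	if (!bo_va)
 *		bo_va = radeon_vm_bo_add(rdev, vm, bo);       (bo reserved)
 *	r = radeon_vm_bo_set_addr(rdev, bo_va, va, flags);    (unreserves bo)
 *	r = radeon_vm_bo_update(rdev, bo_va, mem);            (fills the PTEs)
 *	radeon_vm_bo_rmv(rdev, bo_va);                        (on unmap)
 */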
351 
352 /**
353  * radeon_vm_set_pages - helper to call the right asic function
354  *
355  * @rdev: radeon_device pointer
356  * @ib: indirect buffer to fill with commands
357  * @pe: addr of the page entry
358  * @addr: dst addr to write into pe
359  * @count: number of page entries to update
360  * @incr: increase next addr by incr bytes
361  * @flags: hw access flags
362  *
363  * Traces the parameters and calls the right asic functions
364  * to setup the page table using the DMA.
365  */
366 static void radeon_vm_set_pages(struct radeon_device *rdev,
367 				struct radeon_ib *ib,
368 				uint64_t pe,
369 				uint64_t addr, unsigned count,
370 				uint32_t incr, uint32_t flags)
371 {
372 #ifdef TRACE_TODO
373 	trace_radeon_vm_set_page(pe, addr, count, incr, flags);
374 #endif
375 
376 	if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
377 		uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
378 		radeon_asic_vm_copy_pages(rdev, ib, pe, src, count);
379 
380 	} else if ((flags & R600_PTE_SYSTEM) || (count < 3)) {
381 		radeon_asic_vm_write_pages(rdev, ib, pe, addr,
382 					   count, incr, flags);
383 
384 	} else {
385 		radeon_asic_vm_set_pages(rdev, ib, pe, addr,
386 					 count, incr, flags);
387 	}
388 }
389 
390 /**
391  * radeon_vm_clear_bo - initially clear the page dir/table
392  *
393  * @rdev: radeon_device pointer
394  * @bo: bo to clear
395  */
396 static int radeon_vm_clear_bo(struct radeon_device *rdev,
397 			      struct radeon_bo *bo)
398 {
399 	struct ttm_validate_buffer tv;
400 	struct ww_acquire_ctx ticket;
401 	struct list_head head;
402 	struct radeon_ib ib;
403 	unsigned entries;
404 	uint64_t addr;
405 	int r;
406 
407 	memset(&tv, 0, sizeof(tv));
408 	tv.bo = &bo->tbo;
409 
410 	INIT_LIST_HEAD(&head);
411 	list_add(&tv.head, &head);
412 
413 	r = ttm_eu_reserve_buffers(&ticket, &head);
414 	if (r)
415 		return r;
416 
417 	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
418 	if (r)
419 		goto error;
420 
421 	addr = radeon_bo_gpu_offset(bo);
422 	entries = radeon_bo_size(bo) / 8;
423 
424 	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, 256);
425 	if (r)
426 		goto error;
427 
428 	ib.length_dw = 0;
429 
430 	radeon_vm_set_pages(rdev, &ib, addr, 0, entries, 0, 0);
431 	radeon_asic_vm_pad_ib(rdev, &ib);
432 	WARN_ON(ib.length_dw > 64);
433 
434 	r = radeon_ib_schedule(rdev, &ib, NULL, false);
435 	if (r)
436 		goto error;
437 
438 	ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence);
439 	radeon_ib_free(rdev, &ib);
440 
441 	return 0;
442 
443 error:
444 	ttm_eu_backoff_reservation(&ticket, &head);
445 	return r;
446 }
447 
448 /**
449  * radeon_vm_bo_set_addr - set the bo's virtual address inside a vm
450  *
451  * @rdev: radeon_device pointer
452  * @bo_va: bo_va to store the address
453  * @soffset: requested offset of the buffer in the VM address space
454  * @flags: attributes of pages (read/write/valid/etc.)
455  *
456  * Set offset of @bo_va (cayman+).
457  * Validate and set the offset requested within the vm address space.
458  * Returns 0 for success, error for failure.
459  *
460  * Object has to be reserved and gets unreserved by this function!
461  */
462 int radeon_vm_bo_set_addr(struct radeon_device *rdev,
463 			  struct radeon_bo_va *bo_va,
464 			  uint64_t soffset,
465 			  uint32_t flags)
466 {
467 	uint64_t size = radeon_bo_size(bo_va->bo);
468 	uint64_t eoffset, last_offset = 0;
469 	struct radeon_vm *vm = bo_va->vm;
470 	struct radeon_bo_va *tmp;
471 	struct list_head *head;
472 	unsigned last_pfn, pt_idx;
473 	int r;
474 
475 	if (soffset) {
476 		/* make sure the object fits at this offset */
477 		eoffset = soffset + size;
478 		if (soffset >= eoffset) {
479 			r = -EINVAL;
480 			goto error_unreserve;
481 		}
482 
483 		last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
484 		if (last_pfn > rdev->vm_manager.max_pfn) {
485 			dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
486 				last_pfn, rdev->vm_manager.max_pfn);
487 			r = -EINVAL;
488 			goto error_unreserve;
489 		}
490 
491 	} else {
492 		eoffset = last_pfn = 0;
493 	}
494 
495 	mutex_lock(&vm->mutex);
496 	head = &vm->va;
497 	last_offset = 0;
498 	list_for_each_entry(tmp, &vm->va, vm_list) {
499 		if (bo_va == tmp) {
500 			/* skip over currently modified bo */
501 			continue;
502 		}
503 
504 		if (soffset >= last_offset && eoffset <= tmp->soffset) {
505 			/* bo can be added before this one */
506 			break;
507 		}
508 		if (eoffset > tmp->soffset && soffset < tmp->eoffset) {
509 			/* bo and tmp overlap, invalid offset */
510 			dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
511 				bo_va->bo, (unsigned)bo_va->soffset, tmp->bo,
512 				(unsigned)tmp->soffset, (unsigned)tmp->eoffset);
513 			mutex_unlock(&vm->mutex);
514 			r = -EINVAL;
515 			goto error_unreserve;
516 		}
517 		last_offset = tmp->eoffset;
518 		head = &tmp->vm_list;
519 	}
520 
521 	if (bo_va->soffset) {
522 		/* add a clone of the bo_va to clear the old address */
523 		tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
524 		if (!tmp) {
525 			mutex_unlock(&vm->mutex);
526 			r = -ENOMEM;
527 			goto error_unreserve;
528 		}
529 		tmp->soffset = bo_va->soffset;
530 		tmp->eoffset = bo_va->eoffset;
531 		tmp->vm = vm;
532 		tmp->bo = radeon_bo_ref(bo_va->bo);
533 		spin_lock(&vm->status_lock);
534 		list_add(&tmp->vm_status, &vm->freed);
535 		spin_unlock(&vm->status_lock);
536 	}
537 
538 	bo_va->soffset = soffset;
539 	bo_va->eoffset = eoffset;
540 	bo_va->flags = flags;
541 	bo_va->addr = 0;
542 	list_move(&bo_va->vm_list, head);
543 
544 	soffset = (soffset / RADEON_GPU_PAGE_SIZE) >> radeon_vm_block_size;
545 	eoffset = (eoffset / RADEON_GPU_PAGE_SIZE) >> radeon_vm_block_size;
546 
547 	BUG_ON(eoffset >= radeon_vm_num_pdes(rdev));
548 
549 	if (eoffset > vm->max_pde_used)
550 		vm->max_pde_used = eoffset;
551 
552 	radeon_bo_unreserve(bo_va->bo);
553 
554 	/* walk over the address space and allocate the page tables */
555 	for (pt_idx = soffset; pt_idx <= eoffset; ++pt_idx) {
556 		struct radeon_bo *pt;
557 
558 		if (vm->page_tables[pt_idx].bo)
559 			continue;
560 
561 		/* drop mutex to allocate and clear page table */
562 		mutex_unlock(&vm->mutex);
563 
564 		r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8,
565 				     RADEON_GPU_PAGE_SIZE, true,
566 				     RADEON_GEM_DOMAIN_VRAM, 0, NULL, &pt);
567 		if (r)
568 			return r;
569 
570 		r = radeon_vm_clear_bo(rdev, pt);
571 		if (r) {
572 			radeon_bo_unref(&pt);
573 			return r;
574 		}
575 
576 		/* acquire mutex again */
577 		mutex_lock(&vm->mutex);
578 		if (vm->page_tables[pt_idx].bo) {
579 			/* someone else allocated the pt in the meantime */
580 			mutex_unlock(&vm->mutex);
581 			radeon_bo_unref(&pt);
582 			mutex_lock(&vm->mutex);
583 			continue;
584 		}
585 
586 		vm->page_tables[pt_idx].addr = 0;
587 		vm->page_tables[pt_idx].bo = pt;
588 	}
589 
590 	mutex_unlock(&vm->mutex);
591 	return 0;
592 
593 error_unreserve:
594 	radeon_bo_unreserve(bo_va->bo);
595 	return r;
596 }
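/*
 * Worked example for the page table walk above (illustrative only; assumes
 * the default block size of 9 and 4KB GPU pages): mapping a 4MB BO at
 * soffset = 0x200000 gives eoffset = 0x600000, so
 *	soffset pt index = (0x200000 / 4096) >> 9 = 1
 *	eoffset pt index = (0x600000 / 4096) >> 9 = 3
 * and page tables 1 through 3 are allocated and cleared on demand.
 */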
597 
598 /**
599  * radeon_vm_map_gart - get the physical address of a gart page
600  *
601  * @rdev: radeon_device pointer
602  * @addr: the unmapped addr
603  *
604  * Look up the physical address of the page that the pte resolves
605  * to (cayman+).
606  * Returns the physical address of the page.
607  */
608 uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
609 {
610 	uint64_t result;
611 
612 	/* page table offset */
613 	result = rdev->gart.pages_entry[addr >> RADEON_GPU_PAGE_SHIFT];
614 	result &= ~RADEON_GPU_PAGE_MASK;
615 
616 	return result;
617 }
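/*
 * Example (follows from the two statements above, assuming 4KB GPU pages):
 * for addr = 0x5000 the lookup uses GART page 5, so the function returns
 * rdev->gart.pages_entry[5] with the low 12 offset bits masked off, i.e.
 * the address of that system page as seen by the GPU.
 */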
618 
619 /**
620  * radeon_vm_page_flags - translate page flags to what the hw uses
621  *
622  * @flags: flags coming from userspace
623  *
624  * Translate the flags the userspace ABI uses to hw flags.
625  */
626 static uint32_t radeon_vm_page_flags(uint32_t flags)
627 {
628 	uint32_t hw_flags = 0;
629 
630 	hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
631 	hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
632 	hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
633 	if (flags & RADEON_VM_PAGE_SYSTEM) {
634 		hw_flags |= R600_PTE_SYSTEM;
635 		hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
636 	}
637 	return hw_flags;
638 }
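/*
 * Example translation (derived directly from the helper above): a snooped
 * system mapping requested with
 *	RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_READABLE |
 *	RADEON_VM_PAGE_SYSTEM | RADEON_VM_PAGE_SNOOPED
 * is turned into the hardware flags
 *	R600_PTE_VALID | R600_PTE_READABLE |
 *	R600_PTE_SYSTEM | R600_PTE_SNOOPED.
 */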
639 
640 /**
641  * radeon_vm_update_page_directory - make sure the page directory is valid
642  *
643  * @rdev: radeon_device pointer
644  * @vm: requested vm
647  *
648  * Allocates new page tables if necessary
649  * and updates the page directory (cayman+).
650  * Returns 0 for success, error for failure.
651  *
652  * Global and local mutex must be locked!
653  */
654 int radeon_vm_update_page_directory(struct radeon_device *rdev,
655 				    struct radeon_vm *vm)
656 {
657 	struct radeon_bo *pd = vm->page_directory;
658 	uint64_t pd_addr = radeon_bo_gpu_offset(pd);
659 	uint32_t incr = RADEON_VM_PTE_COUNT * 8;
660 	uint64_t last_pde = ~0, last_pt = ~0;
661 	unsigned count = 0, pt_idx, ndw;
662 	struct radeon_ib ib;
663 	int r;
664 
665 	/* padding, etc. */
666 	ndw = 64;
667 
668 	/* assume the worst case */
669 	ndw += vm->max_pde_used * 6;
670 
671 	/* update too big for an IB */
672 	if (ndw > 0xfffff)
673 		return -ENOMEM;
674 
675 	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
676 	if (r)
677 		return r;
678 	ib.length_dw = 0;
679 
680 	/* walk over the address space and update the page directory */
681 	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
682 		struct radeon_bo *bo = vm->page_tables[pt_idx].bo;
683 		uint64_t pde, pt;
684 
685 		if (bo == NULL)
686 			continue;
687 
688 		pt = radeon_bo_gpu_offset(bo);
689 		if (vm->page_tables[pt_idx].addr == pt)
690 			continue;
691 		vm->page_tables[pt_idx].addr = pt;
692 
693 		pde = pd_addr + pt_idx * 8;
694 		if (((last_pde + 8 * count) != pde) ||
695 		    ((last_pt + incr * count) != pt)) {
696 
697 			if (count) {
698 				radeon_vm_set_pages(rdev, &ib, last_pde,
699 						    last_pt, count, incr,
700 						    R600_PTE_VALID);
701 			}
702 
703 			count = 1;
704 			last_pde = pde;
705 			last_pt = pt;
706 		} else {
707 			++count;
708 		}
709 	}
710 
711 	if (count)
712 		radeon_vm_set_pages(rdev, &ib, last_pde, last_pt, count,
713 				    incr, R600_PTE_VALID);
714 
715 	if (ib.length_dw != 0) {
716 		radeon_asic_vm_pad_ib(rdev, &ib);
717 		radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj);
718 		radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);
719 		WARN_ON(ib.length_dw > ndw);
720 		r = radeon_ib_schedule(rdev, &ib, NULL, false);
721 		if (r) {
722 			radeon_ib_free(rdev, &ib);
723 			return r;
724 		}
725 		radeon_fence_unref(&vm->fence);
726 		vm->fence = radeon_fence_ref(ib.fence);
727 		radeon_fence_unref(&vm->last_flush);
728 	}
729 	radeon_ib_free(rdev, &ib);
730 
731 	return 0;
732 }
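/*
 * Sizing example for the worst-case estimate above (hedged; assumes a fully
 * populated 512-entry directory, so max_pde_used = 511):
 *	ndw = 64 + 511 * 6 = 3130 dwords, roughly 12KB of IB space,
 * comfortably below the 0xfffff dword limit checked before the IB is
 * allocated.
 */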
733 
734 /**
735  * radeon_vm_frag_ptes - add fragment information to PTEs
736  *
737  * @rdev: radeon_device pointer
738  * @ib: IB for the update
739  * @pe_start: first PTE to handle
740  * @pe_end: last PTE to handle
741  * @addr: addr those PTEs should point to
742  * @flags: hw mapping flags
743  *
744  * Global and local mutex must be locked!
745  */
746 static void radeon_vm_frag_ptes(struct radeon_device *rdev,
747 				struct radeon_ib *ib,
748 				uint64_t pe_start, uint64_t pe_end,
749 				uint64_t addr, uint32_t flags)
750 {
751 	/**
752 	 * The MC L1 TLB supports variable sized pages, based on a fragment
753 	 * field in the PTE. When this field is set to a non-zero value, page
754 	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
755 	 * flags are considered valid for all PTEs within the fragment range
756 	 * and corresponding mappings are assumed to be physically contiguous.
757 	 *
758 	 * The L1 TLB can store a single PTE for the whole fragment,
759 	 * significantly increasing the space available for translation
760 	 * caching. This leads to large improvements in throughput when the
761 	 * TLB is under pressure.
762 	 *
763 	 * The L2 TLB distributes small and large fragments into two
764 	 * asymmetric partitions. The large fragment cache is significantly
765 	 * larger. Thus, we try to use large fragments wherever possible.
766 	 * Userspace can support this by aligning virtual base address and
767 	 * allocation size to the fragment size.
768 	 */
769 
770 	/* NI is optimized for 256KB fragments, SI and newer for 64KB */
771 	uint64_t frag_flags = ((rdev->family == CHIP_CAYMAN) ||
772 			       (rdev->family == CHIP_ARUBA)) ?
773 			R600_PTE_FRAG_256KB : R600_PTE_FRAG_64KB;
774 	uint64_t frag_align = ((rdev->family == CHIP_CAYMAN) ||
775 			       (rdev->family == CHIP_ARUBA)) ? 0x200 : 0x80;
776 
777 	uint64_t frag_start = ALIGN(pe_start, frag_align);
778 	uint64_t frag_end = pe_end & ~(frag_align - 1);
779 
780 	unsigned count;
781 
782 	/* system pages are not physically contiguous */
783 	if ((flags & R600_PTE_SYSTEM) || !(flags & R600_PTE_VALID) ||
784 	    (frag_start >= frag_end)) {
785 
786 		count = (pe_end - pe_start) / 8;
787 		radeon_vm_set_pages(rdev, ib, pe_start, addr, count,
788 				    RADEON_GPU_PAGE_SIZE, flags);
789 		return;
790 	}
791 
792 	/* handle the 4K area at the beginning */
793 	if (pe_start != frag_start) {
794 		count = (frag_start - pe_start) / 8;
795 		radeon_vm_set_pages(rdev, ib, pe_start, addr, count,
796 				    RADEON_GPU_PAGE_SIZE, flags);
797 		addr += RADEON_GPU_PAGE_SIZE * count;
798 	}
799 
800 	/* handle the area in the middle */
801 	count = (frag_end - frag_start) / 8;
802 	radeon_vm_set_pages(rdev, ib, frag_start, addr, count,
803 			    RADEON_GPU_PAGE_SIZE, flags | frag_flags);
804 
805 	/* handle the 4K area at the end */
806 	if (frag_end != pe_end) {
807 		addr += RADEON_GPU_PAGE_SIZE * count;
808 		count = (pe_end - frag_end) / 8;
809 		radeon_vm_set_pages(rdev, ib, frag_end, addr, count,
810 				    RADEON_GPU_PAGE_SIZE, flags);
811 	}
812 }
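/*
 * Fragment arithmetic spelled out (follows from the constants above):
 * frag_align is expressed in bytes of PTE space and each 8-byte PTE covers
 * one 4KB page, so
 *	SI and newer:  0x80 bytes = 16 PTEs = 64KB of VM address space
 *	Cayman/Aruba: 0x200 bytes = 64 PTEs = 256KB of VM address space
 * which matches the R600_PTE_FRAG_64KB / R600_PTE_FRAG_256KB flags chosen.
 */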
813 
814 /**
815  * radeon_vm_update_ptes - make sure that page tables are valid
816  *
817  * @rdev: radeon_device pointer
818  * @vm: requested vm
819  * @start: start of GPU address range
820  * @end: end of GPU address range
821  * @dst: destination address to map to
822  * @flags: mapping flags
823  *
824  * Update the page tables in the range @start - @end (cayman+).
825  *
826  * Global and local mutex must be locked!
827  */
828 static void radeon_vm_update_ptes(struct radeon_device *rdev,
829 				  struct radeon_vm *vm,
830 				  struct radeon_ib *ib,
831 				  uint64_t start, uint64_t end,
832 				  uint64_t dst, uint32_t flags)
833 {
834 	uint64_t mask = RADEON_VM_PTE_COUNT - 1;
835 	uint64_t last_pte = ~0, last_dst = ~0;
836 	unsigned count = 0;
837 	uint64_t addr;
838 
839 	start = start / RADEON_GPU_PAGE_SIZE;
840 	end = end / RADEON_GPU_PAGE_SIZE;
841 
842 	/* walk over the address space and update the page tables */
843 	for (addr = start; addr < end; ) {
844 		uint64_t pt_idx = addr >> radeon_vm_block_size;
845 		struct radeon_bo *pt = vm->page_tables[pt_idx].bo;
846 		unsigned nptes;
847 		uint64_t pte;
848 
849 		radeon_semaphore_sync_to(ib->semaphore, pt->tbo.sync_obj);
850 
851 		if ((addr & ~mask) == (end & ~mask))
852 			nptes = end - addr;
853 		else
854 			nptes = RADEON_VM_PTE_COUNT - (addr & mask);
855 
856 		pte = radeon_bo_gpu_offset(pt);
857 		pte += (addr & mask) * 8;
858 
859 		if ((last_pte + 8 * count) != pte) {
860 
861 			if (count) {
862 				radeon_vm_frag_ptes(rdev, ib, last_pte,
863 						    last_pte + 8 * count,
864 						    last_dst, flags);
865 			}
866 
867 			count = nptes;
868 			last_pte = pte;
869 			last_dst = dst;
870 		} else {
871 			count += nptes;
872 		}
873 
874 		addr += nptes;
875 		dst += nptes * RADEON_GPU_PAGE_SIZE;
876 	}
877 
878 	if (count) {
879 		radeon_vm_frag_ptes(rdev, ib, last_pte,
880 				    last_pte + 8 * count,
881 				    last_dst, flags);
882 	}
883 }
884 
885 /**
886  * radeon_vm_bo_update - map a bo into the vm page table
887  *
888  * @rdev: radeon_device pointer
889  * @bo_va: requested bo_va
890  * @mem: ttm mem
892  *
893  * Fill in the page table entries for @bo_va (cayman+).
894  * Returns 0 for success, -EINVAL for failure.
895  *
896  * Object has to be reserved and mutex must be locked!
897  */
898 int radeon_vm_bo_update(struct radeon_device *rdev,
899 			struct radeon_bo_va *bo_va,
900 			struct ttm_mem_reg *mem)
901 {
902 	struct radeon_vm *vm = bo_va->vm;
903 	struct radeon_ib ib;
904 	unsigned nptes, ncmds, ndw;
905 	uint64_t addr;
906 	uint32_t flags;
907 	int r;
908 
909 	if (!bo_va->soffset) {
910 		dev_err(rdev->dev, "bo %p doesn't have a mapping in vm %p\n",
911 			bo_va->bo, vm);
912 		return -EINVAL;
913 	}
914 
915 	spin_lock(&vm->status_lock);
916 	list_del_init(&bo_va->vm_status);
917 	spin_unlock(&vm->status_lock);
918 
919 	bo_va->flags &= ~RADEON_VM_PAGE_VALID;
920 	bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
921 	bo_va->flags &= ~RADEON_VM_PAGE_SNOOPED;
922 	if (mem) {
923 		addr = mem->start << PAGE_SHIFT;
924 		if (mem->mem_type != TTM_PL_SYSTEM) {
925 			bo_va->flags |= RADEON_VM_PAGE_VALID;
926 		}
927 		if (mem->mem_type == TTM_PL_TT) {
928 			bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
929 			if (!(bo_va->bo->flags & (RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC)))
930 				bo_va->flags |= RADEON_VM_PAGE_SNOOPED;
931 
932 		} else {
933 			addr += rdev->vm_manager.vram_base_offset;
934 		}
935 	} else {
936 		addr = 0;
937 	}
938 
939 	if (addr == bo_va->addr)
940 		return 0;
941 	bo_va->addr = addr;
942 
943 #ifdef TRACE_TODO
944 	trace_radeon_vm_bo_update(bo_va);
945 #endif
946 
947 	nptes = (bo_va->eoffset - bo_va->soffset) / RADEON_GPU_PAGE_SIZE;
948 
949 	/* reserve space for one command every (1 << BLOCK_SIZE) entries
950 	   or 2k dwords (whatever is smaller) */
951 	ncmds = (nptes >> min(radeon_vm_block_size, 11)) + 1;
952 
953 	/* padding, etc. */
954 	ndw = 64;
955 
956 	flags = radeon_vm_page_flags(bo_va->flags);
957 	if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
958 		/* only copy commands needed */
959 		ndw += ncmds * 7;
960 
961 	} else if (flags & R600_PTE_SYSTEM) {
962 		/* header for write data commands */
963 		ndw += ncmds * 4;
964 
965 		/* body of write data command */
966 		ndw += nptes * 2;
967 
968 	} else {
969 		/* set page commands needed */
970 		ndw += ncmds * 10;
971 
972 		/* two extra commands for begin/end of fragment */
973 		ndw += 2 * 10;
974 	}
975 
976 	/* update too big for an IB */
977 	if (ndw > 0xfffff)
978 		return -ENOMEM;
979 
980 	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
981 	if (r)
982 		return r;
983 	ib.length_dw = 0;
984 
985 	radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
986 			      addr, radeon_vm_page_flags(bo_va->flags));
987 
988 	radeon_asic_vm_pad_ib(rdev, &ib);
989 	WARN_ON(ib.length_dw > ndw);
990 
991 	radeon_semaphore_sync_to(ib.semaphore, vm->fence);
992 	r = radeon_ib_schedule(rdev, &ib, NULL, false);
993 	if (r) {
994 		radeon_ib_free(rdev, &ib);
995 		return r;
996 	}
997 	radeon_fence_unref(&vm->fence);
998 	vm->fence = radeon_fence_ref(ib.fence);
999 	radeon_ib_free(rdev, &ib);
1000 	radeon_fence_unref(&vm->last_flush);
1001 
1002 	return 0;
1003 }
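/*
 * Sizing example for the estimate above (illustrative; assumes the default
 * block size of 9): mapping 2MB of VRAM gives nptes = 512 and
 * ncmds = (512 >> 9) + 1 = 2, so the "set page" branch reserves
 *	ndw = 64 + 2 * 10 + 2 * 10 = 104 dwords,
 * far below the 0xfffff limit checked before the IB is obtained.
 */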
1004 
1005 /**
1006  * radeon_vm_clear_freed - clear freed BOs in the PT
1007  *
1008  * @rdev: radeon_device pointer
1009  * @vm: requested vm
1010  *
1011  * Make sure all freed BOs are cleared in the PT.
1012  * Returns 0 for success.
1013  *
1014  * PTs have to be reserved and mutex must be locked!
1015  */
1016 int radeon_vm_clear_freed(struct radeon_device *rdev,
1017 			  struct radeon_vm *vm)
1018 {
1019 	struct radeon_bo_va *bo_va;
1020 	int r;
1021 
1022 	spin_lock(&vm->status_lock);
1023 	while (!list_empty(&vm->freed)) {
1024 		bo_va = list_first_entry(&vm->freed,
1025 			struct radeon_bo_va, vm_status);
1026 		spin_unlock(&vm->status_lock);
1027 
1028 		r = radeon_vm_bo_update(rdev, bo_va, NULL);
1029 		radeon_bo_unref(&bo_va->bo);
1030 		kfree(bo_va);
1031 		if (r)
1032 			return r;
1033 
1034 		spin_lock(&vm->status_lock);
1035 	}
1036 	spin_unlock(&vm->status_lock);
1037 	return 0;
1038 
1039 }
1040 
1041 /**
1042  * radeon_vm_clear_invalids - clear invalidated BOs in the PT
1043  *
1044  * @rdev: radeon_device pointer
1045  * @vm: requested vm
1046  *
1047  * Make sure all invalidated BOs are cleared in the PT.
1048  * Returns 0 for success.
1049  *
1050  * PTs have to be reserved and mutex must be locked!
1051  */
1052 int radeon_vm_clear_invalids(struct radeon_device *rdev,
1053 			     struct radeon_vm *vm)
1054 {
1055 	struct radeon_bo_va *bo_va;
1056 	int r;
1057 
1058 	spin_lock(&vm->status_lock);
1059 	while (!list_empty(&vm->invalidated)) {
1060 		bo_va = list_first_entry(&vm->invalidated,
1061 			struct radeon_bo_va, vm_status);
1062 		spin_unlock(&vm->status_lock);
1063 
1064 		r = radeon_vm_bo_update(rdev, bo_va, NULL);
1065 		if (r)
1066 			return r;
1067 
1068 		spin_lock(&vm->status_lock);
1069 	}
1070 	spin_unlock(&vm->status_lock);
1071 
1072 	return 0;
1073 }
1074 
1075 /**
1076  * radeon_vm_bo_rmv - remove a bo from a specific vm
1077  *
1078  * @rdev: radeon_device pointer
1079  * @bo_va: requested bo_va
1080  *
1081  * Remove @bo_va->bo from the requested vm (cayman+).
1082  *
1083  * Object has to be reserved!
1084  */
1085 void radeon_vm_bo_rmv(struct radeon_device *rdev,
1086 		      struct radeon_bo_va *bo_va)
1087 {
1088 	struct radeon_vm *vm = bo_va->vm;
1089 
1090 	list_del(&bo_va->bo_list);
1091 
1092 	mutex_lock(&vm->mutex);
1093 	list_del(&bo_va->vm_list);
1094 	spin_lock(&vm->status_lock);
1095 	list_del(&bo_va->vm_status);
1096 
1097 	if (bo_va->addr) {
1098 		bo_va->bo = radeon_bo_ref(bo_va->bo);
1099 		list_add(&bo_va->vm_status, &vm->freed);
1100 	} else {
1101 		kfree(bo_va);
1102 	}
1103 	spin_unlock(&vm->status_lock);
1104 
1105 	mutex_unlock(&vm->mutex);
1106 }
1107 
1108 /**
1109  * radeon_vm_bo_invalidate - mark the bo as invalid
1110  *
1111  * @rdev: radeon_device pointer
1113  * @bo: radeon buffer object
1114  *
1115  * Mark @bo as invalid (cayman+).
1116  */
1117 void radeon_vm_bo_invalidate(struct radeon_device *rdev,
1118 			     struct radeon_bo *bo)
1119 {
1120 	struct radeon_bo_va *bo_va;
1121 
1122 	list_for_each_entry(bo_va, &bo->va, bo_list) {
1123 		if (bo_va->addr) {
1124 			spin_lock(&bo_va->vm->status_lock);
1125 			list_del(&bo_va->vm_status);
1126 			list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
1127 			spin_unlock(&bo_va->vm->status_lock);
1128 		}
1129 	}
1130 }
1131 
1132 /**
1133  * radeon_vm_init - initialize a vm instance
1134  *
1135  * @rdev: radeon_device pointer
1136  * @vm: requested vm
1137  *
1138  * Init @vm fields (cayman+).
1139  */
1140 int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
1141 {
1142 	const unsigned align = min(RADEON_VM_PTB_ALIGN_SIZE,
1143 		RADEON_VM_PTE_COUNT * 8);
1144 	unsigned pd_size, pd_entries, pts_size;
1145 	int r;
1146 
1147 	vm->id = 0;
1148 	vm->ib_bo_va = NULL;
1149 	vm->fence = NULL;
1150 	vm->last_flush = NULL;
1151 	vm->last_id_use = NULL;
1152 	lockinit(&vm->mutex, "rvmmtx", 0, LK_CANRECURSE);
1153 	INIT_LIST_HEAD(&vm->va);
1154 	spin_init(&vm->status_lock, "rvmspi");
1155 	INIT_LIST_HEAD(&vm->invalidated);
1156 	INIT_LIST_HEAD(&vm->freed);
1157 
1158 	pd_size = radeon_vm_directory_size(rdev);
1159 	pd_entries = radeon_vm_num_pdes(rdev);
1160 
1161 	/* allocate page table array */
1162 	pts_size = pd_entries * sizeof(struct radeon_vm_pt);
1163 	vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
1164 	if (vm->page_tables == NULL) {
1165 		DRM_ERROR("Cannot allocate memory for page table array\n");
1166 		return -ENOMEM;
1167 	}
1168 
1169 	r = radeon_bo_create(rdev, pd_size, align, true,
1170 			     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
1171 			     &vm->page_directory);
1172 	if (r)
1173 		return r;
1174 
1175 	r = radeon_vm_clear_bo(rdev, vm->page_directory);
1176 	if (r) {
1177 		radeon_bo_unref(&vm->page_directory);
1178 		vm->page_directory = NULL;
1179 		return r;
1180 	}
1181 
1182 	return 0;
1183 }
1184 
1185 /**
1186  * radeon_vm_fini - tear down a vm instance
1187  *
1188  * @rdev: radeon_device pointer
1189  * @vm: requested vm
1190  *
1191  * Tear down @vm (cayman+).
1192  * Unbind the VM and remove all bos from the vm bo list
1193  */
1194 void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
1195 {
1196 	struct radeon_bo_va *bo_va, *tmp;
1197 	int i, r;
1198 
1199 	if (!list_empty(&vm->va)) {
1200 		dev_err(rdev->dev, "still active bo inside vm\n");
1201 	}
1202 	list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) {
1203 		list_del_init(&bo_va->vm_list);
1204 		r = radeon_bo_reserve(bo_va->bo, false);
1205 		if (!r) {
1206 			list_del_init(&bo_va->bo_list);
1207 			radeon_bo_unreserve(bo_va->bo);
1208 			kfree(bo_va);
1209 		}
1210 	}
1211 	list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
1212 		radeon_bo_unref(&bo_va->bo);
1213 		kfree(bo_va);
1214 	}
1215 
1216 	for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
1217 		radeon_bo_unref(&vm->page_tables[i].bo);
1218 	kfree(vm->page_tables);
1219 
1220 	radeon_bo_unref(&vm->page_directory);
1221 
1222 	radeon_fence_unref(&vm->fence);
1223 	radeon_fence_unref(&vm->last_flush);
1224 	radeon_fence_unref(&vm->last_id_use);
1225 
1226 	lockuninit(&vm->mutex);
1227 }
1228