xref: /openbsd/sys/dev/pci/drm/ttm/ttm_bo_vm.c (revision f46a341e)
1 /* SPDX-License-Identifier: GPL-2.0 OR MIT */
2 /**************************************************************************
3  *
4  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25  * USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  **************************************************************************/
28 /*
29  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
30  */
31 
32 #define pr_fmt(fmt) "[TTM] " fmt
33 
34 #include <drm/ttm/ttm_bo.h>
35 #include <drm/ttm/ttm_placement.h>
36 #include <drm/ttm/ttm_tt.h>
37 
38 #include <drm/drm_drv.h>
39 #include <drm/drm_managed.h>
40 
41 #ifdef __linux__
42 
43 static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
44 				struct vm_fault *vmf)
45 {
46 	long err = 0;
47 
48 	/*
49 	 * Quick non-stalling check for idle.
50 	 */
51 	if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_KERNEL))
52 		return 0;
53 
54 	/*
55 	 * If possible, avoid waiting for GPU with mmap_lock
56 	 * held.  We only do this if the fault allows retry and this
57 	 * is the first attempt.
58 	 */
59 	if (fault_flag_allow_retry_first(vmf->flags)) {
60 		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
61 			return VM_FAULT_RETRY;
62 
63 		ttm_bo_get(bo);
64 		mmap_read_unlock(vmf->vma->vm_mm);
65 		(void)dma_resv_wait_timeout(bo->base.resv,
66 					    DMA_RESV_USAGE_KERNEL, true,
67 					    MAX_SCHEDULE_TIMEOUT);
68 		dma_resv_unlock(bo->base.resv);
69 		ttm_bo_put(bo);
70 		return VM_FAULT_RETRY;
71 	}
72 
73 	/*
74 	 * Ordinary wait.
75 	 */
76 	err = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_KERNEL, true,
77 				    MAX_SCHEDULE_TIMEOUT);
78 	if (unlikely(err < 0)) {
79 		return (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
80 			VM_FAULT_NOPAGE;
81 	}
82 
83 	return 0;
84 }
85 
86 static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
87 				       unsigned long page_offset)
88 {
89 	struct ttm_device *bdev = bo->bdev;
90 
91 	if (bdev->funcs->io_mem_pfn)
92 		return bdev->funcs->io_mem_pfn(bo, page_offset);
93 
94 	return (bo->resource->bus.offset >> PAGE_SHIFT) + page_offset;
95 }
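/*
 * A minimal sketch (not taken from any driver in this tree) of what a
 * driver-supplied io_mem_pfn() hook can look like when BAR pages are not a
 * simple linear offset from bus.offset; the mydrv_* names are hypothetical:
 *
 *	static unsigned long mydrv_io_mem_pfn(struct ttm_buffer_object *bo,
 *					       unsigned long page_offset)
 *	{
 *		struct mydrv_bo *mbo = to_mydrv_bo(bo);
 *
 *		// Translate the BO-relative page index into an aperture pfn.
 *		return (mbo->aperture_base >> PAGE_SHIFT) + page_offset;
 *	}
 *
 * The hook is installed through struct ttm_device_funcs::io_mem_pfn; when it
 * is left NULL, the default bus.offset based calculation above is used.
 */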
96 
97 /**
98  * ttm_bo_vm_reserve - Reserve a buffer object in a retryable vm callback
99  * @bo: The buffer object
100  * @vmf: The fault structure handed to the callback
101  *
102  * vm callbacks like fault() and *_mkwrite() allow for the mmap_lock to be dropped
103  * during long waits, and after the wait the callback will be restarted. This
104  * is to allow other threads using the same virtual memory space concurrent
105  * access to map() and unmap() completely unrelated buffer objects. TTM buffer
106  * object reservations sometimes wait for GPU and should therefore be
107  * considered long waits. This function reserves the buffer object interruptibly
108  * taking this into account. Starvation is avoided by the vm system not
109  * allowing too many repeated restarts.
110  * This function is intended to be used in customized fault() and _mkwrite()
111  * handlers.
112  *
113  * Return:
114  *    0 on success and the bo was reserved.
115  *    VM_FAULT_RETRY if blocking wait.
116  *    VM_FAULT_NOPAGE if blocking wait and retrying was not allowed.
117  */
118 vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
119 			     struct vm_fault *vmf)
120 {
121 	/*
122 	 * Work around locking order reversal in fault / nopfn
123 	 * between mmap_lock and bo_reserve: Perform a trylock operation
124 	 * for reserve, and if it fails, retry the fault after waiting
125 	 * for the buffer to become unreserved.
126 	 */
127 	if (unlikely(!dma_resv_trylock(bo->base.resv))) {
128 		/*
129 		 * If the fault allows retry and this is the first
130 		 * fault attempt, we try to release the mmap_lock
131 		 * before waiting
132 		 */
133 		if (fault_flag_allow_retry_first(vmf->flags)) {
134 			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
135 				ttm_bo_get(bo);
136 				mmap_read_unlock(vmf->vma->vm_mm);
137 				if (!dma_resv_lock_interruptible(bo->base.resv,
138 								 NULL))
139 					dma_resv_unlock(bo->base.resv);
140 				ttm_bo_put(bo);
141 			}
142 
143 			return VM_FAULT_RETRY;
144 		}
145 
146 		if (dma_resv_lock_interruptible(bo->base.resv, NULL))
147 			return VM_FAULT_NOPAGE;
148 	}
149 
150 	/*
151 	 * Refuse to fault imported pages. This should be handled
152 	 * (if at all) by redirecting mmap to the exporter.
153 	 */
154 	if (bo->ttm && (bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
155 		if (!(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)) {
156 			dma_resv_unlock(bo->base.resv);
157 			return VM_FAULT_SIGBUS;
158 		}
159 	}
160 
161 	return 0;
162 }
163 EXPORT_SYMBOL(ttm_bo_vm_reserve);
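/*
 * A minimal sketch of a customized driver fault() handler built on
 * ttm_bo_vm_reserve() and ttm_bo_vm_fault_reserved(); mydrv_fault() is a
 * hypothetical name and the structure mirrors ttm_bo_vm_fault() below:
 *
 *	static vm_fault_t mydrv_fault(struct vm_fault *vmf)
 *	{
 *		struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
 *		vm_fault_t ret;
 *
 *		ret = ttm_bo_vm_reserve(bo, vmf);
 *		if (ret)
 *			return ret;
 *
 *		// Driver-specific work goes here, while the bo is reserved.
 *
 *		ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
 *					       TTM_BO_VM_NUM_PREFAULT);
 *		if (ret == VM_FAULT_RETRY &&
 *		    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
 *			return ret;	// reservation already dropped
 *
 *		dma_resv_unlock(bo->base.resv);
 *		return ret;
 *	}
 */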
164 
165 /**
166  * ttm_bo_vm_fault_reserved - TTM fault helper
167  * @vmf: The struct vm_fault given as argument to the fault callback
168  * @prot: The page protection to be used for this memory area.
169  * @num_prefault: Maximum number of prefault pages. The caller may want to
170  * specify this based on madvise settings and the size of the GPU object
171  * backed by the memory.
172  *
173  * This function inserts one or more page table entries pointing to the
174  * memory backing the buffer object, and then returns a return code
175  * instructing the caller to retry the page access.
176  *
177  * Return:
178  *   VM_FAULT_NOPAGE on success or pending signal
179  *   VM_FAULT_SIGBUS on unspecified error
180  *   VM_FAULT_OOM on out-of-memory
181  *   VM_FAULT_RETRY if retryable wait
182  */
183 vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
184 				    pgprot_t prot,
185 				    pgoff_t num_prefault)
186 {
187 	struct vm_area_struct *vma = vmf->vma;
188 	struct ttm_buffer_object *bo = vma->vm_private_data;
189 	struct ttm_device *bdev = bo->bdev;
190 	unsigned long page_offset;
191 	unsigned long page_last;
192 	unsigned long pfn;
193 	struct ttm_tt *ttm = NULL;
194 	struct vm_page *page;
195 	int err;
196 	pgoff_t i;
197 	vm_fault_t ret = VM_FAULT_NOPAGE;
198 	unsigned long address = vmf->address;
199 
200 	/*
201 	 * Wait for buffer data in transit, due to a pipelined
202 	 * move.
203 	 */
204 	ret = ttm_bo_vm_fault_idle(bo, vmf);
205 	if (unlikely(ret != 0))
206 		return ret;
207 
208 	err = ttm_mem_io_reserve(bdev, bo->resource);
209 	if (unlikely(err != 0))
210 		return VM_FAULT_SIGBUS;
211 
212 	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
213 		vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node);
214 	page_last = vma_pages(vma) + vma->vm_pgoff -
215 		drm_vma_node_start(&bo->base.vma_node);
216 
217 	if (unlikely(page_offset >= PFN_UP(bo->base.size)))
218 		return VM_FAULT_SIGBUS;
219 
220 	prot = ttm_io_prot(bo, bo->resource, prot);
221 	if (!bo->resource->bus.is_iomem) {
222 		struct ttm_operation_ctx ctx = {
223 			.interruptible = true,
224 			.no_wait_gpu = false,
225 			.force_alloc = true
226 		};
227 
228 		ttm = bo->ttm;
229 		err = ttm_tt_populate(bdev, bo->ttm, &ctx);
230 		if (err) {
231 			if (err == -EINTR || err == -ERESTARTSYS ||
232 			    err == -EAGAIN)
233 				return VM_FAULT_NOPAGE;
234 
235 			pr_debug("TTM fault hit %pe.\n", ERR_PTR(err));
236 			return VM_FAULT_SIGBUS;
237 		}
238 	} else {
239 		/* Iomem should not be marked encrypted */
240 		prot = pgprot_decrypted(prot);
241 	}
242 
243 	/*
244 	 * Speculatively prefault a number of pages. Only error on
245 	 * first page.
246 	 */
247 	for (i = 0; i < num_prefault; ++i) {
248 		if (bo->resource->bus.is_iomem) {
249 			pfn = ttm_bo_io_mem_pfn(bo, page_offset);
250 		} else {
251 			page = ttm->pages[page_offset];
252 			if (unlikely(!page && i == 0)) {
253 				return VM_FAULT_OOM;
254 			} else if (unlikely(!page)) {
255 				break;
256 			}
257 			pfn = page_to_pfn(page);
258 		}
259 
260 		/*
261 		 * Note that the value of @prot at this point may differ from
262 		 * the value of @vma->vm_page_prot in the caching- and
263 		 * encryption bits. This is because the exact location of the
264 		 * data may not be known at mmap() time and may also change
265 		 * at arbitrary times while the data is mmap'ed.
266 		 * See vmf_insert_pfn_prot() for a discussion.
267 		 */
268 		ret = vmf_insert_pfn_prot(vma, address, pfn, prot);
269 
270 		/* Never error on prefaulted PTEs */
271 		if (unlikely((ret & VM_FAULT_ERROR))) {
272 			if (i == 0)
273 				return VM_FAULT_NOPAGE;
274 			else
275 				break;
276 		}
277 
278 		address += PAGE_SIZE;
279 		if (unlikely(++page_offset >= page_last))
280 			break;
281 	}
282 	return ret;
283 }
284 EXPORT_SYMBOL(ttm_bo_vm_fault_reserved);
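/*
 * For illustration of the page_offset/page_last arithmetic above, assuming
 * 4 KiB pages: if drm_vma_node_start() is 0x100000, the vma was created with
 * vm_pgoff == 0x100004 (i.e. the mapping starts four pages into the BO), the
 * vma spans eight pages and the faulting address lies two pages past
 * vm_start, then page_offset = 2 + 0x100004 - 0x100000 = 6 and
 * page_last = 8 + 0x100004 - 0x100000 = 12, so prefaulting starts at the
 * seventh BO page and stops before page 12 or after num_prefault entries,
 * whichever comes first.
 */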
285 
286 static void ttm_bo_release_dummy_page(struct drm_device *dev, void *res)
287 {
288 	struct page *dummy_page = (struct page *)res;
289 
290 	__free_page(dummy_page);
291 }
292 
293 vm_fault_t ttm_bo_vm_dummy_page(struct vm_fault *vmf, pgprot_t prot)
294 {
295 	struct vm_area_struct *vma = vmf->vma;
296 	struct ttm_buffer_object *bo = vma->vm_private_data;
297 	struct drm_device *ddev = bo->base.dev;
298 	vm_fault_t ret = VM_FAULT_NOPAGE;
299 	unsigned long address;
300 	unsigned long pfn;
301 	struct page *page;
302 
303 	/* Allocate a new dummy page to map all the VA range in this VMA to it */
304 	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
305 	if (!page)
306 		return VM_FAULT_OOM;
307 
308 	/* Set the page to be freed using drmm release action */
309 	if (drmm_add_action_or_reset(ddev, ttm_bo_release_dummy_page, page))
310 		return VM_FAULT_OOM;
311 
312 	pfn = page_to_pfn(page);
313 
314 	/* Prefault the entire VMA range right away to avoid further faults */
315 	for (address = vma->vm_start; address < vma->vm_end;
316 	     address += PAGE_SIZE)
317 		ret = vmf_insert_pfn_prot(vma, address, pfn, prot);
318 
319 	return ret;
320 }
321 EXPORT_SYMBOL(ttm_bo_vm_dummy_page);
322 
323 vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
324 {
325 	struct vm_area_struct *vma = vmf->vma;
326 	pgprot_t prot;
327 	struct ttm_buffer_object *bo = vma->vm_private_data;
328 	struct drm_device *ddev = bo->base.dev;
329 	vm_fault_t ret;
330 	int idx;
331 
332 	ret = ttm_bo_vm_reserve(bo, vmf);
333 	if (ret)
334 		return ret;
335 
336 	prot = vma->vm_page_prot;
337 	if (drm_dev_enter(ddev, &idx)) {
338 		ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
339 		drm_dev_exit(idx);
340 	} else {
341 		ret = ttm_bo_vm_dummy_page(vmf, prot);
342 	}
343 	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
344 		return ret;
345 
346 	dma_resv_unlock(bo->base.resv);
347 
348 	return ret;
349 }
350 EXPORT_SYMBOL(ttm_bo_vm_fault);
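/*
 * A minimal sketch of how a driver typically wires these helpers up; the
 * mydrv_* name is hypothetical, and the table mirrors the default
 * ttm_bo_vm_ops further down in this file:
 *
 *	static const struct vm_operations_struct mydrv_ttm_vm_ops = {
 *		.fault = ttm_bo_vm_fault,
 *		.open = ttm_bo_vm_open,
 *		.close = ttm_bo_vm_close,
 *		.access = ttm_bo_vm_access,
 *	};
 *
 * Drivers that need extra work in the fault path point .fault at their own
 * handler (see the sketch after ttm_bo_vm_reserve() above) and keep the
 * remaining callbacks as the TTM defaults.
 */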
351 
352 #else /* !__linux__ */
353 
354 static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
355     struct uvm_faultinfo *ufi)
356 {
357 	long err = 0;
358 
359 	/*
360 	 * Quick non-stalling check for idle.
361 	 */
362 	if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_KERNEL))
363 		return 0;
364 
365 #ifdef __linux__
366 	/*
367 	 * If possible, avoid waiting for GPU with mmap_lock
368 	 * held.  We only do this if the fault allows retry and this
369 	 * is the first attempt.
370 	 */
371 	if (fault_flag_allow_retry_first(vmf->flags)) {
372 		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
373 			return VM_FAULT_RETRY;
374 
375 		ttm_bo_get(bo);
376 		mmap_read_unlock(vmf->vma->vm_mm);
377 		(void) dma_fence_wait(bo->moving, true);
378 		(void)dma_resv_wait_timeout(bo->base.resv,
379 					    DMA_RESV_USAGE_KERNEL, true,
380 					    MAX_SCHEDULE_TIMEOUT);
381 		dma_resv_unlock(bo->base.resv);
382 		ttm_bo_put(bo);
383 		return VM_FAULT_RETRY;
384 	}
385 #endif
386 
387 	/*
388 	 * Ordinary wait.
389 	 */
390 	err = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_KERNEL, true,
391 				    MAX_SCHEDULE_TIMEOUT);
392 	if (unlikely(err < 0)) {
393 		return (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
394 			VM_FAULT_NOPAGE;
395 	}
396 
397 	return 0;
398 }
399 
400 static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
401 				       unsigned long page_offset)
402 {
403 	struct ttm_device *bdev = bo->bdev;
404 
405 	if (bdev->funcs->io_mem_pfn)
406 		return bdev->funcs->io_mem_pfn(bo, page_offset);
407 
408 	return (bo->resource->bus.offset >> PAGE_SHIFT) + page_offset;
409 }
410 
411 /**
412  * ttm_bo_vm_reserve - Reserve a buffer object in a retryable vm callback
413  * @bo: The buffer object
414  * @vmf: The fault structure handed to the callback
415  *
416  * vm callbacks like fault() and *_mkwrite() allow for the mmap_lock to be dropped
417  * during long waits, and after the wait the callback will be restarted. This
418  * is to allow other threads using the same virtual memory space concurrent
419  * access to map() and unmap() completely unrelated buffer objects. TTM buffer
420  * object reservations sometimes wait for GPU and should therefore be
421  * considered long waits. This function reserves the buffer object interruptibly
422  * taking this into account. Starvation is avoided by the vm system not
423  * allowing too many repeated restarts.
424  * This function is intended to be used in customized fault() and _mkwrite()
425  * handlers.
426  *
427  * Return:
428  *    0 on success and the bo was reserved.
429  *    VM_FAULT_RETRY if blocking wait.
430  *    VM_FAULT_NOPAGE if blocking wait and retrying was not allowed.
431  */
432 vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo)
433 {
434 	/*
435 	 * Work around locking order reversal in fault / nopfn
436 	 * between mmap_lock and bo_reserve: Perform a trylock operation
437 	 * for reserve, and if it fails, retry the fault after waiting
438 	 * for the buffer to become unreserved.
439 	 */
440 	if (unlikely(!dma_resv_trylock(bo->base.resv))) {
441 #ifdef __linux__
442 		/*
443 		 * If the fault allows retry and this is the first
444 		 * fault attempt, we try to release the mmap_lock
445 		 * before waiting
446 		 */
447 		if (fault_flag_allow_retry_first(vmf->flags)) {
448 			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
449 				ttm_bo_get(bo);
450 				mmap_read_unlock(vmf->vma->vm_mm);
451 				if (!dma_resv_lock_interruptible(bo->base.resv,
452 								 NULL))
453 					dma_resv_unlock(bo->base.resv);
454 				ttm_bo_put(bo);
455 			}
456 
457 			return VM_FAULT_RETRY;
458 		}
459 #endif
460 
461 		if (dma_resv_lock_interruptible(bo->base.resv, NULL))
462 			return VM_FAULT_NOPAGE;
463 	}
464 
465 	/*
466 	 * Refuse to fault imported pages. This should be handled
467 	 * (if at all) by redirecting mmap to the exporter.
468 	 */
469 	if (bo->ttm && (bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
470 		if (!(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)) {
471 			dma_resv_unlock(bo->base.resv);
472 			return VM_FAULT_SIGBUS;
473 		}
474 	}
475 
476 	return 0;
477 }
478 
479 vm_fault_t ttm_bo_vm_fault_reserved(struct uvm_faultinfo *ufi,
480 				    vaddr_t vaddr,
481 				    pgoff_t num_prefault,
482 				    pgoff_t fault_page_size)
483 {
484 	struct uvm_object *uobj = ufi->entry->object.uvm_obj;
485 	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj;
486 	struct ttm_device *bdev = bo->bdev;
487 	unsigned long page_offset;
488 	unsigned long page_last;
489 	unsigned long pfn;
490 	struct ttm_tt *ttm = NULL;
491 	struct vm_page *page;
492 	bus_addr_t addr;
493 	paddr_t paddr;
494 	vm_prot_t prot;
495 	int pmap_flags;
496 	int err;
497 	pgoff_t i;
498 	vm_fault_t ret = VM_FAULT_NOPAGE;
499 	unsigned long address = (unsigned long)vaddr;
500 
501 	/*
502 	 * Wait for buffer data in transit, due to a pipelined
503 	 * move.
504 	 */
505 	ret = ttm_bo_vm_fault_idle(bo, ufi);
506 	if (unlikely(ret != 0))
507 		return ret;
508 	ret = VM_FAULT_NOPAGE;
509 
510 	err = ttm_mem_io_reserve(bdev, bo->resource);
511 	if (unlikely(err != 0))
512 		return VM_FAULT_SIGBUS;
513 
514 	page_offset = ((address - ufi->entry->start) >> PAGE_SHIFT) +
515 	    drm_vma_node_start(&bo->base.vma_node) - (ufi->entry->offset >> PAGE_SHIFT);
516 	page_last = ((ufi->entry->end - ufi->entry->start) >> PAGE_SHIFT) +
517 	    drm_vma_node_start(&bo->base.vma_node) - (ufi->entry->offset >> PAGE_SHIFT);
518 
519 	if (unlikely(page_offset >= PFN_UP(bo->base.size)))
520 		return VM_FAULT_SIGBUS;
521 
522 	prot = ufi->entry->protection;
523 	pmap_flags = ttm_io_prot(bo, bo->resource, 0);
524 	if (!bo->resource->bus.is_iomem) {
525 		struct ttm_operation_ctx ctx = {
526 			.interruptible = true,
527 			.no_wait_gpu = false,
528 			.force_alloc = true
529 		};
530 
531 		ttm = bo->ttm;
532 		err = ttm_tt_populate(bdev, bo->ttm, &ctx);
533 		if (err) {
534 			if (err == -EINTR || err == -ERESTARTSYS ||
535 			    err == -EAGAIN)
536 				return VM_FAULT_NOPAGE;
537 
538 			pr_debug("TTM fault hit %pe.\n", ERR_PTR(err));
539 			return VM_FAULT_SIGBUS;
540 		}
541 	}
542 
543 #ifdef __linux__
544 	/* We don't prefault on huge faults. Yet. */
545 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && fault_page_size != 1)
546 		return ttm_bo_vm_insert_huge(vmf, bo, page_offset,
547 					     fault_page_size, prot);
548 #endif
549 
550 	/*
551 	 * Speculatively prefault a number of pages. Only error on
552 	 * first page.
553 	 */
554 	for (i = 0; i < num_prefault; ++i) {
555 		if (bo->resource->bus.is_iomem) {
556 			pfn = ttm_bo_io_mem_pfn(bo, page_offset);
557 			addr = pfn << PAGE_SHIFT;
558 			paddr = bus_space_mmap(bdev->memt, addr, 0, prot, 0);
559 		} else {
560 			page = ttm->pages[page_offset];
561 			if (unlikely(!page && i == 0)) {
562 				return VM_FAULT_OOM;
563 			} else if (unlikely(!page)) {
564 				break;
565 			}
566 			paddr = VM_PAGE_TO_PHYS(page);
567 		}
568 
569 		err = pmap_enter(ufi->orig_map->pmap, address,
570 		    paddr | pmap_flags, prot, PMAP_CANFAIL | prot);
571 
572 		/* Never error on prefaulted PTEs */
573 		if (unlikely(err)) {
574 			ret = VM_FAULT_OOM;
575 			if (i == 0)
576 				return VM_FAULT_NOPAGE;
577 			else
578 				break;
579 		}
580 
581 		address += PAGE_SIZE;
582 		if (unlikely(++page_offset >= page_last))
583 			break;
584 	}
585 	pmap_update(ufi->orig_map->pmap);
586 	return ret;
587 }
588 EXPORT_SYMBOL(ttm_bo_vm_fault_reserved);
589 
590 int
591 ttm_bo_vm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
592     int npages, int centeridx, vm_fault_t fault_type,
593     vm_prot_t access_type, int flags)
594 {
595 	struct uvm_object *uobj = ufi->entry->object.uvm_obj;
596 	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj;
597 	vm_fault_t ret;
598 
599 	ret = ttm_bo_vm_reserve(bo);
600 	if (ret) {
601 		goto out;
602 	}
603 
604 	ret = ttm_bo_vm_fault_reserved(ufi, vaddr, TTM_BO_VM_NUM_PREFAULT, 1);
605 	dma_resv_unlock(bo->base.resv);
606 out:
607 	switch (ret) {
608 	case VM_FAULT_NOPAGE:
609 		ret = 0;
610 		break;
611 	case VM_FAULT_RETRY:
612 		ret = ERESTART;
613 		break;
614 	default:
615 		ret = EACCES;
616 		break;
617 	}
618 	uvmfault_unlockall(ufi, NULL, uobj);
619 	return ret;
620 }
621 EXPORT_SYMBOL(ttm_bo_vm_fault);
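/*
 * On OpenBSD the equivalent wiring goes through uvm_pagerops; a minimal,
 * hypothetical driver that wants extra checks before delegating to TTM could
 * look like this (mirroring the default ttm_bo_vm_ops below):
 *
 *	static int mydrv_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr,
 *	    vm_page_t *pps, int npages, int centeridx, vm_fault_t fault_type,
 *	    vm_prot_t access_type, int flags)
 *	{
 *		// Driver-specific checks, e.g. device-lost handling.
 *		return ttm_bo_vm_fault(ufi, vaddr, pps, npages, centeridx,
 *		    fault_type, access_type, flags);
 *	}
 *
 *	static const struct uvm_pagerops mydrv_pgops = {
 *		.pgo_fault = mydrv_fault,
 *		.pgo_reference = ttm_bo_vm_reference,
 *		.pgo_detach = ttm_bo_vm_detach,
 *	};
 */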
622 
623 #endif /* !__linux__ */
624 
625 #ifdef notyet
626 void ttm_bo_vm_open(struct vm_area_struct *vma)
627 {
628 	struct ttm_buffer_object *bo = vma->vm_private_data;
629 
630 	WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);
631 
632 	ttm_bo_get(bo);
633 }
634 EXPORT_SYMBOL(ttm_bo_vm_open);
635 
636 void ttm_bo_vm_close(struct vm_area_struct *vma)
637 {
638 	struct ttm_buffer_object *bo = vma->vm_private_data;
639 
640 	ttm_bo_put(bo);
641 	vma->vm_private_data = NULL;
642 }
643 EXPORT_SYMBOL(ttm_bo_vm_close);
644 
645 static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
646 				 unsigned long offset,
647 				 uint8_t *buf, int len, int write)
648 {
649 	unsigned long page = offset >> PAGE_SHIFT;
650 	unsigned long bytes_left = len;
651 	int ret;
652 
653 	/* Copy a page at a time, that way no extra virtual address
654 	 * mapping is needed
655 	 */
656 	offset -= page << PAGE_SHIFT;
657 	do {
658 		unsigned long bytes = min(bytes_left, PAGE_SIZE - offset);
659 		struct ttm_bo_kmap_obj map;
660 		void *ptr;
661 		bool is_iomem;
662 
663 		ret = ttm_bo_kmap(bo, page, 1, &map);
664 		if (ret)
665 			return ret;
666 
667 		ptr = (uint8_t *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset;
668 		WARN_ON_ONCE(is_iomem);
669 		if (write)
670 			memcpy(ptr, buf, bytes);
671 		else
672 			memcpy(buf, ptr, bytes);
673 		ttm_bo_kunmap(&map);
674 
675 		page++;
676 		buf += bytes;
677 		bytes_left -= bytes;
678 		offset = 0;
679 	} while (bytes_left);
680 
681 	return len;
682 }
683 
684 int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
685 		     void *buf, int len, int write)
686 {
687 	struct ttm_buffer_object *bo = vma->vm_private_data;
688 	unsigned long offset = (addr) - vma->vm_start +
689 		((vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node))
690 		 << PAGE_SHIFT);
691 	int ret;
692 
693 	if (len < 1 || (offset + len) > bo->base.size)
694 		return -EIO;
695 
696 	ret = ttm_bo_reserve(bo, true, false, NULL);
697 	if (ret)
698 		return ret;
699 
700 	switch (bo->resource->mem_type) {
701 	case TTM_PL_SYSTEM:
702 		fallthrough;
703 	case TTM_PL_TT:
704 		ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write);
705 		break;
706 	default:
707 		if (bo->bdev->funcs->access_memory)
708 			ret = bo->bdev->funcs->access_memory(
709 				bo, offset, buf, len, write);
710 		else
711 			ret = -EIO;
712 	}
713 
714 	ttm_bo_unreserve(bo);
715 
716 	return ret;
717 }
718 EXPORT_SYMBOL(ttm_bo_vm_access);
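/*
 * For placements that ttm_bo_vm_access() cannot kmap (e.g. VRAM), drivers
 * provide struct ttm_device_funcs::access_memory. A hypothetical sketch of
 * such a hook, matching the call made in the default case above:
 *
 *	static int mydrv_access_memory(struct ttm_buffer_object *bo,
 *				       unsigned long offset, void *buf,
 *				       int len, int write)
 *	{
 *		// Copy len bytes between buf and the BO's VRAM backing at
 *		// offset, returning bytes copied or a negative errno.
 *		return mydrv_vram_rw(bo, offset, buf, len, write);
 *	}
 */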
719 
720 static const struct vm_operations_struct ttm_bo_vm_ops = {
721 	.fault = ttm_bo_vm_fault,
722 	.open = ttm_bo_vm_open,
723 	.close = ttm_bo_vm_close,
724 	.access = ttm_bo_vm_access,
725 };
726 #endif
727 
728 void
729 ttm_bo_vm_reference(struct uvm_object *uobj)
730 {
731 	struct ttm_buffer_object *bo =
732 	    (struct ttm_buffer_object *)uobj;
733 
734 	ttm_bo_get(bo);
735 }
736 
737 void
738 ttm_bo_vm_detach(struct uvm_object *uobj)
739 {
740 	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj;
741 
742 	ttm_bo_put(bo);
743 }
744 
745 const struct uvm_pagerops ttm_bo_vm_ops = {
746 	.pgo_fault = ttm_bo_vm_fault,
747 	.pgo_reference = ttm_bo_vm_reference,
748 	.pgo_detach = ttm_bo_vm_detach
749 };
750 
751 #ifdef __linux__
752 /**
753  * ttm_bo_mmap_obj - mmap memory backed by a ttm buffer object.
754  *
755  * @vma:       vma as input from the fbdev mmap method.
756  * @bo:        The bo backing the address space.
757  *
758  * Maps a buffer object.
759  */
760 int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
761 {
762 	/* Enforce no COW since it would have really strange behavior with it. */
763 	if (is_cow_mapping(vma->vm_flags))
764 		return -EINVAL;
765 
766 	ttm_bo_get(bo);
767 
768 	/*
769 	 * Drivers may want to override the vm_ops field. Otherwise we
770 	 * use TTM's default callbacks.
771 	 */
772 	if (!vma->vm_ops)
773 		vma->vm_ops = &ttm_bo_vm_ops;
774 
775 	/*
776 	 * Note: We're transferring the bo reference to
777 	 * vma->vm_private_data here.
778 	 */
779 
780 	vma->vm_private_data = bo;
781 
782 	vm_flags_set(vma, VM_PFNMAP | VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
783 	return 0;
784 }
785 EXPORT_SYMBOL(ttm_bo_mmap_obj);
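/*
 * A minimal sketch of a GEM mmap callback delegating to ttm_bo_mmap_obj();
 * mydrv_gem_mmap() is a hypothetical name and assumes the GEM object is
 * embedded in the ttm_buffer_object as bo->base:
 *
 *	static int mydrv_gem_mmap(struct drm_gem_object *obj,
 *				  struct vm_area_struct *vma)
 *	{
 *		struct ttm_buffer_object *bo =
 *			container_of(obj, struct ttm_buffer_object, base);
 *
 *		// Takes a bo reference that is transferred to
 *		// vma->vm_private_data and dropped from ttm_bo_vm_close().
 *		return ttm_bo_mmap_obj(vma, bo);
 *	}
 */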
786 #else /* !__linux__ */
787 int ttm_bo_mmap_obj(struct ttm_buffer_object *bo)
788 {
789 	/* Enforce no COW since it would have really strange behavior with it. */
790 #ifdef notyet
791 	if (UVM_ET_ISCOPYONWRITE(entry))
792 		return -EINVAL;
793 #endif
794 
795 	ttm_bo_get(bo);
796 
797 	/*
798 	 * Drivers may want to override the vm_ops field. Otherwise we
799 	 * use TTM's default callbacks.
800 	 */
801 	if (bo->base.uobj.pgops == NULL)
802 		uvm_obj_init(&bo->base.uobj, &ttm_bo_vm_ops, 1);
803 
804 	/*
805 	 * Note: We're transferring the bo reference to
806 	 * vma->vm_private_data here.
807 	 */
808 
809 #ifdef notyet
810 	vma->vm_private_data = bo;
811 
812 	vma->vm_flags |= VM_PFNMAP;
813 	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
814 #endif
815 	return 0;
816 }
817 #endif /* !__linux__ */
818