xref: /dragonfly/sys/dev/drm/ttm/ttm_bo_vm.c (revision 0dace59e)
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
/*
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
 *
 *$FreeBSD: head/sys/dev/drm2/ttm/ttm_bo_vm.c 253710 2013-07-27 16:44:37Z kib $
 */

#include "opt_vm.h"

#include <drm/drmP.h>
#include <dev/drm/ttm/ttm_module.h>
#include <dev/drm/ttm/ttm_bo_driver.h>
#include <dev/drm/ttm/ttm_placement.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

#define TTM_BO_VM_NUM_PREFAULT 16
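
/*
 * Number of extra pages the Linux fault handler prefaults around the
 * faulting address. It is currently unused in this file: the fault
 * handler below brings in one page at a time.
 */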

RB_GENERATE(ttm_bo_device_buffer_objects, ttm_buffer_object, vm_rb,
    ttm_bo_cmp_rb_tree_items);

int
ttm_bo_cmp_rb_tree_items(struct ttm_buffer_object *a,
    struct ttm_buffer_object *b)
{

	if (a->vm_node->start < b->vm_node->start) {
		return (-1);
	} else if (a->vm_node->start > b->vm_node->start) {
		return (1);
	} else {
		return (0);
	}
}

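/*
 * Look up the buffer object backing the given range of VM pages.
 * RB_FOREACH visits the tree in ascending order of vm_node->start, so
 * best_bo ends up as the object with the largest start offset that is
 * still <= page_start; the size check at the end then rejects ranges
 * that extend past that object. Note that this is a full in-order walk
 * rather than a tree descent, i.e. linear in the number of objects.
 */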
static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
						     unsigned long page_start,
						     unsigned long num_pages)
{
	unsigned long cur_offset;
	struct ttm_buffer_object *bo;
	struct ttm_buffer_object *best_bo = NULL;

	RB_FOREACH(bo, ttm_bo_device_buffer_objects, &bdev->addr_space_rb) {
		cur_offset = bo->vm_node->start;
		if (page_start >= cur_offset) {
			best_bo = bo;
			if (page_start == cur_offset)
				break;
		}
	}

	if (unlikely(best_bo == NULL))
		return NULL;

	if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
		     (page_start + num_pages)))
		return NULL;

	return best_bo;
}

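/*
 * Page fault handler for the device pager, entered with the VM object
 * write-locked. The page previously passed in via *mres, if any, is
 * removed from the object and replaced; on success the handler returns
 * VM_PAGER_OK with the newly installed, busied page in *mres, otherwise
 * VM_PAGER_ERROR.
 */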
static int
ttm_bo_vm_fault(vm_object_t vm_obj, vm_ooffset_t offset,
    int prot, vm_page_t *mres)
{

	struct ttm_buffer_object *bo = vm_obj->handle;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_tt *ttm = NULL;
	vm_page_t m, m1, oldm;
	int ret;
	int retval = VM_PAGER_OK;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];

	vm_object_pip_add(vm_obj, 1);
	oldm = *mres;
	if (oldm != NULL) {
		vm_page_remove(oldm);
		*mres = NULL;
	} else
		oldm = NULL;
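
	/*
	 * Everything below may have to be retried: the object lock is
	 * dropped while the buffer object is reserved and waited upon,
	 * and another thread may busy the target page in the meantime,
	 * in which case we back out and start over from here.
	 */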
retry:
	VM_OBJECT_WUNLOCK(vm_obj);
	m = NULL;

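	/*
	 * Busy-wait, yielding the CPU, until the buffer object can be
	 * reserved. Note that reservation failures other than -EBUSY
	 * are not handled here and fall through as if the reservation
	 * had succeeded.
	 */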
reserve:
	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (unlikely(ret != 0)) {
		if (ret == -EBUSY) {
			lwkt_yield();
			goto reserve;
		}
	}

	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
		case -ERESTART:
		case -EINTR:
			lwkt_yield();
			goto reserve;
		default:
			retval = VM_PAGER_ERROR;
			goto out_unlock;
		}
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */

	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
		/*
		 * Here, the behavior differs between Linux and FreeBSD.
		 *
		 * On Linux, the wait is interruptible (3rd argument to
		 * ttm_bo_wait). There must be some mechanism to resume
		 * page fault handling, once the signal is processed.
		 *
		 * On FreeBSD, the wait is uninterruptible. This is not a
		 * problem as we can't end up with an unkillable process
		 * here, because the wait will eventually time out.
		 *
		 * An example of why an interruptible wait would be a
		 * problem is the Xorg process, which uses SIGALRM
		 * internally. The signal could interrupt the wait,
		 * causing the page fault to fail and the process to
		 * receive SIGSEGV.
		 */
		ret = ttm_bo_wait(bo, false, false, false);
		lockmgr(&bdev->fence_lock, LK_RELEASE);
		if (unlikely(ret != 0)) {
			retval = VM_PAGER_ERROR;
			goto out_unlock;
		}
	} else
		lockmgr(&bdev->fence_lock, LK_RELEASE);

	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		goto out_unlock;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		goto out_io_unlock;
	}

	/*
	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
	 * since the mmap_sem is only held in read mode. However, we
	 * modify only the caching bits of vma->vm_page_prot and
	 * consider those bits protected by
	 * the bo->mutex, as we should be the only writers.
	 * There shouldn't really be any readers of these bits except
	 * within vm_insert_mixed()? fork?
	 *
	 * TODO: Add a list of vmas to the bo, and change the
	 * vma->vm_page_prot when the object changes caching policy, with
	 * the correct locks held.
	 */
	if (!bo->mem.bus.is_iomem) {
		/* Allocate all pages at once, most common usage */
		ttm = bo->ttm;
		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
			retval = VM_PAGER_ERROR;
			goto out_io_unlock;
		}
	}

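	/*
	 * Find the page backing the faulting offset: a fictitious page
	 * covering the device aperture in the iomem case, or the page
	 * from the ttm_tt page array otherwise. In both cases the
	 * caching attributes dictated by the buffer placement are
	 * applied to the page.
	 */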
	if (bo->mem.bus.is_iomem) {
		m = vm_phys_fictitious_to_vm_page(bo->mem.bus.base +
		    bo->mem.bus.offset + offset);
		pmap_page_set_memattr(m, ttm_io_prot(bo->mem.placement));
	} else {
		ttm = bo->ttm;
		m = ttm->pages[OFF_TO_IDX(offset)];
		if (unlikely(!m)) {
			retval = VM_PAGER_ERROR;
			goto out_io_unlock;
		}
		pmap_page_set_memattr(m,
		    (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
		    VM_MEMATTR_WRITE_BACK : ttm_io_prot(bo->mem.placement));
	}

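	/*
	 * Retake the object lock and install the page. If another
	 * thread holds the page busy, back out completely, dropping
	 * the I/O lock and the reservation, and retry the fault.
	 */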
	VM_OBJECT_WLOCK(vm_obj);
	if ((m->flags & PG_BUSY) != 0) {
#if 0
		vm_page_sleep(m, "ttmpbs");
#endif
		ttm_mem_io_unlock(man);
		ttm_bo_unreserve(bo);
		goto retry;
	}
	m->valid = VM_PAGE_BITS_ALL;
	*mres = m;
	m1 = vm_page_lookup(vm_obj, OFF_TO_IDX(offset));
	if (m1 == NULL) {
		vm_page_insert(m, vm_obj, OFF_TO_IDX(offset));
	} else {
		KASSERT(m == m1,
		    ("inconsistent insert bo %p m %p m1 %p offset %jx",
		    bo, m, m1, (uintmax_t)offset));
	}
	vm_page_busy_try(m, FALSE);

	if (oldm != NULL) {
		vm_page_free(oldm);
	}

out_io_unlock1:
	ttm_mem_io_unlock(man);
out_unlock1:
	ttm_bo_unreserve(bo);
	vm_object_pip_wakeup(vm_obj);
	return (retval);

out_io_unlock:
	VM_OBJECT_WLOCK(vm_obj);
	goto out_io_unlock1;

out_unlock:
	VM_OBJECT_WLOCK(vm_obj);
	goto out_unlock1;
}

static int
ttm_bo_vm_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{

	/*
	 * On Linux, a reference to the buffer object is acquired here.
	 * The reason is that this function is not called when the
	 * mmap() is initialized, but only when a process forks for
	 * instance. Therefore on Linux, the reference on the bo is
	 * acquired either in ttm_bo_mmap() or ttm_bo_vm_open(). It's
	 * then released in ttm_bo_vm_close().
	 *
	 * Here, this function is called during mmap() initialization.
	 * Thus, the reference acquired in ttm_bo_mmap_single() is
	 * sufficient.
	 */

	*color = 0;
	return (0);
}

static void
ttm_bo_vm_dtor(void *handle)
{
	struct ttm_buffer_object *bo = handle;

	ttm_bo_unref(&bo);
}

static struct cdev_pager_ops ttm_pager_ops = {
	.cdev_pg_fault = ttm_bo_vm_fault,
	.cdev_pg_ctor = ttm_bo_vm_ctor,
	.cdev_pg_dtor = ttm_bo_vm_dtor
};
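
/*
 * Pager glue for OBJT_MGTDEVICE objects backed by TTM buffer objects:
 * the ctor runs when the pager object is set up at mmap() time, the
 * fault handler resolves individual pages, and the dtor drops the
 * buffer object reference held in vm_obj->handle when the pager
 * object is destroyed.
 */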

int
ttm_bo_mmap_single(struct ttm_bo_device *bdev, vm_ooffset_t *offset, vm_size_t size,
    struct vm_object **obj_res, int nprot)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	struct vm_object *vm_obj;
	int ret;

	lockmgr(&bdev->vm_lock, LK_EXCLUSIVE);
	bo = ttm_bo_vm_lookup_rb(bdev, OFF_TO_IDX(*offset), OFF_TO_IDX(size));
	if (likely(bo != NULL))
		refcount_acquire(&bo->kref);
	lockmgr(&bdev->vm_lock, LK_RELEASE);

	if (unlikely(bo == NULL)) {
		kprintf("[TTM] Could not find buffer object to map\n");
		return (EINVAL);
	}

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = EPERM;
		goto out_unref;
	}
	ret = -driver->verify_access(bo);
	if (unlikely(ret != 0))
		goto out_unref;

	vm_obj = cdev_pager_allocate(bo, OBJT_MGTDEVICE, &ttm_pager_ops,
	    size, nprot, 0, curthread->td_ucred);
	if (vm_obj == NULL) {
		ret = EINVAL;
		goto out_unref;
	}
	/*
	 * Note: We're transferring the bo reference to vm_obj->handle here.
	 */
	*offset = 0;
	*obj_res = vm_obj;
	return 0;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}
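
/*
 * A minimal sketch of how a driver would hook this up from its
 * d_mmap_single entry point. The helper mydrv_get_bdev() is
 * hypothetical; the argument fields follow struct dev_mmap_single_args:
 *
 *	static int
 *	mydrv_mmap_single(struct dev_mmap_single_args *ap)
 *	{
 *		struct ttm_bo_device *bdev = mydrv_get_bdev(ap->a_head.a_dev);
 *
 *		return (ttm_bo_mmap_single(bdev, ap->a_offset, ap->a_size,
 *		    ap->a_object, ap->a_nprot));
 *	}
 */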

/*
 * Tear down all device-pager pages for this buffer object, typically
 * as part of releasing the object; a no-op if it was never mapped.
 */
void
ttm_bo_release_mmap(struct ttm_buffer_object *bo)
{
	vm_object_t vm_obj;
	vm_page_t m;
	int i;

	vm_obj = cdev_pager_lookup(bo);
	if (vm_obj == NULL)
		return;

	VM_OBJECT_WLOCK(vm_obj);
	for (i = 0; i < bo->num_pages; i++) {
		m = vm_page_lookup_busy_wait(vm_obj, i, TRUE, "ttm_unm");
		if (m == NULL)
			continue;
		cdev_pager_free_page(vm_obj, m);
	}
	VM_OBJECT_WUNLOCK(vm_obj);

	vm_object_deallocate(vm_obj);
}

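/*
 * What follows is the unported Linux implementation of ttm_fbdev_mmap()
 * and the ttm_bo_io() read/write path, kept disabled for reference.
 */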
#if 0
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = ttm_bo_reference(bo);
	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
}

ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
		  const char __user *wbuf, char __user *rbuf, size_t count,
		  loff_t *f_pos, bool write)
{
	struct ttm_buffer_object *bo;
	struct ttm_bo_driver *driver;
	struct ttm_bo_kmap_obj map;
	unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	read_lock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
	if (likely(bo != NULL))
		ttm_bo_reference(bo);
	read_unlock(&bdev->vm_lock);

	if (unlikely(bo == NULL))
		return -EFAULT;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}

	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	kmap_offset = dev_offset - bo->vm_node->start;
	if (unlikely(kmap_offset >= bo->num_pages)) {
		ret = -EFBIG;
		goto out_unref;
	}

	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

	switch (ret) {
	case 0:
		break;
	case -EBUSY:
		ret = -EAGAIN;
		goto out_unref;
	default:
		goto out_unref;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		goto out_unref;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	if (unlikely(ret != 0))
		return -EFBIG;

	*f_pos += io_size;

	return io_size;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}

ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
			char __user *rbuf, size_t count, loff_t *f_pos,
			bool write)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	kmap_offset = (*f_pos >> PAGE_SHIFT);
	if (unlikely(kmap_offset >= bo->num_pages))
		return -EFBIG;

	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

	switch (ret) {
	case 0:
		break;
	case -EBUSY:
		return -EAGAIN;
	default:
		return ret;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		return ret;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	if (unlikely(ret != 0))
		return ret;

	*f_pos += io_size;

	return io_size;
}
#endif
543