/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
/*
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
 *
 * $FreeBSD: head/sys/dev/drm2/ttm/ttm_bo_vm.c 253710 2013-07-27 16:44:37Z kib $
 */

#include "opt_vm.h"

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <linux/export.h>
#include <linux/rbtree.h>

#define TTM_BO_VM_NUM_PREFAULT 16

/*
 * Look up the buffer object whose VM node covers the page range
 * [page_start, page_start + num_pages) in the device's address-space
 * red-black tree. Returns NULL if no buffer object fully covers the
 * requested range.
 */
static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
                                                     unsigned long page_start,
                                                     unsigned long num_pages)
{
        struct rb_node *cur = bdev->addr_space_rb.rb_node;
        unsigned long cur_offset;
        struct ttm_buffer_object *bo;
        struct ttm_buffer_object *best_bo = NULL;

        while (likely(cur != NULL)) {
                bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
                cur_offset = bo->vm_node->start;
                if (page_start >= cur_offset) {
                        cur = cur->rb_right;
                        best_bo = bo;
                        if (page_start == cur_offset)
                                break;
                } else
                        cur = cur->rb_left;
        }

        if (unlikely(best_bo == NULL))
                return NULL;

        if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
                     (page_start + num_pages)))
                return NULL;

        return best_bo;
}

static int
ttm_bo_vm_fault(vm_object_t vm_obj, vm_ooffset_t offset,
    int prot, vm_page_t *mres)
{
        struct ttm_buffer_object *bo = vm_obj->handle;
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_tt *ttm = NULL;
        vm_page_t m, m1, oldm;
        int ret;
        int retval = VM_PAGER_OK;
        struct ttm_mem_type_manager *man =
                &bdev->man[bo->mem.mem_type];

        vm_object_pip_add(vm_obj, 1);
        oldm = *mres;
        if (oldm != NULL) {
                vm_page_remove(oldm);
                *mres = NULL;
        }
retry:
        VM_OBJECT_WUNLOCK(vm_obj);
        m = NULL;
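
        /*
         * Reserve the buffer object before touching its state. A
         * contended reservation (-EBUSY) means another thread holds
         * it, e.g. for a pipelined move or eviction; yield the CPU
         * and retry instead of spinning. Other errors are not
         * expected from ttm_bo_reserve() with these arguments and
         * fall through.
         */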
reserve:
        ret = ttm_bo_reserve(bo, false, false, false, 0);
        if (unlikely(ret != 0)) {
                if (ret == -EBUSY) {
                        lwkt_yield();
                        goto reserve;
                }
        }

        if (bdev->driver->fault_reserve_notify) {
                ret = bdev->driver->fault_reserve_notify(bo);
                switch (ret) {
                case 0:
                        break;
                case -EBUSY:
                case -ERESTART:
                case -EINTR:
                        lwkt_yield();
                        goto reserve;
                default:
                        retval = VM_PAGER_ERROR;
                        goto out_unlock;
                }
        }

        /*
         * Wait for buffer data in transit, due to a pipelined
         * move.
         */

        lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
        if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
                /*
                 * Here, the behavior differs between Linux and FreeBSD.
                 *
                 * On Linux, the wait is interruptible (3rd argument to
                 * ttm_bo_wait). There must be some mechanism to resume
                 * page fault handling, once the signal is processed.
                 *
                 * On FreeBSD, the wait is uninterruptible. This is not a
                 * problem as we can't end up with an unkillable process
                 * here, because the wait will eventually time out.
                 *
                 * An example of this situation is the Xorg process
                 * which uses SIGALRM internally. The signal could
                 * interrupt the wait, causing the page fault to fail
                 * and the process to receive SIGSEGV.
                 */
                ret = ttm_bo_wait(bo, false, false, false);
                lockmgr(&bdev->fence_lock, LK_RELEASE);
                if (unlikely(ret != 0)) {
                        retval = VM_PAGER_ERROR;
                        goto out_unlock;
                }
        } else
                lockmgr(&bdev->fence_lock, LK_RELEASE);

        ret = ttm_mem_io_lock(man, true);
        if (unlikely(ret != 0)) {
                retval = VM_PAGER_ERROR;
                goto out_unlock;
        }
        ret = ttm_mem_io_reserve_vm(bo);
        if (unlikely(ret != 0)) {
                retval = VM_PAGER_ERROR;
                goto out_io_unlock;
        }

        /*
         * Strictly, we're not allowed to modify vma->vm_page_prot here,
         * since the mmap_sem is only held in read mode. However, we
         * modify only the caching bits of vma->vm_page_prot and
         * consider those bits protected by the bo->mutex, as we should
         * be the only writers. There shouldn't really be any readers
         * of these bits except within vm_insert_mixed()? fork?
         *
         * TODO: Add a list of vmas to the bo, and change the
         * vma->vm_page_prot when the object changes caching policy, with
         * the correct locks held.
         */
        if (!bo->mem.bus.is_iomem) {
                /* Allocate all pages at once, the most common usage. */
                ttm = bo->ttm;
                if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
                        retval = VM_PAGER_ERROR;
                        goto out_io_unlock;
                }
        }
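
        /*
         * Two cases follow: for I/O memory (VRAM, apertures) the page
         * comes from the fictitious-page range the driver registered
         * for the bus address, with the attribute derived from the
         * placement; for system memory the page is taken from the
         * (just populated) ttm->pages array, write-back if the
         * placement is cached and the ttm_io_prot() translation of
         * the caching flags otherwise.
         */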

        if (bo->mem.bus.is_iomem) {
                m = vm_phys_fictitious_to_vm_page(bo->mem.bus.base +
                    bo->mem.bus.offset + offset);
                pmap_page_set_memattr(m, ttm_io_prot(bo->mem.placement));
        } else {
                ttm = bo->ttm;
                m = ttm->pages[OFF_TO_IDX(offset)];
                if (unlikely(!m)) {
                        retval = VM_PAGER_ERROR;
                        goto out_io_unlock;
                }
                pmap_page_set_memattr(m,
                    (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
                    VM_MEMATTR_WRITE_BACK : ttm_io_prot(bo->mem.placement));
        }

        VM_OBJECT_WLOCK(vm_obj);
        if ((m->flags & PG_BUSY) != 0) {
#if 0
                vm_page_sleep(m, "ttmpbs");
#endif
                ttm_mem_io_unlock(man);
                ttm_bo_unreserve(bo);
                goto retry;
        }
        m->valid = VM_PAGE_BITS_ALL;
        *mres = m;
        m1 = vm_page_lookup(vm_obj, OFF_TO_IDX(offset));
        if (m1 == NULL) {
                vm_page_insert(m, vm_obj, OFF_TO_IDX(offset));
        } else {
                KASSERT(m == m1,
                    ("inconsistent insert bo %p m %p m1 %p offset %jx",
                    bo, m, m1, (uintmax_t)offset));
        }
        vm_page_busy_try(m, FALSE);

        if (oldm != NULL) {
                vm_page_free(oldm);
        }

out_io_unlock1:
        ttm_mem_io_unlock(man);
out_unlock1:
        ttm_bo_unreserve(bo);
        vm_object_pip_wakeup(vm_obj);
        return (retval);

out_io_unlock:
        VM_OBJECT_WLOCK(vm_obj);
        goto out_io_unlock1;

out_unlock:
        VM_OBJECT_WLOCK(vm_obj);
        goto out_unlock1;
}

static int
ttm_bo_vm_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{

        /*
         * On Linux, a reference to the buffer object is acquired here.
         * The reason is that this function is not called when the
         * mmap() is initialized, but only when a process forks for
         * instance. Therefore on Linux, the reference on the bo is
         * acquired either in ttm_bo_mmap() or ttm_bo_vm_open(). It's
         * then released in ttm_bo_vm_close().
         *
         * Here, this function is called during mmap() initialization.
         * Thus, the reference acquired in ttm_bo_mmap_single() is
         * sufficient.
         */
        *color = 0;
        return (0);
}

static void
ttm_bo_vm_dtor(void *handle)
{
        struct ttm_buffer_object *bo = handle;

        ttm_bo_unref(&bo);
}

static struct cdev_pager_ops ttm_pager_ops = {
        .cdev_pg_fault = ttm_bo_vm_fault,
        .cdev_pg_ctor = ttm_bo_vm_ctor,
        .cdev_pg_dtor = ttm_bo_vm_dtor
};

int
ttm_bo_mmap_single(struct ttm_bo_device *bdev, vm_ooffset_t *offset,
    vm_size_t size, struct vm_object **obj_res, int nprot)
{
        struct ttm_bo_driver *driver;
        struct ttm_buffer_object *bo;
        struct vm_object *vm_obj;
        int ret;

        lockmgr(&bdev->vm_lock, LK_EXCLUSIVE);
        bo = ttm_bo_vm_lookup_rb(bdev, OFF_TO_IDX(*offset), OFF_TO_IDX(size));
        if (likely(bo != NULL))
                kref_get(&bo->kref);
        lockmgr(&bdev->vm_lock, LK_RELEASE);

        if (unlikely(bo == NULL)) {
                kprintf("[TTM] Could not find buffer object to map\n");
                return (EINVAL);
        }

        driver = bo->bdev->driver;
        if (unlikely(!driver->verify_access)) {
                ret = EPERM;
                goto out_unref;
        }
        ret = -driver->verify_access(bo);
        if (unlikely(ret != 0))
                goto out_unref;

        vm_obj = cdev_pager_allocate(bo, OBJT_MGTDEVICE, &ttm_pager_ops,
            size, nprot, 0, curthread->td_ucred);
        if (vm_obj == NULL) {
                ret = EINVAL;
                goto out_unref;
        }
        /*
         * Note: We're transferring the bo reference to vm_obj->handle
         * here; it is released in ttm_bo_vm_dtor() when the pager
         * object is destroyed.
         */
        *offset = 0;
        *obj_res = vm_obj;
        return 0;
out_unref:
        ttm_bo_unref(&bo);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap_single);
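
/*
 * Illustrative sketch (not part of this file): a DragonFly drm driver
 * typically reaches ttm_bo_mmap_single() from its d_mmap_single cdev
 * callback, after recovering its ttm_bo_device from the cdev. The
 * foo_* names below are assumptions for illustration only; only
 * ttm_bo_mmap_single() and its signature come from this file.
 *
 *	static int
 *	foo_mmap_single(struct dev_mmap_single_args *ap)
 *	{
 *		struct foo_softc *sc = foo_from_cdev(ap->a_head.a_dev);
 *
 *		return (ttm_bo_mmap_single(&sc->bdev, ap->a_offset,
 *		    ap->a_size, ap->a_object, ap->a_nprot));
 *	}
 */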

void
ttm_bo_release_mmap(struct ttm_buffer_object *bo)
{
        vm_object_t vm_obj;
        vm_page_t m;
        int i;

        vm_obj = cdev_pager_lookup(bo);
        if (vm_obj == NULL)
                return;

        VM_OBJECT_WLOCK(vm_obj);
        for (i = 0; i < bo->num_pages; i++) {
                m = vm_page_lookup_busy_wait(vm_obj, i, TRUE, "ttm_unm");
                if (m == NULL)
                        continue;
                cdev_pager_free_page(vm_obj, m);
        }
        VM_OBJECT_WUNLOCK(vm_obj);

        vm_object_deallocate(vm_obj);
}

#if 0
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
        if (vma->vm_pgoff != 0)
                return -EACCES;

        vma->vm_ops = &ttm_bo_vm_ops;
        vma->vm_private_data = ttm_bo_reference(bo);
        vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
        return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);
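
/*
 * ttm_bo_io() implements read(2)/write(2) style access to a buffer
 * object: it looks the object up by device offset, reserves it, maps
 * the affected page range with ttm_bo_kmap() and copies the data to
 * or from user space. ttm_bo_fbdev_io() below it applies the same
 * logic to a buffer object passed in directly, with offsets relative
 * to the start of that object.
 */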

ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
                  const char __user *wbuf, char __user *rbuf, size_t count,
                  loff_t *f_pos, bool write)
{
        struct ttm_buffer_object *bo;
        struct ttm_bo_driver *driver;
        struct ttm_bo_kmap_obj map;
        unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
        unsigned long kmap_offset;
        unsigned long kmap_end;
        unsigned long kmap_num;
        size_t io_size;
        unsigned int page_offset;
        char *virtual;
        int ret;
        bool no_wait = false;
        bool dummy;

        read_lock(&bdev->vm_lock);
        bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
        if (likely(bo != NULL))
                ttm_bo_reference(bo);
        read_unlock(&bdev->vm_lock);

        if (unlikely(bo == NULL))
                return -EFAULT;

        driver = bo->bdev->driver;
        if (unlikely(!driver->verify_access)) {
                ret = -EPERM;
                goto out_unref;
        }

        ret = driver->verify_access(bo, filp);
        if (unlikely(ret != 0))
                goto out_unref;

        kmap_offset = dev_offset - bo->vm_node->start;
        if (unlikely(kmap_offset >= bo->num_pages)) {
                ret = -EFBIG;
                goto out_unref;
        }

        page_offset = *f_pos & ~PAGE_MASK;
        io_size = bo->num_pages - kmap_offset;
        io_size = (io_size << PAGE_SHIFT) - page_offset;
        if (count < io_size)
                io_size = count;

        kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
        kmap_num = kmap_end - kmap_offset + 1;

        ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

        switch (ret) {
        case 0:
                break;
        case -EBUSY:
                ret = -EAGAIN;
                goto out_unref;
        default:
                goto out_unref;
        }

        ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
        if (unlikely(ret != 0)) {
                ttm_bo_unreserve(bo);
                goto out_unref;
        }

        virtual = ttm_kmap_obj_virtual(&map, &dummy);
        virtual += page_offset;

        if (write)
                ret = copy_from_user(virtual, wbuf, io_size);
        else
                ret = copy_to_user(rbuf, virtual, io_size);

        ttm_bo_kunmap(&map);
        ttm_bo_unreserve(bo);
        ttm_bo_unref(&bo);

        if (unlikely(ret != 0))
                return -EFBIG;

        *f_pos += io_size;

        return io_size;
out_unref:
        ttm_bo_unref(&bo);
        return ret;
}

ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
                        char __user *rbuf, size_t count, loff_t *f_pos,
                        bool write)
{
        struct ttm_bo_kmap_obj map;
        unsigned long kmap_offset;
        unsigned long kmap_end;
        unsigned long kmap_num;
        size_t io_size;
        unsigned int page_offset;
        char *virtual;
        int ret;
        bool no_wait = false;
        bool dummy;

        kmap_offset = (*f_pos >> PAGE_SHIFT);
        if (unlikely(kmap_offset >= bo->num_pages))
                return -EFBIG;

        page_offset = *f_pos & ~PAGE_MASK;
        io_size = bo->num_pages - kmap_offset;
        io_size = (io_size << PAGE_SHIFT) - page_offset;
        if (count < io_size)
                io_size = count;

        kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
        kmap_num = kmap_end - kmap_offset + 1;

        ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

        switch (ret) {
        case 0:
                break;
        case -EBUSY:
                return -EAGAIN;
        default:
                return ret;
        }

        ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
        if (unlikely(ret != 0)) {
                ttm_bo_unreserve(bo);
                return ret;
        }

        virtual = ttm_kmap_obj_virtual(&map, &dummy);
        virtual += page_offset;

        if (write)
                ret = copy_from_user(virtual, wbuf, io_size);
        else
                ret = copy_to_user(rbuf, virtual, io_size);

        ttm_bo_kunmap(&map);
        ttm_bo_unreserve(bo);
        ttm_bo_unref(&bo);

        if (unlikely(ret != 0))
                return ret;

        *f_pos += io_size;

        return io_size;
}
#endif