/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/reservation.h>

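/*
 * Ghost object used by ttm_buffer_object_transfer(): it wraps a copy of the
 * original buffer object and holds a reference on it, so the old backing
 * storage stays valid until the ghost object itself is destroyed.
 */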
struct ttm_transfer_obj {
	struct ttm_buffer_object base;
	struct ttm_buffer_object *bo;
};

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

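/*
 * Move a buffer between placements that are both reachable through the TTM
 * page tables: wait for the GPU, unbind from the old placement (if it was
 * not already system memory), adjust the caching attributes of the ttm_tt
 * and bind it to the new placement.
 */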
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    struct ttm_operation_ctx *ctx,
		    struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);

		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				pr_err("Failed to expire sync object before unbinding TTM\n");
			return ret;
		}

		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem, ctx);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

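/*
 * Helpers serializing access to a memory type's io_mem_reserve()/io_mem_free()
 * state. When the driver sets io_reserve_fastpath, reservations are assumed
 * to be cheap and no locking is performed.
 */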
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);

static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}

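/*
 * Reserve and release the I/O aperture space backing a ttm_mem_reg. The
 * reservation is reference counted; when the driver's io_mem_reserve()
 * returns -EAGAIN, a BO is evicted from the io-reserve LRU and the
 * reservation is retried.
 */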
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);

void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);

}
EXPORT_SYMBOL(ttm_mem_io_free);

int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

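/*
 * Map and unmap the I/O space behind a ttm_mem_reg into kernel virtual
 * address space. For regions that are not iomem, *virtual stays NULL and
 * 0 is returned.
 */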
static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			       void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
		else
			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
				void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		iounmap(virtual);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

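/*
 * Per-page copy helpers used by ttm_bo_move_memcpy(): io->io, io->ttm and
 * ttm->io. The ttm variants map the system page with the page protection
 * derived from the placement before copying to or from the I/O mapping.
 */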
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	dst = kmap_atomic_prot(d, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
#endif
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(dst);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(dst);
	else
		kunmap(d);
#endif

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
	src = kmap_atomic_prot(s, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
#endif
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(src);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(src);
	else
		kunmap(s);
#endif

	return 0;
}

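/*
 * Fallback move path that copies the buffer contents with the CPU, page by
 * page, for moves that are not (or cannot be) done by the GPU. Both the old
 * and the new placement are mapped, the data is copied, and the old
 * placement is released on success.
 */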
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       struct ttm_operation_ctx *ctx,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Don't move nonexistent data. Clear destination instead.
	 */
	if (old_iomap == NULL &&
	    (ttm == NULL || (ttm->state == tt_unpopulated &&
			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
		goto out2;
	}

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm) {
		ret = ttm_tt_populate(ttm, ctx);
		if (ret)
			goto out1;
	}

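	/*
	 * Pick the copy direction: when source and destination ranges overlap
	 * within the same memory type, copy backwards (highest page first) so
	 * pages are not overwritten before they have been read.
	 */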
	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else {
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		}
		if (ret)
			goto out1;
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	struct ttm_transfer_obj *fbo;

	fbo = container_of(bo, struct ttm_transfer_obj, base);
	ttm_bo_put(fbo->bo);
	kfree(fbo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_transfer_obj *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), M_DRM, GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	fbo->base = *bo;
	fbo->base.mem.placement |= TTM_PL_FLAG_NO_EVICT;

	ttm_bo_get(bo);
	fbo->bo = bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	atomic_inc(&bo->bdev->glob->bo_count);
	INIT_LIST_HEAD(&fbo->base.ddestroy);
	INIT_LIST_HEAD(&fbo->base.lru);
	INIT_LIST_HEAD(&fbo->base.swap);
	INIT_LIST_HEAD(&fbo->base.io_reserve_lru);
	lockinit(&fbo->base.wu_mutex, "dtfbwm", 0, LK_CANRECURSE);
	fbo->base.moving = NULL;
	drm_vma_node_reset(&fbo->base.vma_node);
	atomic_set(&fbo->base.cpu_writers, 0);

	kref_init(&fbo->base.list_kref);
	kref_init(&fbo->base.kref);
	fbo->base.destroy = &ttm_transfered_destroy;
	fbo->base.acc_size = 0;
	fbo->base.resv = &fbo->base.ttm_resv;
	reservation_object_init(fbo->base.resv);
	ret = ww_mutex_trylock(&fbo->base.resv->lock);
	WARN_ON(!ret);

	*new_obj = &fbo->base;
	return 0;
}

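/*
 * Derive the kernel page protection to use when mapping pages of a BO with
 * the given placement caching flags: cached placements keep the default
 * protection, while write-combined and uncached placements adjust it on the
 * architectures that support doing so.
 */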
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
	/* Cached mappings need no adjustment */
	if (caching_flags & TTM_PL_FLAG_CACHED)
		return tmp;

#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
    defined(__powerpc__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__) || defined(__mips__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

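/*
 * Internal helpers for ttm_bo_kmap(): ttm_bo_ioremap() maps a BO that lives
 * in an I/O aperture, ttm_bo_kmap_ttm() maps one backed by system pages.
 */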
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
						       size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_tt *ttm = bo->ttm;
	pgprot_t prot;
	int ret;

	BUG_ON(!ttm);

	ret = ttm_tt_populate(ttm, &ctx);
	if (ret)
		return ret;

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

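/*
 * Map part of a buffer object into kernel virtual address space and unmap it
 * again. A minimal driver-side usage sketch, assuming a hypothetical,
 * already-reserved BO "bo" of at least one page:
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	int ret = ttm_bo_kmap(bo, 0, 1, &map);
 *	if (!ret) {
 *		void *ptr = ttm_kmap_obj_virtual(&map, &is_iomem);
 *		... read or write through ptr ...
 *		ttm_bo_kunmap(&map);
 *	}
 */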
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

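/*
 * Finish an accelerated (GPU) move: attach the move fence to the BO and,
 * unless this is an eviction, hand the old backing storage to a ghost
 * object so it is released only once the fence signals.
 */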
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence,
			      bool evict,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;

	reservation_object_add_excl_fence(bo->resv, fence);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false);
		if (ret)
			return ret;

		if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		dma_fence_put(bo->moving);
		bo->moving = dma_fence_get(fence);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		reservation_object_add_excl_fence(ghost_obj->resv, fence);

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_put(ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);

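/*
 * Like ttm_bo_move_accel_cleanup(), but for evictions out of fixed
 * (non-TTM-backed) memory it only records the move fence on the source
 * memory type, so later allocations there can synchronize with the
 * pipelined eviction instead of stalling the CPU.
 */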
int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
			 struct dma_fence *fence, bool evict,
			 struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg *old_mem = &bo->mem;

	struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type];
	struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type];

	int ret;

	reservation_object_add_excl_fence(bo->resv, fence);

	if (!evict) {
		struct ttm_buffer_object *ghost_obj;

		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		dma_fence_put(bo->moving);
		bo->moving = dma_fence_get(fence);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		reservation_object_add_excl_fence(ghost_obj->resv, fence);

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(to->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_put(ghost_obj);

	} else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {

		/**
		 * BO doesn't have a TTM we need to bind/unbind. Just remember
		 * this eviction and free up the allocation
		 */

		lockmgr(&from->move_lock, LK_EXCLUSIVE);
		if (!from->move || dma_fence_is_later(fence, from->move)) {
			dma_fence_put(from->move);
			from->move = dma_fence_get(fence);
		}
		lockmgr(&from->move_lock, LK_RELEASE);

		ttm_bo_free_old_node(bo);

		dma_fence_put(bo->moving);
		bo->moving = dma_fence_get(fence);

	} else {
		/**
		 * Last resort, wait for the move to be completed.
		 *
		 * Should never happen in practice.
		 */

		ret = ttm_bo_wait(bo, false, false);
		if (ret)
			return ret;

		if (to->flags & TTM_MEMTYPE_FLAG_FIXED) {
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_pipeline_move);

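/*
 * Pipelined "gutting" of a buffer object: hand its current backing storage
 * and fences to a ghost object and leave the BO itself empty in system
 * memory, without waiting for it to become idle.
 */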
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
	struct ttm_buffer_object *ghost;
	int ret;

	ret = ttm_buffer_object_transfer(bo, &ghost);
	if (ret)
		return ret;

	ret = reservation_object_copy_fences(ghost->resv, bo->resv);
	/* Last resort, wait for the BO to be idle when we are OOM */
	if (ret)
		ttm_bo_wait(bo, false, false);

	memset(&bo->mem, 0, sizeof(bo->mem));
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->ttm = NULL;

	ttm_bo_unreserve(ghost);
	ttm_bo_put(ghost);

	return 0;
}