// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

static const struct ttm_place vram_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .mem_type = TTM_PL_VRAM,
        .flags = 0
};

static const struct ttm_place sys_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .mem_type = TTM_PL_SYSTEM,
        .flags = 0
};

static const struct ttm_place gmr_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .mem_type = VMW_PL_GMR,
        .flags = 0
};

static const struct ttm_place mob_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .mem_type = VMW_PL_MOB,
        .flags = 0
};

struct ttm_placement vmw_vram_placement = {
        .num_placement = 1,
        .placement = &vram_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &vram_placement_flags
};

static const struct ttm_place vram_gmr_placement_flags[] = {
        {
                .fpfn = 0,
                .lpfn = 0,
                .mem_type = TTM_PL_VRAM,
                .flags = 0
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .mem_type = VMW_PL_GMR,
                .flags = 0
        }
};

static const struct ttm_place gmr_vram_placement_flags[] = {
        {
                .fpfn = 0,
                .lpfn = 0,
                .mem_type = VMW_PL_GMR,
                .flags = 0
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .mem_type = TTM_PL_VRAM,
                .flags = 0
        }
};

struct ttm_placement vmw_vram_gmr_placement = {
        .num_placement = 2,
        .placement = vram_gmr_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &gmr_placement_flags
};

struct ttm_placement vmw_vram_sys_placement = {
        .num_placement = 1,
        .placement = &vram_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_sys_placement = {
        .num_placement = 1,
        .placement = &sys_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_placement_flags
};

static const struct ttm_place evictable_placement_flags[] = {
        {
                .fpfn = 0,
                .lpfn = 0,
                .mem_type = TTM_PL_SYSTEM,
                .flags = 0
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .mem_type = TTM_PL_VRAM,
                .flags = 0
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .mem_type = VMW_PL_GMR,
                .flags = 0
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .mem_type = VMW_PL_MOB,
                .flags = 0
        }
};

static const struct ttm_place nonfixed_placement_flags[] = {
        {
                .fpfn = 0,
                .lpfn = 0,
                .mem_type = TTM_PL_SYSTEM,
                .flags = 0
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .mem_type = VMW_PL_GMR,
                .flags = 0
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .mem_type = VMW_PL_MOB,
                .flags = 0
        }
};

struct ttm_placement vmw_evictable_placement = {
        .num_placement = 4,
        .placement = evictable_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_srf_placement = {
        .num_placement = 1,
        .num_busy_placement = 2,
        .placement = &gmr_placement_flags,
        .busy_placement = gmr_vram_placement_flags
};

struct ttm_placement vmw_mob_placement = {
        .num_placement = 1,
        .num_busy_placement = 1,
        .placement = &mob_placement_flags,
        .busy_placement = &mob_placement_flags
};

struct ttm_placement vmw_nonfixed_placement = {
        .num_placement = 3,
        .placement = nonfixed_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_placement_flags
};

struct vmw_ttm_tt {
        struct ttm_tt dma_ttm;
        struct vmw_private *dev_priv;
        int gmr_id;
        struct vmw_mob *mob;
        int mem_type;
        struct sg_table sgt;
        struct vmw_sg_table vsgt;
        uint64_t sg_alloc_size;
        bool mapped;
        bool bound;
};

const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);

/**
 * Helper functions to advance a struct vmw_piter iterator.
 *
 * @viter: Pointer to the iterator.
 *
 * These functions return false if past the end of the list,
 * true otherwise. Functions are selected depending on the current
 * DMA mapping mode.
 */
static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
{
        return ++(viter->i) < viter->num_pages;
}

static bool __vmw_piter_sg_next(struct vmw_piter *viter)
{
        bool ret = __vmw_piter_non_sg_next(viter);

        return __sg_page_iter_dma_next(&viter->iter) && ret;
}


/**
 * Helper functions to return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return a pointer to the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
{
        return viter->pages[viter->i];
}

/**
 * Helper functions to return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return the DMA address of the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter)
{
        return page_to_phys(viter->pages[viter->i]);
}

static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
{
        return viter->addrs[viter->i];
}

static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
{
        return sg_page_iter_dma_address(&viter->iter);
}


/**
 * vmw_piter_start - Initialize a struct vmw_piter.
 *
 * @viter: Pointer to the iterator to initialize
 * @vsgt: Pointer to a struct vmw_sg_table to initialize from
 * @p_offset: Pointer offset used to update current array position
 *
 * Note that we're following the convention of __sg_page_iter_start, so that
 * the iterator doesn't point to a valid page after initialization; it has
 * to be advanced one step first.
 */
void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
                     unsigned long p_offset)
{
        viter->i = p_offset - 1;
        viter->num_pages = vsgt->num_pages;
        viter->page = &__vmw_piter_non_sg_page;
        viter->pages = vsgt->pages;
        switch (vsgt->mode) {
        case vmw_dma_phys:
                viter->next = &__vmw_piter_non_sg_next;
                viter->dma_address = &__vmw_piter_phys_addr;
                break;
        case vmw_dma_alloc_coherent:
                viter->next = &__vmw_piter_non_sg_next;
                viter->dma_address = &__vmw_piter_dma_addr;
                viter->addrs = vsgt->addrs;
                break;
        case vmw_dma_map_populate:
        case vmw_dma_map_bind:
                viter->next = &__vmw_piter_sg_next;
                viter->dma_address = &__vmw_piter_sg_addr;
                __sg_page_iter_start(&viter->iter.base, vsgt->sgt->sgl,
                                     vsgt->sgt->orig_nents, p_offset);
                break;
        default:
                BUG();
        }
}
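
/*
 * Usage sketch (illustrative only, not compiled as part of this file):
 * walking every DMA address of a mapped struct vmw_sg_table with the
 * iterator above. The iterator follows the __sg_page_iter_start
 * convention, so it must be advanced once before it points at a valid
 * page; this mirrors the region-counting loop in vmw_ttm_map_dma() below:
 *
 *	struct vmw_piter iter;
 *
 *	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
 *		dma_addr_t addr = vmw_piter_dma_addr(&iter);
 *
 *		(hand addr to the device, e.g. when building GMR or MOB
 *		 page tables)
 *	}
 */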

/**
 * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
 * TTM pages
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Used to free dma mappings previously mapped by vmw_ttm_map_for_dma.
 */
static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
{
        struct device *dev = vmw_tt->dev_priv->drm.dev;

        dma_unmap_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
        vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
}

/**
 * vmw_ttm_map_for_dma - map TTM pages to get device addresses
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * This function is used to get device addresses from the kernel DMA layer.
 * However, it's violating the DMA API in that when this operation has been
 * performed, it's illegal for the CPU to write to the pages without first
 * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
 * therefore only legal to call this function if we know that the function
 * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
 * a CPU write buffer flush.
 */
static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
{
        struct device *dev = vmw_tt->dev_priv->drm.dev;

        return dma_map_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
}

/**
 * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Select the correct mapping function for the current DMA mode and make
 * sure the TTM pages are visible to the device. Allocate storage for the
 * device mappings. If a mapping has already been performed, indicated by
 * the storage pointer being non-NULL, the function returns success.
 */
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
{
        struct vmw_private *dev_priv = vmw_tt->dev_priv;
        struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
        struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
        struct ttm_operation_ctx ctx = {
                .interruptible = true,
                .no_wait_gpu = false
        };
        struct vmw_piter iter;
        dma_addr_t old;
        int ret = 0;
        static size_t sgl_size;
        static size_t sgt_size;
        struct scatterlist *sg;

        if (vmw_tt->mapped)
                return 0;

        vsgt->mode = dev_priv->map_mode;
        vsgt->pages = vmw_tt->dma_ttm.pages;
        vsgt->num_pages = vmw_tt->dma_ttm.num_pages;
        vsgt->addrs = vmw_tt->dma_ttm.dma_address;
        vsgt->sgt = &vmw_tt->sgt;

        switch (dev_priv->map_mode) {
        case vmw_dma_map_bind:
        case vmw_dma_map_populate:
                if (unlikely(!sgl_size)) {
                        sgl_size = ttm_round_pot(sizeof(struct scatterlist));
                        sgt_size = ttm_round_pot(sizeof(struct sg_table));
                }
                vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
                ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, &ctx);
                if (unlikely(ret != 0))
                        return ret;

                sg = __sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
                                vsgt->num_pages, 0,
                                (unsigned long) vsgt->num_pages << PAGE_SHIFT,
                                dma_get_max_seg_size(dev_priv->drm.dev),
                                NULL, 0, GFP_KERNEL);
                if (IS_ERR(sg)) {
                        ret = PTR_ERR(sg);
                        goto out_sg_alloc_fail;
                }

                if (vsgt->num_pages > vmw_tt->sgt.orig_nents) {
                        uint64_t over_alloc =
                                sgl_size * (vsgt->num_pages -
                                            vmw_tt->sgt.orig_nents);

                        ttm_mem_global_free(glob, over_alloc);
                        vmw_tt->sg_alloc_size -= over_alloc;
                }

                ret = vmw_ttm_map_for_dma(vmw_tt);
                if (unlikely(ret != 0))
                        goto out_map_fail;

                break;
        default:
                break;
        }

        old = ~((dma_addr_t) 0);
        vmw_tt->vsgt.num_regions = 0;
        for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
                dma_addr_t cur = vmw_piter_dma_addr(&iter);

                if (cur != old + PAGE_SIZE)
                        vmw_tt->vsgt.num_regions++;
                old = cur;
        }

        vmw_tt->mapped = true;
        return 0;

out_map_fail:
        sg_free_table(vmw_tt->vsgt.sgt);
        vmw_tt->vsgt.sgt = NULL;
out_sg_alloc_fail:
        ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
        return ret;
}

/**
 * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Tear down any previously set up device DMA mappings and free
 * any storage space allocated for them. If there are no mappings set up,
 * this function is a NOP.
 */
static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
{
        struct vmw_private *dev_priv = vmw_tt->dev_priv;

        if (!vmw_tt->vsgt.sgt)
                return;

        switch (dev_priv->map_mode) {
        case vmw_dma_map_bind:
        case vmw_dma_map_populate:
                vmw_ttm_unmap_from_dma(vmw_tt);
                sg_free_table(vmw_tt->vsgt.sgt);
                vmw_tt->vsgt.sgt = NULL;
                ttm_mem_global_free(vmw_mem_glob(dev_priv),
                                    vmw_tt->sg_alloc_size);
                break;
        default:
                break;
        }
        vmw_tt->mapped = false;
}

/**
 * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
 * TTM buffer object
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Returns a pointer to a struct vmw_sg_table object. The object should
 * not be freed after use.
 * Note that for the device addresses to be valid, the buffer object must
 * either be reserved or pinned.
 */
const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
{
        struct vmw_ttm_tt *vmw_tt =
                container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm);

        return &vmw_tt->vsgt;
}
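
/*
 * Usage sketch (illustrative only, not compiled as part of this file):
 * looking up the scatter-gather table of a buffer object and iterating
 * its device addresses. The buffer object is reserved around the access
 * so the addresses stay valid, as required by the comment above:
 *
 *	const struct vmw_sg_table *vsgt;
 *	struct vmw_piter iter;
 *	int ret;
 *
 *	ret = ttm_bo_reserve(bo, true, false, NULL);
 *	if (ret)
 *		return ret;
 *
 *	vsgt = vmw_bo_sg_table(bo);
 *	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);)
 *		(use vmw_piter_dma_addr(&iter));
 *
 *	ttm_bo_unreserve(bo);
 */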


static int vmw_ttm_bind(struct ttm_device *bdev,
                        struct ttm_tt *ttm, struct ttm_resource *bo_mem)
{
        struct vmw_ttm_tt *vmw_be =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm);
        int ret = 0;

        if (!bo_mem)
                return -EINVAL;

        if (vmw_be->bound)
                return 0;

        ret = vmw_ttm_map_dma(vmw_be);
        if (unlikely(ret != 0))
                return ret;

        vmw_be->gmr_id = bo_mem->start;
        vmw_be->mem_type = bo_mem->mem_type;

        switch (bo_mem->mem_type) {
        case VMW_PL_GMR:
                ret = vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
                                   ttm->num_pages, vmw_be->gmr_id);
                break;
        case VMW_PL_MOB:
                if (unlikely(vmw_be->mob == NULL)) {
                        vmw_be->mob =
                                vmw_mob_create(ttm->num_pages);
                        if (unlikely(vmw_be->mob == NULL))
                                return -ENOMEM;
                }

                ret = vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
                                   &vmw_be->vsgt, ttm->num_pages,
                                   vmw_be->gmr_id);
                break;
        default:
                BUG();
        }
        vmw_be->bound = true;
        return ret;
}

static void vmw_ttm_unbind(struct ttm_device *bdev,
                           struct ttm_tt *ttm)
{
        struct vmw_ttm_tt *vmw_be =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm);

        if (!vmw_be->bound)
                return;

        switch (vmw_be->mem_type) {
        case VMW_PL_GMR:
                vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
                break;
        case VMW_PL_MOB:
                vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
                break;
        default:
                BUG();
        }

        if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
                vmw_ttm_unmap_dma(vmw_be);
        vmw_be->bound = false;
}


static void vmw_ttm_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
        struct vmw_ttm_tt *vmw_be =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm);

        vmw_ttm_unbind(bdev, ttm);
        ttm_tt_destroy_common(bdev, ttm);
        vmw_ttm_unmap_dma(vmw_be);
        if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
                ttm_tt_fini(&vmw_be->dma_ttm);
        else
                ttm_tt_fini(ttm);

        if (vmw_be->mob)
                vmw_mob_destroy(vmw_be->mob);

        kfree(vmw_be);
}


static int vmw_ttm_populate(struct ttm_device *bdev,
                            struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
        unsigned int i;
        int ret;

        /* TODO: maybe completely drop this ? */
        if (ttm_tt_is_populated(ttm))
                return 0;

        ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
        if (ret)
                return ret;

        for (i = 0; i < ttm->num_pages; ++i) {
                ret = ttm_mem_global_alloc_page(&ttm_mem_glob, ttm->pages[i],
                                                PAGE_SIZE, ctx);
                if (ret)
                        goto error;
        }
        return 0;

error:
        while (i--)
                ttm_mem_global_free_page(&ttm_mem_glob, ttm->pages[i],
                                         PAGE_SIZE);
        ttm_pool_free(&bdev->pool, ttm);
        return ret;
}

static void vmw_ttm_unpopulate(struct ttm_device *bdev,
                               struct ttm_tt *ttm)
{
        struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
                                                 dma_ttm);
        unsigned int i;

        if (vmw_tt->mob) {
                vmw_mob_destroy(vmw_tt->mob);
                vmw_tt->mob = NULL;
        }

        vmw_ttm_unmap_dma(vmw_tt);

        for (i = 0; i < ttm->num_pages; ++i)
                ttm_mem_global_free_page(&ttm_mem_glob, ttm->pages[i],
                                         PAGE_SIZE);

        ttm_pool_free(&bdev->pool, ttm);
}

static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
                                        uint32_t page_flags)
{
        struct vmw_ttm_tt *vmw_be;
        int ret;

        vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
        if (!vmw_be)
                return NULL;

        vmw_be->dev_priv = container_of(bo->bdev, struct vmw_private, bdev);
        vmw_be->mob = NULL;

        if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
                ret = ttm_sg_tt_init(&vmw_be->dma_ttm, bo, page_flags,
                                     ttm_cached);
        else
                ret = ttm_tt_init(&vmw_be->dma_ttm, bo, page_flags,
                                  ttm_cached);
        if (unlikely(ret != 0))
                goto out_no_init;

        return &vmw_be->dma_ttm;
out_no_init:
        kfree(vmw_be);
        return NULL;
}

static void vmw_evict_flags(struct ttm_buffer_object *bo,
                            struct ttm_placement *placement)
{
        *placement = vmw_sys_placement;
}

static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
        struct ttm_object_file *tfile =
                vmw_fpriv((struct drm_file *)filp->private_data)->tfile;

        return vmw_user_bo_verify_access(bo, tfile);
}

static int vmw_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{
        struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);

        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
        case VMW_PL_GMR:
        case VMW_PL_MOB:
                return 0;
        case TTM_PL_VRAM:
                mem->bus.offset = (mem->start << PAGE_SHIFT) +
                        dev_priv->vram_start;
                mem->bus.is_iomem = true;
                mem->bus.caching = ttm_cached;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

/**
 * vmw_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @old_mem: The old memory where we move from
 * @new_mem: The struct ttm_resource indicating to what memory
 * region the move is taking place.
 *
 * Calls move_notify for all subsystems needing it.
 * (currently only resources).
 */
static void vmw_move_notify(struct ttm_buffer_object *bo,
                            struct ttm_resource *old_mem,
                            struct ttm_resource *new_mem)
{
        vmw_bo_move_notify(bo, new_mem);
        vmw_query_move_notify(bo, old_mem, new_mem);
}


/**
 * vmw_swap_notify - TTM swap_notify callback
 *
 * @bo: The TTM buffer object about to be swapped out.
 */
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
        vmw_bo_swap_notify(bo);
        (void) ttm_bo_wait(bo, false, false);
}

static int vmw_move(struct ttm_buffer_object *bo,
                    bool evict,
                    struct ttm_operation_ctx *ctx,
                    struct ttm_resource *new_mem,
                    struct ttm_place *hop)
{
        struct ttm_resource_manager *old_man = ttm_manager_type(bo->bdev, bo->mem.mem_type);
        struct ttm_resource_manager *new_man = ttm_manager_type(bo->bdev, new_mem->mem_type);
        int ret;

        if (new_man->use_tt && new_mem->mem_type != TTM_PL_SYSTEM) {
                ret = vmw_ttm_bind(bo->bdev, bo->ttm, new_mem);
                if (ret)
                        return ret;
        }

        vmw_move_notify(bo, &bo->mem, new_mem);

        if (old_man->use_tt && new_man->use_tt) {
                if (bo->mem.mem_type == TTM_PL_SYSTEM) {
                        ttm_bo_assign_mem(bo, new_mem);
                        return 0;
                }
                ret = ttm_bo_wait_ctx(bo, ctx);
                if (ret)
                        goto fail;

                vmw_ttm_unbind(bo->bdev, bo->ttm);
                ttm_resource_free(bo, &bo->mem);
                ttm_bo_assign_mem(bo, new_mem);
                return 0;
        } else {
                ret = ttm_bo_move_memcpy(bo, ctx, new_mem);
                if (ret)
                        goto fail;
        }
        return 0;
fail:
        vmw_move_notify(bo, new_mem, &bo->mem);
        return ret;
}

struct ttm_device_funcs vmw_bo_driver = {
        .ttm_tt_create = &vmw_ttm_tt_create,
        .ttm_tt_populate = &vmw_ttm_populate,
        .ttm_tt_unpopulate = &vmw_ttm_unpopulate,
        .ttm_tt_destroy = &vmw_ttm_destroy,
        .eviction_valuable = ttm_bo_eviction_valuable,
        .evict_flags = vmw_evict_flags,
        .move = vmw_move,
        .verify_access = vmw_verify_access,
        .swap_notify = vmw_swap_notify,
        .io_mem_reserve = &vmw_ttm_io_mem_reserve,
};
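
/*
 * Registration sketch (illustrative only; the actual call lives elsewhere
 * in the driver, and the exact argument list is an assumption based on the
 * ttm_device_init() interface of this kernel generation): vmw_bo_driver is
 * handed to TTM when the device is initialized, roughly:
 *
 *	ret = ttm_device_init(&dev_priv->bdev, &vmw_bo_driver,
 *			      dev_priv->drm.dev,
 *			      dev_priv->drm.anon_inode->i_mapping,
 *			      dev_priv->drm.vma_offset_manager,
 *			      dev_priv->map_mode == vmw_dma_alloc_coherent,
 *			      false);
 */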

int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
                               unsigned long bo_size,
                               struct ttm_buffer_object **bo_p)
{
        struct ttm_operation_ctx ctx = {
                .interruptible = false,
                .no_wait_gpu = false
        };
        struct ttm_buffer_object *bo;
        int ret;

        ret = vmw_bo_create_kernel(dev_priv, bo_size,
                                   &vmw_sys_placement,
                                   &bo);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_bo_reserve(bo, false, true, NULL);
        BUG_ON(ret != 0);
        ret = vmw_ttm_populate(bo->bdev, bo->ttm, &ctx);
        if (likely(ret == 0)) {
                struct vmw_ttm_tt *vmw_tt =
                        container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm);
                ret = vmw_ttm_map_dma(vmw_tt);
        }

        ttm_bo_unreserve(bo);

        if (likely(ret == 0))
                *bo_p = bo;
        return ret;
}
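
/*
 * Caller sketch (illustrative only, not part of this file): a typical user
 * allocates a populated, DMA-mapped system buffer for device metadata and
 * releases it with ttm_bo_put() when done:
 *
 *	struct ttm_buffer_object *bo;
 *	int ret;
 *
 *	ret = vmw_bo_create_and_populate(dev_priv, PAGE_SIZE, &bo);
 *	if (unlikely(ret != 0))
 *		return ret;
 *
 *	(use vmw_bo_sg_table(bo) / vmw_piter to program the device)
 *	...
 *	ttm_bo_put(bo);
 */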