/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/cc_platform.h>
#include <linux/sched.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <drm/drm_cache.h>
#include <drm/drm_device.h>
#include <drm/drm_util.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_tt.h>

#include "ttm_module.h"
static unsigned long ttm_pages_limit;

MODULE_PARM_DESC(pages_limit, "Limit for the allocated pages");
module_param_named(pages_limit, ttm_pages_limit, ulong, 0644);

static unsigned long ttm_dma32_pages_limit;

MODULE_PARM_DESC(dma32_pages_limit, "Limit for the allocated DMA32 pages");
module_param_named(dma32_pages_limit, ttm_dma32_pages_limit, ulong, 0644);
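
/*
 * On Linux builds these are ordinary module parameters, so they can be
 * set on the kernel command line (e.g. "ttm.pages_limit=<n>", value
 * hypothetical) or adjusted at runtime via
 * /sys/module/ttm/parameters/. A value of zero means "use the default
 * computed in ttm_tt_mgr_init()".
 */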

static atomic_long_t ttm_pages_allocated;
static atomic_long_t ttm_dma32_pages_allocated;
59

/*
 * Allocates a ttm structure for the given BO.
 */
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_device *bdev = bo->bdev;
	struct drm_device *ddev = bo->base.dev;
	uint32_t page_flags = 0;

	dma_resv_assert_held(bo->base.resv);

	if (bo->ttm)
		return 0;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_TT_FLAG_ZERO_ALLOC;
		break;
	case ttm_bo_type_kernel:
		break;
	case ttm_bo_type_sg:
		page_flags |= TTM_TT_FLAG_EXTERNAL;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		return -EINVAL;
	}
	/*
	 * When using dma_alloc_coherent with memory encryption, the
	 * mapped TT pages need to be decrypted; otherwise the drivers
	 * will end up sending encrypted memory to the GPU.
	 */
	if (bdev->pool.use_dma_alloc && cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
		page_flags |= TTM_TT_FLAG_DECRYPTED;
		drm_info_once(ddev, "TT memory decryption enabled.");
	}

	bo->ttm = bdev->funcs->ttm_tt_create(bo, page_flags);
	if (unlikely(bo->ttm == NULL))
		return -ENOMEM;

	WARN_ON(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE &&
		!(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL));

	return 0;
}

/*
 * Allocates storage for pointers to the pages that back the ttm.
 */
static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvcalloc(ttm->num_pages, sizeof(*ttm->pages), GFP_KERNEL);
	if (!ttm->pages)
		return -ENOMEM;

	ttm->orders = kvmalloc_array(ttm->num_pages, sizeof(unsigned long),
				     GFP_KERNEL | __GFP_ZERO);
	if (!ttm->orders) {
		/* Don't leak the page directory on partial failure. */
		kvfree(ttm->pages);
		ttm->pages = NULL;
		return -ENOMEM;
	}

	return 0;
}
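
/*
 * Allocates storage for the page pointers and the DMA addresses of the
 * ttm in a single buffer: each element is sized to hold both, and
 * ttm->dma_address is pointed at the region following the last page
 * pointer.
 */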
static int ttm_dma_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvcalloc(ttm->num_pages, sizeof(*ttm->pages) +
			      sizeof(*ttm->dma_address), GFP_KERNEL);
	if (!ttm->pages)
		return -ENOMEM;

	ttm->dma_address = (void *)(ttm->pages + ttm->num_pages);

	ttm->orders = kvmalloc_array(ttm->num_pages,
				     sizeof(unsigned long),
				     GFP_KERNEL | __GFP_ZERO);
	if (!ttm->orders) {
		/* Don't leak the combined directory on partial failure. */
		kvfree(ttm->pages);
		ttm->pages = NULL;
		ttm->dma_address = NULL;
		return -ENOMEM;
	}

	return 0;
}
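
/*
 * Allocates storage for DMA addresses only; the pages themselves come
 * from an external scatter-gather table (TTM_TT_FLAG_EXTERNAL).
 */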
static int ttm_sg_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->dma_address = kvcalloc(ttm->num_pages, sizeof(*ttm->dma_address),
				    GFP_KERNEL);
	if (!ttm->dma_address)
		return -ENOMEM;

	return 0;
}
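
/*
 * Destroys the ttm through the device's ttm_tt_destroy() callback, so
 * the driver can release any per-tt state along with the pages.
 */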
void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	bdev->funcs->ttm_tt_destroy(bdev, ttm);
}
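
/*
 * Fills in the common fields of a ttm: the size in pages (including any
 * extra pages requested by the driver), flags, caching mode and the
 * per-device DMA tag. No memory is allocated here.
 */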
static void ttm_tt_init_fields(struct ttm_tt *ttm,
			       struct ttm_buffer_object *bo,
			       uint32_t page_flags,
			       enum ttm_caching caching,
			       unsigned long extra_pages)
{
	ttm->num_pages = (PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT) + extra_pages;
	ttm->page_flags = page_flags;
	ttm->dma_address = NULL;
	ttm->swap_storage = NULL;
	ttm->sg = bo->sg;
	ttm->caching = caching;
	ttm->dmat = bo->bdev->dmat;
	ttm->map = NULL;
	ttm->segs = NULL;
}
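
/*
 * ttm_tt_init - initialize a ttm for a BO and allocate its page
 * directory. The pages themselves are allocated later, in
 * ttm_tt_populate(). Returns 0 on success or -ENOMEM.
 */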
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		uint32_t page_flags, enum ttm_caching caching,
		unsigned long extra_pages)
{
	ttm_tt_init_fields(ttm, bo, page_flags, caching, extra_pages);

	if (ttm_tt_alloc_page_directory(ttm)) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);
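
/*
 * ttm_tt_fini - the inverse of ttm_tt_init()/ttm_sg_tt_init(): drops any
 * remaining swap storage and frees the page directory, the DMA map and
 * the segment array. Must not be called on a still-populated ttm.
 */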
void ttm_tt_fini(struct ttm_tt *ttm)
{
	WARN_ON(ttm->page_flags & TTM_TT_FLAG_PRIV_POPULATED);

	if (ttm->swap_storage)
		uao_detach(ttm->swap_storage);
	ttm->swap_storage = NULL;

	/*
	 * In the DMA case ttm->dma_address points into the same allocation
	 * as ttm->pages, so free only whichever pointer owns the buffer.
	 */
	if (ttm->pages)
		kvfree(ttm->pages);
	else
		kvfree(ttm->dma_address);
	kvfree(ttm->orders);
	ttm->pages = NULL;
	ttm->dma_address = NULL;
	ttm->orders = NULL;

	if (ttm->map)
		bus_dmamap_destroy(ttm->dmat, ttm->map);
	if (ttm->segs)
		km_free(ttm->segs, round_page(ttm->num_pages *
		    sizeof(bus_dma_segment_t)), &kv_any, &kp_zero);
}
EXPORT_SYMBOL(ttm_tt_fini);
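
/*
 * ttm_sg_tt_init - like ttm_tt_init(), but additionally creates the
 * bus_dma segment array and DMA map used to feed the pages to the
 * device. With TTM_TT_FLAG_EXTERNAL only DMA addresses are stored,
 * since the pages come from an external scatter-gather table.
 */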
int ttm_sg_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		   uint32_t page_flags, enum ttm_caching caching)
{
	int ret;
	int flags = BUS_DMA_WAITOK;

	ttm_tt_init_fields(ttm, bo, page_flags, caching, 0);

	if (page_flags & TTM_TT_FLAG_EXTERNAL)
		ret = ttm_sg_tt_alloc_page_directory(ttm);
	else
		ret = ttm_dma_tt_alloc_page_directory(ttm);
	if (ret) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}

	/* kd_waitok means this allocation sleeps until it succeeds. */
	ttm->segs = km_alloc(round_page(ttm->num_pages *
	    sizeof(bus_dma_segment_t)), &kv_any, &kp_zero, &kd_waitok);

	if (!bo->bdev->pool.use_dma32)
		flags |= BUS_DMA_64BIT;
	if (bus_dmamap_create(ttm->dmat, ttm->num_pages << PAGE_SHIFT,
	    ttm->num_pages, ttm->num_pages << PAGE_SHIFT, 0, flags,
	    &ttm->map)) {
		km_free(ttm->segs, round_page(ttm->num_pages *
		    sizeof(bus_dma_segment_t)), &kv_any, &kp_zero);
		if (ttm->pages) {
			kvfree(ttm->pages);
			kvfree(ttm->orders);
		} else {
			kvfree(ttm->dma_address);
		}
		ttm->pages = NULL;
		ttm->orders = NULL;
		ttm->dma_address = NULL;
		pr_err("Failed creating dma map\n");
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL(ttm_sg_tt_init);
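
/*
 * ttm_tt_swapin - copy the ttm's contents back from swap storage. On
 * success the backing uvm aobj is detached and TTM_TT_FLAG_SWAPPED is
 * cleared. Returns 0 or a negative error code.
 */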
int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct uvm_object *swap_storage;
	struct vm_page *from_page;
	struct vm_page *to_page;
	struct pglist plist;
	int i, ret;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	TAILQ_INIT(&plist);
	if (uvm_obj_wire(swap_storage, 0, ttm->num_pages << PAGE_SHIFT,
	    &plist)) {
		ret = -ENOMEM;
		goto out_err;
	}

	from_page = TAILQ_FIRST(&plist);
	for (i = 0; i < ttm->num_pages; ++i) {
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL)) {
			/* Unwire what we wired above before bailing out. */
			ret = -ENOMEM;
			goto out_unwire;
		}

		uvm_pagecopy(from_page, to_page);
		from_page = TAILQ_NEXT(from_page, pageq);
	}

	uvm_obj_unwire(swap_storage, 0, ttm->num_pages << PAGE_SHIFT);

	uao_detach(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED;

	return 0;

out_unwire:
	uvm_obj_unwire(swap_storage, 0, ttm->num_pages << PAGE_SHIFT);
out_err:
	return ret;
}

/**
 * ttm_tt_swapout - swap out tt object
 *
 * @bdev: TTM device structure.
 * @ttm: The struct ttm_tt.
 * @gfp_flags: Flags to use for memory allocation.
 *
 * Swapout a TT object to swap storage (a shmem file on Linux), returning
 * the number of pages swapped out or a negative error code. Not yet
 * implemented in this port; see the #ifdef notyet block below.
 */
int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm,
		   gfp_t gfp_flags)
{
	STUB();
	return -ENOSYS;
#ifdef notyet
	/*
	 * Unported Linux path: swap_space below would be the shmem mapping
	 * of swap_storage on Linux and has no uvm equivalent wired up yet.
	 */
	loff_t size = (loff_t)ttm->num_pages << PAGE_SHIFT;
	struct uvm_object *swap_storage;
	struct vm_page *from_page;
	struct vm_page *to_page;
	int i, ret;

	swap_storage = uao_create(size, 0);
#ifdef notyet
	if (IS_ERR(swap_storage)) {
		pr_err("Failed allocating swap storage\n");
		return PTR_ERR(swap_storage);
	}
#endif

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;

		to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_flags);
		if (IS_ERR(to_page)) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		copy_highpage(to_page, from_page);
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		put_page(to_page);
	}

	ttm_tt_unpopulate(bdev, ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_TT_FLAG_SWAPPED;

	return ttm->num_pages;

out_err:
	uao_detach(swap_storage);

	return ret;
#endif
}
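
/*
 * ttm_tt_populate - allocate the pages backing a ttm, charging them
 * against the global limits and swapping other BOs out first if those
 * limits are exceeded. Pages swapped out earlier are read back in.
 */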
int ttm_tt_populate(struct ttm_device *bdev,
		    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	int ret;

	if (!ttm)
		return -EINVAL;

	if (ttm_tt_is_populated(ttm))
		return 0;

	if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
		atomic_long_add(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_add(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}

	while (atomic_long_read(&ttm_pages_allocated) > ttm_pages_limit ||
	       atomic_long_read(&ttm_dma32_pages_allocated) >
	       ttm_dma32_pages_limit) {

		ret = ttm_global_swapout(ctx, GFP_KERNEL);
		if (ret == 0)
			break;
		if (ret < 0)
			goto error;
	}

	if (bdev->funcs->ttm_tt_populate)
		ret = bdev->funcs->ttm_tt_populate(bdev, ttm, ctx);
	else
		ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
	if (ret)
		goto error;

	ttm->page_flags |= TTM_TT_FLAG_PRIV_POPULATED;
	if (unlikely(ttm->page_flags & TTM_TT_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_tt_unpopulate(bdev, ttm);
			return ret;
		}
	}

	return 0;

error:
	if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
		atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_sub(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}
	return ret;
}
EXPORT_SYMBOL(ttm_tt_populate);
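
/*
 * ttm_tt_unpopulate - free the pages backing a ttm and uncharge them
 * from the global counters.
 */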
void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	if (!ttm_tt_is_populated(ttm))
		return;

	if (bdev->funcs->ttm_tt_unpopulate)
		bdev->funcs->ttm_tt_unpopulate(bdev, ttm);
	else
		ttm_pool_free(&bdev->pool, ttm);

	if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
		atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_sub(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}

	ttm->page_flags &= ~TTM_TT_FLAG_PRIV_POPULATED;
}

#ifdef CONFIG_DEBUG_FS

/* Test the shrinker functions and dump the result */
static int ttm_tt_debugfs_shrink_show(struct seq_file *m, void *data)
{
	struct ttm_operation_ctx ctx = { false, false };

	seq_printf(m, "%d\n", ttm_global_swapout(&ctx, GFP_KERNEL));
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_tt_debugfs_shrink);

#endif

/*
 * ttm_tt_mgr_init - initialize the global TT state
 *
 * Set the default page limits used when swapping out BOs and create the
 * debugfs file for exercising the shrinker.
 */
void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages)
{
#ifdef CONFIG_DEBUG_FS
	debugfs_create_file("tt_shrink", 0400, ttm_debugfs_root, NULL,
			    &ttm_tt_debugfs_shrink_fops);
#endif

	if (!ttm_pages_limit)
		ttm_pages_limit = num_pages;

	if (!ttm_dma32_pages_limit)
		ttm_dma32_pages_limit = num_dma32_pages;
}
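
/*
 * Kmap iterator: maps one tt page at a time into kernel address space,
 * with the protection derived from the tt's caching mode. Uses
 * kmap_local_page_prot() on Linux and kmap_atomic_prot() otherwise.
 */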
static void ttm_kmap_iter_tt_map_local(struct ttm_kmap_iter *iter,
				       struct iosys_map *dmap,
				       pgoff_t i, bus_space_tag_t bst)
{
	struct ttm_kmap_iter_tt *iter_tt =
		container_of(iter, typeof(*iter_tt), base);

#ifdef __linux__
	iosys_map_set_vaddr(dmap, kmap_local_page_prot(iter_tt->tt->pages[i],
						       iter_tt->prot));
#else
	iosys_map_set_vaddr(dmap, kmap_atomic_prot(iter_tt->tt->pages[i],
	    iter_tt->prot));
#endif
}
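
/* Undo the per-page mapping established by the map_local hook above. */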
static void ttm_kmap_iter_tt_unmap_local(struct ttm_kmap_iter *iter,
					 struct iosys_map *map,
					 bus_space_tag_t bst)
{
#ifdef __linux__
	kunmap_local(map->vaddr);
#else
	kunmap_atomic(map->vaddr);
#endif
}

static const struct ttm_kmap_iter_ops ttm_kmap_iter_tt_ops = {
	.map_local = ttm_kmap_iter_tt_map_local,
	.unmap_local = ttm_kmap_iter_tt_unmap_local,
	.maps_tt = true,
};

/**
 * ttm_kmap_iter_tt_init - Initialize a struct ttm_kmap_iter_tt
 * @iter_tt: The struct ttm_kmap_iter_tt to initialize.
 * @tt: Struct ttm_tt holding page pointers of the struct ttm_resource.
 *
 * Return: Pointer to the embedded struct ttm_kmap_iter.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,
		      struct ttm_tt *tt)
{
	iter_tt->base.ops = &ttm_kmap_iter_tt_ops;
	iter_tt->tt = tt;
	if (tt)
		iter_tt->prot = ttm_prot_from_caching(tt->caching, PAGE_KERNEL);
	else
		iter_tt->prot = PAGE_KERNEL;

	return &iter_tt->base;
}
EXPORT_SYMBOL(ttm_kmap_iter_tt_init);
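
/**
 * ttm_tt_pages_limit - Return the global limit on the total number of
 * pages that may back TT objects.
 */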
unsigned long ttm_tt_pages_limit(void)
{
	return ttm_pages_limit;
}
EXPORT_SYMBOL(ttm_tt_pages_limit);