/* SPDX-License-Identifier: GPL-2.0 OR MIT */

/*
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

#define pr_fmt(fmt) "[TTM DEVICE] " fmt

#include <linux/mm.h>

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/ttm/ttm_placement.h>

#include "ttm_module.h"

/*
 * ttm_global_mutex - protecting the global state
 */
static DEFINE_MUTEX(ttm_global_mutex);
static unsigned ttm_glob_use_count;
struct ttm_global ttm_glob;
EXPORT_SYMBOL(ttm_glob);

struct dentry *ttm_debugfs_root;

static void ttm_global_release(void)
{
	struct ttm_global *glob = &ttm_glob;

	mutex_lock(&ttm_global_mutex);
	if (--ttm_glob_use_count > 0)
		goto out;

	ttm_pool_mgr_fini();
	debugfs_remove(ttm_debugfs_root);

	__free_page(glob->dummy_read_page);
	memset(glob, 0, sizeof(*glob));
out:
	mutex_unlock(&ttm_global_mutex);
}

static int ttm_global_init(void)
{
	struct ttm_global *glob = &ttm_glob;
	unsigned long num_pages, num_dma32;
	int ret = 0;

	mutex_lock(&ttm_global_mutex);
	if (++ttm_glob_use_count > 1)
		goto out;

	ttm_debugfs_root = debugfs_create_dir("ttm", NULL);
	if (IS_ERR(ttm_debugfs_root))
		ttm_debugfs_root = NULL;

	/* Limit the number of pages in the pool to about 50% of the total
	 * system memory.
	 */
	num_pages = physmem;
	num_pages /= 2;

	/* But for DMA32 we limit ourself to only use 2GiB maximum. */
	num_dma32 = physmem;
	num_dma32 = min(num_dma32, 2UL << (30 - PAGE_SHIFT));

	ttm_pool_mgr_init(num_pages);
	ttm_tt_mgr_init(num_pages, num_dma32);

	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

	if (unlikely(glob->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&glob->device_list);
	atomic_set(&glob->bo_count, 0);

	debugfs_create_atomic_t("buffer_objects", 0444, ttm_debugfs_root,
				&glob->bo_count);
out:
	if (ret && ttm_debugfs_root)
		debugfs_remove(ttm_debugfs_root);
	if (ret)
		--ttm_glob_use_count;
	mutex_unlock(&ttm_global_mutex);
	return ret;
}
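
/*
 * For reference, the DMA32 cap chosen in ttm_global_init() above is plain
 * arithmetic: assuming the common 4 KiB page size (PAGE_SHIFT == 12),
 * 2UL << (30 - PAGE_SHIFT) is 2UL << 18 == 524288 pages, i.e. 2 GiB worth
 * of pages.
 */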

/*
 * A buffer object shrink method that tries to swap out the first
 * swappable buffer object on any registered device's LRU lists.
 */
int ttm_global_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags)
{
	struct ttm_global *glob = &ttm_glob;
	struct ttm_device *bdev;
	int ret = 0;

	mutex_lock(&ttm_global_mutex);
	list_for_each_entry(bdev, &glob->device_list, device_list) {
		ret = ttm_device_swapout(bdev, ctx, gfp_flags);
		if (ret > 0) {
			list_move_tail(&bdev->device_list, &glob->device_list);
			break;
		}
	}
	mutex_unlock(&ttm_global_mutex);
	return ret;
}

int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
		       gfp_t gfp_flags)
{
	struct ttm_resource_cursor cursor;
	struct ttm_resource_manager *man;
	struct ttm_resource *res;
	unsigned i;
	int ret;

	spin_lock(&bdev->lru_lock);
	for (i = TTM_PL_SYSTEM; i < TTM_NUM_MEM_TYPES; ++i) {
		man = ttm_manager_type(bdev, i);
		if (!man || !man->use_tt)
			continue;

		ttm_resource_manager_for_each_res(man, &cursor, res) {
			struct ttm_buffer_object *bo = res->bo;
			uint32_t num_pages;

			if (!bo || bo->resource != res)
				continue;

			num_pages = PFN_UP(bo->base.size);
			ret = ttm_bo_swapout(bo, ctx, gfp_flags);
			/* ttm_bo_swapout has dropped the lru_lock */
			if (!ret)
				return num_pages;
			if (ret != -EBUSY)
				return ret;
		}
	}
	spin_unlock(&bdev->lru_lock);
	return 0;
}
EXPORT_SYMBOL(ttm_device_swapout);
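
/*
 * Usage sketch (not taken from any in-tree driver; the target_pages counter
 * is a hypothetical value supplied by the caller): a shrinker or reclaim
 * path could call ttm_global_swapout() in a loop until it either makes no
 * further progress or hits an error:
 *
 *	struct ttm_operation_ctx ctx = {
 *		.interruptible = false,
 *		.no_wait_gpu = false,
 *	};
 *	long freed = 0;
 *	int ret;
 *
 *	while (freed < target_pages) {
 *		ret = ttm_global_swapout(&ctx, GFP_KERNEL);
 *		if (ret <= 0)
 *			break;
 *		freed += ret;
 *	}
 *
 * ttm_global_swapout() returns a page count > 0 when it swapped something
 * out, so the loop stops as soon as it gets back 0 or a negative error code.
 */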

/**
 * ttm_device_init
 *
 * @bdev: A pointer to a struct ttm_device to initialize.
 * @funcs: Function table for the device.
 * @dev: The core kernel device pointer for DMA mappings and allocations.
 * @mapping: The address space to use for this bo.
 * @vma_manager: A pointer to a vma manager.
 * @use_dma_alloc: If coherent DMA allocation API should be used.
 * @use_dma32: If we should use GFP_DMA32 for device memory allocations.
 *
 * Initializes a struct ttm_device.
 *
 * Returns:
 * 0 on success, a negative error code on failure.
 */
int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *funcs,
		    struct device *dev, struct address_space *mapping,
		    struct drm_vma_offset_manager *vma_manager,
		    bool use_dma_alloc, bool use_dma32)
{
	struct ttm_global *glob = &ttm_glob;
	int ret;

	if (WARN_ON(vma_manager == NULL))
		return -EINVAL;

	ret = ttm_global_init();
	if (ret)
		return ret;

	bdev->wq = alloc_workqueue("ttm", WQ_MEM_RECLAIM | WQ_HIGHPRI, 16);
	if (!bdev->wq) {
		ttm_global_release();
		return -ENOMEM;
	}

	bdev->funcs = funcs;

	ttm_sys_man_init(bdev);
	ttm_pool_init(&bdev->pool, dev, NUMA_NO_NODE, use_dma_alloc, use_dma32);

	bdev->vma_manager = vma_manager;
	mtx_init(&bdev->lru_lock, IPL_NONE);
	INIT_LIST_HEAD(&bdev->pinned);
	bdev->dev_mapping = mapping;
	mutex_lock(&ttm_global_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	mutex_unlock(&ttm_global_mutex);

	return 0;
}
EXPORT_SYMBOL(ttm_device_init);

void ttm_device_fini(struct ttm_device *bdev)
{
	struct ttm_resource_manager *man;
	unsigned i;

	mutex_lock(&ttm_global_mutex);
	list_del(&bdev->device_list);
	mutex_unlock(&ttm_global_mutex);

	drain_workqueue(bdev->wq);
	destroy_workqueue(bdev->wq);

	man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
	ttm_resource_manager_set_used(man, false);
	ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL);

	spin_lock(&bdev->lru_lock);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		if (list_empty(&man->lru[i]))
			pr_debug("Swap list %d was clean\n", i);
	spin_unlock(&bdev->lru_lock);

	ttm_pool_fini(&bdev->pool);
	ttm_global_release();
}
EXPORT_SYMBOL(ttm_device_fini);

static void ttm_device_clear_lru_dma_mappings(struct ttm_device *bdev,
					      struct list_head *list)
{
	struct ttm_resource *res;

	spin_lock(&bdev->lru_lock);
	while ((res = list_first_entry_or_null(list, typeof(*res), lru))) {
		struct ttm_buffer_object *bo = res->bo;

		/* Take ref against racing releases once lru_lock is unlocked */
		if (!ttm_bo_get_unless_zero(bo))
			continue;

		list_del_init(&res->lru);
		spin_unlock(&bdev->lru_lock);

		if (bo->ttm)
			ttm_tt_unpopulate(bo->bdev, bo->ttm);

		ttm_bo_put(bo);
		spin_lock(&bdev->lru_lock);
	}
	spin_unlock(&bdev->lru_lock);
}

void ttm_device_clear_dma_mappings(struct ttm_device *bdev)
{
	struct ttm_resource_manager *man;
	unsigned int i, j;

	ttm_device_clear_lru_dma_mappings(bdev, &bdev->pinned);

	for (i = TTM_PL_SYSTEM; i < TTM_NUM_MEM_TYPES; ++i) {
		man = ttm_manager_type(bdev, i);
		if (!man || !man->use_tt)
			continue;

		for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j)
			ttm_device_clear_lru_dma_mappings(bdev, &man->lru[j]);
	}
}
EXPORT_SYMBOL(ttm_device_clear_dma_mappings);
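
/*
 * Usage sketch (all mydrv_* names are hypothetical, loosely modelled on how
 * existing drivers embed a struct ttm_device): ttm_device_init() at load
 * time is paired with ttm_device_clear_dma_mappings() and ttm_device_fini()
 * at unload time:
 *
 *	ret = ttm_device_init(&mydrv->bdev, &mydrv_bo_driver, drm->dev,
 *			      drm->anon_inode->i_mapping,
 *			      drm->vma_offset_manager,
 *			      mydrv->use_dma_alloc, mydrv->use_dma32);
 *	if (ret)
 *		return ret;
 *
 *	...
 *
 *	ttm_device_clear_dma_mappings(&mydrv->bdev);
 *	ttm_device_fini(&mydrv->bdev);
 */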