/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <drm/drm_cache.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/ttm/ttm_set_memory.h>

/**
 * Allocates a ttm structure for the given BO.
 */
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	uint32_t page_flags = 0;

	dma_resv_assert_held(bo->base.resv);

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	if (bdev->no_retry)
		page_flags |= TTM_PAGE_FLAG_NO_RETRY;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
		break;
	case ttm_bo_type_kernel:
		break;
	case ttm_bo_type_sg:
		page_flags |= TTM_PAGE_FLAG_SG;
		break;
	default:
		bo->ttm = NULL;
		pr_err("Illegal buffer object type\n");
		return -EINVAL;
	}

	bo->ttm = bdev->driver->ttm_tt_create(bo, page_flags);
	if (unlikely(bo->ttm == NULL))
		return -ENOMEM;

	return 0;
}
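/*
 * Illustrative sketch, not part of this file: the ttm_tt_create()
 * driver hook invoked above typically embeds a struct ttm_tt in a
 * driver-private structure and initializes it with ttm_tt_init()
 * below.  The "mydrv" names are hypothetical.
 *
 *	static struct ttm_tt *
 *	mydrv_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags)
 *	{
 *		struct mydrv_ttm *tt;
 *
 *		tt = kzalloc(sizeof(*tt), GFP_KERNEL);
 *		if (tt == NULL)
 *			return NULL;
 *		tt->ttm.func = &mydrv_backend_func;	// bind/unbind/destroy ops
 *		if (ttm_tt_init(&tt->ttm, bo, page_flags)) {
 *			kfree(tt);
 *			return NULL;
 *		}
 *		return &tt->ttm;
 *	}
 */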
/**
 * Allocates storage for pointers to the pages that back the ttm.
 */
static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(void*),
				    GFP_KERNEL | __GFP_ZERO);
	if (!ttm->pages)
		return -ENOMEM;
	return 0;
}

static int ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
	ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages,
					sizeof(*ttm->ttm.pages) +
					sizeof(*ttm->dma_address),
					GFP_KERNEL | __GFP_ZERO);
	if (!ttm->ttm.pages)
		return -ENOMEM;
	ttm->dma_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
	return 0;
}

static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
	ttm->dma_address = kvmalloc_array(ttm->ttm.num_pages,
					  sizeof(*ttm->dma_address),
					  GFP_KERNEL | __GFP_ZERO);
	if (!ttm->dma_address)
		return -ENOMEM;
	return 0;
}

static int ttm_tt_set_page_caching(struct vm_page *p,
				   enum ttm_caching_state c_old,
				   enum ttm_caching_state c_new)
{
	int ret = 0;

	if (PageHighMem(p))
		return 0;

	if (c_old != tt_cached) {
		/* p isn't in the default caching state, set it to
		 * writeback first to free its current memtype. */

		ret = ttm_set_pages_wb(p, 1);
		if (ret)
			return ret;
	}

	if (c_new == tt_wc)
		ret = ttm_set_pages_wc(p, 1);
	else if (c_new == tt_uncached)
		ret = ttm_set_pages_uc(p, 1);

	return ret;
}

/*
 * Change caching policy for the linear kernel map
 * for range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct vm_page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (ttm->state == tt_unpopulated) {
		/* Change caching but don't populate */
		ttm->caching_state = c_state;
		return 0;
	}

	if (ttm->caching_state == tt_cached)
		drm_clflush_pages(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state,
						      c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page, c_state,
						      ttm->caching_state);
		}
	}

	return ret;
}

int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);

void ttm_tt_destroy(struct ttm_tt *ttm)
{
	if (ttm == NULL)
		return;

	ttm_tt_unbind(ttm);

	if (ttm->state == tt_unbound)
		ttm_tt_unpopulate(ttm);

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
	    ttm->swap_storage)
		uao_detach(ttm->swap_storage);

	ttm->swap_storage = NULL;
	ttm->func->destroy(ttm);
}
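/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * a mover applies the caching attributes of the new placement before
 * binding, e.g. in a ttm_bo_move_ttm()-style path:
 *
 *	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	ret = ttm_tt_bind(ttm, new_mem, ctx);
 */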
static void ttm_tt_init_fields(struct ttm_tt *ttm,
			       struct ttm_buffer_object *bo,
			       uint32_t page_flags)
{
	ttm->bdev = bo->bdev;
	ttm->num_pages = bo->num_pages;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->state = tt_unpopulated;
	ttm->swap_storage = NULL;
	ttm->sg = bo->sg;
}

int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		uint32_t page_flags)
{
	ttm_tt_init_fields(ttm, bo, page_flags);

	if (ttm_tt_alloc_page_directory(ttm)) {
		ttm_tt_destroy(ttm);
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

void ttm_tt_fini(struct ttm_tt *ttm)
{
	kvfree(ttm->pages);
	ttm->pages = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);

int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
		    uint32_t page_flags)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	int flags = BUS_DMA_WAITOK;

	ttm_tt_init_fields(ttm, bo, page_flags);

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
		ttm_tt_destroy(ttm);
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}

	ttm_dma->segs = km_alloc(round_page(ttm->num_pages *
	    sizeof(bus_dma_segment_t)), &kv_any, &kp_zero, &kd_waitok);

	ttm_dma->dmat = bo->bdev->dmat;

	if ((page_flags & TTM_PAGE_FLAG_DMA32) == 0)
		flags |= BUS_DMA_64BIT;
	if (bus_dmamap_create(ttm_dma->dmat, ttm->num_pages << PAGE_SHIFT,
	    ttm->num_pages, ttm->num_pages << PAGE_SHIFT, 0, flags,
	    &ttm_dma->map)) {
		km_free(ttm_dma->segs, round_page(ttm->num_pages *
		    sizeof(bus_dma_segment_t)), &kv_any, &kp_zero);
		ttm_tt_destroy(ttm);
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL(ttm_dma_tt_init);

int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
		   uint32_t page_flags)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	int flags = BUS_DMA_WAITOK;
	int ret;

	ttm_tt_init_fields(ttm, bo, page_flags);

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	if (page_flags & TTM_PAGE_FLAG_SG)
		ret = ttm_sg_tt_alloc_page_directory(ttm_dma);
	else
		ret = ttm_dma_tt_alloc_page_directory(ttm_dma);
	if (ret) {
		ttm_tt_destroy(ttm);
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}

	ttm_dma->segs = km_alloc(round_page(ttm->num_pages *
	    sizeof(bus_dma_segment_t)), &kv_any, &kp_zero, &kd_waitok);

	ttm_dma->dmat = bo->bdev->dmat;

	if ((page_flags & TTM_PAGE_FLAG_DMA32) == 0)
		flags |= BUS_DMA_64BIT;
	if (bus_dmamap_create(ttm_dma->dmat, ttm->num_pages << PAGE_SHIFT,
	    ttm->num_pages, ttm->num_pages << PAGE_SHIFT, 0, flags,
	    &ttm_dma->map)) {
		km_free(ttm_dma->segs, round_page(ttm->num_pages *
		    sizeof(bus_dma_segment_t)), &kv_any, &kp_zero);
		ttm_tt_destroy(ttm);
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL(ttm_sg_tt_init);

void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	if (ttm->pages)
		kvfree(ttm->pages);
	else
		kvfree(ttm_dma->dma_address);
	ttm->pages = NULL;
	ttm_dma->dma_address = NULL;

	bus_dmamap_destroy(ttm_dma->dmat, ttm_dma->map);
	km_free(ttm_dma->segs, round_page(ttm->num_pages *
	    sizeof(bus_dma_segment_t)), &kv_any, &kp_zero);
}
EXPORT_SYMBOL(ttm_dma_tt_fini);

void ttm_tt_unbind(struct ttm_tt *ttm)
{
	int ret;

	if (ttm->state == tt_bound) {
		ret = ttm->func->unbind(ttm);
		BUG_ON(ret);
		ttm->state = tt_unbound;
	}
}
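/*
 * Illustrative sketch, not part of this file: a driver that needs
 * per-page DMA addresses embeds struct ttm_dma_tt instead of a bare
 * struct ttm_tt, and pairs ttm_dma_tt_init() with ttm_dma_tt_fini()
 * in its backend destroy() hook ("mydrv" names are hypothetical):
 *
 *	struct mydrv_ttm {
 *		struct ttm_dma_tt dma;	// must stay first: &dma.ttm is
 *					// what ttm_tt_create() returns
 *	};
 *
 *	if (ttm_dma_tt_init(&tt->dma, bo, page_flags)) {
 *		kfree(tt);
 *		return NULL;
 *	}
 *	...
 *	ttm_dma_tt_fini(&tt->dma);	// from mydrv_backend_destroy()
 */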
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem,
		struct ttm_operation_ctx *ctx)
{
	int ret = 0;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	ret = ttm_tt_populate(ttm, ctx);
	if (ret)
		return ret;

	ret = ttm->func->bind(ttm, bo_mem);
	if (unlikely(ret != 0))
		return ret;

	ttm->state = tt_bound;

	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);

int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct uvm_object *swap_storage;
	struct vm_page *from_page;
	struct vm_page *to_page;
	struct pglist plist;
	int i;
	int ret = -ENOMEM;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	TAILQ_INIT(&plist);
	if (uvm_objwire(swap_storage, 0, ttm->num_pages << PAGE_SHIFT, &plist))
		goto out_err;

	from_page = TAILQ_FIRST(&plist);
	for (i = 0; i < ttm->num_pages; ++i) {
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL))
			goto out_err;

		uvm_pagecopy(from_page, to_page);
		from_page = TAILQ_NEXT(from_page, pageq);
	}

	uvm_objunwire(swap_storage, 0, ttm->num_pages << PAGE_SHIFT);

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
		uao_detach(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;
out_err:
	return ret;
}

int ttm_tt_swapout(struct ttm_tt *ttm, struct uvm_object *persistent_swap_storage)
{
	struct uvm_object *swap_storage;
	struct vm_page *from_page;
	struct vm_page *to_page;
	struct pglist plist;
	int i;
	int ret = -ENOMEM;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	if (!persistent_swap_storage) {
		swap_storage = uao_create(ttm->num_pages << PAGE_SHIFT, 0);
#ifdef notyet
		if (IS_ERR(swap_storage)) {
			pr_err("Failed allocating swap storage\n");
			return PTR_ERR(swap_storage);
		}
#endif
	} else {
		swap_storage = persistent_swap_storage;
	}

	TAILQ_INIT(&plist);
	if (uvm_objwire(swap_storage, 0, ttm->num_pages << PAGE_SHIFT, &plist))
		goto out_err;

	to_page = TAILQ_FIRST(&plist);
	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;

		uvm_pagecopy(from_page, to_page);
#ifdef notyet
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
#endif
		to_page = TAILQ_NEXT(to_page, pageq);
	}

	uvm_objunwire(swap_storage, 0, ttm->num_pages << PAGE_SHIFT);

	ttm->bdev->driver->ttm_tt_unpopulate(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistent_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;

	return 0;
out_err:
	if (!persistent_swap_storage)
		uao_detach(swap_storage);

	return ret;
}

static void ttm_tt_add_mapping(struct ttm_tt *ttm)
{
#ifdef __linux__
	pgoff_t i;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i]->mapping = ttm->bdev->dev_mapping;
#endif
}
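/*
 * Illustrative sketch, not part of this file: a driver's optional
 * ttm_tt_populate() hook usually just layers driver bookkeeping on
 * top of the generic pool allocator that ttm_tt_populate() below
 * falls back to ("mydrv" is hypothetical):
 *
 *	static int
 *	mydrv_ttm_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 *	{
 *		if (ttm->page_flags & TTM_PAGE_FLAG_SG) {
 *			ttm->state = tt_unbound; // pages come from the sg table
 *			return 0;
 *		}
 *		return ttm_pool_populate(ttm, ctx);
 *	}
 */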
int ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (ttm->bdev->driver->ttm_tt_populate)
		ret = ttm->bdev->driver->ttm_tt_populate(ttm, ctx);
	else
		ret = ttm_pool_populate(ttm, ctx);
	if (!ret)
		ttm_tt_add_mapping(ttm);
	return ret;
}

static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
{
	int i;
	struct vm_page *page;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i) {
		page = ttm->pages[i];
		if (unlikely(page == NULL))
			continue;
		pmap_page_protect(page, PROT_NONE);
	}
}

void ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	if (ttm->state == tt_unpopulated)
		return;

	ttm_tt_clear_mapping(ttm);
	if (ttm->bdev->driver->ttm_tt_unpopulate)
		ttm->bdev->driver->ttm_tt_unpopulate(ttm);
	else
		ttm_pool_unpopulate(ttm);
}