/*
 * Copyright 2011 (c) Oracle Corp.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 */

/*
 * A simple DMA pool loosely based on dmapool.c. It has certain advantages
 * over the DMA pools:
 * - Pool collects recently freed pages for reuse (and hooks up to
 *   the shrinker).
 * - Tracks currently in use pages.
 * - Tracks whether the page is WC, UC or cached (and reverts to WB
 *   when freed).
 */

#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/kthread.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/ttm/ttm_set_memory.h>

#define NUM_PAGES_TO_ALLOC		(PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION		4
#define FREE_ALL_PAGES			(~0U)
#define VADDR_FLAG_HUGE_POOL		1UL
#define VADDR_FLAG_UPDATED_COUNT	2UL

enum pool_type {
	IS_UNDEFINED	= 0,
	IS_WC		= 1 << 1,
	IS_UC		= 1 << 2,
	IS_CACHED	= 1 << 3,
	IS_DMA32	= 1 << 4,
	IS_HUGE		= 1 << 5
};
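
/*
 * Editor's note: the pool types above are combined as bit flags. A pool is
 * described by one caching mode (IS_WC, IS_UC or IS_CACHED), optionally
 * OR'ed with IS_DMA32 and/or IS_HUGE. ttm_to_type() below builds such a
 * mask from the TTM page flags and caching state; for example a cached
 * DMA32 pool is (IS_CACHED | IS_DMA32), and the huge pools in
 * ttm_dma_populate() are requested as (type | IS_HUGE).
 */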

/*
 * The pool structure. There are up to nine pools:
 *  - generic (not restricted to DMA32):
 *      - write combined, uncached, cached.
 *  - dma32 (up to 2^32 - so up to 4GB):
 *      - write combined, uncached, cached.
 *  - huge (not restricted to DMA32):
 *      - write combined, uncached, cached.
 * for each 'struct device'. The 'cached' is for pages that are actively used.
 * The other ones can be shrunk by the shrinker API if necessary.
 * @pools: The 'struct device->dma_pools' link.
 * @type: Type of the pool.
 * @lock: Protects the free_list from concurrent access. Must be
 *   used with irqsave/irqrestore variants because the pool allocator may be
 *   called from delayed work.
 * @free_list: Pool of pages that are free to be used. No order requirements.
 * @dev: The device that is associated with these pools.
 * @size: Size used during DMA allocation.
 * @npages_free: Count of available pages for re-use.
 * @npages_in_use: Count of pages that are in use.
 * @nfrees: Stats when pool is shrinking.
 * @nrefills: Stats when the pool is grown.
 * @gfp_flags: Flags to pass for alloc_page.
 * @name: Name of the pool.
 * @dev_name: Name derived from dev - similar to how dev_info works.
 *   Used during shutdown as the dev_info during release is unavailable.
 */
struct dma_pool {
	struct list_head pools; /* The 'struct device->dma_pools' link */
	enum pool_type type;
	spinlock_t lock;
	struct list_head free_list;
	struct device *dev;
	unsigned size;
	unsigned npages_free;
	unsigned npages_in_use;
	unsigned long nfrees; /* Stats when shrunk. */
	unsigned long nrefills; /* Stats when grown. */
	gfp_t gfp_flags;
	char name[13]; /* "cached dma32" */
	char dev_name[64]; /* Constructed from dev */
};

/*
 * The accounting structure that keeps track of the allocated page along
 * with its DMA address.
 * @page_list: The link to the 'page_list' in 'struct dma_pool'.
 * @vaddr: The virtual address of the page and a flag if the page belongs to a
 *   huge pool.
 * @p: The 'struct page' backing the allocation.
 * @dma: The bus address of the page. If the page is not allocated
 *   via the DMA API, it will be -1.
 */
struct dma_page {
	struct list_head page_list;
	unsigned long vaddr;
	struct page *p;
	dma_addr_t dma;
};

/*
 * Limits for the pool. They are handled without locks because the only place
 * where they may change is the sysfs store. They won't have immediate effect
 * anyway, so forcing serialization to access them is pointless.
 */

struct ttm_pool_opts {
	unsigned	alloc_size;
	unsigned	max_size;
	unsigned	small;
};
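
/*
 * Editor's note: all three limits are kept in pages internally. The sysfs
 * store/show handlers below convert to and from KiB (val / (PAGE_SIZE >> 10)),
 * so e.g. on a 4 KiB page system writing 16384 to pool_max_size caps a pool
 * at 4096 pages. Defaults are set in ttm_dma_page_alloc_init(): max_size from
 * the caller, small = SMALL_ALLOCATION, alloc_size = NUM_PAGES_TO_ALLOC.
 */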

/*
 * Contains the list of all of the 'struct device' and their corresponding
 * DMA pools. Guarded by _manager->lock.
 * @pools: The link to 'struct ttm_pool_manager->pools'
 * @dev: The 'struct device' associated with the 'pool'
 * @pool: The 'struct dma_pool' associated with the 'dev'
 */
struct device_pools {
	struct list_head pools;
	struct device *dev;
	struct dma_pool *pool;
};

/*
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * @lock: Lock used when adding/removing from pools
 * @pools: List of 'struct device' and 'struct dma_pool' tuples.
 * @options: Limits for the pool.
 * @npools: Total amount of pools in existence.
 * @mm_shrink: The structure used by [un]register_shrinker
 */
struct ttm_pool_manager {
	struct mutex		lock;
	struct list_head	pools;
	struct ttm_pool_opts	options;
	unsigned		npools;
	struct shrinker		mm_shrink;
	struct kobject		kobj;
};

static struct ttm_pool_manager *_manager;

static struct attribute ttm_page_pool_max = {
	.name = "pool_max_size",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_small = {
	.name = "pool_small_allocation",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_alloc_size = {
	.name = "pool_allocation_size",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute *ttm_pool_attrs[] = {
	&ttm_page_pool_max,
	&ttm_page_pool_small,
	&ttm_page_pool_alloc_size,
	NULL
};

static void ttm_pool_kobj_release(struct kobject *kobj)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	kfree(m);
}

static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
			      const char *buffer, size_t size)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	int chars;
	unsigned val;

	chars = sscanf(buffer, "%u", &val);
	if (chars == 0)
		return size;

	/* Convert kb to number of pages */
	val = val / (PAGE_SIZE >> 10);

	if (attr == &ttm_page_pool_max) {
		m->options.max_size = val;
	} else if (attr == &ttm_page_pool_small) {
		m->options.small = val;
	} else if (attr == &ttm_page_pool_alloc_size) {
		if (val > NUM_PAGES_TO_ALLOC*8) {
			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
			return size;
		} else if (val > NUM_PAGES_TO_ALLOC) {
			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
		}
		m->options.alloc_size = val;
	}

	return size;
}

static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr,
			     char *buffer)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	unsigned val = 0;

	if (attr == &ttm_page_pool_max)
		val = m->options.max_size;
	else if (attr == &ttm_page_pool_small)
		val = m->options.small;
	else if (attr == &ttm_page_pool_alloc_size)
		val = m->options.alloc_size;

	val = val * (PAGE_SIZE >> 10);

	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}

static const struct sysfs_ops ttm_pool_sysfs_ops = {
	.show = &ttm_pool_show,
	.store = &ttm_pool_store,
};

static struct kobj_type ttm_pool_kobj_type = {
	.release = &ttm_pool_kobj_release,
	.sysfs_ops = &ttm_pool_sysfs_ops,
	.default_attrs = ttm_pool_attrs,
};

static int ttm_set_pages_caching(struct dma_pool *pool,
				 struct page **pages, unsigned cpages)
{
	int r = 0;
	/* Set page caching */
	if (pool->type & IS_UC) {
		r = ttm_set_pages_array_uc(pages, cpages);
		if (r)
			pr_err("%s: Failed to set %d pages to uc!\n",
			       pool->dev_name, cpages);
	}
	if (pool->type & IS_WC) {
		r = ttm_set_pages_array_wc(pages, cpages);
		if (r)
			pr_err("%s: Failed to set %d pages to wc!\n",
			       pool->dev_name, cpages);
	}
	return r;
}
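
/*
 * Editor's note: the two helpers below allocate and free one accounting
 * 'struct dma_page' together with a pool->size sized DMA buffer (one
 * PAGE_SIZE page, or HPAGE_PMD_SIZE for huge pools) through
 * dma_alloc_attrs()/dma_free_coherent(). The huge-pool marker is stashed in
 * the low bits of the kernel virtual address, so it has to be masked off
 * again before the address is handed back to the DMA API.
 */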

static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
{
	dma_addr_t dma = d_page->dma;

	d_page->vaddr &= ~VADDR_FLAG_HUGE_POOL;
	dma_free_coherent(pool->dev, pool->size, (void *)d_page->vaddr, dma);

	kfree(d_page);
	d_page = NULL;
}

static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
{
	struct dma_page *d_page;
	unsigned long attrs = 0;
	void *vaddr;

	d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);
	if (!d_page)
		return NULL;

	if (pool->type & IS_HUGE)
		attrs = DMA_ATTR_NO_WARN;

	vaddr = dma_alloc_attrs(pool->dev, pool->size, &d_page->dma,
				pool->gfp_flags, attrs);
	if (vaddr) {
		if (is_vmalloc_addr(vaddr))
			d_page->p = vmalloc_to_page(vaddr);
		else
			d_page->p = virt_to_page(vaddr);
		d_page->vaddr = (unsigned long)vaddr;
		if (pool->type & IS_HUGE)
			d_page->vaddr |= VADDR_FLAG_HUGE_POOL;
	} else {
		kfree(d_page);
		d_page = NULL;
	}
	return d_page;
}

static enum pool_type ttm_to_type(int flags, enum ttm_caching_state cstate)
{
	enum pool_type type = IS_UNDEFINED;

	if (flags & TTM_PAGE_FLAG_DMA32)
		type |= IS_DMA32;
	if (cstate == tt_cached)
		type |= IS_CACHED;
	else if (cstate == tt_uncached)
		type |= IS_UC;
	else
		type |= IS_WC;

	return type;
}

static void ttm_pool_update_free_locked(struct dma_pool *pool,
					unsigned freed_pages)
{
	pool->npages_free -= freed_pages;
	pool->nfrees += freed_pages;
}
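
/*
 * Editor's note: pages that were mapped write-combined or uncached must be
 * switched back to write-back before they are returned to the system,
 * otherwise the freed pages would still carry a caching attribute that
 * conflicts with the kernel's normal WB mapping. The cached (write-back)
 * pool can skip this step, which is why ttm_dma_page_put() and
 * ttm_dma_pages_put() below test for IS_CACHED first.
 */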

/* set memory back to wb and free the pages. */
static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
{
	struct page *page = d_page->p;
	unsigned num_pages;

	/* Don't set WB on WB page pool. */
	if (!(pool->type & IS_CACHED)) {
		num_pages = pool->size / PAGE_SIZE;
		if (ttm_set_pages_wb(page, num_pages))
			pr_err("%s: Failed to set %d pages to wb!\n",
			       pool->dev_name, num_pages);
	}

	list_del(&d_page->page_list);
	__ttm_dma_free_page(pool, d_page);
}

static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
			      struct page *pages[], unsigned npages)
{
	struct dma_page *d_page, *tmp;

	if (pool->type & IS_HUGE) {
		list_for_each_entry_safe(d_page, tmp, d_pages, page_list)
			ttm_dma_page_put(pool, d_page);

		return;
	}

	/* Don't set WB on WB page pool. */
	if (npages && !(pool->type & IS_CACHED) &&
	    ttm_set_pages_array_wb(pages, npages))
		pr_err("%s: Failed to set %d pages to wb!\n",
		       pool->dev_name, npages);

	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
		list_del(&d_page->page_list);
		__ttm_dma_free_page(pool, d_page);
	}
}

/*
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * number of pages in one go.
 *
 * @pool: to free the pages from
 * @nr_free: Number of pages to free, or FREE_ALL_PAGES to free all pages in
 *	the pool.
 * @use_static: Safe to use the static buffer
 **/
static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
				       bool use_static)
{
	static struct page *static_buf[NUM_PAGES_TO_ALLOC];
	unsigned long irq_flags;
	struct dma_page *dma_p, *tmp;
	struct page **pages_to_free;
	struct list_head d_pages;
	unsigned freed_pages = 0,
		 npages_to_free = nr_free;

	if (NUM_PAGES_TO_ALLOC < nr_free)
		npages_to_free = NUM_PAGES_TO_ALLOC;
#if 0
	if (nr_free > 1) {
		pr_debug("%s: (%s:%d) Attempting to free %d (%d) pages\n",
			 pool->dev_name, pool->name, current->pid,
			 npages_to_free, nr_free);
	}
#endif
	if (use_static)
		pages_to_free = static_buf;
	else
		pages_to_free = kmalloc_array(npages_to_free,
					      sizeof(struct page *),
					      GFP_KERNEL);

	if (!pages_to_free) {
		pr_debug("%s: Failed to allocate memory for pool free operation\n",
			 pool->dev_name);
		return 0;
	}
	INIT_LIST_HEAD(&d_pages);
restart:
	spin_lock_irqsave(&pool->lock, irq_flags);

	/* We pick the oldest ones off the list */
	list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list,
					 page_list) {
		if (freed_pages >= npages_to_free)
			break;

		/* Move the dma_page from one list to another. */
		list_move(&dma_p->page_list, &d_pages);

		pages_to_free[freed_pages++] = dma_p->p;
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {

			ttm_pool_update_free_locked(pool, freed_pages);
			/**
			 * Because changing page caching is costly
			 * we unlock the pool to prevent stalling.
			 */
			spin_unlock_irqrestore(&pool->lock, irq_flags);

			ttm_dma_pages_put(pool, &d_pages, pages_to_free,
					  freed_pages);

			INIT_LIST_HEAD(&d_pages);

			if (likely(nr_free != FREE_ALL_PAGES))
				nr_free -= freed_pages;

			if (NUM_PAGES_TO_ALLOC >= nr_free)
				npages_to_free = nr_free;
			else
				npages_to_free = NUM_PAGES_TO_ALLOC;

			freed_pages = 0;

			/* free all so restart the processing */
			if (nr_free)
				goto restart;

			/* Not allowed to fall through or break because
			 * following context is inside spinlock while we are
			 * outside here.
			 */
			goto out;

		}
	}

	/* remove range of pages from the pool */
	if (freed_pages) {
		ttm_pool_update_free_locked(pool, freed_pages);
		nr_free -= freed_pages;
	}

	spin_unlock_irqrestore(&pool->lock, irq_flags);

	if (freed_pages)
		ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
out:
	if (pages_to_free != static_buf)
		kfree(pages_to_free);
	return nr_free;
}
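
/*
 * Editor's note: ttm_dma_free_pool() tears down one (dev, type) pool
 * completely. It is reached both through the devres destructor
 * ttm_dma_pool_release() when the 'struct device' goes away and from
 * ttm_dma_page_alloc_fini() at module teardown. It takes the global
 * _manager->lock itself, which is why the static buffer of
 * ttm_dma_page_pool_free() may be used here.
 */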

static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
{
	struct device_pools *p;
	struct dma_pool *pool;

	if (!dev)
		return;

	mutex_lock(&_manager->lock);
	list_for_each_entry_reverse(p, &_manager->pools, pools) {
		if (p->dev != dev)
			continue;
		pool = p->pool;
		if (pool->type != type)
			continue;

		list_del(&p->pools);
		kfree(p);
		_manager->npools--;
		break;
	}
	list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {
		if (pool->type != type)
			continue;
		/* Takes a spinlock.. */
		/* OK to use static buffer since global mutex is held. */
		ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, true);
		WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
		/* This code path is called after _all_ references to the
		 * struct device have been dropped - so nobody should be
		 * touching it. In case somebody is trying to _add_ we are
		 * guarded by the mutex. */
		list_del(&pool->pools);
		kfree(pool);
		break;
	}
	mutex_unlock(&_manager->lock);
}

/*
 * When the 'struct device' is freed this destructor is run, although the
 * pool might have already been freed earlier.
 */
static void ttm_dma_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	if (pool)
		ttm_dma_free_pool(dev, pool->type);
}

static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
					  enum pool_type type)
{
	const char *n[] = {"wc", "uc", "cached", " dma32", "huge"};
	enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_HUGE};
	struct device_pools *sec_pool = NULL;
	struct dma_pool *pool = NULL, **ptr;
	unsigned i;
	int ret = -ENODEV;
	char *p;

	if (!dev)
		return NULL;

	ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	ret = -ENOMEM;

	pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL,
			    dev_to_node(dev));
	if (!pool)
		goto err_mem;

	sec_pool = kmalloc_node(sizeof(struct device_pools), GFP_KERNEL,
				dev_to_node(dev));
	if (!sec_pool)
		goto err_mem;

	INIT_LIST_HEAD(&sec_pool->pools);
	sec_pool->dev = dev;
	sec_pool->pool = pool;

	INIT_LIST_HEAD(&pool->free_list);
	INIT_LIST_HEAD(&pool->pools);
	spin_lock_init(&pool->lock);
	pool->dev = dev;
	pool->npages_free = pool->npages_in_use = 0;
	pool->nfrees = 0;
	pool->gfp_flags = flags;
	if (type & IS_HUGE)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		pool->size = HPAGE_PMD_SIZE;
#else
		BUG();
#endif
	else
		pool->size = PAGE_SIZE;
	pool->type = type;
	pool->nrefills = 0;
	p = pool->name;
	for (i = 0; i < ARRAY_SIZE(t); i++) {
		if (type & t[i]) {
			p += snprintf(p, sizeof(pool->name) - (p - pool->name),
				      "%s", n[i]);
		}
	}
	*p = 0;
	/* We copy the name for pr_* calls b/c when dma_pool_destroy is called
	 * - the kobj->name has already been deallocated. */
	snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s",
		 dev_driver_string(dev), dev_name(dev));
	mutex_lock(&_manager->lock);
	/* You can get the dma_pool from either the global: */
	list_add(&sec_pool->pools, &_manager->pools);
	_manager->npools++;
	/* or from 'struct device': */
	list_add(&pool->pools, &dev->dma_pools);
	mutex_unlock(&_manager->lock);

	*ptr = pool;
	devres_add(dev, ptr);

	return pool;
err_mem:
	devres_free(ptr);
	kfree(sec_pool);
	kfree(pool);
	return ERR_PTR(ret);
}
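
/*
 * Editor's note: the naming loop in ttm_dma_pool_init() builds pool->name by
 * concatenating the matching entries of n[], so a cached DMA32 pool ends up
 * as "cached dma32" - the longest combination used in practice, which is
 * what name[13] in struct dma_pool is sized for.
 */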

static struct dma_pool *ttm_dma_find_pool(struct device *dev,
					  enum pool_type type)
{
	struct dma_pool *pool, *tmp;

	if (type == IS_UNDEFINED)
		return NULL;

	/* NB: We iterate on the 'struct dev' which has no spinlock, but
	 * it does have a kref which we have taken. The kref is taken during
	 * graphic driver loading - in the drm_pci_init it calls either
	 * pci_dev_get or pci_register_driver which both end up taking a kref
	 * on 'struct device'.
	 *
	 * On teardown, the graphic drivers end up quiescing the TTM
	 * (put_pages) and call the devres destructors: ttm_dma_pool_release.
	 * The nice thing is that at that point in time there are no pages
	 * associated with the driver, so this function will not be called.
	 */
	list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools)
		if (pool->type == type)
			return pool;
	return NULL;
}

/*
 * Free the pages that failed to change their caching state. If there
 * are pages that have changed their caching state already, put them in
 * the pool.
 */
static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool,
						 struct list_head *d_pages,
						 struct page **failed_pages,
						 unsigned cpages)
{
	struct dma_page *d_page, *tmp;
	struct page *p;
	unsigned i = 0;

	p = failed_pages[0];
	if (!p)
		return;
	/* Find the failed page. */
	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
		if (d_page->p != p)
			continue;
		/* .. and then progress over the full list. */
		list_del(&d_page->page_list);
		__ttm_dma_free_page(pool, d_page);
		if (++i < cpages)
			p = failed_pages[i];
		else
			break;
	}
}
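
/*
 * Editor's note: changing the caching attribute of a page is expensive, so
 * the allocation path below collects freshly allocated pages in
 * caching_array and converts up to PAGE_SIZE/sizeof(struct page *) of them
 * in one ttm_set_pages_caching() call, outside of the pool spinlock.
 */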

/*
 * Allocate 'count' pages and add each of them to the 'd_pages' list.
 * We return zero for success, and negative numbers as errors.
 */
static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
					struct list_head *d_pages,
					unsigned count)
{
	struct page **caching_array;
	struct dma_page *dma_p;
	struct page *p;
	int r = 0;
	unsigned i, j, npages, cpages;
	unsigned max_cpages = min(count,
			(unsigned)(PAGE_SIZE/sizeof(struct page *)));

	/* allocate array for page caching change */
	caching_array = kmalloc_array(max_cpages, sizeof(struct page *),
				      GFP_KERNEL);

	if (!caching_array) {
		pr_debug("%s: Unable to allocate table for new pages\n",
			 pool->dev_name);
		return -ENOMEM;
	}

	if (count > 1)
		pr_debug("%s: (%s:%d) Getting %d pages\n",
			 pool->dev_name, pool->name, current->pid, count);

	for (i = 0, cpages = 0; i < count; ++i) {
		dma_p = __ttm_dma_alloc_page(pool);
		if (!dma_p) {
			pr_debug("%s: Unable to get page %u\n",
				 pool->dev_name, i);

			/* store already allocated pages in the pool after
			 * setting the caching state */
			if (cpages) {
				r = ttm_set_pages_caching(pool, caching_array,
							  cpages);
				if (r)
					ttm_dma_handle_caching_state_failure(
						pool, d_pages, caching_array,
						cpages);
			}
			r = -ENOMEM;
			goto out;
		}
		p = dma_p->p;
		list_add(&dma_p->page_list, d_pages);

#ifdef CONFIG_HIGHMEM
		/* gfp flags of highmem pages should never be dma32, so we
		 * should be fine in that case.
		 */
		if (PageHighMem(p))
			continue;
#endif

		npages = pool->size / PAGE_SIZE;
		for (j = 0; j < npages; ++j) {
			caching_array[cpages++] = p + j;
			if (cpages == max_cpages) {
				/* Note: Cannot hold the spinlock */
				r = ttm_set_pages_caching(pool, caching_array,
							  cpages);
				if (r) {
					ttm_dma_handle_caching_state_failure(
						pool, d_pages, caching_array,
						cpages);
					goto out;
				}
				cpages = 0;
			}
		}
	}

	if (cpages) {
		r = ttm_set_pages_caching(pool, caching_array, cpages);
		if (r)
			ttm_dma_handle_caching_state_failure(pool, d_pages,
					caching_array, cpages);
	}
out:
	kfree(caching_array);
	return r;
}

/*
 * @return count of pages available in the pool after (re)filling it.
 */
static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
					 unsigned long *irq_flags)
{
	unsigned count = _manager->options.small;
	int r = pool->npages_free;

	if (count > pool->npages_free) {
		struct list_head d_pages;

		INIT_LIST_HEAD(&d_pages);

		spin_unlock_irqrestore(&pool->lock, *irq_flags);

		/* Returns how many more are necessary to fulfill the
		 * request. */
		r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count);

		spin_lock_irqsave(&pool->lock, *irq_flags);
		if (!r) {
			/* Add the fresh to the end.. */
			list_splice(&d_pages, &pool->free_list);
			++pool->nrefills;
			pool->npages_free += count;
			r = count;
		} else {
			struct dma_page *d_page;
			unsigned cpages = 0;

			pr_debug("%s: Failed to fill %s pool (r:%d)!\n",
				 pool->dev_name, pool->name, r);

			list_for_each_entry(d_page, &d_pages, page_list) {
				cpages++;
			}
			list_splice_tail(&d_pages, &pool->free_list);
			pool->npages_free += cpages;
			r = cpages;
		}
	}
	return r;
}

/*
 * The populate list is actually a stack (not that it matters as TTM
 * allocates one page at a time).
 * return dma_page pointer if success, otherwise NULL.
 */
static struct dma_page *ttm_dma_pool_get_pages(struct dma_pool *pool,
					       struct ttm_dma_tt *ttm_dma,
					       unsigned index)
{
	struct dma_page *d_page = NULL;
	struct ttm_tt *ttm = &ttm_dma->ttm;
	unsigned long irq_flags;
	int count;

	spin_lock_irqsave(&pool->lock, irq_flags);
	count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
	if (count) {
		d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
		ttm->pages[index] = d_page->p;
		ttm_dma->dma_address[index] = d_page->dma;
		list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
		pool->npages_in_use += 1;
		pool->npages_free -= 1;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	return d_page;
}

static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	gfp_t gfp_flags;

	if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
		gfp_flags = GFP_USER | GFP_DMA32;
	else
		gfp_flags = GFP_HIGHUSER;
	if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	if (huge) {
		gfp_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
			__GFP_KSWAPD_RECLAIM;
		gfp_flags &= ~__GFP_MOVABLE;
		gfp_flags &= ~__GFP_COMP;
	}

	if (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY)
		gfp_flags |= __GFP_RETRY_MAYFAIL;

	return gfp_flags;
}
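
/*
 * Editor's note: for the huge pool the flags above request a "light"
 * transparent-hugepage style allocation (GFP_TRANSHUGE_LIGHT, __GFP_NORETRY,
 * kswapd reclaim only) so a failed huge allocation falls back quickly to the
 * normal pool instead of stalling in compaction, and __GFP_MOVABLE and
 * __GFP_COMP are cleared so the DMA layer gets plain, non-compound, unmovable
 * pages. The original code does not document the reasoning, so treat this as
 * an interpretation rather than a guarantee.
 */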

/*
 * On success the pages list will hold the requested number of correctly
 * cached pages. On failure a negative return value (-ENOMEM, etc) is
 * returned.
 */
int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
			struct ttm_operation_ctx *ctx)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
	unsigned long num_pages = ttm->num_pages;
	struct dma_pool *pool;
	struct dma_page *d_page;
	enum pool_type type;
	unsigned i;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (ttm_check_under_lowerlimit(mem_glob, num_pages, ctx))
		return -ENOMEM;

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	i = 0;

	type = ttm_to_type(ttm->page_flags, ttm->caching_state);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
		goto skip_huge;

	pool = ttm_dma_find_pool(dev, type | IS_HUGE);
	if (!pool) {
		gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, true);

		pool = ttm_dma_pool_init(dev, gfp_flags, type | IS_HUGE);
		if (IS_ERR_OR_NULL(pool))
			goto skip_huge;
	}

	while (num_pages >= HPAGE_PMD_NR) {
		unsigned j;

		d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
		if (!d_page)
			break;

		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						pool->size, ctx);
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return -ENOMEM;
		}

		d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
		for (j = i + 1; j < (i + HPAGE_PMD_NR); ++j) {
			ttm->pages[j] = ttm->pages[j - 1] + 1;
			ttm_dma->dma_address[j] = ttm_dma->dma_address[j - 1] +
				PAGE_SIZE;
		}

		i += HPAGE_PMD_NR;
		num_pages -= HPAGE_PMD_NR;
	}

skip_huge:
#endif

	pool = ttm_dma_find_pool(dev, type);
	if (!pool) {
		gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, false);

		pool = ttm_dma_pool_init(dev, gfp_flags, type);
		if (IS_ERR_OR_NULL(pool))
			return -ENOMEM;
	}

	while (num_pages) {
		d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
		if (!d_page) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return -ENOMEM;
		}

		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						pool->size, ctx);
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return -ENOMEM;
		}

		d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
		++i;
		--num_pages;
	}

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return ret;
		}
	}

	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL_GPL(ttm_dma_populate);
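
/*
 * Editor's note: every error path in ttm_dma_populate() above unwinds
 * through ttm_dma_unpopulate(), so the function below must cope with a
 * partially populated ttm: it only releases the global memory accounting
 * for pages flagged with VADDR_FLAG_UPDATED_COUNT and rebuilds the
 * ttm->pages array from pages_list before clearing it.
 */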

/* Put all pages in pages list to correct pool to wait for reuse */
void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
	struct dma_pool *pool;
	struct dma_page *d_page, *next;
	enum pool_type type;
	bool is_cached = false;
	unsigned count, i, npages = 0;
	unsigned long irq_flags;

	type = ttm_to_type(ttm->page_flags, ttm->caching_state);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	pool = ttm_dma_find_pool(dev, type | IS_HUGE);
	if (pool) {
		count = 0;
		list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
					 page_list) {
			if (!(d_page->vaddr & VADDR_FLAG_HUGE_POOL))
				continue;

			count++;
			if (d_page->vaddr & VADDR_FLAG_UPDATED_COUNT) {
				ttm_mem_global_free_page(mem_glob, d_page->p,
							 pool->size);
				d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT;
			}
			ttm_dma_page_put(pool, d_page);
		}

		spin_lock_irqsave(&pool->lock, irq_flags);
		pool->npages_in_use -= count;
		pool->nfrees += count;
		spin_unlock_irqrestore(&pool->lock, irq_flags);
	}
#endif

	pool = ttm_dma_find_pool(dev, type);
	if (!pool)
		return;

	is_cached = (ttm_dma_find_pool(pool->dev,
		     ttm_to_type(ttm->page_flags, tt_cached)) == pool);

	/* make sure pages array match list and count number of pages */
	count = 0;
	list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
				 page_list) {
		ttm->pages[count] = d_page->p;
		count++;

		if (d_page->vaddr & VADDR_FLAG_UPDATED_COUNT) {
			ttm_mem_global_free_page(mem_glob, d_page->p,
						 pool->size);
			d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT;
		}

		if (is_cached)
			ttm_dma_page_put(pool, d_page);
	}

	spin_lock_irqsave(&pool->lock, irq_flags);
	pool->npages_in_use -= count;
	if (is_cached) {
		pool->nfrees += count;
	} else {
		pool->npages_free += count;
		list_splice(&ttm_dma->pages_list, &pool->free_list);
		/*
		 * Wait to have at least NUM_PAGES_TO_ALLOC number of pages
		 * to free in order to minimize calls to set_memory_wb().
		 */
		if (pool->npages_free >= (_manager->options.max_size +
					  NUM_PAGES_TO_ALLOC))
			npages = pool->npages_free - _manager->options.max_size;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	for (i = 0; i < ttm->num_pages; i++) {
		ttm->pages[i] = NULL;
		ttm_dma->dma_address[i] = 0;
	}

	/* shrink pool if necessary (only on !is_cached pools) */
	if (npages)
		ttm_dma_page_pool_free(pool, npages, false);
	ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
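
/*
 * Editor's note on the shrinker integration: ttm_dma_pool_shrink_count()
 * reports the total number of free pages across all pools, and
 * ttm_dma_pool_shrink_scan() walks the pools round-robin and frees from
 * them. Both only mutex_trylock() _manager->lock and bail out (0 or
 * SHRINK_STOP) if it is contended, so reclaim cannot deadlock against a
 * path that already holds the lock when the shrinker is invoked.
 */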

/**
 * Callback for mm to request pool to reduce number of pages held.
 *
 * XXX: (dchinner) Deadlock warning!
 *
 * I'm getting sadder as I hear more pathetical whimpers about needing per-pool
 * shrinkers.
 */
static unsigned long
ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	static unsigned start_pool;
	unsigned idx = 0;
	unsigned pool_offset;
	unsigned shrink_pages = sc->nr_to_scan;
	struct device_pools *p;
	unsigned long freed = 0;

	if (list_empty(&_manager->pools))
		return SHRINK_STOP;

	if (!mutex_trylock(&_manager->lock))
		return SHRINK_STOP;
	if (!_manager->npools)
		goto out;
	pool_offset = ++start_pool % _manager->npools;
	list_for_each_entry(p, &_manager->pools, pools) {
		unsigned nr_free;

		if (!p->dev)
			continue;
		if (shrink_pages == 0)
			break;
		/* Do it in round-robin fashion. */
		if (++idx < pool_offset)
			continue;
		nr_free = shrink_pages;
		/* OK to use static buffer since global mutex is held. */
		shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
		freed += nr_free - shrink_pages;

		pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
			 p->pool->dev_name, p->pool->name, current->pid,
			 nr_free, shrink_pages);
	}
out:
	mutex_unlock(&_manager->lock);
	return freed;
}

static unsigned long
ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct device_pools *p;
	unsigned long count = 0;

	if (!mutex_trylock(&_manager->lock))
		return 0;
	list_for_each_entry(p, &_manager->pools, pools)
		count += p->pool->npages_free;
	mutex_unlock(&_manager->lock);
	return count;
}

static int ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
	manager->mm_shrink.count_objects = ttm_dma_pool_shrink_count;
	manager->mm_shrink.scan_objects = &ttm_dma_pool_shrink_scan;
	manager->mm_shrink.seeks = 1;
	return register_shrinker(&manager->mm_shrink);
}

static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
	unregister_shrinker(&manager->mm_shrink);
}

int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
	int ret;

	WARN_ON(_manager);

	pr_info("Initializing DMA pool allocator\n");

	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
	if (!_manager)
		return -ENOMEM;

	mutex_init(&_manager->lock);
	INIT_LIST_HEAD(&_manager->pools);

	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

	/* This takes care of auto-freeing the _manager */
	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
				   &glob->kobj, "dma_pool");
	if (unlikely(ret != 0))
		goto error;

	ret = ttm_dma_pool_mm_shrink_init(_manager);
	if (unlikely(ret != 0))
		goto error;
	return 0;

error:
	kobject_put(&_manager->kobj);
	_manager = NULL;
	return ret;
}

void ttm_dma_page_alloc_fini(void)
{
	struct device_pools *p, *t;

	pr_info("Finalizing DMA pool allocator\n");
	ttm_dma_pool_mm_shrink_fini(_manager);

	list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
		dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name,
			current->pid);
		WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release,
				       ttm_dma_pool_match, p->pool));
		ttm_dma_free_pool(p->dev, p->pool->type);
	}
	kobject_put(&_manager->kobj);
	_manager = NULL;
}

int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
{
	struct device_pools *p;
	struct dma_pool *pool = NULL;

	if (!_manager) {
		seq_printf(m, "No pool allocator running.\n");
		return 0;
	}
	seq_printf(m, "         pool      refills   pages freed    inuse available     name\n");
	mutex_lock(&_manager->lock);
	list_for_each_entry(p, &_manager->pools, pools) {
		struct device *dev = p->dev;
		if (!dev)
			continue;
		pool = p->pool;
		seq_printf(m, "%13s %12ld %13ld %8d %8d %8s\n",
				pool->name, pool->nrefills,
				pool->nfrees, pool->npages_in_use,
				pool->npages_free,
				pool->dev_name);
	}
	mutex_unlock(&_manager->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);

#endif /* CONFIG_SWIOTLB || CONFIG_INTEL_IOMMU */