/*
 * Copyright (c) Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 *          Pauli Nieminen <suokkos@gmail.com>
 */
/*
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
 */

/* Simple list-based uncached page pool
 * - Pool collects recently freed pages for reuse
 * - Uses page->lru to keep a free list
 * - Doesn't track pages currently in use
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/dma-mapping.h>

#include <linux/atomic.h>

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>

#include <sys/eventhandler.h>
#include <vm/vm_page2.h>

#ifdef TTM_HAS_AGP
#include <asm/agp.h>
#endif

#define NUM_PAGES_TO_ALLOC	(PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION	16
#define FREE_ALL_PAGES		(~0U)
/* times are in msecs */
#define PAGE_FREE_INTERVAL	1000

/**
 * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
 *
 * @lock: Protects the shared pool from concurrent access. Must be used with
 * irqsave/irqrestore variants because the pool allocator may be called from
 * delayed work.
 * @fill_lock: Prevent concurrent calls to fill.
 * @list: Pool of free uc/wc pages for fast reuse.
 * @gfp_flags: Flags to pass to alloc_page.
 * @npages: Number of pages in pool.
 * @name: Pool name, used in debug output.
 * @nfrees: Statistics: number of pages freed from this pool.
 * @nrefills: Statistics: number of times this pool has been refilled.
 */
struct ttm_page_pool {
	struct lock	lock;
	bool		fill_lock;
	struct pglist	list;
	gfp_t		gfp_flags;
	unsigned	npages;
	char		*name;
	unsigned long	nfrees;
	unsigned long	nrefills;
};
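
/*
 * Illustrative sketch (not compiled in): how a page moves through a pool.
 * On this port the free list is a TAILQ of vm_page linked through pageq,
 * which plays the role of page->lru in the Linux original.
 */
#if 0
	/* freeing: push onto the pool free list under pool->lock */
	TAILQ_INSERT_TAIL(&pool->list, page, pageq);
	pool->npages++;

	/* reuse: pop from the head on the next allocation */
	page = TAILQ_FIRST(&pool->list);
	TAILQ_REMOVE(&pool->list, page, pageq);
	pool->npages--;
#endif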
/**
 * Limits for the pool. They are handled without locks because the only place
 * where they may change is the sysfs store path. They won't have an immediate
 * effect anyway, so forcing serialization to access them is pointless.
 */

struct ttm_pool_opts {
	unsigned	alloc_size;
	unsigned	max_size;
	unsigned	small;
};

#define NUM_POOLS 4

/**
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * Manager is a read-only object for the pool code, so it doesn't need locking.
 *
 * @kobj: sysfs kobject exposing the pool_* tunables.
 * @mm_shrink: shrinker hook (only the count callback is used on this port).
 * @lowmem_handler: DragonFly vm_lowmem event handler driving pool shrinking.
 * @options: pool limits, see struct ttm_pool_opts.
 * @pools: All pool objects in use.
 **/
struct ttm_pool_manager {
	struct kobject		kobj;
	struct shrinker		mm_shrink;
	eventhandler_tag	lowmem_handler;
	struct ttm_pool_opts	options;

	union {
		struct ttm_page_pool	pools[NUM_POOLS];
		struct {
			struct ttm_page_pool	wc_pool;
			struct ttm_page_pool	uc_pool;
			struct ttm_page_pool	wc_pool_dma32;
			struct ttm_page_pool	uc_pool_dma32;
		} ;
	};
};

static struct attribute ttm_page_pool_max = {
	.name = "pool_max_size",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_small = {
	.name = "pool_small_allocation",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_alloc_size = {
	.name = "pool_allocation_size",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute *ttm_pool_attrs[] = {
	&ttm_page_pool_max,
	&ttm_page_pool_small,
	&ttm_page_pool_alloc_size,
	NULL
};

static void ttm_pool_kobj_release(struct kobject *kobj)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	kfree(m);
}

static ssize_t ttm_pool_store(struct kobject *kobj,
		struct attribute *attr, const char *buffer, size_t size)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	int chars;
	unsigned val;

	chars = ksscanf(buffer, "%u", &val);
	if (chars == 0)
		return size;

	/* Convert KiB to number of pages */
	val = val / (PAGE_SIZE >> 10);

	if (attr == &ttm_page_pool_max)
		m->options.max_size = val;
	else if (attr == &ttm_page_pool_small)
		m->options.small = val;
	else if (attr == &ttm_page_pool_alloc_size) {
		if (val > NUM_PAGES_TO_ALLOC*8) {
			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
			return size;
		} else if (val > NUM_PAGES_TO_ALLOC) {
			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
		}
		m->options.alloc_size = val;
	}

	return size;
}
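
/*
 * Unit-conversion example (assuming 4 KiB pages): the sysfs files are in
 * KiB while the options are in pages. Writing "8192" to pool_max_size
 * stores 8192 / (PAGE_SIZE >> 10) = 2048 pages; ttm_pool_show() converts
 * back and reports 8192 again.
 */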
static ssize_t ttm_pool_show(struct kobject *kobj,
		struct attribute *attr, char *buffer)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	unsigned val = 0;

	if (attr == &ttm_page_pool_max)
		val = m->options.max_size;
	else if (attr == &ttm_page_pool_small)
		val = m->options.small;
	else if (attr == &ttm_page_pool_alloc_size)
		val = m->options.alloc_size;

	val = val * (PAGE_SIZE >> 10);

	return ksnprintf(buffer, PAGE_SIZE, "%u\n", val);
}

static const struct sysfs_ops ttm_pool_sysfs_ops = {
	.show = &ttm_pool_show,
	.store = &ttm_pool_store,
};

static struct kobj_type ttm_pool_kobj_type = {
	.release = &ttm_pool_kobj_release,
	.sysfs_ops = &ttm_pool_sysfs_ops,
	.default_attrs = ttm_pool_attrs,
};

static struct ttm_pool_manager *_manager;

#ifndef CONFIG_X86
static int set_pages_array_wb(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		unmap_page_from_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_wc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_uc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}
#endif

/**
 * Select the right pool for the requested caching state and ttm flags.
 */
static struct ttm_page_pool *ttm_get_pool(int flags,
		enum ttm_caching_state cstate)
{
	int pool_index;

	if (cstate == tt_cached)
		return NULL;

	if (cstate == tt_wc)
		pool_index = 0x0;
	else
		pool_index = 0x1;

	if (flags & TTM_PAGE_FLAG_DMA32)
		pool_index |= 0x2;

	return &_manager->pools[pool_index];
}
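
/*
 * The index math above maps onto the pools[] union as follows:
 *   tt_wc                -> pools[0] (wc_pool)
 *   tt_uncached          -> pools[1] (uc_pool)
 *   tt_wc + DMA32        -> pools[2] (wc_pool_dma32)
 *   tt_uncached + DMA32  -> pools[3] (uc_pool_dma32)
 * tt_cached has no pool; such pages are allocated and freed directly.
 */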
/* set memory back to wb and free the pages. */
static void ttm_pages_put(struct page *pages[], unsigned npages)
{
	unsigned i;

	if (set_pages_array_wb(pages, npages))
		pr_err("Failed to set %d pages to wb!\n", npages);
	for (i = 0; i < npages; ++i) {
		__free_page(pages[i]);
	}
}

static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
		unsigned freed_pages)
{
	pool->npages -= freed_pages;
	pool->nfrees += freed_pages;
}

/**
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * pages in one go.
 *
 * @pool: to free the pages from
 * @nr_free: number of pages to free; FREE_ALL_PAGES frees the whole pool
 * @gfp: GFP flags for the temporary page-array allocation
 **/
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
			      gfp_t gfp)
{
	unsigned long irq_flags;
	struct vm_page *p, *p1;
	struct page **pages_to_free;
	unsigned freed_pages = 0,
		 npages_to_free = nr_free;
	unsigned i;

	if (NUM_PAGES_TO_ALLOC < nr_free)
		npages_to_free = NUM_PAGES_TO_ALLOC;

	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
				M_DRM, gfp);
	if (!pages_to_free) {
		pr_err("Failed to allocate memory for pool free operation\n");
		return 0;
	}

restart:
	spin_lock_irqsave(&pool->lock, irq_flags);

	TAILQ_FOREACH_REVERSE_MUTABLE(p, &pool->list, pglist, pageq, p1) {
		if (freed_pages >= npages_to_free)
			break;

		pages_to_free[freed_pages++] = (struct page *)p;
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
			/* remove range of pages from the pool */
			for (i = 0; i < freed_pages; i++)
				TAILQ_REMOVE(&pool->list,
				    (struct vm_page *)pages_to_free[i], pageq);

			ttm_pool_update_free_locked(pool, freed_pages);
			/*
			 * Because changing page caching is costly
			 * we unlock the pool to prevent stalling.
			 */
			spin_unlock_irqrestore(&pool->lock, irq_flags);

			ttm_pages_put(pages_to_free, freed_pages);
			if (likely(nr_free != FREE_ALL_PAGES))
				nr_free -= freed_pages;

			if (NUM_PAGES_TO_ALLOC >= nr_free)
				npages_to_free = nr_free;
			else
				npages_to_free = NUM_PAGES_TO_ALLOC;

			freed_pages = 0;

			/* free all so restart the processing */
			if (nr_free)
				goto restart;

			/*
			 * Not allowed to fall through or break, because the
			 * following code runs inside the spinlock while we
			 * are outside it here.
			 */
			goto out;
		}
	}

	/* remove range of pages from the pool */
	if (freed_pages) {
		for (i = 0; i < freed_pages; i++)
			TAILQ_REMOVE(&pool->list,
			    (struct vm_page *)pages_to_free[i], pageq);

		ttm_pool_update_free_locked(pool, freed_pages);
		nr_free -= freed_pages;
	}

	spin_unlock_irqrestore(&pool->lock, irq_flags);

	if (freed_pages)
		ttm_pages_put(pages_to_free, freed_pages);
out:
	kfree(pages_to_free);
	return nr_free;
}
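
/*
 * Worked example (assuming 4 KiB pages and 8-byte pointers):
 * NUM_PAGES_TO_ALLOC = 4096 / 8 = 512, so freeing 2000 pages runs the
 * loop above in four batches (512 + 512 + 512 + 464), dropping
 * pool->lock around each costly caching-state change.
 */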
/**
 * Callback for the VM to request that the pools reduce the number of pages
 * held.
 *
 * XXX: (dchinner) Deadlock warning!
 *
 * We need to pass sc->gfp_mask to ttm_page_pool_free().
 *
 * This code is crying out for a shrinker per pool....
 */
static unsigned long
ttm_pool_shrink_scan(void *arg)
{
#ifdef __DragonFly__
	static struct shrink_control __sc;
	struct shrink_control *sc = &__sc;
#endif
	static DEFINE_MUTEX(lock);
	static unsigned start_pool;
	unsigned i;
	unsigned pool_offset;
	struct ttm_page_pool *pool;
	int shrink_pages = 100; /* XXXKIB */
	unsigned long freed = 0;

#ifdef __DragonFly__
	sc->gfp_mask = M_WAITOK;
#endif

	if (!mutex_trylock(&lock))
		return SHRINK_STOP;
	pool_offset = ++start_pool % NUM_POOLS;
	/* select start pool in round robin fashion */
	for (i = 0; i < NUM_POOLS; ++i) {
		unsigned nr_free = shrink_pages;

		if (shrink_pages == 0)
			break;
		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
		shrink_pages = ttm_page_pool_free(pool, nr_free,
						  sc->gfp_mask);
		freed += nr_free - shrink_pages;
	}
	mutex_unlock(&lock);
	return freed;
}
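
/*
 * Round-robin example: with NUM_POOLS == 4, successive low-memory events
 * start scanning at pools[1], pools[2], pools[3], pools[0], ... because
 * the static start_pool advances on each call, so no single pool bears
 * the brunt of every shrink request.
 */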
static unsigned long
ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned i;
	unsigned long count = 0;

	for (i = 0; i < NUM_POOLS; ++i)
		count += _manager->pools[i].npages;

	return count;
}

static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
	manager->mm_shrink.count_objects = ttm_pool_shrink_count;
	manager->lowmem_handler = EVENTHANDLER_REGISTER(vm_lowmem,
	    ttm_pool_shrink_scan, manager, EVENTHANDLER_PRI_ANY);
}

static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
	EVENTHANDLER_DEREGISTER(vm_lowmem, manager->lowmem_handler);
}

static int ttm_set_pages_caching(struct page **pages,
		enum ttm_caching_state cstate, unsigned cpages)
{
	int r = 0;

	/* Set page caching */
	switch (cstate) {
	case tt_uncached:
		r = set_pages_array_uc(pages, cpages);
		if (r)
			pr_err("Failed to set %d pages to uc!\n", cpages);
		break;
	case tt_wc:
		r = set_pages_array_wc(pages, cpages);
		if (r)
			pr_err("Failed to set %d pages to wc!\n", cpages);
		break;
	default:
		break;
	}
	return r;
}

/**
 * Free the pages that failed to change their caching state. Any pages that
 * have already changed their caching state are put back in the pool.
 */
static void ttm_handle_caching_state_failure(struct pglist *pages,
		int ttm_flags, enum ttm_caching_state cstate,
		struct page **failed_pages, unsigned cpages)
{
	unsigned i;

	/* Failed pages have to be freed */
	for (i = 0; i < cpages; ++i) {
		TAILQ_REMOVE(pages, (struct vm_page *)failed_pages[i], pageq);
		__free_page(failed_pages[i]);
	}
}

/**
 * Allocate new pages with correct caching.
 *
 * This function is reentrant if the caller updates count depending on the
 * number of pages returned in the pages array.
 */
static int ttm_alloc_new_pages(struct pglist *pages, gfp_t gfp_flags,
		int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
	struct page **caching_array;
	struct page *p;
	int r = 0;
	unsigned i, cpages;
	unsigned max_cpages = min(count,
			(unsigned)(PAGE_SIZE/sizeof(struct page *)));

	/* allocate array for page caching change */
	caching_array = kmalloc(max_cpages*sizeof(struct page *), M_DRM,
				M_WAITOK);
	if (!caching_array) {
		pr_err("Unable to allocate table for new pages\n");
		return -ENOMEM;
	}

	for (i = 0, cpages = 0; i < count; ++i) {
		p = alloc_page(gfp_flags);

		if (!p) {
			pr_err("Unable to get page %u\n", i);

			/* store already allocated pages in the pool after
			 * setting the caching state */
			if (cpages) {
				r = ttm_set_pages_caching(caching_array,
							  cstate, cpages);
				if (r)
					ttm_handle_caching_state_failure(pages,
							ttm_flags, cstate,
							caching_array, cpages);
			}
			r = -ENOMEM;
			goto out;
		}

#ifdef CONFIG_HIGHMEM
		/* gfp flags of highmem pages should never be dma32, so we
		 * should be fine in that case
		 */
		if (!PageHighMem(p))
#endif
		{
			caching_array[cpages++] = p;
			if (cpages == max_cpages) {
				r = ttm_set_pages_caching(caching_array,
							  cstate, cpages);
				if (r) {
					ttm_handle_caching_state_failure(pages,
							ttm_flags, cstate,
							caching_array, cpages);
					goto out;
				}
				cpages = 0;
			}
		}

		TAILQ_INSERT_HEAD(pages, (struct vm_page *)p, pageq);
	}

	if (cpages) {
		r = ttm_set_pages_caching(caching_array, cstate, cpages);
		if (r)
			ttm_handle_caching_state_failure(pages,
					ttm_flags, cstate,
					caching_array, cpages);
	}
out:
	kfree(caching_array);

	return r;
}
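
/*
 * Batching example (assuming 4 KiB pages, 8-byte pointers and no highmem
 * pages): max_cpages is at most 512, so a 1000-page request performs two
 * set_pages_array_*() transitions (512 + 488) instead of 1000 per-page
 * ones.
 */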
/**
 * Fill the given pool if there aren't enough pages and the requested number
 * of pages is small.
 */
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
		int ttm_flags, enum ttm_caching_state cstate, unsigned count,
		unsigned long *irq_flags)
{
	vm_page_t p;
	int r;
	unsigned cpages = 0;

	/**
	 * Only allow one pool fill operation at a time.
	 * If the pool doesn't have enough pages for the allocation, new
	 * pages are allocated from outside of the pool.
	 */
	if (pool->fill_lock)
		return;

	pool->fill_lock = true;

	/* If the allocation request is small and there are not enough
	 * pages in the pool we fill the pool up first. */
	if (count < _manager->options.small
		&& count > pool->npages) {
		struct pglist new_pages;
		unsigned alloc_size = _manager->options.alloc_size;

		/**
		 * Can't change page caching if in irqsave context. We have to
		 * drop the pool->lock.
		 */
		spin_unlock_irqrestore(&pool->lock, *irq_flags);

		TAILQ_INIT(&new_pages);
		r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
					cstate, alloc_size);
		spin_lock_irqsave(&pool->lock, *irq_flags);

		if (!r) {
			TAILQ_CONCAT(&pool->list, &new_pages, pageq);
			++pool->nrefills;
			pool->npages += alloc_size;
		} else {
			pr_err("Failed to fill pool (%p)\n", pool);
			/* If we have any pages left, put them in the pool.
			 * Count the new pages, not the whole pool list, so
			 * npages isn't double-counted. */
			TAILQ_FOREACH(p, &new_pages, pageq) {
				++cpages;
			}
			TAILQ_CONCAT(&pool->list, &new_pages, pageq);
			pool->npages += cpages;
		}
	}
	pool->fill_lock = false;
}

/**
 * Cut 'count' number of pages from the pool and put them on the return list.
 *
 * @return count of pages still required to fulfill the request.
 */
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
		struct pglist *pages,
		int ttm_flags,
		enum ttm_caching_state cstate,
		unsigned count)
{
	unsigned long irq_flags;
	vm_page_t p;
	unsigned i;

	spin_lock_irqsave(&pool->lock, irq_flags);
	ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);

	if (count >= pool->npages) {
		/* take all pages from the pool */
		TAILQ_CONCAT(pages, &pool->list, pageq);
		count -= pool->npages;
		pool->npages = 0;
		goto out;
	}
	for (i = 0; i < count; i++) {
		p = TAILQ_FIRST(&pool->list);
		TAILQ_REMOVE(&pool->list, p, pageq);
		TAILQ_INSERT_TAIL(pages, p, pageq);
	}
	pool->npages -= count;
	count = 0;
out:
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	return count;
}

/* Put all pages in pages list to correct pool to wait for reuse */
static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
		enum ttm_caching_state cstate)
{
	unsigned long irq_flags;
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	unsigned i;
	struct vm_page *page;

	if (pool == NULL) {
		/* No pool for this memory type so free the pages */
		for (i = 0; i < npages; i++) {
			if (pages[i]) {
#if 0
				if (page_count(pages[i]) != 1)
					pr_err("Erroneous page count. Leaking pages.\n");
#endif
				__free_page(pages[i]);
				pages[i] = NULL;
			}
		}
		return;
	}

	spin_lock_irqsave(&pool->lock, irq_flags);
	for (i = 0; i < npages; i++) {
		if (pages[i]) {
			page = (struct vm_page *)pages[i];
			TAILQ_INSERT_TAIL(&pool->list, page, pageq);
			pages[i] = NULL;
			pool->npages++;
		}
	}
	/* Check that we don't go over the pool limit */
	npages = 0;
	if (pool->npages > _manager->options.max_size) {
		npages = pool->npages - _manager->options.max_size;
		/* free at least NUM_PAGES_TO_ALLOC pages
		 * to reduce calls to set_memory_wb */
		if (npages < NUM_PAGES_TO_ALLOC)
			npages = NUM_PAGES_TO_ALLOC;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	if (npages)
		ttm_page_pool_free(pool, npages, GFP_KERNEL);
}
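
/*
 * Trim example (assuming max_size == 2048 and NUM_PAGES_TO_ALLOC == 512):
 * if returning pages grows the pool to 2100, the excess is 52, which the
 * code above rounds up to 512 so the expensive set_memory_wb() work is
 * amortized over a full batch.
 */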
/*
 * On success the pages array will hold npages correctly cached pages.
 */
static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
		enum ttm_caching_state cstate)
{
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	struct pglist plist;
	struct vm_page *p = NULL;
	gfp_t gfp_flags = GFP_USER;
	unsigned count;
	int r;

	/* set zero flag for page allocation if required */
	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	/* No pool for cached pages */
	if (pool == NULL) {
		if (flags & TTM_PAGE_FLAG_DMA32)
			gfp_flags |= GFP_DMA32;
		else
			gfp_flags |= GFP_HIGHUSER;

		for (r = 0; r < npages; ++r) {
			p = (struct vm_page *)alloc_page(gfp_flags);
			if (!p) {
				pr_err("Unable to allocate page\n");
				return -ENOMEM;
			}
			pages[r] = (struct page *)p;
		}
		return 0;
	}

	/* combine zero flag to pool flags */
	gfp_flags |= pool->gfp_flags;

	/* First we take pages from the pool */
	TAILQ_INIT(&plist);
	npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
	count = 0;
	TAILQ_FOREACH(p, &plist, pageq) {
		pages[count++] = (struct page *)p;
	}

	/* clear the pages coming from the pool if requested */
	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
		TAILQ_FOREACH(p, &plist, pageq) {
			pmap_zero_page(VM_PAGE_TO_PHYS(p));
		}
	}

	/* If the pool didn't have enough pages, allocate new ones. */
	if (npages > 0) {
		/* ttm_alloc_new_pages doesn't reference the pool so we can
		 * run multiple requests in parallel.
		 **/
		TAILQ_INIT(&plist);
		r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate,
					npages);
		TAILQ_FOREACH(p, &plist, pageq) {
			pages[count++] = (struct page *)p;
		}
		if (r) {
			/* If there are any pages in the list, put them back
			 * in the pool. */
			pr_err("Failed to allocate extra pages for large request\n");
			ttm_put_pages(pages, count, flags, cstate);
			return r;
		}
	}

	return 0;
}
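
/*
 * Hypothetical usage sketch (not compiled in): a caller asking for four
 * zeroed, write-combined pages and later returning them to the pool.
 */
#if 0
	struct page *my_pages[4] = { NULL };	/* hypothetical array */
	int my_flags = TTM_PAGE_FLAG_ZERO_ALLOC;

	if (ttm_get_pages(my_pages, 4, my_flags, tt_wc) == 0)
		ttm_put_pages(my_pages, 4, my_flags, tt_wc);
#endif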
static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
		char *name)
{
	lockinit(&pool->lock, "ttmpool", 0, LK_CANRECURSE);
	pool->fill_lock = false;
	TAILQ_INIT(&pool->list);
	pool->npages = pool->nfrees = 0;
	pool->gfp_flags = flags;
	pool->name = name;
}

int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
	int ret;

	WARN_ON(_manager);

	pr_info("Initializing pool allocator\n");

	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
	if (!_manager)
		return -ENOMEM;

	ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");

	ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");

	ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
				  GFP_USER | GFP_DMA32, "wc dma");

	ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
				  GFP_USER | GFP_DMA32, "uc dma");

	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
				   &glob->kobj, "pool");
	if (unlikely(ret != 0)) {
		kobject_put(&_manager->kobj);
		_manager = NULL;
		return ret;
	}

	ttm_pool_mm_shrink_init(_manager);

	return 0;
}

void ttm_page_alloc_fini(void)
{
	int i;

	pr_info("Finalizing pool allocator\n");
	ttm_pool_mm_shrink_fini(_manager);

	for (i = 0; i < NUM_POOLS; ++i)
		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES,
				   GFP_KERNEL);

	kobject_put(&_manager->kobj);
	_manager = NULL;
}

int ttm_pool_populate(struct ttm_tt *ttm)
{
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	unsigned i;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	for (i = 0; i < ttm->num_pages; ++i) {
		ret = ttm_get_pages(&ttm->pages[i], 1,
				    ttm->page_flags,
				    ttm->caching_state);
		if (ret != 0) {
			ttm_pool_unpopulate(ttm);
			return -ENOMEM;
		}

		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						false, false);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate(ttm);
			return -ENOMEM;
		}
	}

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate(ttm);
			return ret;
		}
	}

	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL(ttm_pool_populate);

void ttm_pool_unpopulate(struct ttm_tt *ttm)
{
	unsigned i;

	for (i = 0; i < ttm->num_pages; ++i) {
		if (ttm->pages[i]) {
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 ttm->pages[i]);
			ttm_put_pages(&ttm->pages[i], 1,
				      ttm->page_flags,
				      ttm->caching_state);
		}
	}
	ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL(ttm_pool_unpopulate);
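
/*
 * State notes for the two helpers above: ttm_pool_populate() only acts on
 * tt_unpopulated ttms and leaves them tt_unbound on success;
 * ttm_pool_unpopulate() returns every page to its pool (or frees it when
 * there is no pool for the caching state) and resets the ttm to
 * tt_unpopulated.
 */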
#if 0
int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
	struct ttm_page_pool *p;
	unsigned i;
	char *h[] = {"pool", "refills", "pages freed", "size"};

	if (!_manager) {
		seq_printf(m, "No pool allocator running.\n");
		return 0;
	}
	seq_printf(m, "%6s %12s %13s %8s\n",
		   h[0], h[1], h[2], h[3]);
	for (i = 0; i < NUM_POOLS; ++i) {
		p = &_manager->pools[i];

		seq_printf(m, "%6s %12ld %13ld %8d\n",
			   p->name, p->nrefills,
			   p->nfrees, p->npages);
	}
	return 0;
}
EXPORT_SYMBOL(ttm_page_alloc_debugfs);
#endif