/*
 * Copyright (c) Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 *          Pauli Nieminen <suokkos@gmail.com>
 */
/*
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
 *
 * $FreeBSD: head/sys/dev/drm2/ttm/ttm_page_alloc.c 247849 2013-03-05 16:15:34Z kib $
 */

/* simple list based uncached page pool
 * - Pool collects recently freed pages for reuse
 * - Use page->lru to keep a free list
 * - doesn't track currently in use pages
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <sys/eventhandler.h>

#include <drm/drmP.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>

#ifdef TTM_HAS_AGP
#include <asm/agp.h>
#endif

#define NUM_PAGES_TO_ALLOC	(PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION	16
#define FREE_ALL_PAGES		(~0U)
/* times are in msecs */
#define PAGE_FREE_INTERVAL	1000
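/*
 * Note: NUM_PAGES_TO_ALLOC is sized so that the temporary array of page
 * pointers used when allocating or freeing in batches fits in exactly one
 * page. With 4 KB pages and 8-byte pointers that is 512 pages per batch.
 */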
/**
 * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
 *
 * @lock: Protects the shared pool from concurrent access. Must be used with
 * irqsave/irqrestore variants because the pool allocator may be called from
 * delayed work.
 * @fill_lock: Prevent concurrent calls to fill.
 * @dma32: Whether this pool serves TTM_PAGE_FLAG_DMA32 allocations.
 * @list: Pool of free uc/wc pages for fast reuse.
 * @ttm_page_alloc_flags: Flags to pass to the page allocator.
 * @npages: Number of pages in pool.
 * @name: Pool name, for diagnostics.
 * @nfrees: Statistics counter of pages freed from this pool.
 * @nrefills: Statistics counter of pool refills.
 */
struct ttm_page_pool {
	struct lock		lock;
	bool			fill_lock;
	bool			dma32;
	struct pglist		list;
	int			ttm_page_alloc_flags;
	unsigned		npages;
	char			*name;
	unsigned long		nfrees;
	unsigned long		nrefills;
};

/**
 * Limits for the pool. They are handled without locks because the only place
 * where they may change is in sysfs store. They won't have an immediate
 * effect anyway, so forcing serialization to access them is pointless.
 */

struct ttm_pool_opts {
	unsigned	alloc_size;
	unsigned	max_size;
	unsigned	small;
};

#define NUM_POOLS 4

/**
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * Manager is a read-only object for the pool code, so it doesn't need
 * locking.
 *
 * @kobj: Kobject that exposes the pool limits via sysfs.
 * @lowmem_handler: Registered vm_lowmem event handler, used to shrink the
 * pools when the system runs low on memory.
 * @options: Limits for the pools.
 * @pools: All pool objects in use.
 **/
struct ttm_pool_manager {
	struct kobject		kobj;
	eventhandler_tag	lowmem_handler;
	struct ttm_pool_opts	options;

	union {
		struct ttm_page_pool	u_pools[NUM_POOLS];
		struct _utag {
			struct ttm_page_pool	u_wc_pool;
			struct ttm_page_pool	u_uc_pool;
			struct ttm_page_pool	u_wc_pool_dma32;
			struct ttm_page_pool	u_uc_pool_dma32;
		} _ut;
	} _u;
};

#define	pools _u.u_pools
#define	wc_pool _u._ut.u_wc_pool
#define	uc_pool _u._ut.u_uc_pool
#define	wc_pool_dma32 _u._ut.u_wc_pool_dma32
#define	uc_pool_dma32 _u._ut.u_uc_pool_dma32

static vm_memattr_t
ttm_caching_state_to_vm(enum ttm_caching_state cstate)
{

	switch (cstate) {
	case tt_uncached:
		return (VM_MEMATTR_UNCACHEABLE);
	case tt_wc:
		return (VM_MEMATTR_WRITE_COMBINING);
	case tt_cached:
		return (VM_MEMATTR_WRITE_BACK);
	}
	panic("caching state %d\n", cstate);
}

static struct attribute ttm_page_pool_max = {
	.name = "pool_max_size",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_small = {
	.name = "pool_small_allocation",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_alloc_size = {
	.name = "pool_allocation_size",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute *ttm_pool_attrs[] = {
	&ttm_page_pool_max,
	&ttm_page_pool_small,
	&ttm_page_pool_alloc_size,
	NULL
};

static void ttm_pool_kobj_release(struct kobject *kobj)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	kfree(m);
}

static ssize_t ttm_pool_store(struct kobject *kobj,
		struct attribute *attr, const char *buffer, size_t size)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	int chars;
	unsigned val;
	chars = ksscanf(buffer, "%u", &val);
	if (chars == 0)
		return size;

	/* Convert kb to number of pages */
	val = val / (PAGE_SIZE >> 10);

	if (attr == &ttm_page_pool_max)
		m->options.max_size = val;
	else if (attr == &ttm_page_pool_small)
		m->options.small = val;
	else if (attr == &ttm_page_pool_alloc_size) {
		if (val > NUM_PAGES_TO_ALLOC*8) {
			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
			return size;
		} else if (val > NUM_PAGES_TO_ALLOC) {
			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
		}
		m->options.alloc_size = val;
	}

	return size;
}

static ssize_t ttm_pool_show(struct kobject *kobj,
		struct attribute *attr, char *buffer)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	unsigned val = 0;

	if (attr == &ttm_page_pool_max)
		val = m->options.max_size;
	else if (attr == &ttm_page_pool_small)
		val = m->options.small;
	else if (attr == &ttm_page_pool_alloc_size)
		val = m->options.alloc_size;

	val = val * (PAGE_SIZE >> 10);

	return ksnprintf(buffer, PAGE_SIZE, "%u\n", val);
}
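/*
 * Worked example of the kb <-> pages conversion above (illustrative only,
 * assuming 4 KB pages): PAGE_SIZE >> 10 is the page size in kilobytes (4),
 * so writing "1024" (kb) to pool_max_size stores 1024 / 4 = 256 pages, and
 * reading the attribute back multiplies 256 * 4 = 1024 kb again.
 */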
static const struct sysfs_ops ttm_pool_sysfs_ops = {
	.show = &ttm_pool_show,
	.store = &ttm_pool_store,
};

static struct kobj_type ttm_pool_kobj_type = {
	.release = &ttm_pool_kobj_release,
	.sysfs_ops = &ttm_pool_sysfs_ops,
	.default_attrs = ttm_pool_attrs,
};

static struct ttm_pool_manager *_manager;

static int set_pages_array_wb(struct page **pages, int addrinarray)
{
	vm_page_t m;
	int i;

	for (i = 0; i < addrinarray; i++) {
		m = (struct vm_page *)pages[i];
#ifdef TTM_HAS_AGP
		unmap_page_from_agp(pages[i]);
#endif
		pmap_page_set_memattr(m, VM_MEMATTR_WRITE_BACK);
	}
	return 0;
}

static int set_pages_array_wc(struct page **pages, int addrinarray)
{
	vm_page_t m;
	int i;

	for (i = 0; i < addrinarray; i++) {
		m = (struct vm_page *)pages[i];
#ifdef TTM_HAS_AGP
		map_page_into_agp(pages[i]);
#endif
		pmap_page_set_memattr(m, VM_MEMATTR_WRITE_COMBINING);
	}
	return 0;
}

static int set_pages_array_uc(struct page **pages, int addrinarray)
{
	vm_page_t m;
	int i;

	for (i = 0; i < addrinarray; i++) {
		m = (struct vm_page *)pages[i];
#ifdef TTM_HAS_AGP
		map_page_into_agp(pages[i]);
#endif
		pmap_page_set_memattr(m, VM_MEMATTR_UNCACHEABLE);
	}
	return 0;
}

/**
 * Select the right pool for the requested caching state and ttm flags.
 */
static struct ttm_page_pool *ttm_get_pool(int flags,
		enum ttm_caching_state cstate)
{
	int pool_index;

	if (cstate == tt_cached)
		return NULL;

	if (cstate == tt_wc)
		pool_index = 0x0;
	else
		pool_index = 0x1;

	if (flags & TTM_PAGE_FLAG_DMA32)
		pool_index |= 0x2;

	return &_manager->pools[pool_index];
}
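/*
 * The resulting index matches the member order of the u_pools union above:
 *   0x0 = wc_pool, 0x1 = uc_pool, 0x2 = wc_pool_dma32, 0x3 = uc_pool_dma32.
 * Cached (write-back) requests bypass the pools entirely, since they need
 * no costly caching-attribute change on allocation or free.
 */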
/* set memory back to wb and free the pages. */
static void ttm_pages_put(struct page *pages[], unsigned npages)
{
	unsigned i;
	if (set_pages_array_wb(pages, npages))
		pr_err("Failed to set %d pages to wb!\n", npages);
	for (i = 0; i < npages; ++i)
		__free_page(pages[i]);
}

static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
		unsigned freed_pages)
{
	pool->npages -= freed_pages;
	pool->nfrees += freed_pages;
}

/**
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * number of pages in one go.
 *
 * @pool: to free the pages from
 * @nr_free: If set to FREE_ALL_PAGES, frees every page in the pool
 **/
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
{
	vm_page_t p, p1;
	struct page **pages_to_free;
	unsigned freed_pages = 0,
		 npages_to_free = nr_free;
	unsigned i;

	if (NUM_PAGES_TO_ALLOC < nr_free)
		npages_to_free = NUM_PAGES_TO_ALLOC;

	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
	    M_TEMP, M_WAITOK | M_ZERO);

restart:
	lockmgr(&pool->lock, LK_EXCLUSIVE);

	TAILQ_FOREACH_REVERSE_MUTABLE(p, &pool->list, pglist, pageq, p1) {
		if (freed_pages >= npages_to_free)
			break;

		pages_to_free[freed_pages++] = (struct page *)p;
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
			/* remove range of pages from the pool */
			for (i = 0; i < freed_pages; i++)
				TAILQ_REMOVE(&pool->list,
				    (struct vm_page *)pages_to_free[i], pageq);

			ttm_pool_update_free_locked(pool, freed_pages);
			/**
			 * Because changing page caching is costly
			 * we unlock the pool to prevent stalling.
			 */
			lockmgr(&pool->lock, LK_RELEASE);

			ttm_pages_put(pages_to_free, freed_pages);
			if (likely(nr_free != FREE_ALL_PAGES))
				nr_free -= freed_pages;

			if (NUM_PAGES_TO_ALLOC >= nr_free)
				npages_to_free = nr_free;
			else
				npages_to_free = NUM_PAGES_TO_ALLOC;

			freed_pages = 0;

			/* if there are pages left to free, restart */
			if (nr_free)
				goto restart;

			/* Not allowed to fall through or break because
			 * following context is inside spinlock while we are
			 * outside here.
			 */
			goto out;

		}
	}

	/* remove range of pages from the pool */
	if (freed_pages) {
		for (i = 0; i < freed_pages; i++)
			TAILQ_REMOVE(&pool->list,
			    (struct vm_page *)pages_to_free[i], pageq);

		ttm_pool_update_free_locked(pool, freed_pages);
		nr_free -= freed_pages;
	}

	lockmgr(&pool->lock, LK_RELEASE);

	if (freed_pages)
		ttm_pages_put(pages_to_free, freed_pages);
out:
	drm_free(pages_to_free, M_TEMP);
	return nr_free;
}
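/*
 * ttm_page_pool_free() returns how many of the requested pages could not be
 * freed (0 when the request was fully satisfied). The vm_lowmem handler
 * below uses the remainder as the freeing budget for the next pool.
 */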
/* Get a good estimate of how many pages are free in the pools */
static int ttm_pool_get_num_unused_pages(void)
{
	unsigned i;
	int total = 0;
	for (i = 0; i < NUM_POOLS; ++i)
		total += _manager->pools[i].npages;

	return total;
}

/**
 * Callback for the vm to request the pools to reduce the number of pages
 * held.
 */
static int ttm_pool_mm_shrink(void *arg)
{
	static unsigned int start_pool = 0;
	unsigned i;
	unsigned pool_offset = atomic_fetchadd_int(&start_pool, 1);
	struct ttm_page_pool *pool;
	int shrink_pages = 100;	/* XXXKIB */

	pool_offset = pool_offset % NUM_POOLS;
	/* select start pool in round robin fashion */
	for (i = 0; i < NUM_POOLS; ++i) {
		unsigned nr_free = shrink_pages;
		if (shrink_pages == 0)
			break;
		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
		shrink_pages = ttm_page_pool_free(pool, nr_free);
	}
	/* return estimated number of unused pages in pools */
	return ttm_pool_get_num_unused_pages();
}

static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{

	manager->lowmem_handler = EVENTHANDLER_REGISTER(vm_lowmem,
	    ttm_pool_mm_shrink, manager, EVENTHANDLER_PRI_ANY);
}

static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{

	EVENTHANDLER_DEREGISTER(vm_lowmem, manager->lowmem_handler);
}
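/*
 * Illustrative walk-through of the round robin above: each vm_lowmem event
 * atomically bumps start_pool, so successive invocations begin at pools
 * 0, 1, 2, 3, 0, ... A budget of 100 pages is carried across the pools;
 * whatever one pool could not satisfy is requested from the next.
 */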
static int ttm_set_pages_caching(struct page **pages,
		enum ttm_caching_state cstate, unsigned cpages)
{
	int r = 0;
	/* Set page caching */
	switch (cstate) {
	case tt_uncached:
		r = set_pages_array_uc(pages, cpages);
		if (r)
			pr_err("Failed to set %d pages to uc!\n", cpages);
		break;
	case tt_wc:
		r = set_pages_array_wc(pages, cpages);
		if (r)
			pr_err("Failed to set %d pages to wc!\n", cpages);
		break;
	default:
		break;
	}
	return r;
}

/**
 * Free the pages that failed to change their caching state: remove them
 * from the pages list, then release them.
 */
static void ttm_handle_caching_state_failure(struct pglist *pages,
		int ttm_flags, enum ttm_caching_state cstate,
		struct page **failed_pages, unsigned cpages)
{
	unsigned i;
	/* Failed pages have to be freed */
	for (i = 0; i < cpages; ++i) {
		TAILQ_REMOVE(pages, (struct vm_page *)failed_pages[i], pageq);
		__free_page(failed_pages[i]);
	}
}

/**
 * Allocate new pages with correct caching.
 *
 * This function is reentrant if the caller updates 'count' based on the
 * number of pages returned in the pages list.
 */
static int ttm_alloc_new_pages(struct pglist *pages, int ttm_alloc_flags,
		int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
	struct page **caching_array;
	struct vm_page *p;
	int r = 0;
	unsigned i, cpages, aflags;
	unsigned max_cpages = min(count,
	    (unsigned)(PAGE_SIZE/sizeof(vm_page_t)));

	aflags = VM_ALLOC_NORMAL |
	    ((ttm_alloc_flags & TTM_PAGE_FLAG_ZERO_ALLOC) != 0 ?
	    VM_ALLOC_ZERO : 0);

	/* allocate array for page caching change */
	caching_array = kmalloc(max_cpages * sizeof(vm_page_t), M_TEMP,
	    M_WAITOK | M_ZERO);

	for (i = 0, cpages = 0; i < count; ++i) {
		p = vm_page_alloc_contig(0,
		    (ttm_alloc_flags & TTM_PAGE_FLAG_DMA32) ? 0xffffffff :
		    VM_MAX_ADDRESS, PAGE_SIZE, 0,
		    1*PAGE_SIZE, ttm_caching_state_to_vm(cstate));
		if (!p) {
			pr_err("Unable to get page %u\n", i);

			/* store already allocated pages in the pool after
			 * setting the caching state */
			if (cpages) {
				r = ttm_set_pages_caching(caching_array,
				    cstate, cpages);
				if (r)
					ttm_handle_caching_state_failure(pages,
					    ttm_flags, cstate,
					    caching_array, cpages);
			}
			r = -ENOMEM;
			goto out;
		}
#if 0
		p->oflags &= ~VPO_UNMANAGED;
#endif
		p->flags |= PG_FICTITIOUS;

#ifdef CONFIG_HIGHMEM /* KIB: nop */
		/* gfp flags of highmem page should never be dma32 so we
		 * should be fine in such case
		 */
		if (!PageHighMem(p))
#endif
		{
			caching_array[cpages++] = (struct page *)p;
			if (cpages == max_cpages) {

				r = ttm_set_pages_caching(caching_array,
				    cstate, cpages);
				if (r) {
					ttm_handle_caching_state_failure(pages,
					    ttm_flags, cstate,
					    caching_array, cpages);
					goto out;
				}
				cpages = 0;
			}
		}

		TAILQ_INSERT_HEAD(pages, p, pageq);
	}

	if (cpages) {
		r = ttm_set_pages_caching(caching_array, cstate, cpages);
		if (r)
			ttm_handle_caching_state_failure(pages,
			    ttm_flags, cstate,
			    caching_array, cpages);
	}
out:
	drm_free(caching_array, M_TEMP);

	return r;
}

/**
 * Fill the given pool if there aren't enough pages and the requested number
 * of pages is small.
 */
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
		int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
	vm_page_t p;
	int r;
	unsigned cpages = 0;
	/**
	 * Only allow one pool fill operation at a time.
	 * If the pool doesn't have enough pages for the allocation, new pages
	 * are allocated from outside of the pool.
	 */
	if (pool->fill_lock)
		return;

	pool->fill_lock = true;

	/* If allocation request is small and there are not enough
	 * pages in a pool we fill the pool up first. */
	if (count < _manager->options.small
		&& count > pool->npages) {
		struct pglist new_pages;
		unsigned alloc_size = _manager->options.alloc_size;

		/**
		 * Can't change page caching if in irqsave context. We have to
		 * drop the pool->lock.
		 */
		lockmgr(&pool->lock, LK_RELEASE);

		TAILQ_INIT(&new_pages);
		r = ttm_alloc_new_pages(&new_pages, pool->ttm_page_alloc_flags,
		    ttm_flags, cstate, alloc_size);
		lockmgr(&pool->lock, LK_EXCLUSIVE);

		if (!r) {
			TAILQ_CONCAT(&pool->list, &new_pages, pageq);
			++pool->nrefills;
			pool->npages += alloc_size;
		} else {
			pr_err("Failed to fill pool (%p)\n", pool);
			/* If we have any pages left put them to the pool. */
			TAILQ_FOREACH(p, &new_pages, pageq) {
				++cpages;
			}
			TAILQ_CONCAT(&pool->list, &new_pages, pageq);
			pool->npages += cpages;
		}

	}
	pool->fill_lock = false;
}
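/*
 * Note that ttm_page_pool_fill_locked() temporarily drops pool->lock around
 * ttm_alloc_new_pages(), because changing page caching attributes (and the
 * M_WAITOK allocations) can sleep. The fill_lock flag keeps a second filler
 * out during that window, while plain allocations can still be served from
 * outside the pool.
 */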
/**
 * Cut 'count' number of pages from the pool and put them on the return list.
 *
 * @return count of pages still required to fulfill the request.
 */
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
					struct pglist *pages,
					int ttm_flags,
					enum ttm_caching_state cstate,
					unsigned count)
{
	vm_page_t p;
	unsigned i;

	lockmgr(&pool->lock, LK_EXCLUSIVE);
	ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count);

	if (count >= pool->npages) {
		/* take all pages from the pool */
		TAILQ_CONCAT(pages, &pool->list, pageq);
		count -= pool->npages;
		pool->npages = 0;
		goto out;
	}
	for (i = 0; i < count; i++) {
		p = TAILQ_FIRST(&pool->list);
		TAILQ_REMOVE(&pool->list, p, pageq);
		TAILQ_INSERT_TAIL(pages, p, pageq);
	}
	pool->npages -= count;
	count = 0;
out:
	lockmgr(&pool->lock, LK_RELEASE);
	return count;
}

/* Put all pages in the pages array into the correct pool to await reuse */
static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
			  enum ttm_caching_state cstate)
{
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	unsigned i;
	struct vm_page *page;

	if (pool == NULL) {
		/* No pool for this memory type so free the pages */
		for (i = 0; i < npages; i++) {
			if (pages[i]) {
#if 0
				if (page_count(pages[i]) != 1)
					pr_err("Erroneous page count. Leaking pages.\n");
#endif
				__free_page(pages[i]);
				pages[i] = NULL;
			}
		}
		return;
	}

	lockmgr(&pool->lock, LK_EXCLUSIVE);
	for (i = 0; i < npages; i++) {
		if (pages[i]) {
			page = (struct vm_page *)pages[i];
			TAILQ_INSERT_TAIL(&pool->list, page, pageq);
			pages[i] = NULL;
			pool->npages++;
		}
	}
	/* Check that we don't go over the pool limit */
	npages = 0;
	if (pool->npages > _manager->options.max_size) {
		npages = pool->npages - _manager->options.max_size;
		/* free at least NUM_PAGES_TO_ALLOC number of pages
		 * to reduce calls to set_memory_wb */
		if (npages < NUM_PAGES_TO_ALLOC)
			npages = NUM_PAGES_TO_ALLOC;
	}
	lockmgr(&pool->lock, LK_RELEASE);
	if (npages)
		ttm_page_pool_free(pool, npages);
}
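/*
 * Worked example of the trim above (illustrative only, with
 * NUM_PAGES_TO_ALLOC == 512): if max_size is 2048 pages and returning pages
 * pushes the pool to 2100, the overshoot is 52 pages; it is rounded up to
 * 512 so the expensive write-back conversion runs in one batch, leaving
 * 1588 pages pooled.
 */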
/*
 * On success the pages array holds npages correctly cached pages.
 */
static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
			 enum ttm_caching_state cstate)
{
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	struct pglist plist;
	struct vm_page *p = NULL;
	int gfp_flags, aflags;
	unsigned count;
	int r;

	aflags = VM_ALLOC_NORMAL |
	    ((flags & TTM_PAGE_FLAG_ZERO_ALLOC) != 0 ? VM_ALLOC_ZERO : 0);

	/* No pool for cached pages */
	if (pool == NULL) {
		for (r = 0; r < npages; ++r) {
			p = vm_page_alloc_contig(0,
			    (flags & TTM_PAGE_FLAG_DMA32) ? 0xffffffff :
			    VM_MAX_ADDRESS, PAGE_SIZE,
			    0, 1*PAGE_SIZE, ttm_caching_state_to_vm(cstate));
			if (!p) {
				pr_err("Unable to allocate page\n");
				return -ENOMEM;
			}
#if 0
			p->oflags &= ~VPO_UNMANAGED;
#endif
			p->flags |= PG_FICTITIOUS;
			pages[r] = (struct page *)p;
		}
		return 0;
	}

	/* combine zero flag to pool flags */
	gfp_flags = flags | pool->ttm_page_alloc_flags;

	/* First we take pages from the pool */
	TAILQ_INIT(&plist);
	npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
	count = 0;
	TAILQ_FOREACH(p, &plist, pageq) {
		pages[count++] = (struct page *)p;
	}

	/* clear the pages coming from the pool if requested */
	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
		TAILQ_FOREACH(p, &plist, pageq) {
			pmap_zero_page(VM_PAGE_TO_PHYS(p));
		}
	}

	/* If the pool didn't have enough pages, allocate new ones. */
	if (npages > 0) {
		/* ttm_alloc_new_pages doesn't reference pool so we can run
		 * multiple requests in parallel.
		 **/
		TAILQ_INIT(&plist);
		r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate,
		    npages);
		TAILQ_FOREACH(p, &plist, pageq) {
			pages[count++] = (struct page *)p;
		}
		if (r) {
			/* If there are any pages in the list, put them back
			 * to the pool. */
			pr_err("Failed to allocate extra pages for large request\n");
			ttm_put_pages(pages, count, flags, cstate);
			return r;
		}
	}

	return 0;
}

static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
		char *name)
{
	lockinit(&pool->lock, "ttmpool", 0, LK_CANRECURSE);
	pool->fill_lock = false;
	TAILQ_INIT(&pool->list);
	pool->npages = pool->nfrees = 0;
	pool->ttm_page_alloc_flags = flags;
	pool->name = name;
}

int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
	int ret;

	WARN_ON(_manager);

	pr_info("Initializing pool allocator\n");

	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);

	ttm_page_pool_init_locked(&_manager->wc_pool, 0, "wc");
	ttm_page_pool_init_locked(&_manager->uc_pool, 0, "uc");
	ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
	    TTM_PAGE_FLAG_DMA32, "wc dma");
	ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
	    TTM_PAGE_FLAG_DMA32, "uc dma");

	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
	    &glob->kobj, "pool");
	if (unlikely(ret != 0)) {
		kobject_put(&_manager->kobj);
		_manager = NULL;
		return ret;
	}
	ttm_pool_mm_shrink_init(_manager);

	return 0;
}

void ttm_page_alloc_fini(void)
{
	int i;

	pr_info("Finalizing pool allocator\n");
	ttm_pool_mm_shrink_fini(_manager);

	for (i = 0; i < NUM_POOLS; ++i)
		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);

	kobject_put(&_manager->kobj);
	_manager = NULL;
}
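/*
 * A minimal round-trip sketch for the two entry points above (illustrative
 * only; error handling is trimmed and the page count is arbitrary). Pages
 * for uncached/write-combined states come from (and return to) the pools,
 * while tt_cached requests bypass them.
 */
#if 0
static int
example_page_roundtrip(void)
{
	struct page *pages[4] = { NULL, NULL, NULL, NULL };
	int r;

	r = ttm_get_pages(pages, 4, TTM_PAGE_FLAG_ZERO_ALLOC, tt_wc);
	if (r != 0)
		return (r);
	/* ... use the write-combined pages ... */
	ttm_put_pages(pages, 4, TTM_PAGE_FLAG_ZERO_ALLOC, tt_wc);
	return (0);
}
#endif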
int ttm_pool_populate(struct ttm_tt *ttm)
{
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	unsigned i;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	for (i = 0; i < ttm->num_pages; ++i) {
		ret = ttm_get_pages(&ttm->pages[i], 1,
				    ttm->page_flags,
				    ttm->caching_state);
		if (ret != 0) {
			ttm_pool_unpopulate(ttm);
			return -ENOMEM;
		}

		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						false, false);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate(ttm);
			return -ENOMEM;
		}
	}

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate(ttm);
			return ret;
		}
	}

	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL(ttm_pool_populate);

void ttm_pool_unpopulate(struct ttm_tt *ttm)
{
	unsigned i;

	for (i = 0; i < ttm->num_pages; ++i) {
		if (ttm->pages[i]) {
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 ttm->pages[i]);
			ttm_put_pages(&ttm->pages[i], 1,
				      ttm->page_flags,
				      ttm->caching_state);
		}
	}
	ttm->state = tt_unpopulated;
}

#if 0
/* XXXKIB sysctl */
int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
	struct ttm_page_pool *p;
	unsigned i;
	char *h[] = {"pool", "refills", "pages freed", "size"};
	if (!_manager) {
		seq_printf(m, "No pool allocator running.\n");
		return 0;
	}
	seq_printf(m, "%6s %12s %13s %8s\n",
		   h[0], h[1], h[2], h[3]);
	for (i = 0; i < NUM_POOLS; ++i) {
		p = &_manager->pools[i];

		seq_printf(m, "%6s %12ld %13ld %8d\n",
			   p->name, p->nrefills,
			   p->nfrees, p->npages);
	}
	return 0;
}
EXPORT_SYMBOL(ttm_page_alloc_debugfs);
#endif
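/*
 * A minimal sketch of how a driver backend is expected to wire up the
 * populate/unpopulate pair (illustrative only; the example_ function names
 * are assumptions, not part of this file).
 */
#if 0
static int
example_driver_tt_populate(struct ttm_tt *ttm)
{
	/* fills ttm->pages[] from the pools and accounts the memory */
	return (ttm_pool_populate(ttm));
}

static void
example_driver_tt_unpopulate(struct ttm_tt *ttm)
{
	/* returns the pages to their pool (or frees cached pages) */
	ttm_pool_unpopulate(ttm);
}
#endif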