/*
 * Copyright (c) Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 *          Pauli Nieminen <suokkos@gmail.com>
 */
/*
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
 *
 * $FreeBSD: head/sys/dev/drm2/ttm/ttm_page_alloc.c 247849 2013-03-05 16:15:34Z kib $
 */

/* simple list based uncached page pool
 * - Pool collects recently freed pages for reuse
 * - Use page->pageq to keep a free list
 * - doesn't track currently in-use pages
 */

#include <sys/eventhandler.h>

#include <drm/drmP.h>
#include <dev/drm/ttm/ttm_bo_driver.h>
#include <dev/drm/ttm/ttm_page_alloc.h>

#ifdef TTM_HAS_AGP
#include <asm/agp.h>
#endif

#define NUM_PAGES_TO_ALLOC	(PAGE_SIZE/sizeof(vm_page_t))
#define SMALL_ALLOCATION	16
#define FREE_ALL_PAGES		(~0U)
/* times are in msecs */
#define PAGE_FREE_INTERVAL	1000

/**
 * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
 *
 * @lock: Protects the shared pool from concurrent access. Must be used with
 * irqsave/irqrestore variants because the pool allocator may be called from
 * delayed work.
 * @fill_lock: Prevent concurrent calls to fill.
 * @dma32: Whether this is a pool of dma32 pages.
 * @list: Pool of free uc/wc pages for fast reuse.
 * @ttm_page_alloc_flags: Flags to pass when allocating pages.
 * @npages: Number of pages in pool.
 * @name: Name of the pool, for diagnostics.
 * @nfrees: Statistics counter of pages freed from the pool.
 * @nrefills: Statistics counter of pool refills.
 */
struct ttm_page_pool {
	struct lock		lock;
	bool			fill_lock;
	bool			dma32;
	struct pglist		list;
	int			ttm_page_alloc_flags;
	unsigned		npages;
	char			*name;
	unsigned long		nfrees;
	unsigned long		nrefills;
};

/**
 * Limits for the pool. They are handled without locks because the only place
 * where they may change is in the sysfs store. They won't have an immediate
 * effect anyway, so forcing serialized access to them is pointless.
 */
struct ttm_pool_opts {
	unsigned	alloc_size;
	unsigned	max_size;
	unsigned	small;
};

#define NUM_POOLS 4

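/*
 * One pool exists for each combination of caching state (write-combined or
 * uncached; fully cached pages bypass the pools) and DMA32 addressability,
 * hence NUM_POOLS is 4. The union below allows addressing the pools either
 * by index or by name.
 */
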
/**
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * The manager is a read-only object for the pool code, so it doesn't need
 * locking.
 *
 * @kobj_ref: Reference count for pool manager life cycle.
 * @lowmem_handler: Event handler that shrinks the pools when the system
 * runs low on memory.
 * @options: Limits used when filling and trimming the pools.
 * @pools: All pool objects in use.
 **/
struct ttm_pool_manager {
	unsigned int kobj_ref;
	eventhandler_tag lowmem_handler;
	struct ttm_pool_opts	options;

	union {
		struct ttm_page_pool	u_pools[NUM_POOLS];
		struct _utag {
			struct ttm_page_pool	u_wc_pool;
			struct ttm_page_pool	u_uc_pool;
			struct ttm_page_pool	u_wc_pool_dma32;
			struct ttm_page_pool	u_uc_pool_dma32;
		} _ut;
	} _u;
};

#define	pools _u.u_pools
#define	wc_pool _u._ut.u_wc_pool
#define	uc_pool _u._ut.u_uc_pool
#define	wc_pool_dma32 _u._ut.u_wc_pool_dma32
#define	uc_pool_dma32 _u._ut.u_uc_pool_dma32

MALLOC_DEFINE(M_TTM_POOLMGR, "ttm_poolmgr", "TTM Pool Manager");

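/*
 * Release a pool page back to the system: the page must be wired, owned by
 * no VM object and marked fictitious, as set up by the allocation path.
 */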
static void
ttm_vm_page_free(vm_page_t m)
{

	KASSERT(m->object == NULL, ("ttm page %p is owned", m));
	KASSERT(m->wire_count == 1, ("ttm lost wire %p", m));
	KASSERT((m->flags & PG_FICTITIOUS) != 0, ("ttm lost fictitious %p", m));
#if 0
	KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("ttm got unmanaged %p", m));
	m->oflags |= VPO_UNMANAGED;
#endif
	m->flags &= ~PG_FICTITIOUS;
	vm_page_busy_wait(m, FALSE, "ttmvpf");
	vm_page_unwire(m, 0);
	vm_page_free(m);
}

static vm_memattr_t
ttm_caching_state_to_vm(enum ttm_caching_state cstate)
{

	switch (cstate) {
	case tt_uncached:
		return (VM_MEMATTR_UNCACHEABLE);
	case tt_wc:
		return (VM_MEMATTR_WRITE_COMBINING);
	case tt_cached:
		return (VM_MEMATTR_WRITE_BACK);
	}
	panic("caching state %d\n", cstate);
}

static void ttm_pool_kobj_release(struct ttm_pool_manager *m)
{

	drm_free(m, M_TTM_POOLMGR);
}

#if 0
/* XXXKIB sysctl */
static ssize_t ttm_pool_store(struct ttm_pool_manager *m,
		struct attribute *attr, const char *buffer, size_t size)
{
	int chars;
	unsigned val;

	chars = sscanf(buffer, "%u", &val);
	if (chars == 0)
		return size;

	/* Convert kb to number of pages */
	val = val / (PAGE_SIZE >> 10);

	if (attr == &ttm_page_pool_max)
		m->options.max_size = val;
	else if (attr == &ttm_page_pool_small)
		m->options.small = val;
	else if (attr == &ttm_page_pool_alloc_size) {
		if (val > NUM_PAGES_TO_ALLOC*8) {
			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
			return size;
		} else if (val > NUM_PAGES_TO_ALLOC) {
			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
		}
		m->options.alloc_size = val;
	}

	return size;
}

static ssize_t ttm_pool_show(struct ttm_pool_manager *m,
		struct attribute *attr, char *buffer)
{
	unsigned val = 0;

	if (attr == &ttm_page_pool_max)
		val = m->options.max_size;
	else if (attr == &ttm_page_pool_small)
		val = m->options.small;
	else if (attr == &ttm_page_pool_alloc_size)
		val = m->options.alloc_size;

	val = val * (PAGE_SIZE >> 10);

	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}
#endif

static struct ttm_pool_manager *_manager;

static int set_pages_array_wb(vm_page_t *pages, int addrinarray)
{
	vm_page_t m;
	int i;

	for (i = 0; i < addrinarray; i++) {
		m = pages[i];
#ifdef TTM_HAS_AGP
		unmap_page_from_agp(m);
#endif
		pmap_page_set_memattr(m, VM_MEMATTR_WRITE_BACK);
	}
	return 0;
}

static int set_pages_array_wc(vm_page_t *pages, int addrinarray)
{
	vm_page_t m;
	int i;

	for (i = 0; i < addrinarray; i++) {
		m = pages[i];
#ifdef TTM_HAS_AGP
		map_page_into_agp(pages[i]);
#endif
		pmap_page_set_memattr(m, VM_MEMATTR_WRITE_COMBINING);
	}
	return 0;
}

static int set_pages_array_uc(vm_page_t *pages, int addrinarray)
{
	vm_page_t m;
	int i;

	for (i = 0; i < addrinarray; i++) {
		m = pages[i];
#ifdef TTM_HAS_AGP
		map_page_into_agp(pages[i]);
#endif
		pmap_page_set_memattr(m, VM_MEMATTR_UNCACHEABLE);
	}
	return 0;
}

/**
 * Select the right pool for the requested caching state and ttm flags.
 */
static struct ttm_page_pool *ttm_get_pool(int flags,
		enum ttm_caching_state cstate)
{
	int pool_index;

	if (cstate == tt_cached)
		return NULL;

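	/*
	 * Encode the pool index: bit 0 selects uncached over write-combined,
	 * bit 1 selects the DMA32 variant. This matches the order of the
	 * pools[] array in struct ttm_pool_manager.
	 */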
	if (cstate == tt_wc)
		pool_index = 0x0;
	else
		pool_index = 0x1;

	if (flags & TTM_PAGE_FLAG_DMA32)
		pool_index |= 0x2;

	return &_manager->pools[pool_index];
}

/* set memory back to wb and free the pages. */
static void ttm_pages_put(vm_page_t *pages, unsigned npages)
{
	unsigned i;

	/* Our VM handles vm memattr automatically on the page free. */
	if (set_pages_array_wb(pages, npages))
		kprintf("[TTM] Failed to set %d pages to wb!\n", npages);
	for (i = 0; i < npages; ++i)
		ttm_vm_page_free(pages[i]);
}

static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
		unsigned freed_pages)
{
	pool->npages -= freed_pages;
	pool->nfrees += freed_pages;
}

/**
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * pages in one go.
 *
 * @pool: pool to free the pages from
 * @nr_free: number of pages to free; FREE_ALL_PAGES frees the whole pool
 * @return number of requested pages that could not be freed
 **/
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
{
	vm_page_t p, p1;
	vm_page_t *pages_to_free;
	unsigned freed_pages = 0,
		 npages_to_free = nr_free;
	unsigned i;

	if (NUM_PAGES_TO_ALLOC < nr_free)
		npages_to_free = NUM_PAGES_TO_ALLOC;

	pages_to_free = kmalloc(npages_to_free * sizeof(vm_page_t),
	    M_TEMP, M_WAITOK | M_ZERO);

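	/*
	 * Pages are freed in batches of at most NUM_PAGES_TO_ALLOC. The pool
	 * lock is dropped while the caching state of a batch is reset, then
	 * we jump back here to reacquire it and continue.
	 */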
restart:
	lockmgr(&pool->lock, LK_EXCLUSIVE);

	TAILQ_FOREACH_REVERSE_MUTABLE(p, &pool->list, pglist, pageq, p1) {
		if (freed_pages >= npages_to_free)
			break;

		pages_to_free[freed_pages++] = p;
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
			/* remove range of pages from the pool */
			for (i = 0; i < freed_pages; i++)
				TAILQ_REMOVE(&pool->list, pages_to_free[i], pageq);

			ttm_pool_update_free_locked(pool, freed_pages);
			/**
			 * Because changing page caching is costly
			 * we unlock the pool to prevent stalling.
			 */
			lockmgr(&pool->lock, LK_RELEASE);

			ttm_pages_put(pages_to_free, freed_pages);
			if (likely(nr_free != FREE_ALL_PAGES))
				nr_free -= freed_pages;

			if (NUM_PAGES_TO_ALLOC >= nr_free)
				npages_to_free = nr_free;
			else
				npages_to_free = NUM_PAGES_TO_ALLOC;

			freed_pages = 0;

			/* if there is more to free, restart the processing */
			if (nr_free)
				goto restart;

			/* Not allowed to fall through or break because
			 * following context is inside spinlock while we are
			 * outside here.
			 */
			goto out;

		}
	}

	/* remove range of pages from the pool */
	if (freed_pages) {
		for (i = 0; i < freed_pages; i++)
			TAILQ_REMOVE(&pool->list, pages_to_free[i], pageq);

		ttm_pool_update_free_locked(pool, freed_pages);
		nr_free -= freed_pages;
	}

	lockmgr(&pool->lock, LK_RELEASE);

	if (freed_pages)
		ttm_pages_put(pages_to_free, freed_pages);
out:
	drm_free(pages_to_free, M_TEMP);
	return nr_free;
}

/* Get a good estimate of how many pages are free in the pools */
static int ttm_pool_get_num_unused_pages(void)
{
	unsigned i;
	int total = 0;

	for (i = 0; i < NUM_POOLS; ++i)
		total += _manager->pools[i].npages;

	return total;
}

/**
 * Callback for the VM to request the pools to reduce the number of pages
 * held.
 */
static int ttm_pool_mm_shrink(void *arg)
{
	static unsigned int start_pool = 0;
	unsigned i;
	unsigned pool_offset = atomic_fetchadd_int(&start_pool, 1);
	struct ttm_page_pool *pool;
	int shrink_pages = 100; /* XXXKIB */

	pool_offset = pool_offset % NUM_POOLS;
	/* select start pool in round robin fashion */
	for (i = 0; i < NUM_POOLS; ++i) {
		unsigned nr_free = shrink_pages;
		if (shrink_pages == 0)
			break;
		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
		shrink_pages = ttm_page_pool_free(pool, nr_free);
	}
	/* return estimated number of unused pages in pools */
	return ttm_pool_get_num_unused_pages();
}

static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{

	manager->lowmem_handler = EVENTHANDLER_REGISTER(vm_lowmem,
	    ttm_pool_mm_shrink, manager, EVENTHANDLER_PRI_ANY);
}

static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{

	EVENTHANDLER_DEREGISTER(vm_lowmem, manager->lowmem_handler);
}

static int ttm_set_pages_caching(vm_page_t *pages,
		enum ttm_caching_state cstate, unsigned cpages)
{
	int r = 0;

	/* Set page caching */
	switch (cstate) {
	case tt_uncached:
		r = set_pages_array_uc(pages, cpages);
		if (r)
			kprintf("[TTM] Failed to set %d pages to uc!\n", cpages);
		break;
	case tt_wc:
		r = set_pages_array_wc(pages, cpages);
		if (r)
			kprintf("[TTM] Failed to set %d pages to wc!\n", cpages);
		break;
	default:
		break;
	}
	return r;
}

/**
 * Free the pages that failed to change caching state. If there are any
 * pages that have already changed their caching state, put them back in
 * the pool.
 */
static void ttm_handle_caching_state_failure(struct pglist *pages,
		int ttm_flags, enum ttm_caching_state cstate,
		vm_page_t *failed_pages, unsigned cpages)
{
	unsigned i;

	/* Failed pages have to be freed */
	for (i = 0; i < cpages; ++i) {
		TAILQ_REMOVE(pages, failed_pages[i], pageq);
		ttm_vm_page_free(failed_pages[i]);
	}
}

/**
 * Allocate new pages with correct caching.
 *
 * This function is reentrant if the caller updates count depending on the
 * number of pages returned in the pages array.
 */
static int ttm_alloc_new_pages(struct pglist *pages, int ttm_alloc_flags,
		int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
	vm_page_t *caching_array;
	vm_page_t p;
	int r = 0;
	unsigned i, cpages, aflags;
	unsigned max_cpages = min(count,
	    (unsigned)(PAGE_SIZE/sizeof(vm_page_t)));

	aflags = VM_ALLOC_NORMAL |
	    ((ttm_alloc_flags & TTM_PAGE_FLAG_ZERO_ALLOC) != 0 ?
	    VM_ALLOC_ZERO : 0);

	/* allocate array for page caching change */
	caching_array = kmalloc(max_cpages * sizeof(vm_page_t), M_TEMP,
	    M_WAITOK | M_ZERO);

	for (i = 0, cpages = 0; i < count; ++i) {
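		/*
		 * Grab one page at a time straight from the VM with the
		 * memory attribute for the requested caching state;
		 * restrict allocation to the low 4G if the caller asked
		 * for DMA32-capable pages.
		 */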
		p = vm_page_alloc_contig(0,
		    (ttm_alloc_flags & TTM_PAGE_FLAG_DMA32) ? 0xffffffff :
		    VM_MAX_ADDRESS, PAGE_SIZE, 0,
		    1*PAGE_SIZE, ttm_caching_state_to_vm(cstate));
		if (!p) {
			kprintf("[TTM] Unable to get page %u\n", i);

			/* store already allocated pages in the pool after
			 * setting the caching state */
			if (cpages) {
				r = ttm_set_pages_caching(caching_array,
				    cstate, cpages);
				if (r)
					ttm_handle_caching_state_failure(pages,
					    ttm_flags, cstate,
					    caching_array, cpages);
			}
			r = -ENOMEM;
			goto out;
		}
#if 0
		p->oflags &= ~VPO_UNMANAGED;
#endif
		p->flags |= PG_FICTITIOUS;

#ifdef CONFIG_HIGHMEM /* KIB: nop */
		/* gfp flags of highmem page should never be dma32 so we
		 * should be fine in such case
		 */
		if (!PageHighMem(p))
#endif
		{
			caching_array[cpages++] = p;
			if (cpages == max_cpages) {
				r = ttm_set_pages_caching(caching_array,
				    cstate, cpages);
				if (r) {
					ttm_handle_caching_state_failure(pages,
					    ttm_flags, cstate,
					    caching_array, cpages);
					goto out;
				}
				cpages = 0;
			}
		}

		TAILQ_INSERT_HEAD(pages, p, pageq);
	}

	if (cpages) {
		r = ttm_set_pages_caching(caching_array, cstate, cpages);
		if (r)
			ttm_handle_caching_state_failure(pages,
			    ttm_flags, cstate,
			    caching_array, cpages);
	}
out:
	drm_free(caching_array, M_TEMP);

	return r;
}

/**
 * Fill the given pool if there aren't enough pages and the requested number
 * of pages is small.
 */
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
		int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
	vm_page_t p;
	int r;
	unsigned cpages = 0;

	/**
	 * Only allow one pool fill operation at a time.
	 * If the pool doesn't have enough pages for the allocation, new
	 * pages are allocated from outside of the pool.
	 */
	if (pool->fill_lock)
		return;

	pool->fill_lock = true;

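	/*
	 * fill_lock stays set while pool->lock is dropped for the actual
	 * allocation below, so concurrent callers fall back to allocating
	 * outside the pool instead of piling up on the refill.
	 */
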
	/* If the allocation request is small and there are not enough
	 * pages in the pool we fill the pool up first. */
	if (count < _manager->options.small
		&& count > pool->npages) {
		struct pglist new_pages;
		unsigned alloc_size = _manager->options.alloc_size;

		/**
		 * Can't change page caching if in irqsave context. We have to
		 * drop the pool->lock.
		 */
		lockmgr(&pool->lock, LK_RELEASE);

		TAILQ_INIT(&new_pages);
		r = ttm_alloc_new_pages(&new_pages, pool->ttm_page_alloc_flags,
		    ttm_flags, cstate, alloc_size);
		lockmgr(&pool->lock, LK_EXCLUSIVE);

		if (!r) {
			TAILQ_CONCAT(&pool->list, &new_pages, pageq);
			++pool->nrefills;
			pool->npages += alloc_size;
		} else {
			kprintf("[TTM] Failed to fill pool (%p)\n", pool);
			/* If any pages were allocated before the failure,
			 * put them into the pool; count only the newly
			 * allocated pages. */
			TAILQ_FOREACH(p, &new_pages, pageq) {
				++cpages;
			}
			TAILQ_CONCAT(&pool->list, &new_pages, pageq);
			pool->npages += cpages;
		}

	}
	pool->fill_lock = false;
}

/**
 * Cut 'count' number of pages from the pool and put them on the return list.
 *
 * @return count of pages still required to fulfill the request.
 */
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
		struct pglist *pages,
		int ttm_flags,
		enum ttm_caching_state cstate,
		unsigned count)
{
	vm_page_t p;
	unsigned i;

	lockmgr(&pool->lock, LK_EXCLUSIVE);
	ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count);

	if (count >= pool->npages) {
		/* take all pages from the pool */
		TAILQ_CONCAT(pages, &pool->list, pageq);
		count -= pool->npages;
		pool->npages = 0;
		goto out;
	}
	for (i = 0; i < count; i++) {
		p = TAILQ_FIRST(&pool->list);
		TAILQ_REMOVE(&pool->list, p, pageq);
		TAILQ_INSERT_TAIL(pages, p, pageq);
	}
	pool->npages -= count;
	count = 0;
out:
	lockmgr(&pool->lock, LK_RELEASE);
	return count;
}

/* Put all pages in the pages list into the correct pool to wait for reuse */
static void ttm_put_pages(vm_page_t *pages, unsigned npages, int flags,
		enum ttm_caching_state cstate)
{
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	unsigned i;

	if (pool == NULL) {
		/* No pool for this memory type so free the pages */
		for (i = 0; i < npages; i++) {
			if (pages[i]) {
				ttm_vm_page_free(pages[i]);
				pages[i] = NULL;
			}
		}
		return;
	}

	lockmgr(&pool->lock, LK_EXCLUSIVE);
	for (i = 0; i < npages; i++) {
		if (pages[i]) {
			TAILQ_INSERT_TAIL(&pool->list, pages[i], pageq);
			pages[i] = NULL;
			pool->npages++;
		}
	}
	/* Check that we don't go over the pool limit */
	npages = 0;
	if (pool->npages > _manager->options.max_size) {
		npages = pool->npages - _manager->options.max_size;
		/* free at least NUM_PAGES_TO_ALLOC pages
		 * to reduce calls to set_memory_wb */
		if (npages < NUM_PAGES_TO_ALLOC)
			npages = NUM_PAGES_TO_ALLOC;
	}
	lockmgr(&pool->lock, LK_RELEASE);
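	/* Trim outside the lock; ttm_page_pool_free() takes it itself. */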
	if (npages)
		ttm_page_pool_free(pool, npages);
}

/*
 * On success the pages list will hold count number of correctly
 * cached pages.
 */
static int ttm_get_pages(vm_page_t *pages, unsigned npages, int flags,
		enum ttm_caching_state cstate)
{
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	struct pglist plist;
	vm_page_t p = NULL;
	int gfp_flags, aflags;
	unsigned count;
	int r;

	aflags = VM_ALLOC_NORMAL |
	    ((flags & TTM_PAGE_FLAG_ZERO_ALLOC) != 0 ? VM_ALLOC_ZERO : 0);

	/* No pool for cached pages */
	if (pool == NULL) {
		for (r = 0; r < npages; ++r) {
			p = vm_page_alloc_contig(0,
			    (flags & TTM_PAGE_FLAG_DMA32) ? 0xffffffff :
			    VM_MAX_ADDRESS, PAGE_SIZE,
			    0, 1*PAGE_SIZE, ttm_caching_state_to_vm(cstate));
			if (!p) {
				kprintf("[TTM] Unable to allocate page\n");
				return -ENOMEM;
			}
#if 0
			p->oflags &= ~VPO_UNMANAGED;
#endif
			p->flags |= PG_FICTITIOUS;
			pages[r] = p;
		}
		return 0;
	}

	/* combine zero flag to pool flags */
	gfp_flags = flags | pool->ttm_page_alloc_flags;

	/* First we take pages from the pool */
	TAILQ_INIT(&plist);
	npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
	count = 0;
	TAILQ_FOREACH(p, &plist, pageq) {
		pages[count++] = p;
	}

	/* clear the pages coming from the pool if requested */
	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
		TAILQ_FOREACH(p, &plist, pageq) {
			pmap_zero_page(VM_PAGE_TO_PHYS(p));
		}
	}

	/* If the pool didn't have enough pages, allocate new ones. */
	if (npages > 0) {
		/* ttm_alloc_new_pages doesn't reference the pool so we can
		 * run multiple requests in parallel.
		 **/
		TAILQ_INIT(&plist);
		r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate,
		    npages);
		TAILQ_FOREACH(p, &plist, pageq) {
			pages[count++] = p;
		}
		if (r) {
			/* If there are any pages in the list, put them back
			 * to the pool. */
			kprintf("[TTM] Failed to allocate extra pages for large request\n");
			ttm_put_pages(pages, count, flags, cstate);
			return r;
		}
	}

	return 0;
}

static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
    char *name)
{

	lockinit(&pool->lock, "ttmpool", 0, LK_CANRECURSE);
	pool->fill_lock = false;
	TAILQ_INIT(&pool->list);
	pool->npages = pool->nfrees = 0;
	pool->ttm_page_alloc_flags = flags;
	pool->name = name;
}

int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{

	if (_manager != NULL)
		kprintf("[TTM] manager != NULL\n");
	kprintf("[TTM] Initializing pool allocator\n");

	_manager = kmalloc(sizeof(*_manager), M_TTM_POOLMGR, M_WAITOK | M_ZERO);

	ttm_page_pool_init_locked(&_manager->wc_pool, 0, "wc");
	ttm_page_pool_init_locked(&_manager->uc_pool, 0, "uc");
	ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
	    TTM_PAGE_FLAG_DMA32, "wc dma");
	ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
	    TTM_PAGE_FLAG_DMA32, "uc dma");

	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

	refcount_init(&_manager->kobj_ref, 1);
	ttm_pool_mm_shrink_init(_manager);

	return 0;
}

void ttm_page_alloc_fini(void)
{
	int i;

	kprintf("[TTM] Finalizing pool allocator\n");
	ttm_pool_mm_shrink_fini(_manager);

	for (i = 0; i < NUM_POOLS; ++i)
		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);

	if (refcount_release(&_manager->kobj_ref))
		ttm_pool_kobj_release(_manager);
	_manager = NULL;
}

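/*
 * Back every page of the ttm with pooled (or freshly allocated) memory and
 * account each page against the global memory limit; on failure the ttm is
 * returned to the unpopulated state.
 */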
int ttm_pool_populate(struct ttm_tt *ttm)
{
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	unsigned i;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	for (i = 0; i < ttm->num_pages; ++i) {
		ret = ttm_get_pages(&ttm->pages[i], 1,
		    ttm->page_flags,
		    ttm->caching_state);
		if (ret != 0) {
			ttm_pool_unpopulate(ttm);
			return -ENOMEM;
		}

		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
		    false, false);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate(ttm);
			return -ENOMEM;
		}
	}

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate(ttm);
			return ret;
		}
	}

	ttm->state = tt_unbound;
	return 0;
}

void ttm_pool_unpopulate(struct ttm_tt *ttm)
{
	unsigned i;

	for (i = 0; i < ttm->num_pages; ++i) {
		if (ttm->pages[i]) {
			ttm_mem_global_free_page(ttm->glob->mem_glob,
			    ttm->pages[i]);
			ttm_put_pages(&ttm->pages[i], 1,
			    ttm->page_flags,
			    ttm->caching_state);
		}
	}
	ttm->state = tt_unpopulated;
}

#if 0
/* XXXKIB sysctl */
int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
	struct ttm_page_pool *p;
	unsigned i;
	char *h[] = {"pool", "refills", "pages freed", "size"};

	if (!_manager) {
		seq_printf(m, "No pool allocator running.\n");
		return 0;
	}
	seq_printf(m, "%6s %12s %13s %8s\n",
	    h[0], h[1], h[2], h[3]);
	for (i = 0; i < NUM_POOLS; ++i) {
		p = &_manager->pools[i];

		seq_printf(m, "%6s %12ld %13ld %8d\n",
		    p->name, p->nrefills,
		    p->nfrees, p->npages);
	}
	return 0;
}
#endif