/*	$NetBSD: subr_pool.c,v 1.188 2011/01/17 07:36:58 uebayasi Exp $	*/

/*-
 * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008, 2010
 *     The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.188 2011/01/17 07:36:58 uebayasi Exp $");

#include "opt_ddb.h"
#include "opt_pool.h"
#include "opt_poollog.h"
#include "opt_lockdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitops.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/syslog.h>
#include <sys/debug.h>
#include <sys/lockdebug.h>
#include <sys/xcall.h>
#include <sys/cpu.h>
#include <sys/atomic.h>

#include <uvm/uvm_extern.h>
#ifdef DIAGNOSTIC
#include <uvm/uvm_km.h>	/* uvm_km_va_drain */
#endif

/*
 * Pool resource management utility.
 *
 * Memory is allocated in pages which are split into pieces according to
 * the pool item size.  Each page is kept on one of three lists in the
 * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
 * for empty, full and partially-full pages respectively.  The individual
 * pool items are on a linked list headed by `ph_itemlist' in each page
 * header.  The memory for building the page list is either taken from
 * the allocated pages themselves (for small pool items) or taken from
 * an internal pool of page headers (`phpool').
 */
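/*
 * Illustrative usage sketch (not compiled here; the "frob" pool and
 * struct frob are hypothetical names used only for illustration):
 * a typical client declares a pool, initializes it once, then gets
 * and puts items.
 *
 *	static struct pool frob_pool;
 *
 *	pool_init(&frob_pool, sizeof(struct frob), 0, 0, 0,
 *	    "frobpl", &pool_allocator_nointr, IPL_NONE);
 *
 *	struct frob *f = pool_get(&frob_pool, PR_WAITOK);
 *	...
 *	pool_put(&frob_pool, f);
 *
 *	pool_destroy(&frob_pool);
 */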
/* List of all pools */
static TAILQ_HEAD(, pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);

/* Private pool for page header structures */
#define	PHPOOL_MAX	8
static struct pool phpool[PHPOOL_MAX];
#define	PHPOOL_FREELIST_NELEM(idx) \
	(((idx) == 0) ? 0 : BITMAP_SIZE * (1 << (idx)))

#ifdef POOL_SUBPAGE
/* Pool of subpages for use by normal pools. */
static struct pool psppool;
#endif

static SLIST_HEAD(, pool_allocator) pa_deferinitq =
    SLIST_HEAD_INITIALIZER(pa_deferinitq);

static void *pool_page_alloc_meta(struct pool *, int);
static void pool_page_free_meta(struct pool *, void *);

/* allocator for pool metadata */
struct pool_allocator pool_allocator_meta = {
	pool_page_alloc_meta, pool_page_free_meta,
	.pa_backingmapptr = &kmem_map,
};

/* # of seconds to retain page after last use */
int pool_inactive_time = 10;

/* Next candidate for drainage (see pool_drain()) */
static struct pool *drainpp;

/* This lock protects both pool_head and drainpp. */
static kmutex_t pool_head_lock;
static kcondvar_t pool_busy;

/* This lock protects initialization of a potentially shared pool allocator */
static kmutex_t pool_allocator_lock;

typedef uint32_t pool_item_bitmap_t;
#define	BITMAP_SIZE	(CHAR_BIT * sizeof(pool_item_bitmap_t))
#define	BITMAP_MASK	(BITMAP_SIZE - 1)

struct pool_item_header {
	/* Page headers */
	LIST_ENTRY(pool_item_header)
				ph_pagelist;	/* pool page list */
	SPLAY_ENTRY(pool_item_header)
				ph_node;	/* Off-page page headers */
	void *			ph_page;	/* this page's address */
	uint32_t		ph_time;	/* last referenced */
	uint16_t		ph_nmissing;	/* # of chunks in use */
	uint16_t		ph_off;		/* start offset in page */
	union {
		/* !PR_NOTOUCH */
		struct {
			LIST_HEAD(, pool_item)
				phu_itemlist;	/* chunk list for this page */
		} phu_normal;
		/* PR_NOTOUCH */
		struct {
			pool_item_bitmap_t phu_bitmap[1];
		} phu_notouch;
	} ph_u;
};
#define	ph_itemlist	ph_u.phu_normal.phu_itemlist
#define	ph_bitmap	ph_u.phu_notouch.phu_bitmap

struct pool_item {
#ifdef DIAGNOSTIC
	u_int pi_magic;
#endif
#define	PI_MAGIC 0xdeaddeadU
	/* Other entries use only this list entry */
	LIST_ENTRY(pool_item)	pi_list;
};

#define	POOL_NEEDS_CATCHUP(pp)						\
	((pp)->pr_nitems < (pp)->pr_minitems)

/*
 * Pool cache management.
 *
 * Pool caches provide a way for constructed objects to be cached by the
 * pool subsystem.  This can lead to performance improvements by avoiding
 * needless object construction/destruction; it is deferred until
 * absolutely necessary.
 *
 * Caches are grouped into cache groups.  Each cache group references up
 * to PCG_NUMOBJECTS constructed objects.  When a cache allocates an
 * object from the pool, it calls the object's constructor and places it
 * into a cache group.  When a cache group frees an object back to the
 * pool, it first calls the object's destructor.  This allows the object
 * to persist in constructed form while freed to the cache.
 *
 * The pool references each cache, so that when a pool is drained by the
 * pagedaemon, it can drain each individual cache as well.  Each time a
 * cache is drained, the most idle cache group is freed to the pool in
 * its entirety.
 *
 * Pool caches are laid on top of pools.  By layering them, we can avoid
 * the complexity of cache management for pools which would not benefit
 * from it.
 */

static struct pool pcg_normal_pool;
static struct pool pcg_large_pool;
static struct pool cache_pool;
static struct pool cache_cpu_pool;
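/*
 * Illustrative pool_cache usage sketch (hypothetical "frob" names,
 * not compiled here): callers that want constructed-object caching
 * use the pool_cache_* interface defined later in this file.
 *
 *	pool_cache_t frob_cache;
 *
 *	frob_cache = pool_cache_init(sizeof(struct frob), coherency_unit,
 *	    0, 0, "frobcache", NULL, IPL_NONE, frob_ctor, frob_dtor, NULL);
 *
 *	f = pool_cache_get(frob_cache, PR_WAITOK);
 *	...
 *	pool_cache_put(frob_cache, f);
 */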
/* List of all caches. */
TAILQ_HEAD(,pool_cache) pool_cache_head =
    TAILQ_HEAD_INITIALIZER(pool_cache_head);

int pool_cache_disable;		/* global disable for caching */
static const pcg_t pcg_dummy;	/* zero sized: always empty, yet always full */

static bool	pool_cache_put_slow(pool_cache_cpu_t *, int, void *);
static bool	pool_cache_get_slow(pool_cache_cpu_t *, int,
		    void **, paddr_t *, int);
static void	pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);
static void	pool_cache_invalidate_groups(pool_cache_t, pcg_t *);
static void	pool_cache_invalidate_cpu(pool_cache_t, u_int);
static void	pool_cache_xcall(pool_cache_t);

static int	pool_catchup(struct pool *);
static void	pool_prime_page(struct pool *, void *,
		    struct pool_item_header *);
static void	pool_update_curpage(struct pool *);

static int	pool_grow(struct pool *, int);
static void	*pool_allocator_alloc(struct pool *, int);
static void	pool_allocator_free(struct pool *, void *);

static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
	void (*)(const char *, ...));
static void pool_print1(struct pool *, const char *,
	void (*)(const char *, ...));

static int pool_chk_page(struct pool *, const char *,
			 struct pool_item_header *);

/*
 * Pool log entry.  An array of these is allocated in pool_init().
 */
struct pool_log {
	const char	*pl_file;
	long		pl_line;
	int		pl_action;
#define	PRLOG_GET	1
#define	PRLOG_PUT	2
	void		*pl_addr;
};

#ifdef POOL_DIAGNOSTIC
/* Number of entries in pool log buffers */
#ifndef POOL_LOGSIZE
#define	POOL_LOGSIZE	10
#endif

int pool_logsize = POOL_LOGSIZE;

static inline void
pr_log(struct pool *pp, void *v, int action, const char *file, long line)
{
	int n;
	struct pool_log *pl;

	if ((pp->pr_roflags & PR_LOGGING) == 0)
		return;

	if (pp->pr_log == NULL) {
		if (kmem_map != NULL)
			pp->pr_log = malloc(
				pool_logsize * sizeof(struct pool_log),
				M_TEMP, M_NOWAIT | M_ZERO);
		if (pp->pr_log == NULL)
			return;
		pp->pr_curlogentry = 0;
		pp->pr_logsize = pool_logsize;
	}

	/*
	 * Fill in the current entry.  Wrap around and overwrite
	 * the oldest entry if necessary.
	 */
	n = pp->pr_curlogentry;
	pl = &pp->pr_log[n];
	pl->pl_file = file;
	pl->pl_line = line;
	pl->pl_action = action;
	pl->pl_addr = v;
	if (++n >= pp->pr_logsize)
		n = 0;
	pp->pr_curlogentry = n;
}

static void
pr_printlog(struct pool *pp, struct pool_item *pi,
    void (*pr)(const char *, ...))
{
	int i = pp->pr_logsize;
	int n = pp->pr_curlogentry;

	if (pp->pr_log == NULL)
		return;

	/*
	 * Print all entries in this pool's log.
	 */
	while (i-- > 0) {
		struct pool_log *pl = &pp->pr_log[n];
		if (pl->pl_action != 0) {
			if (pi == NULL || pi == pl->pl_addr) {
				(*pr)("\tlog entry %d:\n", i);
				(*pr)("\t\taction = %s, addr = %p\n",
				    pl->pl_action == PRLOG_GET ?
"get" : "put", 293 pl->pl_addr); 294 (*pr)("\t\tfile: %s at line %lu\n", 295 pl->pl_file, pl->pl_line); 296 } 297 } 298 if (++n >= pp->pr_logsize) 299 n = 0; 300 } 301 } 302 303 static inline void 304 pr_enter(struct pool *pp, const char *file, long line) 305 { 306 307 if (__predict_false(pp->pr_entered_file != NULL)) { 308 printf("pool %s: reentrancy at file %s line %ld\n", 309 pp->pr_wchan, file, line); 310 printf(" previous entry at file %s line %ld\n", 311 pp->pr_entered_file, pp->pr_entered_line); 312 panic("pr_enter"); 313 } 314 315 pp->pr_entered_file = file; 316 pp->pr_entered_line = line; 317 } 318 319 static inline void 320 pr_leave(struct pool *pp) 321 { 322 323 if (__predict_false(pp->pr_entered_file == NULL)) { 324 printf("pool %s not entered?\n", pp->pr_wchan); 325 panic("pr_leave"); 326 } 327 328 pp->pr_entered_file = NULL; 329 pp->pr_entered_line = 0; 330 } 331 332 static inline void 333 pr_enter_check(struct pool *pp, void (*pr)(const char *, ...)) 334 { 335 336 if (pp->pr_entered_file != NULL) 337 (*pr)("\n\tcurrently entered from file %s line %ld\n", 338 pp->pr_entered_file, pp->pr_entered_line); 339 } 340 #else 341 #define pr_log(pp, v, action, file, line) 342 #define pr_printlog(pp, pi, pr) 343 #define pr_enter(pp, file, line) 344 #define pr_leave(pp) 345 #define pr_enter_check(pp, pr) 346 #endif /* POOL_DIAGNOSTIC */ 347 348 static inline unsigned int 349 pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph, 350 const void *v) 351 { 352 const char *cp = v; 353 unsigned int idx; 354 355 KASSERT(pp->pr_roflags & PR_NOTOUCH); 356 idx = (cp - (char *)ph->ph_page - ph->ph_off) / pp->pr_size; 357 KASSERT(idx < pp->pr_itemsperpage); 358 return idx; 359 } 360 361 static inline void 362 pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph, 363 void *obj) 364 { 365 unsigned int idx = pr_item_notouch_index(pp, ph, obj); 366 pool_item_bitmap_t *bitmap = ph->ph_bitmap + (idx / BITMAP_SIZE); 367 pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK); 368 369 KASSERT((*bitmap & mask) == 0); 370 *bitmap |= mask; 371 } 372 373 static inline void * 374 pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph) 375 { 376 pool_item_bitmap_t *bitmap = ph->ph_bitmap; 377 unsigned int idx; 378 int i; 379 380 for (i = 0; ; i++) { 381 int bit; 382 383 KASSERT((i * BITMAP_SIZE) < pp->pr_itemsperpage); 384 bit = ffs32(bitmap[i]); 385 if (bit) { 386 pool_item_bitmap_t mask; 387 388 bit--; 389 idx = (i * BITMAP_SIZE) + bit; 390 mask = 1 << bit; 391 KASSERT((bitmap[i] & mask) != 0); 392 bitmap[i] &= ~mask; 393 break; 394 } 395 } 396 KASSERT(idx < pp->pr_itemsperpage); 397 return (char *)ph->ph_page + ph->ph_off + idx * pp->pr_size; 398 } 399 400 static inline void 401 pr_item_notouch_init(const struct pool *pp, struct pool_item_header *ph) 402 { 403 pool_item_bitmap_t *bitmap = ph->ph_bitmap; 404 const int n = howmany(pp->pr_itemsperpage, BITMAP_SIZE); 405 int i; 406 407 for (i = 0; i < n; i++) { 408 bitmap[i] = (pool_item_bitmap_t)-1; 409 } 410 } 411 412 static inline int 413 phtree_compare(struct pool_item_header *a, struct pool_item_header *b) 414 { 415 416 /* 417 * we consider pool_item_header with smaller ph_page bigger. 418 * (this unnatural ordering is for the benefit of pr_find_pagehead.) 
static inline int
phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
{

	/*
	 * We consider a pool_item_header with a smaller ph_page to be
	 * bigger.  (This unnatural ordering is for the benefit of
	 * pr_find_pagehead.)
	 */
	if (a->ph_page < b->ph_page)
		return (1);
	else if (a->ph_page > b->ph_page)
		return (-1);
	else
		return (0);
}

SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);

static inline struct pool_item_header *
pr_find_pagehead_noalign(struct pool *pp, void *v)
{
	struct pool_item_header *ph, tmp;

	tmp.ph_page = (void *)(uintptr_t)v;
	ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
	if (ph == NULL) {
		ph = SPLAY_ROOT(&pp->pr_phtree);
		if (ph != NULL && phtree_compare(&tmp, ph) >= 0) {
			ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph);
		}
		KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0);
	}

	return ph;
}

/*
 * Return the pool page header based on item address.
 */
static inline struct pool_item_header *
pr_find_pagehead(struct pool *pp, void *v)
{
	struct pool_item_header *ph, tmp;

	if ((pp->pr_roflags & PR_NOALIGN) != 0) {
		ph = pr_find_pagehead_noalign(pp, v);
	} else {
		void *page =
		    (void *)((uintptr_t)v & pp->pr_alloc->pa_pagemask);

		if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
			ph = (struct pool_item_header *)((char *)page +
			    pp->pr_phoffset);
		} else {
			tmp.ph_page = page;
			ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
		}
	}

	KASSERT(ph == NULL || ((pp->pr_roflags & PR_PHINPAGE) != 0) ||
	    ((char *)ph->ph_page <= (char *)v &&
	    (char *)v < (char *)ph->ph_page + pp->pr_alloc->pa_pagesz));
	return ph;
}
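/*
 * Worked example (illustrative, assuming pa_pagesz == 4096, so
 * pa_pagemask == ~0xfff): an item at 0xdeadb480 masks down to page
 * 0xdeadb000.  With PR_PHINPAGE the header sits in the page itself
 * at page + pr_phoffset; otherwise it is found by the splay-tree
 * lookup keyed on that page address.
 */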
static void
pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq)
{
	struct pool_item_header *ph;

	while ((ph = LIST_FIRST(pq)) != NULL) {
		LIST_REMOVE(ph, ph_pagelist);
		pool_allocator_free(pp, ph->ph_page);
		if ((pp->pr_roflags & PR_PHINPAGE) == 0)
			pool_put(pp->pr_phpool, ph);
	}
}

/*
 * Remove a page from the pool.
 */
static inline void
pr_rmpage(struct pool *pp, struct pool_item_header *ph,
     struct pool_pagelist *pq)
{

	KASSERT(mutex_owned(&pp->pr_lock));

	/*
	 * If the page was idle, decrement the idle page count.
	 */
	if (ph->ph_nmissing == 0) {
#ifdef DIAGNOSTIC
		if (pp->pr_nidle == 0)
			panic("pr_rmpage: nidle inconsistent");
		if (pp->pr_nitems < pp->pr_itemsperpage)
			panic("pr_rmpage: nitems inconsistent");
#endif
		pp->pr_nidle--;
	}

	pp->pr_nitems -= pp->pr_itemsperpage;

	/*
	 * Unlink the page from the pool and queue it for release.
	 */
	LIST_REMOVE(ph, ph_pagelist);
	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
		SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
	LIST_INSERT_HEAD(pq, ph, ph_pagelist);

	pp->pr_npages--;
	pp->pr_npagefree++;

	pool_update_curpage(pp);
}

static bool
pa_starved_p(struct pool_allocator *pa)
{

	if (pa->pa_backingmap != NULL) {
		return vm_map_starved_p(pa->pa_backingmap);
	}
	return false;
}

static int
pool_reclaim_callback(struct callback_entry *ce, void *obj, void *arg)
{
	struct pool *pp = obj;
	struct pool_allocator *pa = pp->pr_alloc;

	KASSERT(&pp->pr_reclaimerentry == ce);
	pool_reclaim(pp);
	if (!pa_starved_p(pa)) {
		return CALLBACK_CHAIN_ABORT;
	}
	return CALLBACK_CHAIN_CONTINUE;
}

static void
pool_reclaim_register(struct pool *pp)
{
	struct vm_map *map = pp->pr_alloc->pa_backingmap;
	int s;

	if (map == NULL) {
		return;
	}

	s = splvm(); /* not necessary for INTRSAFE maps, but don't care. */
	callback_register(&vm_map_to_kernel(map)->vmk_reclaim_callback,
	    &pp->pr_reclaimerentry, pp, pool_reclaim_callback);
	splx(s);

#ifdef DIAGNOSTIC
	/* Diagnostic drain attempt. */
	uvm_km_va_drain(map, 0);
#endif
}

static void
pool_reclaim_unregister(struct pool *pp)
{
	struct vm_map *map = pp->pr_alloc->pa_backingmap;
	int s;

	if (map == NULL) {
		return;
	}

	s = splvm(); /* not necessary for INTRSAFE maps, but don't care. */
	callback_unregister(&vm_map_to_kernel(map)->vmk_reclaim_callback,
	    &pp->pr_reclaimerentry);
	splx(s);
}

static void
pa_reclaim_register(struct pool_allocator *pa)
{
	struct vm_map *map = *pa->pa_backingmapptr;
	struct pool *pp;

	KASSERT(pa->pa_backingmap == NULL);
	if (map == NULL) {
		SLIST_INSERT_HEAD(&pa_deferinitq, pa, pa_q);
		return;
	}
	pa->pa_backingmap = map;
	TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
		pool_reclaim_register(pp);
	}
}

/*
 * Initialize all the pools listed in the "pools" link set.
 */
void
pool_subsystem_init(void)
{
	struct pool_allocator *pa;

	mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&pool_allocator_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&pool_busy, "poolbusy");

	while ((pa = SLIST_FIRST(&pa_deferinitq)) != NULL) {
		KASSERT(pa->pa_backingmapptr != NULL);
		KASSERT(*pa->pa_backingmapptr != NULL);
		SLIST_REMOVE_HEAD(&pa_deferinitq, pa_q);
		pa_reclaim_register(pa);
	}

	pool_init(&cache_pool, sizeof(struct pool_cache), coherency_unit,
	    0, 0, "pcache", &pool_allocator_nointr, IPL_NONE);

	pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), coherency_unit,
	    0, 0, "pcachecpu", &pool_allocator_nointr, IPL_NONE);
}

/*
 * Initialize the given pool resource structure.
 *
 * We export this routine to allow other kernel parts to declare
 * static pools that must be initialized before malloc() is available.
 */
void
pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
    const char *wchan, struct pool_allocator *palloc, int ipl)
{
	struct pool *pp1;
	size_t trysize, phsize;
	int off, slack;

#ifdef DEBUG
	/*
	 * Check that the pool hasn't already been initialised and
	 * added to the list of all pools.
	 */
	TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
		if (pp == pp1)
			panic("pool_init: pool %s already initialised",
			    wchan);
	}
#endif

#ifdef POOL_DIAGNOSTIC
	/*
	 * Always log if POOL_DIAGNOSTIC is defined.
	 */
	if (pool_logsize != 0)
		flags |= PR_LOGGING;
#endif

	if (palloc == NULL)
		palloc = &pool_allocator_kmem;
#ifdef POOL_SUBPAGE
	if (size > palloc->pa_pagesz) {
		if (palloc == &pool_allocator_kmem)
			palloc = &pool_allocator_kmem_fullpage;
		else if (palloc == &pool_allocator_nointr)
			palloc = &pool_allocator_nointr_fullpage;
	}
#endif /* POOL_SUBPAGE */
	if (!cold)
		mutex_enter(&pool_allocator_lock);
	if (palloc->pa_refcnt++ == 0) {
		if (palloc->pa_pagesz == 0)
			palloc->pa_pagesz = PAGE_SIZE;

		TAILQ_INIT(&palloc->pa_list);

		mutex_init(&palloc->pa_lock, MUTEX_DEFAULT, IPL_VM);
		palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
		palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;

		if (palloc->pa_backingmapptr != NULL) {
			pa_reclaim_register(palloc);
		}
	}
	if (!cold)
		mutex_exit(&pool_allocator_lock);

	if (align == 0)
		align = ALIGN(1);

	if ((flags & PR_NOTOUCH) == 0 && size < sizeof(struct pool_item))
		size = sizeof(struct pool_item);

	size = roundup(size, align);
#ifdef DIAGNOSTIC
	if (size > palloc->pa_pagesz)
		panic("pool_init: pool item size (%zu) too large", size);
#endif

	/*
	 * Initialize the pool structure.
	 */
	LIST_INIT(&pp->pr_emptypages);
	LIST_INIT(&pp->pr_fullpages);
	LIST_INIT(&pp->pr_partpages);
	pp->pr_cache = NULL;
	pp->pr_curpage = NULL;
	pp->pr_npages = 0;
	pp->pr_minitems = 0;
	pp->pr_minpages = 0;
	pp->pr_maxpages = UINT_MAX;
	pp->pr_roflags = flags;
	pp->pr_flags = 0;
	pp->pr_size = size;
	pp->pr_align = align;
	pp->pr_wchan = wchan;
	pp->pr_alloc = palloc;
	pp->pr_nitems = 0;
	pp->pr_nout = 0;
	pp->pr_hardlimit = UINT_MAX;
	pp->pr_hardlimit_warning = NULL;
	pp->pr_hardlimit_ratecap.tv_sec = 0;
	pp->pr_hardlimit_ratecap.tv_usec = 0;
	pp->pr_hardlimit_warning_last.tv_sec = 0;
	pp->pr_hardlimit_warning_last.tv_usec = 0;
	pp->pr_drain_hook = NULL;
	pp->pr_drain_hook_arg = NULL;
	pp->pr_freecheck = NULL;

	/*
	 * Decide whether to put the page header off-page, to avoid wasting
	 * too large a part of the page, or because the item is too big.
	 * Off-page page headers go on a hash table, so we can match a
	 * returned item with its header based on the page address.
	 * We use 1/16 of the page size and about 8 times the item
	 * size as the threshold (XXX: tune)
	 *
	 * However, we'll put the header into the page if we can put
	 * it without wasting any items.
	 *
	 * Silently enforce `0 <= ioff < align'.
	 */
	pp->pr_itemoffset = ioff %= align;
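	/*
	 * By the numbers (illustrative, assuming pa_pagesz == 4096,
	 * phsize == 64 and ioff == 0): the small-item threshold is
	 * MIN(4096 / 16, 64 << 3) == 256, so a 128-byte item keeps its
	 * header in the page (PR_PHINPAGE).  A 512-byte item fails both
	 * tests: 512 >= 256, and 4096 / 512 == 8 items but
	 * (4096 - 64) / 512 == 7, i.e. an in-page header would cost an
	 * item, so the header is allocated from phpool instead.
	 */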
	/* See the comment below about reserved bytes. */
	trysize = palloc->pa_pagesz - ((align - ioff) % align);
	phsize = ALIGN(sizeof(struct pool_item_header));
	if ((pp->pr_roflags & (PR_NOTOUCH | PR_NOALIGN)) == 0 &&
	    (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
	    trysize / pp->pr_size == (trysize - phsize) / pp->pr_size)) {
		/* Use the end of the page for the page header */
		pp->pr_roflags |= PR_PHINPAGE;
		pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
	} else {
		/* The page header will be taken from our page header pool */
		pp->pr_phoffset = 0;
		off = palloc->pa_pagesz;
		SPLAY_INIT(&pp->pr_phtree);
	}

	/*
	 * Alignment is to take place at `ioff' within the item.  This means
	 * we must reserve up to `align - 1' bytes on the page to allow
	 * appropriate positioning of each item.
	 */
	pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
	KASSERT(pp->pr_itemsperpage != 0);
	if ((pp->pr_roflags & PR_NOTOUCH)) {
		int idx;

		for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
		    idx++) {
			/* nothing */
		}
		if (idx >= PHPOOL_MAX) {
			/*
			 * If you see this panic, consider tweaking
			 * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
			 */
			panic("%s: too large itemsperpage(%d) for PR_NOTOUCH",
			    pp->pr_wchan, pp->pr_itemsperpage);
		}
		pp->pr_phpool = &phpool[idx];
	} else if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
		pp->pr_phpool = &phpool[0];
	}
#if defined(DIAGNOSTIC)
	else {
		pp->pr_phpool = NULL;
	}
#endif

	/*
	 * Use the slack between the chunks and the page header
	 * for "cache coloring".
	 */
	slack = off - pp->pr_itemsperpage * pp->pr_size;
	pp->pr_maxcolor = (slack / align) * align;
	pp->pr_curcolor = 0;

	pp->pr_nget = 0;
	pp->pr_nfail = 0;
	pp->pr_nput = 0;
	pp->pr_npagealloc = 0;
	pp->pr_npagefree = 0;
	pp->pr_hiwat = 0;
	pp->pr_nidle = 0;
	pp->pr_refcnt = 0;

	pp->pr_log = NULL;

	pp->pr_entered_file = NULL;
	pp->pr_entered_line = 0;

	mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl);
	cv_init(&pp->pr_cv, wchan);
	pp->pr_ipl = ipl;

	/*
	 * Initialize private page header pool and cache magazine pool if we
	 * haven't done so yet.
	 * XXX LOCKING.
	 */
	if (phpool[0].pr_size == 0) {
		int idx;
		for (idx = 0; idx < PHPOOL_MAX; idx++) {
			static char phpool_names[PHPOOL_MAX][6+1+6+1];
			int nelem;
			size_t sz;

			nelem = PHPOOL_FREELIST_NELEM(idx);
			snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
			    "phpool-%d", nelem);
			sz = sizeof(struct pool_item_header);
			if (nelem) {
				sz = offsetof(struct pool_item_header,
				    ph_bitmap[howmany(nelem, BITMAP_SIZE)]);
			}
			pool_init(&phpool[idx], sz, 0, 0, 0,
			    phpool_names[idx], &pool_allocator_meta, IPL_VM);
		}
#ifdef POOL_SUBPAGE
		pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
		    PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM);
#endif

		size = sizeof(pcg_t) +
		    (PCG_NOBJECTS_NORMAL - 1) * sizeof(pcgpair_t);
		pool_init(&pcg_normal_pool, size, coherency_unit, 0, 0,
		    "pcgnormal", &pool_allocator_meta, IPL_VM);

		size = sizeof(pcg_t) +
		    (PCG_NOBJECTS_LARGE - 1) * sizeof(pcgpair_t);
		pool_init(&pcg_large_pool, size, coherency_unit, 0, 0,
		    "pcglarge", &pool_allocator_meta, IPL_VM);
	}

	/* Insert into the list of all pools. */
	if (!cold)
		mutex_enter(&pool_head_lock);
	TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
		if (strcmp(pp1->pr_wchan, pp->pr_wchan) > 0)
			break;
	}
	if (pp1 == NULL)
		TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
	else
		TAILQ_INSERT_BEFORE(pp1, pp, pr_poollist);
	if (!cold)
		mutex_exit(&pool_head_lock);

	/* Insert this into the list of pools using this allocator. */
	if (!cold)
		mutex_enter(&palloc->pa_lock);
	TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
	if (!cold)
		mutex_exit(&palloc->pa_lock);

	pool_reclaim_register(pp);
}

/*
 * De-commission a pool resource.
 */
void
pool_destroy(struct pool *pp)
{
	struct pool_pagelist pq;
	struct pool_item_header *ph;

	/* Remove from global pool list */
	mutex_enter(&pool_head_lock);
	while (pp->pr_refcnt != 0)
		cv_wait(&pool_busy, &pool_head_lock);
	TAILQ_REMOVE(&pool_head, pp, pr_poollist);
	if (drainpp == pp)
		drainpp = NULL;
	mutex_exit(&pool_head_lock);

	/* Remove this pool from its allocator's list of pools. */
	pool_reclaim_unregister(pp);
	mutex_enter(&pp->pr_alloc->pa_lock);
	TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
	mutex_exit(&pp->pr_alloc->pa_lock);

	mutex_enter(&pool_allocator_lock);
	if (--pp->pr_alloc->pa_refcnt == 0)
		mutex_destroy(&pp->pr_alloc->pa_lock);
	mutex_exit(&pool_allocator_lock);

	mutex_enter(&pp->pr_lock);

	KASSERT(pp->pr_cache == NULL);

#ifdef DIAGNOSTIC
	if (pp->pr_nout != 0) {
		pr_printlog(pp, NULL, printf);
		panic("pool_destroy: pool busy: still out: %u",
		    pp->pr_nout);
	}
#endif

	KASSERT(LIST_EMPTY(&pp->pr_fullpages));
	KASSERT(LIST_EMPTY(&pp->pr_partpages));

	/* Remove all pages */
	LIST_INIT(&pq);
	while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
		pr_rmpage(pp, ph, &pq);

	mutex_exit(&pp->pr_lock);

	pr_pagelist_free(pp, &pq);

#ifdef POOL_DIAGNOSTIC
	if (pp->pr_log != NULL) {
		free(pp->pr_log, M_TEMP);
		pp->pr_log = NULL;
	}
#endif

	cv_destroy(&pp->pr_cv);
	mutex_destroy(&pp->pr_lock);
}

void
pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
{

	/* XXX no locking -- must be used just after pool_init() */
#ifdef DIAGNOSTIC
	if (pp->pr_drain_hook != NULL)
		panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
#endif
	pp->pr_drain_hook = fn;
	pp->pr_drain_hook_arg = arg;
}

static struct pool_item_header *
pool_alloc_item_header(struct pool *pp, void *storage, int flags)
{
	struct pool_item_header *ph;

	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
		ph = (struct pool_item_header *)((char *)storage +
		    pp->pr_phoffset);
	else
		ph = pool_get(pp->pr_phpool, flags);

	return (ph);
}
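/*
 * Example call patterns for pool_get() below (illustrative sketch;
 * "mpool" and the error handling are hypothetical): code that cannot
 * sleep must pass PR_NOWAIT and handle a NULL return, while sleepable
 * code may pass PR_WAITOK and will normally block until an item is
 * available (unless PR_LIMITFAIL applies at the hard limit):
 *
 *	m = pool_get(&mpool, PR_NOWAIT);
 *	if (m == NULL)
 *		return ENOBUFS;
 */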
/*
 * Grab an item from the pool.
 */
void *
#ifdef POOL_DIAGNOSTIC
_pool_get(struct pool *pp, int flags, const char *file, long line)
#else
pool_get(struct pool *pp, int flags)
#endif
{
	struct pool_item *pi;
	struct pool_item_header *ph;
	void *v;

#ifdef DIAGNOSTIC
	if (pp->pr_itemsperpage == 0)
		panic("pool_get: pool '%s': pr_itemsperpage is zero, "
		    "pool not initialized?", pp->pr_wchan);
	if ((cpu_intr_p() || cpu_softintr_p()) && pp->pr_ipl == IPL_NONE &&
	    !cold && panicstr == NULL)
		panic("pool '%s' is IPL_NONE, but called from "
		    "interrupt context\n", pp->pr_wchan);
#endif
	if (flags & PR_WAITOK) {
		ASSERT_SLEEPABLE();
	}

	mutex_enter(&pp->pr_lock);
	pr_enter(pp, file, line);

 startover:
	/*
	 * Check to see if we've reached the hard limit.  If we have,
	 * and we can wait, then wait until an item has been returned to
	 * the pool.
	 */
#ifdef DIAGNOSTIC
	if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
		pr_leave(pp);
		mutex_exit(&pp->pr_lock);
		panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
	}
#endif
	if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
		if (pp->pr_drain_hook != NULL) {
			/*
			 * Since the drain hook is going to free things
			 * back to the pool, unlock, call the hook, re-lock,
			 * and check the hardlimit condition again.
			 */
			pr_leave(pp);
			mutex_exit(&pp->pr_lock);
			(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
			mutex_enter(&pp->pr_lock);
			pr_enter(pp, file, line);
			if (pp->pr_nout < pp->pr_hardlimit)
				goto startover;
		}

		if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
			/*
			 * XXX: A warning isn't logged in this case.  Should
			 * it be?
			 */
			pp->pr_flags |= PR_WANTED;
			pr_leave(pp);
			cv_wait(&pp->pr_cv, &pp->pr_lock);
			pr_enter(pp, file, line);
			goto startover;
		}

		/*
		 * Log a message that the hard limit has been hit.
		 */
		if (pp->pr_hardlimit_warning != NULL &&
		    ratecheck(&pp->pr_hardlimit_warning_last,
			      &pp->pr_hardlimit_ratecap))
			log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);

		pp->pr_nfail++;

		pr_leave(pp);
		mutex_exit(&pp->pr_lock);
		return (NULL);
	}

	/*
	 * The convention we use is that if `curpage' is not NULL, then
	 * it points at a non-empty bucket.  In particular, `curpage'
	 * never points at a page header which has PR_PHINPAGE set and
	 * has no items in its bucket.
	 */
	if ((ph = pp->pr_curpage) == NULL) {
		int error;

#ifdef DIAGNOSTIC
		if (pp->pr_nitems != 0) {
			mutex_exit(&pp->pr_lock);
			printf("pool_get: %s: curpage NULL, nitems %u\n",
			    pp->pr_wchan, pp->pr_nitems);
			panic("pool_get: nitems inconsistent");
		}
#endif

		/*
		 * Call the back-end page allocator for more memory.
		 * Release the pool lock, as the back-end page allocator
		 * may block.
		 */
		pr_leave(pp);
		error = pool_grow(pp, flags);
		pr_enter(pp, file, line);
		if (error != 0) {
			/*
			 * We were unable to allocate a page or item
			 * header, but we released the lock during
			 * allocation, so perhaps items were freed
			 * back to the pool.  Check for this case.
			 */
			if (pp->pr_curpage != NULL)
				goto startover;

			pp->pr_nfail++;
			pr_leave(pp);
			mutex_exit(&pp->pr_lock);
			return (NULL);
		}

		/* Start the allocation process over. */
		goto startover;
	}
	if (pp->pr_roflags & PR_NOTOUCH) {
#ifdef DIAGNOSTIC
		if (__predict_false(ph->ph_nmissing == pp->pr_itemsperpage)) {
			pr_leave(pp);
			mutex_exit(&pp->pr_lock);
			panic("pool_get: %s: page empty", pp->pr_wchan);
		}
#endif
		v = pr_item_notouch_get(pp, ph);
#ifdef POOL_DIAGNOSTIC
		pr_log(pp, v, PRLOG_GET, file, line);
#endif
	} else {
		v = pi = LIST_FIRST(&ph->ph_itemlist);
		if (__predict_false(v == NULL)) {
			pr_leave(pp);
			mutex_exit(&pp->pr_lock);
			panic("pool_get: %s: page empty", pp->pr_wchan);
		}
#ifdef DIAGNOSTIC
		if (__predict_false(pp->pr_nitems == 0)) {
			pr_leave(pp);
			mutex_exit(&pp->pr_lock);
			printf("pool_get: %s: items on itemlist, nitems %u\n",
			    pp->pr_wchan, pp->pr_nitems);
			panic("pool_get: nitems inconsistent");
		}
#endif

#ifdef POOL_DIAGNOSTIC
		pr_log(pp, v, PRLOG_GET, file, line);
#endif

#ifdef DIAGNOSTIC
		if (__predict_false(pi->pi_magic != PI_MAGIC)) {
			pr_printlog(pp, pi, printf);
			panic("pool_get(%s): free list modified: "
			    "magic=%x; page %p; item addr %p\n",
			    pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
		}
#endif

		/*
		 * Remove from item list.
		 */
		LIST_REMOVE(pi, pi_list);
	}
	pp->pr_nitems--;
	pp->pr_nout++;
	if (ph->ph_nmissing == 0) {
#ifdef DIAGNOSTIC
		if (__predict_false(pp->pr_nidle == 0))
			panic("pool_get: nidle inconsistent");
#endif
		pp->pr_nidle--;

		/*
		 * This page was previously empty.  Move it to the list of
		 * partially-full pages.  This page is already curpage.
		 */
		LIST_REMOVE(ph, ph_pagelist);
		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
	}
	ph->ph_nmissing++;
	if (ph->ph_nmissing == pp->pr_itemsperpage) {
#ifdef DIAGNOSTIC
		if (__predict_false((pp->pr_roflags & PR_NOTOUCH) == 0 &&
		    !LIST_EMPTY(&ph->ph_itemlist))) {
			pr_leave(pp);
			mutex_exit(&pp->pr_lock);
			panic("pool_get: %s: nmissing inconsistent",
			    pp->pr_wchan);
		}
#endif
		/*
		 * This page is now full.  Move it to the full list
		 * and select a new current page.
		 */
		LIST_REMOVE(ph, ph_pagelist);
		LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
		pool_update_curpage(pp);
	}

	pp->pr_nget++;
	pr_leave(pp);

	/*
	 * If we have a low water mark and we are now below that low
	 * water mark, add more items to the pool.
	 */
	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
		/*
		 * XXX: Should we log a warning?  Should we set up a timeout
		 * to try again in a second or so?  The latter could break
		 * a caller's assumptions about interrupt protection, etc.
		 */
	}

	mutex_exit(&pp->pr_lock);
	KASSERT((((vaddr_t)v + pp->pr_itemoffset) & (pp->pr_align - 1)) == 0);
	FREECHECK_OUT(&pp->pr_freecheck, v);
	return (v);
}

/*
 * Internal version of pool_put().  Pool is already locked/entered.
 */
static void
pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
{
	struct pool_item *pi = v;
	struct pool_item_header *ph;

	KASSERT(mutex_owned(&pp->pr_lock));
	FREECHECK_IN(&pp->pr_freecheck, v);
	LOCKDEBUG_MEM_CHECK(v, pp->pr_size);

#ifdef DIAGNOSTIC
	if (__predict_false(pp->pr_nout == 0)) {
		printf("pool %s: putting with none out\n",
		    pp->pr_wchan);
		panic("pool_put");
	}
#endif

	if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) {
		pr_printlog(pp, NULL, printf);
		panic("pool_put: %s: page header missing", pp->pr_wchan);
	}

	/*
	 * Return to item list.
	 */
	if (pp->pr_roflags & PR_NOTOUCH) {
		pr_item_notouch_put(pp, ph, v);
	} else {
#ifdef DIAGNOSTIC
		pi->pi_magic = PI_MAGIC;
#endif
#ifdef DEBUG
		{
			int i, *ip = v;

			for (i = 0; i < pp->pr_size / sizeof(int); i++) {
				*ip++ = PI_MAGIC;
			}
		}
#endif

		LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
	}
	KDASSERT(ph->ph_nmissing != 0);
	ph->ph_nmissing--;
	pp->pr_nput++;
	pp->pr_nitems++;
	pp->pr_nout--;

	/* Cancel "pool empty" condition if it exists */
	if (pp->pr_curpage == NULL)
		pp->pr_curpage = ph;

	if (pp->pr_flags & PR_WANTED) {
		pp->pr_flags &= ~PR_WANTED;
		cv_broadcast(&pp->pr_cv);
	}

	/*
	 * If this page is now empty, do one of two things:
	 *
	 *	(1) If we have more pages than the page high water mark,
	 *	    free the page back to the system.  ONLY CONSIDER
	 *	    FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
	 *	    CLAIM.
	 *
	 *	(2) Otherwise, move the page to the empty page list.
	 *
	 * Either way, select a new current page (so we use a partially-full
	 * page if one is available).
	 */
	if (ph->ph_nmissing == 0) {
		pp->pr_nidle++;
		if (pp->pr_npages > pp->pr_minpages &&
		    pp->pr_npages > pp->pr_maxpages) {
			pr_rmpage(pp, ph, pq);
		} else {
			LIST_REMOVE(ph, ph_pagelist);
			LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);

			/*
			 * Update the timestamp on the page.  A page must
			 * be idle for some period of time before it can
			 * be reclaimed by the pagedaemon.  This minimizes
			 * ping-pong'ing for memory.
			 *
			 * note for 64-bit time_t: truncating to 32-bit is not
			 * a problem for our usage.
			 */
			ph->ph_time = time_uptime;
		}
		pool_update_curpage(pp);
	}

	/*
	 * If the page was previously completely full, move it to the
	 * partially-full list and make it the current page.  The next
	 * allocation will get the item from this page, instead of
	 * further fragmenting the pool.
	 */
	else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
		LIST_REMOVE(ph, ph_pagelist);
		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
		pp->pr_curpage = ph;
	}
}

/*
 * Return resource to the pool.
 */
#ifdef POOL_DIAGNOSTIC
void
_pool_put(struct pool *pp, void *v, const char *file, long line)
{
	struct pool_pagelist pq;

	LIST_INIT(&pq);

	mutex_enter(&pp->pr_lock);
	pr_enter(pp, file, line);

	pr_log(pp, v, PRLOG_PUT, file, line);

	pool_do_put(pp, v, &pq);

	pr_leave(pp);
	mutex_exit(&pp->pr_lock);

	pr_pagelist_free(pp, &pq);
}
#undef pool_put
#endif /* POOL_DIAGNOSTIC */

void
pool_put(struct pool *pp, void *v)
{
	struct pool_pagelist pq;

	LIST_INIT(&pq);

	mutex_enter(&pp->pr_lock);
	pool_do_put(pp, v, &pq);
	mutex_exit(&pp->pr_lock);

	pr_pagelist_free(pp, &pq);
}

#ifdef POOL_DIAGNOSTIC
#define	pool_put(h, v)	_pool_put((h), (v), __FILE__, __LINE__)
#endif

/*
 * pool_grow: grow a pool by a page.
 *
 * => called with pool locked.
 * => unlock and relock the pool.
 * => return with pool locked.
 */

static int
pool_grow(struct pool *pp, int flags)
{
	struct pool_item_header *ph = NULL;
	char *cp;

	mutex_exit(&pp->pr_lock);
	cp = pool_allocator_alloc(pp, flags);
	if (__predict_true(cp != NULL)) {
		ph = pool_alloc_item_header(pp, cp, flags);
	}
	if (__predict_false(cp == NULL || ph == NULL)) {
		if (cp != NULL) {
			pool_allocator_free(pp, cp);
		}
		mutex_enter(&pp->pr_lock);
		return ENOMEM;
	}

	mutex_enter(&pp->pr_lock);
	pool_prime_page(pp, cp, ph);
	pp->pr_npagealloc++;
	return 0;
}

/*
 * Add N items to the pool.
 */
int
pool_prime(struct pool *pp, int n)
{
	int newpages;
	int error = 0;

	mutex_enter(&pp->pr_lock);

	newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	while (newpages-- > 0) {
		error = pool_grow(pp, PR_NOWAIT);
		if (error) {
			break;
		}
		pp->pr_minpages++;
	}

	if (pp->pr_minpages >= pp->pr_maxpages)
		pp->pr_maxpages = pp->pr_minpages + 1;	/* XXX */

	mutex_exit(&pp->pr_lock);
	return error;
}

/*
 * Add a page worth of items to the pool.
 *
 * Note, we must be called with the pool descriptor LOCKED.
 */
static void
pool_prime_page(struct pool *pp, void *storage, struct pool_item_header *ph)
{
	struct pool_item *pi;
	void *cp = storage;
	const unsigned int align = pp->pr_align;
	const unsigned int ioff = pp->pr_itemoffset;
	int n;

	KASSERT(mutex_owned(&pp->pr_lock));

#ifdef DIAGNOSTIC
	if ((pp->pr_roflags & PR_NOALIGN) == 0 &&
	    ((uintptr_t)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
		panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
#endif

	/*
	 * Insert page header.
	 */
	LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
	LIST_INIT(&ph->ph_itemlist);
	ph->ph_page = storage;
	ph->ph_nmissing = 0;
	ph->ph_time = time_uptime;
	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
		SPLAY_INSERT(phtree, &pp->pr_phtree, ph);

	pp->pr_nidle++;

	/*
	 * Color this page.
	 */
	ph->ph_off = pp->pr_curcolor;
	cp = (char *)cp + ph->ph_off;
	if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
		pp->pr_curcolor = 0;
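	/*
	 * Coloring, by the numbers (illustrative, assuming align == 32
	 * and pr_maxcolor == 64): successive pages start their items at
	 * offsets 0, 32, 64, 0, 32, ... so that the same item index on
	 * different pages maps to different cache lines.
	 */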
	/*
	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
	 */
	if (ioff != 0)
		cp = (char *)cp + align - ioff;

	KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);

	/*
	 * Insert remaining chunks on the bucket list.
	 */
	n = pp->pr_itemsperpage;
	pp->pr_nitems += n;

	if (pp->pr_roflags & PR_NOTOUCH) {
		pr_item_notouch_init(pp, ph);
	} else {
		while (n--) {
			pi = (struct pool_item *)cp;

			KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);

			/* Insert on page list */
			LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
#ifdef DIAGNOSTIC
			pi->pi_magic = PI_MAGIC;
#endif
			cp = (char *)cp + pp->pr_size;

			KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
		}
	}

	/*
	 * If the pool was depleted, point at the new page.
	 */
	if (pp->pr_curpage == NULL)
		pp->pr_curpage = ph;

	if (++pp->pr_npages > pp->pr_hiwat)
		pp->pr_hiwat = pp->pr_npages;
}

/*
 * Used by pool_get() when nitems drops below the low water mark.  This
 * is used to catch up pr_nitems with the low water mark.
 *
 * Note 1, we never wait for memory here, we let the caller decide what to do.
 *
 * Note 2, we must be called with the pool already locked, and we return
 * with it locked.
 */
static int
pool_catchup(struct pool *pp)
{
	int error = 0;

	while (POOL_NEEDS_CATCHUP(pp)) {
		error = pool_grow(pp, PR_NOWAIT);
		if (error) {
			break;
		}
	}
	return error;
}

static void
pool_update_curpage(struct pool *pp)
{

	pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
	if (pp->pr_curpage == NULL) {
		pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
	}
	KASSERT((pp->pr_curpage == NULL && pp->pr_nitems == 0) ||
	    (pp->pr_curpage != NULL && pp->pr_nitems > 0));
}

void
pool_setlowat(struct pool *pp, int n)
{

	mutex_enter(&pp->pr_lock);

	pp->pr_minitems = n;
	pp->pr_minpages = (n == 0)
		? 0
		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	/* Make sure we're caught up with the newly-set low water mark. */
	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
		/*
		 * XXX: Should we log a warning?  Should we set up a timeout
		 * to try again in a second or so?  The latter could break
		 * a caller's assumptions about interrupt protection, etc.
		 */
	}

	mutex_exit(&pp->pr_lock);
}

void
pool_sethiwat(struct pool *pp, int n)
{

	mutex_enter(&pp->pr_lock);

	pp->pr_maxpages = (n == 0)
		? 0
		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	mutex_exit(&pp->pr_lock);
}

void
pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
{

	mutex_enter(&pp->pr_lock);

	pp->pr_hardlimit = n;
	pp->pr_hardlimit_warning = warnmess;
	pp->pr_hardlimit_ratecap.tv_sec = ratecap;
	pp->pr_hardlimit_warning_last.tv_sec = 0;
	pp->pr_hardlimit_warning_last.tv_usec = 0;

	/*
	 * In-line version of pool_sethiwat(), because we don't want to
	 * release the lock.
	 */
	pp->pr_maxpages = (n == 0)
		? 0
		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	mutex_exit(&pp->pr_lock);
}
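/*
 * Water-mark tuning sketch (illustrative; the numbers and "frob_pool"
 * are hypothetical): keep at least 64 items primed, and allow idle
 * pages to be freed back once the pool holds more than 1024 items'
 * worth of pages:
 *
 *	pool_setlowat(&frob_pool, 64);
 *	pool_sethiwat(&frob_pool, 1024);
 *
 * pool_sethardlimit() additionally caps outstanding items, logging
 * "warnmess" at most once per "ratecap" seconds when the cap is hit.
 */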
/*
 * Release all complete pages that have not been used recently.
 *
 * Might be called from interrupt context.
 */
int
#ifdef POOL_DIAGNOSTIC
_pool_reclaim(struct pool *pp, const char *file, long line)
#else
pool_reclaim(struct pool *pp)
#endif
{
	struct pool_item_header *ph, *phnext;
	struct pool_pagelist pq;
	uint32_t curtime;
	bool klock;
	int rv;

	if (cpu_intr_p() || cpu_softintr_p()) {
		KASSERT(pp->pr_ipl != IPL_NONE);
	}

	if (pp->pr_drain_hook != NULL) {
		/*
		 * The drain hook must be called with the pool unlocked.
		 */
		(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
	}

	/*
	 * XXXSMP Because we do not want to cause non-MPSAFE code
	 * to block.
	 */
	if (pp->pr_ipl == IPL_SOFTNET || pp->pr_ipl == IPL_SOFTCLOCK ||
	    pp->pr_ipl == IPL_SOFTSERIAL) {
		KERNEL_LOCK(1, NULL);
		klock = true;
	} else
		klock = false;

	/* Reclaim items from the pool's cache (if any). */
	if (pp->pr_cache != NULL)
		pool_cache_invalidate(pp->pr_cache);

	if (mutex_tryenter(&pp->pr_lock) == 0) {
		if (klock) {
			KERNEL_UNLOCK_ONE(NULL);
		}
		return (0);
	}
	pr_enter(pp, file, line);

	LIST_INIT(&pq);

	curtime = time_uptime;

	for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
		phnext = LIST_NEXT(ph, ph_pagelist);

		/* Check our minimum page claim */
		if (pp->pr_npages <= pp->pr_minpages)
			break;

		KASSERT(ph->ph_nmissing == 0);
		if (curtime - ph->ph_time < pool_inactive_time
		    && !pa_starved_p(pp->pr_alloc))
			continue;

		/*
		 * If freeing this page would put us below
		 * the low water mark, stop now.
		 */
		if ((pp->pr_nitems - pp->pr_itemsperpage) <
		    pp->pr_minitems)
			break;

		pr_rmpage(pp, ph, &pq);
	}

	pr_leave(pp);
	mutex_exit(&pp->pr_lock);

	if (LIST_EMPTY(&pq))
		rv = 0;
	else {
		pr_pagelist_free(pp, &pq);
		rv = 1;
	}

	if (klock) {
		KERNEL_UNLOCK_ONE(NULL);
	}

	return (rv);
}

/*
 * Drain pools, one at a time.  This is a two stage process;
 * drain_start kicks off a cross call to drain CPU-level caches
 * if the pool has an associated pool_cache.  drain_end waits
 * for those cross calls to finish, and then drains the cache
 * (if any) and pool.
 *
 * Note, must never be called from interrupt context.
 */
void
pool_drain_start(struct pool **ppp, uint64_t *wp)
{
	struct pool *pp;

	KASSERT(!TAILQ_EMPTY(&pool_head));

	pp = NULL;

	/* Find next pool to drain, and add a reference. */
	mutex_enter(&pool_head_lock);
	do {
		if (drainpp == NULL) {
			drainpp = TAILQ_FIRST(&pool_head);
		}
		if (drainpp != NULL) {
			pp = drainpp;
			drainpp = TAILQ_NEXT(pp, pr_poollist);
		}
		/*
		 * Skip completely idle pools.  We depend on at least
		 * one pool in the system being active.
		 */
	} while (pp == NULL || pp->pr_npages == 0);
	pp->pr_refcnt++;
	mutex_exit(&pool_head_lock);

	/* If there is a pool_cache, drain CPU level caches. */
	*ppp = pp;
	if (pp->pr_cache != NULL) {
		*wp = xc_broadcast(0, (xcfunc_t)pool_cache_xcall,
		    pp->pr_cache, NULL);
	}
}
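/*
 * Two-phase drain, as used by a caller such as the pagedaemon
 * (illustrative sketch):
 *
 *	struct pool *pp;
 *	uint64_t where;
 *
 *	pool_drain_start(&pp, &where);
 *	... do other reclamation work while the cross-call runs ...
 *	(void)pool_drain_end(pp, where);
 */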
bool
pool_drain_end(struct pool *pp, uint64_t where)
{
	bool reclaimed;

	if (pp == NULL)
		return false;

	KASSERT(pp->pr_refcnt > 0);

	/* Wait for remote draining to complete. */
	if (pp->pr_cache != NULL)
		xc_wait(where);

	/* Drain the cache (if any) and pool. */
	reclaimed = pool_reclaim(pp);

	/* Finally, unlock the pool. */
	mutex_enter(&pool_head_lock);
	pp->pr_refcnt--;
	cv_broadcast(&pool_busy);
	mutex_exit(&pool_head_lock);

	return reclaimed;
}

/*
 * Diagnostic helpers.
 */
void
pool_print(struct pool *pp, const char *modif)
{

	pool_print1(pp, modif, printf);
}

void
pool_printall(const char *modif, void (*pr)(const char *, ...))
{
	struct pool *pp;

	TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
		pool_printit(pp, modif, pr);
	}
}

void
pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
{

	if (pp == NULL) {
		(*pr)("Must specify a pool to print.\n");
		return;
	}

	pool_print1(pp, modif, pr);
}

static void
pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
    void (*pr)(const char *, ...))
{
	struct pool_item_header *ph;
#ifdef DIAGNOSTIC
	struct pool_item *pi;
#endif

	LIST_FOREACH(ph, pl, ph_pagelist) {
		(*pr)("\t\tpage %p, nmissing %d, time %" PRIu32 "\n",
		    ph->ph_page, ph->ph_nmissing, ph->ph_time);
#ifdef DIAGNOSTIC
		if (!(pp->pr_roflags & PR_NOTOUCH)) {
			LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
				if (pi->pi_magic != PI_MAGIC) {
					(*pr)("\t\t\titem %p, magic 0x%x\n",
					    pi, pi->pi_magic);
				}
			}
		}
#endif
	}
}
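/*
 * pool_print1() below accepts a string of modifier characters:
 * 'l' prints the diagnostic log, 'p' the page lists, and 'c' the
 * cache groups.  For example (sketch), from diagnostic code:
 *
 *	pool_printit(pp, "pc", printf);
 *
 * prints the pool's page lists plus any pool_cache group contents.
 */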
%p\n", pp->pr_curpage->ph_page); 1891 1892 skip_pagelist: 1893 if (print_log == 0) 1894 goto skip_log; 1895 1896 (*pr)("\n"); 1897 if ((pp->pr_roflags & PR_LOGGING) == 0) 1898 (*pr)("\tno log\n"); 1899 else { 1900 pr_printlog(pp, NULL, pr); 1901 } 1902 1903 skip_log: 1904 1905 #define PR_GROUPLIST(pcg) \ 1906 (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail); \ 1907 for (i = 0; i < pcg->pcg_size; i++) { \ 1908 if (pcg->pcg_objects[i].pcgo_pa != \ 1909 POOL_PADDR_INVALID) { \ 1910 (*pr)("\t\t\t%p, 0x%llx\n", \ 1911 pcg->pcg_objects[i].pcgo_va, \ 1912 (unsigned long long) \ 1913 pcg->pcg_objects[i].pcgo_pa); \ 1914 } else { \ 1915 (*pr)("\t\t\t%p\n", \ 1916 pcg->pcg_objects[i].pcgo_va); \ 1917 } \ 1918 } 1919 1920 if (pc != NULL) { 1921 cpuhit = 0; 1922 cpumiss = 0; 1923 for (i = 0; i < __arraycount(pc->pc_cpus); i++) { 1924 if ((cc = pc->pc_cpus[i]) == NULL) 1925 continue; 1926 cpuhit += cc->cc_hits; 1927 cpumiss += cc->cc_misses; 1928 } 1929 (*pr)("\tcpu layer hits %llu misses %llu\n", cpuhit, cpumiss); 1930 (*pr)("\tcache layer hits %llu misses %llu\n", 1931 pc->pc_hits, pc->pc_misses); 1932 (*pr)("\tcache layer entry uncontended %llu contended %llu\n", 1933 pc->pc_hits + pc->pc_misses - pc->pc_contended, 1934 pc->pc_contended); 1935 (*pr)("\tcache layer empty groups %u full groups %u\n", 1936 pc->pc_nempty, pc->pc_nfull); 1937 if (print_cache) { 1938 (*pr)("\tfull cache groups:\n"); 1939 for (pcg = pc->pc_fullgroups; pcg != NULL; 1940 pcg = pcg->pcg_next) { 1941 PR_GROUPLIST(pcg); 1942 } 1943 (*pr)("\tempty cache groups:\n"); 1944 for (pcg = pc->pc_emptygroups; pcg != NULL; 1945 pcg = pcg->pcg_next) { 1946 PR_GROUPLIST(pcg); 1947 } 1948 } 1949 } 1950 #undef PR_GROUPLIST 1951 1952 pr_enter_check(pp, pr); 1953 } 1954 1955 static int 1956 pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph) 1957 { 1958 struct pool_item *pi; 1959 void *page; 1960 int n; 1961 1962 if ((pp->pr_roflags & PR_NOALIGN) == 0) { 1963 page = (void *)((uintptr_t)ph & pp->pr_alloc->pa_pagemask); 1964 if (page != ph->ph_page && 1965 (pp->pr_roflags & PR_PHINPAGE) != 0) { 1966 if (label != NULL) 1967 printf("%s: ", label); 1968 printf("pool(%p:%s): page inconsistency: page %p;" 1969 " at page head addr %p (p %p)\n", pp, 1970 pp->pr_wchan, ph->ph_page, 1971 ph, page); 1972 return 1; 1973 } 1974 } 1975 1976 if ((pp->pr_roflags & PR_NOTOUCH) != 0) 1977 return 0; 1978 1979 for (pi = LIST_FIRST(&ph->ph_itemlist), n = 0; 1980 pi != NULL; 1981 pi = LIST_NEXT(pi,pi_list), n++) { 1982 1983 #ifdef DIAGNOSTIC 1984 if (pi->pi_magic != PI_MAGIC) { 1985 if (label != NULL) 1986 printf("%s: ", label); 1987 printf("pool(%s): free list modified: magic=%x;" 1988 " page %p; item ordinal %d; addr %p\n", 1989 pp->pr_wchan, pi->pi_magic, ph->ph_page, 1990 n, pi); 1991 panic("pool"); 1992 } 1993 #endif 1994 if ((pp->pr_roflags & PR_NOALIGN) != 0) { 1995 continue; 1996 } 1997 page = (void *)((uintptr_t)pi & pp->pr_alloc->pa_pagemask); 1998 if (page == ph->ph_page) 1999 continue; 2000 2001 if (label != NULL) 2002 printf("%s: ", label); 2003 printf("pool(%p:%s): page inconsistency: page %p;" 2004 " item ordinal %d; addr %p (p %p)\n", pp, 2005 pp->pr_wchan, ph->ph_page, 2006 n, pi, page); 2007 return 1; 2008 } 2009 return 0; 2010 } 2011 2012 2013 int 2014 pool_chk(struct pool *pp, const char *label) 2015 { 2016 struct pool_item_header *ph; 2017 int r = 0; 2018 2019 mutex_enter(&pp->pr_lock); 2020 LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) { 2021 r = pool_chk_page(pp, label, ph); 2022 if (r) { 2023 goto out; 
		}
	}
	LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
		r = pool_chk_page(pp, label, ph);
		if (r) {
			goto out;
		}
	}
	LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
		r = pool_chk_page(pp, label, ph);
		if (r) {
			goto out;
		}
	}

 out:
	mutex_exit(&pp->pr_lock);
	return (r);
}

/*
 * pool_cache_init:
 *
 *	Initialize a pool cache.
 */
pool_cache_t
pool_cache_init(size_t size, u_int align, u_int align_offset, u_int flags,
    const char *wchan, struct pool_allocator *palloc, int ipl,
    int (*ctor)(void *, void *, int), void (*dtor)(void *, void *), void *arg)
{
	pool_cache_t pc;

	pc = pool_get(&cache_pool, PR_WAITOK);
	if (pc == NULL)
		return NULL;

	pool_cache_bootstrap(pc, size, align, align_offset, flags, wchan,
	   palloc, ipl, ctor, dtor, arg);

	return pc;
}

/*
 * pool_cache_bootstrap:
 *
 *	Kernel-private version of pool_cache_init().  The caller
 *	provides initial storage.
 */
void
pool_cache_bootstrap(pool_cache_t pc, size_t size, u_int align,
    u_int align_offset, u_int flags, const char *wchan,
    struct pool_allocator *palloc, int ipl,
    int (*ctor)(void *, void *, int), void (*dtor)(void *, void *),
    void *arg)
{
	CPU_INFO_ITERATOR cii;
	pool_cache_t pc1;
	struct cpu_info *ci;
	struct pool *pp;

	pp = &pc->pc_pool;
	if (palloc == NULL && ipl == IPL_NONE)
		palloc = &pool_allocator_nointr;
	pool_init(pp, size, align, align_offset, flags, wchan, palloc, ipl);
	mutex_init(&pc->pc_lock, MUTEX_DEFAULT, ipl);

	if (ctor == NULL) {
		ctor = (int (*)(void *, void *, int))nullop;
	}
	if (dtor == NULL) {
		dtor = (void (*)(void *, void *))nullop;
	}

	pc->pc_emptygroups = NULL;
	pc->pc_fullgroups = NULL;
	pc->pc_partgroups = NULL;
	pc->pc_ctor = ctor;
	pc->pc_dtor = dtor;
	pc->pc_arg  = arg;
	pc->pc_hits  = 0;
	pc->pc_misses = 0;
	pc->pc_nempty = 0;
	pc->pc_npart = 0;
	pc->pc_nfull = 0;
	pc->pc_contended = 0;
	pc->pc_refcnt = 0;
	pc->pc_freecheck = NULL;

	if ((flags & PR_LARGECACHE) != 0) {
		pc->pc_pcgsize = PCG_NOBJECTS_LARGE;
		pc->pc_pcgpool = &pcg_large_pool;
	} else {
		pc->pc_pcgsize = PCG_NOBJECTS_NORMAL;
		pc->pc_pcgpool = &pcg_normal_pool;
	}

	/* Allocate per-CPU caches. */
	memset(pc->pc_cpus, 0, sizeof(pc->pc_cpus));
	pc->pc_ncpu = 0;
	if (ncpu < 2) {
		/* XXX For sparc: boot CPU is not attached yet. */
		pool_cache_cpu_init1(curcpu(), pc);
	} else {
		for (CPU_INFO_FOREACH(cii, ci)) {
			pool_cache_cpu_init1(ci, pc);
		}
	}

	/* Add to list of all pools. */
	if (__predict_true(!cold))
		mutex_enter(&pool_head_lock);
	TAILQ_FOREACH(pc1, &pool_cache_head, pc_cachelist) {
		if (strcmp(pc1->pc_pool.pr_wchan, pc->pc_pool.pr_wchan) > 0)
			break;
	}
	if (pc1 == NULL)
		TAILQ_INSERT_TAIL(&pool_cache_head, pc, pc_cachelist);
	else
		TAILQ_INSERT_BEFORE(pc1, pc, pc_cachelist);
	if (__predict_true(!cold))
		mutex_exit(&pool_head_lock);

	membar_sync();
	pp->pr_cache = pc;
}
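/*
 * Constructor/destructor sketch for the ctor/dtor arguments above
 * (illustrative; the "frob" names are hypothetical).  A constructor
 * receives the pc_arg cookie, the object, and the pool_get() flags,
 * and returns 0 on success; the destructor receives the cookie and
 * the object:
 *
 *	static int
 *	frob_ctor(void *arg, void *obj, int flags)
 *	{
 *		struct frob *f = obj;
 *
 *		mutex_init(&f->f_lock, MUTEX_DEFAULT, IPL_NONE);
 *		return 0;
 *	}
 *
 *	static void
 *	frob_dtor(void *arg, void *obj)
 *	{
 *		struct frob *f = obj;
 *
 *		mutex_destroy(&f->f_lock);
 *	}
 */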
*/ 2162 mutex_enter(&pool_head_lock); 2163 while (pc->pc_refcnt != 0) 2164 cv_wait(&pool_busy, &pool_head_lock); 2165 TAILQ_REMOVE(&pool_cache_head, pc, pc_cachelist); 2166 mutex_exit(&pool_head_lock); 2167 2168 /* First, invalidate the entire cache. */ 2169 pool_cache_invalidate(pc); 2170 2171 /* Disassociate it from the pool. */ 2172 mutex_enter(&pp->pr_lock); 2173 pp->pr_cache = NULL; 2174 mutex_exit(&pp->pr_lock); 2175 2176 /* Destroy per-CPU data */ 2177 for (i = 0; i < __arraycount(pc->pc_cpus); i++) 2178 pool_cache_invalidate_cpu(pc, i); 2179 2180 /* Finally, destroy it. */ 2181 mutex_destroy(&pc->pc_lock); 2182 pool_destroy(pp); 2183 pool_put(&cache_pool, pc); 2184 } 2185 2186 /* 2187 * pool_cache_cpu_init1: 2188 * 2189 * Called for each pool_cache whenever a new CPU is attached. 2190 */ 2191 static void 2192 pool_cache_cpu_init1(struct cpu_info *ci, pool_cache_t pc) 2193 { 2194 pool_cache_cpu_t *cc; 2195 int index; 2196 2197 index = ci->ci_index; 2198 2199 KASSERT(index < __arraycount(pc->pc_cpus)); 2200 2201 if ((cc = pc->pc_cpus[index]) != NULL) { 2202 KASSERT(cc->cc_cpuindex == index); 2203 return; 2204 } 2205 2206 /* 2207 * The first CPU is 'free'. This needs to be the case for 2208 * bootstrap - we may not be able to allocate yet. 2209 */ 2210 if (pc->pc_ncpu == 0) { 2211 cc = &pc->pc_cpu0; 2212 pc->pc_ncpu = 1; 2213 } else { 2214 mutex_enter(&pc->pc_lock); 2215 pc->pc_ncpu++; 2216 mutex_exit(&pc->pc_lock); 2217 cc = pool_get(&cache_cpu_pool, PR_WAITOK); 2218 } 2219 2220 cc->cc_ipl = pc->pc_pool.pr_ipl; 2221 cc->cc_iplcookie = makeiplcookie(cc->cc_ipl); 2222 cc->cc_cache = pc; 2223 cc->cc_cpuindex = index; 2224 cc->cc_hits = 0; 2225 cc->cc_misses = 0; 2226 cc->cc_current = __UNCONST(&pcg_dummy); 2227 cc->cc_previous = __UNCONST(&pcg_dummy); 2228 2229 pc->pc_cpus[index] = cc; 2230 } 2231 2232 /* 2233 * pool_cache_cpu_init: 2234 * 2235 * Called whenever a new CPU is attached. 2236 */ 2237 void 2238 pool_cache_cpu_init(struct cpu_info *ci) 2239 { 2240 pool_cache_t pc; 2241 2242 mutex_enter(&pool_head_lock); 2243 TAILQ_FOREACH(pc, &pool_cache_head, pc_cachelist) { 2244 pc->pc_refcnt++; 2245 mutex_exit(&pool_head_lock); 2246 2247 pool_cache_cpu_init1(ci, pc); 2248 2249 mutex_enter(&pool_head_lock); 2250 pc->pc_refcnt--; 2251 cv_broadcast(&pool_busy); 2252 } 2253 mutex_exit(&pool_head_lock); 2254 } 2255 2256 /* 2257 * pool_cache_reclaim: 2258 * 2259 * Reclaim memory from a pool cache. 2260 */ 2261 bool 2262 pool_cache_reclaim(pool_cache_t pc) 2263 { 2264 2265 return pool_reclaim(&pc->pc_pool); 2266 } 2267 2268 static void 2269 pool_cache_destruct_object1(pool_cache_t pc, void *object) 2270 { 2271 2272 (*pc->pc_dtor)(pc->pc_arg, object); 2273 pool_put(&pc->pc_pool, object); 2274 } 2275 2276 /* 2277 * pool_cache_destruct_object: 2278 * 2279 * Force destruction of an object and its release back into 2280 * the pool. 2281 */ 2282 void 2283 pool_cache_destruct_object(pool_cache_t pc, void *object) 2284 { 2285 2286 FREECHECK_IN(&pc->pc_freecheck, object); 2287 2288 pool_cache_destruct_object1(pc, object); 2289 } 2290 2291 /* 2292 * pool_cache_invalidate_groups: 2293 * 2294 * Invalidate a chain of groups and destruct all objects. 
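 *	Each object is destructed just as pool_cache_destruct_object()
 *	would destruct it, and each emptied group is returned to
 *	whichever of pcg_normal_pool or pcg_large_pool matches its
 *	pcg_size.
 *
 *	(Illustrative aside, hypothetical "foo" names: a caller that
 *	knows an object has gone bad can likewise destroy it outright
 *	instead of recycling it through the cache:
 *
 *		if (fo->fo_damaged)
 *			pool_cache_destruct_object(foo_cache, fo);
 *		else
 *			pool_cache_put(foo_cache, fo);
 *	)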
2295  */
2296 static void
2297 pool_cache_invalidate_groups(pool_cache_t pc, pcg_t *pcg)
2298 {
2299 	void *object;
2300 	pcg_t *next;
2301 	int i;
2302 
2303 	for (; pcg != NULL; pcg = next) {
2304 		next = pcg->pcg_next;
2305 
2306 		for (i = 0; i < pcg->pcg_avail; i++) {
2307 			object = pcg->pcg_objects[i].pcgo_va;
2308 			pool_cache_destruct_object1(pc, object);
2309 		}
2310 
2311 		if (pcg->pcg_size == PCG_NOBJECTS_LARGE) {
2312 			pool_put(&pcg_large_pool, pcg);
2313 		} else {
2314 			KASSERT(pcg->pcg_size == PCG_NOBJECTS_NORMAL);
2315 			pool_put(&pcg_normal_pool, pcg);
2316 		}
2317 	}
2318 }
2319 
2320 /*
2321  * pool_cache_invalidate:
2322  *
2323  *	Invalidate a pool cache (destruct and release all of the
2324  *	cached objects).  Does not reclaim objects from the pool.
2325  *
2326  *	Note: for pool caches that provide constructed objects, it is
2327  *	assumed that another level of synchronization occurs between
2328  *	the input to the constructor and the cache invalidation.
2329  */
2330 void
2331 pool_cache_invalidate(pool_cache_t pc)
2332 {
2333 	pcg_t *full, *empty, *part;
2334 #if 0
2335 	uint64_t where;
2336 
2337 	if (ncpu < 2 || !mp_online) {
2338 		/*
2339 		 * We might be called early enough in the boot process
2340 		 * for the CPU data structures to not be fully initialized.
2341 		 * In this case, simply gather the local CPU's cache now
2342 		 * since it will be the only one running.
2343 		 */
2344 		pool_cache_xcall(pc);
2345 	} else {
2346 		/*
2347 		 * Gather all of the CPU-specific caches into the
2348 		 * global cache.
2349 		 */
2350 		where = xc_broadcast(0, (xcfunc_t)pool_cache_xcall, pc, NULL);
2351 		xc_wait(where);
2352 	}
2353 #endif
2354 	mutex_enter(&pc->pc_lock);
2355 	full = pc->pc_fullgroups;
2356 	empty = pc->pc_emptygroups;
2357 	part = pc->pc_partgroups;
2358 	pc->pc_fullgroups = NULL;
2359 	pc->pc_emptygroups = NULL;
2360 	pc->pc_partgroups = NULL;
2361 	pc->pc_nfull = 0;
2362 	pc->pc_nempty = 0;
2363 	pc->pc_npart = 0;
2364 	mutex_exit(&pc->pc_lock);
2365 
2366 	pool_cache_invalidate_groups(pc, full);
2367 	pool_cache_invalidate_groups(pc, empty);
2368 	pool_cache_invalidate_groups(pc, part);
2369 }
2370 
2371 /*
2372  * pool_cache_invalidate_cpu:
2373  *
2374  *	Invalidate all CPU-bound cached objects in the pool cache, the
2375  *	CPU being identified by its associated index.
2376  *	It is the caller's responsibility to ensure that no operation is
2377  *	taking place on this pool cache while doing this invalidation.
2378  *	WARNING: as no inter-CPU locking is enforced, trying to invalidate
2379  *	pool cached objects from a CPU different from the one currently running
2380  *	may result in undefined behaviour.
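 *
 *	(Illustrative aside, hypothetical names: the safe pattern is the
 *	full pool_cache_invalidate() above, e.g. when a global parameter
 *	that foo_ctor() bakes into every object changes:
 *
 *		foo_param = new_value;
 *		pool_cache_invalidate(foo_cache);
 *
 *	so that stale constructed objects are destructed rather than
 *	handed out again.)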
2381 */ 2382 static void 2383 pool_cache_invalidate_cpu(pool_cache_t pc, u_int index) 2384 { 2385 2386 pool_cache_cpu_t *cc; 2387 pcg_t *pcg; 2388 2389 if ((cc = pc->pc_cpus[index]) == NULL) 2390 return; 2391 2392 if ((pcg = cc->cc_current) != &pcg_dummy) { 2393 pcg->pcg_next = NULL; 2394 pool_cache_invalidate_groups(pc, pcg); 2395 } 2396 if ((pcg = cc->cc_previous) != &pcg_dummy) { 2397 pcg->pcg_next = NULL; 2398 pool_cache_invalidate_groups(pc, pcg); 2399 } 2400 if (cc != &pc->pc_cpu0) 2401 pool_put(&cache_cpu_pool, cc); 2402 2403 } 2404 2405 void 2406 pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg) 2407 { 2408 2409 pool_set_drain_hook(&pc->pc_pool, fn, arg); 2410 } 2411 2412 void 2413 pool_cache_setlowat(pool_cache_t pc, int n) 2414 { 2415 2416 pool_setlowat(&pc->pc_pool, n); 2417 } 2418 2419 void 2420 pool_cache_sethiwat(pool_cache_t pc, int n) 2421 { 2422 2423 pool_sethiwat(&pc->pc_pool, n); 2424 } 2425 2426 void 2427 pool_cache_sethardlimit(pool_cache_t pc, int n, const char *warnmess, int ratecap) 2428 { 2429 2430 pool_sethardlimit(&pc->pc_pool, n, warnmess, ratecap); 2431 } 2432 2433 static bool __noinline 2434 pool_cache_get_slow(pool_cache_cpu_t *cc, int s, void **objectp, 2435 paddr_t *pap, int flags) 2436 { 2437 pcg_t *pcg, *cur; 2438 uint64_t ncsw; 2439 pool_cache_t pc; 2440 void *object; 2441 2442 KASSERT(cc->cc_current->pcg_avail == 0); 2443 KASSERT(cc->cc_previous->pcg_avail == 0); 2444 2445 pc = cc->cc_cache; 2446 cc->cc_misses++; 2447 2448 /* 2449 * Nothing was available locally. Try and grab a group 2450 * from the cache. 2451 */ 2452 if (__predict_false(!mutex_tryenter(&pc->pc_lock))) { 2453 ncsw = curlwp->l_ncsw; 2454 mutex_enter(&pc->pc_lock); 2455 pc->pc_contended++; 2456 2457 /* 2458 * If we context switched while locking, then 2459 * our view of the per-CPU data is invalid: 2460 * retry. 2461 */ 2462 if (curlwp->l_ncsw != ncsw) { 2463 mutex_exit(&pc->pc_lock); 2464 return true; 2465 } 2466 } 2467 2468 if (__predict_true((pcg = pc->pc_fullgroups) != NULL)) { 2469 /* 2470 * If there's a full group, release our empty 2471 * group back to the cache. Install the full 2472 * group as cc_current and return. 2473 */ 2474 if (__predict_true((cur = cc->cc_current) != &pcg_dummy)) { 2475 KASSERT(cur->pcg_avail == 0); 2476 cur->pcg_next = pc->pc_emptygroups; 2477 pc->pc_emptygroups = cur; 2478 pc->pc_nempty++; 2479 } 2480 KASSERT(pcg->pcg_avail == pcg->pcg_size); 2481 cc->cc_current = pcg; 2482 pc->pc_fullgroups = pcg->pcg_next; 2483 pc->pc_hits++; 2484 pc->pc_nfull--; 2485 mutex_exit(&pc->pc_lock); 2486 return true; 2487 } 2488 2489 /* 2490 * Nothing available locally or in cache. Take the slow 2491 * path: fetch a new object from the pool and construct 2492 * it. 
2493 */ 2494 pc->pc_misses++; 2495 mutex_exit(&pc->pc_lock); 2496 splx(s); 2497 2498 object = pool_get(&pc->pc_pool, flags); 2499 *objectp = object; 2500 if (__predict_false(object == NULL)) 2501 return false; 2502 2503 if (__predict_false((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0)) { 2504 pool_put(&pc->pc_pool, object); 2505 *objectp = NULL; 2506 return false; 2507 } 2508 2509 KASSERT((((vaddr_t)object + pc->pc_pool.pr_itemoffset) & 2510 (pc->pc_pool.pr_align - 1)) == 0); 2511 2512 if (pap != NULL) { 2513 #ifdef POOL_VTOPHYS 2514 *pap = POOL_VTOPHYS(object); 2515 #else 2516 *pap = POOL_PADDR_INVALID; 2517 #endif 2518 } 2519 2520 FREECHECK_OUT(&pc->pc_freecheck, object); 2521 return false; 2522 } 2523 2524 /* 2525 * pool_cache_get{,_paddr}: 2526 * 2527 * Get an object from a pool cache (optionally returning 2528 * the physical address of the object). 2529 */ 2530 void * 2531 pool_cache_get_paddr(pool_cache_t pc, int flags, paddr_t *pap) 2532 { 2533 pool_cache_cpu_t *cc; 2534 pcg_t *pcg; 2535 void *object; 2536 int s; 2537 2538 KASSERTMSG((!cpu_intr_p() && !cpu_softintr_p()) || 2539 (pc->pc_pool.pr_ipl != IPL_NONE || cold || panicstr != NULL), 2540 ("pool '%s' is IPL_NONE, but called from interrupt context\n", 2541 pc->pc_pool.pr_wchan)); 2542 2543 if (flags & PR_WAITOK) { 2544 ASSERT_SLEEPABLE(); 2545 } 2546 2547 /* Lock out interrupts and disable preemption. */ 2548 s = splvm(); 2549 while (/* CONSTCOND */ true) { 2550 /* Try and allocate an object from the current group. */ 2551 cc = pc->pc_cpus[curcpu()->ci_index]; 2552 KASSERT(cc->cc_cache == pc); 2553 pcg = cc->cc_current; 2554 if (__predict_true(pcg->pcg_avail > 0)) { 2555 object = pcg->pcg_objects[--pcg->pcg_avail].pcgo_va; 2556 if (__predict_false(pap != NULL)) 2557 *pap = pcg->pcg_objects[pcg->pcg_avail].pcgo_pa; 2558 #if defined(DIAGNOSTIC) 2559 pcg->pcg_objects[pcg->pcg_avail].pcgo_va = NULL; 2560 KASSERT(pcg->pcg_avail < pcg->pcg_size); 2561 KASSERT(object != NULL); 2562 #endif 2563 cc->cc_hits++; 2564 splx(s); 2565 FREECHECK_OUT(&pc->pc_freecheck, object); 2566 return object; 2567 } 2568 2569 /* 2570 * That failed. If the previous group isn't empty, swap 2571 * it with the current group and allocate from there. 2572 */ 2573 pcg = cc->cc_previous; 2574 if (__predict_true(pcg->pcg_avail > 0)) { 2575 cc->cc_previous = cc->cc_current; 2576 cc->cc_current = pcg; 2577 continue; 2578 } 2579 2580 /* 2581 * Can't allocate from either group: try the slow path. 2582 * If get_slow() allocated an object for us, or if 2583 * no more objects are available, it will return false. 2584 * Otherwise, we need to retry. 2585 */ 2586 if (!pool_cache_get_slow(cc, s, &object, pap, flags)) 2587 break; 2588 } 2589 2590 return object; 2591 } 2592 2593 static bool __noinline 2594 pool_cache_put_slow(pool_cache_cpu_t *cc, int s, void *object) 2595 { 2596 pcg_t *pcg, *cur; 2597 uint64_t ncsw; 2598 pool_cache_t pc; 2599 2600 KASSERT(cc->cc_current->pcg_avail == cc->cc_current->pcg_size); 2601 KASSERT(cc->cc_previous->pcg_avail == cc->cc_previous->pcg_size); 2602 2603 pc = cc->cc_cache; 2604 pcg = NULL; 2605 cc->cc_misses++; 2606 2607 /* 2608 * If there are no empty groups in the cache then allocate one 2609 * while still unlocked. 2610 */ 2611 if (__predict_false(pc->pc_emptygroups == NULL)) { 2612 if (__predict_true(!pool_cache_disable)) { 2613 pcg = pool_get(pc->pc_pcgpool, PR_NOWAIT); 2614 } 2615 if (__predict_true(pcg != NULL)) { 2616 pcg->pcg_avail = 0; 2617 pcg->pcg_size = pc->pc_pcgsize; 2618 } 2619 } 2620 2621 /* Lock the cache. 
*/
2622 	if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
2623 		ncsw = curlwp->l_ncsw;
2624 		mutex_enter(&pc->pc_lock);
2625 		pc->pc_contended++;
2626 
2627 		/*
2628 		 * If we context switched while locking, then our view of
2629 		 * the per-CPU data is invalid: retry.
2630 		 */
2631 		if (__predict_false(curlwp->l_ncsw != ncsw)) {
2632 			mutex_exit(&pc->pc_lock);
2633 			if (pcg != NULL) {
2634 				pool_put(pc->pc_pcgpool, pcg);
2635 			}
2636 			return true;
2637 		}
2638 	}
2639 
2640 	/* If we didn't allocate an empty group above, try the cache's list. */
2641 	if (pcg == NULL && pc->pc_emptygroups != NULL) {
2642 		pcg = pc->pc_emptygroups;
2643 		pc->pc_emptygroups = pcg->pcg_next;
2644 		pc->pc_nempty--;
2645 	}
2646 
2647 	/*
2648 	 * If there's an empty group, release our full group back
2649 	 * to the cache.  Install the empty group on the local CPU
2650 	 * and return.
2651 	 */
2652 	if (pcg != NULL) {
2653 		KASSERT(pcg->pcg_avail == 0);
2654 		if (__predict_false(cc->cc_previous == &pcg_dummy)) {
2655 			cc->cc_previous = pcg;
2656 		} else {
2657 			cur = cc->cc_current;
2658 			if (__predict_true(cur != &pcg_dummy)) {
2659 				KASSERT(cur->pcg_avail == cur->pcg_size);
2660 				cur->pcg_next = pc->pc_fullgroups;
2661 				pc->pc_fullgroups = cur;
2662 				pc->pc_nfull++;
2663 			}
2664 			cc->cc_current = pcg;
2665 		}
2666 		pc->pc_hits++;
2667 		mutex_exit(&pc->pc_lock);
2668 		return true;
2669 	}
2670 
2671 	/*
2672 	 * Nothing available locally or in cache, and we didn't
2673 	 * allocate an empty group.  Take the slow path and destroy
2674 	 * the object here and now.
2675 	 */
2676 	pc->pc_misses++;
2677 	mutex_exit(&pc->pc_lock);
2678 	splx(s);
2679 	pool_cache_destruct_object(pc, object);
2680 
2681 	return false;
2682 }
2683 
2684 /*
2685  * pool_cache_put{,_paddr}:
2686  *
2687  *	Put an object back into the pool cache (optionally caching the
2688  *	physical address of the object).
2689  */
2690 void
2691 pool_cache_put_paddr(pool_cache_t pc, void *object, paddr_t pa)
2692 {
2693 	pool_cache_cpu_t *cc;
2694 	pcg_t *pcg;
2695 	int s;
2696 
2697 	KASSERT(object != NULL);
2698 	FREECHECK_IN(&pc->pc_freecheck, object);
2699 
2700 	/* Lock out interrupts and disable preemption. */
2701 	s = splvm();
2702 	while (/* CONSTCOND */ true) {
2703 		/* If the current group isn't full, release it there. */
2704 		cc = pc->pc_cpus[curcpu()->ci_index];
2705 		KASSERT(cc->cc_cache == pc);
2706 		pcg = cc->cc_current;
2707 		if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
2708 			pcg->pcg_objects[pcg->pcg_avail].pcgo_va = object;
2709 			pcg->pcg_objects[pcg->pcg_avail].pcgo_pa = pa;
2710 			pcg->pcg_avail++;
2711 			cc->cc_hits++;
2712 			splx(s);
2713 			return;
2714 		}
2715 
2716 		/*
2717 		 * That failed.  If the previous group isn't full, swap
2718 		 * it with the current group and try again.
2719 		 */
2720 		pcg = cc->cc_previous;
2721 		if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
2722 			cc->cc_previous = cc->cc_current;
2723 			cc->cc_current = pcg;
2724 			continue;
2725 		}
2726 
2727 		/*
2728 		 * Can't free to either group: try the slow path.
2729 		 * If put_slow() releases the object for us, it
2730 		 * will return false.  Otherwise we need to retry.
2731 		 */
2732 		if (!pool_cache_put_slow(cc, s, object))
2733 			break;
2734 	}
2735 }
2736 
2737 /*
2738  * pool_cache_xcall:
2739  *
2740  *	Transfer objects from the per-CPU cache to the global cache.
2741  *	Run within a cross-call thread.
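 *	The per-CPU pointers are reset to the dummy group pcg_dummy
 *	rather than to NULL, so the fast paths in pool_cache_get_paddr()
 *	and pool_cache_put_paddr() can test pcg_avail and pcg_size
 *	without a NULL check.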
2742 */ 2743 static void 2744 pool_cache_xcall(pool_cache_t pc) 2745 { 2746 pool_cache_cpu_t *cc; 2747 pcg_t *prev, *cur, **list; 2748 int s; 2749 2750 s = splvm(); 2751 mutex_enter(&pc->pc_lock); 2752 cc = pc->pc_cpus[curcpu()->ci_index]; 2753 cur = cc->cc_current; 2754 cc->cc_current = __UNCONST(&pcg_dummy); 2755 prev = cc->cc_previous; 2756 cc->cc_previous = __UNCONST(&pcg_dummy); 2757 if (cur != &pcg_dummy) { 2758 if (cur->pcg_avail == cur->pcg_size) { 2759 list = &pc->pc_fullgroups; 2760 pc->pc_nfull++; 2761 } else if (cur->pcg_avail == 0) { 2762 list = &pc->pc_emptygroups; 2763 pc->pc_nempty++; 2764 } else { 2765 list = &pc->pc_partgroups; 2766 pc->pc_npart++; 2767 } 2768 cur->pcg_next = *list; 2769 *list = cur; 2770 } 2771 if (prev != &pcg_dummy) { 2772 if (prev->pcg_avail == prev->pcg_size) { 2773 list = &pc->pc_fullgroups; 2774 pc->pc_nfull++; 2775 } else if (prev->pcg_avail == 0) { 2776 list = &pc->pc_emptygroups; 2777 pc->pc_nempty++; 2778 } else { 2779 list = &pc->pc_partgroups; 2780 pc->pc_npart++; 2781 } 2782 prev->pcg_next = *list; 2783 *list = prev; 2784 } 2785 mutex_exit(&pc->pc_lock); 2786 splx(s); 2787 } 2788 2789 /* 2790 * Pool backend allocators. 2791 * 2792 * Each pool has a backend allocator that handles allocation, deallocation, 2793 * and any additional draining that might be needed. 2794 * 2795 * We provide two standard allocators: 2796 * 2797 * pool_allocator_kmem - the default when no allocator is specified 2798 * 2799 * pool_allocator_nointr - used for pools that will not be accessed 2800 * in interrupt context. 2801 */ 2802 void *pool_page_alloc(struct pool *, int); 2803 void pool_page_free(struct pool *, void *); 2804 2805 #ifdef POOL_SUBPAGE 2806 struct pool_allocator pool_allocator_kmem_fullpage = { 2807 pool_page_alloc, pool_page_free, 0, 2808 .pa_backingmapptr = &kmem_map, 2809 }; 2810 #else 2811 struct pool_allocator pool_allocator_kmem = { 2812 pool_page_alloc, pool_page_free, 0, 2813 .pa_backingmapptr = &kmem_map, 2814 }; 2815 #endif 2816 2817 void *pool_page_alloc_nointr(struct pool *, int); 2818 void pool_page_free_nointr(struct pool *, void *); 2819 2820 #ifdef POOL_SUBPAGE 2821 struct pool_allocator pool_allocator_nointr_fullpage = { 2822 pool_page_alloc_nointr, pool_page_free_nointr, 0, 2823 .pa_backingmapptr = &kernel_map, 2824 }; 2825 #else 2826 struct pool_allocator pool_allocator_nointr = { 2827 pool_page_alloc_nointr, pool_page_free_nointr, 0, 2828 .pa_backingmapptr = &kernel_map, 2829 }; 2830 #endif 2831 2832 #ifdef POOL_SUBPAGE 2833 void *pool_subpage_alloc(struct pool *, int); 2834 void pool_subpage_free(struct pool *, void *); 2835 2836 struct pool_allocator pool_allocator_kmem = { 2837 pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE, 2838 .pa_backingmapptr = &kmem_map, 2839 }; 2840 2841 void *pool_subpage_alloc_nointr(struct pool *, int); 2842 void pool_subpage_free_nointr(struct pool *, void *); 2843 2844 struct pool_allocator pool_allocator_nointr = { 2845 pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE, 2846 .pa_backingmapptr = &kmem_map, 2847 }; 2848 #endif /* POOL_SUBPAGE */ 2849 2850 static void * 2851 pool_allocator_alloc(struct pool *pp, int flags) 2852 { 2853 struct pool_allocator *pa = pp->pr_alloc; 2854 void *res; 2855 2856 res = (*pa->pa_alloc)(pp, flags); 2857 if (res == NULL && (flags & PR_WAITOK) == 0) { 2858 /* 2859 * We only run the drain hook here if PR_NOWAIT. 2860 * In other cases, the hook will be run in 2861 * pool_reclaim(). 
2862 */ 2863 if (pp->pr_drain_hook != NULL) { 2864 (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags); 2865 res = (*pa->pa_alloc)(pp, flags); 2866 } 2867 } 2868 return res; 2869 } 2870 2871 static void 2872 pool_allocator_free(struct pool *pp, void *v) 2873 { 2874 struct pool_allocator *pa = pp->pr_alloc; 2875 2876 (*pa->pa_free)(pp, v); 2877 } 2878 2879 void * 2880 pool_page_alloc(struct pool *pp, int flags) 2881 { 2882 bool waitok = (flags & PR_WAITOK) ? true : false; 2883 2884 return ((void *) uvm_km_alloc_poolpage_cache(kmem_map, waitok)); 2885 } 2886 2887 void 2888 pool_page_free(struct pool *pp, void *v) 2889 { 2890 2891 uvm_km_free_poolpage_cache(kmem_map, (vaddr_t) v); 2892 } 2893 2894 static void * 2895 pool_page_alloc_meta(struct pool *pp, int flags) 2896 { 2897 bool waitok = (flags & PR_WAITOK) ? true : false; 2898 2899 return ((void *) uvm_km_alloc_poolpage(kmem_map, waitok)); 2900 } 2901 2902 static void 2903 pool_page_free_meta(struct pool *pp, void *v) 2904 { 2905 2906 uvm_km_free_poolpage(kmem_map, (vaddr_t) v); 2907 } 2908 2909 #ifdef POOL_SUBPAGE 2910 /* Sub-page allocator, for machines with large hardware pages. */ 2911 void * 2912 pool_subpage_alloc(struct pool *pp, int flags) 2913 { 2914 return pool_get(&psppool, flags); 2915 } 2916 2917 void 2918 pool_subpage_free(struct pool *pp, void *v) 2919 { 2920 pool_put(&psppool, v); 2921 } 2922 2923 /* We don't provide a real nointr allocator. Maybe later. */ 2924 void * 2925 pool_subpage_alloc_nointr(struct pool *pp, int flags) 2926 { 2927 2928 return (pool_subpage_alloc(pp, flags)); 2929 } 2930 2931 void 2932 pool_subpage_free_nointr(struct pool *pp, void *v) 2933 { 2934 2935 pool_subpage_free(pp, v); 2936 } 2937 #endif /* POOL_SUBPAGE */ 2938 void * 2939 pool_page_alloc_nointr(struct pool *pp, int flags) 2940 { 2941 bool waitok = (flags & PR_WAITOK) ? 
true : false; 2942 2943 return ((void *) uvm_km_alloc_poolpage_cache(kernel_map, waitok)); 2944 } 2945 2946 void 2947 pool_page_free_nointr(struct pool *pp, void *v) 2948 { 2949 2950 uvm_km_free_poolpage_cache(kernel_map, (vaddr_t) v); 2951 } 2952 2953 #if defined(DDB) 2954 static bool 2955 pool_in_page(struct pool *pp, struct pool_item_header *ph, uintptr_t addr) 2956 { 2957 2958 return (uintptr_t)ph->ph_page <= addr && 2959 addr < (uintptr_t)ph->ph_page + pp->pr_alloc->pa_pagesz; 2960 } 2961 2962 static bool 2963 pool_in_item(struct pool *pp, void *item, uintptr_t addr) 2964 { 2965 2966 return (uintptr_t)item <= addr && addr < (uintptr_t)item + pp->pr_size; 2967 } 2968 2969 static bool 2970 pool_in_cg(struct pool *pp, struct pool_cache_group *pcg, uintptr_t addr) 2971 { 2972 int i; 2973 2974 if (pcg == NULL) { 2975 return false; 2976 } 2977 for (i = 0; i < pcg->pcg_avail; i++) { 2978 if (pool_in_item(pp, pcg->pcg_objects[i].pcgo_va, addr)) { 2979 return true; 2980 } 2981 } 2982 return false; 2983 } 2984 2985 static bool 2986 pool_allocated(struct pool *pp, struct pool_item_header *ph, uintptr_t addr) 2987 { 2988 2989 if ((pp->pr_roflags & PR_NOTOUCH) != 0) { 2990 unsigned int idx = pr_item_notouch_index(pp, ph, (void *)addr); 2991 pool_item_bitmap_t *bitmap = 2992 ph->ph_bitmap + (idx / BITMAP_SIZE); 2993 pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK); 2994 2995 return (*bitmap & mask) == 0; 2996 } else { 2997 struct pool_item *pi; 2998 2999 LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) { 3000 if (pool_in_item(pp, pi, addr)) { 3001 return false; 3002 } 3003 } 3004 return true; 3005 } 3006 } 3007 3008 void 3009 pool_whatis(uintptr_t addr, void (*pr)(const char *, ...)) 3010 { 3011 struct pool *pp; 3012 3013 TAILQ_FOREACH(pp, &pool_head, pr_poollist) { 3014 struct pool_item_header *ph; 3015 uintptr_t item; 3016 bool allocated = true; 3017 bool incache = false; 3018 bool incpucache = false; 3019 char cpucachestr[32]; 3020 3021 if ((pp->pr_roflags & PR_PHINPAGE) != 0) { 3022 LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) { 3023 if (pool_in_page(pp, ph, addr)) { 3024 goto found; 3025 } 3026 } 3027 LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) { 3028 if (pool_in_page(pp, ph, addr)) { 3029 allocated = 3030 pool_allocated(pp, ph, addr); 3031 goto found; 3032 } 3033 } 3034 LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) { 3035 if (pool_in_page(pp, ph, addr)) { 3036 allocated = false; 3037 goto found; 3038 } 3039 } 3040 continue; 3041 } else { 3042 ph = pr_find_pagehead_noalign(pp, (void *)addr); 3043 if (ph == NULL || !pool_in_page(pp, ph, addr)) { 3044 continue; 3045 } 3046 allocated = pool_allocated(pp, ph, addr); 3047 } 3048 found: 3049 if (allocated && pp->pr_cache) { 3050 pool_cache_t pc = pp->pr_cache; 3051 struct pool_cache_group *pcg; 3052 int i; 3053 3054 for (pcg = pc->pc_fullgroups; pcg != NULL; 3055 pcg = pcg->pcg_next) { 3056 if (pool_in_cg(pp, pcg, addr)) { 3057 incache = true; 3058 goto print; 3059 } 3060 } 3061 for (i = 0; i < __arraycount(pc->pc_cpus); i++) { 3062 pool_cache_cpu_t *cc; 3063 3064 if ((cc = pc->pc_cpus[i]) == NULL) { 3065 continue; 3066 } 3067 if (pool_in_cg(pp, cc->cc_current, addr) || 3068 pool_in_cg(pp, cc->cc_previous, addr)) { 3069 struct cpu_info *ci = 3070 cpu_lookup(i); 3071 3072 incpucache = true; 3073 snprintf(cpucachestr, 3074 sizeof(cpucachestr), 3075 "cached by CPU %u", 3076 ci->ci_index); 3077 goto print; 3078 } 3079 } 3080 } 3081 print: 3082 item = (uintptr_t)ph->ph_page + ph->ph_off; 3083 item = item + rounddown(addr - item, pp->pr_size); 
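		/*
		 * item now points at the base of the pool item containing
		 * addr: items are laid out contiguously from ph_page +
		 * ph_off in steps of pr_size, so rounding the offset down
		 * to a multiple of pr_size recovers the item's start.
		 */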
3084 		(*pr)("%p is %p+%zu in POOL '%s' (%s)\n",
3085 		    (void *)addr, (void *)item, (size_t)(addr - item),
3086 		    pp->pr_wchan,
3087 		    incpucache ? cpucachestr :
3088 		    incache ? "cached" : allocated ? "allocated" : "free");
3089 	}
3090 }
3091 #endif /* defined(DDB) */
3092 
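/*
 * Illustrative ddb session (the format matches the (*pr)() call in
 * pool_whatis() above; the address, offsets and pool name are made up):
 *
 *	db{0}> whatis 0xc1a8f234
 *	0xc1a8f234 is 0xc1a8f200+52 in POOL 'foopl' (allocated)
 */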