1 /* $NetBSD: subr_pool.c,v 1.206 2016/02/05 03:04:52 knakahara Exp $ */
2
3 /*-
4 * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008, 2010, 2014, 2015
5 * The NetBSD Foundation, Inc.
6 * All rights reserved.
7 *
8 * This code is derived from software contributed to The NetBSD Foundation
9 * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
10 * Simulation Facility, NASA Ames Research Center; by Andrew Doran, and by
11 * Maxime Villard.
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
24 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGE.
33 */
34
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.206 2016/02/05 03:04:52 knakahara Exp $");
37
38 #ifdef _KERNEL_OPT
39 #include "opt_ddb.h"
40 #include "opt_lockdebug.h"
41 #endif
42
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/sysctl.h>
46 #include <sys/bitops.h>
47 #include <sys/proc.h>
48 #include <sys/errno.h>
49 #include <sys/kernel.h>
50 #include <sys/vmem.h>
51 #include <sys/pool.h>
52 #include <sys/syslog.h>
53 #include <sys/debug.h>
54 #include <sys/lockdebug.h>
55 #include <sys/xcall.h>
56 #include <sys/cpu.h>
57 #include <sys/atomic.h>
58
59 #include <uvm/uvm_extern.h>
60
61 /*
62 * Pool resource management utility.
63 *
64 * Memory is allocated in pages which are split into pieces according to
65 * the pool item size. Each page is kept on one of three lists in the
66 * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
67 * for empty, full and partially-full pages respectively. The individual
68 * pool items are on a linked list headed by `ph_itemlist' in each page
69 * header. The memory for building the page list is either taken from
70 * the allocated pages themselves (for small pool items) or taken from
71 * an internal pool of page headers (`phpool').
72 */
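/*
 * Illustrative sketch of the typical pool life cycle.  This is not part
 * of the original file; "struct example_item", "example_pool" and the
 * "example_*" functions are hypothetical names used only to show the
 * pool_init()/pool_get()/pool_put()/pool_destroy() pattern:
 *
 *	static struct pool example_pool;
 *
 *	void
 *	example_attach(void)
 *	{
 *		pool_init(&example_pool, sizeof(struct example_item), 0, 0,
 *		    0, "exmplpl", NULL, IPL_NONE);
 *	}
 *
 *	struct example_item *
 *	example_alloc(void)
 *	{
 *		return pool_get(&example_pool, PR_WAITOK);
 *	}
 *
 *	void
 *	example_free(struct example_item *ei)
 *	{
 *		pool_put(&example_pool, ei);
 *	}
 *
 *	void
 *	example_detach(void)
 *	{
 *		pool_destroy(&example_pool);
 *	}
 */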
73
74 /* List of all pools. Not static, as it is needed by 'vmstat -i'. */
75 TAILQ_HEAD(, pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
76
77 /* Private pool for page header structures */
78 #define PHPOOL_MAX 8
79 static struct pool phpool[PHPOOL_MAX];
80 #define PHPOOL_FREELIST_NELEM(idx) \
81 (((idx) == 0) ? 0 : BITMAP_SIZE * (1 << (idx)))
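/*
 * For example (illustrative): PHPOOL_FREELIST_NELEM(0) is 0, so phpool[0]
 * serves page headers that carry no item bitmap.  PHPOOL_FREELIST_NELEM(3)
 * is BITMAP_SIZE * (1 << 3), i.e. 256 with 32-bit bitmap words, so
 * phpool[3] serves PR_NOTOUCH pools whose pages hold up to 256 items.
 */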
82
83 #ifdef POOL_SUBPAGE
84 /* Pool of subpages for use by normal pools. */
85 static struct pool psppool;
86 #endif
87
88 #ifdef POOL_REDZONE
89 # define POOL_REDZONE_SIZE 2
90 static void pool_redzone_init(struct pool *, size_t);
91 static void pool_redzone_fill(struct pool *, void *);
92 static void pool_redzone_check(struct pool *, void *);
93 #else
94 # define pool_redzone_init(pp, sz) /* NOTHING */
95 # define pool_redzone_fill(pp, ptr) /* NOTHING */
96 # define pool_redzone_check(pp, ptr) /* NOTHING */
97 #endif
98
99 static void *pool_page_alloc_meta(struct pool *, int);
100 static void pool_page_free_meta(struct pool *, void *);
101
102 /* allocator for pool metadata */
103 struct pool_allocator pool_allocator_meta = {
104 .pa_alloc = pool_page_alloc_meta,
105 .pa_free = pool_page_free_meta,
106 .pa_pagesz = 0
107 };
108
109 /* # of seconds to retain page after last use */
110 int pool_inactive_time = 10;
111
112 /* Next candidate for drainage (see pool_drain()) */
113 static struct pool *drainpp;
114
115 /* This lock protects both pool_head and drainpp. */
116 static kmutex_t pool_head_lock;
117 static kcondvar_t pool_busy;
118
119 /* This lock protects initialization of a potentially shared pool allocator */
120 static kmutex_t pool_allocator_lock;
121
122 typedef uint32_t pool_item_bitmap_t;
123 #define BITMAP_SIZE (CHAR_BIT * sizeof(pool_item_bitmap_t))
124 #define BITMAP_MASK (BITMAP_SIZE - 1)
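/*
 * Worked example (illustrative): with 32-bit bitmap words, the free bit
 * for item index 37 lives in word 37 / BITMAP_SIZE == 1, at bit
 * 37 & BITMAP_MASK == 5.  A set bit means the item is free; see
 * pr_item_notouch_get()/pr_item_notouch_put() below.
 */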
125
126 struct pool_item_header {
127 /* Page headers */
128 LIST_ENTRY(pool_item_header)
129 ph_pagelist; /* pool page list */
130 SPLAY_ENTRY(pool_item_header)
131 ph_node; /* Off-page page headers */
132 void * ph_page; /* this page's address */
133 uint32_t ph_time; /* last referenced */
134 uint16_t ph_nmissing; /* # of chunks in use */
135 uint16_t ph_off; /* start offset in page */
136 union {
137 /* !PR_NOTOUCH */
138 struct {
139 LIST_HEAD(, pool_item)
140 phu_itemlist; /* chunk list for this page */
141 } phu_normal;
142 /* PR_NOTOUCH */
143 struct {
144 pool_item_bitmap_t phu_bitmap[1];
145 } phu_notouch;
146 } ph_u;
147 };
148 #define ph_itemlist ph_u.phu_normal.phu_itemlist
149 #define ph_bitmap ph_u.phu_notouch.phu_bitmap
150
151 struct pool_item {
152 #ifdef DIAGNOSTIC
153 u_int pi_magic;
154 #endif
155 #define PI_MAGIC 0xdeaddeadU
156 /* Other entries use only this list entry */
157 LIST_ENTRY(pool_item) pi_list;
158 };
159
160 #define POOL_NEEDS_CATCHUP(pp) \
161 ((pp)->pr_nitems < (pp)->pr_minitems)
162
163 /*
164 * Pool cache management.
165 *
166 * Pool caches provide a way for constructed objects to be cached by the
167 * pool subsystem. This can lead to performance improvements by avoiding
168 * needless object construction/destruction; it is deferred until absolutely
169 * necessary.
170 *
171 * Caches are grouped into cache groups. Each cache group references up
172 * to PCG_NUMOBJECTS constructed objects. When a cache allocates an
173 * object from the pool, it calls the object's constructor and places it
174 * into a cache group. When a cache group frees an object back to the
175 * pool, it first calls the object's destructor. This allows the object
176 * to persist in constructed form while freed to the cache.
177 *
178 * The pool references each cache, so that when a pool is drained by the
179 * pagedaemon, it can drain each individual cache as well. Each time a
180 * cache is drained, the most idle cache group is freed to the pool in
181 * its entirety.
182 *
183  * Pool caches are layered on top of pools. By layering them, we can avoid
184 * the complexity of cache management for pools which would not benefit
185 * from it.
186 */
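/*
 * Illustrative sketch of pool cache usage.  This is not part of the
 * original file; "struct example_obj", "example_cache" and the
 * "example_*" functions are hypothetical names used only to show the
 * ctor/dtor contract:
 *
 *	static pool_cache_t example_cache;
 *
 *	static int
 *	example_ctor(void *arg, void *obj, int flags)
 *	{
 *		struct example_obj *eo = obj;
 *
 *		mutex_init(&eo->eo_lock, MUTEX_DEFAULT, IPL_NONE);
 *		return 0;
 *	}
 *
 *	static void
 *	example_dtor(void *arg, void *obj)
 *	{
 *		struct example_obj *eo = obj;
 *
 *		mutex_destroy(&eo->eo_lock);
 *	}
 *
 *	void
 *	example_setup(void)
 *	{
 *		example_cache = pool_cache_init(sizeof(struct example_obj),
 *		    coherency_unit, 0, 0, "exmplobj", NULL, IPL_NONE,
 *		    example_ctor, example_dtor, NULL);
 *	}
 *
 * Objects returned by pool_cache_get(example_cache, PR_WAITOK) are
 * already constructed; pool_cache_put(example_cache, obj) hands them
 * back to the per-CPU layer without running the destructor, which only
 * runs when an object is released to the underlying pool.
 */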
187
188 static struct pool pcg_normal_pool;
189 static struct pool pcg_large_pool;
190 static struct pool cache_pool;
191 static struct pool cache_cpu_pool;
192
193 pool_cache_t pnbuf_cache; /* pathname buffer cache */
194
195 /* List of all caches. */
196 TAILQ_HEAD(,pool_cache) pool_cache_head =
197 TAILQ_HEAD_INITIALIZER(pool_cache_head);
198
199 int pool_cache_disable; /* global disable for caching */
200 static const pcg_t pcg_dummy; /* zero sized: always empty, yet always full */
201
202 static bool pool_cache_put_slow(pool_cache_cpu_t *, int,
203 void *);
204 static bool pool_cache_get_slow(pool_cache_cpu_t *, int,
205 void **, paddr_t *, int);
206 static void pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);
207 static void pool_cache_invalidate_groups(pool_cache_t, pcg_t *);
208 static void pool_cache_invalidate_cpu(pool_cache_t, u_int);
209 static void pool_cache_transfer(pool_cache_t);
210
211 static int pool_catchup(struct pool *);
212 static void pool_prime_page(struct pool *, void *,
213 struct pool_item_header *);
214 static void pool_update_curpage(struct pool *);
215
216 static int pool_grow(struct pool *, int);
217 static void *pool_allocator_alloc(struct pool *, int);
218 static void pool_allocator_free(struct pool *, void *);
219
220 static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
221 void (*)(const char *, ...) __printflike(1, 2));
222 static void pool_print1(struct pool *, const char *,
223 void (*)(const char *, ...) __printflike(1, 2));
224
225 static int pool_chk_page(struct pool *, const char *,
226 struct pool_item_header *);
227
228 static inline unsigned int
229 pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
230 const void *v)
231 {
232 const char *cp = v;
233 unsigned int idx;
234
235 KASSERT(pp->pr_roflags & PR_NOTOUCH);
236 idx = (cp - (char *)ph->ph_page - ph->ph_off) / pp->pr_size;
237 KASSERT(idx < pp->pr_itemsperpage);
238 return idx;
239 }
240
241 static inline void
242 pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph,
243 void *obj)
244 {
245 unsigned int idx = pr_item_notouch_index(pp, ph, obj);
246 pool_item_bitmap_t *bitmap = ph->ph_bitmap + (idx / BITMAP_SIZE);
247 pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);
248
249 KASSERT((*bitmap & mask) == 0);
250 *bitmap |= mask;
251 }
252
253 static inline void *
254 pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph)
255 {
256 pool_item_bitmap_t *bitmap = ph->ph_bitmap;
257 unsigned int idx;
258 int i;
259
260 for (i = 0; ; i++) {
261 int bit;
262
263 KASSERT((i * BITMAP_SIZE) < pp->pr_itemsperpage);
264 bit = ffs32(bitmap[i]);
265 if (bit) {
266 pool_item_bitmap_t mask;
267
268 bit--;
269 idx = (i * BITMAP_SIZE) + bit;
270 mask = 1 << bit;
271 KASSERT((bitmap[i] & mask) != 0);
272 bitmap[i] &= ~mask;
273 break;
274 }
275 }
276 KASSERT(idx < pp->pr_itemsperpage);
277 return (char *)ph->ph_page + ph->ph_off + idx * pp->pr_size;
278 }
279
280 static inline void
281 pr_item_notouch_init(const struct pool *pp, struct pool_item_header *ph)
282 {
283 pool_item_bitmap_t *bitmap = ph->ph_bitmap;
284 const int n = howmany(pp->pr_itemsperpage, BITMAP_SIZE);
285 int i;
286
287 for (i = 0; i < n; i++) {
288 bitmap[i] = (pool_item_bitmap_t)-1;
289 }
290 }
291
292 static inline int
293 phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
294 {
295
296 /*
297 	 * We consider a pool_item_header with a smaller ph_page to be bigger.
298 	 * (This unnatural ordering is for the benefit of pr_find_pagehead.)
299 */
300
301 if (a->ph_page < b->ph_page)
302 return (1);
303 else if (a->ph_page > b->ph_page)
304 return (-1);
305 else
306 return (0);
307 }
308
309 SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
310 SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
311
312 static inline struct pool_item_header *
313 pr_find_pagehead_noalign(struct pool *pp, void *v)
314 {
315 struct pool_item_header *ph, tmp;
316
317 tmp.ph_page = (void *)(uintptr_t)v;
318 ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
319 if (ph == NULL) {
320 ph = SPLAY_ROOT(&pp->pr_phtree);
321 if (ph != NULL && phtree_compare(&tmp, ph) >= 0) {
322 ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph);
323 }
324 KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0);
325 }
326
327 return ph;
328 }
329
330 /*
331 * Return the pool page header based on item address.
332 */
333 static inline struct pool_item_header *
334 pr_find_pagehead(struct pool *pp, void *v)
335 {
336 struct pool_item_header *ph, tmp;
337
338 if ((pp->pr_roflags & PR_NOALIGN) != 0) {
339 ph = pr_find_pagehead_noalign(pp, v);
340 } else {
341 void *page =
342 (void *)((uintptr_t)v & pp->pr_alloc->pa_pagemask);
343
344 if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
345 ph = (struct pool_item_header *)((char *)page + pp->pr_phoffset);
346 } else {
347 tmp.ph_page = page;
348 ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
349 }
350 }
351
352 KASSERT(ph == NULL || ((pp->pr_roflags & PR_PHINPAGE) != 0) ||
353 ((char *)ph->ph_page <= (char *)v &&
354 (char *)v < (char *)ph->ph_page + pp->pr_alloc->pa_pagesz));
355 return ph;
356 }
357
358 static void
359 pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq)
360 {
361 struct pool_item_header *ph;
362
363 while ((ph = LIST_FIRST(pq)) != NULL) {
364 LIST_REMOVE(ph, ph_pagelist);
365 pool_allocator_free(pp, ph->ph_page);
366 if ((pp->pr_roflags & PR_PHINPAGE) == 0)
367 pool_put(pp->pr_phpool, ph);
368 }
369 }
370
371 /*
372 * Remove a page from the pool.
373 */
374 static inline void
375 pr_rmpage(struct pool *pp, struct pool_item_header *ph,
376 struct pool_pagelist *pq)
377 {
378
379 KASSERT(mutex_owned(&pp->pr_lock));
380
381 /*
382 * If the page was idle, decrement the idle page count.
383 */
384 if (ph->ph_nmissing == 0) {
385 #ifdef DIAGNOSTIC
386 if (pp->pr_nidle == 0)
387 panic("pr_rmpage: nidle inconsistent");
388 if (pp->pr_nitems < pp->pr_itemsperpage)
389 panic("pr_rmpage: nitems inconsistent");
390 #endif
391 pp->pr_nidle--;
392 }
393
394 pp->pr_nitems -= pp->pr_itemsperpage;
395
396 /*
397 * Unlink the page from the pool and queue it for release.
398 */
399 LIST_REMOVE(ph, ph_pagelist);
400 if ((pp->pr_roflags & PR_PHINPAGE) == 0)
401 SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
402 LIST_INSERT_HEAD(pq, ph, ph_pagelist);
403
404 pp->pr_npages--;
405 pp->pr_npagefree++;
406
407 pool_update_curpage(pp);
408 }
409
410 /*
411 * Initialize all the pools listed in the "pools" link set.
412 */
413 void
414 pool_subsystem_init(void)
415 {
416 size_t size;
417 int idx;
418
419 mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE);
420 mutex_init(&pool_allocator_lock, MUTEX_DEFAULT, IPL_NONE);
421 cv_init(&pool_busy, "poolbusy");
422
423 /*
424 * Initialize private page header pool and cache magazine pool if we
425 * haven't done so yet.
426 */
427 for (idx = 0; idx < PHPOOL_MAX; idx++) {
428 static char phpool_names[PHPOOL_MAX][6+1+6+1];
429 int nelem;
430 size_t sz;
431
432 nelem = PHPOOL_FREELIST_NELEM(idx);
433 snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
434 "phpool-%d", nelem);
435 sz = sizeof(struct pool_item_header);
436 if (nelem) {
437 sz = offsetof(struct pool_item_header,
438 ph_bitmap[howmany(nelem, BITMAP_SIZE)]);
439 }
440 pool_init(&phpool[idx], sz, 0, 0, 0,
441 phpool_names[idx], &pool_allocator_meta, IPL_VM);
442 }
443 #ifdef POOL_SUBPAGE
444 pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
445 PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM);
446 #endif
447
448 size = sizeof(pcg_t) +
449 (PCG_NOBJECTS_NORMAL - 1) * sizeof(pcgpair_t);
450 pool_init(&pcg_normal_pool, size, coherency_unit, 0, 0,
451 "pcgnormal", &pool_allocator_meta, IPL_VM);
452
453 size = sizeof(pcg_t) +
454 (PCG_NOBJECTS_LARGE - 1) * sizeof(pcgpair_t);
455 pool_init(&pcg_large_pool, size, coherency_unit, 0, 0,
456 "pcglarge", &pool_allocator_meta, IPL_VM);
457
458 pool_init(&cache_pool, sizeof(struct pool_cache), coherency_unit,
459 0, 0, "pcache", &pool_allocator_meta, IPL_NONE);
460
461 pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), coherency_unit,
462 0, 0, "pcachecpu", &pool_allocator_meta, IPL_NONE);
463 }
464
465 /*
466 * Initialize the given pool resource structure.
467 *
468 * We export this routine to allow other kernel parts to declare
469 * static pools that must be initialized before kmem(9) is available.
470 */
471 void
472 pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
473 const char *wchan, struct pool_allocator *palloc, int ipl)
474 {
475 struct pool *pp1;
476 size_t trysize, phsize, prsize;
477 int off, slack;
478
479 #ifdef DEBUG
480 if (__predict_true(!cold))
481 mutex_enter(&pool_head_lock);
482 /*
483 * Check that the pool hasn't already been initialised and
484 * added to the list of all pools.
485 */
486 TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
487 if (pp == pp1)
488 panic("pool_init: pool %s already initialised",
489 wchan);
490 }
491 if (__predict_true(!cold))
492 mutex_exit(&pool_head_lock);
493 #endif
494
495 if (palloc == NULL)
496 palloc = &pool_allocator_kmem;
497 #ifdef POOL_SUBPAGE
498 if (size > palloc->pa_pagesz) {
499 if (palloc == &pool_allocator_kmem)
500 palloc = &pool_allocator_kmem_fullpage;
501 else if (palloc == &pool_allocator_nointr)
502 palloc = &pool_allocator_nointr_fullpage;
503 }
504 #endif /* POOL_SUBPAGE */
505 if (!cold)
506 mutex_enter(&pool_allocator_lock);
507 if (palloc->pa_refcnt++ == 0) {
508 if (palloc->pa_pagesz == 0)
509 palloc->pa_pagesz = PAGE_SIZE;
510
511 TAILQ_INIT(&palloc->pa_list);
512
513 mutex_init(&palloc->pa_lock, MUTEX_DEFAULT, IPL_VM);
514 palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
515 palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
516 }
517 if (!cold)
518 mutex_exit(&pool_allocator_lock);
519
520 if (align == 0)
521 align = ALIGN(1);
522
523 prsize = size;
524 if ((flags & PR_NOTOUCH) == 0 && prsize < sizeof(struct pool_item))
525 prsize = sizeof(struct pool_item);
526
527 prsize = roundup(prsize, align);
528 #ifdef DIAGNOSTIC
529 if (prsize > palloc->pa_pagesz)
530 panic("pool_init: pool item size (%zu) too large", prsize);
531 #endif
532
533 /*
534 * Initialize the pool structure.
535 */
536 LIST_INIT(&pp->pr_emptypages);
537 LIST_INIT(&pp->pr_fullpages);
538 LIST_INIT(&pp->pr_partpages);
539 pp->pr_cache = NULL;
540 pp->pr_curpage = NULL;
541 pp->pr_npages = 0;
542 pp->pr_minitems = 0;
543 pp->pr_minpages = 0;
544 pp->pr_maxpages = UINT_MAX;
545 pp->pr_roflags = flags;
546 pp->pr_flags = 0;
547 pp->pr_size = prsize;
548 pp->pr_align = align;
549 pp->pr_wchan = wchan;
550 pp->pr_alloc = palloc;
551 pp->pr_nitems = 0;
552 pp->pr_nout = 0;
553 pp->pr_hardlimit = UINT_MAX;
554 pp->pr_hardlimit_warning = NULL;
555 pp->pr_hardlimit_ratecap.tv_sec = 0;
556 pp->pr_hardlimit_ratecap.tv_usec = 0;
557 pp->pr_hardlimit_warning_last.tv_sec = 0;
558 pp->pr_hardlimit_warning_last.tv_usec = 0;
559 pp->pr_drain_hook = NULL;
560 pp->pr_drain_hook_arg = NULL;
561 pp->pr_freecheck = NULL;
562 pool_redzone_init(pp, size);
563
564 /*
565 	 * Decide whether to put the page header off-page, to avoid wasting
566 	 * too large a part of the page, or because the item is too big.
567 	 * Off-page page headers are kept in a splay tree, so we can match
568 	 * a returned item with its header based on the page address.
569 	 * We use 1/16 of the page size and about 8 times the item
570 	 * size as the threshold.  (XXX: tune)
571 *
572 * However, we'll put the header into the page if we can put
573 * it without wasting any items.
574 *
575 * Silently enforce `0 <= ioff < align'.
576 */
577 pp->pr_itemoffset = ioff %= align;
578 /* See the comment below about reserved bytes. */
579 trysize = palloc->pa_pagesz - ((align - ioff) % align);
580 phsize = ALIGN(sizeof(struct pool_item_header));
581 if (pp->pr_roflags & PR_PHINPAGE ||
582 ((pp->pr_roflags & (PR_NOTOUCH | PR_NOALIGN)) == 0 &&
583 (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
584 trysize / pp->pr_size == (trysize - phsize) / pp->pr_size))) {
585 /* Use the end of the page for the page header */
586 pp->pr_roflags |= PR_PHINPAGE;
587 pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
588 } else {
589 /* The page header will be taken from our page header pool */
590 pp->pr_phoffset = 0;
591 off = palloc->pa_pagesz;
592 SPLAY_INIT(&pp->pr_phtree);
593 }
594
595 /*
596 * Alignment is to take place at `ioff' within the item. This means
597 * we must reserve up to `align - 1' bytes on the page to allow
598 * appropriate positioning of each item.
599 */
600 pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
601 KASSERT(pp->pr_itemsperpage != 0);
602 if ((pp->pr_roflags & PR_NOTOUCH)) {
603 int idx;
604
605 for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
606 idx++) {
607 /* nothing */
608 }
609 if (idx >= PHPOOL_MAX) {
610 /*
611 			 * If you see this panic, consider tweaking
612 * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
613 */
614 panic("%s: too large itemsperpage(%d) for PR_NOTOUCH",
615 pp->pr_wchan, pp->pr_itemsperpage);
616 }
617 pp->pr_phpool = &phpool[idx];
618 } else if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
619 pp->pr_phpool = &phpool[0];
620 }
621 #if defined(DIAGNOSTIC)
622 else {
623 pp->pr_phpool = NULL;
624 }
625 #endif
626
627 /*
628 * Use the slack between the chunks and the page header
629 * for "cache coloring".
630 */
631 slack = off - pp->pr_itemsperpage * pp->pr_size;
632 pp->pr_maxcolor = (slack / align) * align;
633 pp->pr_curcolor = 0;
634
635 pp->pr_nget = 0;
636 pp->pr_nfail = 0;
637 pp->pr_nput = 0;
638 pp->pr_npagealloc = 0;
639 pp->pr_npagefree = 0;
640 pp->pr_hiwat = 0;
641 pp->pr_nidle = 0;
642 pp->pr_refcnt = 0;
643
644 mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl);
645 cv_init(&pp->pr_cv, wchan);
646 pp->pr_ipl = ipl;
647
648 /* Insert into the list of all pools. */
649 if (!cold)
650 mutex_enter(&pool_head_lock);
651 TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
652 if (strcmp(pp1->pr_wchan, pp->pr_wchan) > 0)
653 break;
654 }
655 if (pp1 == NULL)
656 TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
657 else
658 TAILQ_INSERT_BEFORE(pp1, pp, pr_poollist);
659 if (!cold)
660 mutex_exit(&pool_head_lock);
661
662 /* Insert this into the list of pools using this allocator. */
663 if (!cold)
664 mutex_enter(&palloc->pa_lock);
665 TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
666 if (!cold)
667 mutex_exit(&palloc->pa_lock);
668 }
669
670 /*
671  * De-commission a pool resource.
672 */
673 void
674 pool_destroy(struct pool *pp)
675 {
676 struct pool_pagelist pq;
677 struct pool_item_header *ph;
678
679 /* Remove from global pool list */
680 mutex_enter(&pool_head_lock);
681 while (pp->pr_refcnt != 0)
682 cv_wait(&pool_busy, &pool_head_lock);
683 TAILQ_REMOVE(&pool_head, pp, pr_poollist);
684 if (drainpp == pp)
685 drainpp = NULL;
686 mutex_exit(&pool_head_lock);
687
688 /* Remove this pool from its allocator's list of pools. */
689 mutex_enter(&pp->pr_alloc->pa_lock);
690 TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
691 mutex_exit(&pp->pr_alloc->pa_lock);
692
693 mutex_enter(&pool_allocator_lock);
694 if (--pp->pr_alloc->pa_refcnt == 0)
695 mutex_destroy(&pp->pr_alloc->pa_lock);
696 mutex_exit(&pool_allocator_lock);
697
698 mutex_enter(&pp->pr_lock);
699
700 KASSERT(pp->pr_cache == NULL);
701
702 #ifdef DIAGNOSTIC
703 if (pp->pr_nout != 0) {
704 panic("pool_destroy: pool busy: still out: %u",
705 pp->pr_nout);
706 }
707 #endif
708
709 KASSERT(LIST_EMPTY(&pp->pr_fullpages));
710 KASSERT(LIST_EMPTY(&pp->pr_partpages));
711
712 /* Remove all pages */
713 LIST_INIT(&pq);
714 while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
715 pr_rmpage(pp, ph, &pq);
716
717 mutex_exit(&pp->pr_lock);
718
719 pr_pagelist_free(pp, &pq);
720 cv_destroy(&pp->pr_cv);
721 mutex_destroy(&pp->pr_lock);
722 }
723
724 void
725 pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
726 {
727
728 /* XXX no locking -- must be used just after pool_init() */
729 #ifdef DIAGNOSTIC
730 if (pp->pr_drain_hook != NULL)
731 panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
732 #endif
733 pp->pr_drain_hook = fn;
734 pp->pr_drain_hook_arg = arg;
735 }
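/*
 * Illustrative sketch of a drain hook (not part of the original file;
 * the "example_*" names are hypothetical).  The hook is called with the
 * pool unlocked when the pool hits its hard limit or when the pool is
 * reclaimed; "flags" carries the PR_* flags of the triggering call, so
 * the hook may only sleep when PR_WAITOK is set:
 *
 *	static void
 *	example_drain(void *arg, int flags)
 *	{
 *		struct example_softc *sc = arg;
 *
 *		example_trim_caches(sc, (flags & PR_WAITOK) != 0);
 *	}
 *
 * The hook must be registered right after pool_init(), before the pool
 * is visible to other threads:
 *
 *	pool_set_drain_hook(&example_pool, example_drain, sc);
 */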
736
737 static struct pool_item_header *
738 pool_alloc_item_header(struct pool *pp, void *storage, int flags)
739 {
740 struct pool_item_header *ph;
741
742 if ((pp->pr_roflags & PR_PHINPAGE) != 0)
743 ph = (struct pool_item_header *) ((char *)storage + pp->pr_phoffset);
744 else
745 ph = pool_get(pp->pr_phpool, flags);
746
747 return (ph);
748 }
749
750 /*
751 * Grab an item from the pool.
752 */
753 void *
754 pool_get(struct pool *pp, int flags)
755 {
756 struct pool_item *pi;
757 struct pool_item_header *ph;
758 void *v;
759
760 #ifdef DIAGNOSTIC
761 if (pp->pr_itemsperpage == 0)
762 panic("pool_get: pool '%s': pr_itemsperpage is zero, "
763 "pool not initialized?", pp->pr_wchan);
764 if ((cpu_intr_p() || cpu_softintr_p()) && pp->pr_ipl == IPL_NONE &&
765 !cold && panicstr == NULL)
766 panic("pool '%s' is IPL_NONE, but called from "
767 "interrupt context\n", pp->pr_wchan);
768 #endif
769 if (flags & PR_WAITOK) {
770 ASSERT_SLEEPABLE();
771 }
772
773 mutex_enter(&pp->pr_lock);
774 startover:
775 /*
776 * Check to see if we've reached the hard limit. If we have,
777 * and we can wait, then wait until an item has been returned to
778 * the pool.
779 */
780 #ifdef DIAGNOSTIC
781 if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
782 mutex_exit(&pp->pr_lock);
783 panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
784 }
785 #endif
786 if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
787 if (pp->pr_drain_hook != NULL) {
788 /*
789 * Since the drain hook is going to free things
790 * back to the pool, unlock, call the hook, re-lock,
791 * and check the hardlimit condition again.
792 */
793 mutex_exit(&pp->pr_lock);
794 (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
795 mutex_enter(&pp->pr_lock);
796 if (pp->pr_nout < pp->pr_hardlimit)
797 goto startover;
798 }
799
800 if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
801 /*
802 * XXX: A warning isn't logged in this case. Should
803 * it be?
804 */
805 pp->pr_flags |= PR_WANTED;
806 cv_wait(&pp->pr_cv, &pp->pr_lock);
807 goto startover;
808 }
809
810 /*
811 * Log a message that the hard limit has been hit.
812 */
813 if (pp->pr_hardlimit_warning != NULL &&
814 ratecheck(&pp->pr_hardlimit_warning_last,
815 &pp->pr_hardlimit_ratecap))
816 log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
817
818 pp->pr_nfail++;
819
820 mutex_exit(&pp->pr_lock);
821 return (NULL);
822 }
823
824 /*
825 * The convention we use is that if `curpage' is not NULL, then
826 * it points at a non-empty bucket. In particular, `curpage'
827 * never points at a page header which has PR_PHINPAGE set and
828 * has no items in its bucket.
829 */
830 if ((ph = pp->pr_curpage) == NULL) {
831 int error;
832
833 #ifdef DIAGNOSTIC
834 if (pp->pr_nitems != 0) {
835 mutex_exit(&pp->pr_lock);
836 printf("pool_get: %s: curpage NULL, nitems %u\n",
837 pp->pr_wchan, pp->pr_nitems);
838 panic("pool_get: nitems inconsistent");
839 }
840 #endif
841
842 /*
843 * Call the back-end page allocator for more memory.
844 * Release the pool lock, as the back-end page allocator
845 * may block.
846 */
847 error = pool_grow(pp, flags);
848 if (error != 0) {
849 /*
850 * We were unable to allocate a page or item
851 * header, but we released the lock during
852 * allocation, so perhaps items were freed
853 * back to the pool. Check for this case.
854 */
855 if (pp->pr_curpage != NULL)
856 goto startover;
857
858 pp->pr_nfail++;
859 mutex_exit(&pp->pr_lock);
860 return (NULL);
861 }
862
863 /* Start the allocation process over. */
864 goto startover;
865 }
866 if (pp->pr_roflags & PR_NOTOUCH) {
867 #ifdef DIAGNOSTIC
868 if (__predict_false(ph->ph_nmissing == pp->pr_itemsperpage)) {
869 mutex_exit(&pp->pr_lock);
870 panic("pool_get: %s: page empty", pp->pr_wchan);
871 }
872 #endif
873 v = pr_item_notouch_get(pp, ph);
874 } else {
875 v = pi = LIST_FIRST(&ph->ph_itemlist);
876 if (__predict_false(v == NULL)) {
877 mutex_exit(&pp->pr_lock);
878 panic("pool_get: %s: page empty", pp->pr_wchan);
879 }
880 #ifdef DIAGNOSTIC
881 if (__predict_false(pp->pr_nitems == 0)) {
882 mutex_exit(&pp->pr_lock);
883 printf("pool_get: %s: items on itemlist, nitems %u\n",
884 pp->pr_wchan, pp->pr_nitems);
885 panic("pool_get: nitems inconsistent");
886 }
887 #endif
888
889 #ifdef DIAGNOSTIC
890 if (__predict_false(pi->pi_magic != PI_MAGIC)) {
891 panic("pool_get(%s): free list modified: "
892 "magic=%x; page %p; item addr %p\n",
893 pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
894 }
895 #endif
896
897 /*
898 * Remove from item list.
899 */
900 LIST_REMOVE(pi, pi_list);
901 }
902 pp->pr_nitems--;
903 pp->pr_nout++;
904 if (ph->ph_nmissing == 0) {
905 #ifdef DIAGNOSTIC
906 if (__predict_false(pp->pr_nidle == 0))
907 panic("pool_get: nidle inconsistent");
908 #endif
909 pp->pr_nidle--;
910
911 /*
912 * This page was previously empty. Move it to the list of
913 * partially-full pages. This page is already curpage.
914 */
915 LIST_REMOVE(ph, ph_pagelist);
916 LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
917 }
918 ph->ph_nmissing++;
919 if (ph->ph_nmissing == pp->pr_itemsperpage) {
920 #ifdef DIAGNOSTIC
921 if (__predict_false((pp->pr_roflags & PR_NOTOUCH) == 0 &&
922 !LIST_EMPTY(&ph->ph_itemlist))) {
923 mutex_exit(&pp->pr_lock);
924 panic("pool_get: %s: nmissing inconsistent",
925 pp->pr_wchan);
926 }
927 #endif
928 /*
929 * This page is now full. Move it to the full list
930 * and select a new current page.
931 */
932 LIST_REMOVE(ph, ph_pagelist);
933 LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
934 pool_update_curpage(pp);
935 }
936
937 pp->pr_nget++;
938
939 /*
940 * If we have a low water mark and we are now below that low
941 * water mark, add more items to the pool.
942 */
943 if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
944 /*
945 * XXX: Should we log a warning? Should we set up a timeout
946 * to try again in a second or so? The latter could break
947 * a caller's assumptions about interrupt protection, etc.
948 */
949 }
950
951 mutex_exit(&pp->pr_lock);
952 KASSERT((((vaddr_t)v + pp->pr_itemoffset) & (pp->pr_align - 1)) == 0);
953 FREECHECK_OUT(&pp->pr_freecheck, v);
954 pool_redzone_fill(pp, v);
955 return (v);
956 }
957
958 /*
959 * Internal version of pool_put(). Pool is already locked/entered.
960 */
961 static void
962 pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
963 {
964 struct pool_item *pi = v;
965 struct pool_item_header *ph;
966
967 KASSERT(mutex_owned(&pp->pr_lock));
968 pool_redzone_check(pp, v);
969 FREECHECK_IN(&pp->pr_freecheck, v);
970 LOCKDEBUG_MEM_CHECK(v, pp->pr_size);
971
972 #ifdef DIAGNOSTIC
973 if (__predict_false(pp->pr_nout == 0)) {
974 printf("pool %s: putting with none out\n",
975 pp->pr_wchan);
976 panic("pool_put");
977 }
978 #endif
979
980 if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) {
981 panic("pool_put: %s: page header missing", pp->pr_wchan);
982 }
983
984 /*
985 * Return to item list.
986 */
987 if (pp->pr_roflags & PR_NOTOUCH) {
988 pr_item_notouch_put(pp, ph, v);
989 } else {
990 #ifdef DIAGNOSTIC
991 pi->pi_magic = PI_MAGIC;
992 #endif
993 #ifdef DEBUG
994 {
995 int i, *ip = v;
996
997 for (i = 0; i < pp->pr_size / sizeof(int); i++) {
998 *ip++ = PI_MAGIC;
999 }
1000 }
1001 #endif
1002
1003 LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1004 }
1005 KDASSERT(ph->ph_nmissing != 0);
1006 ph->ph_nmissing--;
1007 pp->pr_nput++;
1008 pp->pr_nitems++;
1009 pp->pr_nout--;
1010
1011 /* Cancel "pool empty" condition if it exists */
1012 if (pp->pr_curpage == NULL)
1013 pp->pr_curpage = ph;
1014
1015 if (pp->pr_flags & PR_WANTED) {
1016 pp->pr_flags &= ~PR_WANTED;
1017 cv_broadcast(&pp->pr_cv);
1018 }
1019
1020 /*
1021 * If this page is now empty, do one of two things:
1022 *
1023 * (1) If we have more pages than the page high water mark,
1024 * free the page back to the system. ONLY CONSIDER
1025 * FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
1026 * CLAIM.
1027 *
1028 * (2) Otherwise, move the page to the empty page list.
1029 *
1030 * Either way, select a new current page (so we use a partially-full
1031 * page if one is available).
1032 */
1033 if (ph->ph_nmissing == 0) {
1034 pp->pr_nidle++;
1035 if (pp->pr_npages > pp->pr_minpages &&
1036 pp->pr_npages > pp->pr_maxpages) {
1037 pr_rmpage(pp, ph, pq);
1038 } else {
1039 LIST_REMOVE(ph, ph_pagelist);
1040 LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1041
1042 /*
1043 * Update the timestamp on the page. A page must
1044 * be idle for some period of time before it can
1045 * be reclaimed by the pagedaemon. This minimizes
1046 * ping-pong'ing for memory.
1047 *
1048 * note for 64-bit time_t: truncating to 32-bit is not
1049 * a problem for our usage.
1050 */
1051 ph->ph_time = time_uptime;
1052 }
1053 pool_update_curpage(pp);
1054 }
1055
1056 /*
1057 * If the page was previously completely full, move it to the
1058 * partially-full list and make it the current page. The next
1059 * allocation will get the item from this page, instead of
1060 * further fragmenting the pool.
1061 */
1062 else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
1063 LIST_REMOVE(ph, ph_pagelist);
1064 LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1065 pp->pr_curpage = ph;
1066 }
1067 }
1068
1069 void
1070 pool_put(struct pool *pp, void *v)
1071 {
1072 struct pool_pagelist pq;
1073
1074 LIST_INIT(&pq);
1075
1076 mutex_enter(&pp->pr_lock);
1077 pool_do_put(pp, v, &pq);
1078 mutex_exit(&pp->pr_lock);
1079
1080 pr_pagelist_free(pp, &pq);
1081 }
1082
1083 /*
1084 * pool_grow: grow a pool by a page.
1085 *
1086 * => called with pool locked.
1087 * => unlock and relock the pool.
1088 * => return with pool locked.
1089 */
1090
1091 static int
1092 pool_grow(struct pool *pp, int flags)
1093 {
1094 struct pool_item_header *ph = NULL;
1095 char *cp;
1096
1097 mutex_exit(&pp->pr_lock);
1098 cp = pool_allocator_alloc(pp, flags);
1099 if (__predict_true(cp != NULL)) {
1100 ph = pool_alloc_item_header(pp, cp, flags);
1101 }
1102 if (__predict_false(cp == NULL || ph == NULL)) {
1103 if (cp != NULL) {
1104 pool_allocator_free(pp, cp);
1105 }
1106 mutex_enter(&pp->pr_lock);
1107 return ENOMEM;
1108 }
1109
1110 mutex_enter(&pp->pr_lock);
1111 pool_prime_page(pp, cp, ph);
1112 pp->pr_npagealloc++;
1113 return 0;
1114 }
1115
1116 /*
1117 * Add N items to the pool.
1118 */
1119 int
1120 pool_prime(struct pool *pp, int n)
1121 {
1122 int newpages;
1123 int error = 0;
1124
1125 mutex_enter(&pp->pr_lock);
1126
1127 newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1128
1129 while (newpages-- > 0) {
1130 error = pool_grow(pp, PR_NOWAIT);
1131 if (error) {
1132 break;
1133 }
1134 pp->pr_minpages++;
1135 }
1136
1137 if (pp->pr_minpages >= pp->pr_maxpages)
1138 pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */
1139
1140 mutex_exit(&pp->pr_lock);
1141 return error;
1142 }
1143
1144 /*
1145 * Add a page worth of items to the pool.
1146 *
1147 * Note, we must be called with the pool descriptor LOCKED.
1148 */
1149 static void
1150 pool_prime_page(struct pool *pp, void *storage, struct pool_item_header *ph)
1151 {
1152 struct pool_item *pi;
1153 void *cp = storage;
1154 const unsigned int align = pp->pr_align;
1155 const unsigned int ioff = pp->pr_itemoffset;
1156 int n;
1157
1158 KASSERT(mutex_owned(&pp->pr_lock));
1159
1160 #ifdef DIAGNOSTIC
1161 if ((pp->pr_roflags & PR_NOALIGN) == 0 &&
1162 ((uintptr_t)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
1163 panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
1164 #endif
1165
1166 /*
1167 * Insert page header.
1168 */
1169 LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1170 LIST_INIT(&ph->ph_itemlist);
1171 ph->ph_page = storage;
1172 ph->ph_nmissing = 0;
1173 ph->ph_time = time_uptime;
1174 if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1175 SPLAY_INSERT(phtree, &pp->pr_phtree, ph);
1176
1177 pp->pr_nidle++;
1178
1179 /*
1180 * Color this page.
1181 */
1182 ph->ph_off = pp->pr_curcolor;
1183 cp = (char *)cp + ph->ph_off;
1184 if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
1185 pp->pr_curcolor = 0;
1186
1187 /*
1188 	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
1189 */
1190 if (ioff != 0)
1191 cp = (char *)cp + align - ioff;
1192
1193 KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
1194
1195 /*
1196 * Insert remaining chunks on the bucket list.
1197 */
1198 n = pp->pr_itemsperpage;
1199 pp->pr_nitems += n;
1200
1201 if (pp->pr_roflags & PR_NOTOUCH) {
1202 pr_item_notouch_init(pp, ph);
1203 } else {
1204 while (n--) {
1205 pi = (struct pool_item *)cp;
1206
1207 KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
1208
1209 /* Insert on page list */
1210 LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1211 #ifdef DIAGNOSTIC
1212 pi->pi_magic = PI_MAGIC;
1213 #endif
1214 cp = (char *)cp + pp->pr_size;
1215
1216 KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
1217 }
1218 }
1219
1220 /*
1221 * If the pool was depleted, point at the new page.
1222 */
1223 if (pp->pr_curpage == NULL)
1224 pp->pr_curpage = ph;
1225
1226 if (++pp->pr_npages > pp->pr_hiwat)
1227 pp->pr_hiwat = pp->pr_npages;
1228 }
1229
1230 /*
1231  * Used by pool_get() when nitems drops below the low water mark; grows
1232  * the pool until pr_nitems catches back up with the low water mark.
1233 *
1234 * Note 1, we never wait for memory here, we let the caller decide what to do.
1235 *
1236 * Note 2, we must be called with the pool already locked, and we return
1237 * with it locked.
1238 */
1239 static int
1240 pool_catchup(struct pool *pp)
1241 {
1242 int error = 0;
1243
1244 while (POOL_NEEDS_CATCHUP(pp)) {
1245 error = pool_grow(pp, PR_NOWAIT);
1246 if (error) {
1247 break;
1248 }
1249 }
1250 return error;
1251 }
1252
1253 static void
1254 pool_update_curpage(struct pool *pp)
1255 {
1256
1257 pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
1258 if (pp->pr_curpage == NULL) {
1259 pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
1260 }
1261 KASSERT((pp->pr_curpage == NULL && pp->pr_nitems == 0) ||
1262 (pp->pr_curpage != NULL && pp->pr_nitems > 0));
1263 }
1264
1265 void
1266 pool_setlowat(struct pool *pp, int n)
1267 {
1268
1269 mutex_enter(&pp->pr_lock);
1270
1271 pp->pr_minitems = n;
1272 pp->pr_minpages = (n == 0)
1273 ? 0
1274 : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1275
1276 /* Make sure we're caught up with the newly-set low water mark. */
1277 if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1278 /*
1279 * XXX: Should we log a warning? Should we set up a timeout
1280 * to try again in a second or so? The latter could break
1281 * a caller's assumptions about interrupt protection, etc.
1282 */
1283 }
1284
1285 mutex_exit(&pp->pr_lock);
1286 }
1287
1288 void
1289 pool_sethiwat(struct pool *pp, int n)
1290 {
1291
1292 mutex_enter(&pp->pr_lock);
1293
1294 pp->pr_maxpages = (n == 0)
1295 ? 0
1296 : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1297
1298 mutex_exit(&pp->pr_lock);
1299 }
1300
1301 void
1302 pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1303 {
1304
1305 mutex_enter(&pp->pr_lock);
1306
1307 pp->pr_hardlimit = n;
1308 pp->pr_hardlimit_warning = warnmess;
1309 pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1310 pp->pr_hardlimit_warning_last.tv_sec = 0;
1311 pp->pr_hardlimit_warning_last.tv_usec = 0;
1312
1313 /*
1314 * In-line version of pool_sethiwat(), because we don't want to
1315 * release the lock.
1316 */
1317 pp->pr_maxpages = (n == 0)
1318 ? 0
1319 : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1320
1321 mutex_exit(&pp->pr_lock);
1322 }
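/*
 * Illustrative sketch of water mark and hard limit tuning (not part of
 * the original file; "example_pool" and the numbers are hypothetical).
 * Keep at least 64 items' worth of pages allocated, let idle pages
 * beyond roughly 1024 items' worth be reclaimed, and fail (or sleep,
 * with PR_WAITOK) pool_get() once 4096 items are outstanding, logging
 * the warning at most once every 30 seconds:
 *
 *	pool_setlowat(&example_pool, 64);
 *	pool_sethiwat(&example_pool, 1024);
 *	pool_sethardlimit(&example_pool, 4096,
 *	    "WARNING: example_pool limit reached", 30);
 */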
1323
1324 /*
1325 * Release all complete pages that have not been used recently.
1326 *
1327 * Must not be called from interrupt context.
1328 */
1329 int
1330 pool_reclaim(struct pool *pp)
1331 {
1332 struct pool_item_header *ph, *phnext;
1333 struct pool_pagelist pq;
1334 uint32_t curtime;
1335 bool klock;
1336 int rv;
1337
1338 KASSERT(!cpu_intr_p() && !cpu_softintr_p());
1339
1340 if (pp->pr_drain_hook != NULL) {
1341 /*
1342 * The drain hook must be called with the pool unlocked.
1343 */
1344 (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
1345 }
1346
1347 /*
1348 * XXXSMP Because we do not want to cause non-MPSAFE code
1349 * to block.
1350 */
1351 if (pp->pr_ipl == IPL_SOFTNET || pp->pr_ipl == IPL_SOFTCLOCK ||
1352 pp->pr_ipl == IPL_SOFTSERIAL) {
1353 KERNEL_LOCK(1, NULL);
1354 klock = true;
1355 } else
1356 klock = false;
1357
1358 /* Reclaim items from the pool's cache (if any). */
1359 if (pp->pr_cache != NULL)
1360 pool_cache_invalidate(pp->pr_cache);
1361
1362 if (mutex_tryenter(&pp->pr_lock) == 0) {
1363 if (klock) {
1364 KERNEL_UNLOCK_ONE(NULL);
1365 }
1366 return (0);
1367 }
1368
1369 LIST_INIT(&pq);
1370
1371 curtime = time_uptime;
1372
1373 for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
1374 phnext = LIST_NEXT(ph, ph_pagelist);
1375
1376 /* Check our minimum page claim */
1377 if (pp->pr_npages <= pp->pr_minpages)
1378 break;
1379
1380 KASSERT(ph->ph_nmissing == 0);
1381 if (curtime - ph->ph_time < pool_inactive_time)
1382 continue;
1383
1384 /*
1385 * If freeing this page would put us below
1386 * the low water mark, stop now.
1387 */
1388 if ((pp->pr_nitems - pp->pr_itemsperpage) <
1389 pp->pr_minitems)
1390 break;
1391
1392 pr_rmpage(pp, ph, &pq);
1393 }
1394
1395 mutex_exit(&pp->pr_lock);
1396
1397 if (LIST_EMPTY(&pq))
1398 rv = 0;
1399 else {
1400 pr_pagelist_free(pp, &pq);
1401 rv = 1;
1402 }
1403
1404 if (klock) {
1405 KERNEL_UNLOCK_ONE(NULL);
1406 }
1407
1408 return (rv);
1409 }
1410
1411 /*
1412  * Drain pools, one at a time. The drained pool is returned in *ppp.
1413 *
1414 * Note, must never be called from interrupt context.
1415 */
1416 bool
1417 pool_drain(struct pool **ppp)
1418 {
1419 bool reclaimed;
1420 struct pool *pp;
1421
1422 KASSERT(!TAILQ_EMPTY(&pool_head));
1423
1424 pp = NULL;
1425
1426 /* Find next pool to drain, and add a reference. */
1427 mutex_enter(&pool_head_lock);
1428 do {
1429 if (drainpp == NULL) {
1430 drainpp = TAILQ_FIRST(&pool_head);
1431 }
1432 if (drainpp != NULL) {
1433 pp = drainpp;
1434 drainpp = TAILQ_NEXT(pp, pr_poollist);
1435 }
1436 /*
1437 * Skip completely idle pools. We depend on at least
1438 * one pool in the system being active.
1439 */
1440 } while (pp == NULL || pp->pr_npages == 0);
1441 pp->pr_refcnt++;
1442 mutex_exit(&pool_head_lock);
1443
1444 	/* Drain the cache (if any) and the pool. */
1445 reclaimed = pool_reclaim(pp);
1446
1447 /* Finally, unlock the pool. */
1448 mutex_enter(&pool_head_lock);
1449 pp->pr_refcnt--;
1450 cv_broadcast(&pool_busy);
1451 mutex_exit(&pool_head_lock);
1452
1453 if (ppp != NULL)
1454 *ppp = pp;
1455
1456 return reclaimed;
1457 }
1458
1459 /*
1460 * Diagnostic helpers.
1461 */
1462
1463 void
1464 pool_printall(const char *modif, void (*pr)(const char *, ...))
1465 {
1466 struct pool *pp;
1467
1468 TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
1469 pool_printit(pp, modif, pr);
1470 }
1471 }
1472
1473 void
1474 pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1475 {
1476
1477 if (pp == NULL) {
1478 (*pr)("Must specify a pool to print.\n");
1479 return;
1480 }
1481
1482 pool_print1(pp, modif, pr);
1483 }
1484
1485 static void
1486 pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
1487 void (*pr)(const char *, ...))
1488 {
1489 struct pool_item_header *ph;
1490 #ifdef DIAGNOSTIC
1491 struct pool_item *pi;
1492 #endif
1493
1494 LIST_FOREACH(ph, pl, ph_pagelist) {
1495 (*pr)("\t\tpage %p, nmissing %d, time %" PRIu32 "\n",
1496 ph->ph_page, ph->ph_nmissing, ph->ph_time);
1497 #ifdef DIAGNOSTIC
1498 if (!(pp->pr_roflags & PR_NOTOUCH)) {
1499 LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
1500 if (pi->pi_magic != PI_MAGIC) {
1501 (*pr)("\t\t\titem %p, magic 0x%x\n",
1502 pi, pi->pi_magic);
1503 }
1504 }
1505 }
1506 #endif
1507 }
1508 }
1509
1510 static void
1511 pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1512 {
1513 struct pool_item_header *ph;
1514 pool_cache_t pc;
1515 pcg_t *pcg;
1516 pool_cache_cpu_t *cc;
1517 uint64_t cpuhit, cpumiss;
1518 int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1519 char c;
1520
1521 while ((c = *modif++) != '\0') {
1522 if (c == 'l')
1523 print_log = 1;
1524 if (c == 'p')
1525 print_pagelist = 1;
1526 if (c == 'c')
1527 print_cache = 1;
1528 }
1529
1530 if ((pc = pp->pr_cache) != NULL) {
1531 (*pr)("POOL CACHE");
1532 } else {
1533 (*pr)("POOL");
1534 }
1535
1536 (*pr)(" %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1537 pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1538 pp->pr_roflags);
1539 (*pr)("\talloc %p\n", pp->pr_alloc);
1540 (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1541 pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1542 (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1543 pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1544
1545 (*pr)("\tnget %lu, nfail %lu, nput %lu\n",
1546 pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1547 (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1548 pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1549
1550 if (print_pagelist == 0)
1551 goto skip_pagelist;
1552
1553 if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1554 (*pr)("\n\tempty page list:\n");
1555 pool_print_pagelist(pp, &pp->pr_emptypages, pr);
1556 if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
1557 (*pr)("\n\tfull page list:\n");
1558 pool_print_pagelist(pp, &pp->pr_fullpages, pr);
1559 if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
1560 (*pr)("\n\tpartial-page list:\n");
1561 pool_print_pagelist(pp, &pp->pr_partpages, pr);
1562
1563 if (pp->pr_curpage == NULL)
1564 (*pr)("\tno current page\n");
1565 else
1566 (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1567
1568 skip_pagelist:
1569 if (print_log == 0)
1570 goto skip_log;
1571
1572 (*pr)("\n");
1573
1574 skip_log:
1575
1576 #define PR_GROUPLIST(pcg) \
1577 (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail); \
1578 for (i = 0; i < pcg->pcg_size; i++) { \
1579 if (pcg->pcg_objects[i].pcgo_pa != \
1580 POOL_PADDR_INVALID) { \
1581 (*pr)("\t\t\t%p, 0x%llx\n", \
1582 pcg->pcg_objects[i].pcgo_va, \
1583 (unsigned long long) \
1584 pcg->pcg_objects[i].pcgo_pa); \
1585 } else { \
1586 (*pr)("\t\t\t%p\n", \
1587 pcg->pcg_objects[i].pcgo_va); \
1588 } \
1589 }
1590
1591 if (pc != NULL) {
1592 cpuhit = 0;
1593 cpumiss = 0;
1594 for (i = 0; i < __arraycount(pc->pc_cpus); i++) {
1595 if ((cc = pc->pc_cpus[i]) == NULL)
1596 continue;
1597 cpuhit += cc->cc_hits;
1598 cpumiss += cc->cc_misses;
1599 }
1600 (*pr)("\tcpu layer hits %llu misses %llu\n", cpuhit, cpumiss);
1601 (*pr)("\tcache layer hits %llu misses %llu\n",
1602 pc->pc_hits, pc->pc_misses);
1603 (*pr)("\tcache layer entry uncontended %llu contended %llu\n",
1604 pc->pc_hits + pc->pc_misses - pc->pc_contended,
1605 pc->pc_contended);
1606 (*pr)("\tcache layer empty groups %u full groups %u\n",
1607 pc->pc_nempty, pc->pc_nfull);
1608 if (print_cache) {
1609 (*pr)("\tfull cache groups:\n");
1610 for (pcg = pc->pc_fullgroups; pcg != NULL;
1611 pcg = pcg->pcg_next) {
1612 PR_GROUPLIST(pcg);
1613 }
1614 (*pr)("\tempty cache groups:\n");
1615 for (pcg = pc->pc_emptygroups; pcg != NULL;
1616 pcg = pcg->pcg_next) {
1617 PR_GROUPLIST(pcg);
1618 }
1619 }
1620 }
1621 #undef PR_GROUPLIST
1622 }
1623
1624 static int
1625 pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
1626 {
1627 struct pool_item *pi;
1628 void *page;
1629 int n;
1630
1631 if ((pp->pr_roflags & PR_NOALIGN) == 0) {
1632 page = (void *)((uintptr_t)ph & pp->pr_alloc->pa_pagemask);
1633 if (page != ph->ph_page &&
1634 (pp->pr_roflags & PR_PHINPAGE) != 0) {
1635 if (label != NULL)
1636 printf("%s: ", label);
1637 printf("pool(%p:%s): page inconsistency: page %p;"
1638 " at page head addr %p (p %p)\n", pp,
1639 pp->pr_wchan, ph->ph_page,
1640 ph, page);
1641 return 1;
1642 }
1643 }
1644
1645 if ((pp->pr_roflags & PR_NOTOUCH) != 0)
1646 return 0;
1647
1648 for (pi = LIST_FIRST(&ph->ph_itemlist), n = 0;
1649 pi != NULL;
1650 pi = LIST_NEXT(pi,pi_list), n++) {
1651
1652 #ifdef DIAGNOSTIC
1653 if (pi->pi_magic != PI_MAGIC) {
1654 if (label != NULL)
1655 printf("%s: ", label);
1656 printf("pool(%s): free list modified: magic=%x;"
1657 " page %p; item ordinal %d; addr %p\n",
1658 pp->pr_wchan, pi->pi_magic, ph->ph_page,
1659 n, pi);
1660 panic("pool");
1661 }
1662 #endif
1663 if ((pp->pr_roflags & PR_NOALIGN) != 0) {
1664 continue;
1665 }
1666 page = (void *)((uintptr_t)pi & pp->pr_alloc->pa_pagemask);
1667 if (page == ph->ph_page)
1668 continue;
1669
1670 if (label != NULL)
1671 printf("%s: ", label);
1672 printf("pool(%p:%s): page inconsistency: page %p;"
1673 " item ordinal %d; addr %p (p %p)\n", pp,
1674 pp->pr_wchan, ph->ph_page,
1675 n, pi, page);
1676 return 1;
1677 }
1678 return 0;
1679 }
1680
1681
1682 int
1683 pool_chk(struct pool *pp, const char *label)
1684 {
1685 struct pool_item_header *ph;
1686 int r = 0;
1687
1688 mutex_enter(&pp->pr_lock);
1689 LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
1690 r = pool_chk_page(pp, label, ph);
1691 if (r) {
1692 goto out;
1693 }
1694 }
1695 LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
1696 r = pool_chk_page(pp, label, ph);
1697 if (r) {
1698 goto out;
1699 }
1700 }
1701 LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
1702 r = pool_chk_page(pp, label, ph);
1703 if (r) {
1704 goto out;
1705 }
1706 }
1707
1708 out:
1709 mutex_exit(&pp->pr_lock);
1710 return (r);
1711 }
1712
1713 /*
1714 * pool_cache_init:
1715 *
1716 * Initialize a pool cache.
1717 */
1718 pool_cache_t
1719 pool_cache_init(size_t size, u_int align, u_int align_offset, u_int flags,
1720 const char *wchan, struct pool_allocator *palloc, int ipl,
1721 int (*ctor)(void *, void *, int), void (*dtor)(void *, void *), void *arg)
1722 {
1723 pool_cache_t pc;
1724
1725 pc = pool_get(&cache_pool, PR_WAITOK);
1726 if (pc == NULL)
1727 return NULL;
1728
1729 pool_cache_bootstrap(pc, size, align, align_offset, flags, wchan,
1730 palloc, ipl, ctor, dtor, arg);
1731
1732 return pc;
1733 }
1734
1735 /*
1736 * pool_cache_bootstrap:
1737 *
1738 * Kernel-private version of pool_cache_init(). The caller
1739 * provides initial storage.
1740 */
1741 void
1742 pool_cache_bootstrap(pool_cache_t pc, size_t size, u_int align,
1743 u_int align_offset, u_int flags, const char *wchan,
1744 struct pool_allocator *palloc, int ipl,
1745 int (*ctor)(void *, void *, int), void (*dtor)(void *, void *),
1746 void *arg)
1747 {
1748 CPU_INFO_ITERATOR cii;
1749 pool_cache_t pc1;
1750 struct cpu_info *ci;
1751 struct pool *pp;
1752
1753 pp = &pc->pc_pool;
1754 if (palloc == NULL && ipl == IPL_NONE)
1755 palloc = &pool_allocator_nointr;
1756 pool_init(pp, size, align, align_offset, flags, wchan, palloc, ipl);
1757 mutex_init(&pc->pc_lock, MUTEX_DEFAULT, ipl);
1758
1759 if (ctor == NULL) {
1760 ctor = (int (*)(void *, void *, int))nullop;
1761 }
1762 if (dtor == NULL) {
1763 dtor = (void (*)(void *, void *))nullop;
1764 }
1765
1766 pc->pc_emptygroups = NULL;
1767 pc->pc_fullgroups = NULL;
1768 pc->pc_partgroups = NULL;
1769 pc->pc_ctor = ctor;
1770 pc->pc_dtor = dtor;
1771 pc->pc_arg = arg;
1772 pc->pc_hits = 0;
1773 pc->pc_misses = 0;
1774 pc->pc_nempty = 0;
1775 pc->pc_npart = 0;
1776 pc->pc_nfull = 0;
1777 pc->pc_contended = 0;
1778 pc->pc_refcnt = 0;
1779 pc->pc_freecheck = NULL;
1780
1781 if ((flags & PR_LARGECACHE) != 0) {
1782 pc->pc_pcgsize = PCG_NOBJECTS_LARGE;
1783 pc->pc_pcgpool = &pcg_large_pool;
1784 } else {
1785 pc->pc_pcgsize = PCG_NOBJECTS_NORMAL;
1786 pc->pc_pcgpool = &pcg_normal_pool;
1787 }
1788
1789 /* Allocate per-CPU caches. */
1790 memset(pc->pc_cpus, 0, sizeof(pc->pc_cpus));
1791 pc->pc_ncpu = 0;
1792 if (ncpu < 2) {
1793 /* XXX For sparc: boot CPU is not attached yet. */
1794 pool_cache_cpu_init1(curcpu(), pc);
1795 } else {
1796 for (CPU_INFO_FOREACH(cii, ci)) {
1797 pool_cache_cpu_init1(ci, pc);
1798 }
1799 }
1800
1801 /* Add to list of all pools. */
1802 if (__predict_true(!cold))
1803 mutex_enter(&pool_head_lock);
1804 TAILQ_FOREACH(pc1, &pool_cache_head, pc_cachelist) {
1805 if (strcmp(pc1->pc_pool.pr_wchan, pc->pc_pool.pr_wchan) > 0)
1806 break;
1807 }
1808 if (pc1 == NULL)
1809 TAILQ_INSERT_TAIL(&pool_cache_head, pc, pc_cachelist);
1810 else
1811 TAILQ_INSERT_BEFORE(pc1, pc, pc_cachelist);
1812 if (__predict_true(!cold))
1813 mutex_exit(&pool_head_lock);
1814
1815 membar_sync();
1816 pp->pr_cache = pc;
1817 }
1818
1819 /*
1820 * pool_cache_destroy:
1821 *
1822 * Destroy a pool cache.
1823 */
1824 void
1825 pool_cache_destroy(pool_cache_t pc)
1826 {
1827
1828 pool_cache_bootstrap_destroy(pc);
1829 pool_put(&cache_pool, pc);
1830 }
1831
1832 /*
1833 * pool_cache_bootstrap_destroy:
1834 *
1835 * Destroy a pool cache.
1836 */
1837 void
1838 pool_cache_bootstrap_destroy(pool_cache_t pc)
1839 {
1840 struct pool *pp = &pc->pc_pool;
1841 u_int i;
1842
1843 /* Remove it from the global list. */
1844 mutex_enter(&pool_head_lock);
1845 while (pc->pc_refcnt != 0)
1846 cv_wait(&pool_busy, &pool_head_lock);
1847 TAILQ_REMOVE(&pool_cache_head, pc, pc_cachelist);
1848 mutex_exit(&pool_head_lock);
1849
1850 /* First, invalidate the entire cache. */
1851 pool_cache_invalidate(pc);
1852
1853 /* Disassociate it from the pool. */
1854 mutex_enter(&pp->pr_lock);
1855 pp->pr_cache = NULL;
1856 mutex_exit(&pp->pr_lock);
1857
1858 /* Destroy per-CPU data */
1859 for (i = 0; i < __arraycount(pc->pc_cpus); i++)
1860 pool_cache_invalidate_cpu(pc, i);
1861
1862 /* Finally, destroy it. */
1863 mutex_destroy(&pc->pc_lock);
1864 pool_destroy(pp);
1865 }
1866
1867 /*
1868 * pool_cache_cpu_init1:
1869 *
1870 * Called for each pool_cache whenever a new CPU is attached.
1871 */
1872 static void
1873 pool_cache_cpu_init1(struct cpu_info *ci, pool_cache_t pc)
1874 {
1875 pool_cache_cpu_t *cc;
1876 int index;
1877
1878 index = ci->ci_index;
1879
1880 KASSERT(index < __arraycount(pc->pc_cpus));
1881
1882 if ((cc = pc->pc_cpus[index]) != NULL) {
1883 KASSERT(cc->cc_cpuindex == index);
1884 return;
1885 }
1886
1887 /*
1888 * The first CPU is 'free'. This needs to be the case for
1889 * bootstrap - we may not be able to allocate yet.
1890 */
1891 if (pc->pc_ncpu == 0) {
1892 cc = &pc->pc_cpu0;
1893 pc->pc_ncpu = 1;
1894 } else {
1895 mutex_enter(&pc->pc_lock);
1896 pc->pc_ncpu++;
1897 mutex_exit(&pc->pc_lock);
1898 cc = pool_get(&cache_cpu_pool, PR_WAITOK);
1899 }
1900
1901 cc->cc_ipl = pc->pc_pool.pr_ipl;
1902 cc->cc_iplcookie = makeiplcookie(cc->cc_ipl);
1903 cc->cc_cache = pc;
1904 cc->cc_cpuindex = index;
1905 cc->cc_hits = 0;
1906 cc->cc_misses = 0;
1907 cc->cc_current = __UNCONST(&pcg_dummy);
1908 cc->cc_previous = __UNCONST(&pcg_dummy);
1909
1910 pc->pc_cpus[index] = cc;
1911 }
1912
1913 /*
1914 * pool_cache_cpu_init:
1915 *
1916 * Called whenever a new CPU is attached.
1917 */
1918 void
1919 pool_cache_cpu_init(struct cpu_info *ci)
1920 {
1921 pool_cache_t pc;
1922
1923 mutex_enter(&pool_head_lock);
1924 TAILQ_FOREACH(pc, &pool_cache_head, pc_cachelist) {
1925 pc->pc_refcnt++;
1926 mutex_exit(&pool_head_lock);
1927
1928 pool_cache_cpu_init1(ci, pc);
1929
1930 mutex_enter(&pool_head_lock);
1931 pc->pc_refcnt--;
1932 cv_broadcast(&pool_busy);
1933 }
1934 mutex_exit(&pool_head_lock);
1935 }
1936
1937 /*
1938 * pool_cache_reclaim:
1939 *
1940 * Reclaim memory from a pool cache.
1941 */
1942 bool
1943 pool_cache_reclaim(pool_cache_t pc)
1944 {
1945
1946 return pool_reclaim(&pc->pc_pool);
1947 }
1948
1949 static void
1950 pool_cache_destruct_object1(pool_cache_t pc, void *object)
1951 {
1952
1953 (*pc->pc_dtor)(pc->pc_arg, object);
1954 pool_put(&pc->pc_pool, object);
1955 }
1956
1957 /*
1958 * pool_cache_destruct_object:
1959 *
1960 * Force destruction of an object and its release back into
1961 * the pool.
1962 */
1963 void
1964 pool_cache_destruct_object(pool_cache_t pc, void *object)
1965 {
1966
1967 FREECHECK_IN(&pc->pc_freecheck, object);
1968
1969 pool_cache_destruct_object1(pc, object);
1970 }
1971
1972 /*
1973 * pool_cache_invalidate_groups:
1974 *
1975 * Invalidate a chain of groups and destruct all objects.
1976 */
1977 static void
1978 pool_cache_invalidate_groups(pool_cache_t pc, pcg_t *pcg)
1979 {
1980 void *object;
1981 pcg_t *next;
1982 int i;
1983
1984 for (; pcg != NULL; pcg = next) {
1985 next = pcg->pcg_next;
1986
1987 for (i = 0; i < pcg->pcg_avail; i++) {
1988 object = pcg->pcg_objects[i].pcgo_va;
1989 pool_cache_destruct_object1(pc, object);
1990 }
1991
1992 if (pcg->pcg_size == PCG_NOBJECTS_LARGE) {
1993 pool_put(&pcg_large_pool, pcg);
1994 } else {
1995 KASSERT(pcg->pcg_size == PCG_NOBJECTS_NORMAL);
1996 pool_put(&pcg_normal_pool, pcg);
1997 }
1998 }
1999 }
2000
2001 /*
2002 * pool_cache_invalidate:
2003 *
2004 * Invalidate a pool cache (destruct and release all of the
2005 * cached objects). Does not reclaim objects from the pool.
2006 *
2007 * Note: For pool caches that provide constructed objects, there
2008 * is an assumption that another level of synchronization is occurring
2009 * between the input to the constructor and the cache invalidation.
2010 *
2011 * Invalidation is a costly process and should not be called from
2012 * interrupt context.
2013 */
2014 void
2015 pool_cache_invalidate(pool_cache_t pc)
2016 {
2017 uint64_t where;
2018 pcg_t *full, *empty, *part;
2019
2020 KASSERT(!cpu_intr_p() && !cpu_softintr_p());
2021
2022 if (ncpu < 2 || !mp_online) {
2023 /*
2024 * We might be called early enough in the boot process
2025 * for the CPU data structures to not be fully initialized.
2026 * In this case, transfer the content of the local CPU's
2027 * cache back into global cache as only this CPU is currently
2028 * running.
2029 */
2030 pool_cache_transfer(pc);
2031 } else {
2032 /*
2033 * Signal all CPUs that they must transfer their local
2034 * cache back to the global pool then wait for the xcall to
2035 * complete.
2036 */
2037 where = xc_broadcast(0, (xcfunc_t)pool_cache_transfer,
2038 pc, NULL);
2039 xc_wait(where);
2040 }
2041
2042 /* Empty pool caches, then invalidate objects */
2043 mutex_enter(&pc->pc_lock);
2044 full = pc->pc_fullgroups;
2045 empty = pc->pc_emptygroups;
2046 part = pc->pc_partgroups;
2047 pc->pc_fullgroups = NULL;
2048 pc->pc_emptygroups = NULL;
2049 pc->pc_partgroups = NULL;
2050 pc->pc_nfull = 0;
2051 pc->pc_nempty = 0;
2052 pc->pc_npart = 0;
2053 mutex_exit(&pc->pc_lock);
2054
2055 pool_cache_invalidate_groups(pc, full);
2056 pool_cache_invalidate_groups(pc, empty);
2057 pool_cache_invalidate_groups(pc, part);
2058 }
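
/*
 * Illustrative usage (hypothetical caller): invalidation is done from
 * thread context, typically before a cache is torn down or when the
 * constructed state of cached objects becomes stale:
 *
 *	pool_cache_invalidate(foo_cache);
 *
 * and, if the backing pages should also be returned to the system,
 *
 *	pool_cache_reclaim(foo_cache);
 */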
2059
2060 /*
2061 * pool_cache_invalidate_cpu:
2062 *
2063 * Invalidate all CPU-bound cached objects in pool cache, the CPU being
2064 * identified by its associated index.
2065 * It is the caller's responsibility to ensure that no operation is
2066 * taking place on this pool cache while doing this invalidation.
2067 * WARNING: as no inter-CPU locking is enforced, trying to invalidate
2068 * pool cached objects from a CPU different from the one currently running
2069 * may result in undefined behaviour.
2070 */
2071 static void
2072 pool_cache_invalidate_cpu(pool_cache_t pc, u_int index)
2073 {
2074 pool_cache_cpu_t *cc;
2075 pcg_t *pcg;
2076
2077 if ((cc = pc->pc_cpus[index]) == NULL)
2078 return;
2079
2080 if ((pcg = cc->cc_current) != &pcg_dummy) {
2081 pcg->pcg_next = NULL;
2082 pool_cache_invalidate_groups(pc, pcg);
2083 }
2084 if ((pcg = cc->cc_previous) != &pcg_dummy) {
2085 pcg->pcg_next = NULL;
2086 pool_cache_invalidate_groups(pc, pcg);
2087 }
2088 if (cc != &pc->pc_cpu0)
2089 pool_put(&cache_cpu_pool, cc);
2090
2091 }
2092
2093 void
2094 pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg)
2095 {
2096
2097 pool_set_drain_hook(&pc->pc_pool, fn, arg);
2098 }
2099
2100 void
2101 pool_cache_setlowat(pool_cache_t pc, int n)
2102 {
2103
2104 pool_setlowat(&pc->pc_pool, n);
2105 }
2106
2107 void
2108 pool_cache_sethiwat(pool_cache_t pc, int n)
2109 {
2110
2111 pool_sethiwat(&pc->pc_pool, n);
2112 }
2113
2114 void
2115 pool_cache_sethardlimit(pool_cache_t pc, int n, const char *warnmess, int ratecap)
2116 {
2117
2118 pool_sethardlimit(&pc->pc_pool, n, warnmess, ratecap);
2119 }
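
/*
 * Illustrative tuning sketch (hypothetical values): the wrappers above
 * only forward to the underlying pool, so a cache can be tuned just
 * like a plain pool:
 *
 *	pool_cache_setlowat(foo_cache, 16);
 *	pool_cache_sethiwat(foo_cache, 1024);
 *	pool_cache_sethardlimit(foo_cache, 2048, "foo_cache limit", 60);
 */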
2120
2121 static bool __noinline
2122 pool_cache_get_slow(pool_cache_cpu_t *cc, int s, void **objectp,
2123 paddr_t *pap, int flags)
2124 {
2125 pcg_t *pcg, *cur;
2126 uint64_t ncsw;
2127 pool_cache_t pc;
2128 void *object;
2129
2130 KASSERT(cc->cc_current->pcg_avail == 0);
2131 KASSERT(cc->cc_previous->pcg_avail == 0);
2132
2133 pc = cc->cc_cache;
2134 cc->cc_misses++;
2135
2136 /*
2137 * Nothing was available locally. Try and grab a group
2138 * from the cache.
2139 */
2140 if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
2141 ncsw = curlwp->l_ncsw;
2142 mutex_enter(&pc->pc_lock);
2143 pc->pc_contended++;
2144
2145 /*
2146 * If we context switched while locking, then
2147 * our view of the per-CPU data is invalid:
2148 * retry.
2149 */
2150 if (curlwp->l_ncsw != ncsw) {
2151 mutex_exit(&pc->pc_lock);
2152 return true;
2153 }
2154 }
2155
2156 if (__predict_true((pcg = pc->pc_fullgroups) != NULL)) {
2157 /*
2158 * If there's a full group, release our empty
2159 * group back to the cache. Install the full
2160 * group as cc_current and return.
2161 */
2162 if (__predict_true((cur = cc->cc_current) != &pcg_dummy)) {
2163 KASSERT(cur->pcg_avail == 0);
2164 cur->pcg_next = pc->pc_emptygroups;
2165 pc->pc_emptygroups = cur;
2166 pc->pc_nempty++;
2167 }
2168 KASSERT(pcg->pcg_avail == pcg->pcg_size);
2169 cc->cc_current = pcg;
2170 pc->pc_fullgroups = pcg->pcg_next;
2171 pc->pc_hits++;
2172 pc->pc_nfull--;
2173 mutex_exit(&pc->pc_lock);
2174 return true;
2175 }
2176
2177 /*
2178 * Nothing available locally or in cache. Take the slow
2179 * path: fetch a new object from the pool and construct
2180 * it.
2181 */
2182 pc->pc_misses++;
2183 mutex_exit(&pc->pc_lock);
2184 splx(s);
2185
2186 object = pool_get(&pc->pc_pool, flags);
2187 *objectp = object;
2188 if (__predict_false(object == NULL))
2189 return false;
2190
2191 if (__predict_false((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0)) {
2192 pool_put(&pc->pc_pool, object);
2193 *objectp = NULL;
2194 return false;
2195 }
2196
2197 KASSERT((((vaddr_t)object + pc->pc_pool.pr_itemoffset) &
2198 (pc->pc_pool.pr_align - 1)) == 0);
2199
2200 if (pap != NULL) {
2201 #ifdef POOL_VTOPHYS
2202 *pap = POOL_VTOPHYS(object);
2203 #else
2204 *pap = POOL_PADDR_INVALID;
2205 #endif
2206 }
2207
2208 FREECHECK_OUT(&pc->pc_freecheck, object);
2209 pool_redzone_fill(&pc->pc_pool, object);
2210 return false;
2211 }
2212
2213 /*
2214 * pool_cache_get{,_paddr}:
2215 *
2216 * Get an object from a pool cache (optionally returning
2217 * the physical address of the object).
2218 */
2219 void *
2220 pool_cache_get_paddr(pool_cache_t pc, int flags, paddr_t *pap)
2221 {
2222 pool_cache_cpu_t *cc;
2223 pcg_t *pcg;
2224 void *object;
2225 int s;
2226
2227 KASSERTMSG((!cpu_intr_p() && !cpu_softintr_p()) ||
2228 (pc->pc_pool.pr_ipl != IPL_NONE || cold || panicstr != NULL),
2229 "pool '%s' is IPL_NONE, but called from interrupt context\n",
2230 pc->pc_pool.pr_wchan);
2231
2232 if (flags & PR_WAITOK) {
2233 ASSERT_SLEEPABLE();
2234 }
2235
2236 /* Lock out interrupts and disable preemption. */
2237 s = splvm();
2238 while (/* CONSTCOND */ true) {
2239 /* Try and allocate an object from the current group. */
2240 cc = pc->pc_cpus[curcpu()->ci_index];
2241 KASSERT(cc->cc_cache == pc);
2242 pcg = cc->cc_current;
2243 if (__predict_true(pcg->pcg_avail > 0)) {
2244 object = pcg->pcg_objects[--pcg->pcg_avail].pcgo_va;
2245 if (__predict_false(pap != NULL))
2246 *pap = pcg->pcg_objects[pcg->pcg_avail].pcgo_pa;
2247 #if defined(DIAGNOSTIC)
2248 pcg->pcg_objects[pcg->pcg_avail].pcgo_va = NULL;
2249 KASSERT(pcg->pcg_avail < pcg->pcg_size);
2250 KASSERT(object != NULL);
2251 #endif
2252 cc->cc_hits++;
2253 splx(s);
2254 FREECHECK_OUT(&pc->pc_freecheck, object);
2255 pool_redzone_fill(&pc->pc_pool, object);
2256 return object;
2257 }
2258
2259 /*
2260 * That failed. If the previous group isn't empty, swap
2261 * it with the current group and allocate from there.
2262 */
2263 pcg = cc->cc_previous;
2264 if (__predict_true(pcg->pcg_avail > 0)) {
2265 cc->cc_previous = cc->cc_current;
2266 cc->cc_current = pcg;
2267 continue;
2268 }
2269
2270 /*
2271 * Can't allocate from either group: try the slow path.
2272 * If get_slow() allocated an object for us, or if
2273 * no more objects are available, it will return false.
2274 * Otherwise, we need to retry.
2275 */
2276 if (!pool_cache_get_slow(cc, s, &object, pap, flags))
2277 break;
2278 }
2279
2280 return object;
2281 }
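
/*
 * Illustrative usage (hypothetical caller): most consumers go through
 * the pool_cache_get() wrapper, which is pool_cache_get_paddr() with a
 * NULL physical-address pointer:
 *
 *	struct foo *fp = pool_cache_get(foo_cache, PR_NOWAIT);
 *	if (fp == NULL)
 *		return ENOMEM;
 */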
2282
2283 static bool __noinline
2284 pool_cache_put_slow(pool_cache_cpu_t *cc, int s, void *object)
2285 {
2286 struct lwp *l = curlwp;
2287 pcg_t *pcg, *cur;
2288 uint64_t ncsw;
2289 pool_cache_t pc;
2290
2291 KASSERT(cc->cc_current->pcg_avail == cc->cc_current->pcg_size);
2292 KASSERT(cc->cc_previous->pcg_avail == cc->cc_previous->pcg_size);
2293
2294 pc = cc->cc_cache;
2295 pcg = NULL;
2296 cc->cc_misses++;
2297 ncsw = l->l_ncsw;
2298
2299 /*
2300 * If there are no empty groups in the cache then allocate one
2301 * while still unlocked.
2302 */
2303 if (__predict_false(pc->pc_emptygroups == NULL)) {
2304 if (__predict_true(!pool_cache_disable)) {
2305 pcg = pool_get(pc->pc_pcgpool, PR_NOWAIT);
2306 }
2307 /*
2308 * If pool_get() blocked, then our view of
2309 * the per-CPU data is invalid: retry.
2310 */
2311 if (__predict_false(l->l_ncsw != ncsw)) {
2312 if (pcg != NULL) {
2313 pool_put(pc->pc_pcgpool, pcg);
2314 }
2315 return true;
2316 }
2317 if (__predict_true(pcg != NULL)) {
2318 pcg->pcg_avail = 0;
2319 pcg->pcg_size = pc->pc_pcgsize;
2320 }
2321 }
2322
2323 /* Lock the cache. */
2324 if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
2325 mutex_enter(&pc->pc_lock);
2326 pc->pc_contended++;
2327
2328 /*
2329 * If we context switched while locking, then our view of
2330 * the per-CPU data is invalid: retry.
2331 */
2332 if (__predict_false(l->l_ncsw != ncsw)) {
2333 mutex_exit(&pc->pc_lock);
2334 if (pcg != NULL) {
2335 pool_put(pc->pc_pcgpool, pcg);
2336 }
2337 return true;
2338 }
2339 }
2340
2341 /* If there are no empty groups in the cache then allocate one. */
2342 if (pcg == NULL && pc->pc_emptygroups != NULL) {
2343 pcg = pc->pc_emptygroups;
2344 pc->pc_emptygroups = pcg->pcg_next;
2345 pc->pc_nempty--;
2346 }
2347
2348 /*
2349 * If there's an empty group, release our full group back
2350 * to the cache. Install the empty group on the local CPU
2351 * and return.
2352 */
2353 if (pcg != NULL) {
2354 KASSERT(pcg->pcg_avail == 0);
2355 if (__predict_false(cc->cc_previous == &pcg_dummy)) {
2356 cc->cc_previous = pcg;
2357 } else {
2358 cur = cc->cc_current;
2359 if (__predict_true(cur != &pcg_dummy)) {
2360 KASSERT(cur->pcg_avail == cur->pcg_size);
2361 cur->pcg_next = pc->pc_fullgroups;
2362 pc->pc_fullgroups = cur;
2363 pc->pc_nfull++;
2364 }
2365 cc->cc_current = pcg;
2366 }
2367 pc->pc_hits++;
2368 mutex_exit(&pc->pc_lock);
2369 return true;
2370 }
2371
2372 /*
2373 * Nothing available locally or in cache, and we didn't
2374 * allocate an empty group. Take the slow path and destroy
2375 * the object here and now.
2376 */
2377 pc->pc_misses++;
2378 mutex_exit(&pc->pc_lock);
2379 splx(s);
2380 pool_cache_destruct_object(pc, object);
2381
2382 return false;
2383 }
2384
2385 /*
2386 * pool_cache_put{,_paddr}:
2387 *
2388 * Put an object back to the pool cache (optionally caching the
2389 * physical address of the object).
2390 */
2391 void
2392 pool_cache_put_paddr(pool_cache_t pc, void *object, paddr_t pa)
2393 {
2394 pool_cache_cpu_t *cc;
2395 pcg_t *pcg;
2396 int s;
2397
2398 KASSERT(object != NULL);
2399 pool_redzone_check(&pc->pc_pool, object);
2400 FREECHECK_IN(&pc->pc_freecheck, object);
2401
2402 /* Lock out interrupts and disable preemption. */
2403 s = splvm();
2404 while (/* CONSTCOND */ true) {
2405 /* If the current group isn't full, release it there. */
2406 cc = pc->pc_cpus[curcpu()->ci_index];
2407 KASSERT(cc->cc_cache == pc);
2408 pcg = cc->cc_current;
2409 if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
2410 pcg->pcg_objects[pcg->pcg_avail].pcgo_va = object;
2411 pcg->pcg_objects[pcg->pcg_avail].pcgo_pa = pa;
2412 pcg->pcg_avail++;
2413 cc->cc_hits++;
2414 splx(s);
2415 return;
2416 }
2417
2418 /*
2419 * That failed. If the previous group isn't full, swap
2420 * it with the current group and try again.
2421 */
2422 pcg = cc->cc_previous;
2423 if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
2424 cc->cc_previous = cc->cc_current;
2425 cc->cc_current = pcg;
2426 continue;
2427 }
2428
2429 /*
2430 * Can't free to either group: try the slow path.
2431 * If put_slow() releases the object for us, it
2432 * will return false. Otherwise we need to retry.
2433 */
2434 if (!pool_cache_put_slow(cc, s, object))
2435 break;
2436 }
2437 }
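
/*
 * Illustrative usage (hypothetical caller): the common case is the
 * pool_cache_put() wrapper, i.e. pool_cache_put_paddr() with
 * POOL_PADDR_INVALID, handing back an object obtained earlier:
 *
 *	pool_cache_put(foo_cache, fp);
 */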
2438
2439 /*
2440 * pool_cache_transfer:
2441 *
2442 * Transfer objects from the per-CPU cache to the global cache.
2443 * Run within a cross-call thread.
2444 */
2445 static void
2446 pool_cache_transfer(pool_cache_t pc)
2447 {
2448 pool_cache_cpu_t *cc;
2449 pcg_t *prev, *cur, **list;
2450 int s;
2451
2452 s = splvm();
2453 mutex_enter(&pc->pc_lock);
2454 cc = pc->pc_cpus[curcpu()->ci_index];
2455 cur = cc->cc_current;
2456 cc->cc_current = __UNCONST(&pcg_dummy);
2457 prev = cc->cc_previous;
2458 cc->cc_previous = __UNCONST(&pcg_dummy);
2459 if (cur != &pcg_dummy) {
2460 if (cur->pcg_avail == cur->pcg_size) {
2461 list = &pc->pc_fullgroups;
2462 pc->pc_nfull++;
2463 } else if (cur->pcg_avail == 0) {
2464 list = &pc->pc_emptygroups;
2465 pc->pc_nempty++;
2466 } else {
2467 list = &pc->pc_partgroups;
2468 pc->pc_npart++;
2469 }
2470 cur->pcg_next = *list;
2471 *list = cur;
2472 }
2473 if (prev != &pcg_dummy) {
2474 if (prev->pcg_avail == prev->pcg_size) {
2475 list = &pc->pc_fullgroups;
2476 pc->pc_nfull++;
2477 } else if (prev->pcg_avail == 0) {
2478 list = &pc->pc_emptygroups;
2479 pc->pc_nempty++;
2480 } else {
2481 list = &pc->pc_partgroups;
2482 pc->pc_npart++;
2483 }
2484 prev->pcg_next = *list;
2485 *list = prev;
2486 }
2487 mutex_exit(&pc->pc_lock);
2488 splx(s);
2489 }
2490
2491 /*
2492 * Pool backend allocators.
2493 *
2494 * Each pool has a backend allocator that handles allocation, deallocation,
2495 * and any additional draining that might be needed.
2496 *
2497 * We provide two standard allocators:
2498 *
2499 * pool_allocator_kmem - the default when no allocator is specified
2500 *
2501 * pool_allocator_nointr - used for pools that will not be accessed
2502 * in interrupt context.
2503 */
2504 void *pool_page_alloc(struct pool *, int);
2505 void pool_page_free(struct pool *, void *);
2506
2507 #ifdef POOL_SUBPAGE
2508 struct pool_allocator pool_allocator_kmem_fullpage = {
2509 .pa_alloc = pool_page_alloc,
2510 .pa_free = pool_page_free,
2511 .pa_pagesz = 0
2512 };
2513 #else
2514 struct pool_allocator pool_allocator_kmem = {
2515 .pa_alloc = pool_page_alloc,
2516 .pa_free = pool_page_free,
2517 .pa_pagesz = 0
2518 };
2519 #endif
2520
2521 #ifdef POOL_SUBPAGE
2522 struct pool_allocator pool_allocator_nointr_fullpage = {
2523 .pa_alloc = pool_page_alloc,
2524 .pa_free = pool_page_free,
2525 .pa_pagesz = 0
2526 };
2527 #else
2528 struct pool_allocator pool_allocator_nointr = {
2529 .pa_alloc = pool_page_alloc,
2530 .pa_free = pool_page_free,
2531 .pa_pagesz = 0
2532 };
2533 #endif
2534
2535 #ifdef POOL_SUBPAGE
2536 void *pool_subpage_alloc(struct pool *, int);
2537 void pool_subpage_free(struct pool *, void *);
2538
2539 struct pool_allocator pool_allocator_kmem = {
2540 .pa_alloc = pool_subpage_alloc,
2541 .pa_free = pool_subpage_free,
2542 .pa_pagesz = POOL_SUBPAGE
2543 };
2544
2545 struct pool_allocator pool_allocator_nointr = {
2546 .pa_alloc = pool_subpage_alloc,
2547 .pa_free = pool_subpage_free,
2548 .pa_pagesz = POOL_SUBPAGE
2549 };
2550 #endif /* POOL_SUBPAGE */
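
/*
 * Sketch of a custom backend allocator (hypothetical names, for
 * illustration only): a pool that needs special backing memory fills
 * in the same fields used by the standard allocators above and passes
 * the structure to pool_init() or pool_cache_init():
 *
 *	static struct pool_allocator foo_allocator = {
 *		.pa_alloc = foo_page_alloc,
 *		.pa_free = foo_page_free,
 *		.pa_pagesz = 0,
 *	};
 *
 * As with the standard allocators, a pa_pagesz of 0 selects the
 * default page size.
 */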
2551
2552 static void *
2553 pool_allocator_alloc(struct pool *pp, int flags)
2554 {
2555 struct pool_allocator *pa = pp->pr_alloc;
2556 void *res;
2557
2558 res = (*pa->pa_alloc)(pp, flags);
2559 if (res == NULL && (flags & PR_WAITOK) == 0) {
2560 /*
2561 * We only run the drain hook here if PR_NOWAIT.
2562 * In other cases, the hook will be run in
2563 * pool_reclaim().
2564 */
2565 if (pp->pr_drain_hook != NULL) {
2566 (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
2567 res = (*pa->pa_alloc)(pp, flags);
2568 }
2569 }
2570 return res;
2571 }
2572
2573 static void
2574 pool_allocator_free(struct pool *pp, void *v)
2575 {
2576 struct pool_allocator *pa = pp->pr_alloc;
2577
2578 (*pa->pa_free)(pp, v);
2579 }
2580
2581 void *
2582 pool_page_alloc(struct pool *pp, int flags)
2583 {
2584 const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP;
2585 vmem_addr_t va;
2586 int ret;
2587
2588 ret = uvm_km_kmem_alloc(kmem_va_arena, pp->pr_alloc->pa_pagesz,
2589 vflags | VM_INSTANTFIT, &va);
2590
2591 return ret ? NULL : (void *)va;
2592 }
2593
2594 void
2595 pool_page_free(struct pool *pp, void *v)
2596 {
2597
2598 uvm_km_kmem_free(kmem_va_arena, (vaddr_t)v, pp->pr_alloc->pa_pagesz);
2599 }
2600
2601 static void *
2602 pool_page_alloc_meta(struct pool *pp, int flags)
2603 {
2604 const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP;
2605 vmem_addr_t va;
2606 int ret;
2607
2608 ret = vmem_alloc(kmem_meta_arena, pp->pr_alloc->pa_pagesz,
2609 vflags | VM_INSTANTFIT, &va);
2610
2611 return ret ? NULL : (void *)va;
2612 }
2613
2614 static void
2615 pool_page_free_meta(struct pool *pp, void *v)
2616 {
2617
2618 vmem_free(kmem_meta_arena, (vmem_addr_t)v, pp->pr_alloc->pa_pagesz);
2619 }
2620
2621 #ifdef POOL_REDZONE
2622 #if defined(_LP64)
2623 # define PRIME 0x9e37fffffffc0000UL
2624 #else /* defined(_LP64) */
2625 # define PRIME 0x9e3779b1
2626 #endif /* defined(_LP64) */
2627 #define STATIC_BYTE 0xFE
2628 CTASSERT(POOL_REDZONE_SIZE > 1);
2629
2630 static inline uint8_t
2631 pool_pattern_generate(const void *p)
2632 {
2633 return (uint8_t)(((uintptr_t)p) * PRIME
2634 >> ((sizeof(uintptr_t) - sizeof(uint8_t))) * CHAR_BIT);
2635 }
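
/*
 * In other words, on _LP64 the shift amount is
 * (sizeof(uintptr_t) - sizeof(uint8_t)) * CHAR_BIT = (8 - 1) * 8 = 56,
 * so the pattern byte is the most significant byte of p * PRIME and
 * neighbouring red zone bytes get different, address-dependent values.
 */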
2636
2637 static void
2638 pool_redzone_init(struct pool *pp, size_t requested_size)
2639 {
2640 size_t nsz;
2641
2642 if (pp->pr_roflags & PR_NOTOUCH) {
2643 pp->pr_reqsize = 0;
2644 pp->pr_redzone = false;
2645 return;
2646 }
2647
2648 /*
2649 * We may have extended the requested size earlier; check if
2650 * there's naturally space in the padding for a red zone.
2651 */
2652 if (pp->pr_size - requested_size >= POOL_REDZONE_SIZE) {
2653 pp->pr_reqsize = requested_size;
2654 pp->pr_redzone = true;
2655 return;
2656 }
2657
2658 /*
2659 * No space in the natural padding; check if we can extend a
2660 * bit the size of the pool.
2661 */
2662 nsz = roundup(pp->pr_size + POOL_REDZONE_SIZE, pp->pr_align);
2663 if (nsz <= pp->pr_alloc->pa_pagesz) {
2664 /* Ok, we can */
2665 pp->pr_size = nsz;
2666 pp->pr_reqsize = requested_size;
2667 pp->pr_redzone = true;
2668 } else {
2669 /* No space for a red zone... snif :'( */
2670 pp->pr_reqsize = 0;
2671 pp->pr_redzone = false;
2672 printf("pool redzone disabled for '%s'\n", pp->pr_wchan);
2673 }
2674 }
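
/*
 * Worked example (hypothetical sizes): with requested_size = 13 and
 * pr_align = 8, pr_size has already been rounded up to 16, leaving
 * 3 bytes of natural padding; POOL_REDZONE_SIZE (2) fits there and
 * pr_size is unchanged.  With requested_size = 16 there is no padding,
 * so pr_size grows to roundup(16 + 2, 8) = 24, provided that still
 * fits in a page.
 */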
2675
2676 static void
2677 pool_redzone_fill(struct pool *pp, void *p)
2678 {
2679 uint8_t *cp, pat;
2680 const uint8_t *ep;
2681
2682 if (!pp->pr_redzone)
2683 return;
2684
2685 cp = (uint8_t *)p + pp->pr_reqsize;
2686 ep = cp + POOL_REDZONE_SIZE;
2687
2688 /*
2689 * We really don't want the first byte of the red zone to be '\0';
2690 * an off-by-one in a string may not be properly detected.
2691 */
2692 pat = pool_pattern_generate(cp);
2693 *cp = (pat == '\0') ? STATIC_BYTE: pat;
2694 cp++;
2695
2696 while (cp < ep) {
2697 *cp = pool_pattern_generate(cp);
2698 cp++;
2699 }
2700 }
2701
2702 static void
2703 pool_redzone_check(struct pool *pp, void *p)
2704 {
2705 uint8_t *cp, pat, expected;
2706 const uint8_t *ep;
2707
2708 if (!pp->pr_redzone)
2709 return;
2710
2711 cp = (uint8_t *)p + pp->pr_reqsize;
2712 ep = cp + POOL_REDZONE_SIZE;
2713
2714 pat = pool_pattern_generate(cp);
2715 expected = (pat == '\0') ? STATIC_BYTE: pat;
2716 if (expected != *cp) {
2717 panic("%s: %p: 0x%02x != 0x%02x\n",
2718 __func__, cp, *cp, expected);
2719 }
2720 cp++;
2721
2722 while (cp < ep) {
2723 expected = pool_pattern_generate(cp);
2724 if (*cp != expected) {
2725 panic("%s: %p: 0x%02x != 0x%02x\n",
2726 __func__, cp, *cp, expected);
2727 }
2728 cp++;
2729 }
2730 }
2731
2732 #endif /* POOL_REDZONE */
2733
2734
2735 #ifdef POOL_SUBPAGE
2736 /* Sub-page allocator, for machines with large hardware pages. */
2737 void *
2738 pool_subpage_alloc(struct pool *pp, int flags)
2739 {
2740 return pool_get(&psppool, flags);
2741 }
2742
2743 void
2744 pool_subpage_free(struct pool *pp, void *v)
2745 {
2746 pool_put(&psppool, v);
2747 }
2748
2749 #endif /* POOL_SUBPAGE */
2750
2751 #if defined(DDB)
2752 static bool
2753 pool_in_page(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
2754 {
2755
2756 return (uintptr_t)ph->ph_page <= addr &&
2757 addr < (uintptr_t)ph->ph_page + pp->pr_alloc->pa_pagesz;
2758 }
2759
2760 static bool
2761 pool_in_item(struct pool *pp, void *item, uintptr_t addr)
2762 {
2763
2764 return (uintptr_t)item <= addr && addr < (uintptr_t)item + pp->pr_size;
2765 }
2766
2767 static bool
2768 pool_in_cg(struct pool *pp, struct pool_cache_group *pcg, uintptr_t addr)
2769 {
2770 int i;
2771
2772 if (pcg == NULL) {
2773 return false;
2774 }
2775 for (i = 0; i < pcg->pcg_avail; i++) {
2776 if (pool_in_item(pp, pcg->pcg_objects[i].pcgo_va, addr)) {
2777 return true;
2778 }
2779 }
2780 return false;
2781 }
2782
2783 static bool
2784 pool_allocated(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
2785 {
2786
2787 if ((pp->pr_roflags & PR_NOTOUCH) != 0) {
2788 unsigned int idx = pr_item_notouch_index(pp, ph, (void *)addr);
2789 pool_item_bitmap_t *bitmap =
2790 ph->ph_bitmap + (idx / BITMAP_SIZE);
2791 pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);
2792
2793 return (*bitmap & mask) == 0;
2794 } else {
2795 struct pool_item *pi;
2796
2797 LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
2798 if (pool_in_item(pp, pi, addr)) {
2799 return false;
2800 }
2801 }
2802 return true;
2803 }
2804 }
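
/*
 * Worked example for the PR_NOTOUCH case (hypothetical index, assuming
 * 32-bit bitmap words): an item index of 37 selects bitmap word
 * 37 / 32 = 1 and bit 37 & 31 = 5.  A set bit marks a free item, so
 * the item is reported as allocated only when that bit is clear.
 */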
2805
2806 void
2807 pool_whatis(uintptr_t addr, void (*pr)(const char *, ...))
2808 {
2809 struct pool *pp;
2810
2811 TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
2812 struct pool_item_header *ph;
2813 uintptr_t item;
2814 bool allocated = true;
2815 bool incache = false;
2816 bool incpucache = false;
2817 char cpucachestr[32];
2818
2819 if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
2820 LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
2821 if (pool_in_page(pp, ph, addr)) {
2822 goto found;
2823 }
2824 }
2825 LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
2826 if (pool_in_page(pp, ph, addr)) {
2827 allocated =
2828 pool_allocated(pp, ph, addr);
2829 goto found;
2830 }
2831 }
2832 LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
2833 if (pool_in_page(pp, ph, addr)) {
2834 allocated = false;
2835 goto found;
2836 }
2837 }
2838 continue;
2839 } else {
2840 ph = pr_find_pagehead_noalign(pp, (void *)addr);
2841 if (ph == NULL || !pool_in_page(pp, ph, addr)) {
2842 continue;
2843 }
2844 allocated = pool_allocated(pp, ph, addr);
2845 }
2846 found:
2847 if (allocated && pp->pr_cache) {
2848 pool_cache_t pc = pp->pr_cache;
2849 struct pool_cache_group *pcg;
2850 int i;
2851
2852 for (pcg = pc->pc_fullgroups; pcg != NULL;
2853 pcg = pcg->pcg_next) {
2854 if (pool_in_cg(pp, pcg, addr)) {
2855 incache = true;
2856 goto print;
2857 }
2858 }
2859 for (i = 0; i < __arraycount(pc->pc_cpus); i++) {
2860 pool_cache_cpu_t *cc;
2861
2862 if ((cc = pc->pc_cpus[i]) == NULL) {
2863 continue;
2864 }
2865 if (pool_in_cg(pp, cc->cc_current, addr) ||
2866 pool_in_cg(pp, cc->cc_previous, addr)) {
2867 struct cpu_info *ci =
2868 cpu_lookup(i);
2869
2870 incpucache = true;
2871 snprintf(cpucachestr,
2872 sizeof(cpucachestr),
2873 "cached by CPU %u",
2874 ci->ci_index);
2875 goto print;
2876 }
2877 }
2878 }
2879 print:
2880 item = (uintptr_t)ph->ph_page + ph->ph_off;
2881 item = item + rounddown(addr - item, pp->pr_size);
2882 (*pr)("%p is %p+%zu in POOL '%s' (%s)\n",
2883 (void *)addr, (void *)item, (size_t)(addr - item),
2884 pp->pr_wchan,
2885 incpucache ? cpucachestr :
2886 incache ? "cached" : allocated ? "allocated" : "free");
2887 }
2888 }
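
/*
 * Illustrative ddb session (hypothetical address and pool): this
 * routine backs the "whatis" command, producing output of the form
 *
 *	db{0}> whatis 0xffff800012345678
 *	0xffff800012345678 is 0xffff800012345600+120 in POOL 'buf2k' (allocated)
 */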
2889 #endif /* defined(DDB) */
2890
2891 static int
2892 pool_sysctl(SYSCTLFN_ARGS)
2893 {
2894 struct pool_sysctl data;
2895 struct pool *pp;
2896 struct pool_cache *pc;
2897 pool_cache_cpu_t *cc;
2898 int error;
2899 size_t i, written;
2900
2901 if (oldp == NULL) {
2902 *oldlenp = 0;
2903 TAILQ_FOREACH(pp, &pool_head, pr_poollist)
2904 *oldlenp += sizeof(data);
2905 return 0;
2906 }
2907
2908 memset(&data, 0, sizeof(data));
2909 error = 0;
2910 written = 0;
2911 TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
2912 if (written + sizeof(data) > *oldlenp)
2913 break;
2914 strlcpy(data.pr_wchan, pp->pr_wchan, sizeof(data.pr_wchan));
2915 data.pr_pagesize = pp->pr_alloc->pa_pagesz;
2916 data.pr_flags = pp->pr_roflags | pp->pr_flags;
2917 #define COPY(field) data.field = pp->field
2918 COPY(pr_size);
2919
2920 COPY(pr_itemsperpage);
2921 COPY(pr_nitems);
2922 COPY(pr_nout);
2923 COPY(pr_hardlimit);
2924 COPY(pr_npages);
2925 COPY(pr_minpages);
2926 COPY(pr_maxpages);
2927
2928 COPY(pr_nget);
2929 COPY(pr_nfail);
2930 COPY(pr_nput);
2931 COPY(pr_npagealloc);
2932 COPY(pr_npagefree);
2933 COPY(pr_hiwat);
2934 COPY(pr_nidle);
2935 #undef COPY
2936
2937 data.pr_cache_nmiss_pcpu = 0;
2938 data.pr_cache_nhit_pcpu = 0;
2939 if (pp->pr_cache) {
2940 pc = pp->pr_cache;
2941 data.pr_cache_meta_size = pc->pc_pcgsize;
2942 data.pr_cache_nfull = pc->pc_nfull;
2943 data.pr_cache_npartial = pc->pc_npart;
2944 data.pr_cache_nempty = pc->pc_nempty;
2945 data.pr_cache_ncontended = pc->pc_contended;
2946 data.pr_cache_nmiss_global = pc->pc_misses;
2947 data.pr_cache_nhit_global = pc->pc_hits;
2948 for (i = 0; i < pc->pc_ncpu; ++i) {
2949 cc = pc->pc_cpus[i];
2950 if (cc == NULL)
2951 continue;
2952 data.pr_cache_nmiss_pcpu += cc->cc_misses;
2953 data.pr_cache_nhit_pcpu += cc->cc_hits;
2954 }
2955 } else {
2956 data.pr_cache_meta_size = 0;
2957 data.pr_cache_nfull = 0;
2958 data.pr_cache_npartial = 0;
2959 data.pr_cache_nempty = 0;
2960 data.pr_cache_ncontended = 0;
2961 data.pr_cache_nmiss_global = 0;
2962 data.pr_cache_nhit_global = 0;
2963 }
2964
2965 error = sysctl_copyout(l, &data, oldp, sizeof(data));
2966 if (error)
2967 break;
2968 written += sizeof(data);
2969 oldp = (char *)oldp + sizeof(data);
2970 }
2971
2972 *oldlenp = written;
2973 return error;
2974 }
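
/*
 * Illustrative userland consumer (hypothetical, not part of this
 * file): the "kern.pool" node created below exports one struct
 * pool_sysctl per pool, so a reader sizes the buffer first and then
 * fetches the whole array:
 *
 *	size_t len;
 *	sysctlbyname("kern.pool", NULL, &len, NULL, 0);
 *	struct pool_sysctl *ps = malloc(len);
 *	sysctlbyname("kern.pool", ps, &len, NULL, 0);
 *	for (size_t n = 0; n < len / sizeof(*ps); n++)
 *		printf("%s: %" PRIu64 " gets\n", ps[n].pr_wchan, ps[n].pr_nget);
 */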
2975
2976 SYSCTL_SETUP(sysctl_pool_setup, "sysctl kern.pool setup")
2977 {
2978 const struct sysctlnode *rnode = NULL;
2979
2980 sysctl_createv(clog, 0, NULL, &rnode,
2981 CTLFLAG_PERMANENT,
2982 CTLTYPE_STRUCT, "pool",
2983 SYSCTL_DESCR("Get pool statistics"),
2984 pool_sysctl, 0, NULL, 0,
2985 CTL_KERN, CTL_CREATE, CTL_EOL);
2986 }
2987