/*	$OpenBSD: pool.h,v 1.80 2025/01/04 09:26:01 mvs Exp $	*/
/*	$NetBSD: pool.h,v 1.27 2001/06/06 22:00:17 rafal Exp $	*/

/*-
 * Copyright (c) 1997, 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _SYS_POOL_H_
#define _SYS_POOL_H_

/*
 * sysctls:
 *	kern.pool.npools
 *	kern.pool.name.<number>
 *	kern.pool.pool.<number>
 */
#define KERN_POOL_NPOOLS	1
#define KERN_POOL_NAME		2
#define KERN_POOL_POOL		3
#define KERN_POOL_CACHE		4	/* global pool cache info */
#define KERN_POOL_CACHE_CPUS	5	/* all cpus cache info */

struct kinfo_pool {
	unsigned int	pr_size;	/* size of a pool item */
	unsigned int	pr_pgsize;	/* size of a "page" */
	unsigned int	pr_itemsperpage; /* number of items per "page" */
	unsigned int	pr_minpages;	/* minimum # of pages to keep */
	unsigned int	pr_maxpages;	/* maximum # of idle pages to keep */
	unsigned int	pr_hardlimit;	/* hard limit to number of allocated
					   items */

	unsigned int	pr_npages;	/* # of pages allocated */
	unsigned int	pr_nout;	/* # items currently allocated */
	unsigned int	pr_nitems;	/* # items in the pool */

	unsigned long	pr_nget;	/* # of successful requests */
	unsigned long	pr_nput;	/* # of releases */
	unsigned long	pr_nfail;	/* # of unsuccessful requests */
	unsigned long	pr_npagealloc;	/* # of pages allocated */
	unsigned long	pr_npagefree;	/* # of pages released */
	unsigned int	pr_hiwat;	/* max # of pages in pool */
	unsigned long	pr_nidle;	/* # of idle pages */
};

struct kinfo_pool_cache {
	uint64_t	pr_ngc;		/* # of times a list has been gc'ed */
	unsigned int	pr_len;		/* current target for list len */
	unsigned int	pr_nitems;	/* # of idle items in the depot */
	unsigned int	pr_contention;	/* # of times mtx was busy */
};

/*
 * KERN_POOL_CACHE_CPUS provides an array, not a single struct.  i.e., it
 * provides struct kinfo_pool_cache_cpu kppc[ncpusfound].
 */
struct kinfo_pool_cache_cpu {
	unsigned int	pr_cpu;		/* which cpu this cache is on */

	/* counters for times items were handled by the cache */
	uint64_t	pr_nget;	/* # of requests */
	uint64_t	pr_nfail;	/* # of unsuccessful requests */
	uint64_t	pr_nput;	/* # of releases */

	/* counters for times the cache interacted with the pool */
	uint64_t	pr_nlget;	/* # of list requests */
	uint64_t	pr_nlfail;	/* # of unsuccessful list requests */
	uint64_t	pr_nlput;	/* # of list releases */
};
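
/*
 * Example (an illustrative sketch, not part of this header): the
 * KERN_POOL_* values above name the third-level components of a
 * kern.pool sysctl(2) request.  Assuming the top-level KERN_POOL
 * identifier from <sys/sysctl.h>, plus <sys/types.h>, <stdio.h> and
 * <err.h>, userland code can walk the pools roughly as follows.  Pool
 * serial numbers start at 1 and destroyed pools can leave gaps, so
 * this simplified walk skips failed lookups instead of treating them
 * as fatal:
 *
 *	int mib[4] = { CTL_KERN, KERN_POOL, KERN_POOL_NPOOLS, 0 };
 *	int i, npools;
 *	size_t len = sizeof(npools);
 *
 *	if (sysctl(mib, 3, &npools, &len, NULL, 0) == -1)
 *		err(1, "kern.pool.npools");
 *
 *	for (i = 1; i <= npools; i++) {
 *		struct kinfo_pool kp;
 *
 *		mib[2] = KERN_POOL_POOL;
 *		mib[3] = i;
 *		len = sizeof(kp);
 *		if (sysctl(mib, 4, &kp, &len, NULL, 0) == -1)
 *			continue;
 *		printf("pool %d: item size %u, %lu gets\n",
 *		    i, kp.pr_size, kp.pr_nget);
 *	}
 */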

#if defined(_KERNEL) || defined(_LIBKVM)

#include <sys/queue.h>
#include <sys/tree.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>

struct pool;
struct pool_request;
struct pool_lock_ops;
TAILQ_HEAD(pool_requests, pool_request);

struct pool_allocator {
	void		*(*pa_alloc)(struct pool *, int, int *);
	void		 (*pa_free)(struct pool *, void *);
	size_t		   pa_pagesz;
};

/*
 * The pa_pagesz member encodes the sizes of pages that can be
 * provided by the allocator, and whether the allocations can be
 * aligned to their size.
 *
 * Page sizes can only be powers of two.  Each available page size is
 * represented by its value set as a bit.  e.g., to indicate that an
 * allocator can provide 16k and 32k pages you initialise pa_pagesz
 * to (32768 | 16384).
 *
 * If the allocator can provide aligned pages the low bit in pa_pagesz
 * is set.  The POOL_ALLOC_ALIGNED macro is provided as a convenience.
 *
 * If pa_pagesz is unset (i.e. 0), POOL_ALLOC_DEFAULT will be used
 * instead.
 */

#define POOL_ALLOC_ALIGNED		1UL
#define POOL_ALLOC_SIZE(_sz, _a)	((_sz) | (_a))
#define POOL_ALLOC_SIZES(_min, _max, _a) \
	((_max) | \
	(((_max) - 1) & ~((_min) - 1)) | (_a))

#define POOL_ALLOC_DEFAULT \
	POOL_ALLOC_SIZE(PAGE_SIZE, POOL_ALLOC_ALIGNED)

TAILQ_HEAD(pool_pagelist, pool_page_header);

struct pool_cache_item;
TAILQ_HEAD(pool_cache_lists, pool_cache_item);
struct cpumem;

union pool_lock {
	struct mutex	prl_mtx;
	struct rwlock	prl_rwlock;
};

struct pool {
	struct refcnt	pr_refcnt;
	union pool_lock	pr_lock;
	const struct pool_lock_ops *
			pr_lock_ops;
	SIMPLEQ_ENTRY(pool)
			pr_poollist;
	struct pool_pagelist
			pr_emptypages;	/* Empty pages */
	struct pool_pagelist
			pr_fullpages;	/* Full pages */
	struct pool_pagelist
			pr_partpages;	/* Partially-allocated pages */
	struct pool_page_header *
			pr_curpage;
	unsigned int	pr_size;	/* Size of item */
	unsigned int	pr_minitems;	/* minimum # of items to keep */
	unsigned int	pr_minpages;	/* same in page units */
	unsigned int	pr_maxpages;	/* maximum # of idle pages to keep */
	unsigned int	pr_npages;	/* # of pages allocated */
	unsigned int	pr_itemsperpage;/* # items that fit in a page */
	unsigned int	pr_slack;	/* unused space in a page */
	unsigned int	pr_nitems;	/* number of available items in pool */
	unsigned int	pr_nout;	/* # items currently allocated */
	unsigned int	pr_hardlimit;	/* hard limit to number of allocated
					   items */
	unsigned int	pr_serial;	/* unique serial number of the pool */
	unsigned int	pr_pgsize;	/* Size of a "page" */
	vaddr_t		pr_pgmask;	/* Mask with an item to get a page */
	struct pool_allocator *
			pr_alloc;	/* backend allocator */
	const char *	pr_wchan;	/* tsleep(9) identifier */
#define PR_WAITOK	0x0001 /* M_WAITOK */
#define PR_NOWAIT	0x0002 /* M_NOWAIT */
#define PR_LIMITFAIL	0x0004 /* M_CANFAIL */
#define PR_ZERO		0x0008 /* M_ZERO */
#define PR_RWLOCK	0x0010
#define PR_WANTED	0x0100

	int		pr_flags;
	int		pr_ipl;

	RBT_HEAD(phtree, pool_page_header)
			pr_phtree;

	struct cpumem *	pr_cache;
	unsigned long	pr_cache_magic[2];
	union pool_lock	pr_cache_lock;
	struct pool_cache_lists
			pr_cache_lists;	/* list of idle item lists */
	u_int		pr_cache_nitems; /* # of idle items */
	u_int		pr_cache_items;	/* target list length */
	u_int		pr_cache_contention;
	u_int		pr_cache_contention_prev;
	uint64_t	pr_cache_timestamp; /* time idle list was empty */
	uint64_t	pr_cache_ngc;	/* # of times the gc released a list */
	int		pr_cache_nout;

	u_int		pr_align;
	u_int		pr_maxcolors;	/* Cache coloring */
	int		pr_phoffset;	/* Offset in page of page header */

	/*
	 * Warning message to be issued, and a per-time-delta rate cap,
	 * if the hard limit is reached.
	 */
	const char	*pr_hardlimit_warning;
	struct timeval	pr_hardlimit_ratecap;
	struct timeval	pr_hardlimit_warning_last;

	/*
	 * pool item requests queue
	 */
	union pool_lock	pr_requests_lock;
	struct pool_requests
			pr_requests;
	unsigned int	pr_requesting;

	/*
	 * Instrumentation
	 */
	unsigned long	pr_nget;	/* # of successful requests */
	unsigned long	pr_nfail;	/* # of unsuccessful requests */
	unsigned long	pr_nput;	/* # of releases */
	unsigned long	pr_npagealloc;	/* # of pages allocated */
	unsigned long	pr_npagefree;	/* # of pages released */
	unsigned int	pr_hiwat;	/* max # of pages in pool */
	unsigned long	pr_nidle;	/* # of idle pages */

	/* Physical memory configuration. */
	const struct kmem_pa_mode *
			pr_crange;
};

#endif /* _KERNEL || _LIBKVM */

#ifdef _KERNEL

extern struct pool_allocator pool_allocator_single;
extern struct pool_allocator pool_allocator_multi;

struct pool_request {
	TAILQ_ENTRY(pool_request) pr_entry;
	void (*pr_handler)(struct pool *, void *, void *);
	void *pr_cookie;
	void *pr_item;
};

void		pool_init(struct pool *, size_t, u_int, int, int,
		    const char *, struct pool_allocator *);
void		pool_cache_init(struct pool *);
void		pool_destroy(struct pool *);
void		pool_setlowat(struct pool *, int);
void		pool_sethiwat(struct pool *, int);
int		pool_sethardlimit(struct pool *, u_int, const char *, int);
void		pool_set_constraints(struct pool *,
		    const struct kmem_pa_mode *mode);

void		*pool_get(struct pool *, int) __malloc;
void		pool_request_init(struct pool_request *,
		    void (*)(struct pool *, void *, void *), void *);
void		pool_request(struct pool *, struct pool_request *);
void		pool_put(struct pool *, void *);
void		pool_wakeup(struct pool *);
int		pool_reclaim(struct pool *);
void		pool_reclaim_all(void);
int		pool_prime(struct pool *, int);
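
/*
 * Example (an illustrative sketch; example_pool, struct example and
 * the "examplepl" wchan are hypothetical names): a pool is typically
 * initialised once and then used as a fixed-size item allocator.
 * Passing NULL as the pool_allocator selects the default backend.
 * PR_WAITOK gets may sleep; code that cannot sleep passes PR_NOWAIT
 * and must handle a NULL return:
 *
 *	struct pool example_pool;
 *
 *	pool_init(&example_pool, sizeof(struct example), 0, IPL_NONE,
 *	    PR_WAITOK, "examplepl", NULL);
 *
 *	struct example *e;
 *
 *	e = pool_get(&example_pool, PR_WAITOK | PR_ZERO);
 *	...
 *	pool_put(&example_pool, e);
 *
 * A custom backend advertises the page sizes it can provide through
 * pa_pagesz; e.g., an allocator offering aligned 16k and 32k pages
 * (example_page_alloc and example_page_free are hypothetical):
 *
 *	struct pool_allocator example_allocator = {
 *		example_page_alloc,
 *		example_page_free,
 *		POOL_ALLOC_SIZES(16384, 32768, POOL_ALLOC_ALIGNED),
 *	};
 */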

#ifdef DDB
/*
 * Debugging and diagnostic aids.
 */
void		pool_printit(struct pool *, const char *,
		    int (*)(const char *, ...));
void		pool_walk(struct pool *, int, int (*)(const char *, ...),
		    void (*)(void *, int, int (*)(const char *, ...)));
#endif

/* the allocator for dma-able memory is a thin layer on top of pool */
void		 dma_alloc_init(void);
void		*dma_alloc(size_t size, int flags);
void		 dma_free(void *m, size_t size);
#endif /* _KERNEL */

#endif /* _SYS_POOL_H_ */
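
/*
 * Example (an illustrative sketch): dma_alloc() takes the same
 * PR_WAITOK/PR_NOWAIT flags as pool_get() and is backed by a set of
 * fixed-size pools, so the caller must remember the requested size
 * and pass it back to dma_free():
 *
 *	void *buf;
 *
 *	buf = dma_alloc(len, PR_NOWAIT);
 *	if (buf == NULL)
 *		return (ENOMEM);
 *	...
 *	dma_free(buf, len);
 */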