/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2002-2019 Jeffrey Roberson <jeff@FreeBSD.org>
 * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/counter.h>
#include <sys/_bitset.h>
#include <sys/_domainset.h>
#include <sys/_task.h>

/*
 * This file includes definitions, structures, prototypes, and inlines that
 * should not be used outside of the actual implementation of UMA.
 */

/*
 * A brief summary: Zones describe unique allocation types. Zones are
 * organized into per-CPU caches which are filled by buckets. Buckets are
 * organized according to memory domains. Buckets are filled from kegs which
 * are also organized according to memory domains. Kegs describe a unique
 * allocation type, backend memory provider, and layout. Kegs are associated
 * with one or more zones and zones reference one or more kegs. Kegs provide
 * slabs which are virtually contiguous collections of pages. Each slab is
 * broken down into one or more items that will satisfy an individual
 * allocation.
 *
 * Allocation is satisfied in the following order:
 * 1) Per-CPU cache
 * 2) Per-domain cache of buckets
 * 3) Slab from any of N kegs
 * 4) Backend page provider
 *
 * More detail on individual objects is contained below:
 *
 * Kegs contain lists of slabs which are stored in either the full bin, empty
 * bin, or partially allocated bin, to reduce fragmentation. They also contain
 * the user-supplied value for size; rsize is that value adjusted for
 * alignment. The Keg also stores information for managing a hash of page
 * addresses that maps pages to uma_slab_t structures for pages that don't
 * have embedded uma_slab_t's.
 *
 * Keg slab lists are organized by memory domain to support NUMA allocation
 * policies. By default allocations are spread across domains to reduce the
 * potential for hotspots. Special keg creation flags may be specified to
 * prefer local allocation. However there is no strict enforcement as frees
 * may happen on any CPU and these are returned to the CPU-local cache
 * regardless of the originating domain.
 *
 * The uma_slab_t may be embedded in a UMA_SLAB_SIZE chunk of memory or it may
 * be allocated off the page from a special slab zone. The free list within a
 * slab is managed with a bitmask. For item sizes that would yield more than
 * 10% memory waste we potentially allocate a separate uma_slab_t if this will
 * improve the number of items per slab that will fit.
 *
 * The only really gross cases, with regards to memory waste, are for those
 * items that are just over half the page size. You can get nearly 50% waste,
 * so you fall back to the memory footprint of the power of two allocator. I
 * have looked at memory allocation sizes on many of the machines available to
 * me, and there does not seem to be an abundance of allocations at this range
 * so at this time it may not make sense to optimize for it. This can, of
 * course, be solved with dynamic slab sizes.
 *
 * Kegs may serve multiple Zones but by far most of the time they only serve
 * one. When a Zone is created, a Keg is allocated and set up for it. While
 * the backing Keg stores slabs, the Zone caches Buckets of items allocated
 * from the slabs. Each Zone is equipped with an init/fini and ctor/dtor
 * pair, as well as with its own set of small per-CPU caches, layered above
 * the Zone's general Bucket cache.
 *
 * The PCPU caches are protected by critical sections, and may be accessed
 * safely only from their associated CPU, while the Zones backed by the same
 * Keg all share a common Keg lock (to coalesce contention on the backing
 * slabs). The backing Keg typically only serves one Zone but in the case of
 * multiple Zones, one of the Zones is considered the Primary Zone and all
 * Zone-related stats from the Keg are done in the Primary Zone. For an
 * example of a Multi-Zone setup, refer to the Mbuf allocation code.
 */
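
/*
 * A minimal consumer-side sketch (illustrative only, not part of this
 * header) using the public uma(9) API from uma.h; the zone name and item
 * type are hypothetical.  It shows how the objects described above are
 * exercised:
 *
 *	static uma_zone_t foo_zone;
 *
 *	foo_zone = uma_zcreate("foo", sizeof(struct foo),
 *	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
 *
 *	struct foo *fp = uma_zalloc(foo_zone, M_WAITOK);
 *	...
 *	uma_zfree(foo_zone, fp);
 *
 * uma_zalloc() first tries the calling CPU's per-CPU cache, then the
 * per-domain bucket cache, then imports items from the keg's slabs, and
 * finally falls back to the backend page provider.
 */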

/*
 * This is the representation of a normal (non-OFFPAGE) slab.
 *
 * i == item
 * s == slab pointer
 *
 * <----------------  Page (UMA_SLAB_SIZE) ------------------>
 * ___________________________________________________________
 * | _  _  _  _  _  _  _  _  _  _  _  _  _  _  _   ___________ |
 * ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| |slab header||
 * ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| |___________||
 * |___________________________________________________________|
 *
 *
 * This is an OFFPAGE slab. These can be larger than UMA_SLAB_SIZE.
 *
 * ___________________________________________________________
 * | _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  |
 * ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| |
 * ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| |
 * |___________________________________________________________|
 *    ___________    ^
 *   |slab header|   |
 *   |___________|---*
 *
 */

#ifndef VM_UMA_INT_H
#define VM_UMA_INT_H

#define UMA_SLAB_SIZE	PAGE_SIZE	/* How big are our slabs? */
#define UMA_SLAB_MASK	(PAGE_SIZE - 1)	/* Mask to get back to the page */
#define UMA_SLAB_SHIFT	PAGE_SHIFT	/* Number of bits in PAGE_MASK */

/* Max waste percentage before going to off page slab management */
#define UMA_MAX_WASTE	10

/* Max size of a CACHESPREAD slab. */
#define UMA_CACHESPREAD_MAX_SIZE	(128 * 1024)

/*
 * These flags must not overlap with the UMA_ZONE flags specified in uma.h.
 */
#define UMA_ZFLAG_OFFPAGE	0x00200000	/*
						 * Force the slab structure
						 * allocation off of the real
						 * memory.
						 */
#define UMA_ZFLAG_HASH		0x00400000	/*
						 * Use a hash table instead of
						 * caching information in the
						 * vm_page.
						 */
#define UMA_ZFLAG_VTOSLAB	0x00800000	/*
						 * Zone uses vtoslab for
						 * lookup.
						 */
#define UMA_ZFLAG_CTORDTOR	0x01000000	/* Zone has ctor/dtor set. */
#define UMA_ZFLAG_LIMIT		0x02000000	/* Zone has limit set. */
#define UMA_ZFLAG_CACHE		0x04000000	/* uma_zcache_create()d it */
#define UMA_ZFLAG_BUCKET	0x10000000	/* Bucket zone. */
#define UMA_ZFLAG_INTERNAL	0x20000000	/* No offpage no PCPU. */
#define UMA_ZFLAG_TRASH		0x40000000	/* Add trash ctor/dtor. */

#define UMA_ZFLAG_INHERIT						\
    (UMA_ZFLAG_OFFPAGE | UMA_ZFLAG_HASH | UMA_ZFLAG_VTOSLAB |		\
     UMA_ZFLAG_BUCKET | UMA_ZFLAG_INTERNAL)

#define PRINT_UMA_ZFLAGS	"\20"	\
    "\37TRASH"			\
    "\36INTERNAL"		\
    "\35BUCKET"			\
    "\33CACHE"			\
    "\32LIMIT"			\
    "\31CTORDTOR"		\
    "\30VTOSLAB"		\
    "\27HASH"			\
    "\26OFFPAGE"		\
    "\23SMR"			\
    "\22ROUNDROBIN"		\
    "\21FIRSTTOUCH"		\
    "\20PCPU"			\
    "\17NODUMP"			\
    "\16CACHESPREAD"		\
    "\14MAXBUCKET"		\
    "\13NOBUCKET"		\
    "\12SECONDARY"		\
    "\11NOTPAGE"		\
    "\10VM"			\
    "\7MTXCLASS"		\
    "\6NOFREE"			\
    "\5MALLOC"			\
    "\4NOTOUCH"			\
    "\3CONTIG"			\
    "\2ZINIT"

/*
 * Hash table for freed address -> slab translation.
 *
 * Only zones with memory not touchable by the allocator use the
 * hash table. Otherwise slabs are found with vtoslab().
 */
#define UMA_HASH_SIZE_INIT	32

#define UMA_HASH(h, s)	((((uintptr_t)s) >> UMA_SLAB_SHIFT) & (h)->uh_hashmask)

#define UMA_HASH_INSERT(h, s, mem)					\
	LIST_INSERT_HEAD(&(h)->uh_slab_hash[UMA_HASH((h),		\
	    (mem))], slab_tohashslab(s), uhs_hlink)

#define UMA_HASH_REMOVE(h, s)						\
	LIST_REMOVE(slab_tohashslab(s), uhs_hlink)

LIST_HEAD(slabhashhead, uma_hash_slab);

struct uma_hash {
	struct slabhashhead *uh_slab_hash;	/* Hash table for slabs */
	u_int		uh_hashsize;	/* Current size of the hash table */
	u_int		uh_hashmask;	/* Mask used during hashing */
};
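
/*
 * Illustrative bucket selection (hypothetical values): with
 * uh_hashsize == 32 the mask is 0x1f, so for a freed item whose backing
 * page starts at 0xfffff80012345000 on a 4 KiB page system, UMA_HASH()
 * selects bucket
 *
 *	(0xfffff80012345000 >> 12) & 0x1f == 0x45 & 0x1f == 0x05
 *
 * and hash_sfind() below walks that bucket's list comparing uhs_data
 * pointers against the page address.
 */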

/*
 * Align field or structure to cache 'sector' in Intel terminology. This
 * is more efficient with adjacent line prefetch.
 */
#if defined(__amd64__) || defined(__powerpc64__)
#define UMA_SUPER_ALIGN	(CACHE_LINE_SIZE * 2)
#else
#define UMA_SUPER_ALIGN	CACHE_LINE_SIZE
#endif

#define UMA_ALIGN	__aligned(UMA_SUPER_ALIGN)

/*
 * The uma_bucket structure is used to queue and manage buckets divorced
 * from per-cpu caches. They are loaded into uma_cache_bucket structures
 * for use.
 */
struct uma_bucket {
	STAILQ_ENTRY(uma_bucket) ub_link; /* Link into the zone */
	int16_t		ub_cnt;		/* Count of items in bucket. */
	int16_t		ub_entries;	/* Max items. */
	smr_seq_t	ub_seq;		/* SMR sequence number. */
	void		*ub_bucket[];	/* actual allocation storage */
};

typedef struct uma_bucket * uma_bucket_t;

/*
 * The uma_cache_bucket structure is statically allocated on each per-cpu
 * cache. Its use reduces branches and cache misses in the fast path.
 */
struct uma_cache_bucket {
	uma_bucket_t	ucb_bucket;
	int16_t		ucb_cnt;
	int16_t		ucb_entries;
	uint32_t	ucb_spare;
};

typedef struct uma_cache_bucket * uma_cache_bucket_t;

/*
 * The uma_cache structure is allocated for each cpu for every zone
 * type. This optimizes synchronization out of the allocator fast path.
 */
struct uma_cache {
	struct uma_cache_bucket	uc_freebucket;	/* Bucket we're freeing to */
	struct uma_cache_bucket	uc_allocbucket;	/* Bucket to allocate from */
	struct uma_cache_bucket	uc_crossbucket; /* cross domain bucket */
	uint64_t		uc_allocs;	/* Count of allocations */
	uint64_t		uc_frees;	/* Count of frees */
} UMA_ALIGN;

typedef struct uma_cache * uma_cache_t;

LIST_HEAD(slabhead, uma_slab);

/*
 * The cache structure pads perfectly into 64 bytes so we use spare
 * bits from the embedded cache buckets to store information from the zone
 * and keep all fast-path allocations accessing a single per-cpu line.
 */
static inline void
cache_set_uz_flags(uma_cache_t cache, uint32_t flags)
{

	cache->uc_freebucket.ucb_spare = flags;
}

static inline void
cache_set_uz_size(uma_cache_t cache, uint32_t size)
{

	cache->uc_allocbucket.ucb_spare = size;
}

static inline uint32_t
cache_uz_flags(uma_cache_t cache)
{

	return (cache->uc_freebucket.ucb_spare);
}

static inline uint32_t
cache_uz_size(uma_cache_t cache)
{

	return (cache->uc_allocbucket.ucb_spare);
}
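
/*
 * A minimal sketch of how a fast path can use the spare bits (hypothetical
 * caller, not the actual uma_core.c code): per-zone state is read from the
 * same per-CPU cache line as the buckets, without dereferencing the zone:
 *
 *	critical_enter();
 *	cache = &zone->uz_cpu[curcpu];
 *	if (__predict_false((cache_uz_flags(cache) &
 *	    UMA_ZFLAG_CTORDTOR) != 0))
 *		... take the slower path that runs the ctor/dtor ...
 *	critical_exit();
 */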

/*
 * Per-domain slab lists. Embedded in the kegs.
 */
struct uma_domain {
	struct mtx_padalign ud_lock;	/* Lock for the domain lists. */
	struct slabhead	ud_part_slab;	/* partially allocated slabs */
	struct slabhead	ud_free_slab;	/* completely unallocated slabs */
	struct slabhead	ud_full_slab;	/* fully allocated slabs */
	uint32_t	ud_pages;	/* Total page count */
	uint32_t	ud_free_items;	/* Count of items free in all slabs */
	uint32_t	ud_free_slabs;	/* Count of free slabs */
} __aligned(CACHE_LINE_SIZE);

typedef struct uma_domain * uma_domain_t;

/*
 * Keg management structure
 *
 * TODO: Optimize for cache line size
 *
 */
struct uma_keg {
	struct uma_hash	uk_hash;
	LIST_HEAD(,uma_zone)	uk_zones;	/* Keg's zones */

	struct domainset_ref uk_dr;	/* Domain selection policy. */
	uint32_t	uk_align;	/* Alignment mask */
	uint32_t	uk_reserve;	/* Number of reserved items. */
	uint32_t	uk_size;	/* Requested size of each item */
	uint32_t	uk_rsize;	/* Real size of each item */

	uma_init	uk_init;	/* Keg's init routine */
	uma_fini	uk_fini;	/* Keg's fini routine */
	uma_alloc	uk_allocf;	/* Allocation function */
	uma_free	uk_freef;	/* Free routine */

	u_long		uk_offset;	/* Next free offset from base KVA */
	vm_offset_t	uk_kva;		/* Zone base KVA */

	uint32_t	uk_pgoff;	/* Offset to uma_slab struct */
	uint16_t	uk_ppera;	/* pages per allocation from backend */
	uint16_t	uk_ipers;	/* Items per slab */
	uint32_t	uk_flags;	/* Internal flags */

	/* Least used fields go to the last cache line. */
	const char	*uk_name;	/* Name of creating zone. */
	LIST_ENTRY(uma_keg)	uk_link;	/* List of all kegs */

	/* Must be last, variable sized. */
	struct uma_domain	uk_domain[];	/* Keg's slab lists. */
};
typedef struct uma_keg * uma_keg_t;
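
/*
 * Illustrative layout arithmetic (hypothetical keg, matching the embedded
 * slab diagram above): for a 256-byte item with no alignment padding on a
 * 4 KiB page, uk_rsize == 256. With the slab header (struct uma_slab plus
 * its free bitset) stored at the end of the page, roughly:
 *
 *	uk_ipers == 15			(15 * 256 == 3840 bytes of items)
 *	uk_pgoff == uk_ipers * uk_rsize	(offset of the uma_slab header)
 *
 * leaving 4096 - 3840 == 256 bytes for the header. The exact computation
 * lives in the keg layout code in uma_core.c.
 */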

/*
 * Free bits per-slab.
 */
#define SLAB_MAX_SETSIZE	(PAGE_SIZE / UMA_SMALLEST_UNIT)
#define SLAB_MIN_SETSIZE	_BITSET_BITS
BITSET_DEFINE(noslabbits, 0);

/*
 * The slab structure manages a single contiguous allocation from backing
 * store and subdivides it into individually allocatable items.
 */
struct uma_slab {
	LIST_ENTRY(uma_slab)	us_link;	/* slabs in zone */
	uint16_t	us_freecount;		/* How many are free? */
	uint8_t		us_flags;		/* Page flags see uma.h */
	uint8_t		us_domain;		/* Backing NUMA domain. */
	struct noslabbits us_free;		/* Free bitmask, flexible. */
};
_Static_assert(sizeof(struct uma_slab) == __offsetof(struct uma_slab, us_free),
    "us_free field must be last");
_Static_assert(MAXMEMDOM < 255,
    "us_domain field is not wide enough");

typedef struct uma_slab * uma_slab_t;

/*
 * Slab structure with a full sized bitset and hash link for both
 * HASH and OFFPAGE zones.
 */
struct uma_hash_slab {
	LIST_ENTRY(uma_hash_slab) uhs_hlink;	/* Link for hash table */
	uint8_t		*uhs_data;		/* First item */
	struct uma_slab	uhs_slab;		/* Must be last. */
};

typedef struct uma_hash_slab * uma_hash_slab_t;

static inline uma_hash_slab_t
slab_tohashslab(uma_slab_t slab)
{

	return (__containerof(slab, struct uma_hash_slab, uhs_slab));
}

static inline void *
slab_data(uma_slab_t slab, uma_keg_t keg)
{

	if ((keg->uk_flags & UMA_ZFLAG_OFFPAGE) == 0)
		return ((void *)((uintptr_t)slab - keg->uk_pgoff));
	else
		return (slab_tohashslab(slab)->uhs_data);
}

static inline void *
slab_item(uma_slab_t slab, uma_keg_t keg, int index)
{
	uintptr_t data;

	data = (uintptr_t)slab_data(slab, keg);
	return ((void *)(data + keg->uk_rsize * index));
}

static inline int
slab_item_index(uma_slab_t slab, uma_keg_t keg, void *item)
{
	uintptr_t data;

	data = (uintptr_t)slab_data(slab, keg);
	return (((uintptr_t)item - data) / keg->uk_rsize);
}
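
/*
 * Illustrative round trip (hypothetical values): with uk_rsize == 256 and
 * an embedded slab whose data page begins at `base`,
 *
 *	slab_item(slab, keg, 3)			 == base + 3 * 256
 *	slab_item_index(slab, keg, base + 768)	 == 3
 *
 * so the free bitset in us_free can be indexed directly by the value
 * slab_item_index() returns.
 */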

STAILQ_HEAD(uma_bucketlist, uma_bucket);

struct uma_zone_domain {
	struct uma_bucketlist uzd_buckets; /* full buckets */
	uma_bucket_t	uzd_cross;	/* Fills from cross buckets. */
	long		uzd_nitems;	/* total item count */
	long		uzd_imax;	/* maximum item count this period */
	long		uzd_imin;	/* minimum item count this period */
	long		uzd_bimin;	/* Minimum item count this batch. */
	long		uzd_wss;	/* working set size estimate */
	long		uzd_limin;	/* Longtime minimum item count. */
	u_int		uzd_timin;	/* Time since uzd_limin == 0. */
	smr_seq_t	uzd_seq;	/* Lowest queued seq. */
	struct mtx	uzd_lock;	/* Lock for the domain */
} __aligned(CACHE_LINE_SIZE);

typedef struct uma_zone_domain * uma_zone_domain_t;

/*
 * Zone structure - per memory type.
 */
struct uma_zone {
	/* Offset 0, used in alloc/free fast/medium fast path and const. */
	uint32_t	uz_flags;	/* Flags inherited from kegs */
	uint32_t	uz_size;	/* Size inherited from kegs */
	uma_ctor	uz_ctor;	/* Constructor for each allocation */
	uma_dtor	uz_dtor;	/* Destructor */
	smr_t		uz_smr;		/* Safe memory reclaim context. */
	uint64_t	uz_max_items;	/* Maximum number of items to alloc */
	uint64_t	uz_bucket_max;	/* Maximum bucket cache size */
	uint16_t	uz_bucket_size;	/* Number of items in full bucket */
	uint16_t	uz_bucket_size_max; /* Maximum number of bucket items */
	uint32_t	uz_sleepers;	/* Threads sleeping on limit */
	counter_u64_t	uz_xdomain;	/* Total number of cross-domain frees */

	/* Offset 64, used in bucket replenish. */
	uma_keg_t	uz_keg;		/* This zone's keg if !CACHE */
	uma_import	uz_import;	/* Import new memory to cache. */
	uma_release	uz_release;	/* Release memory from cache. */
	void		*uz_arg;	/* Import/release argument. */
	uma_init	uz_init;	/* Initializer for each item */
	uma_fini	uz_fini;	/* Finalizer for each item. */
	volatile uint64_t uz_items;	/* Total items count & sleepers */
	uint64_t	uz_sleeps;	/* Total number of alloc sleeps */

	/* Offset 128, rare stats and misc read-only. */
	LIST_ENTRY(uma_zone) uz_link;	/* List of all zones in keg */
	counter_u64_t	uz_allocs;	/* Total number of allocations */
	counter_u64_t	uz_frees;	/* Total number of frees */
	counter_u64_t	uz_fails;	/* Total number of alloc failures */
	const char	*uz_name;	/* Text name of the zone */
	char		*uz_ctlname;	/* sysctl safe name string. */
	int		uz_namecnt;	/* duplicate name count. */
	uint16_t	uz_bucket_size_min; /* Min number of items in bucket */
	uint16_t	uz_reclaimers;	/* pending reclaim operations. */

	/* Offset 192, rare read-only. */
	struct sysctl_oid *uz_oid;	/* sysctl oid pointer. */
	const char	*uz_warning;	/* Warning to print on failure */
	struct timeval	uz_ratecheck;	/* Warnings rate-limiting */
	struct task	uz_maxaction;	/* Task to run when at limit */

	/* Offset 256. */
	struct mtx	uz_cross_lock;	/* Cross domain free lock */

	/*
	 * This HAS to be the last item because we adjust the zone size
	 * based on NCPU and then allocate the space for the zones.
	 */
	struct uma_cache uz_cpu[];	/* Per cpu caches */

	/* domains follow here. */
};

/*
 * Macros for interpreting the uz_items field. 20 bits of sleeper count
 * and 44 bits of item count.
 */
#define UZ_ITEMS_SLEEPER_SHIFT	44LL
#define UZ_ITEMS_SLEEPERS_MAX	((1 << (64 - UZ_ITEMS_SLEEPER_SHIFT)) - 1)
#define UZ_ITEMS_COUNT_MASK	((1LL << UZ_ITEMS_SLEEPER_SHIFT) - 1)
#define UZ_ITEMS_COUNT(x)	((x) & UZ_ITEMS_COUNT_MASK)
#define UZ_ITEMS_SLEEPERS(x)	((x) >> UZ_ITEMS_SLEEPER_SHIFT)
#define UZ_ITEMS_SLEEPER	(1LL << UZ_ITEMS_SLEEPER_SHIFT)
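
/*
 * Illustrative packing (hypothetical value): uz_items == 0x200000000064
 * encodes UZ_ITEMS_SLEEPERS(x) == 2 sleepers and UZ_ITEMS_COUNT(x) == 100
 * items, since (2LL << 44) | 100 == 0x200000000064. Atomically adding
 * UZ_ITEMS_SLEEPER registers one more sleeping thread without disturbing
 * the item count.
 */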

#define ZONE_ASSERT_COLD(z)						\
	KASSERT(uma_zone_get_allocs((z)) == 0,				\
	    ("zone %s initialization after use.", (z)->uz_name))

/* Domains are contiguous after the last CPU */
#define ZDOM_GET(z, n)							\
	(&((uma_zone_domain_t)&(z)->uz_cpu[mp_maxid + 1])[n])
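
/*
 * Illustrative layout (hypothetical numbers): on a machine where
 * mp_maxid == 3, the zone allocation is laid out as
 *
 *	struct uma_zone | uz_cpu[0..3] | uma_zone_domain[0..ndomains-1]
 *
 * so ZDOM_GET(z, 1) points just past the four per-CPU caches, at the
 * second cache-line-aligned uma_zone_domain structure.
 */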

#undef UMA_ALIGN

#ifdef _KERNEL
/* Internal prototypes */
static __inline uma_slab_t hash_sfind(struct uma_hash *hash, uint8_t *data);

/* Lock Macros */

#define KEG_LOCKPTR(k, d)	(struct mtx *)&(k)->uk_domain[(d)].ud_lock
#define KEG_LOCK_INIT(k, d, lc)						\
	do {								\
		if ((lc))						\
			mtx_init(KEG_LOCKPTR(k, d), (k)->uk_name,	\
			    (k)->uk_name, MTX_DEF | MTX_DUPOK);		\
		else							\
			mtx_init(KEG_LOCKPTR(k, d), (k)->uk_name,	\
			    "UMA zone", MTX_DEF | MTX_DUPOK);		\
	} while (0)

#define KEG_LOCK_FINI(k, d)	mtx_destroy(KEG_LOCKPTR(k, d))
#define KEG_LOCK(k, d)							\
	({ mtx_lock(KEG_LOCKPTR(k, d)); KEG_LOCKPTR(k, d); })
#define KEG_UNLOCK(k, d)	mtx_unlock(KEG_LOCKPTR(k, d))
#define KEG_LOCK_ASSERT(k, d)	mtx_assert(KEG_LOCKPTR(k, d), MA_OWNED)

#define KEG_GET(zone, keg) do {						\
	(keg) = (zone)->uz_keg;						\
	KASSERT((void *)(keg) != NULL,					\
	    ("%s: Invalid zone %p type", __func__, (zone)));		\
	} while (0)

#define KEG_ASSERT_COLD(k)						\
	KASSERT(uma_keg_get_allocs((k)) == 0,				\
	    ("keg %s initialization after use.", (k)->uk_name))

#define ZDOM_LOCK_INIT(z, zdom, lc)					\
	do {								\
		if ((lc))						\
			mtx_init(&(zdom)->uzd_lock, (z)->uz_name,	\
			    (z)->uz_name, MTX_DEF | MTX_DUPOK);		\
		else							\
			mtx_init(&(zdom)->uzd_lock, (z)->uz_name,	\
			    "UMA zone", MTX_DEF | MTX_DUPOK);		\
	} while (0)
#define ZDOM_LOCK_FINI(z)	mtx_destroy(&(z)->uzd_lock)
#define ZDOM_LOCK_ASSERT(z)	mtx_assert(&(z)->uzd_lock, MA_OWNED)

#define ZDOM_LOCK(z)	mtx_lock(&(z)->uzd_lock)
#define ZDOM_OWNED(z)	(mtx_owner(&(z)->uzd_lock) != NULL)
#define ZDOM_UNLOCK(z)	mtx_unlock(&(z)->uzd_lock)

#define ZONE_LOCK(z)	ZDOM_LOCK(ZDOM_GET((z), 0))
#define ZONE_UNLOCK(z)	ZDOM_UNLOCK(ZDOM_GET((z), 0))
#define ZONE_LOCKPTR(z)	(&ZDOM_GET((z), 0)->uzd_lock)

#define ZONE_CROSS_LOCK_INIT(z)						\
	mtx_init(&(z)->uz_cross_lock, "UMA Cross", NULL, MTX_DEF)
#define ZONE_CROSS_LOCK(z)	mtx_lock(&(z)->uz_cross_lock)
#define ZONE_CROSS_UNLOCK(z)	mtx_unlock(&(z)->uz_cross_lock)
#define ZONE_CROSS_LOCK_FINI(z)	mtx_destroy(&(z)->uz_cross_lock)

/*
 * Find a slab within a hash table. This is used for OFFPAGE zones to look up
 * the slab structure.
 *
 * Arguments:
 *	hash  The hash table to search.
 *	data  The base page of the item.
 *
 * Returns:
 *	A pointer to a slab if successful, else NULL.
 */
static __inline uma_slab_t
hash_sfind(struct uma_hash *hash, uint8_t *data)
{
	uma_hash_slab_t slab;
	u_int hval;

	hval = UMA_HASH(hash, data);

	LIST_FOREACH(slab, &hash->uh_slab_hash[hval], uhs_hlink) {
		if ((uint8_t *)slab->uhs_data == data)
			return (&slab->uhs_slab);
	}
	return (NULL);
}

static __inline uma_slab_t
vtoslab(vm_offset_t va)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	return (p->plinks.uma.slab);
}

static __inline void
vtozoneslab(vm_offset_t va, uma_zone_t *zone, uma_slab_t *slab)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	*slab = p->plinks.uma.slab;
	*zone = p->plinks.uma.zone;
}

static __inline void
vsetzoneslab(vm_offset_t va, uma_zone_t zone, uma_slab_t slab)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	p->plinks.uma.slab = slab;
	p->plinks.uma.zone = zone;
}
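
/*
 * Illustrative pairing (hypothetical page address `va`): a VTOSLAB keg
 * records its bookkeeping when a slab's pages are set up, and the free
 * path recovers it from nothing but the item's address:
 *
 *	vsetzoneslab(va, zone, slab);			on slab creation
 *	...
 *	vtozoneslab(trunc_page(item), &zone, &slab);	on free
 *
 * This avoids a hash lookup by reusing the vm_page plinks union for pages
 * owned by UMA.
 */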

extern unsigned long uma_kmem_limit;
extern unsigned long uma_kmem_total;

/* Adjust bytes under management by UMA. */
static inline void
uma_total_dec(unsigned long size)
{

	atomic_subtract_long(&uma_kmem_total, size);
}

static inline void
uma_total_inc(unsigned long size)
{

	if (atomic_fetchadd_long(&uma_kmem_total, size) > uma_kmem_limit)
		uma_reclaim_wakeup();
}

/*
 * The following two functions may be defined by architecture-specific code
 * if they can provide more efficient allocation functions. This is useful
 * for using direct mapped addresses.
 */
void *uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain,
    uint8_t *pflag, int wait);
void uma_small_free(void *mem, vm_size_t size, uint8_t flags);

/* Set a global soft limit on UMA managed memory. */
void uma_set_limit(unsigned long limit);

#endif /* _KERNEL */

#endif /* VM_UMA_INT_H */