xref: /netbsd/sys/sys/pool.h (revision bf9ec67e)
1 /*	$NetBSD: pool.h,v 1.37 2002/03/13 10:57:19 simonb Exp $	*/
2 
3 /*-
4  * Copyright (c) 1997, 1998, 1999, 2000 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
9  * Simulation Facility, NASA Ames Research Center.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. All advertising materials mentioning features or use of this software
20  *    must display the following acknowledgement:
21  *	This product includes software developed by the NetBSD
22  *	Foundation, Inc. and its contributors.
23  * 4. Neither the name of The NetBSD Foundation nor the names of its
24  *    contributors may be used to endorse or promote products derived
25  *    from this software without specific prior written permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37  * POSSIBILITY OF SUCH DAMAGE.
38  */
39 
40 #ifndef _SYS_POOL_H_
41 #define _SYS_POOL_H_
42 
43 #ifdef _KERNEL
44 #define	__POOL_EXPOSE
45 #endif
46 
47 #if defined(_KERNEL_OPT)
48 #include "opt_pool.h"
49 #endif
50 
51 #ifdef __POOL_EXPOSE
52 #include <sys/lock.h>
53 #include <sys/queue.h>
54 #include <sys/time.h>
55 #endif
56 
57 #define PR_HASHTABSIZE		8	/* # buckets in a pool's pr_hashtab (off-page header hash) */
58 #define	PCG_NOBJECTS		16	/* # object slots in one pool_cache_group */
59 
60 #ifdef __POOL_EXPOSE
61 /*
 * The pool cache group: a fixed-size batch of up to PCG_NOBJECTS
 * cached objects, linked on its parent pool cache's group list.
 */
62 struct pool_cache_group {
63 	TAILQ_ENTRY(pool_cache_group)
64 		pcg_list;	/* link in the pool cache's group list */
65 	u_int	pcg_avail;	/* # available objects in pcg_objects[] */
66 				/* pointers to the cached objects */
67 	void	*pcg_objects[PCG_NOBJECTS];
68 };
69 
/*
 * A cache of constructed objects layered on top of a pool, with
 * optional per-object constructor/destructor callbacks.
 */
70 struct pool_cache {
71 	TAILQ_ENTRY(pool_cache)
72 			pc_poollist;	/* entry on parent pool's cache list (pr_cachelist) */
73 	TAILQ_HEAD(, pool_cache_group)
74 			pc_grouplist;	/* Cache group list */
75 	struct pool_cache_group
76 			*pc_allocfrom;	/* group to allocate from */
77 	struct pool_cache_group
78 			*pc_freeto;	/* group to free to */
79 	struct pool	*pc_pool;	/* parent pool */
80 	struct simplelock pc_slock;	/* mutex */
81 
82 	int		(*pc_ctor)(void *, void *, int);	/* object constructor (arg, object, flags) */
83 	void		(*pc_dtor)(void *, void *);	/* object destructor (arg, object) */
84 	void		*pc_arg;	/* opaque argument passed to ctor/dtor */
85 
86 	/* Statistics. */
87 	unsigned long	pc_hits;	/* cache hits */
88 	unsigned long	pc_misses;	/* cache misses */
89 
90 	unsigned long	pc_ngroups;	/* # cache groups */
91 
92 	unsigned long	pc_nitems;	/* # objects currently in cache */
93 };
94 
/*
 * Back-end page allocator for a pool: supplies and releases the
 * pages that the pool carves into items.
 */
95 struct pool_allocator {
96 	void		*(*pa_alloc)(struct pool *, int);	/* allocate a page (pool, flags) */
97 	void		(*pa_free)(struct pool *, void *);	/* release a page back */
98 	int		pa_pagesz;	/* allocation unit (page) size, in bytes */
99 
100 	/* The following fields are for internal use only. */
101 	struct simplelock pa_slock;	/* mutex */
102 	TAILQ_HEAD(, pool) pa_list;	/* list of pools using this allocator */
103 	int		pa_flags;
104 #define	PA_INITIALIZED	0x01		/* internal fields have been set up */
105 #define	PA_WANT		0x02		/* wakeup any sleeping pools on free */
106 	int		pa_pagemask;	/* mask derived from pa_pagesz */
107 	int		pa_pageshift;	/* shift derived from pa_pagesz */
108 };
109 
/*
 * A resource pool: carves pages obtained from a back-end allocator
 * into fixed-size items and tracks their allocation, limits, and
 * statistics.
 */
110 struct pool {
111 	TAILQ_ENTRY(pool)
112 			pr_poollist;	/* link on the list of all pools */
113 	TAILQ_HEAD(,pool_item_header)
114 			pr_pagelist;	/* Allocated pages */
115 	struct pool_item_header	*pr_curpage;	/* page to allocate from next */
116 	TAILQ_HEAD(,pool_cache)
117 			pr_cachelist;	/* Caches for this pool */
118 	unsigned int	pr_size;	/* Size of item */
119 	unsigned int	pr_align;	/* Requested alignment, must be 2^n */
120 	unsigned int	pr_itemoffset;	/* Align this offset in item */
121 	unsigned int	pr_minitems;	/* minimum # of items to keep */
122 	unsigned int	pr_minpages;	/* same in page units */
123 	unsigned int	pr_maxpages;	/* maximum # of pages to keep */
124 	unsigned int	pr_npages;	/* # of pages allocated */
125 	unsigned int	pr_itemsperpage;/* # items that fit in a page */
126 	unsigned int	pr_slack;	/* unused space in a page */
127 	unsigned int	pr_nitems;	/* number of available items in pool */
128 	unsigned int	pr_nout;	/* # items currently allocated */
129 	unsigned int	pr_hardlimit;	/* hard limit to number of allocated
130 					   items */
131 	struct pool_allocator *pr_alloc;/* back-end allocator */
132 	TAILQ_ENTRY(pool) pr_alloc_list;/* link on allocator's pool list */
133 
134 	/* Drain hook: optional callback for reclaiming memory held by
135 	   users of this pool; see pool_set_drain_hook(). */
134 	void		(*pr_drain_hook)(void *, int);
135 	void		*pr_drain_hook_arg;
136 
137 	const char	*pr_wchan;	/* tsleep(9) identifier */
138 	unsigned int	pr_flags;	/* r/w flags */
139 	unsigned int	pr_roflags;	/* r/o flags */
140 #define	PR_NOWAIT	0x00		/* for symmetry */
141 #define PR_WAITOK	0x02		/* may sleep for resources */
142 #define PR_WANTED	0x04		/* a sleeper waits on pr_wchan */
143 #define PR_PHINPAGE	0x40		/* page header kept inside the page (see pr_phoffset) */
144 #define PR_LOGGING	0x80		/* record operations in pr_log */
145 #define PR_LIMITFAIL	0x100	/* even if waiting, fail if we hit limit */
146 #define PR_RECURSIVE	0x200	/* pool contains pools, for vmstat(8) */
147 
148 	/*
149 	 * `pr_slock' protects the pool's data structures when removing
150 	 * items from or returning items to the pool, or when reading
151 	 * or updating read/write fields in the pool descriptor.
152 	 *
153 	 * We assume back-end page allocators provide their own locking
154 	 * scheme.  They will be called with the pool descriptor _unlocked_,
155 	 * since the page allocators may block.
156 	 */
157 	struct simplelock	pr_slock;
158 
159 	LIST_HEAD(,pool_item_header)		/* Off-page page headers */
160 			pr_hashtab[PR_HASHTABSIZE];
161 
162 	int		pr_maxcolor;	/* Cache colouring */
163 	int		pr_curcolor;	/* current colour offset */
164 	int		pr_phoffset;	/* Offset in page of page header */
165 
166 	/*
167 	 * Warning message to be issued, and a per-time-delta rate cap,
168 	 * if the hard limit is reached.
169 	 */
170 	const char	*pr_hardlimit_warning;
171 	struct timeval	pr_hardlimit_ratecap;
172 	struct timeval	pr_hardlimit_warning_last;
173 
174 	/*
175 	 * Instrumentation
176 	 */
177 	unsigned long	pr_nget;	/* # of successful requests */
178 	unsigned long	pr_nfail;	/* # of unsuccessful requests */
179 	unsigned long	pr_nput;	/* # of releases */
180 	unsigned long	pr_npagealloc;	/* # of pages allocated */
181 	unsigned long	pr_npagefree;	/* # of pages released */
182 	unsigned int	pr_hiwat;	/* max # of pages in pool */
183 	unsigned long	pr_nidle;	/* # of idle pages */
184 
185 	/*
186 	 * Diagnostic aids (POOL_DIAGNOSTIC operation log).
187 	 */
188 	struct pool_log	*pr_log;	/* operation log buffer */
189 	int		pr_curlogentry;	/* next log slot to use */
190 	int		pr_logsize;	/* # of entries in pr_log */
191 
192 	const char	*pr_entered_file; /* reentrancy check */
193 	long		pr_entered_line;
194 };
196 #endif /* __POOL_EXPOSE */
197 
198 #ifdef _KERNEL
199 /*
200  * pool_allocator_kmem is the default that all pools get unless
201  * otherwise specified.  pool_allocator_nointr is provided for
202  * pools that know they will never be accessed in interrupt
203  * context.
204  */
205 extern struct pool_allocator pool_allocator_kmem;
206 extern struct pool_allocator pool_allocator_nointr;
207 
/* Pool creation and teardown. */
208 void		pool_init(struct pool *, size_t, u_int, u_int,
209 		    int, const char *, struct pool_allocator *);
210 void		pool_destroy(struct pool *);
211 
/* Install an optional memory-reclaim callback (pr_drain_hook). */
212 void		pool_set_drain_hook(struct pool *,
213 		    void (*)(void *, int), void *);
214 
/* Item allocation, release, and page reclamation. */
215 void		*pool_get(struct pool *, int);
216 void		pool_put(struct pool *, void *);
217 int		pool_reclaim(struct pool *);
218 
219 #ifdef POOL_DIAGNOSTIC
220 /*
221  * These versions do reentrancy checking: the macros below pass the
222  * caller's file and line so the pool can record who entered it.
223  */
224 void		*_pool_get(struct pool *, int, const char *, long);
225 void		_pool_put(struct pool *, void *, const char *, long);
226 int		_pool_reclaim(struct pool *, const char *, long);
227 #define		pool_get(h, f)	_pool_get((h), (f), __FILE__, __LINE__)
228 #define		pool_put(h, v)	_pool_put((h), (v), __FILE__, __LINE__)
229 #define		pool_reclaim(h)	_pool_reclaim((h), __FILE__, __LINE__)
230 #endif /* POOL_DIAGNOSTIC */
231 
/* Water marks, hard limit, and system-wide drain entry point. */
232 int		pool_prime(struct pool *, int);
233 void		pool_setlowat(struct pool *, int);
234 void		pool_sethiwat(struct pool *, int);
235 void		pool_sethardlimit(struct pool *, int, const char *, int);
236 void		pool_drain(void *);
237 
238 /*
239  * Debugging and diagnostic aids.
240  */
241 void		pool_print(struct pool *, const char *);
242 void		pool_printit(struct pool *, const char *,
243 		    void (*)(const char *, ...));
244 int		pool_chk(struct pool *, const char *);
245 
246 /*
247  * Pool cache routines.
248  */
249 void		pool_cache_init(struct pool_cache *, struct pool *,
250 		    int (*ctor)(void *, void *, int),
251 		    void (*dtor)(void *, void *),
252 		    void *);
253 void		pool_cache_destroy(struct pool_cache *);
254 void		*pool_cache_get(struct pool_cache *, int);
255 void		pool_cache_put(struct pool_cache *, void *);
256 void		pool_cache_destruct_object(struct pool_cache *, void *);
257 void		pool_cache_invalidate(struct pool_cache *);
258 #endif /* _KERNEL */
258 
259 #endif /* _SYS_POOL_H_ */
260