/*
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf. DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://zfsonlinux.org/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _SPL_KMEM_CACHE_H
#define	_SPL_KMEM_CACHE_H

#include <sys/taskq.h>

/*
 * Slab allocation interfaces.  The SPL slab differs from the standard
 * Linux SLAB or SLUB primarily in that each cache may be backed by slabs
 * allocated from the physical or virtual memory address space.  The
 * virtual slabs allow for good behavior when allocating large objects of
 * identical size.  This slab implementation also supports both
 * constructors and destructors, which the Linux slab does not.
 */
typedef enum kmc_bit {
	KMC_BIT_NODEBUG		= 1,	/* Default behavior */
	KMC_BIT_KVMEM		= 7,	/* Use kvmalloc Linux allocator */
	KMC_BIT_SLAB		= 8,	/* Use Linux slab cache */
	KMC_BIT_DEADLOCKED	= 14,	/* Deadlock detected */
	KMC_BIT_GROWING		= 15,	/* Growing in progress */
	KMC_BIT_REAPING		= 16,	/* Reaping in progress */
	KMC_BIT_DESTROY		= 17,	/* Destroy in progress */
	KMC_BIT_TOTAL		= 18,	/* Proc handler helper bit */
	KMC_BIT_ALLOC		= 19,	/* Proc handler helper bit */
	KMC_BIT_MAX		= 20,	/* Proc handler helper bit */
} kmc_bit_t;

/* kmem move callback return values */
typedef enum kmem_cbrc {
	KMEM_CBRC_YES		= 0,	/* Object moved */
	KMEM_CBRC_NO		= 1,	/* Object not moved */
	KMEM_CBRC_LATER		= 2,	/* Object not moved, try again later */
	KMEM_CBRC_DONT_NEED	= 3,	/* Neither object is needed */
	KMEM_CBRC_DONT_KNOW	= 4,	/* Object unknown */
} kmem_cbrc_t;
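
/*
 * A move callback registered with spl_kmem_cache_set_move() (declared
 * below) receives the old object, the new object, the object size, and a
 * private argument, and reports the outcome with one of the values above.
 * A minimal sketch, where my_obj_t and my_obj_is_pinned() are
 * hypothetical, not part of this interface:
 *
 *	static kmem_cbrc_t
 *	my_obj_move(void *old, void *new, size_t size, void *priv)
 *	{
 *		if (my_obj_is_pinned((my_obj_t *)old))
 *			return (KMEM_CBRC_LATER);
 *
 *		memcpy(new, old, size);
 *		return (KMEM_CBRC_YES);
 *	}
 */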

#define	KMC_NODEBUG		(1 << KMC_BIT_NODEBUG)
#define	KMC_KVMEM		(1 << KMC_BIT_KVMEM)
#define	KMC_SLAB		(1 << KMC_BIT_SLAB)
#define	KMC_DEADLOCKED		(1 << KMC_BIT_DEADLOCKED)
#define	KMC_GROWING		(1 << KMC_BIT_GROWING)
#define	KMC_REAPING		(1 << KMC_BIT_REAPING)
#define	KMC_DESTROY		(1 << KMC_BIT_DESTROY)
#define	KMC_TOTAL		(1 << KMC_BIT_TOTAL)
#define	KMC_ALLOC		(1 << KMC_BIT_ALLOC)
#define	KMC_MAX			(1 << KMC_BIT_MAX)

#define	KMC_REAP_CHUNK		INT_MAX
#define	KMC_DEFAULT_SEEKS	1

#define	KMC_RECLAIM_ONCE	0x1	/* Force a single shrinker pass */

extern struct list_head spl_kmem_cache_list;
extern struct rw_semaphore spl_kmem_cache_sem;

#define	SKM_MAGIC			0x2e2e2e2e
#define	SKO_MAGIC			0x20202020
#define	SKS_MAGIC			0x22222222
#define	SKC_MAGIC			0x2c2c2c2c

#define	SPL_KMEM_CACHE_OBJ_PER_SLAB	8	/* Target objects per slab */
#define	SPL_KMEM_CACHE_ALIGN		8	/* Default object alignment */
#ifdef _LP64
#define	SPL_KMEM_CACHE_MAX_SIZE		32	/* Max slab size in MB */
#else
#define	SPL_KMEM_CACHE_MAX_SIZE		4	/* Max slab size in MB */
#endif

#define	SPL_MAX_ORDER			(MAX_ORDER - 3)
#define	SPL_MAX_ORDER_NR_PAGES		(1 << (SPL_MAX_ORDER - 1))

#ifdef CONFIG_SLUB
#define	SPL_MAX_KMEM_CACHE_ORDER	PAGE_ALLOC_COSTLY_ORDER
#define	SPL_MAX_KMEM_ORDER_NR_PAGES	(1 << (SPL_MAX_KMEM_CACHE_ORDER - 1))
#else
#define	SPL_MAX_KMEM_ORDER_NR_PAGES	(KMALLOC_MAX_SIZE >> PAGE_SHIFT)
#endif
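
/*
 * For example, on a historical x86_64 kernel where MAX_ORDER is 11 and
 * PAGE_ALLOC_COSTLY_ORDER is 3 (assumed values; both vary by kernel
 * version and configuration), the definitions above give
 * SPL_MAX_ORDER = 8, SPL_MAX_ORDER_NR_PAGES = 128, and with CONFIG_SLUB
 * SPL_MAX_KMEM_ORDER_NR_PAGES = (1 << 2) = 4.
 */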

#define	POINTER_IS_VALID(p)		0	/* Unimplemented */
#define	POINTER_INVALIDATE(pp)			/* Unimplemented */

typedef int (*spl_kmem_ctor_t)(void *, void *, int);
typedef void (*spl_kmem_dtor_t)(void *, void *);

typedef struct spl_kmem_magazine {
	uint32_t		skm_magic;	/* Sanity magic */
	uint32_t		skm_avail;	/* Available objects */
	uint32_t		skm_size;	/* Magazine size */
	uint32_t		skm_refill;	/* Batch refill size */
	struct spl_kmem_cache	*skm_cache;	/* Owned by cache */
	unsigned int		skm_cpu;	/* Owned by cpu */
	void			*skm_objs[0];	/* Object pointers */
} spl_kmem_magazine_t;

typedef struct spl_kmem_obj {
	uint32_t		sko_magic;	/* Sanity magic */
	void			*sko_addr;	/* Buffer address */
	struct spl_kmem_slab	*sko_slab;	/* Owned by slab */
	struct list_head	sko_list;	/* Free object list linkage */
} spl_kmem_obj_t;

typedef struct spl_kmem_slab {
	uint32_t		sks_magic;	/* Sanity magic */
	uint32_t		sks_objs;	/* Objects per slab */
	struct spl_kmem_cache	*sks_cache;	/* Owned by cache */
	struct list_head	sks_list;	/* Slab list linkage */
	struct list_head	sks_free_list;	/* Free object list */
	unsigned long		sks_age;	/* Last modify jiffie */
	uint32_t		sks_ref;	/* Ref count used objects */
} spl_kmem_slab_t;

typedef struct spl_kmem_alloc {
	struct spl_kmem_cache	*ska_cache;	/* Owned by cache */
	int			ska_flags;	/* Allocation flags */
	taskq_ent_t		ska_tqe;	/* Task queue entry */
} spl_kmem_alloc_t;

typedef struct spl_kmem_emergency {
	struct rb_node		ske_node;	/* Emergency tree linkage */
	unsigned long		ske_obj;	/* Buffer address */
} spl_kmem_emergency_t;

typedef struct spl_kmem_cache {
	uint32_t		skc_magic;	/* Sanity magic */
	uint32_t		skc_name_size;	/* Name length */
	char			*skc_name;	/* Name string */
	spl_kmem_magazine_t	**skc_mag;	/* Per-CPU warm cache */
	uint32_t		skc_mag_size;	/* Magazine size */
	uint32_t		skc_mag_refill;	/* Magazine refill count */
	spl_kmem_ctor_t		skc_ctor;	/* Constructor */
	spl_kmem_dtor_t		skc_dtor;	/* Destructor */
	void			*skc_private;	/* Private data */
	void			*skc_vmp;	/* Unused */
	struct kmem_cache	*skc_linux_cache; /* Linux slab cache if used */
	unsigned long		skc_flags;	/* Flags */
	uint32_t		skc_obj_size;	/* Object size */
	uint32_t		skc_obj_align;	/* Object alignment */
	uint32_t		skc_slab_objs;	/* Objects per slab */
	uint32_t		skc_slab_size;	/* Slab size */
	atomic_t		skc_ref;	/* Ref count callers */
	taskqid_t		skc_taskqid;	/* Slab reclaim task */
	struct list_head	skc_list;	/* List of caches linkage */
	struct list_head	skc_complete_list; /* Completely alloc'ed */
	struct list_head	skc_partial_list;  /* Partially alloc'ed */
	struct rb_root		skc_emergency_tree; /* Min sized objects */
	spinlock_t		skc_lock;	/* Cache lock */
	spl_wait_queue_head_t	skc_waitq;	/* Allocation waiters */
	uint64_t		skc_slab_fail;	/* Slab alloc failures */
	uint64_t		skc_slab_create;  /* Slab creates */
	uint64_t		skc_slab_destroy; /* Slab destroys */
	uint64_t		skc_slab_total;	/* Slab total current */
	uint64_t		skc_slab_alloc;	/* Slab alloc current */
	uint64_t		skc_slab_max;	/* Slab max historic */
	uint64_t		skc_obj_total;	/* Obj total current */
	uint64_t		skc_obj_alloc;	/* Obj alloc current */
	struct percpu_counter	skc_linux_alloc;   /* Linux-backed Obj alloc */
	uint64_t		skc_obj_max;	/* Obj max historic */
	uint64_t		skc_obj_deadlock;  /* Obj emergency deadlocks */
	uint64_t		skc_obj_emergency; /* Obj emergency current */
	uint64_t		skc_obj_emergency_max; /* Obj emergency max */
} spl_kmem_cache_t;
#define	kmem_cache_t		spl_kmem_cache_t

extern spl_kmem_cache_t *spl_kmem_cache_create(char *name, size_t size,
    size_t align, spl_kmem_ctor_t ctor, spl_kmem_dtor_t dtor,
    void *reclaim, void *priv, void *vmp, int flags);
extern void spl_kmem_cache_set_move(spl_kmem_cache_t *,
    kmem_cbrc_t (*)(void *, void *, size_t, void *));
extern void spl_kmem_cache_destroy(spl_kmem_cache_t *skc);
extern void *spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags);
extern void spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj);
extern void spl_kmem_cache_set_allocflags(spl_kmem_cache_t *skc, gfp_t flags);
extern void spl_kmem_cache_reap_now(spl_kmem_cache_t *skc);
extern void spl_kmem_reap(void);
extern uint64_t spl_kmem_cache_inuse(kmem_cache_t *cache);
extern uint64_t spl_kmem_cache_entry_size(kmem_cache_t *cache);
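
/*
 * A minimal usage sketch; the my_obj_t type, the cache name, and the
 * trivial constructor/destructor below are illustrative only, not part
 * of this interface:
 *
 *	static int
 *	my_obj_ctor(void *obj, void *priv, int kmflags)
 *	{
 *		memset(obj, 0, sizeof (my_obj_t));
 *		return (0);
 *	}
 *
 *	static void
 *	my_obj_dtor(void *obj, void *priv)
 *	{
 *	}
 *
 *	spl_kmem_cache_t *skc = spl_kmem_cache_create("my_obj_cache",
 *	    sizeof (my_obj_t), 0, my_obj_ctor, my_obj_dtor, NULL, NULL,
 *	    NULL, 0);
 *	my_obj_t *obj = spl_kmem_cache_alloc(skc, KM_SLEEP);
 *	...
 *	spl_kmem_cache_free(skc, obj);
 *	spl_kmem_cache_destroy(skc);
 */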

#define	kmem_cache_create(name, size, align, ctor, dtor, rclm, priv, vmp, fl) \
    spl_kmem_cache_create(name, size, align, ctor, dtor, rclm, priv, vmp, fl)
#define	kmem_cache_set_move(skc, move)	spl_kmem_cache_set_move(skc, move)
#define	kmem_cache_destroy(skc)		spl_kmem_cache_destroy(skc)
#define	kmem_cache_alloc(skc, flags)	spl_kmem_cache_alloc(skc, flags)
#define	kmem_cache_free(skc, obj)	spl_kmem_cache_free(skc, obj)
#define	kmem_cache_reap_now(skc)	spl_kmem_cache_reap_now(skc)
#define	kmem_reap()			spl_kmem_reap()

/*
 * The following functions are only available for internal use.
 */
extern int spl_kmem_cache_init(void);
extern void spl_kmem_cache_fini(void);

#endif	/* _SPL_KMEM_CACHE_H */