/*-
 * Copyright (c) 2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>
#include <linux/irq_work.h>
#include <linux/llist.h>

#include <sys/param.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>

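/*
 * Trailer stored at the end of every item allocated from a
 * SLAB_TYPESAFE_BY_RCU cache.  It holds the rcu_head used to defer the
 * actual free until an RCU grace period has elapsed, plus a back-pointer
 * to the owning cache so the callback can find the right UMA zone.
 */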
struct linux_kmem_rcu {
	struct rcu_head rcu_head;
	struct linux_kmem_cache *cache;
};

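/*
 * Per-cache state.  A Linux kmem_cache is backed by a UMA zone; the
 * llist/task pair queues items whose free must be deferred to taskqueue
 * context.
 */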
struct linux_kmem_cache {
	uma_zone_t cache_zone;
	linux_kmem_ctor_t *cache_ctor;
	unsigned cache_flags;
	unsigned cache_size;
	struct llist_head cache_items;
	struct task cache_task;
};

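/*
 * Convert between an item pointer and its RCU trailer.  The trailer
 * occupies the last sizeof(struct linux_kmem_rcu) bytes of the
 * cache_size-sized item, so the two macros are inverses of each other.
 */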
#define	LINUX_KMEM_TO_RCU(c, m)					\
	((struct linux_kmem_rcu *)((char *)(m) +		\
	(c)->cache_size - sizeof(struct linux_kmem_rcu)))

#define	LINUX_RCU_TO_KMEM(r)					\
	((void *)((char *)(r) + sizeof(struct linux_kmem_rcu) - \
	(r)->cache->cache_size))

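/* Lock-free list of memory waiting to be kfree()d from taskqueue context. */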
static LLIST_HEAD(linux_kfree_async_list);

static void	lkpi_kmem_cache_free_async_fn(void *, int);

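/*
 * Allocate an item from the cache's UMA zone.  The cache pointer is
 * passed as the UMA ctor argument so linux_kmem_ctor() can run the
 * Linux-style constructor.  The zalloc variant additionally zeroes the
 * item via M_ZERO.
 */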
void *
lkpi_kmem_cache_alloc(struct linux_kmem_cache *c, gfp_t flags)
{
	return (uma_zalloc_arg(c->cache_zone, c,
	    linux_check_m_flags(flags)));
}

void *
lkpi_kmem_cache_zalloc(struct linux_kmem_cache *c, gfp_t flags)
{
	return (uma_zalloc_arg(c->cache_zone, c,
	    linux_check_m_flags(flags | M_ZERO)));
}

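/*
 * UMA item constructor.  For RCU-typesafe caches it records the owning
 * cache in the item's trailer; in all cases it then invokes the optional
 * Linux constructor supplied at cache creation time.
 */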
static int
linux_kmem_ctor(void *mem, int size, void *arg, int flags)
{
	struct linux_kmem_cache *c = arg;

	if (unlikely(c->cache_flags & SLAB_TYPESAFE_BY_RCU)) {
		struct linux_kmem_rcu *rcu = LINUX_KMEM_TO_RCU(c, mem);

		/* duplicate cache pointer */
		rcu->cache = c;
	}

	/* check for constructor */
	if (likely(c->cache_ctor != NULL))
		c->cache_ctor(mem);

	return (0);
}

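/*
 * RCU callback invoked once a grace period has elapsed; convert the
 * rcu_head back into the original item and return it to its UMA zone.
 */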
static void
linux_kmem_cache_free_rcu_callback(struct rcu_head *head)
{
	struct linux_kmem_rcu *rcu =
	    container_of(head, struct linux_kmem_rcu, rcu_head);

	uma_zfree(rcu->cache->cache_zone, LINUX_RCU_TO_KMEM(rcu));
}

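/*
 * Create a Linux-compatible kmem cache on top of a UMA zone.  RCU
 * typesafe caches reserve extra space at the end of each item for the
 * linux_kmem_rcu trailer and always install the UMA constructor so the
 * trailer's cache back-pointer gets set.  Other caches only need each
 * item to be large enough to double as an llist_node while it sits on
 * the deferred-free list.
 */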
struct linux_kmem_cache *
linux_kmem_cache_create(const char *name, size_t size, size_t align,
    unsigned flags, linux_kmem_ctor_t *ctor)
{
	struct linux_kmem_cache *c;

	c = malloc(sizeof(*c), M_KMALLOC, M_WAITOK);

	if (flags & SLAB_HWCACHE_ALIGN)
		align = UMA_ALIGN_CACHE;
	else if (align != 0)
		align--;

	if (flags & SLAB_TYPESAFE_BY_RCU) {
		/* make room for RCU structure */
		size = ALIGN(size, sizeof(void *));
		size += sizeof(struct linux_kmem_rcu);

		/* create cache_zone */
		c->cache_zone = uma_zcreate(name, size,
		    linux_kmem_ctor, NULL, NULL, NULL,
		    align, UMA_ZONE_ZINIT);
	} else {
		/* make room for async task list items */
		size = MAX(size, sizeof(struct llist_node));

		/* create cache_zone */
		c->cache_zone = uma_zcreate(name, size,
		    ctor ? linux_kmem_ctor : NULL, NULL,
		    NULL, NULL, align, 0);
	}

	c->cache_flags = flags;
	c->cache_ctor = ctor;
	c->cache_size = size;
	init_llist_head(&c->cache_items);
	TASK_INIT(&c->cache_task, 0, lkpi_kmem_cache_free_async_fn, c);
	return (c);
}

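/*
 * Defer the free of an RCU-typesafe item; it is handed back to UMA from
 * the RCU callback after a grace period.
 */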
static inline void
lkpi_kmem_cache_free_rcu(struct linux_kmem_cache *c, void *m)
{
	struct linux_kmem_rcu *rcu = LINUX_KMEM_TO_RCU(c, m);

	call_rcu(&rcu->rcu_head, linux_kmem_cache_free_rcu_callback);
}

static inline void
lkpi_kmem_cache_free_sync(struct linux_kmem_cache *c, void *m)
{
	uma_zfree(c->cache_zone, m);
}

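/*
 * Taskqueue handler: drain the cache's deferred-free list and return
 * every queued item to the UMA zone.
 */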
static void
lkpi_kmem_cache_free_async_fn(void *context, int pending)
{
	struct linux_kmem_cache *c = context;
	struct llist_node *freed, *next;

	llist_for_each_safe(freed, next, llist_del_all(&c->cache_items))
		lkpi_kmem_cache_free_sync(c, freed);
}

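/*
 * Queue an item for freeing from taskqueue context.  The item's own
 * memory is reused as the llist_node, which is why non-RCU caches are
 * created with a minimum item size of sizeof(struct llist_node).
 */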
static inline void
lkpi_kmem_cache_free_async(struct linux_kmem_cache *c, void *m)
{
	if (m == NULL)
		return;

	llist_add(m, &c->cache_items);
	taskqueue_enqueue(linux_irq_work_tq, &c->cache_task);
}

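/*
 * Free an item, choosing the appropriate path: RCU-deferred for
 * SLAB_TYPESAFE_BY_RCU caches, taskqueue-deferred when called inside a
 * critical section where uma_zfree() may not be used safely, and direct
 * otherwise.
 */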
void
lkpi_kmem_cache_free(struct linux_kmem_cache *c, void *m)
{
	if (unlikely(c->cache_flags & SLAB_TYPESAFE_BY_RCU))
		lkpi_kmem_cache_free_rcu(c, m);
	else if (unlikely(curthread->td_critnest != 0))
		lkpi_kmem_cache_free_async(c, m);
	else
		lkpi_kmem_cache_free_sync(c, m);
}

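/*
 * Destroy a cache.  Wait for pending RCU free callbacks, flush any items
 * still sitting on the deferred-free list, then tear down the UMA zone.
 */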
void
linux_kmem_cache_destroy(struct linux_kmem_cache *c)
{
	if (c == NULL)
		return;

	if (unlikely(c->cache_flags & SLAB_TYPESAFE_BY_RCU)) {
		/* make sure all free callbacks have been called */
		rcu_barrier();
	}

	if (!llist_empty(&c->cache_items))
		taskqueue_enqueue(linux_irq_work_tq, &c->cache_task);
	taskqueue_drain(linux_irq_work_tq, &c->cache_task);
	uma_zdestroy(c->cache_zone);
	free(c, M_KMALLOC);
}

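/*
 * Deferred kfree() support: the task below drains the global list of
 * pointers queued by linux_kfree_async().
 */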
static void
linux_kfree_async_fn(void *context, int pending)
{
	struct llist_node *freed;

	while ((freed = llist_del_first(&linux_kfree_async_list)) != NULL)
		kfree(freed);
}

static struct task linux_kfree_async_task =
    TASK_INITIALIZER(0, linux_kfree_async_fn, &linux_kfree_async_task);

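/*
 * Defer a kfree() to taskqueue context, for callers that cannot free the
 * memory directly.
 */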
void
linux_kfree_async(void *addr)
{
	if (addr == NULL)
		return;
	llist_add(addr, &linux_kfree_async_list);
	taskqueue_enqueue(linux_irq_work_tq, &linux_kfree_async_task);
}