/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2007-2009 Kip Macy <kmacy@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

#ifndef	_SYS_BUF_RING_H_
#define	_SYS_BUF_RING_H_

#include <machine/cpu.h>

#if defined(INVARIANTS) && !defined(DEBUG_BUFRING)
#define DEBUG_BUFRING 1
#endif

#ifdef DEBUG_BUFRING
#include <sys/lock.h>
#include <sys/mutex.h>
#endif

struct buf_ring {
	/* Producer fields: head claims a slot, tail publishes it. */
	volatile uint32_t	br_prod_head;
	volatile uint32_t	br_prod_tail;
	int			br_prod_size;
	int			br_prod_mask;
	uint64_t		br_drops;
	/* Consumer fields, kept on a separate cache line. */
	volatile uint32_t	br_cons_head __aligned(CACHE_LINE_SIZE);
	volatile uint32_t	br_cons_tail;
	int			br_cons_size;
	int			br_cons_mask;
#ifdef DEBUG_BUFRING
	struct mtx		*br_lock;
#endif
	void			*br_ring[0] __aligned(CACHE_LINE_SIZE);
};

/*
 * Multi-producer safe lock-free ring buffer enqueue.
 */
static __inline int
buf_ring_enqueue(struct buf_ring *br, void *buf)
{
	uint32_t prod_head, prod_next, cons_tail;
#ifdef DEBUG_BUFRING
	int i;

	/* Catch double insertion of the same buffer. */
	for (i = br->br_cons_head; i != br->br_prod_head;
	     i = ((i + 1) & br->br_cons_mask))
		if (br->br_ring[i] == buf)
			panic("buf=%p already enqueued at %d prod=%d cons=%d",
			    buf, i, br->br_prod_tail, br->br_cons_tail);
#endif
	critical_enter();
	do {
		prod_head = br->br_prod_head;
		prod_next = (prod_head + 1) & br->br_prod_mask;
		cons_tail = br->br_cons_tail;

		if (prod_next == cons_tail) {
			/*
			 * The ring appears full.  Re-read both indices
			 * after a read barrier; if they are unchanged
			 * the ring really is full, so count a drop and
			 * fail.  Otherwise retry with fresh values.
			 */
			rmb();
			if (prod_head == br->br_prod_head &&
			    cons_tail == br->br_cons_tail) {
				br->br_drops++;
				critical_exit();
				return (ENOBUFS);
			}
			continue;
		}
	} while (!atomic_cmpset_acq_int(&br->br_prod_head, prod_head, prod_next));
#ifdef DEBUG_BUFRING
	if (br->br_ring[prod_head] != NULL)
		panic("dangling value in enqueue");
#endif
	br->br_ring[prod_head] = buf;

	/*
	 * If there are other enqueues in progress
	 * that preceded us, we need to wait for them
	 * to complete.
	 */
	while (br->br_prod_tail != prod_head)
		cpu_spinwait();
	atomic_store_rel_int(&br->br_prod_tail, prod_next);
	critical_exit();
	return (0);
}
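
/*
 * Example (a sketch, not part of this API): a driver's if_transmit
 * routine might feed its tx ring with buf_ring_enqueue() and free the
 * mbuf on failure (ENOBUFS when the ring is full).  The names
 * foo_softc, sc_br, and foo_txq_start are hypothetical.
 *
 *	static int
 *	foo_transmit(struct ifnet *ifp, struct mbuf *m)
 *	{
 *		struct foo_softc *sc = ifp->if_softc;
 *		int error;
 *
 *		error = buf_ring_enqueue(sc->sc_br, m);
 *		if (error != 0) {
 *			m_freem(m);
 *			return (error);
 *		}
 *		foo_txq_start(sc);
 *		return (0);
 *	}
 */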

/*
 * Multi-consumer safe dequeue.
 */
static __inline void *
buf_ring_dequeue_mc(struct buf_ring *br)
{
	uint32_t cons_head, cons_next;
	void *buf;

	critical_enter();
	do {
		cons_head = br->br_cons_head;
		cons_next = (cons_head + 1) & br->br_cons_mask;

		if (cons_head == br->br_prod_tail) {
			critical_exit();
			return (NULL);
		}
	} while (!atomic_cmpset_acq_int(&br->br_cons_head, cons_head, cons_next));

	buf = br->br_ring[cons_head];
#ifdef DEBUG_BUFRING
	br->br_ring[cons_head] = NULL;
#endif
	/*
	 * If there are other dequeues in progress
	 * that preceded us, we need to wait for them
	 * to complete.
	 */
	while (br->br_cons_tail != cons_head)
		cpu_spinwait();

	atomic_store_rel_int(&br->br_cons_tail, cons_next);
	critical_exit();

	return (buf);
}
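
/*
 * Example (a sketch): several worker threads can drain a shared ring
 * concurrently with buf_ring_dequeue_mc(); no lock is required.  The
 * helper foo_process() is hypothetical.
 *
 *	void *item;
 *
 *	while ((item = buf_ring_dequeue_mc(br)) != NULL)
 *		foo_process(item);
 */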

/*
 * Single-consumer dequeue.
 * Use where dequeue is protected by a lock,
 * e.g. a network driver's tx queue lock.
 */
static __inline void *
buf_ring_dequeue_sc(struct buf_ring *br)
{
	uint32_t cons_head, cons_next;
#ifdef PREFETCH_DEFINED
	uint32_t cons_next_next;
#endif
	uint32_t prod_tail;
	void *buf;

	/*
	 * This is a workaround to allow using buf_ring on ARM and ARM64.
	 * ARM64TODO: Fix buf_ring in a generic way.
	 * REMARKS: It is suspected that br_cons_head does not require a
	 *   load_acq operation, but this change was extensively tested
	 *   and confirmed to work.  To be reviewed once again in
	 *   FreeBSD-12.
	 *
	 * Prevents the following situation:
	 *
	 * Core(0) - buf_ring_enqueue()                   Core(1) - buf_ring_dequeue_sc()
	 * ---------------------------------------------  -------------------------------------
	 *
	 *                                                cons_head = br->br_cons_head;
	 * atomic_cmpset_acq_32(&br->br_prod_head, ...);
	 *                                                buf = br->br_ring[cons_head];   <see <1>>
	 * br->br_ring[prod_head] = buf;
	 * atomic_store_rel_32(&br->br_prod_tail, ...);
	 *                                                prod_tail = br->br_prod_tail;
	 *                                                if (cons_head == prod_tail)
	 *                                                        return (NULL);
	 *                                                <condition is false and code uses the stale buf>
	 *
	 * <1> The load (on core 1) from br->br_ring[cons_head] can be
	 *     speculatively reordered by the CPU.
	 */
#if defined(__arm__) || defined(__aarch64__)
	cons_head = atomic_load_acq_32(&br->br_cons_head);
#else
	cons_head = br->br_cons_head;
#endif
	prod_tail = atomic_load_acq_32(&br->br_prod_tail);

	cons_next = (cons_head + 1) & br->br_cons_mask;
#ifdef PREFETCH_DEFINED
	cons_next_next = (cons_head + 2) & br->br_cons_mask;
#endif

	if (cons_head == prod_tail)
		return (NULL);

#ifdef PREFETCH_DEFINED
	if (cons_next != prod_tail) {
		prefetch(br->br_ring[cons_next]);
		if (cons_next_next != prod_tail)
			prefetch(br->br_ring[cons_next_next]);
	}
#endif
	br->br_cons_head = cons_next;
	buf = br->br_ring[cons_head];

#ifdef DEBUG_BUFRING
	br->br_ring[cons_head] = NULL;
	if (!mtx_owned(br->br_lock))
		panic("lock not held on single consumer dequeue");
	if (br->br_cons_tail != cons_head)
		panic("inconsistent list cons_tail=%d cons_head=%d",
		    br->br_cons_tail, cons_head);
#endif
	br->br_cons_tail = cons_next;
	return (buf);
}
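
/*
 * Example (a sketch): a single consumer drains the ring while holding
 * the tx queue lock it passed to buf_ring_alloc().  sc_tx_mtx, sc_br,
 * and foo_start_xmit are hypothetical driver names.
 *
 *	struct mbuf *m;
 *
 *	mtx_lock(&sc->sc_tx_mtx);
 *	while ((m = buf_ring_dequeue_sc(sc->sc_br)) != NULL)
 *		foo_start_xmit(sc, m);
 *	mtx_unlock(&sc->sc_tx_mtx);
 */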

/*
 * Single-consumer advance after a peek.
 * Use where it is protected by a lock,
 * e.g. a network driver's tx queue lock.
 */
static __inline void
buf_ring_advance_sc(struct buf_ring *br)
{
	uint32_t cons_head, cons_next;
	uint32_t prod_tail;

	cons_head = br->br_cons_head;
	prod_tail = br->br_prod_tail;

	cons_next = (cons_head + 1) & br->br_cons_mask;
	if (cons_head == prod_tail)
		return;
	br->br_cons_head = cons_next;
#ifdef DEBUG_BUFRING
	br->br_ring[cons_head] = NULL;
#endif
	br->br_cons_tail = cons_next;
}

/*
 * Used to return a buffer (most likely already there)
 * to the top of the ring.  The caller should *not*
 * have used any dequeue to pull it out of the ring
 * but instead should have used the peek() function.
 * This is normally used where the transmit queue
 * of a driver is full, and an mbuf must be returned.
 * Most likely what's in the ring buffer is what
 * is being put back (since it was not removed), but
 * sometimes the lower transmit function may have
 * done a pullup or other function that will have
 * changed it.  As an optimization we always put it
 * back (since jhb says the store is probably cheaper);
 * if we have to do a multi-queue version we will need
 * the compare and an atomic.
 */
static __inline void
buf_ring_putback_sc(struct buf_ring *br, void *new)
{
	KASSERT(br->br_cons_head != br->br_prod_tail,
	    ("Buf-Ring has none in putback"));
	br->br_ring[br->br_cons_head] = new;
}

/*
 * Return a pointer to the first entry in the ring
 * without modifying it, or NULL if the ring is empty.
 * Race-prone if not protected by a lock.
 */
static __inline void *
buf_ring_peek(struct buf_ring *br)
{

#ifdef DEBUG_BUFRING
	if ((br->br_lock != NULL) && !mtx_owned(br->br_lock))
		panic("lock not held on single consumer dequeue");
#endif
	/*
	 * I believe it is safe to not have a memory barrier here
	 * because we control cons, and tail is at worst a lagging
	 * indicator, so in the worst case we might return NULL
	 * immediately after a buffer has been enqueued.
	 */
	if (br->br_cons_head == br->br_prod_tail)
		return (NULL);

	return (br->br_ring[br->br_cons_head]);
}
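
/*
 * Example (a sketch): the peek/advance pattern lets a driver leave an
 * mbuf in the ring until the hardware has accepted it, and put a
 * possibly-modified mbuf back on failure.  sc_tx_mtx, sc_br, and
 * foo_encap are hypothetical.
 *
 *	struct mbuf *m;
 *
 *	mtx_lock(&sc->sc_tx_mtx);
 *	while ((m = buf_ring_peek(sc->sc_br)) != NULL) {
 *		if (foo_encap(sc, &m) != 0) {
 *			if (m != NULL)
 *				buf_ring_putback_sc(sc->sc_br, m);
 *			break;
 *		}
 *		buf_ring_advance_sc(sc->sc_br);
 *	}
 *	mtx_unlock(&sc->sc_tx_mtx);
 */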

/*
 * Single-consumer peek that also clears the slot under DEBUG_BUFRING;
 * otherwise identical to buf_ring_peek().
 */
static __inline void *
buf_ring_peek_clear_sc(struct buf_ring *br)
{
#ifdef DEBUG_BUFRING
	void *ret;

	if (!mtx_owned(br->br_lock))
		panic("lock not held on single consumer dequeue");
#endif
	/*
	 * I believe it is safe to not have a memory barrier here
	 * because we control cons, and tail is at worst a lagging
	 * indicator, so in the worst case we might return NULL
	 * immediately after a buffer has been enqueued.
	 */
	if (br->br_cons_head == br->br_prod_tail)
		return (NULL);

#ifdef DEBUG_BUFRING
	/*
	 * Single consumer, i.e. cons_head will not move while we are
	 * running, so atomic_swap_ptr() is not necessary here.
	 */
	ret = br->br_ring[br->br_cons_head];
	br->br_ring[br->br_cons_head] = NULL;
	return (ret);
#else
	return (br->br_ring[br->br_cons_head]);
#endif
}

static __inline int
buf_ring_full(struct buf_ring *br)
{

	return (((br->br_prod_head + 1) & br->br_prod_mask) == br->br_cons_tail);
}

static __inline int
buf_ring_empty(struct buf_ring *br)
{

	return (br->br_cons_head == br->br_prod_tail);
}

static __inline int
buf_ring_count(struct buf_ring *br)
{

	return ((br->br_prod_size + br->br_prod_tail - br->br_cons_tail)
	    & br->br_prod_mask);
}

struct buf_ring *buf_ring_alloc(int count, struct malloc_type *type, int flags,
    struct mtx *);
void buf_ring_free(struct buf_ring *br, struct malloc_type *type);
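
/*
 * Example (a sketch): allocation and teardown.  The count is assumed
 * to be a power of two (the implementation uses count - 1 as an index
 * mask).  The mutex is the single-consumer lock checked under
 * DEBUG_BUFRING; sc_tx_mtx and sc_br are hypothetical.
 *
 *	sc->sc_br = buf_ring_alloc(4096, M_DEVBUF, M_WAITOK,
 *	    &sc->sc_tx_mtx);
 *	...
 *	buf_ring_free(sc->sc_br, M_DEVBUF);
 */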

#endif /* _SYS_BUF_RING_H_ */