1 /* $NetBSD: pktqueue.c,v 1.22 2023/05/28 08:09:34 andvar Exp $ */
2
3 /*-
4 * Copyright (c) 2014 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Mindaugas Rasiukevicius.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * The packet queue (pktqueue) interface is a lockless IP input queue
34 * which also abstracts and handles network ISR scheduling. It provides
35 * a mechanism to enable receiver-side packet steering (RPS).
36 */
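/*
 * Typical usage, as an illustrative sketch only: the "foo" protocol,
 * its queue, softint handler and hash-function variable below are
 * hypothetical names, not part of this file.
 *
 *	static pktqueue_t *foo_pktq;
 *	static pktq_rps_hash_func_t foo_pktq_rps_hash;
 *
 *	At attach time, bind the queue to a softint handler:
 *
 *		foo_pktq_rps_hash = pktq_rps_hash_default;
 *		foo_pktq = pktq_create(IFQ_MAXLEN, foointr, NULL);
 *
 *	Producers (the input path, in interrupt context) pick a CPU via
 *	the RPS hash and enqueue, freeing the packet on failure:
 *
 *		if (!pktq_enqueue(foo_pktq, m,
 *		    pktq_rps_hash(&foo_pktq_rps_hash, m)))
 *			m_freem(m);
 *
 *	The softint handler then drains its per-CPU queue:
 *
 *		static void
 *		foointr(void *arg)
 *		{
 *			struct mbuf *m;
 *
 *			while ((m = pktq_dequeue(foo_pktq)) != NULL)
 *				foo_input(m);
 *		}
 */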
37
38 #include <sys/cdefs.h>
39 __KERNEL_RCSID(0, "$NetBSD: pktqueue.c,v 1.22 2023/05/28 08:09:34 andvar Exp $");
40
41 #ifdef _KERNEL_OPT
42 #include "opt_net_mpsafe.h"
43 #endif
44
45 #include <sys/param.h>
46 #include <sys/types.h>
47
48 #include <sys/atomic.h>
49 #include <sys/cpu.h>
50 #include <sys/pcq.h>
51 #include <sys/intr.h>
52 #include <sys/mbuf.h>
53 #include <sys/proc.h>
54 #include <sys/percpu.h>
55 #include <sys/xcall.h>
56 #include <sys/once.h>
57 #include <sys/queue.h>
58 #include <sys/rwlock.h>
59
60 #include <net/pktqueue.h>
61 #include <net/rss_config.h>
62
63 #include <netinet/in.h>
64 #include <netinet/ip.h>
65 #include <netinet/ip6.h>
66
67 struct pktqueue {
68 /*
69 * The lock is used for the barrier mechanism. The barrier and drop
70 * counters, however, are managed atomically.
71 * Ensure this group is in a separate cache line.
72 */
73 union {
74 struct {
75 kmutex_t pq_lock;
76 volatile u_int pq_barrier;
77 };
78 uint8_t _pad[COHERENCY_UNIT];
79 };
80
81 /* The size of the queue, counters and the interrupt handler. */
82 u_int pq_maxlen;
83 percpu_t * pq_counters;
84 void * pq_sih;
85
86 /* The per-CPU queues. */
87 struct percpu * pq_pcq; /* struct pcq * */
88
89 /* The linkage on the list of all pktqueues. */
90 LIST_ENTRY(pktqueue) pq_list;
91 };
92
93 /* The counters of the packet queue. */
94 #define PQCNT_ENQUEUE 0
95 #define PQCNT_DEQUEUE 1
96 #define PQCNT_DROP 2
97 #define PQCNT_NCOUNTERS 3
98
99 typedef struct {
100 uint64_t count[PQCNT_NCOUNTERS];
101 } pktq_counters_t;
102
103 /* Special marker value used by pktq_barrier() mechanism. */
104 #define PKTQ_MARKER ((void *)(~0ULL))
105
106 /*
107 * This is a list of all pktqueues. This list is used by
108 * pktq_ifdetach() to issue a barrier on every pktqueue.
109 *
110 * The r/w lock is acquired for writing in pktq_create() and
111 * pktq_destroy(), and for reading in pktq_ifdetach().
112 *
113 * This list is not performance critical, and will seldom be
114 * accessed.
115 */
116 static LIST_HEAD(, pktqueue) pktqueue_list __read_mostly;
117 static krwlock_t pktqueue_list_lock __read_mostly;
118 static once_t pktqueue_list_init_once __read_mostly;
119
120 static int
121 pktqueue_list_init(void)
122 {
123 LIST_INIT(&pktqueue_list);
124 rw_init(&pktqueue_list_lock);
125 return 0;
126 }
127
128 static void
129 pktq_init_cpu(void *vqp, void *vpq, struct cpu_info *ci)
130 {
131 struct pcq **qp = vqp;
132 struct pktqueue *pq = vpq;
133
134 *qp = pcq_create(pq->pq_maxlen, KM_SLEEP);
135 }
136
137 static void
138 pktq_fini_cpu(void *vqp, void *vpq, struct cpu_info *ci)
139 {
140 struct pcq **qp = vqp, *q = *qp;
141
142 KASSERT(pcq_peek(q) == NULL);
143 pcq_destroy(q);
144 *qp = NULL; /* paranoia */
145 }
146
147 static struct pcq *
148 pktq_pcq(struct pktqueue *pq, struct cpu_info *ci)
149 {
150 struct pcq **qp, *q;
151
152 /*
153 * As long as preemption is disabled, the xcall to swap percpu
154 * buffers can't complete, so it is safe to read the pointer.
155 */
156 KASSERT(kpreempt_disabled());
157
158 qp = percpu_getptr_remote(pq->pq_pcq, ci);
159 q = *qp;
160
161 return q;
162 }
163
164 pktqueue_t *
165 pktq_create(size_t maxlen, void (*intrh)(void *), void *sc)
166 {
167 const u_int sflags = SOFTINT_NET | SOFTINT_MPSAFE | SOFTINT_RCPU;
168 pktqueue_t *pq;
169 percpu_t *pc;
170 void *sih;
171
172 RUN_ONCE(&pktqueue_list_init_once, pktqueue_list_init);
173
174 pc = percpu_alloc(sizeof(pktq_counters_t));
175 if ((sih = softint_establish(sflags, intrh, sc)) == NULL) {
176 percpu_free(pc, sizeof(pktq_counters_t));
177 return NULL;
178 }
179
180 pq = kmem_zalloc(sizeof(*pq), KM_SLEEP);
181 mutex_init(&pq->pq_lock, MUTEX_DEFAULT, IPL_NONE);
182 pq->pq_maxlen = maxlen;
183 pq->pq_counters = pc;
184 pq->pq_sih = sih;
185 pq->pq_pcq = percpu_create(sizeof(struct pcq *),
186 pktq_init_cpu, pktq_fini_cpu, pq);
187
188 rw_enter(&pktqueue_list_lock, RW_WRITER);
189 LIST_INSERT_HEAD(&pktqueue_list, pq, pq_list);
190 rw_exit(&pktqueue_list_lock);
191
192 return pq;
193 }
194
195 void
196 pktq_destroy(pktqueue_t *pq)
197 {
198
199 KASSERT(pktqueue_list_init_once.o_status == ONCE_DONE);
200
201 rw_enter(&pktqueue_list_lock, RW_WRITER);
202 LIST_REMOVE(pq, pq_list);
203 rw_exit(&pktqueue_list_lock);
204
205 percpu_free(pq->pq_pcq, sizeof(struct pcq *));
206 percpu_free(pq->pq_counters, sizeof(pktq_counters_t));
207 softint_disestablish(pq->pq_sih);
208 mutex_destroy(&pq->pq_lock);
209 kmem_free(pq, sizeof(*pq));
210 }
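/*
 * Teardown sketch (illustrative only; "foo_pktq" is hypothetical and
 * all producers are assumed to have been stopped already).  The
 * per-CPU queues must be empty by the time pktq_destroy() runs, so
 * drain them first:
 *
 *	pktq_flush(foo_pktq);		free any leftover packets
 *	pktq_destroy(foo_pktq);
 */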
211
212 /*
213 * - pktq_inc_count: increment the counter given an ID.
214 * - pktq_collect_counts: handler to sum up the counts from each CPU.
215 * - pktq_get_count: return the effective count given an ID.
216 */
217
218 static inline void
219 pktq_inc_count(pktqueue_t *pq, u_int i)
220 {
221 percpu_t *pc = pq->pq_counters;
222 pktq_counters_t *c;
223
224 c = percpu_getref(pc);
225 c->count[i]++;
226 percpu_putref(pc);
227 }
228
229 static void
230 pktq_collect_counts(void *mem, void *arg, struct cpu_info *ci)
231 {
232 const pktq_counters_t *c = mem;
233 pktq_counters_t *sum = arg;
234
235 int s = splnet();
236
237 for (u_int i = 0; i < PQCNT_NCOUNTERS; i++) {
238 sum->count[i] += c->count[i];
239 }
240
241 splx(s);
242 }
243
244 static uint64_t
245 pktq_get_count(pktqueue_t *pq, pktq_count_t c)
246 {
247 pktq_counters_t sum;
248
249 if (c != PKTQ_MAXLEN) {
250 memset(&sum, 0, sizeof(sum));
251 percpu_foreach_xcall(pq->pq_counters,
252 XC_HIGHPRI_IPL(IPL_SOFTNET), pktq_collect_counts, &sum);
253 }
254 switch (c) {
255 case PKTQ_NITEMS:
256 return sum.count[PQCNT_ENQUEUE] - sum.count[PQCNT_DEQUEUE];
257 case PKTQ_DROPS:
258 return sum.count[PQCNT_DROP];
259 case PKTQ_MAXLEN:
260 return pq->pq_maxlen;
261 }
262 return 0;
263 }
264
265 uint32_t
266 pktq_rps_hash(const pktq_rps_hash_func_t *funcp, const struct mbuf *m)
267 {
268 pktq_rps_hash_func_t func = atomic_load_relaxed(funcp);
269
270 KASSERT(func != NULL);
271
272 return (*func)(m);
273 }
274
275 static uint32_t
276 pktq_rps_hash_zero(const struct mbuf *m __unused)
277 {
278
279 return 0;
280 }
281
282 static uint32_t
283 pktq_rps_hash_curcpu(const struct mbuf *m __unused)
284 {
285
286 return cpu_index(curcpu());
287 }
288
289 static uint32_t
290 pktq_rps_hash_toeplitz(const struct mbuf *m)
291 {
292 struct ip *ip;
293 /*
294 * Disable hashing on UDP ports: IP fragments aren't currently
295 * handled, so including them would yield a mix of 2-tuple and
296 * 4-tuple traffic.
297 */
298 const u_int flag = RSS_TOEPLITZ_USE_TCP_PORT;
299
300 /* Peek at the IP version. */
301 if ((m->m_flags & M_PKTHDR) == 0)
302 return 0;
303
304 ip = mtod(m, struct ip *);
305 if (ip->ip_v == IPVERSION) {
306 if (__predict_false(m->m_len < sizeof(struct ip)))
307 return 0;
308 return rss_toeplitz_hash_from_mbuf_ipv4(m, flag);
309 } else if (ip->ip_v == 6) {
310 if (__predict_false(m->m_len < sizeof(struct ip6_hdr)))
311 return 0;
312 return rss_toeplitz_hash_from_mbuf_ipv6(m, flag);
313 }
314
315 return 0;
316 }
317
318 /*
319 * Toeplitz hash, excluding the current CPU.
320 * Generally, this performs better than plain toeplitz.
321 */
322 static uint32_t
323 pktq_rps_hash_toeplitz_othercpus(const struct mbuf *m)
324 {
325 uint32_t hash;
326
327 if (ncpu == 1)
328 return 0;
329
330 hash = pktq_rps_hash_toeplitz(m);
331 hash %= ncpu - 1;
332 if (hash >= cpu_index(curcpu()))
333 return hash + 1;
334 else
335 return hash;
336 }
337
338 static struct pktq_rps_hash_table {
339 const char* prh_type;
340 pktq_rps_hash_func_t prh_func;
341 } const pktq_rps_hash_tab[] = {
342 { "zero", pktq_rps_hash_zero },
343 { "curcpu", pktq_rps_hash_curcpu },
344 { "toeplitz", pktq_rps_hash_toeplitz },
345 { "toeplitz-othercpus", pktq_rps_hash_toeplitz_othercpus },
346 };
347 const pktq_rps_hash_func_t pktq_rps_hash_default =
348 #ifdef NET_MPSAFE
349 pktq_rps_hash_curcpu;
350 #else
351 pktq_rps_hash_zero;
352 #endif
353
354 static const char *
355 pktq_get_rps_hash_type(pktq_rps_hash_func_t func)
356 {
357
358 for (int i = 0; i < __arraycount(pktq_rps_hash_tab); i++) {
359 if (func == pktq_rps_hash_tab[i].prh_func) {
360 return pktq_rps_hash_tab[i].prh_type;
361 }
362 }
363
364 return NULL;
365 }
366
367 static int
368 pktq_set_rps_hash_type(pktq_rps_hash_func_t *func, const char *type)
369 {
370
371 if (strcmp(type, pktq_get_rps_hash_type(*func)) == 0)
372 return 0;
373
374 for (int i = 0; i < __arraycount(pktq_rps_hash_tab); i++) {
375 if (strcmp(type, pktq_rps_hash_tab[i].prh_type) == 0) {
376 atomic_store_relaxed(func, pktq_rps_hash_tab[i].prh_func);
377 return 0;
378 }
379 }
380
381 return ENOENT;
382 }
383
384 int
385 sysctl_pktq_rps_hash_handler(SYSCTLFN_ARGS)
386 {
387 struct sysctlnode node;
388 pktq_rps_hash_func_t *func;
389 int error;
390 char type[PKTQ_RPS_HASH_NAME_LEN];
391
392 node = *rnode;
393 func = node.sysctl_data;
394
395 strlcpy(type, pktq_get_rps_hash_type(*func), PKTQ_RPS_HASH_NAME_LEN);
396
397 node.sysctl_data = &type;
398 node.sysctl_size = sizeof(type);
399 error = sysctl_lookup(SYSCTLFN_CALL(&node));
400 if (error || newp == NULL)
401 return error;
402
403 error = pktq_set_rps_hash_type(func, type);
404
405 return error;
406 }
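/*
 * A protocol can expose its hash-function pointer with a string sysctl
 * node backed by the handler above.  Illustrative sketch only; the node
 * name and the "foo_pktq_rps_hash" variable are hypothetical:
 *
 *	sysctl_createv(clog, 0, &rnode, NULL,
 *	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
 *	    CTLTYPE_STRING, "rps_hash",
 *	    SYSCTL_DESCR("RPS hash function for the input queue"),
 *	    sysctl_pktq_rps_hash_handler, 0, (void *)&foo_pktq_rps_hash,
 *	    PKTQ_RPS_HASH_NAME_LEN, CTL_CREATE, CTL_EOL);
 */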
407
408 /*
409 * pktq_enqueue: inject the packet into the end of the queue.
410 *
411 * => Must be called from interrupt context or with preemption disabled.
412 * => Consumes the packet and returns true on success.
413 * => Returns false on failure; the caller is responsible for freeing the packet.
414 */
415 bool
416 pktq_enqueue(pktqueue_t *pq, struct mbuf *m, const u_int hash __unused)
417 {
418 #if defined(_RUMPKERNEL) || defined(_RUMP_NATIVE_ABI)
419 struct cpu_info *ci = curcpu();
420 #else
421 struct cpu_info *ci = cpu_lookup(hash % ncpu);
422 #endif
423
424 KASSERT(kpreempt_disabled());
425
426 if (__predict_false(!pcq_put(pktq_pcq(pq, ci), m))) {
427 pktq_inc_count(pq, PQCNT_DROP);
428 return false;
429 }
430 softint_schedule_cpu(pq->pq_sih, ci);
431 pktq_inc_count(pq, PQCNT_ENQUEUE);
432 return true;
433 }
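/*
 * Caller sketch (illustrative only; "foo_pktq" and "foo_pktq_rps_hash"
 * are hypothetical names).  From thread context the kpreempt_disable()/
 * kpreempt_enable() pair below is required; interrupt context already
 * satisfies the assertion:
 *
 *	kpreempt_disable();
 *	if (__predict_false(!pktq_enqueue(foo_pktq, m,
 *	    pktq_rps_hash(&foo_pktq_rps_hash, m))))
 *		m_freem(m);
 *	kpreempt_enable();
 */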
434
435 /*
436 * pktq_dequeue: take a packet from the queue.
437 *
438 * => Must be called with preemption disabled.
439 * => The caller must ensure there are no concurrent dequeue calls.
440 */
441 struct mbuf *
442 pktq_dequeue(pktqueue_t *pq)
443 {
444 struct cpu_info *ci = curcpu();
445 struct mbuf *m;
446
447 KASSERT(kpreempt_disabled());
448
449 m = pcq_get(pktq_pcq(pq, ci));
450 if (__predict_false(m == PKTQ_MARKER)) {
451 /* Note the marker entry. */
452 atomic_inc_uint(&pq->pq_barrier);
453
454 /* Get the next queue entry. */
455 m = pcq_get(pktq_pcq(pq, ci));
456
457 /*
458 * There can only be one barrier operation pending
459 * on a pktqueue at any given time, so we can assert
460 * that the next item is not a marker.
461 */
462 KASSERT(m != PKTQ_MARKER);
463 }
464 if (__predict_true(m != NULL)) {
465 pktq_inc_count(pq, PQCNT_DEQUEUE);
466 }
467 return m;
468 }
469
470 /*
471 * pktq_barrier: waits for a grace period after which all packets enqueued
472 * at the moment of calling this routine have been processed. This is used
473 * to ensure that e.g. packets referencing some interface have been drained.
474 */
475 void
476 pktq_barrier(pktqueue_t *pq)
477 {
478 CPU_INFO_ITERATOR cii;
479 struct cpu_info *ci;
480 u_int pending = 0;
481
482 mutex_enter(&pq->pq_lock);
483 KASSERT(pq->pq_barrier == 0);
484
485 for (CPU_INFO_FOREACH(cii, ci)) {
486 struct pcq *q;
487
488 kpreempt_disable();
489 q = pktq_pcq(pq, ci);
490 kpreempt_enable();
491
492 /* If the queue is empty - nothing to do. */
493 if (pcq_peek(q) == NULL) {
494 continue;
495 }
496 /* Otherwise, insert the marker entry. */
497 while (!pcq_put(q, PKTQ_MARKER)) {
498 kpause("pktqsync", false, 1, NULL);
499 }
500 kpreempt_disable();
501 softint_schedule_cpu(pq->pq_sih, ci);
502 kpreempt_enable();
503 pending++;
504 }
505
506 /* Wait for each queue to process the markers. */
507 while (pq->pq_barrier != pending) {
508 kpause("pktqsync", false, 1, NULL);
509 }
510 pq->pq_barrier = 0;
511 mutex_exit(&pq->pq_lock);
512 }
513
514 /*
515 * pktq_ifdetach: issue a barrier on all pktqueues when a network
516 * interface is detached.
517 */
518 void
519 pktq_ifdetach(void)
520 {
521 pktqueue_t *pq;
522
523 /* Just in case no pktqueues have been created yet... */
524 RUN_ONCE(&pktqueue_list_init_once, pktqueue_list_init);
525
526 rw_enter(&pktqueue_list_lock, RW_READER);
527 LIST_FOREACH(pq, &pktqueue_list, pq_list) {
528 pktq_barrier(pq);
529 }
530 rw_exit(&pktqueue_list_lock);
531 }
532
533 /*
534 * pktq_flush: free mbufs in all queues.
535 *
536 * => The caller must ensure there are no concurrent writers or flush calls.
537 */
538 void
539 pktq_flush(pktqueue_t *pq)
540 {
541 CPU_INFO_ITERATOR cii;
542 struct cpu_info *ci;
543 struct mbuf *m, *m0 = NULL;
544
545 ASSERT_SLEEPABLE();
546
547 /*
548 * Run a dummy softint at IPL_SOFTNET on all CPUs to ensure that any
549 * already running handler for this pktqueue is no longer running.
550 */
551 xc_barrier(XC_HIGHPRI_IPL(IPL_SOFTNET));
552
553 /*
554 * Acquire the barrier lock. While the caller ensures that
555 * no explicit pktq_barrier() calls will be issued, this holds
556 * off any implicit pktq_barrier() calls that would happen
557 * as the result of pktq_ifdetach().
558 */
559 mutex_enter(&pq->pq_lock);
560
561 for (CPU_INFO_FOREACH(cii, ci)) {
562 struct pcq *q;
563
564 kpreempt_disable();
565 q = pktq_pcq(pq, ci);
566 kpreempt_enable();
567
568 /*
569 * Pull the packets off the pcq and chain them into
570 * a list to be freed later.
571 */
572 while ((m = pcq_get(q)) != NULL) {
573 pktq_inc_count(pq, PQCNT_DEQUEUE);
574 m->m_nextpkt = m0;
575 m0 = m;
576 }
577 }
578
579 mutex_exit(&pq->pq_lock);
580
581 /* Free the packets now that the critical section is over. */
582 while ((m = m0) != NULL) {
583 m0 = m->m_nextpkt;
584 m_freem(m);
585 }
586 }
587
588 static void
589 pktq_set_maxlen_cpu(void *vpq, void *vqs)
590 {
591 struct pktqueue *pq = vpq;
592 struct pcq **qp, *q, **qs = vqs;
593 unsigned i = cpu_index(curcpu());
594 int s;
595
596 s = splnet();
597 qp = percpu_getref(pq->pq_pcq);
598 q = *qp;
599 *qp = qs[i];
600 qs[i] = q;
601 percpu_putref(pq->pq_pcq);
602 splx(s);
603 }
604
605 /*
606 * pktq_set_maxlen: create per-CPU queues using a new size and replace
607 * the existing queues without losing any packets.
608 *
609 * XXX ncpu must remain stable throughout.
610 */
611 int
612 pktq_set_maxlen(pktqueue_t *pq, size_t maxlen)
613 {
614 const u_int slotbytes = ncpu * sizeof(pcq_t *);
615 pcq_t **qs;
616
617 if (!maxlen || maxlen > PCQ_MAXLEN)
618 return EINVAL;
619 if (pq->pq_maxlen == maxlen)
620 return 0;
621
622 /* First, allocate the new queues. */
623 qs = kmem_zalloc(slotbytes, KM_SLEEP);
624 for (u_int i = 0; i < ncpu; i++) {
625 qs[i] = pcq_create(maxlen, KM_SLEEP);
626 }
627
628 /*
629 * Issue an xcall to replace the queue pointers on each CPU.
630 * This implies all the necessary memory barriers.
631 */
632 mutex_enter(&pq->pq_lock);
633 xc_wait(xc_broadcast(XC_HIGHPRI, pktq_set_maxlen_cpu, pq, qs));
634 pq->pq_maxlen = maxlen;
635 mutex_exit(&pq->pq_lock);
636
637 /*
638 * At this point, the new packets are flowing into the new
639 * queues. However, the old queues may have some packets
640 * present which are no longer being processed. We are going
641 * to re-enqueue them. This may change the order of packet
642 * arrival, but it is not considered an issue.
643 *
644 * There may be in-flight interrupts calling pktq_dequeue()
645 * which reference the old queues. Issue a barrier to ensure
646 * that we are going to be the only pcq_get() callers on the
647 * old queues.
648 */
649 pktq_barrier(pq);
650
651 for (u_int i = 0; i < ncpu; i++) {
652 struct pcq *q;
653 struct mbuf *m;
654
655 kpreempt_disable();
656 q = pktq_pcq(pq, cpu_lookup(i));
657 kpreempt_enable();
658
659 while ((m = pcq_get(qs[i])) != NULL) {
660 while (!pcq_put(q, m)) {
661 kpause("pktqrenq", false, 1, NULL);
662 }
663 }
664 pcq_destroy(qs[i]);
665 }
666
667 /* Well, that was fun. */
668 kmem_free(qs, slotbytes);
669 return 0;
670 }
671
672 static int
673 sysctl_pktq_maxlen(SYSCTLFN_ARGS)
674 {
675 struct sysctlnode node = *rnode;
676 pktqueue_t * const pq = node.sysctl_data;
677 u_int nmaxlen = pktq_get_count(pq, PKTQ_MAXLEN);
678 int error;
679
680 node.sysctl_data = &nmaxlen;
681 error = sysctl_lookup(SYSCTLFN_CALL(&node));
682 if (error || newp == NULL)
683 return error;
684 return pktq_set_maxlen(pq, nmaxlen);
685 }
686
687 static int
688 sysctl_pktq_count(SYSCTLFN_ARGS, u_int count_id)
689 {
690 struct sysctlnode node = *rnode;
691 pktqueue_t * const pq = node.sysctl_data;
692 uint64_t count = pktq_get_count(pq, count_id);
693
694 node.sysctl_data = &count;
695 return sysctl_lookup(SYSCTLFN_CALL(&node));
696 }
697
698 static int
699 sysctl_pktq_nitems(SYSCTLFN_ARGS)
700 {
701 return sysctl_pktq_count(SYSCTLFN_CALL(rnode), PKTQ_NITEMS);
702 }
703
704 static int
705 sysctl_pktq_drops(SYSCTLFN_ARGS)
706 {
707 return sysctl_pktq_count(SYSCTLFN_CALL(rnode), PKTQ_DROPS);
708 }
709
710 /*
711 * pktq_sysctl_setup: set up the sysctl nodes for a pktqueue
712 * using standardized names at the specified parent node and
713 * node ID (or CTL_CREATE).
714 */
715 void
716 pktq_sysctl_setup(pktqueue_t * const pq, struct sysctllog ** const clog,
717 const struct sysctlnode * const parent_node, const int qid)
718 {
719 const struct sysctlnode *rnode = parent_node, *cnode;
720
721 KASSERT(pq != NULL);
722 KASSERT(parent_node != NULL);
723 KASSERT(qid == CTL_CREATE || qid >= 0);
724
725 /* Create the "ifq" node below the parent node. */
726 sysctl_createv(clog, 0, &rnode, &cnode,
727 CTLFLAG_PERMANENT,
728 CTLTYPE_NODE, "ifq",
729 SYSCTL_DESCR("Protocol input queue controls"),
730 NULL, 0, NULL, 0,
731 qid, CTL_EOL);
732
733 /* Now create the standard child nodes below "ifq". */
734 rnode = cnode;
735
736 sysctl_createv(clog, 0, &rnode, &cnode,
737 CTLFLAG_PERMANENT,
738 CTLTYPE_QUAD, "len",
739 SYSCTL_DESCR("Current input queue length"),
740 sysctl_pktq_nitems, 0, (void *)pq, 0,
741 IFQCTL_LEN, CTL_EOL);
742 sysctl_createv(clog, 0, &rnode, &cnode,
743 CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
744 CTLTYPE_INT, "maxlen",
745 SYSCTL_DESCR("Maximum allowed input queue length"),
746 sysctl_pktq_maxlen, 0, (void *)pq, 0,
747 IFQCTL_MAXLEN, CTL_EOL);
748 sysctl_createv(clog, 0, &rnode, &cnode,
749 CTLFLAG_PERMANENT,
750 CTLTYPE_QUAD, "drops",
751 SYSCTL_DESCR("Packets dropped due to full input queue"),
752 sysctl_pktq_drops, 0, (void *)pq, 0,
753 IFQCTL_DROPS, CTL_EOL);
754 }
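/*
 * Illustrative call from a protocol's sysctl setup routine ("foo_pktq"
 * and the parent node are hypothetical):
 *
 *	pktq_sysctl_setup(foo_pktq, clog, parent_node, CTL_CREATE);
 *
 * This creates the "ifq" node with its "len", "maxlen" and "drops"
 * children below the given parent.
 */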
755