xref: /freebsd/sys/net/netisr.c (revision a0ee8cc6)
1 /*-
2  * Copyright (c) 2007-2009 Robert N. M. Watson
3  * Copyright (c) 2010-2011 Juniper Networks, Inc.
4  * All rights reserved.
5  *
6  * This software was developed by Robert N. M. Watson under contract
7  * to Juniper Networks, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 /*
35  * netisr is a packet dispatch service, allowing synchronous (directly
36  * dispatched) and asynchronous (deferred dispatch) processing of packets by
37  * registered protocol handlers.  Callers pass a protocol identifier and
38  * packet to netisr, along with a direct dispatch hint, and work will either
39  * be immediately processed by the registered handler, or passed to a
40  * software interrupt (SWI) thread for deferred dispatch.  Callers will
41  * generally select one or the other based on:
42  *
43  * - Whether directly dispatching a netisr handler would lead to code
44  *   reentrance or lock recursion, such as entering the socket code from
45  *   the socket code.
46  * - Whether directly dispatching a netisr handler would lead to recursive
47  *   processing, such as decapsulating nested tunnels (IPsec in IPsec in ...).
48  *
49  * Maintaining ordering for protocol streams is a critical design concern.
50  * Enforcing ordering limits the opportunity for concurrency, but maintains
51  * the strong ordering requirements found in some protocols, such as TCP.  Of
52  * related concern is CPU affinity--it is desirable to process all data
53  * associated with a particular stream on the same CPU over time in order to
54  * avoid acquiring locks associated with the connection on different CPUs,
55  * keep connection data in one cache, and to generally encourage associated
56  * user threads to live on the same CPU as the stream.  It's also desirable
57  * to avoid lock migration and contention where locks are associated with
58  * more than one flow.
59  *
60  * netisr supports several policy variations, represented by the
61  * NETISR_POLICY_* constants, allowing protocols to play various roles in
62  * identifying flows, assigning work to CPUs, etc.  These are described in
63  * netisr.h.
64  */
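/*
 * Editorial sketch (not part of the original source): a caller hands a
 * packet to netisr through one of the two entry points defined below.
 * NETISR_FOO is a hypothetical protocol number used purely for
 * illustration.
 *
 *	error = netisr_dispatch(NETISR_FOO, m);	 (may run the handler inline)
 *	error = netisr_queue(NETISR_FOO, m);	 (always defers to a SWI)
 *
 * In both cases the mbuf is consumed by netisr even on failure (e.g.,
 * ENOBUFS when a queue limit is reached), so the caller must not touch it
 * after either call returns.
 */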
65 
66 #include "opt_ddb.h"
67 #include "opt_device_polling.h"
68 
69 #include <sys/param.h>
70 #include <sys/bus.h>
71 #include <sys/kernel.h>
72 #include <sys/kthread.h>
73 #include <sys/interrupt.h>
74 #include <sys/lock.h>
75 #include <sys/mbuf.h>
76 #include <sys/mutex.h>
77 #include <sys/pcpu.h>
78 #include <sys/proc.h>
79 #include <sys/rmlock.h>
80 #include <sys/sched.h>
81 #include <sys/smp.h>
82 #include <sys/socket.h>
83 #include <sys/sysctl.h>
84 #include <sys/systm.h>
85 
86 #ifdef DDB
87 #include <ddb/ddb.h>
88 #endif
89 
90 #define	_WANT_NETISR_INTERNAL	/* Enable definitions from netisr_internal.h */
91 #include <net/if.h>
92 #include <net/if_var.h>
93 #include <net/netisr.h>
94 #include <net/netisr_internal.h>
95 #include <net/vnet.h>
96 
97 /*-
98  * Synchronize use and modification of the registered netisr data structures;
99  * acquire a write lock while modifying the set of registered protocols to
100  * prevent partially registered or unregistered protocols from being run.
101  *
102  * The following data structures and fields are protected by this lock:
103  *
104  * - The netisr_proto array, including all fields of struct netisr_proto.
105  * - The nws array, including all fields of struct netisr_worker.
106  * - The nws_array array.
107  *
108  * Note: the NETISR_LOCKING define controls whether read locks are acquired
109  * in packet processing paths requiring netisr registration stability.  This
110  * is disabled by default as it can lead to measurable performance
111  * degradation even with rmlocks (3%-6% for loopback ping-pong traffic), and
112  * because netisr registration and unregistration are extremely rare at
113  * runtime.  If it becomes more common, this decision should be revisited.
114  *
115  * XXXRW: rmlocks don't support assertions.
116  */
117 static struct rmlock	netisr_rmlock;
118 #define	NETISR_LOCK_INIT()	rm_init_flags(&netisr_rmlock, "netisr", \
119 				    RM_NOWITNESS)
120 #define	NETISR_LOCK_ASSERT()
121 #define	NETISR_RLOCK(tracker)	rm_rlock(&netisr_rmlock, (tracker))
122 #define	NETISR_RUNLOCK(tracker)	rm_runlock(&netisr_rmlock, (tracker))
123 #define	NETISR_WLOCK()		rm_wlock(&netisr_rmlock)
124 #define	NETISR_WUNLOCK()	rm_wunlock(&netisr_rmlock)
125 /* #define	NETISR_LOCKING */
126 
127 static SYSCTL_NODE(_net, OID_AUTO, isr, CTLFLAG_RW, 0, "netisr");
128 
129 /*-
130  * Three global direct dispatch policies are supported:
131  *
132  * NETISR_DISPATCH_DEFERRED: All work is deferred for a netisr, regardless of
133  * context (may be overridden by protocols).
134  *
135  * NETISR_DISPATCH_HYBRID: If the executing context allows direct dispatch,
136  * and we're running on the CPU the work would be performed on, then direct
137  * dispatch it if it wouldn't violate ordering constraints on the workstream.
138  *
139  * NETISR_DISPATCH_DIRECT: If the executing context allows direct dispatch,
140  * always direct dispatch.  (The default.)
141  *
142  * Notice that changing the global policy could lead to short periods of
143  * misordered processing, but this is considered acceptable as compared to
144  * the complexity of enforcing ordering during policy changes.  Protocols can
145  * override the global policy; protocols that don't wish to override it
146  * select NETISR_DISPATCH_DEFAULT.
147  */
148 #define	NETISR_DISPATCH_POLICY_DEFAULT	NETISR_DISPATCH_DIRECT
149 #define	NETISR_DISPATCH_POLICY_MAXSTR	20 /* Used for temporary buffers. */
150 static u_int	netisr_dispatch_policy = NETISR_DISPATCH_POLICY_DEFAULT;
151 static int	sysctl_netisr_dispatch_policy(SYSCTL_HANDLER_ARGS);
152 SYSCTL_PROC(_net_isr, OID_AUTO, dispatch, CTLTYPE_STRING | CTLFLAG_RWTUN,
153     0, 0, sysctl_netisr_dispatch_policy, "A",
154     "netisr dispatch policy");
155 
156 /*
157  * Allow the administrator to limit the number of threads (CPUs) used for
158  * netisr.  We don't check netisr_maxthreads before creating the thread for
159  * CPU 0, so at least one thread is always present.  This tunable must be
160  * set at boot and cannot be changed at runtime.  We create at most one
161  * thread per CPU: the default of 1 assigns only CPU 0 (and thus a single
162  * workstream), while -1 uses all CPUs (mp_ncpus), yielding one workstream
163  * per thread (CPU).
164  */
165 static int	netisr_maxthreads = 1;		/* Max number of threads. */
166 SYSCTL_INT(_net_isr, OID_AUTO, maxthreads, CTLFLAG_RDTUN,
167     &netisr_maxthreads, 0,
168     "Use at most this many CPUs for netisr processing");
169 
170 static int	netisr_bindthreads = 0;		/* Bind threads to CPUs. */
171 SYSCTL_INT(_net_isr, OID_AUTO, bindthreads, CTLFLAG_RDTUN,
172     &netisr_bindthreads, 0, "Bind netisr threads to CPUs.");
173 
174 /*
175  * Limit per-workstream mbuf queue limits to at most net.isr.maxqlimit,
176  * both for initial configuration and later modification using
177  * netisr_setqlimit().
178  */
179 #define	NETISR_DEFAULT_MAXQLIMIT	10240
180 static u_int	netisr_maxqlimit = NETISR_DEFAULT_MAXQLIMIT;
181 SYSCTL_UINT(_net_isr, OID_AUTO, maxqlimit, CTLFLAG_RDTUN,
182     &netisr_maxqlimit, 0,
183     "Maximum netisr per-protocol, per-CPU queue depth.");
184 
185 /*
186  * The default per-workstream mbuf queue limit for protocols that don't
187  * initialize the nh_qlimit field of their struct netisr_handler.  If this is
188  * set above netisr_maxqlimit, we truncate it to the maximum during boot.
189  */
190 #define	NETISR_DEFAULT_DEFAULTQLIMIT	256
191 static u_int	netisr_defaultqlimit = NETISR_DEFAULT_DEFAULTQLIMIT;
192 SYSCTL_UINT(_net_isr, OID_AUTO, defaultqlimit, CTLFLAG_RDTUN,
193     &netisr_defaultqlimit, 0,
194     "Default netisr per-protocol, per-CPU queue limit if not set by protocol");
195 
196 /*
197  * Store and export the compile-time constant NETISR_MAXPROT limit on the
198  * number of protocols that can register with netisr at a time.  This is
199  * required for crashdump analysis, as it sizes netisr_proto[].
200  */
201 static u_int	netisr_maxprot = NETISR_MAXPROT;
202 SYSCTL_UINT(_net_isr, OID_AUTO, maxprot, CTLFLAG_RD,
203     &netisr_maxprot, 0,
204     "Compile-time limit on the number of protocols supported by netisr.");
205 
206 /*
207  * The netisr_proto array describes all registered protocols, indexed by
208  * protocol number.  See netisr_internal.h for more details.
209  */
210 static struct netisr_proto	netisr_proto[NETISR_MAXPROT];
211 
212 /*
213  * Per-CPU workstream data.  See netisr_internal.h for more details.
214  */
215 DPCPU_DEFINE(struct netisr_workstream, nws);
216 
217 /*
218  * Map contiguous values between 0 and nws_count into CPU IDs appropriate for
219  * accessing workstreams.  This allows constructions of the form
220  * DPCPU_ID_GET(nws_array[arbitraryvalue % nws_count], nws).
221  */
222 static u_int				 nws_array[MAXCPU];
223 
224 /*
225  * Number of registered workstreams.  Will be at most the number of running
226  * CPUs once fully started.
227  */
228 static u_int				 nws_count;
229 SYSCTL_UINT(_net_isr, OID_AUTO, numthreads, CTLFLAG_RD,
230     &nws_count, 0, "Number of extant netisr threads.");
231 
232 /*
233  * Synchronization for each workstream: a mutex protects all mutable fields
234  * in each stream, including per-protocol state (mbuf queues).  The SWI is
235  * woken up if asynchronous dispatch is required.
236  */
237 #define	NWS_LOCK(s)		mtx_lock(&(s)->nws_mtx)
238 #define	NWS_LOCK_ASSERT(s)	mtx_assert(&(s)->nws_mtx, MA_OWNED)
239 #define	NWS_UNLOCK(s)		mtx_unlock(&(s)->nws_mtx)
240 #define	NWS_SIGNAL(s)		swi_sched((s)->nws_swi_cookie, 0)
241 
242 /*
243  * Utility routines for protocols that implement their own mapping of flows
244  * to CPUs.
245  */
246 u_int
247 netisr_get_cpucount(void)
248 {
249 
250 	return (nws_count);
251 }
252 
253 u_int
254 netisr_get_cpuid(u_int cpunumber)
255 {
256 
257 	KASSERT(cpunumber < nws_count, ("%s: %u >= %u", __func__, cpunumber,
258 	    nws_count));
259 
260 	return (nws_array[cpunumber]);
261 }
262 
263 /*
264  * The default implementation of flow -> CPU ID mapping.
265  *
266  * Non-static so that protocols can use it to map their own work to specific
267  * CPUs in a manner consistent with netisr for affinity purposes.
268  */
269 u_int
270 netisr_default_flow2cpu(u_int flowid)
271 {
272 
273 	return (nws_array[flowid % nws_count]);
274 }
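/*
 * Editorial sketch: a protocol doing its own placement might combine the
 * routines above to visit every CPU that has a netisr worker
 * (foo_percpu_init() is a hypothetical helper):
 *
 *	u_int i;
 *
 *	for (i = 0; i < netisr_get_cpucount(); i++)
 *		foo_percpu_init(netisr_get_cpuid(i));
 */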
275 
276 /*
277  * Dispatch tunable and sysctl configuration.
278  */
279 struct netisr_dispatch_table_entry {
280 	u_int		 ndte_policy;
281 	const char	*ndte_policy_str;
282 };
283 static const struct netisr_dispatch_table_entry netisr_dispatch_table[] = {
284 	{ NETISR_DISPATCH_DEFAULT, "default" },
285 	{ NETISR_DISPATCH_DEFERRED, "deferred" },
286 	{ NETISR_DISPATCH_HYBRID, "hybrid" },
287 	{ NETISR_DISPATCH_DIRECT, "direct" },
288 };
289 static const u_int netisr_dispatch_table_len =
290     nitems(netisr_dispatch_table);
291 
292 static void
293 netisr_dispatch_policy_to_str(u_int dispatch_policy, char *buffer,
294     u_int buflen)
295 {
296 	const struct netisr_dispatch_table_entry *ndtep;
297 	const char *str;
298 	u_int i;
299 
300 	str = "unknown";
301 	for (i = 0; i < netisr_dispatch_table_len; i++) {
302 		ndtep = &netisr_dispatch_table[i];
303 		if (ndtep->ndte_policy == dispatch_policy) {
304 			str = ndtep->ndte_policy_str;
305 			break;
306 		}
307 	}
308 	snprintf(buffer, buflen, "%s", str);
309 }
310 
311 static int
312 netisr_dispatch_policy_from_str(const char *str, u_int *dispatch_policyp)
313 {
314 	const struct netisr_dispatch_table_entry *ndtep;
315 	u_int i;
316 
317 	for (i = 0; i < netisr_dispatch_table_len; i++) {
318 		ndtep = &netisr_dispatch_table[i];
319 		if (strcmp(ndtep->ndte_policy_str, str) == 0) {
320 			*dispatch_policyp = ndtep->ndte_policy;
321 			return (0);
322 		}
323 	}
324 	return (EINVAL);
325 }
326 
327 static int
328 sysctl_netisr_dispatch_policy(SYSCTL_HANDLER_ARGS)
329 {
330 	char tmp[NETISR_DISPATCH_POLICY_MAXSTR];
331 	u_int dispatch_policy;
332 	int error;
333 
334 	netisr_dispatch_policy_to_str(netisr_dispatch_policy, tmp,
335 	    sizeof(tmp));
336 	error = sysctl_handle_string(oidp, tmp, sizeof(tmp), req);
337 	if (error == 0 && req->newptr != NULL) {
338 		error = netisr_dispatch_policy_from_str(tmp,
339 		    &dispatch_policy);
340 		if (error == 0 && dispatch_policy == NETISR_DISPATCH_DEFAULT)
341 			error = EINVAL;
342 		if (error == 0)
343 			netisr_dispatch_policy = dispatch_policy;
344 	}
345 	return (error);
346 }
347 
348 /*
349  * Register a new netisr handler, which requires initializing per-protocol
350  * fields for each workstream.  All netisr work is briefly suspended while
351  * the protocol is installed.
352  */
353 void
354 netisr_register(const struct netisr_handler *nhp)
355 {
356 	struct netisr_work *npwp;
357 	const char *name;
358 	u_int i, proto;
359 
360 	proto = nhp->nh_proto;
361 	name = nhp->nh_name;
362 
363 	/*
364 	 * Test that the requested registration is valid.
365 	 */
366 	KASSERT(nhp->nh_name != NULL,
367 	    ("%s: nh_name NULL for %u", __func__, proto));
368 	KASSERT(nhp->nh_handler != NULL,
369 	    ("%s: nh_handler NULL for %s", __func__, name));
370 	KASSERT(nhp->nh_policy == NETISR_POLICY_SOURCE ||
371 	    nhp->nh_policy == NETISR_POLICY_FLOW ||
372 	    nhp->nh_policy == NETISR_POLICY_CPU,
373 	    ("%s: unsupported nh_policy %u for %s", __func__,
374 	    nhp->nh_policy, name));
375 	KASSERT(nhp->nh_policy == NETISR_POLICY_FLOW ||
376 	    nhp->nh_m2flow == NULL,
377 	    ("%s: nh_policy != FLOW but m2flow defined for %s", __func__,
378 	    name));
379 	KASSERT(nhp->nh_policy == NETISR_POLICY_CPU || nhp->nh_m2cpuid == NULL,
380 	    ("%s: nh_policy != CPU but m2cpuid defined for %s", __func__,
381 	    name));
382 	KASSERT(nhp->nh_policy != NETISR_POLICY_CPU || nhp->nh_m2cpuid != NULL,
383 	    ("%s: nh_policy == CPU but m2cpuid not defined for %s", __func__,
384 	    name));
385 	KASSERT(nhp->nh_dispatch == NETISR_DISPATCH_DEFAULT ||
386 	    nhp->nh_dispatch == NETISR_DISPATCH_DEFERRED ||
387 	    nhp->nh_dispatch == NETISR_DISPATCH_HYBRID ||
388 	    nhp->nh_dispatch == NETISR_DISPATCH_DIRECT,
389 	    ("%s: invalid nh_dispatch (%u)", __func__, nhp->nh_dispatch));
390 
391 	KASSERT(proto < NETISR_MAXPROT,
392 	    ("%s(%u, %s): protocol too big", __func__, proto, name));
393 
394 	/*
395 	 * Test that no existing registration exists for this protocol.
396 	 */
397 	NETISR_WLOCK();
398 	KASSERT(netisr_proto[proto].np_name == NULL,
399 	    ("%s(%u, %s): name present", __func__, proto, name));
400 	KASSERT(netisr_proto[proto].np_handler == NULL,
401 	    ("%s(%u, %s): handler present", __func__, proto, name));
402 
403 	netisr_proto[proto].np_name = name;
404 	netisr_proto[proto].np_handler = nhp->nh_handler;
405 	netisr_proto[proto].np_m2flow = nhp->nh_m2flow;
406 	netisr_proto[proto].np_m2cpuid = nhp->nh_m2cpuid;
407 	netisr_proto[proto].np_drainedcpu = nhp->nh_drainedcpu;
408 	if (nhp->nh_qlimit == 0)
409 		netisr_proto[proto].np_qlimit = netisr_defaultqlimit;
410 	else if (nhp->nh_qlimit > netisr_maxqlimit) {
411 		printf("%s: %s requested queue limit %u capped to "
412 		    "net.isr.maxqlimit %u\n", __func__, name, nhp->nh_qlimit,
413 		    netisr_maxqlimit);
414 		netisr_proto[proto].np_qlimit = netisr_maxqlimit;
415 	} else
416 		netisr_proto[proto].np_qlimit = nhp->nh_qlimit;
417 	netisr_proto[proto].np_policy = nhp->nh_policy;
418 	netisr_proto[proto].np_dispatch = nhp->nh_dispatch;
419 	CPU_FOREACH(i) {
420 		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
421 		bzero(npwp, sizeof(*npwp));
422 		npwp->nw_qlimit = netisr_proto[proto].np_qlimit;
423 	}
424 	NETISR_WUNLOCK();
425 }
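/*
 * Editorial sketch of a registration; NETISR_FOO and foo_input are
 * hypothetical, and the field values are illustrative only:
 *
 *	static const struct netisr_handler foo_nh = {
 *		.nh_name = "foo",
 *		.nh_handler = foo_input,
 *		.nh_proto = NETISR_FOO,
 *		.nh_policy = NETISR_POLICY_FLOW,
 *		.nh_dispatch = NETISR_DISPATCH_DEFAULT,
 *	};
 *	netisr_register(&foo_nh);
 *
 * Leaving nh_qlimit at 0 selects netisr_defaultqlimit, and
 * NETISR_DISPATCH_DEFAULT defers to the global dispatch policy, per the
 * code above.
 */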
426 
427 /*
428  * Clear drop counters across all workstreams for a protocol.
429  */
430 void
431 netisr_clearqdrops(const struct netisr_handler *nhp)
432 {
433 	struct netisr_work *npwp;
434 #ifdef INVARIANTS
435 	const char *name;
436 #endif
437 	u_int i, proto;
438 
439 	proto = nhp->nh_proto;
440 #ifdef INVARIANTS
441 	name = nhp->nh_name;
442 #endif
443 	KASSERT(proto < NETISR_MAXPROT,
444 	    ("%s(%u): protocol too big for %s", __func__, proto, name));
445 
446 	NETISR_WLOCK();
447 	KASSERT(netisr_proto[proto].np_handler != NULL,
448 	    ("%s(%u): protocol not registered for %s", __func__, proto,
449 	    name));
450 
451 	CPU_FOREACH(i) {
452 		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
453 		npwp->nw_qdrops = 0;
454 	}
455 	NETISR_WUNLOCK();
456 }
457 
458 /*
459  * Query current drop counters across all workstreams for a protocol.
460  */
461 void
462 netisr_getqdrops(const struct netisr_handler *nhp, u_int64_t *qdropp)
463 {
464 	struct netisr_work *npwp;
465 	struct rm_priotracker tracker;
466 #ifdef INVARIANTS
467 	const char *name;
468 #endif
469 	u_int i, proto;
470 
471 	*qdropp = 0;
472 	proto = nhp->nh_proto;
473 #ifdef INVARIANTS
474 	name = nhp->nh_name;
475 #endif
476 	KASSERT(proto < NETISR_MAXPROT,
477 	    ("%s(%u): protocol too big for %s", __func__, proto, name));
478 
479 	NETISR_RLOCK(&tracker);
480 	KASSERT(netisr_proto[proto].np_handler != NULL,
481 	    ("%s(%u): protocol not registered for %s", __func__, proto,
482 	    name));
483 
484 	CPU_FOREACH(i) {
485 		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
486 		*qdropp += npwp->nw_qdrops;
487 	}
488 	NETISR_RUNLOCK(&tracker);
489 }
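/*
 * Editorial example: a protocol can sample and reset its drop counter from
 * a periodic task (foo_nh is the hypothetical handler sketched earlier):
 *
 *	u_int64_t qdrops;
 *
 *	netisr_getqdrops(&foo_nh, &qdrops);
 *	if (qdrops != 0)
 *		netisr_clearqdrops(&foo_nh);
 */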
490 
491 /*
492  * Query current per-workstream queue limit for a protocol.
493  */
494 void
495 netisr_getqlimit(const struct netisr_handler *nhp, u_int *qlimitp)
496 {
497 	struct rm_priotracker tracker;
498 #ifdef INVARIANTS
499 	const char *name;
500 #endif
501 	u_int proto;
502 
503 	proto = nhp->nh_proto;
504 #ifdef INVARIANTS
505 	name = nhp->nh_name;
506 #endif
507 	KASSERT(proto < NETISR_MAXPROT,
508 	    ("%s(%u): protocol too big for %s", __func__, proto, name));
509 
510 	NETISR_RLOCK(&tracker);
511 	KASSERT(netisr_proto[proto].np_handler != NULL,
512 	    ("%s(%u): protocol not registered for %s", __func__, proto,
513 	    name));
514 	*qlimitp = netisr_proto[proto].np_qlimit;
515 	NETISR_RUNLOCK(&tracker);
516 }
517 
518 /*
519  * Update the queue limit across per-workstream queues for a protocol.  We
520  * simply change the limits, and don't drain overflowed packets as they will
521  * (hopefully) take care of themselves shortly.
522  */
523 int
524 netisr_setqlimit(const struct netisr_handler *nhp, u_int qlimit)
525 {
526 	struct netisr_work *npwp;
527 #ifdef INVARIANTS
528 	const char *name;
529 #endif
530 	u_int i, proto;
531 
532 	if (qlimit > netisr_maxqlimit)
533 		return (EINVAL);
534 
535 	proto = nhp->nh_proto;
536 #ifdef INVARIANTS
537 	name = nhp->nh_name;
538 #endif
539 	KASSERT(proto < NETISR_MAXPROT,
540 	    ("%s(%u): protocol too big for %s", __func__, proto, name));
541 
542 	NETISR_WLOCK();
543 	KASSERT(netisr_proto[proto].np_handler != NULL,
544 	    ("%s(%u): protocol not registered for %s", __func__, proto,
545 	    name));
546 
547 	netisr_proto[proto].np_qlimit = qlimit;
548 	CPU_FOREACH(i) {
549 		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
550 		npwp->nw_qlimit = qlimit;
551 	}
552 	NETISR_WUNLOCK();
553 	return (0);
554 }
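/*
 * Editorial example: raising the limit for the hypothetical "foo" handler
 * after registration; requests above net.isr.maxqlimit are refused:
 *
 *	if (netisr_setqlimit(&foo_nh, 2048) != 0)
 *		printf("foo: requested qlimit exceeds net.isr.maxqlimit\n");
 */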
555 
556 /*
557  * Drain all packets currently held in a particular protocol work queue.
558  */
559 static void
560 netisr_drain_proto(struct netisr_work *npwp)
561 {
562 	struct mbuf *m;
563 
564 	/*
565 	 * We would assert the lock on the workstream but it's not passed in.
566 	 */
567 	while ((m = npwp->nw_head) != NULL) {
568 		npwp->nw_head = m->m_nextpkt;
569 		m->m_nextpkt = NULL;
570 		if (npwp->nw_head == NULL)
571 			npwp->nw_tail = NULL;
572 		npwp->nw_len--;
573 		m_freem(m);
574 	}
575 	KASSERT(npwp->nw_tail == NULL, ("%s: tail", __func__));
576 	KASSERT(npwp->nw_len == 0, ("%s: len", __func__));
577 }
578 
579 /*
580  * Remove the registration of a network protocol, which requires clearing
581  * per-protocol fields across all workstreams, including freeing all mbufs in
582  * the queues at time of unregister.  All work in netisr is briefly suspended
583  * while this takes place.
584  */
585 void
586 netisr_unregister(const struct netisr_handler *nhp)
587 {
588 	struct netisr_work *npwp;
589 #ifdef INVARIANTS
590 	const char *name;
591 #endif
592 	u_int i, proto;
593 
594 	proto = nhp->nh_proto;
595 #ifdef INVARIANTS
596 	name = nhp->nh_name;
597 #endif
598 	KASSERT(proto < NETISR_MAXPROT,
599 	    ("%s(%u): protocol too big for %s", __func__, proto, name));
600 
601 	NETISR_WLOCK();
602 	KASSERT(netisr_proto[proto].np_handler != NULL,
603 	    ("%s(%u): protocol not registered for %s", __func__, proto,
604 	    name));
605 
606 	netisr_proto[proto].np_name = NULL;
607 	netisr_proto[proto].np_handler = NULL;
608 	netisr_proto[proto].np_m2flow = NULL;
609 	netisr_proto[proto].np_m2cpuid = NULL;
610 	netisr_proto[proto].np_qlimit = 0;
611 	netisr_proto[proto].np_policy = 0;
612 	CPU_FOREACH(i) {
613 		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
614 		netisr_drain_proto(npwp);
615 		bzero(npwp, sizeof(*npwp));
616 	}
617 	NETISR_WUNLOCK();
618 }
619 
620 /*
621  * Compose the global and per-protocol policies on dispatch, and return the
622  * dispatch policy to use.
623  */
624 static u_int
625 netisr_get_dispatch(struct netisr_proto *npp)
626 {
627 
628 	/*
629 	 * Protocol-specific configuration overrides the global default.
630 	 */
631 	if (npp->np_dispatch != NETISR_DISPATCH_DEFAULT)
632 		return (npp->np_dispatch);
633 	return (netisr_dispatch_policy);
634 }
635 
636 /*
637  * Look up the workstream given a packet and source identifier.  Do this by
638  * checking the protocol's policy, and optionally call out to the protocol
639  * for assistance if required.
640  */
641 static struct mbuf *
642 netisr_select_cpuid(struct netisr_proto *npp, u_int dispatch_policy,
643     uintptr_t source, struct mbuf *m, u_int *cpuidp)
644 {
645 	struct ifnet *ifp;
646 	u_int policy;
647 
648 	NETISR_LOCK_ASSERT();
649 
650 	/*
651 	 * In the event we have only one worker, shortcut and deliver to it
652 	 * without further ado.
653 	 */
654 	if (nws_count == 1) {
655 		*cpuidp = nws_array[0];
656 		return (m);
657 	}
658 
659 	/*
660 	 * What happens next depends on the policy selected by the protocol.
661 	 * If we want to support per-interface policies, we should do that
662 	 * here first.
663 	 */
664 	policy = npp->np_policy;
665 	if (policy == NETISR_POLICY_CPU) {
666 		m = npp->np_m2cpuid(m, source, cpuidp);
667 		if (m == NULL)
668 			return (NULL);
669 
670 		/*
671 		 * It's possible for a protocol not to have a good idea about
672 		 * where to process a packet, in which case we fall back on
673 		 * the netisr code to decide.  In the hybrid case, return the
674 		 * current CPU ID, which will force an immediate direct
675 		 * dispatch.  In the queued case, fall back on the SOURCE
676 		 * policy.
677 		 */
678 		if (*cpuidp != NETISR_CPUID_NONE)
679 			return (m);
680 		if (dispatch_policy == NETISR_DISPATCH_HYBRID) {
681 			*cpuidp = curcpu;
682 			return (m);
683 		}
684 		policy = NETISR_POLICY_SOURCE;
685 	}
686 
687 	if (policy == NETISR_POLICY_FLOW) {
688 		if (M_HASHTYPE_GET(m) == M_HASHTYPE_NONE &&
689 		    npp->np_m2flow != NULL) {
690 			m = npp->np_m2flow(m, source);
691 			if (m == NULL)
692 				return (NULL);
693 		}
694 		if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
695 			*cpuidp =
696 			    netisr_default_flow2cpu(m->m_pkthdr.flowid);
697 			return (m);
698 		}
699 		policy = NETISR_POLICY_SOURCE;
700 	}
701 
702 	KASSERT(policy == NETISR_POLICY_SOURCE,
703 	    ("%s: invalid policy %u for %s", __func__, npp->np_policy,
704 	    npp->np_name));
705 
706 	ifp = m->m_pkthdr.rcvif;
707 	if (ifp != NULL)
708 		*cpuidp = nws_array[(ifp->if_index + source) % nws_count];
709 	else
710 		*cpuidp = nws_array[source % nws_count];
711 	return (m);
712 }
713 
714 /*
715  * Process packets associated with a workstream and protocol.  For reasons of
716  * fairness, we process up to one complete netisr queue at a time, moving the
717  * queue to a stack-local queue for processing, but do not loop refreshing
718  * from the global queue.  The caller is responsible for deciding whether to
719  * loop, and for setting the NWS_RUNNING flag.  The passed workstream will be
720  * locked on entry and relocked before return, but will be released while
721  * processing.  The number of packets processed is returned.
722  */
723 static u_int
724 netisr_process_workstream_proto(struct netisr_workstream *nwsp, u_int proto)
725 {
726 	struct netisr_work local_npw, *npwp;
727 	u_int handled;
728 	struct mbuf *m;
729 
730 	NETISR_LOCK_ASSERT();
731 	NWS_LOCK_ASSERT(nwsp);
732 
733 	KASSERT(nwsp->nws_flags & NWS_RUNNING,
734 	    ("%s(%u): not running", __func__, proto));
735 	KASSERT(proto < NETISR_MAXPROT,
736 	    ("%s(%u): invalid proto", __func__, proto));
737 
738 	npwp = &nwsp->nws_work[proto];
739 	if (npwp->nw_len == 0)
740 		return (0);
741 
742 	/*
743 	 * Move the global work queue to a thread-local work queue.
744 	 *
745 	 * Notice that this means the effective maximum length of the queue
746 	 * is actually twice that of the maximum queue length specified in
747 	 * the protocol registration call.
748 	 */
749 	handled = npwp->nw_len;
750 	local_npw = *npwp;
751 	npwp->nw_head = NULL;
752 	npwp->nw_tail = NULL;
753 	npwp->nw_len = 0;
754 	nwsp->nws_pendingbits &= ~(1 << proto);
755 	NWS_UNLOCK(nwsp);
756 	while ((m = local_npw.nw_head) != NULL) {
757 		local_npw.nw_head = m->m_nextpkt;
758 		m->m_nextpkt = NULL;
759 		if (local_npw.nw_head == NULL)
760 			local_npw.nw_tail = NULL;
761 		local_npw.nw_len--;
762 		VNET_ASSERT(m->m_pkthdr.rcvif != NULL,
763 		    ("%s:%d rcvif == NULL: m=%p", __func__, __LINE__, m));
764 		CURVNET_SET(m->m_pkthdr.rcvif->if_vnet);
765 		netisr_proto[proto].np_handler(m);
766 		CURVNET_RESTORE();
767 	}
768 	KASSERT(local_npw.nw_len == 0,
769 	    ("%s(%u): len %u", __func__, proto, local_npw.nw_len));
770 	if (netisr_proto[proto].np_drainedcpu)
771 		netisr_proto[proto].np_drainedcpu(nwsp->nws_cpu);
772 	NWS_LOCK(nwsp);
773 	npwp->nw_handled += handled;
774 	return (handled);
775 }
776 
777 /*
778  * SWI handler for netisr -- processes packets in a set of workstreams that
779  * it owns, woken up by calls to NWS_SIGNAL().  If this workstream is already
780  * being direct dispatched, go back to sleep and wait for the dispatching
781  * thread to wake us up again.
782  */
783 static void
784 swi_net(void *arg)
785 {
786 #ifdef NETISR_LOCKING
787 	struct rm_priotracker tracker;
788 #endif
789 	struct netisr_workstream *nwsp;
790 	u_int bits, prot;
791 
792 	nwsp = arg;
793 
794 #ifdef DEVICE_POLLING
795 	KASSERT(nws_count == 1,
796 	    ("%s: device_polling but nws_count != 1", __func__));
797 	netisr_poll();
798 #endif
799 #ifdef NETISR_LOCKING
800 	NETISR_RLOCK(&tracker);
801 #endif
802 	NWS_LOCK(nwsp);
803 	KASSERT(!(nwsp->nws_flags & NWS_RUNNING), ("swi_net: running"));
804 	if (nwsp->nws_flags & NWS_DISPATCHING)
805 		goto out;
806 	nwsp->nws_flags |= NWS_RUNNING;
807 	nwsp->nws_flags &= ~NWS_SCHEDULED;
808 	while ((bits = nwsp->nws_pendingbits) != 0) {
809 		while ((prot = ffs(bits)) != 0) {
810 			prot--;
811 			bits &= ~(1 << prot);
812 			(void)netisr_process_workstream_proto(nwsp, prot);
813 		}
814 	}
815 	nwsp->nws_flags &= ~NWS_RUNNING;
816 out:
817 	NWS_UNLOCK(nwsp);
818 #ifdef NETISR_LOCKING
819 	NETISR_RUNLOCK(&tracker);
820 #endif
821 #ifdef DEVICE_POLLING
822 	netisr_pollmore();
823 #endif
824 }
825 
826 static int
827 netisr_queue_workstream(struct netisr_workstream *nwsp, u_int proto,
828     struct netisr_work *npwp, struct mbuf *m, int *dosignalp)
829 {
830 
831 	NWS_LOCK_ASSERT(nwsp);
832 
833 	*dosignalp = 0;
834 	if (npwp->nw_len < npwp->nw_qlimit) {
835 		m->m_nextpkt = NULL;
836 		if (npwp->nw_head == NULL) {
837 			npwp->nw_head = m;
838 			npwp->nw_tail = m;
839 		} else {
840 			npwp->nw_tail->m_nextpkt = m;
841 			npwp->nw_tail = m;
842 		}
843 		npwp->nw_len++;
844 		if (npwp->nw_len > npwp->nw_watermark)
845 			npwp->nw_watermark = npwp->nw_len;
846 
847 		/*
848 		 * We must set the bit regardless of NWS_RUNNING, so that
849 		 * swi_net() keeps calling netisr_process_workstream_proto().
850 		 */
851 		nwsp->nws_pendingbits |= (1 << proto);
852 		if (!(nwsp->nws_flags &
853 		    (NWS_RUNNING | NWS_DISPATCHING | NWS_SCHEDULED))) {
854 			nwsp->nws_flags |= NWS_SCHEDULED;
855 			*dosignalp = 1;	/* Defer until unlocked. */
856 		}
857 		npwp->nw_queued++;
858 		return (0);
859 	} else {
860 		m_freem(m);
861 		npwp->nw_qdrops++;
862 		return (ENOBUFS);
863 	}
864 }
865 
866 static int
867 netisr_queue_internal(u_int proto, struct mbuf *m, u_int cpuid)
868 {
869 	struct netisr_workstream *nwsp;
870 	struct netisr_work *npwp;
871 	int dosignal, error;
872 
873 #ifdef NETISR_LOCKING
874 	NETISR_LOCK_ASSERT();
875 #endif
876 	KASSERT(cpuid <= mp_maxid, ("%s: cpuid too big (%u, %u)", __func__,
877 	    cpuid, mp_maxid));
878 	KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid));
879 
880 	dosignal = 0;
881 	error = 0;
882 	nwsp = DPCPU_ID_PTR(cpuid, nws);
883 	npwp = &nwsp->nws_work[proto];
884 	NWS_LOCK(nwsp);
885 	error = netisr_queue_workstream(nwsp, proto, npwp, m, &dosignal);
886 	NWS_UNLOCK(nwsp);
887 	if (dosignal)
888 		NWS_SIGNAL(nwsp);
889 	return (error);
890 }
891 
892 int
893 netisr_queue_src(u_int proto, uintptr_t source, struct mbuf *m)
894 {
895 #ifdef NETISR_LOCKING
896 	struct rm_priotracker tracker;
897 #endif
898 	u_int cpuid;
899 	int error;
900 
901 	KASSERT(proto < NETISR_MAXPROT,
902 	    ("%s: invalid proto %u", __func__, proto));
903 
904 #ifdef NETISR_LOCKING
905 	NETISR_RLOCK(&tracker);
906 #endif
907 	KASSERT(netisr_proto[proto].np_handler != NULL,
908 	    ("%s: invalid proto %u", __func__, proto));
909 
910 	m = netisr_select_cpuid(&netisr_proto[proto], NETISR_DISPATCH_DEFERRED,
911 	    source, m, &cpuid);
912 	if (m != NULL) {
913 		KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__,
914 		    cpuid));
915 		error = netisr_queue_internal(proto, m, cpuid);
916 	} else
917 		error = ENOBUFS;
918 #ifdef NETISR_LOCKING
919 	NETISR_RUNLOCK(&tracker);
920 #endif
921 	return (error);
922 }
923 
924 int
925 netisr_queue(u_int proto, struct mbuf *m)
926 {
927 
928 	return (netisr_queue_src(proto, 0, m));
929 }
930 
931 /*
932  * Dispatch a packet for netisr processing; direct dispatch is permitted by
933  * calling context.
934  */
935 int
936 netisr_dispatch_src(u_int proto, uintptr_t source, struct mbuf *m)
937 {
938 #ifdef NETISR_LOCKING
939 	struct rm_priotracker tracker;
940 #endif
941 	struct netisr_workstream *nwsp;
942 	struct netisr_proto *npp;
943 	struct netisr_work *npwp;
944 	int dosignal, error;
945 	u_int cpuid, dispatch_policy;
946 
947 	KASSERT(proto < NETISR_MAXPROT,
948 	    ("%s: invalid proto %u", __func__, proto));
949 #ifdef NETISR_LOCKING
950 	NETISR_RLOCK(&tracker);
951 #endif
952 	npp = &netisr_proto[proto];
953 	KASSERT(npp->np_handler != NULL, ("%s: invalid proto %u", __func__,
954 	    proto));
955 
956 	dispatch_policy = netisr_get_dispatch(npp);
957 	if (dispatch_policy == NETISR_DISPATCH_DEFERRED)
958 		return (netisr_queue_src(proto, source, m));
959 
960 	/*
961 	 * If direct dispatch is forced, then unconditionally dispatch
962 	 * without a formal CPU selection.  Borrow the current CPU's stats,
963 	 * even if there's no worker on it.  In this case we don't update
964 	 * nws_flags because all netisr processing will be source ordered due
965 	 * to always being forced to directly dispatch.
966 	 */
967 	if (dispatch_policy == NETISR_DISPATCH_DIRECT) {
968 		nwsp = DPCPU_PTR(nws);
969 		npwp = &nwsp->nws_work[proto];
970 		npwp->nw_dispatched++;
971 		npwp->nw_handled++;
972 		netisr_proto[proto].np_handler(m);
973 		error = 0;
974 		goto out_unlock;
975 	}
976 
977 	KASSERT(dispatch_policy == NETISR_DISPATCH_HYBRID,
978 	    ("%s: unknown dispatch policy (%u)", __func__, dispatch_policy));
979 
980 	/*
981 	 * Otherwise, we execute in a hybrid mode where we will try to direct
982 	 * dispatch if we're on the right CPU and the netisr worker isn't
983 	 * already running.
984 	 */
985 	sched_pin();
986 	m = netisr_select_cpuid(&netisr_proto[proto], NETISR_DISPATCH_HYBRID,
987 	    source, m, &cpuid);
988 	if (m == NULL) {
989 		error = ENOBUFS;
990 		goto out_unpin;
991 	}
992 	KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid));
993 	if (cpuid != curcpu)
994 		goto queue_fallback;
995 	nwsp = DPCPU_PTR(nws);
996 	npwp = &nwsp->nws_work[proto];
997 
998 	/*-
999 	 * We are willing to direct dispatch only if three conditions hold:
1000 	 *
1001 	 * (1) The netisr worker isn't already running,
1002 	 * (2) Another thread isn't already directly dispatching, and
1003 	 * (3) The netisr hasn't already been woken up.
1004 	 */
1005 	NWS_LOCK(nwsp);
1006 	if (nwsp->nws_flags & (NWS_RUNNING | NWS_DISPATCHING | NWS_SCHEDULED)) {
1007 		error = netisr_queue_workstream(nwsp, proto, npwp, m,
1008 		    &dosignal);
1009 		NWS_UNLOCK(nwsp);
1010 		if (dosignal)
1011 			NWS_SIGNAL(nwsp);
1012 		goto out_unpin;
1013 	}
1014 
1015 	/*
1016 	 * The current thread is now effectively the netisr worker, so set
1017 	 * the dispatching flag to prevent concurrent processing of the
1018 	 * stream from another thread (even the netisr worker), which could
1019 	 * otherwise lead to effective misordering of the stream.
1020 	 */
1021 	nwsp->nws_flags |= NWS_DISPATCHING;
1022 	NWS_UNLOCK(nwsp);
1023 	netisr_proto[proto].np_handler(m);
1024 	NWS_LOCK(nwsp);
1025 	nwsp->nws_flags &= ~NWS_DISPATCHING;
1026 	npwp->nw_handled++;
1027 	npwp->nw_hybrid_dispatched++;
1028 
1029 	/*
1030 	 * If other work was enqueued by another thread while we were direct
1031 	 * dispatching, we need to signal the netisr worker to do that work.
1032 	 * In the future, we might want to do some of that work in the
1033 	 * current thread, rather than trigger further context switches.  If
1034 	 * so, we'll want to establish a reasonable bound on the work done in
1035 	 * the "borrowed" context.
1036 	 */
1037 	if (nwsp->nws_pendingbits != 0) {
1038 		nwsp->nws_flags |= NWS_SCHEDULED;
1039 		dosignal = 1;
1040 	} else
1041 		dosignal = 0;
1042 	NWS_UNLOCK(nwsp);
1043 	if (dosignal)
1044 		NWS_SIGNAL(nwsp);
1045 	error = 0;
1046 	goto out_unpin;
1047 
1048 queue_fallback:
1049 	error = netisr_queue_internal(proto, m, cpuid);
1050 out_unpin:
1051 	sched_unpin();
1052 out_unlock:
1053 #ifdef NETISR_LOCKING
1054 	NETISR_RUNLOCK(&tracker);
1055 #endif
1056 	return (error);
1057 }
1058 
1059 int
1060 netisr_dispatch(u_int proto, struct mbuf *m)
1061 {
1062 
1063 	return (netisr_dispatch_src(proto, 0, m));
1064 }
1065 
1066 #ifdef DEVICE_POLLING
1067 /*
1068  * Kernel polling borrows a netisr thread to run interface polling in; this
1069  * function allows kernel polling to request that the netisr thread be
1070  * scheduled even if no packets are pending for protocols.
1071  */
1072 void
1073 netisr_sched_poll(void)
1074 {
1075 	struct netisr_workstream *nwsp;
1076 
1077 	nwsp = DPCPU_ID_PTR(nws_array[0], nws);
1078 	NWS_SIGNAL(nwsp);
1079 }
1080 #endif
1081 
1082 static void
1083 netisr_start_swi(u_int cpuid, struct pcpu *pc)
1084 {
1085 	char swiname[12];
1086 	struct netisr_workstream *nwsp;
1087 	int error;
1088 
1089 	KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid));
1090 
1091 	nwsp = DPCPU_ID_PTR(cpuid, nws);
1092 	mtx_init(&nwsp->nws_mtx, "netisr_mtx", NULL, MTX_DEF);
1093 	nwsp->nws_cpu = cpuid;
1094 	snprintf(swiname, sizeof(swiname), "netisr %u", cpuid);
1095 	error = swi_add(&nwsp->nws_intr_event, swiname, swi_net, nwsp,
1096 	    SWI_NET, INTR_MPSAFE, &nwsp->nws_swi_cookie);
1097 	if (error)
1098 		panic("%s: swi_add %d", __func__, error);
1099 	pc->pc_netisr = nwsp->nws_intr_event;
1100 	if (netisr_bindthreads) {
1101 		error = intr_event_bind(nwsp->nws_intr_event, cpuid);
1102 		if (error != 0)
1103 			printf("%s: cpu %u: intr_event_bind: %d", __func__,
1104 			    cpuid, error);
1105 	}
1106 	NETISR_WLOCK();
1107 	nws_array[nws_count] = nwsp->nws_cpu;
1108 	nws_count++;
1109 	NETISR_WUNLOCK();
1110 }
1111 
1112 /*
1113  * Initialize the netisr subsystem.  We rely on BSS and static initialization
1114  * of most fields in global data structures.
1115  *
1116  * Start a worker thread for the boot CPU so that we can support network
1117  * traffic immediately in case the network stack is used before additional
1118  * CPUs are started (for example, diskless boot).
1119  */
1120 static void
1121 netisr_init(void *arg)
1122 {
1123 	KASSERT(curcpu == 0, ("%s: not on CPU 0", __func__));
1124 
1125 	NETISR_LOCK_INIT();
1126 	if (netisr_maxthreads == 0 || netisr_maxthreads < -1)
1127 		netisr_maxthreads = 1;		/* default behavior */
1128 	else if (netisr_maxthreads == -1)
1129 		netisr_maxthreads = mp_ncpus;	/* use max cpus */
1130 	if (netisr_maxthreads > mp_ncpus) {
1131 		printf("netisr_init: forcing maxthreads from %d to %d\n",
1132 		    netisr_maxthreads, mp_ncpus);
1133 		netisr_maxthreads = mp_ncpus;
1134 	}
1135 	if (netisr_defaultqlimit > netisr_maxqlimit) {
1136 		printf("netisr_init: forcing defaultqlimit from %d to %d\n",
1137 		    netisr_defaultqlimit, netisr_maxqlimit);
1138 		netisr_defaultqlimit = netisr_maxqlimit;
1139 	}
1140 #ifdef DEVICE_POLLING
1141 	/*
1142 	 * The device polling code is not yet aware of how to deal with
1143 	 * multiple netisr threads, so for the time being compiling in device
1144 	 * polling disables parallel netisr workers.
1145 	 */
1146 	if (netisr_maxthreads != 1 || netisr_bindthreads != 0) {
1147 		printf("netisr_init: forcing maxthreads to 1 and "
1148 		    "bindthreads to 0 for device polling\n");
1149 		netisr_maxthreads = 1;
1150 		netisr_bindthreads = 0;
1151 	}
1152 #endif
1153 	netisr_start_swi(curcpu, pcpu_find(curcpu));
1154 }
1155 SYSINIT(netisr_init, SI_SUB_SOFTINTR, SI_ORDER_FIRST, netisr_init, NULL);
1156 
1157 /*
1158  * Start worker threads for additional CPUs.  No attempt is made to gracefully
1159  * handle work reassignment, as we don't yet support dynamic reconfiguration.
1160  */
1161 static void
1162 netisr_start(void *arg)
1163 {
1164 	struct pcpu *pc;
1165 
1166 	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
1167 		if (nws_count >= netisr_maxthreads)
1168 			break;
1169 		/* XXXRW: Is skipping absent CPUs still required here? */
1170 		if (CPU_ABSENT(pc->pc_cpuid))
1171 			continue;
1172 		/* Worker will already be present for boot CPU. */
1173 		if (pc->pc_netisr != NULL)
1174 			continue;
1175 		netisr_start_swi(pc->pc_cpuid, pc);
1176 	}
1177 }
1178 SYSINIT(netisr_start, SI_SUB_SMP, SI_ORDER_MIDDLE, netisr_start, NULL);
1179 
1180 /*
1181  * Sysctl monitoring for netisr: query a list of registered protocols.
1182  */
1183 static int
1184 sysctl_netisr_proto(SYSCTL_HANDLER_ARGS)
1185 {
1186 	struct rm_priotracker tracker;
1187 	struct sysctl_netisr_proto *snpp, *snp_array;
1188 	struct netisr_proto *npp;
1189 	u_int counter, proto;
1190 	int error;
1191 
1192 	if (req->newptr != NULL)
1193 		return (EINVAL);
1194 	snp_array = malloc(sizeof(*snp_array) * NETISR_MAXPROT, M_TEMP,
1195 	    M_ZERO | M_WAITOK);
1196 	counter = 0;
1197 	NETISR_RLOCK(&tracker);
1198 	for (proto = 0; proto < NETISR_MAXPROT; proto++) {
1199 		npp = &netisr_proto[proto];
1200 		if (npp->np_name == NULL)
1201 			continue;
1202 		snpp = &snp_array[counter];
1203 		snpp->snp_version = sizeof(*snpp);
1204 		strlcpy(snpp->snp_name, npp->np_name, NETISR_NAMEMAXLEN);
1205 		snpp->snp_proto = proto;
1206 		snpp->snp_qlimit = npp->np_qlimit;
1207 		snpp->snp_policy = npp->np_policy;
1208 		snpp->snp_dispatch = npp->np_dispatch;
1209 		if (npp->np_m2flow != NULL)
1210 			snpp->snp_flags |= NETISR_SNP_FLAGS_M2FLOW;
1211 		if (npp->np_m2cpuid != NULL)
1212 			snpp->snp_flags |= NETISR_SNP_FLAGS_M2CPUID;
1213 		if (npp->np_drainedcpu != NULL)
1214 			snpp->snp_flags |= NETISR_SNP_FLAGS_DRAINEDCPU;
1215 		counter++;
1216 	}
1217 	NETISR_RUNLOCK(&tracker);
1218 	KASSERT(counter <= NETISR_MAXPROT,
1219 	    ("sysctl_netisr_proto: counter too big (%d)", counter));
1220 	error = SYSCTL_OUT(req, snp_array, sizeof(*snp_array) * counter);
1221 	free(snp_array, M_TEMP);
1222 	return (error);
1223 }
1224 
1225 SYSCTL_PROC(_net_isr, OID_AUTO, proto,
1226     CTLFLAG_RD|CTLTYPE_STRUCT|CTLFLAG_MPSAFE, 0, 0, sysctl_netisr_proto,
1227     "S,sysctl_netisr_proto",
1228     "Return list of protocols registered with netisr");
1229 
1230 /*
1231  * Sysctl monitoring for netisr: query a list of workstreams.
1232  */
1233 static int
1234 sysctl_netisr_workstream(SYSCTL_HANDLER_ARGS)
1235 {
1236 	struct rm_priotracker tracker;
1237 	struct sysctl_netisr_workstream *snwsp, *snws_array;
1238 	struct netisr_workstream *nwsp;
1239 	u_int counter, cpuid;
1240 	int error;
1241 
1242 	if (req->newptr != NULL)
1243 		return (EINVAL);
1244 	snws_array = malloc(sizeof(*snws_array) * MAXCPU, M_TEMP,
1245 	    M_ZERO | M_WAITOK);
1246 	counter = 0;
1247 	NETISR_RLOCK(&tracker);
1248 	CPU_FOREACH(cpuid) {
1249 		nwsp = DPCPU_ID_PTR(cpuid, nws);
1250 		if (nwsp->nws_intr_event == NULL)
1251 			continue;
1252 		NWS_LOCK(nwsp);
1253 		snwsp = &snws_array[counter];
1254 		snwsp->snws_version = sizeof(*snwsp);
1255 
1256 		/*
1257 		 * For now, we equate workstream IDs and CPU IDs in the
1258 		 * kernel, but expose them independently to userspace in case
1259 		 * that assumption changes in the future.
1260 		 */
1261 		snwsp->snws_wsid = cpuid;
1262 		snwsp->snws_cpu = cpuid;
1263 		if (nwsp->nws_intr_event != NULL)
1264 			snwsp->snws_flags |= NETISR_SNWS_FLAGS_INTR;
1265 		NWS_UNLOCK(nwsp);
1266 		counter++;
1267 	}
1268 	NETISR_RUNLOCK(&tracker);
1269 	KASSERT(counter <= MAXCPU,
1270 	    ("sysctl_netisr_workstream: counter too big (%d)", counter));
1271 	error = SYSCTL_OUT(req, snws_array, sizeof(*snws_array) * counter);
1272 	free(snws_array, M_TEMP);
1273 	return (error);
1274 }
1275 
1276 SYSCTL_PROC(_net_isr, OID_AUTO, workstream,
1277     CTLFLAG_RD|CTLTYPE_STRUCT|CTLFLAG_MPSAFE, 0, 0, sysctl_netisr_workstream,
1278     "S,sysctl_netisr_workstream",
1279     "Return list of workstreams implemented by netisr");
1280 
1281 /*
1282  * Sysctl monitoring for netisr: query per-protocol data across all
1283  * workstreams.
1284  */
1285 static int
1286 sysctl_netisr_work(SYSCTL_HANDLER_ARGS)
1287 {
1288 	struct rm_priotracker tracker;
1289 	struct sysctl_netisr_work *snwp, *snw_array;
1290 	struct netisr_workstream *nwsp;
1291 	struct netisr_proto *npp;
1292 	struct netisr_work *nwp;
1293 	u_int counter, cpuid, proto;
1294 	int error;
1295 
1296 	if (req->newptr != NULL)
1297 		return (EINVAL);
1298 	snw_array = malloc(sizeof(*snw_array) * MAXCPU * NETISR_MAXPROT,
1299 	    M_TEMP, M_ZERO | M_WAITOK);
1300 	counter = 0;
1301 	NETISR_RLOCK(&tracker);
1302 	CPU_FOREACH(cpuid) {
1303 		nwsp = DPCPU_ID_PTR(cpuid, nws);
1304 		if (nwsp->nws_intr_event == NULL)
1305 			continue;
1306 		NWS_LOCK(nwsp);
1307 		for (proto = 0; proto < NETISR_MAXPROT; proto++) {
1308 			npp = &netisr_proto[proto];
1309 			if (npp->np_name == NULL)
1310 				continue;
1311 			nwp = &nwsp->nws_work[proto];
1312 			snwp = &snw_array[counter];
1313 			snwp->snw_version = sizeof(*snwp);
1314 			snwp->snw_wsid = cpuid;		/* See comment above. */
1315 			snwp->snw_proto = proto;
1316 			snwp->snw_len = nwp->nw_len;
1317 			snwp->snw_watermark = nwp->nw_watermark;
1318 			snwp->snw_dispatched = nwp->nw_dispatched;
1319 			snwp->snw_hybrid_dispatched =
1320 			    nwp->nw_hybrid_dispatched;
1321 			snwp->snw_qdrops = nwp->nw_qdrops;
1322 			snwp->snw_queued = nwp->nw_queued;
1323 			snwp->snw_handled = nwp->nw_handled;
1324 			counter++;
1325 		}
1326 		NWS_UNLOCK(nwsp);
1327 	}
1328 	KASSERT(counter <= MAXCPU * NETISR_MAXPROT,
1329 	    ("sysctl_netisr_work: counter too big (%d)", counter));
1330 	NETISR_RUNLOCK(&tracker);
1331 	error = SYSCTL_OUT(req, snw_array, sizeof(*snw_array) * counter);
1332 	free(snw_array, M_TEMP);
1333 	return (error);
1334 }
1335 
1336 SYSCTL_PROC(_net_isr, OID_AUTO, work,
1337     CTLFLAG_RD|CTLTYPE_STRUCT|CTLFLAG_MPSAFE, 0, 0, sysctl_netisr_work,
1338     "S,sysctl_netisr_work",
1339     "Return list of per-workstream, per-protocol work in netisr");
1340 
1341 #ifdef DDB
1342 DB_SHOW_COMMAND(netisr, db_show_netisr)
1343 {
1344 	struct netisr_workstream *nwsp;
1345 	struct netisr_work *nwp;
1346 	int first, proto;
1347 	u_int cpuid;
1348 
1349 	db_printf("%3s %6s %5s %5s %5s %8s %8s %8s %8s\n", "CPU", "Proto",
1350 	    "Len", "WMark", "Max", "Disp", "HDisp", "Drop", "Queue");
1351 	CPU_FOREACH(cpuid) {
1352 		nwsp = DPCPU_ID_PTR(cpuid, nws);
1353 		if (nwsp->nws_intr_event == NULL)
1354 			continue;
1355 		first = 1;
1356 		for (proto = 0; proto < NETISR_MAXPROT; proto++) {
1357 			if (netisr_proto[proto].np_handler == NULL)
1358 				continue;
1359 			nwp = &nwsp->nws_work[proto];
1360 			if (first) {
1361 				db_printf("%3d ", cpuid);
1362 				first = 0;
1363 			} else
1364 				db_printf("%3s ", "");
1365 			db_printf(
1366 			    "%6s %5d %5d %5d %8ju %8ju %8ju %8ju\n",
1367 			    netisr_proto[proto].np_name, nwp->nw_len,
1368 			    nwp->nw_watermark, nwp->nw_qlimit,
1369 			    nwp->nw_dispatched, nwp->nw_hybrid_dispatched,
1370 			    nwp->nw_qdrops, nwp->nw_queued);
1371 		}
1372 	}
1373 }
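/*
 * Editorial note: the table above is printed from the in-kernel debugger
 * with:
 *
 *	db> show netisr
 */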
1374 #endif
1375