xref: /dragonfly/sys/net/if_poll.c (revision 51871435)
1 /*-
2  * Copyright (c) 2001-2002 Luigi Rizzo
3  *
4  * Supported by: the Xorp Project (www.xorp.org)
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  * $FreeBSD: src/sys/kern/kern_poll.c,v 1.2.2.4 2002/06/27 23:26:33 luigi Exp $
28  */
29 
30 #include "opt_ifpoll.h"
31 
32 #include <sys/param.h>
33 #include <sys/kernel.h>
34 #include <sys/ktr.h>
35 #include <sys/malloc.h>
36 #include <sys/serialize.h>
37 #include <sys/socket.h>
38 #include <sys/sysctl.h>
39 
40 #include <sys/thread2.h>
41 #include <sys/msgport2.h>
42 
43 #include <machine/atomic.h>
44 #include <machine/clock.h>
45 #include <machine/smp.h>
46 
47 #include <net/if.h>
48 #include <net/if_poll.h>
49 #include <net/netmsg2.h>
50 
51 /*
52  * Polling support for network device drivers.
53  *
54  * Drivers which support this feature try to register one status polling
55  * handler and several TX/RX polling handlers with the polling code.
56  * If interface's if_npoll is called with non-NULL second argument, then
57  * a register operation is requested, else a deregister operation is
58  * requested.  If the requested operation is "register", the driver should
59  * set up the ifpoll_info passed in according to its own needs:
60  *   ifpoll_info.ifpi_status.status_func == NULL
61  *     No status polling handler will be installed on CPU(0)
62  *   ifpoll_info.ifpi_rx[n].poll_func == NULL
63  *     No RX polling handler will be installed on CPU(n)
64  *   ifpoll_info.ifpi_tx[n].poll_func == NULL
65  *     No TX polling handler will be installed on CPU(n)
66  *
67  * RX is polled at the specified polling frequency (net.ifpoll.X.pollhz).
68  * TX and status polling may be done at a lower frequency than the RX
69  * frequency (net.ifpoll.0.status_frac and net.ifpoll.X.tx_frac).  To avoid
70  * systimer staggering at high frequency, the RX systimer gives TX and
71  * status polling a piggyback (XXX).
72  *
73  * All of the registered polling handlers are called only if the interface
74  * is marked as 'IFF_RUNNING and IFF_NPOLLING'.  However, the interface's
75  * register and deregister function (ifnet.if_npoll) will be called even
76  * if the interface is not marked with 'IFF_RUNNING'.
77  *
78  * If registration is successful, the driver must disable interrupts,
79  * and further I/O is performed through the TX/RX polling handlers, which
80  * are invoked (at least once per clock tick) with 3 arguments: the "arg"
81  * passed at register time, a struct ifnet pointer, and a "count" limit.
82  * The registered serializer will be held before calling the related
83  * polling handler.
84  *
85  * The count limit specifies how much work the handler can do during the
86  * call -- typically this is the number of packets to be received, or
87  * transmitted, etc. (drivers are free to interpret this number, as long
88  * as the max time spent in the function grows roughly linearly with the
89  * count).
90  *
91  * A second variable controls the sharing of CPU between polling/kernel
92  * network processing, and other activities (typically userlevel tasks):
93  * net.ifpoll.X.{rx,tx}.user_frac (between 0 and 100, default 50) sets the
94  * share of CPU allocated to user tasks.  CPU is allocated proportionally
95  * to the shares, by dynamically adjusting the "count" (poll_burst).
96  *
97  * Other parameters should be left at their default values.
98  * The following constraints hold:
99  *
100  *	1 <= poll_burst <= poll_burst_max
101  *	1 <= poll_each_burst <= poll_burst_max
102  *	MIN_POLL_BURST_MAX <= poll_burst_max <= MAX_POLL_BURST_MAX
103  */
104 
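/*
 * Illustrative sketch (not compiled): one way a driver's ifnet.if_npoll
 * method could fill in the ifpoll_info described above.  All foo_* names
 * and softc fields are hypothetical and only serve as an example; a real
 * driver would also disable its interrupt on registration, re-enable it on
 * deregistration, and typically install RX/TX handlers on several CPUs
 * (one per ring) instead of only on CPU(0) as shown here.
 */
#if 0
static void	foo_npoll_status(struct ifnet *);		/* ifpoll_stfn_t */
static void	foo_npoll_rx(struct ifnet *, void *, int);	/* ifpoll_iofn_t */
static void	foo_npoll_tx(struct ifnet *, void *, int);	/* ifpoll_iofn_t */

static void
foo_npoll(struct ifnet *ifp, struct ifpoll_info *info)
{
	struct foo_softc *sc = ifp->if_softc;

	if (info != NULL) {
		/* Register */
		info->ifpi_status.status_func = foo_npoll_status;
		info->ifpi_status.serializer = &sc->foo_main_serialize;

		info->ifpi_rx[0].poll_func = foo_npoll_rx;
		info->ifpi_rx[0].arg = &sc->foo_rx_ring;
		info->ifpi_rx[0].serializer = &sc->foo_rx_serialize;

		info->ifpi_tx[0].poll_func = foo_npoll_tx;
		info->ifpi_tx[0].arg = &sc->foo_tx_ring;
		info->ifpi_tx[0].serializer = &sc->foo_tx_serialize;

		ifp->if_npoll_cpuid = 0;
	} else {
		/* Deregister */
		ifp->if_npoll_cpuid = -1;
	}
}
#endif
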
105 #define IFPOLL_LIST_LEN		128
106 #define IFPOLL_FREQ_MAX		30000
107 
108 #define MIN_IOPOLL_BURST_MAX	10
109 #define MAX_IOPOLL_BURST_MAX	5000
110 #define IOPOLL_BURST_MAX	375	/* good for 1000Mbit net and HZ=4000 */
111 
112 #define IOPOLL_EACH_BURST	50
113 #define IOPOLL_USER_FRAC	50
114 
115 #define IFPOLL_FREQ_DEFAULT	4000
116 
117 #define IFPOLL_TXFRAC_DEFAULT	1	/* 1/1 of the pollhz */
118 #define IFPOLL_STFRAC_DEFAULT	80	/* 1/80 of the pollhz */
119 
120 #define IFPOLL_RX		0x1
121 #define IFPOLL_TX		0x2
122 
123 union ifpoll_time {
124 	struct timeval		tv;
125 	uint64_t		tsc;
126 };
127 
128 struct iopoll_rec {
129 	struct lwkt_serialize	*serializer;
130 	struct ifnet		*ifp;
131 	void			*arg;
132 	ifpoll_iofn_t		poll_func;
133 };
134 
135 struct iopoll_ctx {
136 	union ifpoll_time	prev_t;
137 	u_long			short_ticks;		/* statistics */
138 	u_long			lost_polls;		/* statistics */
139 	u_long			suspect;		/* statistics */
140 	u_long			stalled;		/* statistics */
141 	uint32_t		pending_polls;		/* state */
142 
143 	struct netmsg_base	poll_netmsg;
144 	struct netmsg_base	poll_more_netmsg;
145 
146 	int			poll_cpuid;
147 	int			pollhz;
148 	uint32_t		phase;			/* state */
149 	int			residual_burst;		/* state */
150 	uint32_t		poll_each_burst;	/* tunable */
151 	union ifpoll_time	poll_start_t;		/* state */
152 
153 	uint32_t		poll_burst;		/* state */
154 	uint32_t		poll_burst_max;		/* tunable */
155 	uint32_t		user_frac;		/* tunable */
156 	uint32_t		kern_frac;		/* state */
157 
158 	uint32_t		poll_handlers; /* next free entry in pr[]. */
159 	struct iopoll_rec	pr[IFPOLL_LIST_LEN];
160 
161 	struct sysctl_ctx_list	poll_sysctl_ctx;
162 	struct sysctl_oid	*poll_sysctl_tree;
163 } __cachealign;
164 
165 struct poll_comm {
166 	struct systimer		pollclock;
167 	int			poll_cpuid;
168 
169 	int			stfrac_count;		/* state */
170 	int			poll_stfrac;		/* tunable */
171 
172 	int			txfrac_count;		/* state */
173 	int			poll_txfrac;		/* tunable */
174 
175 	int			pollhz;			/* tunable */
176 
177 	struct sysctl_ctx_list	sysctl_ctx;
178 	struct sysctl_oid	*sysctl_tree;
179 } __cachealign;
180 
181 struct stpoll_rec {
182 	struct lwkt_serialize	*serializer;
183 	struct ifnet		*ifp;
184 	ifpoll_stfn_t		status_func;
185 };
186 
187 struct stpoll_ctx {
188 	struct netmsg_base	poll_netmsg;
189 
190 	uint32_t		poll_handlers; /* next free entry in pr[]. */
191 	struct stpoll_rec	pr[IFPOLL_LIST_LEN];
192 
193 	struct sysctl_ctx_list	poll_sysctl_ctx;
194 	struct sysctl_oid	*poll_sysctl_tree;
195 } __cachealign;
196 
197 struct iopoll_sysctl_netmsg {
198 	struct netmsg_base	base;
199 	struct iopoll_ctx	*ctx;
200 };
201 
202 void		ifpoll_init_pcpu(int);
203 static void	ifpoll_register_handler(netmsg_t);
204 static void	ifpoll_deregister_handler(netmsg_t);
205 
206 /*
207  * Status polling
208  */
209 static void	stpoll_init(void);
210 static void	stpoll_handler(netmsg_t);
211 static void	stpoll_clock(struct stpoll_ctx *);
212 static int	stpoll_register(struct ifnet *, const struct ifpoll_status *);
213 static int	stpoll_deregister(struct ifnet *);
214 
215 /*
216  * RX/TX polling
217  */
218 static struct iopoll_ctx *iopoll_ctx_create(int, int);
219 static void	iopoll_init(int);
220 static void	rxpoll_handler(netmsg_t);
221 static void	txpoll_handler(netmsg_t);
222 static void	rxpollmore_handler(netmsg_t);
223 static void	txpollmore_handler(netmsg_t);
224 static void	iopoll_clock(struct iopoll_ctx *);
225 static int	iopoll_register(struct ifnet *, struct iopoll_ctx *,
226 		    const struct ifpoll_io *);
227 static int	iopoll_deregister(struct ifnet *, struct iopoll_ctx *);
228 
229 static void	iopoll_add_sysctl(struct sysctl_ctx_list *,
230 		    struct sysctl_oid_list *, struct iopoll_ctx *, int);
231 static void	sysctl_burstmax_handler(netmsg_t);
232 static int	sysctl_burstmax(SYSCTL_HANDLER_ARGS);
233 static void	sysctl_eachburst_handler(netmsg_t);
234 static int	sysctl_eachburst(SYSCTL_HANDLER_ARGS);
235 
236 /*
237  * Common functions
238  */
239 static void	poll_comm_init(int);
240 static void	poll_comm_start(int);
241 static void	poll_comm_adjust_pollhz(struct poll_comm *);
242 static void	poll_comm_systimer0(systimer_t, int, struct intrframe *);
243 static void	poll_comm_systimer(systimer_t, int, struct intrframe *);
244 static void	sysctl_pollhz_handler(netmsg_t);
245 static void	sysctl_stfrac_handler(netmsg_t);
246 static void	sysctl_txfrac_handler(netmsg_t);
247 static int	sysctl_pollhz(SYSCTL_HANDLER_ARGS);
248 static int	sysctl_stfrac(SYSCTL_HANDLER_ARGS);
249 static int	sysctl_txfrac(SYSCTL_HANDLER_ARGS);
250 static int	sysctl_compat_npoll_stfrac(SYSCTL_HANDLER_ARGS);
251 static int	sysctl_compat_npoll_cpuid(SYSCTL_HANDLER_ARGS);
252 
253 static struct stpoll_ctx	stpoll_context;
254 static struct poll_comm		*poll_common[MAXCPU];
255 static struct iopoll_ctx	*rxpoll_context[MAXCPU];
256 static struct iopoll_ctx	*txpoll_context[MAXCPU];
257 
258 SYSCTL_NODE(_net, OID_AUTO, ifpoll, CTLFLAG_RW, 0,
259 	    "Network device polling parameters");
260 
261 static int	iopoll_burst_max = IOPOLL_BURST_MAX;
262 static int	iopoll_each_burst = IOPOLL_EACH_BURST;
263 static int	iopoll_user_frac = IOPOLL_USER_FRAC;
264 
265 static int	ifpoll_pollhz = IFPOLL_FREQ_DEFAULT;
266 static int	ifpoll_stfrac = IFPOLL_STFRAC_DEFAULT;
267 static int	ifpoll_txfrac = IFPOLL_TXFRAC_DEFAULT;
268 
269 TUNABLE_INT("net.ifpoll.burst_max", &iopoll_burst_max);
270 TUNABLE_INT("net.ifpoll.each_burst", &iopoll_each_burst);
271 TUNABLE_INT("net.ifpoll.user_frac", &iopoll_user_frac);
272 TUNABLE_INT("net.ifpoll.pollhz", &ifpoll_pollhz);
273 TUNABLE_INT("net.ifpoll.status_frac", &ifpoll_stfrac);
274 TUNABLE_INT("net.ifpoll.tx_frac", &ifpoll_txfrac);
275 
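/*
 * Per-cpu frequency skew: each CPU's effective pollhz is offset by a small
 * cpuid-dependent amount, presumably so that the per-cpu polling systimers
 * do not all fire in lockstep.  poll_comm_pollhz_div() applies the offset
 * when a requested frequency is stored; poll_comm_pollhz_conv() removes it
 * again when the value is reported back through the sysctl.
 */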
276 #define IFPOLL_FREQ_ADJ(comm)	(((comm)->poll_cpuid * 3) % 50)
277 
278 static __inline int
279 poll_comm_pollhz_div(const struct poll_comm *comm, int pollhz)
280 {
281 	return pollhz + IFPOLL_FREQ_ADJ(comm);
282 }
283 
284 static __inline int
285 poll_comm_pollhz_conv(const struct poll_comm *comm, int pollhz)
286 {
287 	return pollhz - IFPOLL_FREQ_ADJ(comm);
288 }
289 
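/*
 * (Re)send the given poll netmsg to the current CPU's netisr port, but only
 * if the previous message has been replied to (MSGF_DONE), so at most one
 * instance of each poll message is ever in flight.
 */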
290 static __inline void
291 ifpoll_sendmsg_oncpu(netmsg_t msg)
292 {
293 	if (msg->lmsg.ms_flags & MSGF_DONE)
294 		lwkt_sendmsg(netisr_portfn(mycpuid), &msg->lmsg);
295 }
296 
297 static __inline void
298 sched_stpoll(struct stpoll_ctx *st_ctx)
299 {
300 	ifpoll_sendmsg_oncpu((netmsg_t)&st_ctx->poll_netmsg);
301 }
302 
303 static __inline void
304 sched_iopoll(struct iopoll_ctx *io_ctx)
305 {
306 	ifpoll_sendmsg_oncpu((netmsg_t)&io_ctx->poll_netmsg);
307 }
308 
309 static __inline void
310 sched_iopollmore(struct iopoll_ctx *io_ctx)
311 {
312 	ifpoll_sendmsg_oncpu((netmsg_t)&io_ctx->poll_more_netmsg);
313 }
314 
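/*
 * Take a timestamp: use the TSC when it is present, otherwise fall back to
 * microuptime().
 */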
315 static __inline void
316 ifpoll_time_get(union ifpoll_time *t)
317 {
318 	if (__predict_true(tsc_present))
319 		t->tsc = rdtsc();
320 	else
321 		microuptime(&t->tv);
322 }
323 
324 /* Return time diff in us */
325 static __inline int
326 ifpoll_time_diff(const union ifpoll_time *s, const union ifpoll_time *e)
327 {
328 	if (__predict_true(tsc_present)) {
329 		return (((e->tsc - s->tsc) * 1000000) / tsc_frequency);
330 	} else {
331 		return ((e->tv.tv_usec - s->tv.tv_usec) +
332 			(e->tv.tv_sec - s->tv.tv_sec) * 1000000);
333 	}
334 }
335 
336 /*
337  * Initialize per-cpu polling(4) context.  Called from kern_clock.c.
338  */
339 void
340 ifpoll_init_pcpu(int cpuid)
341 {
342 	if (cpuid >= ncpus2)
343 		return;
344 
345 	poll_comm_init(cpuid);
346 
347 	if (cpuid == 0)
348 		stpoll_init();
349 	iopoll_init(cpuid);
350 
351 	poll_comm_start(cpuid);
352 }
353 
354 int
355 ifpoll_register(struct ifnet *ifp)
356 {
357 	struct ifpoll_info *info;
358 	struct netmsg_base nmsg;
359 	int error;
360 
361 	if (ifp->if_npoll == NULL) {
362 		/* Device does not support polling */
363 		return EOPNOTSUPP;
364 	}
365 
366 	info = kmalloc(sizeof(*info), M_TEMP, M_WAITOK | M_ZERO);
367 
368 	/*
369 	 * Attempt to register.  Interlock with IFF_NPOLLING.
370 	 */
371 
372 	ifnet_serialize_all(ifp);
373 
374 	if (ifp->if_flags & IFF_NPOLLING) {
375 		/* Already polling */
376 		ifnet_deserialize_all(ifp);
377 		kfree(info, M_TEMP);
378 		return EBUSY;
379 	}
380 
381 	info->ifpi_ifp = ifp;
382 
383 	ifp->if_flags |= IFF_NPOLLING;
384 	ifp->if_npoll(ifp, info);
385 	KASSERT(ifp->if_npoll_cpuid >= 0, ("invalid npoll cpuid"));
386 
387 	ifnet_deserialize_all(ifp);
388 
389 	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
390 		    0, ifpoll_register_handler);
391 	nmsg.lmsg.u.ms_resultp = info;
392 
393 	error = lwkt_domsg(netisr_portfn(0), &nmsg.lmsg, 0);
394 	if (error) {
395 		if (!ifpoll_deregister(ifp)) {
396 			if_printf(ifp, "ifpoll_register: "
397 				  "ifpoll_deregister failed!\n");
398 		}
399 	}
400 
401 	kfree(info, M_TEMP);
402 	return error;
403 }
404 
405 int
406 ifpoll_deregister(struct ifnet *ifp)
407 {
408 	struct netmsg_base nmsg;
409 	int error;
410 
411 	if (ifp->if_npoll == NULL)
412 		return EOPNOTSUPP;
413 
414 	ifnet_serialize_all(ifp);
415 
416 	if ((ifp->if_flags & IFF_NPOLLING) == 0) {
417 		ifnet_deserialize_all(ifp);
418 		return EINVAL;
419 	}
420 	ifp->if_flags &= ~IFF_NPOLLING;
421 
422 	ifnet_deserialize_all(ifp);
423 
424 	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
425 		    0, ifpoll_deregister_handler);
426 	nmsg.lmsg.u.ms_resultp = ifp;
427 
428 	error = lwkt_domsg(netisr_portfn(0), &nmsg.lmsg, 0);
429 	if (!error) {
430 		ifnet_serialize_all(ifp);
431 		ifp->if_npoll(ifp, NULL);
432 		KASSERT(ifp->if_npoll_cpuid < 0, ("invalid npoll cpuid"));
433 		ifnet_deserialize_all(ifp);
434 	}
435 	return error;
436 }
437 
438 static void
439 ifpoll_register_handler(netmsg_t nmsg)
440 {
441 	const struct ifpoll_info *info = nmsg->lmsg.u.ms_resultp;
442 	int cpuid = mycpuid, nextcpu;
443 	int error;
444 
445 	KKASSERT(cpuid < ncpus2);
446 	KKASSERT(&curthread->td_msgport == netisr_portfn(cpuid));
447 
448 	if (cpuid == 0) {
449 		error = stpoll_register(info->ifpi_ifp, &info->ifpi_status);
450 		if (error)
451 			goto failed;
452 	}
453 
454 	error = iopoll_register(info->ifpi_ifp, rxpoll_context[cpuid],
455 				&info->ifpi_rx[cpuid]);
456 	if (error)
457 		goto failed;
458 
459 	error = iopoll_register(info->ifpi_ifp, txpoll_context[cpuid],
460 				&info->ifpi_tx[cpuid]);
461 	if (error)
462 		goto failed;
463 
464 	/* Adjust polling frequency, after all registration is done */
465 	poll_comm_adjust_pollhz(poll_common[cpuid]);
466 
467 	nextcpu = cpuid + 1;
468 	if (nextcpu < ncpus2)
469 		lwkt_forwardmsg(netisr_portfn(nextcpu), &nmsg->lmsg);
470 	else
471 		lwkt_replymsg(&nmsg->lmsg, 0);
472 	return;
473 failed:
474 	lwkt_replymsg(&nmsg->lmsg, error);
475 }
476 
477 static void
478 ifpoll_deregister_handler(netmsg_t nmsg)
479 {
480 	struct ifnet *ifp = nmsg->lmsg.u.ms_resultp;
481 	int cpuid = mycpuid, nextcpu;
482 
483 	KKASSERT(cpuid < ncpus2);
484 	KKASSERT(&curthread->td_msgport == netisr_portfn(cpuid));
485 
486 	/* Ignore errors */
487 	if (cpuid == 0)
488 		stpoll_deregister(ifp);
489 	iopoll_deregister(ifp, rxpoll_context[cpuid]);
490 	iopoll_deregister(ifp, txpoll_context[cpuid]);
491 
492 	/* Adjust polling frequency, after all deregistration is done */
493 	poll_comm_adjust_pollhz(poll_common[cpuid]);
494 
495 	nextcpu = cpuid + 1;
496 	if (nextcpu < ncpus2)
497 		lwkt_forwardmsg(netisr_portfn(nextcpu), &nmsg->lmsg);
498 	else
499 		lwkt_replymsg(&nmsg->lmsg, 0);
500 }
501 
502 static void
503 stpoll_init(void)
504 {
505 	struct stpoll_ctx *st_ctx = &stpoll_context;
506 	const struct poll_comm *comm = poll_common[0];
507 
508 	sysctl_ctx_init(&st_ctx->poll_sysctl_ctx);
509 	st_ctx->poll_sysctl_tree = SYSCTL_ADD_NODE(&st_ctx->poll_sysctl_ctx,
510 				   SYSCTL_CHILDREN(comm->sysctl_tree),
511 				   OID_AUTO, "status", CTLFLAG_RD, 0, "");
512 
513 	SYSCTL_ADD_UINT(&st_ctx->poll_sysctl_ctx,
514 			SYSCTL_CHILDREN(st_ctx->poll_sysctl_tree),
515 			OID_AUTO, "handlers", CTLFLAG_RD,
516 			&st_ctx->poll_handlers, 0,
517 			"Number of registered status poll handlers");
518 
519 	netmsg_init(&st_ctx->poll_netmsg, NULL, &netisr_adone_rport,
520 		    0, stpoll_handler);
521 }
522 
523 /*
524  * stpoll_handler is scheduled by sched_stpoll when appropriate, typically
525  * once per polling systimer tick.
526  */
527 static void
528 stpoll_handler(netmsg_t msg)
529 {
530 	struct stpoll_ctx *st_ctx = &stpoll_context;
531 	struct thread *td = curthread;
532 	int i;
533 
534 	KKASSERT(&td->td_msgport == netisr_portfn(0));
535 
536 	crit_enter_quick(td);
537 
538 	/* Reply ASAP */
539 	lwkt_replymsg(&msg->lmsg, 0);
540 
541 	if (st_ctx->poll_handlers == 0) {
542 		crit_exit_quick(td);
543 		return;
544 	}
545 
546 	for (i = 0; i < st_ctx->poll_handlers; ++i) {
547 		const struct stpoll_rec *rec = &st_ctx->pr[i];
548 		struct ifnet *ifp = rec->ifp;
549 
550 		if (!lwkt_serialize_try(rec->serializer))
551 			continue;
552 
553 		if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
554 		    (IFF_RUNNING | IFF_NPOLLING))
555 			rec->status_func(ifp);
556 
557 		lwkt_serialize_exit(rec->serializer);
558 	}
559 
560 	crit_exit_quick(td);
561 }
562 
563 /*
564  * Hook from status poll systimer.  Tries to schedule a status poll.
565  * NOTE: Caller should hold critical section.
566  */
567 static void
568 stpoll_clock(struct stpoll_ctx *st_ctx)
569 {
570 	KKASSERT(mycpuid == 0);
571 
572 	if (st_ctx->poll_handlers == 0)
573 		return;
574 	sched_stpoll(st_ctx);
575 }
576 
577 static int
578 stpoll_register(struct ifnet *ifp, const struct ifpoll_status *st_rec)
579 {
580 	struct stpoll_ctx *st_ctx = &stpoll_context;
581 	int error;
582 
583 	KKASSERT(&curthread->td_msgport == netisr_portfn(0));
584 
585 	if (st_rec->status_func == NULL)
586 		return 0;
587 
588 	/*
589 	 * Check if there is room.
590 	 */
591 	if (st_ctx->poll_handlers >= IFPOLL_LIST_LEN) {
592 		/*
593 		 * List full, cannot register more entries.
594 		 * This should never happen; if it does, it is probably a
595 		 * broken driver trying to register multiple times. Checking
596 		 * this at runtime is expensive, and won't solve the problem
597 		 * anyway, so just report a few times and then give up.
598 		 */
599 		static int verbose = 10; /* XXX */
600 
601 		if (verbose > 0) {
602 			kprintf("status poll handlers list full, "
603 				"maybe a broken driver?\n");
604 			verbose--;
605 		}
606 		error = ENOENT;
607 	} else {
608 		struct stpoll_rec *rec = &st_ctx->pr[st_ctx->poll_handlers];
609 
610 		rec->ifp = ifp;
611 		rec->serializer = st_rec->serializer;
612 		rec->status_func = st_rec->status_func;
613 
614 		st_ctx->poll_handlers++;
615 		error = 0;
616 	}
617 	return error;
618 }
619 
620 static int
621 stpoll_deregister(struct ifnet *ifp)
622 {
623 	struct stpoll_ctx *st_ctx = &stpoll_context;
624 	int i, error;
625 
626 	KKASSERT(&curthread->td_msgport == netisr_portfn(0));
627 
628 	for (i = 0; i < st_ctx->poll_handlers; ++i) {
629 		if (st_ctx->pr[i].ifp == ifp) /* Found it */
630 			break;
631 	}
632 	if (i == st_ctx->poll_handlers) {
633 		error = ENOENT;
634 	} else {
635 		st_ctx->poll_handlers--;
636 		if (i < st_ctx->poll_handlers) {
637 			/* Last entry replaces this one. */
638 			st_ctx->pr[i] = st_ctx->pr[st_ctx->poll_handlers];
639 		}
640 		error = 0;
641 	}
642 	return error;
643 }
644 
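/*
 * Reset the dynamic polling state (burst size, phase, pending counter and
 * timestamps) to its initial values.  Called when an iopoll context is
 * created and when its last handler is deregistered.
 */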
645 static __inline void
646 iopoll_reset_state(struct iopoll_ctx *io_ctx)
647 {
648 	crit_enter();
649 	io_ctx->poll_burst = io_ctx->poll_each_burst;
650 	io_ctx->pending_polls = 0;
651 	io_ctx->residual_burst = 0;
652 	io_ctx->phase = 0;
653 	io_ctx->kern_frac = 0;
654 	bzero(&io_ctx->poll_start_t, sizeof(io_ctx->poll_start_t));
655 	bzero(&io_ctx->prev_t, sizeof(io_ctx->prev_t));
656 	crit_exit();
657 }
658 
659 static void
660 iopoll_init(int cpuid)
661 {
662 	KKASSERT(cpuid < ncpus2);
663 
664 	rxpoll_context[cpuid] = iopoll_ctx_create(cpuid, IFPOLL_RX);
665 	txpoll_context[cpuid] = iopoll_ctx_create(cpuid, IFPOLL_TX);
666 }
667 
668 static struct iopoll_ctx *
669 iopoll_ctx_create(int cpuid, int poll_type)
670 {
671 	struct poll_comm *comm;
672 	struct iopoll_ctx *io_ctx;
673 	const char *poll_type_str;
674 	netisr_fn_t handler, more_handler;
675 
676 	KKASSERT(poll_type == IFPOLL_RX || poll_type == IFPOLL_TX);
677 
678 	/*
679 	 * Make sure that tunables are in sane state
680 	 */
681 	if (iopoll_burst_max < MIN_IOPOLL_BURST_MAX)
682 		iopoll_burst_max = MIN_IOPOLL_BURST_MAX;
683 	else if (iopoll_burst_max > MAX_IOPOLL_BURST_MAX)
684 		iopoll_burst_max = MAX_IOPOLL_BURST_MAX;
685 
686 	if (iopoll_each_burst > iopoll_burst_max)
687 		iopoll_each_burst = iopoll_burst_max;
688 
689 	comm = poll_common[cpuid];
690 
691 	/*
692 	 * Create the per-cpu polling context
693 	 */
694 	io_ctx = kmalloc_cachealign(sizeof(*io_ctx), M_DEVBUF,
695 	    M_WAITOK | M_ZERO);
696 
697 	io_ctx->poll_each_burst = iopoll_each_burst;
698 	io_ctx->poll_burst_max = iopoll_burst_max;
699 	io_ctx->user_frac = iopoll_user_frac;
700 	if (poll_type == IFPOLL_RX)
701 		io_ctx->pollhz = comm->pollhz;
702 	else
703 		io_ctx->pollhz = comm->pollhz / (comm->poll_txfrac + 1);
704 	io_ctx->poll_cpuid = cpuid;
705 	iopoll_reset_state(io_ctx);
706 
707 	if (poll_type == IFPOLL_RX) {
708 		handler = rxpoll_handler;
709 		more_handler = rxpollmore_handler;
710 	} else {
711 		handler = txpoll_handler;
712 		more_handler = txpollmore_handler;
713 	}
714 
715 	netmsg_init(&io_ctx->poll_netmsg, NULL, &netisr_adone_rport,
716 	    0, handler);
717 	io_ctx->poll_netmsg.lmsg.u.ms_resultp = io_ctx;
718 
719 	netmsg_init(&io_ctx->poll_more_netmsg, NULL, &netisr_adone_rport,
720 	    0, more_handler);
721 	io_ctx->poll_more_netmsg.lmsg.u.ms_resultp = io_ctx;
722 
723 	/*
724 	 * Initialize per-cpu sysctl nodes
725 	 */
726 	if (poll_type == IFPOLL_RX)
727 		poll_type_str = "rx";
728 	else
729 		poll_type_str = "tx";
730 
731 	sysctl_ctx_init(&io_ctx->poll_sysctl_ctx);
732 	io_ctx->poll_sysctl_tree = SYSCTL_ADD_NODE(&io_ctx->poll_sysctl_ctx,
733 				   SYSCTL_CHILDREN(comm->sysctl_tree),
734 				   OID_AUTO, poll_type_str, CTLFLAG_RD, 0, "");
735 	iopoll_add_sysctl(&io_ctx->poll_sysctl_ctx,
736 	    SYSCTL_CHILDREN(io_ctx->poll_sysctl_tree), io_ctx, poll_type);
737 
738 	return io_ctx;
739 }
740 
741 /*
742  * Hook from iopoll systimer.  Tries to schedule an iopoll, but keeps
743  * track of lost ticks due to the previous handler taking too long.
744  * Normally, this should not happen, because a polling handler should
745  * run for a short time.  However, in some cases (e.g. when there are
746  * changes in link status etc.) the drivers take a very long time
747  * (even in the order of milliseconds) to reset and reconfigure the
748  * device, causing apparent lost polls.
749  *
750  * The first part of the code is just for debugging purposes, and tries
751  * to count how often hardclock ticks are shorter than they should be,
752  * meaning either stray interrupts or delayed events.
753  *
754  * WARNING! Called from fastint or IPI; the MP lock might not be held.
755  * NOTE: Caller should hold critical section.
756  */
757 static void
758 iopoll_clock(struct iopoll_ctx *io_ctx)
759 {
760 	union ifpoll_time t;
761 	int delta;
762 
763 	KKASSERT(mycpuid == io_ctx->poll_cpuid);
764 
765 	if (io_ctx->poll_handlers == 0)
766 		return;
767 
768 	ifpoll_time_get(&t);
769 	delta = ifpoll_time_diff(&io_ctx->prev_t, &t);
770 	if (delta * io_ctx->pollhz < 500000)
771 		io_ctx->short_ticks++;
772 	else
773 		io_ctx->prev_t = t;
774 
775 	if (io_ctx->pending_polls > 100) {
776 		/*
777 		 * Too much, assume it has stalled (not always true,
778 		 * see comment above).
779 		 */
780 		io_ctx->stalled++;
781 		io_ctx->pending_polls = 0;
782 		io_ctx->phase = 0;
783 	}
784 
785 	if (io_ctx->phase <= 2) {
786 		if (io_ctx->phase != 0)
787 			io_ctx->suspect++;
788 		io_ctx->phase = 1;
789 		sched_iopoll(io_ctx);
790 		io_ctx->phase = 2;
791 	}
792 	if (io_ctx->pending_polls++ > 0)
793 		io_ctx->lost_polls++;
794 }
795 
796 /*
797  * rxpoll_handler and txpoll_handler are scheduled by sched_iopoll when
798  * appropriate, typically once per polling systimer tick.
799  *
800  * Note that the message is replied immediately in order to allow a new
801  * ISR to be scheduled in the handler.
802  */
803 static void
804 rxpoll_handler(netmsg_t msg)
805 {
806 	struct iopoll_ctx *io_ctx;
807 	struct thread *td = curthread;
808 	int i, cycles;
809 
810 	io_ctx = msg->lmsg.u.ms_resultp;
811 	KKASSERT(&td->td_msgport == netisr_portfn(io_ctx->poll_cpuid));
812 
813 	crit_enter_quick(td);
814 
815 	/* Reply ASAP */
816 	lwkt_replymsg(&msg->lmsg, 0);
817 
818 	if (io_ctx->poll_handlers == 0) {
819 		crit_exit_quick(td);
820 		return;
821 	}
822 
823 	io_ctx->phase = 3;
824 	if (io_ctx->residual_burst == 0) {
825 		/* First call in this tick */
826 		ifpoll_time_get(&io_ctx->poll_start_t);
827 		io_ctx->residual_burst = io_ctx->poll_burst;
828 	}
829 	cycles = (io_ctx->residual_burst < io_ctx->poll_each_burst) ?
830 		 io_ctx->residual_burst : io_ctx->poll_each_burst;
831 	io_ctx->residual_burst -= cycles;
832 
833 	for (i = 0; i < io_ctx->poll_handlers; i++) {
834 		const struct iopoll_rec *rec = &io_ctx->pr[i];
835 		struct ifnet *ifp = rec->ifp;
836 
837 		if (!lwkt_serialize_try(rec->serializer))
838 			continue;
839 
840 		if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
841 		    (IFF_RUNNING | IFF_NPOLLING))
842 			rec->poll_func(ifp, rec->arg, cycles);
843 
844 		lwkt_serialize_exit(rec->serializer);
845 	}
846 
847 	/*
848 	 * Do a quick exit/enter to catch any higher-priority
849 	 * interrupt sources.
850 	 */
851 	crit_exit_quick(td);
852 	crit_enter_quick(td);
853 
854 	sched_iopollmore(io_ctx);
855 	io_ctx->phase = 4;
856 
857 	crit_exit_quick(td);
858 }
859 
860 static void
861 txpoll_handler(netmsg_t msg)
862 {
863 	struct iopoll_ctx *io_ctx;
864 	struct thread *td = curthread;
865 	int i;
866 
867 	io_ctx = msg->lmsg.u.ms_resultp;
868 	KKASSERT(&td->td_msgport == netisr_portfn(io_ctx->poll_cpuid));
869 
870 	crit_enter_quick(td);
871 
872 	/* Reply ASAP */
873 	lwkt_replymsg(&msg->lmsg, 0);
874 
875 	if (io_ctx->poll_handlers == 0) {
876 		crit_exit_quick(td);
877 		return;
878 	}
879 
880 	io_ctx->phase = 3;
881 
882 	for (i = 0; i < io_ctx->poll_handlers; i++) {
883 		const struct iopoll_rec *rec = &io_ctx->pr[i];
884 		struct ifnet *ifp = rec->ifp;
885 
886 		if (!lwkt_serialize_try(rec->serializer))
887 			continue;
888 
889 		if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
890 		    (IFF_RUNNING | IFF_NPOLLING))
891 			rec->poll_func(ifp, rec->arg, -1);
892 
893 		lwkt_serialize_exit(rec->serializer);
894 	}
895 
896 	/*
897 	 * Do a quick exit/enter to catch any higher-priority
898 	 * interrupt sources.
899 	 */
900 	crit_exit_quick(td);
901 	crit_enter_quick(td);
902 
903 	sched_iopollmore(io_ctx);
904 	io_ctx->phase = 4;
905 
906 	crit_exit_quick(td);
907 }
908 
909 /*
910  * rxpollmore_handler and txpollmore_handler are called after other netisr's,
911  * possibly scheduling another rxpoll_handler or txpoll_handler call, or
912  * adapting the burst size for the next cycle.
913  *
914  * It is very bad to fetch large bursts of packets from a single card at once,
915  * because the burst could take a long time to be completely processed, leading
916  * to unfairness.  To reduce the problem, and also to account better for time
917  * spent in network-related processing, we split the burst into smaller chunks
918  * of fixed size, giving control to the other netisr's between chunks.  This
919  * helps in improving the fairness, reducing livelock and accounting for the
920  * work performed in low level handling.
921  */
922 static void
923 rxpollmore_handler(netmsg_t msg)
924 {
925 	struct thread *td = curthread;
926 	struct iopoll_ctx *io_ctx;
927 	union ifpoll_time t;
928 	int kern_load;
929 	uint32_t pending_polls;
930 
931 	io_ctx = msg->lmsg.u.ms_resultp;
932 	KKASSERT(&td->td_msgport == netisr_portfn(io_ctx->poll_cpuid));
933 
934 	crit_enter_quick(td);
935 
936 	/* Reply ASAP */
937 	lwkt_replymsg(&msg->lmsg, 0);
938 
939 	if (io_ctx->poll_handlers == 0) {
940 		crit_exit_quick(td);
941 		return;
942 	}
943 
944 	io_ctx->phase = 5;
945 	if (io_ctx->residual_burst > 0) {
946 		sched_iopoll(io_ctx);
947 		crit_exit_quick(td);
948 		/* Will run immediately on return, followed by netisrs */
949 		return;
950 	}
951 
952 	/* Here we can account for the time spent in iopolls during this tick */
953 	ifpoll_time_get(&t);
954 	kern_load = ifpoll_time_diff(&io_ctx->poll_start_t, &t);
955 	kern_load = (kern_load * io_ctx->pollhz) / 10000; /* 0..100 */
956 	io_ctx->kern_frac = kern_load;
957 
958 	if (kern_load > (100 - io_ctx->user_frac)) {
959 		/* Kernel load too high; try to decrease the burst size */
960 		if (io_ctx->poll_burst > 1)
961 			io_ctx->poll_burst--;
962 	} else {
963 		if (io_ctx->poll_burst < io_ctx->poll_burst_max)
964 			io_ctx->poll_burst++;
965 	}
966 
967 	io_ctx->pending_polls--;
968 	pending_polls = io_ctx->pending_polls;
969 
970 	if (pending_polls == 0) {
971 		/* We are done */
972 		io_ctx->phase = 0;
973 	} else {
974 		/*
975 		 * Last cycle was long and caused us to miss one or more
976 		 * hardclock ticks.  Restart processing, but slightly
977 		 * reduce the burst size to prevent this from happening again.
978 		 */
979 		io_ctx->poll_burst -= (io_ctx->poll_burst / 8);
980 		if (io_ctx->poll_burst < 1)
981 			io_ctx->poll_burst = 1;
982 		sched_iopoll(io_ctx);
983 		io_ctx->phase = 6;
984 	}
985 
986 	crit_exit_quick(td);
987 }
988 
989 static void
990 txpollmore_handler(netmsg_t msg)
991 {
992 	struct thread *td = curthread;
993 	struct iopoll_ctx *io_ctx;
994 	uint32_t pending_polls;
995 
996 	io_ctx = msg->lmsg.u.ms_resultp;
997 	KKASSERT(&td->td_msgport == netisr_portfn(io_ctx->poll_cpuid));
998 
999 	crit_enter_quick(td);
1000 
1001 	/* Reply ASAP */
1002 	lwkt_replymsg(&msg->lmsg, 0);
1003 
1004 	if (io_ctx->poll_handlers == 0) {
1005 		crit_exit_quick(td);
1006 		return;
1007 	}
1008 
1009 	io_ctx->phase = 5;
1010 
1011 	io_ctx->pending_polls--;
1012 	pending_polls = io_ctx->pending_polls;
1013 
1014 	if (pending_polls == 0) {
1015 		/* We are done */
1016 		io_ctx->phase = 0;
1017 	} else {
1018 		/*
1019 		 * Last cycle was long and caused us to miss one or more
1020 		 * hardclock ticks.  Restart processing.
1021 		 */
1022 		sched_iopoll(io_ctx);
1023 		io_ctx->phase = 6;
1024 	}
1025 
1026 	crit_exit_quick(td);
1027 }
1028 
1029 static void
1030 iopoll_add_sysctl(struct sysctl_ctx_list *ctx, struct sysctl_oid_list *parent,
1031     struct iopoll_ctx *io_ctx, int poll_type)
1032 {
1033 	if (poll_type == IFPOLL_RX) {
1034 		SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "burst_max",
1035 		    CTLTYPE_UINT | CTLFLAG_RW, io_ctx, 0, sysctl_burstmax,
1036 		    "IU", "Max Polling burst size");
1037 
1038 		SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "each_burst",
1039 		    CTLTYPE_UINT | CTLFLAG_RW, io_ctx, 0, sysctl_eachburst,
1040 		    "IU", "Max size of each burst");
1041 
1042 		SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "burst", CTLFLAG_RD,
1043 		    &io_ctx->poll_burst, 0, "Current polling burst size");
1044 
1045 		SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "user_frac", CTLFLAG_RW,
1046 		    &io_ctx->user_frac, 0, "Desired user fraction of cpu time");
1047 
1048 		SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "kern_frac", CTLFLAG_RD,
1049 		    &io_ctx->kern_frac, 0, "Kernel fraction of cpu time");
1050 
1051 		SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "residual_burst", CTLFLAG_RD,
1052 		    &io_ctx->residual_burst, 0,
1053 		    "# of residual cycles in burst");
1054 	}
1055 
1056 	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "phase", CTLFLAG_RD,
1057 	    &io_ctx->phase, 0, "Polling phase");
1058 
1059 	SYSCTL_ADD_ULONG(ctx, parent, OID_AUTO, "suspect", CTLFLAG_RW,
1060 	    &io_ctx->suspect, "Suspected events");
1061 
1062 	SYSCTL_ADD_ULONG(ctx, parent, OID_AUTO, "stalled", CTLFLAG_RW,
1063 	    &io_ctx->stalled, "Potential stalls");
1064 
1065 	SYSCTL_ADD_ULONG(ctx, parent, OID_AUTO, "short_ticks", CTLFLAG_RW,
1066 	    &io_ctx->short_ticks,
1067 	    "Hardclock ticks shorter than they should be");
1068 
1069 	SYSCTL_ADD_ULONG(ctx, parent, OID_AUTO, "lost_polls", CTLFLAG_RW,
1070 	    &io_ctx->lost_polls,
1071 	    "How many times we would have lost a poll tick");
1072 
1073 	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "pending_polls", CTLFLAG_RD,
1074 	    &io_ctx->pending_polls, 0, "Do we need to poll again");
1075 
1076 	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "handlers", CTLFLAG_RD,
1077 	    &io_ctx->poll_handlers, 0, "Number of registered poll handlers");
1078 }
1079 
1080 static void
1081 sysctl_burstmax_handler(netmsg_t nmsg)
1082 {
1083 	struct iopoll_sysctl_netmsg *msg = (struct iopoll_sysctl_netmsg *)nmsg;
1084 	struct iopoll_ctx *io_ctx;
1085 
1086 	io_ctx = msg->ctx;
1087 	KKASSERT(&curthread->td_msgport == netisr_portfn(io_ctx->poll_cpuid));
1088 
1089 	io_ctx->poll_burst_max = nmsg->lmsg.u.ms_result;
1090 	if (io_ctx->poll_each_burst > io_ctx->poll_burst_max)
1091 		io_ctx->poll_each_burst = io_ctx->poll_burst_max;
1092 	if (io_ctx->poll_burst > io_ctx->poll_burst_max)
1093 		io_ctx->poll_burst = io_ctx->poll_burst_max;
1094 	if (io_ctx->residual_burst > io_ctx->poll_burst_max)
1095 		io_ctx->residual_burst = io_ctx->poll_burst_max;
1096 
1097 	lwkt_replymsg(&nmsg->lmsg, 0);
1098 }
1099 
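/*
 * Userland side of the burst_max sysctl: clamp the new value, then hand it
 * to the owning netisr cpu via a netmsg so the tunable is only modified
 * from that cpu's netisr thread.
 */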
1100 static int
1101 sysctl_burstmax(SYSCTL_HANDLER_ARGS)
1102 {
1103 	struct iopoll_ctx *io_ctx = arg1;
1104 	struct iopoll_sysctl_netmsg msg;
1105 	uint32_t burst_max;
1106 	int error;
1107 
1108 	burst_max = io_ctx->poll_burst_max;
1109 	error = sysctl_handle_int(oidp, &burst_max, 0, req);
1110 	if (error || req->newptr == NULL)
1111 		return error;
1112 	if (burst_max < MIN_IOPOLL_BURST_MAX)
1113 		burst_max = MIN_IOPOLL_BURST_MAX;
1114 	else if (burst_max > MAX_IOPOLL_BURST_MAX)
1115 		burst_max = MAX_IOPOLL_BURST_MAX;
1116 
1117 	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
1118 		    0, sysctl_burstmax_handler);
1119 	msg.base.lmsg.u.ms_result = burst_max;
1120 	msg.ctx = io_ctx;
1121 
1122 	return lwkt_domsg(netisr_portfn(io_ctx->poll_cpuid), &msg.base.lmsg, 0);
1123 }
1124 
1125 static void
1126 sysctl_eachburst_handler(netmsg_t nmsg)
1127 {
1128 	struct iopoll_sysctl_netmsg *msg = (struct iopoll_sysctl_netmsg *)nmsg;
1129 	struct iopoll_ctx *io_ctx;
1130 	uint32_t each_burst;
1131 
1132 	io_ctx = msg->ctx;
1133 	KKASSERT(&curthread->td_msgport == netisr_portfn(io_ctx->poll_cpuid));
1134 
1135 	each_burst = nmsg->lmsg.u.ms_result;
1136 	if (each_burst > io_ctx->poll_burst_max)
1137 		each_burst = io_ctx->poll_burst_max;
1138 	else if (each_burst < 1)
1139 		each_burst = 1;
1140 	io_ctx->poll_each_burst = each_burst;
1141 
1142 	lwkt_replymsg(&nmsg->lmsg, 0);
1143 }
1144 
1145 static int
1146 sysctl_eachburst(SYSCTL_HANDLER_ARGS)
1147 {
1148 	struct iopoll_ctx *io_ctx = arg1;
1149 	struct iopoll_sysctl_netmsg msg;
1150 	uint32_t each_burst;
1151 	int error;
1152 
1153 	each_burst = io_ctx->poll_each_burst;
1154 	error = sysctl_handle_int(oidp, &each_burst, 0, req);
1155 	if (error || req->newptr == NULL)
1156 		return error;
1157 
1158 	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
1159 		    0, sysctl_eachburst_handler);
1160 	msg.base.lmsg.u.ms_result = each_burst;
1161 	msg.ctx = io_ctx;
1162 
1163 	return lwkt_domsg(netisr_portfn(io_ctx->poll_cpuid), &msg.base.lmsg, 0);
1164 }
1165 
1166 static int
1167 iopoll_register(struct ifnet *ifp, struct iopoll_ctx *io_ctx,
1168 		const struct ifpoll_io *io_rec)
1169 {
1170 	int error;
1171 
1172 	KKASSERT(&curthread->td_msgport == netisr_portfn(io_ctx->poll_cpuid));
1173 
1174 	if (io_rec->poll_func == NULL)
1175 		return 0;
1176 
1177 	/*
1178 	 * Check if there is room.
1179 	 */
1180 	if (io_ctx->poll_handlers >= IFPOLL_LIST_LEN) {
1181 		/*
1182 		 * List full, cannot register more entries.
1183 		 * This should never happen; if it does, it is probably a
1184 		 * broken driver trying to register multiple times. Checking
1185 		 * this at runtime is expensive, and won't solve the problem
1186 		 * anyway, so just report a few times and then give up.
1187 		 */
1188 		static int verbose = 10; /* XXX */
1189 		if (verbose > 0) {
1190 			kprintf("io poll handlers list full, "
1191 				"maybe a broken driver?\n");
1192 			verbose--;
1193 		}
1194 		error = ENOENT;
1195 	} else {
1196 		struct iopoll_rec *rec = &io_ctx->pr[io_ctx->poll_handlers];
1197 
1198 		rec->ifp = ifp;
1199 		rec->serializer = io_rec->serializer;
1200 		rec->arg = io_rec->arg;
1201 		rec->poll_func = io_rec->poll_func;
1202 
1203 		io_ctx->poll_handlers++;
1204 		error = 0;
1205 	}
1206 	return error;
1207 }
1208 
1209 static int
1210 iopoll_deregister(struct ifnet *ifp, struct iopoll_ctx *io_ctx)
1211 {
1212 	int i, error;
1213 
1214 	KKASSERT(&curthread->td_msgport == netisr_portfn(io_ctx->poll_cpuid));
1215 
1216 	for (i = 0; i < io_ctx->poll_handlers; ++i) {
1217 		if (io_ctx->pr[i].ifp == ifp) /* Found it */
1218 			break;
1219 	}
1220 	if (i == io_ctx->poll_handlers) {
1221 		error = ENOENT;
1222 	} else {
1223 		io_ctx->poll_handlers--;
1224 		if (i < io_ctx->poll_handlers) {
1225 			/* Last entry replaces this one. */
1226 			io_ctx->pr[i] = io_ctx->pr[io_ctx->poll_handlers];
1227 		}
1228 
1229 		if (io_ctx->poll_handlers == 0)
1230 			iopoll_reset_state(io_ctx);
1231 		error = 0;
1232 	}
1233 	return error;
1234 }
1235 
1236 static void
1237 poll_comm_init(int cpuid)
1238 {
1239 	struct poll_comm *comm;
1240 	char cpuid_str[16];
1241 
1242 	comm = kmalloc_cachealign(sizeof(*comm), M_DEVBUF, M_WAITOK | M_ZERO);
1243 
1244 	if (ifpoll_stfrac < 1)
1245 		ifpoll_stfrac = IFPOLL_STFRAC_DEFAULT;
1246 	if (ifpoll_txfrac < 1)
1247 		ifpoll_txfrac = IFPOLL_TXFRAC_DEFAULT;
1248 
1249 	comm->poll_cpuid = cpuid;
1250 	comm->pollhz = poll_comm_pollhz_div(comm, ifpoll_pollhz);
1251 	comm->poll_stfrac = ifpoll_stfrac - 1;
1252 	comm->poll_txfrac = ifpoll_txfrac - 1;
1253 
1254 	ksnprintf(cpuid_str, sizeof(cpuid_str), "%d", cpuid);
1255 
1256 	sysctl_ctx_init(&comm->sysctl_ctx);
1257 	comm->sysctl_tree = SYSCTL_ADD_NODE(&comm->sysctl_ctx,
1258 			    SYSCTL_STATIC_CHILDREN(_net_ifpoll),
1259 			    OID_AUTO, cpuid_str, CTLFLAG_RD, 0, "");
1260 
1261 	SYSCTL_ADD_PROC(&comm->sysctl_ctx, SYSCTL_CHILDREN(comm->sysctl_tree),
1262 			OID_AUTO, "pollhz", CTLTYPE_INT | CTLFLAG_RW,
1263 			comm, 0, sysctl_pollhz,
1264 			"I", "Device polling frequency");
1265 
1266 	if (cpuid == 0) {
1267 		SYSCTL_ADD_PROC(&comm->sysctl_ctx,
1268 				SYSCTL_CHILDREN(comm->sysctl_tree),
1269 				OID_AUTO, "status_frac",
1270 				CTLTYPE_INT | CTLFLAG_RW,
1271 				comm, 0, sysctl_stfrac,
1272 				"I", "# of cycles before status is polled");
1273 	}
1274 	SYSCTL_ADD_PROC(&comm->sysctl_ctx, SYSCTL_CHILDREN(comm->sysctl_tree),
1275 			OID_AUTO, "tx_frac", CTLTYPE_INT | CTLFLAG_RW,
1276 			comm, 0, sysctl_txfrac,
1277 			"I", "# of cycles before TX is polled");
1278 
1279 	poll_common[cpuid] = comm;
1280 }
1281 
1282 static void
1283 poll_comm_start(int cpuid)
1284 {
1285 	struct poll_comm *comm = poll_common[cpuid];
1286 	systimer_func_t func;
1287 
1288 	/*
1289 	 * Initialize systimer
1290 	 */
1291 	if (cpuid == 0)
1292 		func = poll_comm_systimer0;
1293 	else
1294 		func = poll_comm_systimer;
1295 	systimer_init_periodic_nq(&comm->pollclock, func, comm, 1);
1296 }
1297 
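/*
 * Common per-tick systimer body: RX polling is clocked on every systimer
 * tick, while TX polling only runs once every (poll_txfrac + 1) ticks;
 * this is the "piggyback" mentioned in the comment at the top of this file.
 */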
1298 static void
1299 _poll_comm_systimer(struct poll_comm *comm)
1300 {
1301 	if (comm->txfrac_count-- == 0) {
1302 		comm->txfrac_count = comm->poll_txfrac;
1303 		iopoll_clock(txpoll_context[comm->poll_cpuid]);
1304 	}
1305 	iopoll_clock(rxpoll_context[comm->poll_cpuid]);
1306 }
1307 
1308 static void
1309 poll_comm_systimer0(systimer_t info, int in_ipi __unused,
1310     struct intrframe *frame __unused)
1311 {
1312 	struct poll_comm *comm = info->data;
1313 	globaldata_t gd = mycpu;
1314 
1315 	KKASSERT(comm->poll_cpuid == gd->gd_cpuid && gd->gd_cpuid == 0);
1316 
1317 	crit_enter_gd(gd);
1318 
1319 	if (comm->stfrac_count-- == 0) {
1320 		comm->stfrac_count = comm->poll_stfrac;
1321 		stpoll_clock(&stpoll_context);
1322 	}
1323 	_poll_comm_systimer(comm);
1324 
1325 	crit_exit_gd(gd);
1326 }
1327 
1328 static void
1329 poll_comm_systimer(systimer_t info, int in_ipi __unused,
1330     struct intrframe *frame __unused)
1331 {
1332 	struct poll_comm *comm = info->data;
1333 	globaldata_t gd = mycpu;
1334 
1335 	KKASSERT(comm->poll_cpuid == gd->gd_cpuid && gd->gd_cpuid != 0);
1336 
1337 	crit_enter_gd(gd);
1338 	_poll_comm_systimer(comm);
1339 	crit_exit_gd(gd);
1340 }
1341 
1342 static void
1343 poll_comm_adjust_pollhz(struct poll_comm *comm)
1344 {
1345 	uint32_t handlers;
1346 	int pollhz = 1;
1347 
1348 	KKASSERT(&curthread->td_msgport == netisr_portfn(comm->poll_cpuid));
1349 
1350 	/*
1351 	 * If there is no polling handler registered, set systimer
1352 	 * frequency to the lowest value.  Polling systimer frequency
1353 	 * will be adjusted to the requested value, once there are
1354 	 * registered handlers.
1355 	 */
1356 	handlers = rxpoll_context[mycpuid]->poll_handlers +
1357 		   txpoll_context[mycpuid]->poll_handlers;
1358 	if (comm->poll_cpuid == 0)
1359 		handlers += stpoll_context.poll_handlers;
1360 	if (handlers)
1361 		pollhz = comm->pollhz;
1362 	systimer_adjust_periodic(&comm->pollclock, pollhz);
1363 }
1364 
1365 static int
1366 sysctl_pollhz(SYSCTL_HANDLER_ARGS)
1367 {
1368 	struct poll_comm *comm = arg1;
1369 	struct netmsg_base nmsg;
1370 	int error, phz;
1371 
1372 	phz = poll_comm_pollhz_conv(comm, comm->pollhz);
1373 	error = sysctl_handle_int(oidp, &phz, 0, req);
1374 	if (error || req->newptr == NULL)
1375 		return error;
1376 	if (phz <= 0)
1377 		return EINVAL;
1378 	else if (phz > IFPOLL_FREQ_MAX)
1379 		phz = IFPOLL_FREQ_MAX;
1380 
1381 	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
1382 		    0, sysctl_pollhz_handler);
1383 	nmsg.lmsg.u.ms_result = phz;
1384 
1385 	return lwkt_domsg(netisr_portfn(comm->poll_cpuid), &nmsg.lmsg, 0);
1386 }
1387 
1388 static void
1389 sysctl_pollhz_handler(netmsg_t nmsg)
1390 {
1391 	struct poll_comm *comm = poll_common[mycpuid];
1392 
1393 	KKASSERT(&curthread->td_msgport == netisr_portfn(comm->poll_cpuid));
1394 
1395 	/* Save polling frequency */
1396 	comm->pollhz = poll_comm_pollhz_div(comm, nmsg->lmsg.u.ms_result);
1397 
1398 	/*
1399 	 * Adjust cached pollhz
1400 	 */
1401 	rxpoll_context[mycpuid]->pollhz = comm->pollhz;
1402 	txpoll_context[mycpuid]->pollhz =
1403 	    comm->pollhz / (comm->poll_txfrac + 1);
1404 
1405 	/*
1406 	 * Adjust polling frequency
1407 	 */
1408 	poll_comm_adjust_pollhz(comm);
1409 
1410 	lwkt_replymsg(&nmsg->lmsg, 0);
1411 }
1412 
1413 static int
1414 sysctl_stfrac(SYSCTL_HANDLER_ARGS)
1415 {
1416 	struct poll_comm *comm = arg1;
1417 	struct netmsg_base nmsg;
1418 	int error, stfrac;
1419 
1420 	KKASSERT(comm->poll_cpuid == 0);
1421 
1422 	stfrac = comm->poll_stfrac + 1;
1423 	error = sysctl_handle_int(oidp, &stfrac, 0, req);
1424 	if (error || req->newptr == NULL)
1425 		return error;
1426 	if (stfrac < 1)
1427 		return EINVAL;
1428 
1429 	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
1430 		    0, sysctl_stfrac_handler);
1431 	nmsg.lmsg.u.ms_result = stfrac - 1;
1432 
1433 	return lwkt_domsg(netisr_portfn(comm->poll_cpuid), &nmsg.lmsg, 0);
1434 }
1435 
1436 static void
1437 sysctl_stfrac_handler(netmsg_t nmsg)
1438 {
1439 	struct poll_comm *comm = poll_common[mycpuid];
1440 	int stfrac = nmsg->lmsg.u.ms_result;
1441 
1442 	KKASSERT(&curthread->td_msgport == netisr_portfn(comm->poll_cpuid));
1443 
1444 	crit_enter();
1445 	comm->poll_stfrac = stfrac;
1446 	if (comm->stfrac_count > comm->poll_stfrac)
1447 		comm->stfrac_count = comm->poll_stfrac;
1448 	crit_exit();
1449 
1450 	lwkt_replymsg(&nmsg->lmsg, 0);
1451 }
1452 
1453 static int
1454 sysctl_txfrac(SYSCTL_HANDLER_ARGS)
1455 {
1456 	struct poll_comm *comm = arg1;
1457 	struct netmsg_base nmsg;
1458 	int error, txfrac;
1459 
1460 	txfrac = comm->poll_txfrac + 1;
1461 	error = sysctl_handle_int(oidp, &txfrac, 0, req);
1462 	if (error || req->newptr == NULL)
1463 		return error;
1464 	if (txfrac < 1)
1465 		return EINVAL;
1466 
1467 	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
1468 		    0, sysctl_txfrac_handler);
1469 	nmsg.lmsg.u.ms_result = txfrac - 1;
1470 
1471 	return lwkt_domsg(netisr_portfn(comm->poll_cpuid), &nmsg.lmsg, 0);
1472 }
1473 
1474 static void
1475 sysctl_txfrac_handler(netmsg_t nmsg)
1476 {
1477 	struct poll_comm *comm = poll_common[mycpuid];
1478 	int txfrac = nmsg->lmsg.u.ms_result;
1479 
1480 	KKASSERT(&curthread->td_msgport == netisr_portfn(comm->poll_cpuid));
1481 
1482 	crit_enter();
1483 	comm->poll_txfrac = txfrac;
1484 	if (comm->txfrac_count > comm->poll_txfrac)
1485 		comm->txfrac_count = comm->poll_txfrac;
1486 	crit_exit();
1487 
1488 	lwkt_replymsg(&nmsg->lmsg, 0);
1489 }
1490 
1491 void
1492 ifpoll_compat_setup(struct ifpoll_compat *cp,
1493     struct sysctl_ctx_list *sysctl_ctx,
1494     struct sysctl_oid *sysctl_tree,
1495     int unit, struct lwkt_serialize *slz)
1496 {
1497 	cp->ifpc_stcount = 0;
1498 	cp->ifpc_stfrac = ((poll_common[0]->poll_stfrac + 1) *
1499 	    howmany(IOPOLL_BURST_MAX, IOPOLL_EACH_BURST)) - 1;
1500 
1501 	cp->ifpc_cpuid = unit % ncpus2;
1502 	cp->ifpc_serializer = slz;
1503 
1504 	if (sysctl_ctx != NULL && sysctl_tree != NULL) {
1505 		SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1506 		    OID_AUTO, "npoll_stfrac", CTLTYPE_INT | CTLFLAG_RW,
1507 		    cp, 0, sysctl_compat_npoll_stfrac, "I",
1508 		    "polling status frac");
1509 		SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1510 		    OID_AUTO, "npoll_cpuid", CTLTYPE_INT | CTLFLAG_RW,
1511 		    cp, 0, sysctl_compat_npoll_cpuid, "I",
1512 		    "polling cpuid");
1513 	}
1514 }
1515 
1516 static int
1517 sysctl_compat_npoll_stfrac(SYSCTL_HANDLER_ARGS)
1518 {
1519 	struct ifpoll_compat *cp = arg1;
1520 	int error = 0, stfrac;
1521 
1522 	lwkt_serialize_enter(cp->ifpc_serializer);
1523 
1524 	stfrac = cp->ifpc_stfrac + 1;
1525 	error = sysctl_handle_int(oidp, &stfrac, 0, req);
1526 	if (!error && req->newptr != NULL) {
1527 		if (stfrac < 1) {
1528 			error = EINVAL;
1529 		} else {
1530 			cp->ifpc_stfrac = stfrac - 1;
1531 			if (cp->ifpc_stcount > cp->ifpc_stfrac)
1532 				cp->ifpc_stcount = cp->ifpc_stfrac;
1533 		}
1534 	}
1535 
1536 	lwkt_serialize_exit(cp->ifpc_serializer);
1537 	return error;
1538 }
1539 
1540 static int
1541 sysctl_compat_npoll_cpuid(SYSCTL_HANDLER_ARGS)
1542 {
1543 	struct ifpoll_compat *cp = arg1;
1544 	int error = 0, cpuid;
1545 
1546 	lwkt_serialize_enter(cp->ifpc_serializer);
1547 
1548 	cpuid = cp->ifpc_cpuid;
1549 	error = sysctl_handle_int(oidp, &cpuid, 0, req);
1550 	if (!error && req->newptr != NULL) {
1551 		if (cpuid < 0 || cpuid >= ncpus2)
1552 			error = EINVAL;
1553 		else
1554 			cp->ifpc_cpuid = cpuid;
1555 	}
1556 
1557 	lwkt_serialize_exit(cp->ifpc_serializer);
1558 	return error;
1559 }
1560