xref: /dragonfly/sys/net/if_poll.c (revision 65cc0652)
1 /*-
2  * Copyright (c) 2001-2002 Luigi Rizzo
3  *
4  * Supported by: the Xorp Project (www.xorp.org)
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  * $FreeBSD: src/sys/kern/kern_poll.c,v 1.2.2.4 2002/06/27 23:26:33 luigi Exp $
28  */
29 
30 #include "opt_ifpoll.h"
31 
32 #include <sys/param.h>
33 #include <sys/kernel.h>
34 #include <sys/ktr.h>
35 #include <sys/malloc.h>
36 #include <sys/serialize.h>
37 #include <sys/socket.h>
38 #include <sys/sysctl.h>
39 #include <sys/microtime_pcpu.h>
40 
41 #include <sys/thread2.h>
42 #include <sys/msgport2.h>
43 
44 #include <net/if.h>
45 #include <net/if_poll.h>
46 #include <net/netmsg2.h>
47 #include <net/netisr2.h>
48 
49 /*
50  * Polling support for network device drivers.
51  *
52  * Drivers which support this feature try to register one status polling
53  * handler and several TX/RX polling handlers with the polling code.
54  * If the interface's if_npoll is called with a non-NULL second argument,
55  * then a register operation is requested, else a deregister operation is
56  * requested.  If the requested operation is "register", the driver should
57  * set up the ifpoll_info passed in according to its own needs:
58  *   ifpoll_info.ifpi_status.status_func == NULL
59  *     No status polling handler will be installed on CPU(0)
60  *   ifpoll_info.ifpi_rx[n].poll_func == NULL
61  *     No RX polling handler will be installed on CPU(n)
62  *   ifpoll_info.ifpi_tx[n].poll_func == NULL
63  *     No TX polling handler will be installed on CPU(n)
64  *
65  * The serializer field of ifpoll_info.ifpi_status and of
66  * ifpoll_info.ifpi_tx[n] must _not_ be NULL.  The serializer will be held
67  * before status_func and poll_func are called.  The serializer field of
68  * ifpoll_info.ifpi_rx[n] can be NULL, but the interface's if_flags must
69  * then have IFF_IDIRECT set, which indicates that network processing of
70  * the input packets runs directly instead of being redispatched.
71  *
72  * RX is polled at the specified polling frequency (net.ifpoll.X.pollhz).
73  * TX and status polling can be done at a lower frequency than RX
74  * (net.ifpoll.0.status_frac and net.ifpoll.X.tx_frac).  To avoid
75  * staggering systimers at high frequency, the RX systimer gives TX and
76  * status polling a piggyback (XXX).
77  *
78  * All of the registered polling handlers are called only if the interface
79  * is marked as IFF_UP, IFF_RUNNING and IFF_NPOLLING.  However, the
80  * interface's register and deregister function (ifnet.if_npoll) will be
81  * called even if the interface is not marked with IFF_RUNNING or IFF_UP.
82  *
83  * If registration is successful, the driver must disable interrupts,
84  * and further I/O is performed through the TX/RX polling handlers, which
85  * are invoked (at least once per clock tick) with 3 arguments: the "arg"
86  * passed at register time, a struct ifnet pointer, and a "count" limit.
87  * The registered serializer will be held before calling the related
88  * polling handler.
89  *
90  * The count limit specifies how much work the handler can do during the
91  * call -- typically this is the number of packets to be received, or
92  * transmitted, etc. (drivers are free to interpret this number, as long
93  * as the max time spent in the function grows roughly linearly with the
94  * count).
95  *
96  * A second variable controls the sharing of CPU between polling/kernel
97  * network processing and other activities (typically userlevel tasks):
98  * net.ifpoll.X.rx.user_frac (between 0 and 100, default 50) sets the
99  * share of CPU allocated to user tasks.  CPU is allocated proportionally
100  * to the shares, by dynamically adjusting the "count" (poll_burst).
101  *
102  * Other parameters should be left to their default values.
103  * The following constraints hold:
104  *
105  *	1 <= poll_burst <= poll_burst_max
106  *	1 <= poll_each_burst <= poll_burst_max
107  *	MIN_IOPOLL_BURST_MAX <= poll_burst_max <= MAX_IOPOLL_BURST_MAX
108  */
109 
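/*
 * Illustrative sketch, not part of the original file: a hypothetical driver
 * "foo" (single RX/TX ring, polled on CPU 0) filling in the ifpoll_info from
 * its if_npoll callback.  All foo_* names are made up for illustration; real
 * drivers follow the same pattern with their own handlers, rings and
 * serializers, and re-enable interrupts on deregistration.
 *
 *	static void
 *	foo_npoll(struct ifnet *ifp, struct ifpoll_info *info)
 *	{
 *		struct foo_softc *sc = ifp->if_softc;
 *
 *		if (info != NULL) {
 *			info->ifpi_status.status_func = foo_npoll_status;
 *			info->ifpi_status.serializer = &sc->foo_serialize;
 *
 *			info->ifpi_rx[0].poll_func = foo_npoll_rx;
 *			info->ifpi_rx[0].arg = &sc->foo_rx_ring;
 *			info->ifpi_rx[0].serializer = &sc->foo_rx_serialize;
 *
 *			info->ifpi_tx[0].poll_func = foo_npoll_tx;
 *			info->ifpi_tx[0].arg = &sc->foo_tx_ring;
 *			info->ifpi_tx[0].serializer = &sc->foo_tx_serialize;
 *
 *			if (ifp->if_flags & IFF_RUNNING)
 *				foo_disable_intr(sc);
 *		} else {
 *			if (ifp->if_flags & IFF_RUNNING)
 *				foo_enable_intr(sc);
 *		}
 *	}
 */
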
110 #define IFPOLL_LIST_LEN		128
111 #define IFPOLL_FREQ_MAX		30000
112 
113 #define MIN_IOPOLL_BURST_MAX	10
114 #define MAX_IOPOLL_BURST_MAX	5000
115 #define IOPOLL_BURST_MAX	250	/* good for 1000Mbit net and HZ=6000 */
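/*
 * E.g. at the default 6000Hz polling frequency a burst of 250 packets per
 * tick allows up to 250 * 6000 = 1.5M packets/sec, which is about the
 * maximum frame rate of a 1000Mbit link with minimum-sized frames
 * (~1.488M frames/sec).
 */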
116 
117 #define IOPOLL_EACH_BURST	50
118 #define IOPOLL_USER_FRAC	50
119 
120 #define IFPOLL_FREQ_DEFAULT	6000
121 
122 #define IFPOLL_TXFRAC_DEFAULT	1	/* 1/1 of the pollhz */
123 #define IFPOLL_STFRAC_DEFAULT	120	/* 1/120 of the pollhz */
124 
125 #define IFPOLL_RX		0x1
126 #define IFPOLL_TX		0x2
127 
128 struct iopoll_rec {
129 	struct lwkt_serialize	*serializer;
130 	struct ifnet		*ifp;
131 	void			*arg;
132 	ifpoll_iofn_t		poll_func;
133 };
134 
135 struct iopoll_ctx {
136 	union microtime_pcpu	prev_t;
137 	u_long			short_ticks;		/* statistics */
138 	u_long			lost_polls;		/* statistics */
139 	u_long			suspect;		/* statistics */
140 	u_long			stalled;		/* statistics */
141 	uint32_t		pending_polls;		/* state */
142 
143 	struct netmsg_base	poll_netmsg;
144 	struct netmsg_base	poll_more_netmsg;
145 
146 	int			poll_cpuid;
147 	int			pollhz;
148 	uint32_t		phase;			/* state */
149 	int			residual_burst;		/* state */
150 	uint32_t		poll_each_burst;	/* tunable */
151 	union microtime_pcpu	poll_start_t;		/* state */
152 
153 	uint32_t		poll_burst;		/* state */
154 	uint32_t		poll_burst_max;		/* tunable */
155 	uint32_t		user_frac;		/* tunable */
156 	uint32_t		kern_frac;		/* state */
157 
158 	uint32_t		poll_handlers; /* next free entry in pr[]. */
159 	struct iopoll_rec	pr[IFPOLL_LIST_LEN];
160 
161 	struct sysctl_ctx_list	poll_sysctl_ctx;
162 	struct sysctl_oid	*poll_sysctl_tree;
163 };
164 
165 struct poll_comm {
166 	struct systimer		pollclock;
167 	int			poll_cpuid;
168 
169 	int			stfrac_count;		/* state */
170 	int			poll_stfrac;		/* tunable */
171 
172 	int			txfrac_count;		/* state */
173 	int			poll_txfrac;		/* tunable */
174 
175 	int			pollhz;			/* tunable */
176 
177 	struct sysctl_ctx_list	sysctl_ctx;
178 	struct sysctl_oid	*sysctl_tree;
179 };
180 
181 struct stpoll_rec {
182 	struct lwkt_serialize	*serializer;
183 	struct ifnet		*ifp;
184 	ifpoll_stfn_t		status_func;
185 };
186 
187 struct stpoll_ctx {
188 	struct netmsg_base	poll_netmsg;
189 
190 	uint32_t		poll_handlers; /* next free entry in pr[]. */
191 	struct stpoll_rec	pr[IFPOLL_LIST_LEN];
192 
193 	struct sysctl_ctx_list	poll_sysctl_ctx;
194 	struct sysctl_oid	*poll_sysctl_tree;
195 } __cachealign;
196 
197 struct iopoll_sysctl_netmsg {
198 	struct netmsg_base	base;
199 	struct iopoll_ctx	*ctx;
200 };
201 
202 static void	ifpoll_init_pcpu(int);
203 static void	ifpoll_register_handler(netmsg_t);
204 static void	ifpoll_deregister_handler(netmsg_t);
205 
206 /*
207  * Status polling
208  */
209 static void	stpoll_init(void);
210 static void	stpoll_handler(netmsg_t);
211 static void	stpoll_clock(struct stpoll_ctx *);
212 static int	stpoll_register(struct ifnet *, const struct ifpoll_status *);
213 static int	stpoll_deregister(struct ifnet *);
214 
215 /*
216  * RX/TX polling
217  */
218 static struct iopoll_ctx *iopoll_ctx_create(int, int);
219 static void	iopoll_init(int);
220 static void	rxpoll_handler(netmsg_t);
221 static void	txpoll_handler(netmsg_t);
222 static void	rxpollmore_handler(netmsg_t);
223 static void	txpollmore_handler(netmsg_t);
224 static void	iopoll_clock(struct iopoll_ctx *);
225 static int	iopoll_register(struct ifnet *, struct iopoll_ctx *,
226 		    const struct ifpoll_io *);
227 static int	iopoll_deregister(struct ifnet *, struct iopoll_ctx *);
228 
229 static void	iopoll_add_sysctl(struct sysctl_ctx_list *,
230 		    struct sysctl_oid_list *, struct iopoll_ctx *, int);
231 static void	sysctl_burstmax_handler(netmsg_t);
232 static int	sysctl_burstmax(SYSCTL_HANDLER_ARGS);
233 static void	sysctl_eachburst_handler(netmsg_t);
234 static int	sysctl_eachburst(SYSCTL_HANDLER_ARGS);
235 
236 /*
237  * Common functions
238  */
239 static void	poll_comm_init(int);
240 static void	poll_comm_start(int);
241 static void	poll_comm_adjust_pollhz(struct poll_comm *);
242 static void	poll_comm_systimer0(systimer_t, int, struct intrframe *);
243 static void	poll_comm_systimer(systimer_t, int, struct intrframe *);
244 static void	sysctl_pollhz_handler(netmsg_t);
245 static void	sysctl_stfrac_handler(netmsg_t);
246 static void	sysctl_txfrac_handler(netmsg_t);
247 static int	sysctl_pollhz(SYSCTL_HANDLER_ARGS);
248 static int	sysctl_stfrac(SYSCTL_HANDLER_ARGS);
249 static int	sysctl_txfrac(SYSCTL_HANDLER_ARGS);
250 static int	sysctl_compat_npoll_stfrac(SYSCTL_HANDLER_ARGS);
251 static int	sysctl_compat_npoll_cpuid(SYSCTL_HANDLER_ARGS);
252 
253 static struct stpoll_ctx	stpoll_context;
254 static struct poll_comm		*poll_common[MAXCPU];
255 static struct iopoll_ctx	*rxpoll_context[MAXCPU];
256 static struct iopoll_ctx	*txpoll_context[MAXCPU];
257 
258 SYSCTL_NODE(_net, OID_AUTO, ifpoll, CTLFLAG_RW, 0,
259 	    "Network device polling parameters");
260 
261 static int	iopoll_burst_max = IOPOLL_BURST_MAX;
262 static int	iopoll_each_burst = IOPOLL_EACH_BURST;
263 static int	iopoll_user_frac = IOPOLL_USER_FRAC;
264 
265 static int	ifpoll_pollhz = IFPOLL_FREQ_DEFAULT;
266 static int	ifpoll_stfrac = IFPOLL_STFRAC_DEFAULT;
267 static int	ifpoll_txfrac = IFPOLL_TXFRAC_DEFAULT;
268 
269 TUNABLE_INT("net.ifpoll.burst_max", &iopoll_burst_max);
270 TUNABLE_INT("net.ifpoll.each_burst", &iopoll_each_burst);
271 TUNABLE_INT("net.ifpoll.user_frac", &iopoll_user_frac);
272 TUNABLE_INT("net.ifpoll.pollhz", &ifpoll_pollhz);
273 TUNABLE_INT("net.ifpoll.status_frac", &ifpoll_stfrac);
274 TUNABLE_INT("net.ifpoll.tx_frac", &ifpoll_txfrac);
275 
276 #if !defined(KTR_IF_POLL)
277 #define  KTR_IF_POLL		KTR_ALL
278 #endif
279 KTR_INFO_MASTER(if_poll);
280 KTR_INFO(KTR_IF_POLL, if_poll, rx_start, 0, "rx start");
281 KTR_INFO(KTR_IF_POLL, if_poll, rx_end, 1, "rx end");
282 KTR_INFO(KTR_IF_POLL, if_poll, tx_start, 2, "tx start");
283 KTR_INFO(KTR_IF_POLL, if_poll, tx_end, 3, "tx end");
284 KTR_INFO(KTR_IF_POLL, if_poll, rx_mstart, 4, "rx more start");
285 KTR_INFO(KTR_IF_POLL, if_poll, rx_mend, 5, "rx more end");
286 KTR_INFO(KTR_IF_POLL, if_poll, tx_mstart, 6, "tx more start");
287 KTR_INFO(KTR_IF_POLL, if_poll, tx_mend, 7, "tx more end");
288 KTR_INFO(KTR_IF_POLL, if_poll, ioclock_start, 8, "ioclock start");
289 KTR_INFO(KTR_IF_POLL, if_poll, ioclock_end, 9, "ioclock end");
290 #define logpoll(name)	KTR_LOG(if_poll_ ## name)
291 
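/*
 * Per-cpu frequency adjustment: cpu0 gets +0, cpu1 +3, cpu2 +6, ...
 * (modulo 50), presumably so that the polling systimers on different cpus
 * do not run in perfect lockstep.  poll_comm_pollhz_div() applies the
 * adjustment when a frequency is stored and poll_comm_pollhz_conv()
 * removes it when the frequency is reported back (e.g. by the pollhz
 * sysctl handler).
 */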
292 #define IFPOLL_FREQ_ADJ(comm)	(((comm)->poll_cpuid * 3) % 50)
293 
294 static __inline int
295 poll_comm_pollhz_div(const struct poll_comm *comm, int pollhz)
296 {
297 	return pollhz + IFPOLL_FREQ_ADJ(comm);
298 }
299 
300 static __inline int
301 poll_comm_pollhz_conv(const struct poll_comm *comm, int pollhz)
302 {
303 	return pollhz - IFPOLL_FREQ_ADJ(comm);
304 }
305 
306 static __inline void
307 ifpoll_sendmsg_oncpu(netmsg_t msg)
308 {
309 	if (msg->lmsg.ms_flags & MSGF_DONE)
310 		netisr_sendmsg_oncpu(&msg->base);
311 }
312 
313 static __inline void
314 sched_stpoll(struct stpoll_ctx *st_ctx)
315 {
316 	ifpoll_sendmsg_oncpu((netmsg_t)&st_ctx->poll_netmsg);
317 }
318 
319 static __inline void
320 sched_iopoll(struct iopoll_ctx *io_ctx)
321 {
322 	ifpoll_sendmsg_oncpu((netmsg_t)&io_ctx->poll_netmsg);
323 }
324 
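/*
 * Schedule the "pollmore" stage.  Normally the poll_more_netmsg is queued
 * to the local netisr, so any netisr work queued in between runs before
 * it; when "direct" is TRUE the message is instead dispatched
 * synchronously right here, bypassing the message port.
 */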
325 static __inline void
326 sched_iopollmore(struct iopoll_ctx *io_ctx, boolean_t direct)
327 {
328 
329 	if (!direct) {
330 		ifpoll_sendmsg_oncpu((netmsg_t)&io_ctx->poll_more_netmsg);
331 	} else {
332 		struct netmsg_base *nmsg = &io_ctx->poll_more_netmsg;
333 
334 		nmsg->lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
335 		nmsg->lmsg.ms_flags |= MSGF_SYNC;
336 		nmsg->nm_dispatch((netmsg_t)nmsg);
337 		KKASSERT(nmsg->lmsg.ms_flags & MSGF_DONE);
338 	}
339 }
340 
341 /*
342  * Initialize per-cpu polling(4) context.
343  */
344 static void
345 ifpoll_init_pcpu(int cpuid)
346 {
347 
348 	poll_comm_init(cpuid);
349 
350 	if (cpuid == 0)
351 		stpoll_init();
352 	iopoll_init(cpuid);
353 
354 	poll_comm_start(cpuid);
355 }
356 
357 static void
358 ifpoll_init_handler(netmsg_t msg)
359 {
360 	int cpu = mycpuid;
361 
362 	ifpoll_init_pcpu(cpu);
363 	netisr_forwardmsg(&msg->base, cpu + 1);
364 }
365 
366 static void
367 ifpoll_sysinit(void *dummy __unused)
368 {
369 	struct netmsg_base msg;
370 
371 	netmsg_init(&msg, NULL, &curthread->td_msgport, 0, ifpoll_init_handler);
372 	netisr_domsg_global(&msg);
373 }
374 SYSINIT(ifpoll, SI_SUB_PRE_DRIVERS, SI_ORDER_ANY, ifpoll_sysinit, NULL);
375 
376 int
377 ifpoll_register(struct ifnet *ifp)
378 {
379 	struct ifpoll_info *info;
380 	struct netmsg_base nmsg;
381 	int error;
382 
383 	if (ifp->if_npoll == NULL) {
384 		/* Device does not support polling */
385 		return EOPNOTSUPP;
386 	}
387 
388 	info = kmalloc(sizeof(*info), M_TEMP, M_WAITOK | M_ZERO);
389 
390 	/*
391 	 * Attempt to register.  Interlock with IFF_NPOLLING.
392 	 */
393 
394 	ifnet_serialize_all(ifp);
395 
396 	if (ifp->if_flags & IFF_NPOLLING) {
397 		/* Already polling */
398 		ifnet_deserialize_all(ifp);
399 		kfree(info, M_TEMP);
400 		return EBUSY;
401 	}
402 
403 	info->ifpi_ifp = ifp;
404 
405 	ifp->if_flags |= IFF_NPOLLING;
406 	ifp->if_npoll(ifp, info);
407 
408 	ifnet_deserialize_all(ifp);
409 
410 	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
411 		    0, ifpoll_register_handler);
412 	nmsg.lmsg.u.ms_resultp = info;
413 
414 	error = netisr_domsg_global(&nmsg);
415 	if (error) {
416 		if (!ifpoll_deregister(ifp)) {
417 			if_printf(ifp, "ifpoll_register: "
418 				  "ifpoll_deregister failed!\n");
419 		}
420 	}
421 
422 	kfree(info, M_TEMP);
423 	return error;
424 }
425 
426 int
427 ifpoll_deregister(struct ifnet *ifp)
428 {
429 	struct netmsg_base nmsg;
430 	int error;
431 
432 	if (ifp->if_npoll == NULL)
433 		return EOPNOTSUPP;
434 
435 	ifnet_serialize_all(ifp);
436 
437 	if ((ifp->if_flags & IFF_NPOLLING) == 0) {
438 		ifnet_deserialize_all(ifp);
439 		return EINVAL;
440 	}
441 	ifp->if_flags &= ~IFF_NPOLLING;
442 
443 	ifnet_deserialize_all(ifp);
444 
445 	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
446 		    0, ifpoll_deregister_handler);
447 	nmsg.lmsg.u.ms_resultp = ifp;
448 
449 	error = netisr_domsg_global(&nmsg);
450 	if (!error) {
451 		ifnet_serialize_all(ifp);
452 		ifp->if_npoll(ifp, NULL);
453 		ifnet_deserialize_all(ifp);
454 	}
455 	return error;
456 }
457 
458 static void
459 ifpoll_register_handler(netmsg_t nmsg)
460 {
461 	const struct ifpoll_info *info = nmsg->lmsg.u.ms_resultp;
462 	int cpuid = mycpuid;
463 	int error;
464 
465 	KKASSERT(cpuid < netisr_ncpus);
466 	KKASSERT(&curthread->td_msgport == netisr_cpuport(cpuid));
467 
468 	if (cpuid == 0) {
469 		error = stpoll_register(info->ifpi_ifp, &info->ifpi_status);
470 		if (error)
471 			goto failed;
472 	}
473 
474 	error = iopoll_register(info->ifpi_ifp, rxpoll_context[cpuid],
475 				&info->ifpi_rx[cpuid]);
476 	if (error)
477 		goto failed;
478 
479 	error = iopoll_register(info->ifpi_ifp, txpoll_context[cpuid],
480 				&info->ifpi_tx[cpuid]);
481 	if (error)
482 		goto failed;
483 
484 	/* Adjust polling frequency, after all registration is done */
485 	poll_comm_adjust_pollhz(poll_common[cpuid]);
486 
487 	netisr_forwardmsg(&nmsg->base, cpuid + 1);
488 	return;
489 failed:
490 	netisr_replymsg(&nmsg->base, error);
491 }
492 
493 static void
494 ifpoll_deregister_handler(netmsg_t nmsg)
495 {
496 	struct ifnet *ifp = nmsg->lmsg.u.ms_resultp;
497 	int cpuid = mycpuid;
498 
499 	KKASSERT(cpuid < netisr_ncpus);
500 	KKASSERT(&curthread->td_msgport == netisr_cpuport(cpuid));
501 
502 	/* Ignore errors */
503 	if (cpuid == 0)
504 		stpoll_deregister(ifp);
505 	iopoll_deregister(ifp, rxpoll_context[cpuid]);
506 	iopoll_deregister(ifp, txpoll_context[cpuid]);
507 
508 	/* Adjust polling frequency, after all deregistration is done */
509 	poll_comm_adjust_pollhz(poll_common[cpuid]);
510 
511 	netisr_forwardmsg(&nmsg->base, cpuid + 1);
512 }
513 
514 static void
515 stpoll_init(void)
516 {
517 	struct stpoll_ctx *st_ctx = &stpoll_context;
518 	const struct poll_comm *comm = poll_common[0];
519 
520 	sysctl_ctx_init(&st_ctx->poll_sysctl_ctx);
521 	st_ctx->poll_sysctl_tree = SYSCTL_ADD_NODE(&st_ctx->poll_sysctl_ctx,
522 				   SYSCTL_CHILDREN(comm->sysctl_tree),
523 				   OID_AUTO, "status", CTLFLAG_RD, 0, "");
524 
525 	SYSCTL_ADD_UINT(&st_ctx->poll_sysctl_ctx,
526 			SYSCTL_CHILDREN(st_ctx->poll_sysctl_tree),
527 			OID_AUTO, "handlers", CTLFLAG_RD,
528 			&st_ctx->poll_handlers, 0,
529 			"Number of registered status poll handlers");
530 
531 	netmsg_init(&st_ctx->poll_netmsg, NULL, &netisr_adone_rport,
532 		    0, stpoll_handler);
533 }
534 
535 /*
536  * stpoll_handler is scheduled by sched_stpoll when appropriate, typically
537  * once per polling systimer tick.
538  */
539 static void
540 stpoll_handler(netmsg_t msg)
541 {
542 	struct stpoll_ctx *st_ctx = &stpoll_context;
543 	struct thread *td = curthread;
544 	int i;
545 
546 	ASSERT_NETISR0;
547 
548 	crit_enter_quick(td);
549 
550 	/* Reply ASAP */
551 	netisr_replymsg(&msg->base, 0);
552 
553 	if (st_ctx->poll_handlers == 0) {
554 		crit_exit_quick(td);
555 		return;
556 	}
557 
558 	for (i = 0; i < st_ctx->poll_handlers; ++i) {
559 		const struct stpoll_rec *rec = &st_ctx->pr[i];
560 		struct ifnet *ifp = rec->ifp;
561 
562 		if (!lwkt_serialize_try(rec->serializer))
563 			continue;
564 
565 		if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
566 		    (IFF_RUNNING | IFF_NPOLLING))
567 			rec->status_func(ifp);
568 
569 		lwkt_serialize_exit(rec->serializer);
570 	}
571 
572 	crit_exit_quick(td);
573 }
574 
575 /*
576  * Hook from the status poll systimer.  Tries to schedule a status poll.
577  * NOTE: Caller should hold critical section.
578  */
579 static void
580 stpoll_clock(struct stpoll_ctx *st_ctx)
581 {
582 	KKASSERT(mycpuid == 0);
583 
584 	if (st_ctx->poll_handlers == 0)
585 		return;
586 	sched_stpoll(st_ctx);
587 }
588 
589 static int
590 stpoll_register(struct ifnet *ifp, const struct ifpoll_status *st_rec)
591 {
592 	struct stpoll_ctx *st_ctx = &stpoll_context;
593 	int error;
594 
595 	ASSERT_NETISR0;
596 
597 	if (st_rec->status_func == NULL)
598 		return 0;
599 
600 	/*
601 	 * Check if there is room.
602 	 */
603 	if (st_ctx->poll_handlers >= IFPOLL_LIST_LEN) {
604 		/*
605 		 * List full, cannot register more entries.
606 		 * This should never happen; if it does, it is probably a
607 		 * broken driver trying to register multiple times. Checking
608 		 * this at runtime is expensive, and won't solve the problem
609 		 * anyways, so just report a few times and then give up.
610 		 */
611 		static int verbose = 10; /* XXX */
612 
613 		if (verbose > 0) {
614 			kprintf("status poll handlers list full, "
615 				"maybe a broken driver?\n");
616 			verbose--;
617 		}
618 		error = ENOENT;
619 	} else {
620 		struct stpoll_rec *rec = &st_ctx->pr[st_ctx->poll_handlers];
621 
622 		rec->ifp = ifp;
623 		rec->serializer = st_rec->serializer;
624 		rec->status_func = st_rec->status_func;
625 
626 		st_ctx->poll_handlers++;
627 		error = 0;
628 	}
629 	return error;
630 }
631 
632 static int
633 stpoll_deregister(struct ifnet *ifp)
634 {
635 	struct stpoll_ctx *st_ctx = &stpoll_context;
636 	int i, error;
637 
638 	ASSERT_NETISR0;
639 
640 	for (i = 0; i < st_ctx->poll_handlers; ++i) {
641 		if (st_ctx->pr[i].ifp == ifp) /* Found it */
642 			break;
643 	}
644 	if (i == st_ctx->poll_handlers) {
645 		error = ENOENT;
646 	} else {
647 		st_ctx->poll_handlers--;
648 		if (i < st_ctx->poll_handlers) {
649 			/* Last entry replaces this one. */
650 			st_ctx->pr[i] = st_ctx->pr[st_ctx->poll_handlers];
651 		}
652 		error = 0;
653 	}
654 	return error;
655 }
656 
657 static __inline void
658 iopoll_reset_state(struct iopoll_ctx *io_ctx)
659 {
660 	crit_enter();
661 	io_ctx->poll_burst = io_ctx->poll_each_burst;
662 	io_ctx->pending_polls = 0;
663 	io_ctx->residual_burst = 0;
664 	io_ctx->phase = 0;
665 	io_ctx->kern_frac = 0;
666 	bzero(&io_ctx->poll_start_t, sizeof(io_ctx->poll_start_t));
667 	bzero(&io_ctx->prev_t, sizeof(io_ctx->prev_t));
668 	crit_exit();
669 }
670 
671 static void
672 iopoll_init(int cpuid)
673 {
674 	KKASSERT(cpuid < netisr_ncpus);
675 
676 	rxpoll_context[cpuid] = iopoll_ctx_create(cpuid, IFPOLL_RX);
677 	txpoll_context[cpuid] = iopoll_ctx_create(cpuid, IFPOLL_TX);
678 }
679 
680 static struct iopoll_ctx *
681 iopoll_ctx_create(int cpuid, int poll_type)
682 {
683 	struct poll_comm *comm;
684 	struct iopoll_ctx *io_ctx;
685 	const char *poll_type_str;
686 	netisr_fn_t handler, more_handler;
687 
688 	KKASSERT(poll_type == IFPOLL_RX || poll_type == IFPOLL_TX);
689 
690 	/*
691 	 * Make sure that the tunables are in a sane state
692 	 */
693 	if (iopoll_burst_max < MIN_IOPOLL_BURST_MAX)
694 		iopoll_burst_max = MIN_IOPOLL_BURST_MAX;
695 	else if (iopoll_burst_max > MAX_IOPOLL_BURST_MAX)
696 		iopoll_burst_max = MAX_IOPOLL_BURST_MAX;
697 
698 	if (iopoll_each_burst > iopoll_burst_max)
699 		iopoll_each_burst = iopoll_burst_max;
700 
701 	comm = poll_common[cpuid];
702 
703 	/*
704 	 * Create the per-cpu polling context
705 	 */
706 	io_ctx = kmalloc(sizeof(*io_ctx), M_DEVBUF, M_WAITOK | M_ZERO);
707 
708 	io_ctx->poll_each_burst = iopoll_each_burst;
709 	io_ctx->poll_burst_max = iopoll_burst_max;
710 	io_ctx->user_frac = iopoll_user_frac;
711 	if (poll_type == IFPOLL_RX)
712 		io_ctx->pollhz = comm->pollhz;
713 	else
714 		io_ctx->pollhz = comm->pollhz / (comm->poll_txfrac + 1);
715 	io_ctx->poll_cpuid = cpuid;
716 	iopoll_reset_state(io_ctx);
717 
718 	if (poll_type == IFPOLL_RX) {
719 		handler = rxpoll_handler;
720 		more_handler = rxpollmore_handler;
721 	} else {
722 		handler = txpoll_handler;
723 		more_handler = txpollmore_handler;
724 	}
725 
726 	netmsg_init(&io_ctx->poll_netmsg, NULL, &netisr_adone_rport,
727 	    0, handler);
728 	io_ctx->poll_netmsg.lmsg.u.ms_resultp = io_ctx;
729 
730 	netmsg_init(&io_ctx->poll_more_netmsg, NULL, &netisr_adone_rport,
731 	    0, more_handler);
732 	io_ctx->poll_more_netmsg.lmsg.u.ms_resultp = io_ctx;
733 
734 	/*
735 	 * Initialize per-cpu sysctl nodes
736 	 */
737 	if (poll_type == IFPOLL_RX)
738 		poll_type_str = "rx";
739 	else
740 		poll_type_str = "tx";
741 
742 	sysctl_ctx_init(&io_ctx->poll_sysctl_ctx);
743 	io_ctx->poll_sysctl_tree = SYSCTL_ADD_NODE(&io_ctx->poll_sysctl_ctx,
744 				   SYSCTL_CHILDREN(comm->sysctl_tree),
745 				   OID_AUTO, poll_type_str, CTLFLAG_RD, 0, "");
746 	iopoll_add_sysctl(&io_ctx->poll_sysctl_ctx,
747 	    SYSCTL_CHILDREN(io_ctx->poll_sysctl_tree), io_ctx, poll_type);
748 
749 	return io_ctx;
750 }
751 
752 /*
753  * Hook from iopoll systimer.  Tries to schedule an iopoll, but keeps
754  * track of lost ticks due to the previous handler taking too long.
755  * Normally, this should not happen, because the polling handler should
756  * run for a short time.  However, in some cases (e.g. when there are
757  * changes in link status etc.) the drivers take a very long time
758  * (even on the order of milliseconds) to reset and reconfigure the
759  * device, causing apparent lost polls.
760  *
761  * The first part of the code is just for debugging purposes, and tries
762  * to count how often hardclock ticks are shorter than they should be,
763  * meaning either stray interrupts or delayed events.
764  *
765  * WARNING! called from fastint or IPI, the MP lock might not be held.
766  * NOTE: Caller should hold critical section.
767  */
768 static void
769 iopoll_clock(struct iopoll_ctx *io_ctx)
770 {
771 	union microtime_pcpu t;
772 	int delta;
773 
774 	KKASSERT(mycpuid == io_ctx->poll_cpuid);
775 
776 	if (io_ctx->poll_handlers == 0)
777 		return;
778 
779 	logpoll(ioclock_start);
780 
781 	microtime_pcpu_get(&t);
782 	delta = microtime_pcpu_diff(&io_ctx->prev_t, &t);
783 	if (delta * io_ctx->pollhz < 500000)
784 		io_ctx->short_ticks++;
785 	else
786 		io_ctx->prev_t = t;
787 
788 	if (io_ctx->pending_polls > 100) {
789 		/*
790 		 * Too much, assume it has stalled (not always true,
791 		 * see the comment above).
792 		 */
793 		io_ctx->stalled++;
794 		io_ctx->pending_polls = 0;
795 		io_ctx->phase = 0;
796 	}
797 
798 	if (io_ctx->phase <= 2) {
799 		if (io_ctx->phase != 0)
800 			io_ctx->suspect++;
801 		io_ctx->phase = 1;
802 		sched_iopoll(io_ctx);
803 		io_ctx->phase = 2;
804 	}
805 	if (io_ctx->pending_polls++ > 0)
806 		io_ctx->lost_polls++;
807 
808 	logpoll(ioclock_end);
809 }
810 
811 /*
812  * rxpoll_handler and txpoll_handler are scheduled by sched_iopoll when
813  * appropriate, typically once per polling systimer tick.
814  *
815  * Note that the message is replied to immediately in order to allow a new
816  * ISR to be scheduled in the handler.
817  */
818 static void
819 rxpoll_handler(netmsg_t msg)
820 {
821 	struct iopoll_ctx *io_ctx;
822 	struct thread *td = curthread;
823 	boolean_t direct = TRUE, crit;
824 	int i, cycles;
825 
826 	logpoll(rx_start);
827 
828 	io_ctx = msg->lmsg.u.ms_resultp;
829 	KKASSERT(&td->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));
830 
831 	crit = TRUE;
832 	crit_enter_quick(td);
833 
834 	/* Reply ASAP */
835 	netisr_replymsg(&msg->base, 0);
836 
837 	if (io_ctx->poll_handlers == 0) {
838 		crit_exit_quick(td);
839 		logpoll(rx_end);
840 		return;
841 	}
842 
843 	io_ctx->phase = 3;
844 	if (io_ctx->residual_burst == 0) {
845 		/* First call in this tick */
846 		microtime_pcpu_get(&io_ctx->poll_start_t);
847 		io_ctx->residual_burst = io_ctx->poll_burst;
848 	}
849 	cycles = (io_ctx->residual_burst < io_ctx->poll_each_burst) ?
850 		 io_ctx->residual_burst : io_ctx->poll_each_burst;
851 	io_ctx->residual_burst -= cycles;
852 
853 	for (i = 0; i < io_ctx->poll_handlers; i++) {
854 		const struct iopoll_rec *rec = &io_ctx->pr[i];
855 		struct ifnet *ifp = rec->ifp;
856 
857 		if (rec->serializer != NULL) {
858 			if (!crit) {
859 				crit = TRUE;
860 				crit_enter_quick(td);
861 			}
862 			if (__predict_false(
863 			    !lwkt_serialize_try(rec->serializer))) {
864 				/* RX serializer generally will not fail. */
865 				continue;
866 			}
867 		} else if (crit) {
868 			 * Exit the critical section if the RX polling
869 			 * Exit critical section, if the RX polling
870 			 * handler does not require serialization,
871 			 * i.e. RX polling is doing direct input.
872 			 */
873 			crit_exit_quick(td);
874 			crit = FALSE;
875 		}
876 
877 		if ((ifp->if_flags & IFF_IDIRECT) == 0) {
878 			direct = FALSE;
879 			KASSERT(rec->serializer != NULL,
880 			    ("rx polling handler is not serialized"));
881 		}
882 #ifdef INVARIANTS
883 		else {
884 			KASSERT(rec->serializer == NULL,
885 			    ("serialized direct input"));
886 		}
887 #endif
888 
889 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING | IFF_NPOLLING)) ==
890 		    (IFF_UP | IFF_RUNNING | IFF_NPOLLING))
891 			rec->poll_func(ifp, rec->arg, cycles);
892 
893 		if (rec->serializer != NULL)
894 			lwkt_serialize_exit(rec->serializer);
895 	}
896 
897 	if (crit) {
898 		/*
899 		 * Do a quick exit/enter to catch any higher-priority
900 		 * interrupt sources.
901 		 */
902 		crit_exit_quick(td);
903 	}
904 	crit_enter_quick(td);
905 
906 	io_ctx->phase = 4;
907 	sched_iopollmore(io_ctx, direct);
908 
909 	crit_exit_quick(td);
910 
911 	logpoll(rx_end);
912 }
913 
914 static void
915 txpoll_handler(netmsg_t msg)
916 {
917 	struct iopoll_ctx *io_ctx;
918 	struct thread *td = curthread;
919 	int i;
920 
921 	logpoll(tx_start);
922 
923 	io_ctx = msg->lmsg.u.ms_resultp;
924 	KKASSERT(&td->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));
925 
926 	crit_enter_quick(td);
927 
928 	/* Reply ASAP */
929 	netisr_replymsg(&msg->base, 0);
930 
931 	if (io_ctx->poll_handlers == 0) {
932 		crit_exit_quick(td);
933 		logpoll(tx_end);
934 		return;
935 	}
936 
937 	io_ctx->phase = 3;
938 
939 	for (i = 0; i < io_ctx->poll_handlers; i++) {
940 		const struct iopoll_rec *rec = &io_ctx->pr[i];
941 		struct ifnet *ifp = rec->ifp;
942 
943 		if (!lwkt_serialize_try(rec->serializer))
944 			continue;
945 
946 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING | IFF_NPOLLING)) ==
947 		    (IFF_UP | IFF_RUNNING | IFF_NPOLLING))
948 			rec->poll_func(ifp, rec->arg, -1);
949 
950 		lwkt_serialize_exit(rec->serializer);
951 	}
952 
953 	/*
954 	 * Do a quick exit/enter to catch any higher-priority
955 	 * interrupt sources.
956 	 */
957 	crit_exit_quick(td);
958 	crit_enter_quick(td);
959 
960 	io_ctx->phase = 4;
961 	sched_iopollmore(io_ctx, TRUE);
962 
963 	crit_exit_quick(td);
964 
965 	logpoll(tx_end);
966 }
967 
968 /*
969  * rxpollmore_handler and txpollmore_handler are called after other netisr's,
970  * possibly scheduling another rxpoll_handler or txpoll_handler call, or
971  * adapting the burst size for the next cycle.
972  *
973  * It is very bad to fetch large bursts of packets from a single card at once,
974  * because the burst could take a long time to be completely processed, leading
975  * to unfairness.  To reduce the problem, and also to account better for time
976  * spent in network-related processing, we split the burst in smaller chunks
977  * of fixed size, giving control to the other netisr's between chunks.  This
978  * helps in improving the fairness, reducing livelock and accounting for the
979  * work performed in low level handling.
980  */
981 static void
982 rxpollmore_handler(netmsg_t msg)
983 {
984 	struct thread *td = curthread;
985 	struct iopoll_ctx *io_ctx;
986 	union microtime_pcpu t;
987 	int kern_load;
988 	uint32_t pending_polls;
989 
990 	logpoll(rx_mstart);
991 
992 	io_ctx = msg->lmsg.u.ms_resultp;
993 	KKASSERT(&td->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));
994 
995 	crit_enter_quick(td);
996 
997 	/* Reply ASAP */
998 	netisr_replymsg(&msg->base, 0);
999 
1000 	if (io_ctx->poll_handlers == 0) {
1001 		crit_exit_quick(td);
1002 		logpoll(rx_mend);
1003 		return;
1004 	}
1005 
1006 	io_ctx->phase = 5;
1007 	if (io_ctx->residual_burst > 0) {
1008 		sched_iopoll(io_ctx);
1009 		crit_exit_quick(td);
1010 		/* Will run immediately on return, followed by netisrs */
1011 		logpoll(rx_mend);
1012 		return;
1013 	}
1014 
1015 	/* Here we can account for the time spent in iopoll's in this tick */
1016 	microtime_pcpu_get(&t);
1017 	kern_load = microtime_pcpu_diff(&io_ctx->poll_start_t, &t);
1018 	kern_load = (kern_load * io_ctx->pollhz) / 10000; /* 0..100 */
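	/*
	 * Example, assuming microtime_pcpu_diff() returns microseconds:
	 * with pollhz 6000 (one tick is ~167us), spending 50us polling
	 * yields (50 * 6000) / 10000 = 30, i.e. roughly 30% of the tick
	 * was consumed by kernel polling work.
	 */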
1019 	io_ctx->kern_frac = kern_load;
1020 
1021 	if (kern_load > (100 - io_ctx->user_frac)) {
1022 		/* Kernel load too high, try to decrease the burst size */
1023 		if (io_ctx->poll_burst > 1)
1024 			io_ctx->poll_burst--;
1025 	} else {
1026 		if (io_ctx->poll_burst < io_ctx->poll_burst_max)
1027 			io_ctx->poll_burst++;
1028 	}
1029 
1030 	io_ctx->pending_polls--;
1031 	pending_polls = io_ctx->pending_polls;
1032 
1033 	if (pending_polls == 0) {
1034 		/* We are done */
1035 		io_ctx->phase = 0;
1036 	} else {
1037 		/*
1038 		 * Last cycle was long and caused us to miss one or more
1039 		 * hardclock ticks.  Restart processing again, but slightly
1040 		 * reduce the burst size to prevent this from happening again.
1041 		 */
1042 		io_ctx->poll_burst -= (io_ctx->poll_burst / 8);
1043 		if (io_ctx->poll_burst < 1)
1044 			io_ctx->poll_burst = 1;
1045 		sched_iopoll(io_ctx);
1046 		io_ctx->phase = 6;
1047 	}
1048 
1049 	crit_exit_quick(td);
1050 
1051 	logpoll(rx_mend);
1052 }
1053 
1054 static void
1055 txpollmore_handler(netmsg_t msg)
1056 {
1057 	struct thread *td = curthread;
1058 	struct iopoll_ctx *io_ctx;
1059 	uint32_t pending_polls;
1060 
1061 	logpoll(tx_mstart);
1062 
1063 	io_ctx = msg->lmsg.u.ms_resultp;
1064 	KKASSERT(&td->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));
1065 
1066 	crit_enter_quick(td);
1067 
1068 	/* Reply ASAP */
1069 	netisr_replymsg(&msg->base, 0);
1070 
1071 	if (io_ctx->poll_handlers == 0) {
1072 		crit_exit_quick(td);
1073 		logpoll(tx_mend);
1074 		return;
1075 	}
1076 
1077 	io_ctx->phase = 5;
1078 
1079 	io_ctx->pending_polls--;
1080 	pending_polls = io_ctx->pending_polls;
1081 
1082 	if (pending_polls == 0) {
1083 		/* We are done */
1084 		io_ctx->phase = 0;
1085 	} else {
1086 		/*
1087 		 * Last cycle was long and caused us to miss one or more
1088 		 * hardclock ticks.  Restart processing again.
1089 		 */
1090 		sched_iopoll(io_ctx);
1091 		io_ctx->phase = 6;
1092 	}
1093 
1094 	crit_exit_quick(td);
1095 
1096 	logpoll(tx_mend);
1097 }
1098 
1099 static void
1100 iopoll_add_sysctl(struct sysctl_ctx_list *ctx, struct sysctl_oid_list *parent,
1101     struct iopoll_ctx *io_ctx, int poll_type)
1102 {
1103 	if (poll_type == IFPOLL_RX) {
1104 		SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "burst_max",
1105 		    CTLTYPE_UINT | CTLFLAG_RW, io_ctx, 0, sysctl_burstmax,
1106 		    "IU", "Max polling burst size");
1107 
1108 		SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "each_burst",
1109 		    CTLTYPE_UINT | CTLFLAG_RW, io_ctx, 0, sysctl_eachburst,
1110 		    "IU", "Max size of each burst");
1111 
1112 		SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "burst", CTLFLAG_RD,
1113 		    &io_ctx->poll_burst, 0, "Current polling burst size");
1114 
1115 		SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "user_frac", CTLFLAG_RW,
1116 		    &io_ctx->user_frac, 0, "Desired user fraction of cpu time");
1117 
1118 		SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "kern_frac", CTLFLAG_RD,
1119 		    &io_ctx->kern_frac, 0, "Kernel fraction of cpu time");
1120 
1121 		SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "residual_burst", CTLFLAG_RD,
1122 		    &io_ctx->residual_burst, 0,
1123 		    "# of residual cycles in burst");
1124 	}
1125 
1126 	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "phase", CTLFLAG_RD,
1127 	    &io_ctx->phase, 0, "Polling phase");
1128 
1129 	SYSCTL_ADD_ULONG(ctx, parent, OID_AUTO, "suspect", CTLFLAG_RW,
1130 	    &io_ctx->suspect, "Suspected events");
1131 
1132 	SYSCTL_ADD_ULONG(ctx, parent, OID_AUTO, "stalled", CTLFLAG_RW,
1133 	    &io_ctx->stalled, "Potential stalls");
1134 
1135 	SYSCTL_ADD_ULONG(ctx, parent, OID_AUTO, "short_ticks", CTLFLAG_RW,
1136 	    &io_ctx->short_ticks,
1137 	    "Hardclock ticks shorter than they should be");
1138 
1139 	SYSCTL_ADD_ULONG(ctx, parent, OID_AUTO, "lost_polls", CTLFLAG_RW,
1140 	    &io_ctx->lost_polls,
1141 	    "How many times we would have lost a poll tick");
1142 
1143 	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "pending_polls", CTLFLAG_RD,
1144 	    &io_ctx->pending_polls, 0, "Do we need to poll again");
1145 
1146 	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "handlers", CTLFLAG_RD,
1147 	    &io_ctx->poll_handlers, 0, "Number of registered poll handlers");
1148 }
1149 
1150 static void
1151 sysctl_burstmax_handler(netmsg_t nmsg)
1152 {
1153 	struct iopoll_sysctl_netmsg *msg = (struct iopoll_sysctl_netmsg *)nmsg;
1154 	struct iopoll_ctx *io_ctx;
1155 
1156 	io_ctx = msg->ctx;
1157 	KKASSERT(&curthread->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));
1158 
1159 	io_ctx->poll_burst_max = nmsg->lmsg.u.ms_result;
1160 	if (io_ctx->poll_each_burst > io_ctx->poll_burst_max)
1161 		io_ctx->poll_each_burst = io_ctx->poll_burst_max;
1162 	if (io_ctx->poll_burst > io_ctx->poll_burst_max)
1163 		io_ctx->poll_burst = io_ctx->poll_burst_max;
1164 	if (io_ctx->residual_burst > io_ctx->poll_burst_max)
1165 		io_ctx->residual_burst = io_ctx->poll_burst_max;
1166 
1167 	netisr_replymsg(&nmsg->base, 0);
1168 }
1169 
1170 static int
1171 sysctl_burstmax(SYSCTL_HANDLER_ARGS)
1172 {
1173 	struct iopoll_ctx *io_ctx = arg1;
1174 	struct iopoll_sysctl_netmsg msg;
1175 	uint32_t burst_max;
1176 	int error;
1177 
1178 	burst_max = io_ctx->poll_burst_max;
1179 	error = sysctl_handle_int(oidp, &burst_max, 0, req);
1180 	if (error || req->newptr == NULL)
1181 		return error;
1182 	if (burst_max < MIN_IOPOLL_BURST_MAX)
1183 		burst_max = MIN_IOPOLL_BURST_MAX;
1184 	else if (burst_max > MAX_IOPOLL_BURST_MAX)
1185 		burst_max = MAX_IOPOLL_BURST_MAX;
1186 
1187 	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
1188 		    0, sysctl_burstmax_handler);
1189 	msg.base.lmsg.u.ms_result = burst_max;
1190 	msg.ctx = io_ctx;
1191 
1192 	return netisr_domsg(&msg.base, io_ctx->poll_cpuid);
1193 }
1194 
1195 static void
1196 sysctl_eachburst_handler(netmsg_t nmsg)
1197 {
1198 	struct iopoll_sysctl_netmsg *msg = (struct iopoll_sysctl_netmsg *)nmsg;
1199 	struct iopoll_ctx *io_ctx;
1200 	uint32_t each_burst;
1201 
1202 	io_ctx = msg->ctx;
1203 	KKASSERT(&curthread->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));
1204 
1205 	each_burst = nmsg->lmsg.u.ms_result;
1206 	if (each_burst > io_ctx->poll_burst_max)
1207 		each_burst = io_ctx->poll_burst_max;
1208 	else if (each_burst < 1)
1209 		each_burst = 1;
1210 	io_ctx->poll_each_burst = each_burst;
1211 
1212 	netisr_replymsg(&nmsg->base, 0);
1213 }
1214 
1215 static int
1216 sysctl_eachburst(SYSCTL_HANDLER_ARGS)
1217 {
1218 	struct iopoll_ctx *io_ctx = arg1;
1219 	struct iopoll_sysctl_netmsg msg;
1220 	uint32_t each_burst;
1221 	int error;
1222 
1223 	each_burst = io_ctx->poll_each_burst;
1224 	error = sysctl_handle_int(oidp, &each_burst, 0, req);
1225 	if (error || req->newptr == NULL)
1226 		return error;
1227 
1228 	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
1229 		    0, sysctl_eachburst_handler);
1230 	msg.base.lmsg.u.ms_result = each_burst;
1231 	msg.ctx = io_ctx;
1232 
1233 	return netisr_domsg(&msg.base, io_ctx->poll_cpuid);
1234 }
1235 
1236 static int
1237 iopoll_register(struct ifnet *ifp, struct iopoll_ctx *io_ctx,
1238 		const struct ifpoll_io *io_rec)
1239 {
1240 	int error;
1241 
1242 	KKASSERT(&curthread->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));
1243 
1244 	if (io_rec->poll_func == NULL)
1245 		return 0;
1246 
1247 	/*
1248 	 * Check if there is room.
1249 	 */
1250 	if (io_ctx->poll_handlers >= IFPOLL_LIST_LEN) {
1251 		/*
1252 		 * List full, cannot register more entries.
1253 		 * This should never happen; if it does, it is probably a
1254 		 * broken driver trying to register multiple times. Checking
1255 		 * this at runtime is expensive, and won't solve the problem
1256 		 * anyway, so just report a few times and then give up.
1257 		 */
1258 		static int verbose = 10; /* XXX */
1259 		if (verbose > 0) {
1260 			kprintf("io poll handlers list full, "
1261 				"maybe a broken driver?\n");
1262 			verbose--;
1263 		}
1264 		error = ENOENT;
1265 	} else {
1266 		struct iopoll_rec *rec = &io_ctx->pr[io_ctx->poll_handlers];
1267 
1268 		rec->ifp = ifp;
1269 		rec->serializer = io_rec->serializer;
1270 		rec->arg = io_rec->arg;
1271 		rec->poll_func = io_rec->poll_func;
1272 
1273 		io_ctx->poll_handlers++;
1274 		error = 0;
1275 	}
1276 	return error;
1277 }
1278 
1279 static int
1280 iopoll_deregister(struct ifnet *ifp, struct iopoll_ctx *io_ctx)
1281 {
1282 	int i, error;
1283 
1284 	KKASSERT(&curthread->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));
1285 
1286 	for (i = 0; i < io_ctx->poll_handlers; ++i) {
1287 		if (io_ctx->pr[i].ifp == ifp) /* Found it */
1288 			break;
1289 	}
1290 	if (i == io_ctx->poll_handlers) {
1291 		error = ENOENT;
1292 	} else {
1293 		io_ctx->poll_handlers--;
1294 		if (i < io_ctx->poll_handlers) {
1295 			/* Last entry replaces this one. */
1296 			io_ctx->pr[i] = io_ctx->pr[io_ctx->poll_handlers];
1297 		}
1298 
1299 		if (io_ctx->poll_handlers == 0)
1300 			iopoll_reset_state(io_ctx);
1301 		error = 0;
1302 	}
1303 	return error;
1304 }
1305 
1306 static void
1307 poll_comm_init(int cpuid)
1308 {
1309 	struct poll_comm *comm;
1310 	char cpuid_str[16];
1311 
1312 	comm = kmalloc(sizeof(*comm), M_DEVBUF, M_WAITOK | M_ZERO);
1313 
1314 	if (ifpoll_stfrac < 1)
1315 		ifpoll_stfrac = IFPOLL_STFRAC_DEFAULT;
1316 	if (ifpoll_txfrac < 1)
1317 		ifpoll_txfrac = IFPOLL_TXFRAC_DEFAULT;
1318 
1319 	comm->poll_cpuid = cpuid;
1320 	comm->pollhz = poll_comm_pollhz_div(comm, ifpoll_pollhz);
1321 	comm->poll_stfrac = ifpoll_stfrac - 1;
1322 	comm->poll_txfrac = ifpoll_txfrac - 1;
1323 
1324 	ksnprintf(cpuid_str, sizeof(cpuid_str), "%d", cpuid);
1325 
1326 	sysctl_ctx_init(&comm->sysctl_ctx);
1327 	comm->sysctl_tree = SYSCTL_ADD_NODE(&comm->sysctl_ctx,
1328 			    SYSCTL_STATIC_CHILDREN(_net_ifpoll),
1329 			    OID_AUTO, cpuid_str, CTLFLAG_RD, 0, "");
1330 
1331 	SYSCTL_ADD_PROC(&comm->sysctl_ctx, SYSCTL_CHILDREN(comm->sysctl_tree),
1332 			OID_AUTO, "pollhz", CTLTYPE_INT | CTLFLAG_RW,
1333 			comm, 0, sysctl_pollhz,
1334 			"I", "Device polling frequency");
1335 
1336 	if (cpuid == 0) {
1337 		SYSCTL_ADD_PROC(&comm->sysctl_ctx,
1338 				SYSCTL_CHILDREN(comm->sysctl_tree),
1339 				OID_AUTO, "status_frac",
1340 				CTLTYPE_INT | CTLFLAG_RW,
1341 				comm, 0, sysctl_stfrac,
1342 				"I", "# of cycles before status is polled");
1343 	}
1344 	SYSCTL_ADD_PROC(&comm->sysctl_ctx, SYSCTL_CHILDREN(comm->sysctl_tree),
1345 			OID_AUTO, "tx_frac", CTLTYPE_INT | CTLFLAG_RW,
1346 			comm, 0, sysctl_txfrac,
1347 			"I", "# of cycles before TX is polled");
1348 
1349 	poll_common[cpuid] = comm;
1350 }
1351 
1352 static void
1353 poll_comm_start(int cpuid)
1354 {
1355 	struct poll_comm *comm = poll_common[cpuid];
1356 	systimer_func_t func;
1357 
1358 	/*
1359 	 * Initialize systimer
1360 	 */
1361 	if (cpuid == 0)
1362 		func = poll_comm_systimer0;
1363 	else
1364 		func = poll_comm_systimer;
1365 	systimer_init_periodic_nq(&comm->pollclock, func, comm, 1);
1366 }
1367 
1368 static void
1369 _poll_comm_systimer(struct poll_comm *comm)
1370 {
1371 	iopoll_clock(rxpoll_context[comm->poll_cpuid]);
1372 	if (comm->txfrac_count-- == 0) {
1373 		comm->txfrac_count = comm->poll_txfrac;
1374 		iopoll_clock(txpoll_context[comm->poll_cpuid]);
1375 	}
1376 }
1377 
1378 static void
1379 poll_comm_systimer0(systimer_t info, int in_ipi __unused,
1380     struct intrframe *frame __unused)
1381 {
1382 	struct poll_comm *comm = info->data;
1383 	globaldata_t gd = mycpu;
1384 
1385 	KKASSERT(comm->poll_cpuid == gd->gd_cpuid && gd->gd_cpuid == 0);
1386 
1387 	crit_enter_gd(gd);
1388 
1389 	if (comm->stfrac_count-- == 0) {
1390 		comm->stfrac_count = comm->poll_stfrac;
1391 		stpoll_clock(&stpoll_context);
1392 	}
1393 	_poll_comm_systimer(comm);
1394 
1395 	crit_exit_gd(gd);
1396 }
1397 
1398 static void
1399 poll_comm_systimer(systimer_t info, int in_ipi __unused,
1400     struct intrframe *frame __unused)
1401 {
1402 	struct poll_comm *comm = info->data;
1403 	globaldata_t gd = mycpu;
1404 
1405 	KKASSERT(comm->poll_cpuid == gd->gd_cpuid && gd->gd_cpuid != 0);
1406 
1407 	crit_enter_gd(gd);
1408 	_poll_comm_systimer(comm);
1409 	crit_exit_gd(gd);
1410 }
1411 
1412 static void
1413 poll_comm_adjust_pollhz(struct poll_comm *comm)
1414 {
1415 	uint32_t handlers;
1416 	int pollhz = 1;
1417 
1418 	KKASSERT(&curthread->td_msgport == netisr_cpuport(comm->poll_cpuid));
1419 
1420 	/*
1421 	 * If there is no polling handler registered, set systimer
1422 	 * frequency to the lowest value.  Polling systimer frequency
1423 	 * will be adjusted to the requested value, once there are
1424 	 * registered handlers.
1425 	 */
1426 	handlers = rxpoll_context[mycpuid]->poll_handlers +
1427 		   txpoll_context[mycpuid]->poll_handlers;
1428 	if (comm->poll_cpuid == 0)
1429 		handlers += stpoll_context.poll_handlers;
1430 	if (handlers)
1431 		pollhz = comm->pollhz;
1432 	systimer_adjust_periodic(&comm->pollclock, pollhz);
1433 }
1434 
1435 static int
1436 sysctl_pollhz(SYSCTL_HANDLER_ARGS)
1437 {
1438 	struct poll_comm *comm = arg1;
1439 	struct netmsg_base nmsg;
1440 	int error, phz;
1441 
1442 	phz = poll_comm_pollhz_conv(comm, comm->pollhz);
1443 	error = sysctl_handle_int(oidp, &phz, 0, req);
1444 	if (error || req->newptr == NULL)
1445 		return error;
1446 	if (phz <= 0)
1447 		return EINVAL;
1448 	else if (phz > IFPOLL_FREQ_MAX)
1449 		phz = IFPOLL_FREQ_MAX;
1450 
1451 	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
1452 		    0, sysctl_pollhz_handler);
1453 	nmsg.lmsg.u.ms_result = phz;
1454 
1455 	return netisr_domsg(&nmsg, comm->poll_cpuid);
1456 }
1457 
1458 static void
1459 sysctl_pollhz_handler(netmsg_t nmsg)
1460 {
1461 	struct poll_comm *comm = poll_common[mycpuid];
1462 
1463 	KKASSERT(&curthread->td_msgport == netisr_cpuport(comm->poll_cpuid));
1464 
1465 	/* Save polling frequency */
1466 	comm->pollhz = poll_comm_pollhz_div(comm, nmsg->lmsg.u.ms_result);
1467 
1468 	/*
1469 	 * Adjust cached pollhz
1470 	 */
1471 	rxpoll_context[mycpuid]->pollhz = comm->pollhz;
1472 	txpoll_context[mycpuid]->pollhz =
1473 	    comm->pollhz / (comm->poll_txfrac + 1);
1474 
1475 	/*
1476 	 * Adjust polling frequency
1477 	 */
1478 	poll_comm_adjust_pollhz(comm);
1479 
1480 	netisr_replymsg(&nmsg->base, 0);
1481 }
1482 
1483 static int
1484 sysctl_stfrac(SYSCTL_HANDLER_ARGS)
1485 {
1486 	struct poll_comm *comm = arg1;
1487 	struct netmsg_base nmsg;
1488 	int error, stfrac;
1489 
1490 	KKASSERT(comm->poll_cpuid == 0);
1491 
1492 	stfrac = comm->poll_stfrac + 1;
1493 	error = sysctl_handle_int(oidp, &stfrac, 0, req);
1494 	if (error || req->newptr == NULL)
1495 		return error;
1496 	if (stfrac < 1)
1497 		return EINVAL;
1498 
1499 	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
1500 		    0, sysctl_stfrac_handler);
1501 	nmsg.lmsg.u.ms_result = stfrac - 1;
1502 
1503 	return netisr_domsg(&nmsg, comm->poll_cpuid);
1504 }
1505 
1506 static void
1507 sysctl_stfrac_handler(netmsg_t nmsg)
1508 {
1509 	struct poll_comm *comm = poll_common[mycpuid];
1510 	int stfrac = nmsg->lmsg.u.ms_result;
1511 
1512 	KKASSERT(&curthread->td_msgport == netisr_cpuport(comm->poll_cpuid));
1513 
1514 	crit_enter();
1515 	comm->poll_stfrac = stfrac;
1516 	if (comm->stfrac_count > comm->poll_stfrac)
1517 		comm->stfrac_count = comm->poll_stfrac;
1518 	crit_exit();
1519 
1520 	netisr_replymsg(&nmsg->base, 0);
1521 }
1522 
1523 static int
1524 sysctl_txfrac(SYSCTL_HANDLER_ARGS)
1525 {
1526 	struct poll_comm *comm = arg1;
1527 	struct netmsg_base nmsg;
1528 	int error, txfrac;
1529 
1530 	txfrac = comm->poll_txfrac + 1;
1531 	error = sysctl_handle_int(oidp, &txfrac, 0, req);
1532 	if (error || req->newptr == NULL)
1533 		return error;
1534 	if (txfrac < 1)
1535 		return EINVAL;
1536 
1537 	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
1538 		    0, sysctl_txfrac_handler);
1539 	nmsg.lmsg.u.ms_result = txfrac - 1;
1540 
1541 	return netisr_domsg(&nmsg, comm->poll_cpuid);
1542 }
1543 
1544 static void
1545 sysctl_txfrac_handler(netmsg_t nmsg)
1546 {
1547 	struct poll_comm *comm = poll_common[mycpuid];
1548 	int txfrac = nmsg->lmsg.u.ms_result;
1549 
1550 	KKASSERT(&curthread->td_msgport == netisr_cpuport(comm->poll_cpuid));
1551 
1552 	crit_enter();
1553 	comm->poll_txfrac = txfrac;
1554 	if (comm->txfrac_count > comm->poll_txfrac)
1555 		comm->txfrac_count = comm->poll_txfrac;
1556 	crit_exit();
1557 
1558 	netisr_replymsg(&nmsg->base, 0);
1559 }
1560 
1561 void
1562 ifpoll_compat_setup(struct ifpoll_compat *cp,
1563     struct sysctl_ctx_list *sysctl_ctx,
1564     struct sysctl_oid *sysctl_tree,
1565     int unit, struct lwkt_serialize *slz)
1566 {
1567 	cp->ifpc_stcount = 0;
1568 	cp->ifpc_stfrac = ((poll_common[0]->poll_stfrac + 1) *
1569 	    howmany(IOPOLL_BURST_MAX, IOPOLL_EACH_BURST)) - 1;
1570 
1571 	cp->ifpc_cpuid = unit % netisr_ncpus;
1572 	cp->ifpc_serializer = slz;
1573 
1574 	if (sysctl_ctx != NULL && sysctl_tree != NULL) {
1575 		SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1576 		    OID_AUTO, "npoll_stfrac", CTLTYPE_INT | CTLFLAG_RW,
1577 		    cp, 0, sysctl_compat_npoll_stfrac, "I",
1578 		    "polling status frac");
1579 		SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1580 		    OID_AUTO, "npoll_cpuid", CTLTYPE_INT | CTLFLAG_RW,
1581 		    cp, 0, sysctl_compat_npoll_cpuid, "I",
1582 		    "polling cpuid");
1583 	}
1584 }
1585 
1586 static int
1587 sysctl_compat_npoll_stfrac(SYSCTL_HANDLER_ARGS)
1588 {
1589 	struct ifpoll_compat *cp = arg1;
1590 	int error = 0, stfrac;
1591 
1592 	lwkt_serialize_enter(cp->ifpc_serializer);
1593 
1594 	stfrac = cp->ifpc_stfrac + 1;
1595 	error = sysctl_handle_int(oidp, &stfrac, 0, req);
1596 	if (!error && req->newptr != NULL) {
1597 		if (stfrac < 1) {
1598 			error = EINVAL;
1599 		} else {
1600 			cp->ifpc_stfrac = stfrac - 1;
1601 			if (cp->ifpc_stcount > cp->ifpc_stfrac)
1602 				cp->ifpc_stcount = cp->ifpc_stfrac;
1603 		}
1604 	}
1605 
1606 	lwkt_serialize_exit(cp->ifpc_serializer);
1607 	return error;
1608 }
1609 
1610 static int
1611 sysctl_compat_npoll_cpuid(SYSCTL_HANDLER_ARGS)
1612 {
1613 	struct ifpoll_compat *cp = arg1;
1614 	int error = 0, cpuid;
1615 
1616 	lwkt_serialize_enter(cp->ifpc_serializer);
1617 
1618 	cpuid = cp->ifpc_cpuid;
1619 	error = sysctl_handle_int(oidp, &cpuid, 0, req);
1620 	if (!error && req->newptr != NULL) {
1621 		if (cpuid < 0 || cpuid >= netisr_ncpus)
1622 			error = EINVAL;
1623 		else
1624 			cp->ifpc_cpuid = cpuid;
1625 	}
1626 
1627 	lwkt_serialize_exit(cp->ifpc_serializer);
1628 	return error;
1629 }
1630