xref: /dragonfly/sys/net/if_poll.c (revision fae225dc)
1 /*-
2  * Copyright (c) 2001-2002 Luigi Rizzo
3  *
4  * Supported by: the Xorp Project (www.xorp.org)
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  * $FreeBSD: src/sys/kern/kern_poll.c,v 1.2.2.4 2002/06/27 23:26:33 luigi Exp $
28  */
29 
30 #include "opt_ifpoll.h"
31 
32 #include <sys/param.h>
33 #include <sys/kernel.h>
34 #include <sys/ktr.h>
35 #include <sys/malloc.h>
36 #include <sys/serialize.h>
37 #include <sys/socket.h>
38 #include <sys/sysctl.h>
39 #include <sys/microtime_pcpu.h>
40 
41 #include <sys/thread2.h>
42 #include <sys/msgport2.h>
43 
44 #include <net/if.h>
45 #include <net/if_poll.h>
46 #include <net/netmsg2.h>
47 #include <net/netisr2.h>
48 
49 /*
50  * Polling support for network device drivers.
51  *
52  * Drivers which support this feature try to register one status polling
53  * handler and several TX/RX polling handlers with the polling code.
54  * If the interface's if_npoll is called with a non-NULL second argument,
55  * a register operation is requested, else a deregister operation is
56  * requested.  If the requested operation is "register", the driver should
57  * set up the ifpoll_info passed in according to its own needs:
58  *   ifpoll_info.ifpi_status.status_func == NULL
59  *     No status polling handler will be installed on CPU(0)
60  *   ifpoll_info.ifpi_rx[n].poll_func == NULL
61  *     No RX polling handler will be installed on CPU(n)
62  *   ifpoll_info.ifpi_tx[n].poll_func == NULL
63  *     No TX polling handler will be installed on CPU(n)
64  *
65  * RX is polled at the specified polling frequency (net.ifpoll.X.pollhz).
66  * TX and status polling can be done at a lower frequency than RX
67  * (net.ifpoll.0.status_frac and net.ifpoll.X.tx_frac).  To avoid systimer
68  * staggering at high frequency, the RX systimer piggybacks TX and status
69  * polling (XXX).
70  *
71  * All of the registered polling handlers are called only if the interface
72  * is marked as 'IFF_RUNNING and IFF_NPOLLING'.  However, the interface's
73  * register and deregister function (ifnet.if_npoll) will be called even
74  * if the interface is not marked with 'IFF_RUNNING'.
75  *
76  * If registration is successful, the driver must disable interrupts,
77  * and further I/O is performed through the TX/RX polling handlers, which
78  * are invoked (at least once per clock tick) with 3 arguments: the "arg"
79  * passed at register time, a struct ifnet pointer, and a "count" limit.
80  * The registered serializer is held across each call to the related
81  * polling handler.
82  *
83  * The count limit specifies how much work the handler can do during the
84  * call -- typically this is the number of packets to be received, or
85  * transmitted, etc. (drivers are free to interpret this number, as long
86  * as the max time spent in the function grows roughly linearly with the
87  * count).
88  *
89  * A second variable controls the sharing of CPU between polling/kernel
90  * network processing, and other activities (typically userlevel tasks):
91  * net.ifpoll.X.rx.user_frac (between 0 and 100, default 50) sets the
92  * share of CPU allocated to user tasks.  CPU is allocated proportionally
93  * to the shares, by dynamically adjusting the "count" (poll_burst).
94  *
95  * Other parameters should be left at their default values.
96  * The following constraints hold:
97  *
98  *	1 <= poll_burst <= poll_burst_max
99  *	1 <= poll_each_burst <= poll_burst_max
100  *	MIN_IOPOLL_BURST_MAX <= poll_burst_max <= MAX_IOPOLL_BURST_MAX
101  */
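/*
 * As a rough sketch of the registration contract described above, a
 * single-queue driver's if_npoll method could look like the following.
 * This is only an illustration: the driver name, its softc layout and
 * the xx_* helpers are invented for this example, and a real driver
 * would fill in the ifpi_rx[]/ifpi_tx[] entries for the CPU(s) that
 * actually back its rings instead of hard-coding index 0.
 *
 *	static void
 *	xx_npoll(struct ifnet *ifp, struct ifpoll_info *info)
 *	{
 *		struct xx_softc *sc = ifp->if_softc;
 *
 *		if (info != NULL) {
 *			info->ifpi_status.status_func = xx_npoll_status;
 *			info->ifpi_status.serializer = &sc->sc_serialize;
 *
 *			info->ifpi_rx[0].poll_func = xx_npoll_rx;
 *			info->ifpi_rx[0].arg = sc;
 *			info->ifpi_rx[0].serializer = &sc->sc_rx_serialize;
 *
 *			info->ifpi_tx[0].poll_func = xx_npoll_tx;
 *			info->ifpi_tx[0].arg = sc;
 *			info->ifpi_tx[0].serializer = &sc->sc_tx_serialize;
 *
 *			if (ifp->if_flags & IFF_RUNNING)
 *				xx_disable_intr(sc);
 *		} else {
 *			if (ifp->if_flags & IFF_RUNNING)
 *				xx_enable_intr(sc);
 *		}
 *	}
 *
 * ifpoll_register() below calls if_npoll with such an info structure and
 * then runs ifpoll_register_handler() on each netisr cpu to install the
 * handlers filled in here.
 */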
102 
103 #define IFPOLL_LIST_LEN		128
104 #define IFPOLL_FREQ_MAX		30000
105 
106 #define MIN_IOPOLL_BURST_MAX	10
107 #define MAX_IOPOLL_BURST_MAX	5000
108 #define IOPOLL_BURST_MAX	250	/* good for 1000Mbit net and pollhz=6000 */
109 
110 #define IOPOLL_EACH_BURST	50
111 #define IOPOLL_USER_FRAC	50
112 
113 #define IFPOLL_FREQ_DEFAULT	6000
114 
115 #define IFPOLL_TXFRAC_DEFAULT	1	/* 1/1 of the pollhz */
116 #define IFPOLL_STFRAC_DEFAULT	120	/* 1/120 of the pollhz */
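/*
 * With the default polling frequency of IFPOLL_FREQ_DEFAULT (6000 Hz),
 * the above defaults poll TX on every tick (6000 Hz) and poll status
 * once every 120 ticks (6000 / 120 = 50 Hz).
 */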
117 
118 #define IFPOLL_RX		0x1
119 #define IFPOLL_TX		0x2
120 
121 struct iopoll_rec {
122 	struct lwkt_serialize	*serializer;
123 	struct ifnet		*ifp;
124 	void			*arg;
125 	ifpoll_iofn_t		poll_func;
126 };
127 
128 struct iopoll_ctx {
129 	union microtime_pcpu	prev_t;
130 	u_long			short_ticks;		/* statistics */
131 	u_long			lost_polls;		/* statistics */
132 	u_long			suspect;		/* statistics */
133 	u_long			stalled;		/* statistics */
134 	uint32_t		pending_polls;		/* state */
135 
136 	struct netmsg_base	poll_netmsg;
137 	struct netmsg_base	poll_more_netmsg;
138 
139 	int			poll_cpuid;
140 	int			pollhz;
141 	uint32_t		phase;			/* state */
142 	int			residual_burst;		/* state */
143 	uint32_t		poll_each_burst;	/* tunable */
144 	union microtime_pcpu	poll_start_t;		/* state */
145 
146 	uint32_t		poll_burst;		/* state */
147 	uint32_t		poll_burst_max;		/* tunable */
148 	uint32_t		user_frac;		/* tunable */
149 	uint32_t		kern_frac;		/* state */
150 
151 	uint32_t		poll_handlers; /* next free entry in pr[]. */
152 	struct iopoll_rec	pr[IFPOLL_LIST_LEN];
153 
154 	struct sysctl_ctx_list	poll_sysctl_ctx;
155 	struct sysctl_oid	*poll_sysctl_tree;
156 } __cachealign;
157 
158 struct poll_comm {
159 	struct systimer		pollclock;
160 	int			poll_cpuid;
161 
162 	int			stfrac_count;		/* state */
163 	int			poll_stfrac;		/* tunable */
164 
165 	int			txfrac_count;		/* state */
166 	int			poll_txfrac;		/* tunable */
167 
168 	int			pollhz;			/* tunable */
169 
170 	struct sysctl_ctx_list	sysctl_ctx;
171 	struct sysctl_oid	*sysctl_tree;
172 } __cachealign;
173 
174 struct stpoll_rec {
175 	struct lwkt_serialize	*serializer;
176 	struct ifnet		*ifp;
177 	ifpoll_stfn_t		status_func;
178 };
179 
180 struct stpoll_ctx {
181 	struct netmsg_base	poll_netmsg;
182 
183 	uint32_t		poll_handlers; /* next free entry in pr[]. */
184 	struct stpoll_rec	pr[IFPOLL_LIST_LEN];
185 
186 	struct sysctl_ctx_list	poll_sysctl_ctx;
187 	struct sysctl_oid	*poll_sysctl_tree;
188 } __cachealign;
189 
190 struct iopoll_sysctl_netmsg {
191 	struct netmsg_base	base;
192 	struct iopoll_ctx	*ctx;
193 };
194 
195 static void	ifpoll_init_pcpu(int);
196 static void	ifpoll_register_handler(netmsg_t);
197 static void	ifpoll_deregister_handler(netmsg_t);
198 
199 /*
200  * Status polling
201  */
202 static void	stpoll_init(void);
203 static void	stpoll_handler(netmsg_t);
204 static void	stpoll_clock(struct stpoll_ctx *);
205 static int	stpoll_register(struct ifnet *, const struct ifpoll_status *);
206 static int	stpoll_deregister(struct ifnet *);
207 
208 /*
209  * RX/TX polling
210  */
211 static struct iopoll_ctx *iopoll_ctx_create(int, int);
212 static void	iopoll_init(int);
213 static void	rxpoll_handler(netmsg_t);
214 static void	txpoll_handler(netmsg_t);
215 static void	rxpollmore_handler(netmsg_t);
216 static void	txpollmore_handler(netmsg_t);
217 static void	iopoll_clock(struct iopoll_ctx *);
218 static int	iopoll_register(struct ifnet *, struct iopoll_ctx *,
219 		    const struct ifpoll_io *);
220 static int	iopoll_deregister(struct ifnet *, struct iopoll_ctx *);
221 
222 static void	iopoll_add_sysctl(struct sysctl_ctx_list *,
223 		    struct sysctl_oid_list *, struct iopoll_ctx *, int);
224 static void	sysctl_burstmax_handler(netmsg_t);
225 static int	sysctl_burstmax(SYSCTL_HANDLER_ARGS);
226 static void	sysctl_eachburst_handler(netmsg_t);
227 static int	sysctl_eachburst(SYSCTL_HANDLER_ARGS);
228 
229 /*
230  * Common functions
231  */
232 static void	poll_comm_init(int);
233 static void	poll_comm_start(int);
234 static void	poll_comm_adjust_pollhz(struct poll_comm *);
235 static void	poll_comm_systimer0(systimer_t, int, struct intrframe *);
236 static void	poll_comm_systimer(systimer_t, int, struct intrframe *);
237 static void	sysctl_pollhz_handler(netmsg_t);
238 static void	sysctl_stfrac_handler(netmsg_t);
239 static void	sysctl_txfrac_handler(netmsg_t);
240 static int	sysctl_pollhz(SYSCTL_HANDLER_ARGS);
241 static int	sysctl_stfrac(SYSCTL_HANDLER_ARGS);
242 static int	sysctl_txfrac(SYSCTL_HANDLER_ARGS);
243 static int	sysctl_compat_npoll_stfrac(SYSCTL_HANDLER_ARGS);
244 static int	sysctl_compat_npoll_cpuid(SYSCTL_HANDLER_ARGS);
245 
246 static struct stpoll_ctx	stpoll_context;
247 static struct poll_comm		*poll_common[MAXCPU];
248 static struct iopoll_ctx	*rxpoll_context[MAXCPU];
249 static struct iopoll_ctx	*txpoll_context[MAXCPU];
250 
251 SYSCTL_NODE(_net, OID_AUTO, ifpoll, CTLFLAG_RW, 0,
252 	    "Network device polling parameters");
253 
254 static int	iopoll_burst_max = IOPOLL_BURST_MAX;
255 static int	iopoll_each_burst = IOPOLL_EACH_BURST;
256 static int	iopoll_user_frac = IOPOLL_USER_FRAC;
257 
258 static int	ifpoll_pollhz = IFPOLL_FREQ_DEFAULT;
259 static int	ifpoll_stfrac = IFPOLL_STFRAC_DEFAULT;
260 static int	ifpoll_txfrac = IFPOLL_TXFRAC_DEFAULT;
261 
262 TUNABLE_INT("net.ifpoll.burst_max", &iopoll_burst_max);
263 TUNABLE_INT("net.ifpoll.each_burst", &iopoll_each_burst);
264 TUNABLE_INT("net.ifpoll.user_frac", &iopoll_user_frac);
265 TUNABLE_INT("net.ifpoll.pollhz", &ifpoll_pollhz);
266 TUNABLE_INT("net.ifpoll.status_frac", &ifpoll_stfrac);
267 TUNABLE_INT("net.ifpoll.tx_frac", &ifpoll_txfrac);
268 
269 #if !defined(KTR_IF_POLL)
270 #define  KTR_IF_POLL		KTR_ALL
271 #endif
272 KTR_INFO_MASTER(if_poll);
273 KTR_INFO(KTR_IF_POLL, if_poll, rx_start, 0, "rx start");
274 KTR_INFO(KTR_IF_POLL, if_poll, rx_end, 1, "rx end");
275 KTR_INFO(KTR_IF_POLL, if_poll, tx_start, 2, "tx start");
276 KTR_INFO(KTR_IF_POLL, if_poll, tx_end, 3, "tx end");
277 KTR_INFO(KTR_IF_POLL, if_poll, rx_mstart, 4, "rx more start");
278 KTR_INFO(KTR_IF_POLL, if_poll, rx_mend, 5, "rx more end");
279 KTR_INFO(KTR_IF_POLL, if_poll, tx_mstart, 6, "tx more start");
280 KTR_INFO(KTR_IF_POLL, if_poll, tx_mend, 7, "tx more end");
281 KTR_INFO(KTR_IF_POLL, if_poll, ioclock_start, 8, "ioclock start");
282 KTR_INFO(KTR_IF_POLL, if_poll, ioclock_end, 9, "ioclock end");
283 #define logpoll(name)	KTR_LOG(if_poll_ ## name)
284 
285 #define IFPOLL_FREQ_ADJ(comm)	(((comm)->poll_cpuid * 3) % 50)
286 
287 static __inline int
288 poll_comm_pollhz_div(const struct poll_comm *comm, int pollhz)
289 {
290 	return pollhz + IFPOLL_FREQ_ADJ(comm);
291 }
292 
293 static __inline int
294 poll_comm_pollhz_conv(const struct poll_comm *comm, int pollhz)
295 {
296 	return pollhz - IFPOLL_FREQ_ADJ(comm);
297 }
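
/*
 * Note on the frequency skew above: IFPOLL_FREQ_ADJ offsets each cpu's
 * polling frequency by (poll_cpuid * 3) % 50 Hz, presumably so that the
 * per-cpu polling systimers do not all fire in lockstep.  With the
 * default pollhz of 6000, cpu0 polls at 6000 Hz, cpu1 at 6003 Hz, cpu2
 * at 6006 Hz, and so on.  poll_comm_pollhz_div() applies the skew when
 * a frequency is stored and poll_comm_pollhz_conv() removes it again
 * when the frequency is reported back through sysctl.
 */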
298 
299 static __inline void
300 ifpoll_sendmsg_oncpu(netmsg_t msg)
301 {
302 	if (msg->lmsg.ms_flags & MSGF_DONE)
303 		netisr_sendmsg_oncpu(&msg->base);
304 }
305 
306 static __inline void
307 sched_stpoll(struct stpoll_ctx *st_ctx)
308 {
309 	ifpoll_sendmsg_oncpu((netmsg_t)&st_ctx->poll_netmsg);
310 }
311 
312 static __inline void
313 sched_iopoll(struct iopoll_ctx *io_ctx)
314 {
315 	ifpoll_sendmsg_oncpu((netmsg_t)&io_ctx->poll_netmsg);
316 }
317 
318 static __inline void
319 sched_iopollmore(struct iopoll_ctx *io_ctx)
320 {
321 	ifpoll_sendmsg_oncpu((netmsg_t)&io_ctx->poll_more_netmsg);
322 }
323 
324 /*
325  * Initialize per-cpu polling(4) context.
326  */
327 static void
328 ifpoll_init_pcpu(int cpuid)
329 {
330 
331 	poll_comm_init(cpuid);
332 
333 	if (cpuid == 0)
334 		stpoll_init();
335 	iopoll_init(cpuid);
336 
337 	poll_comm_start(cpuid);
338 }
339 
340 static void
341 ifpoll_init_handler(netmsg_t msg)
342 {
343 	int cpu = mycpuid;
344 
345 	ifpoll_init_pcpu(cpu);
346 	netisr_forwardmsg(&msg->base, cpu + 1);
347 }
348 
349 static void
350 ifpoll_sysinit(void *dummy __unused)
351 {
352 	struct netmsg_base msg;
353 
354 	netmsg_init(&msg, NULL, &curthread->td_msgport, 0, ifpoll_init_handler);
355 	netisr_domsg(&msg, 0);
356 }
357 SYSINIT(ifpoll, SI_SUB_PRE_DRIVERS, SI_ORDER_ANY, ifpoll_sysinit, NULL);
358 
359 int
360 ifpoll_register(struct ifnet *ifp)
361 {
362 	struct ifpoll_info *info;
363 	struct netmsg_base nmsg;
364 	int error;
365 
366 	if (ifp->if_npoll == NULL) {
367 		/* Device does not support polling */
368 		return EOPNOTSUPP;
369 	}
370 
371 	info = kmalloc(sizeof(*info), M_TEMP, M_WAITOK | M_ZERO);
372 
373 	/*
374 	 * Attempt to register.  Interlock with IFF_NPOLLING.
375 	 */
376 
377 	ifnet_serialize_all(ifp);
378 
379 	if (ifp->if_flags & IFF_NPOLLING) {
380 		/* Already polling */
381 		ifnet_deserialize_all(ifp);
382 		kfree(info, M_TEMP);
383 		return EBUSY;
384 	}
385 
386 	info->ifpi_ifp = ifp;
387 
388 	ifp->if_flags |= IFF_NPOLLING;
389 	ifp->if_npoll(ifp, info);
390 
391 	ifnet_deserialize_all(ifp);
392 
393 	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
394 		    0, ifpoll_register_handler);
395 	nmsg.lmsg.u.ms_resultp = info;
396 
397 	error = netisr_domsg(&nmsg, 0);
398 	if (error) {
399 		if (ifpoll_deregister(ifp)) {
400 			if_printf(ifp, "ifpoll_register: "
401 				  "ifpoll_deregister failed!\n");
402 		}
403 	}
404 
405 	kfree(info, M_TEMP);
406 	return error;
407 }
408 
409 int
410 ifpoll_deregister(struct ifnet *ifp)
411 {
412 	struct netmsg_base nmsg;
413 	int error;
414 
415 	if (ifp->if_npoll == NULL)
416 		return EOPNOTSUPP;
417 
418 	ifnet_serialize_all(ifp);
419 
420 	if ((ifp->if_flags & IFF_NPOLLING) == 0) {
421 		ifnet_deserialize_all(ifp);
422 		return EINVAL;
423 	}
424 	ifp->if_flags &= ~IFF_NPOLLING;
425 
426 	ifnet_deserialize_all(ifp);
427 
428 	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
429 		    0, ifpoll_deregister_handler);
430 	nmsg.lmsg.u.ms_resultp = ifp;
431 
432 	error = netisr_domsg(&nmsg, 0);
433 	if (!error) {
434 		ifnet_serialize_all(ifp);
435 		ifp->if_npoll(ifp, NULL);
436 		ifnet_deserialize_all(ifp);
437 	}
438 	return error;
439 }
440 
441 static void
442 ifpoll_register_handler(netmsg_t nmsg)
443 {
444 	const struct ifpoll_info *info = nmsg->lmsg.u.ms_resultp;
445 	int cpuid = mycpuid;
446 	int error;
447 
448 	KKASSERT(cpuid < netisr_ncpus);
449 	KKASSERT(&curthread->td_msgport == netisr_cpuport(cpuid));
450 
451 	if (cpuid == 0) {
452 		error = stpoll_register(info->ifpi_ifp, &info->ifpi_status);
453 		if (error)
454 			goto failed;
455 	}
456 
457 	error = iopoll_register(info->ifpi_ifp, rxpoll_context[cpuid],
458 				&info->ifpi_rx[cpuid]);
459 	if (error)
460 		goto failed;
461 
462 	error = iopoll_register(info->ifpi_ifp, txpoll_context[cpuid],
463 				&info->ifpi_tx[cpuid]);
464 	if (error)
465 		goto failed;
466 
467 	/* Adjust polling frequency, after all registration is done */
468 	poll_comm_adjust_pollhz(poll_common[cpuid]);
469 
470 	netisr_forwardmsg(&nmsg->base, cpuid + 1);
471 	return;
472 failed:
473 	netisr_replymsg(&nmsg->base, error);
474 }
475 
476 static void
477 ifpoll_deregister_handler(netmsg_t nmsg)
478 {
479 	struct ifnet *ifp = nmsg->lmsg.u.ms_resultp;
480 	int cpuid = mycpuid;
481 
482 	KKASSERT(cpuid < netisr_ncpus);
483 	KKASSERT(&curthread->td_msgport == netisr_cpuport(cpuid));
484 
485 	/* Ignore errors */
486 	if (cpuid == 0)
487 		stpoll_deregister(ifp);
488 	iopoll_deregister(ifp, rxpoll_context[cpuid]);
489 	iopoll_deregister(ifp, txpoll_context[cpuid]);
490 
491 	/* Adjust polling frequency, after all deregistration is done */
492 	poll_comm_adjust_pollhz(poll_common[cpuid]);
493 
494 	netisr_forwardmsg(&nmsg->base, cpuid + 1);
495 }
496 
497 static void
498 stpoll_init(void)
499 {
500 	struct stpoll_ctx *st_ctx = &stpoll_context;
501 	const struct poll_comm *comm = poll_common[0];
502 
503 	sysctl_ctx_init(&st_ctx->poll_sysctl_ctx);
504 	st_ctx->poll_sysctl_tree = SYSCTL_ADD_NODE(&st_ctx->poll_sysctl_ctx,
505 				   SYSCTL_CHILDREN(comm->sysctl_tree),
506 				   OID_AUTO, "status", CTLFLAG_RD, 0, "");
507 
508 	SYSCTL_ADD_UINT(&st_ctx->poll_sysctl_ctx,
509 			SYSCTL_CHILDREN(st_ctx->poll_sysctl_tree),
510 			OID_AUTO, "handlers", CTLFLAG_RD,
511 			&st_ctx->poll_handlers, 0,
512 			"Number of registered status poll handlers");
513 
514 	netmsg_init(&st_ctx->poll_netmsg, NULL, &netisr_adone_rport,
515 		    0, stpoll_handler);
516 }
517 
518 /*
519  * stpoll_handler is scheduled by sched_stpoll when appropriate, typically
520  * once per polling systimer tick.
521  */
522 static void
523 stpoll_handler(netmsg_t msg)
524 {
525 	struct stpoll_ctx *st_ctx = &stpoll_context;
526 	struct thread *td = curthread;
527 	int i;
528 
529 	ASSERT_NETISR0;
530 
531 	crit_enter_quick(td);
532 
533 	/* Reply ASAP */
534 	netisr_replymsg(&msg->base, 0);
535 
536 	if (st_ctx->poll_handlers == 0) {
537 		crit_exit_quick(td);
538 		return;
539 	}
540 
541 	for (i = 0; i < st_ctx->poll_handlers; ++i) {
542 		const struct stpoll_rec *rec = &st_ctx->pr[i];
543 		struct ifnet *ifp = rec->ifp;
544 
545 		if (!lwkt_serialize_try(rec->serializer))
546 			continue;
547 
548 		if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
549 		    (IFF_RUNNING | IFF_NPOLLING))
550 			rec->status_func(ifp);
551 
552 		lwkt_serialize_exit(rec->serializer);
553 	}
554 
555 	crit_exit_quick(td);
556 }
557 
558 /*
559  * Hook from status poll systimer.  Tries to schedule a status poll.
560  * NOTE: Caller should hold critical section.
561  */
562 static void
563 stpoll_clock(struct stpoll_ctx *st_ctx)
564 {
565 	KKASSERT(mycpuid == 0);
566 
567 	if (st_ctx->poll_handlers == 0)
568 		return;
569 	sched_stpoll(st_ctx);
570 }
571 
572 static int
573 stpoll_register(struct ifnet *ifp, const struct ifpoll_status *st_rec)
574 {
575 	struct stpoll_ctx *st_ctx = &stpoll_context;
576 	int error;
577 
578 	ASSERT_NETISR0;
579 
580 	if (st_rec->status_func == NULL)
581 		return 0;
582 
583 	/*
584 	 * Check if there is room.
585 	 */
586 	if (st_ctx->poll_handlers >= IFPOLL_LIST_LEN) {
587 		/*
588 		 * List full, cannot register more entries.
589 		 * This should never happen; if it does, it is probably a
590 		 * broken driver trying to register multiple times. Checking
591 		 * this at runtime is expensive, and won't solve the problem
592 		 * anyway, so just report a few times and then give up.
593 		 */
594 		static int verbose = 10; /* XXX */
595 
596 		if (verbose > 0) {
597 			kprintf("status poll handlers list full, "
598 				"maybe a broken driver?\n");
599 			verbose--;
600 		}
601 		error = ENOENT;
602 	} else {
603 		struct stpoll_rec *rec = &st_ctx->pr[st_ctx->poll_handlers];
604 
605 		rec->ifp = ifp;
606 		rec->serializer = st_rec->serializer;
607 		rec->status_func = st_rec->status_func;
608 
609 		st_ctx->poll_handlers++;
610 		error = 0;
611 	}
612 	return error;
613 }
614 
615 static int
616 stpoll_deregister(struct ifnet *ifp)
617 {
618 	struct stpoll_ctx *st_ctx = &stpoll_context;
619 	int i, error;
620 
621 	ASSERT_NETISR0;
622 
623 	for (i = 0; i < st_ctx->poll_handlers; ++i) {
624 		if (st_ctx->pr[i].ifp == ifp) /* Found it */
625 			break;
626 	}
627 	if (i == st_ctx->poll_handlers) {
628 		error = ENOENT;
629 	} else {
630 		st_ctx->poll_handlers--;
631 		if (i < st_ctx->poll_handlers) {
632 			/* Last entry replaces this one. */
633 			st_ctx->pr[i] = st_ctx->pr[st_ctx->poll_handlers];
634 		}
635 		error = 0;
636 	}
637 	return error;
638 }
639 
640 static __inline void
641 iopoll_reset_state(struct iopoll_ctx *io_ctx)
642 {
643 	crit_enter();
644 	io_ctx->poll_burst = io_ctx->poll_each_burst;
645 	io_ctx->pending_polls = 0;
646 	io_ctx->residual_burst = 0;
647 	io_ctx->phase = 0;
648 	io_ctx->kern_frac = 0;
649 	bzero(&io_ctx->poll_start_t, sizeof(io_ctx->poll_start_t));
650 	bzero(&io_ctx->prev_t, sizeof(io_ctx->prev_t));
651 	crit_exit();
652 }
653 
654 static void
655 iopoll_init(int cpuid)
656 {
657 	KKASSERT(cpuid < netisr_ncpus);
658 
659 	rxpoll_context[cpuid] = iopoll_ctx_create(cpuid, IFPOLL_RX);
660 	txpoll_context[cpuid] = iopoll_ctx_create(cpuid, IFPOLL_TX);
661 }
662 
663 static struct iopoll_ctx *
664 iopoll_ctx_create(int cpuid, int poll_type)
665 {
666 	struct poll_comm *comm;
667 	struct iopoll_ctx *io_ctx;
668 	const char *poll_type_str;
669 	netisr_fn_t handler, more_handler;
670 
671 	KKASSERT(poll_type == IFPOLL_RX || poll_type == IFPOLL_TX);
672 
673 	/*
674 	 * Make sure that the tunables are in a sane state
675 	 */
676 	if (iopoll_burst_max < MIN_IOPOLL_BURST_MAX)
677 		iopoll_burst_max = MIN_IOPOLL_BURST_MAX;
678 	else if (iopoll_burst_max > MAX_IOPOLL_BURST_MAX)
679 		iopoll_burst_max = MAX_IOPOLL_BURST_MAX;
680 
681 	if (iopoll_each_burst > iopoll_burst_max)
682 		iopoll_each_burst = iopoll_burst_max;
683 
684 	comm = poll_common[cpuid];
685 
686 	/*
687 	 * Create the per-cpu polling context
688 	 */
689 	io_ctx = kmalloc_cachealign(sizeof(*io_ctx), M_DEVBUF,
690 	    M_WAITOK | M_ZERO);
691 
692 	io_ctx->poll_each_burst = iopoll_each_burst;
693 	io_ctx->poll_burst_max = iopoll_burst_max;
694 	io_ctx->user_frac = iopoll_user_frac;
695 	if (poll_type == IFPOLL_RX)
696 		io_ctx->pollhz = comm->pollhz;
697 	else
698 		io_ctx->pollhz = comm->pollhz / (comm->poll_txfrac + 1);
699 	io_ctx->poll_cpuid = cpuid;
700 	iopoll_reset_state(io_ctx);
701 
702 	if (poll_type == IFPOLL_RX) {
703 		handler = rxpoll_handler;
704 		more_handler = rxpollmore_handler;
705 	} else {
706 		handler = txpoll_handler;
707 		more_handler = txpollmore_handler;
708 	}
709 
710 	netmsg_init(&io_ctx->poll_netmsg, NULL, &netisr_adone_rport,
711 	    0, handler);
712 	io_ctx->poll_netmsg.lmsg.u.ms_resultp = io_ctx;
713 
714 	netmsg_init(&io_ctx->poll_more_netmsg, NULL, &netisr_adone_rport,
715 	    0, more_handler);
716 	io_ctx->poll_more_netmsg.lmsg.u.ms_resultp = io_ctx;
717 
718 	/*
719 	 * Initialize per-cpu sysctl nodes
720 	 */
721 	if (poll_type == IFPOLL_RX)
722 		poll_type_str = "rx";
723 	else
724 		poll_type_str = "tx";
725 
726 	sysctl_ctx_init(&io_ctx->poll_sysctl_ctx);
727 	io_ctx->poll_sysctl_tree = SYSCTL_ADD_NODE(&io_ctx->poll_sysctl_ctx,
728 				   SYSCTL_CHILDREN(comm->sysctl_tree),
729 				   OID_AUTO, poll_type_str, CTLFLAG_RD, 0, "");
730 	iopoll_add_sysctl(&io_ctx->poll_sysctl_ctx,
731 	    SYSCTL_CHILDREN(io_ctx->poll_sysctl_tree), io_ctx, poll_type);
732 
733 	return io_ctx;
734 }
735 
736 /*
737  * Hook from iopoll systimer.  Tries to schedule an iopoll, but keeps
738  * track of lost ticks due to the previous handler taking too long.
739  * Normally, this should not happen, because a polling handler should
740  * run for a short time.  However, in some cases (e.g. when there are
741  * changes in link status etc.) the drivers take a very long time
742  * (even on the order of milliseconds) to reset and reconfigure the
743  * device, causing apparent lost polls.
744  *
745  * The first part of the code is just for debugging purposes, and tries
746  * to count how often hardclock ticks are shorter than they should be,
747  * meaning either stray interrupts or delayed events.
748  *
749  * WARNING! called from fastint or IPI, the MP lock might not be held.
750  * NOTE: Caller should hold critical section.
751  */
752 static void
753 iopoll_clock(struct iopoll_ctx *io_ctx)
754 {
755 	union microtime_pcpu t;
756 	int delta;
757 
758 	KKASSERT(mycpuid == io_ctx->poll_cpuid);
759 
760 	if (io_ctx->poll_handlers == 0)
761 		return;
762 
763 	logpoll(ioclock_start);
764 
765 	microtime_pcpu_get(&t);
766 	delta = microtime_pcpu_diff(&io_ctx->prev_t, &t);
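	/*
	 * The pcpu time diff is in microseconds; a tick arriving less than
	 * half a poll period (500000 / pollhz usec) after the previous one
	 * is counted as short and prev_t is left unchanged.
	 */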
767 	if (delta * io_ctx->pollhz < 500000)
768 		io_ctx->short_ticks++;
769 	else
770 		io_ctx->prev_t = t;
771 
772 	if (io_ctx->pending_polls > 100) {
773 		/*
774 		 * Too much, assume it has stalled (not always true,
775 		 * see the comment above).
776 		 */
777 		io_ctx->stalled++;
778 		io_ctx->pending_polls = 0;
779 		io_ctx->phase = 0;
780 	}
781 
782 	if (io_ctx->phase <= 2) {
783 		if (io_ctx->phase != 0)
784 			io_ctx->suspect++;
785 		io_ctx->phase = 1;
786 		sched_iopoll(io_ctx);
787 		io_ctx->phase = 2;
788 	}
789 	if (io_ctx->pending_polls++ > 0)
790 		io_ctx->lost_polls++;
791 
792 	logpoll(ioclock_end);
793 }
794 
795 /*
796  * rxpoll_handler and txpoll_handler are scheduled by sched_iopoll when
797  * appropriate, typically once per polling systimer tick.
798  *
799  * Note that the message is replied immediately in order to allow a new
800  * ISR to be scheduled in the handler.
801  */
802 static void
803 rxpoll_handler(netmsg_t msg)
804 {
805 	struct iopoll_ctx *io_ctx;
806 	struct thread *td = curthread;
807 	int i, cycles;
808 
809 	logpoll(rx_start);
810 
811 	io_ctx = msg->lmsg.u.ms_resultp;
812 	KKASSERT(&td->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));
813 
814 	crit_enter_quick(td);
815 
816 	/* Reply ASAP */
817 	netisr_replymsg(&msg->base, 0);
818 
819 	if (io_ctx->poll_handlers == 0) {
820 		crit_exit_quick(td);
821 		logpoll(rx_end);
822 		return;
823 	}
824 
825 	io_ctx->phase = 3;
826 	if (io_ctx->residual_burst == 0) {
827 		/* First call in this tick */
828 		microtime_pcpu_get(&io_ctx->poll_start_t);
829 		io_ctx->residual_burst = io_ctx->poll_burst;
830 	}
831 	cycles = (io_ctx->residual_burst < io_ctx->poll_each_burst) ?
832 		 io_ctx->residual_burst : io_ctx->poll_each_burst;
833 	io_ctx->residual_burst -= cycles;
834 
835 	for (i = 0; i < io_ctx->poll_handlers; i++) {
836 		const struct iopoll_rec *rec = &io_ctx->pr[i];
837 		struct ifnet *ifp = rec->ifp;
838 
839 		if (!lwkt_serialize_try(rec->serializer))
840 			continue;
841 
842 		if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
843 		    (IFF_RUNNING | IFF_NPOLLING))
844 			rec->poll_func(ifp, rec->arg, cycles);
845 
846 		lwkt_serialize_exit(rec->serializer);
847 	}
848 
849 	/*
850 	 * Do a quick exit/enter to catch any higher-priority
851 	 * interrupt sources.
852 	 */
853 	crit_exit_quick(td);
854 	crit_enter_quick(td);
855 
856 	sched_iopollmore(io_ctx);
857 	io_ctx->phase = 4;
858 
859 	crit_exit_quick(td);
860 
861 	logpoll(rx_end);
862 }
863 
864 static void
865 txpoll_handler(netmsg_t msg)
866 {
867 	struct iopoll_ctx *io_ctx;
868 	struct thread *td = curthread;
869 	int i;
870 
871 	logpoll(tx_start);
872 
873 	io_ctx = msg->lmsg.u.ms_resultp;
874 	KKASSERT(&td->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));
875 
876 	crit_enter_quick(td);
877 
878 	/* Reply ASAP */
879 	netisr_replymsg(&msg->base, 0);
880 
881 	if (io_ctx->poll_handlers == 0) {
882 		crit_exit_quick(td);
883 		logpoll(tx_end);
884 		return;
885 	}
886 
887 	io_ctx->phase = 3;
888 
889 	for (i = 0; i < io_ctx->poll_handlers; i++) {
890 		const struct iopoll_rec *rec = &io_ctx->pr[i];
891 		struct ifnet *ifp = rec->ifp;
892 
893 		if (!lwkt_serialize_try(rec->serializer))
894 			continue;
895 
896 		if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
897 		    (IFF_RUNNING | IFF_NPOLLING))
898 			rec->poll_func(ifp, rec->arg, -1);
899 
900 		lwkt_serialize_exit(rec->serializer);
901 	}
902 
903 	/*
904 	 * Do a quick exit/enter to catch any higher-priority
905 	 * interrupt sources.
906 	 */
907 	crit_exit_quick(td);
908 	crit_enter_quick(td);
909 
910 	sched_iopollmore(io_ctx);
911 	io_ctx->phase = 4;
912 
913 	crit_exit_quick(td);
914 
915 	logpoll(tx_end);
916 }
917 
918 /*
919  * rxpollmore_handler and txpollmore_handler are called after other netisrs,
920  * possibly scheduling another rxpoll_handler or txpoll_handler call, or
921  * adapting the burst size for the next cycle.
922  *
923  * It is very bad to fetch large bursts of packets from a single card at once,
924  * because the burst could take a long time to be completely processed, leading
925  * to unfairness.  To reduce the problem, and also to better account for the
926  * time spent in network-related processing, we split the burst into smaller
927  * chunks of fixed size, giving control to the other netisrs between chunks.
928  * This helps improve fairness, reduce livelock and account for the work
929  * performed in low-level handling.
930  */
931 static void
932 rxpollmore_handler(netmsg_t msg)
933 {
934 	struct thread *td = curthread;
935 	struct iopoll_ctx *io_ctx;
936 	union microtime_pcpu t;
937 	int kern_load;
938 	uint32_t pending_polls;
939 
940 	logpoll(rx_mstart);
941 
942 	io_ctx = msg->lmsg.u.ms_resultp;
943 	KKASSERT(&td->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));
944 
945 	crit_enter_quick(td);
946 
947 	/* Reply ASAP */
948 	netisr_replymsg(&msg->base, 0);
949 
950 	if (io_ctx->poll_handlers == 0) {
951 		crit_exit_quick(td);
952 		logpoll(rx_mend);
953 		return;
954 	}
955 
956 	io_ctx->phase = 5;
957 	if (io_ctx->residual_burst > 0) {
958 		sched_iopoll(io_ctx);
959 		crit_exit_quick(td);
960 		/* Will run immediately on return, followed by netisrs */
961 		logpoll(rx_mend);
962 		return;
963 	}
964 
965 	/* Here we can account for the time spent in iopolls during this tick */
966 	microtime_pcpu_get(&t);
967 	kern_load = microtime_pcpu_diff(&io_ctx->poll_start_t, &t);
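	/*
	 * Scale the elapsed polling time (in microseconds) to a percentage
	 * of the poll tick: usec * pollhz / 1e6 is the fraction of a tick,
	 * and the extra factor of 100 for a percentage folds into the
	 * single divide by 10000 below.
	 */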
968 	kern_load = (kern_load * io_ctx->pollhz) / 10000; /* 0..100 */
969 	io_ctx->kern_frac = kern_load;
970 
971 	if (kern_load > (100 - io_ctx->user_frac)) {
972 		/* Try to decrease the burst size */
973 		if (io_ctx->poll_burst > 1)
974 			io_ctx->poll_burst--;
975 	} else {
976 		if (io_ctx->poll_burst < io_ctx->poll_burst_max)
977 			io_ctx->poll_burst++;
978 	}
979 
980 	io_ctx->pending_polls--;
981 	pending_polls = io_ctx->pending_polls;
982 
983 	if (pending_polls == 0) {
984 		/* We are done */
985 		io_ctx->phase = 0;
986 	} else {
987 		/*
988 		 * Last cycle was long and caused us to miss one or more
989 		 * hardclock ticks.  Restart processing, but slightly reduce
990 		 * the burst size to prevent this from happening again.
991 		 */
992 		io_ctx->poll_burst -= (io_ctx->poll_burst / 8);
993 		if (io_ctx->poll_burst < 1)
994 			io_ctx->poll_burst = 1;
995 		sched_iopoll(io_ctx);
996 		io_ctx->phase = 6;
997 	}
998 
999 	crit_exit_quick(td);
1000 
1001 	logpoll(rx_mend);
1002 }
1003 
1004 static void
1005 txpollmore_handler(netmsg_t msg)
1006 {
1007 	struct thread *td = curthread;
1008 	struct iopoll_ctx *io_ctx;
1009 	uint32_t pending_polls;
1010 
1011 	logpoll(tx_mstart);
1012 
1013 	io_ctx = msg->lmsg.u.ms_resultp;
1014 	KKASSERT(&td->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));
1015 
1016 	crit_enter_quick(td);
1017 
1018 	/* Reply ASAP */
1019 	netisr_replymsg(&msg->base, 0);
1020 
1021 	if (io_ctx->poll_handlers == 0) {
1022 		crit_exit_quick(td);
1023 		logpoll(tx_mend);
1024 		return;
1025 	}
1026 
1027 	io_ctx->phase = 5;
1028 
1029 	io_ctx->pending_polls--;
1030 	pending_polls = io_ctx->pending_polls;
1031 
1032 	if (pending_polls == 0) {
1033 		/* We are done */
1034 		io_ctx->phase = 0;
1035 	} else {
1036 		/*
1037 		 * Last cycle was long and caused us to miss one or more
1038 		 * hardclock ticks.  Restart processing.
1039 		 */
1040 		sched_iopoll(io_ctx);
1041 		io_ctx->phase = 6;
1042 	}
1043 
1044 	crit_exit_quick(td);
1045 
1046 	logpoll(tx_mend);
1047 }
1048 
1049 static void
1050 iopoll_add_sysctl(struct sysctl_ctx_list *ctx, struct sysctl_oid_list *parent,
1051     struct iopoll_ctx *io_ctx, int poll_type)
1052 {
1053 	if (poll_type == IFPOLL_RX) {
1054 		SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "burst_max",
1055 		    CTLTYPE_UINT | CTLFLAG_RW, io_ctx, 0, sysctl_burstmax,
1056 		    "IU", "Max Polling burst size");
1057 
1058 		SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "each_burst",
1059 		    CTLTYPE_UINT | CTLFLAG_RW, io_ctx, 0, sysctl_eachburst,
1060 		    "IU", "Max size of each burst");
1061 
1062 		SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "burst", CTLFLAG_RD,
1063 		    &io_ctx->poll_burst, 0, "Current polling burst size");
1064 
1065 		SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "user_frac", CTLFLAG_RW,
1066 		    &io_ctx->user_frac, 0, "Desired user fraction of cpu time");
1067 
1068 		SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "kern_frac", CTLFLAG_RD,
1069 		    &io_ctx->kern_frac, 0, "Kernel fraction of cpu time");
1070 
1071 		SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "residual_burst", CTLFLAG_RD,
1072 		    &io_ctx->residual_burst, 0,
1073 		    "# of residual cycles in burst");
1074 	}
1075 
1076 	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "phase", CTLFLAG_RD,
1077 	    &io_ctx->phase, 0, "Polling phase");
1078 
1079 	SYSCTL_ADD_ULONG(ctx, parent, OID_AUTO, "suspect", CTLFLAG_RW,
1080 	    &io_ctx->suspect, "Suspected events");
1081 
1082 	SYSCTL_ADD_ULONG(ctx, parent, OID_AUTO, "stalled", CTLFLAG_RW,
1083 	    &io_ctx->stalled, "Potential stalls");
1084 
1085 	SYSCTL_ADD_ULONG(ctx, parent, OID_AUTO, "short_ticks", CTLFLAG_RW,
1086 	    &io_ctx->short_ticks,
1087 	    "Hardclock ticks shorter than they should be");
1088 
1089 	SYSCTL_ADD_ULONG(ctx, parent, OID_AUTO, "lost_polls", CTLFLAG_RW,
1090 	    &io_ctx->lost_polls,
1091 	    "How many times we would have lost a poll tick");
1092 
1093 	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "pending_polls", CTLFLAG_RD,
1094 	    &io_ctx->pending_polls, 0, "Do we need to poll again");
1095 
1096 	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "handlers", CTLFLAG_RD,
1097 	    &io_ctx->poll_handlers, 0, "Number of registered poll handlers");
1098 }
1099 
1100 static void
1101 sysctl_burstmax_handler(netmsg_t nmsg)
1102 {
1103 	struct iopoll_sysctl_netmsg *msg = (struct iopoll_sysctl_netmsg *)nmsg;
1104 	struct iopoll_ctx *io_ctx;
1105 
1106 	io_ctx = msg->ctx;
1107 	KKASSERT(&curthread->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));
1108 
1109 	io_ctx->poll_burst_max = nmsg->lmsg.u.ms_result;
1110 	if (io_ctx->poll_each_burst > io_ctx->poll_burst_max)
1111 		io_ctx->poll_each_burst = io_ctx->poll_burst_max;
1112 	if (io_ctx->poll_burst > io_ctx->poll_burst_max)
1113 		io_ctx->poll_burst = io_ctx->poll_burst_max;
1114 	if (io_ctx->residual_burst > io_ctx->poll_burst_max)
1115 		io_ctx->residual_burst = io_ctx->poll_burst_max;
1116 
1117 	netisr_replymsg(&nmsg->base, 0);
1118 }
1119 
1120 static int
1121 sysctl_burstmax(SYSCTL_HANDLER_ARGS)
1122 {
1123 	struct iopoll_ctx *io_ctx = arg1;
1124 	struct iopoll_sysctl_netmsg msg;
1125 	uint32_t burst_max;
1126 	int error;
1127 
1128 	burst_max = io_ctx->poll_burst_max;
1129 	error = sysctl_handle_int(oidp, &burst_max, 0, req);
1130 	if (error || req->newptr == NULL)
1131 		return error;
1132 	if (burst_max < MIN_IOPOLL_BURST_MAX)
1133 		burst_max = MIN_IOPOLL_BURST_MAX;
1134 	else if (burst_max > MAX_IOPOLL_BURST_MAX)
1135 		burst_max = MAX_IOPOLL_BURST_MAX;
1136 
1137 	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
1138 		    0, sysctl_burstmax_handler);
1139 	msg.base.lmsg.u.ms_result = burst_max;
1140 	msg.ctx = io_ctx;
1141 
1142 	return netisr_domsg(&msg.base, io_ctx->poll_cpuid);
1143 }
1144 
1145 static void
1146 sysctl_eachburst_handler(netmsg_t nmsg)
1147 {
1148 	struct iopoll_sysctl_netmsg *msg = (struct iopoll_sysctl_netmsg *)nmsg;
1149 	struct iopoll_ctx *io_ctx;
1150 	uint32_t each_burst;
1151 
1152 	io_ctx = msg->ctx;
1153 	KKASSERT(&curthread->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));
1154 
1155 	each_burst = nmsg->lmsg.u.ms_result;
1156 	if (each_burst > io_ctx->poll_burst_max)
1157 		each_burst = io_ctx->poll_burst_max;
1158 	else if (each_burst < 1)
1159 		each_burst = 1;
1160 	io_ctx->poll_each_burst = each_burst;
1161 
1162 	netisr_replymsg(&nmsg->base, 0);
1163 }
1164 
1165 static int
1166 sysctl_eachburst(SYSCTL_HANDLER_ARGS)
1167 {
1168 	struct iopoll_ctx *io_ctx = arg1;
1169 	struct iopoll_sysctl_netmsg msg;
1170 	uint32_t each_burst;
1171 	int error;
1172 
1173 	each_burst = io_ctx->poll_each_burst;
1174 	error = sysctl_handle_int(oidp, &each_burst, 0, req);
1175 	if (error || req->newptr == NULL)
1176 		return error;
1177 
1178 	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
1179 		    0, sysctl_eachburst_handler);
1180 	msg.base.lmsg.u.ms_result = each_burst;
1181 	msg.ctx = io_ctx;
1182 
1183 	return netisr_domsg(&msg.base, io_ctx->poll_cpuid);
1184 }
1185 
1186 static int
1187 iopoll_register(struct ifnet *ifp, struct iopoll_ctx *io_ctx,
1188 		const struct ifpoll_io *io_rec)
1189 {
1190 	int error;
1191 
1192 	KKASSERT(&curthread->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));
1193 
1194 	if (io_rec->poll_func == NULL)
1195 		return 0;
1196 
1197 	/*
1198 	 * Check if there is room.
1199 	 */
1200 	if (io_ctx->poll_handlers >= IFPOLL_LIST_LEN) {
1201 		/*
1202 		 * List full, cannot register more entries.
1203 		 * This should never happen; if it does, it is probably a
1204 		 * broken driver trying to register multiple times. Checking
1205 		 * this at runtime is expensive, and won't solve the problem
1206 		 * anyway, so just report a few times and then give up.
1207 		 */
1208 		static int verbose = 10; /* XXX */
1209 		if (verbose > 0) {
1210 			kprintf("io poll handlers list full, "
1211 				"maybe a broken driver?\n");
1212 			verbose--;
1213 		}
1214 		error = ENOENT;
1215 	} else {
1216 		struct iopoll_rec *rec = &io_ctx->pr[io_ctx->poll_handlers];
1217 
1218 		rec->ifp = ifp;
1219 		rec->serializer = io_rec->serializer;
1220 		rec->arg = io_rec->arg;
1221 		rec->poll_func = io_rec->poll_func;
1222 
1223 		io_ctx->poll_handlers++;
1224 		error = 0;
1225 	}
1226 	return error;
1227 }
1228 
1229 static int
1230 iopoll_deregister(struct ifnet *ifp, struct iopoll_ctx *io_ctx)
1231 {
1232 	int i, error;
1233 
1234 	KKASSERT(&curthread->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));
1235 
1236 	for (i = 0; i < io_ctx->poll_handlers; ++i) {
1237 		if (io_ctx->pr[i].ifp == ifp) /* Found it */
1238 			break;
1239 	}
1240 	if (i == io_ctx->poll_handlers) {
1241 		error = ENOENT;
1242 	} else {
1243 		io_ctx->poll_handlers--;
1244 		if (i < io_ctx->poll_handlers) {
1245 			/* Last entry replaces this one. */
1246 			io_ctx->pr[i] = io_ctx->pr[io_ctx->poll_handlers];
1247 		}
1248 
1249 		if (io_ctx->poll_handlers == 0)
1250 			iopoll_reset_state(io_ctx);
1251 		error = 0;
1252 	}
1253 	return error;
1254 }
1255 
1256 static void
1257 poll_comm_init(int cpuid)
1258 {
1259 	struct poll_comm *comm;
1260 	char cpuid_str[16];
1261 
1262 	comm = kmalloc_cachealign(sizeof(*comm), M_DEVBUF, M_WAITOK | M_ZERO);
1263 
1264 	if (ifpoll_stfrac < 1)
1265 		ifpoll_stfrac = IFPOLL_STFRAC_DEFAULT;
1266 	if (ifpoll_txfrac < 1)
1267 		ifpoll_txfrac = IFPOLL_TXFRAC_DEFAULT;
1268 
1269 	comm->poll_cpuid = cpuid;
1270 	comm->pollhz = poll_comm_pollhz_div(comm, ifpoll_pollhz);
1271 	comm->poll_stfrac = ifpoll_stfrac - 1;
1272 	comm->poll_txfrac = ifpoll_txfrac - 1;
1273 
1274 	ksnprintf(cpuid_str, sizeof(cpuid_str), "%d", cpuid);
1275 
1276 	sysctl_ctx_init(&comm->sysctl_ctx);
1277 	comm->sysctl_tree = SYSCTL_ADD_NODE(&comm->sysctl_ctx,
1278 			    SYSCTL_STATIC_CHILDREN(_net_ifpoll),
1279 			    OID_AUTO, cpuid_str, CTLFLAG_RD, 0, "");
1280 
1281 	SYSCTL_ADD_PROC(&comm->sysctl_ctx, SYSCTL_CHILDREN(comm->sysctl_tree),
1282 			OID_AUTO, "pollhz", CTLTYPE_INT | CTLFLAG_RW,
1283 			comm, 0, sysctl_pollhz,
1284 			"I", "Device polling frequency");
1285 
1286 	if (cpuid == 0) {
1287 		SYSCTL_ADD_PROC(&comm->sysctl_ctx,
1288 				SYSCTL_CHILDREN(comm->sysctl_tree),
1289 				OID_AUTO, "status_frac",
1290 				CTLTYPE_INT | CTLFLAG_RW,
1291 				comm, 0, sysctl_stfrac,
1292 				"I", "# of cycles before status is polled");
1293 	}
1294 	SYSCTL_ADD_PROC(&comm->sysctl_ctx, SYSCTL_CHILDREN(comm->sysctl_tree),
1295 			OID_AUTO, "tx_frac", CTLTYPE_INT | CTLFLAG_RW,
1296 			comm, 0, sysctl_txfrac,
1297 			"I", "# of cycles before TX is polled");
1298 
1299 	poll_common[cpuid] = comm;
1300 }
1301 
1302 static void
1303 poll_comm_start(int cpuid)
1304 {
1305 	struct poll_comm *comm = poll_common[cpuid];
1306 	systimer_func_t func;
1307 
1308 	/*
1309 	 * Initialize systimer
1310 	 */
1311 	if (cpuid == 0)
1312 		func = poll_comm_systimer0;
1313 	else
1314 		func = poll_comm_systimer;
1315 	systimer_init_periodic_nq(&comm->pollclock, func, comm, 1);
1316 }
1317 
1318 static void
1319 _poll_comm_systimer(struct poll_comm *comm)
1320 {
1321 	iopoll_clock(rxpoll_context[comm->poll_cpuid]);
1322 	if (comm->txfrac_count-- == 0) {
1323 		comm->txfrac_count = comm->poll_txfrac;
1324 		iopoll_clock(txpoll_context[comm->poll_cpuid]);
1325 	}
1326 }
1327 
1328 static void
1329 poll_comm_systimer0(systimer_t info, int in_ipi __unused,
1330     struct intrframe *frame __unused)
1331 {
1332 	struct poll_comm *comm = info->data;
1333 	globaldata_t gd = mycpu;
1334 
1335 	KKASSERT(comm->poll_cpuid == gd->gd_cpuid && gd->gd_cpuid == 0);
1336 
1337 	crit_enter_gd(gd);
1338 
1339 	if (comm->stfrac_count-- == 0) {
1340 		comm->stfrac_count = comm->poll_stfrac;
1341 		stpoll_clock(&stpoll_context);
1342 	}
1343 	_poll_comm_systimer(comm);
1344 
1345 	crit_exit_gd(gd);
1346 }
1347 
1348 static void
1349 poll_comm_systimer(systimer_t info, int in_ipi __unused,
1350     struct intrframe *frame __unused)
1351 {
1352 	struct poll_comm *comm = info->data;
1353 	globaldata_t gd = mycpu;
1354 
1355 	KKASSERT(comm->poll_cpuid == gd->gd_cpuid && gd->gd_cpuid != 0);
1356 
1357 	crit_enter_gd(gd);
1358 	_poll_comm_systimer(comm);
1359 	crit_exit_gd(gd);
1360 }
1361 
1362 static void
1363 poll_comm_adjust_pollhz(struct poll_comm *comm)
1364 {
1365 	uint32_t handlers;
1366 	int pollhz = 1;
1367 
1368 	KKASSERT(&curthread->td_msgport == netisr_cpuport(comm->poll_cpuid));
1369 
1370 	/*
1371 	 * If no polling handler is registered, set the systimer
1372 	 * frequency to the lowest value.  The polling systimer frequency
1373 	 * will be adjusted to the requested value once there are
1374 	 * registered handlers.
1375 	 */
1376 	handlers = rxpoll_context[mycpuid]->poll_handlers +
1377 		   txpoll_context[mycpuid]->poll_handlers;
1378 	if (comm->poll_cpuid == 0)
1379 		handlers += stpoll_context.poll_handlers;
1380 	if (handlers)
1381 		pollhz = comm->pollhz;
1382 	systimer_adjust_periodic(&comm->pollclock, pollhz);
1383 }
1384 
1385 static int
1386 sysctl_pollhz(SYSCTL_HANDLER_ARGS)
1387 {
1388 	struct poll_comm *comm = arg1;
1389 	struct netmsg_base nmsg;
1390 	int error, phz;
1391 
1392 	phz = poll_comm_pollhz_conv(comm, comm->pollhz);
1393 	error = sysctl_handle_int(oidp, &phz, 0, req);
1394 	if (error || req->newptr == NULL)
1395 		return error;
1396 	if (phz <= 0)
1397 		return EINVAL;
1398 	else if (phz > IFPOLL_FREQ_MAX)
1399 		phz = IFPOLL_FREQ_MAX;
1400 
1401 	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
1402 		    0, sysctl_pollhz_handler);
1403 	nmsg.lmsg.u.ms_result = phz;
1404 
1405 	return netisr_domsg(&nmsg, comm->poll_cpuid);
1406 }
1407 
1408 static void
1409 sysctl_pollhz_handler(netmsg_t nmsg)
1410 {
1411 	struct poll_comm *comm = poll_common[mycpuid];
1412 
1413 	KKASSERT(&curthread->td_msgport == netisr_cpuport(comm->poll_cpuid));
1414 
1415 	/* Save polling frequency */
1416 	comm->pollhz = poll_comm_pollhz_div(comm, nmsg->lmsg.u.ms_result);
1417 
1418 	/*
1419 	 * Adjust cached pollhz
1420 	 */
1421 	rxpoll_context[mycpuid]->pollhz = comm->pollhz;
1422 	txpoll_context[mycpuid]->pollhz =
1423 	    comm->pollhz / (comm->poll_txfrac + 1);
1424 
1425 	/*
1426 	 * Adjust polling frequency
1427 	 */
1428 	poll_comm_adjust_pollhz(comm);
1429 
1430 	netisr_replymsg(&nmsg->base, 0);
1431 }
1432 
1433 static int
1434 sysctl_stfrac(SYSCTL_HANDLER_ARGS)
1435 {
1436 	struct poll_comm *comm = arg1;
1437 	struct netmsg_base nmsg;
1438 	int error, stfrac;
1439 
1440 	KKASSERT(comm->poll_cpuid == 0);
1441 
1442 	stfrac = comm->poll_stfrac + 1;
1443 	error = sysctl_handle_int(oidp, &stfrac, 0, req);
1444 	if (error || req->newptr == NULL)
1445 		return error;
1446 	if (stfrac < 1)
1447 		return EINVAL;
1448 
1449 	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
1450 		    0, sysctl_stfrac_handler);
1451 	nmsg.lmsg.u.ms_result = stfrac - 1;
1452 
1453 	return netisr_domsg(&nmsg, comm->poll_cpuid);
1454 }
1455 
1456 static void
1457 sysctl_stfrac_handler(netmsg_t nmsg)
1458 {
1459 	struct poll_comm *comm = poll_common[mycpuid];
1460 	int stfrac = nmsg->lmsg.u.ms_result;
1461 
1462 	KKASSERT(&curthread->td_msgport == netisr_cpuport(comm->poll_cpuid));
1463 
1464 	crit_enter();
1465 	comm->poll_stfrac = stfrac;
1466 	if (comm->stfrac_count > comm->poll_stfrac)
1467 		comm->stfrac_count = comm->poll_stfrac;
1468 	crit_exit();
1469 
1470 	netisr_replymsg(&nmsg->base, 0);
1471 }
1472 
1473 static int
1474 sysctl_txfrac(SYSCTL_HANDLER_ARGS)
1475 {
1476 	struct poll_comm *comm = arg1;
1477 	struct netmsg_base nmsg;
1478 	int error, txfrac;
1479 
1480 	txfrac = comm->poll_txfrac + 1;
1481 	error = sysctl_handle_int(oidp, &txfrac, 0, req);
1482 	if (error || req->newptr == NULL)
1483 		return error;
1484 	if (txfrac < 1)
1485 		return EINVAL;
1486 
1487 	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
1488 		    0, sysctl_txfrac_handler);
1489 	nmsg.lmsg.u.ms_result = txfrac - 1;
1490 
1491 	return netisr_domsg(&nmsg, comm->poll_cpuid);
1492 }
1493 
1494 static void
1495 sysctl_txfrac_handler(netmsg_t nmsg)
1496 {
1497 	struct poll_comm *comm = poll_common[mycpuid];
1498 	int txfrac = nmsg->lmsg.u.ms_result;
1499 
1500 	KKASSERT(&curthread->td_msgport == netisr_cpuport(comm->poll_cpuid));
1501 
1502 	crit_enter();
1503 	comm->poll_txfrac = txfrac;
1504 	if (comm->txfrac_count > comm->poll_txfrac)
1505 		comm->txfrac_count = comm->poll_txfrac;
1506 	crit_exit();
1507 
1508 	netisr_replymsg(&nmsg->base, 0);
1509 }
1510 
1511 void
1512 ifpoll_compat_setup(struct ifpoll_compat *cp,
1513     struct sysctl_ctx_list *sysctl_ctx,
1514     struct sysctl_oid *sysctl_tree,
1515     int unit, struct lwkt_serialize *slz)
1516 {
1517 	cp->ifpc_stcount = 0;
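	/*
	 * With the default tunables (status_frac 120, IOPOLL_BURST_MAX 250,
	 * IOPOLL_EACH_BURST 50), the compat status fraction below works out
	 * to 120 * howmany(250, 50) - 1 = 599.
	 */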
1518 	cp->ifpc_stfrac = ((poll_common[0]->poll_stfrac + 1) *
1519 	    howmany(IOPOLL_BURST_MAX, IOPOLL_EACH_BURST)) - 1;
1520 
1521 	cp->ifpc_cpuid = unit % netisr_ncpus;
1522 	cp->ifpc_serializer = slz;
1523 
1524 	if (sysctl_ctx != NULL && sysctl_tree != NULL) {
1525 		SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1526 		    OID_AUTO, "npoll_stfrac", CTLTYPE_INT | CTLFLAG_RW,
1527 		    cp, 0, sysctl_compat_npoll_stfrac, "I",
1528 		    "polling status frac");
1529 		SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1530 		    OID_AUTO, "npoll_cpuid", CTLTYPE_INT | CTLFLAG_RW,
1531 		    cp, 0, sysctl_compat_npoll_cpuid, "I",
1532 		    "polling cpuid");
1533 	}
1534 }
1535 
1536 static int
1537 sysctl_compat_npoll_stfrac(SYSCTL_HANDLER_ARGS)
1538 {
1539 	struct ifpoll_compat *cp = arg1;
1540 	int error = 0, stfrac;
1541 
1542 	lwkt_serialize_enter(cp->ifpc_serializer);
1543 
1544 	stfrac = cp->ifpc_stfrac + 1;
1545 	error = sysctl_handle_int(oidp, &stfrac, 0, req);
1546 	if (!error && req->newptr != NULL) {
1547 		if (stfrac < 1) {
1548 			error = EINVAL;
1549 		} else {
1550 			cp->ifpc_stfrac = stfrac - 1;
1551 			if (cp->ifpc_stcount > cp->ifpc_stfrac)
1552 				cp->ifpc_stcount = cp->ifpc_stfrac;
1553 		}
1554 	}
1555 
1556 	lwkt_serialize_exit(cp->ifpc_serializer);
1557 	return error;
1558 }
1559 
1560 static int
1561 sysctl_compat_npoll_cpuid(SYSCTL_HANDLER_ARGS)
1562 {
1563 	struct ifpoll_compat *cp = arg1;
1564 	int error = 0, cpuid;
1565 
1566 	lwkt_serialize_enter(cp->ifpc_serializer);
1567 
1568 	cpuid = cp->ifpc_cpuid;
1569 	error = sysctl_handle_int(oidp, &cpuid, 0, req);
1570 	if (!error && req->newptr != NULL) {
1571 		if (cpuid < 0 || cpuid >= netisr_ncpus)
1572 			error = EINVAL;
1573 		else
1574 			cp->ifpc_cpuid = cpuid;
1575 	}
1576 
1577 	lwkt_serialize_exit(cp->ifpc_serializer);
1578 	return error;
1579 }
1580