/* dragonfly/sys/net/if_poll.c (revision 8accc937) */
1 /*-
2  * Copyright (c) 2001-2002 Luigi Rizzo
3  *
4  * Supported by: the Xorp Project (www.xorp.org)
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  * $FreeBSD: src/sys/kern/kern_poll.c,v 1.2.2.4 2002/06/27 23:26:33 luigi Exp $
28  */
29 
30 #include "opt_ifpoll.h"
31 
32 #include <sys/param.h>
33 #include <sys/kernel.h>
34 #include <sys/ktr.h>
35 #include <sys/malloc.h>
36 #include <sys/serialize.h>
37 #include <sys/socket.h>
38 #include <sys/sysctl.h>
39 
40 #include <sys/thread2.h>
41 #include <sys/msgport2.h>
42 
43 #include <machine/atomic.h>
44 #include <machine/clock.h>
45 #include <machine/smp.h>
46 
47 #include <net/if.h>
48 #include <net/if_poll.h>
49 #include <net/netmsg2.h>
50 
51 /*
52  * Polling support for network device drivers.
53  *
54  * Drivers which support this feature try to register one status polling
55  * handler and several TX/RX polling handlers with the polling code.
56  * If the interface's if_npoll is called with a non-NULL second argument,
57  * a register operation is requested; otherwise a deregister operation is
58  * requested.  If the requested operation is "register", the driver should
59  * set up the ifpoll_info passed in according to its own needs:
60  *   ifpoll_info.ifpi_status.status_func == NULL
61  *     No status polling handler will be installed on CPU(0)
62  *   ifpoll_info.ifpi_rx[n].poll_func == NULL
63  *     No RX polling handler will be installed on CPU(n)
64  *   ifpoll_info.ifpi_tx[n].poll_func == NULL
65  *     No TX polling handler will be installed on CPU(n)
66  *
67  * RX is polled at the specified polling frequency (net.ifpoll.X.pollhz).
68  * TX and status polling may be done at a lower frequency than RX polling
69  * (net.ifpoll.0.status_frac and net.ifpoll.X.tx_frac).  To avoid systimer
70  * staggering at high frequencies, the RX systimer gives TX and status
71  * polling a piggyback (XXX).
72  *
73  * All of the registered polling handlers are called only if the interface
74  * is marked as 'IFF_RUNNING and IFF_NPOLLING'.  However, the interface's
75  * register and deregister function (ifnet.if_npoll) will be called even
76  * if the interface is not marked with 'IFF_RUNNING'.
77  *
78  * If registration is successful, the driver must disable interrupts,
79  * and further I/O is performed through the TX/RX polling handlers, which
80  * are invoked (at least once per clock tick) with 3 arguments: the "arg"
81  * passed at register time, a struct ifnet pointer, and a "count" limit.
82  * The registered serializer will be held before calling the related
83  * polling handler.
84  *
85  * The count limit specifies how much work the handler can do during the
86  * call -- typically this is the number of packets to be received, or
87  * transmitted, etc. (drivers are free to interpret this number, as long
88  * as the max time spent in the function grows roughly linearly with the
89  * count).
90  *
91  * A second variable controls the sharing of the CPU between polling/kernel
92  * network processing and other activities (typically userlevel tasks):
93  * net.ifpoll.X.rx.user_frac (between 0 and 100, default 50) sets the
94  * share of the CPU allocated to user tasks.  CPU time is allocated
95  * proportionally to the shares, by dynamically adjusting "count" (poll_burst).
96  *
97  * Other parameters should be left at their default values.
98  * The following constraints hold:
99  *
100  *	1 <= poll_burst <= poll_burst_max
101  *	1 <= poll_each_burst <= poll_burst_max
102  *	MIN_POLL_BURST_MAX <= poll_burst_max <= MAX_POLL_BURST_MAX
103  */
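
/*
 * Illustrative sketch (not part of this file): how a hypothetical driver
 * could implement its if_npoll method.  The "foo_*" names are invented for
 * the example only; the fields filled in below are the ones consumed by the
 * register path in this file, and the handler shapes match the calls made
 * by stpoll_handler and rxpoll_handler/txpoll_handler.
 *
 *	static void
 *	foo_npoll(struct ifnet *ifp, struct ifpoll_info *info)
 *	{
 *		struct foo_softc *sc = ifp->if_softc;
 *
 *		if (info != NULL) {
 *			// Register: publish status/RX/TX handlers on CPU 0.
 *			info->ifpi_status.status_func = foo_npoll_status;
 *			info->ifpi_status.serializer = &sc->foo_serialize;
 *
 *			info->ifpi_rx[0].poll_func = foo_npoll_rx;
 *			info->ifpi_rx[0].arg = sc;
 *			info->ifpi_rx[0].serializer = &sc->foo_serialize;
 *
 *			info->ifpi_tx[0].poll_func = foo_npoll_tx;
 *			info->ifpi_tx[0].arg = sc;
 *			info->ifpi_tx[0].serializer = &sc->foo_serialize;
 *
 *			// The driver disables its interrupts here and records
 *			// the CPU it polls on.
 *			ifp->if_npoll_cpuid = 0;
 *		} else {
 *			// Deregister: re-enable interrupts.
 *			ifp->if_npoll_cpuid = -1;
 *		}
 *	}
 *
 * where foo_npoll_status(struct ifnet *) and
 * foo_npoll_rx/tx(struct ifnet *, void *arg, int count) follow the
 * ifpoll_stfn_t and ifpoll_iofn_t shapes used by the handlers below.
 */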
104 
105 #define IFPOLL_LIST_LEN		128
106 #define IFPOLL_FREQ_MAX		30000
107 
108 #define MIN_IOPOLL_BURST_MAX	10
109 #define MAX_IOPOLL_BURST_MAX	5000
110 #define IOPOLL_BURST_MAX	375	/* good for 1000Mbit net and HZ=4000 */
111 
112 #define IOPOLL_EACH_BURST	15
113 
114 #define IFPOLL_FREQ_DEFAULT	4000
115 
116 #define IFPOLL_TXFRAC_DEFAULT	1	/* 1/1 of the pollhz */
117 #define IFPOLL_STFRAC_DEFAULT	40	/* 1/40 of the pollhz */
118 
119 #define IFPOLL_RX		0x1
120 #define IFPOLL_TX		0x2
121 
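/*
 * Timestamp used by the polling code: the raw TSC when it is available
 * (see ifpoll_time_get() and ifpoll_time_diff() below), otherwise a
 * microuptime() timeval.
 */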
122 union ifpoll_time {
123 	struct timeval		tv;
124 	uint64_t		tsc;
125 };
126 
127 struct iopoll_rec {
128 	struct lwkt_serialize	*serializer;
129 	struct ifnet		*ifp;
130 	void			*arg;
131 	ifpoll_iofn_t		poll_func;
132 };
133 
134 struct iopoll_ctx {
135 	union ifpoll_time	prev_t;
136 	u_long			short_ticks;		/* statistics */
137 	u_long			lost_polls;		/* statistics */
138 	u_long			suspect;		/* statistics */
139 	u_long			stalled;		/* statistics */
140 	uint32_t		pending_polls;		/* state */
141 
142 	struct netmsg_base	poll_netmsg;
143 	struct netmsg_base	poll_more_netmsg;
144 
145 	int			poll_cpuid;
146 	int			pollhz;
147 	uint32_t		phase;			/* state */
148 	int			residual_burst;		/* state */
149 	uint32_t		poll_each_burst;	/* tunable */
150 	union ifpoll_time	poll_start_t;		/* state */
151 
152 	uint32_t		poll_burst;		/* state */
153 	uint32_t		poll_burst_max;		/* tunable */
154 	uint32_t		user_frac;		/* tunable */
155 	uint32_t		kern_frac;		/* state */
156 
157 	uint32_t		poll_handlers; /* next free entry in pr[]. */
158 	struct iopoll_rec	pr[IFPOLL_LIST_LEN];
159 
160 	struct sysctl_ctx_list	poll_sysctl_ctx;
161 	struct sysctl_oid	*poll_sysctl_tree;
162 } __cachealign;
163 
164 struct poll_comm {
165 	struct systimer		pollclock;
166 	int			poll_cpuid;
167 
168 	int			stfrac_count;		/* state */
169 	int			poll_stfrac;		/* tunable */
170 
171 	int			txfrac_count;		/* state */
172 	int			poll_txfrac;		/* tunable */
173 
174 	int			pollhz;			/* tunable */
175 
176 	struct sysctl_ctx_list	sysctl_ctx;
177 	struct sysctl_oid	*sysctl_tree;
178 } __cachealign;
179 
180 struct stpoll_rec {
181 	struct lwkt_serialize	*serializer;
182 	struct ifnet		*ifp;
183 	ifpoll_stfn_t		status_func;
184 };
185 
186 struct stpoll_ctx {
187 	struct netmsg_base	poll_netmsg;
188 
189 	uint32_t		poll_handlers; /* next free entry in pr[]. */
190 	struct stpoll_rec	pr[IFPOLL_LIST_LEN];
191 
192 	struct sysctl_ctx_list	poll_sysctl_ctx;
193 	struct sysctl_oid	*poll_sysctl_tree;
194 } __cachealign;
195 
196 struct iopoll_sysctl_netmsg {
197 	struct netmsg_base	base;
198 	struct iopoll_ctx	*ctx;
199 };
200 
201 void		ifpoll_init_pcpu(int);
202 static void	ifpoll_register_handler(netmsg_t);
203 static void	ifpoll_deregister_handler(netmsg_t);
204 
205 /*
206  * Status polling
207  */
208 static void	stpoll_init(void);
209 static void	stpoll_handler(netmsg_t);
210 static void	stpoll_clock(struct stpoll_ctx *);
211 static int	stpoll_register(struct ifnet *, const struct ifpoll_status *);
212 static int	stpoll_deregister(struct ifnet *);
213 
214 /*
215  * RX/TX polling
216  */
217 static struct iopoll_ctx *iopoll_ctx_create(int, int);
218 static void	iopoll_init(int);
219 static void	rxpoll_handler(netmsg_t);
220 static void	txpoll_handler(netmsg_t);
221 static void	rxpollmore_handler(netmsg_t);
222 static void	txpollmore_handler(netmsg_t);
223 static void	iopoll_clock(struct iopoll_ctx *);
224 static int	iopoll_register(struct ifnet *, struct iopoll_ctx *,
225 		    const struct ifpoll_io *);
226 static int	iopoll_deregister(struct ifnet *, struct iopoll_ctx *);
227 
228 static void	iopoll_add_sysctl(struct sysctl_ctx_list *,
229 		    struct sysctl_oid_list *, struct iopoll_ctx *, int);
230 static void	sysctl_burstmax_handler(netmsg_t);
231 static int	sysctl_burstmax(SYSCTL_HANDLER_ARGS);
232 static void	sysctl_eachburst_handler(netmsg_t);
233 static int	sysctl_eachburst(SYSCTL_HANDLER_ARGS);
234 
235 /*
236  * Common functions
237  */
238 static void	poll_comm_init(int);
239 static void	poll_comm_start(int);
240 static void	poll_comm_adjust_pollhz(struct poll_comm *);
241 static void	poll_comm_systimer0(systimer_t, int, struct intrframe *);
242 static void	poll_comm_systimer(systimer_t, int, struct intrframe *);
243 static void	sysctl_pollhz_handler(netmsg_t);
244 static void	sysctl_stfrac_handler(netmsg_t);
245 static void	sysctl_txfrac_handler(netmsg_t);
246 static int	sysctl_pollhz(SYSCTL_HANDLER_ARGS);
247 static int	sysctl_stfrac(SYSCTL_HANDLER_ARGS);
248 static int	sysctl_txfrac(SYSCTL_HANDLER_ARGS);
249 static int	sysctl_compat_npoll_stfrac(SYSCTL_HANDLER_ARGS);
250 static int	sysctl_compat_npoll_cpuid(SYSCTL_HANDLER_ARGS);
251 
252 static struct stpoll_ctx	stpoll_context;
253 static struct poll_comm		*poll_common[MAXCPU];
254 static struct iopoll_ctx	*rxpoll_context[MAXCPU];
255 static struct iopoll_ctx	*txpoll_context[MAXCPU];
256 
257 SYSCTL_NODE(_net, OID_AUTO, ifpoll, CTLFLAG_RW, 0,
258 	    "Network device polling parameters");
259 
260 static int	iopoll_burst_max = IOPOLL_BURST_MAX;
261 static int	iopoll_each_burst = IOPOLL_EACH_BURST;
262 
263 static int	ifpoll_pollhz = IFPOLL_FREQ_DEFAULT;
264 static int	ifpoll_stfrac = IFPOLL_STFRAC_DEFAULT;
265 static int	ifpoll_txfrac = IFPOLL_TXFRAC_DEFAULT;
266 
267 TUNABLE_INT("net.ifpoll.burst_max", &iopoll_burst_max);
268 TUNABLE_INT("net.ifpoll.each_burst", &iopoll_each_burst);
269 TUNABLE_INT("net.ifpoll.pollhz", &ifpoll_pollhz);
270 TUNABLE_INT("net.ifpoll.status_frac", &ifpoll_stfrac);
271 TUNABLE_INT("net.ifpoll.tx_frac", &ifpoll_txfrac);
272 
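/*
 * (Re)send a polling netmsg to the current CPU's netisr, but only if the
 * previous instance has already been replied (MSGF_DONE), so that at most
 * one instance of each message is in flight at any time.
 */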
273 static __inline void
274 ifpoll_sendmsg_oncpu(netmsg_t msg)
275 {
276 	if (msg->lmsg.ms_flags & MSGF_DONE)
277 		lwkt_sendmsg(netisr_portfn(mycpuid), &msg->lmsg);
278 }
279 
280 static __inline void
281 sched_stpoll(struct stpoll_ctx *st_ctx)
282 {
283 	ifpoll_sendmsg_oncpu((netmsg_t)&st_ctx->poll_netmsg);
284 }
285 
286 static __inline void
287 sched_iopoll(struct iopoll_ctx *io_ctx)
288 {
289 	ifpoll_sendmsg_oncpu((netmsg_t)&io_ctx->poll_netmsg);
290 }
291 
292 static __inline void
293 sched_iopollmore(struct iopoll_ctx *io_ctx)
294 {
295 	ifpoll_sendmsg_oncpu((netmsg_t)&io_ctx->poll_more_netmsg);
296 }
297 
298 static __inline void
299 ifpoll_time_get(union ifpoll_time *t)
300 {
301 	if (__predict_true(tsc_present))
302 		t->tsc = rdtsc();
303 	else
304 		microuptime(&t->tv);
305 }
306 
307 /* Return time diff in us */
308 static __inline int
309 ifpoll_time_diff(const union ifpoll_time *s, const union ifpoll_time *e)
310 {
311 	if (__predict_true(tsc_present)) {
312 		return (((e->tsc - s->tsc) * 1000000) / tsc_frequency);
313 	} else {
314 		return ((e->tv.tv_usec - s->tv.tv_usec) +
315 			(e->tv.tv_sec - s->tv.tv_sec) * 1000000);
316 	}
317 }
318 
319 /*
320  * Initialize per-cpu polling(4) context.  Called from kern_clock.c.
321  */
322 void
323 ifpoll_init_pcpu(int cpuid)
324 {
325 	if (cpuid >= ncpus2)
326 		return;
327 
328 	poll_comm_init(cpuid);
329 
330 	if (cpuid == 0)
331 		stpoll_init();
332 	iopoll_init(cpuid);
333 
334 	poll_comm_start(cpuid);
335 }
336 
337 int
338 ifpoll_register(struct ifnet *ifp)
339 {
340 	struct ifpoll_info *info;
341 	struct netmsg_base nmsg;
342 	int error;
343 
344 	if (ifp->if_npoll == NULL) {
345 		/* Device does not support polling */
346 		return EOPNOTSUPP;
347 	}
348 
349 	info = kmalloc(sizeof(*info), M_TEMP, M_WAITOK | M_ZERO);
350 
351 	/*
352 	 * Attempt to register.  Interlock with IFF_NPOLLING.
353 	 */
354 
355 	ifnet_serialize_all(ifp);
356 
357 	if (ifp->if_flags & IFF_NPOLLING) {
358 		/* Already polling */
359 		ifnet_deserialize_all(ifp);
360 		kfree(info, M_TEMP);
361 		return EBUSY;
362 	}
363 
364 	info->ifpi_ifp = ifp;
365 
366 	ifp->if_flags |= IFF_NPOLLING;
367 	ifp->if_npoll(ifp, info);
368 	KASSERT(ifp->if_npoll_cpuid >= 0, ("invalid npoll cpuid"));
369 
370 	ifnet_deserialize_all(ifp);
371 
372 	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
373 		    0, ifpoll_register_handler);
374 	nmsg.lmsg.u.ms_resultp = info;
375 
376 	error = lwkt_domsg(netisr_portfn(0), &nmsg.lmsg, 0);
377 	if (error) {
378 		if (!ifpoll_deregister(ifp)) {
379 			if_printf(ifp, "ifpoll_register: "
380 				  "ifpoll_deregister failed!\n");
381 		}
382 	}
383 
384 	kfree(info, M_TEMP);
385 	return error;
386 }
387 
388 int
389 ifpoll_deregister(struct ifnet *ifp)
390 {
391 	struct netmsg_base nmsg;
392 	int error;
393 
394 	if (ifp->if_npoll == NULL)
395 		return EOPNOTSUPP;
396 
397 	ifnet_serialize_all(ifp);
398 
399 	if ((ifp->if_flags & IFF_NPOLLING) == 0) {
400 		ifnet_deserialize_all(ifp);
401 		return EINVAL;
402 	}
403 	ifp->if_flags &= ~IFF_NPOLLING;
404 
405 	ifnet_deserialize_all(ifp);
406 
407 	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
408 		    0, ifpoll_deregister_handler);
409 	nmsg.lmsg.u.ms_resultp = ifp;
410 
411 	error = lwkt_domsg(netisr_portfn(0), &nmsg.lmsg, 0);
412 	if (!error) {
413 		ifnet_serialize_all(ifp);
414 		ifp->if_npoll(ifp, NULL);
415 		KASSERT(ifp->if_npoll_cpuid < 0, ("invalid npoll cpuid"));
416 		ifnet_deserialize_all(ifp);
417 	}
418 	return error;
419 }
420 
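/*
 * Runs in the netisr threads, starting on CPU 0: register the status
 * polling handler (CPU 0 only) and this CPU's RX/TX polling handlers,
 * adjust the polling systimer frequency, then forward the message to the
 * next CPU's netisr.  The message is replied from the last CPU, or
 * immediately if a registration fails.
 */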
421 static void
422 ifpoll_register_handler(netmsg_t nmsg)
423 {
424 	const struct ifpoll_info *info = nmsg->lmsg.u.ms_resultp;
425 	int cpuid = mycpuid, nextcpu;
426 	int error;
427 
428 	KKASSERT(cpuid < ncpus2);
429 	KKASSERT(&curthread->td_msgport == netisr_portfn(cpuid));
430 
431 	if (cpuid == 0) {
432 		error = stpoll_register(info->ifpi_ifp, &info->ifpi_status);
433 		if (error)
434 			goto failed;
435 	}
436 
437 	error = iopoll_register(info->ifpi_ifp, rxpoll_context[cpuid],
438 				&info->ifpi_rx[cpuid]);
439 	if (error)
440 		goto failed;
441 
442 	error = iopoll_register(info->ifpi_ifp, txpoll_context[cpuid],
443 				&info->ifpi_tx[cpuid]);
444 	if (error)
445 		goto failed;
446 
447 	/* Adjust polling frequency, after all registration is done */
448 	poll_comm_adjust_pollhz(poll_common[cpuid]);
449 
450 	nextcpu = cpuid + 1;
451 	if (nextcpu < ncpus2)
452 		lwkt_forwardmsg(netisr_portfn(nextcpu), &nmsg->lmsg);
453 	else
454 		lwkt_replymsg(&nmsg->lmsg, 0);
455 	return;
456 failed:
457 	lwkt_replymsg(&nmsg->lmsg, error);
458 }
459 
460 static void
461 ifpoll_deregister_handler(netmsg_t nmsg)
462 {
463 	struct ifnet *ifp = nmsg->lmsg.u.ms_resultp;
464 	int cpuid = mycpuid, nextcpu;
465 
466 	KKASSERT(cpuid < ncpus2);
467 	KKASSERT(&curthread->td_msgport == netisr_portfn(cpuid));
468 
469 	/* Ignore errors */
470 	if (cpuid == 0)
471 		stpoll_deregister(ifp);
472 	iopoll_deregister(ifp, rxpoll_context[cpuid]);
473 	iopoll_deregister(ifp, txpoll_context[cpuid]);
474 
475 	/* Adjust polling frequency, after all deregistration is done */
476 	poll_comm_adjust_pollhz(poll_common[cpuid]);
477 
478 	nextcpu = cpuid + 1;
479 	if (nextcpu < ncpus2)
480 		lwkt_forwardmsg(netisr_portfn(nextcpu), &nmsg->lmsg);
481 	else
482 		lwkt_replymsg(&nmsg->lmsg, 0);
483 }
484 
485 static void
486 stpoll_init(void)
487 {
488 	struct stpoll_ctx *st_ctx = &stpoll_context;
489 	const struct poll_comm *comm = poll_common[0];
490 
491 	sysctl_ctx_init(&st_ctx->poll_sysctl_ctx);
492 	st_ctx->poll_sysctl_tree = SYSCTL_ADD_NODE(&st_ctx->poll_sysctl_ctx,
493 				   SYSCTL_CHILDREN(comm->sysctl_tree),
494 				   OID_AUTO, "status", CTLFLAG_RD, 0, "");
495 
496 	SYSCTL_ADD_UINT(&st_ctx->poll_sysctl_ctx,
497 			SYSCTL_CHILDREN(st_ctx->poll_sysctl_tree),
498 			OID_AUTO, "handlers", CTLFLAG_RD,
499 			&st_ctx->poll_handlers, 0,
500 			"Number of registered status poll handlers");
501 
502 	netmsg_init(&st_ctx->poll_netmsg, NULL, &netisr_adone_rport,
503 		    0, stpoll_handler);
504 }
505 
506 /*
507  * stpoll_handler is scheduled by sched_stpoll when appropriate, typically
508  * once per polling systimer tick.
509  */
510 static void
511 stpoll_handler(netmsg_t msg)
512 {
513 	struct stpoll_ctx *st_ctx = &stpoll_context;
514 	struct thread *td = curthread;
515 	int i;
516 
517 	KKASSERT(&td->td_msgport == netisr_portfn(0));
518 
519 	crit_enter_quick(td);
520 
521 	/* Reply ASAP */
522 	lwkt_replymsg(&msg->lmsg, 0);
523 
524 	if (st_ctx->poll_handlers == 0) {
525 		crit_exit_quick(td);
526 		return;
527 	}
528 
529 	for (i = 0; i < st_ctx->poll_handlers; ++i) {
530 		const struct stpoll_rec *rec = &st_ctx->pr[i];
531 		struct ifnet *ifp = rec->ifp;
532 
533 		if (!lwkt_serialize_try(rec->serializer))
534 			continue;
535 
536 		if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
537 		    (IFF_RUNNING | IFF_NPOLLING))
538 			rec->status_func(ifp);
539 
540 		lwkt_serialize_exit(rec->serializer);
541 	}
542 
543 	crit_exit_quick(td);
544 }
545 
546 /*
547  * Hook from the status poll systimer.  Tries to schedule a status poll.
548  * NOTE: Caller should hold critical section.
549  */
550 static void
551 stpoll_clock(struct stpoll_ctx *st_ctx)
552 {
553 	KKASSERT(mycpuid == 0);
554 
555 	if (st_ctx->poll_handlers == 0)
556 		return;
557 	sched_stpoll(st_ctx);
558 }
559 
560 static int
561 stpoll_register(struct ifnet *ifp, const struct ifpoll_status *st_rec)
562 {
563 	struct stpoll_ctx *st_ctx = &stpoll_context;
564 	int error;
565 
566 	KKASSERT(&curthread->td_msgport == netisr_portfn(0));
567 
568 	if (st_rec->status_func == NULL)
569 		return 0;
570 
571 	/*
572 	 * Check if there is room.
573 	 */
574 	if (st_ctx->poll_handlers >= IFPOLL_LIST_LEN) {
575 		/*
576 		 * List full, cannot register more entries.
577 		 * This should never happen; if it does, it is probably a
578 		 * broken driver trying to register multiple times. Checking
579 		 * this at runtime is expensive, and won't solve the problem
580 		 * anyway, so just report a few times and then give up.
581 		 */
582 		static int verbose = 10; /* XXX */
583 
584 		if (verbose > 0) {
585 			kprintf("status poll handlers list full, "
586 				"maybe a broken driver ?\n");
587 			verbose--;
588 		}
589 		error = ENOENT;
590 	} else {
591 		struct stpoll_rec *rec = &st_ctx->pr[st_ctx->poll_handlers];
592 
593 		rec->ifp = ifp;
594 		rec->serializer = st_rec->serializer;
595 		rec->status_func = st_rec->status_func;
596 
597 		st_ctx->poll_handlers++;
598 		error = 0;
599 	}
600 	return error;
601 }
602 
603 static int
604 stpoll_deregister(struct ifnet *ifp)
605 {
606 	struct stpoll_ctx *st_ctx = &stpoll_context;
607 	int i, error;
608 
609 	KKASSERT(&curthread->td_msgport == netisr_portfn(0));
610 
611 	for (i = 0; i < st_ctx->poll_handlers; ++i) {
612 		if (st_ctx->pr[i].ifp == ifp) /* Found it */
613 			break;
614 	}
615 	if (i == st_ctx->poll_handlers) {
616 		error = ENOENT;
617 	} else {
618 		st_ctx->poll_handlers--;
619 		if (i < st_ctx->poll_handlers) {
620 			/* Last entry replaces this one. */
621 			st_ctx->pr[i] = st_ctx->pr[st_ctx->poll_handlers];
622 		}
623 		error = 0;
624 	}
625 	return error;
626 }
627 
628 static __inline void
629 iopoll_reset_state(struct iopoll_ctx *io_ctx)
630 {
631 	crit_enter();
632 	io_ctx->poll_burst = io_ctx->poll_each_burst;
633 	io_ctx->pending_polls = 0;
634 	io_ctx->residual_burst = 0;
635 	io_ctx->phase = 0;
636 	io_ctx->kern_frac = 0;
637 	bzero(&io_ctx->poll_start_t, sizeof(io_ctx->poll_start_t));
638 	bzero(&io_ctx->prev_t, sizeof(io_ctx->prev_t));
639 	crit_exit();
640 }
641 
642 static void
643 iopoll_init(int cpuid)
644 {
645 	KKASSERT(cpuid < ncpus2);
646 
647 	rxpoll_context[cpuid] = iopoll_ctx_create(cpuid, IFPOLL_RX);
648 	txpoll_context[cpuid] = iopoll_ctx_create(cpuid, IFPOLL_TX);
649 }
650 
651 static struct iopoll_ctx *
652 iopoll_ctx_create(int cpuid, int poll_type)
653 {
654 	struct poll_comm *comm;
655 	struct iopoll_ctx *io_ctx;
656 	const char *poll_type_str;
657 	netisr_fn_t handler, more_handler;
658 
659 	KKASSERT(poll_type == IFPOLL_RX || poll_type == IFPOLL_TX);
660 
661 	/*
662 	 * Make sure that the tunables are in a sane state
663 	 */
664 	if (iopoll_burst_max < MIN_IOPOLL_BURST_MAX)
665 		iopoll_burst_max = MIN_IOPOLL_BURST_MAX;
666 	else if (iopoll_burst_max > MAX_IOPOLL_BURST_MAX)
667 		iopoll_burst_max = MAX_IOPOLL_BURST_MAX;
668 
669 	if (iopoll_each_burst > iopoll_burst_max)
670 		iopoll_each_burst = iopoll_burst_max;
671 
672 	comm = poll_common[cpuid];
673 
674 	/*
675 	 * Create the per-cpu polling context
676 	 */
677 	io_ctx = kmalloc_cachealign(sizeof(*io_ctx), M_DEVBUF,
678 	    M_WAITOK | M_ZERO);
679 
680 	io_ctx->poll_each_burst = iopoll_each_burst;
681 	io_ctx->poll_burst_max = iopoll_burst_max;
682 	io_ctx->user_frac = 50;
683 	if (poll_type == IFPOLL_RX)
684 		io_ctx->pollhz = comm->pollhz;
685 	else
686 		io_ctx->pollhz = comm->pollhz / (comm->poll_txfrac + 1);
687 	io_ctx->poll_cpuid = cpuid;
688 	iopoll_reset_state(io_ctx);
689 
690 	if (poll_type == IFPOLL_RX) {
691 		handler = rxpoll_handler;
692 		more_handler = rxpollmore_handler;
693 	} else {
694 		handler = txpoll_handler;
695 		more_handler = txpollmore_handler;
696 	}
697 
698 	netmsg_init(&io_ctx->poll_netmsg, NULL, &netisr_adone_rport,
699 	    0, handler);
700 	io_ctx->poll_netmsg.lmsg.u.ms_resultp = io_ctx;
701 
702 	netmsg_init(&io_ctx->poll_more_netmsg, NULL, &netisr_adone_rport,
703 	    0, more_handler);
704 	io_ctx->poll_more_netmsg.lmsg.u.ms_resultp = io_ctx;
705 
706 	/*
707 	 * Initialize per-cpu sysctl nodes
708 	 */
709 	if (poll_type == IFPOLL_RX)
710 		poll_type_str = "rx";
711 	else
712 		poll_type_str = "tx";
713 
714 	sysctl_ctx_init(&io_ctx->poll_sysctl_ctx);
715 	io_ctx->poll_sysctl_tree = SYSCTL_ADD_NODE(&io_ctx->poll_sysctl_ctx,
716 				   SYSCTL_CHILDREN(comm->sysctl_tree),
717 				   OID_AUTO, poll_type_str, CTLFLAG_RD, 0, "");
718 	iopoll_add_sysctl(&io_ctx->poll_sysctl_ctx,
719 	    SYSCTL_CHILDREN(io_ctx->poll_sysctl_tree), io_ctx, poll_type);
720 
721 	return io_ctx;
722 }
723 
724 /*
725  * Hook from iopoll systimer.  Tries to schedule an iopoll, but keeps
726  * track of lost ticks due to the previous handler taking too long.
727  * Normally, this should not happen, because the polling handler should
728  * run for a short time.  However, in some cases (e.g. when there are
729  * changes in link status etc.) the drivers take a very long time
730  * (even on the order of milliseconds) to reset and reconfigure the
731  * device, causing apparent lost polls.
732  *
733  * The first part of the code is just for debugging purposes, and tries
734  * to count how often hardclock ticks are shorter than they should be,
735  * meaning either stray interrupts or delayed events.
736  *
737  * WARNING! Called from fastint or IPI; the MP lock might not be held.
738  * NOTE: Caller should hold critical section.
739  */
740 static void
741 iopoll_clock(struct iopoll_ctx *io_ctx)
742 {
743 	union ifpoll_time t;
744 	int delta;
745 
746 	KKASSERT(mycpuid == io_ctx->poll_cpuid);
747 
748 	if (io_ctx->poll_handlers == 0)
749 		return;
750 
751 	ifpoll_time_get(&t);
752 	delta = ifpoll_time_diff(&io_ctx->prev_t, &t);
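	/*
	 * The nominal tick length is 1000000/pollhz microseconds, so a
	 * delta with delta * pollhz < 500000 means this tick arrived in
	 * less than half the expected time.
	 */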
753 	if (delta * io_ctx->pollhz < 500000)
754 		io_ctx->short_ticks++;
755 	else
756 		io_ctx->prev_t = t;
757 
758 	if (io_ctx->pending_polls > 100) {
759 		/*
760 		 * Too much, assume it has stalled (not always true,
761 		 * see comment above).
762 		 */
763 		io_ctx->stalled++;
764 		io_ctx->pending_polls = 0;
765 		io_ctx->phase = 0;
766 	}
767 
768 	if (io_ctx->phase <= 2) {
769 		if (io_ctx->phase != 0)
770 			io_ctx->suspect++;
771 		io_ctx->phase = 1;
772 		sched_iopoll(io_ctx);
773 		io_ctx->phase = 2;
774 	}
775 	if (io_ctx->pending_polls++ > 0)
776 		io_ctx->lost_polls++;
777 }
778 
779 /*
780  * rxpoll_handler and txpoll_handler are scheduled by sched_iopoll when
781  * appropriate, typically once per polling systimer tick.
782  *
783  * Note that the message is replied immediately in order to allow a new
784  * ISR to be scheduled in the handler.
785  */
786 static void
787 rxpoll_handler(netmsg_t msg)
788 {
789 	struct iopoll_ctx *io_ctx;
790 	struct thread *td = curthread;
791 	int i, cycles;
792 
793 	io_ctx = msg->lmsg.u.ms_resultp;
794 	KKASSERT(&td->td_msgport == netisr_portfn(io_ctx->poll_cpuid));
795 
796 	crit_enter_quick(td);
797 
798 	/* Reply ASAP */
799 	lwkt_replymsg(&msg->lmsg, 0);
800 
801 	if (io_ctx->poll_handlers == 0) {
802 		crit_exit_quick(td);
803 		return;
804 	}
805 
806 	io_ctx->phase = 3;
807 	if (io_ctx->residual_burst == 0) {
808 		/* First call in this tick */
809 		ifpoll_time_get(&io_ctx->poll_start_t);
810 		io_ctx->residual_burst = io_ctx->poll_burst;
811 	}
812 	cycles = (io_ctx->residual_burst < io_ctx->poll_each_burst) ?
813 		 io_ctx->residual_burst : io_ctx->poll_each_burst;
814 	io_ctx->residual_burst -= cycles;
815 
816 	for (i = 0; i < io_ctx->poll_handlers; i++) {
817 		const struct iopoll_rec *rec = &io_ctx->pr[i];
818 		struct ifnet *ifp = rec->ifp;
819 
820 		if (!lwkt_serialize_try(rec->serializer))
821 			continue;
822 
823 		if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
824 		    (IFF_RUNNING | IFF_NPOLLING))
825 			rec->poll_func(ifp, rec->arg, cycles);
826 
827 		lwkt_serialize_exit(rec->serializer);
828 	}
829 
830 	/*
831 	 * Do a quick exit/enter to catch any higher-priority
832 	 * interrupt sources.
833 	 */
834 	crit_exit_quick(td);
835 	crit_enter_quick(td);
836 
837 	sched_iopollmore(io_ctx);
838 	io_ctx->phase = 4;
839 
840 	crit_exit_quick(td);
841 }
842 
843 static void
844 txpoll_handler(netmsg_t msg)
845 {
846 	struct iopoll_ctx *io_ctx;
847 	struct thread *td = curthread;
848 	int i;
849 
850 	io_ctx = msg->lmsg.u.ms_resultp;
851 	KKASSERT(&td->td_msgport == netisr_portfn(io_ctx->poll_cpuid));
852 
853 	crit_enter_quick(td);
854 
855 	/* Reply ASAP */
856 	lwkt_replymsg(&msg->lmsg, 0);
857 
858 	if (io_ctx->poll_handlers == 0) {
859 		crit_exit_quick(td);
860 		return;
861 	}
862 
863 	io_ctx->phase = 3;
864 
865 	for (i = 0; i < io_ctx->poll_handlers; i++) {
866 		const struct iopoll_rec *rec = &io_ctx->pr[i];
867 		struct ifnet *ifp = rec->ifp;
868 
869 		if (!lwkt_serialize_try(rec->serializer))
870 			continue;
871 
872 		if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
873 		    (IFF_RUNNING | IFF_NPOLLING))
874 			rec->poll_func(ifp, rec->arg, -1);
875 
876 		lwkt_serialize_exit(rec->serializer);
877 	}
878 
879 	/*
880 	 * Do a quick exit/enter to catch any higher-priority
881 	 * interrupt sources.
882 	 */
883 	crit_exit_quick(td);
884 	crit_enter_quick(td);
885 
886 	sched_iopollmore(io_ctx);
887 	io_ctx->phase = 4;
888 
889 	crit_exit_quick(td);
890 }
891 
892 /*
893  * rxpollmore_handler and txpollmore_handler are called after other netisrs,
894  * possibly scheduling another rxpoll_handler or txpoll_handler call, or
895  * adapting the burst size for the next cycle.
896  *
897  * It is very bad to fetch large bursts of packets from a single card at
898  * once, because the burst could take a long time to be completely
899  * processed, leading to unfairness.  To reduce the problem, and also to
900  * account better for time spent in network-related processing, we split
901  * the burst in smaller chunks of fixed size, giving control to the other
902  * netisrs between chunks.  This helps in improving the fairness, reducing
903  * livelock and accounting for the work performed in low-level handling.
904  */
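/*
 * Illustrative arithmetic based on the compile-time defaults in this file:
 * poll_burst_max starts at IOPOLL_BURST_MAX (375) and poll_each_burst at
 * IOPOLL_EACH_BURST (15), so a full burst is consumed in at most
 * 375 / 15 = 25 chunks per tick.  For kern_frac, at the default pollhz of
 * 4000 a tick lasts 250us; spending e.g. 125us in the handlers yields
 * 125 * 4000 / 10000 = 50, i.e. a 50% kernel share.
 */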
905 static void
906 rxpollmore_handler(netmsg_t msg)
907 {
908 	struct thread *td = curthread;
909 	struct iopoll_ctx *io_ctx;
910 	union ifpoll_time t;
911 	int kern_load;
912 	uint32_t pending_polls;
913 
914 	io_ctx = msg->lmsg.u.ms_resultp;
915 	KKASSERT(&td->td_msgport == netisr_portfn(io_ctx->poll_cpuid));
916 
917 	crit_enter_quick(td);
918 
919 	/* Reply ASAP */
920 	lwkt_replymsg(&msg->lmsg, 0);
921 
922 	if (io_ctx->poll_handlers == 0) {
923 		crit_exit_quick(td);
924 		return;
925 	}
926 
927 	io_ctx->phase = 5;
928 	if (io_ctx->residual_burst > 0) {
929 		sched_iopoll(io_ctx);
930 		crit_exit_quick(td);
931 		/* Will run immediately on return, followed by netisrs */
932 		return;
933 	}
934 
935 	/* Here we can account for the time spent in iopolls in this tick */
936 	ifpoll_time_get(&t);
937 	kern_load = ifpoll_time_diff(&io_ctx->poll_start_t, &t);
938 	kern_load = (kern_load * io_ctx->pollhz) / 10000; /* 0..100 */
939 	io_ctx->kern_frac = kern_load;
940 
941 	if (kern_load > (100 - io_ctx->user_frac)) {
942 		/* Try to decrease the burst size */
943 		if (io_ctx->poll_burst > 1)
944 			io_ctx->poll_burst--;
945 	} else {
946 		if (io_ctx->poll_burst < io_ctx->poll_burst_max)
947 			io_ctx->poll_burst++;
948 	}
949 
950 	io_ctx->pending_polls--;
951 	pending_polls = io_ctx->pending_polls;
952 
953 	if (pending_polls == 0) {
954 		/* We are done */
955 		io_ctx->phase = 0;
956 	} else {
957 		/*
958 		 * Last cycle was long and caused us to miss one or more
959 		 * hardclock ticks.  Restart processing, but slightly reduce
960 		 * the burst size to prevent this from happening again.
961 		 */
962 		io_ctx->poll_burst -= (io_ctx->poll_burst / 8);
963 		if (io_ctx->poll_burst < 1)
964 			io_ctx->poll_burst = 1;
965 		sched_iopoll(io_ctx);
966 		io_ctx->phase = 6;
967 	}
968 
969 	crit_exit_quick(td);
970 }
971 
972 static void
973 txpollmore_handler(netmsg_t msg)
974 {
975 	struct thread *td = curthread;
976 	struct iopoll_ctx *io_ctx;
977 	uint32_t pending_polls;
978 
979 	io_ctx = msg->lmsg.u.ms_resultp;
980 	KKASSERT(&td->td_msgport == netisr_portfn(io_ctx->poll_cpuid));
981 
982 	crit_enter_quick(td);
983 
984 	/* Reply ASAP */
985 	lwkt_replymsg(&msg->lmsg, 0);
986 
987 	if (io_ctx->poll_handlers == 0) {
988 		crit_exit_quick(td);
989 		return;
990 	}
991 
992 	io_ctx->phase = 5;
993 
994 	io_ctx->pending_polls--;
995 	pending_polls = io_ctx->pending_polls;
996 
997 	if (pending_polls == 0) {
998 		/* We are done */
999 		io_ctx->phase = 0;
1000 	} else {
1001 		/*
1002 		 * Last cycle was long and caused us to miss one or more
1003 		 * hardclock ticks.  Restart processing.
1004 		 */
1005 		sched_iopoll(io_ctx);
1006 		io_ctx->phase = 6;
1007 	}
1008 
1009 	crit_exit_quick(td);
1010 }
1011 
1012 static void
1013 iopoll_add_sysctl(struct sysctl_ctx_list *ctx, struct sysctl_oid_list *parent,
1014     struct iopoll_ctx *io_ctx, int poll_type)
1015 {
1016 	if (poll_type == IFPOLL_RX) {
1017 		SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "burst_max",
1018 		    CTLTYPE_UINT | CTLFLAG_RW, io_ctx, 0, sysctl_burstmax,
1019 		    "IU", "Max Polling burst size");
1020 
1021 		SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "each_burst",
1022 		    CTLTYPE_UINT | CTLFLAG_RW, io_ctx, 0, sysctl_eachburst,
1023 		    "IU", "Max size of each burst");
1024 
1025 		SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "burst", CTLFLAG_RD,
1026 		    &io_ctx->poll_burst, 0, "Current polling burst size");
1027 
1028 		SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "user_frac", CTLFLAG_RW,
1029 		    &io_ctx->user_frac, 0, "Desired user fraction of cpu time");
1030 
1031 		SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "kern_frac", CTLFLAG_RD,
1032 		    &io_ctx->kern_frac, 0, "Kernel fraction of cpu time");
1033 
1034 		SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "residual_burst", CTLFLAG_RD,
1035 		    &io_ctx->residual_burst, 0,
1036 		    "# of residual cycles in burst");
1037 	}
1038 
1039 	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "phase", CTLFLAG_RD,
1040 	    &io_ctx->phase, 0, "Polling phase");
1041 
1042 	SYSCTL_ADD_ULONG(ctx, parent, OID_AUTO, "suspect", CTLFLAG_RW,
1043 	    &io_ctx->suspect, "Suspected events");
1044 
1045 	SYSCTL_ADD_ULONG(ctx, parent, OID_AUTO, "stalled", CTLFLAG_RW,
1046 	    &io_ctx->stalled, "Potential stalls");
1047 
1048 	SYSCTL_ADD_ULONG(ctx, parent, OID_AUTO, "short_ticks", CTLFLAG_RW,
1049 	    &io_ctx->short_ticks,
1050 	    "Hardclock ticks shorter than they should be");
1051 
1052 	SYSCTL_ADD_ULONG(ctx, parent, OID_AUTO, "lost_polls", CTLFLAG_RW,
1053 	    &io_ctx->lost_polls,
1054 	    "How many times we would have lost a poll tick");
1055 
1056 	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "pending_polls", CTLFLAG_RD,
1057 	    &io_ctx->pending_polls, 0, "Do we need to poll again");
1058 
1059 	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "handlers", CTLFLAG_RD,
1060 	    &io_ctx->poll_handlers, 0, "Number of registered poll handlers");
1061 }
1062 
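/*
 * The sysctl handlers below run in the caller's context; the actual
 * tunable update is packaged as a netmsg and executed, via lwkt_domsg(),
 * on the netisr of the CPU that owns the iopoll context.
 */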
1063 static void
1064 sysctl_burstmax_handler(netmsg_t nmsg)
1065 {
1066 	struct iopoll_sysctl_netmsg *msg = (struct iopoll_sysctl_netmsg *)nmsg;
1067 	struct iopoll_ctx *io_ctx;
1068 
1069 	io_ctx = msg->ctx;
1070 	KKASSERT(&curthread->td_msgport == netisr_portfn(io_ctx->poll_cpuid));
1071 
1072 	io_ctx->poll_burst_max = nmsg->lmsg.u.ms_result;
1073 	if (io_ctx->poll_each_burst > io_ctx->poll_burst_max)
1074 		io_ctx->poll_each_burst = io_ctx->poll_burst_max;
1075 	if (io_ctx->poll_burst > io_ctx->poll_burst_max)
1076 		io_ctx->poll_burst = io_ctx->poll_burst_max;
1077 	if (io_ctx->residual_burst > io_ctx->poll_burst_max)
1078 		io_ctx->residual_burst = io_ctx->poll_burst_max;
1079 
1080 	lwkt_replymsg(&nmsg->lmsg, 0);
1081 }
1082 
1083 static int
1084 sysctl_burstmax(SYSCTL_HANDLER_ARGS)
1085 {
1086 	struct iopoll_ctx *io_ctx = arg1;
1087 	struct iopoll_sysctl_netmsg msg;
1088 	uint32_t burst_max;
1089 	int error;
1090 
1091 	burst_max = io_ctx->poll_burst_max;
1092 	error = sysctl_handle_int(oidp, &burst_max, 0, req);
1093 	if (error || req->newptr == NULL)
1094 		return error;
1095 	if (burst_max < MIN_IOPOLL_BURST_MAX)
1096 		burst_max = MIN_IOPOLL_BURST_MAX;
1097 	else if (burst_max > MAX_IOPOLL_BURST_MAX)
1098 		burst_max = MAX_IOPOLL_BURST_MAX;
1099 
1100 	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
1101 		    0, sysctl_burstmax_handler);
1102 	msg.base.lmsg.u.ms_result = burst_max;
1103 	msg.ctx = io_ctx;
1104 
1105 	return lwkt_domsg(netisr_portfn(io_ctx->poll_cpuid), &msg.base.lmsg, 0);
1106 }
1107 
1108 static void
1109 sysctl_eachburst_handler(netmsg_t nmsg)
1110 {
1111 	struct iopoll_sysctl_netmsg *msg = (struct iopoll_sysctl_netmsg *)nmsg;
1112 	struct iopoll_ctx *io_ctx;
1113 	uint32_t each_burst;
1114 
1115 	io_ctx = msg->ctx;
1116 	KKASSERT(&curthread->td_msgport == netisr_portfn(io_ctx->poll_cpuid));
1117 
1118 	each_burst = nmsg->lmsg.u.ms_result;
1119 	if (each_burst > io_ctx->poll_burst_max)
1120 		each_burst = io_ctx->poll_burst_max;
1121 	else if (each_burst < 1)
1122 		each_burst = 1;
1123 	io_ctx->poll_each_burst = each_burst;
1124 
1125 	lwkt_replymsg(&nmsg->lmsg, 0);
1126 }
1127 
1128 static int
1129 sysctl_eachburst(SYSCTL_HANDLER_ARGS)
1130 {
1131 	struct iopoll_ctx *io_ctx = arg1;
1132 	struct iopoll_sysctl_netmsg msg;
1133 	uint32_t each_burst;
1134 	int error;
1135 
1136 	each_burst = io_ctx->poll_each_burst;
1137 	error = sysctl_handle_int(oidp, &each_burst, 0, req);
1138 	if (error || req->newptr == NULL)
1139 		return error;
1140 
1141 	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
1142 		    0, sysctl_eachburst_handler);
1143 	msg.base.lmsg.u.ms_result = each_burst;
1144 	msg.ctx = io_ctx;
1145 
1146 	return lwkt_domsg(netisr_portfn(io_ctx->poll_cpuid), &msg.base.lmsg, 0);
1147 }
1148 
1149 static int
1150 iopoll_register(struct ifnet *ifp, struct iopoll_ctx *io_ctx,
1151 		const struct ifpoll_io *io_rec)
1152 {
1153 	int error;
1154 
1155 	KKASSERT(&curthread->td_msgport == netisr_portfn(io_ctx->poll_cpuid));
1156 
1157 	if (io_rec->poll_func == NULL)
1158 		return 0;
1159 
1160 	/*
1161 	 * Check if there is room.
1162 	 */
1163 	if (io_ctx->poll_handlers >= IFPOLL_LIST_LEN) {
1164 		/*
1165 		 * List full, cannot register more entries.
1166 		 * This should never happen; if it does, it is probably a
1167 		 * broken driver trying to register multiple times. Checking
1168 		 * this at runtime is expensive, and won't solve the problem
1169 		 * anyway, so just report a few times and then give up.
1170 		 */
1171 		static int verbose = 10; /* XXX */
1172 		if (verbose > 0) {
1173 			kprintf("io poll handlers list full, "
1174 				"maybe a broken driver ?\n");
1175 			verbose--;
1176 		}
1177 		error = ENOENT;
1178 	} else {
1179 		struct iopoll_rec *rec = &io_ctx->pr[io_ctx->poll_handlers];
1180 
1181 		rec->ifp = ifp;
1182 		rec->serializer = io_rec->serializer;
1183 		rec->arg = io_rec->arg;
1184 		rec->poll_func = io_rec->poll_func;
1185 
1186 		io_ctx->poll_handlers++;
1187 		error = 0;
1188 	}
1189 	return error;
1190 }
1191 
1192 static int
1193 iopoll_deregister(struct ifnet *ifp, struct iopoll_ctx *io_ctx)
1194 {
1195 	int i, error;
1196 
1197 	KKASSERT(&curthread->td_msgport == netisr_portfn(io_ctx->poll_cpuid));
1198 
1199 	for (i = 0; i < io_ctx->poll_handlers; ++i) {
1200 		if (io_ctx->pr[i].ifp == ifp) /* Found it */
1201 			break;
1202 	}
1203 	if (i == io_ctx->poll_handlers) {
1204 		error = ENOENT;
1205 	} else {
1206 		io_ctx->poll_handlers--;
1207 		if (i < io_ctx->poll_handlers) {
1208 			/* Last entry replaces this one. */
1209 			io_ctx->pr[i] = io_ctx->pr[io_ctx->poll_handlers];
1210 		}
1211 
1212 		if (io_ctx->poll_handlers == 0)
1213 			iopoll_reset_state(io_ctx);
1214 		error = 0;
1215 	}
1216 	return error;
1217 }
1218 
1219 static void
1220 poll_comm_init(int cpuid)
1221 {
1222 	struct poll_comm *comm;
1223 	char cpuid_str[16];
1224 
1225 	comm = kmalloc_cachealign(sizeof(*comm), M_DEVBUF, M_WAITOK | M_ZERO);
1226 
1227 	if (ifpoll_stfrac < 1)
1228 		ifpoll_stfrac = IFPOLL_STFRAC_DEFAULT;
1229 	if (ifpoll_txfrac < 1)
1230 		ifpoll_txfrac = IFPOLL_TXFRAC_DEFAULT;
1231 
1232 	comm->pollhz = ifpoll_pollhz;
1233 	comm->poll_cpuid = cpuid;
1234 	comm->poll_stfrac = ifpoll_stfrac - 1;
1235 	comm->poll_txfrac = ifpoll_txfrac - 1;
1236 
1237 	ksnprintf(cpuid_str, sizeof(cpuid_str), "%d", cpuid);
1238 
1239 	sysctl_ctx_init(&comm->sysctl_ctx);
1240 	comm->sysctl_tree = SYSCTL_ADD_NODE(&comm->sysctl_ctx,
1241 			    SYSCTL_STATIC_CHILDREN(_net_ifpoll),
1242 			    OID_AUTO, cpuid_str, CTLFLAG_RD, 0, "");
1243 
1244 	SYSCTL_ADD_PROC(&comm->sysctl_ctx, SYSCTL_CHILDREN(comm->sysctl_tree),
1245 			OID_AUTO, "pollhz", CTLTYPE_INT | CTLFLAG_RW,
1246 			comm, 0, sysctl_pollhz,
1247 			"I", "Device polling frequency");
1248 
1249 	if (cpuid == 0) {
1250 		SYSCTL_ADD_PROC(&comm->sysctl_ctx,
1251 				SYSCTL_CHILDREN(comm->sysctl_tree),
1252 				OID_AUTO, "status_frac",
1253 				CTLTYPE_INT | CTLFLAG_RW,
1254 				comm, 0, sysctl_stfrac,
1255 				"I", "# of cycles before status is polled");
1256 	}
1257 	SYSCTL_ADD_PROC(&comm->sysctl_ctx, SYSCTL_CHILDREN(comm->sysctl_tree),
1258 			OID_AUTO, "tx_frac", CTLTYPE_INT | CTLFLAG_RW,
1259 			comm, 0, sysctl_txfrac,
1260 			"I", "# of cycles before TX is polled");
1261 
1262 	poll_common[cpuid] = comm;
1263 }
1264 
1265 static void
1266 poll_comm_start(int cpuid)
1267 {
1268 	struct poll_comm *comm = poll_common[cpuid];
1269 	systimer_func_t func;
1270 
1271 	/*
1272 	 * Initialize systimer
1273 	 */
1274 	if (cpuid == 0)
1275 		func = poll_comm_systimer0;
1276 	else
1277 		func = poll_comm_systimer;
1278 	systimer_init_periodic_nq(&comm->pollclock, func, comm, 1);
1279 }
1280 
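/*
 * The systimer callbacks below run once per pollclock tick.  The frac
 * counters count down from poll_*frac; with the defaults
 * (poll_txfrac = IFPOLL_TXFRAC_DEFAULT - 1 = 0 and
 * poll_stfrac = IFPOLL_STFRAC_DEFAULT - 1 = 39) TX is polled on every RX
 * tick and status once every 40 ticks, matching the "1/1" and "1/40"
 * notes next to the defines above.
 */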
1281 static void
1282 _poll_comm_systimer(struct poll_comm *comm)
1283 {
1284 	if (comm->txfrac_count-- == 0) {
1285 		comm->txfrac_count = comm->poll_txfrac;
1286 		iopoll_clock(txpoll_context[comm->poll_cpuid]);
1287 	}
1288 	iopoll_clock(rxpoll_context[comm->poll_cpuid]);
1289 }
1290 
1291 static void
1292 poll_comm_systimer0(systimer_t info, int in_ipi __unused,
1293     struct intrframe *frame __unused)
1294 {
1295 	struct poll_comm *comm = info->data;
1296 	globaldata_t gd = mycpu;
1297 
1298 	KKASSERT(comm->poll_cpuid == gd->gd_cpuid && gd->gd_cpuid == 0);
1299 
1300 	crit_enter_gd(gd);
1301 
1302 	if (comm->stfrac_count-- == 0) {
1303 		comm->stfrac_count = comm->poll_stfrac;
1304 		stpoll_clock(&stpoll_context);
1305 	}
1306 	_poll_comm_systimer(comm);
1307 
1308 	crit_exit_gd(gd);
1309 }
1310 
1311 static void
1312 poll_comm_systimer(systimer_t info, int in_ipi __unused,
1313     struct intrframe *frame __unused)
1314 {
1315 	struct poll_comm *comm = info->data;
1316 	globaldata_t gd = mycpu;
1317 
1318 	KKASSERT(comm->poll_cpuid == gd->gd_cpuid && gd->gd_cpuid != 0);
1319 
1320 	crit_enter_gd(gd);
1321 	_poll_comm_systimer(comm);
1322 	crit_exit_gd(gd);
1323 }
1324 
1325 static void
1326 poll_comm_adjust_pollhz(struct poll_comm *comm)
1327 {
1328 	uint32_t handlers;
1329 	int pollhz = 1;
1330 
1331 	KKASSERT(&curthread->td_msgport == netisr_portfn(comm->poll_cpuid));
1332 
1333 	/*
1334 	 * If there is no polling handler registered, set systimer
1335 	 * frequency to the lowest value.  Polling systimer frequency
1336 	 * will be adjusted to the requested value, once there are
1337 	 * registered handlers.
1338 	 */
1339 	handlers = rxpoll_context[mycpuid]->poll_handlers +
1340 		   txpoll_context[mycpuid]->poll_handlers;
1341 	if (comm->poll_cpuid == 0)
1342 		handlers += stpoll_context.poll_handlers;
1343 	if (handlers)
1344 		pollhz = comm->pollhz;
1345 	systimer_adjust_periodic(&comm->pollclock, pollhz);
1346 }
1347 
1348 static int
1349 sysctl_pollhz(SYSCTL_HANDLER_ARGS)
1350 {
1351 	struct poll_comm *comm = arg1;
1352 	struct netmsg_base nmsg;
1353 	int error, phz;
1354 
1355 	phz = comm->pollhz;
1356 	error = sysctl_handle_int(oidp, &phz, 0, req);
1357 	if (error || req->newptr == NULL)
1358 		return error;
1359 	if (phz <= 0)
1360 		return EINVAL;
1361 	else if (phz > IFPOLL_FREQ_MAX)
1362 		phz = IFPOLL_FREQ_MAX;
1363 
1364 	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
1365 		    0, sysctl_pollhz_handler);
1366 	nmsg.lmsg.u.ms_result = phz;
1367 
1368 	return lwkt_domsg(netisr_portfn(comm->poll_cpuid), &nmsg.lmsg, 0);
1369 }
1370 
1371 static void
1372 sysctl_pollhz_handler(netmsg_t nmsg)
1373 {
1374 	struct poll_comm *comm = poll_common[mycpuid];
1375 
1376 	KKASSERT(&curthread->td_msgport == netisr_portfn(comm->poll_cpuid));
1377 
1378 	/* Save polling frequency */
1379 	comm->pollhz = nmsg->lmsg.u.ms_result;
1380 
1381 	/*
1382 	 * Adjust cached pollhz
1383 	 */
1384 	rxpoll_context[mycpuid]->pollhz = comm->pollhz;
1385 	txpoll_context[mycpuid]->pollhz =
1386 	    comm->pollhz / (comm->poll_txfrac + 1);
1387 
1388 	/*
1389 	 * Adjust polling frequency
1390 	 */
1391 	poll_comm_adjust_pollhz(comm);
1392 
1393 	lwkt_replymsg(&nmsg->lmsg, 0);
1394 }
1395 
1396 static int
1397 sysctl_stfrac(SYSCTL_HANDLER_ARGS)
1398 {
1399 	struct poll_comm *comm = arg1;
1400 	struct netmsg_base nmsg;
1401 	int error, stfrac;
1402 
1403 	KKASSERT(comm->poll_cpuid == 0);
1404 
1405 	stfrac = comm->poll_stfrac + 1;
1406 	error = sysctl_handle_int(oidp, &stfrac, 0, req);
1407 	if (error || req->newptr == NULL)
1408 		return error;
1409 	if (stfrac < 1)
1410 		return EINVAL;
1411 
1412 	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
1413 		    0, sysctl_stfrac_handler);
1414 	nmsg.lmsg.u.ms_result = stfrac - 1;
1415 
1416 	return lwkt_domsg(netisr_portfn(comm->poll_cpuid), &nmsg.lmsg, 0);
1417 }
1418 
1419 static void
1420 sysctl_stfrac_handler(netmsg_t nmsg)
1421 {
1422 	struct poll_comm *comm = poll_common[mycpuid];
1423 	int stfrac = nmsg->lmsg.u.ms_result;
1424 
1425 	KKASSERT(&curthread->td_msgport == netisr_portfn(comm->poll_cpuid));
1426 
1427 	crit_enter();
1428 	comm->poll_stfrac = stfrac;
1429 	if (comm->stfrac_count > comm->poll_stfrac)
1430 		comm->stfrac_count = comm->poll_stfrac;
1431 	crit_exit();
1432 
1433 	lwkt_replymsg(&nmsg->lmsg, 0);
1434 }
1435 
1436 static int
1437 sysctl_txfrac(SYSCTL_HANDLER_ARGS)
1438 {
1439 	struct poll_comm *comm = arg1;
1440 	struct netmsg_base nmsg;
1441 	int error, txfrac;
1442 
1443 	txfrac = comm->poll_txfrac + 1;
1444 	error = sysctl_handle_int(oidp, &txfrac, 0, req);
1445 	if (error || req->newptr == NULL)
1446 		return error;
1447 	if (txfrac < 1)
1448 		return EINVAL;
1449 
1450 	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
1451 		    0, sysctl_txfrac_handler);
1452 	nmsg.lmsg.u.ms_result = txfrac - 1;
1453 
1454 	return lwkt_domsg(netisr_portfn(comm->poll_cpuid), &nmsg.lmsg, 0);
1455 }
1456 
1457 static void
1458 sysctl_txfrac_handler(netmsg_t nmsg)
1459 {
1460 	struct poll_comm *comm = poll_common[mycpuid];
1461 	int txfrac = nmsg->lmsg.u.ms_result;
1462 
1463 	KKASSERT(&curthread->td_msgport == netisr_portfn(comm->poll_cpuid));
1464 
1465 	crit_enter();
1466 	comm->poll_txfrac = txfrac;
1467 	if (comm->txfrac_count > comm->poll_txfrac)
1468 		comm->txfrac_count = comm->poll_txfrac;
1469 	crit_exit();
1470 
1471 	lwkt_replymsg(&nmsg->lmsg, 0);
1472 }
1473 
1474 void
1475 ifpoll_compat_setup(struct ifpoll_compat *cp,
1476     struct sysctl_ctx_list *sysctl_ctx,
1477     struct sysctl_oid *sysctl_tree,
1478     int unit, struct lwkt_serialize *slz)
1479 {
1480 	cp->ifpc_stcount = 0;
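	/*
	 * Illustrative arithmetic, assuming the compile-time defaults: with
	 * poll_stfrac + 1 = 40 and howmany(375, 15) = 25, ifpc_stfrac below
	 * becomes 40 * 25 - 1 = 999.
	 */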
1481 	cp->ifpc_stfrac = ((poll_common[0]->poll_stfrac + 1) *
1482 	    howmany(IOPOLL_BURST_MAX, IOPOLL_EACH_BURST)) - 1;
1483 
1484 	cp->ifpc_cpuid = unit % ncpus2;
1485 	cp->ifpc_serializer = slz;
1486 
1487 	if (sysctl_ctx != NULL && sysctl_tree != NULL) {
1488 		SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1489 		    OID_AUTO, "npoll_stfrac", CTLTYPE_INT | CTLFLAG_RW,
1490 		    cp, 0, sysctl_compat_npoll_stfrac, "I",
1491 		    "polling status frac");
1492 		SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1493 		    OID_AUTO, "npoll_cpuid", CTLTYPE_INT | CTLFLAG_RW,
1494 		    cp, 0, sysctl_compat_npoll_cpuid, "I",
1495 		    "polling cpuid");
1496 	}
1497 }
1498 
1499 static int
1500 sysctl_compat_npoll_stfrac(SYSCTL_HANDLER_ARGS)
1501 {
1502 	struct ifpoll_compat *cp = arg1;
1503 	int error = 0, stfrac;
1504 
1505 	lwkt_serialize_enter(cp->ifpc_serializer);
1506 
1507 	stfrac = cp->ifpc_stfrac + 1;
1508 	error = sysctl_handle_int(oidp, &stfrac, 0, req);
1509 	if (!error && req->newptr != NULL) {
1510 		if (stfrac < 1) {
1511 			error = EINVAL;
1512 		} else {
1513 			cp->ifpc_stfrac = stfrac - 1;
1514 			if (cp->ifpc_stcount > cp->ifpc_stfrac)
1515 				cp->ifpc_stcount = cp->ifpc_stfrac;
1516 		}
1517 	}
1518 
1519 	lwkt_serialize_exit(cp->ifpc_serializer);
1520 	return error;
1521 }
1522 
1523 static int
1524 sysctl_compat_npoll_cpuid(SYSCTL_HANDLER_ARGS)
1525 {
1526 	struct ifpoll_compat *cp = arg1;
1527 	int error = 0, cpuid;
1528 
1529 	lwkt_serialize_enter(cp->ifpc_serializer);
1530 
1531 	cpuid = cp->ifpc_cpuid;
1532 	error = sysctl_handle_int(oidp, &cpuid, 0, req);
1533 	if (!error && req->newptr != NULL) {
1534 		if (cpuid < 0 || cpuid >= ncpus2)
1535 			error = EINVAL;
1536 		else
1537 			cp->ifpc_cpuid = cpuid;
1538 	}
1539 
1540 	lwkt_serialize_exit(cp->ifpc_serializer);
1541 	return error;
1542 }
1543