xref: /dragonfly/sys/net/netisr.c (revision ce0e08e2)
1 /*
2  * Copyright (c) 2003, 2004 Matthew Dillon. All rights reserved.
3  * Copyright (c) 2003, 2004 Jeffrey M. Hsu.  All rights reserved.
4  * Copyright (c) 2003 Jonathan Lemon.  All rights reserved.
5  * Copyright (c) 2003, 2004 The DragonFly Project.  All rights reserved.
6  *
7  * This code is derived from software contributed to The DragonFly Project
8  * by Jonathan Lemon, Jeffrey M. Hsu, and Matthew Dillon.
9  *
10  * Jonathan Lemon gave Jeffrey Hsu permission to combine his copyright
11  * into this one around July 8 2004.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  * 3. Neither the name of The DragonFly Project nor the names of its
22  *    contributors may be used to endorse or promote products derived
23  *    from this software without specific, prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
28  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
29  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
30  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
31  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
32  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
33  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
34  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
35  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  * $DragonFly: src/sys/net/netisr.c,v 1.49 2008/11/01 10:29:31 sephe Exp $
39  */
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/kernel.h>
44 #include <sys/malloc.h>
45 #include <sys/msgport.h>
46 #include <sys/proc.h>
47 #include <sys/interrupt.h>
48 #include <sys/socket.h>
49 #include <sys/sysctl.h>
50 #include <net/if.h>
51 #include <net/if_var.h>
52 #include <net/netisr.h>
53 #include <machine/cpufunc.h>
54 
55 #include <sys/thread2.h>
56 #include <sys/msgport2.h>
57 #include <net/netmsg2.h>
58 
59 #define NETISR_GET_MPLOCK(ni) \
60 do { \
61     if (((ni)->ni_flags & NETISR_FLAG_MPSAFE) == 0) \
62 	get_mplock(); \
63 } while (0)
64 
65 #define NETISR_REL_MPLOCK(ni) \
66 do { \
67     if (((ni)->ni_flags & NETISR_FLAG_MPSAFE) == 0) \
68 	rel_mplock(); \
69 } while (0)
70 
71 static void netmsg_sync_func(struct netmsg *msg);
72 
73 struct netmsg_port_registration {
74     TAILQ_ENTRY(netmsg_port_registration) npr_entry;
75     lwkt_port_t	npr_port;
76 };
77 
78 static struct netisr netisrs[NETISR_MAX];
79 static TAILQ_HEAD(,netmsg_port_registration) netreglist;
80 
81 /* Per-CPU thread to handle any protocol.  */
82 struct thread netisr_cpu[MAXCPU];
83 lwkt_port netisr_afree_rport;
84 lwkt_port netisr_adone_rport;
85 lwkt_port netisr_apanic_rport;
86 lwkt_port netisr_sync_port;
87 
88 static int (*netmsg_fwd_port_fn)(lwkt_port_t, lwkt_msg_t);
89 
90 static int netisr_mpsafe_thread = 0;
91 TUNABLE_INT("net.netisr.mpsafe_thread", &netisr_mpsafe_thread);
92 
93 SYSCTL_NODE(_net, OID_AUTO, netisr, CTLFLAG_RW, 0, "netisr");
94 SYSCTL_INT(_net_netisr, OID_AUTO, mpsafe_thread, CTLFLAG_RW,
95 	   &netisr_mpsafe_thread, 0,
96 	   "0:BGL, 1:Adaptive BGL, 2:No BGL (experimental)");
97 
98 static __inline int
99 NETISR_TO_MSGF(const struct netisr *ni)
100 {
101     int msg_flags = 0;
102 
103     if (ni->ni_flags & NETISR_FLAG_MPSAFE)
104     	msg_flags |= MSGF_MPSAFE;
105     return msg_flags;
106 }
107 
108 /*
109  * netisr_afree_rport replymsg function, only used to handle async
110  * messages which the sender has abandoned to their fate.
111  */
112 static void
113 netisr_autofree_reply(lwkt_port_t port, lwkt_msg_t msg)
114 {
115     kfree(msg, M_LWKTMSG);
116 }
117 
118 /*
119  * We need a custom putport function to handle the case where the
120  * message target is the current thread's message port.  This case
121  * can occur when the TCP or UDP stack does a direct callback to NFS and NFS
122  * then turns around and executes a network operation synchronously.
123  *
124  * To prevent deadlocking, we must execute these self-referential messages
125  * synchronously, effectively turning the message into a glorified direct
126  * procedure call back into the protocol stack.  The operation must be
127  * complete on return or we will deadlock, so panic if it isn't.
128  */
129 static int
130 netmsg_put_port(lwkt_port_t port, lwkt_msg_t lmsg)
131 {
132     netmsg_t netmsg = (void *)lmsg;
133 
134     if ((lmsg->ms_flags & MSGF_SYNC) && port == &curthread->td_msgport) {
135 	netmsg->nm_dispatch(netmsg);
136 	if ((lmsg->ms_flags & MSGF_DONE) == 0)
137 	    panic("netmsg_put_port: self-referential deadlock on netport");
138 	return(EASYNC);
139     } else {
140 	return(netmsg_fwd_port_fn(port, lmsg));
141     }
142 }
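
/*
 * Illustrative sketch (hypothetical example_* names): a dispatch function
 * already running in a protocol thread may lwkt_domsg() to its own port.
 * netmsg_put_port() detects the self-reference and runs the dispatch
 * function inline instead of deadlocking on its own message queue.
 */
#if 0
static void
example_nested_dispatch(struct netmsg *msg)
{
    /* ... nested protocol operation ... */
    lwkt_replymsg(&msg->nm_lmsg, 0);
}

static void
example_nested_call(void)
{
    struct netmsg nmsg;

    netmsg_init(&nmsg, &curthread->td_msgport, 0, example_nested_dispatch);
    lwkt_domsg(&curthread->td_msgport, &nmsg.nm_lmsg, 0);
}
#endif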
143 
144 /*
145  * UNIX DOMAIN sockets still have to run their uipc functions synchronously,
146  * because they depend on the user proc context for a number of things
147  * (like creds) which we have not yet incorporated into the message structure.
148  *
149  * However, we maintain our message/port abstraction.  Having a special
150  * synchronous port which runs the commands synchronously gives us the
151  * ability to serialize operations in one place later on when we start
152  * removing the BGL.
153  */
154 static int
155 netmsg_sync_putport(lwkt_port_t port, lwkt_msg_t lmsg)
156 {
157     netmsg_t netmsg = (void *)lmsg;
158 
159     KKASSERT((lmsg->ms_flags & MSGF_DONE) == 0);
160 
161     lmsg->ms_target_port = port;	/* required for abort */
162     netmsg->nm_dispatch(netmsg);
163     return(EASYNC);
164 }
165 
166 static void
167 netisr_init(void)
168 {
169     int i;
170 
171     TAILQ_INIT(&netreglist);
172 
173     /*
174      * Create default per-cpu threads for generic protocol handling.
175      */
176     for (i = 0; i < ncpus; ++i) {
177 	lwkt_create(netmsg_service_loop, &netisr_mpsafe_thread, NULL,
178 		    &netisr_cpu[i], TDF_NETWORK | TDF_MPSAFE, i,
179 		    "netisr_cpu %d", i);
180 	netmsg_service_port_init(&netisr_cpu[i].td_msgport);
181     }
182 
183     /*
184      * The netisr_afree_rport is a special reply port which automatically
185      * frees the replied message.  The netisr_adone_rport simply marks
186      * the message as being done.  The netisr_apanic_rport panics if
187      * the message is replied to.
188      */
189     lwkt_initport_replyonly(&netisr_afree_rport, netisr_autofree_reply);
190     lwkt_initport_replyonly_null(&netisr_adone_rport);
191     lwkt_initport_panic(&netisr_apanic_rport);
192 
193     /*
194      * The netisr_sync_port is a special port which executes the message
195      * synchronously and waits for it if EASYNC is returned.
196      */
197     lwkt_initport_putonly(&netisr_sync_port, netmsg_sync_putport);
198 }
199 
200 SYSINIT(netisr, SI_SUB_PRE_DRIVERS, SI_ORDER_FIRST, netisr_init, NULL);
201 
202 /*
203  * Finish initializing the message port for a netmsg service.  This also
204  * registers the port for synchronous cleanup operations such as when an
205  * ifnet is being destroyed.  There is no deregistration API yet.
206  */
207 void
208 netmsg_service_port_init(lwkt_port_t port)
209 {
210     struct netmsg_port_registration *reg;
211 
212     /*
213      * Override the putport function.  Our custom function checks for
214      * self-references and executes such commands synchronously.
215      */
216     if (netmsg_fwd_port_fn == NULL)
217 	netmsg_fwd_port_fn = port->mp_putport;
218     KKASSERT(netmsg_fwd_port_fn == port->mp_putport);
219     port->mp_putport = netmsg_put_port;
220 
221     /*
222      * Keep track of ports using the netmsg API so we can synchronize
223      * certain operations (such as freeing an ifnet structure) across all
224      * consumers.
225      */
226     reg = kmalloc(sizeof(*reg), M_TEMP, M_WAITOK|M_ZERO);
227     reg->npr_port = port;
228     TAILQ_INSERT_TAIL(&netreglist, reg, npr_entry);
229 }
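
/*
 * Illustrative sketch (hypothetical example_* names): a protocol that rolls
 * its own message thread creates it the same way netisr_init() does and
 * hands the thread's port to netmsg_service_port_init() so the thread is
 * covered by netmsg_service_sync().
 */
#if 0
static struct thread example_proto_thread;

static void
example_proto_thread_init(void)
{
    lwkt_create(netmsg_service_loop, &netisr_mpsafe_thread, NULL,
		&example_proto_thread, TDF_NETWORK | TDF_MPSAFE, 0,
		"example_proto");
    netmsg_service_port_init(&example_proto_thread.td_msgport);
}
#endif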
230 
231 /*
232  * This function synchronizes the caller with all netmsg services.  For
233  * example, if an interface is being removed we must make sure that all
234  * packets related to that interface complete processing before the structure
235  * can actually be freed.  This sort of synchronization is an alternative to
236  * ref-counting the netif, removing the ref counting overhead in favor of
237  * placing additional overhead in the netif freeing sequence (where it is
238  * inconsequential).
239  */
240 void
241 netmsg_service_sync(void)
242 {
243     struct netmsg_port_registration *reg;
244     struct netmsg smsg;
245 
246     netmsg_init(&smsg, &curthread->td_msgport, MSGF_MPSAFE, netmsg_sync_func);
247 
248     TAILQ_FOREACH(reg, &netreglist, npr_entry) {
249 	lwkt_domsg(reg->npr_port, &smsg.nm_lmsg, 0);
250     }
251 }
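
/*
 * Illustrative sketch: a teardown path (e.g. ifnet destruction) calls
 * netmsg_service_sync() to drain all registered netmsg ports before the
 * data structures that in-flight messages may still reference are freed.
 */
#if 0
static void
example_ifnet_teardown(struct ifnet *ifp)
{
    /* ... detach ifp from the protocol stacks ... */
    netmsg_service_sync();	/* all registered ports have drained */
    /* ... now the ifp-related structures can be freed safely ... */
}
#endif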
252 
253 /*
254  * The netmsg function simply replies the message.  API semantics require
255  * EASYNC to be returned if the netmsg function disposes of the message.
256  */
257 static void
258 netmsg_sync_func(struct netmsg *msg)
259 {
260     lwkt_replymsg(&msg->nm_lmsg, 0);
261 }
262 
263 /*
264  * Return the current BGL lock state (1: locked, 0: unlocked).
265  */
266 int
267 netmsg_service(struct netmsg *msg, int mpsafe_mode, int mplocked)
268 {
269     /*
270      * Adjust the mplock dynamically.
271      */
272     switch (mpsafe_mode) {
273     case NETMSG_SERVICE_ADAPTIVE: /* Adaptive BGL */
274 	if (msg->nm_lmsg.ms_flags & MSGF_MPSAFE) {
275 	    if (mplocked) {
276 		rel_mplock();
277 		mplocked = 0;
278 	    }
279 	    msg->nm_dispatch(msg);
280 	    /* Leave mpunlocked */
281 	} else {
282 	    if (!mplocked) {
283 		get_mplock();
284 		/* mplocked = 1; not needed */
285 	    }
286 	    msg->nm_dispatch(msg);
287 	    rel_mplock();
288 	    mplocked = 0;
289 	    /* Leave mpunlocked, next msg might be mpsafe */
290 	}
291 	break;
292 
293     case NETMSG_SERVICE_MPSAFE: /* No BGL */
294 	if (mplocked) {
295 	    rel_mplock();
296 	    mplocked = 0;
297 	}
298 	msg->nm_dispatch(msg);
299 	/* Leave mpunlocked */
300 	break;
301 
302     default: /* BGL */
303 	if (!mplocked) {
304 	    get_mplock();
305 	    mplocked = 1;
306 	}
307 	msg->nm_dispatch(msg);
308 	/* Leave mplocked */
309 	break;
310     }
311     return mplocked;
312 }
313 
314 /*
315  * Generic netmsg service loop.  Some protocols may roll their own but all
316  * must do the basic command dispatch function call done here.
317  */
318 void
319 netmsg_service_loop(void *arg)
320 {
321     struct netmsg *msg;
322     int mplocked, *mpsafe_mode = arg;
323 
324     /*
325      * Thread was started with TDF_MPSAFE
326      */
327     mplocked = 0;
328 
329     /*
330      * Loop on netmsgs
331      */
332     while ((msg = lwkt_waitport(&curthread->td_msgport, 0))) {
333 	mplocked = netmsg_service(msg, *mpsafe_mode, mplocked);
334     }
335 }
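
/*
 * Illustrative sketch (hypothetical example_* names): a protocol-private
 * service loop may interleave its own bookkeeping, but it still has to
 * dispatch every message through netmsg_service().
 */
#if 0
static void
example_proto_service_loop(void *arg)
{
    struct netmsg *msg;
    int mplocked = 0;		/* thread was started with TDF_MPSAFE */

    while ((msg = lwkt_waitport(&curthread->td_msgport, 0))) {
	/* ... per-message bookkeeping ... */
	mplocked = netmsg_service(msg, NETMSG_SERVICE_ADAPTIVE, mplocked);
    }
}
#endif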
336 
337 /*
338  * Call the netisr directly.
339  * Queueing may be done in the msg port layer at its discretion.
340  */
341 void
342 netisr_dispatch(int num, struct mbuf *m)
343 {
344     /* just queue it for now XXX JH */
345     netisr_queue(num, m);
346 }
347 
348 /*
349  * Same as netisr_dispatch(), but always queue.
350  * This is used either in places where we are not confident that
351  * direct dispatch is possible, or in places where queueing is required.
352  */
353 int
354 netisr_queue(int num, struct mbuf *m)
355 {
356     struct netisr *ni;
357     struct netmsg_packet *pmsg;
358     lwkt_port_t port;
359 
360     KASSERT((num > 0 && num < (sizeof(netisrs)/sizeof(netisrs[0]))),
361     	    ("%s: bad isr %d", __func__, num));
362 
363     ni = &netisrs[num];
364     if (ni->ni_handler == NULL) {
365 	kprintf("%s: unregistered isr %d\n", __func__, num);
366 	m_freem(m);
367 	return (EIO);
368     }
369 
370     if ((port = ni->ni_mport(&m)) == NULL)
371 	return (EIO);
372 
373     pmsg = &m->m_hdr.mh_netmsg;
374 
375     netmsg_init(&pmsg->nm_netmsg, &netisr_apanic_rport, NETISR_TO_MSGF(ni),
376     		ni->ni_handler);
377     pmsg->nm_packet = m;
378     pmsg->nm_netmsg.nm_lmsg.u.ms_result = num;
379     lwkt_sendmsg(port, &pmsg->nm_netmsg.nm_lmsg);
380     return (0);
381 }
382 
383 void
384 netisr_register(int num, lwkt_portfn_t mportfn, netisr_fn_t handler,
385 		uint32_t flags)
386 {
387     struct netisr *ni;
388 
389     KASSERT((num > 0 && num < (sizeof(netisrs)/sizeof(netisrs[0]))),
390 	("netisr_register: bad isr %d", num));
391     ni = &netisrs[num];
392 
393     ni->ni_mport = mportfn;
394     ni->ni_handler = handler;
395     ni->ni_flags = flags;
396     netmsg_init(&ni->ni_netmsg, &netisr_adone_rport, NETISR_TO_MSGF(ni), NULL);
397 }
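
/*
 * Illustrative sketch (hypothetical NETISR_EXAMPLE and example_* names,
 * assuming netisr_fn_t handlers receive the netmsg embedded in the mbuf):
 * register a handler on the CPU 0 port and queue packets to it.  The
 * handler must not reply; the reply port is netisr_apanic_rport and the
 * message lives in the mbuf header.
 */
#if 0
static void
example_isr_handler(struct netmsg *msg)
{
    struct netmsg_packet *pmsg = (struct netmsg_packet *)msg;
    struct mbuf *m = pmsg->nm_packet;

    /* ... protocol input processing consumes the mbuf ... */
    m_freem(m);
}

static void
example_isr_attach(void)
{
    netisr_register(NETISR_EXAMPLE, cpu0_portfn, example_isr_handler, 0);
}

static void
example_isr_input(struct mbuf *m)
{
    netisr_queue(NETISR_EXAMPLE, m);
}
#endif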
398 
399 int
400 netisr_unregister(int num)
401 {
402     KASSERT((num > 0 && num < (sizeof(netisrs)/sizeof(netisrs[0]))),
403 	("%s: bad isr %d", __func__, num));
404 
405     /* XXX JH */
406     return (0);
407 }
408 
409 /*
410  * Return message port for default handler thread on CPU 0.
411  */
412 lwkt_port_t
413 cpu0_portfn(struct mbuf **mptr)
414 {
415     return (&netisr_cpu[0].td_msgport);
416 }
417 
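/*
 * Return the message port for the netisr thread on the given CPU.
 */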
418 lwkt_port_t
419 cpu_portfn(int cpu)
420 {
421     return (&netisr_cpu[cpu].td_msgport);
422 }
423 
424 /*
425  * If the current thread is a network protocol thread (TDF_NETWORK),
426  * then return the current thread's message port.
427  * XXX Else, return the current CPU's netisr message port.
428  */
429 lwkt_port_t
430 cur_netport(void)
431 {
432     if (curthread->td_flags & TDF_NETWORK)
433 	return &curthread->td_msgport;
434     else
435 	return cpu_portfn(mycpuid);
436 }
437 
438 /* ARGSUSED */
439 lwkt_port_t
440 cpu0_soport(struct socket *so __unused, struct sockaddr *nam __unused,
441 	    struct mbuf **dummy __unused, int req __unused)
442 {
443     return (&netisr_cpu[0].td_msgport);
444 }
445 
446 lwkt_port_t
447 cpu0_ctlport(int cmd __unused, struct sockaddr *sa __unused,
448 	     void *extra __unused)
449 {
450     return (&netisr_cpu[0].td_msgport);
451 }
452 
453 lwkt_port_t
454 sync_soport(struct socket *so __unused, struct sockaddr *nam __unused,
455 	    struct mbuf **dummy __unused, int req __unused)
456 {
457     return (&netisr_sync_port);
458 }
459 
460 /*
461  * schednetisr() is used to call the netisr handler from the appropriate
462  * netisr thread for polling and other purposes.
463  *
464  * This function may be called from a hard interrupt or IPI and must be
465  * MP SAFE and non-blocking.  We use a fixed, per-netisr message instead of
466  * trying to allocate one.  We must get ourselves onto the target cpu
467  * to safely check the MSGF_DONE bit on the message, but since the message
468  * will be sent to that cpu anyway, this does not add any extra work beyond
469  * what lwkt_sendmsg() would have already had to do to schedule the target
470  * thread.
471  */
472 static void
473 schednetisr_remote(void *data)
474 {
475     int num = (int)(intptr_t)data;
476     struct netisr *ni = &netisrs[num];
477     lwkt_port_t port = &netisr_cpu[0].td_msgport;
478     struct netmsg *pmsg;
479 
480     pmsg = &netisrs[num].ni_netmsg;
481     crit_enter();
482     if (pmsg->nm_lmsg.ms_flags & MSGF_DONE) {
483 	netmsg_init(pmsg, &netisr_adone_rport, NETISR_TO_MSGF(ni),
484 		    ni->ni_handler);
485 	pmsg->nm_lmsg.u.ms_result = num;
486 	lwkt_sendmsg(port, &pmsg->nm_lmsg);
487     }
488     crit_exit();
489 }
490 
491 void
492 schednetisr(int num)
493 {
494     KASSERT((num > 0 && num < (sizeof(netisrs)/sizeof(netisrs[0]))),
495 	("schednetisr: bad isr %d", num));
496 #ifdef SMP
497     if (mycpu->gd_cpuid != 0)
498 	lwkt_send_ipiq(globaldata_find(0), schednetisr_remote, (void *)(intptr_t)num);
499     else
500 	schednetisr_remote((void *)(intptr_t)num);
501 #else
502     schednetisr_remote((void *)(intptr_t)num);
503 #endif
504 }
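
/*
 * Illustrative sketch (hypothetical NETISR_EXAMPLE): schednetisr() reuses
 * the fixed ni_netmsg, so a handler driven this way must reply (the reply
 * port is netisr_adone_rport, which marks the message done) before the
 * netisr can be scheduled again.
 */
#if 0
static void
example_poll_handler(struct netmsg *msg)
{
    /* ... periodic / polled work ... */
    lwkt_replymsg(&msg->nm_lmsg, 0);	/* re-arm: marks the message done */
}

static void
example_poll_kick(void)
{
    schednetisr(NETISR_EXAMPLE);	/* safe from hard interrupt or IPI */
}
#endif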
505 
506 lwkt_port_t
507 netisr_find_port(int num, struct mbuf **m0)
508 {
509     struct netisr *ni;
510     lwkt_port_t port;
511     struct mbuf *m = *m0;
512 
513     *m0 = NULL;
514 
515     KASSERT((num > 0 && num < (sizeof(netisrs)/sizeof(netisrs[0]))),
516     	    ("%s: bad isr %d", __func__, num));
517 
518     ni = &netisrs[num];
519     if (ni->ni_mport == NULL) {
520 	kprintf("%s: unregistered isr %d\n", __func__, num);
521 	m_freem(m);
522 	return NULL;
523     }
524 
525     if ((port = ni->ni_mport(&m)) == NULL)
526 	return NULL;
527 
528     *m0 = m;
529     return port;
530 }
531 
532 void
533 netisr_run(int num, struct mbuf *m)
534 {
535     struct netisr *ni;
536     struct netmsg_packet *pmsg;
537 
538     KASSERT((num > 0 && num < (sizeof(netisrs)/sizeof(netisrs[0]))),
539     	    ("%s: bad isr %d", __func__, num));
540 
541     ni = &netisrs[num];
542     if (ni->ni_handler == NULL) {
543 	kprintf("%s: unregistered isr %d\n", __func__, num);
544 	m_freem(m);
545 	return;
546     }
547 
548     pmsg = &m->m_hdr.mh_netmsg;
549 
550     netmsg_init(&pmsg->nm_netmsg, &netisr_apanic_rport, 0, ni->ni_handler);
551     pmsg->nm_packet = m;
552     pmsg->nm_netmsg.nm_lmsg.u.ms_result = num;
553 
554     NETISR_GET_MPLOCK(ni);
555     ni->ni_handler(&pmsg->nm_netmsg);
556     NETISR_REL_MPLOCK(ni);
557 }
558