xref: /dragonfly/sys/net/netisr.c (revision 1847e88f)
1 /*
2  * Copyright (c) 2003, 2004 Matthew Dillon. All rights reserved.
3  * Copyright (c) 2003, 2004 Jeffrey M. Hsu.  All rights reserved.
4  * Copyright (c) 2003 Jonathan Lemon.  All rights reserved.
5  * Copyright (c) 2003, 2004 The DragonFly Project.  All rights reserved.
6  *
7  * This code is derived from software contributed to The DragonFly Project
8  * by Jonathan Lemon, Jeffrey M. Hsu, and Matthew Dillon.
9  *
10  * Jonathan Lemon gave Jeffrey Hsu permission to combine his copyright
11  * into this one around July 8 2004.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  * 3. Neither the name of The DragonFly Project nor the names of its
22  *    contributors may be used to endorse or promote products derived
23  *    from this software without specific, prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
28  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
29  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
30  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
31  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
32  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
33  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
34  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
35  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  * $DragonFly: src/sys/net/netisr.c,v 1.25 2006/01/31 19:05:35 dillon Exp $
39  */
40 
41 /*
42  * Copyright (c) 2003, 2004 Jeffrey M. Hsu.  All rights reserved.
43  *
44  * License terms: all terms for the DragonFly license above plus the following:
45  *
46  * 4. All advertising materials mentioning features or use of this software
47  *    must display the following acknowledgement:
48  *
49  *	This product includes software developed by Jeffrey M. Hsu
50  *	for the DragonFly Project.
51  *
52  *    This requirement may be waived with permission from Jeffrey Hsu.
53  *    This requirement will sunset and may be removed on July 8 2005,
54  *    after which the standard DragonFly license (as shown above) will
55  *    apply.
56  */
57 
58 #include <sys/param.h>
59 #include <sys/systm.h>
60 #include <sys/kernel.h>
61 #include <sys/malloc.h>
62 #include <sys/msgport.h>
63 #include <sys/proc.h>
64 #include <sys/interrupt.h>
65 #include <sys/socket.h>
66 #include <sys/sysctl.h>
67 #include <net/if.h>
68 #include <net/if_var.h>
69 #include <net/netisr.h>
70 #include <machine/cpufunc.h>
71 #include <machine/ipl.h>
72 
73 #include <sys/thread2.h>
74 #include <sys/msgport2.h>
75 
76 static int netmsg_sync_func(struct netmsg *msg);
77 
78 struct netmsg_port_registration {
79     TAILQ_ENTRY(netmsg_port_registration) npr_entry;
80     lwkt_port_t	npr_port;
81 };
82 
83 static struct netisr netisrs[NETISR_MAX];
84 static TAILQ_HEAD(,netmsg_port_registration) netreglist;
85 
86 /* Per-CPU thread to handle any protocol.  */
87 struct thread netisr_cpu[MAXCPU];
88 lwkt_port netisr_afree_rport;
89 lwkt_port netisr_adone_rport;
90 lwkt_port netisr_sync_port;
91 
92 /*
93  * netisr_afree_rport replymsg function, only used to handle async
94  * messages which the sender has abandoned to their fate.
95  */
96 static void
97 netisr_autofree_reply(lwkt_port_t port, lwkt_msg_t msg)
98 {
99     free(msg, M_LWKTMSG);
100 }
101 
102 /*
103  * We must construct a custom putport function (which runs in the context
104  * of the message originator).
105  *
106  * Our custom putport must check for self-referential messages, which can
107  * occur when the so_upcall routine is called (e.g. nfs).  Self-referential
108  * messages are executed synchronously.  However, we must panic if the message
109  * is not marked DONE on completion because the self-referential case cannot
110  * block without deadlocking.
111  *
112  * note: ms_target_port does not need to be set when returning a synchronous
113  * error code.
114  */
115 static int
116 netmsg_put_port(lwkt_port_t port, lwkt_msg_t lmsg)
117 {
118     int error;
119 
120     if ((lmsg->ms_flags & MSGF_ASYNC) == 0 && port->mp_td == curthread) {
121 	error = lmsg->ms_cmd.cm_func(lmsg);
122 	if (error == EASYNC && (lmsg->ms_flags & MSGF_DONE) == 0)
123 	    panic("netmsg_put_port: self-referential deadlock on netport");
124 	return(error);
125     } else {
126 	return(lwkt_default_putport(port, lmsg));
127     }
128 }
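
/*
 * Illustrative note, not part of this revision: the self-referential case
 * arises when code that is already running in a netisr thread (for example
 * an so_upcall callback) issues a synchronous message whose target port is
 * owned by that same thread.  Queueing the message would deadlock because
 * the thread would wind up waiting on itself, so netmsg_put_port() runs the
 * command inline instead.  Roughly:
 *
 *	// executing inside a netisr thread, so port->mp_td == curthread
 *	lwkt_domsg(port, &msg->nm_lmsg);    // cm_func is called directly,
 *					    // no queueing, no deadlock
 */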
129 
130 /*
131  * UNIX DOMAIN sockets still have to run their uipc functions synchronously,
132  * because they depend on the user proc context for a number of things
133  * (like creds) which we have not yet incorporated into the message structure.
134  *
135  * However, we maintain our message/port abstraction.  Having a special
136  * synchronous port which runs the commands synchronously gives us the
137  * ability to serialize operations in one place later on when we start
138  * removing the BGL.
139  *
140  * We clear MSGF_DONE prior to executing the message in order to close
141  * any potential replymsg races with the flags field.  If a synchronous
142  * result code is returned we set MSGF_DONE again.  MSGF_DONE's flag state
143  * must be correct or the caller will be confused.
144  */
145 static int
146 netmsg_sync_putport(lwkt_port_t port, lwkt_msg_t lmsg)
147 {
148     int error;
149 
150     lmsg->ms_flags &= ~MSGF_DONE;
151     lmsg->ms_target_port = port;	/* required for abort */
152     error = lmsg->ms_cmd.cm_func(lmsg);
153     if (error == EASYNC)
154 	error = lwkt_waitmsg(lmsg);
155     else
156 	lmsg->ms_flags |= MSGF_DONE;
157     return(error);
158 }
159 
160 static void
161 netmsg_sync_abortport(lwkt_port_t port, lwkt_msg_t lmsg)
162 {
163     lmsg->ms_abort_port = lmsg->ms_reply_port;
164     lmsg->ms_flags |= MSGF_ABORTED;
165     lmsg->ms_abort.cm_func(lmsg);
166 }
167 
168 static void
169 netisr_init(void)
170 {
171     int i;
172 
173     TAILQ_INIT(&netreglist);
174 
175     /*
176      * Create default per-cpu threads for generic protocol handling.
177      */
178     for (i = 0; i < ncpus; ++i) {
179 	lwkt_create(netmsg_service_loop, NULL, NULL, &netisr_cpu[i], 0, i,
180 		    "netisr_cpu %d", i);
181 	netmsg_service_port_init(&netisr_cpu[i].td_msgport);
182     }
183 
184     /*
185      * The netisr_afree_rport is a special reply port which automatically
186      * frees the replied message.  The netisr_adone_rport simply marks
187      * the message as being done.
188      */
189     lwkt_initport(&netisr_afree_rport, NULL);
190     netisr_afree_rport.mp_replyport = netisr_autofree_reply;
191     lwkt_initport_null_rport(&netisr_adone_rport, NULL);
192 
193     /*
194      * The netisr_sync_port is a special port which executes the message
195      * synchronously and waits for it if EASYNC is returned.
196      */
197     lwkt_initport(&netisr_sync_port, NULL);
198     netisr_sync_port.mp_putport = netmsg_sync_putport;
199     netisr_sync_port.mp_abortport = netmsg_sync_abortport;
200 }
201 
202 SYSINIT(netisr, SI_SUB_PROTO_BEGIN, SI_ORDER_FIRST, netisr_init, NULL);
203 
204 /*
205  * Finish initializing the message port for a netmsg service.  This also
206  * registers the port for synchronous cleanup operations such as when an
207  * ifnet is being destroyed.  There is no deregistration API yet.
208  */
209 void
210 netmsg_service_port_init(lwkt_port_t port)
211 {
212     struct netmsg_port_registration *reg;
213 
214     /*
215      * Override the putport function.  Our custom function checks for
216      * self-references and executes such commands synchronously.
217      */
218     port->mp_putport = netmsg_put_port;
219 
220     /*
221      * Keep track of ports using the netmsg API so we can synchronize
222      * certain operations (such as freeing an ifnet structure) across all
223      * consumers.
224      */
225     reg = malloc(sizeof(*reg), M_TEMP, M_WAITOK|M_ZERO);
226     reg->npr_port = port;
227     TAILQ_INSERT_TAIL(&netreglist, reg, npr_entry);
228 }
229 
230 /*
231  * This function synchronizes the caller with all netmsg services.  For
232  * example, if an interface is being removed we must make sure that all
233  * packets related to that interface complete processing before the structure
234  * can actually be freed.  This sort of synchronization is an alternative to
235  * ref-counting the netif, removing the ref counting overhead in favor of
236  * placing additional overhead in the netif freeing sequence (where it is
237  * inconsequential).
238  */
239 void
240 netmsg_service_sync(void)
241 {
242     struct netmsg_port_registration *reg;
243     struct netmsg smsg;
244 
245     lwkt_initmsg(&smsg.nm_lmsg, &curthread->td_msgport, 0,
246 		lwkt_cmd_func((void *)netmsg_sync_func), lwkt_cmd_op_none);
247 
248     TAILQ_FOREACH(reg, &netreglist, npr_entry) {
249 	lwkt_domsg(reg->npr_port, &smsg.nm_lmsg);
250     }
251 }
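
/*
 * Illustrative sketch, not part of this revision (the example_* helpers
 * are hypothetical): an ifnet teardown path would typically unhook the
 * interface from the input paths first and then call netmsg_service_sync()
 * so that every registered netmsg port drains any packet messages still
 * referencing the interface before its state is freed:
 *
 *	static void
 *	example_ifnet_teardown(struct ifnet *ifp)
 *	{
 *		example_unhook_from_input_paths(ifp);	// hypothetical
 *		netmsg_service_sync();			// drain all netmsg ports
 *		example_free_ifnet_state(ifp);		// hypothetical, now safe
 *	}
 */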
252 
253 /*
254  * The netmsg function simply replies the message.  API semantics require
255  * EASYNC to be returned if the netmsg function disposes of the message.
256  */
257 static int
258 netmsg_sync_func(struct netmsg *msg)
259 {
260     lwkt_replymsg(&msg->nm_lmsg, 0);
261     return(EASYNC);
262 }
263 
264 /*
265  * Generic netmsg service loop.  Some protocols may roll their own but all
266  * must do the basic command dispatch function call done here.
267  */
268 void
269 netmsg_service_loop(void *arg)
270 {
271     struct netmsg *msg;
272 
273     while ((msg = lwkt_waitport(&curthread->td_msgport, NULL))) {
274 	msg->nm_lmsg.ms_cmd.cm_func(&msg->nm_lmsg);
275     }
276 }
277 
278 /*
279  * Call the netisr directly.
280  * Queueing may be done in the msg port layer at its discretion.
281  */
282 void
283 netisr_dispatch(int num, struct mbuf *m)
284 {
285     /* just queue it for now XXX JH */
286     netisr_queue(num, m);
287 }
288 
289 /*
290  * Same as netisr_dispatch(), but always queue.
291  * This is used either in places where we are not confident that
292  * direct dispatch is possible, or where queueing is required.
293  */
294 int
295 netisr_queue(int num, struct mbuf *m)
296 {
297     struct netisr *ni;
298     struct netmsg_packet *pmsg;
299     lwkt_port_t port;
300 
301     KASSERT((num > 0 && num < (sizeof(netisrs)/sizeof(netisrs[0]))),
302 	("netisr_queue: bad isr %d", num));
303 
304     ni = &netisrs[num];
305     if (ni->ni_handler == NULL) {
306 	printf("netisr_queue: unregistered isr %d\n", num);
307 	return (EIO);
308     }
309 
310     if ((port = ni->ni_mport(&m)) == NULL)
311 	return (EIO);
312 
313     /* use better message allocation system with limits later XXX JH */
314     pmsg = malloc(sizeof(struct netmsg_packet), M_LWKTMSG, M_WAITOK);
315 
316     lwkt_initmsg(&pmsg->nm_lmsg, &netisr_afree_rport, 0,
317 		lwkt_cmd_func((void *)ni->ni_handler), lwkt_cmd_op_none);
318     pmsg->nm_packet = m;
319     pmsg->nm_lmsg.u.ms_result = num;
320     lwkt_sendmsg(port, &pmsg->nm_lmsg);
321     return (0);
322 }
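
/*
 * Illustrative sketch of the consuming side, not part of this revision
 * (example_isr_handler and example_proto_input are hypothetical): the
 * ni_handler registered for an isr receives the netmsg_packet built above
 * in the target netisr thread.  Because the message was initialized with
 * netisr_afree_rport as its reply port, replying it also frees it, and the
 * handler returns EASYNC to indicate it disposed of the message:
 *
 *	static int
 *	example_isr_handler(struct netmsg *msg)
 *	{
 *		struct netmsg_packet *pmsg = (struct netmsg_packet *)msg;
 *		struct mbuf *m = pmsg->nm_packet;
 *
 *		example_proto_input(m);			// hypothetical
 *		lwkt_replymsg(&pmsg->nm_lmsg, 0);	// autofreed via afree_rport
 *		return(EASYNC);
 *	}
 */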
323 
324 void
325 netisr_register(int num, lwkt_portfn_t mportfn, netisr_fn_t handler)
326 {
327     KASSERT((num > 0 && num < (sizeof(netisrs)/sizeof(netisrs[0]))),
328 	("netisr_register: bad isr %d", num));
329     lwkt_initmsg(&netisrs[num].ni_netmsg.nm_lmsg, &netisr_adone_rport, 0,
330 	    lwkt_cmd_op_none, lwkt_cmd_op_none);
331     netisrs[num].ni_mport = mportfn;
332     netisrs[num].ni_handler = handler;
333 }
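
/*
 * Illustrative usage, not part of this revision (example_ip_handler is
 * hypothetical): a protocol registers its isr once at initialization time,
 * supplying a port-selection function and a handler, e.g.
 *
 *	netisr_register(NETISR_IP, cpu0_portfn, example_ip_handler);
 *
 * after which netisr_queue(NETISR_IP, m) delivers packets to that handler
 * in the netisr thread selected by the port-selection function.
 */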
334 
335 int
336 netisr_unregister(int num)
337 {
338     KASSERT((num > 0 && num < (sizeof(netisrs)/sizeof(netisrs[0]))),
339 	("netisr_unregister: bad isr %d", num));
340 
341     /* XXX JH */
342     return (0);
343 }
344 
345 /*
346  * Return message port for default handler thread on CPU 0.
347  */
348 lwkt_port_t
349 cpu0_portfn(struct mbuf **mptr)
350 {
351     return (&netisr_cpu[0].td_msgport);
352 }
353 
354 lwkt_port_t
355 cpu_portfn(int cpu)
356 {
357     return (&netisr_cpu[cpu].td_msgport);
358 }
359 
360 /* ARGSUSED */
361 lwkt_port_t
362 cpu0_soport(struct socket *so __unused, struct sockaddr *nam __unused,
363 	    int req __unused)
364 {
365     return (&netisr_cpu[0].td_msgport);
366 }
367 
368 lwkt_port_t
369 sync_soport(struct socket *so __unused, struct sockaddr *nam __unused,
370 	    int req __unused)
371 {
372     return (&netisr_sync_port);
373 }
374 
375 /*
376  * schednetisr() is used to call the netisr handler from the appropriate
377  * netisr thread for polling and other purposes.
378  *
379  * This function may be called from a hard interrupt or IPI and must be
380  * MP SAFE and non-blocking.  We use the fixed per-netisr message instead
381  * of trying to allocate one.  We must get ourselves onto the target cpu
382  * to safely check the MSGF_DONE bit on the message but since the message
383  * will be sent to that cpu anyway this does not add any extra work beyond
384  * what lwkt_sendmsg() would have already had to do to schedule the target
385  * thread.
386  */
387 static void
388 schednetisr_remote(void *data)
389 {
390     int num = (int)data;
391     struct netisr *ni = &netisrs[num];
392     lwkt_port_t port = &netisr_cpu[0].td_msgport;
393     struct netmsg *pmsg;
394 
395     pmsg = &netisrs[num].ni_netmsg;
396     crit_enter();
397     if (pmsg->nm_lmsg.ms_flags & MSGF_DONE) {
398 	lwkt_initmsg(&pmsg->nm_lmsg, &netisr_adone_rport, 0,
399 		    lwkt_cmd_func((void *)ni->ni_handler), lwkt_cmd_op_none);
400 	pmsg->nm_lmsg.u.ms_result = num;
401 	lwkt_sendmsg(port, &pmsg->nm_lmsg);
402     }
403     crit_exit();
404 }
405 
406 void
407 schednetisr(int num)
408 {
409     KASSERT((num > 0 && num < (sizeof(netisrs)/sizeof(netisrs[0]))),
410 	("schednetisr: bad isr %d", num));
411 #ifdef SMP
412     if (mycpu->gd_cpuid != 0)
413 	lwkt_send_ipiq(globaldata_find(0), schednetisr_remote, (void *)num);
414     else
415 	schednetisr_remote((void *)num);
416 #else
417     schednetisr_remote((void *)num);
418 #endif
419 }
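
/*
 * Illustrative usage, not part of this revision (the isr constant shown is
 * assumed, not taken from this file): because schednetisr() is non-blocking
 * and reuses the preallocated per-netisr message, it is safe to call from a
 * device interrupt that merely wants to kick the corresponding protocol
 * thread:
 *
 *	static void
 *	example_intr(void *arg)
 *	{
 *		// acknowledge the hardware, then schedule the soft handler
 *		schednetisr(NETISR_POLL);
 *	}
 */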
420 
421