/*
 * Copyright (c) 2003, 2004 Matthew Dillon.  All rights reserved.
 * Copyright (c) 2003, 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2003 Jonathan Lemon.  All rights reserved.
 * Copyright (c) 2003, 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jonathan Lemon, Jeffrey M. Hsu, and Matthew Dillon.
 *
 * Jonathan Lemon gave Jeffrey Hsu permission to combine his copyright
 * into this one around July 8 2004.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/net/netisr.c,v 1.29 2006/12/22 23:44:54 swildner Exp $
 */

/*
 * Copyright (c) 2003, 2004 Jeffrey M. Hsu.  All rights reserved.
 *
 * License terms: all terms for the DragonFly license above plus the following:
 *
 * 4. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *
 *	This product includes software developed by Jeffrey M. Hsu
 *	for the DragonFly Project.
 *
 *    This requirement may be waived with permission from Jeffrey Hsu.
 *    This requirement will sunset and may be removed on July 8 2005,
 *    after which the standard DragonFly license (as shown above) will
 *    apply.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/msgport.h>
#include <sys/proc.h>
#include <sys/interrupt.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/netisr.h>
#include <machine/cpufunc.h>

#include <sys/thread2.h>
#include <sys/msgport2.h>

/* Forward declaration; defined below with netmsg_service_sync(). */
static int netmsg_sync_func(struct netmsg *msg);

/*
 * One entry per message port registered through
 * netmsg_service_port_init(), linked on netreglist so that
 * netmsg_service_sync() can reach every consumer of the netmsg API.
 */
struct netmsg_port_registration {
        TAILQ_ENTRY(netmsg_port_registration) npr_entry;
        lwkt_port_t npr_port;
};

/* Table of protocol handlers, indexed by netisr number (0..NETISR_MAX-1). */
static struct netisr netisrs[NETISR_MAX];

/* All ports registered via netmsg_service_port_init(). */
static TAILQ_HEAD(,netmsg_port_registration) netreglist;

/* Per-CPU thread to handle any protocol.  */
struct thread netisr_cpu[MAXCPU];
lwkt_port netisr_afree_rport;
lwkt_port netisr_adone_rport;
lwkt_port netisr_apanic_rport;
lwkt_port netisr_sync_port;

/*
 * netisr_afree_rport replymsg function, only used to handle async
 * messages which the sender has abandoned to their fate.
 */
static void
netisr_autofree_reply(lwkt_port_t port, lwkt_msg_t msg)
{
        kfree(msg, M_LWKTMSG);
}

/*
 * netisr_apanic_rport replymsg function.  Messages targeted at this
 * reply port must never be replied to; doing so is a logic error.
 */
static void
netisr_autopanic_reply(lwkt_port_t port, lwkt_msg_t msg)
{
        panic("unreplyable msg %p was replied!", msg);
}

/*
 * We must construct a custom putport function (which runs in the context
 * of the message originator)
 *
 * Our custom putport must check for self-referential messages, which can
 * occur when the so_upcall routine is called (e.g. nfs).  Self referential
 * messages are executed synchronously.  However, we must panic if the message
 * is not marked DONE on completion because the self-referential case cannot
 * block without deadlocking.
 *
 * note: ms_target_port does not need to be set when returning a synchronous
 * error code.
 */
static int
netmsg_put_port(lwkt_port_t port, lwkt_msg_t lmsg)
{
        int error;

        /*
         * Synchronous message whose target port is serviced by the
         * current thread: run the command directly instead of queueing
         * it to ourselves.  It must complete (MSGF_DONE) because the
         * self-referential case cannot block without deadlocking.
         */
        if ((lmsg->ms_flags & MSGF_ASYNC) == 0 && port->mp_td == curthread) {
                error = lmsg->ms_cmd.cm_func(lmsg);
                if (error == EASYNC && (lmsg->ms_flags & MSGF_DONE) == 0)
                        panic("netmsg_put_port: self-referential deadlock on netport");
                return(error);
        } else {
                return(lwkt_default_putport(port, lmsg));
        }
}

/*
 * UNIX DOMAIN sockets still have to run their uipc functions synchronously,
 * because they depend on the user proc context for a number of things
 * (like creds) which we have not yet incorporated into the message structure.
 *
 * However, we maintain our message/port abstraction.  Having a special
 * synchronous port which runs the commands synchronously gives us the
 * ability to serialize operations in one place later on when we start
 * removing the BGL.
 *
 * We clear MSGF_DONE prior to executing the message in order to close
 * any potential replymsg races with the flags field.  If a synchronous
 * result code is returned we set MSGF_DONE again.  MSGF_DONE's flag state
 * must be correct or the caller will be confused.
 */
static int
netmsg_sync_putport(lwkt_port_t port, lwkt_msg_t lmsg)
{
        int error;

        lmsg->ms_flags &= ~MSGF_DONE;
        lmsg->ms_target_port = port;    /* required for abort */
        error = lmsg->ms_cmd.cm_func(lmsg);
        if (error == EASYNC)
                error = lwkt_waitmsg(lmsg);     /* command disposed of msg */
        else
                lmsg->ms_flags |= MSGF_DONE;    /* completed synchronously */
        return(error);
}

/*
 * Abort handler for the synchronous port: mark the message aborted and
 * run its abort command in the caller's context.
 */
static void
netmsg_sync_abortport(lwkt_port_t port, lwkt_msg_t lmsg)
{
        lmsg->ms_abort_port = lmsg->ms_reply_port;
        lmsg->ms_flags |= MSGF_ABORTED;
        lmsg->ms_abort.cm_func(lmsg);
}

/*
 * Boot-time initialization: create the per-cpu service threads and the
 * special-purpose reply/sync ports.  Run via SYSINIT below.
 */
static void
netisr_init(void)
{
        int i;

        TAILQ_INIT(&netreglist);

        /*
         * Create default per-cpu threads for generic protocol handling.
         */
        for (i = 0; i < ncpus; ++i) {
                lwkt_create(netmsg_service_loop, NULL, NULL, &netisr_cpu[i], 0, i,
                    "netisr_cpu %d", i);
                netmsg_service_port_init(&netisr_cpu[i].td_msgport);
        }

        /*
         * The netisr_afree_rport is a special reply port which automatically
         * frees the replied message.  The netisr_adone_rport simply marks
         * the message as being done.  The netisr_apanic_rport panics if
         * the message is replied to.
         */
        lwkt_initport(&netisr_afree_rport, NULL);
        netisr_afree_rport.mp_replyport = netisr_autofree_reply;
        lwkt_initport_null_rport(&netisr_adone_rport, NULL);
        lwkt_initport(&netisr_apanic_rport, NULL);
        netisr_apanic_rport.mp_replyport = netisr_autopanic_reply;

        /*
         * The netisr_sync_port is a special port which executes the message
         * synchronously and waits for it if EASYNC is returned.
         */
        lwkt_initport(&netisr_sync_port, NULL);
        netisr_sync_port.mp_putport = netmsg_sync_putport;
        netisr_sync_port.mp_abortport = netmsg_sync_abortport;
}

SYSINIT(netisr, SI_SUB_PROTO_BEGIN, SI_ORDER_FIRST, netisr_init, NULL);

/*
 * Finish initializing the message port for a netmsg service.  This also
 * registers the port for synchronous cleanup operations such as when an
 * ifnet is being destroyed.  There is no deregistration API yet.
 */
void
netmsg_service_port_init(lwkt_port_t port)
{
        struct netmsg_port_registration *reg;

        /*
         * Override the putport function.  Our custom function checks for
         * self-references and executes such commands synchronously.
         */
        port->mp_putport = netmsg_put_port;

        /*
         * Keep track of ports using the netmsg API so we can synchronize
         * certain operations (such as freeing an ifnet structure) across all
         * consumers.
         */
        reg = kmalloc(sizeof(*reg), M_TEMP, M_WAITOK|M_ZERO);
        reg->npr_port = port;
        TAILQ_INSERT_TAIL(&netreglist, reg, npr_entry);
}

/*
 * This function synchronizes the caller with all netmsg services.  For
 * example, if an interface is being removed we must make sure that all
 * packets related to that interface complete processing before the structure
 * can actually be freed.  This sort of synchronization is an alternative to
 * ref-counting the netif, removing the ref counting overhead in favor of
 * placing additional overhead in the netif freeing sequence (where it is
 * inconsequential).
 */
void
netmsg_service_sync(void)
{
        struct netmsg_port_registration *reg;
        struct netmsg smsg;

        lwkt_initmsg(&smsg.nm_lmsg, &curthread->td_msgport, 0,
            lwkt_cmd_func((void *)netmsg_sync_func), lwkt_cmd_op_none);

        /* domsg to each registered port in turn; each waits for completion */
        TAILQ_FOREACH(reg, &netreglist, npr_entry) {
                lwkt_domsg(reg->npr_port, &smsg.nm_lmsg);
        }
}

/*
 * The netmsg function simply replies the message.  API semantics require
 * EASYNC to be returned if the netmsg function disposes of the message.
 */
static int
netmsg_sync_func(struct netmsg *msg)
{
        lwkt_replymsg(&msg->nm_lmsg, 0);
        return(EASYNC);
}

/*
 * Generic netmsg service loop.  Some protocols may roll their own but all
 * must do the basic command dispatch function call done here.
 */
void
netmsg_service_loop(void *arg)
{
        struct netmsg *msg;

        while ((msg = lwkt_waitport(&curthread->td_msgport, NULL))) {
                msg->nm_lmsg.ms_cmd.cm_func(&msg->nm_lmsg);
        }
}

/*
 * Call the netisr directly.
 * Queueing may be done in the msg port layer at its discretion.
 */
void
netisr_dispatch(int num, struct mbuf *m)
{
        /* just queue it for now XXX JH */
        netisr_queue(num, m);
}

/*
 * Same as netisr_dispatch(), but always queue.
300 * This is either used in places where we are not confident that 301 * direct dispatch is possible, or where queueing is required. 302 */ 303 int 304 netisr_queue(int num, struct mbuf *m) 305 { 306 struct netisr *ni; 307 struct netmsg_packet *pmsg; 308 lwkt_port_t port; 309 310 KASSERT((num > 0 && num <= (sizeof(netisrs)/sizeof(netisrs[0]))), 311 ("netisr_queue: bad isr %d", num)); 312 313 ni = &netisrs[num]; 314 if (ni->ni_handler == NULL) { 315 kprintf("netisr_queue: unregistered isr %d\n", num); 316 return (EIO); 317 } 318 319 if ((port = ni->ni_mport(&m)) == NULL) 320 return (EIO); 321 322 pmsg = &m->m_hdr.mh_netmsg; 323 324 lwkt_initmsg(&pmsg->nm_lmsg, &netisr_apanic_rport, 0, 325 lwkt_cmd_func((void *)ni->ni_handler), lwkt_cmd_op_none); 326 pmsg->nm_packet = m; 327 pmsg->nm_lmsg.u.ms_result = num; 328 lwkt_sendmsg(port, &pmsg->nm_lmsg); 329 return (0); 330 } 331 332 void 333 netisr_register(int num, lwkt_portfn_t mportfn, netisr_fn_t handler) 334 { 335 KASSERT((num > 0 && num <= (sizeof(netisrs)/sizeof(netisrs[0]))), 336 ("netisr_register: bad isr %d", num)); 337 lwkt_initmsg(&netisrs[num].ni_netmsg.nm_lmsg, &netisr_adone_rport, 0, 338 lwkt_cmd_op_none, lwkt_cmd_op_none); 339 netisrs[num].ni_mport = mportfn; 340 netisrs[num].ni_handler = handler; 341 } 342 343 int 344 netisr_unregister(int num) 345 { 346 KASSERT((num > 0 && num <= (sizeof(netisrs)/sizeof(netisrs[0]))), 347 ("unregister_netisr: bad isr number: %d\n", num)); 348 349 /* XXX JH */ 350 return (0); 351 } 352 353 /* 354 * Return message port for default handler thread on CPU 0. 
355 */ 356 lwkt_port_t 357 cpu0_portfn(struct mbuf **mptr) 358 { 359 return (&netisr_cpu[0].td_msgport); 360 } 361 362 lwkt_port_t 363 cpu_portfn(int cpu) 364 { 365 return (&netisr_cpu[cpu].td_msgport); 366 } 367 368 /* ARGSUSED */ 369 lwkt_port_t 370 cpu0_soport(struct socket *so __unused, struct sockaddr *nam __unused, 371 int req __unused) 372 { 373 return (&netisr_cpu[0].td_msgport); 374 } 375 376 lwkt_port_t 377 sync_soport(struct socket *so __unused, struct sockaddr *nam __unused, 378 int req __unused) 379 { 380 return (&netisr_sync_port); 381 } 382 383 /* 384 * schednetisr() is used to call the netisr handler from the appropriate 385 * netisr thread for polling and other purposes. 386 * 387 * This function may be called from a hard interrupt or IPI and must be 388 * MP SAFE and non-blocking. We use a fixed per-cpu message instead of 389 * trying to allocate one. We must get ourselves onto the target cpu 390 * to safely check the MSGF_DONE bit on the message but since the message 391 * will be sent to that cpu anyway this does not add any extra work beyond 392 * what lwkt_sendmsg() would have already had to do to schedule the target 393 * thread. 
394 */ 395 static void 396 schednetisr_remote(void *data) 397 { 398 int num = (int)data; 399 struct netisr *ni = &netisrs[num]; 400 lwkt_port_t port = &netisr_cpu[0].td_msgport; 401 struct netmsg *pmsg; 402 403 pmsg = &netisrs[num].ni_netmsg; 404 crit_enter(); 405 if (pmsg->nm_lmsg.ms_flags & MSGF_DONE) { 406 lwkt_initmsg(&pmsg->nm_lmsg, &netisr_adone_rport, 0, 407 lwkt_cmd_func((void *)ni->ni_handler), lwkt_cmd_op_none); 408 pmsg->nm_lmsg.u.ms_result = num; 409 lwkt_sendmsg(port, &pmsg->nm_lmsg); 410 } 411 crit_exit(); 412 } 413 414 void 415 schednetisr(int num) 416 { 417 KASSERT((num > 0 && num <= (sizeof(netisrs)/sizeof(netisrs[0]))), 418 ("schednetisr: bad isr %d", num)); 419 #ifdef SMP 420 if (mycpu->gd_cpuid != 0) 421 lwkt_send_ipiq(globaldata_find(0), schednetisr_remote, (void *)num); 422 else 423 schednetisr_remote((void *)num); 424 #else 425 schednetisr_remote((void *)num); 426 #endif 427 } 428 429