/*
 * Copyright (c) 1984, 1985, 1986, 1987, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)spp_usrreq.c	8.2 (Berkeley) 01/09/95
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/errno.h>

#include <net/if.h>
#include <net/route.h>
#include <netinet/tcp_fsm.h>

#include <netns/ns.h>
#include <netns/ns_pcb.h>
#include <netns/idp.h>
#include <netns/idp_var.h>
#include <netns/ns_error.h>
#include <netns/sp.h>
#include <netns/spidp.h>
#include <netns/spp_timer.h>
#include <netns/spp_var.h>
#include <netns/spp_debug.h>

/*
 * SP protocol implementation.
 */
spp_init()
{

	spp_iss = 1; /* WRONG !! should fish it out of TODR */
}
struct spidp spp_savesi;
int traceallspps = 0;
extern int sppconsdebug;
int spp_hardnosed;
int spp_use_delack = 0;
u_short spp_newchecks[50];
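/*
 * spp_newchecks[] holds ad-hoc event counters that are bumped at
 * various points in the SF_NEWCALL code paths below; they are
 * debugging aids only and are not interpreted anywhere in this file.
 */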

/*ARGSUSED*/
spp_input(m, nsp)
	register struct mbuf *m;
	register struct nspcb *nsp;
{
	register struct sppcb *cb;
	register struct spidp *si = mtod(m, struct spidp *);
	register struct socket *so;
	short ostate;
	int dropsocket = 0;


	sppstat.spps_rcvtotal++;
	if (nsp == 0) {
		panic("No nspcb in spp_input\n");
		return;
	}

	cb = nstosppcb(nsp);
	if (cb == 0) goto bad;

	if (m->m_len < sizeof(*si)) {
		if ((m = m_pullup(m, sizeof(*si))) == 0) {
			sppstat.spps_rcvshort++;
			return;
		}
		si = mtod(m, struct spidp *);
	}
	si->si_seq = ntohs(si->si_seq);
	si->si_ack = ntohs(si->si_ack);
	si->si_alo = ntohs(si->si_alo);

	so = nsp->nsp_socket;
	if (so->so_options & SO_DEBUG || traceallspps) {
		ostate = cb->s_state;
		spp_savesi = *si;
	}
	if (so->so_options & SO_ACCEPTCONN) {
		struct sppcb *ocb = cb;

		so = sonewconn(so, 0);
		if (so == 0) {
			goto drop;
		}
		/*
		 * This is ugly, but ....
		 *
		 * Mark socket as temporary until we're
		 * committed to keeping it.  The code at
		 * ``drop'' and ``dropwithreset'' checks the
		 * flag dropsocket to see if the temporary
		 * socket created here should be discarded.
		 * We mark the socket as discardable until
		 * we're committed to it below in TCPS_LISTEN.
		 */
		dropsocket++;
		nsp = (struct nspcb *)so->so_pcb;
		nsp->nsp_laddr = si->si_dna;
		cb = nstosppcb(nsp);
		cb->s_mtu = ocb->s_mtu;		/* preserve sockopts */
		cb->s_flags = ocb->s_flags;	/* preserve sockopts */
		cb->s_flags2 = ocb->s_flags2;	/* preserve sockopts */
		cb->s_state = TCPS_LISTEN;
	}

	/*
	 * Packet received on connection.
	 * Reset idle time and keep-alive timer.
	 */
	cb->s_idle = 0;
	cb->s_timer[SPPT_KEEP] = SPPTV_KEEP;

	switch (cb->s_state) {

	case TCPS_LISTEN: {
		struct mbuf *am;
		register struct sockaddr_ns *sns;
		struct ns_addr laddr;

		/*
		 * If somebody here was carrying on a conversation
		 * and went away, and his pen pal thinks he can
		 * still talk, we get the misdirected packet.
		 */
		if (spp_hardnosed && (si->si_did != 0 || si->si_seq != 0)) {
			spp_istat.gonawy++;
			goto dropwithreset;
		}
		am = m_get(M_DONTWAIT, MT_SONAME);
		if (am == NULL)
			goto drop;
		am->m_len = sizeof (struct sockaddr_ns);
		sns = mtod(am, struct sockaddr_ns *);
		sns->sns_len = sizeof(*sns);
		sns->sns_family = AF_NS;
		sns->sns_addr = si->si_sna;
		laddr = nsp->nsp_laddr;
		if (ns_nullhost(laddr))
			nsp->nsp_laddr = si->si_dna;
		if (ns_pcbconnect(nsp, am)) {
			nsp->nsp_laddr = laddr;
			(void) m_free(am);
			spp_istat.noconn++;
			goto drop;
		}
		(void) m_free(am);
		spp_template(cb);
		dropsocket = 0;		/* committed to socket */
		cb->s_did = si->si_sid;
		cb->s_rack = si->si_ack;
		cb->s_ralo = si->si_alo;
#define THREEWAYSHAKE
#ifdef THREEWAYSHAKE
		cb->s_state = TCPS_SYN_RECEIVED;
		cb->s_force = 1 + SPPT_KEEP;
		sppstat.spps_accepts++;
		cb->s_timer[SPPT_KEEP] = SPPTV_KEEP;
		}
		break;
	/*
	 * This state means that we have heard a response
	 * to our acceptance of their connection.
	 * It is probably logically unnecessary in this
	 * implementation.
	 */
	case TCPS_SYN_RECEIVED: {
		if (si->si_did != cb->s_sid) {
			spp_istat.wrncon++;
			goto drop;
		}
#endif
		nsp->nsp_fport = si->si_sport;
		cb->s_timer[SPPT_REXMT] = 0;
		cb->s_timer[SPPT_KEEP] = SPPTV_KEEP;
		soisconnected(so);
		cb->s_state = TCPS_ESTABLISHED;
		sppstat.spps_accepts++;
		}
		break;

	/*
	 * This state means that we have gotten a response
	 * to our attempt to establish a connection.
	 * We fill in the data from the other side,
	 * telling us which port to respond to, instead of the well-
	 * known one we might have sent to in the first place.
	 * We also require that this is a response to our
	 * connection id.
	 */
	case TCPS_SYN_SENT:
		if (si->si_did != cb->s_sid) {
			spp_istat.notme++;
			goto drop;
		}
		sppstat.spps_connects++;
		cb->s_did = si->si_sid;
		cb->s_rack = si->si_ack;
		cb->s_ralo = si->si_alo;
		cb->s_dport = nsp->nsp_fport = si->si_sport;
		cb->s_timer[SPPT_REXMT] = 0;
		cb->s_flags |= SF_ACKNOW;
		soisconnected(so);
		cb->s_state = TCPS_ESTABLISHED;
		/* Use roundtrip time of connection request for initial rtt */
		if (cb->s_rtt) {
			cb->s_srtt = cb->s_rtt << 3;
			cb->s_rttvar = cb->s_rtt << 1;
			SPPT_RANGESET(cb->s_rxtcur,
			    ((cb->s_srtt >> 2) + cb->s_rttvar) >> 1,
			    SPPTV_MIN, SPPTV_REXMTMAX);
			cb->s_rtt = 0;
		}
	}
	if (so->so_options & SO_DEBUG || traceallspps)
		spp_trace(SA_INPUT, (u_char)ostate, cb, &spp_savesi, 0);

	m->m_len -= sizeof (struct idp);
	m->m_pkthdr.len -= sizeof (struct idp);
	m->m_data += sizeof (struct idp);

	if (spp_reass(cb, si)) {
		(void) m_freem(m);
	}
	if (cb->s_force || (cb->s_flags & (SF_ACKNOW|SF_WIN|SF_RXT)))
		(void) spp_output(cb, (struct mbuf *)0);
	cb->s_flags &= ~(SF_WIN|SF_RXT);
	return;

dropwithreset:
	if (dropsocket)
		(void) soabort(so);
	si->si_seq = ntohs(si->si_seq);
	si->si_ack = ntohs(si->si_ack);
	si->si_alo = ntohs(si->si_alo);
	ns_error(dtom(si), NS_ERR_NOSOCK, 0);
	if (cb->s_nspcb->nsp_socket->so_options & SO_DEBUG || traceallspps)
		spp_trace(SA_DROP, (u_char)ostate, cb, &spp_savesi, 0);
	return;

drop:
bad:
	if (cb == 0 || cb->s_nspcb->nsp_socket->so_options & SO_DEBUG ||
	    traceallspps)
		spp_trace(SA_DROP, (u_char)ostate, cb, &spp_savesi, 0);
	m_freem(m);
}

int spprexmtthresh = 3;

/*
 * This is structurally similar to the tcp reassembly routine
 * but its function is somewhat different:  It merely queues
 * packets up, and suppresses duplicates.
 */
spp_reass(cb, si)
	register struct sppcb *cb;
	register struct spidp *si;
{
	register struct spidp_q *q;
	register struct mbuf *m;
	register struct socket *so = cb->s_nspcb->nsp_socket;
	char packetp = cb->s_flags & SF_HI;
	int incr;
	char wakeup = 0;

	if (si == SI(0))
		goto present;
	/*
	 * Update our news from them.
	 */
	if (si->si_cc & SP_SA)
		cb->s_flags |= (spp_use_delack ? SF_DELACK : SF_ACKNOW);
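	/*
	 * SP_SA set by the sender means "please acknowledge this packet".
	 * With spp_use_delack enabled the ack is deferred and swept up by
	 * spp_fasttimo(); otherwise SF_ACKNOW forces one out on the next
	 * call to spp_output().
	 */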
	if (SSEQ_GT(si->si_alo, cb->s_ralo))
		cb->s_flags |= SF_WIN;
	if (SSEQ_LEQ(si->si_ack, cb->s_rack)) {
		if ((si->si_cc & SP_SP) && cb->s_rack != (cb->s_smax + 1)) {
			sppstat.spps_rcvdupack++;
			/*
			 * If this is a completely duplicate ack
			 * and other conditions hold, we assume
			 * a packet has been dropped and retransmit
			 * it exactly as in tcp_input().
			 */
			if (si->si_ack != cb->s_rack ||
			    si->si_alo != cb->s_ralo)
				cb->s_dupacks = 0;
			else if (++cb->s_dupacks == spprexmtthresh) {
				u_short onxt = cb->s_snxt;
				int cwnd = cb->s_cwnd;

				cb->s_snxt = si->si_ack;
				cb->s_cwnd = CUNIT;
				cb->s_force = 1 + SPPT_REXMT;
				(void) spp_output(cb, (struct mbuf *)0);
				cb->s_timer[SPPT_REXMT] = cb->s_rxtcur;
				cb->s_rtt = 0;
				if (cwnd >= 4 * CUNIT)
					cb->s_cwnd = cwnd / 2;
				if (SSEQ_GT(onxt, cb->s_snxt))
					cb->s_snxt = onxt;
				return (1);
			}
		} else
			cb->s_dupacks = 0;
		goto update_window;
	}
	cb->s_dupacks = 0;
	/*
	 * If our correspondent acknowledges data we haven't sent,
	 * TCP would drop the packet after acking.  We'll be a little
	 * more permissive.
	 */
	if (SSEQ_GT(si->si_ack, (cb->s_smax + 1))) {
		sppstat.spps_rcvacktoomuch++;
		si->si_ack = cb->s_smax + 1;
	}
	sppstat.spps_rcvackpack++;
	/*
	 * If transmit timer is running and timed sequence
	 * number was acked, update smoothed round trip time.
	 * See discussion of algorithm in tcp_input.c
	 */
	if (cb->s_rtt && SSEQ_GT(si->si_ack, cb->s_rtseq)) {
		sppstat.spps_rttupdated++;
		if (cb->s_srtt != 0) {
			register short delta;
			delta = cb->s_rtt - (cb->s_srtt >> 3);
			if ((cb->s_srtt += delta) <= 0)
				cb->s_srtt = 1;
			if (delta < 0)
				delta = -delta;
			delta -= (cb->s_rttvar >> 2);
			if ((cb->s_rttvar += delta) <= 0)
				cb->s_rttvar = 1;
		} else {
			/*
			 * No rtt measurement yet
			 */
			cb->s_srtt = cb->s_rtt << 3;
			cb->s_rttvar = cb->s_rtt << 1;
		}
		cb->s_rtt = 0;
		cb->s_rxtshift = 0;
		SPPT_RANGESET(cb->s_rxtcur,
		    ((cb->s_srtt >> 2) + cb->s_rttvar) >> 1,
		    SPPTV_MIN, SPPTV_REXMTMAX);
	}
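	/*
	 * Note on scaling: s_srtt is kept as 8 * srtt and s_rttvar as
	 * 4 * rttvar (in slow-timeout ticks), so the expression
	 * ((s_srtt >> 2) + s_rttvar) >> 1 above works out to
	 * srtt + 2 * rttvar, the usual retransmit-timeout estimate.
	 */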
	/*
	 * If all outstanding data is acked, stop retransmit
	 * timer and remember to restart (more output or persist).
	 * If there is more data to be acked, restart retransmit
	 * timer, using current (possibly backed-off) value;
	 */
	if (si->si_ack == cb->s_smax + 1) {
		cb->s_timer[SPPT_REXMT] = 0;
		cb->s_flags |= SF_RXT;
	} else if (cb->s_timer[SPPT_PERSIST] == 0)
		cb->s_timer[SPPT_REXMT] = cb->s_rxtcur;
	/*
	 * When new data is acked, open the congestion window.
	 * If the window gives us less than ssthresh packets
	 * in flight, open exponentially (maxseg at a time).
	 * Otherwise open linearly (maxseg^2 / cwnd at a time).
	 */
	incr = CUNIT;
	if (cb->s_cwnd > cb->s_ssthresh)
		incr = max(incr * incr / cb->s_cwnd, 1);
	cb->s_cwnd = min(cb->s_cwnd + incr, cb->s_cwmx);
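	/*
	 * s_cwnd is scaled so that CUNIT represents one packet.  The code
	 * above therefore grows the window by one packet per ack while
	 * below ssthresh (slow start) and by roughly one packet per
	 * window's worth of acks once above it (congestion avoidance).
	 */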
	/*
	 * Trim Acked data from output queue.
	 */
	while ((m = so->so_snd.sb_mb) != NULL) {
		if (SSEQ_LT((mtod(m, struct spidp *))->si_seq, si->si_ack))
			sbdroprecord(&so->so_snd);
		else
			break;
	}
	sowwakeup(so);
	cb->s_rack = si->si_ack;
update_window:
	if (SSEQ_LT(cb->s_snxt, cb->s_rack))
		cb->s_snxt = cb->s_rack;
	if (SSEQ_LT(cb->s_swl1, si->si_seq) || cb->s_swl1 == si->si_seq &&
	    (SSEQ_LT(cb->s_swl2, si->si_ack) ||
	    cb->s_swl2 == si->si_ack && SSEQ_LT(cb->s_ralo, si->si_alo))) {
		/* keep track of pure window updates */
		if ((si->si_cc & SP_SP) && cb->s_swl2 == si->si_ack
		    && SSEQ_LT(cb->s_ralo, si->si_alo)) {
			sppstat.spps_rcvwinupd++;
			sppstat.spps_rcvdupack--;
		}
		cb->s_ralo = si->si_alo;
		cb->s_swl1 = si->si_seq;
		cb->s_swl2 = si->si_ack;
		cb->s_swnd = (1 + si->si_alo - si->si_ack);
		if (cb->s_swnd > cb->s_smxw)
			cb->s_smxw = cb->s_swnd;
		cb->s_flags |= SF_WIN;
	}
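	/*
	 * si_alo is the peer's allocation: the highest sequence number it
	 * is currently willing to accept.  The usable send window is thus
	 * alo - ack + 1 packets, which is what s_swnd records above.
	 */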
	/*
	 * If this packet number is higher than that which
	 * we have allocated, refuse it unless urgent.
	 */
	if (SSEQ_GT(si->si_seq, cb->s_alo)) {
		if (si->si_cc & SP_SP) {
			sppstat.spps_rcvwinprobe++;
			return (1);
		} else
			sppstat.spps_rcvpackafterwin++;
		if (si->si_cc & SP_OB) {
			if (SSEQ_GT(si->si_seq, cb->s_alo + 60)) {
				ns_error(dtom(si), NS_ERR_FULLUP, 0);
				return (0);
			} /* else queue this packet; */
		} else {
			/*register struct socket *so = cb->s_nspcb->nsp_socket;
			if (so->so_state && SS_NOFDREF) {
				ns_error(dtom(si), NS_ERR_NOSOCK, 0);
				(void)spp_close(cb);
			} else
				       would crash system*/
			spp_istat.notyet++;
			ns_error(dtom(si), NS_ERR_FULLUP, 0);
			return (0);
		}
	}
	/*
	 * If this is a system packet, we don't need to
	 * queue it up, and won't update acknowledge #
	 */
	if (si->si_cc & SP_SP) {
		return (1);
	}
	/*
	 * We have already seen this packet, so drop.
	 */
	if (SSEQ_LT(si->si_seq, cb->s_ack)) {
		spp_istat.bdreas++;
		sppstat.spps_rcvduppack++;
		if (si->si_seq == cb->s_ack - 1)
			spp_istat.lstdup++;
		return (1);
	}
	/*
	 * Loop through all packets queued up to insert in
	 * appropriate sequence.
	 */
	for (q = cb->s_q.si_next; q != &cb->s_q; q = q->si_next) {
		if (si->si_seq == SI(q)->si_seq) {
			sppstat.spps_rcvduppack++;
			return (1);
		}
		if (SSEQ_LT(si->si_seq, SI(q)->si_seq)) {
			sppstat.spps_rcvoopack++;
			break;
		}
	}
	insque(si, q->si_prev);
	/*
	 * If this packet is urgent, inform process
	 */
	if (si->si_cc & SP_OB) {
		cb->s_iobc = ((char *)si)[1 + sizeof(*si)];
		sohasoutofband(so);
		cb->s_oobflags |= SF_IOOB;
	}
present:
#define SPINC sizeof(struct sphdr)
	/*
	 * Loop through all packets queued up to update acknowledge
	 * number, and present all acknowledged data to user;
	 * If in packet interface mode, show packet headers.
	 */
	for (q = cb->s_q.si_next; q != &cb->s_q; q = q->si_next) {
		if (SI(q)->si_seq == cb->s_ack) {
			cb->s_ack++;
			m = dtom(q);
			if (SI(q)->si_cc & SP_OB) {
				cb->s_oobflags &= ~SF_IOOB;
				if (so->so_rcv.sb_cc)
					so->so_oobmark = so->so_rcv.sb_cc;
				else
					so->so_state |= SS_RCVATMARK;
			}
			q = q->si_prev;
			remque(q->si_next);
			wakeup = 1;
			sppstat.spps_rcvpack++;
#ifdef SF_NEWCALL
			if (cb->s_flags2 & SF_NEWCALL) {
				struct sphdr *sp = mtod(m, struct sphdr *);
				u_char dt = sp->sp_dt;
				spp_newchecks[4]++;
				if (dt != cb->s_rhdr.sp_dt) {
					struct mbuf *mm =
					    m_getclr(M_DONTWAIT, MT_CONTROL);
					spp_newchecks[0]++;
					if (mm != NULL) {
						u_short *s =
						    mtod(mm, u_short *);
						cb->s_rhdr.sp_dt = dt;
						mm->m_len = 5; /*XXX*/
						s[0] = 5;
						s[1] = 1;
						*(u_char *)(&s[2]) = dt;
						sbappend(&so->so_rcv, mm);
					}
				}
				if (sp->sp_cc & SP_OB) {
					MCHTYPE(m, MT_OOBDATA);
					spp_newchecks[1]++;
					so->so_oobmark = 0;
					so->so_state &= ~SS_RCVATMARK;
				}
				if (packetp == 0) {
					m->m_data += SPINC;
					m->m_len -= SPINC;
					m->m_pkthdr.len -= SPINC;
				}
				if ((sp->sp_cc & SP_EM) || packetp) {
					sbappendrecord(&so->so_rcv, m);
					spp_newchecks[9]++;
				} else
					sbappend(&so->so_rcv, m);
			} else
#endif
			if (packetp) {
				sbappendrecord(&so->so_rcv, m);
			} else {
				cb->s_rhdr = *mtod(m, struct sphdr *);
				m->m_data += SPINC;
				m->m_len -= SPINC;
				m->m_pkthdr.len -= SPINC;
				sbappend(&so->so_rcv, m);
			}
		} else
			break;
	}
	if (wakeup) sorwakeup(so);
	return (0);
}

spp_ctlinput(cmd, arg)
	int cmd;
	caddr_t arg;
{
	struct ns_addr *na;
	extern u_char nsctlerrmap[];
	extern spp_abort(), spp_quench();
	extern struct nspcb *idp_drop();
	struct ns_errp *errp;
	struct nspcb *nsp;
	struct sockaddr_ns *sns;
	int type;

	if (cmd < 0 || cmd > PRC_NCMDS)
		return;
	type = NS_ERR_UNREACH_HOST;

	switch (cmd) {

	case PRC_ROUTEDEAD:
		return;

	case PRC_IFDOWN:
	case PRC_HOSTDEAD:
	case PRC_HOSTUNREACH:
		sns = (struct sockaddr_ns *)arg;
		if (sns->sns_family != AF_NS)
			return;
		na = &sns->sns_addr;
		break;

	default:
		errp = (struct ns_errp *)arg;
		na = &errp->ns_err_idp.idp_dna;
		type = errp->ns_err_num;
		type = ntohs((u_short)type);
	}
	switch (type) {

	case NS_ERR_UNREACH_HOST:
		ns_pcbnotify(na, (int)nsctlerrmap[cmd], spp_abort, (long) 0);
		break;

	case NS_ERR_TOO_BIG:
	case NS_ERR_NOSOCK:
		nsp = ns_pcblookup(na, errp->ns_err_idp.idp_sna.x_port,
		    NS_WILDCARD);
		if (nsp) {
			if (nsp->nsp_pcb)
				(void) spp_drop((struct sppcb *)nsp->nsp_pcb,
				    (int)nsctlerrmap[cmd]);
			else
				(void) idp_drop(nsp, (int)nsctlerrmap[cmd]);
		}
		break;

	case NS_ERR_FULLUP:
		ns_pcbnotify(na, 0, spp_quench, (long) 0);
	}
}
/*
 * When a source quench is received, close congestion window
 * to one packet.  We will gradually open it again as we proceed.
 */
spp_quench(nsp)
	struct nspcb *nsp;
{
	struct sppcb *cb = nstosppcb(nsp);

	if (cb)
		cb->s_cwnd = CUNIT;
}

#ifdef notdef
int
spp_fixmtu(nsp)
	register struct nspcb *nsp;
{
	register struct sppcb *cb = (struct sppcb *)(nsp->nsp_pcb);
	register struct mbuf *m;
	register struct spidp *si;
	struct ns_errp *ep;
	struct sockbuf *sb;
	int badseq, len;
	struct mbuf *firstbad, *m0;

	if (cb) {
		/*
		 * The notification that we have sent
		 * too much is bad news -- we will
		 * have to go through the data queued up so far,
		 * splitting ones which are too big and
		 * reassigning sequence numbers and checksums.
		 * We should then retransmit all packets from
		 * one above the offending packet to the last one
		 * we had sent (or our allocation),
		 * then the offending one, so that any queued
		 * data at our destination will be discarded.
		 */
		ep = (struct ns_errp *)nsp->nsp_notify_param;
		sb = &nsp->nsp_socket->so_snd;
		cb->s_mtu = ep->ns_err_param;
		badseq = SI(&ep->ns_err_idp)->si_seq;
		for (m = sb->sb_mb; m; m = m->m_act) {
			si = mtod(m, struct spidp *);
			if (si->si_seq == badseq)
				break;
		}
		if (m == 0) return;
		firstbad = m;
		/*for (;;) {*/
		/* calculate length */
		for (m0 = m, len = 0; m ; m = m->m_next)
			len += m->m_len;
		if (len > cb->s_mtu) {
		}
	/* FINISH THIS
	} */
	}
}
#endif

spp_output(cb, m0)
	register struct sppcb *cb;
	struct mbuf *m0;
{
	struct socket *so = cb->s_nspcb->nsp_socket;
	register struct mbuf *m;
	register struct spidp *si = (struct spidp *) 0;
	register struct sockbuf *sb = &so->so_snd;
	int len = 0, win, rcv_win;
	short span, off, recordp = 0;
	u_short alo;
	int error = 0, sendalot;
#ifdef notdef
	int idle;
#endif
	struct mbuf *mprev;
	extern int idpcksum;

	if (m0) {
		int mtu = cb->s_mtu;
		int datalen;
		/*
		 * Make sure that packet isn't too big.
		 */
		for (m = m0; m ; m = m->m_next) {
			mprev = m;
			len += m->m_len;
			if (m->m_flags & M_EOR)
				recordp = 1;
		}
		datalen = (cb->s_flags & SF_HO) ?
				len - sizeof (struct sphdr) : len;
		if (datalen > mtu) {
			if (cb->s_flags & SF_PI) {
				m_freem(m0);
				return (EMSGSIZE);
			} else {
				int oldEM = cb->s_cc & SP_EM;

				cb->s_cc &= ~SP_EM;
				while (len > mtu) {
					/*
					 * Here we are only being called
					 * from usrreq(), so it is OK to
					 * block.
					 */
					m = m_copym(m0, 0, mtu, M_WAIT);
					if (cb->s_flags & SF_NEWCALL) {
						struct mbuf *mm = m;
						spp_newchecks[7]++;
						while (mm) {
							mm->m_flags &= ~M_EOR;
							mm = mm->m_next;
						}
					}
					error = spp_output(cb, m);
					if (error) {
						cb->s_cc |= oldEM;
						m_freem(m0);
						return (error);
					}
					m_adj(m0, mtu);
					len -= mtu;
				}
				cb->s_cc |= oldEM;
			}
		}
		/*
		 * Force length even, by adding a "garbage byte" if
		 * necessary.
		 */
		if (len & 1) {
			m = mprev;
			if (M_TRAILINGSPACE(m) >= 1)
				m->m_len++;
			else {
				struct mbuf *m1 = m_get(M_DONTWAIT, MT_DATA);

				if (m1 == 0) {
					m_freem(m0);
					return (ENOBUFS);
				}
				m1->m_len = 1;
				*(mtod(m1, u_char *)) = 0;
				m->m_next = m1;
			}
		}
		m = m_gethdr(M_DONTWAIT, MT_HEADER);
		if (m == 0) {
			m_freem(m0);
			return (ENOBUFS);
		}
		/*
		 * Fill in mbuf with extended SP header
		 * and addresses and length put into network format.
		 */
		MH_ALIGN(m, sizeof (struct spidp));
		m->m_len = sizeof (struct spidp);
		m->m_next = m0;
		si = mtod(m, struct spidp *);
		si->si_i = *cb->s_idp;
		si->si_s = cb->s_shdr;
		if ((cb->s_flags & SF_PI) && (cb->s_flags & SF_HO)) {
			register struct sphdr *sh;
			if (m0->m_len < sizeof (*sh)) {
				if ((m0 = m_pullup(m0, sizeof(*sh))) == NULL) {
					(void) m_free(m);
					m_freem(m0);
					return (EINVAL);
				}
				m->m_next = m0;
			}
			sh = mtod(m0, struct sphdr *);
			si->si_dt = sh->sp_dt;
			si->si_cc |= sh->sp_cc & SP_EM;
			m0->m_len -= sizeof (*sh);
			m0->m_data += sizeof (*sh);
			len -= sizeof (*sh);
		}
		len += sizeof(*si);
		if ((cb->s_flags2 & SF_NEWCALL) && recordp) {
			si->si_cc |= SP_EM;
			spp_newchecks[8]++;
		}
		if (cb->s_oobflags & SF_SOOB) {
			/*
			 * Per jqj@cornell:
			 * make sure OB packets convey exactly 1 byte.
			 * If the packet is 1 byte or larger, we
			 * have already guaranteed there to be at least
			 * one garbage byte for the checksum, and
			 * extra bytes shouldn't hurt!
			 */
			if (len > sizeof(*si)) {
				si->si_cc |= SP_OB;
				len = (1 + sizeof(*si));
			}
		}
		si->si_len = htons((u_short)len);
		m->m_pkthdr.len = ((len - 1) | 1) + 1;
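		/*
		 * The packet header length is rounded up to an even byte
		 * count here, matching the garbage-byte padding added
		 * above, since the IDP checksum is computed over 16-bit
		 * words.
		 */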
		/*
		 * queue stuff up for output
		 */
		sbappendrecord(sb, m);
		cb->s_seq++;
	}
#ifdef notdef
	idle = (cb->s_smax == (cb->s_rack - 1));
#endif
again:
	sendalot = 0;
	off = cb->s_snxt - cb->s_rack;
	win = min(cb->s_swnd, (cb->s_cwnd/CUNIT));

	/*
	 * If in persist timeout with window of 0, send a probe.
	 * Otherwise, if window is small but nonzero
	 * and timer expired, send what we can and go into
	 * transmit state.
	 */
	if (cb->s_force == 1 + SPPT_PERSIST) {
		if (win != 0) {
			cb->s_timer[SPPT_PERSIST] = 0;
			cb->s_rxtshift = 0;
		}
	}
	span = cb->s_seq - cb->s_rack;
	len = min(span, win) - off;
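	/*
	 * At this point span is everything queued but not yet acked,
	 * off is how much of that has already been transmitted beyond
	 * the last ack, and len is what the usable window still allows
	 * us to send on this pass.
	 */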

	if (len < 0) {
		/*
		 * Window shrank after we went into it.
		 * If window shrank to 0, cancel pending
		 * retransmission and pull s_snxt back
		 * to (closed) window.  We will enter persist
		 * state below.  If the window didn't close completely,
		 * just wait for an ACK.
		 */
		len = 0;
		if (win == 0) {
			cb->s_timer[SPPT_REXMT] = 0;
			cb->s_snxt = cb->s_rack;
		}
	}
	if (len > 1)
		sendalot = 1;
	rcv_win = sbspace(&so->so_rcv);

	/*
	 * Send if we owe peer an ACK.
	 */
	if (cb->s_oobflags & SF_SOOB) {
		/*
		 * must transmit this out of band packet
		 */
		cb->s_oobflags &= ~ SF_SOOB;
		sendalot = 1;
		sppstat.spps_sndurg++;
		goto found;
	}
	if (cb->s_flags & SF_ACKNOW)
		goto send;
	if (cb->s_state < TCPS_ESTABLISHED)
		goto send;
	/*
	 * Silly window can't happen in spp.
	 * Code from tcp deleted.
	 */
	if (len)
		goto send;
	/*
	 * Compare available window to amount of window
	 * known to peer (as advertised window less
	 * next expected input.)  If the difference is at least two
	 * packets or at least 35% of the maximum possible window,
	 * then want to send a window update to peer.
	 */
	if (rcv_win > 0) {
		u_short delta = 1 + cb->s_alo - cb->s_ack;
		int adv = rcv_win - (delta * cb->s_mtu);

		if ((so->so_rcv.sb_cc == 0 && adv >= (2 * cb->s_mtu)) ||
		    (100 * adv / so->so_rcv.sb_hiwat >= 35)) {
			sppstat.spps_sndwinup++;
			cb->s_flags |= SF_ACKNOW;
			goto send;
		}

	}
	/*
	 * Many comments from tcp_output.c are appropriate here
	 * including . . .
	 * If send window is too small, there is data to transmit, and no
	 * retransmit or persist is pending, then go to persist state.
	 * If nothing happens soon, send when timer expires:
	 * if window is nonzero, transmit what we can,
	 * otherwise send a probe.
	 */
	if (so->so_snd.sb_cc && cb->s_timer[SPPT_REXMT] == 0 &&
	    cb->s_timer[SPPT_PERSIST] == 0) {
		cb->s_rxtshift = 0;
		spp_setpersist(cb);
	}
	/*
	 * No reason to send a packet, just return.
	 */
	cb->s_outx = 1;
	return (0);

send:
	/*
	 * Find requested packet.
	 */
	si = 0;
	if (len > 0) {
		cb->s_want = cb->s_snxt;
		for (m = sb->sb_mb; m; m = m->m_act) {
			si = mtod(m, struct spidp *);
			if (SSEQ_LEQ(cb->s_snxt, si->si_seq))
				break;
		}
	found:
		if (si) {
			if (si->si_seq == cb->s_snxt)
				cb->s_snxt++;
			else
				sppstat.spps_sndvoid++, si = 0;
		}
	}
	/*
	 * update window
	 */
	if (rcv_win < 0)
		rcv_win = 0;
	alo = cb->s_ack - 1 + (rcv_win / ((short)cb->s_mtu));
	if (SSEQ_LT(alo, cb->s_alo))
		alo = cb->s_alo;
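	/*
	 * The allocation we advertise is the last packet consumed
	 * (s_ack - 1) plus however many mtu-sized packets still fit in
	 * the receive buffer, and it is never allowed to shrink below
	 * what we advertised before.
	 */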

	if (si) {
		/*
		 * must make a copy of this packet for
		 * idp_output to monkey with
		 */
		m = m_copy(dtom(si), 0, (int)M_COPYALL);
		if (m == NULL) {
			return (ENOBUFS);
		}
		si = mtod(m, struct spidp *);
		if (SSEQ_LT(si->si_seq, cb->s_smax))
			sppstat.spps_sndrexmitpack++;
		else
			sppstat.spps_sndpack++;
	} else if (cb->s_force || cb->s_flags & SF_ACKNOW) {
		/*
		 * Must send an acknowledgement or a probe
		 */
		if (cb->s_force)
			sppstat.spps_sndprobe++;
		if (cb->s_flags & SF_ACKNOW)
			sppstat.spps_sndacks++;
		m = m_gethdr(M_DONTWAIT, MT_HEADER);
		if (m == 0)
			return (ENOBUFS);
		/*
		 * Fill in mbuf with extended SP header
		 * and addresses and length put into network format.
		 */
		MH_ALIGN(m, sizeof (struct spidp));
		m->m_len = sizeof (*si);
		m->m_pkthdr.len = sizeof (*si);
		si = mtod(m, struct spidp *);
		si->si_i = *cb->s_idp;
		si->si_s = cb->s_shdr;
		si->si_seq = cb->s_smax + 1;
		si->si_len = htons(sizeof (*si));
		si->si_cc |= SP_SP;
	} else {
		cb->s_outx = 3;
		if (so->so_options & SO_DEBUG || traceallspps)
			spp_trace(SA_OUTPUT, cb->s_state, cb, si, 0);
		return (0);
	}
	/*
	 * Stuff checksum and output datagram.
	 */
	if ((si->si_cc & SP_SP) == 0) {
		if (cb->s_force != (1 + SPPT_PERSIST) ||
		    cb->s_timer[SPPT_PERSIST] == 0) {
			/*
			 * If this is a new packet and we are not currently
			 * timing anything, time this one.
			 */
			if (SSEQ_LT(cb->s_smax, si->si_seq)) {
				cb->s_smax = si->si_seq;
				if (cb->s_rtt == 0) {
					sppstat.spps_segstimed++;
					cb->s_rtseq = si->si_seq;
					cb->s_rtt = 1;
				}
			}
			/*
			 * Set rexmt timer if not currently set,
			 * Initial value for retransmit timer is smoothed
			 * round-trip time + 2 * round-trip time variance.
			 * Initialize shift counter which is used for backoff
			 * of retransmit time.
			 */
			if (cb->s_timer[SPPT_REXMT] == 0 &&
			    cb->s_snxt != cb->s_rack) {
				cb->s_timer[SPPT_REXMT] = cb->s_rxtcur;
				if (cb->s_timer[SPPT_PERSIST]) {
					cb->s_timer[SPPT_PERSIST] = 0;
					cb->s_rxtshift = 0;
				}
			}
		} else if (SSEQ_LT(cb->s_smax, si->si_seq)) {
			cb->s_smax = si->si_seq;
		}
	} else if (cb->s_state < TCPS_ESTABLISHED) {
		if (cb->s_rtt == 0)
			cb->s_rtt = 1; /* Time initial handshake */
		if (cb->s_timer[SPPT_REXMT] == 0)
			cb->s_timer[SPPT_REXMT] = cb->s_rxtcur;
	}
	{
		/*
		 * Do not request acks when we ack their data packets or
		 * when we do a gratuitous window update.
		 */
		if (((si->si_cc & SP_SP) == 0) || cb->s_force)
			si->si_cc |= SP_SA;
		si->si_seq = htons(si->si_seq);
		si->si_alo = htons(alo);
		si->si_ack = htons(cb->s_ack);

		if (idpcksum) {
			si->si_sum = 0;
			len = ntohs(si->si_len);
			if (len & 1)
				len++;
			si->si_sum = ns_cksum(m, len);
		} else
			si->si_sum = 0xffff;
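		/*
		 * si_sum is cleared before summing because the checksum
		 * field itself is covered by the computation; a value of
		 * 0xffff is the XNS convention for "no checksum" when
		 * checksumming is disabled.
		 */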

		cb->s_outx = 4;
		if (so->so_options & SO_DEBUG || traceallspps)
			spp_trace(SA_OUTPUT, cb->s_state, cb, si, 0);

		if (so->so_options & SO_DONTROUTE)
			error = ns_output(m, (struct route *)0, NS_ROUTETOIF);
		else
			error = ns_output(m, &cb->s_nspcb->nsp_route, 0);
	}
	if (error) {
		return (error);
	}
	sppstat.spps_sndtotal++;
	/*
	 * Data sent (as far as we can tell).
	 * If this advertises a larger window than any other segment,
	 * then remember the size of the advertised window.
	 * Any pending ACK has now been sent.
	 */
	cb->s_force = 0;
	cb->s_flags &= ~(SF_ACKNOW|SF_DELACK);
	if (SSEQ_GT(alo, cb->s_alo))
		cb->s_alo = alo;
	if (sendalot)
		goto again;
	cb->s_outx = 5;
	return (0);
}

int spp_do_persist_panics = 0;

spp_setpersist(cb)
	register struct sppcb *cb;
{
	register t = ((cb->s_srtt >> 2) + cb->s_rttvar) >> 1;
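	/*
	 * t is the same srtt + 2 * rttvar estimate used for the
	 * retransmit timer (see the scaling note in spp_reass()); it is
	 * then backed off and clamped to the persist-timer range below.
	 */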
	extern int spp_backoff[];

	if (cb->s_timer[SPPT_REXMT] && spp_do_persist_panics)
		panic("spp_output REXMT");
	/*
	 * Start/restart persistence timer.
	 */
	SPPT_RANGESET(cb->s_timer[SPPT_PERSIST],
	    t * spp_backoff[cb->s_rxtshift],
	    SPPTV_PERSMIN, SPPTV_PERSMAX);
	if (cb->s_rxtshift < SPP_MAXRXTSHIFT)
		cb->s_rxtshift++;
}
/*ARGSUSED*/
spp_ctloutput(req, so, level, name, value)
	int req;
	struct socket *so;
	int name;
	struct mbuf **value;
{
	register struct mbuf *m;
	struct nspcb *nsp = sotonspcb(so);
	register struct sppcb *cb;
	int mask, error = 0;

	if (level != NSPROTO_SPP) {
		/* This will have to be changed when we do more general
		   stacking of protocols */
		return (idp_ctloutput(req, so, level, name, value));
	}
	if (nsp == NULL) {
		error = EINVAL;
		goto release;
	} else
		cb = nstosppcb(nsp);

	switch (req) {

	case PRCO_GETOPT:
		if (value == NULL)
			return (EINVAL);
		m = m_get(M_DONTWAIT, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		switch (name) {

		case SO_HEADERS_ON_INPUT:
			mask = SF_HI;
			goto get_flags;

		case SO_HEADERS_ON_OUTPUT:
			mask = SF_HO;
		get_flags:
			m->m_len = sizeof(short);
			*mtod(m, short *) = cb->s_flags & mask;
			break;

		case SO_MTU:
			m->m_len = sizeof(u_short);
			*mtod(m, short *) = cb->s_mtu;
			break;

		case SO_LAST_HEADER:
			m->m_len = sizeof(struct sphdr);
			*mtod(m, struct sphdr *) = cb->s_rhdr;
			break;

		case SO_DEFAULT_HEADERS:
			m->m_len = sizeof(struct spidp);
			*mtod(m, struct sphdr *) = cb->s_shdr;
			break;

		default:
			error = EINVAL;
		}
		*value = m;
		break;

	case PRCO_SETOPT:
		if (value == 0 || *value == 0) {
			error = EINVAL;
			break;
		}
		switch (name) {
			int *ok;

		case SO_HEADERS_ON_INPUT:
			mask = SF_HI;
			goto set_head;

		case SO_HEADERS_ON_OUTPUT:
			mask = SF_HO;
		set_head:
			if (cb->s_flags & SF_PI) {
				ok = mtod(*value, int *);
				if (*ok)
					cb->s_flags |= mask;
				else
					cb->s_flags &= ~mask;
			} else error = EINVAL;
			break;

		case SO_MTU:
			cb->s_mtu = *(mtod(*value, u_short *));
			break;

#ifdef SF_NEWCALL
		case SO_NEWCALL:
			ok = mtod(*value, int *);
			if (*ok) {
				cb->s_flags2 |= SF_NEWCALL;
				spp_newchecks[5]++;
			} else {
				cb->s_flags2 &= ~SF_NEWCALL;
				spp_newchecks[6]++;
			}
			break;
#endif

		case SO_DEFAULT_HEADERS:
			{
				register struct sphdr *sp
					= mtod(*value, struct sphdr *);
				cb->s_dt = sp->sp_dt;
				cb->s_cc = sp->sp_cc & SP_EM;
			}
			break;

		default:
			error = EINVAL;
		}
		m_freem(*value);
		break;
	}
release:
	return (error);
}

/*ARGSUSED*/
spp_usrreq(so, req, m, nam, controlp)
	struct socket *so;
	int req;
	struct mbuf *m, *nam, *controlp;
{
	struct nspcb *nsp = sotonspcb(so);
	register struct sppcb *cb;
	int s = splnet();
	int error = 0, ostate;
	struct mbuf *mm;
	register struct sockbuf *sb;

	if (req == PRU_CONTROL)
		return (ns_control(so, m, (caddr_t)nam,
		    (struct ifnet *)controlp));
	if (nsp == NULL) {
		if (req != PRU_ATTACH) {
			error = EINVAL;
			goto release;
		}
	} else
		cb = nstosppcb(nsp);

	ostate = cb ? cb->s_state : 0;

	switch (req) {

	case PRU_ATTACH:
		if (nsp != NULL) {
			error = EISCONN;
			break;
		}
		error = ns_pcballoc(so, &nspcb);
		if (error)
			break;
		if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
			error = soreserve(so, (u_long) 3072, (u_long) 3072);
			if (error)
				break;
		}
		nsp = sotonspcb(so);

		mm = m_getclr(M_DONTWAIT, MT_PCB);
		sb = &so->so_snd;

		if (mm == NULL) {
			error = ENOBUFS;
			break;
		}
		cb = mtod(mm, struct sppcb *);
		mm = m_getclr(M_DONTWAIT, MT_HEADER);
		if (mm == NULL) {
			(void) m_free(dtom(cb));
			error = ENOBUFS;
			break;
		}
		cb->s_idp = mtod(mm, struct idp *);
		cb->s_state = TCPS_LISTEN;
		cb->s_smax = -1;
		cb->s_swl1 = -1;
		cb->s_q.si_next = cb->s_q.si_prev = &cb->s_q;
		cb->s_nspcb = nsp;
		cb->s_mtu = 576 - sizeof (struct spidp);
		cb->s_cwnd = sbspace(sb) * CUNIT / cb->s_mtu;
		cb->s_ssthresh = cb->s_cwnd;
		cb->s_cwmx = sbspace(sb) * CUNIT /
				(2 * sizeof (struct spidp));
		/* Above is recomputed when connecting to account
		   for changed buffering or mtu's */
		cb->s_rtt = SPPTV_SRTTBASE;
		cb->s_rttvar = SPPTV_SRTTDFLT << 2;
		SPPT_RANGESET(cb->s_rxtcur,
		    ((SPPTV_SRTTBASE >> 2) + (SPPTV_SRTTDFLT << 2)) >> 1,
		    SPPTV_MIN, SPPTV_REXMTMAX);
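		/*
		 * With no round-trip measurement yet, the retransmit
		 * timeout starts from the default smoothed-rtt constants
		 * using the same (srtt/4 + rttvar)/2 form as above,
		 * clamped to [SPPTV_MIN, SPPTV_REXMTMAX].
		 */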
		nsp->nsp_pcb = (caddr_t) cb;
		break;

	case PRU_DETACH:
		if (nsp == NULL) {
			error = ENOTCONN;
			break;
		}
		if (cb->s_state > TCPS_LISTEN)
			cb = spp_disconnect(cb);
		else
			cb = spp_close(cb);
		break;

	case PRU_BIND:
		error = ns_pcbbind(nsp, nam);
		break;

	case PRU_LISTEN:
		if (nsp->nsp_lport == 0)
			error = ns_pcbbind(nsp, (struct mbuf *)0);
		if (error == 0)
			cb->s_state = TCPS_LISTEN;
		break;

	/*
	 * Initiate connection to peer.
	 * Enter SYN_SENT state, and mark socket as connecting.
	 * Start keep-alive timer, setup prototype header,
	 * Send initial system packet requesting connection.
	 */
	case PRU_CONNECT:
		if (nsp->nsp_lport == 0) {
			error = ns_pcbbind(nsp, (struct mbuf *)0);
			if (error)
				break;
		}
		error = ns_pcbconnect(nsp, nam);
		if (error)
			break;
		soisconnecting(so);
		sppstat.spps_connattempt++;
		cb->s_state = TCPS_SYN_SENT;
		cb->s_did = 0;
		spp_template(cb);
		cb->s_timer[SPPT_KEEP] = SPPTV_KEEP;
		cb->s_force = 1 + SPPTV_KEEP;
		/*
		 * Other party is required to respond to
		 * the port I send from, but he is not
		 * required to answer from where I am sending to,
		 * so allow wildcarding.
		 * The original port I am sending to is still saved in
		 * cb->s_dport.
		 */
		nsp->nsp_fport = 0;
		error = spp_output(cb, (struct mbuf *) 0);
		break;

	case PRU_CONNECT2:
		error = EOPNOTSUPP;
		break;

	/*
	 * We may decide later to implement connection closing
	 * handshaking at the spp level optionally.
	 * here is the hook to do it:
	 */
	case PRU_DISCONNECT:
		cb = spp_disconnect(cb);
		break;

	/*
	 * Accept a connection.  Essentially all the work is
	 * done at higher levels; just return the address
	 * of the peer, storing through addr.
	 */
	case PRU_ACCEPT: {
		struct sockaddr_ns *sns = mtod(nam, struct sockaddr_ns *);

		nam->m_len = sizeof (struct sockaddr_ns);
		sns->sns_family = AF_NS;
		sns->sns_addr = nsp->nsp_faddr;
		break;
	}

	case PRU_SHUTDOWN:
		socantsendmore(so);
		cb = spp_usrclosed(cb);
		if (cb)
			error = spp_output(cb, (struct mbuf *) 0);
		break;

	/*
	 * After a receive, possibly send acknowledgment
	 * updating allocation.
	 */
	case PRU_RCVD:
		cb->s_flags |= SF_RVD;
		(void) spp_output(cb, (struct mbuf *) 0);
		cb->s_flags &= ~SF_RVD;
		break;

	case PRU_ABORT:
		(void) spp_drop(cb, ECONNABORTED);
		break;

	case PRU_SENSE:
	case PRU_CONTROL:
		m = NULL;
		error = EOPNOTSUPP;
		break;

	case PRU_RCVOOB:
		if ((cb->s_oobflags & SF_IOOB) || so->so_oobmark ||
		    (so->so_state & SS_RCVATMARK)) {
			m->m_len = 1;
			*mtod(m, caddr_t) = cb->s_iobc;
			break;
		}
		error = EINVAL;
		break;

	case PRU_SENDOOB:
		if (sbspace(&so->so_snd) < -512) {
			error = ENOBUFS;
			break;
		}
		cb->s_oobflags |= SF_SOOB;
		/* fall into */
	case PRU_SEND:
		if (controlp) {
			u_short *p = mtod(controlp, u_short *);
			spp_newchecks[2]++;
			if ((p[0] == 5) && p[1] == 1) { /* XXXX, for testing */
				cb->s_shdr.sp_dt = *(u_char *)(&p[2]);
				spp_newchecks[3]++;
			}
			m_freem(controlp);
		}
		controlp = NULL;
		error = spp_output(cb, m);
		m = NULL;
		break;

	case PRU_SOCKADDR:
		ns_setsockaddr(nsp, nam);
		break;

	case PRU_PEERADDR:
		ns_setpeeraddr(nsp, nam);
		break;

	case PRU_SLOWTIMO:
		cb = spp_timers(cb, (int)nam);
		req |= ((int)nam) << 8;
		break;

	case PRU_FASTTIMO:
	case PRU_PROTORCV:
	case PRU_PROTOSEND:
		error = EOPNOTSUPP;
		break;

	default:
		panic("sp_usrreq");
	}
	if (cb && (so->so_options & SO_DEBUG || traceallspps))
		spp_trace(SA_USER, (u_char)ostate, cb, (struct spidp *)0, req);
release:
	if (controlp != NULL)
		m_freem(controlp);
	if (m != NULL)
		m_freem(m);
	splx(s);
	return (error);
}

spp_usrreq_sp(so, req, m, nam, controlp)
	struct socket *so;
	int req;
	struct mbuf *m, *nam, *controlp;
{
	int error = spp_usrreq(so, req, m, nam, controlp);

	if (req == PRU_ATTACH && error == 0) {
		struct nspcb *nsp = sotonspcb(so);
		((struct sppcb *)nsp->nsp_pcb)->s_flags |=
					(SF_HI | SF_HO | SF_PI);
	}
	return (error);
}

/*
 * Create template to be used to send spp packets on a connection.
 * Called after host entry created, fills
 * in a skeletal spp header (choosing connection id),
 * minimizing the amount of work necessary when the connection is used.
 */
spp_template(cb)
	register struct sppcb *cb;
{
	register struct nspcb *nsp = cb->s_nspcb;
	register struct idp *idp = cb->s_idp;
	register struct sockbuf *sb = &(nsp->nsp_socket->so_snd);

	idp->idp_pt = NSPROTO_SPP;
	idp->idp_sna = nsp->nsp_laddr;
	idp->idp_dna = nsp->nsp_faddr;
	cb->s_sid = htons(spp_iss);
	spp_iss += SPP_ISSINCR/2;
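	/*
	 * Connection ids are drawn from spp_iss, which advances by
	 * SPP_ISSINCR/2 per connection here and by SPP_ISSINCR/PR_SLOWHZ
	 * on every slow timeout (see spp_slowtimo()), much as TCP handles
	 * its initial send sequence.
	 */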
	cb->s_alo = 1;
	cb->s_cwnd = (sbspace(sb) * CUNIT) / cb->s_mtu;
	cb->s_ssthresh = cb->s_cwnd; /* Try to expand fast to full complement
					of large packets */
	cb->s_cwmx = (sbspace(sb) * CUNIT) / (2 * sizeof(struct spidp));
	cb->s_cwmx = max(cb->s_cwmx, cb->s_cwnd);
		/* But allow for lots of little packets as well */
}

/*
 * Close a SPIP control block:
 *	discard spp control block itself
 *	discard ns protocol control block
 *	wake up any sleepers
 */
struct sppcb *
spp_close(cb)
	register struct sppcb *cb;
{
	register struct spidp_q *s;
	struct nspcb *nsp = cb->s_nspcb;
	struct socket *so = nsp->nsp_socket;
	register struct mbuf *m;

	s = cb->s_q.si_next;
	while (s != &(cb->s_q)) {
		s = s->si_next;
		m = dtom(s->si_prev);
		remque(s->si_prev);
		m_freem(m);
	}
	(void) m_free(dtom(cb->s_idp));
	(void) m_free(dtom(cb));
	nsp->nsp_pcb = 0;
	soisdisconnected(so);
	ns_pcbdetach(nsp);
	sppstat.spps_closed++;
	return ((struct sppcb *)0);
}
/*
 * Someday we may do level 3 handshaking
 * to close a connection or send a xerox style error.
 * For now, just close.
 */
struct sppcb *
spp_usrclosed(cb)
	register struct sppcb *cb;
{
	return (spp_close(cb));
}
struct sppcb *
spp_disconnect(cb)
	register struct sppcb *cb;
{
	return (spp_close(cb));
}
/*
 * Drop connection, reporting
 * the specified error.
 */
struct sppcb *
spp_drop(cb, errno)
	register struct sppcb *cb;
	int errno;
{
	struct socket *so = cb->s_nspcb->nsp_socket;

	/*
	 * someday, in the xerox world
	 * we will generate error protocol packets
	 * announcing that the socket has gone away.
	 */
	if (TCPS_HAVERCVDSYN(cb->s_state)) {
		sppstat.spps_drops++;
		cb->s_state = TCPS_CLOSED;
		/*(void) tcp_output(cb);*/
	} else
		sppstat.spps_conndrops++;
	so->so_error = errno;
	return (spp_close(cb));
}

spp_abort(nsp)
	struct nspcb *nsp;
{

	(void) spp_close((struct sppcb *)nsp->nsp_pcb);
}

int spp_backoff[SPP_MAXRXTSHIFT+1] =
    { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 };
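/*
 * spp_backoff[] gives the multiplier applied to the base retransmit
 * timeout, indexed by s_rxtshift: it doubles for the first few
 * retransmissions and then stays pinned at 64.
 */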
/*
 * Fast timeout routine for processing delayed acks
 */
spp_fasttimo()
{
	register struct nspcb *nsp;
	register struct sppcb *cb;
	int s = splnet();

	nsp = nspcb.nsp_next;
	if (nsp)
		for (; nsp != &nspcb; nsp = nsp->nsp_next)
			if ((cb = (struct sppcb *)nsp->nsp_pcb) &&
			    (cb->s_flags & SF_DELACK)) {
				cb->s_flags &= ~SF_DELACK;
				cb->s_flags |= SF_ACKNOW;
				sppstat.spps_delack++;
				(void) spp_output(cb, (struct mbuf *) 0);
			}
	splx(s);
}

/*
 * spp protocol timeout routine called every 500 ms.
 * Updates the timers in all active pcb's and
 * causes finite state machine actions if timers expire.
 */
spp_slowtimo()
{
	register struct nspcb *ip, *ipnxt;
	register struct sppcb *cb;
	int s = splnet();
	register int i;

	/*
	 * Search through tcb's and update active timers.
	 */
	ip = nspcb.nsp_next;
	if (ip == 0) {
		splx(s);
		return;
	}
	while (ip != &nspcb) {
		cb = nstosppcb(ip);
		ipnxt = ip->nsp_next;
		if (cb == 0)
			goto tpgone;
		for (i = 0; i < SPPT_NTIMERS; i++) {
			if (cb->s_timer[i] && --cb->s_timer[i] == 0) {
				(void) spp_usrreq(cb->s_nspcb->nsp_socket,
				    PRU_SLOWTIMO, (struct mbuf *)0,
				    (struct mbuf *)i, (struct mbuf *)0,
				    (struct mbuf *)0);
				if (ipnxt->nsp_prev != ip)
					goto tpgone;
			}
		}
		cb->s_idle++;
		if (cb->s_rtt)
			cb->s_rtt++;
tpgone:
		ip = ipnxt;
	}
	spp_iss += SPP_ISSINCR/PR_SLOWHZ;		/* increment iss */
	splx(s);
}
/*
 * SPP timer processing.
 */
struct sppcb *
spp_timers(cb, timer)
	register struct sppcb *cb;
	int timer;
{
	long rexmt;
	int win;

	cb->s_force = 1 + timer;
	switch (timer) {

	/*
	 * 2 MSL timeout in shutdown went off.  TCP deletes connection
	 * control block.
	 */
	case SPPT_2MSL:
		printf("spp: SPPT_2MSL went off for no reason\n");
		cb->s_timer[timer] = 0;
		break;

	/*
	 * Retransmission timer went off.  Message has not
	 * been acked within retransmit interval.  Back off
	 * to a longer retransmit interval and retransmit one packet.
	 */
	case SPPT_REXMT:
		if (++cb->s_rxtshift > SPP_MAXRXTSHIFT) {
			cb->s_rxtshift = SPP_MAXRXTSHIFT;
			sppstat.spps_timeoutdrop++;
			cb = spp_drop(cb, ETIMEDOUT);
			break;
		}
		sppstat.spps_rexmttimeo++;
		rexmt = ((cb->s_srtt >> 2) + cb->s_rttvar) >> 1;
		rexmt *= spp_backoff[cb->s_rxtshift];
		SPPT_RANGESET(cb->s_rxtcur, rexmt, SPPTV_MIN, SPPTV_REXMTMAX);
		cb->s_timer[SPPT_REXMT] = cb->s_rxtcur;
		/*
		 * If we have backed off fairly far, our srtt
		 * estimate is probably bogus.  Clobber it
		 * so we'll take the next rtt measurement as our srtt;
		 * move the current srtt into rttvar to keep the current
		 * retransmit times until then.
		 */
		if (cb->s_rxtshift > SPP_MAXRXTSHIFT / 4) {
			cb->s_rttvar += (cb->s_srtt >> 2);
			cb->s_srtt = 0;
		}
		cb->s_snxt = cb->s_rack;
		/*
		 * If timing a packet, stop the timer.
		 */
		cb->s_rtt = 0;
		/*
		 * See very long discussion in tcp_timer.c about congestion
		 * window and ssthresh
		 */
		win = min(cb->s_swnd, (cb->s_cwnd/CUNIT)) / 2;
		if (win < 2)
			win = 2;
		cb->s_cwnd = CUNIT;
		cb->s_ssthresh = win * CUNIT;
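		/*
		 * On a retransmit timeout the slow-start threshold is set
		 * to half the window that was in flight (but at least two
		 * packets), and the congestion window restarts at a single
		 * packet (CUNIT).
		 */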
		(void) spp_output(cb, (struct mbuf *) 0);
		break;

	/*
	 * Persistence timer into zero window.
	 * Force a probe to be sent.
	 */
	case SPPT_PERSIST:
		sppstat.spps_persisttimeo++;
		spp_setpersist(cb);
		(void) spp_output(cb, (struct mbuf *) 0);
		break;

	/*
	 * Keep-alive timer went off; send something
	 * or drop connection if idle for too long.
	 */
	case SPPT_KEEP:
		sppstat.spps_keeptimeo++;
		if (cb->s_state < TCPS_ESTABLISHED)
			goto dropit;
		if (cb->s_nspcb->nsp_socket->so_options & SO_KEEPALIVE) {
			if (cb->s_idle >= SPPTV_MAXIDLE)
				goto dropit;
			sppstat.spps_keepprobe++;
			(void) spp_output(cb, (struct mbuf *) 0);
		} else
			cb->s_idle = 0;
		cb->s_timer[SPPT_KEEP] = SPPTV_KEEP;
		break;
	dropit:
		sppstat.spps_keepdrops++;
		cb = spp_drop(cb, ETIMEDOUT);
		break;
	}
	return (cb);
}
#ifndef lint
int SppcbSize = sizeof (struct sppcb);
int NspcbSize = sizeof (struct nspcb);
#endif /* lint */