1/*-
2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * a) Redistributions of source code must retain the above copyright notice,
8 * this list of conditions and the following disclaimer.
9 *
10 * b) Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the distribution.
13 *
14 * c) Neither the name of Cisco Systems, Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived
16 * from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31/* $KAME: sctp_usrreq.c,v 1.48 2005/03/07 23:26:08 itojun Exp $ */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD$");
35#include <netinet/sctp_os.h>
36#include <sys/proc.h>
37#include <netinet/sctp_pcb.h>
38#include <netinet/sctp_header.h>
39#include <netinet/sctp_var.h>
40#if defined(INET6)
41#endif
42#include <netinet/sctp_sysctl.h>
43#include <netinet/sctp_output.h>
44#include <netinet/sctp_uio.h>
45#include <netinet/sctp_asconf.h>
46#include <netinet/sctputil.h>
47#include <netinet/sctp_indata.h>
48#include <netinet/sctp_timer.h>
49#include <netinet/sctp_auth.h>
50#include <netinet/sctp_bsd_addr.h>
51#include <netinet/sctp_cc_functions.h>
52#include <netinet/udp.h>
53
54
55
56
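/*
 * sctp_init() runs once at module load: it zeroes the global statistics,
 * initializes the sysctl variables, derives the default send and receive
 * socket-buffer space from SB_MAX and the cluster pool, and sets up the
 * global PCB state (plus the packet-log buffer when SCTP_PACKET_LOGGING
 * is compiled in).
 */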
57void
58sctp_init(void)
59{
60 u_long sb_max_adj;
61
62 bzero(&SCTP_BASE_STATS, sizeof(struct sctpstat));
63
64 /* Initialize and modify the sysctled variables */
65 sctp_init_sysctls();
66 if ((nmbclusters / 8) > SCTP_ASOC_MAX_CHUNKS_ON_QUEUE)
67 SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue) = (nmbclusters / 8);
68 /*
69 * Allow a user to take no more than 1/2 the number of clusters or
70 * the SB_MAX, whichever is smaller, for the send window.
71 */
72 sb_max_adj = (u_long)((u_quad_t) (SB_MAX) * MCLBYTES / (MSIZE + MCLBYTES));
73 SCTP_BASE_SYSCTL(sctp_sendspace) = min(sb_max_adj,
74 (((uint32_t) nmbclusters / 2) * SCTP_DEFAULT_MAXSEGMENT));
75 /*
76 * Now for the recv window: should we take the same amount, or
77 * should I do 1/2 the SB_MAX instead in the SB_MAX min above? For
78 * now I will just copy.
79 */
80 SCTP_BASE_SYSCTL(sctp_recvspace) = SCTP_BASE_SYSCTL(sctp_sendspace);
81
82 SCTP_BASE_VAR(first_time) = 0;
83 SCTP_BASE_VAR(sctp_pcb_initialized) = 0;
84 sctp_pcb_init();
85#if defined(SCTP_PACKET_LOGGING)
86 SCTP_BASE_VAR(packet_log_writers) = 0;
87 SCTP_BASE_VAR(packet_log_end) = 0;
88 bzero(&SCTP_BASE_VAR(packet_log_buffer), SCTP_PACKET_LOG_SIZE);
89#endif
90
91
92}
93
94void
95sctp_finish(void)
96{
97 sctp_pcb_finish();
98}
99
100
101
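/*
 * Path-MTU shrink handler: record the new smallest MTU for the
 * association and walk the send and sent queues, allowing any chunk
 * that no longer fits (data plus IP/AUTH overhead) to be fragmented;
 * chunks already in flight are pulled out of the flight size and
 * marked for retransmission.
 */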
102void
103sctp_pathmtu_adjustment(struct sctp_inpcb *inp,
104 struct sctp_tcb *stcb,
105 struct sctp_nets *net,
106 uint16_t nxtsz)
107{
108 struct sctp_tmit_chunk *chk;
109 uint16_t overhead;
110
111 /* Adjust that too */
112 stcb->asoc.smallest_mtu = nxtsz;
113 /* now off to subtract IP_DF flag if needed */
114#ifdef SCTP_PRINT_FOR_B_AND_M
115 SCTP_PRINTF("sctp_pathmtu_adjust called inp:%p stcb:%p net:%p nxtsz:%d\n",
116 inp, stcb, net, nxtsz);
117#endif
118 overhead = IP_HDR_SIZE;
119 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
120 overhead += sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
121 }
122 TAILQ_FOREACH(chk, &stcb->asoc.send_queue, sctp_next) {
123 if ((chk->send_size + overhead) > nxtsz) {
124 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
125 }
126 }
127 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
128 if ((chk->send_size + overhead) > nxtsz) {
129 /*
130 * For this guy we also mark for immediate resend
131 * since we sent too big of a chunk
132 */
133 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
134 if (chk->sent < SCTP_DATAGRAM_RESEND) {
135 sctp_flight_size_decrease(chk);
136 sctp_total_flight_decrease(stcb, chk);
137 }
138 if (chk->sent != SCTP_DATAGRAM_RESEND) {
139 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
140 }
141 chk->sent = SCTP_DATAGRAM_RESEND;
142 chk->rec.data.doing_fast_retransmit = 0;
143 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
144 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PMTU,
145 chk->whoTo->flight_size,
146 chk->book_size,
147 (uintptr_t) chk->whoTo,
148 chk->rec.data.TSN_seq);
149 }
150 /* Clear any time so NO RTT is being done */
151 chk->do_rtt = 0;
152 }
153 }
154}
155
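/*
 * Handle an ICMP "fragmentation needed" report for this association:
 * verify the vtag, pick (or guess) the next-hop MTU, clamp the
 * destination's MTU and the association's smallest MTU accordingly,
 * and restart the PMTU-raise timer if it was running.
 */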
156static void
157sctp_notify_mbuf(struct sctp_inpcb *inp,
158 struct sctp_tcb *stcb,
159 struct sctp_nets *net,
160 struct ip *ip,
161 struct sctphdr *sh)
162{
163 struct icmp *icmph;
164 int totsz, tmr_stopped = 0;
165 uint16_t nxtsz;
166
167 /* protection */
168 if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
169 (ip == NULL) || (sh == NULL)) {
170 if (stcb != NULL) {
171 SCTP_TCB_UNLOCK(stcb);
172 }
173 return;
174 }
175 /* First job is to verify the vtag matches what I would send */
176 if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) {
177 SCTP_TCB_UNLOCK(stcb);
178 return;
179 }
180 icmph = (struct icmp *)((caddr_t)ip - (sizeof(struct icmp) -
181 sizeof(struct ip)));
182 if (icmph->icmp_type != ICMP_UNREACH) {
183 /* We only care about unreachable */
184 SCTP_TCB_UNLOCK(stcb);
185 return;
186 }
187 if (icmph->icmp_code != ICMP_UNREACH_NEEDFRAG) {
188 /* not an unreachable message due to frag. */
189 SCTP_TCB_UNLOCK(stcb);
190 return;
191 }
192 totsz = ip->ip_len;
193
194 nxtsz = ntohs(icmph->icmp_nextmtu);
195 if (nxtsz == 0) {
196 /*
197 * old type router that does not tell us what the next MTU
198 * size is. Rats, we will have to guess (in an educated fashion
199 * of course)
200 */
201 nxtsz = find_next_best_mtu(totsz);
202 }
203 /* Stop any PMTU timer */
204 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
205 tmr_stopped = 1;
206 sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
207 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_1);
208 }
209 /* Adjust destination size limit */
210 if (net->mtu > nxtsz) {
211 net->mtu = nxtsz;
212 if (net->port) {
213 net->mtu -= sizeof(struct udphdr);
214 }
215 }
216 /* now what about the ep? */
217 if (stcb->asoc.smallest_mtu > nxtsz) {
218#ifdef SCTP_PRINT_FOR_B_AND_M
219 SCTP_PRINTF("notify_mbuf (ICMP) calls sctp_pathmtu_adjust mtu:%d\n",
220 nxtsz);
221#endif
222 sctp_pathmtu_adjustment(inp, stcb, net, nxtsz);
223 }
224 if (tmr_stopped)
225 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
226
227 SCTP_TCB_UNLOCK(stcb);
228}
229
230
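/*
 * Generic ICMP unreachable handler: for network/host style unreachables
 * mark the destination down and notify the ULP; for protocol/port
 * unreachables treat the report as an OOTB abort and free the
 * association.
 */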
231void
232sctp_notify(struct sctp_inpcb *inp,
233 struct ip *ip,
234 struct sctphdr *sh,
235 struct sockaddr *to,
236 struct sctp_tcb *stcb,
237 struct sctp_nets *net)
238{
239#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
240 struct socket *so;
241
242#endif
243 /* protection */
244 int reason;
245 struct icmp *icmph;
246
247
248 if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
249 (sh == NULL) || (to == NULL)) {
250 if (stcb)
251 SCTP_TCB_UNLOCK(stcb);
252 return;
253 }
254 /* First job is to verify the vtag matches what I would send */
255 if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) {
256 SCTP_TCB_UNLOCK(stcb);
257 return;
258 }
259 icmph = (struct icmp *)((caddr_t)ip - (sizeof(struct icmp) -
260 sizeof(struct ip)));
261 if (icmph->icmp_type != ICMP_UNREACH) {
262 /* We only care about unreachable */
263 SCTP_TCB_UNLOCK(stcb);
264 return;
265 }
266 if ((icmph->icmp_code == ICMP_UNREACH_NET) ||
267 (icmph->icmp_code == ICMP_UNREACH_HOST) ||
268 (icmph->icmp_code == ICMP_UNREACH_NET_UNKNOWN) ||
269 (icmph->icmp_code == ICMP_UNREACH_HOST_UNKNOWN) ||
270 (icmph->icmp_code == ICMP_UNREACH_ISOLATED) ||
271 (icmph->icmp_code == ICMP_UNREACH_NET_PROHIB) ||
272 (icmph->icmp_code == ICMP_UNREACH_HOST_PROHIB) ||
273 (icmph->icmp_code == ICMP_UNREACH_FILTER_PROHIB)) {
274
275 /*
276 * Hmm, reachability problems we must examine closely. If it's
277 * not reachable, we may have lost a network. Or if there is
278 * NO protocol at the other end named SCTP; well, we consider
279 * it an OOTB abort.
280 */
281 if (net->dest_state & SCTP_ADDR_REACHABLE) {
282 /* Ok that destination is NOT reachable */
283 SCTP_PRINTF("ICMP (thresh %d/%d) takes interface %p down\n",
284 net->error_count,
285 net->failure_threshold,
286 net);
287
288 net->dest_state &= ~SCTP_ADDR_REACHABLE;
289 net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
290 /*
291 * JRS 5/14/07 - If a destination is unreachable,
292 * the PF bit is turned off. This allows an
293 * unambiguous use of the PF bit for destinations
294 * that are reachable but potentially failed. If the
295 * destination is set to the unreachable state, also
296 * set the destination to the PF state.
297 */
298 /*
299 * Add debug message here if destination is not in
300 * PF state.
301 */
302 /* Stop any running T3 timers here? */
303 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf)) {
304 net->dest_state &= ~SCTP_ADDR_PF;
305 SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from PF to unreachable.\n",
306 net);
307 }
308 net->error_count = net->failure_threshold + 1;
309 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
310 stcb, SCTP_FAILED_THRESHOLD,
311 (void *)net, SCTP_SO_NOT_LOCKED);
312 }
313 SCTP_TCB_UNLOCK(stcb);
314 } else if ((icmph->icmp_code == ICMP_UNREACH_PROTOCOL) ||
315 (icmph->icmp_code == ICMP_UNREACH_PORT)) {
316 /*
317 * Here the peer is either playing tricks on us, including
318 * an address that belongs to someone who does not support
319 * SCTP, OR was a userland implementation that shut down and
320 * now is dead. In either case treat it like an OOTB abort
321 * with no TCB.
322 */
323 reason = SCTP_PEER_FAULTY;
324 sctp_abort_notification(stcb, reason, SCTP_SO_NOT_LOCKED);
325#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
326 so = SCTP_INP_SO(inp);
327 atomic_add_int(&stcb->asoc.refcnt, 1);
328 SCTP_TCB_UNLOCK(stcb);
329 SCTP_SOCKET_LOCK(so, 1);
330 SCTP_TCB_LOCK(stcb);
331 atomic_subtract_int(&stcb->asoc.refcnt, 1);
332#endif
333 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_2);
334#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
335 SCTP_SOCKET_UNLOCK(so, 1);
336 /* SCTP_TCB_UNLOCK(stcb); MT: I think this is not needed. */
337#endif
338 /* no need to unlock here, since the TCB is gone */
339 } else {
340 SCTP_TCB_UNLOCK(stcb);
341 }
342}
343
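/*
 * Protocol control-input hook: given the ICMP-quoted IP/SCTP headers,
 * look up the matching association and dispatch to sctp_notify() or,
 * for PRC_MSGSIZE, to sctp_notify_mbuf().
 */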
344void
345sctp_ctlinput(cmd, sa, vip)
346 int cmd;
347 struct sockaddr *sa;
348 void *vip;
349{
350 struct ip *ip = vip;
351 struct sctphdr *sh;
352 uint32_t vrf_id;
353
354 /* FIX, for non-bsd is this right? */
355 vrf_id = SCTP_DEFAULT_VRFID;
356 if (sa->sa_family != AF_INET ||
357 ((struct sockaddr_in *)sa)->sin_addr.s_addr == INADDR_ANY) {
358 return;
359 }
360 if (PRC_IS_REDIRECT(cmd)) {
361 ip = 0;
362 } else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) {
363 return;
364 }
365 if (ip) {
366 struct sctp_inpcb *inp = NULL;
367 struct sctp_tcb *stcb = NULL;
368 struct sctp_nets *net = NULL;
369 struct sockaddr_in to, from;
370
371 sh = (struct sctphdr *)((caddr_t)ip + (ip->ip_hl << 2));
372 bzero(&to, sizeof(to));
373 bzero(&from, sizeof(from));
374 from.sin_family = to.sin_family = AF_INET;
375 from.sin_len = to.sin_len = sizeof(to);
376 from.sin_port = sh->src_port;
377 from.sin_addr = ip->ip_src;
378 to.sin_port = sh->dest_port;
379 to.sin_addr = ip->ip_dst;
380
381 /*
382 * 'to' holds the dest of the packet that failed to be sent.
383 * 'from' holds our local endpoint address. Thus we reverse
384 * the to and the from in the lookup.
385 */
386 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&from,
387 (struct sockaddr *)&to,
388 &inp, &net, 1, vrf_id);
389 if (stcb != NULL && inp && (inp->sctp_socket != NULL)) {
390 if (cmd != PRC_MSGSIZE) {
391 sctp_notify(inp, ip, sh,
392 (struct sockaddr *)&to, stcb,
393 net);
394 } else {
395 /* handle possible ICMP size messages */
396 sctp_notify_mbuf(inp, stcb, net, ip, sh);
397 }
398 } else {
399 if ((stcb == NULL) && (inp != NULL)) {
400 /* reduce ref-count */
401 SCTP_INP_WLOCK(inp);
402 SCTP_INP_DECR_REF(inp);
403 SCTP_INP_WUNLOCK(inp);
404 }
405 }
406 }
407 return;
408}
409
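/*
 * Sysctl handler that returns the credentials of the socket owning the
 * association matching a supplied (local, remote) address pair, after a
 * privilege and cr_canseesocket() check.
 */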
410static int
411sctp_getcred(SYSCTL_HANDLER_ARGS)
412{
413 struct xucred xuc;
414 struct sockaddr_in addrs[2];
415 struct sctp_inpcb *inp;
416 struct sctp_nets *net;
417 struct sctp_tcb *stcb;
418 int error;
419 uint32_t vrf_id;
420
421 /* FIX, for non-bsd is this right? */
422 vrf_id = SCTP_DEFAULT_VRFID;
423
424 error = priv_check(req->td, PRIV_NETINET_GETCRED);
425
426 if (error)
427 return (error);
428
429 error = SYSCTL_IN(req, addrs, sizeof(addrs));
430 if (error)
431 return (error);
432
433 stcb = sctp_findassociation_addr_sa(sintosa(&addrs[0]),
434 sintosa(&addrs[1]),
435 &inp, &net, 1, vrf_id);
436 if (stcb == NULL || inp == NULL || inp->sctp_socket == NULL) {
437 if ((inp != NULL) && (stcb == NULL)) {
438 /* reduce ref-count */
439 SCTP_INP_WLOCK(inp);
440 SCTP_INP_DECR_REF(inp);
441 goto cred_can_cont;
442 }
443 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
444 error = ENOENT;
445 goto out;
446 }
447 SCTP_TCB_UNLOCK(stcb);
448 /*
449 * We use the write lock here only because in the error leg we need
450 * it. If we used RLOCK, then we would have to
451 * wlock/decr/unlock/rlock, which in theory could create a hole.
452 * Better to use the higher wlock.
453 */
454 SCTP_INP_WLOCK(inp);
455cred_can_cont:
456 error = cr_canseesocket(req->td->td_ucred, inp->sctp_socket);
457 if (error) {
458 SCTP_INP_WUNLOCK(inp);
459 goto out;
460 }
461 cru2x(inp->sctp_socket->so_cred, &xuc);
462 SCTP_INP_WUNLOCK(inp);
463 error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
464out:
465 return (error);
466}
467
468SYSCTL_PROC(_net_inet_sctp, OID_AUTO, getcred, CTLTYPE_OPAQUE | CTLFLAG_RW,
469 0, 0, sctp_getcred, "S,ucred", "Get the ucred of a SCTP connection");
470
471
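/*
 * Socket-layer abort entry point: mark the PCB as gone (racing other
 * closers via the atomic compare-and-set on sctp_flags), free it with
 * an ABORT, and detach the socket buffers from the PCB.
 */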
472static void
473sctp_abort(struct socket *so)
474{
475 struct sctp_inpcb *inp;
476 uint32_t flags;
477
478 inp = (struct sctp_inpcb *)so->so_pcb;
479 if (inp == 0) {
480 return;
481 }
482sctp_must_try_again:
483 flags = inp->sctp_flags;
484#ifdef SCTP_LOG_CLOSING
485 sctp_log_closing(inp, NULL, 17);
486#endif
487 if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
488 (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
489#ifdef SCTP_LOG_CLOSING
490 sctp_log_closing(inp, NULL, 16);
491#endif
492 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
493 SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
494 SOCK_LOCK(so);
495 SCTP_SB_CLEAR(so->so_snd);
496 /*
497 * same for the rcv ones; they are only here for the
498 * accounting/select.
499 */
500 SCTP_SB_CLEAR(so->so_rcv);
501
502 /* Now null out the reference, we are completely detached. */
503 so->so_pcb = NULL;
504 SOCK_UNLOCK(so);
505 } else {
506 flags = inp->sctp_flags;
507 if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
508 goto sctp_must_try_again;
509 }
510 }
511 return;
512}
513
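/*
 * Socket attach: reserve default socket-buffer space, allocate the SCTP
 * PCB for this socket, mark it IPv4-only, and (with IPSEC) initialize
 * the security policy, unwinding the PCB on failure.
 */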
514static int
515sctp_attach(struct socket *so, int proto, struct thread *p)
516{
517 struct sctp_inpcb *inp;
518 struct inpcb *ip_inp;
519 int error;
520 uint32_t vrf_id = SCTP_DEFAULT_VRFID;
521
522#ifdef IPSEC
523 uint32_t flags;
524
525#endif
526
527 inp = (struct sctp_inpcb *)so->so_pcb;
528 if (inp != 0) {
529 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
530 return EINVAL;
531 }
532 if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
533 error = SCTP_SORESERVE(so, SCTP_BASE_SYSCTL(sctp_sendspace), SCTP_BASE_SYSCTL(sctp_recvspace));
534 if (error) {
535 return error;
536 }
537 }
538 error = sctp_inpcb_alloc(so, vrf_id);
539 if (error) {
540 return error;
541 }
542 inp = (struct sctp_inpcb *)so->so_pcb;
543 SCTP_INP_WLOCK(inp);
544 inp->sctp_flags &= ~SCTP_PCB_FLAGS_BOUND_V6; /* I'm not v6! */
545 ip_inp = &inp->ip_inp.inp;
546 ip_inp->inp_vflag |= INP_IPV4;
547 ip_inp->inp_ip_ttl = MODULE_GLOBAL(ip_defttl);
548#ifdef IPSEC
549 error = ipsec_init_policy(so, &ip_inp->inp_sp);
550#ifdef SCTP_LOG_CLOSING
551 sctp_log_closing(inp, NULL, 17);
552#endif
553 if (error != 0) {
554 flags = inp->sctp_flags;
555 if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
556 (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
557#ifdef SCTP_LOG_CLOSING
558 sctp_log_closing(inp, NULL, 15);
559#endif
560 SCTP_INP_WUNLOCK(inp);
561 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
562 SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
563 } else {
564 SCTP_INP_WUNLOCK(inp);
565 }
566 return error;
567 }
568#endif /* IPSEC */
569 SCTP_INP_WUNLOCK(inp);
570 return 0;
571}
572
573static int
574sctp_bind(struct socket *so, struct sockaddr *addr, struct thread *p)
575{
576 struct sctp_inpcb *inp = NULL;
577 int error;
578
579#ifdef INET6
580 if (addr && addr->sa_family != AF_INET) {
581 /* must be a v4 address! */
582 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
583 return EINVAL;
584 }
585#endif /* INET6 */
586 if (addr && (addr->sa_len != sizeof(struct sockaddr_in))) {
587 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
588 return EINVAL;
589 }
590 inp = (struct sctp_inpcb *)so->so_pcb;
591 if (inp == 0) {
592 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
593 return EINVAL;
594 }
595 error = sctp_inpcb_bind(so, addr, NULL, p);
596 return error;
597}
598
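/*
 * Socket close: mark the PCB as gone and free it, aborting outright
 * when SO_LINGER with a zero timeout is set or unread data remains,
 * otherwise attempting a graceful shutdown; finally detach the socket
 * buffers from the PCB.
 */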
599void
600sctp_close(struct socket *so)
601{
602 struct sctp_inpcb *inp;
603 uint32_t flags;
604
605 inp = (struct sctp_inpcb *)so->so_pcb;
606 if (inp == 0)
607 return;
608
609 /*
610 * Inform all the lower layer assocs that we are done.
611 */
612sctp_must_try_again:
613 flags = inp->sctp_flags;
614#ifdef SCTP_LOG_CLOSING
615 sctp_log_closing(inp, NULL, 17);
616#endif
617 if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
618 (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
619 if (((so->so_options & SO_LINGER) && (so->so_linger == 0)) ||
620 (so->so_rcv.sb_cc > 0)) {
621#ifdef SCTP_LOG_CLOSING
622 sctp_log_closing(inp, NULL, 13);
623#endif
624 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
625 SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
626 } else {
627#ifdef SCTP_LOG_CLOSING
628 sctp_log_closing(inp, NULL, 14);
629#endif
630 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_GRACEFUL_CLOSE,
631 SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
632 }
633 /*
634 * The socket is now detached, no matter what the state of
635 * the SCTP association.
636 */
637 SOCK_LOCK(so);
638 SCTP_SB_CLEAR(so->so_snd);
639 /*
640 * same for the rcv ones; they are only here for the
641 * accounting/select.
642 */
643 SCTP_SB_CLEAR(so->so_rcv);
644
645 /* Now null out the reference, we are completely detached. */
646 so->so_pcb = NULL;
647 SOCK_UNLOCK(so);
648 } else {
649 flags = inp->sctp_flags;
650 if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
651 goto sctp_must_try_again;
652 }
653 }
654 return;
655}
656
657
658int
659sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
660 struct mbuf *control, struct thread *p);
661
662
663int
664sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
665 struct mbuf *control, struct thread *p)
666{
667 struct sctp_inpcb *inp;
668 int error;
669
670 inp = (struct sctp_inpcb *)so->so_pcb;
671 if (inp == 0) {
672 if (control) {
673 sctp_m_freem(control);
674 control = NULL;
675 }
676 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
677 sctp_m_freem(m);
678 return EINVAL;
679 }
680 /* Got to have a 'to' address if we are NOT a connected socket */
681 if ((addr == NULL) &&
682 ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
683 (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE))
684 ) {
685 goto connected_type;
686 } else if (addr == NULL) {
687 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EDESTADDRREQ);
688 error = EDESTADDRREQ;
689 sctp_m_freem(m);
690 if (control) {
691 sctp_m_freem(control);
692 control = NULL;
693 }
694 return (error);
695 }
696#ifdef INET6
697 if (addr->sa_family != AF_INET) {
698 /* must be a v4 address! */
699 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EDESTADDRREQ);
700 sctp_m_freem(m);
701 if (control) {
702 sctp_m_freem(control);
703 control = NULL;
704 }
705 error = EDESTADDRREQ;
706 return EDESTADDRREQ;
707 }
708#endif /* INET6 */
709connected_type:
710 /* now what about control */
711 if (control) {
712 if (inp->control) {
713 SCTP_PRINTF("huh? control set?\n");
714 sctp_m_freem(inp->control);
715 inp->control = NULL;
716 }
717 inp->control = control;
718 }
719 /* Place the data */
720 if (inp->pkt) {
721 SCTP_BUF_NEXT(inp->pkt_last) = m;
722 inp->pkt_last = m;
723 } else {
724 inp->pkt_last = inp->pkt = m;
725 }
726 if (
727 /* FreeBSD uses a flag passed */
728 ((flags & PRUS_MORETOCOME) == 0)
729 ) {
730 /*
731 * note that with the current version this code will only be used
732 * by OpenBSD -- NetBSD, FreeBSD, and MacOS have methods for
733 * re-defining sosend to use sctp_sosend. One can
734 * optionally switch back to this code (by changing back the
735 * definitions), but this is not advisable. This code is used
736 * by FreeBSD when sending a file with sendfile(), though.
737 */
738 int ret;
739
740 ret = sctp_output(inp, inp->pkt, addr, inp->control, p, flags);
741 inp->pkt = NULL;
742 inp->control = NULL;
743 return (ret);
744 } else {
745 return (0);
746 }
747}
748
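/*
 * Socket disconnect (1-to-1 style sockets only): abort the association
 * when SO_LINGER with a zero timeout is set or unread data remains;
 * otherwise send SHUTDOWN if the queues are empty, or mark the
 * association SHUTDOWN_PENDING until the queued data drains.
 */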
749int
750sctp_disconnect(struct socket *so)
751{
752 struct sctp_inpcb *inp;
753
754 inp = (struct sctp_inpcb *)so->so_pcb;
755 if (inp == NULL) {
756 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
757 return (ENOTCONN);
758 }
759 SCTP_INP_RLOCK(inp);
760 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
761 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
762 if (LIST_EMPTY(&inp->sctp_asoc_list)) {
763 /* No connection */
764 SCTP_INP_RUNLOCK(inp);
765 return (0);
766 } else {
767 struct sctp_association *asoc;
768 struct sctp_tcb *stcb;
769
770 stcb = LIST_FIRST(&inp->sctp_asoc_list);
771 if (stcb == NULL) {
772 SCTP_INP_RUNLOCK(inp);
773 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
774 return (EINVAL);
775 }
776 SCTP_TCB_LOCK(stcb);
777 asoc = &stcb->asoc;
778 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
779 /* We are about to be freed, out of here */
780 SCTP_TCB_UNLOCK(stcb);
781 SCTP_INP_RUNLOCK(inp);
782 return (0);
783 }
784 if (((so->so_options & SO_LINGER) &&
785 (so->so_linger == 0)) ||
786 (so->so_rcv.sb_cc > 0)) {
787 if (SCTP_GET_STATE(asoc) !=
788 SCTP_STATE_COOKIE_WAIT) {
789 /* Left with Data unread */
790 struct mbuf *err;
791
792 err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
793 if (err) {
794 /*
795 * Fill in the user
796 * initiated abort
797 */
798 struct sctp_paramhdr *ph;
799
800 ph = mtod(err, struct sctp_paramhdr *);
801 SCTP_BUF_LEN(err) = sizeof(struct sctp_paramhdr);
802 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
803 ph->param_length = htons(SCTP_BUF_LEN(err));
804 }
805#if defined(SCTP_PANIC_ON_ABORT)
806 panic("disconnect does an abort");
807#endif
808 sctp_send_abort_tcb(stcb, err, SCTP_SO_LOCKED);
809 SCTP_STAT_INCR_COUNTER32(sctps_aborted);
810 }
811 SCTP_INP_RUNLOCK(inp);
812 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
813 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
814 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
815 }
816 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_3);
817 /* No unlock tcb assoc is gone */
818 return (0);
819 }
820 if (TAILQ_EMPTY(&asoc->send_queue) &&
821 TAILQ_EMPTY(&asoc->sent_queue) &&
822 (asoc->stream_queue_cnt == 0)) {
823 /* there is nothing queued to send, so done */
824 if (asoc->locked_on_sending) {
825 goto abort_anyway;
826 }
827 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
828 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
829 /* only send SHUTDOWN 1st time thru */
830 sctp_stop_timers_for_shutdown(stcb);
831 sctp_send_shutdown(stcb,
832 stcb->asoc.primary_destination);
833 sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_LOCKED);
834 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
835 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
836 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
837 }
838 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
839 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
840 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
841 stcb->sctp_ep, stcb,
842 asoc->primary_destination);
843 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
844 stcb->sctp_ep, stcb,
845 asoc->primary_destination);
846 }
847 } else {
848 /*
849 * we still got (or just got) data to send,
850 * so set SHUTDOWN_PENDING
851 */
852 /*
853 * XXX sockets draft says that SCTP_EOF
854 * should be sent with no data. Currently,
855 * we will allow user data to be sent first
856 * and move to SHUTDOWN-PENDING
857 */
858 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
859 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
860 asoc->primary_destination);
861 if (asoc->locked_on_sending) {
862 /* Locked to send out the data */
863 struct sctp_stream_queue_pending *sp;
864
865 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
866 if (sp == NULL) {
867 SCTP_PRINTF("Error, sp is NULL, locked on sending is non-null strm:%d\n",
868 asoc->locked_on_sending->stream_no);
869 } else {
870 if ((sp->length == 0) && (sp->msg_is_complete == 0))
871 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
872 }
873 }
874 if (TAILQ_EMPTY(&asoc->send_queue) &&
875 TAILQ_EMPTY(&asoc->sent_queue) &&
876 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
877 struct mbuf *op_err;
878
879 abort_anyway:
880 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
881 0, M_DONTWAIT, 1, MT_DATA);
882 if (op_err) {
883 /*
884 * Fill in the user
885 * initiated abort
886 */
887 struct sctp_paramhdr *ph;
888 uint32_t *ippp;
889
890 SCTP_BUF_LEN(op_err) =
891 (sizeof(struct sctp_paramhdr) + sizeof(uint32_t));
892 ph = mtod(op_err,
893 struct sctp_paramhdr *);
894 ph->param_type = htons(
895 SCTP_CAUSE_USER_INITIATED_ABT);
896 ph->param_length = htons(SCTP_BUF_LEN(op_err));
897 ippp = (uint32_t *) (ph + 1);
898 *ippp = htonl(SCTP_FROM_SCTP_USRREQ + SCTP_LOC_4);
899 }
900#if defined(SCTP_PANIC_ON_ABORT)
901 panic("disconnect does an abort");
902#endif
903
904 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_4;
905 sctp_send_abort_tcb(stcb, op_err, SCTP_SO_LOCKED);
906 SCTP_STAT_INCR_COUNTER32(sctps_aborted);
907 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
908 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
909 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
910 }
911 SCTP_INP_RUNLOCK(inp);
912 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_5);
913 return (0);
914 } else {
915 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED);
916 }
917 }
918 soisdisconnecting(so);
919 SCTP_TCB_UNLOCK(stcb);
920 SCTP_INP_RUNLOCK(inp);
921 return (0);
922 }
923 /* not reached */
924 } else {
925 /* UDP model does not support this */
926 SCTP_INP_RUNLOCK(inp);
927 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
928 return EOPNOTSUPP;
929 }
930}
931
932int
933sctp_flush(struct socket *so, int how)
934{
935 /*
936 * We will just clear out the values and let subsequent close clear
937 * out the data, if any. Note if the user did a shutdown(SHUT_RD)
938 * they will not be able to read the data; the socket will block
939 * that from happening.
940 */
941 if ((how == PRU_FLUSH_RD) || (how == PRU_FLUSH_RDWR)) {
942 /*
943 * First make sure the sb will be happy, we don't use these
944 * except maybe the count
945 */
946 so->so_rcv.sb_cc = 0;
947 so->so_rcv.sb_mbcnt = 0;
948 so->so_rcv.sb_mb = NULL;
949 }
950 if ((how == PRU_FLUSH_WR) || (how == PRU_FLUSH_RDWR)) {
951 /*
952 * First make sure the sb will be happy, we don't use these
953 * except maybe the count
954 */
955 so->so_snd.sb_cc = 0;
956 so->so_snd.sb_mbcnt = 0;
957 so->so_snd.sb_mb = NULL;
958
959 }
960 return (0);
961}
962
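/*
 * Socket shutdown: rejected for one-to-many sockets; for the TCP model,
 * stop further sends and either send SHUTDOWN now (queues empty) or
 * mark the association SHUTDOWN_PENDING, aborting if only a partial
 * message would remain.
 */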
963int
964sctp_shutdown(struct socket *so)
965{
966 struct sctp_inpcb *inp;
967
968 inp = (struct sctp_inpcb *)so->so_pcb;
969 if (inp == 0) {
970 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
971 return EINVAL;
972 }
973 SCTP_INP_RLOCK(inp);
974 /* For UDP model this is an invalid call */
975 if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
976 /* Restore the flags that the soshutdown took away. */
977 so->so_rcv.sb_state &= ~SBS_CANTRCVMORE;
978 /* This proc will wakeup for read and do nothing (I hope) */
979 SCTP_INP_RUNLOCK(inp);
980 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
981 return (EOPNOTSUPP);
982 }
983 /*
984 * Ok, if we reach here it's the TCP model and it is either a SHUT_WR
985 * or SHUT_RDWR. This means we put the shutdown flag against it.
986 */
987 {
988 struct sctp_tcb *stcb;
989 struct sctp_association *asoc;
990
991 if ((so->so_state &
992 (SS_ISCONNECTED | SS_ISCONNECTING | SS_ISDISCONNECTING)) == 0) {
993 SCTP_INP_RUNLOCK(inp);
994 return (ENOTCONN);
995 }
996 socantsendmore(so);
997
998 stcb = LIST_FIRST(&inp->sctp_asoc_list);
999 if (stcb == NULL) {
1000 /*
1001 * Ok we hit the case that the shutdown call was
1002 * made after an abort or something. Nothing to do
1003 * now.
1004 */
1005 SCTP_INP_RUNLOCK(inp);
1006 return (0);
1007 }
1008 SCTP_TCB_LOCK(stcb);
1009 asoc = &stcb->asoc;
1010 if (TAILQ_EMPTY(&asoc->send_queue) &&
1011 TAILQ_EMPTY(&asoc->sent_queue) &&
1012 (asoc->stream_queue_cnt == 0)) {
1013 if (asoc->locked_on_sending) {
1014 goto abort_anyway;
1015 }
1016 /* there is nothing queued to send, so I'm done... */
1017 if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
1018 /* only send SHUTDOWN the first time through */
1019 sctp_stop_timers_for_shutdown(stcb);
1020 sctp_send_shutdown(stcb,
1021 stcb->asoc.primary_destination);
1022 sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_LOCKED);
1023 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
1024 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
1025 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
1026 }
1027 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
1028 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
1029 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
1030 stcb->sctp_ep, stcb,
1031 asoc->primary_destination);
1032 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1033 stcb->sctp_ep, stcb,
1034 asoc->primary_destination);
1035 }
1036 } else {
1037 /*
1038 * we still got (or just got) data to send, so set
1039 * SHUTDOWN_PENDING
1040 */
1041 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
1042 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
1043 asoc->primary_destination);
1044
1045 if (asoc->locked_on_sending) {
1046 /* Locked to send out the data */
1047 struct sctp_stream_queue_pending *sp;
1048
1049 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
1050 if (sp == NULL) {
1051 SCTP_PRINTF("Error, sp is NULL, locked on sending is non-null strm:%d\n",
1052 asoc->locked_on_sending->stream_no);
1053 } else {
1054 if ((sp->length == 0) && (sp->msg_is_complete == 0)) {
1055 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
1056 }
1057 }
1058 }
1059 if (TAILQ_EMPTY(&asoc->send_queue) &&
1060 TAILQ_EMPTY(&asoc->sent_queue) &&
1061 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
1062 struct mbuf *op_err;
1063
1064 abort_anyway:
1065 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
1066 0, M_DONTWAIT, 1, MT_DATA);
1067 if (op_err) {
1068 /* Fill in the user initiated abort */
1069 struct sctp_paramhdr *ph;
1070 uint32_t *ippp;
1071
1072 SCTP_BUF_LEN(op_err) =
1073 sizeof(struct sctp_paramhdr) + sizeof(uint32_t);
1074 ph = mtod(op_err,
1075 struct sctp_paramhdr *);
1076 ph->param_type = htons(
1077 SCTP_CAUSE_USER_INITIATED_ABT);
1078 ph->param_length = htons(SCTP_BUF_LEN(op_err));
1079 ippp = (uint32_t *) (ph + 1);
1080 *ippp = htonl(SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6);
1081 }
1082#if defined(SCTP_PANIC_ON_ABORT)
1083 panic("shutdown does an abort");
1084#endif
1085 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6;
1086 sctp_abort_an_association(stcb->sctp_ep, stcb,
1087 SCTP_RESPONSE_TO_USER_REQ,
1088 op_err, SCTP_SO_LOCKED);
1089 goto skip_unlock;
1090 } else {
1091 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED);
1092 }
1093 }
1094 SCTP_TCB_UNLOCK(stcb);
1095 }
1096skip_unlock:
1097 SCTP_INP_RUNLOCK(inp);
1098 return 0;
1099}
1100
1101/*
1102 * copies a "user" presentable address and removes embedded scope, etc.
1103 * returns 0 on success, 1 on error
1104 */
1105static uint32_t
1106sctp_fill_user_address(struct sockaddr_storage *ss, struct sockaddr *sa)
1107{
1108#ifdef INET6
1109 struct sockaddr_in6 lsa6;
1110
1111 sa = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)sa,
1112 &lsa6);
1113#endif
1114 memcpy(ss, sa, sa->sa_len);
1115 return (0);
1116}
1117
1118
1119
1120/*
1121 * NOTE: assumes addr lock is held
1122 */
1123static size_t
1124sctp_fill_up_addresses_vrf(struct sctp_inpcb *inp,
1125 struct sctp_tcb *stcb,
1126 size_t limit,
1127 struct sockaddr_storage *sas,
1128 uint32_t vrf_id)
1129{
1130 struct sctp_ifn *sctp_ifn;
1131 struct sctp_ifa *sctp_ifa;
1132 int loopback_scope, ipv4_local_scope, local_scope, site_scope;
1133 size_t actual;
1134 int ipv4_addr_legal, ipv6_addr_legal;
1135 struct sctp_vrf *vrf;
1136
1137 actual = 0;
1138 if (limit <= 0)
1139 return (actual);
1140
1141 if (stcb) {
1142 /* Turn on all the appropriate scope */
1143 loopback_scope = stcb->asoc.loopback_scope;
1144 ipv4_local_scope = stcb->asoc.ipv4_local_scope;
1145 local_scope = stcb->asoc.local_scope;
1146 site_scope = stcb->asoc.site_scope;
1147 } else {
1148 /* Turn on ALL scope, since we look at the EP */
1149 loopback_scope = ipv4_local_scope = local_scope =
1150 site_scope = 1;
1151 }
1152 ipv4_addr_legal = ipv6_addr_legal = 0;
1153 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1154 ipv6_addr_legal = 1;
1155 if (SCTP_IPV6_V6ONLY(inp) == 0) {
1156 ipv4_addr_legal = 1;
1157 }
1158 } else {
1159 ipv4_addr_legal = 1;
1160 }
1161 vrf = sctp_find_vrf(vrf_id);
1162 if (vrf == NULL) {
1163 return (0);
1164 }
1165 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
1166 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
1167 if ((loopback_scope == 0) &&
1168 SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
1169 /* Skip loopback if loopback_scope not set */
1170 continue;
1171 }
1172 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
1173 if (stcb) {
1174 /*
1175 * For the BOUND-ALL case, the list
1176 * associated with a TCB is always
1177 * considered a reverse list, i.e.
1178 * it lists addresses that are NOT
1179 * part of the association. If this
1180 * is one of those we must skip it.
1181 */
1182 if (sctp_is_addr_restricted(stcb,
1183 sctp_ifa)) {
1184 continue;
1185 }
1186 }
1187 switch (sctp_ifa->address.sa.sa_family) {
1188 case AF_INET:
1189 if (ipv4_addr_legal) {
1190 struct sockaddr_in *sin;
1191
1192 sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
1193 if (sin->sin_addr.s_addr == 0) {
1194 /*
1195 * we skip
1196 * unspecified
1197 * addresses
1198 */
1199 continue;
1200 }
1201 if ((ipv4_local_scope == 0) &&
1202 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
1203 continue;
1204 }
1205#ifdef INET6
1206 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
1207 in6_sin_2_v4mapsin6(sin, (struct sockaddr_in6 *)sas);
1208 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
1209 sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(struct sockaddr_in6));
1210 actual += sizeof(struct sockaddr_in6);
1211 } else {
1212#endif
1213 memcpy(sas, sin, sizeof(*sin));
1214 ((struct sockaddr_in *)sas)->sin_port = inp->sctp_lport;
1215 sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin));
1216 actual += sizeof(*sin);
1217#ifdef INET6
1218 }
1219#endif
1220 if (actual >= limit) {
1221 return (actual);
1222 }
1223 } else {
1224 continue;
1225 }
1226 break;
1227#ifdef INET6
1228 case AF_INET6:
1229 if (ipv6_addr_legal) {
1230 struct sockaddr_in6 *sin6;
1231
1232 sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
1233 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1234 /*
1235 * we skip
1236 * unspecified
1237 * addresses
1238 */
1239 continue;
1240 }
1241 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
1242 if (local_scope == 0)
1243 continue;
1244 if (sin6->sin6_scope_id == 0) {
1245 if (sa6_recoverscope(sin6) != 0)
1246 /* bad link local address */
1260 continue;
1261 }
1262 }
1263 if ((site_scope == 0) &&
1264 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
1265 continue;
1266 }
1267 memcpy(sas, sin6, sizeof(*sin6));
1268 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
1269 sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin6));
1270 actual += sizeof(*sin6);
1271 if (actual >= limit) {
1272 return (actual);
1273 }
1274 } else {
1275 continue;
1276 }
1277 break;
1278#endif
1279 default:
1280 /* TSNH */
1281 break;
1282 }
1283 }
1284 }
1285 } else {
1286 struct sctp_laddr *laddr;
1287
1288 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
1289 if (stcb) {
1290 if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
1291 continue;
1292 }
1293 }
1294 if (sctp_fill_user_address(sas, &laddr->ifa->address.sa))
1295 continue;
1296
1297 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
1298 sas = (struct sockaddr_storage *)((caddr_t)sas +
1299 laddr->ifa->address.sa.sa_len);
1300 actual += laddr->ifa->address.sa.sa_len;
1301 if (actual >= limit) {
1302 return (actual);
1303 }
1304 }
1305 }
1306 return (actual);
1307}
1308
1309static size_t
1310sctp_fill_up_addresses(struct sctp_inpcb *inp,
1311 struct sctp_tcb *stcb,
1312 size_t limit,
1313 struct sockaddr_storage *sas)
1314{
1315 size_t size = 0;
1316
1317 SCTP_IPI_ADDR_RLOCK();
1318 /* fill up addresses for the endpoint's default vrf */
1319 size = sctp_fill_up_addresses_vrf(inp, stcb, limit, sas,
1320 inp->def_vrf_id);
1321 SCTP_IPI_ADDR_RUNLOCK();
1322 return (size);
1323}
1324
1325/*
1326 * NOTE: assumes addr lock is held
1327 */
1328static int
1329sctp_count_max_addresses_vrf(struct sctp_inpcb *inp, uint32_t vrf_id)
1330{
1331 int cnt = 0;
1332 struct sctp_vrf *vrf = NULL;
1333
1334 /*
1335 * In both sub-set bound and bound_all cases we return the MAXIMUM
1336 * number of addresses that you COULD get. In reality the sub-set
1337 * bound may have an exclusion list for a given TCB OR in the
1338 * bound-all case a TCB may NOT include the loopback or other
1339 * addresses as well.
1340 */
1341 vrf = sctp_find_vrf(vrf_id);
1342 if (vrf == NULL) {
1343 return (0);
1344 }
1345 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
1346 struct sctp_ifn *sctp_ifn;
1347 struct sctp_ifa *sctp_ifa;
1348
1349 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
1350 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
1351 /* Count them if they are the right type */
1352 if (sctp_ifa->address.sa.sa_family == AF_INET) {
1353 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4))
1354 cnt += sizeof(struct sockaddr_in6);
1355 else
1356 cnt += sizeof(struct sockaddr_in);
1357
1358 } else if (sctp_ifa->address.sa.sa_family == AF_INET6)
1359 cnt += sizeof(struct sockaddr_in6);
1360 }
1361 }
1362 } else {
1363 struct sctp_laddr *laddr;
1364
1365 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
1366 if (laddr->ifa->address.sa.sa_family == AF_INET) {
1367 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4))
1368 cnt += sizeof(struct sockaddr_in6);
1369 else
1370 cnt += sizeof(struct sockaddr_in);
1371
1372 } else if (laddr->ifa->address.sa.sa_family == AF_INET6)
1373 cnt += sizeof(struct sockaddr_in6);
1374 }
1375 }
1376 return (cnt);
1377}
1378
1379static int
1380sctp_count_max_addresses(struct sctp_inpcb *inp)
1381{
1382 int cnt = 0;
1383
1384 SCTP_IPI_ADDR_RLOCK();
1385 /* count addresses for the endpoint's default VRF */
1386 cnt = sctp_count_max_addresses_vrf(inp, inp->def_vrf_id);
1387 SCTP_IPI_ADDR_RUNLOCK();
1388 return (cnt);
1389}
1390
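/*
 * Common worker for the sctp_connectx() socket options: validate the
 * packed address list, bind an ephemeral port if needed, create the
 * association, add the remaining addresses, and either start the INIT
 * timer (delayed connect) or send INIT immediately.
 */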
1391static int
1392sctp_do_connect_x(struct socket *so, struct sctp_inpcb *inp, void *optval,
1393 size_t optsize, void *p, int delay)
1394{
1395 int error = 0;
1396 int creat_lock_on = 0;
1397 struct sctp_tcb *stcb = NULL;
1398 struct sockaddr *sa;
1399 int num_v6 = 0, num_v4 = 0, *totaddrp, totaddr;
1400 int added = 0;
1401 uint32_t vrf_id;
1402 int bad_addresses = 0;
1403 sctp_assoc_t *a_id;
1404
1405 SCTPDBG(SCTP_DEBUG_PCB1, "Connectx called\n");
1406
1407 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
1408 (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
1409 /* We are already connected AND the TCP model */
1410 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE);
1411 return (EADDRINUSE);
1412 }
1413 if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) &&
1414 (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE))) {
1415 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
1416 return (EINVAL);
1417 }
1418 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
1419 SCTP_INP_RLOCK(inp);
1420 stcb = LIST_FIRST(&inp->sctp_asoc_list);
1421 SCTP_INP_RUNLOCK(inp);
1422 }
1423 if (stcb) {
1424 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
1425 return (EALREADY);
1426 }
1427 SCTP_INP_INCR_REF(inp);
1428 SCTP_ASOC_CREATE_LOCK(inp);
1429 creat_lock_on = 1;
1430 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1431 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
1432 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EFAULT);
1433 error = EFAULT;
1434 goto out_now;
1435 }
1436 totaddrp = (int *)optval;
1437 totaddr = *totaddrp;
1438 sa = (struct sockaddr *)(totaddrp + 1);
1439 stcb = sctp_connectx_helper_find(inp, sa, &totaddr, &num_v4, &num_v6, &error, (optsize - sizeof(int)), &bad_addresses);
1440 if ((stcb != NULL) || bad_addresses) {
1441 /* Already have or am bringing up an association */
1442 SCTP_ASOC_CREATE_UNLOCK(inp);
1443 creat_lock_on = 0;
1444 if (stcb)
1445 SCTP_TCB_UNLOCK(stcb);
1446 if (bad_addresses == 0) {
1447 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
1448 error = EALREADY;
1449 }
1450 goto out_now;
1451 }
1452#ifdef INET6
1453 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
1454 (num_v6 > 0)) {
1455 error = EINVAL;
1456 goto out_now;
1457 }
1458 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
1459 (num_v4 > 0)) {
1460 struct in6pcb *inp6;
1461
1462 inp6 = (struct in6pcb *)inp;
1463 if (SCTP_IPV6_V6ONLY(inp6)) {
1464 /*
1465 * if IPV6_V6ONLY flag, ignore connections destined
1466 * to a v4 addr or v4-mapped addr
1467 */
1468 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
1469 error = EINVAL;
1470 goto out_now;
1471 }
1472 }
1473#endif /* INET6 */
1474 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
1475 SCTP_PCB_FLAGS_UNBOUND) {
1476 /* Bind an ephemeral port */
1477 error = sctp_inpcb_bind(so, NULL, NULL, p);
1478 if (error) {
1479 goto out_now;
1480 }
1481 }
1482 /* FIX ME: do we want to pass in a vrf on the connect call? */
1483 vrf_id = inp->def_vrf_id;
1484
1485
1486 /* We are GOOD to go */
1487 stcb = sctp_aloc_assoc(inp, sa, 1, &error, 0, vrf_id,
1488 (struct thread *)p
1489 );
1490 if (stcb == NULL) {
1491 /* Gak! no memory */
1492 goto out_now;
1493 }
1494 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT);
1495 /* move to second address */
1496 if (sa->sa_family == AF_INET)
1497 sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in));
1498 else
1499 sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in6));
1500
1501 error = 0;
1502 added = sctp_connectx_helper_add(stcb, sa, (totaddr - 1), &error);
1503 /* Fill in the return id */
1504 if (error) {
1505 (void)sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_12);
1506 goto out_now;
1507 }
1508 a_id = (sctp_assoc_t *) optval;
1509 *a_id = sctp_get_associd(stcb);
1510
1511 /* initialize authentication parameters for the assoc */
1512 sctp_initialize_auth_params(inp, stcb);
1513
1514 if (delay) {
1515 /* doing delayed connection */
1516 stcb->asoc.delayed_connection = 1;
1517 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, stcb->asoc.primary_destination);
1518 } else {
1519 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
1520 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
1521 }
1522 SCTP_TCB_UNLOCK(stcb);
1523 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
1524 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
1525 /* Set the connected flag so we can queue data */
1526 soisconnecting(so);
1527 }
1528out_now:
1529 if (creat_lock_on) {
1530 SCTP_ASOC_CREATE_UNLOCK(inp);
1531 }
1532 SCTP_INP_DECR_REF(inp);
1533 return error;
1534}
1535
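/*
 * Helper macros for the socket-option code below: SCTP_FIND_STCB locates
 * the TCB implied by an association id (or the lone TCB of a 1-to-1
 * socket) and returns it locked, while SCTP_CHECK_AND_CAST validates the
 * option buffer length before casting it to the expected type.
 */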
1536#define SCTP_FIND_STCB(inp, stcb, assoc_id) { \
1537 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||\
1538 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { \
1539 SCTP_INP_RLOCK(inp); \
1540 stcb = LIST_FIRST(&inp->sctp_asoc_list); \
1541 if (stcb) { \
1542 SCTP_TCB_LOCK(stcb); \
1543 } \
1544 SCTP_INP_RUNLOCK(inp); \
1545 } else if (assoc_id != 0) { \
1546 stcb = sctp_findassociation_ep_asocid(inp, assoc_id, 1); \
1547 if (stcb == NULL) { \
1548 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); \
1549 error = ENOENT; \
1550 break; \
1551 } \
1552 } else { \
1553 stcb = NULL; \
1554 } \
1555 }
1556
1557
1558#define SCTP_CHECK_AND_CAST(destp, srcp, type, size) {\
1559 if (size < sizeof(type)) { \
1560 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); \
1561 error = EINVAL; \
1562 break; \
1563 } else { \
1564 destp = (type *)srcp; \
1565 } \
1566 }
1567
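/*
 * getsockopt() worker: copy the requested SCTP option value out of the
 * endpoint or, when an association id is supplied, out of the matching
 * TCB.
 */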
1568static int
1569sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize,
1570 void *p)
1571{
1572 struct sctp_inpcb *inp = NULL;
1573 int error, val = 0;
1574 struct sctp_tcb *stcb = NULL;
1575
1576 if (optval == NULL) {
1577 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
1578 return (EINVAL);
1579 }
1580 inp = (struct sctp_inpcb *)so->so_pcb;
1581 if (inp == 0) {
1582 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
1583 return EINVAL;
1584 }
1585 error = 0;
1586
1587 switch (optname) {
1588 case SCTP_NODELAY:
1589 case SCTP_AUTOCLOSE:
1590 case SCTP_EXPLICIT_EOR:
1591 case SCTP_AUTO_ASCONF:
1592 case SCTP_DISABLE_FRAGMENTS:
1593 case SCTP_I_WANT_MAPPED_V4_ADDR:
1594 case SCTP_USE_EXT_RCVINFO:
1595 SCTP_INP_RLOCK(inp);
1596 switch (optname) {
1597 case SCTP_DISABLE_FRAGMENTS:
1598 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT);
1599 break;
1600 case SCTP_I_WANT_MAPPED_V4_ADDR:
1601 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4);
1602 break;
1603 case SCTP_AUTO_ASCONF:
1604 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
1605 /* only valid for bound all sockets */
1606 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTO_ASCONF);
1607 } else {
1608 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
1609 error = EINVAL;
1610 goto flags_out;
1611 }
1612 break;
1613 case SCTP_EXPLICIT_EOR:
1614 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
1615 break;
1616 case SCTP_NODELAY:
1617 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY);
1618 break;
1619 case SCTP_USE_EXT_RCVINFO:
1620 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO);
1621 break;
1622 case SCTP_AUTOCLOSE:
1623 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))
1624 val = TICKS_TO_SEC(inp->sctp_ep.auto_close_time);
1625 else
1626 val = 0;
1627 break;
1628
1629 default:
1630 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
1631 error = ENOPROTOOPT;
1632 } /* end switch (sopt->sopt_name) */
1633 if (optname != SCTP_AUTOCLOSE) {
1634 /* make it an "on/off" value */
1635 val = (val != 0);
1636 }
1637 if (*optsize < sizeof(val)) {
1638 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
1639 error = EINVAL;
1640 }
1641flags_out:
1642 SCTP_INP_RUNLOCK(inp);
1643 if (error == 0) {
1644 /* return the option value */
1645 *(int *)optval = val;
1646 *optsize = sizeof(val);
1647 }
1648 break;
1649 case SCTP_GET_PACKET_LOG:
1650 {
1651#ifdef SCTP_PACKET_LOGGING
1652 uint8_t *target;
1653 int ret;
1654
1655 SCTP_CHECK_AND_CAST(target, optval, uint8_t, *optsize);
1656 ret = sctp_copy_out_packet_log(target, (int)*optsize);
1657 *optsize = ret;
1658#else
1659 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
1660 error = EOPNOTSUPP;
1661#endif
1662 break;
1663 }
1664 case SCTP_REUSE_PORT:
1665 {
1666 uint32_t *value;
1667
1668 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) {
1669 /* Can't do this for a 1-m socket */
1670 error = EINVAL;
1671 break;
1672 }
1673 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
1674 *value = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE);
1675 *optsize = sizeof(uint32_t);
1676 }
1677 break;
1678 case SCTP_PARTIAL_DELIVERY_POINT:
1679 {
1680 uint32_t *value;
1681
1682 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
1683 *value = inp->partial_delivery_point;
1684 *optsize = sizeof(uint32_t);
1685 }
1686 break;
1687 case SCTP_FRAGMENT_INTERLEAVE:
1688 {
1689 uint32_t *value;
1690
1691 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
1692 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) {
1693 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) {
1694 *value = SCTP_FRAG_LEVEL_2;
1695 } else {
1696 *value = SCTP_FRAG_LEVEL_1;
1697 }
1698 } else {
1699 *value = SCTP_FRAG_LEVEL_0;
1700 }
1701 *optsize = sizeof(uint32_t);
1702 }
1703 break;
1704 case SCTP_CMT_ON_OFF:
1705 {
1706 struct sctp_assoc_value *av;
1707
1708 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
1709 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
1710 SCTP_FIND_STCB(inp, stcb, av->assoc_id);
1711 if (stcb) {
1712 av->assoc_value = stcb->asoc.sctp_cmt_on_off;
1713 SCTP_TCB_UNLOCK(stcb);
1714
1715 } else {
1716 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
1717 error = ENOTCONN;
1718 }
1719 } else {
1720 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
1721 error = ENOPROTOOPT;
1722 }
1723 *optsize = sizeof(*av);
1724 }
1725 break;
1726 /* EY - get socket option for nr_sacks */
1727 case SCTP_NR_SACK_ON_OFF:
1728 {
1729 struct sctp_assoc_value *av;
1730
1731 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
1732 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off)) {
1733 SCTP_FIND_STCB(inp, stcb, av->assoc_id);
1734 if (stcb) {
1735 av->assoc_value = stcb->asoc.sctp_nr_sack_on_off;
1736 SCTP_TCB_UNLOCK(stcb);
1737
1738 } else {
1739 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
1740 error = ENOTCONN;
1741 }
1742 } else {
1743 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
1744 error = ENOPROTOOPT;
1745 }
1746 *optsize = sizeof(*av);
1747 }
1748 break;
1749 /* JRS - Get socket option for pluggable congestion control */
1750 case SCTP_PLUGGABLE_CC:
1751 {
1752 struct sctp_assoc_value *av;
1753
1754 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
1755 SCTP_FIND_STCB(inp, stcb, av->assoc_id);
1756 if (stcb) {
1757 av->assoc_value = stcb->asoc.congestion_control_module;
1758 SCTP_TCB_UNLOCK(stcb);
1759 } else {
1760 av->assoc_value = inp->sctp_ep.sctp_default_cc_module;
1761 }
1762 *optsize = sizeof(*av);
1763 }
1764 break;
1765 case SCTP_GET_ADDR_LEN:
1766 {
1767 struct sctp_assoc_value *av;
1768
1769 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
1770 error = EINVAL;
1771#ifdef INET
1772 if (av->assoc_value == AF_INET) {
1773 av->assoc_value = sizeof(struct sockaddr_in);
1774 error = 0;
1775 }
1776#endif
1777#ifdef INET6
1778 if (av->assoc_value == AF_INET6) {
1779 av->assoc_value = sizeof(struct sockaddr_in6);
1780 error = 0;
1781 }
1782#endif
1783 if (error) {
1784 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
1785 }
1786 *optsize = sizeof(*av);
1787 }
1788 break;
1789 case SCTP_GET_ASSOC_NUMBER:
1790 {
1791 uint32_t *value, cnt;
1792
1793 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
1794 cnt = 0;
1795 SCTP_INP_RLOCK(inp);
1796 LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
1797 cnt++;
1798 }
1799 SCTP_INP_RUNLOCK(inp);
1800 *value = cnt;
1801 *optsize = sizeof(uint32_t);
1802 }
1803 break;
1804
1805 case SCTP_GET_ASSOC_ID_LIST:
1806 {
1807 struct sctp_assoc_ids *ids;
1808 unsigned int at, limit;
1809
1810 SCTP_CHECK_AND_CAST(ids, optval, struct sctp_assoc_ids, *optsize);
1811 at = 0;
1812 limit = (*optsize - sizeof(uint32_t)) / sizeof(sctp_assoc_t);
1813 SCTP_INP_RLOCK(inp);
1814 LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
1815 if (at < limit) {
1816 ids->gaids_assoc_id[at++] = sctp_get_associd(stcb);
1817 } else {
1818 error = EINVAL;
1819 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
1820 break;
1821 }
1822 }
1823 SCTP_INP_RUNLOCK(inp);
1824 ids->gaids_number_of_ids = at;
1825 *optsize = ((at * sizeof(sctp_assoc_t)) + sizeof(uint32_t));
1826 }
1827 break;
1828 case SCTP_CONTEXT:
1829 {
1830 struct sctp_assoc_value *av;
1831
1832 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
1833 SCTP_FIND_STCB(inp, stcb, av->assoc_id);
1834
1835 if (stcb) {
1836 av->assoc_value = stcb->asoc.context;
1837 SCTP_TCB_UNLOCK(stcb);
1838 } else {
1839 SCTP_INP_RLOCK(inp);
1840 av->assoc_value = inp->sctp_context;
1841 SCTP_INP_RUNLOCK(inp);
1842 }
1843 *optsize = sizeof(*av);
1844 }
1845 break;
1846 case SCTP_VRF_ID:
1847 {
1848 uint32_t *default_vrfid;
1849
1850 SCTP_CHECK_AND_CAST(default_vrfid, optval, uint32_t, *optsize);
1851 *default_vrfid = inp->def_vrf_id;
1852 break;
1853 }
1854 case SCTP_GET_ASOC_VRF:
1855 {
1856 struct sctp_assoc_value *id;
1857
1858 SCTP_CHECK_AND_CAST(id, optval, struct sctp_assoc_value, *optsize);
1859 SCTP_FIND_STCB(inp, stcb, id->assoc_id);
1860 if (stcb == NULL) {
1861 error = EINVAL;
1862 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
1863 break;
1864 }
1865 id->assoc_value = stcb->asoc.vrf_id;
1866 break;
1867 }
1868 case SCTP_GET_VRF_IDS:
1869 {
1870 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
1871 error = EOPNOTSUPP;
1872 break;
1873 }
1874 case SCTP_GET_NONCE_VALUES:
1875 {
1876 struct sctp_get_nonce_values *gnv;
1877
1878 SCTP_CHECK_AND_CAST(gnv, optval, struct sctp_get_nonce_values, *optsize);
1879 SCTP_FIND_STCB(inp, stcb, gnv->gn_assoc_id);
1880
1881 if (stcb) {
1882 gnv->gn_peers_tag = stcb->asoc.peer_vtag;
1883 gnv->gn_local_tag = stcb->asoc.my_vtag;
1884 SCTP_TCB_UNLOCK(stcb);
1885 } else {
1886 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
1887 error = ENOTCONN;
1888 }
1889 *optsize = sizeof(*gnv);
1890 }
1891 break;
1892 case SCTP_DELAYED_SACK:
1893 {
1894 struct sctp_sack_info *sack;
1895
1896 SCTP_CHECK_AND_CAST(sack, optval, struct sctp_sack_info, *optsize);
1897 SCTP_FIND_STCB(inp, stcb, sack->sack_assoc_id);
1898 if (stcb) {
1899 sack->sack_delay = stcb->asoc.delayed_ack;
1900 sack->sack_freq = stcb->asoc.sack_freq;
1901 SCTP_TCB_UNLOCK(stcb);
1902 } else {
1903 SCTP_INP_RLOCK(inp);
1904 sack->sack_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1905 sack->sack_freq = inp->sctp_ep.sctp_sack_freq;
1906 SCTP_INP_RUNLOCK(inp);
1907 }
1908 *optsize = sizeof(*sack);
1909 }
1910 break;
1911
1912 case SCTP_GET_SNDBUF_USE:
1913 {
1914 struct sctp_sockstat *ss;
1915
1916 SCTP_CHECK_AND_CAST(ss, optval, struct sctp_sockstat, *optsize);
1917 SCTP_FIND_STCB(inp, stcb, ss->ss_assoc_id);
1918
1919 if (stcb) {
1920 ss->ss_total_sndbuf = stcb->asoc.total_output_queue_size;
1921 ss->ss_total_recv_buf = (stcb->asoc.size_on_reasm_queue +
1922 stcb->asoc.size_on_all_streams);
1923 SCTP_TCB_UNLOCK(stcb);
1924 } else {
1925 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
1926 error = ENOTCONN;
1927 }
1928 *optsize = sizeof(struct sctp_sockstat);
1929 }
1930 break;
1931 case SCTP_MAX_BURST:
1932 {
1933 uint8_t *value;
1934
1935 SCTP_CHECK_AND_CAST(value, optval, uint8_t, *optsize);
1936
1937 SCTP_INP_RLOCK(inp);
1938 *value = inp->sctp_ep.max_burst;
1939 SCTP_INP_RUNLOCK(inp);
1940 *optsize = sizeof(uint8_t);
1941 }
1942 break;
1943 case SCTP_MAXSEG:
1944 {
1945 struct sctp_assoc_value *av;
1946 int ovh;
1947
1948 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
1949 SCTP_FIND_STCB(inp, stcb, av->assoc_id);
1950
1951 if (stcb) {
1952 av->assoc_value = sctp_get_frag_point(stcb, &stcb->asoc);
1953 SCTP_TCB_UNLOCK(stcb);
1954 } else {
1955 SCTP_INP_RLOCK(inp);
1956 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1957 ovh = SCTP_MED_OVERHEAD;
1958 } else {
1959 ovh = SCTP_MED_V4_OVERHEAD;
1960 }
1961 if (inp->sctp_frag_point >= SCTP_DEFAULT_MAXSEGMENT)
1962 av->assoc_value = 0;
1963 else
1964 av->assoc_value = inp->sctp_frag_point - ovh;
1965 SCTP_INP_RUNLOCK(inp);
1966 }
1967 *optsize = sizeof(struct sctp_assoc_value);
1968 }
1969 break;
1970 case SCTP_GET_STAT_LOG:
1971 error = sctp_fill_stat_log(optval, optsize);
1972 break;
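		/* Report which notification events are currently enabled on this endpoint. */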
1973 case SCTP_EVENTS:
1974 {
1975 struct sctp_event_subscribe *events;
1976
1977 SCTP_CHECK_AND_CAST(events, optval, struct sctp_event_subscribe, *optsize);
1978 memset(events, 0, sizeof(*events));
1979 SCTP_INP_RLOCK(inp);
1980 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT))
1981 events->sctp_data_io_event = 1;
1982
1983 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT))
1984 events->sctp_association_event = 1;
1985
1986 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT))
1987 events->sctp_address_event = 1;
1988
1989 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT))
1990 events->sctp_send_failure_event = 1;
1991
1992 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR))
1993 events->sctp_peer_error_event = 1;
1994
1995 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT))
1996 events->sctp_shutdown_event = 1;
1997
1998 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT))
1999 events->sctp_partial_delivery_event = 1;
2000
2001 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT))
2002 events->sctp_adaptation_layer_event = 1;
2003
2004 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT))
2005 events->sctp_authentication_event = 1;
2006
2007 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_DRYEVNT))
2008 events->sctp_sender_dry_event = 1;
2009
2010 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT))
94void
95sctp_finish(void)
96{
97 sctp_pcb_finish();
98}
99
100
101
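/*
 * Path-MTU adjustment: record the new (smaller) MTU for the association
 * and walk the send and sent queues; any queued chunk that no longer fits
 * is marked fragmentable, and oversized chunks already sent are pulled
 * back for retransmission.
 */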
102void
103sctp_pathmtu_adjustment(struct sctp_inpcb *inp,
104 struct sctp_tcb *stcb,
105 struct sctp_nets *net,
106 uint16_t nxtsz)
107{
108 struct sctp_tmit_chunk *chk;
109 uint16_t overhead;
110
111 /* Adjust the association's smallest known MTU as well */
112 stcb->asoc.smallest_mtu = nxtsz;
113 /* now off to subtract IP_DF flag if needed */
114#ifdef SCTP_PRINT_FOR_B_AND_M
115 SCTP_PRINTF("sctp_pathmtu_adjust called inp:%p stcb:%p net:%p nxtsz:%d\n",
116 inp, stcb, net, nxtsz);
117#endif
118 overhead = IP_HDR_SIZE;
119 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
120 overhead += sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
121 }
122 TAILQ_FOREACH(chk, &stcb->asoc.send_queue, sctp_next) {
123 if ((chk->send_size + overhead) > nxtsz) {
124 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
125 }
126 }
127 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
128 if ((chk->send_size + overhead) > nxtsz) {
129 /*
130 * For this guy we also mark for immediate resend
131 * since we sent too big a chunk
132 */
133 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
134 if (chk->sent < SCTP_DATAGRAM_RESEND) {
135 sctp_flight_size_decrease(chk);
136 sctp_total_flight_decrease(stcb, chk);
137 }
138 if (chk->sent != SCTP_DATAGRAM_RESEND) {
139 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
140 }
141 chk->sent = SCTP_DATAGRAM_RESEND;
142 chk->rec.data.doing_fast_retransmit = 0;
143 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
144 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PMTU,
145 chk->whoTo->flight_size,
146 chk->book_size,
147 (uintptr_t) chk->whoTo,
148 chk->rec.data.TSN_seq);
149 }
150 /* Clear do_rtt so NO RTT measurement is done on this chunk */
151 chk->do_rtt = 0;
152 }
153 }
154}
155
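/*
 * Handle an ICMP "fragmentation needed" error for an association: verify
 * the verification tag, take the next-hop MTU from the ICMP message (or
 * guess one if the router did not supply it), then shrink the destination
 * and association MTUs, stopping and restarting the PMTU-raise timer
 * around the update.
 */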
156static void
157sctp_notify_mbuf(struct sctp_inpcb *inp,
158 struct sctp_tcb *stcb,
159 struct sctp_nets *net,
160 struct ip *ip,
161 struct sctphdr *sh)
162{
163 struct icmp *icmph;
164 int totsz, tmr_stopped = 0;
165 uint16_t nxtsz;
166
167 /* protection */
168 if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
169 (ip == NULL) || (sh == NULL)) {
170 if (stcb != NULL) {
171 SCTP_TCB_UNLOCK(stcb);
172 }
173 return;
174 }
175 /* First job is to verify the vtag matches what I would send */
176 if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) {
177 SCTP_TCB_UNLOCK(stcb);
178 return;
179 }
180 icmph = (struct icmp *)((caddr_t)ip - (sizeof(struct icmp) -
181 sizeof(struct ip)));
182 if (icmph->icmp_type != ICMP_UNREACH) {
183 /* We only care about unreachable */
184 SCTP_TCB_UNLOCK(stcb);
185 return;
186 }
187 if (icmph->icmp_code != ICMP_UNREACH_NEEDFRAG) {
188 /* not an unreachable message due to fragmentation */
189 SCTP_TCB_UNLOCK(stcb);
190 return;
191 }
192 totsz = ip->ip_len;
193
194 nxtsz = ntohs(icmph->icmp_nextmtu);
195 if (nxtsz == 0) {
196 /*
197 * old-style router that does not tell us what the next MTU
198 * size is, so we will have to guess (in an educated fashion,
199 * of course)
200 */
201 nxtsz = find_next_best_mtu(totsz);
202 }
203 /* Stop any PMTU timer */
204 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
205 tmr_stopped = 1;
206 sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
207 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_1);
208 }
209 /* Adjust destination size limit */
210 if (net->mtu > nxtsz) {
211 net->mtu = nxtsz;
212 if (net->port) {
213 net->mtu -= sizeof(struct udphdr);
214 }
215 }
216 /* now what about the ep? */
217 if (stcb->asoc.smallest_mtu > nxtsz) {
218#ifdef SCTP_PRINT_FOR_B_AND_M
219 SCTP_PRINTF("notify_mbuf (ICMP) calls sctp_pathmtu_adjust mtu:%d\n",
220 nxtsz);
221#endif
222 sctp_pathmtu_adjustment(inp, stcb, net, nxtsz);
223 }
224 if (tmr_stopped)
225 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
226
227 SCTP_TCB_UNLOCK(stcb);
228}
229
230
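/*
 * Handle the remaining ICMP unreachable codes: for network/host
 * unreachable variants mark the destination down and notify the ULP;
 * for protocol/port unreachable treat it as an OOTB abort and free the
 * association.
 */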
231void
232sctp_notify(struct sctp_inpcb *inp,
233 struct ip *ip,
234 struct sctphdr *sh,
235 struct sockaddr *to,
236 struct sctp_tcb *stcb,
237 struct sctp_nets *net)
238{
239#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
240 struct socket *so;
241
242#endif
243 /* protection */
244 int reason;
245 struct icmp *icmph;
246
247
248 if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
249 (sh == NULL) || (to == NULL)) {
250 if (stcb)
251 SCTP_TCB_UNLOCK(stcb);
252 return;
253 }
254 /* First job is to verify the vtag matches what I would send */
255 if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) {
256 SCTP_TCB_UNLOCK(stcb);
257 return;
258 }
259 icmph = (struct icmp *)((caddr_t)ip - (sizeof(struct icmp) -
260 sizeof(struct ip)));
261 if (icmph->icmp_type != ICMP_UNREACH) {
262 /* We only care about unreachable */
263 SCTP_TCB_UNLOCK(stcb);
264 return;
265 }
266 if ((icmph->icmp_code == ICMP_UNREACH_NET) ||
267 (icmph->icmp_code == ICMP_UNREACH_HOST) ||
268 (icmph->icmp_code == ICMP_UNREACH_NET_UNKNOWN) ||
269 (icmph->icmp_code == ICMP_UNREACH_HOST_UNKNOWN) ||
270 (icmph->icmp_code == ICMP_UNREACH_ISOLATED) ||
271 (icmph->icmp_code == ICMP_UNREACH_NET_PROHIB) ||
272 (icmph->icmp_code == ICMP_UNREACH_HOST_PROHIB) ||
273 (icmph->icmp_code == ICMP_UNREACH_FILTER_PROHIB)) {
274
275 /*
276 * Reachability problems must be examined closely. If it is
277 * not reachable, we may have lost a network. Or if there is
278 * NO protocol at the other end named SCTP, we consider it
279 * an OOTB abort.
280 */
281 if (net->dest_state & SCTP_ADDR_REACHABLE) {
282 /* Ok that destination is NOT reachable */
283 SCTP_PRINTF("ICMP (thresh %d/%d) takes interface %p down\n",
284 net->error_count,
285 net->failure_threshold,
286 net);
287
288 net->dest_state &= ~SCTP_ADDR_REACHABLE;
289 net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
290 /*
291 * JRS 5/14/07 - If a destination is unreachable,
292 * the PF bit is turned off. This allows an
293 * unambiguous use of the PF bit for destinations
294 * that are reachable but potentially failed. If the
295 * destination is set to the unreachable state, also
296 * set the destination to the PF state.
297 */
298 /*
299 * Add debug message here if destination is not in
300 * PF state.
301 */
302 /* Stop any running T3 timers here? */
303 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf)) {
304 net->dest_state &= ~SCTP_ADDR_PF;
305 SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from PF to unreachable.\n",
306 net);
307 }
308 net->error_count = net->failure_threshold + 1;
309 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
310 stcb, SCTP_FAILED_THRESHOLD,
311 (void *)net, SCTP_SO_NOT_LOCKED);
312 }
313 SCTP_TCB_UNLOCK(stcb);
314 } else if ((icmph->icmp_code == ICMP_UNREACH_PROTOCOL) ||
315 (icmph->icmp_code == ICMP_UNREACH_PORT)) {
316 /*
317 * Here the peer is either playing tricks on us, including
318 * an address that belongs to someone who does not support
319 * SCTP, OR it was a userland implementation that shut down
320 * and is now dead. In either case treat it like an OOTB
321 * abort with no TCB.
322 */
323 reason = SCTP_PEER_FAULTY;
324 sctp_abort_notification(stcb, reason, SCTP_SO_NOT_LOCKED);
325#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
326 so = SCTP_INP_SO(inp);
327 atomic_add_int(&stcb->asoc.refcnt, 1);
328 SCTP_TCB_UNLOCK(stcb);
329 SCTP_SOCKET_LOCK(so, 1);
330 SCTP_TCB_LOCK(stcb);
331 atomic_subtract_int(&stcb->asoc.refcnt, 1);
332#endif
333 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_2);
334#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
335 SCTP_SOCKET_UNLOCK(so, 1);
336 /* SCTP_TCB_UNLOCK(stcb); MT: I think this is not needed. */
337#endif
338 /* no need to unlock here, since the TCB is gone */
339 } else {
340 SCTP_TCB_UNLOCK(stcb);
341 }
342}
343
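/*
 * IPv4 control-input handler: on an ICMP error, locate the endpoint and
 * association from the embedded SCTP header and dispatch to sctp_notify()
 * or, for PRC_MSGSIZE, to sctp_notify_mbuf().
 */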
344void
345sctp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
349{
350 struct ip *ip = vip;
351 struct sctphdr *sh;
352 uint32_t vrf_id;
353
354 /* FIX, for non-bsd is this right? */
355 vrf_id = SCTP_DEFAULT_VRFID;
356 if (sa->sa_family != AF_INET ||
357 ((struct sockaddr_in *)sa)->sin_addr.s_addr == INADDR_ANY) {
358 return;
359 }
360 if (PRC_IS_REDIRECT(cmd)) {
361 ip = 0;
362 } else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) {
363 return;
364 }
365 if (ip) {
366 struct sctp_inpcb *inp = NULL;
367 struct sctp_tcb *stcb = NULL;
368 struct sctp_nets *net = NULL;
369 struct sockaddr_in to, from;
370
371 sh = (struct sctphdr *)((caddr_t)ip + (ip->ip_hl << 2));
372 bzero(&to, sizeof(to));
373 bzero(&from, sizeof(from));
374 from.sin_family = to.sin_family = AF_INET;
375 from.sin_len = to.sin_len = sizeof(to);
376 from.sin_port = sh->src_port;
377 from.sin_addr = ip->ip_src;
378 to.sin_port = sh->dest_port;
379 to.sin_addr = ip->ip_dst;
380
381 /*
382 * 'to' holds the dest of the packet that failed to be sent.
383 * 'from' holds our local endpoint address. Thus we reverse
384 * the to and the from in the lookup.
385 */
386 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&from,
387 (struct sockaddr *)&to,
388 &inp, &net, 1, vrf_id);
389 if (stcb != NULL && inp && (inp->sctp_socket != NULL)) {
390 if (cmd != PRC_MSGSIZE) {
391 sctp_notify(inp, ip, sh,
392 (struct sockaddr *)&to, stcb,
393 net);
394 } else {
395 /* handle possible ICMP size messages */
396 sctp_notify_mbuf(inp, stcb, net, ip, sh);
397 }
398 } else {
399 if ((stcb == NULL) && (inp != NULL)) {
400 /* reduce ref-count */
401 SCTP_INP_WLOCK(inp);
402 SCTP_INP_DECR_REF(inp);
403 SCTP_INP_WUNLOCK(inp);
404 }
405 }
406 }
407 return;
408}
409
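/*
 * Sysctl handler behind net.inet.sctp.getcred: given a pair of addresses,
 * look up the association and return the owning socket's credentials to a
 * sufficiently privileged caller.
 */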
410static int
411sctp_getcred(SYSCTL_HANDLER_ARGS)
412{
413 struct xucred xuc;
414 struct sockaddr_in addrs[2];
415 struct sctp_inpcb *inp;
416 struct sctp_nets *net;
417 struct sctp_tcb *stcb;
418 int error;
419 uint32_t vrf_id;
420
421 /* FIX, for non-bsd is this right? */
422 vrf_id = SCTP_DEFAULT_VRFID;
423
424 error = priv_check(req->td, PRIV_NETINET_GETCRED);
425
426 if (error)
427 return (error);
428
429 error = SYSCTL_IN(req, addrs, sizeof(addrs));
430 if (error)
431 return (error);
432
433 stcb = sctp_findassociation_addr_sa(sintosa(&addrs[0]),
434 sintosa(&addrs[1]),
435 &inp, &net, 1, vrf_id);
436 if (stcb == NULL || inp == NULL || inp->sctp_socket == NULL) {
437 if ((inp != NULL) && (stcb == NULL)) {
438 /* reduce ref-count */
439 SCTP_INP_WLOCK(inp);
440 SCTP_INP_DECR_REF(inp);
441 goto cred_can_cont;
442 }
443 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
444 error = ENOENT;
445 goto out;
446 }
447 SCTP_TCB_UNLOCK(stcb);
448 /*
449 * We use the write lock here only because the error leg needs
450 * it. If we used RLOCK, then we would have to
451 * wlock/decr/unlock/rlock, which in theory could create a hole.
452 * Better to use the stronger wlock.
453 */
454 SCTP_INP_WLOCK(inp);
455cred_can_cont:
456 error = cr_canseesocket(req->td->td_ucred, inp->sctp_socket);
457 if (error) {
458 SCTP_INP_WUNLOCK(inp);
459 goto out;
460 }
461 cru2x(inp->sctp_socket->so_cred, &xuc);
462 SCTP_INP_WUNLOCK(inp);
463 error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
464out:
465 return (error);
466}
467
468SYSCTL_PROC(_net_inet_sctp, OID_AUTO, getcred, CTLTYPE_OPAQUE | CTLFLAG_RW,
469 0, 0, sctp_getcred, "S,ucred", "Get the ucred of a SCTP connection");
470
471
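/*
 * Abort the socket: atomically mark the PCB gone, free it (aborting any
 * associations), clear the socket buffers, and detach the socket from
 * the PCB.
 */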
472static void
473sctp_abort(struct socket *so)
474{
475 struct sctp_inpcb *inp;
476 uint32_t flags;
477
478 inp = (struct sctp_inpcb *)so->so_pcb;
479 if (inp == 0) {
480 return;
481 }
482sctp_must_try_again:
483 flags = inp->sctp_flags;
484#ifdef SCTP_LOG_CLOSING
485 sctp_log_closing(inp, NULL, 17);
486#endif
487 if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
488 (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
489#ifdef SCTP_LOG_CLOSING
490 sctp_log_closing(inp, NULL, 16);
491#endif
492 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
493 SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
494 SOCK_LOCK(so);
495 SCTP_SB_CLEAR(so->so_snd);
496 /*
497 * same for the rcv ones, they are only here for the
498 * accounting/select.
499 */
500 SCTP_SB_CLEAR(so->so_rcv);
501
502 /* Now null out the reference, we are completely detached. */
503 so->so_pcb = NULL;
504 SOCK_UNLOCK(so);
505 } else {
506 flags = inp->sctp_flags;
507 if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
508 goto sctp_must_try_again;
509 }
510 }
511 return;
512}
513
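/*
 * Attach a new IPv4 SCTP socket: reserve default send/receive buffer
 * space, allocate the SCTP PCB, and initialize its IP-level settings
 * (and IPsec policy when compiled in).
 */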
514static int
515sctp_attach(struct socket *so, int proto, struct thread *p)
516{
517 struct sctp_inpcb *inp;
518 struct inpcb *ip_inp;
519 int error;
520 uint32_t vrf_id = SCTP_DEFAULT_VRFID;
521
522#ifdef IPSEC
523 uint32_t flags;
524
525#endif
526
527 inp = (struct sctp_inpcb *)so->so_pcb;
528 if (inp != 0) {
529 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
530 return EINVAL;
531 }
532 if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
533 error = SCTP_SORESERVE(so, SCTP_BASE_SYSCTL(sctp_sendspace), SCTP_BASE_SYSCTL(sctp_recvspace));
534 if (error) {
535 return error;
536 }
537 }
538 error = sctp_inpcb_alloc(so, vrf_id);
539 if (error) {
540 return error;
541 }
542 inp = (struct sctp_inpcb *)so->so_pcb;
543 SCTP_INP_WLOCK(inp);
544 inp->sctp_flags &= ~SCTP_PCB_FLAGS_BOUND_V6; /* I'm not v6! */
545 ip_inp = &inp->ip_inp.inp;
546 ip_inp->inp_vflag |= INP_IPV4;
547 ip_inp->inp_ip_ttl = MODULE_GLOBAL(ip_defttl);
548#ifdef IPSEC
549 error = ipsec_init_policy(so, &ip_inp->inp_sp);
550#ifdef SCTP_LOG_CLOSING
551 sctp_log_closing(inp, NULL, 17);
552#endif
553 if (error != 0) {
554 flags = inp->sctp_flags;
555 if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
556 (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
557#ifdef SCTP_LOG_CLOSING
558 sctp_log_closing(inp, NULL, 15);
559#endif
560 SCTP_INP_WUNLOCK(inp);
561 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
562 SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
563 } else {
564 SCTP_INP_WUNLOCK(inp);
565 }
566 return error;
567 }
568#endif /* IPSEC */
569 SCTP_INP_WUNLOCK(inp);
570 return 0;
571}
572
573static int
574sctp_bind(struct socket *so, struct sockaddr *addr, struct thread *p)
575{
576 struct sctp_inpcb *inp = NULL;
577 int error;
578
579#ifdef INET6
580 if (addr && addr->sa_family != AF_INET) {
581 /* must be a v4 address! */
582 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
583 return EINVAL;
584 }
585#endif /* INET6 */
586 if (addr && (addr->sa_len != sizeof(struct sockaddr_in))) {
587 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
588 return EINVAL;
589 }
590 inp = (struct sctp_inpcb *)so->so_pcb;
591 if (inp == 0) {
592 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
593 return EINVAL;
594 }
595 error = sctp_inpcb_bind(so, addr, NULL, p);
596 return error;
597}
598
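/*
 * Close the socket: free the PCB, aborting if SO_LINGER with a zero
 * linger time is set or unread data remains, otherwise closing
 * gracefully; then clear the socket buffers and detach the socket.
 */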
599void
600sctp_close(struct socket *so)
601{
602 struct sctp_inpcb *inp;
603 uint32_t flags;
604
605 inp = (struct sctp_inpcb *)so->so_pcb;
606 if (inp == 0)
607 return;
608
609 /*
610 * Inform all the lower-layer associations that we are done.
611 */
612sctp_must_try_again:
613 flags = inp->sctp_flags;
614#ifdef SCTP_LOG_CLOSING
615 sctp_log_closing(inp, NULL, 17);
616#endif
617 if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
618 (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
619 if (((so->so_options & SO_LINGER) && (so->so_linger == 0)) ||
620 (so->so_rcv.sb_cc > 0)) {
621#ifdef SCTP_LOG_CLOSING
622 sctp_log_closing(inp, NULL, 13);
623#endif
624 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
625 SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
626 } else {
627#ifdef SCTP_LOG_CLOSING
628 sctp_log_closing(inp, NULL, 14);
629#endif
630 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_GRACEFUL_CLOSE,
631 SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
632 }
633 /*
634 * The socket is now detached, no matter what the state of
635 * the SCTP association.
636 */
637 SOCK_LOCK(so);
638 SCTP_SB_CLEAR(so->so_snd);
639 /*
640 * same for the rcv ones, they are only here for the
641 * accounting/select.
642 */
643 SCTP_SB_CLEAR(so->so_rcv);
644
645 /* Now null out the reference, we are completely detached. */
646 so->so_pcb = NULL;
647 SOCK_UNLOCK(so);
648 } else {
649 flags = inp->sctp_flags;
650 if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
651 goto sctp_must_try_again;
652 }
653 }
654 return;
655}
656
657
658int
659sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
660 struct mbuf *control, struct thread *p);
661
662
663int
664sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
665 struct mbuf *control, struct thread *p)
666{
667 struct sctp_inpcb *inp;
668 int error;
669
670 inp = (struct sctp_inpcb *)so->so_pcb;
671 if (inp == 0) {
672 if (control) {
673 sctp_m_freem(control);
674 control = NULL;
675 }
676 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
677 sctp_m_freem(m);
678 return EINVAL;
679 }
680 /* Got to have a destination address if we are NOT a connected socket */
681 if ((addr == NULL) &&
682 ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
683 (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE))
684 ) {
685 goto connected_type;
686 } else if (addr == NULL) {
687 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EDESTADDRREQ);
688 error = EDESTADDRREQ;
689 sctp_m_freem(m);
690 if (control) {
691 sctp_m_freem(control);
692 control = NULL;
693 }
694 return (error);
695 }
696#ifdef INET6
697 if (addr->sa_family != AF_INET) {
698 /* must be a v4 address! */
699 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EDESTADDRREQ);
700 sctp_m_freem(m);
701 if (control) {
702 sctp_m_freem(control);
703 control = NULL;
704 }
705 error = EDESTADDRREQ;
706 return EDESTADDRREQ;
707 }
708#endif /* INET6 */
709connected_type:
710 /* now what about control */
711 if (control) {
712 if (inp->control) {
713 SCTP_PRINTF("huh? control set?\n");
714 sctp_m_freem(inp->control);
715 inp->control = NULL;
716 }
717 inp->control = control;
718 }
719 /* Place the data */
720 if (inp->pkt) {
721 SCTP_BUF_NEXT(inp->pkt_last) = m;
722 inp->pkt_last = m;
723 } else {
724 inp->pkt_last = inp->pkt = m;
725 }
726 if (
727 /* FreeBSD uses a flag passed */
728 ((flags & PRUS_MORETOCOME) == 0)
729 ) {
730 /*
731 * Note: with the current version this code will only be used
732 * by OpenBSD; NetBSD, FreeBSD, and MacOS have methods for
733 * re-defining sosend to use sctp_sosend. One can
734 * optionally switch back to this code (by changing back the
735 * definitions), but this is not advisable. This code is still
736 * used by FreeBSD when sending a file with sendfile(), though.
737 */
738 int ret;
739
740 ret = sctp_output(inp, inp->pkt, addr, inp->control, p, flags);
741 inp->pkt = NULL;
742 inp->control = NULL;
743 return (ret);
744 } else {
745 return (0);
746 }
747}
748
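/*
 * Disconnect a 1-to-1 style socket: abort the association if SO_LINGER
 * is set with a zero timeout or unread data remains, otherwise send
 * SHUTDOWN now or mark the association SHUTDOWN_PENDING while data is
 * still queued. 1-to-many sockets get EOPNOTSUPP.
 */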
749int
750sctp_disconnect(struct socket *so)
751{
752 struct sctp_inpcb *inp;
753
754 inp = (struct sctp_inpcb *)so->so_pcb;
755 if (inp == NULL) {
756 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
757 return (ENOTCONN);
758 }
759 SCTP_INP_RLOCK(inp);
760 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
761 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
762 if (LIST_EMPTY(&inp->sctp_asoc_list)) {
763 /* No connection */
764 SCTP_INP_RUNLOCK(inp);
765 return (0);
766 } else {
767 struct sctp_association *asoc;
768 struct sctp_tcb *stcb;
769
770 stcb = LIST_FIRST(&inp->sctp_asoc_list);
771 if (stcb == NULL) {
772 SCTP_INP_RUNLOCK(inp);
773 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
774 return (EINVAL);
775 }
776 SCTP_TCB_LOCK(stcb);
777 asoc = &stcb->asoc;
778 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
779 /* We are about to be freed, out of here */
780 SCTP_TCB_UNLOCK(stcb);
781 SCTP_INP_RUNLOCK(inp);
782 return (0);
783 }
784 if (((so->so_options & SO_LINGER) &&
785 (so->so_linger == 0)) ||
786 (so->so_rcv.sb_cc > 0)) {
787 if (SCTP_GET_STATE(asoc) !=
788 SCTP_STATE_COOKIE_WAIT) {
789 /* Left with Data unread */
790 struct mbuf *err;
791
792 err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
793 if (err) {
794 /*
795 * Fill in the user
796 * initiated abort
797 */
798 struct sctp_paramhdr *ph;
799
800 ph = mtod(err, struct sctp_paramhdr *);
801 SCTP_BUF_LEN(err) = sizeof(struct sctp_paramhdr);
802 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
803 ph->param_length = htons(SCTP_BUF_LEN(err));
804 }
805#if defined(SCTP_PANIC_ON_ABORT)
806 panic("disconnect does an abort");
807#endif
808 sctp_send_abort_tcb(stcb, err, SCTP_SO_LOCKED);
809 SCTP_STAT_INCR_COUNTER32(sctps_aborted);
810 }
811 SCTP_INP_RUNLOCK(inp);
812 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
813 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
814 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
815 }
816 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_3);
817 /* No unlock tcb assoc is gone */
818 return (0);
819 }
820 if (TAILQ_EMPTY(&asoc->send_queue) &&
821 TAILQ_EMPTY(&asoc->sent_queue) &&
822 (asoc->stream_queue_cnt == 0)) {
823 /* there is nothing queued to send, so done */
824 if (asoc->locked_on_sending) {
825 goto abort_anyway;
826 }
827 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
828 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
829 /* only send SHUTDOWN 1st time thru */
830 sctp_stop_timers_for_shutdown(stcb);
831 sctp_send_shutdown(stcb,
832 stcb->asoc.primary_destination);
833 sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_LOCKED);
834 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
835 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
836 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
837 }
838 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
839 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
840 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
841 stcb->sctp_ep, stcb,
842 asoc->primary_destination);
843 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
844 stcb->sctp_ep, stcb,
845 asoc->primary_destination);
846 }
847 } else {
848 /*
849 * we still got (or just got) data to send,
850 * so set SHUTDOWN_PENDING
851 */
852 /*
853 * XXX sockets draft says that SCTP_EOF
854 * should be sent with no data. currently,
855 * we will allow user data to be sent first
856 * and move to SHUTDOWN-PENDING
857 */
858 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
859 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
860 asoc->primary_destination);
861 if (asoc->locked_on_sending) {
862 /* Locked to send out the data */
863 struct sctp_stream_queue_pending *sp;
864
865 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
866 if (sp == NULL) {
867 SCTP_PRINTF("Error, sp is NULL, locked on sending is non-null strm:%d\n",
868 asoc->locked_on_sending->stream_no);
869 } else {
870 if ((sp->length == 0) && (sp->msg_is_complete == 0))
871 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
872 }
873 }
874 if (TAILQ_EMPTY(&asoc->send_queue) &&
875 TAILQ_EMPTY(&asoc->sent_queue) &&
876 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
877 struct mbuf *op_err;
878
879 abort_anyway:
880 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
881 0, M_DONTWAIT, 1, MT_DATA);
882 if (op_err) {
883 /*
884 * Fill in the user
885 * initiated abort
886 */
887 struct sctp_paramhdr *ph;
888 uint32_t *ippp;
889
890 SCTP_BUF_LEN(op_err) =
891 (sizeof(struct sctp_paramhdr) + sizeof(uint32_t));
892 ph = mtod(op_err,
893 struct sctp_paramhdr *);
894 ph->param_type = htons(
895 SCTP_CAUSE_USER_INITIATED_ABT);
896 ph->param_length = htons(SCTP_BUF_LEN(op_err));
897 ippp = (uint32_t *) (ph + 1);
898 *ippp = htonl(SCTP_FROM_SCTP_USRREQ + SCTP_LOC_4);
899 }
900#if defined(SCTP_PANIC_ON_ABORT)
901 panic("disconnect does an abort");
902#endif
903
904 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_4;
905 sctp_send_abort_tcb(stcb, op_err, SCTP_SO_LOCKED);
906 SCTP_STAT_INCR_COUNTER32(sctps_aborted);
907 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
908 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
909 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
910 }
911 SCTP_INP_RUNLOCK(inp);
912 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_5);
913 return (0);
914 } else {
915 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED);
916 }
917 }
918 soisdisconnecting(so);
919 SCTP_TCB_UNLOCK(stcb);
920 SCTP_INP_RUNLOCK(inp);
921 return (0);
922 }
923 /* not reached */
924 } else {
925 /* UDP model does not support this */
926 SCTP_INP_RUNLOCK(inp);
927 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
928 return EOPNOTSUPP;
929 }
930}
931
932int
933sctp_flush(struct socket *so, int how)
934{
935 /*
936 * We will just clear out the values and let the subsequent close
937 * clear out the data, if any. Note that if the user did a
938 * shutdown(SHUT_RD) they will not be able to read the data; the
939 * socket will block that from happening.
940 */
941 if ((how == PRU_FLUSH_RD) || (how == PRU_FLUSH_RDWR)) {
942 /*
943 * First make sure the sb will be happy, we don't use these
944 * except maybe the count
945 */
946 so->so_rcv.sb_cc = 0;
947 so->so_rcv.sb_mbcnt = 0;
948 so->so_rcv.sb_mb = NULL;
949 }
950 if ((how == PRU_FLUSH_WR) || (how == PRU_FLUSH_RDWR)) {
951 /*
952 * First make sure the sb will be happy, we don't use these
953 * except maybe the count
954 */
955 so->so_snd.sb_cc = 0;
956 so->so_snd.sb_mbcnt = 0;
957 so->so_snd.sb_mb = NULL;
958
959 }
960 return (0);
961}
962
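/*
 * shutdown(2) handler for 1-to-1 style sockets: disallow further sends
 * and either send SHUTDOWN immediately (all queues empty) or mark the
 * association SHUTDOWN_PENDING; an incomplete user message left on the
 * stream queue forces an abort. 1-to-many sockets get EOPNOTSUPP.
 */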
963int
964sctp_shutdown(struct socket *so)
965{
966 struct sctp_inpcb *inp;
967
968 inp = (struct sctp_inpcb *)so->so_pcb;
969 if (inp == 0) {
970 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
971 return EINVAL;
972 }
973 SCTP_INP_RLOCK(inp);
974 /* For the UDP model this is an invalid call */
975 if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
976 /* Restore the flags that the soshutdown took away. */
977 so->so_rcv.sb_state &= ~SBS_CANTRCVMORE;
978 /* This proc will wakeup for read and do nothing (I hope) */
979 SCTP_INP_RUNLOCK(inp);
980 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
981 return (EOPNOTSUPP);
982 }
983 /*
984 * OK, if we reach here it's the TCP model and it is either a SHUT_WR
985 * or a SHUT_RDWR. This means we put the shutdown flag against it.
986 */
987 {
988 struct sctp_tcb *stcb;
989 struct sctp_association *asoc;
990
991 if ((so->so_state &
992 (SS_ISCONNECTED | SS_ISCONNECTING | SS_ISDISCONNECTING)) == 0) {
993 SCTP_INP_RUNLOCK(inp);
994 return (ENOTCONN);
995 }
996 socantsendmore(so);
997
998 stcb = LIST_FIRST(&inp->sctp_asoc_list);
999 if (stcb == NULL) {
1000 /*
1001 * Ok we hit the case that the shutdown call was
1002 * made after an abort or something. Nothing to do
1003 * now.
1004 */
1005 SCTP_INP_RUNLOCK(inp);
1006 return (0);
1007 }
1008 SCTP_TCB_LOCK(stcb);
1009 asoc = &stcb->asoc;
1010 if (TAILQ_EMPTY(&asoc->send_queue) &&
1011 TAILQ_EMPTY(&asoc->sent_queue) &&
1012 (asoc->stream_queue_cnt == 0)) {
1013 if (asoc->locked_on_sending) {
1014 goto abort_anyway;
1015 }
1016 /* there is nothing queued to send, so I'm done... */
1017 if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
1018 /* only send SHUTDOWN the first time through */
1019 sctp_stop_timers_for_shutdown(stcb);
1020 sctp_send_shutdown(stcb,
1021 stcb->asoc.primary_destination);
1022 sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_LOCKED);
1023 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
1024 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
1025 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
1026 }
1027 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
1028 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
1029 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
1030 stcb->sctp_ep, stcb,
1031 asoc->primary_destination);
1032 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1033 stcb->sctp_ep, stcb,
1034 asoc->primary_destination);
1035 }
1036 } else {
1037 /*
1038 * we still got (or just got) data to send, so set
1039 * SHUTDOWN_PENDING
1040 */
1041 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
1042 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
1043 asoc->primary_destination);
1044
1045 if (asoc->locked_on_sending) {
1046 /* Locked to send out the data */
1047 struct sctp_stream_queue_pending *sp;
1048
1049 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
1050 if (sp == NULL) {
1051 SCTP_PRINTF("Error, sp is NULL, locked on sending is non-null strm:%d\n",
1052 asoc->locked_on_sending->stream_no);
1053 } else {
1054 if ((sp->length == 0) && (sp->msg_is_complete == 0)) {
1055 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
1056 }
1057 }
1058 }
1059 if (TAILQ_EMPTY(&asoc->send_queue) &&
1060 TAILQ_EMPTY(&asoc->sent_queue) &&
1061 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
1062 struct mbuf *op_err;
1063
1064 abort_anyway:
1065 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
1066 0, M_DONTWAIT, 1, MT_DATA);
1067 if (op_err) {
1068 /* Fill in the user initiated abort */
1069 struct sctp_paramhdr *ph;
1070 uint32_t *ippp;
1071
1072 SCTP_BUF_LEN(op_err) =
1073 sizeof(struct sctp_paramhdr) + sizeof(uint32_t);
1074 ph = mtod(op_err,
1075 struct sctp_paramhdr *);
1076 ph->param_type = htons(
1077 SCTP_CAUSE_USER_INITIATED_ABT);
1078 ph->param_length = htons(SCTP_BUF_LEN(op_err));
1079 ippp = (uint32_t *) (ph + 1);
1080 *ippp = htonl(SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6);
1081 }
1082#if defined(SCTP_PANIC_ON_ABORT)
1083 panic("shutdown does an abort");
1084#endif
1085 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6;
1086 sctp_abort_an_association(stcb->sctp_ep, stcb,
1087 SCTP_RESPONSE_TO_USER_REQ,
1088 op_err, SCTP_SO_LOCKED);
1089 goto skip_unlock;
1090 } else {
1091 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED);
1092 }
1093 }
1094 SCTP_TCB_UNLOCK(stcb);
1095 }
1096skip_unlock:
1097 SCTP_INP_RUNLOCK(inp);
1098 return 0;
1099}
1100
1101/*
1102 * copies a "user" presentable address and removes embedded scope, etc.
1103 * returns 0 on success, 1 on error
1104 */
1105static uint32_t
1106sctp_fill_user_address(struct sockaddr_storage *ss, struct sockaddr *sa)
1107{
1108#ifdef INET6
1109 struct sockaddr_in6 lsa6;
1110
1111 sa = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)sa,
1112 &lsa6);
1113#endif
1114 memcpy(ss, sa, sa->sa_len);
1115 return (0);
1116}
1117
1118
1119
1120/*
1121 * NOTE: assumes addr lock is held
1122 */
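/*
 * Pack the endpoint's (or association's) usable addresses for the given
 * VRF into the caller's buffer, honoring address scopes and the size
 * limit; returns the number of bytes written.
 */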
1123static size_t
1124sctp_fill_up_addresses_vrf(struct sctp_inpcb *inp,
1125 struct sctp_tcb *stcb,
1126 size_t limit,
1127 struct sockaddr_storage *sas,
1128 uint32_t vrf_id)
1129{
1130 struct sctp_ifn *sctp_ifn;
1131 struct sctp_ifa *sctp_ifa;
1132 int loopback_scope, ipv4_local_scope, local_scope, site_scope;
1133 size_t actual;
1134 int ipv4_addr_legal, ipv6_addr_legal;
1135 struct sctp_vrf *vrf;
1136
1137 actual = 0;
1138 if (limit <= 0)
1139 return (actual);
1140
1141 if (stcb) {
1142 /* Turn on all the appropriate scope */
1143 loopback_scope = stcb->asoc.loopback_scope;
1144 ipv4_local_scope = stcb->asoc.ipv4_local_scope;
1145 local_scope = stcb->asoc.local_scope;
1146 site_scope = stcb->asoc.site_scope;
1147 } else {
1148 /* Turn on ALL scope, since we look at the EP */
1149 loopback_scope = ipv4_local_scope = local_scope =
1150 site_scope = 1;
1151 }
1152 ipv4_addr_legal = ipv6_addr_legal = 0;
1153 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1154 ipv6_addr_legal = 1;
1155 if (SCTP_IPV6_V6ONLY(inp) == 0) {
1156 ipv4_addr_legal = 1;
1157 }
1158 } else {
1159 ipv4_addr_legal = 1;
1160 }
1161 vrf = sctp_find_vrf(vrf_id);
1162 if (vrf == NULL) {
1163 return (0);
1164 }
1165 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
1166 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
1167 if ((loopback_scope == 0) &&
1168 SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
1169 /* Skip loopback if loopback_scope not set */
1170 continue;
1171 }
1172 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
1173 if (stcb) {
1174 /*
1175 * For the BOUND-ALL case, the list
1176 * associated with a TCB is always
1177 * considered a reverse list, i.e.
1178 * it lists addresses that are NOT
1179 * part of the association. If this
1180 * is one of those we must skip it.
1181 */
1182 if (sctp_is_addr_restricted(stcb,
1183 sctp_ifa)) {
1184 continue;
1185 }
1186 }
1187 switch (sctp_ifa->address.sa.sa_family) {
1188 case AF_INET:
1189 if (ipv4_addr_legal) {
1190 struct sockaddr_in *sin;
1191
1192 sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
1193 if (sin->sin_addr.s_addr == 0) {
1194 /*
1195 * we skip
1196 * unspecified
1197 * addresses
1198 */
1199 continue;
1200 }
1201 if ((ipv4_local_scope == 0) &&
1202 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
1203 continue;
1204 }
1205#ifdef INET6
1206 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
1207 in6_sin_2_v4mapsin6(sin, (struct sockaddr_in6 *)sas);
1208 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
1209 sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(struct sockaddr_in6));
1210 actual += sizeof(struct sockaddr_in6);
1211 } else {
1212#endif
1213 memcpy(sas, sin, sizeof(*sin));
1214 ((struct sockaddr_in *)sas)->sin_port = inp->sctp_lport;
1215 sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin));
1216 actual += sizeof(*sin);
1217#ifdef INET6
1218 }
1219#endif
1220 if (actual >= limit) {
1221 return (actual);
1222 }
1223 } else {
1224 continue;
1225 }
1226 break;
1227#ifdef INET6
1228 case AF_INET6:
1229 if (ipv6_addr_legal) {
1230 struct sockaddr_in6 *sin6;
1231
1232 sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
1233 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1234 /*
1235 * we skip
1236 * unspecified
1237 * addresses
1238 */
1239 continue;
1240 }
1241 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
1242 if (local_scope == 0)
1243 continue;
1244 if (sin6->sin6_scope_id == 0) {
1245 if (sa6_recoverscope(sin6) != 0)
1246 /* bad link-local address */
1260 continue;
1261 }
1262 }
1263 if ((site_scope == 0) &&
1264 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
1265 continue;
1266 }
1267 memcpy(sas, sin6, sizeof(*sin6));
1268 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
1269 sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin6));
1270 actual += sizeof(*sin6);
1271 if (actual >= limit) {
1272 return (actual);
1273 }
1274 } else {
1275 continue;
1276 }
1277 break;
1278#endif
1279 default:
1280 /* TSNH */
1281 break;
1282 }
1283 }
1284 }
1285 } else {
1286 struct sctp_laddr *laddr;
1287
1288 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
1289 if (stcb) {
1290 if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
1291 continue;
1292 }
1293 }
1294 if (sctp_fill_user_address(sas, &laddr->ifa->address.sa))
1295 continue;
1296
1297 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
1298 sas = (struct sockaddr_storage *)((caddr_t)sas +
1299 laddr->ifa->address.sa.sa_len);
1300 actual += laddr->ifa->address.sa.sa_len;
1301 if (actual >= limit) {
1302 return (actual);
1303 }
1304 }
1305 }
1306 return (actual);
1307}
1308
1309static size_t
1310sctp_fill_up_addresses(struct sctp_inpcb *inp,
1311 struct sctp_tcb *stcb,
1312 size_t limit,
1313 struct sockaddr_storage *sas)
1314{
1315 size_t size = 0;
1316
1317 SCTP_IPI_ADDR_RLOCK();
1318 /* fill up addresses for the endpoint's default vrf */
1319 size = sctp_fill_up_addresses_vrf(inp, stcb, limit, sas,
1320 inp->def_vrf_id);
1321 SCTP_IPI_ADDR_RUNLOCK();
1322 return (size);
1323}
1324
1325/*
1326 * NOTE: assumes addr lock is held
1327 */
1328static int
1329sctp_count_max_addresses_vrf(struct sctp_inpcb *inp, uint32_t vrf_id)
1330{
1331 int cnt = 0;
1332 struct sctp_vrf *vrf = NULL;
1333
1334 /*
1335 * In both the sub-set bound and bound_all cases we return the MAXIMUM
1336 * number of addresses that you COULD get. In reality the sub-set
1337 * bound may have an exclusion list for a given TCB, OR in the
1338 * bound-all case a TCB may NOT include the loopback or other
1339 * addresses as well.
1340 */
1341 vrf = sctp_find_vrf(vrf_id);
1342 if (vrf == NULL) {
1343 return (0);
1344 }
1345 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
1346 struct sctp_ifn *sctp_ifn;
1347 struct sctp_ifa *sctp_ifa;
1348
1349 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
1350 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
1351 /* Count them if they are the right type */
1352 if (sctp_ifa->address.sa.sa_family == AF_INET) {
1353 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4))
1354 cnt += sizeof(struct sockaddr_in6);
1355 else
1356 cnt += sizeof(struct sockaddr_in);
1357
1358 } else if (sctp_ifa->address.sa.sa_family == AF_INET6)
1359 cnt += sizeof(struct sockaddr_in6);
1360 }
1361 }
1362 } else {
1363 struct sctp_laddr *laddr;
1364
1365 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
1366 if (laddr->ifa->address.sa.sa_family == AF_INET) {
1367 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4))
1368 cnt += sizeof(struct sockaddr_in6);
1369 else
1370 cnt += sizeof(struct sockaddr_in);
1371
1372 } else if (laddr->ifa->address.sa.sa_family == AF_INET6)
1373 cnt += sizeof(struct sockaddr_in6);
1374 }
1375 }
1376 return (cnt);
1377}
1378
1379static int
1380sctp_count_max_addresses(struct sctp_inpcb *inp)
1381{
1382 int cnt = 0;
1383
1384 SCTP_IPI_ADDR_RLOCK();
1385 /* count addresses for the endpoint's default VRF */
1386 cnt = sctp_count_max_addresses_vrf(inp, inp->def_vrf_id);
1387 SCTP_IPI_ADDR_RUNLOCK();
1388 return (cnt);
1389}
1390
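/*
 * Worker for sctp_connectx(): validate the packed address list, bind an
 * ephemeral port if needed, create the association, start INIT (either
 * immediately or via the delayed-connection timer), and return the new
 * association id through optval.
 */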
1391static int
1392sctp_do_connect_x(struct socket *so, struct sctp_inpcb *inp, void *optval,
1393 size_t optsize, void *p, int delay)
1394{
1395 int error = 0;
1396 int creat_lock_on = 0;
1397 struct sctp_tcb *stcb = NULL;
1398 struct sockaddr *sa;
1399 int num_v6 = 0, num_v4 = 0, *totaddrp, totaddr;
1400 int added = 0;
1401 uint32_t vrf_id;
1402 int bad_addresses = 0;
1403 sctp_assoc_t *a_id;
1404
1405 SCTPDBG(SCTP_DEBUG_PCB1, "Connectx called\n");
1406
1407 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
1408 (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
1409 /* We are already connected AND the TCP model */
1410 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE);
1411 return (EADDRINUSE);
1412 }
1413 if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) &&
1414 (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE))) {
1415 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
1416 return (EINVAL);
1417 }
1418 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
1419 SCTP_INP_RLOCK(inp);
1420 stcb = LIST_FIRST(&inp->sctp_asoc_list);
1421 SCTP_INP_RUNLOCK(inp);
1422 }
1423 if (stcb) {
1424 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
1425 return (EALREADY);
1426 }
1427 SCTP_INP_INCR_REF(inp);
1428 SCTP_ASOC_CREATE_LOCK(inp);
1429 creat_lock_on = 1;
1430 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1431 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
1432 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EFAULT);
1433 error = EFAULT;
1434 goto out_now;
1435 }
1436 totaddrp = (int *)optval;
1437 totaddr = *totaddrp;
1438 sa = (struct sockaddr *)(totaddrp + 1);
1439 stcb = sctp_connectx_helper_find(inp, sa, &totaddr, &num_v4, &num_v6, &error, (optsize - sizeof(int)), &bad_addresses);
1440 if ((stcb != NULL) || bad_addresses) {
1441 /* Already have or am bringing up an association */
1442 SCTP_ASOC_CREATE_UNLOCK(inp);
1443 creat_lock_on = 0;
1444 if (stcb)
1445 SCTP_TCB_UNLOCK(stcb);
1446 if (bad_addresses == 0) {
1447 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
1448 error = EALREADY;
1449 }
1450 goto out_now;
1451 }
1452#ifdef INET6
1453 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
1454 (num_v6 > 0)) {
1455 error = EINVAL;
1456 goto out_now;
1457 }
1458 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
1459 (num_v4 > 0)) {
1460 struct in6pcb *inp6;
1461
1462 inp6 = (struct in6pcb *)inp;
1463 if (SCTP_IPV6_V6ONLY(inp6)) {
1464 /*
1465 * if IPV6_V6ONLY flag, ignore connections destined
1466 * to a v4 addr or v4-mapped addr
1467 */
1468 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
1469 error = EINVAL;
1470 goto out_now;
1471 }
1472 }
1473#endif /* INET6 */
1474 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
1475 SCTP_PCB_FLAGS_UNBOUND) {
1476 /* Bind an ephemeral port */
1477 error = sctp_inpcb_bind(so, NULL, NULL, p);
1478 if (error) {
1479 goto out_now;
1480 }
1481 }
1482 /* FIX ME: do we want to pass in a vrf on the connect call? */
1483 vrf_id = inp->def_vrf_id;
1484
1485
1486 /* We are GOOD to go */
1487 stcb = sctp_aloc_assoc(inp, sa, 1, &error, 0, vrf_id,
1488 (struct thread *)p
1489 );
1490 if (stcb == NULL) {
1491 /* Gak! no memory */
1492 goto out_now;
1493 }
1494 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT);
1495 /* move to second address */
1496 if (sa->sa_family == AF_INET)
1497 sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in));
1498 else
1499 sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in6));
1500
1501 error = 0;
1502 added = sctp_connectx_helper_add(stcb, sa, (totaddr - 1), &error);
1503 /* Fill in the return id */
1504 if (error) {
1505 (void)sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_12);
1506 goto out_now;
1507 }
1508 a_id = (sctp_assoc_t *) optval;
1509 *a_id = sctp_get_associd(stcb);
1510
1511 /* initialize authentication parameters for the assoc */
1512 sctp_initialize_auth_params(inp, stcb);
1513
1514 if (delay) {
1515 /* doing delayed connection */
1516 stcb->asoc.delayed_connection = 1;
1517 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, stcb->asoc.primary_destination);
1518 } else {
1519 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
1520 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
1521 }
1522 SCTP_TCB_UNLOCK(stcb);
1523 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
1524 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
1525 /* Set the connected flag so we can queue data */
1526 soisconnecting(so);
1527 }
1528out_now:
1529 if (creat_lock_on) {
1530 SCTP_ASOC_CREATE_UNLOCK(inp);
1531 }
1532 SCTP_INP_DECR_REF(inp);
1533 return error;
1534}
1535
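/*
 * Locate and lock the TCB referenced by a socket option: for 1-to-1 style
 * sockets use the endpoint's single association, otherwise look it up by
 * assoc_id (NULL when assoc_id is zero); sets ENOENT and breaks out of
 * the option switch if no such association exists.
 */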
1536#define SCTP_FIND_STCB(inp, stcb, assoc_id) { \
1537 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||\
1538 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { \
1539 SCTP_INP_RLOCK(inp); \
1540 stcb = LIST_FIRST(&inp->sctp_asoc_list); \
1541 if (stcb) { \
1542 SCTP_TCB_LOCK(stcb); \
1543 } \
1544 SCTP_INP_RUNLOCK(inp); \
1545 } else if (assoc_id != 0) { \
1546 stcb = sctp_findassociation_ep_asocid(inp, assoc_id, 1); \
1547 if (stcb == NULL) { \
1548 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); \
1549 error = ENOENT; \
1550 break; \
1551 } \
1552 } else { \
1553 stcb = NULL; \
1554 } \
1555 }
1556
1557
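/*
 * Verify that the supplied option buffer is at least sizeof(type) before
 * casting it; otherwise flag EINVAL and break out of the option switch.
 */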
1558#define SCTP_CHECK_AND_CAST(destp, srcp, type, size) {\
1559 if (size < sizeof(type)) { \
1560 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); \
1561 error = EINVAL; \
1562 break; \
1563 } else { \
1564 destp = (type *)srcp; \
1565 } \
1566 }
1567
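/*
 * getsockopt(2) worker: dispatch on the option name, copy the requested
 * value into optval, and report its size back through optsize.
 */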
1568static int
1569sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize,
1570 void *p)
1571{
1572 struct sctp_inpcb *inp = NULL;
1573 int error, val = 0;
1574 struct sctp_tcb *stcb = NULL;
1575
1576 if (optval == NULL) {
1577 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
1578 return (EINVAL);
1579 }
1580 inp = (struct sctp_inpcb *)so->so_pcb;
1581 if (inp == 0) {
1582 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
1583 return EINVAL;
1584 }
1585 error = 0;
1586
1587 switch (optname) {
1588 case SCTP_NODELAY:
1589 case SCTP_AUTOCLOSE:
1590 case SCTP_EXPLICIT_EOR:
1591 case SCTP_AUTO_ASCONF:
1592 case SCTP_DISABLE_FRAGMENTS:
1593 case SCTP_I_WANT_MAPPED_V4_ADDR:
1594 case SCTP_USE_EXT_RCVINFO:
1595 SCTP_INP_RLOCK(inp);
1596 switch (optname) {
1597 case SCTP_DISABLE_FRAGMENTS:
1598 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT);
1599 break;
1600 case SCTP_I_WANT_MAPPED_V4_ADDR:
1601 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4);
1602 break;
1603 case SCTP_AUTO_ASCONF:
1604 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
1605 /* only valid for bound all sockets */
1606 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTO_ASCONF);
1607 } else {
1608 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
1609 error = EINVAL;
1610 goto flags_out;
1611 }
1612 break;
1613 case SCTP_EXPLICIT_EOR:
1614 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
1615 break;
1616 case SCTP_NODELAY:
1617 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY);
1618 break;
1619 case SCTP_USE_EXT_RCVINFO:
1620 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO);
1621 break;
1622 case SCTP_AUTOCLOSE:
1623 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))
1624 val = TICKS_TO_SEC(inp->sctp_ep.auto_close_time);
1625 else
1626 val = 0;
1627 break;
1628
1629 default:
1630 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
1631 error = ENOPROTOOPT;
1632 } /* end switch (sopt->sopt_name) */
1633 if (optname != SCTP_AUTOCLOSE) {
1634 /* make it an "on/off" value */
1635 val = (val != 0);
1636 }
1637 if (*optsize < sizeof(val)) {
1638 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
1639 error = EINVAL;
1640 }
1641flags_out:
1642 SCTP_INP_RUNLOCK(inp);
1643 if (error == 0) {
1644 /* return the option value */
1645 *(int *)optval = val;
1646 *optsize = sizeof(val);
1647 }
1648 break;
1649 case SCTP_GET_PACKET_LOG:
1650 {
1651#ifdef SCTP_PACKET_LOGGING
1652 uint8_t *target;
1653 int ret;
1654
1655 SCTP_CHECK_AND_CAST(target, optval, uint8_t, *optsize);
1656 ret = sctp_copy_out_packet_log(target, (int)*optsize);
1657 *optsize = ret;
1658#else
1659 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
1660 error = EOPNOTSUPP;
1661#endif
1662 break;
1663 }
1664 case SCTP_REUSE_PORT:
1665 {
1666 uint32_t *value;
1667
1668 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) {
1669 /* Can't do this for a 1-to-many socket */
1670 error = EINVAL;
1671 break;
1672 }
1673 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
1674 *value = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE);
1675 *optsize = sizeof(uint32_t);
1676 }
1677 break;
1678 case SCTP_PARTIAL_DELIVERY_POINT:
1679 {
1680 uint32_t *value;
1681
1682 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
1683 *value = inp->partial_delivery_point;
1684 *optsize = sizeof(uint32_t);
1685 }
1686 break;
1687 case SCTP_FRAGMENT_INTERLEAVE:
1688 {
1689 uint32_t *value;
1690
1691 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
1692 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) {
1693 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) {
1694 *value = SCTP_FRAG_LEVEL_2;
1695 } else {
1696 *value = SCTP_FRAG_LEVEL_1;
1697 }
1698 } else {
1699 *value = SCTP_FRAG_LEVEL_0;
1700 }
1701 *optsize = sizeof(uint32_t);
1702 }
1703 break;
1704 case SCTP_CMT_ON_OFF:
1705 {
1706 struct sctp_assoc_value *av;
1707
1708 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
1709 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
1710 SCTP_FIND_STCB(inp, stcb, av->assoc_id);
1711 if (stcb) {
1712 av->assoc_value = stcb->asoc.sctp_cmt_on_off;
1713 SCTP_TCB_UNLOCK(stcb);
1714
1715 } else {
1716 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
1717 error = ENOTCONN;
1718 }
1719 } else {
1720 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
1721 error = ENOPROTOOPT;
1722 }
1723 *optsize = sizeof(*av);
1724 }
1725 break;
1726 /* EY - get socket option for nr_sacks */
1727 case SCTP_NR_SACK_ON_OFF:
1728 {
1729 struct sctp_assoc_value *av;
1730
1731 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
1732 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off)) {
1733 SCTP_FIND_STCB(inp, stcb, av->assoc_id);
1734 if (stcb) {
1735 av->assoc_value = stcb->asoc.sctp_nr_sack_on_off;
1736 SCTP_TCB_UNLOCK(stcb);
1737
1738 } else {
1739 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
1740 error = ENOTCONN;
1741 }
1742 } else {
1743 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
1744 error = ENOPROTOOPT;
1745 }
1746 *optsize = sizeof(*av);
1747 }
1748 break;
1749 /* JRS - Get socket option for pluggable congestion control */
1750 case SCTP_PLUGGABLE_CC:
1751 {
1752 struct sctp_assoc_value *av;
1753
1754 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
1755 SCTP_FIND_STCB(inp, stcb, av->assoc_id);
1756 if (stcb) {
1757 av->assoc_value = stcb->asoc.congestion_control_module;
1758 SCTP_TCB_UNLOCK(stcb);
1759 } else {
1760 av->assoc_value = inp->sctp_ep.sctp_default_cc_module;
1761 }
1762 *optsize = sizeof(*av);
1763 }
1764 break;
1765 case SCTP_GET_ADDR_LEN:
1766 {
1767 struct sctp_assoc_value *av;
1768
1769 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
1770 error = EINVAL;
1771#ifdef INET
1772 if (av->assoc_value == AF_INET) {
1773 av->assoc_value = sizeof(struct sockaddr_in);
1774 error = 0;
1775 }
1776#endif
1777#ifdef INET6
1778 if (av->assoc_value == AF_INET6) {
1779 av->assoc_value = sizeof(struct sockaddr_in6);
1780 error = 0;
1781 }
1782#endif
1783 if (error) {
1784 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
1785 }
1786 *optsize = sizeof(*av);
1787 }
1788 break;
1789 case SCTP_GET_ASSOC_NUMBER:
1790 {
1791 uint32_t *value, cnt;
1792
1793 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
1794 cnt = 0;
1795 SCTP_INP_RLOCK(inp);
1796 LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
1962 av->assoc_value = 0;
1963 else
1964 av->assoc_value = inp->sctp_frag_point - ovh;
1965 SCTP_INP_RUNLOCK(inp);
1966 }
1967 *optsize = sizeof(struct sctp_assoc_value);
1968 }
1969 break;
1970 case SCTP_GET_STAT_LOG:
1971 error = sctp_fill_stat_log(optval, optsize);
1972 break;
1973 case SCTP_EVENTS:
1974 {
1975 struct sctp_event_subscribe *events;
1976
1977 SCTP_CHECK_AND_CAST(events, optval, struct sctp_event_subscribe, *optsize);
1978 memset(events, 0, sizeof(*events));
1979 SCTP_INP_RLOCK(inp);
1980 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT))
1981 events->sctp_data_io_event = 1;
1982
1983 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT))
1984 events->sctp_association_event = 1;
1985
1986 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT))
1987 events->sctp_address_event = 1;
1988
1989 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT))
1990 events->sctp_send_failure_event = 1;
1991
1992 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR))
1993 events->sctp_peer_error_event = 1;
1994
1995 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT))
1996 events->sctp_shutdown_event = 1;
1997
1998 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT))
1999 events->sctp_partial_delivery_event = 1;
2000
2001 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT))
2002 events->sctp_adaptation_layer_event = 1;
2003
2004 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT))
2005 events->sctp_authentication_event = 1;
2006
2007 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_DRYEVNT))
2008 events->sctp_sender_dry_event = 1;
2009
2010 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT))
 2011 				events->sctp_stream_reset_event = 1;
2012 SCTP_INP_RUNLOCK(inp);
2013 *optsize = sizeof(struct sctp_event_subscribe);
2014 }
2015 break;
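	/*
	 * Illustrative userland sketch (not part of this file; "fd" is a
	 * placeholder socket): the same structure is used with setsockopt()
	 * to subscribe; members left at zero switch the corresponding
	 * notification off (see the SCTP_EVENTS case in sctp_setopt() below).
	 *
	 *	struct sctp_event_subscribe ev;
	 *	memset(&ev, 0, sizeof(ev));
	 *	ev.sctp_data_io_event = 1;
	 *	ev.sctp_association_event = 1;
	 *	setsockopt(fd, IPPROTO_SCTP, SCTP_EVENTS, &ev, sizeof(ev));
	 */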
2016
2017 case SCTP_ADAPTATION_LAYER:
2018 {
2019 uint32_t *value;
2020
2021 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
2022
2023 SCTP_INP_RLOCK(inp);
2024 *value = inp->sctp_ep.adaptation_layer_indicator;
2025 SCTP_INP_RUNLOCK(inp);
2026 *optsize = sizeof(uint32_t);
2027 }
2028 break;
2029 case SCTP_SET_INITIAL_DBG_SEQ:
2030 {
2031 uint32_t *value;
2032
2033 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
2034 SCTP_INP_RLOCK(inp);
2035 *value = inp->sctp_ep.initial_sequence_debug;
2036 SCTP_INP_RUNLOCK(inp);
2037 *optsize = sizeof(uint32_t);
2038 }
2039 break;
2040 case SCTP_GET_LOCAL_ADDR_SIZE:
2041 {
2042 uint32_t *value;
2043
2044 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
2045 SCTP_INP_RLOCK(inp);
2046 *value = sctp_count_max_addresses(inp);
2047 SCTP_INP_RUNLOCK(inp);
2048 *optsize = sizeof(uint32_t);
2049 }
2050 break;
2051 case SCTP_GET_REMOTE_ADDR_SIZE:
2052 {
2053 uint32_t *value;
2054 size_t size;
2055 struct sctp_nets *net;
2056
2057 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
2058 /* FIXME MT: change to sctp_assoc_value? */
2059 SCTP_FIND_STCB(inp, stcb, (sctp_assoc_t) * value);
2060
2061 if (stcb) {
2062 size = 0;
2063 /* Count the sizes */
2064 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
2065 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) ||
2066 (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) {
2067 size += sizeof(struct sockaddr_in6);
2068 } else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
2069 size += sizeof(struct sockaddr_in);
2070 } else {
2071 /* huh */
2072 break;
2073 }
2074 }
2075 SCTP_TCB_UNLOCK(stcb);
2076 *value = (uint32_t) size;
2077 } else {
2078 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
2079 error = ENOTCONN;
2080 }
2081 *optsize = sizeof(uint32_t);
2082 }
2083 break;
2084 case SCTP_GET_PEER_ADDRESSES:
2085 /*
 2086 	 * Get the address information; an array is passed in
 2087 	 * for us to fill up, and we pack it.
2088 */
2089 {
2090 size_t cpsz, left;
2091 struct sockaddr_storage *sas;
2092 struct sctp_nets *net;
2093 struct sctp_getaddresses *saddr;
2094
2095 SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize);
2096 SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id);
2097
2098 if (stcb) {
2099 left = (*optsize) - sizeof(struct sctp_getaddresses);
2100 *optsize = sizeof(struct sctp_getaddresses);
2101 sas = (struct sockaddr_storage *)&saddr->addr[0];
2102
2103 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
2104 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) ||
2105 (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) {
2106 cpsz = sizeof(struct sockaddr_in6);
2107 } else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
2108 cpsz = sizeof(struct sockaddr_in);
2109 } else {
2110 /* huh */
2111 break;
2112 }
2113 if (left < cpsz) {
2114 /* not enough room. */
2115 break;
2116 }
2117#ifdef INET6
2118 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
2119 (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET)) {
2120 /* Must map the address */
2121 in6_sin_2_v4mapsin6((struct sockaddr_in *)&net->ro._l_addr,
2122 (struct sockaddr_in6 *)sas);
2123 } else {
2124#endif
2125 memcpy(sas, &net->ro._l_addr, cpsz);
2126#ifdef INET6
2127 }
2128#endif
2129 ((struct sockaddr_in *)sas)->sin_port = stcb->rport;
2130
2131 sas = (struct sockaddr_storage *)((caddr_t)sas + cpsz);
2132 left -= cpsz;
2133 *optsize += cpsz;
2134 }
2135 SCTP_TCB_UNLOCK(stcb);
2136 } else {
2137 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
2138 error = ENOENT;
2139 }
2140 }
2141 break;
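	/*
	 * Illustrative note (not part of this file): applications normally
	 * reach this option through the sctp_getpaddrs(3) wrapper, which
	 * typically sizes its buffer via SCTP_GET_REMOTE_ADDR_SIZE above
	 * before packing the addresses here ("fd" and "id" are placeholders).
	 *
	 *	struct sockaddr *addrs;
	 *	int n = sctp_getpaddrs(fd, id, &addrs);
	 *	... use the n packed addresses ...
	 *	sctp_freepaddrs(addrs);
	 */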
2142 case SCTP_GET_LOCAL_ADDRESSES:
2143 {
2144 size_t limit, actual;
2145 struct sockaddr_storage *sas;
2146 struct sctp_getaddresses *saddr;
2147
2148 SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize);
2149 SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id);
2150
2151 sas = (struct sockaddr_storage *)&saddr->addr[0];
2152 limit = *optsize - sizeof(sctp_assoc_t);
2153 actual = sctp_fill_up_addresses(inp, stcb, limit, sas);
2154 if (stcb) {
2155 SCTP_TCB_UNLOCK(stcb);
2156 }
2157 *optsize = sizeof(struct sockaddr_storage) + actual;
2158 }
2159 break;
2160 case SCTP_PEER_ADDR_PARAMS:
2161 {
2162 struct sctp_paddrparams *paddrp;
2163 struct sctp_nets *net;
2164
2165 SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, *optsize);
2166 SCTP_FIND_STCB(inp, stcb, paddrp->spp_assoc_id);
2167
2168 net = NULL;
2169 if (stcb) {
2170 net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address);
2171 } else {
2172 /*
2173 * We increment here since
2174 * sctp_findassociation_ep_addr() wil do a
 2175 				 * sctp_findassociation_ep_addr() will do a
2176 * the locked tcb (last argument) is NOT a
2177 * TCB.. aka NULL.
2178 */
2179 SCTP_INP_INCR_REF(inp);
2180 stcb = sctp_findassociation_ep_addr(&inp, (struct sockaddr *)&paddrp->spp_address, &net, NULL, NULL);
2181 if (stcb == NULL) {
2182 SCTP_INP_DECR_REF(inp);
2183 }
2184 }
2185 if (stcb && (net == NULL)) {
2186 struct sockaddr *sa;
2187
2188 sa = (struct sockaddr *)&paddrp->spp_address;
2189 if (sa->sa_family == AF_INET) {
2190 struct sockaddr_in *sin;
2191
2192 sin = (struct sockaddr_in *)sa;
2193 if (sin->sin_addr.s_addr) {
2194 error = EINVAL;
2195 SCTP_TCB_UNLOCK(stcb);
2196 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
2197 break;
2198 }
2199 } else if (sa->sa_family == AF_INET6) {
2200 struct sockaddr_in6 *sin6;
2201
2202 sin6 = (struct sockaddr_in6 *)sa;
2203 if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
2204 error = EINVAL;
2205 SCTP_TCB_UNLOCK(stcb);
2206 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
2207 break;
2208 }
2209 } else {
2210 error = EAFNOSUPPORT;
2211 SCTP_TCB_UNLOCK(stcb);
2212 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
2213 break;
2214 }
2215 }
2216 if (stcb) {
 2217 				/* Applies to the specific association */
2218 paddrp->spp_flags = 0;
2219 if (net) {
2220 int ovh;
2221
2222 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2223 ovh = SCTP_MED_OVERHEAD;
2224 } else {
2225 ovh = SCTP_MED_V4_OVERHEAD;
2226 }
2227
2228
2229 paddrp->spp_pathmaxrxt = net->failure_threshold;
2230 paddrp->spp_pathmtu = net->mtu - ovh;
2231 /* get flags for HB */
2232 if (net->dest_state & SCTP_ADDR_NOHB)
2233 paddrp->spp_flags |= SPP_HB_DISABLE;
2234 else
2235 paddrp->spp_flags |= SPP_HB_ENABLE;
2236 /* get flags for PMTU */
2237 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
2238 paddrp->spp_flags |= SPP_PMTUD_ENABLE;
2239 } else {
2240 paddrp->spp_flags |= SPP_PMTUD_DISABLE;
2241 }
2242#ifdef INET
2243 if (net->ro._l_addr.sin.sin_family == AF_INET) {
2244 paddrp->spp_ipv4_tos = net->tos_flowlabel & 0x000000fc;
2245 paddrp->spp_flags |= SPP_IPV4_TOS;
2246 }
2247#endif
2248#ifdef INET6
2249 if (net->ro._l_addr.sin6.sin6_family == AF_INET6) {
2250 paddrp->spp_ipv6_flowlabel = net->tos_flowlabel;
2251 paddrp->spp_flags |= SPP_IPV6_FLOWLABEL;
2252 }
2253#endif
2254 } else {
2255 /*
2256 * No destination so return default
2257 * value
2258 */
2259 int cnt = 0;
2260
2261 paddrp->spp_pathmaxrxt = stcb->asoc.def_net_failure;
2262 paddrp->spp_pathmtu = sctp_get_frag_point(stcb, &stcb->asoc);
2263#ifdef INET
2264 paddrp->spp_ipv4_tos = stcb->asoc.default_tos & 0x000000fc;
2265 paddrp->spp_flags |= SPP_IPV4_TOS;
2266#endif
2267#ifdef INET6
2268 paddrp->spp_ipv6_flowlabel = stcb->asoc.default_flowlabel;
2269 paddrp->spp_flags |= SPP_IPV6_FLOWLABEL;
2270#endif
2271 /* default settings should be these */
2272 if (stcb->asoc.hb_is_disabled == 0) {
2273 paddrp->spp_flags |= SPP_HB_ENABLE;
2274 } else {
2275 paddrp->spp_flags |= SPP_HB_DISABLE;
2276 }
2277 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
2278 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
2279 cnt++;
2280 }
2281 }
2282 if (cnt) {
2283 paddrp->spp_flags |= SPP_PMTUD_ENABLE;
2284 }
2285 }
2286 paddrp->spp_hbinterval = stcb->asoc.heart_beat_delay;
2287 paddrp->spp_assoc_id = sctp_get_associd(stcb);
2288 SCTP_TCB_UNLOCK(stcb);
2289 } else {
2290 /* Use endpoint defaults */
2291 SCTP_INP_RLOCK(inp);
2292 paddrp->spp_pathmaxrxt = inp->sctp_ep.def_net_failure;
2293 paddrp->spp_hbinterval = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
2294 paddrp->spp_assoc_id = (sctp_assoc_t) 0;
2295 /* get inp's default */
2296#ifdef INET
2297 paddrp->spp_ipv4_tos = inp->ip_inp.inp.inp_ip_tos;
2298 paddrp->spp_flags |= SPP_IPV4_TOS;
2299#endif
2300#ifdef INET6
2301 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2302 paddrp->spp_ipv6_flowlabel = ((struct in6pcb *)inp)->in6p_flowinfo;
2303 paddrp->spp_flags |= SPP_IPV6_FLOWLABEL;
2304 }
2305#endif
2306 /* can't return this */
2307 paddrp->spp_pathmtu = 0;
2308
2309 /* default behavior, no stcb */
2310 paddrp->spp_flags = SPP_PMTUD_ENABLE;
2311
2312 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT)) {
2313 paddrp->spp_flags |= SPP_HB_ENABLE;
2314 } else {
2315 paddrp->spp_flags |= SPP_HB_DISABLE;
2316 }
2317 SCTP_INP_RUNLOCK(inp);
2318 }
2319 *optsize = sizeof(struct sctp_paddrparams);
2320 }
2321 break;
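	/*
	 * Illustrative userland sketch (not part of this file; "fd" and "id"
	 * are placeholders): leaving spp_address zeroed, as checked above,
	 * returns the association-level defaults rather than the parameters
	 * of a single destination.
	 *
	 *	struct sctp_paddrparams pp;
	 *	socklen_t len = sizeof(pp);
	 *	memset(&pp, 0, sizeof(pp));
	 *	pp.spp_assoc_id = id;
	 *	getsockopt(fd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS, &pp, &len);
	 */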
2322 case SCTP_GET_PEER_ADDR_INFO:
2323 {
2324 struct sctp_paddrinfo *paddri;
2325 struct sctp_nets *net;
2326
2327 SCTP_CHECK_AND_CAST(paddri, optval, struct sctp_paddrinfo, *optsize);
2328 SCTP_FIND_STCB(inp, stcb, paddri->spinfo_assoc_id);
2329
2330 net = NULL;
2331 if (stcb) {
2332 net = sctp_findnet(stcb, (struct sockaddr *)&paddri->spinfo_address);
2333 } else {
2334 /*
2335 * We increment here since
 2336 				 * sctp_findassociation_ep_addr() will do a
2337 * decrement if it finds the stcb as long as
2338 * the locked tcb (last argument) is NOT a
2339 * TCB.. aka NULL.
2340 */
2341 SCTP_INP_INCR_REF(inp);
2342 stcb = sctp_findassociation_ep_addr(&inp, (struct sockaddr *)&paddri->spinfo_address, &net, NULL, NULL);
2343 if (stcb == NULL) {
2344 SCTP_INP_DECR_REF(inp);
2345 }
2346 }
2347
2348 if ((stcb) && (net)) {
2349 paddri->spinfo_state = net->dest_state & (SCTP_REACHABLE_MASK | SCTP_ADDR_NOHB);
2350 paddri->spinfo_cwnd = net->cwnd;
2351 paddri->spinfo_srtt = ((net->lastsa >> 2) + net->lastsv) >> 1;
2352 paddri->spinfo_rto = net->RTO;
2353 paddri->spinfo_assoc_id = sctp_get_associd(stcb);
2354 SCTP_TCB_UNLOCK(stcb);
2355 } else {
2356 if (stcb) {
2357 SCTP_TCB_UNLOCK(stcb);
2358 }
2359 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
2360 error = ENOENT;
2361 }
2362 *optsize = sizeof(struct sctp_paddrinfo);
2363 }
2364 break;
2365 case SCTP_PCB_STATUS:
2366 {
2367 struct sctp_pcbinfo *spcb;
2368
2369 SCTP_CHECK_AND_CAST(spcb, optval, struct sctp_pcbinfo, *optsize);
2370 sctp_fill_pcbinfo(spcb);
2371 *optsize = sizeof(struct sctp_pcbinfo);
2372 }
2373 break;
2374
2375 case SCTP_STATUS:
2376 {
2377 struct sctp_nets *net;
2378 struct sctp_status *sstat;
2379
2380 SCTP_CHECK_AND_CAST(sstat, optval, struct sctp_status, *optsize);
2381 SCTP_FIND_STCB(inp, stcb, sstat->sstat_assoc_id);
2382
2383 if (stcb == NULL) {
 2384 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
 2385 				error = EINVAL;
2386 break;
2387 }
2388 /*
2389 * I think passing the state is fine since
2390 * sctp_constants.h will be available to the user
2391 * land.
2392 */
2393 sstat->sstat_state = stcb->asoc.state;
2394 sstat->sstat_assoc_id = sctp_get_associd(stcb);
2395 sstat->sstat_rwnd = stcb->asoc.peers_rwnd;
2396 sstat->sstat_unackdata = stcb->asoc.sent_queue_cnt;
2397 /*
2398 * We can't include chunks that have been passed to
2399 * the socket layer. Only things in queue.
2400 */
2401 sstat->sstat_penddata = (stcb->asoc.cnt_on_reasm_queue +
2402 stcb->asoc.cnt_on_all_streams);
2403
2404
2405 sstat->sstat_instrms = stcb->asoc.streamincnt;
2406 sstat->sstat_outstrms = stcb->asoc.streamoutcnt;
2407 sstat->sstat_fragmentation_point = sctp_get_frag_point(stcb, &stcb->asoc);
2408 memcpy(&sstat->sstat_primary.spinfo_address,
2409 &stcb->asoc.primary_destination->ro._l_addr,
2410 ((struct sockaddr *)(&stcb->asoc.primary_destination->ro._l_addr))->sa_len);
2411 net = stcb->asoc.primary_destination;
2412 ((struct sockaddr_in *)&sstat->sstat_primary.spinfo_address)->sin_port = stcb->rport;
2413 /*
2414 * Again the user can get info from sctp_constants.h
2415 * for what the state of the network is.
2416 */
2417 sstat->sstat_primary.spinfo_state = net->dest_state & SCTP_REACHABLE_MASK;
2418 sstat->sstat_primary.spinfo_cwnd = net->cwnd;
2419 sstat->sstat_primary.spinfo_srtt = net->lastsa;
2420 sstat->sstat_primary.spinfo_rto = net->RTO;
2421 sstat->sstat_primary.spinfo_mtu = net->mtu;
2422 sstat->sstat_primary.spinfo_assoc_id = sctp_get_associd(stcb);
2423 SCTP_TCB_UNLOCK(stcb);
2424 *optsize = sizeof(*sstat);
2425 }
2426 break;
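	/*
	 * Illustrative userland sketch (not part of this file): on a
	 * one-to-many socket the caller selects the association by filling
	 * in sstat_assoc_id before the call ("fd" and "id" are placeholders).
	 *
	 *	struct sctp_status st;
	 *	socklen_t len = sizeof(st);
	 *	memset(&st, 0, sizeof(st));
	 *	st.sstat_assoc_id = id;
	 *	getsockopt(fd, IPPROTO_SCTP, SCTP_STATUS, &st, &len);
	 */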
2427 case SCTP_RTOINFO:
2428 {
2429 struct sctp_rtoinfo *srto;
2430
2431 SCTP_CHECK_AND_CAST(srto, optval, struct sctp_rtoinfo, *optsize);
2432 SCTP_FIND_STCB(inp, stcb, srto->srto_assoc_id);
2433
2434 if (stcb) {
2435 srto->srto_initial = stcb->asoc.initial_rto;
2436 srto->srto_max = stcb->asoc.maxrto;
2437 srto->srto_min = stcb->asoc.minrto;
2438 SCTP_TCB_UNLOCK(stcb);
2439 } else {
2440 SCTP_INP_RLOCK(inp);
2441 srto->srto_initial = inp->sctp_ep.initial_rto;
2442 srto->srto_max = inp->sctp_ep.sctp_maxrto;
2443 srto->srto_min = inp->sctp_ep.sctp_minrto;
2444 SCTP_INP_RUNLOCK(inp);
2445 }
2446 *optsize = sizeof(*srto);
2447 }
2448 break;
2449 case SCTP_ASSOCINFO:
2450 {
2451 struct sctp_assocparams *sasoc;
2452 uint32_t oldval;
2453
2454 SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, *optsize);
2455 SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id);
2456
2457 if (stcb) {
2458 oldval = sasoc->sasoc_cookie_life;
2459 sasoc->sasoc_cookie_life = TICKS_TO_MSEC(stcb->asoc.cookie_life);
2460 sasoc->sasoc_asocmaxrxt = stcb->asoc.max_send_times;
2461 sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets;
2462 sasoc->sasoc_peer_rwnd = stcb->asoc.peers_rwnd;
2463 sasoc->sasoc_local_rwnd = stcb->asoc.my_rwnd;
2464 SCTP_TCB_UNLOCK(stcb);
2465 } else {
2466 SCTP_INP_RLOCK(inp);
2467 sasoc->sasoc_cookie_life = TICKS_TO_MSEC(inp->sctp_ep.def_cookie_life);
2468 sasoc->sasoc_asocmaxrxt = inp->sctp_ep.max_send_times;
2469 sasoc->sasoc_number_peer_destinations = 0;
2470 sasoc->sasoc_peer_rwnd = 0;
2471 sasoc->sasoc_local_rwnd = sbspace(&inp->sctp_socket->so_rcv);
2472 SCTP_INP_RUNLOCK(inp);
2473 }
2474 *optsize = sizeof(*sasoc);
2475 }
2476 break;
2477 case SCTP_DEFAULT_SEND_PARAM:
2478 {
2479 struct sctp_sndrcvinfo *s_info;
2480
2481 SCTP_CHECK_AND_CAST(s_info, optval, struct sctp_sndrcvinfo, *optsize);
2482 SCTP_FIND_STCB(inp, stcb, s_info->sinfo_assoc_id);
2483
2484 if (stcb) {
2485 memcpy(s_info, &stcb->asoc.def_send, sizeof(stcb->asoc.def_send));
2486 SCTP_TCB_UNLOCK(stcb);
2487 } else {
2488 SCTP_INP_RLOCK(inp);
2489 memcpy(s_info, &inp->def_send, sizeof(inp->def_send));
2490 SCTP_INP_RUNLOCK(inp);
2491 }
2492 *optsize = sizeof(*s_info);
2493 }
2494 break;
2495 case SCTP_INITMSG:
2496 {
2497 struct sctp_initmsg *sinit;
2498
2499 SCTP_CHECK_AND_CAST(sinit, optval, struct sctp_initmsg, *optsize);
2500 SCTP_INP_RLOCK(inp);
2501 sinit->sinit_num_ostreams = inp->sctp_ep.pre_open_stream_count;
2502 sinit->sinit_max_instreams = inp->sctp_ep.max_open_streams_intome;
2503 sinit->sinit_max_attempts = inp->sctp_ep.max_init_times;
2504 sinit->sinit_max_init_timeo = inp->sctp_ep.initial_init_rto_max;
2505 SCTP_INP_RUNLOCK(inp);
2506 *optsize = sizeof(*sinit);
2507 }
2508 break;
2509 case SCTP_PRIMARY_ADDR:
2510 /* we allow a "get" operation on this */
2511 {
2512 struct sctp_setprim *ssp;
2513
2514 SCTP_CHECK_AND_CAST(ssp, optval, struct sctp_setprim, *optsize);
2515 SCTP_FIND_STCB(inp, stcb, ssp->ssp_assoc_id);
2516
2517 if (stcb) {
2518 /* simply copy out the sockaddr_storage... */
2519 int len;
2520
2521 len = *optsize;
2522 if (len > stcb->asoc.primary_destination->ro._l_addr.sa.sa_len)
2523 len = stcb->asoc.primary_destination->ro._l_addr.sa.sa_len;
2524
2525 memcpy(&ssp->ssp_addr,
2526 &stcb->asoc.primary_destination->ro._l_addr,
2527 len);
2528 SCTP_TCB_UNLOCK(stcb);
2529 } else {
 2530 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
 2531 				error = EINVAL;
2532 }
2533 *optsize = sizeof(*ssp);
2534 }
2535 break;
2536
2537 case SCTP_HMAC_IDENT:
2538 {
2539 struct sctp_hmacalgo *shmac;
2540 sctp_hmaclist_t *hmaclist;
2541 uint32_t size;
2542 int i;
2543
2544 SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, *optsize);
2545
2546 SCTP_INP_RLOCK(inp);
2547 hmaclist = inp->sctp_ep.local_hmacs;
2548 if (hmaclist == NULL) {
2549 /* no HMACs to return */
2550 *optsize = sizeof(*shmac);
2551 SCTP_INP_RUNLOCK(inp);
2552 break;
2553 }
2554 /* is there room for all of the hmac ids? */
2555 size = sizeof(*shmac) + (hmaclist->num_algo *
2556 sizeof(shmac->shmac_idents[0]));
2557 if ((size_t)(*optsize) < size) {
 2558 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
 2559 				error = EINVAL;
2560 SCTP_INP_RUNLOCK(inp);
2561 break;
2562 }
2563 /* copy in the list */
2564 shmac->shmac_number_of_idents = hmaclist->num_algo;
2565 for (i = 0; i < hmaclist->num_algo; i++) {
2566 shmac->shmac_idents[i] = hmaclist->hmac[i];
2567 }
2568 SCTP_INP_RUNLOCK(inp);
2569 *optsize = size;
2570 break;
2571 }
2572 case SCTP_AUTH_ACTIVE_KEY:
2573 {
2574 struct sctp_authkeyid *scact;
2575
2576 SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid, *optsize);
2577 SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id);
2578
2579 if (stcb) {
2580 /* get the active key on the assoc */
2581 scact->scact_keynumber = stcb->asoc.authinfo.active_keyid;
2582 SCTP_TCB_UNLOCK(stcb);
2583 } else {
2584 /* get the endpoint active key */
2585 SCTP_INP_RLOCK(inp);
2586 scact->scact_keynumber = inp->sctp_ep.default_keyid;
2587 SCTP_INP_RUNLOCK(inp);
2588 }
2589 *optsize = sizeof(*scact);
2590 break;
2591 }
2592 case SCTP_LOCAL_AUTH_CHUNKS:
2593 {
2594 struct sctp_authchunks *sac;
2595 sctp_auth_chklist_t *chklist = NULL;
2596 size_t size = 0;
2597
2598 SCTP_CHECK_AND_CAST(sac, optval, struct sctp_authchunks, *optsize);
2599 SCTP_FIND_STCB(inp, stcb, sac->gauth_assoc_id);
2600
2601 if (stcb) {
2602 /* get off the assoc */
2603 chklist = stcb->asoc.local_auth_chunks;
2604 /* is there enough space? */
2605 size = sctp_auth_get_chklist_size(chklist);
2606 if (*optsize < (sizeof(struct sctp_authchunks) + size)) {
2607 error = EINVAL;
2608 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
2609 } else {
2610 /* copy in the chunks */
2611 (void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks);
2612 }
2613 SCTP_TCB_UNLOCK(stcb);
2614 } else {
2615 /* get off the endpoint */
2616 SCTP_INP_RLOCK(inp);
2617 chklist = inp->sctp_ep.local_auth_chunks;
2618 /* is there enough space? */
2619 size = sctp_auth_get_chklist_size(chklist);
2620 if (*optsize < (sizeof(struct sctp_authchunks) + size)) {
2621 error = EINVAL;
2622 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
2623 } else {
2624 /* copy in the chunks */
2625 (void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks);
2626 }
2627 SCTP_INP_RUNLOCK(inp);
2628 }
2629 *optsize = sizeof(struct sctp_authchunks) + size;
2630 break;
2631 }
2632 case SCTP_PEER_AUTH_CHUNKS:
2633 {
2634 struct sctp_authchunks *sac;
2635 sctp_auth_chklist_t *chklist = NULL;
2636 size_t size = 0;
2637
2638 SCTP_CHECK_AND_CAST(sac, optval, struct sctp_authchunks, *optsize);
2639 SCTP_FIND_STCB(inp, stcb, sac->gauth_assoc_id);
2640
2641 if (stcb) {
2642 /* get off the assoc */
2643 chklist = stcb->asoc.peer_auth_chunks;
2644 /* is there enough space? */
2645 size = sctp_auth_get_chklist_size(chklist);
2646 if (*optsize < (sizeof(struct sctp_authchunks) + size)) {
2647 error = EINVAL;
2648 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
2649 } else {
2650 /* copy in the chunks */
2651 (void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks);
2652 }
2653 SCTP_TCB_UNLOCK(stcb);
2654 } else {
2655 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
2656 error = ENOENT;
2657 }
2658 *optsize = sizeof(struct sctp_authchunks) + size;
2659 break;
2660 }
2661
2662
2663 default:
2664 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
2665 error = ENOPROTOOPT;
2666 *optsize = 0;
2667 break;
 2668 	} /* end switch (optname) */
2669 return (error);
2670}
2671
2672static int
2673sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
2674 void *p)
2675{
2676 int error, set_opt;
2677 uint32_t *mopt;
2678 struct sctp_tcb *stcb = NULL;
2679 struct sctp_inpcb *inp = NULL;
2680 uint32_t vrf_id;
2681
2682 if (optval == NULL) {
2683 SCTP_PRINTF("optval is NULL\n");
2684 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2685 return (EINVAL);
2686 }
2687 inp = (struct sctp_inpcb *)so->so_pcb;
 2688 	if (inp == NULL) {
 2689 		SCTP_PRINTF("inp is NULL?\n");
 2690 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
 2691 		return (EINVAL);
2692 }
2693 vrf_id = inp->def_vrf_id;
2694
2695 error = 0;
2696 switch (optname) {
2697 case SCTP_NODELAY:
2698 case SCTP_AUTOCLOSE:
2699 case SCTP_AUTO_ASCONF:
2700 case SCTP_EXPLICIT_EOR:
2701 case SCTP_DISABLE_FRAGMENTS:
2702 case SCTP_USE_EXT_RCVINFO:
2703 case SCTP_I_WANT_MAPPED_V4_ADDR:
2704 /* copy in the option value */
2705 SCTP_CHECK_AND_CAST(mopt, optval, uint32_t, optsize);
2706 set_opt = 0;
2707 if (error)
2708 break;
2709 switch (optname) {
2710 case SCTP_DISABLE_FRAGMENTS:
2711 set_opt = SCTP_PCB_FLAGS_NO_FRAGMENT;
2712 break;
2713 case SCTP_AUTO_ASCONF:
2714 /*
2715 * NOTE: we don't really support this flag
2716 */
2717 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
2718 /* only valid for bound all sockets */
2719 set_opt = SCTP_PCB_FLAGS_AUTO_ASCONF;
2720 } else {
2721 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2722 return (EINVAL);
2723 }
2724 break;
2725 case SCTP_EXPLICIT_EOR:
2726 set_opt = SCTP_PCB_FLAGS_EXPLICIT_EOR;
2727 break;
2728 case SCTP_USE_EXT_RCVINFO:
2729 set_opt = SCTP_PCB_FLAGS_EXT_RCVINFO;
2730 break;
2731 case SCTP_I_WANT_MAPPED_V4_ADDR:
2732 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2733 set_opt = SCTP_PCB_FLAGS_NEEDS_MAPPED_V4;
2734 } else {
2735 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2736 return (EINVAL);
2737 }
2738 break;
2739 case SCTP_NODELAY:
2740 set_opt = SCTP_PCB_FLAGS_NODELAY;
2741 break;
2742 case SCTP_AUTOCLOSE:
2743 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2744 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
2745 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2746 return (EINVAL);
2747 }
2748 set_opt = SCTP_PCB_FLAGS_AUTOCLOSE;
2749 /*
 2750 			 * The value is in ticks. Note this does not affect
2751 * old associations, only new ones.
2752 */
2753 inp->sctp_ep.auto_close_time = SEC_TO_TICKS(*mopt);
2754 break;
2755 }
2756 SCTP_INP_WLOCK(inp);
2757 if (*mopt != 0) {
2758 sctp_feature_on(inp, set_opt);
2759 } else {
2760 sctp_feature_off(inp, set_opt);
2761 }
2762 SCTP_INP_WUNLOCK(inp);
2763 break;
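	/*
	 * Illustrative userland sketch (not part of this file; "fd" is a
	 * placeholder socket): the options handled above take a single
	 * uint32_t; for SCTP_AUTOCLOSE the value is the idle time in seconds
	 * (one-to-many sockets only, converted to ticks above).
	 *
	 *	uint32_t on = 1;
	 *	setsockopt(fd, IPPROTO_SCTP, SCTP_NODELAY, &on, sizeof(on));
	 *
	 *	uint32_t secs = 30;
	 *	setsockopt(fd, IPPROTO_SCTP, SCTP_AUTOCLOSE, &secs, sizeof(secs));
	 */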
2764 case SCTP_REUSE_PORT:
2765 {
2766 SCTP_CHECK_AND_CAST(mopt, optval, uint32_t, optsize);
2767 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == 0) {
2768 /* Can't set it after we are bound */
2769 error = EINVAL;
2770 break;
2771 }
2772 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) {
2773 /* Can't do this for a 1-m socket */
2774 error = EINVAL;
2775 break;
2776 }
 2777 			if (*mopt != 0)
2778 sctp_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE);
2779 else
2780 sctp_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE);
2781 }
2782 break;
2783 case SCTP_PARTIAL_DELIVERY_POINT:
2784 {
2785 uint32_t *value;
2786
2787 SCTP_CHECK_AND_CAST(value, optval, uint32_t, optsize);
2788 if (*value > SCTP_SB_LIMIT_RCV(so)) {
2789 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2790 error = EINVAL;
2791 break;
2792 }
2793 inp->partial_delivery_point = *value;
2794 }
2795 break;
2796 case SCTP_FRAGMENT_INTERLEAVE:
2797 /* not yet until we re-write sctp_recvmsg() */
2798 {
2799 uint32_t *level;
2800
2801 SCTP_CHECK_AND_CAST(level, optval, uint32_t, optsize);
2802 if (*level == SCTP_FRAG_LEVEL_2) {
2803 sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
2804 sctp_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
2805 } else if (*level == SCTP_FRAG_LEVEL_1) {
2806 sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
2807 sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
2808 } else if (*level == SCTP_FRAG_LEVEL_0) {
2809 sctp_feature_off(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
2810 sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
2811
2812 } else {
2813 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2814 error = EINVAL;
2815 }
2816 }
2817 break;
2818 case SCTP_CMT_ON_OFF:
2819 {
2820 struct sctp_assoc_value *av;
2821
2822 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
2823 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
2824 SCTP_FIND_STCB(inp, stcb, av->assoc_id);
2825 if (stcb) {
2826 stcb->asoc.sctp_cmt_on_off = (uint8_t) av->assoc_value;
2827 SCTP_TCB_UNLOCK(stcb);
2828 } else {
2829 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
2830 error = ENOTCONN;
2831 }
2832 } else {
2833 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
2834 error = ENOPROTOOPT;
2835 }
2836 }
2837 break;
2838 /* EY nr_sack_on_off socket option */
2839 case SCTP_NR_SACK_ON_OFF:
2840 {
2841 struct sctp_assoc_value *av;
2842
2843 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
2844 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off)) {
2845 SCTP_FIND_STCB(inp, stcb, av->assoc_id);
2846 if (stcb) {
2847 stcb->asoc.sctp_nr_sack_on_off = (uint8_t) av->assoc_value;
2848 SCTP_TCB_UNLOCK(stcb);
2849 } else {
2850 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
2851 error = ENOTCONN;
2852 }
2853 } else {
2854 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
2855 error = ENOPROTOOPT;
2856 }
2857 }
2858 break;
2859 /* JRS - Set socket option for pluggable congestion control */
2860 case SCTP_PLUGGABLE_CC:
2861 {
2862 struct sctp_assoc_value *av;
2863
2864 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
2865 SCTP_FIND_STCB(inp, stcb, av->assoc_id);
2866 if (stcb) {
2867 switch (av->assoc_value) {
2868 /*
2869 * JRS - Standard TCP congestion
2870 * control
2871 */
2872 case SCTP_CC_RFC2581:
2873 {
2874 stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
2875 stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
2876 stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
2877 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
2878 stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
2879 stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
2880 stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
2881 stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
2882 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
2883 SCTP_TCB_UNLOCK(stcb);
2884 break;
2885 }
2886 /*
2887 * JRS - High Speed TCP congestion
2888 * control (Floyd)
2889 */
2890 case SCTP_CC_HSTCP:
2891 {
2892 stcb->asoc.congestion_control_module = SCTP_CC_HSTCP;
2893 stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
2894 stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_hs_cwnd_update_after_sack;
2895 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_hs_cwnd_update_after_fr;
2896 stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
2897 stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
2898 stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
2899 stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
2900 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
2901 SCTP_TCB_UNLOCK(stcb);
2902 break;
2903 }
2904 /* JRS - HTCP congestion control */
2905 case SCTP_CC_HTCP:
2906 {
2907 stcb->asoc.congestion_control_module = SCTP_CC_HTCP;
2908 stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_htcp_set_initial_cc_param;
2909 stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_htcp_cwnd_update_after_sack;
2910 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_htcp_cwnd_update_after_fr;
2911 stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_htcp_cwnd_update_after_timeout;
2912 stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_htcp_cwnd_update_after_ecn_echo;
2913 stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
2914 stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
2915 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_htcp_cwnd_update_after_fr_timer;
2916 SCTP_TCB_UNLOCK(stcb);
2917 break;
2918 }
2919 /*
2920 * JRS - All other values are
2921 * invalid
2922 */
2923 default:
2924 {
2925 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2926 error = EINVAL;
2927 SCTP_TCB_UNLOCK(stcb);
2928 break;
2929 }
2930 }
2931 } else {
2932 switch (av->assoc_value) {
2933 case SCTP_CC_RFC2581:
2934 case SCTP_CC_HSTCP:
2935 case SCTP_CC_HTCP:
2936 inp->sctp_ep.sctp_default_cc_module = av->assoc_value;
2937 break;
2938 default:
2939 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2940 error = EINVAL;
2941 break;
 2942 				}
2943 }
2944 }
2945 break;
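	/*
	 * Illustrative userland sketch (not part of this file; "fd" and "id"
	 * are placeholders): selecting the HTCP module for one association.
	 * When no association matches, the else-branch above stores the value
	 * as the endpoint default instead.
	 *
	 *	struct sctp_assoc_value av;
	 *	av.assoc_id = id;
	 *	av.assoc_value = SCTP_CC_HTCP;
	 *	setsockopt(fd, IPPROTO_SCTP, SCTP_PLUGGABLE_CC, &av, sizeof(av));
	 */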
2946 case SCTP_CLR_STAT_LOG:
2947 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
2948 error = EOPNOTSUPP;
2949 break;
2950 case SCTP_CONTEXT:
2951 {
2952 struct sctp_assoc_value *av;
2953
2954 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
2955 SCTP_FIND_STCB(inp, stcb, av->assoc_id);
2956
2957 if (stcb) {
2958 stcb->asoc.context = av->assoc_value;
2959 SCTP_TCB_UNLOCK(stcb);
2960 } else {
2961 SCTP_INP_WLOCK(inp);
2962 inp->sctp_context = av->assoc_value;
2963 SCTP_INP_WUNLOCK(inp);
2964 }
2965 }
2966 break;
2967 case SCTP_VRF_ID:
2968 {
2969 uint32_t *default_vrfid;
2970
2971 SCTP_CHECK_AND_CAST(default_vrfid, optval, uint32_t, optsize);
2972 if (*default_vrfid > SCTP_MAX_VRF_ID) {
2973 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2974 error = EINVAL;
2975 break;
2976 }
2977 inp->def_vrf_id = *default_vrfid;
2978 break;
2979 }
2980 case SCTP_DEL_VRF_ID:
2981 {
2982 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
2983 error = EOPNOTSUPP;
2984 break;
2985 }
2986 case SCTP_ADD_VRF_ID:
2987 {
2988 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
2989 error = EOPNOTSUPP;
2990 break;
2991 }
2992 case SCTP_DELAYED_SACK:
2993 {
2994 struct sctp_sack_info *sack;
2995
2996 SCTP_CHECK_AND_CAST(sack, optval, struct sctp_sack_info, optsize);
2997 SCTP_FIND_STCB(inp, stcb, sack->sack_assoc_id);
2998 if (sack->sack_delay) {
2999 if (sack->sack_delay > SCTP_MAX_SACK_DELAY)
3000 sack->sack_delay = SCTP_MAX_SACK_DELAY;
3001 }
3002 if (stcb) {
3003 if (sack->sack_delay) {
3004 if (MSEC_TO_TICKS(sack->sack_delay) < 1) {
3005 sack->sack_delay = TICKS_TO_MSEC(1);
3006 }
3007 stcb->asoc.delayed_ack = sack->sack_delay;
3008 }
3009 if (sack->sack_freq) {
3010 stcb->asoc.sack_freq = sack->sack_freq;
3011 }
3012 SCTP_TCB_UNLOCK(stcb);
3013 } else {
3014 SCTP_INP_WLOCK(inp);
3015 if (sack->sack_delay) {
3016 if (MSEC_TO_TICKS(sack->sack_delay) < 1) {
3017 sack->sack_delay = TICKS_TO_MSEC(1);
3018 }
3019 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(sack->sack_delay);
3020 }
3021 if (sack->sack_freq) {
3022 inp->sctp_ep.sctp_sack_freq = sack->sack_freq;
3023 }
3024 SCTP_INP_WUNLOCK(inp);
3025 }
3026 break;
3027 }
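	/*
	 * Illustrative userland sketch (not part of this file; "fd" and "id"
	 * are placeholders): sack_delay is in milliseconds and sack_freq in
	 * packets; a zero field leaves that setting unchanged, as coded above.
	 *
	 *	struct sctp_sack_info si;
	 *	memset(&si, 0, sizeof(si));
	 *	si.sack_assoc_id = id;
	 *	si.sack_delay = 100;
	 *	si.sack_freq = 2;
	 *	setsockopt(fd, IPPROTO_SCTP, SCTP_DELAYED_SACK, &si, sizeof(si));
	 */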
3028 case SCTP_AUTH_CHUNK:
3029 {
3030 struct sctp_authchunk *sauth;
3031
3032 SCTP_CHECK_AND_CAST(sauth, optval, struct sctp_authchunk, optsize);
3033
3034 SCTP_INP_WLOCK(inp);
3035 if (sctp_auth_add_chunk(sauth->sauth_chunk, inp->sctp_ep.local_auth_chunks)) {
3036 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3037 error = EINVAL;
3038 }
3039 SCTP_INP_WUNLOCK(inp);
3040 break;
3041 }
3042 case SCTP_AUTH_KEY:
3043 {
3044 struct sctp_authkey *sca;
3045 struct sctp_keyhead *shared_keys;
3046 sctp_sharedkey_t *shared_key;
3047 sctp_key_t *key = NULL;
3048 size_t size;
3049
3050 SCTP_CHECK_AND_CAST(sca, optval, struct sctp_authkey, optsize);
3051 SCTP_FIND_STCB(inp, stcb, sca->sca_assoc_id);
3052 size = optsize - sizeof(*sca);
3053
3054 if (stcb) {
3055 /* set it on the assoc */
3056 shared_keys = &stcb->asoc.shared_keys;
3057 /* clear the cached keys for this key id */
3058 sctp_clear_cachedkeys(stcb, sca->sca_keynumber);
3059 /*
3060 * create the new shared key and
3061 * insert/replace it
3062 */
3063 if (size > 0) {
3064 key = sctp_set_key(sca->sca_key, (uint32_t) size);
3065 if (key == NULL) {
3066 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
3067 error = ENOMEM;
3068 SCTP_TCB_UNLOCK(stcb);
3069 break;
3070 }
3071 }
3072 shared_key = sctp_alloc_sharedkey();
3073 if (shared_key == NULL) {
3074 sctp_free_key(key);
3075 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
3076 error = ENOMEM;
3077 SCTP_TCB_UNLOCK(stcb);
3078 break;
3079 }
3080 shared_key->key = key;
3081 shared_key->keyid = sca->sca_keynumber;
3082 error = sctp_insert_sharedkey(shared_keys, shared_key);
3083 SCTP_TCB_UNLOCK(stcb);
3084 } else {
3085 /* set it on the endpoint */
3086 SCTP_INP_WLOCK(inp);
3087 shared_keys = &inp->sctp_ep.shared_keys;
3088 /*
3089 * clear the cached keys on all assocs for
3090 * this key id
3091 */
3092 sctp_clear_cachedkeys_ep(inp, sca->sca_keynumber);
3093 /*
3094 * create the new shared key and
3095 * insert/replace it
3096 */
3097 if (size > 0) {
3098 key = sctp_set_key(sca->sca_key, (uint32_t) size);
3099 if (key == NULL) {
3100 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
3101 error = ENOMEM;
3102 SCTP_INP_WUNLOCK(inp);
3103 break;
3104 }
3105 }
3106 shared_key = sctp_alloc_sharedkey();
3107 if (shared_key == NULL) {
3108 sctp_free_key(key);
3109 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
3110 error = ENOMEM;
3111 SCTP_INP_WUNLOCK(inp);
3112 break;
3113 }
3114 shared_key->key = key;
3115 shared_key->keyid = sca->sca_keynumber;
3116 error = sctp_insert_sharedkey(shared_keys, shared_key);
3117 SCTP_INP_WUNLOCK(inp);
3118 }
3119 break;
3120 }
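	/*
	 * Illustrative userland sketch (not part of this file): the key bytes
	 * follow the fixed part of sctp_authkey, so the option length is
	 * sizeof(*sca) plus the key length, matching the "size" computation
	 * above. "keybytes"/"keylen"/"id" are placeholders; an assoc id of 0
	 * targets the endpoint.
	 *
	 *	struct sctp_authkey *sca;
	 *	size_t sz = sizeof(*sca) + keylen;
	 *	sca = calloc(1, sz);
	 *	sca->sca_assoc_id = id;
	 *	sca->sca_keynumber = 1;
	 *	memcpy(sca->sca_key, keybytes, keylen);
	 *	setsockopt(fd, IPPROTO_SCTP, SCTP_AUTH_KEY, sca, sz);
	 */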
3121 case SCTP_HMAC_IDENT:
3122 {
3123 struct sctp_hmacalgo *shmac;
3124 sctp_hmaclist_t *hmaclist;
3125 uint16_t hmacid;
3126 uint32_t i;
3127
3128 size_t found;
3129
3130 SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, optsize);
3131 if (optsize < sizeof(struct sctp_hmacalgo) + shmac->shmac_number_of_idents * sizeof(uint16_t)) {
3132 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3133 error = EINVAL;
3134 break;
3135 }
3136 hmaclist = sctp_alloc_hmaclist(shmac->shmac_number_of_idents);
3137 if (hmaclist == NULL) {
3138 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
3139 error = ENOMEM;
3140 break;
3141 }
3142 for (i = 0; i < shmac->shmac_number_of_idents; i++) {
3143 hmacid = shmac->shmac_idents[i];
3144 if (sctp_auth_add_hmacid(hmaclist, hmacid)) {
 3145 					/* invalid HMACs were found */
3146 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3147 error = EINVAL;
3148 sctp_free_hmaclist(hmaclist);
3149 goto sctp_set_hmac_done;
3150 }
3151 }
3152 found = 0;
3153 for (i = 0; i < hmaclist->num_algo; i++) {
3154 if (hmaclist->hmac[i] == SCTP_AUTH_HMAC_ID_SHA1) {
3155 /* already in list */
3156 found = 1;
3157 }
3158 }
3159 if (!found) {
3160 sctp_free_hmaclist(hmaclist);
3161 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3162 error = EINVAL;
3163 break;
3164 }
3165 /* set it on the endpoint */
3166 SCTP_INP_WLOCK(inp);
3167 if (inp->sctp_ep.local_hmacs)
3168 sctp_free_hmaclist(inp->sctp_ep.local_hmacs);
3169 inp->sctp_ep.local_hmacs = hmaclist;
3170 SCTP_INP_WUNLOCK(inp);
3171 sctp_set_hmac_done:
3172 break;
3173 }
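	/*
	 * Illustrative userland sketch (not part of this file): SHA-1 must
	 * appear in the list (enforced above); SCTP_AUTH_HMAC_ID_SHA256 is
	 * assumed to be available as the preferred algorithm.
	 *
	 *	struct sctp_hmacalgo *algo;
	 *	size_t sz = sizeof(*algo) + 2 * sizeof(uint16_t);
	 *	algo = calloc(1, sz);
	 *	algo->shmac_number_of_idents = 2;
	 *	algo->shmac_idents[0] = SCTP_AUTH_HMAC_ID_SHA256;
	 *	algo->shmac_idents[1] = SCTP_AUTH_HMAC_ID_SHA1;
	 *	setsockopt(fd, IPPROTO_SCTP, SCTP_HMAC_IDENT, algo, sz);
	 */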
3174 case SCTP_AUTH_ACTIVE_KEY:
3175 {
3176 struct sctp_authkeyid *scact;
3177
3178 SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid,
3179 optsize);
3180 SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id);
3181
3182 /* set the active key on the right place */
3183 if (stcb) {
3184 /* set the active key on the assoc */
3185 if (sctp_auth_setactivekey(stcb,
3186 scact->scact_keynumber)) {
3187 SCTP_LTRACE_ERR_RET(inp, NULL, NULL,
3188 SCTP_FROM_SCTP_USRREQ,
3189 EINVAL);
3190 error = EINVAL;
3191 }
3192 SCTP_TCB_UNLOCK(stcb);
3193 } else {
3194 /* set the active key on the endpoint */
3195 SCTP_INP_WLOCK(inp);
3196 if (sctp_auth_setactivekey_ep(inp,
3197 scact->scact_keynumber)) {
3198 SCTP_LTRACE_ERR_RET(inp, NULL, NULL,
3199 SCTP_FROM_SCTP_USRREQ,
3200 EINVAL);
3201 error = EINVAL;
3202 }
3203 SCTP_INP_WUNLOCK(inp);
3204 }
3205 break;
3206 }
3207 case SCTP_AUTH_DELETE_KEY:
3208 {
3209 struct sctp_authkeyid *scdel;
3210
3211 SCTP_CHECK_AND_CAST(scdel, optval, struct sctp_authkeyid,
3212 optsize);
3213 SCTP_FIND_STCB(inp, stcb, scdel->scact_assoc_id);
3214
3215 /* delete the key from the right place */
3216 if (stcb) {
3217 if (sctp_delete_sharedkey(stcb,
3218 scdel->scact_keynumber)) {
3219 SCTP_LTRACE_ERR_RET(inp, NULL, NULL,
3220 SCTP_FROM_SCTP_USRREQ,
3221 EINVAL);
3222 error = EINVAL;
3223 }
3224 SCTP_TCB_UNLOCK(stcb);
3225 } else {
3226 SCTP_INP_WLOCK(inp);
3227 if (sctp_delete_sharedkey_ep(inp,
3228 scdel->scact_keynumber)) {
3229 SCTP_LTRACE_ERR_RET(inp, NULL, NULL,
3230 SCTP_FROM_SCTP_USRREQ,
3231 EINVAL);
3232 error = EINVAL;
3233 }
3234 SCTP_INP_WUNLOCK(inp);
3235 }
3236 break;
3237 }
3238 case SCTP_AUTH_DEACTIVATE_KEY:
3239 {
3240 struct sctp_authkeyid *keyid;
3241
3242 SCTP_CHECK_AND_CAST(keyid, optval, struct sctp_authkeyid,
3243 optsize);
3244 SCTP_FIND_STCB(inp, stcb, keyid->scact_assoc_id);
3245
3246 /* deactivate the key from the right place */
3247 if (stcb) {
3248 if (sctp_deact_sharedkey(stcb,
3249 keyid->scact_keynumber)) {
3250 SCTP_LTRACE_ERR_RET(inp, NULL, NULL,
3251 SCTP_FROM_SCTP_USRREQ,
3252 EINVAL);
3253 error = EINVAL;
3254 }
3255 SCTP_TCB_UNLOCK(stcb);
3256 } else {
3257 SCTP_INP_WLOCK(inp);
3258 if (sctp_deact_sharedkey_ep(inp,
3259 keyid->scact_keynumber)) {
3260 SCTP_LTRACE_ERR_RET(inp, NULL, NULL,
3261 SCTP_FROM_SCTP_USRREQ,
3262 EINVAL);
3263 error = EINVAL;
3264 }
3265 SCTP_INP_WUNLOCK(inp);
3266 }
3267 break;
3268 }
3269
3270 case SCTP_RESET_STREAMS:
3271 {
3272 struct sctp_stream_reset *strrst;
3273 uint8_t send_in = 0, send_tsn = 0, send_out = 0,
3274 addstream = 0;
3275 uint16_t addstrmcnt = 0;
3276 int i;
3277
3278 SCTP_CHECK_AND_CAST(strrst, optval, struct sctp_stream_reset, optsize);
3279 SCTP_FIND_STCB(inp, stcb, strrst->strrst_assoc_id);
3280
3281 if (stcb == NULL) {
3282 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
3283 error = ENOENT;
3284 break;
3285 }
3286 if (stcb->asoc.peer_supports_strreset == 0) {
3287 /*
3288 * Peer does not support it, we return
3289 * protocol not supported since this is true
3290 * for this feature and this peer, not the
3291 * socket request in general.
3292 */
3293 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EPROTONOSUPPORT);
3294 error = EPROTONOSUPPORT;
3295 SCTP_TCB_UNLOCK(stcb);
3296 break;
3297 }
3298 if (stcb->asoc.stream_reset_outstanding) {
3299 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
3300 error = EALREADY;
3301 SCTP_TCB_UNLOCK(stcb);
3302 break;
3303 }
3304 if (strrst->strrst_flags == SCTP_RESET_LOCAL_RECV) {
3305 send_in = 1;
3306 } else if (strrst->strrst_flags == SCTP_RESET_LOCAL_SEND) {
3307 send_out = 1;
3308 } else if (strrst->strrst_flags == SCTP_RESET_BOTH) {
3309 send_in = 1;
3310 send_out = 1;
3311 } else if (strrst->strrst_flags == SCTP_RESET_TSN) {
3312 send_tsn = 1;
3313 } else if (strrst->strrst_flags == SCTP_RESET_ADD_STREAMS) {
3314 if (send_tsn ||
3315 send_in ||
3316 send_out) {
3317 /* We can't do that and add streams */
3318 error = EINVAL;
3319 goto skip_stuff;
3320 }
3321 if (stcb->asoc.stream_reset_outstanding) {
3322 error = EBUSY;
3323 goto skip_stuff;
3324 }
3325 addstream = 1;
3326 /* We allocate here */
3327 addstrmcnt = strrst->strrst_num_streams;
3328 if ((int)(addstrmcnt + stcb->asoc.streamoutcnt) > 0xffff) {
3329 /* You can't have more than 64k */
3330 error = EINVAL;
3331 goto skip_stuff;
3332 }
3333 if ((stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt) < addstrmcnt) {
3334 /* Need to allocate more */
3335 struct sctp_stream_out *oldstream;
3336 struct sctp_stream_queue_pending *sp;
3337 int removed;
3338
3339 oldstream = stcb->asoc.strmout;
3340 /* get some more */
3341 SCTP_MALLOC(stcb->asoc.strmout, struct sctp_stream_out *,
3342 ((stcb->asoc.streamoutcnt + addstrmcnt) * sizeof(struct sctp_stream_out)),
3343 SCTP_M_STRMO);
3344 if (stcb->asoc.strmout == NULL) {
3345 stcb->asoc.strmout = oldstream;
3346 error = ENOMEM;
3347 goto skip_stuff;
3348 }
3349 /*
3350 * Ok now we proceed with copying
3351 * the old out stuff and
3352 * initializing the new stuff.
3353 */
3354 SCTP_TCB_SEND_LOCK(stcb);
3355 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3356 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
3357 stcb->asoc.strmout[i].next_sequence_sent = oldstream[i].next_sequence_sent;
3358 stcb->asoc.strmout[i].last_msg_incomplete = oldstream[i].last_msg_incomplete;
3359 stcb->asoc.strmout[i].stream_no = i;
3360 if (oldstream[i].next_spoke.tqe_next) {
3361 sctp_remove_from_wheel(stcb, &stcb->asoc, &oldstream[i], 1);
3362 stcb->asoc.strmout[i].next_spoke.tqe_next = NULL;
3363 stcb->asoc.strmout[i].next_spoke.tqe_prev = NULL;
3364 removed = 1;
3365 } else {
3366 /* not on out wheel */
3367 stcb->asoc.strmout[i].next_spoke.tqe_next = NULL;
3368 stcb->asoc.strmout[i].next_spoke.tqe_prev = NULL;
3369 removed = 0;
3370 }
3371 /*
3372 * now anything on those
3373 * queues?
3374 */
3375 while (TAILQ_EMPTY(&oldstream[i].outqueue) == 0) {
3376 sp = TAILQ_FIRST(&oldstream[i].outqueue);
3377 TAILQ_REMOVE(&oldstream[i].outqueue, sp, next);
3378 TAILQ_INSERT_TAIL(&stcb->asoc.strmout[i].outqueue, sp, next);
3379 }
3380 /* Did we disrupt the wheel? */
3381 if (removed) {
3382 sctp_insert_on_wheel(stcb,
3383 &stcb->asoc,
3384 &stcb->asoc.strmout[i],
3385 1);
3386 }
3387 /*
3388 * Now move assoc pointers
3389 * too
3390 */
3391 if (stcb->asoc.last_out_stream == &oldstream[i]) {
3392 stcb->asoc.last_out_stream = &stcb->asoc.strmout[i];
3393 }
3394 if (stcb->asoc.locked_on_sending == &oldstream[i]) {
3395 stcb->asoc.locked_on_sending = &stcb->asoc.strmout[i];
3396 }
3397 }
3398 /* now the new streams */
3399 for (i = stcb->asoc.streamoutcnt; i < (stcb->asoc.streamoutcnt + addstrmcnt); i++) {
3400 stcb->asoc.strmout[i].next_sequence_sent = 0x0;
3401 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
3402 stcb->asoc.strmout[i].stream_no = i;
3403 stcb->asoc.strmout[i].last_msg_incomplete = 0;
3404 stcb->asoc.strmout[i].next_spoke.tqe_next = NULL;
3405 stcb->asoc.strmout[i].next_spoke.tqe_prev = NULL;
3406 }
3407 stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt + addstrmcnt;
 3408 					SCTP_FREE(oldstream, SCTP_M_STRMO);
 3409 					SCTP_TCB_SEND_UNLOCK(stcb);
 3410 				}
3411 goto skip_stuff;
3412 } else {
3413 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3414 error = EINVAL;
3415 SCTP_TCB_UNLOCK(stcb);
3416 break;
3417 }
3418 for (i = 0; i < strrst->strrst_num_streams; i++) {
 3419 				if ((send_in) &&
 3420 				    (strrst->strrst_list[i] > stcb->asoc.streamincnt)) {
3422 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3423 error = EINVAL;
3424 goto get_out;
3425 }
3426 if ((send_out) &&
3427 (strrst->strrst_list[i] > stcb->asoc.streamoutcnt)) {
3428 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3429 error = EINVAL;
3430 goto get_out;
3431 }
3432 }
3433 skip_stuff:
3434 if (error) {
3435 get_out:
3436 SCTP_TCB_UNLOCK(stcb);
3437 break;
3438 }
3439 error = sctp_send_str_reset_req(stcb, strrst->strrst_num_streams,
3440 strrst->strrst_list,
3441 send_out, (stcb->asoc.str_reset_seq_in - 3),
3442 send_in, send_tsn, addstream, addstrmcnt);
3443
3444 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_REQ, SCTP_SO_LOCKED);
3445 SCTP_TCB_UNLOCK(stcb);
3446 }
3447 break;
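	/*
	 * Illustrative userland sketch (not part of this file), assuming the
	 * variable-length uint16_t strrst_list layout implied by the checks
	 * above; it requests a reset of streams 0 and 1 in both directions
	 * ("fd" and "id" are placeholders).
	 *
	 *	struct sctp_stream_reset *rst;
	 *	size_t sz = sizeof(*rst) + 2 * sizeof(uint16_t);
	 *	rst = calloc(1, sz);
	 *	rst->strrst_assoc_id = id;
	 *	rst->strrst_flags = SCTP_RESET_BOTH;
	 *	rst->strrst_num_streams = 2;
	 *	rst->strrst_list[0] = 0;
	 *	rst->strrst_list[1] = 1;
	 *	setsockopt(fd, IPPROTO_SCTP, SCTP_RESET_STREAMS, rst, sz);
	 */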
3448
3449 case SCTP_CONNECT_X:
3450 if (optsize < (sizeof(int) + sizeof(struct sockaddr_in))) {
3451 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3452 error = EINVAL;
3453 break;
3454 }
3455 error = sctp_do_connect_x(so, inp, optval, optsize, p, 0);
3456 break;
3457
3458 case SCTP_CONNECT_X_DELAYED:
3459 if (optsize < (sizeof(int) + sizeof(struct sockaddr_in))) {
3460 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3461 error = EINVAL;
3462 break;
3463 }
3464 error = sctp_do_connect_x(so, inp, optval, optsize, p, 1);
3465 break;
3466
3467 case SCTP_CONNECT_X_COMPLETE:
3468 {
3469 struct sockaddr *sa;
3470 struct sctp_nets *net;
3471
3472 /* FIXME MT: check correct? */
3473 SCTP_CHECK_AND_CAST(sa, optval, struct sockaddr, optsize);
3474
3475 /* find tcb */
3476 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3477 SCTP_INP_RLOCK(inp);
3478 stcb = LIST_FIRST(&inp->sctp_asoc_list);
3479 if (stcb) {
3480 SCTP_TCB_LOCK(stcb);
3481 net = sctp_findnet(stcb, sa);
3482 }
3483 SCTP_INP_RUNLOCK(inp);
3484 } else {
3485 /*
3486 * We increment here since
 3487 			 * sctp_findassociation_ep_addr() will do a
3488 * decrement if it finds the stcb as long as
3489 * the locked tcb (last argument) is NOT a
3490 * TCB.. aka NULL.
3491 */
3492 SCTP_INP_INCR_REF(inp);
3493 stcb = sctp_findassociation_ep_addr(&inp, sa, &net, NULL, NULL);
3494 if (stcb == NULL) {
3495 SCTP_INP_DECR_REF(inp);
3496 }
3497 }
3498
3499 if (stcb == NULL) {
3500 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
3501 error = ENOENT;
3502 break;
3503 }
3504 if (stcb->asoc.delayed_connection == 1) {
3505 stcb->asoc.delayed_connection = 0;
3506 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
3507 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb,
3508 stcb->asoc.primary_destination,
3509 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_9);
3510 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
3511 } else {
3512 /*
3513 * already expired or did not use delayed
3514 * connectx
3515 */
3516 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
3517 error = EALREADY;
3518 }
3519 SCTP_TCB_UNLOCK(stcb);
3520 }
3521 break;
3522 case SCTP_MAX_BURST:
3523 {
3524 uint8_t *burst;
3525
3526 SCTP_CHECK_AND_CAST(burst, optval, uint8_t, optsize);
3527
3528 SCTP_INP_WLOCK(inp);
3529 if (*burst) {
3530 inp->sctp_ep.max_burst = *burst;
3531 }
3532 SCTP_INP_WUNLOCK(inp);
3533 }
3534 break;
3535 case SCTP_MAXSEG:
3536 {
3537 struct sctp_assoc_value *av;
3538 int ovh;
3539
3540 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
3541 SCTP_FIND_STCB(inp, stcb, av->assoc_id);
3542
3543 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
3544 ovh = SCTP_MED_OVERHEAD;
3545 } else {
3546 ovh = SCTP_MED_V4_OVERHEAD;
3547 }
3548 if (stcb) {
3549 if (av->assoc_value) {
3550 stcb->asoc.sctp_frag_point = (av->assoc_value + ovh);
3551 } else {
3552 stcb->asoc.sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT;
3553 }
3554 SCTP_TCB_UNLOCK(stcb);
3555 } else {
3556 SCTP_INP_WLOCK(inp);
3557 /*
3558 * FIXME MT: I think this is not in tune
3559 * with the API ID
3560 */
3561 if (av->assoc_value) {
3562 inp->sctp_frag_point = (av->assoc_value + ovh);
3563 } else {
3564 inp->sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT;
3565 }
3566 SCTP_INP_WUNLOCK(inp);
3567 }
3568 }
3569 break;
3570 case SCTP_EVENTS:
3571 {
3572 struct sctp_event_subscribe *events;
3573
3574 SCTP_CHECK_AND_CAST(events, optval, struct sctp_event_subscribe, optsize);
3575
3576 SCTP_INP_WLOCK(inp);
3577 if (events->sctp_data_io_event) {
3578 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT);
3579 } else {
3580 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT);
3581 }
3582
3583 if (events->sctp_association_event) {
3584 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT);
3585 } else {
3586 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT);
3587 }
3588
3589 if (events->sctp_address_event) {
3590 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT);
3591 } else {
3592 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPADDREVNT);
3593 }
3594
3595 if (events->sctp_send_failure_event) {
3596 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT);
3597 } else {
3598 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT);
3599 }
3600
3601 if (events->sctp_peer_error_event) {
3602 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR);
3603 } else {
3604 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPEERERR);
3605 }
3606
3607 if (events->sctp_shutdown_event) {
3608 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT);
3609 } else {
3610 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT);
3611 }
3612
3613 if (events->sctp_partial_delivery_event) {
3614 sctp_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT);
3615 } else {
3616 sctp_feature_off(inp, SCTP_PCB_FLAGS_PDAPIEVNT);
3617 }
3618
3619 if (events->sctp_adaptation_layer_event) {
3620 sctp_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT);
3621 } else {
3622 sctp_feature_off(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT);
3623 }
3624
3625 if (events->sctp_authentication_event) {
3626 sctp_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT);
3627 } else {
3628 sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTHEVNT);
3629 }
3630
3631 if (events->sctp_sender_dry_event) {
3632 sctp_feature_on(inp, SCTP_PCB_FLAGS_DRYEVNT);
3633 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3634 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3635 stcb = LIST_FIRST(&inp->sctp_asoc_list);
3636 if (stcb) {
3637 SCTP_TCB_LOCK(stcb);
3638 }
3639 if (stcb &&
3640 TAILQ_EMPTY(&stcb->asoc.send_queue) &&
3641 TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
3642 (stcb->asoc.stream_queue_cnt == 0)) {
3643 sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_LOCKED);
3644 }
3645 if (stcb) {
3646 SCTP_TCB_UNLOCK(stcb);
3647 }
3648 }
3649 } else {
3650 sctp_feature_off(inp, SCTP_PCB_FLAGS_DRYEVNT);
3651 }
3652
2012 SCTP_INP_RUNLOCK(inp);
2013 *optsize = sizeof(struct sctp_event_subscribe);
2014 }
2015 break;
2016
2017 case SCTP_ADAPTATION_LAYER:
2018 {
2019 uint32_t *value;
2020
2021 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
2022
2023 SCTP_INP_RLOCK(inp);
2024 *value = inp->sctp_ep.adaptation_layer_indicator;
2025 SCTP_INP_RUNLOCK(inp);
2026 *optsize = sizeof(uint32_t);
2027 }
2028 break;
2029 case SCTP_SET_INITIAL_DBG_SEQ:
2030 {
2031 uint32_t *value;
2032
2033 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
2034 SCTP_INP_RLOCK(inp);
2035 *value = inp->sctp_ep.initial_sequence_debug;
2036 SCTP_INP_RUNLOCK(inp);
2037 *optsize = sizeof(uint32_t);
2038 }
2039 break;
2040 case SCTP_GET_LOCAL_ADDR_SIZE:
2041 {
2042 uint32_t *value;
2043
2044 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
2045 SCTP_INP_RLOCK(inp);
2046 *value = sctp_count_max_addresses(inp);
2047 SCTP_INP_RUNLOCK(inp);
2048 *optsize = sizeof(uint32_t);
2049 }
2050 break;
2051 case SCTP_GET_REMOTE_ADDR_SIZE:
2052 {
2053 uint32_t *value;
2054 size_t size;
2055 struct sctp_nets *net;
2056
2057 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
2058 /* FIXME MT: change to sctp_assoc_value? */
2059 SCTP_FIND_STCB(inp, stcb, (sctp_assoc_t) * value);
2060
2061 if (stcb) {
2062 size = 0;
2063 /* Count the sizes */
2064 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
2065 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) ||
2066 (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) {
2067 size += sizeof(struct sockaddr_in6);
2068 } else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
2069 size += sizeof(struct sockaddr_in);
2070 } else {
2071 /* huh */
2072 break;
2073 }
2074 }
2075 SCTP_TCB_UNLOCK(stcb);
2076 *value = (uint32_t) size;
2077 } else {
2078 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
2079 error = ENOTCONN;
2080 }
2081 *optsize = sizeof(uint32_t);
2082 }
2083 break;
2084 case SCTP_GET_PEER_ADDRESSES:
 2085 /*
 2086 * Get the address information: the caller passes in an
 2087 * array for us to fill, and we pack the peer addresses into it.
 2088 */
2089 {
2090 size_t cpsz, left;
2091 struct sockaddr_storage *sas;
2092 struct sctp_nets *net;
2093 struct sctp_getaddresses *saddr;
2094
2095 SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize);
2096 SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id);
2097
2098 if (stcb) {
2099 left = (*optsize) - sizeof(struct sctp_getaddresses);
2100 *optsize = sizeof(struct sctp_getaddresses);
2101 sas = (struct sockaddr_storage *)&saddr->addr[0];
2102
2103 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
2104 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) ||
2105 (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) {
2106 cpsz = sizeof(struct sockaddr_in6);
2107 } else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
2108 cpsz = sizeof(struct sockaddr_in);
2109 } else {
2110 /* huh */
2111 break;
2112 }
2113 if (left < cpsz) {
2114 /* not enough room. */
2115 break;
2116 }
2117#ifdef INET6
2118 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
2119 (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET)) {
2120 /* Must map the address */
2121 in6_sin_2_v4mapsin6((struct sockaddr_in *)&net->ro._l_addr,
2122 (struct sockaddr_in6 *)sas);
2123 } else {
2124#endif
2125 memcpy(sas, &net->ro._l_addr, cpsz);
2126#ifdef INET6
2127 }
2128#endif
2129 ((struct sockaddr_in *)sas)->sin_port = stcb->rport;
2130
2131 sas = (struct sockaddr_storage *)((caddr_t)sas + cpsz);
2132 left -= cpsz;
2133 *optsize += cpsz;
2134 }
2135 SCTP_TCB_UNLOCK(stcb);
2136 } else {
2137 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
2138 error = ENOENT;
2139 }
2140 }
2141 break;
2142 case SCTP_GET_LOCAL_ADDRESSES:
2143 {
2144 size_t limit, actual;
2145 struct sockaddr_storage *sas;
2146 struct sctp_getaddresses *saddr;
2147
2148 SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize);
2149 SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id);
2150
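 /*
 * Fill the caller-supplied buffer with the locally bound
 * addresses; "limit" is the room left after the association
 * id field.
 */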
2151 sas = (struct sockaddr_storage *)&saddr->addr[0];
2152 limit = *optsize - sizeof(sctp_assoc_t);
2153 actual = sctp_fill_up_addresses(inp, stcb, limit, sas);
2154 if (stcb) {
2155 SCTP_TCB_UNLOCK(stcb);
2156 }
2157 *optsize = sizeof(struct sockaddr_storage) + actual;
2158 }
2159 break;
2160 case SCTP_PEER_ADDR_PARAMS:
2161 {
2162 struct sctp_paddrparams *paddrp;
2163 struct sctp_nets *net;
2164
2165 SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, *optsize);
2166 SCTP_FIND_STCB(inp, stcb, paddrp->spp_assoc_id);
2167
2168 net = NULL;
2169 if (stcb) {
2170 net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address);
2171 } else {
 2172 /*
 2173 * We increment the refcount here since
 2174 * sctp_findassociation_ep_addr() will do a
 2175 * decrement if it finds the stcb, as long as
 2176 * the locked tcb (last argument) is NOT a
 2177 * TCB, i.e. NULL.
 2178 */
2179 SCTP_INP_INCR_REF(inp);
2180 stcb = sctp_findassociation_ep_addr(&inp, (struct sockaddr *)&paddrp->spp_address, &net, NULL, NULL);
2181 if (stcb == NULL) {
2182 SCTP_INP_DECR_REF(inp);
2183 }
2184 }
2185 if (stcb && (net == NULL)) {
2186 struct sockaddr *sa;
2187
2188 sa = (struct sockaddr *)&paddrp->spp_address;
2189 if (sa->sa_family == AF_INET) {
2190 struct sockaddr_in *sin;
2191
2192 sin = (struct sockaddr_in *)sa;
2193 if (sin->sin_addr.s_addr) {
2194 error = EINVAL;
2195 SCTP_TCB_UNLOCK(stcb);
2196 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
2197 break;
2198 }
2199 } else if (sa->sa_family == AF_INET6) {
2200 struct sockaddr_in6 *sin6;
2201
2202 sin6 = (struct sockaddr_in6 *)sa;
2203 if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
2204 error = EINVAL;
2205 SCTP_TCB_UNLOCK(stcb);
2206 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
2207 break;
2208 }
2209 } else {
2210 error = EAFNOSUPPORT;
2211 SCTP_TCB_UNLOCK(stcb);
2212 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
2213 break;
2214 }
2215 }
2216 if (stcb) {
 2217 /* Applies to the specific association */
2218 paddrp->spp_flags = 0;
2219 if (net) {
2220 int ovh;
2221
2222 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2223 ovh = SCTP_MED_OVERHEAD;
2224 } else {
2225 ovh = SCTP_MED_V4_OVERHEAD;
2226 }
2227
2228
2229 paddrp->spp_pathmaxrxt = net->failure_threshold;
2230 paddrp->spp_pathmtu = net->mtu - ovh;
2231 /* get flags for HB */
2232 if (net->dest_state & SCTP_ADDR_NOHB)
2233 paddrp->spp_flags |= SPP_HB_DISABLE;
2234 else
2235 paddrp->spp_flags |= SPP_HB_ENABLE;
2236 /* get flags for PMTU */
2237 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
2238 paddrp->spp_flags |= SPP_PMTUD_ENABLE;
2239 } else {
2240 paddrp->spp_flags |= SPP_PMTUD_DISABLE;
2241 }
2242#ifdef INET
2243 if (net->ro._l_addr.sin.sin_family == AF_INET) {
2244 paddrp->spp_ipv4_tos = net->tos_flowlabel & 0x000000fc;
2245 paddrp->spp_flags |= SPP_IPV4_TOS;
2246 }
2247#endif
2248#ifdef INET6
2249 if (net->ro._l_addr.sin6.sin6_family == AF_INET6) {
2250 paddrp->spp_ipv6_flowlabel = net->tos_flowlabel;
2251 paddrp->spp_flags |= SPP_IPV6_FLOWLABEL;
2252 }
2253#endif
2254 } else {
2255 /*
2256 * No destination so return default
2257 * value
2258 */
2259 int cnt = 0;
2260
2261 paddrp->spp_pathmaxrxt = stcb->asoc.def_net_failure;
2262 paddrp->spp_pathmtu = sctp_get_frag_point(stcb, &stcb->asoc);
2263#ifdef INET
2264 paddrp->spp_ipv4_tos = stcb->asoc.default_tos & 0x000000fc;
2265 paddrp->spp_flags |= SPP_IPV4_TOS;
2266#endif
2267#ifdef INET6
2268 paddrp->spp_ipv6_flowlabel = stcb->asoc.default_flowlabel;
2269 paddrp->spp_flags |= SPP_IPV6_FLOWLABEL;
2270#endif
2271 /* default settings should be these */
2272 if (stcb->asoc.hb_is_disabled == 0) {
2273 paddrp->spp_flags |= SPP_HB_ENABLE;
2274 } else {
2275 paddrp->spp_flags |= SPP_HB_DISABLE;
2276 }
2277 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
2278 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
2279 cnt++;
2280 }
2281 }
2282 if (cnt) {
2283 paddrp->spp_flags |= SPP_PMTUD_ENABLE;
2284 }
2285 }
2286 paddrp->spp_hbinterval = stcb->asoc.heart_beat_delay;
2287 paddrp->spp_assoc_id = sctp_get_associd(stcb);
2288 SCTP_TCB_UNLOCK(stcb);
2289 } else {
2290 /* Use endpoint defaults */
2291 SCTP_INP_RLOCK(inp);
2292 paddrp->spp_pathmaxrxt = inp->sctp_ep.def_net_failure;
2293 paddrp->spp_hbinterval = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
2294 paddrp->spp_assoc_id = (sctp_assoc_t) 0;
2295 /* get inp's default */
2296#ifdef INET
2297 paddrp->spp_ipv4_tos = inp->ip_inp.inp.inp_ip_tos;
2298 paddrp->spp_flags |= SPP_IPV4_TOS;
2299#endif
2300#ifdef INET6
2301 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2302 paddrp->spp_ipv6_flowlabel = ((struct in6pcb *)inp)->in6p_flowinfo;
2303 paddrp->spp_flags |= SPP_IPV6_FLOWLABEL;
2304 }
2305#endif
2306 /* can't return this */
2307 paddrp->spp_pathmtu = 0;
2308
2309 /* default behavior, no stcb */
2310 paddrp->spp_flags = SPP_PMTUD_ENABLE;
2311
2312 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT)) {
2313 paddrp->spp_flags |= SPP_HB_ENABLE;
2314 } else {
2315 paddrp->spp_flags |= SPP_HB_DISABLE;
2316 }
2317 SCTP_INP_RUNLOCK(inp);
2318 }
2319 *optsize = sizeof(struct sctp_paddrparams);
2320 }
2321 break;
2322 case SCTP_GET_PEER_ADDR_INFO:
2323 {
2324 struct sctp_paddrinfo *paddri;
2325 struct sctp_nets *net;
2326
2327 SCTP_CHECK_AND_CAST(paddri, optval, struct sctp_paddrinfo, *optsize);
2328 SCTP_FIND_STCB(inp, stcb, paddri->spinfo_assoc_id);
2329
2330 net = NULL;
2331 if (stcb) {
2332 net = sctp_findnet(stcb, (struct sockaddr *)&paddri->spinfo_address);
2333 } else {
 2334 /*
 2335 * We increment the refcount here since
 2336 * sctp_findassociation_ep_addr() will do a
 2337 * decrement if it finds the stcb, as long as
 2338 * the locked tcb (last argument) is NOT a
 2339 * TCB, i.e. NULL.
 2340 */
2341 SCTP_INP_INCR_REF(inp);
2342 stcb = sctp_findassociation_ep_addr(&inp, (struct sockaddr *)&paddri->spinfo_address, &net, NULL, NULL);
2343 if (stcb == NULL) {
2344 SCTP_INP_DECR_REF(inp);
2345 }
2346 }
2347
2348 if ((stcb) && (net)) {
2349 paddri->spinfo_state = net->dest_state & (SCTP_REACHABLE_MASK | SCTP_ADDR_NOHB);
2350 paddri->spinfo_cwnd = net->cwnd;
2351 paddri->spinfo_srtt = ((net->lastsa >> 2) + net->lastsv) >> 1;
2352 paddri->spinfo_rto = net->RTO;
2353 paddri->spinfo_assoc_id = sctp_get_associd(stcb);
2354 SCTP_TCB_UNLOCK(stcb);
2355 } else {
2356 if (stcb) {
2357 SCTP_TCB_UNLOCK(stcb);
2358 }
2359 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
2360 error = ENOENT;
2361 }
2362 *optsize = sizeof(struct sctp_paddrinfo);
2363 }
2364 break;
2365 case SCTP_PCB_STATUS:
2366 {
2367 struct sctp_pcbinfo *spcb;
2368
2369 SCTP_CHECK_AND_CAST(spcb, optval, struct sctp_pcbinfo, *optsize);
2370 sctp_fill_pcbinfo(spcb);
2371 *optsize = sizeof(struct sctp_pcbinfo);
2372 }
2373 break;
2374
2375 case SCTP_STATUS:
2376 {
2377 struct sctp_nets *net;
2378 struct sctp_status *sstat;
2379
2380 SCTP_CHECK_AND_CAST(sstat, optval, struct sctp_status, *optsize);
2381 SCTP_FIND_STCB(inp, stcb, sstat->sstat_assoc_id);
2382
2383 if (stcb == NULL) {
 2384 error = EINVAL;
 2385 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
2386 break;
2387 }
 2388 /*
 2389 * I think passing the state is fine since
 2390 * sctp_constants.h will be available to
 2391 * userland.
 2392 */
2393 sstat->sstat_state = stcb->asoc.state;
2394 sstat->sstat_assoc_id = sctp_get_associd(stcb);
2395 sstat->sstat_rwnd = stcb->asoc.peers_rwnd;
2396 sstat->sstat_unackdata = stcb->asoc.sent_queue_cnt;
2397 /*
2398 * We can't include chunks that have been passed to
2399 * the socket layer. Only things in queue.
2400 */
2401 sstat->sstat_penddata = (stcb->asoc.cnt_on_reasm_queue +
2402 stcb->asoc.cnt_on_all_streams);
2403
2404
2405 sstat->sstat_instrms = stcb->asoc.streamincnt;
2406 sstat->sstat_outstrms = stcb->asoc.streamoutcnt;
2407 sstat->sstat_fragmentation_point = sctp_get_frag_point(stcb, &stcb->asoc);
2408 memcpy(&sstat->sstat_primary.spinfo_address,
2409 &stcb->asoc.primary_destination->ro._l_addr,
2410 ((struct sockaddr *)(&stcb->asoc.primary_destination->ro._l_addr))->sa_len);
2411 net = stcb->asoc.primary_destination;
2412 ((struct sockaddr_in *)&sstat->sstat_primary.spinfo_address)->sin_port = stcb->rport;
2413 /*
2414 * Again the user can get info from sctp_constants.h
2415 * for what the state of the network is.
2416 */
2417 sstat->sstat_primary.spinfo_state = net->dest_state & SCTP_REACHABLE_MASK;
2418 sstat->sstat_primary.spinfo_cwnd = net->cwnd;
2419 sstat->sstat_primary.spinfo_srtt = net->lastsa;
2420 sstat->sstat_primary.spinfo_rto = net->RTO;
2421 sstat->sstat_primary.spinfo_mtu = net->mtu;
2422 sstat->sstat_primary.spinfo_assoc_id = sctp_get_associd(stcb);
2423 SCTP_TCB_UNLOCK(stcb);
2424 *optsize = sizeof(*sstat);
2425 }
2426 break;
2427 case SCTP_RTOINFO:
2428 {
2429 struct sctp_rtoinfo *srto;
2430
2431 SCTP_CHECK_AND_CAST(srto, optval, struct sctp_rtoinfo, *optsize);
2432 SCTP_FIND_STCB(inp, stcb, srto->srto_assoc_id);
2433
2434 if (stcb) {
2435 srto->srto_initial = stcb->asoc.initial_rto;
2436 srto->srto_max = stcb->asoc.maxrto;
2437 srto->srto_min = stcb->asoc.minrto;
2438 SCTP_TCB_UNLOCK(stcb);
2439 } else {
2440 SCTP_INP_RLOCK(inp);
2441 srto->srto_initial = inp->sctp_ep.initial_rto;
2442 srto->srto_max = inp->sctp_ep.sctp_maxrto;
2443 srto->srto_min = inp->sctp_ep.sctp_minrto;
2444 SCTP_INP_RUNLOCK(inp);
2445 }
2446 *optsize = sizeof(*srto);
2447 }
2448 break;
2449 case SCTP_ASSOCINFO:
2450 {
2451 struct sctp_assocparams *sasoc;
2452 uint32_t oldval;
2453
2454 SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, *optsize);
2455 SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id);
2456
2457 if (stcb) {
2458 oldval = sasoc->sasoc_cookie_life;
2459 sasoc->sasoc_cookie_life = TICKS_TO_MSEC(stcb->asoc.cookie_life);
2460 sasoc->sasoc_asocmaxrxt = stcb->asoc.max_send_times;
2461 sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets;
2462 sasoc->sasoc_peer_rwnd = stcb->asoc.peers_rwnd;
2463 sasoc->sasoc_local_rwnd = stcb->asoc.my_rwnd;
2464 SCTP_TCB_UNLOCK(stcb);
2465 } else {
2466 SCTP_INP_RLOCK(inp);
2467 sasoc->sasoc_cookie_life = TICKS_TO_MSEC(inp->sctp_ep.def_cookie_life);
2468 sasoc->sasoc_asocmaxrxt = inp->sctp_ep.max_send_times;
2469 sasoc->sasoc_number_peer_destinations = 0;
2470 sasoc->sasoc_peer_rwnd = 0;
2471 sasoc->sasoc_local_rwnd = sbspace(&inp->sctp_socket->so_rcv);
2472 SCTP_INP_RUNLOCK(inp);
2473 }
2474 *optsize = sizeof(*sasoc);
2475 }
2476 break;
2477 case SCTP_DEFAULT_SEND_PARAM:
2478 {
2479 struct sctp_sndrcvinfo *s_info;
2480
2481 SCTP_CHECK_AND_CAST(s_info, optval, struct sctp_sndrcvinfo, *optsize);
2482 SCTP_FIND_STCB(inp, stcb, s_info->sinfo_assoc_id);
2483
2484 if (stcb) {
2485 memcpy(s_info, &stcb->asoc.def_send, sizeof(stcb->asoc.def_send));
2486 SCTP_TCB_UNLOCK(stcb);
2487 } else {
2488 SCTP_INP_RLOCK(inp);
2489 memcpy(s_info, &inp->def_send, sizeof(inp->def_send));
2490 SCTP_INP_RUNLOCK(inp);
2491 }
2492 *optsize = sizeof(*s_info);
2493 }
2494 break;
2495 case SCTP_INITMSG:
2496 {
2497 struct sctp_initmsg *sinit;
2498
2499 SCTP_CHECK_AND_CAST(sinit, optval, struct sctp_initmsg, *optsize);
2500 SCTP_INP_RLOCK(inp);
2501 sinit->sinit_num_ostreams = inp->sctp_ep.pre_open_stream_count;
2502 sinit->sinit_max_instreams = inp->sctp_ep.max_open_streams_intome;
2503 sinit->sinit_max_attempts = inp->sctp_ep.max_init_times;
2504 sinit->sinit_max_init_timeo = inp->sctp_ep.initial_init_rto_max;
2505 SCTP_INP_RUNLOCK(inp);
2506 *optsize = sizeof(*sinit);
2507 }
2508 break;
2509 case SCTP_PRIMARY_ADDR:
2510 /* we allow a "get" operation on this */
2511 {
2512 struct sctp_setprim *ssp;
2513
2514 SCTP_CHECK_AND_CAST(ssp, optval, struct sctp_setprim, *optsize);
2515 SCTP_FIND_STCB(inp, stcb, ssp->ssp_assoc_id);
2516
2517 if (stcb) {
2518 /* simply copy out the sockaddr_storage... */
2519 int len;
2520
2521 len = *optsize;
2522 if (len > stcb->asoc.primary_destination->ro._l_addr.sa.sa_len)
2523 len = stcb->asoc.primary_destination->ro._l_addr.sa.sa_len;
2524
2525 memcpy(&ssp->ssp_addr,
2526 &stcb->asoc.primary_destination->ro._l_addr,
2527 len);
2528 SCTP_TCB_UNLOCK(stcb);
2529 } else {
 2530 error = EINVAL;
 2531 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
2532 }
2533 *optsize = sizeof(*ssp);
2534 }
2535 break;
2536
2537 case SCTP_HMAC_IDENT:
2538 {
2539 struct sctp_hmacalgo *shmac;
2540 sctp_hmaclist_t *hmaclist;
2541 uint32_t size;
2542 int i;
2543
2544 SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, *optsize);
2545
2546 SCTP_INP_RLOCK(inp);
2547 hmaclist = inp->sctp_ep.local_hmacs;
2548 if (hmaclist == NULL) {
2549 /* no HMACs to return */
2550 *optsize = sizeof(*shmac);
2551 SCTP_INP_RUNLOCK(inp);
2552 break;
2553 }
2554 /* is there room for all of the hmac ids? */
2555 size = sizeof(*shmac) + (hmaclist->num_algo *
2556 sizeof(shmac->shmac_idents[0]));
2557 if ((size_t)(*optsize) < size) {
 2558 error = EINVAL;
 2559 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
2560 SCTP_INP_RUNLOCK(inp);
2561 break;
2562 }
2563 /* copy in the list */
2564 shmac->shmac_number_of_idents = hmaclist->num_algo;
2565 for (i = 0; i < hmaclist->num_algo; i++) {
2566 shmac->shmac_idents[i] = hmaclist->hmac[i];
2567 }
2568 SCTP_INP_RUNLOCK(inp);
2569 *optsize = size;
2570 break;
2571 }
2572 case SCTP_AUTH_ACTIVE_KEY:
2573 {
2574 struct sctp_authkeyid *scact;
2575
2576 SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid, *optsize);
2577 SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id);
2578
2579 if (stcb) {
2580 /* get the active key on the assoc */
2581 scact->scact_keynumber = stcb->asoc.authinfo.active_keyid;
2582 SCTP_TCB_UNLOCK(stcb);
2583 } else {
2584 /* get the endpoint active key */
2585 SCTP_INP_RLOCK(inp);
2586 scact->scact_keynumber = inp->sctp_ep.default_keyid;
2587 SCTP_INP_RUNLOCK(inp);
2588 }
2589 *optsize = sizeof(*scact);
2590 break;
2591 }
2592 case SCTP_LOCAL_AUTH_CHUNKS:
2593 {
2594 struct sctp_authchunks *sac;
2595 sctp_auth_chklist_t *chklist = NULL;
2596 size_t size = 0;
2597
2598 SCTP_CHECK_AND_CAST(sac, optval, struct sctp_authchunks, *optsize);
2599 SCTP_FIND_STCB(inp, stcb, sac->gauth_assoc_id);
2600
2601 if (stcb) {
2602 /* get off the assoc */
2603 chklist = stcb->asoc.local_auth_chunks;
2604 /* is there enough space? */
2605 size = sctp_auth_get_chklist_size(chklist);
2606 if (*optsize < (sizeof(struct sctp_authchunks) + size)) {
2607 error = EINVAL;
2608 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
2609 } else {
2610 /* copy in the chunks */
2611 (void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks);
2612 }
2613 SCTP_TCB_UNLOCK(stcb);
2614 } else {
2615 /* get off the endpoint */
2616 SCTP_INP_RLOCK(inp);
2617 chklist = inp->sctp_ep.local_auth_chunks;
2618 /* is there enough space? */
2619 size = sctp_auth_get_chklist_size(chklist);
2620 if (*optsize < (sizeof(struct sctp_authchunks) + size)) {
2621 error = EINVAL;
2622 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
2623 } else {
2624 /* copy in the chunks */
2625 (void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks);
2626 }
2627 SCTP_INP_RUNLOCK(inp);
2628 }
2629 *optsize = sizeof(struct sctp_authchunks) + size;
2630 break;
2631 }
2632 case SCTP_PEER_AUTH_CHUNKS:
2633 {
2634 struct sctp_authchunks *sac;
2635 sctp_auth_chklist_t *chklist = NULL;
2636 size_t size = 0;
2637
2638 SCTP_CHECK_AND_CAST(sac, optval, struct sctp_authchunks, *optsize);
2639 SCTP_FIND_STCB(inp, stcb, sac->gauth_assoc_id);
2640
2641 if (stcb) {
2642 /* get off the assoc */
2643 chklist = stcb->asoc.peer_auth_chunks;
2644 /* is there enough space? */
2645 size = sctp_auth_get_chklist_size(chklist);
2646 if (*optsize < (sizeof(struct sctp_authchunks) + size)) {
2647 error = EINVAL;
2648 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
2649 } else {
2650 /* copy in the chunks */
2651 (void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks);
2652 }
2653 SCTP_TCB_UNLOCK(stcb);
2654 } else {
2655 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
2656 error = ENOENT;
2657 }
2658 *optsize = sizeof(struct sctp_authchunks) + size;
2659 break;
2660 }
2661
2662
2663 default:
2664 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
2665 error = ENOPROTOOPT;
2666 *optsize = 0;
2667 break;
2668 } /* end switch (sopt->sopt_name) */
2669 return (error);
2670}
2671
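/*
 * sctp_setopt() applies a writable SCTP socket option.  Depending on the
 * option and the association id supplied, the value is set on a specific
 * association (stcb), on a specific destination (net), or on the endpoint
 * defaults (inp).
 */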
2672static int
2673sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
2674 void *p)
2675{
2676 int error, set_opt;
2677 uint32_t *mopt;
2678 struct sctp_tcb *stcb = NULL;
2679 struct sctp_inpcb *inp = NULL;
2680 uint32_t vrf_id;
2681
2682 if (optval == NULL) {
2683 SCTP_PRINTF("optval is NULL\n");
2684 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2685 return (EINVAL);
2686 }
2687 inp = (struct sctp_inpcb *)so->so_pcb;
 2688 if (inp == NULL) {
2689 SCTP_PRINTF("inp is NULL?\n");
2690 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2691 return EINVAL;
2692 }
2693 vrf_id = inp->def_vrf_id;
2694
2695 error = 0;
2696 switch (optname) {
2697 case SCTP_NODELAY:
2698 case SCTP_AUTOCLOSE:
2699 case SCTP_AUTO_ASCONF:
2700 case SCTP_EXPLICIT_EOR:
2701 case SCTP_DISABLE_FRAGMENTS:
2702 case SCTP_USE_EXT_RCVINFO:
2703 case SCTP_I_WANT_MAPPED_V4_ADDR:
2704 /* copy in the option value */
2705 SCTP_CHECK_AND_CAST(mopt, optval, uint32_t, optsize);
2706 set_opt = 0;
2707 if (error)
2708 break;
2709 switch (optname) {
2710 case SCTP_DISABLE_FRAGMENTS:
2711 set_opt = SCTP_PCB_FLAGS_NO_FRAGMENT;
2712 break;
2713 case SCTP_AUTO_ASCONF:
2714 /*
2715 * NOTE: we don't really support this flag
2716 */
2717 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
2718 /* only valid for bound all sockets */
2719 set_opt = SCTP_PCB_FLAGS_AUTO_ASCONF;
2720 } else {
2721 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2722 return (EINVAL);
2723 }
2724 break;
2725 case SCTP_EXPLICIT_EOR:
2726 set_opt = SCTP_PCB_FLAGS_EXPLICIT_EOR;
2727 break;
2728 case SCTP_USE_EXT_RCVINFO:
2729 set_opt = SCTP_PCB_FLAGS_EXT_RCVINFO;
2730 break;
2731 case SCTP_I_WANT_MAPPED_V4_ADDR:
2732 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2733 set_opt = SCTP_PCB_FLAGS_NEEDS_MAPPED_V4;
2734 } else {
2735 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2736 return (EINVAL);
2737 }
2738 break;
2739 case SCTP_NODELAY:
2740 set_opt = SCTP_PCB_FLAGS_NODELAY;
2741 break;
2742 case SCTP_AUTOCLOSE:
2743 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2744 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
2745 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2746 return (EINVAL);
2747 }
2748 set_opt = SCTP_PCB_FLAGS_AUTOCLOSE;
 2749 /*
 2750 * The value is given in seconds and stored in ticks. Note
 2751 * this does not affect existing associations, only new ones.
 2752 */
2753 inp->sctp_ep.auto_close_time = SEC_TO_TICKS(*mopt);
2754 break;
2755 }
2756 SCTP_INP_WLOCK(inp);
2757 if (*mopt != 0) {
2758 sctp_feature_on(inp, set_opt);
2759 } else {
2760 sctp_feature_off(inp, set_opt);
2761 }
2762 SCTP_INP_WUNLOCK(inp);
2763 break;
2764 case SCTP_REUSE_PORT:
2765 {
2766 SCTP_CHECK_AND_CAST(mopt, optval, uint32_t, optsize);
2767 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == 0) {
2768 /* Can't set it after we are bound */
2769 error = EINVAL;
2770 break;
2771 }
2772 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) {
2773 /* Can't do this for a 1-m socket */
2774 error = EINVAL;
2775 break;
2776 }
 2777 if (*mopt != 0)
2778 sctp_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE);
2779 else
2780 sctp_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE);
2781 }
2782 break;
2783 case SCTP_PARTIAL_DELIVERY_POINT:
2784 {
2785 uint32_t *value;
2786
2787 SCTP_CHECK_AND_CAST(value, optval, uint32_t, optsize);
2788 if (*value > SCTP_SB_LIMIT_RCV(so)) {
2789 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2790 error = EINVAL;
2791 break;
2792 }
2793 inp->partial_delivery_point = *value;
2794 }
2795 break;
2796 case SCTP_FRAGMENT_INTERLEAVE:
2797 /* not yet until we re-write sctp_recvmsg() */
2798 {
2799 uint32_t *level;
2800
2801 SCTP_CHECK_AND_CAST(level, optval, uint32_t, optsize);
2802 if (*level == SCTP_FRAG_LEVEL_2) {
2803 sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
2804 sctp_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
2805 } else if (*level == SCTP_FRAG_LEVEL_1) {
2806 sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
2807 sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
2808 } else if (*level == SCTP_FRAG_LEVEL_0) {
2809 sctp_feature_off(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
2810 sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
2811
2812 } else {
2813 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2814 error = EINVAL;
2815 }
2816 }
2817 break;
2818 case SCTP_CMT_ON_OFF:
2819 {
2820 struct sctp_assoc_value *av;
2821
2822 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
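 /*
 * CMT can only be changed on an existing association, and only
 * when the sctp_cmt_on_off sysctl has enabled it.
 */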
2823 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
2824 SCTP_FIND_STCB(inp, stcb, av->assoc_id);
2825 if (stcb) {
2826 stcb->asoc.sctp_cmt_on_off = (uint8_t) av->assoc_value;
2827 SCTP_TCB_UNLOCK(stcb);
2828 } else {
2829 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
2830 error = ENOTCONN;
2831 }
2832 } else {
2833 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
2834 error = ENOPROTOOPT;
2835 }
2836 }
2837 break;
2838 /* EY nr_sack_on_off socket option */
2839 case SCTP_NR_SACK_ON_OFF:
2840 {
2841 struct sctp_assoc_value *av;
2842
2843 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
2844 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off)) {
2845 SCTP_FIND_STCB(inp, stcb, av->assoc_id);
2846 if (stcb) {
2847 stcb->asoc.sctp_nr_sack_on_off = (uint8_t) av->assoc_value;
2848 SCTP_TCB_UNLOCK(stcb);
2849 } else {
2850 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
2851 error = ENOTCONN;
2852 }
2853 } else {
2854 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
2855 error = ENOPROTOOPT;
2856 }
2857 }
2858 break;
2859 /* JRS - Set socket option for pluggable congestion control */
2860 case SCTP_PLUGGABLE_CC:
2861 {
2862 struct sctp_assoc_value *av;
2863
2864 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
2865 SCTP_FIND_STCB(inp, stcb, av->assoc_id);
2866 if (stcb) {
2867 switch (av->assoc_value) {
2868 /*
2869 * JRS - Standard TCP congestion
2870 * control
2871 */
2872 case SCTP_CC_RFC2581:
2873 {
2874 stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
2875 stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
2876 stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
2877 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
2878 stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
2879 stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
2880 stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
2881 stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
2882 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
2883 SCTP_TCB_UNLOCK(stcb);
2884 break;
2885 }
2886 /*
2887 * JRS - High Speed TCP congestion
2888 * control (Floyd)
2889 */
2890 case SCTP_CC_HSTCP:
2891 {
2892 stcb->asoc.congestion_control_module = SCTP_CC_HSTCP;
2893 stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
2894 stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_hs_cwnd_update_after_sack;
2895 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_hs_cwnd_update_after_fr;
2896 stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
2897 stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
2898 stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
2899 stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
2900 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
2901 SCTP_TCB_UNLOCK(stcb);
2902 break;
2903 }
2904 /* JRS - HTCP congestion control */
2905 case SCTP_CC_HTCP:
2906 {
2907 stcb->asoc.congestion_control_module = SCTP_CC_HTCP;
2908 stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_htcp_set_initial_cc_param;
2909 stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_htcp_cwnd_update_after_sack;
2910 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_htcp_cwnd_update_after_fr;
2911 stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_htcp_cwnd_update_after_timeout;
2912 stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_htcp_cwnd_update_after_ecn_echo;
2913 stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
2914 stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
2915 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_htcp_cwnd_update_after_fr_timer;
2916 SCTP_TCB_UNLOCK(stcb);
2917 break;
2918 }
2919 /*
2920 * JRS - All other values are
2921 * invalid
2922 */
2923 default:
2924 {
2925 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2926 error = EINVAL;
2927 SCTP_TCB_UNLOCK(stcb);
2928 break;
2929 }
2930 }
2931 } else {
2932 switch (av->assoc_value) {
2933 case SCTP_CC_RFC2581:
2934 case SCTP_CC_HSTCP:
2935 case SCTP_CC_HTCP:
2936 inp->sctp_ep.sctp_default_cc_module = av->assoc_value;
2937 break;
2938 default:
2939 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2940 error = EINVAL;
2941 break;
 2942 }
2943 }
2944 }
2945 break;
2946 case SCTP_CLR_STAT_LOG:
2947 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
2948 error = EOPNOTSUPP;
2949 break;
2950 case SCTP_CONTEXT:
2951 {
2952 struct sctp_assoc_value *av;
2953
2954 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
2955 SCTP_FIND_STCB(inp, stcb, av->assoc_id);
2956
2957 if (stcb) {
2958 stcb->asoc.context = av->assoc_value;
2959 SCTP_TCB_UNLOCK(stcb);
2960 } else {
2961 SCTP_INP_WLOCK(inp);
2962 inp->sctp_context = av->assoc_value;
2963 SCTP_INP_WUNLOCK(inp);
2964 }
2965 }
2966 break;
2967 case SCTP_VRF_ID:
2968 {
2969 uint32_t *default_vrfid;
2970
2971 SCTP_CHECK_AND_CAST(default_vrfid, optval, uint32_t, optsize);
2972 if (*default_vrfid > SCTP_MAX_VRF_ID) {
2973 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2974 error = EINVAL;
2975 break;
2976 }
2977 inp->def_vrf_id = *default_vrfid;
2978 break;
2979 }
2980 case SCTP_DEL_VRF_ID:
2981 {
2982 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
2983 error = EOPNOTSUPP;
2984 break;
2985 }
2986 case SCTP_ADD_VRF_ID:
2987 {
2988 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
2989 error = EOPNOTSUPP;
2990 break;
2991 }
2992 case SCTP_DELAYED_SACK:
2993 {
2994 struct sctp_sack_info *sack;
2995
2996 SCTP_CHECK_AND_CAST(sack, optval, struct sctp_sack_info, optsize);
2997 SCTP_FIND_STCB(inp, stcb, sack->sack_assoc_id);
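 /*
 * sack_delay is given in milliseconds; it is clamped to
 * SCTP_MAX_SACK_DELAY and never allowed to round down to less
 * than one tick.
 */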
2998 if (sack->sack_delay) {
2999 if (sack->sack_delay > SCTP_MAX_SACK_DELAY)
3000 sack->sack_delay = SCTP_MAX_SACK_DELAY;
3001 }
3002 if (stcb) {
3003 if (sack->sack_delay) {
3004 if (MSEC_TO_TICKS(sack->sack_delay) < 1) {
3005 sack->sack_delay = TICKS_TO_MSEC(1);
3006 }
3007 stcb->asoc.delayed_ack = sack->sack_delay;
3008 }
3009 if (sack->sack_freq) {
3010 stcb->asoc.sack_freq = sack->sack_freq;
3011 }
3012 SCTP_TCB_UNLOCK(stcb);
3013 } else {
3014 SCTP_INP_WLOCK(inp);
3015 if (sack->sack_delay) {
3016 if (MSEC_TO_TICKS(sack->sack_delay) < 1) {
3017 sack->sack_delay = TICKS_TO_MSEC(1);
3018 }
3019 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(sack->sack_delay);
3020 }
3021 if (sack->sack_freq) {
3022 inp->sctp_ep.sctp_sack_freq = sack->sack_freq;
3023 }
3024 SCTP_INP_WUNLOCK(inp);
3025 }
3026 break;
3027 }
3028 case SCTP_AUTH_CHUNK:
3029 {
3030 struct sctp_authchunk *sauth;
3031
3032 SCTP_CHECK_AND_CAST(sauth, optval, struct sctp_authchunk, optsize);
3033
3034 SCTP_INP_WLOCK(inp);
3035 if (sctp_auth_add_chunk(sauth->sauth_chunk, inp->sctp_ep.local_auth_chunks)) {
3036 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3037 error = EINVAL;
3038 }
3039 SCTP_INP_WUNLOCK(inp);
3040 break;
3041 }
3042 case SCTP_AUTH_KEY:
3043 {
3044 struct sctp_authkey *sca;
3045 struct sctp_keyhead *shared_keys;
3046 sctp_sharedkey_t *shared_key;
3047 sctp_key_t *key = NULL;
3048 size_t size;
3049
3050 SCTP_CHECK_AND_CAST(sca, optval, struct sctp_authkey, optsize);
3051 SCTP_FIND_STCB(inp, stcb, sca->sca_assoc_id);
3052 size = optsize - sizeof(*sca);
3053
3054 if (stcb) {
3055 /* set it on the assoc */
3056 shared_keys = &stcb->asoc.shared_keys;
3057 /* clear the cached keys for this key id */
3058 sctp_clear_cachedkeys(stcb, sca->sca_keynumber);
3059 /*
3060 * create the new shared key and
3061 * insert/replace it
3062 */
3063 if (size > 0) {
3064 key = sctp_set_key(sca->sca_key, (uint32_t) size);
3065 if (key == NULL) {
3066 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
3067 error = ENOMEM;
3068 SCTP_TCB_UNLOCK(stcb);
3069 break;
3070 }
3071 }
3072 shared_key = sctp_alloc_sharedkey();
3073 if (shared_key == NULL) {
3074 sctp_free_key(key);
3075 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
3076 error = ENOMEM;
3077 SCTP_TCB_UNLOCK(stcb);
3078 break;
3079 }
3080 shared_key->key = key;
3081 shared_key->keyid = sca->sca_keynumber;
3082 error = sctp_insert_sharedkey(shared_keys, shared_key);
3083 SCTP_TCB_UNLOCK(stcb);
3084 } else {
3085 /* set it on the endpoint */
3086 SCTP_INP_WLOCK(inp);
3087 shared_keys = &inp->sctp_ep.shared_keys;
3088 /*
3089 * clear the cached keys on all assocs for
3090 * this key id
3091 */
3092 sctp_clear_cachedkeys_ep(inp, sca->sca_keynumber);
3093 /*
3094 * create the new shared key and
3095 * insert/replace it
3096 */
3097 if (size > 0) {
3098 key = sctp_set_key(sca->sca_key, (uint32_t) size);
3099 if (key == NULL) {
3100 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
3101 error = ENOMEM;
3102 SCTP_INP_WUNLOCK(inp);
3103 break;
3104 }
3105 }
3106 shared_key = sctp_alloc_sharedkey();
3107 if (shared_key == NULL) {
3108 sctp_free_key(key);
3109 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
3110 error = ENOMEM;
3111 SCTP_INP_WUNLOCK(inp);
3112 break;
3113 }
3114 shared_key->key = key;
3115 shared_key->keyid = sca->sca_keynumber;
3116 error = sctp_insert_sharedkey(shared_keys, shared_key);
3117 SCTP_INP_WUNLOCK(inp);
3118 }
3119 break;
3120 }
3121 case SCTP_HMAC_IDENT:
3122 {
3123 struct sctp_hmacalgo *shmac;
3124 sctp_hmaclist_t *hmaclist;
3125 uint16_t hmacid;
3126 uint32_t i;
3127
3128 size_t found;
3129
3130 SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, optsize);
3131 if (optsize < sizeof(struct sctp_hmacalgo) + shmac->shmac_number_of_idents * sizeof(uint16_t)) {
3132 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3133 error = EINVAL;
3134 break;
3135 }
3136 hmaclist = sctp_alloc_hmaclist(shmac->shmac_number_of_idents);
3137 if (hmaclist == NULL) {
3138 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
3139 error = ENOMEM;
3140 break;
3141 }
3142 for (i = 0; i < shmac->shmac_number_of_idents; i++) {
3143 hmacid = shmac->shmac_idents[i];
3144 if (sctp_auth_add_hmacid(hmaclist, hmacid)) {
 3145 /* invalid HMACs were found */
3146 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3147 error = EINVAL;
3148 sctp_free_hmaclist(hmaclist);
3149 goto sctp_set_hmac_done;
3150 }
3151 }
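 /* SHA-1 support is mandatory; reject any HMAC list that omits it. */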
3152 found = 0;
3153 for (i = 0; i < hmaclist->num_algo; i++) {
3154 if (hmaclist->hmac[i] == SCTP_AUTH_HMAC_ID_SHA1) {
3155 /* already in list */
3156 found = 1;
3157 }
3158 }
3159 if (!found) {
3160 sctp_free_hmaclist(hmaclist);
3161 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3162 error = EINVAL;
3163 break;
3164 }
3165 /* set it on the endpoint */
3166 SCTP_INP_WLOCK(inp);
3167 if (inp->sctp_ep.local_hmacs)
3168 sctp_free_hmaclist(inp->sctp_ep.local_hmacs);
3169 inp->sctp_ep.local_hmacs = hmaclist;
3170 SCTP_INP_WUNLOCK(inp);
3171 sctp_set_hmac_done:
3172 break;
3173 }
3174 case SCTP_AUTH_ACTIVE_KEY:
3175 {
3176 struct sctp_authkeyid *scact;
3177
3178 SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid,
3179 optsize);
3180 SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id);
3181
3182 /* set the active key on the right place */
3183 if (stcb) {
3184 /* set the active key on the assoc */
3185 if (sctp_auth_setactivekey(stcb,
3186 scact->scact_keynumber)) {
3187 SCTP_LTRACE_ERR_RET(inp, NULL, NULL,
3188 SCTP_FROM_SCTP_USRREQ,
3189 EINVAL);
3190 error = EINVAL;
3191 }
3192 SCTP_TCB_UNLOCK(stcb);
3193 } else {
3194 /* set the active key on the endpoint */
3195 SCTP_INP_WLOCK(inp);
3196 if (sctp_auth_setactivekey_ep(inp,
3197 scact->scact_keynumber)) {
3198 SCTP_LTRACE_ERR_RET(inp, NULL, NULL,
3199 SCTP_FROM_SCTP_USRREQ,
3200 EINVAL);
3201 error = EINVAL;
3202 }
3203 SCTP_INP_WUNLOCK(inp);
3204 }
3205 break;
3206 }
3207 case SCTP_AUTH_DELETE_KEY:
3208 {
3209 struct sctp_authkeyid *scdel;
3210
3211 SCTP_CHECK_AND_CAST(scdel, optval, struct sctp_authkeyid,
3212 optsize);
3213 SCTP_FIND_STCB(inp, stcb, scdel->scact_assoc_id);
3214
3215 /* delete the key from the right place */
3216 if (stcb) {
3217 if (sctp_delete_sharedkey(stcb,
3218 scdel->scact_keynumber)) {
3219 SCTP_LTRACE_ERR_RET(inp, NULL, NULL,
3220 SCTP_FROM_SCTP_USRREQ,
3221 EINVAL);
3222 error = EINVAL;
3223 }
3224 SCTP_TCB_UNLOCK(stcb);
3225 } else {
3226 SCTP_INP_WLOCK(inp);
3227 if (sctp_delete_sharedkey_ep(inp,
3228 scdel->scact_keynumber)) {
3229 SCTP_LTRACE_ERR_RET(inp, NULL, NULL,
3230 SCTP_FROM_SCTP_USRREQ,
3231 EINVAL);
3232 error = EINVAL;
3233 }
3234 SCTP_INP_WUNLOCK(inp);
3235 }
3236 break;
3237 }
3238 case SCTP_AUTH_DEACTIVATE_KEY:
3239 {
3240 struct sctp_authkeyid *keyid;
3241
3242 SCTP_CHECK_AND_CAST(keyid, optval, struct sctp_authkeyid,
3243 optsize);
3244 SCTP_FIND_STCB(inp, stcb, keyid->scact_assoc_id);
3245
3246 /* deactivate the key from the right place */
3247 if (stcb) {
3248 if (sctp_deact_sharedkey(stcb,
3249 keyid->scact_keynumber)) {
3250 SCTP_LTRACE_ERR_RET(inp, NULL, NULL,
3251 SCTP_FROM_SCTP_USRREQ,
3252 EINVAL);
3253 error = EINVAL;
3254 }
3255 SCTP_TCB_UNLOCK(stcb);
3256 } else {
3257 SCTP_INP_WLOCK(inp);
3258 if (sctp_deact_sharedkey_ep(inp,
3259 keyid->scact_keynumber)) {
3260 SCTP_LTRACE_ERR_RET(inp, NULL, NULL,
3261 SCTP_FROM_SCTP_USRREQ,
3262 EINVAL);
3263 error = EINVAL;
3264 }
3265 SCTP_INP_WUNLOCK(inp);
3266 }
3267 break;
3268 }
3269
3270 case SCTP_RESET_STREAMS:
3271 {
3272 struct sctp_stream_reset *strrst;
3273 uint8_t send_in = 0, send_tsn = 0, send_out = 0,
3274 addstream = 0;
3275 uint16_t addstrmcnt = 0;
3276 int i;
3277
3278 SCTP_CHECK_AND_CAST(strrst, optval, struct sctp_stream_reset, optsize);
3279 SCTP_FIND_STCB(inp, stcb, strrst->strrst_assoc_id);
3280
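 /*
 * A stream reset needs an existing association, a peer that
 * supports the extension, and no other reset request outstanding.
 */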
3281 if (stcb == NULL) {
3282 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
3283 error = ENOENT;
3284 break;
3285 }
3286 if (stcb->asoc.peer_supports_strreset == 0) {
 3287 /*
 3288 * The peer does not support it; we return
 3289 * "protocol not supported" since that is true
 3290 * for this feature with this peer, not for the
 3291 * socket request in general.
 3292 */
3293 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EPROTONOSUPPORT);
3294 error = EPROTONOSUPPORT;
3295 SCTP_TCB_UNLOCK(stcb);
3296 break;
3297 }
3298 if (stcb->asoc.stream_reset_outstanding) {
3299 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
3300 error = EALREADY;
3301 SCTP_TCB_UNLOCK(stcb);
3302 break;
3303 }
3304 if (strrst->strrst_flags == SCTP_RESET_LOCAL_RECV) {
3305 send_in = 1;
3306 } else if (strrst->strrst_flags == SCTP_RESET_LOCAL_SEND) {
3307 send_out = 1;
3308 } else if (strrst->strrst_flags == SCTP_RESET_BOTH) {
3309 send_in = 1;
3310 send_out = 1;
3311 } else if (strrst->strrst_flags == SCTP_RESET_TSN) {
3312 send_tsn = 1;
3313 } else if (strrst->strrst_flags == SCTP_RESET_ADD_STREAMS) {
3314 if (send_tsn ||
3315 send_in ||
3316 send_out) {
3317 /* We can't do that and add streams */
3318 error = EINVAL;
3319 goto skip_stuff;
3320 }
3321 if (stcb->asoc.stream_reset_outstanding) {
3322 error = EBUSY;
3323 goto skip_stuff;
3324 }
3325 addstream = 1;
3326 /* We allocate here */
3327 addstrmcnt = strrst->strrst_num_streams;
3328 if ((int)(addstrmcnt + stcb->asoc.streamoutcnt) > 0xffff) {
3329 /* You can't have more than 64k */
3330 error = EINVAL;
3331 goto skip_stuff;
3332 }
3333 if ((stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt) < addstrmcnt) {
3334 /* Need to allocate more */
3335 struct sctp_stream_out *oldstream;
3336 struct sctp_stream_queue_pending *sp;
3337 int removed;
3338
3339 oldstream = stcb->asoc.strmout;
3340 /* get some more */
3341 SCTP_MALLOC(stcb->asoc.strmout, struct sctp_stream_out *,
3342 ((stcb->asoc.streamoutcnt + addstrmcnt) * sizeof(struct sctp_stream_out)),
3343 SCTP_M_STRMO);
3344 if (stcb->asoc.strmout == NULL) {
3345 stcb->asoc.strmout = oldstream;
3346 error = ENOMEM;
3347 goto skip_stuff;
3348 }
 3349 /*
 3350 * OK, now copy over the existing
 3351 * outgoing streams and initialize
 3352 * the newly added ones.
 3353 */
3354 SCTP_TCB_SEND_LOCK(stcb);
3355 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3356 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
3357 stcb->asoc.strmout[i].next_sequence_sent = oldstream[i].next_sequence_sent;
3358 stcb->asoc.strmout[i].last_msg_incomplete = oldstream[i].last_msg_incomplete;
3359 stcb->asoc.strmout[i].stream_no = i;
3360 if (oldstream[i].next_spoke.tqe_next) {
3361 sctp_remove_from_wheel(stcb, &stcb->asoc, &oldstream[i], 1);
3362 stcb->asoc.strmout[i].next_spoke.tqe_next = NULL;
3363 stcb->asoc.strmout[i].next_spoke.tqe_prev = NULL;
3364 removed = 1;
3365 } else {
3366 /* not on out wheel */
3367 stcb->asoc.strmout[i].next_spoke.tqe_next = NULL;
3368 stcb->asoc.strmout[i].next_spoke.tqe_prev = NULL;
3369 removed = 0;
3370 }
3371 /*
3372 * now anything on those
3373 * queues?
3374 */
3375 while (TAILQ_EMPTY(&oldstream[i].outqueue) == 0) {
3376 sp = TAILQ_FIRST(&oldstream[i].outqueue);
3377 TAILQ_REMOVE(&oldstream[i].outqueue, sp, next);
3378 TAILQ_INSERT_TAIL(&stcb->asoc.strmout[i].outqueue, sp, next);
3379 }
3380 /* Did we disrupt the wheel? */
3381 if (removed) {
3382 sctp_insert_on_wheel(stcb,
3383 &stcb->asoc,
3384 &stcb->asoc.strmout[i],
3385 1);
3386 }
3387 /*
3388 * Now move assoc pointers
3389 * too
3390 */
3391 if (stcb->asoc.last_out_stream == &oldstream[i]) {
3392 stcb->asoc.last_out_stream = &stcb->asoc.strmout[i];
3393 }
3394 if (stcb->asoc.locked_on_sending == &oldstream[i]) {
3395 stcb->asoc.locked_on_sending = &stcb->asoc.strmout[i];
3396 }
3397 }
3398 /* now the new streams */
3399 for (i = stcb->asoc.streamoutcnt; i < (stcb->asoc.streamoutcnt + addstrmcnt); i++) {
3400 stcb->asoc.strmout[i].next_sequence_sent = 0x0;
3401 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
3402 stcb->asoc.strmout[i].stream_no = i;
3403 stcb->asoc.strmout[i].last_msg_incomplete = 0;
3404 stcb->asoc.strmout[i].next_spoke.tqe_next = NULL;
3405 stcb->asoc.strmout[i].next_spoke.tqe_prev = NULL;
3406 }
3407 stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt + addstrmcnt;
3408 SCTP_FREE(oldstream, SCTP_M_STRMO);
 3409 SCTP_TCB_SEND_UNLOCK(stcb);
 3410 }
3411 goto skip_stuff;
3412 } else {
3413 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3414 error = EINVAL;
3415 SCTP_TCB_UNLOCK(stcb);
3416 break;
3417 }
3418 for (i = 0; i < strrst->strrst_num_streams; i++) {
 3419 if ((send_in) &&
 3420 (strrst->strrst_list[i] > stcb->asoc.streamincnt)) {
3422 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3423 error = EINVAL;
3424 goto get_out;
3425 }
3426 if ((send_out) &&
3427 (strrst->strrst_list[i] > stcb->asoc.streamoutcnt)) {
3428 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3429 error = EINVAL;
3430 goto get_out;
3431 }
3432 }
3433 skip_stuff:
3434 if (error) {
3435 get_out:
3436 SCTP_TCB_UNLOCK(stcb);
3437 break;
3438 }
3439 error = sctp_send_str_reset_req(stcb, strrst->strrst_num_streams,
3440 strrst->strrst_list,
3441 send_out, (stcb->asoc.str_reset_seq_in - 3),
3442 send_in, send_tsn, addstream, addstrmcnt);
3443
3444 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_REQ, SCTP_SO_LOCKED);
3445 SCTP_TCB_UNLOCK(stcb);
3446 }
3447 break;
3448
3449 case SCTP_CONNECT_X:
3450 if (optsize < (sizeof(int) + sizeof(struct sockaddr_in))) {
3451 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3452 error = EINVAL;
3453 break;
3454 }
3455 error = sctp_do_connect_x(so, inp, optval, optsize, p, 0);
3456 break;
3457
3458 case SCTP_CONNECT_X_DELAYED:
3459 if (optsize < (sizeof(int) + sizeof(struct sockaddr_in))) {
3460 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3461 error = EINVAL;
3462 break;
3463 }
3464 error = sctp_do_connect_x(so, inp, optval, optsize, p, 1);
3465 break;
3466
3467 case SCTP_CONNECT_X_COMPLETE:
3468 {
3469 struct sockaddr *sa;
3470 struct sctp_nets *net;
3471
3472 /* FIXME MT: check correct? */
3473 SCTP_CHECK_AND_CAST(sa, optval, struct sockaddr, optsize);
3474
3475 /* find tcb */
3476 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3477 SCTP_INP_RLOCK(inp);
3478 stcb = LIST_FIRST(&inp->sctp_asoc_list);
3479 if (stcb) {
3480 SCTP_TCB_LOCK(stcb);
3481 net = sctp_findnet(stcb, sa);
3482 }
3483 SCTP_INP_RUNLOCK(inp);
3484 } else {
 3485 /*
 3486 * We increment the refcount here since
 3487 * sctp_findassociation_ep_addr() will do a
 3488 * decrement if it finds the stcb, as long as
 3489 * the locked tcb (last argument) is NOT a
 3490 * TCB, i.e. NULL.
 3491 */
3492 SCTP_INP_INCR_REF(inp);
3493 stcb = sctp_findassociation_ep_addr(&inp, sa, &net, NULL, NULL);
3494 if (stcb == NULL) {
3495 SCTP_INP_DECR_REF(inp);
3496 }
3497 }
3498
3499 if (stcb == NULL) {
3500 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
3501 error = ENOENT;
3502 break;
3503 }
3504 if (stcb->asoc.delayed_connection == 1) {
3505 stcb->asoc.delayed_connection = 0;
3506 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
3507 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb,
3508 stcb->asoc.primary_destination,
3509 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_9);
3510 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
3511 } else {
3512 /*
3513 * already expired or did not use delayed
3514 * connectx
3515 */
3516 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
3517 error = EALREADY;
3518 }
3519 SCTP_TCB_UNLOCK(stcb);
3520 }
3521 break;
3522 case SCTP_MAX_BURST:
3523 {
3524 uint8_t *burst;
3525
3526 SCTP_CHECK_AND_CAST(burst, optval, uint8_t, optsize);
3527
3528 SCTP_INP_WLOCK(inp);
3529 if (*burst) {
3530 inp->sctp_ep.max_burst = *burst;
3531 }
3532 SCTP_INP_WUNLOCK(inp);
3533 }
3534 break;
3535 case SCTP_MAXSEG:
3536 {
3537 struct sctp_assoc_value *av;
3538 int ovh;
3539
3540 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
3541 SCTP_FIND_STCB(inp, stcb, av->assoc_id);
3542
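 /*
 * The caller supplies the desired payload size; the per-packet
 * overhead (ovh) is added back to form the internal fragmentation
 * point.
 */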
3543 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
3544 ovh = SCTP_MED_OVERHEAD;
3545 } else {
3546 ovh = SCTP_MED_V4_OVERHEAD;
3547 }
3548 if (stcb) {
3549 if (av->assoc_value) {
3550 stcb->asoc.sctp_frag_point = (av->assoc_value + ovh);
3551 } else {
3552 stcb->asoc.sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT;
3553 }
3554 SCTP_TCB_UNLOCK(stcb);
3555 } else {
3556 SCTP_INP_WLOCK(inp);
3557 /*
3558 * FIXME MT: I think this is not in tune
3559 * with the API ID
3560 */
3561 if (av->assoc_value) {
3562 inp->sctp_frag_point = (av->assoc_value + ovh);
3563 } else {
3564 inp->sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT;
3565 }
3566 SCTP_INP_WUNLOCK(inp);
3567 }
3568 }
3569 break;
3570 case SCTP_EVENTS:
3571 {
3572 struct sctp_event_subscribe *events;
3573
3574 SCTP_CHECK_AND_CAST(events, optval, struct sctp_event_subscribe, optsize);
3575
3576 SCTP_INP_WLOCK(inp);
3577 if (events->sctp_data_io_event) {
3578 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT);
3579 } else {
3580 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT);
3581 }
3582
3583 if (events->sctp_association_event) {
3584 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT);
3585 } else {
3586 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT);
3587 }
3588
3589 if (events->sctp_address_event) {
3590 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT);
3591 } else {
3592 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPADDREVNT);
3593 }
3594
3595 if (events->sctp_send_failure_event) {
3596 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT);
3597 } else {
3598 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT);
3599 }
3600
3601 if (events->sctp_peer_error_event) {
3602 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR);
3603 } else {
3604 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPEERERR);
3605 }
3606
3607 if (events->sctp_shutdown_event) {
3608 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT);
3609 } else {
3610 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT);
3611 }
3612
3613 if (events->sctp_partial_delivery_event) {
3614 sctp_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT);
3615 } else {
3616 sctp_feature_off(inp, SCTP_PCB_FLAGS_PDAPIEVNT);
3617 }
3618
3619 if (events->sctp_adaptation_layer_event) {
3620 sctp_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT);
3621 } else {
3622 sctp_feature_off(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT);
3623 }
3624
3625 if (events->sctp_authentication_event) {
3626 sctp_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT);
3627 } else {
3628 sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTHEVNT);
3629 }
3630
3631 if (events->sctp_sender_dry_event) {
3632 sctp_feature_on(inp, SCTP_PCB_FLAGS_DRYEVNT);
3633 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3634 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3635 stcb = LIST_FIRST(&inp->sctp_asoc_list);
3636 if (stcb) {
3637 SCTP_TCB_LOCK(stcb);
3638 }
3639 if (stcb &&
3640 TAILQ_EMPTY(&stcb->asoc.send_queue) &&
3641 TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
3642 (stcb->asoc.stream_queue_cnt == 0)) {
3643 sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_LOCKED);
3644 }
3645 if (stcb) {
3646 SCTP_TCB_UNLOCK(stcb);
3647 }
3648 }
3649 } else {
3650 sctp_feature_off(inp, SCTP_PCB_FLAGS_DRYEVNT);
3651 }
3652
3653 if (events->sctp_stream_reset_events) {
3653 if (events->sctp_stream_reset_event) {
3654 sctp_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT);
3655 } else {
3656 sctp_feature_off(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT);
3657 }
3658 SCTP_INP_WUNLOCK(inp);
3659 }
3660 break;
3661
3662 case SCTP_ADAPTATION_LAYER:
3663 {
3664 struct sctp_setadaptation *adap_bits;
3665
3666 SCTP_CHECK_AND_CAST(adap_bits, optval, struct sctp_setadaptation, optsize);
3667 SCTP_INP_WLOCK(inp);
3668 inp->sctp_ep.adaptation_layer_indicator = adap_bits->ssb_adaptation_ind;
3669 SCTP_INP_WUNLOCK(inp);
3670 }
3671 break;
3672#ifdef SCTP_DEBUG
3673 case SCTP_SET_INITIAL_DBG_SEQ:
3674 {
3675 uint32_t *vvv;
3676
3677 SCTP_CHECK_AND_CAST(vvv, optval, uint32_t, optsize);
3678 SCTP_INP_WLOCK(inp);
3679 inp->sctp_ep.initial_sequence_debug = *vvv;
3680 SCTP_INP_WUNLOCK(inp);
3681 }
3682 break;
3683#endif
3684 case SCTP_DEFAULT_SEND_PARAM:
3685 {
3686 struct sctp_sndrcvinfo *s_info;
3687
3688 SCTP_CHECK_AND_CAST(s_info, optval, struct sctp_sndrcvinfo, optsize);
3689 SCTP_FIND_STCB(inp, stcb, s_info->sinfo_assoc_id);
3690
3691 if (stcb) {
3692 if (s_info->sinfo_stream <= stcb->asoc.streamoutcnt) {
3693 memcpy(&stcb->asoc.def_send, s_info, min(optsize, sizeof(stcb->asoc.def_send)));
3694 } else {
3695 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3696 error = EINVAL;
3697 }
3698 SCTP_TCB_UNLOCK(stcb);
3699 } else {
3700 SCTP_INP_WLOCK(inp);
3701 memcpy(&inp->def_send, s_info, min(optsize, sizeof(inp->def_send)));
3702 SCTP_INP_WUNLOCK(inp);
3703 }
3704 }
3705 break;
3706 case SCTP_PEER_ADDR_PARAMS:
 3707 /* Applies to the specific association */
3708 {
3709 struct sctp_paddrparams *paddrp;
3710 struct sctp_nets *net;
3711
3712 SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, optsize);
3713 SCTP_FIND_STCB(inp, stcb, paddrp->spp_assoc_id);
3714 net = NULL;
3715 if (stcb) {
3716 net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address);
3717 } else {
 3718 /*
 3719 * We increment the refcount here since
 3720 * sctp_findassociation_ep_addr() will do a
 3721 * decrement if it finds the stcb, as long as
 3722 * the locked tcb (last argument) is NOT a
 3723 * TCB, i.e. NULL.
 3724 */
3725 SCTP_INP_INCR_REF(inp);
3726 stcb = sctp_findassociation_ep_addr(&inp,
3727 (struct sockaddr *)&paddrp->spp_address,
3728 &net, NULL, NULL);
3729 if (stcb == NULL) {
3730 SCTP_INP_DECR_REF(inp);
3731 }
3732 }
3733 if (stcb && (net == NULL)) {
3734 struct sockaddr *sa;
3735
3736 sa = (struct sockaddr *)&paddrp->spp_address;
3737 if (sa->sa_family == AF_INET) {
3738 struct sockaddr_in *sin;
3739
3740 sin = (struct sockaddr_in *)sa;
3741 if (sin->sin_addr.s_addr) {
3742 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3743 SCTP_TCB_UNLOCK(stcb);
3744 error = EINVAL;
3745 break;
3746 }
3747 } else if (sa->sa_family == AF_INET6) {
3748 struct sockaddr_in6 *sin6;
3749
3750 sin6 = (struct sockaddr_in6 *)sa;
3751 if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
3752 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3753 SCTP_TCB_UNLOCK(stcb);
3754 error = EINVAL;
3755 break;
3756 }
3757 } else {
3758 error = EAFNOSUPPORT;
3759 SCTP_TCB_UNLOCK(stcb);
3760 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
3761 break;
3762 }
3763 }
3764 /* sanity checks */
3765 if ((paddrp->spp_flags & SPP_HB_ENABLE) && (paddrp->spp_flags & SPP_HB_DISABLE)) {
3766 if (stcb)
3767 SCTP_TCB_UNLOCK(stcb);
3768 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3769 return (EINVAL);
3770 }
3771 if ((paddrp->spp_flags & SPP_PMTUD_ENABLE) && (paddrp->spp_flags & SPP_PMTUD_DISABLE)) {
3772 if (stcb)
3773 SCTP_TCB_UNLOCK(stcb);
3774 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3775 return (EINVAL);
3776 }
3777 if (stcb) {
3778 /************************TCB SPECIFIC SET ******************/
 3779 /*
 3780 * Do we change the timer for HB? We run
 3781 * only one.
 3782 */
3783 int ovh = 0;
3784
3785 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
3786 ovh = SCTP_MED_OVERHEAD;
3787 } else {
3788 ovh = SCTP_MED_V4_OVERHEAD;
3789 }
3790
3791 if (paddrp->spp_hbinterval)
3792 stcb->asoc.heart_beat_delay = paddrp->spp_hbinterval;
3793 else if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO)
3794 stcb->asoc.heart_beat_delay = 0;
3795
 3796 /* network-specific settings? */
3797 if (net) {
3798 /************************NET SPECIFIC SET ******************/
3799 if (paddrp->spp_flags & SPP_HB_DEMAND) {
3800 /* on demand HB */
3801 if (sctp_send_hb(stcb, 1, net) < 0) {
3802 /* asoc destroyed */
3803 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3804 error = EINVAL;
3805 break;
3806 }
3807 }
3808 if (paddrp->spp_flags & SPP_HB_DISABLE) {
3809 net->dest_state |= SCTP_ADDR_NOHB;
3810 }
3811 if (paddrp->spp_flags & SPP_HB_ENABLE) {
3812 net->dest_state &= ~SCTP_ADDR_NOHB;
3813 }
3814 if ((paddrp->spp_flags & SPP_PMTUD_DISABLE) && (paddrp->spp_pathmtu >= SCTP_SMALLEST_PMTU)) {
3815 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
3816 sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
3817 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_10);
3818 }
3819 if (paddrp->spp_pathmtu > SCTP_DEFAULT_MINSEGMENT) {
3820 net->mtu = paddrp->spp_pathmtu + ovh;
3821 if (net->mtu < stcb->asoc.smallest_mtu) {
3822#ifdef SCTP_PRINT_FOR_B_AND_M
3823 SCTP_PRINTF("SCTP_PMTU_DISABLE calls sctp_pathmtu_adjustment:%d\n",
3824 net->mtu);
3825#endif
3826 sctp_pathmtu_adjustment(inp, stcb, net, net->mtu);
3827 }
3828 }
3829 }
3830 if (paddrp->spp_flags & SPP_PMTUD_ENABLE) {
3831 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
3832 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
3833 }
3834 }
3835 if (paddrp->spp_pathmaxrxt)
3836 net->failure_threshold = paddrp->spp_pathmaxrxt;
3837#ifdef INET
3838 if (paddrp->spp_flags & SPP_IPV4_TOS) {
3839 if (net->ro._l_addr.sin.sin_family == AF_INET) {
3840 net->tos_flowlabel = paddrp->spp_ipv4_tos & 0x000000fc;
3841 }
3842 }
3843#endif
3844#ifdef INET6
3845 if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL) {
3846 if (net->ro._l_addr.sin6.sin6_family == AF_INET6) {
3847 net->tos_flowlabel = paddrp->spp_ipv6_flowlabel;
3848 }
3849 }
3850#endif
3851 } else {
3852 /************************ASSOC ONLY -- NO NET SPECIFIC SET ******************/
3853 if (paddrp->spp_pathmaxrxt)
3854 stcb->asoc.def_net_failure = paddrp->spp_pathmaxrxt;
3855
3856 if (paddrp->spp_flags & SPP_HB_ENABLE) {
3857 /* Turn back on the timer */
3858 stcb->asoc.hb_is_disabled = 0;
3859 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
3860 }
3861 if ((paddrp->spp_flags & SPP_PMTUD_DISABLE) && (paddrp->spp_pathmtu >= SCTP_SMALLEST_PMTU)) {
3862 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
3863 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
3864 sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
3865 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_10);
3866 }
3867 if (paddrp->spp_pathmtu > SCTP_DEFAULT_MINSEGMENT) {
3868 net->mtu = paddrp->spp_pathmtu + ovh;
3869 if (net->mtu < stcb->asoc.smallest_mtu) {
3870#ifdef SCTP_PRINT_FOR_B_AND_M
3871 SCTP_PRINTF("SCTP_PMTU_DISABLE calls sctp_pathmtu_adjustment:%d\n",
3872 net->mtu);
3873#endif
3874 sctp_pathmtu_adjustment(inp, stcb, net, net->mtu);
3875 }
3876 }
3877 }
3878 }
3879 if (paddrp->spp_flags & SPP_PMTUD_ENABLE) {
3880 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
3881 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
3882 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
3883 }
3884 }
3885 }
3886 if (paddrp->spp_flags & SPP_HB_DISABLE) {
3887 int cnt_of_unconf = 0;
3888 struct sctp_nets *lnet;
3889
3890 stcb->asoc.hb_is_disabled = 1;
3891 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
3892 if (lnet->dest_state & SCTP_ADDR_UNCONFIRMED) {
3893 cnt_of_unconf++;
3894 }
3895 }
3896 /*
3897 * stop the timer ONLY if we
3898 * have no unconfirmed
3899 * addresses
3900 */
3901 if (cnt_of_unconf == 0) {
3902 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
3903 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
3904 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_11);
3905 }
3906 }
3907 }
3908 if (paddrp->spp_flags & SPP_HB_ENABLE) {
3909 /* start up the timer. */
3910 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
3911 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
3912 }
3913 }
3914#ifdef INET
3915 if (paddrp->spp_flags & SPP_IPV4_TOS)
3916 stcb->asoc.default_tos = paddrp->spp_ipv4_tos & 0x000000fc;
3917#endif
3918#ifdef INET6
3919 if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL)
3920 stcb->asoc.default_flowlabel = paddrp->spp_ipv6_flowlabel;
3921#endif
3922
3923 }
3924 SCTP_TCB_UNLOCK(stcb);
3925 } else {
3926 /************************NO TCB, SET TO default stuff ******************/
3927 SCTP_INP_WLOCK(inp);
3928 /*
 3929 * TOS and flow label defaults are set via the
 3930 * IP-level options on the socket
3931 */
3932 if (paddrp->spp_pathmaxrxt) {
3933 inp->sctp_ep.def_net_failure = paddrp->spp_pathmaxrxt;
3934 }
3935 if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO)
3936 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = 0;
3937 else if (paddrp->spp_hbinterval) {
3938 if (paddrp->spp_hbinterval > SCTP_MAX_HB_INTERVAL)
3939 paddrp->spp_hbinterval = SCTP_MAX_HB_INTERVAL;
3940 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = MSEC_TO_TICKS(paddrp->spp_hbinterval);
3941 }
3942 if (paddrp->spp_flags & SPP_HB_ENABLE) {
3943 sctp_feature_off(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT);
3944
3945 } else if (paddrp->spp_flags & SPP_HB_DISABLE) {
3946 sctp_feature_on(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT);
3947 }
3948 SCTP_INP_WUNLOCK(inp);
3949 }
3950 }
3951 break;
3952 case SCTP_RTOINFO:
3953 {
3954 struct sctp_rtoinfo *srto;
3955 uint32_t new_init, new_min, new_max;
3956
3957 SCTP_CHECK_AND_CAST(srto, optval, struct sctp_rtoinfo, optsize);
3958 SCTP_FIND_STCB(inp, stcb, srto->srto_assoc_id);
3959
3960 if (stcb) {
3961 if (srto->srto_initial)
3962 new_init = srto->srto_initial;
3963 else
3964 new_init = stcb->asoc.initial_rto;
3965 if (srto->srto_max)
3966 new_max = srto->srto_max;
3967 else
3968 new_max = stcb->asoc.maxrto;
3969 if (srto->srto_min)
3970 new_min = srto->srto_min;
3971 else
3972 new_min = stcb->asoc.minrto;
3973 if ((new_min <= new_init) && (new_init <= new_max)) {
3974 stcb->asoc.initial_rto = new_init;
3975 stcb->asoc.maxrto = new_max;
3976 stcb->asoc.minrto = new_min;
3977 } else {
3978 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3979 error = EINVAL;
3980 }
3981 SCTP_TCB_UNLOCK(stcb);
3982 } else {
3983 SCTP_INP_WLOCK(inp);
3984 if (srto->srto_initial)
3985 new_init = srto->srto_initial;
3986 else
3987 new_init = inp->sctp_ep.initial_rto;
3988 if (srto->srto_max)
3989 new_max = srto->srto_max;
3990 else
3991 new_max = inp->sctp_ep.sctp_maxrto;
3992 if (srto->srto_min)
3993 new_min = srto->srto_min;
3994 else
3995 new_min = inp->sctp_ep.sctp_minrto;
3996 if ((new_min <= new_init) && (new_init <= new_max)) {
3997 inp->sctp_ep.initial_rto = new_init;
3998 inp->sctp_ep.sctp_maxrto = new_max;
3999 inp->sctp_ep.sctp_minrto = new_min;
4000 } else {
4001 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4002 error = EINVAL;
4003 }
4004 SCTP_INP_WUNLOCK(inp);
4005 }
4006 }
4007 break;
4008 case SCTP_ASSOCINFO:
4009 {
4010 struct sctp_assocparams *sasoc;
4011
4012 SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, optsize);
4013 SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id);
4014 if (sasoc->sasoc_cookie_life) {
4015 /* boundary check the cookie life */
4016 if (sasoc->sasoc_cookie_life < 1000)
4017 sasoc->sasoc_cookie_life = 1000;
4018 if (sasoc->sasoc_cookie_life > SCTP_MAX_COOKIE_LIFE) {
4019 sasoc->sasoc_cookie_life = SCTP_MAX_COOKIE_LIFE;
4020 }
4021 }
4022 if (stcb) {
4023 if (sasoc->sasoc_asocmaxrxt)
4024 stcb->asoc.max_send_times = sasoc->sasoc_asocmaxrxt;
4025 sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets;
4026 sasoc->sasoc_peer_rwnd = 0;
4027 sasoc->sasoc_local_rwnd = 0;
4028 if (sasoc->sasoc_cookie_life) {
4029 stcb->asoc.cookie_life = MSEC_TO_TICKS(sasoc->sasoc_cookie_life);
4030 }
4031 SCTP_TCB_UNLOCK(stcb);
4032 } else {
4033 SCTP_INP_WLOCK(inp);
4034 if (sasoc->sasoc_asocmaxrxt)
4035 inp->sctp_ep.max_send_times = sasoc->sasoc_asocmaxrxt;
4036 sasoc->sasoc_number_peer_destinations = 0;
4037 sasoc->sasoc_peer_rwnd = 0;
4038 sasoc->sasoc_local_rwnd = 0;
4039 if (sasoc->sasoc_cookie_life) {
4040 inp->sctp_ep.def_cookie_life = MSEC_TO_TICKS(sasoc->sasoc_cookie_life);
4041 }
4042 SCTP_INP_WUNLOCK(inp);
4043 }
4044 }
4045 break;
4046 case SCTP_INITMSG:
4047 {
4048 struct sctp_initmsg *sinit;
4049
4050 SCTP_CHECK_AND_CAST(sinit, optval, struct sctp_initmsg, optsize);
4051 SCTP_INP_WLOCK(inp);
4052 if (sinit->sinit_num_ostreams)
4053 inp->sctp_ep.pre_open_stream_count = sinit->sinit_num_ostreams;
4054
4055 if (sinit->sinit_max_instreams)
4056 inp->sctp_ep.max_open_streams_intome = sinit->sinit_max_instreams;
4057
4058 if (sinit->sinit_max_attempts)
4059 inp->sctp_ep.max_init_times = sinit->sinit_max_attempts;
4060
4061 if (sinit->sinit_max_init_timeo)
4062 inp->sctp_ep.initial_init_rto_max = sinit->sinit_max_init_timeo;
4063 SCTP_INP_WUNLOCK(inp);
4064 }
4065 break;
4066 case SCTP_PRIMARY_ADDR:
4067 {
4068 struct sctp_setprim *spa;
4069 struct sctp_nets *net, *lnet;
4070
4071 SCTP_CHECK_AND_CAST(spa, optval, struct sctp_setprim, optsize);
4072 SCTP_FIND_STCB(inp, stcb, spa->ssp_assoc_id);
4073
4074 net = NULL;
4075 if (stcb) {
4076 net = sctp_findnet(stcb, (struct sockaddr *)&spa->ssp_addr);
4077 } else {
4078 /*
4079 * We increment here since
 4080 * sctp_findassociation_ep_addr() will do a
4081 * decrement if it finds the stcb as long as
4082 * the locked tcb (last argument) is NOT a
4083 * TCB.. aka NULL.
4084 */
4085 SCTP_INP_INCR_REF(inp);
4086 stcb = sctp_findassociation_ep_addr(&inp,
4087 (struct sockaddr *)&spa->ssp_addr,
4088 &net, NULL, NULL);
4089 if (stcb == NULL) {
4090 SCTP_INP_DECR_REF(inp);
4091 }
4092 }
4093
4094 if ((stcb) && (net)) {
4095 if ((net != stcb->asoc.primary_destination) &&
4096 (!(net->dest_state & SCTP_ADDR_UNCONFIRMED))) {
4097 /* Ok we need to set it */
4098 lnet = stcb->asoc.primary_destination;
4099 if (sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net) == 0) {
4100 if (net->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
4101 net->dest_state |= SCTP_ADDR_DOUBLE_SWITCH;
4102 }
4103 net->dest_state |= SCTP_ADDR_SWITCH_PRIMARY;
4104 }
4105 }
4106 } else {
4107 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4108 error = EINVAL;
4109 }
4110 if (stcb) {
4111 SCTP_TCB_UNLOCK(stcb);
4112 }
4113 }
4114 break;
4115 case SCTP_SET_DYNAMIC_PRIMARY:
4116 {
4117 union sctp_sockstore *ss;
4118
4119 error = priv_check(curthread,
4120 PRIV_NETINET_RESERVEDPORT);
4121 if (error)
4122 break;
4123
4124 SCTP_CHECK_AND_CAST(ss, optval, union sctp_sockstore, optsize);
4125 /* SUPER USER CHECK? */
4126 error = sctp_dynamic_set_primary(&ss->sa, vrf_id);
4127 }
4128 break;
4129 case SCTP_SET_PEER_PRIMARY_ADDR:
4130 {
4131 struct sctp_setpeerprim *sspp;
4132
4133 SCTP_CHECK_AND_CAST(sspp, optval, struct sctp_setpeerprim, optsize);
4134 SCTP_FIND_STCB(inp, stcb, sspp->sspp_assoc_id);
4135 if (stcb != NULL) {
4136 struct sctp_ifa *ifa;
4137
4138 ifa = sctp_find_ifa_by_addr((struct sockaddr *)&sspp->sspp_addr,
4139 stcb->asoc.vrf_id, SCTP_ADDR_NOT_LOCKED);
4140 if (ifa == NULL) {
4141 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4142 error = EINVAL;
4143 goto out_of_it;
4144 }
4145 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
4146 /*
4147 * Must validate the ifa found is in
4148 * our ep
4149 */
4150 struct sctp_laddr *laddr;
4151 int found = 0;
4152
4153 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4154 if (laddr->ifa == NULL) {
4155 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
4156 __FUNCTION__);
4157 continue;
4158 }
4159 if (laddr->ifa == ifa) {
4160 found = 1;
4161 break;
4162 }
4163 }
4164 if (!found) {
4165 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4166 error = EINVAL;
4167 goto out_of_it;
4168 }
4169 }
4170 if (sctp_set_primary_ip_address_sa(stcb,
4171 (struct sockaddr *)&sspp->sspp_addr) != 0) {
4172 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4173 error = EINVAL;
4174 }
4175 out_of_it:
4176 SCTP_TCB_UNLOCK(stcb);
4177 } else {
4178 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4179 error = EINVAL;
4180 }
4181
4182 }
4183 break;
4184 case SCTP_BINDX_ADD_ADDR:
4185 {
4186 struct sctp_getaddresses *addrs;
4187 size_t sz;
4188 struct thread *td;
4189
4190 td = (struct thread *)p;
4191 SCTP_CHECK_AND_CAST(addrs, optval, struct sctp_getaddresses,
4192 optsize);
4193 if (addrs->addr->sa_family == AF_INET) {
4194 sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in);
4195 if (optsize < sz) {
4196 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4197 error = EINVAL;
4198 break;
4199 }
4200 if (td != NULL && (error = prison_local_ip4(td->td_ucred, &(((struct sockaddr_in *)(addrs->addr))->sin_addr)))) {
4201 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error);
4202 break;
4203 }
4204#ifdef INET6
4205 } else if (addrs->addr->sa_family == AF_INET6) {
4206 sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in6);
4207 if (optsize < sz) {
4208 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4209 error = EINVAL;
4210 break;
4211 }
4212 if (td != NULL && (error = prison_local_ip6(td->td_ucred, &(((struct sockaddr_in6 *)(addrs->addr))->sin6_addr),
4213 (SCTP_IPV6_V6ONLY(inp) != 0))) != 0) {
4214 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error);
4215 break;
4216 }
4217#endif
4218 } else {
4219 error = EAFNOSUPPORT;
4220 break;
4221 }
4222 sctp_bindx_add_address(so, inp, addrs->addr,
4223 addrs->sget_assoc_id, vrf_id,
4224 &error, p);
4225 }
4226 break;
4227 case SCTP_BINDX_REM_ADDR:
4228 {
4229 struct sctp_getaddresses *addrs;
4230 size_t sz;
4231 struct thread *td;
4232
4233 td = (struct thread *)p;
4234
4235 SCTP_CHECK_AND_CAST(addrs, optval, struct sctp_getaddresses, optsize);
4236 if (addrs->addr->sa_family == AF_INET) {
4237 sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in);
4238 if (optsize < sz) {
4239 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4240 error = EINVAL;
4241 break;
4242 }
4243 if (td != NULL && (error = prison_local_ip4(td->td_ucred, &(((struct sockaddr_in *)(addrs->addr))->sin_addr)))) {
4244 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error);
4245 break;
4246 }
4247#ifdef INET6
4248 } else if (addrs->addr->sa_family == AF_INET6) {
4249 sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in6);
4250 if (optsize < sz) {
4251 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4252 error = EINVAL;
4253 break;
4254 }
4255 if (td != NULL && (error = prison_local_ip6(td->td_ucred, &(((struct sockaddr_in6 *)(addrs->addr))->sin6_addr),
4256 (SCTP_IPV6_V6ONLY(inp) != 0))) != 0) {
4257 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error);
4258 break;
4259 }
4260#endif
4261 } else {
4262 error = EAFNOSUPPORT;
4263 break;
4264 }
4265 sctp_bindx_delete_address(so, inp, addrs->addr,
4266 addrs->sget_assoc_id, vrf_id,
4267 &error);
4268 }
4269 break;
4270 default:
4271 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
4272 error = ENOPROTOOPT;
4273 break;
4274 } /* end switch (opt) */
4275 return (error);
4276}
4277
4278int
4279sctp_ctloutput(struct socket *so, struct sockopt *sopt)
4280{
4281 void *optval = NULL;
4282 size_t optsize = 0;
4283 struct sctp_inpcb *inp;
4284 void *p;
4285 int error = 0;
4286
4287 inp = (struct sctp_inpcb *)so->so_pcb;
4288 if (inp == 0) {
 4289 /* I made this the same as TCP since we are not set up. */
4290 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4291 return (ECONNRESET);
4292 }
4293 if (sopt->sopt_level != IPPROTO_SCTP) {
4294 /* wrong proto level... send back up to IP */
4295#ifdef INET6
4296 if (INP_CHECK_SOCKAF(so, AF_INET6))
4297 error = ip6_ctloutput(so, sopt);
4298 else
4299#endif /* INET6 */
4300 error = ip_ctloutput(so, sopt);
4301 return (error);
4302 }
4303 optsize = sopt->sopt_valsize;
4304 if (optsize) {
4305 SCTP_MALLOC(optval, void *, optsize, SCTP_M_SOCKOPT);
4306 if (optval == NULL) {
4307 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOBUFS);
4308 return (ENOBUFS);
4309 }
4310 error = sooptcopyin(sopt, optval, optsize, optsize);
4311 if (error) {
4312 SCTP_FREE(optval, SCTP_M_SOCKOPT);
4313 goto out;
4314 }
4315 }
4316 p = (void *)sopt->sopt_td;
4317 if (sopt->sopt_dir == SOPT_SET) {
4318 error = sctp_setopt(so, sopt->sopt_name, optval, optsize, p);
4319 } else if (sopt->sopt_dir == SOPT_GET) {
4320 error = sctp_getopt(so, sopt->sopt_name, optval, &optsize, p);
4321 } else {
4322 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4323 error = EINVAL;
4324 }
4325 if ((error == 0) && (optval != NULL)) {
4326 error = sooptcopyout(sopt, optval, optsize);
4327 SCTP_FREE(optval, SCTP_M_SOCKOPT);
4328 } else if (optval != NULL) {
4329 SCTP_FREE(optval, SCTP_M_SOCKOPT);
4330 }
4331out:
4332 return (error);
4333}
4334
4335
4336static int
4337sctp_connect(struct socket *so, struct sockaddr *addr, struct thread *p)
4338{
4339 int error = 0;
4340 int create_lock_on = 0;
4341 uint32_t vrf_id;
4342 struct sctp_inpcb *inp;
4343 struct sctp_tcb *stcb = NULL;
4344
4345 inp = (struct sctp_inpcb *)so->so_pcb;
4346 if (inp == 0) {
 4347 /* I made this the same as TCP since we are not set up. */
4348 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4349 return (ECONNRESET);
4350 }
4351 if (addr == NULL) {
4352 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4353 return EINVAL;
4354 }
4355#ifdef INET6
4356 if (addr->sa_family == AF_INET6) {
4357 struct sockaddr_in6 *sin6p;
4358
4359 if (addr->sa_len != sizeof(struct sockaddr_in6)) {
4360 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4361 return (EINVAL);
4362 }
4363 sin6p = (struct sockaddr_in6 *)addr;
4364 if (p != NULL && (error = prison_remote_ip6(p->td_ucred, &sin6p->sin6_addr)) != 0) {
4365 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
4366 return (error);
4367 }
4368 } else
4369#endif
4370 if (addr->sa_family == AF_INET) {
4371 struct sockaddr_in *sinp;
4372
4373 if (addr->sa_len != sizeof(struct sockaddr_in)) {
4374 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4375 return (EINVAL);
4376 }
4377 sinp = (struct sockaddr_in *)addr;
4378 if (p != NULL && (error = prison_remote_ip4(p->td_ucred, &sinp->sin_addr)) != 0) {
4379 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
4380 return (error);
4381 }
4382 } else {
4383 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EAFNOSUPPORT);
4384 return (EAFNOSUPPORT);
4385 }
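      /*
       * Hold a reference on the endpoint and take the association-create
       * lock so the inp cannot go away and no other thread can set up an
       * association on it while we build this one.
       */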
4386 SCTP_INP_INCR_REF(inp);
4387 SCTP_ASOC_CREATE_LOCK(inp);
4388 create_lock_on = 1;
4389
4390
4391 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
4392 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
 4393 /* Should I really unlock? */
4394 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EFAULT);
4395 error = EFAULT;
4396 goto out_now;
4397 }
4398#ifdef INET6
4399 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
4400 (addr->sa_family == AF_INET6)) {
4401 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4402 error = EINVAL;
4403 goto out_now;
4404 }
4405#endif /* INET6 */
4406 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
4407 SCTP_PCB_FLAGS_UNBOUND) {
 4408 /* Bind an ephemeral port */
4409 error = sctp_inpcb_bind(so, NULL, NULL, p);
4410 if (error) {
4411 goto out_now;
4412 }
4413 }
4414 /* Now do we connect? */
4415 if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) &&
4416 (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE))) {
4417 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4418 error = EINVAL;
4419 goto out_now;
4420 }
4421 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
4422 (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
4423 /* We are already connected AND the TCP model */
4424 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE);
4425 error = EADDRINUSE;
4426 goto out_now;
4427 }
4428 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
4429 SCTP_INP_RLOCK(inp);
4430 stcb = LIST_FIRST(&inp->sctp_asoc_list);
4431 SCTP_INP_RUNLOCK(inp);
4432 } else {
4433 /*
4434 * We increment here since sctp_findassociation_ep_addr()
4435 * will do a decrement if it finds the stcb as long as the
4436 * locked tcb (last argument) is NOT a TCB.. aka NULL.
4437 */
4438 SCTP_INP_INCR_REF(inp);
4439 stcb = sctp_findassociation_ep_addr(&inp, addr, NULL, NULL, NULL);
4440 if (stcb == NULL) {
4441 SCTP_INP_DECR_REF(inp);
4442 } else {
4443 SCTP_TCB_UNLOCK(stcb);
4444 }
4445 }
4446 if (stcb != NULL) {
 4447 /* Already have or are bringing up an association */
4448 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
4449 error = EALREADY;
4450 goto out_now;
4451 }
4452 vrf_id = inp->def_vrf_id;
4453 /* We are GOOD to go */
4454 stcb = sctp_aloc_assoc(inp, addr, 1, &error, 0, vrf_id, p);
4455 if (stcb == NULL) {
4456 /* Gak! no memory */
4457 goto out_now;
4458 }
4459 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
4460 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
4461 /* Set the connected flag so we can queue data */
4462 soisconnecting(so);
4463 }
4464 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT);
4465 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
4466
4467 /* initialize authentication parameters for the assoc */
4468 sctp_initialize_auth_params(inp, stcb);
4469
4470 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
4471 SCTP_TCB_UNLOCK(stcb);
4472out_now:
4473 if (create_lock_on) {
4474 SCTP_ASOC_CREATE_UNLOCK(inp);
4475 }
4476 SCTP_INP_DECR_REF(inp);
4477 return error;
4478}
4479
4480int
4481sctp_listen(struct socket *so, int backlog, struct thread *p)
4482{
4483 /*
4484 * Note this module depends on the protocol processing being called
4485 * AFTER any socket level flags and backlog are applied to the
4486 * socket. The traditional way that the socket flags are applied is
4487 * AFTER protocol processing. We have made a change to the
 4488 * sys/kern/uipc_socket.c module to reverse this, but that change MUST
 4489 * be in place if the socket API for SCTP is to work properly.
4490 */
4491
4492 int error = 0;
4493 struct sctp_inpcb *inp;
4494
4495 inp = (struct sctp_inpcb *)so->so_pcb;
4496 if (inp == 0) {
 4497 /* I made this the same as TCP since we are not set up. */
4498 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4499 return (ECONNRESET);
4500 }
4501 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) {
4502 /* See if we have a listener */
4503 struct sctp_inpcb *tinp;
4504 union sctp_sockstore store, *sp;
4505
4506 sp = &store;
4507 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
4508 /* not bound all */
4509 struct sctp_laddr *laddr;
4510
4511 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4512 memcpy(&store, &laddr->ifa->address, sizeof(store));
4513 sp->sin.sin_port = inp->sctp_lport;
4514 tinp = sctp_pcb_findep(&sp->sa, 0, 0, inp->def_vrf_id);
4515 if (tinp && (tinp != inp) &&
4516 ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) == 0) &&
4517 ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
4518 (tinp->sctp_socket->so_qlimit)) {
4519 /*
4520 * we have a listener already and
 4521 * it's not this inp.
4522 */
4523 SCTP_INP_DECR_REF(tinp);
4524 return (EADDRINUSE);
4525 } else if (tinp) {
4526 SCTP_INP_DECR_REF(tinp);
4527 }
4528 }
4529 } else {
4530 /* Setup a local addr bound all */
4531 memset(&store, 0, sizeof(store));
4532 store.sin.sin_port = inp->sctp_lport;
4533#ifdef INET6
4534 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
4535 store.sa.sa_family = AF_INET6;
4536 store.sa.sa_len = sizeof(struct sockaddr_in6);
4537 }
4538#endif
4539 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
4540 store.sa.sa_family = AF_INET;
4541 store.sa.sa_len = sizeof(struct sockaddr_in);
4542 }
4543 tinp = sctp_pcb_findep(&sp->sa, 0, 0, inp->def_vrf_id);
4544 if (tinp && (tinp != inp) &&
4545 ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) == 0) &&
4546 ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
4547 (tinp->sctp_socket->so_qlimit)) {
4548 /*
 4549 * we have a listener already and it's not
4550 * this inp.
4551 */
4552 SCTP_INP_DECR_REF(tinp);
4553 return (EADDRINUSE);
4554 } else if (tinp) {
 4555 SCTP_INP_DECR_REF(tinp);
4556 }
4557 }
4558 }
4559 SCTP_INP_RLOCK(inp);
4560#ifdef SCTP_LOCK_LOGGING
4561 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) {
4562 sctp_log_lock(inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_SOCK);
4563 }
4564#endif
4565 SOCK_LOCK(so);
4566 error = solisten_proto_check(so);
4567 if (error) {
4568 SOCK_UNLOCK(so);
4569 SCTP_INP_RUNLOCK(inp);
4570 return (error);
4571 }
4572 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) &&
4573 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
4574 /*
 4575 * The unlucky case: we are in the TCP pool with this inp and
 4576 * someone else is in the main inp slot, so we must move this
 4577 * inp (the listener) into the main slot and move the inp that
 4578 * held it into the TCP pool.
4579 */
4580 if (sctp_swap_inpcb_for_listen(inp)) {
4581 goto in_use;
4582 }
4583 }
4584 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
4585 (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
4586 /* We are already connected AND the TCP model */
4587in_use:
4588 SCTP_INP_RUNLOCK(inp);
4589 SOCK_UNLOCK(so);
4590 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE);
4591 return (EADDRINUSE);
4592 }
4593 SCTP_INP_RUNLOCK(inp);
4594 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
4595 /* We must do a bind. */
4596 SOCK_UNLOCK(so);
4597 if ((error = sctp_inpcb_bind(so, NULL, NULL, p))) {
4598 /* bind error, probably perm */
4599 return (error);
4600 }
4601 SOCK_LOCK(so);
4602 }
4603 /* It appears for 7.0 and on, we must always call this. */
4604 solisten_proto(so, backlog);
4605 if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
4606 /* remove the ACCEPTCONN flag for one-to-many sockets */
4607 so->so_options &= ~SO_ACCEPTCONN;
4608 }
4609 if (backlog == 0) {
4610 /* turning off listen */
4611 so->so_options &= ~SO_ACCEPTCONN;
4612 }
4613 SOCK_UNLOCK(so);
4614 return (error);
4615}
4616
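/* Count of read wakeups that were deferred until sctp_accept() ran. */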
4617static int sctp_defered_wakeup_cnt = 0;
4618
4619int
4620sctp_accept(struct socket *so, struct sockaddr **addr)
4621{
4622 struct sctp_tcb *stcb;
4623 struct sctp_inpcb *inp;
4624 union sctp_sockstore store;
4625
4626#ifdef INET6
4627 int error;
4628
4629#endif
4630 inp = (struct sctp_inpcb *)so->so_pcb;
4631
4632 if (inp == 0) {
4633 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4634 return (ECONNRESET);
4635 }
4636 SCTP_INP_RLOCK(inp);
4637 if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
4638 SCTP_INP_RUNLOCK(inp);
4639 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
4640 return (EOPNOTSUPP);
4641 }
4642 if (so->so_state & SS_ISDISCONNECTED) {
4643 SCTP_INP_RUNLOCK(inp);
4644 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ECONNABORTED);
4645 return (ECONNABORTED);
4646 }
4647 stcb = LIST_FIRST(&inp->sctp_asoc_list);
4648 if (stcb == NULL) {
4649 SCTP_INP_RUNLOCK(inp);
4650 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4651 return (ECONNRESET);
4652 }
4653 SCTP_TCB_LOCK(stcb);
4654 SCTP_INP_RUNLOCK(inp);
4655 store = stcb->asoc.primary_destination->ro._l_addr;
4656 SCTP_TCB_UNLOCK(stcb);
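      /* Return the primary destination's address as the peer name. */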
4657 switch (store.sa.sa_family) {
4658 case AF_INET:
4659 {
4660 struct sockaddr_in *sin;
4661
4662 SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
4663 sin->sin_family = AF_INET;
4664 sin->sin_len = sizeof(*sin);
4665 sin->sin_port = ((struct sockaddr_in *)&store)->sin_port;
4666 sin->sin_addr = ((struct sockaddr_in *)&store)->sin_addr;
4667 *addr = (struct sockaddr *)sin;
4668 break;
4669 }
4670#ifdef INET6
4671 case AF_INET6:
4672 {
4673 struct sockaddr_in6 *sin6;
4674
4675 SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof *sin6);
4676 sin6->sin6_family = AF_INET6;
4677 sin6->sin6_len = sizeof(*sin6);
4678 sin6->sin6_port = ((struct sockaddr_in6 *)&store)->sin6_port;
4679
4680 sin6->sin6_addr = ((struct sockaddr_in6 *)&store)->sin6_addr;
4681 if ((error = sa6_recoverscope(sin6)) != 0) {
4682 SCTP_FREE_SONAME(sin6);
4683 return (error);
4684 }
4685 *addr = (struct sockaddr *)sin6;
4686 break;
4687 }
4688#endif
4689 default:
4690 /* TSNH */
4691 break;
4692 }
4693 /* Wake any delayed sleep action */
4694 if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) {
4695 SCTP_INP_WLOCK(inp);
4696 inp->sctp_flags &= ~SCTP_PCB_FLAGS_DONT_WAKE;
4697 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT) {
4698 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEOUTPUT;
4699 SCTP_INP_WUNLOCK(inp);
4700 SOCKBUF_LOCK(&inp->sctp_socket->so_snd);
4701 if (sowriteable(inp->sctp_socket)) {
4702 sowwakeup_locked(inp->sctp_socket);
4703 } else {
4704 SOCKBUF_UNLOCK(&inp->sctp_socket->so_snd);
4705 }
4706 SCTP_INP_WLOCK(inp);
4707 }
4708 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT) {
4709 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEINPUT;
4710 SCTP_INP_WUNLOCK(inp);
4711 SOCKBUF_LOCK(&inp->sctp_socket->so_rcv);
4712 if (soreadable(inp->sctp_socket)) {
4713 sctp_defered_wakeup_cnt++;
4714 sorwakeup_locked(inp->sctp_socket);
4715 } else {
4716 SOCKBUF_UNLOCK(&inp->sctp_socket->so_rcv);
4717 }
4718 SCTP_INP_WLOCK(inp);
4719 }
4720 SCTP_INP_WUNLOCK(inp);
4721 }
4722 return (0);
4723}
4724
4725int
4726sctp_ingetaddr(struct socket *so, struct sockaddr **addr)
4727{
4728 struct sockaddr_in *sin;
4729 uint32_t vrf_id;
4730 struct sctp_inpcb *inp;
4731 struct sctp_ifa *sctp_ifa;
4732
4733 /*
4734 * Do the malloc first in case it blocks.
4735 */
4736 SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
4737 sin->sin_family = AF_INET;
4738 sin->sin_len = sizeof(*sin);
4739 inp = (struct sctp_inpcb *)so->so_pcb;
4740 if (!inp) {
4741 SCTP_FREE_SONAME(sin);
4742 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4743 return ECONNRESET;
4744 }
4745 SCTP_INP_RLOCK(inp);
4746 sin->sin_port = inp->sctp_lport;
4747 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
4748 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
4749 struct sctp_tcb *stcb;
4750 struct sockaddr_in *sin_a;
4751 struct sctp_nets *net;
4752 int fnd;
4753
4754 stcb = LIST_FIRST(&inp->sctp_asoc_list);
4755 if (stcb == NULL) {
4756 goto notConn;
4757 }
4758 fnd = 0;
4759 sin_a = NULL;
4760 SCTP_TCB_LOCK(stcb);
4761 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
4762 sin_a = (struct sockaddr_in *)&net->ro._l_addr;
4763 if (sin_a == NULL)
4764 /* this will make coverity happy */
4765 continue;
4766
4767 if (sin_a->sin_family == AF_INET) {
4768 fnd = 1;
4769 break;
4770 }
4771 }
4772 if ((!fnd) || (sin_a == NULL)) {
4773 /* punt */
4774 SCTP_TCB_UNLOCK(stcb);
4775 goto notConn;
4776 }
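      /*
       * Connected and bound-all: report the local address that source
       * address selection picks for this destination.
       */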
4777 vrf_id = inp->def_vrf_id;
4778 sctp_ifa = sctp_source_address_selection(inp,
4779 stcb,
4780 (sctp_route_t *) & net->ro,
4781 net, 0, vrf_id);
4782 if (sctp_ifa) {
4783 sin->sin_addr = sctp_ifa->address.sin.sin_addr;
4784 sctp_free_ifa(sctp_ifa);
4785 }
4786 SCTP_TCB_UNLOCK(stcb);
4787 } else {
4788 /* For the bound all case you get back 0 */
4789 notConn:
4790 sin->sin_addr.s_addr = 0;
4791 }
4792
4793 } else {
4794 /* Take the first IPv4 address in the list */
4795 struct sctp_laddr *laddr;
4796 int fnd = 0;
4797
4798 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4799 if (laddr->ifa->address.sa.sa_family == AF_INET) {
4800 struct sockaddr_in *sin_a;
4801
4802 sin_a = (struct sockaddr_in *)&laddr->ifa->address.sa;
4803 sin->sin_addr = sin_a->sin_addr;
4804 fnd = 1;
4805 break;
4806 }
4807 }
4808 if (!fnd) {
4809 SCTP_FREE_SONAME(sin);
4810 SCTP_INP_RUNLOCK(inp);
4811 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
4812 return ENOENT;
4813 }
4814 }
4815 SCTP_INP_RUNLOCK(inp);
4816 (*addr) = (struct sockaddr *)sin;
4817 return (0);
4818}
4819
4820int
4821sctp_peeraddr(struct socket *so, struct sockaddr **addr)
4822{
 4823 struct sockaddr_in *sin;
4824 int fnd;
4825 struct sockaddr_in *sin_a;
4826 struct sctp_inpcb *inp;
4827 struct sctp_tcb *stcb;
4828 struct sctp_nets *net;
4829
4830 /* Do the malloc first in case it blocks. */
4831 inp = (struct sctp_inpcb *)so->so_pcb;
4832 if ((inp == NULL) ||
4833 ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
4834 /* UDP type and listeners will drop out here */
4835 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
4836 return (ENOTCONN);
4837 }
4838 SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
4839 sin->sin_family = AF_INET;
4840 sin->sin_len = sizeof(*sin);
4841
 4842 /* We must recapture in case we blocked */
4843 inp = (struct sctp_inpcb *)so->so_pcb;
4844 if (!inp) {
4845 SCTP_FREE_SONAME(sin);
4846 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4847 return ECONNRESET;
4848 }
4849 SCTP_INP_RLOCK(inp);
4850 stcb = LIST_FIRST(&inp->sctp_asoc_list);
4851 if (stcb) {
4852 SCTP_TCB_LOCK(stcb);
4853 }
4854 SCTP_INP_RUNLOCK(inp);
4855 if (stcb == NULL) {
4856 SCTP_FREE_SONAME(sin);
4857 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4858 return ECONNRESET;
4859 }
4860 fnd = 0;
4861 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
4862 sin_a = (struct sockaddr_in *)&net->ro._l_addr;
4863 if (sin_a->sin_family == AF_INET) {
4864 fnd = 1;
4865 sin->sin_port = stcb->rport;
4866 sin->sin_addr = sin_a->sin_addr;
4867 break;
4868 }
4869 }
4870 SCTP_TCB_UNLOCK(stcb);
4871 if (!fnd) {
4872 /* No IPv4 address */
4873 SCTP_FREE_SONAME(sin);
4874 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
4875 return ENOENT;
4876 }
4877 (*addr) = (struct sockaddr *)sin;
4878 return (0);
4879}
4880
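/* SCTP entry points exported to the socket layer via the protocol switch. */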
4881struct pr_usrreqs sctp_usrreqs = {
4882 .pru_abort = sctp_abort,
4883 .pru_accept = sctp_accept,
4884 .pru_attach = sctp_attach,
4885 .pru_bind = sctp_bind,
4886 .pru_connect = sctp_connect,
4887 .pru_control = in_control,
4888 .pru_close = sctp_close,
4889 .pru_detach = sctp_close,
4890 .pru_sopoll = sopoll_generic,
4891 .pru_flush = sctp_flush,
4892 .pru_disconnect = sctp_disconnect,
4893 .pru_listen = sctp_listen,
4894 .pru_peeraddr = sctp_peeraddr,
4895 .pru_send = sctp_sendm,
4896 .pru_shutdown = sctp_shutdown,
4897 .pru_sockaddr = sctp_ingetaddr,
4898 .pru_sosend = sctp_sosend,
4899 .pru_soreceive = sctp_soreceive
4900};
3654 sctp_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT);
3655 } else {
3656 sctp_feature_off(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT);
3657 }
3658 SCTP_INP_WUNLOCK(inp);
3659 }
3660 break;
3661
3662 case SCTP_ADAPTATION_LAYER:
3663 {
3664 struct sctp_setadaptation *adap_bits;
3665
3666 SCTP_CHECK_AND_CAST(adap_bits, optval, struct sctp_setadaptation, optsize);
3667 SCTP_INP_WLOCK(inp);
3668 inp->sctp_ep.adaptation_layer_indicator = adap_bits->ssb_adaptation_ind;
3669 SCTP_INP_WUNLOCK(inp);
3670 }
3671 break;
3672#ifdef SCTP_DEBUG
3673 case SCTP_SET_INITIAL_DBG_SEQ:
3674 {
3675 uint32_t *vvv;
3676
3677 SCTP_CHECK_AND_CAST(vvv, optval, uint32_t, optsize);
3678 SCTP_INP_WLOCK(inp);
3679 inp->sctp_ep.initial_sequence_debug = *vvv;
3680 SCTP_INP_WUNLOCK(inp);
3681 }
3682 break;
3683#endif
3684 case SCTP_DEFAULT_SEND_PARAM:
3685 {
3686 struct sctp_sndrcvinfo *s_info;
3687
3688 SCTP_CHECK_AND_CAST(s_info, optval, struct sctp_sndrcvinfo, optsize);
3689 SCTP_FIND_STCB(inp, stcb, s_info->sinfo_assoc_id);
3690
3691 if (stcb) {
 3692 if (s_info->sinfo_stream < stcb->asoc.streamoutcnt) {
3693 memcpy(&stcb->asoc.def_send, s_info, min(optsize, sizeof(stcb->asoc.def_send)));
3694 } else {
3695 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3696 error = EINVAL;
3697 }
3698 SCTP_TCB_UNLOCK(stcb);
3699 } else {
3700 SCTP_INP_WLOCK(inp);
3701 memcpy(&inp->def_send, s_info, min(optsize, sizeof(inp->def_send)));
3702 SCTP_INP_WUNLOCK(inp);
3703 }
3704 }
3705 break;
3706 case SCTP_PEER_ADDR_PARAMS:
 3707 /* Applies to the specific association */
3708 {
3709 struct sctp_paddrparams *paddrp;
3710 struct sctp_nets *net;
3711
3712 SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, optsize);
3713 SCTP_FIND_STCB(inp, stcb, paddrp->spp_assoc_id);
3714 net = NULL;
3715 if (stcb) {
3716 net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address);
3717 } else {
3718 /*
3719 * We increment here since
 3720 * sctp_findassociation_ep_addr() will do a
3721 * decrement if it finds the stcb as long as
3722 * the locked tcb (last argument) is NOT a
3723 * TCB.. aka NULL.
3724 */
3725 SCTP_INP_INCR_REF(inp);
3726 stcb = sctp_findassociation_ep_addr(&inp,
3727 (struct sockaddr *)&paddrp->spp_address,
3728 &net, NULL, NULL);
3729 if (stcb == NULL) {
3730 SCTP_INP_DECR_REF(inp);
3731 }
3732 }
3733 if (stcb && (net == NULL)) {
3734 struct sockaddr *sa;
3735
3736 sa = (struct sockaddr *)&paddrp->spp_address;
3737 if (sa->sa_family == AF_INET) {
3738 struct sockaddr_in *sin;
3739
3740 sin = (struct sockaddr_in *)sa;
3741 if (sin->sin_addr.s_addr) {
3742 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3743 SCTP_TCB_UNLOCK(stcb);
3744 error = EINVAL;
3745 break;
3746 }
3747 } else if (sa->sa_family == AF_INET6) {
3748 struct sockaddr_in6 *sin6;
3749
3750 sin6 = (struct sockaddr_in6 *)sa;
3751 if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
3752 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3753 SCTP_TCB_UNLOCK(stcb);
3754 error = EINVAL;
3755 break;
3756 }
3757 } else {
3758 error = EAFNOSUPPORT;
3759 SCTP_TCB_UNLOCK(stcb);
3760 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
3761 break;
3762 }
3763 }
3764 /* sanity checks */
3765 if ((paddrp->spp_flags & SPP_HB_ENABLE) && (paddrp->spp_flags & SPP_HB_DISABLE)) {
3766 if (stcb)
3767 SCTP_TCB_UNLOCK(stcb);
3768 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3769 return (EINVAL);
3770 }
3771 if ((paddrp->spp_flags & SPP_PMTUD_ENABLE) && (paddrp->spp_flags & SPP_PMTUD_DISABLE)) {
3772 if (stcb)
3773 SCTP_TCB_UNLOCK(stcb);
3774 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3775 return (EINVAL);
3776 }
3777 if (stcb) {
3778 /************************TCB SPECIFIC SET ******************/
3779 /*
3780 * do we change the timer for HB, we run
3781 * only one?
3782 */
3783 int ovh = 0;
3784
3785 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
3786 ovh = SCTP_MED_OVERHEAD;
3787 } else {
3788 ovh = SCTP_MED_V4_OVERHEAD;
3789 }
3790
3791 if (paddrp->spp_hbinterval)
3792 stcb->asoc.heart_beat_delay = paddrp->spp_hbinterval;
3793 else if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO)
3794 stcb->asoc.heart_beat_delay = 0;
3795
 3796 /* net-specific set? */
3797 if (net) {
3798 /************************NET SPECIFIC SET ******************/
3799 if (paddrp->spp_flags & SPP_HB_DEMAND) {
3800 /* on demand HB */
3801 if (sctp_send_hb(stcb, 1, net) < 0) {
3802 /* asoc destroyed */
3803 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3804 error = EINVAL;
3805 break;
3806 }
3807 }
3808 if (paddrp->spp_flags & SPP_HB_DISABLE) {
3809 net->dest_state |= SCTP_ADDR_NOHB;
3810 }
3811 if (paddrp->spp_flags & SPP_HB_ENABLE) {
3812 net->dest_state &= ~SCTP_ADDR_NOHB;
3813 }
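      /*
       * Disabling PMTU discovery on this path: stop the path-MTU
       * raise timer and pin the path MTU at the requested value
       * plus header overhead.
       */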
3814 if ((paddrp->spp_flags & SPP_PMTUD_DISABLE) && (paddrp->spp_pathmtu >= SCTP_SMALLEST_PMTU)) {
3815 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
3816 sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
3817 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_10);
3818 }
3819 if (paddrp->spp_pathmtu > SCTP_DEFAULT_MINSEGMENT) {
3820 net->mtu = paddrp->spp_pathmtu + ovh;
3821 if (net->mtu < stcb->asoc.smallest_mtu) {
3822#ifdef SCTP_PRINT_FOR_B_AND_M
3823 SCTP_PRINTF("SCTP_PMTU_DISABLE calls sctp_pathmtu_adjustment:%d\n",
3824 net->mtu);
3825#endif
3826 sctp_pathmtu_adjustment(inp, stcb, net, net->mtu);
3827 }
3828 }
3829 }
3830 if (paddrp->spp_flags & SPP_PMTUD_ENABLE) {
 3831 if (!SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
3832 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
3833 }
3834 }
3835 if (paddrp->spp_pathmaxrxt)
3836 net->failure_threshold = paddrp->spp_pathmaxrxt;
3837#ifdef INET
3838 if (paddrp->spp_flags & SPP_IPV4_TOS) {
3839 if (net->ro._l_addr.sin.sin_family == AF_INET) {
3840 net->tos_flowlabel = paddrp->spp_ipv4_tos & 0x000000fc;
3841 }
3842 }
3843#endif
3844#ifdef INET6
3845 if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL) {
3846 if (net->ro._l_addr.sin6.sin6_family == AF_INET6) {
3847 net->tos_flowlabel = paddrp->spp_ipv6_flowlabel;
3848 }
3849 }
3850#endif
3851 } else {
3852 /************************ASSOC ONLY -- NO NET SPECIFIC SET ******************/
3853 if (paddrp->spp_pathmaxrxt)
3854 stcb->asoc.def_net_failure = paddrp->spp_pathmaxrxt;
3855
3856 if (paddrp->spp_flags & SPP_HB_ENABLE) {
3857 /* Turn back on the timer */
3858 stcb->asoc.hb_is_disabled = 0;
3859 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
3860 }
3861 if ((paddrp->spp_flags & SPP_PMTUD_DISABLE) && (paddrp->spp_pathmtu >= SCTP_SMALLEST_PMTU)) {
3862 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
3863 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
3864 sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
3865 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_10);
3866 }
3867 if (paddrp->spp_pathmtu > SCTP_DEFAULT_MINSEGMENT) {
3868 net->mtu = paddrp->spp_pathmtu + ovh;
3869 if (net->mtu < stcb->asoc.smallest_mtu) {
3870#ifdef SCTP_PRINT_FOR_B_AND_M
3871 SCTP_PRINTF("SCTP_PMTU_DISABLE calls sctp_pathmtu_adjustment:%d\n",
3872 net->mtu);
3873#endif
3874 sctp_pathmtu_adjustment(inp, stcb, net, net->mtu);
3875 }
3876 }
3877 }
3878 }
3879 if (paddrp->spp_flags & SPP_PMTUD_ENABLE) {
3880 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
 3881 if (!SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
3882 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
3883 }
3884 }
3885 }
3886 if (paddrp->spp_flags & SPP_HB_DISABLE) {
3887 int cnt_of_unconf = 0;
3888 struct sctp_nets *lnet;
3889
3890 stcb->asoc.hb_is_disabled = 1;
3891 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
3892 if (lnet->dest_state & SCTP_ADDR_UNCONFIRMED) {
3893 cnt_of_unconf++;
3894 }
3895 }
3896 /*
3897 * stop the timer ONLY if we
3898 * have no unconfirmed
3899 * addresses
3900 */
3901 if (cnt_of_unconf == 0) {
3902 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
3903 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
3904 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_11);
3905 }
3906 }
3907 }
3908 if (paddrp->spp_flags & SPP_HB_ENABLE) {
3909 /* start up the timer. */
3910 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
3911 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
3912 }
3913 }
3914#ifdef INET
3915 if (paddrp->spp_flags & SPP_IPV4_TOS)
3916 stcb->asoc.default_tos = paddrp->spp_ipv4_tos & 0x000000fc;
3917#endif
3918#ifdef INET6
3919 if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL)
3920 stcb->asoc.default_flowlabel = paddrp->spp_ipv6_flowlabel;
3921#endif
3922
3923 }
3924 SCTP_TCB_UNLOCK(stcb);
3925 } else {
3926 /************************NO TCB, SET TO default stuff ******************/
3927 SCTP_INP_WLOCK(inp);
3928 /*
 3929 * TOS and flow label defaults are set via the
 3930 * IP-level options on the socket
3931 */
3932 if (paddrp->spp_pathmaxrxt) {
3933 inp->sctp_ep.def_net_failure = paddrp->spp_pathmaxrxt;
3934 }
3935 if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO)
3936 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = 0;
3937 else if (paddrp->spp_hbinterval) {
3938 if (paddrp->spp_hbinterval > SCTP_MAX_HB_INTERVAL)
3939 paddrp->spp_hbinterval = SCTP_MAX_HB_INTERVAL;
3940 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = MSEC_TO_TICKS(paddrp->spp_hbinterval);
3941 }
3942 if (paddrp->spp_flags & SPP_HB_ENABLE) {
3943 sctp_feature_off(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT);
3944
3945 } else if (paddrp->spp_flags & SPP_HB_DISABLE) {
3946 sctp_feature_on(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT);
3947 }
3948 SCTP_INP_WUNLOCK(inp);
3949 }
3950 }
3951 break;
3952 case SCTP_RTOINFO:
3953 {
3954 struct sctp_rtoinfo *srto;
3955 uint32_t new_init, new_min, new_max;
3956
3957 SCTP_CHECK_AND_CAST(srto, optval, struct sctp_rtoinfo, optsize);
3958 SCTP_FIND_STCB(inp, stcb, srto->srto_assoc_id);
3959
3960 if (stcb) {
3961 if (srto->srto_initial)
3962 new_init = srto->srto_initial;
3963 else
3964 new_init = stcb->asoc.initial_rto;
3965 if (srto->srto_max)
3966 new_max = srto->srto_max;
3967 else
3968 new_max = stcb->asoc.maxrto;
3969 if (srto->srto_min)
3970 new_min = srto->srto_min;
3971 else
3972 new_min = stcb->asoc.minrto;
3973 if ((new_min <= new_init) && (new_init <= new_max)) {
3974 stcb->asoc.initial_rto = new_init;
3975 stcb->asoc.maxrto = new_max;
3976 stcb->asoc.minrto = new_min;
3977 } else {
3978 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3979 error = EINVAL;
3980 }
3981 SCTP_TCB_UNLOCK(stcb);
3982 } else {
3983 SCTP_INP_WLOCK(inp);
3984 if (srto->srto_initial)
3985 new_init = srto->srto_initial;
3986 else
3987 new_init = inp->sctp_ep.initial_rto;
3988 if (srto->srto_max)
3989 new_max = srto->srto_max;
3990 else
3991 new_max = inp->sctp_ep.sctp_maxrto;
3992 if (srto->srto_min)
3993 new_min = srto->srto_min;
3994 else
3995 new_min = inp->sctp_ep.sctp_minrto;
3996 if ((new_min <= new_init) && (new_init <= new_max)) {
3997 inp->sctp_ep.initial_rto = new_init;
3998 inp->sctp_ep.sctp_maxrto = new_max;
3999 inp->sctp_ep.sctp_minrto = new_min;
4000 } else {
4001 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4002 error = EINVAL;
4003 }
4004 SCTP_INP_WUNLOCK(inp);
4005 }
4006 }
4007 break;
4008 case SCTP_ASSOCINFO:
4009 {
4010 struct sctp_assocparams *sasoc;
4011
4012 SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, optsize);
4013 SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id);
4014 if (sasoc->sasoc_cookie_life) {
4015 /* boundary check the cookie life */
4016 if (sasoc->sasoc_cookie_life < 1000)
4017 sasoc->sasoc_cookie_life = 1000;
4018 if (sasoc->sasoc_cookie_life > SCTP_MAX_COOKIE_LIFE) {
4019 sasoc->sasoc_cookie_life = SCTP_MAX_COOKIE_LIFE;
4020 }
4021 }
4022 if (stcb) {
4023 if (sasoc->sasoc_asocmaxrxt)
4024 stcb->asoc.max_send_times = sasoc->sasoc_asocmaxrxt;
4025 sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets;
4026 sasoc->sasoc_peer_rwnd = 0;
4027 sasoc->sasoc_local_rwnd = 0;
4028 if (sasoc->sasoc_cookie_life) {
4029 stcb->asoc.cookie_life = MSEC_TO_TICKS(sasoc->sasoc_cookie_life);
4030 }
4031 SCTP_TCB_UNLOCK(stcb);
4032 } else {
4033 SCTP_INP_WLOCK(inp);
4034 if (sasoc->sasoc_asocmaxrxt)
4035 inp->sctp_ep.max_send_times = sasoc->sasoc_asocmaxrxt;
4036 sasoc->sasoc_number_peer_destinations = 0;
4037 sasoc->sasoc_peer_rwnd = 0;
4038 sasoc->sasoc_local_rwnd = 0;
4039 if (sasoc->sasoc_cookie_life) {
4040 inp->sctp_ep.def_cookie_life = MSEC_TO_TICKS(sasoc->sasoc_cookie_life);
4041 }
4042 SCTP_INP_WUNLOCK(inp);
4043 }
4044 }
4045 break;
4046 case SCTP_INITMSG:
4047 {
4048 struct sctp_initmsg *sinit;
4049
4050 SCTP_CHECK_AND_CAST(sinit, optval, struct sctp_initmsg, optsize);
4051 SCTP_INP_WLOCK(inp);
4052 if (sinit->sinit_num_ostreams)
4053 inp->sctp_ep.pre_open_stream_count = sinit->sinit_num_ostreams;
4054
4055 if (sinit->sinit_max_instreams)
4056 inp->sctp_ep.max_open_streams_intome = sinit->sinit_max_instreams;
4057
4058 if (sinit->sinit_max_attempts)
4059 inp->sctp_ep.max_init_times = sinit->sinit_max_attempts;
4060
4061 if (sinit->sinit_max_init_timeo)
4062 inp->sctp_ep.initial_init_rto_max = sinit->sinit_max_init_timeo;
4063 SCTP_INP_WUNLOCK(inp);
4064 }
4065 break;
4066 case SCTP_PRIMARY_ADDR:
4067 {
4068 struct sctp_setprim *spa;
4069 struct sctp_nets *net, *lnet;
4070
4071 SCTP_CHECK_AND_CAST(spa, optval, struct sctp_setprim, optsize);
4072 SCTP_FIND_STCB(inp, stcb, spa->ssp_assoc_id);
4073
4074 net = NULL;
4075 if (stcb) {
4076 net = sctp_findnet(stcb, (struct sockaddr *)&spa->ssp_addr);
4077 } else {
4078 /*
4079 * We increment here since
 4080 * sctp_findassociation_ep_addr() will do a
4081 * decrement if it finds the stcb as long as
4082 * the locked tcb (last argument) is NOT a
4083 * TCB.. aka NULL.
4084 */
4085 SCTP_INP_INCR_REF(inp);
4086 stcb = sctp_findassociation_ep_addr(&inp,
4087 (struct sockaddr *)&spa->ssp_addr,
4088 &net, NULL, NULL);
4089 if (stcb == NULL) {
4090 SCTP_INP_DECR_REF(inp);
4091 }
4092 }
4093
4094 if ((stcb) && (net)) {
4095 if ((net != stcb->asoc.primary_destination) &&
4096 (!(net->dest_state & SCTP_ADDR_UNCONFIRMED))) {
4097 /* Ok we need to set it */
4098 lnet = stcb->asoc.primary_destination;
4099 if (sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net) == 0) {
4100 if (net->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
4101 net->dest_state |= SCTP_ADDR_DOUBLE_SWITCH;
4102 }
4103 net->dest_state |= SCTP_ADDR_SWITCH_PRIMARY;
4104 }
4105 }
4106 } else {
4107 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4108 error = EINVAL;
4109 }
4110 if (stcb) {
4111 SCTP_TCB_UNLOCK(stcb);
4112 }
4113 }
4114 break;
4115 case SCTP_SET_DYNAMIC_PRIMARY:
4116 {
4117 union sctp_sockstore *ss;
4118
4119 error = priv_check(curthread,
4120 PRIV_NETINET_RESERVEDPORT);
4121 if (error)
4122 break;
4123
4124 SCTP_CHECK_AND_CAST(ss, optval, union sctp_sockstore, optsize);
4125 /* SUPER USER CHECK? */
4126 error = sctp_dynamic_set_primary(&ss->sa, vrf_id);
4127 }
4128 break;
4129 case SCTP_SET_PEER_PRIMARY_ADDR:
4130 {
4131 struct sctp_setpeerprim *sspp;
4132
4133 SCTP_CHECK_AND_CAST(sspp, optval, struct sctp_setpeerprim, optsize);
4134 SCTP_FIND_STCB(inp, stcb, sspp->sspp_assoc_id);
4135 if (stcb != NULL) {
4136 struct sctp_ifa *ifa;
4137
4138 ifa = sctp_find_ifa_by_addr((struct sockaddr *)&sspp->sspp_addr,
4139 stcb->asoc.vrf_id, SCTP_ADDR_NOT_LOCKED);
4140 if (ifa == NULL) {
4141 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4142 error = EINVAL;
4143 goto out_of_it;
4144 }
4145 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
4146 /*
4147 * Must validate the ifa found is in
4148 * our ep
4149 */
4150 struct sctp_laddr *laddr;
4151 int found = 0;
4152
4153 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4154 if (laddr->ifa == NULL) {
4155 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
4156 __FUNCTION__);
4157 continue;
4158 }
4159 if (laddr->ifa == ifa) {
4160 found = 1;
4161 break;
4162 }
4163 }
4164 if (!found) {
4165 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4166 error = EINVAL;
4167 goto out_of_it;
4168 }
4169 }
4170 if (sctp_set_primary_ip_address_sa(stcb,
4171 (struct sockaddr *)&sspp->sspp_addr) != 0) {
4172 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4173 error = EINVAL;
4174 }
4175 out_of_it:
4176 SCTP_TCB_UNLOCK(stcb);
4177 } else {
4178 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4179 error = EINVAL;
4180 }
4181
4182 }
4183 break;
4184 case SCTP_BINDX_ADD_ADDR:
4185 {
4186 struct sctp_getaddresses *addrs;
4187 size_t sz;
4188 struct thread *td;
4189
4190 td = (struct thread *)p;
4191 SCTP_CHECK_AND_CAST(addrs, optval, struct sctp_getaddresses,
4192 optsize);
4193 if (addrs->addr->sa_family == AF_INET) {
4194 sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in);
4195 if (optsize < sz) {
4196 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4197 error = EINVAL;
4198 break;
4199 }
4200 if (td != NULL && (error = prison_local_ip4(td->td_ucred, &(((struct sockaddr_in *)(addrs->addr))->sin_addr)))) {
4201 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error);
4202 break;
4203 }
4204#ifdef INET6
4205 } else if (addrs->addr->sa_family == AF_INET6) {
4206 sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in6);
4207 if (optsize < sz) {
4208 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4209 error = EINVAL;
4210 break;
4211 }
4212 if (td != NULL && (error = prison_local_ip6(td->td_ucred, &(((struct sockaddr_in6 *)(addrs->addr))->sin6_addr),
4213 (SCTP_IPV6_V6ONLY(inp) != 0))) != 0) {
4214 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error);
4215 break;
4216 }
4217#endif
4218 } else {
4219 error = EAFNOSUPPORT;
4220 break;
4221 }
4222 sctp_bindx_add_address(so, inp, addrs->addr,
4223 addrs->sget_assoc_id, vrf_id,
4224 &error, p);
4225 }
4226 break;
4227 case SCTP_BINDX_REM_ADDR:
4228 {
4229 struct sctp_getaddresses *addrs;
4230 size_t sz;
4231 struct thread *td;
4232
4233 td = (struct thread *)p;
4234
4235 SCTP_CHECK_AND_CAST(addrs, optval, struct sctp_getaddresses, optsize);
4236 if (addrs->addr->sa_family == AF_INET) {
4237 sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in);
4238 if (optsize < sz) {
4239 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4240 error = EINVAL;
4241 break;
4242 }
4243 if (td != NULL && (error = prison_local_ip4(td->td_ucred, &(((struct sockaddr_in *)(addrs->addr))->sin_addr)))) {
4244 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error);
4245 break;
4246 }
4247#ifdef INET6
4248 } else if (addrs->addr->sa_family == AF_INET6) {
4249 sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in6);
4250 if (optsize < sz) {
4251 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4252 error = EINVAL;
4253 break;
4254 }
4255 if (td != NULL && (error = prison_local_ip6(td->td_ucred, &(((struct sockaddr_in6 *)(addrs->addr))->sin6_addr),
4256 (SCTP_IPV6_V6ONLY(inp) != 0))) != 0) {
4257 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error);
4258 break;
4259 }
4260#endif
4261 } else {
4262 error = EAFNOSUPPORT;
4263 break;
4264 }
4265 sctp_bindx_delete_address(so, inp, addrs->addr,
4266 addrs->sget_assoc_id, vrf_id,
4267 &error);
4268 }
4269 break;
4270 default:
4271 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
4272 error = ENOPROTOOPT;
4273 break;
4274 } /* end switch (opt) */
4275 return (error);
4276}
4277
4278int
4279sctp_ctloutput(struct socket *so, struct sockopt *sopt)
4280{
4281 void *optval = NULL;
4282 size_t optsize = 0;
4283 struct sctp_inpcb *inp;
4284 void *p;
4285 int error = 0;
4286
4287 inp = (struct sctp_inpcb *)so->so_pcb;
4288 if (inp == 0) {
 4289 /* I made this the same as TCP since we are not set up. */
4290 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4291 return (ECONNRESET);
4292 }
4293 if (sopt->sopt_level != IPPROTO_SCTP) {
4294 /* wrong proto level... send back up to IP */
4295#ifdef INET6
4296 if (INP_CHECK_SOCKAF(so, AF_INET6))
4297 error = ip6_ctloutput(so, sopt);
4298 else
4299#endif /* INET6 */
4300 error = ip_ctloutput(so, sopt);
4301 return (error);
4302 }
4303 optsize = sopt->sopt_valsize;
4304 if (optsize) {
4305 SCTP_MALLOC(optval, void *, optsize, SCTP_M_SOCKOPT);
4306 if (optval == NULL) {
4307 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOBUFS);
4308 return (ENOBUFS);
4309 }
4310 error = sooptcopyin(sopt, optval, optsize, optsize);
4311 if (error) {
4312 SCTP_FREE(optval, SCTP_M_SOCKOPT);
4313 goto out;
4314 }
4315 }
4316 p = (void *)sopt->sopt_td;
4317 if (sopt->sopt_dir == SOPT_SET) {
4318 error = sctp_setopt(so, sopt->sopt_name, optval, optsize, p);
4319 } else if (sopt->sopt_dir == SOPT_GET) {
4320 error = sctp_getopt(so, sopt->sopt_name, optval, &optsize, p);
4321 } else {
4322 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4323 error = EINVAL;
4324 }
4325 if ((error == 0) && (optval != NULL)) {
4326 error = sooptcopyout(sopt, optval, optsize);
4327 SCTP_FREE(optval, SCTP_M_SOCKOPT);
4328 } else if (optval != NULL) {
4329 SCTP_FREE(optval, SCTP_M_SOCKOPT);
4330 }
4331out:
4332 return (error);
4333}
4334
4335
4336static int
4337sctp_connect(struct socket *so, struct sockaddr *addr, struct thread *p)
4338{
4339 int error = 0;
4340 int create_lock_on = 0;
4341 uint32_t vrf_id;
4342 struct sctp_inpcb *inp;
4343 struct sctp_tcb *stcb = NULL;
4344
4345 inp = (struct sctp_inpcb *)so->so_pcb;
4346 if (inp == 0) {
 4347 /* I made this the same as TCP since we are not set up. */
4348 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4349 return (ECONNRESET);
4350 }
4351 if (addr == NULL) {
4352 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4353 return EINVAL;
4354 }
4355#ifdef INET6
4356 if (addr->sa_family == AF_INET6) {
4357 struct sockaddr_in6 *sin6p;
4358
4359 if (addr->sa_len != sizeof(struct sockaddr_in6)) {
4360 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4361 return (EINVAL);
4362 }
4363 sin6p = (struct sockaddr_in6 *)addr;
4364 if (p != NULL && (error = prison_remote_ip6(p->td_ucred, &sin6p->sin6_addr)) != 0) {
4365 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
4366 return (error);
4367 }
4368 } else
4369#endif
4370 if (addr->sa_family == AF_INET) {
4371 struct sockaddr_in *sinp;
4372
4373 if (addr->sa_len != sizeof(struct sockaddr_in)) {
4374 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4375 return (EINVAL);
4376 }
4377 sinp = (struct sockaddr_in *)addr;
4378 if (p != NULL && (error = prison_remote_ip4(p->td_ucred, &sinp->sin_addr)) != 0) {
4379 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
4380 return (error);
4381 }
4382 } else {
4383 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EAFNOSUPPORT);
4384 return (EAFNOSUPPORT);
4385 }
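      /*
       * Hold a reference on the endpoint and take the association-create
       * lock so the inp cannot go away and no other thread can set up an
       * association on it while we build this one.
       */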
4386 SCTP_INP_INCR_REF(inp);
4387 SCTP_ASOC_CREATE_LOCK(inp);
4388 create_lock_on = 1;
4389
4390
4391 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
4392 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
 4393 /* Should I really unlock? */
4394 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EFAULT);
4395 error = EFAULT;
4396 goto out_now;
4397 }
4398#ifdef INET6
4399 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
4400 (addr->sa_family == AF_INET6)) {
4401 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4402 error = EINVAL;
4403 goto out_now;
4404 }
4405#endif /* INET6 */
4406 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
4407 SCTP_PCB_FLAGS_UNBOUND) {
 4408 /* Bind an ephemeral port */
4409 error = sctp_inpcb_bind(so, NULL, NULL, p);
4410 if (error) {
4411 goto out_now;
4412 }
4413 }
4414 /* Now do we connect? */
4415 if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) &&
4416 (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE))) {
4417 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4418 error = EINVAL;
4419 goto out_now;
4420 }
4421 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
4422 (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
4423 /* We are already connected AND the TCP model */
4424 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE);
4425 error = EADDRINUSE;
4426 goto out_now;
4427 }
4428 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
4429 SCTP_INP_RLOCK(inp);
4430 stcb = LIST_FIRST(&inp->sctp_asoc_list);
4431 SCTP_INP_RUNLOCK(inp);
4432 } else {
4433 /*
4434 * We increment here since sctp_findassociation_ep_addr()
4435 * will do a decrement if it finds the stcb as long as the
4436 * locked tcb (last argument) is NOT a TCB.. aka NULL.
4437 */
4438 SCTP_INP_INCR_REF(inp);
4439 stcb = sctp_findassociation_ep_addr(&inp, addr, NULL, NULL, NULL);
4440 if (stcb == NULL) {
4441 SCTP_INP_DECR_REF(inp);
4442 } else {
4443 SCTP_TCB_UNLOCK(stcb);
4444 }
4445 }
4446 if (stcb != NULL) {
 4447 /* Already have or are bringing up an association */
4448 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
4449 error = EALREADY;
4450 goto out_now;
4451 }
4452 vrf_id = inp->def_vrf_id;
4453 /* We are GOOD to go */
4454 stcb = sctp_aloc_assoc(inp, addr, 1, &error, 0, vrf_id, p);
4455 if (stcb == NULL) {
4456 /* Gak! no memory */
4457 goto out_now;
4458 }
4459 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
4460 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
4461 /* Set the connected flag so we can queue data */
4462 soisconnecting(so);
4463 }
4464 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT);
4465 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
4466
4467 /* initialize authentication parameters for the assoc */
4468 sctp_initialize_auth_params(inp, stcb);
4469
4470 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
4471 SCTP_TCB_UNLOCK(stcb);
4472out_now:
4473 if (create_lock_on) {
4474 SCTP_ASOC_CREATE_UNLOCK(inp);
4475 }
4476 SCTP_INP_DECR_REF(inp);
4477 return (error);
4478}
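
/*
 * Illustrative sketch (not compiled): a minimal userland view of the
 * one-to-one connect path above, assuming a SOCK_STREAM SCTP socket and a
 * hypothetical port (5001).  connect(2) lands in sctp_connect(), which
 * allocates the association, sends the INIT, and moves the association to
 * SCTP_STATE_COOKIE_WAIT; EALREADY is returned if an association to the
 * peer already exists, EADDRINUSE if this TCP-model socket is connected.
 *
 *	#include <sys/types.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <string.h>
 *
 *	static int
 *	example_sctp_connect(void)
 *	{
 *		struct sockaddr_in sin;
 *		int fd;
 *
 *		fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
 *		if (fd < 0)
 *			return (-1);
 *		memset(&sin, 0, sizeof(sin));
 *		sin.sin_family = AF_INET;
 *		sin.sin_len = sizeof(sin);
 *		sin.sin_port = htons(5001);
 *		sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
 *		return (connect(fd, (struct sockaddr *)&sin, sizeof(sin)));
 *	}
 */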
4479
4480int
4481sctp_listen(struct socket *so, int backlog, struct thread *p)
4482{
4483 /*
4484 * Note that this module depends on the protocol processing being
4485 * called AFTER any socket-level flags and backlog are applied to the
4486 * socket. The traditional way that the socket flags are applied is
4487 * AFTER protocol processing. We have made a change to the
4488 * sys/kern/uipc_socket.c module to reverse this, but this change MUST
4489 * be in place if the socket API for SCTP is to work properly.
4490 */
4491
4492 int error = 0;
4493 struct sctp_inpcb *inp;
4494
4495 inp = (struct sctp_inpcb *)so->so_pcb;
4496 if (inp == NULL) {
4497 /* I made this the same as TCP, since we are not set up. */
4498 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4499 return (ECONNRESET);
4500 }
4501 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) {
4502 /* See if we have a listener */
4503 struct sctp_inpcb *tinp;
4504 union sctp_sockstore store, *sp;
4505
4506 sp = &store;
4507 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
4508 /* not bound all */
4509 struct sctp_laddr *laddr;
4510
4511 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4512 memcpy(&store, &laddr->ifa->address, sizeof(store));
4513 sp->sin.sin_port = inp->sctp_lport;
4514 tinp = sctp_pcb_findep(&sp->sa, 0, 0, inp->def_vrf_id);
4515 if (tinp && (tinp != inp) &&
4516 ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) == 0) &&
4517 ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
4518 (tinp->sctp_socket->so_qlimit)) {
4519 /*
4520 * we have a listener already and
4521 * it's not this inp.
4522 */
4523 SCTP_INP_DECR_REF(tinp);
4524 return (EADDRINUSE);
4525 } else if (tinp) {
4526 SCTP_INP_DECR_REF(tinp);
4527 }
4528 }
4529 } else {
4530 /* Set up a local address for the bound-all case */
4531 memset(&store, 0, sizeof(store));
4532 store.sin.sin_port = inp->sctp_lport;
4533#ifdef INET6
4534 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
4535 store.sa.sa_family = AF_INET6;
4536 store.sa.sa_len = sizeof(struct sockaddr_in6);
4537 }
4538#endif
4539 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
4540 store.sa.sa_family = AF_INET;
4541 store.sa.sa_len = sizeof(struct sockaddr_in);
4542 }
4543 tinp = sctp_pcb_findep(&sp->sa, 0, 0, inp->def_vrf_id);
4544 if (tinp && (tinp != inp) &&
4545 ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) == 0) &&
4546 ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
4547 (tinp->sctp_socket->so_qlimit)) {
4548 /*
4549 * we have a listener already and it's not
4550 * this inp.
4551 */
4552 SCTP_INP_DECR_REF(tinp);
4553 return (EADDRINUSE);
4554 } else if (tinp) {
4555 SCTP_INP_DECR_REF(tinp);
4556 }
4557 }
4558 }
4559 SCTP_INP_RLOCK(inp);
4560#ifdef SCTP_LOCK_LOGGING
4561 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) {
4562 sctp_log_lock(inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_SOCK);
4563 }
4564#endif
4565 SOCK_LOCK(so);
4566 error = solisten_proto_check(so);
4567 if (error) {
4568 SOCK_UNLOCK(so);
4569 SCTP_INP_RUNLOCK(inp);
4570 return (error);
4571 }
4572 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) &&
4573 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
4574 /*
4575 * The unlucky case: we are in the TCP pool with this guy and
4576 * someone else is in the main inp slot.  We must move this guy
4577 * (the listener) to the main slot, and then move the guy that
4578 * was the listener to the TCP pool.
4579 */
4580 if (sctp_swap_inpcb_for_listen(inp)) {
4581 goto in_use;
4582 }
4583 }
4584 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
4585 (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
4586 /* We are already connected AND the TCP model */
4587in_use:
4588 SCTP_INP_RUNLOCK(inp);
4589 SOCK_UNLOCK(so);
4590 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE);
4591 return (EADDRINUSE);
4592 }
4593 SCTP_INP_RUNLOCK(inp);
4594 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
4595 /* We must do a bind. */
4596 SOCK_UNLOCK(so);
4597 if ((error = sctp_inpcb_bind(so, NULL, NULL, p))) {
4598 /* bind error, probably perm */
4599 return (error);
4600 }
4601 SOCK_LOCK(so);
4602 }
4603 /* It appears that for 7.0 and on, we must always call this. */
4604 solisten_proto(so, backlog);
4605 if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
4606 /* remove the ACCEPTCONN flag for one-to-many sockets */
4607 so->so_options &= ~SO_ACCEPTCONN;
4608 }
4609 if (backlog == 0) {
4610 /* turning off listen */
4611 so->so_options &= ~SO_ACCEPTCONN;
4612 }
4613 SOCK_UNLOCK(so);
4614 return (error);
4615}
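
/*
 * Illustrative sketch (not compiled): a userland listener matching the
 * checks in sctp_listen() above, assuming a one-to-one (SOCK_STREAM)
 * socket and a hypothetical port (5001).  If the socket is still unbound,
 * sctp_listen() binds an ephemeral port itself; one-to-many sockets never
 * keep SO_ACCEPTCONN, and calling listen(2) again with a backlog of 0
 * turns listening back off.
 *
 *	#include <sys/types.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <string.h>
 *
 *	static int
 *	example_sctp_listen(int fd)
 *	{
 *		struct sockaddr_in sin;
 *
 *		memset(&sin, 0, sizeof(sin));
 *		sin.sin_family = AF_INET;
 *		sin.sin_len = sizeof(sin);
 *		sin.sin_port = htons(5001);
 *		sin.sin_addr.s_addr = htonl(INADDR_ANY);
 *		if (bind(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0)
 *			return (-1);
 *		return (listen(fd, 5));
 *	}
 */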
4616
4617static int sctp_defered_wakeup_cnt = 0;
4618
4619int
4620sctp_accept(struct socket *so, struct sockaddr **addr)
4621{
4622 struct sctp_tcb *stcb;
4623 struct sctp_inpcb *inp;
4624 union sctp_sockstore store;
4625
4626#ifdef INET6
4627 int error;
4628
4629#endif
4630 inp = (struct sctp_inpcb *)so->so_pcb;
4631
4632 if (inp == NULL) {
4633 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4634 return (ECONNRESET);
4635 }
4636 SCTP_INP_RLOCK(inp);
4637 if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
4638 SCTP_INP_RUNLOCK(inp);
4639 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
4640 return (EOPNOTSUPP);
4641 }
4642 if (so->so_state & SS_ISDISCONNECTED) {
4643 SCTP_INP_RUNLOCK(inp);
4644 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ECONNABORTED);
4645 return (ECONNABORTED);
4646 }
4647 stcb = LIST_FIRST(&inp->sctp_asoc_list);
4648 if (stcb == NULL) {
4649 SCTP_INP_RUNLOCK(inp);
4650 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4651 return (ECONNRESET);
4652 }
4653 SCTP_TCB_LOCK(stcb);
4654 SCTP_INP_RUNLOCK(inp);
4655 store = stcb->asoc.primary_destination->ro._l_addr;
4656 SCTP_TCB_UNLOCK(stcb);
4657 switch (store.sa.sa_family) {
4658 case AF_INET:
4659 {
4660 struct sockaddr_in *sin;
4661
4662 SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
4663 sin->sin_family = AF_INET;
4664 sin->sin_len = sizeof(*sin);
4665 sin->sin_port = ((struct sockaddr_in *)&store)->sin_port;
4666 sin->sin_addr = ((struct sockaddr_in *)&store)->sin_addr;
4667 *addr = (struct sockaddr *)sin;
4668 break;
4669 }
4670#ifdef INET6
4671 case AF_INET6:
4672 {
4673 struct sockaddr_in6 *sin6;
4674
4675 SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof *sin6);
4676 sin6->sin6_family = AF_INET6;
4677 sin6->sin6_len = sizeof(*sin6);
4678 sin6->sin6_port = ((struct sockaddr_in6 *)&store)->sin6_port;
4679
4680 sin6->sin6_addr = ((struct sockaddr_in6 *)&store)->sin6_addr;
4681 if ((error = sa6_recoverscope(sin6)) != 0) {
4682 SCTP_FREE_SONAME(sin6);
4683 return (error);
4684 }
4685 *addr = (struct sockaddr *)sin6;
4686 break;
4687 }
4688#endif
4689 default:
4690 /* TSNH (this should not happen) */
4691 break;
4692 }
4693 /* Wake any delayed sleep action */
4694 if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) {
4695 SCTP_INP_WLOCK(inp);
4696 inp->sctp_flags &= ~SCTP_PCB_FLAGS_DONT_WAKE;
4697 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT) {
4698 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEOUTPUT;
4699 SCTP_INP_WUNLOCK(inp);
4700 SOCKBUF_LOCK(&inp->sctp_socket->so_snd);
4701 if (sowriteable(inp->sctp_socket)) {
4702 sowwakeup_locked(inp->sctp_socket);
4703 } else {
4704 SOCKBUF_UNLOCK(&inp->sctp_socket->so_snd);
4705 }
4706 SCTP_INP_WLOCK(inp);
4707 }
4708 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT) {
4709 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEINPUT;
4710 SCTP_INP_WUNLOCK(inp);
4711 SOCKBUF_LOCK(&inp->sctp_socket->so_rcv);
4712 if (soreadable(inp->sctp_socket)) {
4713 sctp_defered_wakeup_cnt++;
4714 sorwakeup_locked(inp->sctp_socket);
4715 } else {
4716 SOCKBUF_UNLOCK(&inp->sctp_socket->so_rcv);
4717 }
4718 SCTP_INP_WLOCK(inp);
4719 }
4720 SCTP_INP_WUNLOCK(inp);
4721 }
4722 return (0);
4723}
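
/*
 * Illustrative sketch (not compiled): accept(2) on a one-to-one SCTP
 * listener ends up in sctp_accept() above.  The peer address filled in
 * is the association's primary destination, not necessarily the address
 * the INIT arrived on; one-to-many (SOCK_SEQPACKET) sockets get
 * EOPNOTSUPP instead.
 *
 *	#include <sys/types.h>
 *	#include <sys/socket.h>
 *
 *	static int
 *	example_sctp_accept(int lfd)
 *	{
 *		struct sockaddr_storage ss;
 *		socklen_t len;
 *
 *		len = sizeof(ss);
 *		return (accept(lfd, (struct sockaddr *)&ss, &len));
 *	}
 */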
4724
4725int
4726sctp_ingetaddr(struct socket *so, struct sockaddr **addr)
4727{
4728 struct sockaddr_in *sin;
4729 uint32_t vrf_id;
4730 struct sctp_inpcb *inp;
4731 struct sctp_ifa *sctp_ifa;
4732
4733 /*
4734 * Do the malloc first in case it blocks.
4735 */
4736 SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
4737 sin->sin_family = AF_INET;
4738 sin->sin_len = sizeof(*sin);
4739 inp = (struct sctp_inpcb *)so->so_pcb;
4740 if (!inp) {
4741 SCTP_FREE_SONAME(sin);
4742 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4743 return (ECONNRESET);
4744 }
4745 SCTP_INP_RLOCK(inp);
4746 sin->sin_port = inp->sctp_lport;
4747 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
4748 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
4749 struct sctp_tcb *stcb;
4750 struct sockaddr_in *sin_a;
4751 struct sctp_nets *net;
4752 int fnd;
4753
4754 stcb = LIST_FIRST(&inp->sctp_asoc_list);
4755 if (stcb == NULL) {
4756 goto notConn;
4757 }
4758 fnd = 0;
4759 sin_a = NULL;
4760 SCTP_TCB_LOCK(stcb);
4761 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
4762 sin_a = (struct sockaddr_in *)&net->ro._l_addr;
4763 if (sin_a == NULL)
4764 /* this will make coverity happy */
4765 continue;
4766
4767 if (sin_a->sin_family == AF_INET) {
4768 fnd = 1;
4769 break;
4770 }
4771 }
4772 if ((!fnd) || (sin_a == NULL)) {
4773 /* punt */
4774 SCTP_TCB_UNLOCK(stcb);
4775 goto notConn;
4776 }
4777 vrf_id = inp->def_vrf_id;
4778 sctp_ifa = sctp_source_address_selection(inp,
4779 stcb,
4780 (sctp_route_t *) & net->ro,
4781 net, 0, vrf_id);
4782 if (sctp_ifa) {
4783 sin->sin_addr = sctp_ifa->address.sin.sin_addr;
4784 sctp_free_ifa(sctp_ifa);
4785 }
4786 SCTP_TCB_UNLOCK(stcb);
4787 } else {
4788 /* For the bound all case you get back 0 */
4789 notConn:
4790 sin->sin_addr.s_addr = 0;
4791 }
4792
4793 } else {
4794 /* Take the first IPv4 address in the list */
4795 struct sctp_laddr *laddr;
4796 int fnd = 0;
4797
4798 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4799 if (laddr->ifa->address.sa.sa_family == AF_INET) {
4800 struct sockaddr_in *sin_a;
4801
4802 sin_a = (struct sockaddr_in *)&laddr->ifa->address.sa;
4803 sin->sin_addr = sin_a->sin_addr;
4804 fnd = 1;
4805 break;
4806 }
4807 }
4808 if (!fnd) {
4809 SCTP_FREE_SONAME(sin);
4810 SCTP_INP_RUNLOCK(inp);
4811 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
4812 return (ENOENT);
4813 }
4814 }
4815 SCTP_INP_RUNLOCK(inp);
4816 (*addr) = (struct sockaddr *)sin;
4817 return (0);
4818}
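
/*
 * Illustrative sketch (not compiled): getsockname(2) ends up in
 * sctp_ingetaddr() above.  The local port is always returned; for a
 * bound-all endpoint that is not connected the address is 0 (INADDR_ANY),
 * for a connected bound-all endpoint it is chosen by source address
 * selection toward the primary destination, and for a subset-bound
 * endpoint it is the first IPv4 address in the local address list
 * (ENOENT if there is none).
 *
 *	#include <sys/types.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *
 *	static int
 *	example_sctp_getsockname(int fd, struct sockaddr_in *out)
 *	{
 *		socklen_t len;
 *
 *		len = sizeof(*out);
 *		return (getsockname(fd, (struct sockaddr *)out, &len));
 *	}
 */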
4819
4820int
4821sctp_peeraddr(struct socket *so, struct sockaddr **addr)
4822{
4823 struct sockaddr_in *sin = (struct sockaddr_in *)*addr;
4824 int fnd;
4825 struct sockaddr_in *sin_a;
4826 struct sctp_inpcb *inp;
4827 struct sctp_tcb *stcb;
4828 struct sctp_nets *net;
4829
4830 /* Do the malloc first in case it blocks. */
4831 inp = (struct sctp_inpcb *)so->so_pcb;
4832 if ((inp == NULL) ||
4833 ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
4834 /* UDP type and listeners will drop out here */
4835 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
4836 return (ENOTCONN);
4837 }
4838 SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
4839 sin->sin_family = AF_INET;
4840 sin->sin_len = sizeof(*sin);
4841
4842 /* We must recapture in case we blocked */
4843 inp = (struct sctp_inpcb *)so->so_pcb;
4844 if (!inp) {
4845 SCTP_FREE_SONAME(sin);
4846 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4847 return (ECONNRESET);
4848 }
4849 SCTP_INP_RLOCK(inp);
4850 stcb = LIST_FIRST(&inp->sctp_asoc_list);
4851 if (stcb) {
4852 SCTP_TCB_LOCK(stcb);
4853 }
4854 SCTP_INP_RUNLOCK(inp);
4855 if (stcb == NULL) {
4856 SCTP_FREE_SONAME(sin);
4857 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4858 return (ECONNRESET);
4859 }
4860 fnd = 0;
4861 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
4862 sin_a = (struct sockaddr_in *)&net->ro._l_addr;
4863 if (sin_a->sin_family == AF_INET) {
4864 fnd = 1;
4865 sin->sin_port = stcb->rport;
4866 sin->sin_addr = sin_a->sin_addr;
4867 break;
4868 }
4869 }
4870 SCTP_TCB_UNLOCK(stcb);
4871 if (!fnd) {
4872 /* No IPv4 address */
4873 SCTP_FREE_SONAME(sin);
4874 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
4875 return (ENOENT);
4876 }
4877 (*addr) = (struct sockaddr *)sin;
4878 return (0);
4879}
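
/*
 * Illustrative sketch (not compiled): getpeername(2) maps to
 * sctp_peeraddr() above.  It only succeeds on a connected one-to-one
 * socket (ENOTCONN otherwise) and only reports an IPv4 peer address here
 * (ENOENT if the peer has none); the IPv6 counterpart lives in
 * sctp6_usrreq.c.
 *
 *	#include <sys/types.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *
 *	static int
 *	example_sctp_getpeername(int fd, struct sockaddr_in *out)
 *	{
 *		socklen_t len;
 *
 *		len = sizeof(*out);
 *		return (getpeername(fd, (struct sockaddr *)out, &len));
 *	}
 */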
4880
4881struct pr_usrreqs sctp_usrreqs = {
4882 .pru_abort = sctp_abort,
4883 .pru_accept = sctp_accept,
4884 .pru_attach = sctp_attach,
4885 .pru_bind = sctp_bind,
4886 .pru_connect = sctp_connect,
4887 .pru_control = in_control,
4888 .pru_close = sctp_close,
4889 .pru_detach = sctp_close,
4890 .pru_sopoll = sopoll_generic,
4891 .pru_flush = sctp_flush,
4892 .pru_disconnect = sctp_disconnect,
4893 .pru_listen = sctp_listen,
4894 .pru_peeraddr = sctp_peeraddr,
4895 .pru_send = sctp_sendm,
4896 .pru_shutdown = sctp_shutdown,
4897 .pru_sockaddr = sctp_ingetaddr,
4898 .pru_sosend = sctp_sosend,
4899 .pru_soreceive = sctp_soreceive
4900};