1 /*- 2 * Copyright (c) 2001-2007, Cisco Systems, Inc. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions are met: 6 * 7 * a) Redistributions of source code must retain the above copyright notice, 8 * this list of conditions and the following disclaimer. 9 * 10 * b) Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in 12 * the documentation and/or other materials provided with the distribution. 13 * 14 * c) Neither the name of Cisco Systems, Inc. nor the names of its 15 * contributors may be used to endorse or promote products derived 16 * from this software without specific prior written permission. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 28 * THE POSSIBILITY OF SUCH DAMAGE. 
29 */ 30 31 /* $KAME: sctp_output.c,v 1.46 2005/03/06 16:04:17 itojun Exp $ */ 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 36 #include <netinet/sctp_os.h> 37 #include <sys/proc.h> 38 #include <netinet/sctp_var.h> 39 #include <netinet/sctp_header.h> 40 #include <netinet/sctp_pcb.h> 41 #include <netinet/sctputil.h> 42 #include <netinet/sctp_output.h> 43 #include <netinet/sctp_uio.h> 44 #include <netinet/sctputil.h> 45 #include <netinet/sctp_auth.h> 46 #include <netinet/sctp_timer.h> 47 #include <netinet/sctp_asconf.h> 48 #include <netinet/sctp_indata.h> 49 #include <netinet/sctp_bsd_addr.h> 50 51 #ifdef SCTP_DEBUG 52 extern uint32_t sctp_debug_on; 53 54 #endif 55 56 57 58 #define SCTP_MAX_GAPS_INARRAY 4 59 struct sack_track { 60 uint8_t right_edge; /* mergable on the right edge */ 61 uint8_t left_edge; /* mergable on the left edge */ 62 uint8_t num_entries; 63 uint8_t spare; 64 struct sctp_gap_ack_block gaps[SCTP_MAX_GAPS_INARRAY]; 65 }; 66 67 struct sack_track sack_array[256] = { 68 {0, 0, 0, 0, /* 0x00 */ 69 {{0, 0}, 70 {0, 0}, 71 {0, 0}, 72 {0, 0} 73 } 74 }, 75 {1, 0, 1, 0, /* 0x01 */ 76 {{0, 0}, 77 {0, 0}, 78 {0, 0}, 79 {0, 0} 80 } 81 }, 82 {0, 0, 1, 0, /* 0x02 */ 83 {{1, 1}, 84 {0, 0}, 85 {0, 0}, 86 {0, 0} 87 } 88 }, 89 {1, 0, 1, 0, /* 0x03 */ 90 {{0, 1}, 91 {0, 0}, 92 {0, 0}, 93 {0, 0} 94 } 95 }, 96 {0, 0, 1, 0, /* 0x04 */ 97 {{2, 2}, 98 {0, 0}, 99 {0, 0}, 100 {0, 0} 101 } 102 }, 103 {1, 0, 2, 0, /* 0x05 */ 104 {{0, 0}, 105 {2, 2}, 106 {0, 0}, 107 {0, 0} 108 } 109 }, 110 {0, 0, 1, 0, /* 0x06 */ 111 {{1, 2}, 112 {0, 0}, 113 {0, 0}, 114 {0, 0} 115 } 116 }, 117 {1, 0, 1, 0, /* 0x07 */ 118 {{0, 2}, 119 {0, 0}, 120 {0, 0}, 121 {0, 0} 122 } 123 }, 124 {0, 0, 1, 0, /* 0x08 */ 125 {{3, 3}, 126 {0, 0}, 127 {0, 0}, 128 {0, 0} 129 } 130 }, 131 {1, 0, 2, 0, /* 0x09 */ 132 {{0, 0}, 133 {3, 3}, 134 {0, 0}, 135 {0, 0} 136 } 137 }, 138 {0, 0, 2, 0, /* 0x0a */ 139 {{1, 1}, 140 {3, 3}, 141 {0, 0}, 142 {0, 0} 143 } 144 }, 145 {1, 0, 2, 0, /* 0x0b */ 146 {{0, 1}, 147 
{3, 3}, 148 {0, 0}, 149 {0, 0} 150 } 151 }, 152 {0, 0, 1, 0, /* 0x0c */ 153 {{2, 3}, 154 {0, 0}, 155 {0, 0}, 156 {0, 0} 157 } 158 }, 159 {1, 0, 2, 0, /* 0x0d */ 160 {{0, 0}, 161 {2, 3}, 162 {0, 0}, 163 {0, 0} 164 } 165 }, 166 {0, 0, 1, 0, /* 0x0e */ 167 {{1, 3}, 168 {0, 0}, 169 {0, 0}, 170 {0, 0} 171 } 172 }, 173 {1, 0, 1, 0, /* 0x0f */ 174 {{0, 3}, 175 {0, 0}, 176 {0, 0}, 177 {0, 0} 178 } 179 }, 180 {0, 0, 1, 0, /* 0x10 */ 181 {{4, 4}, 182 {0, 0}, 183 {0, 0}, 184 {0, 0} 185 } 186 }, 187 {1, 0, 2, 0, /* 0x11 */ 188 {{0, 0}, 189 {4, 4}, 190 {0, 0}, 191 {0, 0} 192 } 193 }, 194 {0, 0, 2, 0, /* 0x12 */ 195 {{1, 1}, 196 {4, 4}, 197 {0, 0}, 198 {0, 0} 199 } 200 }, 201 {1, 0, 2, 0, /* 0x13 */ 202 {{0, 1}, 203 {4, 4}, 204 {0, 0}, 205 {0, 0} 206 } 207 }, 208 {0, 0, 2, 0, /* 0x14 */ 209 {{2, 2}, 210 {4, 4}, 211 {0, 0}, 212 {0, 0} 213 } 214 }, 215 {1, 0, 3, 0, /* 0x15 */ 216 {{0, 0}, 217 {2, 2}, 218 {4, 4}, 219 {0, 0} 220 } 221 }, 222 {0, 0, 2, 0, /* 0x16 */ 223 {{1, 2}, 224 {4, 4}, 225 {0, 0}, 226 {0, 0} 227 } 228 }, 229 {1, 0, 2, 0, /* 0x17 */ 230 {{0, 2}, 231 {4, 4}, 232 {0, 0}, 233 {0, 0} 234 } 235 }, 236 {0, 0, 1, 0, /* 0x18 */ 237 {{3, 4}, 238 {0, 0}, 239 {0, 0}, 240 {0, 0} 241 } 242 }, 243 {1, 0, 2, 0, /* 0x19 */ 244 {{0, 0}, 245 {3, 4}, 246 {0, 0}, 247 {0, 0} 248 } 249 }, 250 {0, 0, 2, 0, /* 0x1a */ 251 {{1, 1}, 252 {3, 4}, 253 {0, 0}, 254 {0, 0} 255 } 256 }, 257 {1, 0, 2, 0, /* 0x1b */ 258 {{0, 1}, 259 {3, 4}, 260 {0, 0}, 261 {0, 0} 262 } 263 }, 264 {0, 0, 1, 0, /* 0x1c */ 265 {{2, 4}, 266 {0, 0}, 267 {0, 0}, 268 {0, 0} 269 } 270 }, 271 {1, 0, 2, 0, /* 0x1d */ 272 {{0, 0}, 273 {2, 4}, 274 {0, 0}, 275 {0, 0} 276 } 277 }, 278 {0, 0, 1, 0, /* 0x1e */ 279 {{1, 4}, 280 {0, 0}, 281 {0, 0}, 282 {0, 0} 283 } 284 }, 285 {1, 0, 1, 0, /* 0x1f */ 286 {{0, 4}, 287 {0, 0}, 288 {0, 0}, 289 {0, 0} 290 } 291 }, 292 {0, 0, 1, 0, /* 0x20 */ 293 {{5, 5}, 294 {0, 0}, 295 {0, 0}, 296 {0, 0} 297 } 298 }, 299 {1, 0, 2, 0, /* 0x21 */ 300 {{0, 0}, 301 {5, 5}, 302 {0, 0}, 303 {0, 0} 304 } 305 
}, 306 {0, 0, 2, 0, /* 0x22 */ 307 {{1, 1}, 308 {5, 5}, 309 {0, 0}, 310 {0, 0} 311 } 312 }, 313 {1, 0, 2, 0, /* 0x23 */ 314 {{0, 1}, 315 {5, 5}, 316 {0, 0}, 317 {0, 0} 318 } 319 }, 320 {0, 0, 2, 0, /* 0x24 */ 321 {{2, 2}, 322 {5, 5}, 323 {0, 0}, 324 {0, 0} 325 } 326 }, 327 {1, 0, 3, 0, /* 0x25 */ 328 {{0, 0}, 329 {2, 2}, 330 {5, 5}, 331 {0, 0} 332 } 333 }, 334 {0, 0, 2, 0, /* 0x26 */ 335 {{1, 2}, 336 {5, 5}, 337 {0, 0}, 338 {0, 0} 339 } 340 }, 341 {1, 0, 2, 0, /* 0x27 */ 342 {{0, 2}, 343 {5, 5}, 344 {0, 0}, 345 {0, 0} 346 } 347 }, 348 {0, 0, 2, 0, /* 0x28 */ 349 {{3, 3}, 350 {5, 5}, 351 {0, 0}, 352 {0, 0} 353 } 354 }, 355 {1, 0, 3, 0, /* 0x29 */ 356 {{0, 0}, 357 {3, 3}, 358 {5, 5}, 359 {0, 0} 360 } 361 }, 362 {0, 0, 3, 0, /* 0x2a */ 363 {{1, 1}, 364 {3, 3}, 365 {5, 5}, 366 {0, 0} 367 } 368 }, 369 {1, 0, 3, 0, /* 0x2b */ 370 {{0, 1}, 371 {3, 3}, 372 {5, 5}, 373 {0, 0} 374 } 375 }, 376 {0, 0, 2, 0, /* 0x2c */ 377 {{2, 3}, 378 {5, 5}, 379 {0, 0}, 380 {0, 0} 381 } 382 }, 383 {1, 0, 3, 0, /* 0x2d */ 384 {{0, 0}, 385 {2, 3}, 386 {5, 5}, 387 {0, 0} 388 } 389 }, 390 {0, 0, 2, 0, /* 0x2e */ 391 {{1, 3}, 392 {5, 5}, 393 {0, 0}, 394 {0, 0} 395 } 396 }, 397 {1, 0, 2, 0, /* 0x2f */ 398 {{0, 3}, 399 {5, 5}, 400 {0, 0}, 401 {0, 0} 402 } 403 }, 404 {0, 0, 1, 0, /* 0x30 */ 405 {{4, 5}, 406 {0, 0}, 407 {0, 0}, 408 {0, 0} 409 } 410 }, 411 {1, 0, 2, 0, /* 0x31 */ 412 {{0, 0}, 413 {4, 5}, 414 {0, 0}, 415 {0, 0} 416 } 417 }, 418 {0, 0, 2, 0, /* 0x32 */ 419 {{1, 1}, 420 {4, 5}, 421 {0, 0}, 422 {0, 0} 423 } 424 }, 425 {1, 0, 2, 0, /* 0x33 */ 426 {{0, 1}, 427 {4, 5}, 428 {0, 0}, 429 {0, 0} 430 } 431 }, 432 {0, 0, 2, 0, /* 0x34 */ 433 {{2, 2}, 434 {4, 5}, 435 {0, 0}, 436 {0, 0} 437 } 438 }, 439 {1, 0, 3, 0, /* 0x35 */ 440 {{0, 0}, 441 {2, 2}, 442 {4, 5}, 443 {0, 0} 444 } 445 }, 446 {0, 0, 2, 0, /* 0x36 */ 447 {{1, 2}, 448 {4, 5}, 449 {0, 0}, 450 {0, 0} 451 } 452 }, 453 {1, 0, 2, 0, /* 0x37 */ 454 {{0, 2}, 455 {4, 5}, 456 {0, 0}, 457 {0, 0} 458 } 459 }, 460 {0, 0, 1, 0, /* 0x38 */ 461 {{3, 
5}, 462 {0, 0}, 463 {0, 0}, 464 {0, 0} 465 } 466 }, 467 {1, 0, 2, 0, /* 0x39 */ 468 {{0, 0}, 469 {3, 5}, 470 {0, 0}, 471 {0, 0} 472 } 473 }, 474 {0, 0, 2, 0, /* 0x3a */ 475 {{1, 1}, 476 {3, 5}, 477 {0, 0}, 478 {0, 0} 479 } 480 }, 481 {1, 0, 2, 0, /* 0x3b */ 482 {{0, 1}, 483 {3, 5}, 484 {0, 0}, 485 {0, 0} 486 } 487 }, 488 {0, 0, 1, 0, /* 0x3c */ 489 {{2, 5}, 490 {0, 0}, 491 {0, 0}, 492 {0, 0} 493 } 494 }, 495 {1, 0, 2, 0, /* 0x3d */ 496 {{0, 0}, 497 {2, 5}, 498 {0, 0}, 499 {0, 0} 500 } 501 }, 502 {0, 0, 1, 0, /* 0x3e */ 503 {{1, 5}, 504 {0, 0}, 505 {0, 0}, 506 {0, 0} 507 } 508 }, 509 {1, 0, 1, 0, /* 0x3f */ 510 {{0, 5}, 511 {0, 0}, 512 {0, 0}, 513 {0, 0} 514 } 515 }, 516 {0, 0, 1, 0, /* 0x40 */ 517 {{6, 6}, 518 {0, 0}, 519 {0, 0}, 520 {0, 0} 521 } 522 }, 523 {1, 0, 2, 0, /* 0x41 */ 524 {{0, 0}, 525 {6, 6}, 526 {0, 0}, 527 {0, 0} 528 } 529 }, 530 {0, 0, 2, 0, /* 0x42 */ 531 {{1, 1}, 532 {6, 6}, 533 {0, 0}, 534 {0, 0} 535 } 536 }, 537 {1, 0, 2, 0, /* 0x43 */ 538 {{0, 1}, 539 {6, 6}, 540 {0, 0}, 541 {0, 0} 542 } 543 }, 544 {0, 0, 2, 0, /* 0x44 */ 545 {{2, 2}, 546 {6, 6}, 547 {0, 0}, 548 {0, 0} 549 } 550 }, 551 {1, 0, 3, 0, /* 0x45 */ 552 {{0, 0}, 553 {2, 2}, 554 {6, 6}, 555 {0, 0} 556 } 557 }, 558 {0, 0, 2, 0, /* 0x46 */ 559 {{1, 2}, 560 {6, 6}, 561 {0, 0}, 562 {0, 0} 563 } 564 }, 565 {1, 0, 2, 0, /* 0x47 */ 566 {{0, 2}, 567 {6, 6}, 568 {0, 0}, 569 {0, 0} 570 } 571 }, 572 {0, 0, 2, 0, /* 0x48 */ 573 {{3, 3}, 574 {6, 6}, 575 {0, 0}, 576 {0, 0} 577 } 578 }, 579 {1, 0, 3, 0, /* 0x49 */ 580 {{0, 0}, 581 {3, 3}, 582 {6, 6}, 583 {0, 0} 584 } 585 }, 586 {0, 0, 3, 0, /* 0x4a */ 587 {{1, 1}, 588 {3, 3}, 589 {6, 6}, 590 {0, 0} 591 } 592 }, 593 {1, 0, 3, 0, /* 0x4b */ 594 {{0, 1}, 595 {3, 3}, 596 {6, 6}, 597 {0, 0} 598 } 599 }, 600 {0, 0, 2, 0, /* 0x4c */ 601 {{2, 3}, 602 {6, 6}, 603 {0, 0}, 604 {0, 0} 605 } 606 }, 607 {1, 0, 3, 0, /* 0x4d */ 608 {{0, 0}, 609 {2, 3}, 610 {6, 6}, 611 {0, 0} 612 } 613 }, 614 {0, 0, 2, 0, /* 0x4e */ 615 {{1, 3}, 616 {6, 6}, 617 {0, 0}, 618 {0, 0} 
619 } 620 }, 621 {1, 0, 2, 0, /* 0x4f */ 622 {{0, 3}, 623 {6, 6}, 624 {0, 0}, 625 {0, 0} 626 } 627 }, 628 {0, 0, 2, 0, /* 0x50 */ 629 {{4, 4}, 630 {6, 6}, 631 {0, 0}, 632 {0, 0} 633 } 634 }, 635 {1, 0, 3, 0, /* 0x51 */ 636 {{0, 0}, 637 {4, 4}, 638 {6, 6}, 639 {0, 0} 640 } 641 }, 642 {0, 0, 3, 0, /* 0x52 */ 643 {{1, 1}, 644 {4, 4}, 645 {6, 6}, 646 {0, 0} 647 } 648 }, 649 {1, 0, 3, 0, /* 0x53 */ 650 {{0, 1}, 651 {4, 4}, 652 {6, 6}, 653 {0, 0} 654 } 655 }, 656 {0, 0, 3, 0, /* 0x54 */ 657 {{2, 2}, 658 {4, 4}, 659 {6, 6}, 660 {0, 0} 661 } 662 }, 663 {1, 0, 4, 0, /* 0x55 */ 664 {{0, 0}, 665 {2, 2}, 666 {4, 4}, 667 {6, 6} 668 } 669 }, 670 {0, 0, 3, 0, /* 0x56 */ 671 {{1, 2}, 672 {4, 4}, 673 {6, 6}, 674 {0, 0} 675 } 676 }, 677 {1, 0, 3, 0, /* 0x57 */ 678 {{0, 2}, 679 {4, 4}, 680 {6, 6}, 681 {0, 0} 682 } 683 }, 684 {0, 0, 2, 0, /* 0x58 */ 685 {{3, 4}, 686 {6, 6}, 687 {0, 0}, 688 {0, 0} 689 } 690 }, 691 {1, 0, 3, 0, /* 0x59 */ 692 {{0, 0}, 693 {3, 4}, 694 {6, 6}, 695 {0, 0} 696 } 697 }, 698 {0, 0, 3, 0, /* 0x5a */ 699 {{1, 1}, 700 {3, 4}, 701 {6, 6}, 702 {0, 0} 703 } 704 }, 705 {1, 0, 3, 0, /* 0x5b */ 706 {{0, 1}, 707 {3, 4}, 708 {6, 6}, 709 {0, 0} 710 } 711 }, 712 {0, 0, 2, 0, /* 0x5c */ 713 {{2, 4}, 714 {6, 6}, 715 {0, 0}, 716 {0, 0} 717 } 718 }, 719 {1, 0, 3, 0, /* 0x5d */ 720 {{0, 0}, 721 {2, 4}, 722 {6, 6}, 723 {0, 0} 724 } 725 }, 726 {0, 0, 2, 0, /* 0x5e */ 727 {{1, 4}, 728 {6, 6}, 729 {0, 0}, 730 {0, 0} 731 } 732 }, 733 {1, 0, 2, 0, /* 0x5f */ 734 {{0, 4}, 735 {6, 6}, 736 {0, 0}, 737 {0, 0} 738 } 739 }, 740 {0, 0, 1, 0, /* 0x60 */ 741 {{5, 6}, 742 {0, 0}, 743 {0, 0}, 744 {0, 0} 745 } 746 }, 747 {1, 0, 2, 0, /* 0x61 */ 748 {{0, 0}, 749 {5, 6}, 750 {0, 0}, 751 {0, 0} 752 } 753 }, 754 {0, 0, 2, 0, /* 0x62 */ 755 {{1, 1}, 756 {5, 6}, 757 {0, 0}, 758 {0, 0} 759 } 760 }, 761 {1, 0, 2, 0, /* 0x63 */ 762 {{0, 1}, 763 {5, 6}, 764 {0, 0}, 765 {0, 0} 766 } 767 }, 768 {0, 0, 2, 0, /* 0x64 */ 769 {{2, 2}, 770 {5, 6}, 771 {0, 0}, 772 {0, 0} 773 } 774 }, 775 {1, 0, 3, 0, /* 0x65 */ 
776 {{0, 0}, 777 {2, 2}, 778 {5, 6}, 779 {0, 0} 780 } 781 }, 782 {0, 0, 2, 0, /* 0x66 */ 783 {{1, 2}, 784 {5, 6}, 785 {0, 0}, 786 {0, 0} 787 } 788 }, 789 {1, 0, 2, 0, /* 0x67 */ 790 {{0, 2}, 791 {5, 6}, 792 {0, 0}, 793 {0, 0} 794 } 795 }, 796 {0, 0, 2, 0, /* 0x68 */ 797 {{3, 3}, 798 {5, 6}, 799 {0, 0}, 800 {0, 0} 801 } 802 }, 803 {1, 0, 3, 0, /* 0x69 */ 804 {{0, 0}, 805 {3, 3}, 806 {5, 6}, 807 {0, 0} 808 } 809 }, 810 {0, 0, 3, 0, /* 0x6a */ 811 {{1, 1}, 812 {3, 3}, 813 {5, 6}, 814 {0, 0} 815 } 816 }, 817 {1, 0, 3, 0, /* 0x6b */ 818 {{0, 1}, 819 {3, 3}, 820 {5, 6}, 821 {0, 0} 822 } 823 }, 824 {0, 0, 2, 0, /* 0x6c */ 825 {{2, 3}, 826 {5, 6}, 827 {0, 0}, 828 {0, 0} 829 } 830 }, 831 {1, 0, 3, 0, /* 0x6d */ 832 {{0, 0}, 833 {2, 3}, 834 {5, 6}, 835 {0, 0} 836 } 837 }, 838 {0, 0, 2, 0, /* 0x6e */ 839 {{1, 3}, 840 {5, 6}, 841 {0, 0}, 842 {0, 0} 843 } 844 }, 845 {1, 0, 2, 0, /* 0x6f */ 846 {{0, 3}, 847 {5, 6}, 848 {0, 0}, 849 {0, 0} 850 } 851 }, 852 {0, 0, 1, 0, /* 0x70 */ 853 {{4, 6}, 854 {0, 0}, 855 {0, 0}, 856 {0, 0} 857 } 858 }, 859 {1, 0, 2, 0, /* 0x71 */ 860 {{0, 0}, 861 {4, 6}, 862 {0, 0}, 863 {0, 0} 864 } 865 }, 866 {0, 0, 2, 0, /* 0x72 */ 867 {{1, 1}, 868 {4, 6}, 869 {0, 0}, 870 {0, 0} 871 } 872 }, 873 {1, 0, 2, 0, /* 0x73 */ 874 {{0, 1}, 875 {4, 6}, 876 {0, 0}, 877 {0, 0} 878 } 879 }, 880 {0, 0, 2, 0, /* 0x74 */ 881 {{2, 2}, 882 {4, 6}, 883 {0, 0}, 884 {0, 0} 885 } 886 }, 887 {1, 0, 3, 0, /* 0x75 */ 888 {{0, 0}, 889 {2, 2}, 890 {4, 6}, 891 {0, 0} 892 } 893 }, 894 {0, 0, 2, 0, /* 0x76 */ 895 {{1, 2}, 896 {4, 6}, 897 {0, 0}, 898 {0, 0} 899 } 900 }, 901 {1, 0, 2, 0, /* 0x77 */ 902 {{0, 2}, 903 {4, 6}, 904 {0, 0}, 905 {0, 0} 906 } 907 }, 908 {0, 0, 1, 0, /* 0x78 */ 909 {{3, 6}, 910 {0, 0}, 911 {0, 0}, 912 {0, 0} 913 } 914 }, 915 {1, 0, 2, 0, /* 0x79 */ 916 {{0, 0}, 917 {3, 6}, 918 {0, 0}, 919 {0, 0} 920 } 921 }, 922 {0, 0, 2, 0, /* 0x7a */ 923 {{1, 1}, 924 {3, 6}, 925 {0, 0}, 926 {0, 0} 927 } 928 }, 929 {1, 0, 2, 0, /* 0x7b */ 930 {{0, 1}, 931 {3, 6}, 932 {0, 0}, 933 
{0, 0} 934 } 935 }, 936 {0, 0, 1, 0, /* 0x7c */ 937 {{2, 6}, 938 {0, 0}, 939 {0, 0}, 940 {0, 0} 941 } 942 }, 943 {1, 0, 2, 0, /* 0x7d */ 944 {{0, 0}, 945 {2, 6}, 946 {0, 0}, 947 {0, 0} 948 } 949 }, 950 {0, 0, 1, 0, /* 0x7e */ 951 {{1, 6}, 952 {0, 0}, 953 {0, 0}, 954 {0, 0} 955 } 956 }, 957 {1, 0, 1, 0, /* 0x7f */ 958 {{0, 6}, 959 {0, 0}, 960 {0, 0}, 961 {0, 0} 962 } 963 }, 964 {0, 1, 1, 0, /* 0x80 */ 965 {{7, 7}, 966 {0, 0}, 967 {0, 0}, 968 {0, 0} 969 } 970 }, 971 {1, 1, 2, 0, /* 0x81 */ 972 {{0, 0}, 973 {7, 7}, 974 {0, 0}, 975 {0, 0} 976 } 977 }, 978 {0, 1, 2, 0, /* 0x82 */ 979 {{1, 1}, 980 {7, 7}, 981 {0, 0}, 982 {0, 0} 983 } 984 }, 985 {1, 1, 2, 0, /* 0x83 */ 986 {{0, 1}, 987 {7, 7}, 988 {0, 0}, 989 {0, 0} 990 } 991 }, 992 {0, 1, 2, 0, /* 0x84 */ 993 {{2, 2}, 994 {7, 7}, 995 {0, 0}, 996 {0, 0} 997 } 998 }, 999 {1, 1, 3, 0, /* 0x85 */ 1000 {{0, 0}, 1001 {2, 2}, 1002 {7, 7}, 1003 {0, 0} 1004 } 1005 }, 1006 {0, 1, 2, 0, /* 0x86 */ 1007 {{1, 2}, 1008 {7, 7}, 1009 {0, 0}, 1010 {0, 0} 1011 } 1012 }, 1013 {1, 1, 2, 0, /* 0x87 */ 1014 {{0, 2}, 1015 {7, 7}, 1016 {0, 0}, 1017 {0, 0} 1018 } 1019 }, 1020 {0, 1, 2, 0, /* 0x88 */ 1021 {{3, 3}, 1022 {7, 7}, 1023 {0, 0}, 1024 {0, 0} 1025 } 1026 }, 1027 {1, 1, 3, 0, /* 0x89 */ 1028 {{0, 0}, 1029 {3, 3}, 1030 {7, 7}, 1031 {0, 0} 1032 } 1033 }, 1034 {0, 1, 3, 0, /* 0x8a */ 1035 {{1, 1}, 1036 {3, 3}, 1037 {7, 7}, 1038 {0, 0} 1039 } 1040 }, 1041 {1, 1, 3, 0, /* 0x8b */ 1042 {{0, 1}, 1043 {3, 3}, 1044 {7, 7}, 1045 {0, 0} 1046 } 1047 }, 1048 {0, 1, 2, 0, /* 0x8c */ 1049 {{2, 3}, 1050 {7, 7}, 1051 {0, 0}, 1052 {0, 0} 1053 } 1054 }, 1055 {1, 1, 3, 0, /* 0x8d */ 1056 {{0, 0}, 1057 {2, 3}, 1058 {7, 7}, 1059 {0, 0} 1060 } 1061 }, 1062 {0, 1, 2, 0, /* 0x8e */ 1063 {{1, 3}, 1064 {7, 7}, 1065 {0, 0}, 1066 {0, 0} 1067 } 1068 }, 1069 {1, 1, 2, 0, /* 0x8f */ 1070 {{0, 3}, 1071 {7, 7}, 1072 {0, 0}, 1073 {0, 0} 1074 } 1075 }, 1076 {0, 1, 2, 0, /* 0x90 */ 1077 {{4, 4}, 1078 {7, 7}, 1079 {0, 0}, 1080 {0, 0} 1081 } 1082 }, 1083 {1, 1, 3, 0, /* 0x91 
*/ 1084 {{0, 0}, 1085 {4, 4}, 1086 {7, 7}, 1087 {0, 0} 1088 } 1089 }, 1090 {0, 1, 3, 0, /* 0x92 */ 1091 {{1, 1}, 1092 {4, 4}, 1093 {7, 7}, 1094 {0, 0} 1095 } 1096 }, 1097 {1, 1, 3, 0, /* 0x93 */ 1098 {{0, 1}, 1099 {4, 4}, 1100 {7, 7}, 1101 {0, 0} 1102 } 1103 }, 1104 {0, 1, 3, 0, /* 0x94 */ 1105 {{2, 2}, 1106 {4, 4}, 1107 {7, 7}, 1108 {0, 0} 1109 } 1110 }, 1111 {1, 1, 4, 0, /* 0x95 */ 1112 {{0, 0}, 1113 {2, 2}, 1114 {4, 4}, 1115 {7, 7} 1116 } 1117 }, 1118 {0, 1, 3, 0, /* 0x96 */ 1119 {{1, 2}, 1120 {4, 4}, 1121 {7, 7}, 1122 {0, 0} 1123 } 1124 }, 1125 {1, 1, 3, 0, /* 0x97 */ 1126 {{0, 2}, 1127 {4, 4}, 1128 {7, 7}, 1129 {0, 0} 1130 } 1131 }, 1132 {0, 1, 2, 0, /* 0x98 */ 1133 {{3, 4}, 1134 {7, 7}, 1135 {0, 0}, 1136 {0, 0} 1137 } 1138 }, 1139 {1, 1, 3, 0, /* 0x99 */ 1140 {{0, 0}, 1141 {3, 4}, 1142 {7, 7}, 1143 {0, 0} 1144 } 1145 }, 1146 {0, 1, 3, 0, /* 0x9a */ 1147 {{1, 1}, 1148 {3, 4}, 1149 {7, 7}, 1150 {0, 0} 1151 } 1152 }, 1153 {1, 1, 3, 0, /* 0x9b */ 1154 {{0, 1}, 1155 {3, 4}, 1156 {7, 7}, 1157 {0, 0} 1158 } 1159 }, 1160 {0, 1, 2, 0, /* 0x9c */ 1161 {{2, 4}, 1162 {7, 7}, 1163 {0, 0}, 1164 {0, 0} 1165 } 1166 }, 1167 {1, 1, 3, 0, /* 0x9d */ 1168 {{0, 0}, 1169 {2, 4}, 1170 {7, 7}, 1171 {0, 0} 1172 } 1173 }, 1174 {0, 1, 2, 0, /* 0x9e */ 1175 {{1, 4}, 1176 {7, 7}, 1177 {0, 0}, 1178 {0, 0} 1179 } 1180 }, 1181 {1, 1, 2, 0, /* 0x9f */ 1182 {{0, 4}, 1183 {7, 7}, 1184 {0, 0}, 1185 {0, 0} 1186 } 1187 }, 1188 {0, 1, 2, 0, /* 0xa0 */ 1189 {{5, 5}, 1190 {7, 7}, 1191 {0, 0}, 1192 {0, 0} 1193 } 1194 }, 1195 {1, 1, 3, 0, /* 0xa1 */ 1196 {{0, 0}, 1197 {5, 5}, 1198 {7, 7}, 1199 {0, 0} 1200 } 1201 }, 1202 {0, 1, 3, 0, /* 0xa2 */ 1203 {{1, 1}, 1204 {5, 5}, 1205 {7, 7}, 1206 {0, 0} 1207 } 1208 }, 1209 {1, 1, 3, 0, /* 0xa3 */ 1210 {{0, 1}, 1211 {5, 5}, 1212 {7, 7}, 1213 {0, 0} 1214 } 1215 }, 1216 {0, 1, 3, 0, /* 0xa4 */ 1217 {{2, 2}, 1218 {5, 5}, 1219 {7, 7}, 1220 {0, 0} 1221 } 1222 }, 1223 {1, 1, 4, 0, /* 0xa5 */ 1224 {{0, 0}, 1225 {2, 2}, 1226 {5, 5}, 1227 {7, 7} 1228 } 1229 }, 1230 {0, 
1, 3, 0, /* 0xa6 */ 1231 {{1, 2}, 1232 {5, 5}, 1233 {7, 7}, 1234 {0, 0} 1235 } 1236 }, 1237 {1, 1, 3, 0, /* 0xa7 */ 1238 {{0, 2}, 1239 {5, 5}, 1240 {7, 7}, 1241 {0, 0} 1242 } 1243 }, 1244 {0, 1, 3, 0, /* 0xa8 */ 1245 {{3, 3}, 1246 {5, 5}, 1247 {7, 7}, 1248 {0, 0} 1249 } 1250 }, 1251 {1, 1, 4, 0, /* 0xa9 */ 1252 {{0, 0}, 1253 {3, 3}, 1254 {5, 5}, 1255 {7, 7} 1256 } 1257 }, 1258 {0, 1, 4, 0, /* 0xaa */ 1259 {{1, 1}, 1260 {3, 3}, 1261 {5, 5}, 1262 {7, 7} 1263 } 1264 }, 1265 {1, 1, 4, 0, /* 0xab */ 1266 {{0, 1}, 1267 {3, 3}, 1268 {5, 5}, 1269 {7, 7} 1270 } 1271 }, 1272 {0, 1, 3, 0, /* 0xac */ 1273 {{2, 3}, 1274 {5, 5}, 1275 {7, 7}, 1276 {0, 0} 1277 } 1278 }, 1279 {1, 1, 4, 0, /* 0xad */ 1280 {{0, 0}, 1281 {2, 3}, 1282 {5, 5}, 1283 {7, 7} 1284 } 1285 }, 1286 {0, 1, 3, 0, /* 0xae */ 1287 {{1, 3}, 1288 {5, 5}, 1289 {7, 7}, 1290 {0, 0} 1291 } 1292 }, 1293 {1, 1, 3, 0, /* 0xaf */ 1294 {{0, 3}, 1295 {5, 5}, 1296 {7, 7}, 1297 {0, 0} 1298 } 1299 }, 1300 {0, 1, 2, 0, /* 0xb0 */ 1301 {{4, 5}, 1302 {7, 7}, 1303 {0, 0}, 1304 {0, 0} 1305 } 1306 }, 1307 {1, 1, 3, 0, /* 0xb1 */ 1308 {{0, 0}, 1309 {4, 5}, 1310 {7, 7}, 1311 {0, 0} 1312 } 1313 }, 1314 {0, 1, 3, 0, /* 0xb2 */ 1315 {{1, 1}, 1316 {4, 5}, 1317 {7, 7}, 1318 {0, 0} 1319 } 1320 }, 1321 {1, 1, 3, 0, /* 0xb3 */ 1322 {{0, 1}, 1323 {4, 5}, 1324 {7, 7}, 1325 {0, 0} 1326 } 1327 }, 1328 {0, 1, 3, 0, /* 0xb4 */ 1329 {{2, 2}, 1330 {4, 5}, 1331 {7, 7}, 1332 {0, 0} 1333 } 1334 }, 1335 {1, 1, 4, 0, /* 0xb5 */ 1336 {{0, 0}, 1337 {2, 2}, 1338 {4, 5}, 1339 {7, 7} 1340 } 1341 }, 1342 {0, 1, 3, 0, /* 0xb6 */ 1343 {{1, 2}, 1344 {4, 5}, 1345 {7, 7}, 1346 {0, 0} 1347 } 1348 }, 1349 {1, 1, 3, 0, /* 0xb7 */ 1350 {{0, 2}, 1351 {4, 5}, 1352 {7, 7}, 1353 {0, 0} 1354 } 1355 }, 1356 {0, 1, 2, 0, /* 0xb8 */ 1357 {{3, 5}, 1358 {7, 7}, 1359 {0, 0}, 1360 {0, 0} 1361 } 1362 }, 1363 {1, 1, 3, 0, /* 0xb9 */ 1364 {{0, 0}, 1365 {3, 5}, 1366 {7, 7}, 1367 {0, 0} 1368 } 1369 }, 1370 {0, 1, 3, 0, /* 0xba */ 1371 {{1, 1}, 1372 {3, 5}, 1373 {7, 7}, 1374 {0, 0} 1375 } 
1376 }, 1377 {1, 1, 3, 0, /* 0xbb */ 1378 {{0, 1}, 1379 {3, 5}, 1380 {7, 7}, 1381 {0, 0} 1382 } 1383 }, 1384 {0, 1, 2, 0, /* 0xbc */ 1385 {{2, 5}, 1386 {7, 7}, 1387 {0, 0}, 1388 {0, 0} 1389 } 1390 }, 1391 {1, 1, 3, 0, /* 0xbd */ 1392 {{0, 0}, 1393 {2, 5}, 1394 {7, 7}, 1395 {0, 0} 1396 } 1397 }, 1398 {0, 1, 2, 0, /* 0xbe */ 1399 {{1, 5}, 1400 {7, 7}, 1401 {0, 0}, 1402 {0, 0} 1403 } 1404 }, 1405 {1, 1, 2, 0, /* 0xbf */ 1406 {{0, 5}, 1407 {7, 7}, 1408 {0, 0}, 1409 {0, 0} 1410 } 1411 }, 1412 {0, 1, 1, 0, /* 0xc0 */ 1413 {{6, 7}, 1414 {0, 0}, 1415 {0, 0}, 1416 {0, 0} 1417 } 1418 }, 1419 {1, 1, 2, 0, /* 0xc1 */ 1420 {{0, 0}, 1421 {6, 7}, 1422 {0, 0}, 1423 {0, 0} 1424 } 1425 }, 1426 {0, 1, 2, 0, /* 0xc2 */ 1427 {{1, 1}, 1428 {6, 7}, 1429 {0, 0}, 1430 {0, 0} 1431 } 1432 }, 1433 {1, 1, 2, 0, /* 0xc3 */ 1434 {{0, 1}, 1435 {6, 7}, 1436 {0, 0}, 1437 {0, 0} 1438 } 1439 }, 1440 {0, 1, 2, 0, /* 0xc4 */ 1441 {{2, 2}, 1442 {6, 7}, 1443 {0, 0}, 1444 {0, 0} 1445 } 1446 }, 1447 {1, 1, 3, 0, /* 0xc5 */ 1448 {{0, 0}, 1449 {2, 2}, 1450 {6, 7}, 1451 {0, 0} 1452 } 1453 }, 1454 {0, 1, 2, 0, /* 0xc6 */ 1455 {{1, 2}, 1456 {6, 7}, 1457 {0, 0}, 1458 {0, 0} 1459 } 1460 }, 1461 {1, 1, 2, 0, /* 0xc7 */ 1462 {{0, 2}, 1463 {6, 7}, 1464 {0, 0}, 1465 {0, 0} 1466 } 1467 }, 1468 {0, 1, 2, 0, /* 0xc8 */ 1469 {{3, 3}, 1470 {6, 7}, 1471 {0, 0}, 1472 {0, 0} 1473 } 1474 }, 1475 {1, 1, 3, 0, /* 0xc9 */ 1476 {{0, 0}, 1477 {3, 3}, 1478 {6, 7}, 1479 {0, 0} 1480 } 1481 }, 1482 {0, 1, 3, 0, /* 0xca */ 1483 {{1, 1}, 1484 {3, 3}, 1485 {6, 7}, 1486 {0, 0} 1487 } 1488 }, 1489 {1, 1, 3, 0, /* 0xcb */ 1490 {{0, 1}, 1491 {3, 3}, 1492 {6, 7}, 1493 {0, 0} 1494 } 1495 }, 1496 {0, 1, 2, 0, /* 0xcc */ 1497 {{2, 3}, 1498 {6, 7}, 1499 {0, 0}, 1500 {0, 0} 1501 } 1502 }, 1503 {1, 1, 3, 0, /* 0xcd */ 1504 {{0, 0}, 1505 {2, 3}, 1506 {6, 7}, 1507 {0, 0} 1508 } 1509 }, 1510 {0, 1, 2, 0, /* 0xce */ 1511 {{1, 3}, 1512 {6, 7}, 1513 {0, 0}, 1514 {0, 0} 1515 } 1516 }, 1517 {1, 1, 2, 0, /* 0xcf */ 1518 {{0, 3}, 1519 {6, 7}, 1520 {0, 0}, 
1521 {0, 0} 1522 } 1523 }, 1524 {0, 1, 2, 0, /* 0xd0 */ 1525 {{4, 4}, 1526 {6, 7}, 1527 {0, 0}, 1528 {0, 0} 1529 } 1530 }, 1531 {1, 1, 3, 0, /* 0xd1 */ 1532 {{0, 0}, 1533 {4, 4}, 1534 {6, 7}, 1535 {0, 0} 1536 } 1537 }, 1538 {0, 1, 3, 0, /* 0xd2 */ 1539 {{1, 1}, 1540 {4, 4}, 1541 {6, 7}, 1542 {0, 0} 1543 } 1544 }, 1545 {1, 1, 3, 0, /* 0xd3 */ 1546 {{0, 1}, 1547 {4, 4}, 1548 {6, 7}, 1549 {0, 0} 1550 } 1551 }, 1552 {0, 1, 3, 0, /* 0xd4 */ 1553 {{2, 2}, 1554 {4, 4}, 1555 {6, 7}, 1556 {0, 0} 1557 } 1558 }, 1559 {1, 1, 4, 0, /* 0xd5 */ 1560 {{0, 0}, 1561 {2, 2}, 1562 {4, 4}, 1563 {6, 7} 1564 } 1565 }, 1566 {0, 1, 3, 0, /* 0xd6 */ 1567 {{1, 2}, 1568 {4, 4}, 1569 {6, 7}, 1570 {0, 0} 1571 } 1572 }, 1573 {1, 1, 3, 0, /* 0xd7 */ 1574 {{0, 2}, 1575 {4, 4}, 1576 {6, 7}, 1577 {0, 0} 1578 } 1579 }, 1580 {0, 1, 2, 0, /* 0xd8 */ 1581 {{3, 4}, 1582 {6, 7}, 1583 {0, 0}, 1584 {0, 0} 1585 } 1586 }, 1587 {1, 1, 3, 0, /* 0xd9 */ 1588 {{0, 0}, 1589 {3, 4}, 1590 {6, 7}, 1591 {0, 0} 1592 } 1593 }, 1594 {0, 1, 3, 0, /* 0xda */ 1595 {{1, 1}, 1596 {3, 4}, 1597 {6, 7}, 1598 {0, 0} 1599 } 1600 }, 1601 {1, 1, 3, 0, /* 0xdb */ 1602 {{0, 1}, 1603 {3, 4}, 1604 {6, 7}, 1605 {0, 0} 1606 } 1607 }, 1608 {0, 1, 2, 0, /* 0xdc */ 1609 {{2, 4}, 1610 {6, 7}, 1611 {0, 0}, 1612 {0, 0} 1613 } 1614 }, 1615 {1, 1, 3, 0, /* 0xdd */ 1616 {{0, 0}, 1617 {2, 4}, 1618 {6, 7}, 1619 {0, 0} 1620 } 1621 }, 1622 {0, 1, 2, 0, /* 0xde */ 1623 {{1, 4}, 1624 {6, 7}, 1625 {0, 0}, 1626 {0, 0} 1627 } 1628 }, 1629 {1, 1, 2, 0, /* 0xdf */ 1630 {{0, 4}, 1631 {6, 7}, 1632 {0, 0}, 1633 {0, 0} 1634 } 1635 }, 1636 {0, 1, 1, 0, /* 0xe0 */ 1637 {{5, 7}, 1638 {0, 0}, 1639 {0, 0}, 1640 {0, 0} 1641 } 1642 }, 1643 {1, 1, 2, 0, /* 0xe1 */ 1644 {{0, 0}, 1645 {5, 7}, 1646 {0, 0}, 1647 {0, 0} 1648 } 1649 }, 1650 {0, 1, 2, 0, /* 0xe2 */ 1651 {{1, 1}, 1652 {5, 7}, 1653 {0, 0}, 1654 {0, 0} 1655 } 1656 }, 1657 {1, 1, 2, 0, /* 0xe3 */ 1658 {{0, 1}, 1659 {5, 7}, 1660 {0, 0}, 1661 {0, 0} 1662 } 1663 }, 1664 {0, 1, 2, 0, /* 0xe4 */ 1665 {{2, 2}, 1666 {5, 
7}, 1667 {0, 0}, 1668 {0, 0} 1669 } 1670 }, 1671 {1, 1, 3, 0, /* 0xe5 */ 1672 {{0, 0}, 1673 {2, 2}, 1674 {5, 7}, 1675 {0, 0} 1676 } 1677 }, 1678 {0, 1, 2, 0, /* 0xe6 */ 1679 {{1, 2}, 1680 {5, 7}, 1681 {0, 0}, 1682 {0, 0} 1683 } 1684 }, 1685 {1, 1, 2, 0, /* 0xe7 */ 1686 {{0, 2}, 1687 {5, 7}, 1688 {0, 0}, 1689 {0, 0} 1690 } 1691 }, 1692 {0, 1, 2, 0, /* 0xe8 */ 1693 {{3, 3}, 1694 {5, 7}, 1695 {0, 0}, 1696 {0, 0} 1697 } 1698 }, 1699 {1, 1, 3, 0, /* 0xe9 */ 1700 {{0, 0}, 1701 {3, 3}, 1702 {5, 7}, 1703 {0, 0} 1704 } 1705 }, 1706 {0, 1, 3, 0, /* 0xea */ 1707 {{1, 1}, 1708 {3, 3}, 1709 {5, 7}, 1710 {0, 0} 1711 } 1712 }, 1713 {1, 1, 3, 0, /* 0xeb */ 1714 {{0, 1}, 1715 {3, 3}, 1716 {5, 7}, 1717 {0, 0} 1718 } 1719 }, 1720 {0, 1, 2, 0, /* 0xec */ 1721 {{2, 3}, 1722 {5, 7}, 1723 {0, 0}, 1724 {0, 0} 1725 } 1726 }, 1727 {1, 1, 3, 0, /* 0xed */ 1728 {{0, 0}, 1729 {2, 3}, 1730 {5, 7}, 1731 {0, 0} 1732 } 1733 }, 1734 {0, 1, 2, 0, /* 0xee */ 1735 {{1, 3}, 1736 {5, 7}, 1737 {0, 0}, 1738 {0, 0} 1739 } 1740 }, 1741 {1, 1, 2, 0, /* 0xef */ 1742 {{0, 3}, 1743 {5, 7}, 1744 {0, 0}, 1745 {0, 0} 1746 } 1747 }, 1748 {0, 1, 1, 0, /* 0xf0 */ 1749 {{4, 7}, 1750 {0, 0}, 1751 {0, 0}, 1752 {0, 0} 1753 } 1754 }, 1755 {1, 1, 2, 0, /* 0xf1 */ 1756 {{0, 0}, 1757 {4, 7}, 1758 {0, 0}, 1759 {0, 0} 1760 } 1761 }, 1762 {0, 1, 2, 0, /* 0xf2 */ 1763 {{1, 1}, 1764 {4, 7}, 1765 {0, 0}, 1766 {0, 0} 1767 } 1768 }, 1769 {1, 1, 2, 0, /* 0xf3 */ 1770 {{0, 1}, 1771 {4, 7}, 1772 {0, 0}, 1773 {0, 0} 1774 } 1775 }, 1776 {0, 1, 2, 0, /* 0xf4 */ 1777 {{2, 2}, 1778 {4, 7}, 1779 {0, 0}, 1780 {0, 0} 1781 } 1782 }, 1783 {1, 1, 3, 0, /* 0xf5 */ 1784 {{0, 0}, 1785 {2, 2}, 1786 {4, 7}, 1787 {0, 0} 1788 } 1789 }, 1790 {0, 1, 2, 0, /* 0xf6 */ 1791 {{1, 2}, 1792 {4, 7}, 1793 {0, 0}, 1794 {0, 0} 1795 } 1796 }, 1797 {1, 1, 2, 0, /* 0xf7 */ 1798 {{0, 2}, 1799 {4, 7}, 1800 {0, 0}, 1801 {0, 0} 1802 } 1803 }, 1804 {0, 1, 1, 0, /* 0xf8 */ 1805 {{3, 7}, 1806 {0, 0}, 1807 {0, 0}, 1808 {0, 0} 1809 } 1810 }, 1811 {1, 1, 2, 0, /* 0xf9 */ 1812 
{{0, 0}, 1813 {3, 7}, 1814 {0, 0}, 1815 {0, 0} 1816 } 1817 }, 1818 {0, 1, 2, 0, /* 0xfa */ 1819 {{1, 1}, 1820 {3, 7}, 1821 {0, 0}, 1822 {0, 0} 1823 } 1824 }, 1825 {1, 1, 2, 0, /* 0xfb */ 1826 {{0, 1}, 1827 {3, 7}, 1828 {0, 0}, 1829 {0, 0} 1830 } 1831 }, 1832 {0, 1, 1, 0, /* 0xfc */ 1833 {{2, 7}, 1834 {0, 0}, 1835 {0, 0}, 1836 {0, 0} 1837 } 1838 }, 1839 {1, 1, 2, 0, /* 0xfd */ 1840 {{0, 0}, 1841 {2, 7}, 1842 {0, 0}, 1843 {0, 0} 1844 } 1845 }, 1846 {0, 1, 1, 0, /* 0xfe */ 1847 {{1, 7}, 1848 {0, 0}, 1849 {0, 0}, 1850 {0, 0} 1851 } 1852 }, 1853 {1, 1, 1, 0, /* 0xff */ 1854 {{0, 7}, 1855 {0, 0}, 1856 {0, 0}, 1857 {0, 0} 1858 } 1859 } 1860 }; 1861 1862 1863 1864 1865 extern int sctp_peer_chunk_oh; 1866 1867 static int 1868 sctp_find_cmsg(int c_type, void *data, struct mbuf *control, int cpsize) 1869 { 1870 struct cmsghdr cmh; 1871 int tlen, at; 1872 1873 tlen = SCTP_BUF_LEN(control); 1874 at = 0; 1875 /* 1876 * Independent of how many mbufs, find the c_type inside the control 1877 * structure and copy out the data. 1878 */ 1879 while (at < tlen) { 1880 if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) { 1881 /* not enough room for one more we are done. */ 1882 return (0); 1883 } 1884 m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh); 1885 if ((cmh.cmsg_len + at) > tlen) { 1886 /* 1887 * this is real messed up since there is not enough 1888 * data here to cover the cmsg header. We are done. 
1889 */ 1890 return (0); 1891 } 1892 if ((cmh.cmsg_level == IPPROTO_SCTP) && 1893 (c_type == cmh.cmsg_type)) { 1894 /* found the one we want, copy it out */ 1895 at += CMSG_ALIGN(sizeof(struct cmsghdr)); 1896 if ((int)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < cpsize) { 1897 /* 1898 * space of cmsg_len after header not big 1899 * enough 1900 */ 1901 return (0); 1902 } 1903 m_copydata(control, at, cpsize, data); 1904 return (1); 1905 } else { 1906 at += CMSG_ALIGN(cmh.cmsg_len); 1907 if (cmh.cmsg_len == 0) { 1908 break; 1909 } 1910 } 1911 } 1912 /* not found */ 1913 return (0); 1914 } 1915 1916 1917 extern int sctp_mbuf_threshold_count; 1918 1919 1920 __inline struct mbuf * 1921 sctp_get_mbuf_for_msg(unsigned int space_needed, int want_header, 1922 int how, int allonebuf, int type) 1923 { 1924 struct mbuf *m = NULL; 1925 int aloc_size; 1926 int index = 0; 1927 int mbuf_threshold; 1928 1929 if (want_header) { 1930 MGETHDR(m, how, type); 1931 } else { 1932 MGET(m, how, type); 1933 } 1934 if (m == NULL) { 1935 return (NULL); 1936 } 1937 if (allonebuf == 0) 1938 mbuf_threshold = sctp_mbuf_threshold_count; 1939 else 1940 mbuf_threshold = 1; 1941 1942 1943 if (space_needed > (((mbuf_threshold - 1) * MLEN) + MHLEN)) { 1944 try_again: 1945 index = 4; 1946 if (space_needed <= MCLBYTES) { 1947 aloc_size = MCLBYTES; 1948 } else if (space_needed <= MJUMPAGESIZE) { 1949 aloc_size = MJUMPAGESIZE; 1950 index = 5; 1951 } else if (space_needed <= MJUM9BYTES) { 1952 aloc_size = MJUM9BYTES; 1953 index = 6; 1954 } else { 1955 aloc_size = MJUM16BYTES; 1956 index = 7; 1957 } 1958 m_cljget(m, how, aloc_size); 1959 if (m == NULL) { 1960 return (NULL); 1961 } 1962 if (SCTP_BUF_IS_EXTENDED(m) == 0) { 1963 if ((aloc_size != MCLBYTES) && 1964 (allonebuf == 0)) { 1965 aloc_size -= 10; 1966 goto try_again; 1967 } 1968 sctp_m_freem(m); 1969 return (NULL); 1970 } 1971 } 1972 SCTP_BUF_LEN(m) = 0; 1973 SCTP_BUF_NEXT(m) = SCTP_BUF_NEXT_PKT(m) = NULL; 1974 #ifdef SCTP_MBUF_LOGGING 1975 if 
(SCTP_BUF_IS_EXTENDED(m)) { 1976 sctp_log_mb(m, SCTP_MBUF_IALLOC); 1977 } 1978 #endif 1979 return (m); 1980 } 1981 1982 1983 static struct mbuf * 1984 sctp_add_cookie(struct sctp_inpcb *inp, struct mbuf *init, int init_offset, 1985 struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in) 1986 { 1987 struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret; 1988 struct sctp_state_cookie *stc; 1989 struct sctp_paramhdr *ph; 1990 uint8_t *signature; 1991 int sig_offset; 1992 uint16_t cookie_sz; 1993 1994 mret = NULL; 1995 1996 1997 mret = sctp_get_mbuf_for_msg((sizeof(struct sctp_state_cookie) + 1998 sizeof(struct sctp_paramhdr)), 0, M_DONTWAIT, 1, MT_DATA); 1999 if (mret == NULL) { 2000 return (NULL); 2001 } 2002 copy_init = SCTP_M_COPYM(init, init_offset, M_COPYALL, M_DONTWAIT); 2003 if (copy_init == NULL) { 2004 sctp_m_freem(mret); 2005 return (NULL); 2006 } 2007 copy_initack = SCTP_M_COPYM(initack, initack_offset, M_COPYALL, 2008 M_DONTWAIT); 2009 if (copy_initack == NULL) { 2010 sctp_m_freem(mret); 2011 sctp_m_freem(copy_init); 2012 return (NULL); 2013 } 2014 /* easy side we just drop it on the end */ 2015 ph = mtod(mret, struct sctp_paramhdr *); 2016 SCTP_BUF_LEN(mret) = sizeof(struct sctp_state_cookie) + 2017 sizeof(struct sctp_paramhdr); 2018 stc = (struct sctp_state_cookie *)((caddr_t)ph + 2019 sizeof(struct sctp_paramhdr)); 2020 ph->param_type = htons(SCTP_STATE_COOKIE); 2021 ph->param_length = 0; /* fill in at the end */ 2022 /* Fill in the stc cookie data */ 2023 *stc = *stc_in; 2024 2025 /* tack the INIT and then the INIT-ACK onto the chain */ 2026 cookie_sz = 0; 2027 m_at = mret; 2028 for (m_at = mret; m_at; m_at = SCTP_BUF_NEXT(m_at)) { 2029 cookie_sz += SCTP_BUF_LEN(m_at); 2030 if (SCTP_BUF_NEXT(m_at) == NULL) { 2031 SCTP_BUF_NEXT(m_at) = copy_init; 2032 break; 2033 } 2034 } 2035 2036 for (m_at = copy_init; m_at; m_at = SCTP_BUF_NEXT(m_at)) { 2037 cookie_sz += SCTP_BUF_LEN(m_at); 2038 if (SCTP_BUF_NEXT(m_at) == NULL) { 2039 
SCTP_BUF_NEXT(m_at) = copy_initack; 2040 break; 2041 } 2042 } 2043 2044 for (m_at = copy_initack; m_at; m_at = SCTP_BUF_NEXT(m_at)) { 2045 cookie_sz += SCTP_BUF_LEN(m_at); 2046 if (SCTP_BUF_NEXT(m_at) == NULL) { 2047 break; 2048 } 2049 } 2050 sig = sctp_get_mbuf_for_msg(SCTP_SECRET_SIZE, 0, M_DONTWAIT, 1, MT_DATA); 2051 if (sig == NULL) { 2052 /* no space, so free the entire chain */ 2053 sctp_m_freem(mret); 2054 return (NULL); 2055 } 2056 SCTP_BUF_LEN(sig) = 0; 2057 SCTP_BUF_NEXT(m_at) = sig; 2058 sig_offset = 0; 2059 signature = (uint8_t *) (mtod(sig, caddr_t)+sig_offset); 2060 /* Time to sign the cookie */ 2061 sctp_hmac_m(SCTP_HMAC, 2062 (uint8_t *) inp->sctp_ep.secret_key[(int)(inp->sctp_ep.current_secret_number)], 2063 SCTP_SECRET_SIZE, mret, sizeof(struct sctp_paramhdr), 2064 (uint8_t *) signature); 2065 SCTP_BUF_LEN(sig) += SCTP_SIGNATURE_SIZE; 2066 cookie_sz += SCTP_SIGNATURE_SIZE; 2067 2068 ph->param_length = htons(cookie_sz); 2069 return (mret); 2070 } 2071 2072 2073 static __inline uint8_t 2074 sctp_get_ect(struct sctp_tcb *stcb, 2075 struct sctp_tmit_chunk *chk) 2076 { 2077 uint8_t this_random; 2078 2079 /* Huh? 
	 */
	if (sctp_ecn_enable == 0)
		return (0);

	if (sctp_ecn_nonce == 0)
		/* no nonce, always return ECT0 */
		return (SCTP_ECT0_BIT);

	if (stcb->asoc.peer_supports_ecn_nonce == 0) {
		/* Peer does NOT support it, so we send a ECT0 only */
		return (SCTP_ECT0_BIT);
	}
	if (chk == NULL)
		return (SCTP_ECT0_BIT);

	/* out of cached random bits? refresh the pool */
	if (((stcb->asoc.hb_random_idx == 3) &&
	    (stcb->asoc.hb_ect_randombit > 7)) ||
	    (stcb->asoc.hb_random_idx > 3)) {
		uint32_t rndval;

		rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
		memcpy(stcb->asoc.hb_random_values, &rndval,
		    sizeof(stcb->asoc.hb_random_values));
		this_random = stcb->asoc.hb_random_values[0];
		stcb->asoc.hb_random_idx = 0;
		stcb->asoc.hb_ect_randombit = 0;
	} else {
		if (stcb->asoc.hb_ect_randombit > 7) {
			/* move to the next cached random byte */
			stcb->asoc.hb_ect_randombit = 0;
			stcb->asoc.hb_random_idx++;
		}
		this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
	}
	/* consume one random bit to pick ECT1 vs ECT0 */
	if ((this_random >> stcb->asoc.hb_ect_randombit) & 0x01) {
		if (chk != NULL)
			/* ECN Nonce stuff */
			chk->rec.data.ect_nonce = SCTP_ECT1_BIT;
		stcb->asoc.hb_ect_randombit++;
		return (SCTP_ECT1_BIT);
	} else {
		stcb->asoc.hb_ect_randombit++;
		return (SCTP_ECT0_BIT);
	}
}

extern int sctp_no_csum_on_loopback;

/*
 * Transmit one fully built SCTP packet (mbuf chain 'm' starting with the
 * sctphdr, no IP header yet) to 'to', prepending the IPv4 or IPv6 header
 * and handing it to ip_output()/ip6_output().  See the comment below for
 * the full list of duties.  Consumes 'm' on every path.
 */
static int
sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,	/* may be NULL */
    struct sctp_nets *net,
    struct sockaddr *to,
    struct mbuf *m,
    uint32_t auth_offset,
    struct sctp_auth_chunk *auth,
    int nofragment_flag,
    int ecn_ok,
    struct sctp_tmit_chunk *chk,
    int out_of_asoc_ok)
/* nofragment_flag to tell if IP_DF should be set (IPv4 only) */
{
	/*
	 * Given a mbuf chain (via SCTP_BUF_NEXT()) that holds a packet
	 * header WITH a SCTPHDR but no IP header, endpoint inp and sa
	 * structure.
- fill in the HMAC digest of any AUTH chunk in the 2144 * packet - calculate SCTP checksum and fill in - prepend a IP 2145 * address header - if boundall use INADDR_ANY - if boundspecific do 2146 * source address selection - set fragmentation option for ipV4 - On 2147 * return from IP output, check/adjust mtu size - of output 2148 * interface and smallest_mtu size as well. 2149 */ 2150 /* Will need ifdefs around this */ 2151 struct mbuf *o_pak; 2152 2153 struct sctphdr *sctphdr; 2154 int packet_length; 2155 int o_flgs; 2156 uint32_t csum; 2157 int ret; 2158 unsigned int have_mtu; 2159 struct route *ro; 2160 2161 if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) { 2162 sctp_m_freem(m); 2163 return (EFAULT); 2164 } 2165 /* fill in the HMAC digest for any AUTH chunk in the packet */ 2166 if ((auth != NULL) && (stcb != NULL)) { 2167 sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb); 2168 } 2169 /* Calculate the csum and fill in the length of the packet */ 2170 sctphdr = mtod(m, struct sctphdr *); 2171 have_mtu = 0; 2172 if (sctp_no_csum_on_loopback && 2173 (stcb) && 2174 (stcb->asoc.loopback_scope)) { 2175 sctphdr->checksum = 0; 2176 /* 2177 * This can probably now be taken out since my audit shows 2178 * no more bad pktlen's coming in. But we will wait a while 2179 * yet. 
2180 */ 2181 packet_length = sctp_calculate_len(m); 2182 } else { 2183 sctphdr->checksum = 0; 2184 csum = sctp_calculate_sum(m, &packet_length, 0); 2185 sctphdr->checksum = csum; 2186 } 2187 2188 if (to->sa_family == AF_INET) { 2189 struct ip *ip; 2190 struct route iproute; 2191 uint8_t tos_value; 2192 2193 o_pak = SCTP_GET_HEADER_FOR_OUTPUT(sizeof(struct ip)); 2194 if (o_pak == NULL) { 2195 /* failed to prepend data, give up */ 2196 sctp_m_freem(m); 2197 return (ENOMEM); 2198 } 2199 SCTP_BUF_LEN(SCTP_HEADER_TO_CHAIN(o_pak)) = sizeof(struct ip); 2200 packet_length += sizeof(struct ip); 2201 SCTP_ATTACH_CHAIN(o_pak, m, packet_length); 2202 ip = mtod(SCTP_HEADER_TO_CHAIN(o_pak), struct ip *); 2203 ip->ip_v = IPVERSION; 2204 ip->ip_hl = (sizeof(struct ip) >> 2); 2205 if (net) { 2206 tos_value = net->tos_flowlabel & 0x000000ff; 2207 } else { 2208 tos_value = inp->ip_inp.inp.inp_ip_tos; 2209 } 2210 if (nofragment_flag) { 2211 #if defined(WITH_CONVERT_IP_OFF) || defined(__FreeBSD__) || defined(__APPLE__) 2212 ip->ip_off = IP_DF; 2213 #else 2214 ip->ip_off = htons(IP_DF); 2215 #endif 2216 } else 2217 ip->ip_off = 0; 2218 2219 2220 /* FreeBSD has a function for ip_id's */ 2221 ip->ip_id = ip_newid(); 2222 2223 ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl; 2224 ip->ip_len = SCTP_HEADER_LEN(o_pak); 2225 if (stcb) { 2226 if ((stcb->asoc.ecn_allowed) && ecn_ok) { 2227 /* Enable ECN */ 2228 ip->ip_tos = ((u_char)(tos_value & 0xfc) | sctp_get_ect(stcb, chk)); 2229 } else { 2230 /* No ECN */ 2231 ip->ip_tos = (u_char)(tos_value & 0xfc); 2232 } 2233 } else { 2234 /* no association at all */ 2235 ip->ip_tos = (tos_value & 0xfc); 2236 } 2237 ip->ip_p = IPPROTO_SCTP; 2238 ip->ip_sum = 0; 2239 if (net == NULL) { 2240 ro = &iproute; 2241 memset(&iproute, 0, sizeof(iproute)); 2242 memcpy(&ro->ro_dst, to, to->sa_len); 2243 } else { 2244 ro = (struct route *)&net->ro; 2245 } 2246 /* Now the address selection part */ 2247 ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr; 2248 
2249 /* call the routine to select the src address */ 2250 if (net) { 2251 if (net->src_addr_selected == 0) { 2252 /* Cache the source address */ 2253 ((struct sockaddr_in *)&net->ro._s_addr)->sin_addr = sctp_ipv4_source_address_selection(inp, 2254 stcb, 2255 ro, net, out_of_asoc_ok); 2256 if (ro->ro_rt) 2257 net->src_addr_selected = 1; 2258 } 2259 ip->ip_src = ((struct sockaddr_in *)&net->ro._s_addr)->sin_addr; 2260 } else { 2261 ip->ip_src = sctp_ipv4_source_address_selection(inp, 2262 stcb, ro, net, out_of_asoc_ok); 2263 } 2264 2265 /* 2266 * If source address selection fails and we find no route 2267 * then the ip_output should fail as well with a 2268 * NO_ROUTE_TO_HOST type error. We probably should catch 2269 * that somewhere and abort the association right away 2270 * (assuming this is an INIT being sent). 2271 */ 2272 if ((ro->ro_rt == NULL)) { 2273 /* 2274 * src addr selection failed to find a route (or 2275 * valid source addr), so we can't get there from 2276 * here! 2277 */ 2278 #ifdef SCTP_DEBUG 2279 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) { 2280 printf("low_level_output: dropped v4 packet- no valid source addr\n"); 2281 printf("Destination was %x\n", (uint32_t) (ntohl(ip->ip_dst.s_addr))); 2282 } 2283 #endif /* SCTP_DEBUG */ 2284 if (net) { 2285 if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb) 2286 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN, 2287 stcb, 2288 SCTP_FAILED_THRESHOLD, 2289 (void *)net); 2290 net->dest_state &= ~SCTP_ADDR_REACHABLE; 2291 net->dest_state |= SCTP_ADDR_NOT_REACHABLE; 2292 if (stcb) { 2293 if (net == stcb->asoc.primary_destination) { 2294 /* need a new primary */ 2295 struct sctp_nets *alt; 2296 2297 alt = sctp_find_alternate_net(stcb, net, 0); 2298 if (alt != net) { 2299 if (sctp_set_primary_addr(stcb, 2300 (struct sockaddr *)NULL, 2301 alt) == 0) { 2302 net->dest_state |= SCTP_ADDR_WAS_PRIMARY; 2303 net->src_addr_selected = 0; 2304 } 2305 } 2306 } 2307 } 2308 } 2309 sctp_m_freem(o_pak); 2310 return (EHOSTUNREACH); 2311 } 
else { 2312 have_mtu = ro->ro_rt->rt_ifp->if_mtu; 2313 } 2314 if (inp->sctp_socket) { 2315 o_flgs = (IP_RAWOUTPUT | (inp->sctp_socket->so_options & (SO_DONTROUTE | SO_BROADCAST))); 2316 } else { 2317 o_flgs = IP_RAWOUTPUT; 2318 } 2319 #ifdef SCTP_DEBUG 2320 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) { 2321 printf("Calling ipv4 output routine from low level src addr:%x\n", 2322 (uint32_t) (ntohl(ip->ip_src.s_addr))); 2323 printf("Destination is %x\n", (uint32_t) (ntohl(ip->ip_dst.s_addr))); 2324 printf("RTP route is %p through\n", ro->ro_rt); 2325 } 2326 #endif 2327 2328 if ((have_mtu) && (net) && (have_mtu > net->mtu)) { 2329 ro->ro_rt->rt_ifp->if_mtu = net->mtu; 2330 } 2331 if (ro != &iproute) { 2332 memcpy(&iproute, ro, sizeof(*ro)); 2333 } 2334 ret = ip_output(o_pak, inp->ip_inp.inp.inp_options, 2335 ro, o_flgs, inp->ip_inp.inp.inp_moptions 2336 ,(struct inpcb *)NULL 2337 ); 2338 if ((ro->ro_rt) && (have_mtu) && (net) && (have_mtu > net->mtu)) { 2339 ro->ro_rt->rt_ifp->if_mtu = have_mtu; 2340 } 2341 SCTP_STAT_INCR(sctps_sendpackets); 2342 SCTP_STAT_INCR_COUNTER64(sctps_outpackets); 2343 if (ret) 2344 SCTP_STAT_INCR(sctps_senderrors); 2345 #ifdef SCTP_DEBUG 2346 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) { 2347 printf("Ip output returns %d\n", ret); 2348 } 2349 #endif 2350 if (net == NULL) { 2351 /* free tempy routes */ 2352 if (ro->ro_rt) 2353 RTFREE(ro->ro_rt); 2354 } else { 2355 /* PMTU check versus smallest asoc MTU goes here */ 2356 if (ro->ro_rt != NULL) { 2357 if (ro->ro_rt->rt_rmx.rmx_mtu && 2358 (stcb->asoc.smallest_mtu > ro->ro_rt->rt_rmx.rmx_mtu)) { 2359 sctp_mtu_size_reset(inp, &stcb->asoc, 2360 ro->ro_rt->rt_rmx.rmx_mtu); 2361 } 2362 } else { 2363 /* route was freed */ 2364 net->src_addr_selected = 0; 2365 } 2366 } 2367 return (ret); 2368 } 2369 #ifdef INET6 2370 else if (to->sa_family == AF_INET6) { 2371 uint32_t flowlabel; 2372 struct ip6_hdr *ip6h; 2373 2374 struct route_in6 ip6route; 2375 struct ifnet *ifp; 2376 u_char flowTop; 2377 uint16_t 
flowBottom; 2378 u_char tosBottom, tosTop; 2379 struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp; 2380 struct sockaddr_in6 lsa6_storage; 2381 int prev_scope = 0; 2382 int error; 2383 u_short prev_port = 0; 2384 2385 if (net != NULL) { 2386 flowlabel = net->tos_flowlabel; 2387 } else { 2388 flowlabel = ((struct in6pcb *)inp)->in6p_flowinfo; 2389 } 2390 o_pak = SCTP_GET_HEADER_FOR_OUTPUT(sizeof(struct ip6_hdr)); 2391 if (o_pak == NULL) { 2392 /* failed to prepend data, give up */ 2393 sctp_m_freem(m); 2394 return (ENOMEM); 2395 } 2396 SCTP_BUF_LEN(SCTP_HEADER_TO_CHAIN(o_pak)) = sizeof(struct ip6_hdr); 2397 packet_length += sizeof(struct ip6_hdr); 2398 SCTP_ATTACH_CHAIN(o_pak, m, packet_length); 2399 ip6h = mtod(SCTP_HEADER_TO_CHAIN(o_pak), struct ip6_hdr *); 2400 /* 2401 * We assume here that inp_flow is in host byte order within 2402 * the TCB! 2403 */ 2404 flowBottom = flowlabel & 0x0000ffff; 2405 flowTop = ((flowlabel & 0x000f0000) >> 16); 2406 tosTop = (((flowlabel & 0xf0) >> 4) | IPV6_VERSION); 2407 /* protect *sin6 from overwrite */ 2408 sin6 = (struct sockaddr_in6 *)to; 2409 tmp = *sin6; 2410 sin6 = &tmp; 2411 2412 /* KAME hack: embed scopeid */ 2413 if (sa6_embedscope(sin6, ip6_use_defzone) != 0) 2414 return (EINVAL); 2415 if (net == NULL) { 2416 memset(&ip6route, 0, sizeof(ip6route)); 2417 ro = (struct route *)&ip6route; 2418 memcpy(&ro->ro_dst, sin6, sin6->sin6_len); 2419 } else { 2420 ro = (struct route *)&net->ro; 2421 } 2422 if (stcb != NULL) { 2423 if ((stcb->asoc.ecn_allowed) && ecn_ok) { 2424 /* Enable ECN */ 2425 tosBottom = (((((struct in6pcb *)inp)->in6p_flowinfo & 0x0c) | sctp_get_ect(stcb, chk)) << 4); 2426 } else { 2427 /* No ECN */ 2428 tosBottom = ((((struct in6pcb *)inp)->in6p_flowinfo & 0x0c) << 4); 2429 } 2430 } else { 2431 /* we could get no asoc if it is a O-O-T-B packet */ 2432 tosBottom = ((((struct in6pcb *)inp)->in6p_flowinfo & 0x0c) << 4); 2433 } 2434 ip6h->ip6_flow = htonl(((tosTop << 24) | ((tosBottom | flowTop) << 16) | 
flowBottom)); 2435 ip6h->ip6_nxt = IPPROTO_SCTP; 2436 ip6h->ip6_plen = (SCTP_HEADER_LEN(o_pak) - sizeof(struct ip6_hdr)); 2437 ip6h->ip6_dst = sin6->sin6_addr; 2438 2439 /* 2440 * Add SRC address selection here: we can only reuse to a 2441 * limited degree the kame src-addr-sel, since we can try 2442 * their selection but it may not be bound. 2443 */ 2444 bzero(&lsa6_tmp, sizeof(lsa6_tmp)); 2445 lsa6_tmp.sin6_family = AF_INET6; 2446 lsa6_tmp.sin6_len = sizeof(lsa6_tmp); 2447 lsa6 = &lsa6_tmp; 2448 if (net) { 2449 if (net->src_addr_selected == 0) { 2450 /* Cache the source address */ 2451 ((struct sockaddr_in6 *)&net->ro._s_addr)->sin6_addr = sctp_ipv6_source_address_selection(inp, 2452 stcb, ro, net, out_of_asoc_ok); 2453 2454 if (ro->ro_rt) 2455 net->src_addr_selected = 1; 2456 } 2457 lsa6->sin6_addr = ((struct sockaddr_in6 *)&net->ro._s_addr)->sin6_addr; 2458 } else { 2459 lsa6->sin6_addr = sctp_ipv6_source_address_selection( 2460 inp, stcb, ro, net, out_of_asoc_ok); 2461 } 2462 lsa6->sin6_port = inp->sctp_lport; 2463 2464 if ((ro->ro_rt == NULL)) { 2465 /* 2466 * src addr selection failed to find a route (or 2467 * valid source addr), so we can't get there from 2468 * here! 
2469 */ 2470 #ifdef SCTP_DEBUG 2471 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) { 2472 printf("low_level_output: dropped v6 pkt- no valid source addr\n"); 2473 } 2474 #endif 2475 sctp_m_freem(o_pak); 2476 if (net) { 2477 if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb) 2478 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN, 2479 stcb, 2480 SCTP_FAILED_THRESHOLD, 2481 (void *)net); 2482 net->dest_state &= ~SCTP_ADDR_REACHABLE; 2483 net->dest_state |= SCTP_ADDR_NOT_REACHABLE; 2484 if (stcb) { 2485 if (net == stcb->asoc.primary_destination) { 2486 /* need a new primary */ 2487 struct sctp_nets *alt; 2488 2489 alt = sctp_find_alternate_net(stcb, net, 0); 2490 if (alt != net) { 2491 if (sctp_set_primary_addr(stcb, 2492 (struct sockaddr *)NULL, 2493 alt) == 0) { 2494 net->dest_state |= SCTP_ADDR_WAS_PRIMARY; 2495 net->src_addr_selected = 0; 2496 } 2497 } 2498 } 2499 } 2500 } 2501 return (EHOSTUNREACH); 2502 } 2503 /* 2504 * XXX: sa6 may not have a valid sin6_scope_id in the 2505 * non-SCOPEDROUTING case. 2506 */ 2507 bzero(&lsa6_storage, sizeof(lsa6_storage)); 2508 lsa6_storage.sin6_family = AF_INET6; 2509 lsa6_storage.sin6_len = sizeof(lsa6_storage); 2510 if ((error = sa6_recoverscope(&lsa6_storage)) != 0) { 2511 sctp_m_freem(o_pak); 2512 return (error); 2513 } 2514 /* XXX */ 2515 lsa6_storage.sin6_addr = lsa6->sin6_addr; 2516 lsa6_storage.sin6_port = inp->sctp_lport; 2517 lsa6 = &lsa6_storage; 2518 ip6h->ip6_src = lsa6->sin6_addr; 2519 2520 /* 2521 * We set the hop limit now since there is a good chance 2522 * that our ro pointer is now filled 2523 */ 2524 ip6h->ip6_hlim = in6_selecthlim((struct in6pcb *)&inp->ip_inp.inp, 2525 (ro ? 2526 (ro->ro_rt ? 
(ro->ro_rt->rt_ifp) : (NULL)) : 2527 (NULL))); 2528 o_flgs = 0; 2529 ifp = ro->ro_rt->rt_ifp; 2530 #ifdef SCTP_DEBUG 2531 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) { 2532 /* Copy to be sure something bad is not happening */ 2533 sin6->sin6_addr = ip6h->ip6_dst; 2534 lsa6->sin6_addr = ip6h->ip6_src; 2535 2536 printf("Calling ipv6 output routine from low level\n"); 2537 printf("src: "); 2538 sctp_print_address((struct sockaddr *)lsa6); 2539 printf("dst: "); 2540 sctp_print_address((struct sockaddr *)sin6); 2541 } 2542 #endif /* SCTP_DEBUG */ 2543 if (net) { 2544 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr; 2545 /* preserve the port and scope for link local send */ 2546 prev_scope = sin6->sin6_scope_id; 2547 prev_port = sin6->sin6_port; 2548 } 2549 ret = ip6_output(o_pak, ((struct in6pcb *)inp)->in6p_outputopts, 2550 (struct route_in6 *)ro, 2551 o_flgs, 2552 ((struct in6pcb *)inp)->in6p_moptions, 2553 &ifp 2554 ,NULL 2555 ); 2556 if (net) { 2557 /* for link local this must be done */ 2558 sin6->sin6_scope_id = prev_scope; 2559 sin6->sin6_port = prev_port; 2560 } 2561 #ifdef SCTP_DEBUG 2562 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) { 2563 printf("return from send is %d\n", ret); 2564 } 2565 #endif /* SCTP_DEBUG_OUTPUT */ 2566 SCTP_STAT_INCR(sctps_sendpackets); 2567 SCTP_STAT_INCR_COUNTER64(sctps_outpackets); 2568 if (ret) 2569 SCTP_STAT_INCR(sctps_senderrors); 2570 if (net == NULL) { 2571 /* Now if we had a temp route free it */ 2572 if (ro->ro_rt) { 2573 RTFREE(ro->ro_rt); 2574 } 2575 } else { 2576 /* PMTU check versus smallest asoc MTU goes here */ 2577 if (ro->ro_rt == NULL) { 2578 /* Route was freed */ 2579 net->src_addr_selected = 0; 2580 } 2581 if (ro->ro_rt != NULL) { 2582 if (ro->ro_rt->rt_rmx.rmx_mtu && 2583 (stcb->asoc.smallest_mtu > ro->ro_rt->rt_rmx.rmx_mtu)) { 2584 sctp_mtu_size_reset(inp, 2585 &stcb->asoc, 2586 ro->ro_rt->rt_rmx.rmx_mtu); 2587 } 2588 } else if (ifp) { 2589 if (ND_IFINFO(ifp)->linkmtu && 2590 (stcb->asoc.smallest_mtu > 
ND_IFINFO(ifp)->linkmtu)) { 2591 sctp_mtu_size_reset(inp, 2592 &stcb->asoc, 2593 ND_IFINFO(ifp)->linkmtu); 2594 } 2595 } 2596 } 2597 return (ret); 2598 } 2599 #endif 2600 else { 2601 #ifdef SCTP_DEBUG 2602 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) { 2603 printf("Unknown protocol (TSNH) type %d\n", ((struct sockaddr *)to)->sa_family); 2604 } 2605 #endif 2606 sctp_m_freem(m); 2607 return (EFAULT); 2608 } 2609 } 2610 2611 2612 void 2613 sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb) 2614 { 2615 struct mbuf *m, *m_at, *m_last; 2616 struct sctp_nets *net; 2617 struct sctp_init_msg *initm; 2618 struct sctp_supported_addr_param *sup_addr; 2619 struct sctp_ecn_supported_param *ecn; 2620 struct sctp_prsctp_supported_param *prsctp; 2621 struct sctp_ecn_nonce_supported_param *ecn_nonce; 2622 struct sctp_supported_chunk_types_param *pr_supported; 2623 int cnt_inits_to = 0; 2624 int padval, ret; 2625 int num_ext; 2626 int p_len; 2627 2628 /* INIT's always go to the primary (and usually ONLY address) */ 2629 m_last = NULL; 2630 net = stcb->asoc.primary_destination; 2631 if (net == NULL) { 2632 net = TAILQ_FIRST(&stcb->asoc.nets); 2633 if (net == NULL) { 2634 /* TSNH */ 2635 return; 2636 } 2637 /* we confirm any address we send an INIT to */ 2638 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED; 2639 sctp_set_primary_addr(stcb, NULL, net); 2640 } else { 2641 /* we confirm any address we send an INIT to */ 2642 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED; 2643 } 2644 #ifdef SCTP_DEBUG 2645 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) { 2646 printf("Sending INIT\n"); 2647 } 2648 #endif 2649 if (((struct sockaddr *)&(net->ro._l_addr))->sa_family == AF_INET6) { 2650 /* 2651 * special hook, if we are sending to link local it will not 2652 * show up in our private address count. 
		 */
		struct sockaddr_in6 *sin6l;

		sin6l = &net->ro._l_addr.sin6;
		if (IN6_IS_ADDR_LINKLOCAL(&sin6l->sin6_addr))
			cnt_inits_to = 1;
	}
	if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
		/* This case should not happen */
		return;
	}
	/* start the INIT timer */
	if (sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net)) {
		/* we are hosed since I can't start the INIT timer? */
		return;
	}
	m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_DONTWAIT, 1, MT_DATA);
	if (m == NULL) {
		/* No memory, INIT timer will re-attempt. */
		return;
	}
	SCTP_BUF_LEN(m) = sizeof(struct sctp_init_msg);
	/* Now lets put the SCTP header in place */
	initm = mtod(m, struct sctp_init_msg *);
	initm->sh.src_port = inp->sctp_lport;
	initm->sh.dest_port = stcb->rport;
	initm->sh.v_tag = 0;
	initm->sh.checksum = 0;	/* calculate later */
	/* now the chunk header */
	initm->msg.ch.chunk_type = SCTP_INITIATION;
	initm->msg.ch.chunk_flags = 0;
	/* fill in later from mbuf we build */
	initm->msg.ch.chunk_length = 0;
	/* place in my tag */
	initm->msg.init.initiate_tag = htonl(stcb->asoc.my_vtag);
	/* set up some of the credits.
	 */
	initm->msg.init.a_rwnd = htonl(max(inp->sctp_socket->so_rcv.sb_hiwat,
	    SCTP_MINIMAL_RWND));

	initm->msg.init.num_outbound_streams = htons(stcb->asoc.pre_open_streams);
	initm->msg.init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams);
	initm->msg.init.initial_tsn = htonl(stcb->asoc.init_seq_number);
	/* now the address restriction */
	sup_addr = (struct sctp_supported_addr_param *)((caddr_t)initm +
	    sizeof(*initm));
	sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE);
	/* we support 2 types IPv6/IPv4 */
	sup_addr->ph.param_length = htons(sizeof(*sup_addr) +
	    sizeof(uint16_t));
	sup_addr->addr_type[0] = htons(SCTP_IPV4_ADDRESS);
	sup_addr->addr_type[1] = htons(SCTP_IPV6_ADDRESS);
	SCTP_BUF_LEN(m) += sizeof(*sup_addr) + sizeof(uint16_t);

	/*
	 * Each subsequent parameter is appended directly after the
	 * previous one; 'ecn' is carried forward as the next write
	 * position.
	 */
	if (inp->sctp_ep.adaptation_layer_indicator) {
		struct sctp_adaptation_layer_indication *ali;

		ali = (struct sctp_adaptation_layer_indication *)(
		    (caddr_t)sup_addr + sizeof(*sup_addr) + sizeof(uint16_t));
		ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
		ali->ph.param_length = htons(sizeof(*ali));
		/*
		 * NOTE(review): ntohl() used where htonl() reads more
		 * naturally; the two are the same swap on all supported
		 * byte orders — confirm before changing.
		 */
		ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator);
		SCTP_BUF_LEN(m) += sizeof(*ali);
		ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali +
		    sizeof(*ali));
	} else {
		ecn = (struct sctp_ecn_supported_param *)((caddr_t)sup_addr +
		    sizeof(*sup_addr) + sizeof(uint16_t));
	}

	/* now any cookie time extensions */
	if (stcb->asoc.cookie_preserve_req) {
		struct sctp_cookie_perserve_param *cookie_preserve;

		cookie_preserve = (struct sctp_cookie_perserve_param *)(ecn);
		cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE);
		cookie_preserve->ph.param_length = htons(
		    sizeof(*cookie_preserve));
		cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req);
		SCTP_BUF_LEN(m) += sizeof(*cookie_preserve);
		ecn = (struct sctp_ecn_supported_param *)(
		    (caddr_t)cookie_preserve + sizeof(*cookie_preserve));
		stcb->asoc.cookie_preserve_req = 0;
	}
	/* ECN parameter */
	if (sctp_ecn_enable == 1) {
		ecn->ph.param_type = htons(SCTP_ECN_CAPABLE);
		ecn->ph.param_length = htons(sizeof(*ecn));
		SCTP_BUF_LEN(m) += sizeof(*ecn);
		prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn +
		    sizeof(*ecn));
	} else {
		prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn);
	}
	/* And now tell the peer we do pr-sctp */
	prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED);
	prsctp->ph.param_length = htons(sizeof(*prsctp));
	SCTP_BUF_LEN(m) += sizeof(*prsctp);

	/* And now tell the peer we do all the extensions */
	pr_supported = (struct sctp_supported_chunk_types_param *)
	    ((caddr_t)prsctp + sizeof(*prsctp));
	pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
	num_ext = 0;
	pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
	pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
	pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
	pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
	pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
	if (!sctp_auth_disable)
		pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
	p_len = sizeof(*pr_supported) + num_ext;
	pr_supported->ph.param_length = htons(p_len);
	/* zero the pad bytes up to the next 32-bit boundary */
	bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len);
	SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);

	/* ECN nonce: And now tell the peer we support ECN nonce */
	if (sctp_ecn_nonce) {
		ecn_nonce = (struct sctp_ecn_nonce_supported_param *)
		    ((caddr_t)pr_supported + SCTP_SIZE32(p_len));
		ecn_nonce->ph.param_type = htons(SCTP_ECN_NONCE_SUPPORTED);
		ecn_nonce->ph.param_length = htons(sizeof(*ecn_nonce));
		SCTP_BUF_LEN(m) += sizeof(*ecn_nonce);
	}
	/* add authentication parameters */
	if (!sctp_auth_disable) {
		struct sctp_auth_random *random;
		struct sctp_auth_hmac_algo *hmacs;
		struct sctp_auth_chunk_list *chunks;

		/* attach RANDOM parameter, if available */
		if (stcb->asoc.authinfo.random != NULL) {
			random = (struct sctp_auth_random *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
			p_len = sizeof(*random) + stcb->asoc.authinfo.random_len;
#ifdef SCTP_AUTH_DRAFT_04
			random->ph.param_type = htons(SCTP_RANDOM);
			random->ph.param_length = htons(p_len);
			bcopy(stcb->asoc.authinfo.random->key,
			    random->random_data,
			    stcb->asoc.authinfo.random_len);
#else
			/* random key already contains the header */
			bcopy(stcb->asoc.authinfo.random->key, random, p_len);
#endif
			/* zero out any padding required */
			bzero((caddr_t)random + p_len, SCTP_SIZE32(p_len) - p_len);
			SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
		}
		/* add HMAC_ALGO parameter */
		hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
		p_len = sctp_serialize_hmaclist(stcb->asoc.local_hmacs,
		    (uint8_t *) hmacs->hmac_ids);
		if (p_len > 0) {
			p_len += sizeof(*hmacs);
			hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
			hmacs->ph.param_length = htons(p_len);
			/* zero out any padding required */
			bzero((caddr_t)hmacs + p_len, SCTP_SIZE32(p_len) - p_len);
			SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
		}
		/* add CHUNKS parameter */
		chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
		p_len = sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks,
		    chunks->chunk_types);
		if (p_len > 0) {
			p_len += sizeof(*chunks);
			chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
			chunks->ph.param_length = htons(p_len);
			/* zero out any padding required */
			bzero((caddr_t)chunks + p_len, SCTP_SIZE32(p_len) - p_len);
			SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
		}
	}
	m_at = m;
	/* now the addresses */
	{
		struct sctp_scoping scp;

		/*
		 * To optimize this we could put the scoping stuff into a
		 * structure and remove the individual uint8's from the
		 * assoc structure. Then we could just pass in the address
		 * within the stcb.. but for now this is a quick hack to get
		 * the address stuff teased apart.
		 */
		scp.ipv4_addr_legal = stcb->asoc.ipv4_addr_legal;
		scp.ipv6_addr_legal = stcb->asoc.ipv6_addr_legal;
		scp.loopback_scope = stcb->asoc.loopback_scope;
		scp.ipv4_local_scope = stcb->asoc.ipv4_local_scope;
		scp.local_scope = stcb->asoc.local_scope;
		scp.site_scope = stcb->asoc.site_scope;

		m_at = sctp_add_addresses_to_i_ia(inp, &scp, m_at, cnt_inits_to);
	}


	/* calulate the size and update pkt header and chunk header */
	p_len = 0;
	for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
		if (SCTP_BUF_NEXT(m_at) == NULL)
			m_last = m_at;
		p_len += SCTP_BUF_LEN(m_at);
	}
	initm->msg.ch.chunk_length = htons((p_len - sizeof(struct sctphdr)));
	/*
	 * We pass 0 here to NOT set IP_DF if its IPv4, we ignore the return
	 * here since the timer will drive a retranmission.
	 */

	/* I don't expect this to execute but we will be safe here */
	padval = p_len % 4;
	if ((padval) && (m_last)) {
		/*
		 * The compiler worries that m_last may not be set even
		 * though I think it is impossible :-> however we add m_last
		 * here just in case.
2868 */ 2869 int ret; 2870 2871 ret = sctp_add_pad_tombuf(m_last, (4 - padval)); 2872 if (ret) { 2873 /* Houston we have a problem, no space */ 2874 sctp_m_freem(m); 2875 return; 2876 } 2877 p_len += padval; 2878 } 2879 ret = sctp_lowlevel_chunk_output(inp, stcb, net, 2880 (struct sockaddr *)&net->ro._l_addr, 2881 m, 0, NULL, 0, 0, NULL, 0); 2882 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 2883 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net); 2884 SCTP_GETTIME_TIMEVAL(&net->last_sent_time); 2885 } 2886 2887 struct mbuf * 2888 sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt, 2889 int param_offset, int *abort_processing, struct sctp_chunkhdr *cp) 2890 { 2891 /* 2892 * Given a mbuf containing an INIT or INIT-ACK with the param_offset 2893 * being equal to the beginning of the params i.e. (iphlen + 2894 * sizeof(struct sctp_init_msg) parse through the parameters to the 2895 * end of the mbuf verifying that all parameters are known. 2896 * 2897 * For unknown parameters build and return a mbuf with 2898 * UNRECOGNIZED_PARAMETER errors. If the flags indicate to stop 2899 * processing this chunk stop, and set *abort_processing to 1. 2900 * 2901 * By having param_offset be pre-set to where parameters begin it is 2902 * hoped that this routine may be reused in the future by new 2903 * features. 
2904 */ 2905 struct sctp_paramhdr *phdr, params; 2906 2907 struct mbuf *mat, *op_err; 2908 char tempbuf[SCTP_CHUNK_BUFFER_SIZE]; 2909 int at, limit, pad_needed; 2910 uint16_t ptype, plen; 2911 int err_at; 2912 2913 *abort_processing = 0; 2914 mat = in_initpkt; 2915 err_at = 0; 2916 limit = ntohs(cp->chunk_length) - sizeof(struct sctp_init_chunk); 2917 at = param_offset; 2918 op_err = NULL; 2919 2920 phdr = sctp_get_next_param(mat, at, ¶ms, sizeof(params)); 2921 while ((phdr != NULL) && ((size_t)limit >= sizeof(struct sctp_paramhdr))) { 2922 ptype = ntohs(phdr->param_type); 2923 plen = ntohs(phdr->param_length); 2924 limit -= SCTP_SIZE32(plen); 2925 if (plen < sizeof(struct sctp_paramhdr)) { 2926 #ifdef SCTP_DEBUG 2927 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) { 2928 printf("sctp_output.c:Impossible length in parameter < %d\n", plen); 2929 } 2930 #endif 2931 *abort_processing = 1; 2932 break; 2933 } 2934 /* 2935 * All parameters for all chunks that we know/understand are 2936 * listed here. We process them other places and make 2937 * appropriate stop actions per the upper bits. However this 2938 * is the generic routine processor's can call to get back 2939 * an operr.. to either incorporate (init-ack) or send. 
2940 */ 2941 if ((ptype == SCTP_HEARTBEAT_INFO) || 2942 (ptype == SCTP_IPV4_ADDRESS) || 2943 (ptype == SCTP_IPV6_ADDRESS) || 2944 (ptype == SCTP_STATE_COOKIE) || 2945 (ptype == SCTP_UNRECOG_PARAM) || 2946 (ptype == SCTP_COOKIE_PRESERVE) || 2947 (ptype == SCTP_SUPPORTED_ADDRTYPE) || 2948 (ptype == SCTP_PRSCTP_SUPPORTED) || 2949 (ptype == SCTP_ADD_IP_ADDRESS) || 2950 (ptype == SCTP_DEL_IP_ADDRESS) || 2951 (ptype == SCTP_ECN_CAPABLE) || 2952 (ptype == SCTP_ULP_ADAPTATION) || 2953 (ptype == SCTP_ERROR_CAUSE_IND) || 2954 (ptype == SCTP_RANDOM) || 2955 (ptype == SCTP_CHUNK_LIST) || 2956 (ptype == SCTP_CHUNK_LIST) || 2957 (ptype == SCTP_SET_PRIM_ADDR) || 2958 (ptype == SCTP_SUCCESS_REPORT) || 2959 (ptype == SCTP_ULP_ADAPTATION) || 2960 (ptype == SCTP_SUPPORTED_CHUNK_EXT) || 2961 (ptype == SCTP_ECN_NONCE_SUPPORTED) 2962 ) { 2963 /* no skip it */ 2964 at += SCTP_SIZE32(plen); 2965 } else if (ptype == SCTP_HOSTNAME_ADDRESS) { 2966 /* We can NOT handle HOST NAME addresses!! */ 2967 int l_len; 2968 2969 #ifdef SCTP_DEBUG 2970 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) { 2971 printf("Can't handle hostname addresses.. 
abort processing\n"); 2972 } 2973 #endif 2974 *abort_processing = 1; 2975 if (op_err == NULL) { 2976 /* Ok need to try to get a mbuf */ 2977 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); 2978 l_len += plen; 2979 l_len += sizeof(struct sctp_paramhdr); 2980 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA); 2981 if (op_err) { 2982 SCTP_BUF_LEN(op_err) = 0; 2983 /* 2984 * pre-reserve space for ip and sctp 2985 * header and chunk hdr 2986 */ 2987 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr)); 2988 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr)); 2989 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); 2990 } 2991 } 2992 if (op_err) { 2993 /* If we have space */ 2994 struct sctp_paramhdr s; 2995 2996 if (err_at % 4) { 2997 uint32_t cpthis = 0; 2998 2999 pad_needed = 4 - (err_at % 4); 3000 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis); 3001 err_at += pad_needed; 3002 } 3003 s.param_type = htons(SCTP_CAUSE_UNRESOLVABLE_ADDR); 3004 s.param_length = htons(sizeof(s) + plen); 3005 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s); 3006 err_at += sizeof(s); 3007 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, plen); 3008 if (phdr == NULL) { 3009 sctp_m_freem(op_err); 3010 /* 3011 * we are out of memory but we still 3012 * need to have a look at what to do 3013 * (the system is in trouble 3014 * though). 3015 */ 3016 return (NULL); 3017 } 3018 m_copyback(op_err, err_at, plen, (caddr_t)phdr); 3019 err_at += plen; 3020 } 3021 return (op_err); 3022 } else { 3023 /* 3024 * we do not recognize the parameter figure out what 3025 * we do. 3026 */ 3027 if ((ptype & 0x4000) == 0x4000) { 3028 /* Report bit is set?? 
*/ 3029 if (op_err == NULL) { 3030 int l_len; 3031 3032 /* Ok need to try to get an mbuf */ 3033 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); 3034 l_len += plen; 3035 l_len += sizeof(struct sctp_paramhdr); 3036 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA); 3037 if (op_err) { 3038 SCTP_BUF_LEN(op_err) = 0; 3039 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr)); 3040 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr)); 3041 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); 3042 } 3043 } 3044 if (op_err) { 3045 /* If we have space */ 3046 struct sctp_paramhdr s; 3047 3048 if (err_at % 4) { 3049 uint32_t cpthis = 0; 3050 3051 pad_needed = 4 - (err_at % 4); 3052 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis); 3053 err_at += pad_needed; 3054 } 3055 s.param_type = htons(SCTP_UNRECOG_PARAM); 3056 s.param_length = htons(sizeof(s) + plen); 3057 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s); 3058 err_at += sizeof(s); 3059 if (plen > sizeof(tempbuf)) { 3060 plen = sizeof(tempbuf); 3061 } 3062 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, plen); 3063 if (phdr == NULL) { 3064 sctp_m_freem(op_err); 3065 /* 3066 * we are out of memory but 3067 * we still need to have a 3068 * look at what to do (the 3069 * system is in trouble 3070 * though). 
3071 */ 3072 goto more_processing; 3073 } 3074 m_copyback(op_err, err_at, plen, (caddr_t)phdr); 3075 err_at += plen; 3076 } 3077 } 3078 more_processing: 3079 if ((ptype & 0x8000) == 0x0000) { 3080 return (op_err); 3081 } else { 3082 /* skip this chunk and continue processing */ 3083 at += SCTP_SIZE32(plen); 3084 } 3085 3086 } 3087 phdr = sctp_get_next_param(mat, at, ¶ms, sizeof(params)); 3088 } 3089 return (op_err); 3090 } 3091 3092 static int 3093 sctp_are_there_new_addresses(struct sctp_association *asoc, 3094 struct mbuf *in_initpkt, int iphlen, int offset) 3095 { 3096 /* 3097 * Given a INIT packet, look through the packet to verify that there 3098 * are NO new addresses. As we go through the parameters add reports 3099 * of any un-understood parameters that require an error. Also we 3100 * must return (1) to drop the packet if we see a un-understood 3101 * parameter that tells us to drop the chunk. 3102 */ 3103 struct sockaddr_in sin4, *sa4; 3104 struct sockaddr_in6 sin6, *sa6; 3105 struct sockaddr *sa_touse; 3106 struct sockaddr *sa; 3107 struct sctp_paramhdr *phdr, params; 3108 struct ip *iph; 3109 struct mbuf *mat; 3110 uint16_t ptype, plen; 3111 int err_at; 3112 uint8_t fnd; 3113 struct sctp_nets *net; 3114 3115 memset(&sin4, 0, sizeof(sin4)); 3116 memset(&sin6, 0, sizeof(sin6)); 3117 sin4.sin_family = AF_INET; 3118 sin4.sin_len = sizeof(sin4); 3119 sin6.sin6_family = AF_INET6; 3120 sin6.sin6_len = sizeof(sin6); 3121 3122 sa_touse = NULL; 3123 /* First what about the src address of the pkt ? 
*/ 3124 iph = mtod(in_initpkt, struct ip *); 3125 if (iph->ip_v == IPVERSION) { 3126 /* source addr is IPv4 */ 3127 sin4.sin_addr = iph->ip_src; 3128 sa_touse = (struct sockaddr *)&sin4; 3129 } else if (iph->ip_v == (IPV6_VERSION >> 4)) { 3130 /* source addr is IPv6 */ 3131 struct ip6_hdr *ip6h; 3132 3133 ip6h = mtod(in_initpkt, struct ip6_hdr *); 3134 sin6.sin6_addr = ip6h->ip6_src; 3135 sa_touse = (struct sockaddr *)&sin6; 3136 } else { 3137 return (1); 3138 } 3139 3140 fnd = 0; 3141 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3142 sa = (struct sockaddr *)&net->ro._l_addr; 3143 if (sa->sa_family == sa_touse->sa_family) { 3144 if (sa->sa_family == AF_INET) { 3145 sa4 = (struct sockaddr_in *)sa; 3146 if (sa4->sin_addr.s_addr == 3147 sin4.sin_addr.s_addr) { 3148 fnd = 1; 3149 break; 3150 } 3151 } else if (sa->sa_family == AF_INET6) { 3152 sa6 = (struct sockaddr_in6 *)sa; 3153 if (SCTP6_ARE_ADDR_EQUAL(&sa6->sin6_addr, 3154 &sin6.sin6_addr)) { 3155 fnd = 1; 3156 break; 3157 } 3158 } 3159 } 3160 } 3161 if (fnd == 0) { 3162 /* New address added! no need to look futher. 
*/ 3163 return (1); 3164 } 3165 /* Ok so far lets munge through the rest of the packet */ 3166 mat = in_initpkt; 3167 err_at = 0; 3168 sa_touse = NULL; 3169 offset += sizeof(struct sctp_init_chunk); 3170 phdr = sctp_get_next_param(mat, offset, ¶ms, sizeof(params)); 3171 while (phdr) { 3172 ptype = ntohs(phdr->param_type); 3173 plen = ntohs(phdr->param_length); 3174 if (ptype == SCTP_IPV4_ADDRESS) { 3175 struct sctp_ipv4addr_param *p4, p4_buf; 3176 3177 phdr = sctp_get_next_param(mat, offset, 3178 (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf)); 3179 if (plen != sizeof(struct sctp_ipv4addr_param) || 3180 phdr == NULL) { 3181 return (1); 3182 } 3183 p4 = (struct sctp_ipv4addr_param *)phdr; 3184 sin4.sin_addr.s_addr = p4->addr; 3185 sa_touse = (struct sockaddr *)&sin4; 3186 } else if (ptype == SCTP_IPV6_ADDRESS) { 3187 struct sctp_ipv6addr_param *p6, p6_buf; 3188 3189 phdr = sctp_get_next_param(mat, offset, 3190 (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf)); 3191 if (plen != sizeof(struct sctp_ipv6addr_param) || 3192 phdr == NULL) { 3193 return (1); 3194 } 3195 p6 = (struct sctp_ipv6addr_param *)phdr; 3196 memcpy((caddr_t)&sin6.sin6_addr, p6->addr, 3197 sizeof(p6->addr)); 3198 sa_touse = (struct sockaddr *)&sin4; 3199 } 3200 if (sa_touse) { 3201 /* ok, sa_touse points to one to check */ 3202 fnd = 0; 3203 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3204 sa = (struct sockaddr *)&net->ro._l_addr; 3205 if (sa->sa_family != sa_touse->sa_family) { 3206 continue; 3207 } 3208 if (sa->sa_family == AF_INET) { 3209 sa4 = (struct sockaddr_in *)sa; 3210 if (sa4->sin_addr.s_addr == 3211 sin4.sin_addr.s_addr) { 3212 fnd = 1; 3213 break; 3214 } 3215 } else if (sa->sa_family == AF_INET6) { 3216 sa6 = (struct sockaddr_in6 *)sa; 3217 if (SCTP6_ARE_ADDR_EQUAL( 3218 &sa6->sin6_addr, &sin6.sin6_addr)) { 3219 fnd = 1; 3220 break; 3221 } 3222 } 3223 } 3224 if (!fnd) { 3225 /* New addr added! 
no need to look further */ 3226 return (1); 3227 } 3228 } 3229 offset += SCTP_SIZE32(plen); 3230 phdr = sctp_get_next_param(mat, offset, ¶ms, sizeof(params)); 3231 } 3232 return (0); 3233 } 3234 3235 /* 3236 * Given a MBUF chain that was sent into us containing an INIT. Build a 3237 * INIT-ACK with COOKIE and send back. We assume that the in_initpkt has done 3238 * a pullup to include IPv6/4header, SCTP header and initial part of INIT 3239 * message (i.e. the struct sctp_init_msg). 3240 */ 3241 void 3242 sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 3243 struct mbuf *init_pkt, int iphlen, int offset, struct sctphdr *sh, 3244 struct sctp_init_chunk *init_chk) 3245 { 3246 struct sctp_association *asoc; 3247 struct mbuf *m, *m_at, *m_tmp, *m_cookie, *op_err, *m_last; 3248 struct sctp_init_msg *initackm_out; 3249 struct sctp_ecn_supported_param *ecn; 3250 struct sctp_prsctp_supported_param *prsctp; 3251 struct sctp_ecn_nonce_supported_param *ecn_nonce; 3252 struct sctp_supported_chunk_types_param *pr_supported; 3253 struct sockaddr_storage store; 3254 struct sockaddr_in *sin; 3255 struct sockaddr_in6 *sin6; 3256 struct route *ro; 3257 struct ip *iph; 3258 struct ip6_hdr *ip6; 3259 struct sockaddr *to; 3260 struct sctp_state_cookie stc; 3261 struct sctp_nets *net = NULL; 3262 int cnt_inits_to = 0; 3263 uint16_t his_limit, i_want; 3264 int abort_flag, padval, sz_of; 3265 int num_ext; 3266 int p_len; 3267 3268 if (stcb) { 3269 asoc = &stcb->asoc; 3270 } else { 3271 asoc = NULL; 3272 } 3273 m_last = NULL; 3274 if ((asoc != NULL) && 3275 (SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_WAIT) && 3276 (sctp_are_there_new_addresses(asoc, init_pkt, iphlen, offset))) { 3277 /* new addresses, out of here in non-cookie-wait states */ 3278 /* 3279 * Send a ABORT, we don't add the new address error clause 3280 * though we even set the T bit and copy in the 0 tag.. this 3281 * looks no different than if no listener was present. 
3282 */ 3283 sctp_send_abort(init_pkt, iphlen, sh, 0, NULL); 3284 return; 3285 } 3286 abort_flag = 0; 3287 op_err = sctp_arethere_unrecognized_parameters(init_pkt, 3288 (offset + sizeof(struct sctp_init_chunk)), 3289 &abort_flag, (struct sctp_chunkhdr *)init_chk); 3290 if (abort_flag) { 3291 sctp_send_abort(init_pkt, iphlen, sh, init_chk->init.initiate_tag, op_err); 3292 return; 3293 } 3294 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA); 3295 if (m == NULL) { 3296 /* No memory, INIT timer will re-attempt. */ 3297 if (op_err) 3298 sctp_m_freem(op_err); 3299 return; 3300 } 3301 SCTP_BUF_LEN(m) = sizeof(struct sctp_init_msg); 3302 3303 /* the time I built cookie */ 3304 SCTP_GETTIME_TIMEVAL(&stc.time_entered); 3305 3306 /* populate any tie tags */ 3307 if (asoc != NULL) { 3308 /* unlock before tag selections */ 3309 stc.tie_tag_my_vtag = asoc->my_vtag_nonce; 3310 stc.tie_tag_peer_vtag = asoc->peer_vtag_nonce; 3311 stc.cookie_life = asoc->cookie_life; 3312 net = asoc->primary_destination; 3313 } else { 3314 stc.tie_tag_my_vtag = 0; 3315 stc.tie_tag_peer_vtag = 0; 3316 /* life I will award this cookie */ 3317 stc.cookie_life = inp->sctp_ep.def_cookie_life; 3318 } 3319 3320 /* copy in the ports for later check */ 3321 stc.myport = sh->dest_port; 3322 stc.peerport = sh->src_port; 3323 3324 /* 3325 * If we wanted to honor cookie life extentions, we would add to 3326 * stc.cookie_life. 
For now we should NOT honor any extension 3327 */ 3328 stc.site_scope = stc.local_scope = stc.loopback_scope = 0; 3329 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 3330 struct inpcb *in_inp; 3331 3332 /* Its a V6 socket */ 3333 in_inp = (struct inpcb *)inp; 3334 stc.ipv6_addr_legal = 1; 3335 /* Now look at the binding flag to see if V4 will be legal */ 3336 if (SCTP_IPV6_V6ONLY(in_inp) == 0) { 3337 stc.ipv4_addr_legal = 1; 3338 } else { 3339 /* V4 addresses are NOT legal on the association */ 3340 stc.ipv4_addr_legal = 0; 3341 } 3342 } else { 3343 /* Its a V4 socket, no - V6 */ 3344 stc.ipv4_addr_legal = 1; 3345 stc.ipv6_addr_legal = 0; 3346 } 3347 3348 #ifdef SCTP_DONT_DO_PRIVADDR_SCOPE 3349 stc.ipv4_scope = 1; 3350 #else 3351 stc.ipv4_scope = 0; 3352 #endif 3353 /* now for scope setup */ 3354 memset((caddr_t)&store, 0, sizeof(store)); 3355 sin = (struct sockaddr_in *)&store; 3356 sin6 = (struct sockaddr_in6 *)&store; 3357 if (net == NULL) { 3358 to = (struct sockaddr *)&store; 3359 iph = mtod(init_pkt, struct ip *); 3360 if (iph->ip_v == IPVERSION) { 3361 struct in_addr addr; 3362 struct route iproute; 3363 3364 sin->sin_family = AF_INET; 3365 sin->sin_len = sizeof(struct sockaddr_in); 3366 sin->sin_port = sh->src_port; 3367 sin->sin_addr = iph->ip_src; 3368 /* lookup address */ 3369 stc.address[0] = sin->sin_addr.s_addr; 3370 stc.address[1] = 0; 3371 stc.address[2] = 0; 3372 stc.address[3] = 0; 3373 stc.addr_type = SCTP_IPV4_ADDRESS; 3374 /* local from address */ 3375 memset(&iproute, 0, sizeof(iproute)); 3376 ro = &iproute; 3377 memcpy(&ro->ro_dst, sin, sizeof(*sin)); 3378 addr = sctp_ipv4_source_address_selection(inp, NULL, 3379 ro, NULL, 0); 3380 if (ro->ro_rt) { 3381 RTFREE(ro->ro_rt); 3382 } 3383 stc.laddress[0] = addr.s_addr; 3384 stc.laddress[1] = 0; 3385 stc.laddress[2] = 0; 3386 stc.laddress[3] = 0; 3387 stc.laddr_type = SCTP_IPV4_ADDRESS; 3388 /* scope_id is only for v6 */ 3389 stc.scope_id = 0; 3390 #ifndef SCTP_DONT_DO_PRIVADDR_SCOPE 3391 if 
(IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) { 3392 stc.ipv4_scope = 1; 3393 } 3394 #else 3395 stc.ipv4_scope = 1; 3396 #endif /* SCTP_DONT_DO_PRIVADDR_SCOPE */ 3397 /* Must use the address in this case */ 3398 if (sctp_is_address_on_local_host((struct sockaddr *)sin)) { 3399 stc.loopback_scope = 1; 3400 stc.ipv4_scope = 1; 3401 stc.site_scope = 1; 3402 stc.local_scope = 1; 3403 } 3404 } else if (iph->ip_v == (IPV6_VERSION >> 4)) { 3405 struct in6_addr addr; 3406 3407 struct route_in6 iproute6; 3408 3409 ip6 = mtod(init_pkt, struct ip6_hdr *); 3410 sin6->sin6_family = AF_INET6; 3411 sin6->sin6_len = sizeof(struct sockaddr_in6); 3412 sin6->sin6_port = sh->src_port; 3413 sin6->sin6_addr = ip6->ip6_src; 3414 /* lookup address */ 3415 memcpy(&stc.address, &sin6->sin6_addr, 3416 sizeof(struct in6_addr)); 3417 sin6->sin6_scope_id = 0; 3418 stc.addr_type = SCTP_IPV6_ADDRESS; 3419 stc.scope_id = 0; 3420 if (sctp_is_address_on_local_host((struct sockaddr *)sin6)) { 3421 stc.loopback_scope = 1; 3422 stc.local_scope = 1; 3423 stc.site_scope = 1; 3424 stc.ipv4_scope = 1; 3425 } else if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { 3426 /* 3427 * If the new destination is a LINK_LOCAL we 3428 * must have common both site and local 3429 * scope. Don't set local scope though since 3430 * we must depend on the source to be added 3431 * implicitly. We cannot assure just because 3432 * we share one link that all links are 3433 * common. 3434 */ 3435 stc.local_scope = 0; 3436 stc.site_scope = 1; 3437 stc.ipv4_scope = 1; 3438 /* 3439 * we start counting for the private address 3440 * stuff at 1. since the link local we 3441 * source from won't show up in our scoped 3442 * count. 3443 */ 3444 cnt_inits_to = 1; 3445 /* pull out the scope_id from incoming pkt */ 3446 /* FIX ME: does this have scope from rcvif? 
*/ 3447 (void)sa6_recoverscope(sin6); 3448 3449 sa6_embedscope(sin6, ip6_use_defzone); 3450 stc.scope_id = sin6->sin6_scope_id; 3451 } else if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr)) { 3452 /* 3453 * If the new destination is SITE_LOCAL then 3454 * we must have site scope in common. 3455 */ 3456 stc.site_scope = 1; 3457 } 3458 /* local from address */ 3459 memset(&iproute6, 0, sizeof(iproute6)); 3460 ro = (struct route *)&iproute6; 3461 memcpy(&ro->ro_dst, sin6, sizeof(*sin6)); 3462 addr = sctp_ipv6_source_address_selection(inp, NULL, 3463 ro, NULL, 0); 3464 if (ro->ro_rt) { 3465 RTFREE(ro->ro_rt); 3466 } 3467 memcpy(&stc.laddress, &addr, sizeof(struct in6_addr)); 3468 stc.laddr_type = SCTP_IPV6_ADDRESS; 3469 } 3470 } else { 3471 /* set the scope per the existing tcb */ 3472 struct sctp_nets *lnet; 3473 3474 stc.loopback_scope = asoc->loopback_scope; 3475 stc.ipv4_scope = asoc->ipv4_local_scope; 3476 stc.site_scope = asoc->site_scope; 3477 stc.local_scope = asoc->local_scope; 3478 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) { 3479 if (lnet->ro._l_addr.sin6.sin6_family == AF_INET6) { 3480 if (IN6_IS_ADDR_LINKLOCAL(&lnet->ro._l_addr.sin6.sin6_addr)) { 3481 /* 3482 * if we have a LL address, start 3483 * counting at 1. 3484 */ 3485 cnt_inits_to = 1; 3486 } 3487 } 3488 } 3489 3490 /* use the net pointer */ 3491 to = (struct sockaddr *)&net->ro._l_addr; 3492 if (to->sa_family == AF_INET) { 3493 sin = (struct sockaddr_in *)to; 3494 stc.address[0] = sin->sin_addr.s_addr; 3495 stc.address[1] = 0; 3496 stc.address[2] = 0; 3497 stc.address[3] = 0; 3498 stc.addr_type = SCTP_IPV4_ADDRESS; 3499 if (net->src_addr_selected == 0) { 3500 /* 3501 * strange case here, the INIT should have 3502 * did the selection. 
3503 */ 3504 net->ro._s_addr.sin.sin_addr = 3505 sctp_ipv4_source_address_selection(inp, 3506 stcb, (struct route *)&net->ro, net, 0); 3507 net->src_addr_selected = 1; 3508 3509 } 3510 stc.laddress[0] = net->ro._s_addr.sin.sin_addr.s_addr; 3511 stc.laddress[1] = 0; 3512 stc.laddress[2] = 0; 3513 stc.laddress[3] = 0; 3514 stc.laddr_type = SCTP_IPV4_ADDRESS; 3515 } else if (to->sa_family == AF_INET6) { 3516 sin6 = (struct sockaddr_in6 *)to; 3517 memcpy(&stc.address, &sin6->sin6_addr, 3518 sizeof(struct in6_addr)); 3519 stc.addr_type = SCTP_IPV6_ADDRESS; 3520 if (net->src_addr_selected == 0) { 3521 /* 3522 * strange case here, the INIT should have 3523 * did the selection. 3524 */ 3525 net->ro._s_addr.sin6.sin6_addr = 3526 sctp_ipv6_source_address_selection(inp, 3527 stcb, (struct route *)&net->ro, net, 0); 3528 net->src_addr_selected = 1; 3529 } 3530 memcpy(&stc.laddress, &net->ro._l_addr.sin6.sin6_addr, 3531 sizeof(struct in6_addr)); 3532 stc.laddr_type = SCTP_IPV6_ADDRESS; 3533 } 3534 } 3535 /* Now lets put the SCTP header in place */ 3536 initackm_out = mtod(m, struct sctp_init_msg *); 3537 initackm_out->sh.src_port = inp->sctp_lport; 3538 initackm_out->sh.dest_port = sh->src_port; 3539 initackm_out->sh.v_tag = init_chk->init.initiate_tag; 3540 /* Save it off for quick ref */ 3541 stc.peers_vtag = init_chk->init.initiate_tag; 3542 initackm_out->sh.checksum = 0; /* calculate later */ 3543 /* who are we */ 3544 memcpy(stc.identification, SCTP_VERSION_STRING, 3545 min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification))); 3546 /* now the chunk header */ 3547 initackm_out->msg.ch.chunk_type = SCTP_INITIATION_ACK; 3548 initackm_out->msg.ch.chunk_flags = 0; 3549 /* fill in later from mbuf we build */ 3550 initackm_out->msg.ch.chunk_length = 0; 3551 /* place in my tag */ 3552 if ((asoc != NULL) && 3553 ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) || 3554 (SCTP_GET_STATE(asoc) == SCTP_STATE_INUSE) || 3555 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED))) { 
3556 /* re-use the v-tags and init-seq here */ 3557 initackm_out->msg.init.initiate_tag = htonl(asoc->my_vtag); 3558 initackm_out->msg.init.initial_tsn = htonl(asoc->init_seq_number); 3559 } else { 3560 uint32_t vtag; 3561 3562 if (asoc) { 3563 atomic_add_int(&asoc->refcnt, 1); 3564 SCTP_TCB_UNLOCK(stcb); 3565 vtag = sctp_select_a_tag(inp); 3566 initackm_out->msg.init.initiate_tag = htonl(vtag); 3567 /* get a TSN to use too */ 3568 initackm_out->msg.init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep)); 3569 SCTP_TCB_LOCK(stcb); 3570 atomic_add_int(&asoc->refcnt, -1); 3571 } else { 3572 vtag = sctp_select_a_tag(inp); 3573 initackm_out->msg.init.initiate_tag = htonl(vtag); 3574 /* get a TSN to use too */ 3575 initackm_out->msg.init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep)); 3576 } 3577 } 3578 /* save away my tag to */ 3579 stc.my_vtag = initackm_out->msg.init.initiate_tag; 3580 3581 /* set up some of the credits. */ 3582 initackm_out->msg.init.a_rwnd = htonl(max(inp->sctp_socket->so_rcv.sb_hiwat, SCTP_MINIMAL_RWND)); 3583 /* set what I want */ 3584 his_limit = ntohs(init_chk->init.num_inbound_streams); 3585 /* choose what I want */ 3586 if (asoc != NULL) { 3587 if (asoc->streamoutcnt > inp->sctp_ep.pre_open_stream_count) { 3588 i_want = asoc->streamoutcnt; 3589 } else { 3590 i_want = inp->sctp_ep.pre_open_stream_count; 3591 } 3592 } else { 3593 i_want = inp->sctp_ep.pre_open_stream_count; 3594 } 3595 if (his_limit < i_want) { 3596 /* I Want more :< */ 3597 initackm_out->msg.init.num_outbound_streams = init_chk->init.num_inbound_streams; 3598 } else { 3599 /* I can have what I want :> */ 3600 initackm_out->msg.init.num_outbound_streams = htons(i_want); 3601 } 3602 /* tell him his limt. 
*/ 3603 initackm_out->msg.init.num_inbound_streams = 3604 htons(inp->sctp_ep.max_open_streams_intome); 3605 /* setup the ECN pointer */ 3606 3607 if (inp->sctp_ep.adaptation_layer_indicator) { 3608 struct sctp_adaptation_layer_indication *ali; 3609 3610 ali = (struct sctp_adaptation_layer_indication *)( 3611 (caddr_t)initackm_out + sizeof(*initackm_out)); 3612 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION); 3613 ali->ph.param_length = htons(sizeof(*ali)); 3614 ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator); 3615 SCTP_BUF_LEN(m) += sizeof(*ali); 3616 ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali + 3617 sizeof(*ali)); 3618 } else { 3619 ecn = (struct sctp_ecn_supported_param *)( 3620 (caddr_t)initackm_out + sizeof(*initackm_out)); 3621 } 3622 3623 /* ECN parameter */ 3624 if (sctp_ecn_enable == 1) { 3625 ecn->ph.param_type = htons(SCTP_ECN_CAPABLE); 3626 ecn->ph.param_length = htons(sizeof(*ecn)); 3627 SCTP_BUF_LEN(m) += sizeof(*ecn); 3628 3629 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn + 3630 sizeof(*ecn)); 3631 } else { 3632 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn); 3633 } 3634 /* And now tell the peer we do pr-sctp */ 3635 prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED); 3636 prsctp->ph.param_length = htons(sizeof(*prsctp)); 3637 SCTP_BUF_LEN(m) += sizeof(*prsctp); 3638 3639 /* And now tell the peer we do all the extensions */ 3640 pr_supported = (struct sctp_supported_chunk_types_param *) 3641 ((caddr_t)prsctp + sizeof(*prsctp)); 3642 3643 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT); 3644 num_ext = 0; 3645 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF; 3646 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK; 3647 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN; 3648 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED; 3649 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET; 3650 if (!sctp_auth_disable) 3651 
pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION; 3652 p_len = sizeof(*pr_supported) + num_ext; 3653 pr_supported->ph.param_length = htons(p_len); 3654 bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len); 3655 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len); 3656 3657 /* ECN nonce: And now tell the peer we support ECN nonce */ 3658 if (sctp_ecn_nonce) { 3659 ecn_nonce = (struct sctp_ecn_nonce_supported_param *) 3660 ((caddr_t)pr_supported + SCTP_SIZE32(p_len)); 3661 ecn_nonce->ph.param_type = htons(SCTP_ECN_NONCE_SUPPORTED); 3662 ecn_nonce->ph.param_length = htons(sizeof(*ecn_nonce)); 3663 SCTP_BUF_LEN(m) += sizeof(*ecn_nonce); 3664 } 3665 /* add authentication parameters */ 3666 if (!sctp_auth_disable) { 3667 struct sctp_auth_random *random; 3668 struct sctp_auth_hmac_algo *hmacs; 3669 struct sctp_auth_chunk_list *chunks; 3670 uint16_t random_len; 3671 3672 /* generate and add RANDOM parameter */ 3673 random_len = sctp_auth_random_len; 3674 random = (struct sctp_auth_random *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m)); 3675 random->ph.param_type = htons(SCTP_RANDOM); 3676 p_len = sizeof(*random) + random_len; 3677 random->ph.param_length = htons(p_len); 3678 SCTP_READ_RANDOM(random->random_data, random_len); 3679 /* zero out any padding required */ 3680 bzero((caddr_t)random + p_len, SCTP_SIZE32(p_len) - p_len); 3681 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len); 3682 3683 /* add HMAC_ALGO parameter */ 3684 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m)); 3685 p_len = sctp_serialize_hmaclist(inp->sctp_ep.local_hmacs, 3686 (uint8_t *) hmacs->hmac_ids); 3687 if (p_len > 0) { 3688 p_len += sizeof(*hmacs); 3689 hmacs->ph.param_type = htons(SCTP_HMAC_LIST); 3690 hmacs->ph.param_length = htons(p_len); 3691 /* zero out any padding required */ 3692 bzero((caddr_t)hmacs + p_len, SCTP_SIZE32(p_len) - p_len); 3693 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len); 3694 } 3695 /* add CHUNKS parameter */ 3696 chunks = (struct sctp_auth_chunk_list *)(mtod(m, 
caddr_t)+SCTP_BUF_LEN(m)); 3697 p_len = sctp_serialize_auth_chunks(inp->sctp_ep.local_auth_chunks, 3698 chunks->chunk_types); 3699 if (p_len > 0) { 3700 p_len += sizeof(*chunks); 3701 chunks->ph.param_type = htons(SCTP_CHUNK_LIST); 3702 chunks->ph.param_length = htons(p_len); 3703 /* zero out any padding required */ 3704 bzero((caddr_t)chunks + p_len, SCTP_SIZE32(p_len) - p_len); 3705 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len); 3706 } 3707 } 3708 m_at = m; 3709 /* now the addresses */ 3710 { 3711 struct sctp_scoping scp; 3712 3713 /* 3714 * To optimize this we could put the scoping stuff into a 3715 * structure and remove the individual uint8's from the stc 3716 * structure. Then we could just pass in the address within 3717 * the stc.. but for now this is a quick hack to get the 3718 * address stuff teased apart. 3719 */ 3720 scp.ipv4_addr_legal = stc.ipv4_addr_legal; 3721 scp.ipv6_addr_legal = stc.ipv6_addr_legal; 3722 scp.loopback_scope = stc.loopback_scope; 3723 scp.ipv4_local_scope = stc.ipv4_scope; 3724 scp.local_scope = stc.local_scope; 3725 scp.site_scope = stc.site_scope; 3726 m_at = sctp_add_addresses_to_i_ia(inp, &scp, m_at, cnt_inits_to); 3727 } 3728 3729 /* tack on the operational error if present */ 3730 if (op_err) { 3731 struct mbuf *ol; 3732 int llen; 3733 3734 llen = 0; 3735 ol = op_err; 3736 while (ol) { 3737 llen += SCTP_BUF_LEN(ol); 3738 ol = SCTP_BUF_NEXT(ol); 3739 } 3740 if (llen % 4) { 3741 /* must add a pad to the param */ 3742 uint32_t cpthis = 0; 3743 int padlen; 3744 3745 padlen = 4 - (llen % 4); 3746 m_copyback(op_err, llen, padlen, (caddr_t)&cpthis); 3747 } 3748 while (SCTP_BUF_NEXT(m_at) != NULL) { 3749 m_at = SCTP_BUF_NEXT(m_at); 3750 } 3751 SCTP_BUF_NEXT(m_at) = op_err; 3752 while (SCTP_BUF_NEXT(m_at) != NULL) { 3753 m_at = SCTP_BUF_NEXT(m_at); 3754 } 3755 } 3756 /* Get total size of init packet */ 3757 sz_of = SCTP_SIZE32(ntohs(init_chk->ch.chunk_length)); 3758 /* pre-calulate the size and update pkt header and chunk header */ 3759 
p_len = 0; 3760 for (m_tmp = m; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) { 3761 p_len += SCTP_BUF_LEN(m_tmp); 3762 if (SCTP_BUF_NEXT(m_tmp) == NULL) { 3763 /* m_tmp should now point to last one */ 3764 break; 3765 } 3766 } 3767 /* 3768 * Figure now the size of the cookie. We know the size of the 3769 * INIT-ACK. The Cookie is going to be the size of INIT, INIT-ACK, 3770 * COOKIE-STRUCTURE and SIGNATURE. 3771 */ 3772 3773 /* 3774 * take our earlier INIT calc and add in the sz we just calculated 3775 * minus the size of the sctphdr (its not included in chunk size 3776 */ 3777 3778 /* add once for the INIT-ACK */ 3779 sz_of += (p_len - sizeof(struct sctphdr)); 3780 3781 /* add a second time for the INIT-ACK in the cookie */ 3782 sz_of += (p_len - sizeof(struct sctphdr)); 3783 3784 /* Now add the cookie header and cookie message struct */ 3785 sz_of += sizeof(struct sctp_state_cookie_param); 3786 /* ...and add the size of our signature */ 3787 sz_of += SCTP_SIGNATURE_SIZE; 3788 initackm_out->msg.ch.chunk_length = htons(sz_of); 3789 3790 /* Now we must build a cookie */ 3791 m_cookie = sctp_add_cookie(inp, init_pkt, offset, m, 3792 sizeof(struct sctphdr), &stc); 3793 if (m_cookie == NULL) { 3794 /* memory problem */ 3795 sctp_m_freem(m); 3796 return; 3797 } 3798 /* Now append the cookie to the end and update the space/size */ 3799 SCTP_BUF_NEXT(m_tmp) = m_cookie; 3800 for (; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) { 3801 p_len += SCTP_BUF_LEN(m_tmp); 3802 if (SCTP_BUF_NEXT(m_tmp) == NULL) { 3803 /* m_tmp should now point to last one */ 3804 m_last = m_tmp; 3805 break; 3806 } 3807 } 3808 3809 /* 3810 * We pass 0 here to NOT set IP_DF if its IPv4, we ignore the return 3811 * here since the timer will drive a retranmission. 
3812 */ 3813 padval = p_len % 4; 3814 if ((padval) && (m_last)) { 3815 /* see my previous comments on m_last */ 3816 int ret; 3817 3818 ret = sctp_add_pad_tombuf(m_last, (4 - padval)); 3819 if (ret) { 3820 /* Houston we have a problem, no space */ 3821 sctp_m_freem(m); 3822 return; 3823 } 3824 p_len += padval; 3825 } 3826 sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0, 3827 NULL, 0); 3828 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 3829 } 3830 3831 3832 void 3833 sctp_insert_on_wheel(struct sctp_tcb *stcb, 3834 struct sctp_association *asoc, 3835 struct sctp_stream_out *strq, int holds_lock) 3836 { 3837 struct sctp_stream_out *stre, *strn; 3838 3839 if (holds_lock == 0) 3840 SCTP_TCB_SEND_LOCK(stcb); 3841 if ((strq->next_spoke.tqe_next) || 3842 (strq->next_spoke.tqe_prev)) { 3843 /* already on wheel */ 3844 goto outof_here; 3845 } 3846 stre = TAILQ_FIRST(&asoc->out_wheel); 3847 if (stre == NULL) { 3848 /* only one on wheel */ 3849 TAILQ_INSERT_HEAD(&asoc->out_wheel, strq, next_spoke); 3850 goto outof_here; 3851 } 3852 for (; stre; stre = strn) { 3853 strn = TAILQ_NEXT(stre, next_spoke); 3854 if (stre->stream_no > strq->stream_no) { 3855 TAILQ_INSERT_BEFORE(stre, strq, next_spoke); 3856 goto outof_here; 3857 } else if (stre->stream_no == strq->stream_no) { 3858 /* huh, should not happen */ 3859 goto outof_here; 3860 } else if (strn == NULL) { 3861 /* next one is null */ 3862 TAILQ_INSERT_AFTER(&asoc->out_wheel, stre, strq, 3863 next_spoke); 3864 } 3865 } 3866 outof_here: 3867 if (holds_lock == 0) 3868 SCTP_TCB_SEND_UNLOCK(stcb); 3869 3870 3871 } 3872 3873 static void 3874 sctp_remove_from_wheel(struct sctp_tcb *stcb, 3875 struct sctp_association *asoc, 3876 struct sctp_stream_out *strq) 3877 { 3878 /* take off and then setup so we know it is not on the wheel */ 3879 SCTP_TCB_SEND_LOCK(stcb); 3880 if (TAILQ_FIRST(&strq->outqueue)) { 3881 /* more was added */ 3882 SCTP_TCB_SEND_UNLOCK(stcb); 3883 return; 3884 } 3885 
TAILQ_REMOVE(&asoc->out_wheel, strq, next_spoke); 3886 strq->next_spoke.tqe_next = NULL; 3887 strq->next_spoke.tqe_prev = NULL; 3888 SCTP_TCB_SEND_UNLOCK(stcb); 3889 } 3890 3891 static void 3892 sctp_prune_prsctp(struct sctp_tcb *stcb, 3893 struct sctp_association *asoc, 3894 struct sctp_sndrcvinfo *srcv, 3895 int dataout) 3896 { 3897 int freed_spc = 0; 3898 struct sctp_tmit_chunk *chk, *nchk; 3899 3900 SCTP_TCB_LOCK_ASSERT(stcb); 3901 if ((asoc->peer_supports_prsctp) && 3902 (asoc->sent_queue_cnt_removeable > 0)) { 3903 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 3904 /* 3905 * Look for chunks marked with the PR_SCTP flag AND 3906 * the buffer space flag. If the one being sent is 3907 * equal or greater priority then purge the old one 3908 * and free some space. 3909 */ 3910 if (PR_SCTP_BUF_ENABLED(chk->flags)) { 3911 /* 3912 * This one is PR-SCTP AND buffer space 3913 * limited type 3914 */ 3915 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) { 3916 /* 3917 * Lower numbers equates to higher 3918 * priority so if the one we are 3919 * looking at has a larger or equal 3920 * priority we want to drop the data 3921 * and NOT retransmit it. 
3922 */ 3923 if (chk->data) { 3924 /* 3925 * We release the book_size 3926 * if the mbuf is here 3927 */ 3928 int ret_spc; 3929 int cause; 3930 3931 if (chk->sent > SCTP_DATAGRAM_UNSENT) 3932 cause = SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT; 3933 else 3934 cause = SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_UNSENT; 3935 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk, 3936 cause, 3937 &asoc->sent_queue); 3938 freed_spc += ret_spc; 3939 if (freed_spc >= dataout) { 3940 return; 3941 } 3942 } /* if chunk was present */ 3943 } /* if of sufficent priority */ 3944 } /* if chunk has enabled */ 3945 } /* tailqforeach */ 3946 3947 chk = TAILQ_FIRST(&asoc->send_queue); 3948 while (chk) { 3949 nchk = TAILQ_NEXT(chk, sctp_next); 3950 /* Here we must move to the sent queue and mark */ 3951 if (PR_SCTP_TTL_ENABLED(chk->flags)) { 3952 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) { 3953 if (chk->data) { 3954 /* 3955 * We release the book_size 3956 * if the mbuf is here 3957 */ 3958 int ret_spc; 3959 3960 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk, 3961 SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_UNSENT, 3962 &asoc->send_queue); 3963 3964 freed_spc += ret_spc; 3965 if (freed_spc >= dataout) { 3966 return; 3967 } 3968 } /* end if chk->data */ 3969 } /* end if right class */ 3970 } /* end if chk pr-sctp */ 3971 chk = nchk; 3972 } /* end while (chk) */ 3973 } /* if enabled in asoc */ 3974 } 3975 3976 __inline int 3977 sctp_get_frag_point(struct sctp_tcb *stcb, 3978 struct sctp_association *asoc) 3979 { 3980 int siz, ovh; 3981 3982 /* 3983 * For endpoints that have both v6 and v4 addresses we must reserve 3984 * room for the ipv6 header, for those that are only dealing with V4 3985 * we use a larger frag point. 
3986 */ 3987 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 3988 ovh = SCTP_MED_OVERHEAD; 3989 } else { 3990 ovh = SCTP_MED_V4_OVERHEAD; 3991 } 3992 3993 if (stcb->sctp_ep->sctp_frag_point > asoc->smallest_mtu) 3994 siz = asoc->smallest_mtu - ovh; 3995 else 3996 siz = (stcb->sctp_ep->sctp_frag_point - ovh); 3997 /* 3998 * if (siz > (MCLBYTES-sizeof(struct sctp_data_chunk))) { 3999 */ 4000 /* A data chunk MUST fit in a cluster */ 4001 /* siz = (MCLBYTES - sizeof(struct sctp_data_chunk)); */ 4002 /* } */ 4003 4004 /* adjust for an AUTH chunk if DATA requires auth */ 4005 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) 4006 siz -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id); 4007 4008 if (siz % 4) { 4009 /* make it an even word boundary please */ 4010 siz -= (siz % 4); 4011 } 4012 return (siz); 4013 } 4014 extern unsigned int sctp_max_chunks_on_queue; 4015 4016 static void 4017 sctp_set_prsctp_policy(struct sctp_tcb *stcb, 4018 struct sctp_stream_queue_pending *sp) 4019 { 4020 sp->pr_sctp_on = 0; 4021 if (stcb->asoc.peer_supports_prsctp) { 4022 /* 4023 * We assume that the user wants PR_SCTP_TTL if the user 4024 * provides a positive lifetime but does not specify any 4025 * PR_SCTP policy. This is a BAD assumption and causes 4026 * problems at least with the U-Vancovers MPI folks. I will 4027 * change this to be no policy means NO PR-SCTP. 4028 */ 4029 if (PR_SCTP_ENABLED(sp->sinfo_flags)) { 4030 sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags); 4031 sp->pr_sctp_on = 1; 4032 } else { 4033 return; 4034 } 4035 switch (PR_SCTP_POLICY(sp->sinfo_flags)) { 4036 case CHUNK_FLAGS_PR_SCTP_BUF: 4037 /* 4038 * Time to live is a priority stored in tv_sec when 4039 * doing the buffer drop thing. 
4040 */ 4041 sp->ts.tv_sec = sp->timetolive; 4042 sp->ts.tv_usec = 0; 4043 break; 4044 case CHUNK_FLAGS_PR_SCTP_TTL: 4045 { 4046 struct timeval tv; 4047 4048 SCTP_GETTIME_TIMEVAL(&sp->ts); 4049 tv.tv_sec = sp->timetolive / 1000; 4050 tv.tv_usec = (sp->timetolive * 1000) % 1000000; 4051 timevaladd(&sp->ts, &tv); 4052 } 4053 break; 4054 case CHUNK_FLAGS_PR_SCTP_RTX: 4055 /* 4056 * Time to live is a the number or retransmissions 4057 * stored in tv_sec. 4058 */ 4059 sp->ts.tv_sec = sp->timetolive; 4060 sp->ts.tv_usec = 0; 4061 break; 4062 default: 4063 #ifdef SCTP_DEBUG 4064 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) { 4065 printf("Unknown PR_SCTP policy %u.\n", PR_SCTP_POLICY(sp->sinfo_flags)); 4066 } 4067 #endif 4068 break; 4069 } 4070 } 4071 } 4072 4073 static int 4074 sctp_msg_append(struct sctp_tcb *stcb, 4075 struct sctp_nets *net, 4076 struct mbuf *m, 4077 struct sctp_sndrcvinfo *srcv, int hold_stcb_lock) 4078 { 4079 int error = 0, holds_lock; 4080 struct mbuf *at; 4081 struct sctp_stream_queue_pending *sp = NULL; 4082 struct sctp_stream_out *strm; 4083 4084 /* 4085 * Given an mbuf chain, put it into the association send queue and 4086 * place it on the wheel 4087 */ 4088 holds_lock = hold_stcb_lock; 4089 if (srcv->sinfo_stream >= stcb->asoc.streamoutcnt) { 4090 /* Invalid stream number */ 4091 error = EINVAL; 4092 goto out_now; 4093 } 4094 if ((stcb->asoc.stream_locked) && 4095 (stcb->asoc.stream_locked_on != srcv->sinfo_stream)) { 4096 error = EAGAIN; 4097 goto out_now; 4098 } 4099 strm = &stcb->asoc.strmout[srcv->sinfo_stream]; 4100 /* Now can we send this? 
*/ 4101 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_SENT) || 4102 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) || 4103 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) || 4104 (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING)) { 4105 /* got data while shutting down */ 4106 error = ECONNRESET; 4107 goto out_now; 4108 } 4109 sp = SCTP_ZONE_GET(sctppcbinfo.ipi_zone_strmoq, struct sctp_stream_queue_pending); 4110 if (sp == NULL) { 4111 error = ENOMEM; 4112 goto out_now; 4113 } 4114 SCTP_INCR_STRMOQ_COUNT(); 4115 sp->sinfo_flags = srcv->sinfo_flags; 4116 sp->timetolive = srcv->sinfo_timetolive; 4117 sp->ppid = srcv->sinfo_ppid; 4118 sp->context = srcv->sinfo_context; 4119 sp->strseq = 0; 4120 if (sp->sinfo_flags & SCTP_ADDR_OVER) { 4121 sp->net = net; 4122 sp->addr_over = 1; 4123 } else { 4124 sp->net = stcb->asoc.primary_destination; 4125 sp->addr_over = 0; 4126 } 4127 atomic_add_int(&sp->net->ref_count, 1); 4128 SCTP_GETTIME_TIMEVAL(&sp->ts); 4129 sp->stream = srcv->sinfo_stream; 4130 sp->msg_is_complete = 1; 4131 sp->some_taken = 0; 4132 sp->data = m; 4133 sp->tail_mbuf = NULL; 4134 sp->length = 0; 4135 at = m; 4136 sctp_set_prsctp_policy(stcb, sp); 4137 /* 4138 * We could in theory (for sendall) pass the length in, but we would 4139 * still have to hunt through the chain since we need to setup the 4140 * tail_mbuf 4141 */ 4142 while (at) { 4143 if (SCTP_BUF_NEXT(at) == NULL) 4144 sp->tail_mbuf = at; 4145 sp->length += SCTP_BUF_LEN(at); 4146 at = SCTP_BUF_NEXT(at); 4147 } 4148 SCTP_TCB_SEND_LOCK(stcb); 4149 sctp_snd_sb_alloc(stcb, sp->length); 4150 stcb->asoc.stream_queue_cnt++; 4151 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next); 4152 if ((srcv->sinfo_flags & SCTP_UNORDERED) == 0) { 4153 sp->strseq = strm->next_sequence_sent; 4154 strm->next_sequence_sent++; 4155 } 4156 if ((strm->next_spoke.tqe_next == NULL) && 4157 (strm->next_spoke.tqe_prev == NULL)) { 4158 /* Not on wheel, insert */ 4159 sctp_insert_on_wheel(stcb, 
&stcb->asoc, strm, 1); 4160 } 4161 m = NULL; 4162 SCTP_TCB_SEND_UNLOCK(stcb); 4163 out_now: 4164 if (m) { 4165 sctp_m_freem(m); 4166 } 4167 return (error); 4168 } 4169 4170 4171 static struct mbuf * 4172 sctp_copy_mbufchain(struct mbuf *clonechain, 4173 struct mbuf *outchain, 4174 struct mbuf **endofchain, 4175 int can_take_mbuf, 4176 int sizeofcpy, 4177 uint8_t copy_by_ref) 4178 { 4179 struct mbuf *m; 4180 struct mbuf *appendchain; 4181 caddr_t cp; 4182 int len; 4183 4184 if (endofchain == NULL) { 4185 /* error */ 4186 error_out: 4187 if (outchain) 4188 sctp_m_freem(outchain); 4189 return (NULL); 4190 } 4191 if (can_take_mbuf) { 4192 appendchain = clonechain; 4193 } else { 4194 if (!copy_by_ref && (sizeofcpy <= ((((sctp_mbuf_threshold_count - 1) * MLEN) + MHLEN)))) { 4195 /* Its not in a cluster */ 4196 if (*endofchain == NULL) { 4197 /* lets get a mbuf cluster */ 4198 if (outchain == NULL) { 4199 /* This is the general case */ 4200 new_mbuf: 4201 outchain = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_HEADER); 4202 if (outchain == NULL) { 4203 goto error_out; 4204 } 4205 SCTP_BUF_LEN(outchain) = 0; 4206 *endofchain = outchain; 4207 /* get the prepend space */ 4208 SCTP_BUF_RESV_UF(outchain, (SCTP_FIRST_MBUF_RESV + 4)); 4209 } else { 4210 /* 4211 * We really should not get a NULL 4212 * in endofchain 4213 */ 4214 /* find end */ 4215 m = outchain; 4216 while (m) { 4217 if (SCTP_BUF_NEXT(m) == NULL) { 4218 *endofchain = m; 4219 break; 4220 } 4221 m = SCTP_BUF_NEXT(m); 4222 } 4223 /* sanity */ 4224 if (*endofchain == NULL) { 4225 /* 4226 * huh, TSNH XXX maybe we 4227 * should panic 4228 */ 4229 sctp_m_freem(outchain); 4230 goto new_mbuf; 4231 } 4232 } 4233 /* get the new end of length */ 4234 len = M_TRAILINGSPACE(*endofchain); 4235 } else { 4236 /* how much is left at the end? 
*/ 4237 len = M_TRAILINGSPACE(*endofchain); 4238 } 4239 /* Find the end of the data, for appending */ 4240 cp = (mtod((*endofchain), caddr_t)+SCTP_BUF_LEN((*endofchain))); 4241 4242 /* Now lets copy it out */ 4243 if (len >= sizeofcpy) { 4244 /* It all fits, copy it in */ 4245 m_copydata(clonechain, 0, sizeofcpy, cp); 4246 SCTP_BUF_LEN((*endofchain)) += sizeofcpy; 4247 } else { 4248 /* fill up the end of the chain */ 4249 if (len > 0) { 4250 m_copydata(clonechain, 0, len, cp); 4251 SCTP_BUF_LEN((*endofchain)) += len; 4252 /* now we need another one */ 4253 sizeofcpy -= len; 4254 } 4255 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_HEADER); 4256 if (m == NULL) { 4257 /* We failed */ 4258 goto error_out; 4259 } 4260 SCTP_BUF_NEXT((*endofchain)) = m; 4261 *endofchain = m; 4262 cp = mtod((*endofchain), caddr_t); 4263 m_copydata(clonechain, len, sizeofcpy, cp); 4264 SCTP_BUF_LEN((*endofchain)) += sizeofcpy; 4265 } 4266 return (outchain); 4267 } else { 4268 /* copy the old fashion way */ 4269 appendchain = SCTP_M_COPYM(clonechain, 0, M_COPYALL, M_DONTWAIT); 4270 } 4271 } 4272 if (appendchain == NULL) { 4273 /* error */ 4274 if (outchain) 4275 sctp_m_freem(outchain); 4276 return (NULL); 4277 } 4278 if (outchain) { 4279 /* tack on to the end */ 4280 if (*endofchain != NULL) { 4281 SCTP_BUF_NEXT(((*endofchain))) = appendchain; 4282 } else { 4283 m = outchain; 4284 while (m) { 4285 if (SCTP_BUF_NEXT(m) == NULL) { 4286 SCTP_BUF_NEXT(m) = appendchain; 4287 break; 4288 } 4289 m = SCTP_BUF_NEXT(m); 4290 } 4291 } 4292 /* 4293 * save off the end and update the end-chain postion 4294 */ 4295 m = appendchain; 4296 while (m) { 4297 if (SCTP_BUF_NEXT(m) == NULL) { 4298 *endofchain = m; 4299 break; 4300 } 4301 m = SCTP_BUF_NEXT(m); 4302 } 4303 return (outchain); 4304 } else { 4305 /* save off the end and update the end-chain postion */ 4306 m = appendchain; 4307 while (m) { 4308 if (SCTP_BUF_NEXT(m) == NULL) { 4309 *endofchain = m; 4310 break; 4311 } 4312 m = 
SCTP_BUF_NEXT(m); 4313 } 4314 return (appendchain); 4315 } 4316 } 4317 4318 int 4319 sctp_med_chunk_output(struct sctp_inpcb *inp, 4320 struct sctp_tcb *stcb, 4321 struct sctp_association *asoc, 4322 int *num_out, 4323 int *reason_code, 4324 int control_only, int *cwnd_full, int from_where, 4325 struct timeval *now, int *now_filled, int frag_point); 4326 4327 static void 4328 sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr, 4329 uint32_t val) 4330 { 4331 struct sctp_copy_all *ca; 4332 struct mbuf *m; 4333 int ret = 0; 4334 int added_control = 0; 4335 int un_sent, do_chunk_output = 1; 4336 struct sctp_association *asoc; 4337 4338 ca = (struct sctp_copy_all *)ptr; 4339 if (ca->m == NULL) { 4340 return; 4341 } 4342 if (ca->inp != inp) { 4343 /* TSNH */ 4344 return; 4345 } 4346 if ((ca->m) && ca->sndlen) { 4347 m = SCTP_M_COPYM(ca->m, 0, M_COPYALL, M_DONTWAIT); 4348 if (m == NULL) { 4349 /* can't copy so we are done */ 4350 ca->cnt_failed++; 4351 return; 4352 } 4353 } else { 4354 m = NULL; 4355 } 4356 SCTP_TCB_LOCK_ASSERT(stcb); 4357 if (ca->sndrcv.sinfo_flags & SCTP_ABORT) { 4358 /* Abort this assoc with m as the user defined reason */ 4359 if (m) { 4360 struct sctp_paramhdr *ph; 4361 4362 SCTP_BUF_PREPEND(m, sizeof(struct sctp_paramhdr), M_DONTWAIT); 4363 if (m) { 4364 ph = mtod(m, struct sctp_paramhdr *); 4365 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); 4366 ph->param_length = htons(ca->sndlen); 4367 } 4368 /* 4369 * We add one here to keep the assoc from 4370 * dis-appearing on us. 4371 */ 4372 atomic_add_int(&stcb->asoc.refcnt, 1); 4373 sctp_abort_an_association(inp, stcb, 4374 SCTP_RESPONSE_TO_USER_REQ, 4375 m); 4376 /* 4377 * sctp_abort_an_association calls sctp_free_asoc() 4378 * free association will NOT free it since we 4379 * incremented the refcnt .. 
we do this to prevent 4380 * it being freed and things getting tricky since we 4381 * could end up (from free_asoc) calling inpcb_free 4382 * which would get a recursive lock call to the 4383 * iterator lock.. But as a consequence of that the 4384 * stcb will return to us un-locked.. since 4385 * free_asoc returns with either no TCB or the TCB 4386 * unlocked, we must relock.. to unlock in the 4387 * iterator timer :-0 4388 */ 4389 SCTP_TCB_LOCK(stcb); 4390 atomic_add_int(&stcb->asoc.refcnt, -1); 4391 goto no_chunk_output; 4392 } 4393 } else { 4394 if (m) { 4395 ret = sctp_msg_append(stcb, stcb->asoc.primary_destination, m, 4396 &ca->sndrcv, 1); 4397 } 4398 asoc = &stcb->asoc; 4399 if (ca->sndrcv.sinfo_flags & SCTP_EOF) { 4400 /* shutdown this assoc */ 4401 if (TAILQ_EMPTY(&asoc->send_queue) && 4402 TAILQ_EMPTY(&asoc->sent_queue) && 4403 (asoc->stream_queue_cnt == 0)) { 4404 if (asoc->locked_on_sending) { 4405 goto abort_anyway; 4406 } 4407 /* 4408 * there is nothing queued to send, so I'm 4409 * done... 4410 */ 4411 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) && 4412 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) && 4413 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { 4414 /* 4415 * only send SHUTDOWN the first time 4416 * through 4417 */ 4418 sctp_send_shutdown(stcb, stcb->asoc.primary_destination); 4419 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) { 4420 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4421 } 4422 asoc->state = SCTP_STATE_SHUTDOWN_SENT; 4423 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, 4424 asoc->primary_destination); 4425 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, 4426 asoc->primary_destination); 4427 added_control = 1; 4428 do_chunk_output = 0; 4429 } 4430 } else { 4431 /* 4432 * we still got (or just got) data to send, 4433 * so set SHUTDOWN_PENDING 4434 */ 4435 /* 4436 * XXX sockets draft says that SCTP_EOF 4437 * should be sent with no data. 
currently, 4438 * we will allow user data to be sent first 4439 * and move to SHUTDOWN-PENDING 4440 */ 4441 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) && 4442 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) && 4443 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { 4444 if (asoc->locked_on_sending) { 4445 /* 4446 * Locked to send out the 4447 * data 4448 */ 4449 struct sctp_stream_queue_pending *sp; 4450 4451 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead); 4452 if (sp) { 4453 if ((sp->length == 0) && (sp->msg_is_complete == 0)) 4454 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 4455 } 4456 } 4457 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING; 4458 if (TAILQ_EMPTY(&asoc->send_queue) && 4459 TAILQ_EMPTY(&asoc->sent_queue) && 4460 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { 4461 abort_anyway: 4462 atomic_add_int(&stcb->asoc.refcnt, 1); 4463 sctp_abort_an_association(stcb->sctp_ep, stcb, 4464 SCTP_RESPONSE_TO_USER_REQ, 4465 NULL); 4466 atomic_add_int(&stcb->asoc.refcnt, -1); 4467 goto no_chunk_output; 4468 } 4469 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, 4470 asoc->primary_destination); 4471 } 4472 } 4473 4474 } 4475 } 4476 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) + 4477 ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) * sizeof(struct sctp_data_chunk))); 4478 4479 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) && 4480 (stcb->asoc.total_flight > 0) && 4481 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) 4482 ) { 4483 do_chunk_output = 0; 4484 } 4485 if (do_chunk_output) 4486 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND); 4487 else if (added_control) { 4488 int num_out = 0, reason = 0, cwnd_full = 0, now_filled = 0; 4489 struct timeval now; 4490 int frag_point; 4491 4492 frag_point = sctp_get_frag_point(stcb, &stcb->asoc); 4493 sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out, 4494 &reason, 1, &cwnd_full, 1, 
&now, &now_filled, frag_point); 4495 } 4496 no_chunk_output: 4497 if (ret) { 4498 ca->cnt_failed++; 4499 } else { 4500 ca->cnt_sent++; 4501 } 4502 } 4503 4504 static void 4505 sctp_sendall_completes(void *ptr, uint32_t val) 4506 { 4507 struct sctp_copy_all *ca; 4508 4509 ca = (struct sctp_copy_all *)ptr; 4510 /* 4511 * Do a notify here? Kacheong suggests that the notify be done at 4512 * the send time.. so you would push up a notification if any send 4513 * failed. Don't know if this is feasable since the only failures we 4514 * have is "memory" related and if you cannot get an mbuf to send 4515 * the data you surely can't get an mbuf to send up to notify the 4516 * user you can't send the data :-> 4517 */ 4518 4519 /* now free everything */ 4520 sctp_m_freem(ca->m); 4521 SCTP_FREE(ca); 4522 } 4523 4524 4525 #define MC_ALIGN(m, len) do { \ 4526 SCTP_BUF_RESV_UF(m, ((MCLBYTES - (len)) & ~(sizeof(long) - 1)); \ 4527 } while (0) 4528 4529 4530 4531 static struct mbuf * 4532 sctp_copy_out_all(struct uio *uio, int len) 4533 { 4534 struct mbuf *ret, *at; 4535 int left, willcpy, cancpy, error; 4536 4537 ret = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_WAIT, 1, MT_DATA); 4538 if (ret == NULL) { 4539 /* TSNH */ 4540 return (NULL); 4541 } 4542 left = len; 4543 SCTP_BUF_LEN(ret) = 0; 4544 /* save space for the data chunk header */ 4545 cancpy = M_TRAILINGSPACE(ret); 4546 willcpy = min(cancpy, left); 4547 at = ret; 4548 while (left > 0) { 4549 /* Align data to the end */ 4550 error = uiomove(mtod(at, caddr_t), willcpy, uio); 4551 if (error) { 4552 err_out_now: 4553 sctp_m_freem(at); 4554 return (NULL); 4555 } 4556 SCTP_BUF_LEN(at) = willcpy; 4557 SCTP_BUF_NEXT_PKT(at) = SCTP_BUF_NEXT(at) = 0; 4558 left -= willcpy; 4559 if (left > 0) { 4560 SCTP_BUF_NEXT(at) = sctp_get_mbuf_for_msg(left, 0, M_WAIT, 1, MT_DATA); 4561 if (SCTP_BUF_NEXT(at) == NULL) { 4562 goto err_out_now; 4563 } 4564 at = SCTP_BUF_NEXT(at); 4565 SCTP_BUF_LEN(at) = 0; 4566 cancpy = M_TRAILINGSPACE(at); 4567 willcpy = 
min(cancpy, left); 4568 } 4569 } 4570 return (ret); 4571 } 4572 4573 static int 4574 sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m, 4575 struct sctp_sndrcvinfo *srcv) 4576 { 4577 int ret; 4578 struct sctp_copy_all *ca; 4579 4580 SCTP_MALLOC(ca, struct sctp_copy_all *, sizeof(struct sctp_copy_all), 4581 "CopyAll"); 4582 if (ca == NULL) { 4583 sctp_m_freem(m); 4584 return (ENOMEM); 4585 } 4586 memset(ca, 0, sizeof(struct sctp_copy_all)); 4587 4588 ca->inp = inp; 4589 ca->sndrcv = *srcv; 4590 /* 4591 * take off the sendall flag, it would be bad if we failed to do 4592 * this :-0 4593 */ 4594 ca->sndrcv.sinfo_flags &= ~SCTP_SENDALL; 4595 /* get length and mbuf chain */ 4596 if (uio) { 4597 ca->sndlen = uio->uio_resid; 4598 ca->m = sctp_copy_out_all(uio, ca->sndlen); 4599 if (ca->m == NULL) { 4600 SCTP_FREE(ca); 4601 return (ENOMEM); 4602 } 4603 } else { 4604 /* Gather the length of the send */ 4605 struct mbuf *mat; 4606 4607 mat = m; 4608 ca->sndlen = 0; 4609 while (m) { 4610 ca->sndlen += SCTP_BUF_LEN(m); 4611 m = SCTP_BUF_NEXT(m); 4612 } 4613 ca->m = m; 4614 } 4615 ret = sctp_initiate_iterator(NULL, sctp_sendall_iterator, 4616 SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES, SCTP_ASOC_ANY_STATE, 4617 (void *)ca, 0, 4618 sctp_sendall_completes, inp, 1); 4619 if (ret) { 4620 #ifdef SCTP_DEBUG 4621 printf("Failed to initiate iterator for sendall\n"); 4622 #endif 4623 SCTP_FREE(ca); 4624 return (EFAULT); 4625 } 4626 return (0); 4627 } 4628 4629 4630 void 4631 sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc) 4632 { 4633 struct sctp_tmit_chunk *chk, *nchk; 4634 4635 chk = TAILQ_FIRST(&asoc->control_send_queue); 4636 while (chk) { 4637 nchk = TAILQ_NEXT(chk, sctp_next); 4638 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) { 4639 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next); 4640 if (chk->data) { 4641 sctp_m_freem(chk->data); 4642 chk->data = NULL; 4643 } 4644 asoc->ctrl_queue_cnt--; 4645 if (chk->whoTo) 4646 
				sctp_free_remote_addr(chk->whoTo);
			sctp_free_a_chunk(stcb, chk);
		}
		chk = nchk;
	}
}

/*
 * Remove a queued-but-unsent ASCONF chunk from the control send queue
 * (the comment below notes at most one is ever queued) and release its
 * data, destination reference and chunk structure.
 */
void
sctp_toss_old_asconf(struct sctp_tcb *stcb)
{
	struct sctp_association *asoc;
	struct sctp_tmit_chunk *chk, *chk_tmp;

	asoc = &stcb->asoc;
	for (chk = TAILQ_FIRST(&asoc->control_send_queue); chk != NULL;
	    chk = chk_tmp) {
		/* get next chk */
		chk_tmp = TAILQ_NEXT(chk, sctp_next);
		/* find SCTP_ASCONF chunk in queue (only one ever in queue) */
		if (chk->rec.chunk_id.id == SCTP_ASCONF) {
			TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			asoc->ctrl_queue_cnt--;
			if (chk->whoTo)
				sctp_free_remote_addr(chk->whoTo);
			sctp_free_a_chunk(stcb, chk);
		}
	}
}


/*
 * Move 'bundle_at' chunks from the send_queue to the sent_queue in
 * TSN order after they have been handed to the lower layer: stamps the
 * send time, marks window probes, and charges flight size / peer rwnd.
 */
static __inline void
sctp_clean_up_datalist(struct sctp_tcb *stcb,

    struct sctp_association *asoc,
    struct sctp_tmit_chunk **data_list,
    int bundle_at,
    struct sctp_nets *net)
{
	int i;
	struct sctp_tmit_chunk *tp1;

	for (i = 0; i < bundle_at; i++) {
		/* off of the send queue */
		if (i) {
			/*
			 * Any chunk NOT 0 you zap the time chunk 0 gets
			 * zapped or set based on if a RTO measurment is
			 * needed.
4698 */ 4699 data_list[i]->do_rtt = 0; 4700 } 4701 /* record time */ 4702 data_list[i]->sent_rcv_time = net->last_sent_time; 4703 data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.TSN_seq; 4704 TAILQ_REMOVE(&asoc->send_queue, 4705 data_list[i], 4706 sctp_next); 4707 /* on to the sent queue */ 4708 tp1 = TAILQ_LAST(&asoc->sent_queue, sctpchunk_listhead); 4709 if ((tp1) && (compare_with_wrap(tp1->rec.data.TSN_seq, 4710 data_list[i]->rec.data.TSN_seq, MAX_TSN))) { 4711 struct sctp_tmit_chunk *tpp; 4712 4713 /* need to move back */ 4714 back_up_more: 4715 tpp = TAILQ_PREV(tp1, sctpchunk_listhead, sctp_next); 4716 if (tpp == NULL) { 4717 TAILQ_INSERT_BEFORE(tp1, data_list[i], sctp_next); 4718 goto all_done; 4719 } 4720 tp1 = tpp; 4721 if (compare_with_wrap(tp1->rec.data.TSN_seq, 4722 data_list[i]->rec.data.TSN_seq, MAX_TSN)) { 4723 goto back_up_more; 4724 } 4725 TAILQ_INSERT_AFTER(&asoc->sent_queue, tp1, data_list[i], sctp_next); 4726 } else { 4727 TAILQ_INSERT_TAIL(&asoc->sent_queue, 4728 data_list[i], 4729 sctp_next); 4730 } 4731 all_done: 4732 /* This does not lower until the cum-ack passes it */ 4733 asoc->sent_queue_cnt++; 4734 asoc->send_queue_cnt--; 4735 if ((asoc->peers_rwnd <= 0) && 4736 (asoc->total_flight == 0) && 4737 (bundle_at == 1)) { 4738 /* Mark the chunk as being a window probe */ 4739 SCTP_STAT_INCR(sctps_windowprobed); 4740 data_list[i]->rec.data.state_flags |= SCTP_WINDOW_PROBE; 4741 } else { 4742 data_list[i]->rec.data.state_flags &= ~SCTP_WINDOW_PROBE; 4743 } 4744 #ifdef SCTP_AUDITING_ENABLED 4745 sctp_audit_log(0xC2, 3); 4746 #endif 4747 data_list[i]->sent = SCTP_DATAGRAM_SENT; 4748 data_list[i]->snd_count = 1; 4749 data_list[i]->rec.data.chunk_was_revoked = 0; 4750 #ifdef SCTP_FLIGHT_LOGGING 4751 sctp_misc_ints(SCTP_FLIGHT_LOG_UP, 4752 data_list[i]->whoTo->flight_size, 4753 data_list[i]->book_size, 4754 (uintptr_t) stcb, 4755 data_list[i]->rec.data.TSN_seq); 4756 #endif 4757 net->flight_size += data_list[i]->book_size; 4758 
asoc->total_flight += data_list[i]->book_size; 4759 asoc->total_flight_count++; 4760 #ifdef SCTP_LOG_RWND 4761 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND, 4762 asoc->peers_rwnd, data_list[i]->send_size, sctp_peer_chunk_oh); 4763 #endif 4764 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd, 4765 (uint32_t) (data_list[i]->send_size + sctp_peer_chunk_oh)); 4766 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4767 /* SWS sender side engages */ 4768 asoc->peers_rwnd = 0; 4769 } 4770 } 4771 } 4772 4773 static __inline void 4774 sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc) 4775 { 4776 struct sctp_tmit_chunk *chk, *nchk; 4777 4778 for (chk = TAILQ_FIRST(&asoc->control_send_queue); 4779 chk; chk = nchk) { 4780 nchk = TAILQ_NEXT(chk, sctp_next); 4781 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) || 4782 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) || 4783 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) || 4784 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) || 4785 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) || 4786 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) || 4787 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) || 4788 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) || 4789 (chk->rec.chunk_id.id == SCTP_ECN_CWR) || 4790 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) { 4791 /* Stray chunks must be cleaned up */ 4792 clean_up_anyway: 4793 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next); 4794 if (chk->data) { 4795 sctp_m_freem(chk->data); 4796 chk->data = NULL; 4797 } 4798 asoc->ctrl_queue_cnt--; 4799 sctp_free_remote_addr(chk->whoTo); 4800 sctp_free_a_chunk(stcb, chk); 4801 } else if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) { 4802 /* special handling, we must look into the param */ 4803 if (chk != asoc->str_reset) { 4804 goto clean_up_anyway; 4805 } 4806 } 4807 } 4808 } 4809 4810 extern int sctp_min_split_point; 4811 4812 static __inline int 4813 sctp_can_we_split_this(struct sctp_tcb *stcb, 4814 struct sctp_stream_queue_pending *sp, 
4815 int goal_mtu, int frag_point, int eeor_on) 4816 { 4817 /* 4818 * Make a decision on if I should split a msg into multiple parts. 4819 */ 4820 if (goal_mtu < sctp_min_split_point) { 4821 /* you don't want enough */ 4822 return (0); 4823 } 4824 if (sp->msg_is_complete == 0) { 4825 if (eeor_on) { 4826 /* 4827 * If we are doing EEOR we need to always send it if 4828 * its the entire thing. 4829 */ 4830 if (goal_mtu >= sp->length) 4831 return (sp->length); 4832 } else { 4833 if (goal_mtu >= sp->length) { 4834 /* 4835 * If we cannot fill the amount needed there 4836 * is no sense of splitting the chunk. 4837 */ 4838 return (0); 4839 } 4840 } 4841 /* 4842 * If we reach here sp->length is larger than the goal_mtu. 4843 * Do we wish to split it for the sake of packet putting 4844 * together? 4845 */ 4846 if (goal_mtu >= min(sctp_min_split_point, stcb->asoc.smallest_mtu)) { 4847 /* Its ok to split it */ 4848 return (min(goal_mtu, frag_point)); 4849 } 4850 } else { 4851 /* We can always split a complete message to make it fit */ 4852 if (goal_mtu >= sp->length) 4853 /* Take it all */ 4854 return (sp->length); 4855 4856 return (min(goal_mtu, frag_point)); 4857 } 4858 /* Nope, can't split */ 4859 return (0); 4860 4861 } 4862 4863 static int 4864 sctp_move_to_outqueue(struct sctp_tcb *stcb, struct sctp_nets *net, 4865 struct sctp_stream_out *strq, 4866 int goal_mtu, 4867 int frag_point, 4868 int *locked, 4869 int *giveup, 4870 int eeor_mode) 4871 { 4872 /* Move from the stream to the send_queue keeping track of the total */ 4873 struct sctp_association *asoc; 4874 struct sctp_stream_queue_pending *sp; 4875 struct sctp_tmit_chunk *chk; 4876 struct sctp_data_chunk *dchkh; 4877 int to_move; 4878 uint8_t rcv_flags = 0; 4879 uint8_t some_taken; 4880 uint8_t took_all = 0; 4881 4882 SCTP_TCB_LOCK_ASSERT(stcb); 4883 asoc = &stcb->asoc; 4884 sp = TAILQ_FIRST(&strq->outqueue); 4885 if (sp == NULL) { 4886 *locked = 0; 4887 SCTP_TCB_SEND_LOCK(stcb); 4888 if (strq->last_msg_incomplete) 
{ 4889 printf("Huh? Stream:%d lm_in_c=%d but queue is NULL\n", 4890 strq->stream_no, strq->last_msg_incomplete); 4891 strq->last_msg_incomplete = 0; 4892 } 4893 SCTP_TCB_SEND_UNLOCK(stcb); 4894 return (0); 4895 } 4896 SCTP_TCB_SEND_LOCK(stcb); 4897 if ((sp->length == 0) && (sp->msg_is_complete == 0)) { 4898 /* Must wait for more data, must be last msg */ 4899 *locked = 1; 4900 *giveup = 1; 4901 SCTP_TCB_SEND_UNLOCK(stcb); 4902 return (0); 4903 } else if (sp->length == 0) { 4904 /* This should not happen */ 4905 panic("sp length is 0?"); 4906 } 4907 some_taken = sp->some_taken; 4908 if ((goal_mtu >= sp->length) && (sp->msg_is_complete)) { 4909 /* It all fits and its a complete msg, no brainer */ 4910 to_move = min(sp->length, frag_point); 4911 if (to_move == sp->length) { 4912 /* Getting it all */ 4913 if (sp->some_taken) { 4914 rcv_flags |= SCTP_DATA_LAST_FRAG; 4915 } else { 4916 rcv_flags |= SCTP_DATA_NOT_FRAG; 4917 } 4918 } else { 4919 /* Not getting it all, frag point overrides */ 4920 if (sp->some_taken == 0) { 4921 rcv_flags |= SCTP_DATA_FIRST_FRAG; 4922 } 4923 sp->some_taken = 1; 4924 } 4925 } else { 4926 to_move = sctp_can_we_split_this(stcb, sp, goal_mtu, 4927 frag_point, eeor_mode); 4928 if (to_move) { 4929 if (to_move >= sp->length) { 4930 to_move = sp->length; 4931 } 4932 if (sp->some_taken == 0) { 4933 rcv_flags |= SCTP_DATA_FIRST_FRAG; 4934 } 4935 sp->some_taken = 1; 4936 } else { 4937 if (sp->some_taken) { 4938 *locked = 1; 4939 } 4940 *giveup = 1; 4941 SCTP_TCB_SEND_UNLOCK(stcb); 4942 return (0); 4943 } 4944 } 4945 SCTP_TCB_SEND_UNLOCK(stcb); 4946 /* If we reach here, we can copy out a chunk */ 4947 sctp_alloc_a_chunk(stcb, chk); 4948 if (chk == NULL) { 4949 /* No chunk memory */ 4950 out_gu: 4951 *giveup = 1; 4952 return (0); 4953 } 4954 /* 4955 * Setup for unordered if needed by looking at the user sent info 4956 * flags. 
4957 */ 4958 if (sp->sinfo_flags & SCTP_UNORDERED) { 4959 rcv_flags |= SCTP_DATA_UNORDERED; 4960 } 4961 /* clear out the chunk before setting up */ 4962 memset(chk, sizeof(*chk), 0); 4963 chk->rec.data.rcv_flags = rcv_flags; 4964 SCTP_TCB_SEND_LOCK(stcb); 4965 if (SCTP_BUF_IS_EXTENDED(sp->data)) { 4966 chk->copy_by_ref = 1; 4967 } else { 4968 chk->copy_by_ref = 0; 4969 } 4970 if (to_move >= sp->length) { 4971 /* we can steal the whole thing */ 4972 chk->data = sp->data; 4973 chk->last_mbuf = sp->tail_mbuf; 4974 /* register the stealing */ 4975 sp->data = sp->tail_mbuf = NULL; 4976 took_all = 1; 4977 } else { 4978 struct mbuf *m; 4979 4980 chk->data = SCTP_M_COPYM(sp->data, 0, to_move, M_DONTWAIT); 4981 chk->last_mbuf = NULL; 4982 if (chk->data == NULL) { 4983 sp->some_taken = some_taken; 4984 sctp_free_a_chunk(stcb, chk); 4985 SCTP_TCB_SEND_UNLOCK(stcb); 4986 goto out_gu; 4987 } 4988 /* Pull off the data */ 4989 m_adj(sp->data, to_move); 4990 /* Now lets work our way down and compact it */ 4991 m = sp->data; 4992 while (m && (SCTP_BUF_LEN(m) == 0)) { 4993 sp->data = SCTP_BUF_NEXT(m); 4994 SCTP_BUF_NEXT(m) = NULL; 4995 if (sp->tail_mbuf == m) { 4996 /* freeing tail */ 4997 sp->tail_mbuf = sp->data; 4998 } 4999 sctp_m_free(m); 5000 m = sp->data; 5001 } 5002 } 5003 if (to_move > sp->length) { 5004 panic("Huh, how can to_move be larger?"); 5005 } else { 5006 sp->length -= to_move; 5007 } 5008 5009 if (M_LEADINGSPACE(chk->data) < sizeof(struct sctp_data_chunk)) { 5010 /* Not enough room for a chunk header, get some */ 5011 struct mbuf *m; 5012 5013 m = sctp_get_mbuf_for_msg(1, 0, M_DONTWAIT, 0, MT_DATA); 5014 if (m == NULL) { 5015 /* 5016 * we're in trouble here. _PREPEND below will free 5017 * all the data if there is no leading space, so we 5018 * must put the data back and restore. 
5019 */ 5020 if (took_all) { 5021 /* unsteal the data */ 5022 sp->data = chk->data; 5023 sp->tail_mbuf = chk->last_mbuf; 5024 } else { 5025 struct mbuf *m; 5026 5027 /* reassemble the data */ 5028 m = sp->data; 5029 sp->data = chk->data; 5030 SCTP_BUF_NEXT(sp->data) = m; 5031 } 5032 sp->some_taken = some_taken; 5033 sp->length += to_move; 5034 chk->data = NULL; 5035 sctp_free_a_chunk(stcb, chk); 5036 SCTP_TCB_SEND_UNLOCK(stcb); 5037 goto out_gu; 5038 } else { 5039 SCTP_BUF_LEN(m) = 0; 5040 SCTP_BUF_NEXT(m) = chk->data; 5041 chk->data = m; 5042 M_ALIGN(chk->data, 4); 5043 } 5044 } 5045 SCTP_BUF_PREPEND(chk->data, sizeof(struct sctp_data_chunk), M_DONTWAIT); 5046 if (chk->data == NULL) { 5047 /* HELP */ 5048 sctp_free_a_chunk(stcb, chk); 5049 SCTP_TCB_SEND_UNLOCK(stcb); 5050 goto out_gu; 5051 } 5052 sctp_snd_sb_alloc(stcb, sizeof(struct sctp_data_chunk)); 5053 chk->book_size = chk->send_size = (to_move + 5054 sizeof(struct sctp_data_chunk)); 5055 chk->book_size_scale = 0; 5056 chk->sent = SCTP_DATAGRAM_UNSENT; 5057 5058 /* 5059 * get last_mbuf and counts of mb useage This is ugly but hopefully 5060 * its only one mbuf. 
5061 */ 5062 if (chk->last_mbuf == NULL) { 5063 chk->last_mbuf = chk->data; 5064 while (SCTP_BUF_NEXT(chk->last_mbuf) != NULL) { 5065 chk->last_mbuf = SCTP_BUF_NEXT(chk->last_mbuf); 5066 } 5067 } 5068 chk->flags = 0; 5069 chk->asoc = &stcb->asoc; 5070 chk->pad_inplace = 0; 5071 chk->no_fr_allowed = 0; 5072 chk->rec.data.stream_seq = sp->strseq; 5073 chk->rec.data.stream_number = sp->stream; 5074 chk->rec.data.payloadtype = sp->ppid; 5075 chk->rec.data.context = sp->context; 5076 chk->rec.data.doing_fast_retransmit = 0; 5077 chk->rec.data.ect_nonce = 0; /* ECN Nonce */ 5078 5079 chk->rec.data.timetodrop = sp->ts; 5080 chk->flags = sp->act_flags; 5081 chk->addr_over = sp->addr_over; 5082 5083 chk->whoTo = net; 5084 atomic_add_int(&chk->whoTo->ref_count, 1); 5085 5086 chk->rec.data.TSN_seq = atomic_fetchadd_int(&asoc->sending_seq, 1); 5087 #ifdef SCTP_LOG_SENDING_STR 5088 sctp_misc_ints(SCTP_STRMOUT_LOG_SEND, 5089 (uintptr_t) stcb, (uintptr_t) sp, 5090 (uint32_t) ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq), 5091 chk->rec.data.TSN_seq); 5092 #endif 5093 5094 dchkh = mtod(chk->data, struct sctp_data_chunk *); 5095 /* 5096 * Put the rest of the things in place now. Size was done earlier in 5097 * previous loop prior to padding. 
5098 */ 5099 5100 #ifdef SCTP_ASOCLOG_OF_TSNS 5101 asoc->out_tsnlog[asoc->tsn_out_at].tsn = chk->rec.data.TSN_seq; 5102 asoc->out_tsnlog[asoc->tsn_out_at].strm = chk->rec.data.stream_number; 5103 asoc->out_tsnlog[asoc->tsn_out_at].seq = chk->rec.data.stream_seq; 5104 asoc->tsn_out_at++; 5105 if (asoc->tsn_out_at >= SCTP_TSN_LOG_SIZE) { 5106 asoc->tsn_out_at = 0; 5107 } 5108 #endif 5109 5110 dchkh->ch.chunk_type = SCTP_DATA; 5111 dchkh->ch.chunk_flags = chk->rec.data.rcv_flags; 5112 dchkh->dp.tsn = htonl(chk->rec.data.TSN_seq); 5113 dchkh->dp.stream_id = htons(strq->stream_no); 5114 dchkh->dp.stream_sequence = htons(chk->rec.data.stream_seq); 5115 dchkh->dp.protocol_id = chk->rec.data.payloadtype; 5116 dchkh->ch.chunk_length = htons(chk->send_size); 5117 /* Now advance the chk->send_size by the actual pad needed. */ 5118 if (chk->send_size < SCTP_SIZE32(chk->book_size)) { 5119 /* need a pad */ 5120 struct mbuf *lm; 5121 int pads; 5122 5123 pads = SCTP_SIZE32(chk->book_size) - chk->send_size; 5124 if (sctp_pad_lastmbuf(chk->data, pads, chk->last_mbuf) == 0) { 5125 chk->pad_inplace = 1; 5126 } 5127 if ((lm = SCTP_BUF_NEXT(chk->last_mbuf)) != NULL) { 5128 /* pad added an mbuf */ 5129 chk->last_mbuf = lm; 5130 } 5131 chk->send_size += pads; 5132 } 5133 /* We only re-set the policy if it is on */ 5134 if (sp->pr_sctp_on) { 5135 sctp_set_prsctp_policy(stcb, sp); 5136 } 5137 if (sp->msg_is_complete && (sp->length == 0)) { 5138 /* All done pull and kill the message */ 5139 asoc->stream_queue_cnt--; 5140 TAILQ_REMOVE(&strq->outqueue, sp, next); 5141 sctp_free_remote_addr(sp->net); 5142 if (sp->data) { 5143 sctp_m_freem(sp->data); 5144 sp->data = NULL; 5145 } 5146 sctp_free_a_strmoq(stcb, sp); 5147 5148 /* we can't be locked to it */ 5149 *locked = 0; 5150 stcb->asoc.locked_on_sending = NULL; 5151 } else { 5152 /* more to go, we are locked */ 5153 *locked = 1; 5154 } 5155 asoc->chunks_on_out_queue++; 5156 if (sp->pr_sctp_on) { 5157 asoc->pr_sctp_cnt++; 5158 chk->pr_sctp_on = 
1; 5159 } else { 5160 chk->pr_sctp_on = 0; 5161 } 5162 TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next); 5163 asoc->send_queue_cnt++; 5164 SCTP_TCB_SEND_UNLOCK(stcb); 5165 return (to_move); 5166 } 5167 5168 5169 static struct sctp_stream_out * 5170 sctp_select_a_stream(struct sctp_tcb *stcb, struct sctp_association *asoc) 5171 { 5172 struct sctp_stream_out *strq; 5173 5174 /* Find the next stream to use */ 5175 if (asoc->last_out_stream == NULL) { 5176 strq = asoc->last_out_stream = TAILQ_FIRST(&asoc->out_wheel); 5177 if (asoc->last_out_stream == NULL) { 5178 /* huh nothing on the wheel, TSNH */ 5179 return (NULL); 5180 } 5181 goto done_it; 5182 } 5183 strq = TAILQ_NEXT(asoc->last_out_stream, next_spoke); 5184 done_it: 5185 if (strq == NULL) { 5186 strq = asoc->last_out_stream = TAILQ_FIRST(&asoc->out_wheel); 5187 } 5188 return (strq); 5189 5190 } 5191 5192 static void 5193 sctp_fill_outqueue(struct sctp_tcb *stcb, 5194 struct sctp_nets *net, int frag_point, int eeor_mode) 5195 { 5196 struct sctp_association *asoc; 5197 struct sctp_stream_out *strq, *strqn; 5198 int goal_mtu, moved_how_much, total_moved = 0; 5199 int locked, giveup; 5200 struct sctp_stream_queue_pending *sp; 5201 5202 SCTP_TCB_LOCK_ASSERT(stcb); 5203 asoc = &stcb->asoc; 5204 #ifdef AF_INET6 5205 if (net->ro._l_addr.sin6.sin6_family == AF_INET6) { 5206 goal_mtu = net->mtu - SCTP_MIN_OVERHEAD; 5207 } else { 5208 /* ?? not sure what else to do */ 5209 goal_mtu = net->mtu - SCTP_MIN_V4_OVERHEAD; 5210 } 5211 #else 5212 goal_mtu = net->mtu - SCTP_MIN_OVERHEAD; 5213 mtu_fromwheel = 0; 5214 #endif 5215 /* Need an allowance for the data chunk header too */ 5216 goal_mtu -= sizeof(struct sctp_data_chunk); 5217 5218 /* must make even word boundary */ 5219 goal_mtu &= 0xfffffffc; 5220 if (asoc->locked_on_sending) { 5221 /* We are stuck on one stream until the message completes. 
*/ 5222 strqn = strq = asoc->locked_on_sending; 5223 locked = 1; 5224 } else { 5225 strqn = strq = sctp_select_a_stream(stcb, asoc); 5226 locked = 0; 5227 } 5228 5229 while ((goal_mtu > 0) && strq) { 5230 sp = TAILQ_FIRST(&strq->outqueue); 5231 /* 5232 * If CMT is off, we must validate that the stream in 5233 * question has the first item pointed towards are network 5234 * destionation requested by the caller. Note that if we 5235 * turn out to be locked to a stream (assigning TSN's then 5236 * we must stop, since we cannot look for another stream 5237 * with data to send to that destination). In CMT's case, by 5238 * skipping this check, we will send one data packet towards 5239 * the requested net. 5240 */ 5241 if (sp == NULL) { 5242 break; 5243 } 5244 if ((sp->net != net) && (sctp_cmt_on_off == 0)) { 5245 /* none for this network */ 5246 if (locked) { 5247 break; 5248 } else { 5249 strq = sctp_select_a_stream(stcb, asoc); 5250 if (strq == NULL) 5251 /* none left */ 5252 break; 5253 if (strqn == strq) { 5254 /* I have circled */ 5255 break; 5256 } 5257 continue; 5258 } 5259 } 5260 giveup = 0; 5261 moved_how_much = sctp_move_to_outqueue(stcb, net, strq, goal_mtu, frag_point, &locked, 5262 &giveup, eeor_mode); 5263 asoc->last_out_stream = strq; 5264 if (locked) { 5265 asoc->locked_on_sending = strq; 5266 if ((moved_how_much == 0) || (giveup)) 5267 /* no more to move for now */ 5268 break; 5269 } else { 5270 asoc->locked_on_sending = NULL; 5271 if (TAILQ_FIRST(&strq->outqueue) == NULL) { 5272 sctp_remove_from_wheel(stcb, asoc, strq); 5273 } 5274 if (giveup) { 5275 break; 5276 } 5277 strq = sctp_select_a_stream(stcb, asoc); 5278 if (strq == NULL) { 5279 break; 5280 } 5281 } 5282 total_moved += moved_how_much; 5283 goal_mtu -= moved_how_much; 5284 goal_mtu &= 0xfffffffc; 5285 } 5286 if (total_moved == 0) { 5287 if ((sctp_cmt_on_off == 0) && 5288 (net == stcb->asoc.primary_destination)) { 5289 /* ran dry for primary network net */ 5290 
SCTP_STAT_INCR(sctps_primary_randry); 5291 } else if (sctp_cmt_on_off) { 5292 /* ran dry with CMT on */ 5293 SCTP_STAT_INCR(sctps_cmt_randry); 5294 } 5295 } 5296 } 5297 5298 __inline void 5299 sctp_fix_ecn_echo(struct sctp_association *asoc) 5300 { 5301 struct sctp_tmit_chunk *chk; 5302 5303 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { 5304 if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) { 5305 chk->sent = SCTP_DATAGRAM_UNSENT; 5306 } 5307 } 5308 } 5309 5310 static void 5311 sctp_move_to_an_alt(struct sctp_tcb *stcb, 5312 struct sctp_association *asoc, 5313 struct sctp_nets *net) 5314 { 5315 struct sctp_tmit_chunk *chk; 5316 struct sctp_nets *a_net; 5317 5318 SCTP_TCB_LOCK_ASSERT(stcb); 5319 a_net = sctp_find_alternate_net(stcb, net, 0); 5320 if ((a_net != net) && 5321 ((a_net->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE)) { 5322 /* 5323 * We only proceed if a valid alternate is found that is not 5324 * this one and is reachable. Here we must move all chunks 5325 * queued in the send queue off of the destination address 5326 * to our alternate. 5327 */ 5328 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) { 5329 if (chk->whoTo == net) { 5330 /* Move the chunk to our alternate */ 5331 sctp_free_remote_addr(chk->whoTo); 5332 chk->whoTo = a_net; 5333 atomic_add_int(&a_net->ref_count, 1); 5334 } 5335 } 5336 } 5337 } 5338 5339 extern int sctp_early_fr; 5340 5341 int 5342 sctp_med_chunk_output(struct sctp_inpcb *inp, 5343 struct sctp_tcb *stcb, 5344 struct sctp_association *asoc, 5345 int *num_out, 5346 int *reason_code, 5347 int control_only, int *cwnd_full, int from_where, 5348 struct timeval *now, int *now_filled, int frag_point) 5349 { 5350 /* 5351 * Ok this is the generic chunk service queue. we must do the 5352 * following: - Service the stream queue that is next, moving any 5353 * message (note I must get a complete message i.e. 
int
sctp_med_chunk_output(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int *num_out,
    int *reason_code,
    int control_only, int *cwnd_full, int from_where,
    struct timeval *now, int *now_filled, int frag_point)
{
	/*
	 * Ok this is the generic chunk service queue. We must do the
	 * following: - Service the stream queue that is next, moving any
	 * message (note I must get a complete message i.e. FIRST/MIDDLE and
	 * LAST to the out queue in one pass) and assigning TSN's - Check to
	 * see if the cwnd/rwnd allows any output, if so we go ahead and
	 * formulate and send the low level chunks. Making sure to combine
	 * any control in the control chunk queue also.
	 *
	 * On return, *num_out holds the number of chunks reported as sent
	 * (suppressed while a COOKIE is outstanding), *reason_code a small
	 * integer describing why we stopped, and *cwnd_full a count of
	 * networks skipped for lack of cwnd.  Returns 0 on normal
	 * completion or an errno from the lower output layer.
	 */
	struct sctp_nets *net;
	struct mbuf *outchain, *endoutchain;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctphdr *shdr;

	/* temp arrays for unlinking */
	struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
	int no_fragmentflg, error;
	int one_chunk, hbflag;
	int asconf, cookie, no_out_cnt;
	int bundle_at, ctl_cnt, no_data_chunks, cwnd_full_ind, eeor_mode;
	unsigned int mtu, r_mtu, omtu, mx_mtu, to_out;
	struct sctp_nets *start_at, *old_startat = NULL, *send_start_at;
	int tsns_sent = 0;
	uint32_t auth_offset = 0;
	struct sctp_auth_chunk *auth = NULL;

	*num_out = 0;
	cwnd_full_ind = 0;

	/* Explicit EOR mode also applies while shutting down. */
	if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
	    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED) ||
	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
		eeor_mode = 1;
	} else {
		eeor_mode = 0;
	}
	ctl_cnt = no_out_cnt = asconf = cookie = 0;
	/*
	 * First lets prime the pump. For each destination, if there is room
	 * in the flight size, attempt to pull an MTU's worth out of the
	 * stream queues into the general send_queue
	 */
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xC2, 2);
#endif
	SCTP_TCB_LOCK_ASSERT(stcb);
	hbflag = 0;
	if ((control_only) || (asoc->stream_reset_outstanding))
		no_data_chunks = 1;
	else
		no_data_chunks = 0;

	/* Nothing possible to send? */
	if (TAILQ_EMPTY(&asoc->control_send_queue) &&
	    TAILQ_EMPTY(&asoc->send_queue) &&
	    TAILQ_EMPTY(&asoc->out_wheel)) {
		*reason_code = 9;
		return (0);
	}
	if (asoc->peers_rwnd == 0) {
		/* No room in peers rwnd */
		*cwnd_full = 1;
		*reason_code = 1;
		if (asoc->total_flight > 0) {
			/* we are allowed one chunk in flight */
			no_data_chunks = 1;
		}
	}
	/* --- Phase 1: fill the send_queue from the stream wheel --- */
	if ((no_data_chunks == 0) && (!TAILQ_EMPTY(&asoc->out_wheel))) {
		if (sctp_cmt_on_off) {
			/*
			 * for CMT we start at the next one past the one we
			 * last added data to.
			 */
			if (TAILQ_FIRST(&asoc->send_queue) != NULL) {
				goto skip_the_fill_from_streams;
			}
			if (asoc->last_net_data_came_from) {
				net = TAILQ_NEXT(asoc->last_net_data_came_from, sctp_next);
				if (net == NULL) {
					net = TAILQ_FIRST(&asoc->nets);
				}
			} else {
				/* back to start */
				net = TAILQ_FIRST(&asoc->nets);
			}

		} else {
			net = asoc->primary_destination;
			if (net == NULL) {
				/* TSNH */
				net = TAILQ_FIRST(&asoc->nets);
			}
		}
		start_at = net;
one_more_time:
		for (; net != NULL; net = TAILQ_NEXT(net, sctp_next)) {
			if (old_startat && (old_startat == net)) {
				/* wrapped all the way around */
				break;
			}
			if ((sctp_cmt_on_off == 0) && (net->ref_count < 2)) {
				/* nothing can be in queue for this guy */
				continue;
			}
			if (net->flight_size >= net->cwnd) {
				/* skip this network, no room */
				cwnd_full_ind++;
				continue;
			}
			/*
			 * @@@ JRI : this for loop we are in takes in each
			 * net, if it's got space in cwnd and has data sent
			 * to it (when CMT is off) then it calls
			 * sctp_fill_outqueue for the net. This gets data on
			 * the send queue for that network.
			 *
			 * In sctp_fill_outqueue TSN's are assigned and data is
			 * copied out of the stream buffers. Note mostly
			 * copy by reference (we hope).
			 */
#ifdef SCTP_CWND_LOGGING
			sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FILL_OUTQ_CALLED);
#endif
			sctp_fill_outqueue(stcb, net, frag_point, eeor_mode);
		}
		if (start_at != TAILQ_FIRST(&asoc->nets)) {
			/* got to pick up the beginning stuff. */
			old_startat = start_at;
			start_at = net = TAILQ_FIRST(&asoc->nets);
			goto one_more_time;
		}
	}
skip_the_fill_from_streams:
	*cwnd_full = cwnd_full_ind;
	/* now service each destination and send out what we can for it */
	/* Nothing to send? */
	if ((TAILQ_FIRST(&asoc->control_send_queue) == NULL) &&
	    (TAILQ_FIRST(&asoc->send_queue) == NULL)) {
		*reason_code = 8;
		return (0);
	}
	/* start the send loop at the net of the first queued data chunk */
	chk = TAILQ_FIRST(&asoc->send_queue);
	if (chk) {
		send_start_at = chk->whoTo;
	} else {
		send_start_at = TAILQ_FIRST(&asoc->nets);
	}
	old_startat = NULL;
	/* --- Phase 2: per-destination control + data transmission --- */
again_one_more_time:
	for (net = send_start_at; net != NULL; net = TAILQ_NEXT(net, sctp_next)) {
		/* how much can we send? */
		/* printf("Examine for sending net:%x\n", (uint32_t)net); */
		if (old_startat && (old_startat == net)) {
			/* through list completely. */
			break;
		}
		tsns_sent = 0;
		if (net->ref_count < 2) {
			/*
			 * Ref-count of 1 so we cannot have data or control
			 * queued to this address. Skip it.
			 */
			continue;
		}
		ctl_cnt = bundle_at = 0;
		endoutchain = outchain = NULL;
		no_fragmentflg = 1;
		one_chunk = 0;

		if ((net->ro.ro_rt) && (net->ro.ro_rt->rt_ifp)) {
			/*
			 * if we have a route and an ifp check to see if we
			 * have room to send to this guy
			 */
			struct ifnet *ifp;

			ifp = net->ro.ro_rt->rt_ifp;
			if ((ifp->if_snd.ifq_len + 2) >= ifp->if_snd.ifq_maxlen) {
				SCTP_STAT_INCR(sctps_ifnomemqueued);
#ifdef SCTP_LOG_MAXBURST
				sctp_log_maxburst(stcb, net, ifp->if_snd.ifq_len, ifp->if_snd.ifq_maxlen, SCTP_MAX_IFP_APPLIED);
#endif
				continue;
			}
		}
		/* per-family MTU less IP + SCTP common header */
		if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
			mtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
		} else {
			mtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
		}
		mx_mtu = mtu;
		to_out = 0;
		if (mtu > asoc->peers_rwnd) {
			if (asoc->total_flight > 0) {
				/* We have a packet in flight somewhere */
				r_mtu = asoc->peers_rwnd;
			} else {
				/* We are always allowed to send one MTU out */
				one_chunk = 1;
				r_mtu = mtu;
			}
		} else {
			r_mtu = mtu;
		}
		/************************/
		/* Control transmission */
		/************************/
		/* Now first lets go through the control queue */
		for (chk = TAILQ_FIRST(&asoc->control_send_queue);
		    chk; chk = nchk) {
			nchk = TAILQ_NEXT(chk, sctp_next);
			if (chk->whoTo != net) {
				/*
				 * No, not sent to the network we are
				 * looking at
				 */
				continue;
			}
			if (chk->data == NULL) {
				continue;
			}
			if (chk->sent != SCTP_DATAGRAM_UNSENT) {
				/*
				 * It must be unsent. Cookies and ASCONF's
				 * hang around but their timers will force
				 * when marked for resend.
				 */
				continue;
			}
			/*
			 * if no AUTH is yet included and this chunk
			 * requires it, make sure to account for it. We
			 * don't apply the size until the AUTH chunk is
			 * actually added below in case there is no room for
			 * this chunk. NOTE: we overload the use of "omtu"
			 * here
			 */
			if ((auth == NULL) &&
			    sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
			    stcb->asoc.peer_auth_chunks)) {
				omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
			} else
				omtu = 0;
			/* Here we do NOT factor the r_mtu */
			if ((chk->send_size < (int)(mtu - omtu)) ||
			    (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
				/*
				 * We probably should glom the mbuf chain
				 * from the chk->data for control but the
				 * problem is it becomes yet one more level
				 * of tracking to do if for some reason
				 * output fails. Then I have got to
				 * reconstruct the merged control chain.. el
				 * yucko.. for now we take the easy way and
				 * do the copy
				 */
				/*
				 * Add an AUTH chunk, if chunk requires it
				 * save the offset into the chain for AUTH
				 */
				if ((auth == NULL) &&
				    (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
				    stcb->asoc.peer_auth_chunks))) {
					outchain = sctp_add_auth_chunk(outchain,
					    &endoutchain,
					    &auth,
					    &auth_offset,
					    stcb,
					    chk->rec.chunk_id.id);
					SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
				}
				outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
				    (int)chk->rec.chunk_id.can_take_data,
				    chk->send_size, chk->copy_by_ref);
				if (outchain == NULL) {
					*reason_code = 8;
					return (ENOMEM);
				}
				SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
				/* update our MTU size */
				if (mtu > (chk->send_size + omtu))
					mtu -= (chk->send_size + omtu);
				else
					mtu = 0;
				to_out += (chk->send_size + omtu);
				/* Do clear IP_DF ? */
				if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
					no_fragmentflg = 0;
				}
				if (chk->rec.chunk_id.can_take_data)
					chk->data = NULL;
				/* Mark things to be removed, if needed */
				if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
				    (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
				    (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
				    (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
				    (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
				    (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
				    (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
				    (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
				    (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
				    (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {

					if (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST)
						hbflag = 1;
					/* remove these chunks at the end */
					if (chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) {
						/* turn off the timer */
						if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
							sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
							    inp, stcb, net, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_1);
						}
					}
					ctl_cnt++;
				} else {
					/*
					 * Other chunks, since they have
					 * timers running (i.e. COOKIE or
					 * ASCONF) we just "trust" that it
					 * gets sent or retransmitted.
					 */
					ctl_cnt++;
					if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
						cookie = 1;
						no_out_cnt = 1;
					} else if (chk->rec.chunk_id.id == SCTP_ASCONF) {
						/*
						 * set hb flag since we can
						 * use these for RTO
						 */
						hbflag = 1;
						asconf = 1;
					}
					chk->sent = SCTP_DATAGRAM_SENT;
					chk->snd_count++;
				}
				if (mtu == 0) {
					/*
					 * Ok we are out of room but we can
					 * output without effecting the
					 * flight size since this little guy
					 * is a control only packet.
					 */
					if (asconf) {
						sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
						asconf = 0;
					}
					if (cookie) {
						sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
						cookie = 0;
					}
					SCTP_BUF_PREPEND(outchain, sizeof(struct sctphdr), M_DONTWAIT);
					if (outchain == NULL) {
						/* no memory */
						error = ENOBUFS;
						goto error_out_again;
					}
					shdr = mtod(outchain, struct sctphdr *);
					shdr->src_port = inp->sctp_lport;
					shdr->dest_port = stcb->rport;
					shdr->v_tag = htonl(stcb->asoc.peer_vtag);
					shdr->checksum = 0;
					auth_offset += sizeof(struct sctphdr);
					if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
					    (struct sockaddr *)&net->ro._l_addr,
					    outchain, auth_offset, auth,
					    no_fragmentflg, 0, NULL, asconf))) {
						if (error == ENOBUFS) {
							asoc->ifp_had_enobuf = 1;
						}
						SCTP_STAT_INCR(sctps_lowlevelerr);
						if (from_where == 0) {
							SCTP_STAT_INCR(sctps_lowlevelerrusr);
						}
			error_out_again:
						/* error, could not output */
						if (hbflag) {
							if (*now_filled == 0) {
								SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
								*now_filled = 1;
								*now = net->last_sent_time;
							} else {
								net->last_sent_time = *now;
							}
							hbflag = 0;
						}
						if (error == EHOSTUNREACH) {
							/*
							 * Destination went
							 * unreachable
							 * during this send
							 */
							sctp_move_to_an_alt(stcb, asoc, net);
						}
						sctp_clean_up_ctl(stcb, asoc);
						*reason_code = 7;
						return (error);
					} else
						asoc->ifp_had_enobuf = 0;
					/* Only HB or ASCONF advances time */
					if (hbflag) {
						if (*now_filled == 0) {
							SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
							*now_filled = 1;
							*now = net->last_sent_time;
						} else {
							net->last_sent_time = *now;
						}
						hbflag = 0;
					}
					/*
					 * increase the number we sent, if a
					 * cookie is sent we don't tell them
					 * any was sent out.
					 */
					outchain = endoutchain = NULL;
					auth = NULL;
					auth_offset = 0;
					if (!no_out_cnt)
						*num_out += ctl_cnt;
					/* recalc a clean slate and setup */
					if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
						mtu = (net->mtu - SCTP_MIN_OVERHEAD);
					} else {
						mtu = (net->mtu - SCTP_MIN_V4_OVERHEAD);
					}
					to_out = 0;
					no_fragmentflg = 1;
				}
			}
		}
		/*********************/
		/* Data transmission */
		/*********************/
		/*
		 * if AUTH for DATA is required and no AUTH has been added
		 * yet, account for this in the mtu now... if no data can be
		 * bundled, this adjustment won't matter anyways since the
		 * packet will be going out...
		 */
		if ((auth == NULL) &&
		    sctp_auth_is_required_chunk(SCTP_DATA,
		    stcb->asoc.peer_auth_chunks)) {
			mtu -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
		}
		/* now lets add any data within the MTU constraints */
		if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
			if (net->mtu > (sizeof(struct ip) + sizeof(struct sctphdr)))
				omtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
			else
				omtu = 0;
		} else {
			if (net->mtu > (sizeof(struct ip6_hdr) + sizeof(struct sctphdr)))
				omtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
			else
				omtu = 0;
		}
		/* Data may go out while OPEN, or earlier when bundled with a COOKIE */
		if (((asoc->state & SCTP_STATE_OPEN) == SCTP_STATE_OPEN) ||
		    (cookie)) {
			for (chk = TAILQ_FIRST(&asoc->send_queue); chk; chk = nchk) {
				if (no_data_chunks) {
					/* let only control go out */
					*reason_code = 1;
					break;
				}
				if (net->flight_size >= net->cwnd) {
					/* skip this net, no room for data */
					*reason_code = 2;
					break;
				}
				nchk = TAILQ_NEXT(chk, sctp_next);
				if (chk->whoTo != net) {
					/* No, not sent to this net */
					continue;
				}
				if ((chk->send_size > omtu) && ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) == 0)) {
					/*
					 * strange, we have a chunk that is
					 * too big for its destination and
					 * yet no fragment ok flag.
					 * Something went wrong when the
					 * PMTU changed...we did not mark
					 * this chunk for some reason?? I
					 * will fix it here by letting IP
					 * fragment it for now and printing
					 * a warning. This really should not
					 * happen ...
					 */
#ifdef SCTP_DEBUG
					printf("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n",
					    chk->send_size, mtu);
#endif
					chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
				}
				if (((chk->send_size <= mtu) && (chk->send_size <= r_mtu)) ||
				    ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) && (chk->send_size <= asoc->peers_rwnd))) {
					/* ok we will add this one */

					/*
					 * Add an AUTH chunk, if chunk
					 * requires it, save the offset into
					 * the chain for AUTH
					 */
					if ((auth == NULL) &&
					    (sctp_auth_is_required_chunk(SCTP_DATA,
					    stcb->asoc.peer_auth_chunks))) {

						outchain = sctp_add_auth_chunk(outchain,
						    &endoutchain,
						    &auth,
						    &auth_offset,
						    stcb,
						    SCTP_DATA);
						SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
					}
					outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 0,
					    chk->send_size, chk->copy_by_ref);
					if (outchain == NULL) {
#ifdef SCTP_DEBUG
						if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
							printf("No memory?\n");
						}
#endif
						if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
							sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
						}
						*reason_code = 3;
						return (ENOMEM);
					}
					/* update our MTU size */
					/* Do clear IP_DF ? */
					if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
						no_fragmentflg = 0;
					}
					/* unsigned subtraction of mtu */
					if (mtu > chk->send_size)
						mtu -= chk->send_size;
					else
						mtu = 0;
					/* unsigned subtraction of r_mtu */
					if (r_mtu > chk->send_size)
						r_mtu -= chk->send_size;
					else
						r_mtu = 0;

					to_out += chk->send_size;
					if (to_out > mx_mtu) {
#ifdef INVARIANTS
						panic("gag");
#else
						printf("Exceeding mtu of %d out size is %d\n",
						    mx_mtu, to_out);
#endif
					}
					data_list[bundle_at++] = chk;
					if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
						/* bundling limit reached; force the send below */
						mtu = 0;
						break;
					}
					if (chk->sent == SCTP_DATAGRAM_UNSENT) {
						if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
							SCTP_STAT_INCR_COUNTER64(sctps_outorderchunks);
						} else {
							SCTP_STAT_INCR_COUNTER64(sctps_outunorderchunks);
						}
						if (((chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) == SCTP_DATA_LAST_FRAG) &&
						    ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0))
							/*
							 * Count number of
							 * user msg's that
							 * were fragmented
							 * we do this by
							 * counting when we
							 * see a LAST
							 * fragment only.
							 */
							SCTP_STAT_INCR_COUNTER64(sctps_fragusrmsgs);
					}
					if ((mtu == 0) || (r_mtu == 0) || (one_chunk)) {
						break;
					}
				} else {
					/*
					 * Must be sent in order of the
					 * TSN's (on a network)
					 */
					break;
				}
			}	/* for () */
		}		/* if asoc.state OPEN */
		/* Is there something to send for this destination? */
		if (outchain) {
			/* We may need to start a control timer or two */
			if (asconf) {
				sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
				asconf = 0;
			}
			if (cookie) {
				sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
				cookie = 0;
			}
			/* must start a send timer if data is being sent */
			if (bundle_at && (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) {
				/*
				 * no timer running on this destination
				 * restart it.
				 */
				sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
			}
			/* Now send it, if there is anything to send :> */
			SCTP_BUF_PREPEND(outchain, sizeof(struct sctphdr), M_DONTWAIT);
			if (outchain == NULL) {
				/* out of mbufs */
				error = ENOBUFS;
				goto errored_send;
			}
			shdr = mtod(outchain, struct sctphdr *);
			shdr->src_port = inp->sctp_lport;
			shdr->dest_port = stcb->rport;
			shdr->v_tag = htonl(stcb->asoc.peer_vtag);
			shdr->checksum = 0;
			auth_offset += sizeof(struct sctphdr);
			if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
			    (struct sockaddr *)&net->ro._l_addr,
			    outchain,
			    auth_offset,
			    auth,
			    no_fragmentflg,
			    bundle_at,
			    data_list[0],
			    asconf))) {
				/* error, we could not output */
				if (error == ENOBUFS) {
					asoc->ifp_had_enobuf = 1;
				}
				SCTP_STAT_INCR(sctps_lowlevelerr);
				if (from_where == 0) {
					SCTP_STAT_INCR(sctps_lowlevelerrusr);
				}
		errored_send:
#ifdef SCTP_DEBUG
				if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
					printf("Gak send error %d\n", error);
				}
#endif
				if (hbflag) {
					if (*now_filled == 0) {
						SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
						*now_filled = 1;
						*now = net->last_sent_time;
					} else {
						net->last_sent_time = *now;
					}
					hbflag = 0;
				}
				if (error == EHOSTUNREACH) {
					/*
					 * Destination went unreachable
					 * during this send
					 */
					sctp_move_to_an_alt(stcb, asoc, net);
				}
				sctp_clean_up_ctl(stcb, asoc);
				*reason_code = 6;
				return (error);
			} else {
				asoc->ifp_had_enobuf = 0;
			}
			outchain = endoutchain = NULL;
			auth = NULL;
			auth_offset = 0;
			if (bundle_at || hbflag) {
				/* For data/asconf and hb set time */
				if (*now_filled == 0) {
					SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
					*now_filled = 1;
					*now = net->last_sent_time;
				} else {
					net->last_sent_time = *now;
				}
			}
			if (!no_out_cnt) {
				*num_out += (ctl_cnt + bundle_at);
			}
			if (bundle_at) {
				/* if (!net->rto_pending) { */
				/* setup for a RTO measurement */
				/* net->rto_pending = 1; */
				tsns_sent = data_list[0]->rec.data.TSN_seq;

				data_list[0]->do_rtt = 1;
				/* } else { */
				/* data_list[0]->do_rtt = 0; */
				/* } */
				SCTP_STAT_INCR_BY(sctps_senddata, bundle_at);
				sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net);
				if (sctp_early_fr) {
					if (net->flight_size < net->cwnd) {
						/* start or restart it */
						if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
							sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, inp, stcb, net,
							    SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_2);
						}
						SCTP_STAT_INCR(sctps_earlyfrstrout);
						sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, inp, stcb, net);
					} else {
						/* stop it if its running */
						if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
							SCTP_STAT_INCR(sctps_earlyfrstpout);
							sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, inp, stcb, net,
							    SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_3);
						}
					}
				}
			}
			if (one_chunk) {
				break;
			}
		}
#ifdef SCTP_CWND_LOGGING
		sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_SEND);
#endif
	}
	if (old_startat == NULL) {
		/* second pass: cover the nets before send_start_at */
		old_startat = send_start_at;
		send_start_at = TAILQ_FIRST(&asoc->nets);
		goto again_one_more_time;
	}
	/*
	 * At the end there should be no NON timed chunks hanging on this
	 * queue.
	 */
#ifdef SCTP_CWND_LOGGING
	sctp_log_cwnd(stcb, net, *num_out, SCTP_CWND_LOG_FROM_SEND);
#endif
	if ((*num_out == 0) && (*reason_code == 0)) {
		*reason_code = 4;
	} else {
		*reason_code = 5;
	}
	sctp_clean_up_ctl(stcb, asoc);
	return (0);
}
6152 */ 6153 int at; 6154 struct mbuf *cookie; 6155 struct sctp_paramhdr parm, *phdr; 6156 struct sctp_chunkhdr *hdr; 6157 struct sctp_tmit_chunk *chk; 6158 uint16_t ptype, plen; 6159 6160 /* First find the cookie in the param area */ 6161 cookie = NULL; 6162 at = offset + sizeof(struct sctp_init_chunk); 6163 6164 SCTP_TCB_LOCK_ASSERT(stcb); 6165 do { 6166 phdr = sctp_get_next_param(m, at, &parm, sizeof(parm)); 6167 if (phdr == NULL) { 6168 return (-3); 6169 } 6170 ptype = ntohs(phdr->param_type); 6171 plen = ntohs(phdr->param_length); 6172 if (ptype == SCTP_STATE_COOKIE) { 6173 int pad; 6174 6175 /* found the cookie */ 6176 if ((pad = (plen % 4))) { 6177 plen += 4 - pad; 6178 } 6179 cookie = SCTP_M_COPYM(m, at, plen, M_DONTWAIT); 6180 if (cookie == NULL) { 6181 /* No memory */ 6182 return (-2); 6183 } 6184 break; 6185 } 6186 at += SCTP_SIZE32(plen); 6187 } while (phdr); 6188 if (cookie == NULL) { 6189 /* Did not find the cookie */ 6190 return (-3); 6191 } 6192 /* ok, we got the cookie lets change it into a cookie echo chunk */ 6193 6194 /* first the change from param to cookie */ 6195 hdr = mtod(cookie, struct sctp_chunkhdr *); 6196 hdr->chunk_type = SCTP_COOKIE_ECHO; 6197 hdr->chunk_flags = 0; 6198 /* get the chunk stuff now and place it in the FRONT of the queue */ 6199 sctp_alloc_a_chunk(stcb, chk); 6200 if (chk == NULL) { 6201 /* no memory */ 6202 sctp_m_freem(cookie); 6203 return (-5); 6204 } 6205 chk->copy_by_ref = 0; 6206 chk->send_size = plen; 6207 chk->rec.chunk_id.id = SCTP_COOKIE_ECHO; 6208 chk->rec.chunk_id.can_take_data = 0; 6209 chk->sent = SCTP_DATAGRAM_UNSENT; 6210 chk->snd_count = 0; 6211 chk->flags = 0; 6212 chk->asoc = &stcb->asoc; 6213 chk->data = cookie; 6214 chk->whoTo = chk->asoc->primary_destination; 6215 atomic_add_int(&chk->whoTo->ref_count, 1); 6216 TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next); 6217 chk->asoc->ctrl_queue_cnt++; 6218 return (0); 6219 } 6220 6221 void 6222 sctp_send_heartbeat_ack(struct sctp_tcb *stcb, 
6223 struct mbuf *m, 6224 int offset, 6225 int chk_length, 6226 struct sctp_nets *net) 6227 { 6228 /* 6229 * take a HB request and make it into a HB ack and send it. 6230 */ 6231 struct mbuf *outchain; 6232 struct sctp_chunkhdr *chdr; 6233 struct sctp_tmit_chunk *chk; 6234 6235 6236 if (net == NULL) 6237 /* must have a net pointer */ 6238 return; 6239 6240 outchain = SCTP_M_COPYM(m, offset, chk_length, M_DONTWAIT); 6241 if (outchain == NULL) { 6242 /* gak out of memory */ 6243 return; 6244 } 6245 chdr = mtod(outchain, struct sctp_chunkhdr *); 6246 chdr->chunk_type = SCTP_HEARTBEAT_ACK; 6247 chdr->chunk_flags = 0; 6248 if (chk_length % 4) { 6249 /* need pad */ 6250 uint32_t cpthis = 0; 6251 int padlen; 6252 6253 padlen = 4 - (chk_length % 4); 6254 m_copyback(outchain, chk_length, padlen, (caddr_t)&cpthis); 6255 } 6256 sctp_alloc_a_chunk(stcb, chk); 6257 if (chk == NULL) { 6258 /* no memory */ 6259 sctp_m_freem(outchain); 6260 return; 6261 } 6262 chk->copy_by_ref = 0; 6263 chk->send_size = chk_length; 6264 chk->rec.chunk_id.id = SCTP_HEARTBEAT_ACK; 6265 chk->rec.chunk_id.can_take_data = 1; 6266 chk->sent = SCTP_DATAGRAM_UNSENT; 6267 chk->snd_count = 0; 6268 chk->flags = 0; 6269 chk->asoc = &stcb->asoc; 6270 chk->data = outchain; 6271 chk->whoTo = net; 6272 atomic_add_int(&chk->whoTo->ref_count, 1); 6273 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next); 6274 chk->asoc->ctrl_queue_cnt++; 6275 } 6276 6277 int 6278 sctp_send_cookie_ack(struct sctp_tcb *stcb) 6279 { 6280 /* formulate and queue a cookie-ack back to sender */ 6281 struct mbuf *cookie_ack; 6282 struct sctp_chunkhdr *hdr; 6283 struct sctp_tmit_chunk *chk; 6284 6285 cookie_ack = NULL; 6286 SCTP_TCB_LOCK_ASSERT(stcb); 6287 6288 cookie_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_DONTWAIT, 1, MT_HEADER); 6289 if (cookie_ack == NULL) { 6290 /* no mbuf's */ 6291 return (-1); 6292 } 6293 SCTP_BUF_RESV_UF(cookie_ack, SCTP_MIN_OVERHEAD); 6294 sctp_alloc_a_chunk(stcb, chk); 6295 if 
(chk == NULL) { 6296 /* no memory */ 6297 sctp_m_freem(cookie_ack); 6298 return (-1); 6299 } 6300 chk->copy_by_ref = 0; 6301 chk->send_size = sizeof(struct sctp_chunkhdr); 6302 chk->rec.chunk_id.id = SCTP_COOKIE_ACK; 6303 chk->rec.chunk_id.can_take_data = 1; 6304 chk->sent = SCTP_DATAGRAM_UNSENT; 6305 chk->snd_count = 0; 6306 chk->flags = 0; 6307 chk->asoc = &stcb->asoc; 6308 chk->data = cookie_ack; 6309 if (chk->asoc->last_control_chunk_from != NULL) { 6310 chk->whoTo = chk->asoc->last_control_chunk_from; 6311 } else { 6312 chk->whoTo = chk->asoc->primary_destination; 6313 } 6314 atomic_add_int(&chk->whoTo->ref_count, 1); 6315 hdr = mtod(cookie_ack, struct sctp_chunkhdr *); 6316 hdr->chunk_type = SCTP_COOKIE_ACK; 6317 hdr->chunk_flags = 0; 6318 hdr->chunk_length = htons(chk->send_size); 6319 SCTP_BUF_LEN(cookie_ack) = chk->send_size; 6320 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next); 6321 chk->asoc->ctrl_queue_cnt++; 6322 return (0); 6323 } 6324 6325 6326 int 6327 sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net) 6328 { 6329 /* formulate and queue a SHUTDOWN-ACK back to the sender */ 6330 struct mbuf *m_shutdown_ack; 6331 struct sctp_shutdown_ack_chunk *ack_cp; 6332 struct sctp_tmit_chunk *chk; 6333 6334 m_shutdown_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_ack_chunk), 0, M_DONTWAIT, 1, MT_HEADER); 6335 if (m_shutdown_ack == NULL) { 6336 /* no mbuf's */ 6337 return (-1); 6338 } 6339 SCTP_BUF_RESV_UF(m_shutdown_ack, SCTP_MIN_OVERHEAD); 6340 sctp_alloc_a_chunk(stcb, chk); 6341 if (chk == NULL) { 6342 /* no memory */ 6343 sctp_m_freem(m_shutdown_ack); 6344 return (-1); 6345 } 6346 chk->copy_by_ref = 0; 6347 6348 chk->send_size = sizeof(struct sctp_chunkhdr); 6349 chk->rec.chunk_id.id = SCTP_SHUTDOWN_ACK; 6350 chk->rec.chunk_id.can_take_data = 1; 6351 chk->sent = SCTP_DATAGRAM_UNSENT; 6352 chk->snd_count = 0; 6353 chk->flags = 0; 6354 chk->asoc = &stcb->asoc; 6355 chk->data = m_shutdown_ack; 6356 chk->whoTo = net; 
6357 atomic_add_int(&net->ref_count, 1); 6358 6359 ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *); 6360 ack_cp->ch.chunk_type = SCTP_SHUTDOWN_ACK; 6361 ack_cp->ch.chunk_flags = 0; 6362 ack_cp->ch.chunk_length = htons(chk->send_size); 6363 SCTP_BUF_LEN(m_shutdown_ack) = chk->send_size; 6364 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next); 6365 chk->asoc->ctrl_queue_cnt++; 6366 return (0); 6367 } 6368 6369 int 6370 sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net) 6371 { 6372 /* formulate and queue a SHUTDOWN to the sender */ 6373 struct mbuf *m_shutdown; 6374 struct sctp_shutdown_chunk *shutdown_cp; 6375 struct sctp_tmit_chunk *chk; 6376 6377 m_shutdown = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_chunk), 0, M_DONTWAIT, 1, MT_HEADER); 6378 if (m_shutdown == NULL) { 6379 /* no mbuf's */ 6380 return (-1); 6381 } 6382 SCTP_BUF_RESV_UF(m_shutdown, SCTP_MIN_OVERHEAD); 6383 sctp_alloc_a_chunk(stcb, chk); 6384 if (chk == NULL) { 6385 /* no memory */ 6386 sctp_m_freem(m_shutdown); 6387 return (-1); 6388 } 6389 chk->copy_by_ref = 0; 6390 chk->send_size = sizeof(struct sctp_shutdown_chunk); 6391 chk->rec.chunk_id.id = SCTP_SHUTDOWN; 6392 chk->rec.chunk_id.can_take_data = 1; 6393 chk->sent = SCTP_DATAGRAM_UNSENT; 6394 chk->snd_count = 0; 6395 chk->flags = 0; 6396 chk->asoc = &stcb->asoc; 6397 chk->data = m_shutdown; 6398 chk->whoTo = net; 6399 atomic_add_int(&net->ref_count, 1); 6400 6401 shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *); 6402 shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN; 6403 shutdown_cp->ch.chunk_flags = 0; 6404 shutdown_cp->ch.chunk_length = htons(chk->send_size); 6405 shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn); 6406 SCTP_BUF_LEN(m_shutdown) = chk->send_size; 6407 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next); 6408 chk->asoc->ctrl_queue_cnt++; 6409 return (0); 6410 } 6411 6412 int 6413 sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net) 
6414 { 6415 /* 6416 * formulate and queue an ASCONF to the peer ASCONF parameters 6417 * should be queued on the assoc queue 6418 */ 6419 struct sctp_tmit_chunk *chk; 6420 struct mbuf *m_asconf; 6421 struct sctp_asconf_chunk *acp; 6422 int len; 6423 6424 6425 SCTP_TCB_LOCK_ASSERT(stcb); 6426 /* compose an ASCONF chunk, maximum length is PMTU */ 6427 m_asconf = sctp_compose_asconf(stcb, &len); 6428 if (m_asconf == NULL) { 6429 return (-1); 6430 } 6431 acp = mtod(m_asconf, struct sctp_asconf_chunk *); 6432 sctp_alloc_a_chunk(stcb, chk); 6433 if (chk == NULL) { 6434 /* no memory */ 6435 sctp_m_freem(m_asconf); 6436 return (-1); 6437 } 6438 chk->copy_by_ref = 0; 6439 chk->data = m_asconf; 6440 chk->send_size = len; 6441 chk->rec.chunk_id.id = SCTP_ASCONF; 6442 chk->rec.chunk_id.can_take_data = 0; 6443 chk->sent = SCTP_DATAGRAM_UNSENT; 6444 chk->snd_count = 0; 6445 chk->flags = 0; 6446 chk->asoc = &stcb->asoc; 6447 chk->whoTo = chk->asoc->primary_destination; 6448 atomic_add_int(&chk->whoTo->ref_count, 1); 6449 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next); 6450 chk->asoc->ctrl_queue_cnt++; 6451 return (0); 6452 } 6453 6454 int 6455 sctp_send_asconf_ack(struct sctp_tcb *stcb, uint32_t retrans) 6456 { 6457 /* 6458 * formulate and queue a asconf-ack back to sender the asconf-ack 6459 * must be stored in the tcb 6460 */ 6461 struct sctp_tmit_chunk *chk; 6462 struct mbuf *m_ack, *m; 6463 6464 SCTP_TCB_LOCK_ASSERT(stcb); 6465 /* is there a asconf-ack mbuf chain to send? 
*/ 6466 if (stcb->asoc.last_asconf_ack_sent == NULL) { 6467 return (-1); 6468 } 6469 /* copy the asconf_ack */ 6470 m_ack = SCTP_M_COPYM(stcb->asoc.last_asconf_ack_sent, 0, M_COPYALL, M_DONTWAIT); 6471 if (m_ack == NULL) { 6472 /* couldn't copy it */ 6473 6474 return (-1); 6475 } 6476 sctp_alloc_a_chunk(stcb, chk); 6477 if (chk == NULL) { 6478 /* no memory */ 6479 if (m_ack) 6480 sctp_m_freem(m_ack); 6481 return (-1); 6482 } 6483 chk->copy_by_ref = 0; 6484 /* figure out where it goes to */ 6485 if (retrans) { 6486 /* we're doing a retransmission */ 6487 if (stcb->asoc.used_alt_asconfack > 2) { 6488 /* tried alternate nets already, go back */ 6489 chk->whoTo = NULL; 6490 } else { 6491 /* need to try and alternate net */ 6492 chk->whoTo = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from, 0); 6493 stcb->asoc.used_alt_asconfack++; 6494 } 6495 if (chk->whoTo == NULL) { 6496 /* no alternate */ 6497 if (stcb->asoc.last_control_chunk_from == NULL) 6498 chk->whoTo = stcb->asoc.primary_destination; 6499 else 6500 chk->whoTo = stcb->asoc.last_control_chunk_from; 6501 stcb->asoc.used_alt_asconfack = 0; 6502 } 6503 } else { 6504 /* normal case */ 6505 if (stcb->asoc.last_control_chunk_from == NULL) 6506 chk->whoTo = stcb->asoc.primary_destination; 6507 else 6508 chk->whoTo = stcb->asoc.last_control_chunk_from; 6509 stcb->asoc.used_alt_asconfack = 0; 6510 } 6511 chk->data = m_ack; 6512 chk->send_size = 0; 6513 /* Get size */ 6514 m = m_ack; 6515 while (m) { 6516 chk->send_size += SCTP_BUF_LEN(m); 6517 m = SCTP_BUF_NEXT(m); 6518 } 6519 chk->rec.chunk_id.id = SCTP_ASCONF_ACK; 6520 chk->rec.chunk_id.can_take_data = 1; 6521 chk->sent = SCTP_DATAGRAM_UNSENT; 6522 chk->snd_count = 0; 6523 chk->flags = 0; 6524 chk->asoc = &stcb->asoc; 6525 atomic_add_int(&chk->whoTo->ref_count, 1); 6526 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next); 6527 chk->asoc->ctrl_queue_cnt++; 6528 return (0); 6529 } 6530 6531 6532 static int 6533 
sctp_chunk_retransmission(struct sctp_inpcb *inp, 6534 struct sctp_tcb *stcb, 6535 struct sctp_association *asoc, 6536 int *cnt_out, struct timeval *now, int *now_filled, int *fr_done) 6537 { 6538 /* 6539 * send out one MTU of retransmission. If fast_retransmit is 6540 * happening we ignore the cwnd. Otherwise we obey the cwnd and 6541 * rwnd. For a Cookie or Asconf in the control chunk queue we 6542 * retransmit them by themselves. 6543 * 6544 * For data chunks we will pick out the lowest TSN's in the sent_queue 6545 * marked for resend and bundle them all together (up to a MTU of 6546 * destination). The address to send to should have been 6547 * selected/changed where the retransmission was marked (i.e. in FR 6548 * or t3-timeout routines). 6549 */ 6550 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING]; 6551 struct sctp_tmit_chunk *chk, *fwd; 6552 struct mbuf *m, *endofchain; 6553 struct sctphdr *shdr; 6554 int asconf; 6555 struct sctp_nets *net; 6556 uint32_t tsns_sent = 0; 6557 int no_fragmentflg, bundle_at, cnt_thru; 6558 unsigned int mtu; 6559 int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started; 6560 struct sctp_auth_chunk *auth = NULL; 6561 uint32_t auth_offset = 0; 6562 uint32_t dmtu = 0; 6563 6564 SCTP_TCB_LOCK_ASSERT(stcb); 6565 tmr_started = ctl_cnt = bundle_at = error = 0; 6566 no_fragmentflg = 1; 6567 asconf = 0; 6568 fwd_tsn = 0; 6569 *cnt_out = 0; 6570 fwd = NULL; 6571 endofchain = m = NULL; 6572 #ifdef SCTP_AUDITING_ENABLED 6573 sctp_audit_log(0xC3, 1); 6574 #endif 6575 if (TAILQ_EMPTY(&asoc->sent_queue)) { 6576 #ifdef SCTP_DEBUG 6577 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) { 6578 printf("SCTP hits empty queue with cnt set to %d?\n", 6579 asoc->sent_queue_retran_cnt); 6580 } 6581 #endif 6582 asoc->sent_queue_cnt = 0; 6583 asoc->sent_queue_cnt_removeable = 0; 6584 } 6585 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { 6586 if ((chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) || 6587 (chk->rec.chunk_id.id == SCTP_ASCONF) || 6588 
(chk->rec.chunk_id.id == SCTP_STREAM_RESET) || 6589 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)) { 6590 if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) { 6591 if (chk != asoc->str_reset) { 6592 /* 6593 * not eligible for retran if its 6594 * not ours 6595 */ 6596 continue; 6597 } 6598 } 6599 ctl_cnt++; 6600 if (chk->rec.chunk_id.id == SCTP_ASCONF) { 6601 no_fragmentflg = 1; 6602 asconf = 1; 6603 } 6604 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) { 6605 fwd_tsn = 1; 6606 fwd = chk; 6607 } 6608 /* 6609 * Add an AUTH chunk, if chunk requires it save the 6610 * offset into the chain for AUTH 6611 */ 6612 if ((auth == NULL) && 6613 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id, 6614 stcb->asoc.peer_auth_chunks))) { 6615 m = sctp_add_auth_chunk(m, &endofchain, 6616 &auth, &auth_offset, 6617 stcb, 6618 chk->rec.chunk_id.id); 6619 } 6620 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref); 6621 break; 6622 } 6623 } 6624 one_chunk = 0; 6625 cnt_thru = 0; 6626 /* do we have control chunks to retransmit? 
*/ 6627 if (m != NULL) { 6628 /* Start a timer no matter if we suceed or fail */ 6629 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) { 6630 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, chk->whoTo); 6631 } else if (chk->rec.chunk_id.id == SCTP_ASCONF) 6632 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, chk->whoTo); 6633 6634 SCTP_BUF_PREPEND(m, sizeof(struct sctphdr), M_DONTWAIT); 6635 if (m == NULL) { 6636 return (ENOBUFS); 6637 } 6638 shdr = mtod(m, struct sctphdr *); 6639 shdr->src_port = inp->sctp_lport; 6640 shdr->dest_port = stcb->rport; 6641 shdr->v_tag = htonl(stcb->asoc.peer_vtag); 6642 shdr->checksum = 0; 6643 auth_offset += sizeof(struct sctphdr); 6644 chk->snd_count++; /* update our count */ 6645 6646 if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo, 6647 (struct sockaddr *)&chk->whoTo->ro._l_addr, m, auth_offset, 6648 auth, no_fragmentflg, 0, NULL, asconf))) { 6649 SCTP_STAT_INCR(sctps_lowlevelerr); 6650 return (error); 6651 } 6652 m = endofchain = NULL; 6653 auth = NULL; 6654 auth_offset = 0; 6655 /* 6656 * We don't want to mark the net->sent time here since this 6657 * we use this for HB and retrans cannot measure RTT 6658 */ 6659 /* SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time); */ 6660 *cnt_out += 1; 6661 chk->sent = SCTP_DATAGRAM_SENT; 6662 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 6663 if (fwd_tsn == 0) { 6664 return (0); 6665 } else { 6666 /* Clean up the fwd-tsn list */ 6667 sctp_clean_up_ctl(stcb, asoc); 6668 return (0); 6669 } 6670 } 6671 /* 6672 * Ok, it is just data retransmission we need to do or that and a 6673 * fwd-tsn with it all. 
6674 */ 6675 if (TAILQ_EMPTY(&asoc->sent_queue)) { 6676 return (-1); 6677 } 6678 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) || 6679 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT)) { 6680 /* not yet open, resend the cookie and that is it */ 6681 return (1); 6682 } 6683 #ifdef SCTP_AUDITING_ENABLED 6684 sctp_auditing(20, inp, stcb, NULL); 6685 #endif 6686 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 6687 if (chk->sent != SCTP_DATAGRAM_RESEND) { 6688 /* No, not sent to this net or not ready for rtx */ 6689 continue; 6690 6691 } 6692 /* pick up the net */ 6693 net = chk->whoTo; 6694 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 6695 mtu = (net->mtu - SCTP_MIN_OVERHEAD); 6696 } else { 6697 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD; 6698 } 6699 6700 if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) { 6701 /* No room in peers rwnd */ 6702 uint32_t tsn; 6703 6704 tsn = asoc->last_acked_seq + 1; 6705 if (tsn == chk->rec.data.TSN_seq) { 6706 /* 6707 * we make a special exception for this 6708 * case. The peer has no rwnd but is missing 6709 * the lowest chunk.. which is probably what 6710 * is holding up the rwnd. 6711 */ 6712 goto one_chunk_around; 6713 } 6714 return (1); 6715 } 6716 one_chunk_around: 6717 if (asoc->peers_rwnd < mtu) { 6718 one_chunk = 1; 6719 } 6720 #ifdef SCTP_AUDITING_ENABLED 6721 sctp_audit_log(0xC3, 2); 6722 #endif 6723 bundle_at = 0; 6724 m = NULL; 6725 net->fast_retran_ip = 0; 6726 if (chk->rec.data.doing_fast_retransmit == 0) { 6727 /* 6728 * if no FR in progress skip destination that have 6729 * flight_size > cwnd. 6730 */ 6731 if (net->flight_size >= net->cwnd) { 6732 continue; 6733 } 6734 } else { 6735 /* 6736 * Mark the destination net to have FR recovery 6737 * limits put on it. 6738 */ 6739 *fr_done = 1; 6740 net->fast_retran_ip = 1; 6741 } 6742 6743 /* 6744 * if no AUTH is yet included and this chunk requires it, 6745 * make sure to account for it. 
We don't apply the size 6746 * until the AUTH chunk is actually added below in case 6747 * there is no room for this chunk. 6748 */ 6749 if ((auth == NULL) && 6750 sctp_auth_is_required_chunk(SCTP_DATA, 6751 stcb->asoc.peer_auth_chunks)) { 6752 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id); 6753 } else 6754 dmtu = 0; 6755 6756 if ((chk->send_size <= (mtu - dmtu)) || 6757 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) { 6758 /* ok we will add this one */ 6759 if ((auth == NULL) && 6760 (sctp_auth_is_required_chunk(SCTP_DATA, 6761 stcb->asoc.peer_auth_chunks))) { 6762 m = sctp_add_auth_chunk(m, &endofchain, 6763 &auth, &auth_offset, 6764 stcb, SCTP_DATA); 6765 } 6766 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref); 6767 if (m == NULL) { 6768 return (ENOMEM); 6769 } 6770 /* Do clear IP_DF ? */ 6771 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) { 6772 no_fragmentflg = 0; 6773 } 6774 /* upate our MTU size */ 6775 if (mtu > (chk->send_size + dmtu)) 6776 mtu -= (chk->send_size + dmtu); 6777 else 6778 mtu = 0; 6779 data_list[bundle_at++] = chk; 6780 if (one_chunk && (asoc->total_flight <= 0)) { 6781 SCTP_STAT_INCR(sctps_windowprobed); 6782 chk->rec.data.state_flags |= SCTP_WINDOW_PROBE; 6783 } 6784 } 6785 if (one_chunk == 0) { 6786 /* 6787 * now are there anymore forward from chk to pick 6788 * up? 
6789 */ 6790 fwd = TAILQ_NEXT(chk, sctp_next); 6791 while (fwd) { 6792 if (fwd->sent != SCTP_DATAGRAM_RESEND) { 6793 /* Nope, not for retran */ 6794 fwd = TAILQ_NEXT(fwd, sctp_next); 6795 continue; 6796 } 6797 if (fwd->whoTo != net) { 6798 /* Nope, not the net in question */ 6799 fwd = TAILQ_NEXT(fwd, sctp_next); 6800 continue; 6801 } 6802 if ((auth == NULL) && 6803 sctp_auth_is_required_chunk(SCTP_DATA, 6804 stcb->asoc.peer_auth_chunks)) { 6805 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id); 6806 } else 6807 dmtu = 0; 6808 if (fwd->send_size <= (mtu - dmtu)) { 6809 if ((auth == NULL) && 6810 (sctp_auth_is_required_chunk(SCTP_DATA, 6811 stcb->asoc.peer_auth_chunks))) { 6812 m = sctp_add_auth_chunk(m, 6813 &endofchain, 6814 &auth, &auth_offset, 6815 stcb, 6816 SCTP_DATA); 6817 } 6818 m = sctp_copy_mbufchain(fwd->data, m, &endofchain, 0, fwd->send_size, fwd->copy_by_ref); 6819 if (m == NULL) { 6820 return (ENOMEM); 6821 } 6822 /* Do clear IP_DF ? */ 6823 if (fwd->flags & CHUNK_FLAGS_FRAGMENT_OK) { 6824 no_fragmentflg = 0; 6825 } 6826 /* upate our MTU size */ 6827 if (mtu > (fwd->send_size + dmtu)) 6828 mtu -= (fwd->send_size + dmtu); 6829 else 6830 mtu = 0; 6831 data_list[bundle_at++] = fwd; 6832 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) { 6833 break; 6834 } 6835 fwd = TAILQ_NEXT(fwd, sctp_next); 6836 } else { 6837 /* can't fit so we are done */ 6838 break; 6839 } 6840 } 6841 } 6842 /* Is there something to send for this destination? */ 6843 if (m) { 6844 /* 6845 * No matter if we fail/or suceed we should start a 6846 * timer. A failure is like a lost IP packet :-) 6847 */ 6848 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 6849 /* 6850 * no timer running on this destination 6851 * restart it. 
6852 */ 6853 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net); 6854 tmr_started = 1; 6855 } 6856 SCTP_BUF_PREPEND(m, sizeof(struct sctphdr), M_DONTWAIT); 6857 if (m == NULL) { 6858 return (ENOBUFS); 6859 } 6860 shdr = mtod(m, struct sctphdr *); 6861 shdr->src_port = inp->sctp_lport; 6862 shdr->dest_port = stcb->rport; 6863 shdr->v_tag = htonl(stcb->asoc.peer_vtag); 6864 shdr->checksum = 0; 6865 auth_offset += sizeof(struct sctphdr); 6866 /* Now lets send it, if there is anything to send :> */ 6867 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net, 6868 (struct sockaddr *)&net->ro._l_addr, m, auth_offset, 6869 auth, no_fragmentflg, 0, NULL, asconf))) { 6870 /* error, we could not output */ 6871 SCTP_STAT_INCR(sctps_lowlevelerr); 6872 return (error); 6873 } 6874 m = endofchain = NULL; 6875 auth = NULL; 6876 auth_offset = 0; 6877 /* For HB's */ 6878 /* 6879 * We don't want to mark the net->sent time here 6880 * since this we use this for HB and retrans cannot 6881 * measure RTT 6882 */ 6883 /* SCTP_GETTIME_TIMEVAL(&net->last_sent_time); */ 6884 6885 /* For auto-close */ 6886 cnt_thru++; 6887 if (*now_filled == 0) { 6888 SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent); 6889 *now = asoc->time_last_sent; 6890 *now_filled = 1; 6891 } else { 6892 asoc->time_last_sent = *now; 6893 } 6894 *cnt_out += bundle_at; 6895 #ifdef SCTP_AUDITING_ENABLED 6896 sctp_audit_log(0xC4, bundle_at); 6897 #endif 6898 if (bundle_at) { 6899 tsns_sent = data_list[0]->rec.data.TSN_seq; 6900 } 6901 for (i = 0; i < bundle_at; i++) { 6902 SCTP_STAT_INCR(sctps_sendretransdata); 6903 data_list[i]->sent = SCTP_DATAGRAM_SENT; 6904 /* 6905 * When we have a revoked data, and we 6906 * retransmit it, then we clear the revoked 6907 * flag since this flag dictates if we 6908 * subtracted from the fs 6909 */ 6910 data_list[i]->rec.data.chunk_was_revoked = 0; 6911 data_list[i]->snd_count++; 6912 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 6913 /* record the time */ 6914 data_list[i]->sent_rcv_time = 
asoc->time_last_sent; 6915 if (asoc->sent_queue_retran_cnt < 0) { 6916 asoc->sent_queue_retran_cnt = 0; 6917 } 6918 if (data_list[i]->book_size_scale) { 6919 /* 6920 * need to double the book size on 6921 * this one 6922 */ 6923 data_list[i]->book_size_scale = 0; 6924 /* 6925 * Since we double the booksize, we 6926 * must also double the output queue 6927 * size, since this get shrunk when 6928 * we free by this amount. 6929 */ 6930 atomic_add_int(&((asoc)->total_output_queue_size), data_list[i]->book_size); 6931 data_list[i]->book_size *= 2; 6932 6933 6934 } else { 6935 sctp_ucount_incr(asoc->total_flight_count); 6936 #ifdef SCTP_LOG_RWND 6937 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND, 6938 asoc->peers_rwnd, data_list[i]->send_size, sctp_peer_chunk_oh); 6939 #endif 6940 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd, 6941 (uint32_t) (data_list[i]->send_size + 6942 sctp_peer_chunk_oh)); 6943 } 6944 #ifdef SCTP_FLIGHT_LOGGING 6945 sctp_misc_ints(SCTP_FLIGHT_LOG_UP, 6946 data_list[i]->whoTo->flight_size, 6947 data_list[i]->book_size, 6948 (uintptr_t) stcb, 6949 data_list[i]->rec.data.TSN_seq); 6950 #endif 6951 net->flight_size += data_list[i]->book_size; 6952 asoc->total_flight += data_list[i]->book_size; 6953 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 6954 /* SWS sender side engages */ 6955 asoc->peers_rwnd = 0; 6956 } 6957 if ((i == 0) && 6958 (data_list[i]->rec.data.doing_fast_retransmit)) { 6959 SCTP_STAT_INCR(sctps_sendfastretrans); 6960 if ((data_list[i] == TAILQ_FIRST(&asoc->sent_queue)) && 6961 (tmr_started == 0)) { 6962 /* 6963 * ok we just fast-retrans'd 6964 * the lowest TSN, i.e the 6965 * first on the list. In 6966 * this case we want to give 6967 * some more time to get a 6968 * SACK back without a 6969 * t3-expiring. 
6970 */ 6971 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net, 6972 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_4); 6973 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net); 6974 } 6975 } 6976 } 6977 #ifdef SCTP_CWND_LOGGING 6978 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_RESEND); 6979 #endif 6980 #ifdef SCTP_AUDITING_ENABLED 6981 sctp_auditing(21, inp, stcb, NULL); 6982 #endif 6983 } else { 6984 /* None will fit */ 6985 return (1); 6986 } 6987 if (asoc->sent_queue_retran_cnt <= 0) { 6988 /* all done we have no more to retran */ 6989 asoc->sent_queue_retran_cnt = 0; 6990 break; 6991 } 6992 if (one_chunk) { 6993 /* No more room in rwnd */ 6994 return (1); 6995 } 6996 /* stop the for loop here. we sent out a packet */ 6997 break; 6998 } 6999 return (0); 7000 } 7001 7002 7003 static int 7004 sctp_timer_validation(struct sctp_inpcb *inp, 7005 struct sctp_tcb *stcb, 7006 struct sctp_association *asoc, 7007 int ret) 7008 { 7009 struct sctp_nets *net; 7010 7011 /* Validate that a timer is running somewhere */ 7012 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 7013 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 7014 /* Here is a timer */ 7015 return (ret); 7016 } 7017 } 7018 SCTP_TCB_LOCK_ASSERT(stcb); 7019 /* Gak, we did not have a timer somewhere */ 7020 #ifdef SCTP_DEBUG 7021 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) { 7022 printf("Deadlock avoided starting timer on a dest at retran\n"); 7023 } 7024 #endif 7025 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination); 7026 return (ret); 7027 } 7028 7029 int 7030 sctp_chunk_output(struct sctp_inpcb *inp, 7031 struct sctp_tcb *stcb, 7032 int from_where) 7033 { 7034 /* 7035 * Ok this is the generic chunk service queue. we must do the 7036 * following: - See if there are retransmits pending, if so we must 7037 * do these first and return. - Service the stream queue that is 7038 * next, moving any message (note I must get a complete message i.e. 
7039 * FIRST/MIDDLE and LAST to the out queue in one pass) and assigning 7040 * TSN's - Check to see if the cwnd/rwnd allows any output, if so we 7041 * go ahead and fomulate and send the low level chunks. Making sure 7042 * to combine any control in the control chunk queue also. 7043 */ 7044 struct sctp_association *asoc; 7045 struct sctp_nets *net; 7046 int error = 0, num_out = 0, tot_out = 0, ret = 0, reason_code = 0, 7047 burst_cnt = 0, burst_limit = 0; 7048 struct timeval now; 7049 int now_filled = 0; 7050 int cwnd_full = 0; 7051 int nagle_on = 0; 7052 int frag_point = sctp_get_frag_point(stcb, &stcb->asoc); 7053 int un_sent = 0; 7054 int fr_done, tot_frs = 0; 7055 7056 asoc = &stcb->asoc; 7057 if (from_where == SCTP_OUTPUT_FROM_USR_SEND) { 7058 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY)) { 7059 nagle_on = 0; 7060 } else { 7061 nagle_on = 1; 7062 } 7063 } 7064 SCTP_TCB_LOCK_ASSERT(stcb); 7065 7066 un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight); 7067 7068 if ((un_sent <= 0) && 7069 (TAILQ_EMPTY(&asoc->control_send_queue)) && 7070 (asoc->sent_queue_retran_cnt == 0)) { 7071 /* Nothing to do unless there is something to be sent left */ 7072 return (error); 7073 } 7074 /* 7075 * Do we have something to send, data or control AND a sack timer 7076 * running, if so piggy-back the sack. 7077 */ 7078 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 7079 sctp_send_sack(stcb); 7080 SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer); 7081 } 7082 while (asoc->sent_queue_retran_cnt) { 7083 /* 7084 * Ok, it is retransmission time only, we send out only ONE 7085 * packet with a single call off to the retran code. 7086 */ 7087 if (from_where == SCTP_OUTPUT_FROM_COOKIE_ACK) { 7088 /* 7089 * Special hook for handling cookiess discarded by 7090 * peer that carried data. Send cookie-ack only and 7091 * then the next call with get the retran's. 
7092 */ 7093 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, 7094 &cwnd_full, from_where, 7095 &now, &now_filled, frag_point); 7096 return (0); 7097 } else if (from_where != SCTP_OUTPUT_FROM_HB_TMR) { 7098 /* if its not from a HB then do it */ 7099 fr_done = 0; 7100 ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled, &fr_done); 7101 if (fr_done) { 7102 tot_frs++; 7103 } 7104 } else { 7105 /* 7106 * its from any other place, we don't allow retran 7107 * output (only control) 7108 */ 7109 ret = 1; 7110 } 7111 if (ret > 0) { 7112 /* Can't send anymore */ 7113 /* 7114 * now lets push out control by calling med-level 7115 * output once. this assures that we WILL send HB's 7116 * if queued too. 7117 */ 7118 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, 7119 &cwnd_full, from_where, 7120 &now, &now_filled, frag_point); 7121 #ifdef SCTP_AUDITING_ENABLED 7122 sctp_auditing(8, inp, stcb, NULL); 7123 #endif 7124 return (sctp_timer_validation(inp, stcb, asoc, ret)); 7125 } 7126 if (ret < 0) { 7127 /* 7128 * The count was off.. retran is not happening so do 7129 * the normal retransmission. 
7130 */ 7131 #ifdef SCTP_AUDITING_ENABLED 7132 sctp_auditing(9, inp, stcb, NULL); 7133 #endif 7134 break; 7135 } 7136 if (from_where == SCTP_OUTPUT_FROM_T3) { 7137 /* Only one transmission allowed out of a timeout */ 7138 #ifdef SCTP_AUDITING_ENABLED 7139 sctp_auditing(10, inp, stcb, NULL); 7140 #endif 7141 /* Push out any control */ 7142 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, &cwnd_full, from_where, 7143 &now, &now_filled, frag_point); 7144 return (ret); 7145 } 7146 if (tot_frs > asoc->max_burst) { 7147 /* Hit FR burst limit */ 7148 return (0); 7149 } 7150 if ((num_out == 0) && (ret == 0)) { 7151 7152 /* No more retrans to send */ 7153 break; 7154 } 7155 } 7156 #ifdef SCTP_AUDITING_ENABLED 7157 sctp_auditing(12, inp, stcb, NULL); 7158 #endif 7159 /* Check for bad destinations, if they exist move chunks around. */ 7160 burst_limit = asoc->max_burst; 7161 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 7162 if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) == 7163 SCTP_ADDR_NOT_REACHABLE) { 7164 /* 7165 * if possible move things off of this address we 7166 * still may send below due to the dormant state but 7167 * we try to find an alternate address to send to 7168 * and if we have one we move all queued data on the 7169 * out wheel to this alternate address. 
7170 */ 7171 if (net->ref_count > 1) 7172 sctp_move_to_an_alt(stcb, asoc, net); 7173 } else { 7174 /* 7175 * if ((asoc->sat_network) || (net->addr_is_local)) 7176 * { burst_limit = asoc->max_burst * 7177 * SCTP_SAT_NETWORK_BURST_INCR; } 7178 */ 7179 if (sctp_use_cwnd_based_maxburst) { 7180 if ((net->flight_size + (burst_limit * net->mtu)) < net->cwnd) { 7181 int old_cwnd; 7182 7183 if (net->ssthresh < net->cwnd) 7184 net->ssthresh = net->cwnd; 7185 old_cwnd = net->cwnd; 7186 net->cwnd = (net->flight_size + (burst_limit * net->mtu)); 7187 7188 #ifdef SCTP_CWND_MONITOR 7189 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_BRST); 7190 #endif 7191 7192 #ifdef SCTP_LOG_MAXBURST 7193 sctp_log_maxburst(stcb, net, 0, burst_limit, SCTP_MAX_BURST_APPLIED); 7194 #endif 7195 SCTP_STAT_INCR(sctps_maxburstqueued); 7196 } 7197 net->fast_retran_ip = 0; 7198 } else { 7199 if (net->flight_size == 0) { 7200 /* Should be decaying the cwnd here */ 7201 ; 7202 } 7203 } 7204 } 7205 7206 } 7207 burst_cnt = 0; 7208 cwnd_full = 0; 7209 do { 7210 error = sctp_med_chunk_output(inp, stcb, asoc, &num_out, 7211 &reason_code, 0, &cwnd_full, from_where, 7212 &now, &now_filled, frag_point); 7213 if (error) { 7214 #ifdef SCTP_DEBUG 7215 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) { 7216 printf("Error %d was returned from med-c-op\n", error); 7217 } 7218 #endif 7219 #ifdef SCTP_LOG_MAXBURST 7220 sctp_log_maxburst(stcb, asoc->primary_destination, error, burst_cnt, SCTP_MAX_BURST_ERROR_STOP); 7221 #endif 7222 #ifdef SCTP_CWND_LOGGING 7223 sctp_log_cwnd(stcb, NULL, error, SCTP_SEND_NOW_COMPLETES); 7224 sctp_log_cwnd(stcb, NULL, 0xdeadbeef, SCTP_SEND_NOW_COMPLETES); 7225 #endif 7226 7227 break; 7228 } 7229 #ifdef SCTP_DEBUG 7230 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) { 7231 printf("m-c-o put out %d\n", num_out); 7232 } 7233 #endif 7234 tot_out += num_out; 7235 burst_cnt++; 7236 #ifdef SCTP_CWND_LOGGING 7237 sctp_log_cwnd(stcb, NULL, num_out, SCTP_SEND_NOW_COMPLETES); 7238 if (num_out == 
0) { 7239 sctp_log_cwnd(stcb, NULL, reason_code, SCTP_SEND_NOW_COMPLETES); 7240 } 7241 #endif 7242 if (nagle_on) { 7243 /* 7244 * When nagle is on, we look at how much is un_sent, 7245 * then if its smaller than an MTU and we have data 7246 * in flight we stop. 7247 */ 7248 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) + 7249 ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) 7250 * sizeof(struct sctp_data_chunk))); 7251 if ((un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) && 7252 (stcb->asoc.total_flight > 0)) { 7253 break; 7254 } 7255 } 7256 if (TAILQ_EMPTY(&asoc->control_send_queue) && 7257 TAILQ_EMPTY(&asoc->send_queue) && 7258 TAILQ_EMPTY(&asoc->out_wheel)) { 7259 /* Nothing left to send */ 7260 break; 7261 } 7262 if ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) <= 0) { 7263 /* Nothing left to send */ 7264 break; 7265 } 7266 } while (num_out && (sctp_use_cwnd_based_maxburst || 7267 (burst_cnt < burst_limit))); 7268 7269 if (sctp_use_cwnd_based_maxburst == 0) { 7270 if (burst_cnt >= burst_limit) { 7271 SCTP_STAT_INCR(sctps_maxburstqueued); 7272 asoc->burst_limit_applied = 1; 7273 #ifdef SCTP_LOG_MAXBURST 7274 sctp_log_maxburst(stcb, asoc->primary_destination, 0, burst_cnt, SCTP_MAX_BURST_APPLIED); 7275 #endif 7276 } else { 7277 asoc->burst_limit_applied = 0; 7278 } 7279 } 7280 #ifdef SCTP_CWND_LOGGING 7281 sctp_log_cwnd(stcb, NULL, tot_out, SCTP_SEND_NOW_COMPLETES); 7282 #endif 7283 #ifdef SCTP_DEBUG 7284 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) { 7285 printf("Ok, we have put out %d chunks\n", tot_out); 7286 } 7287 #endif 7288 /* 7289 * Now we need to clean up the control chunk chain if a ECNE is on 7290 * it. It must be marked as UNSENT again so next call will continue 7291 * to send it until such time that we get a CWR, to remove it. 
7292 */ 7293 if (stcb->asoc.ecn_echo_cnt_onq) 7294 sctp_fix_ecn_echo(asoc); 7295 return (error); 7296 } 7297 7298 7299 int 7300 sctp_output(inp, m, addr, control, p, flags) 7301 struct sctp_inpcb *inp; 7302 struct mbuf *m; 7303 struct sockaddr *addr; 7304 struct mbuf *control; 7305 7306 struct thread *p; 7307 int flags; 7308 { 7309 if (inp == NULL) { 7310 return (EINVAL); 7311 } 7312 if (inp->sctp_socket == NULL) { 7313 return (EINVAL); 7314 } 7315 return (sctp_sosend(inp->sctp_socket, 7316 addr, 7317 (struct uio *)NULL, 7318 m, 7319 control, 7320 flags, 7321 p)); 7322 } 7323 7324 void 7325 send_forward_tsn(struct sctp_tcb *stcb, 7326 struct sctp_association *asoc) 7327 { 7328 struct sctp_tmit_chunk *chk; 7329 struct sctp_forward_tsn_chunk *fwdtsn; 7330 7331 SCTP_TCB_LOCK_ASSERT(stcb); 7332 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { 7333 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) { 7334 /* mark it to unsent */ 7335 chk->sent = SCTP_DATAGRAM_UNSENT; 7336 chk->snd_count = 0; 7337 /* Do we correct its output location? 
*/ 7338 if (chk->whoTo != asoc->primary_destination) { 7339 sctp_free_remote_addr(chk->whoTo); 7340 chk->whoTo = asoc->primary_destination; 7341 atomic_add_int(&chk->whoTo->ref_count, 1); 7342 } 7343 goto sctp_fill_in_rest; 7344 } 7345 } 7346 /* Ok if we reach here we must build one */ 7347 sctp_alloc_a_chunk(stcb, chk); 7348 if (chk == NULL) { 7349 return; 7350 } 7351 chk->copy_by_ref = 0; 7352 chk->rec.chunk_id.id = SCTP_FORWARD_CUM_TSN; 7353 chk->rec.chunk_id.can_take_data = 0; 7354 chk->asoc = asoc; 7355 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA); 7356 if (chk->data == NULL) { 7357 atomic_subtract_int(&chk->whoTo->ref_count, 1); 7358 sctp_free_a_chunk(stcb, chk); 7359 return; 7360 } 7361 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); 7362 chk->sent = SCTP_DATAGRAM_UNSENT; 7363 chk->snd_count = 0; 7364 chk->whoTo = asoc->primary_destination; 7365 atomic_add_int(&chk->whoTo->ref_count, 1); 7366 TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next); 7367 asoc->ctrl_queue_cnt++; 7368 sctp_fill_in_rest: 7369 /* 7370 * Here we go through and fill out the part that deals with 7371 * stream/seq of the ones we skip. 
7372 */ 7373 SCTP_BUF_LEN(chk->data) = 0; 7374 { 7375 struct sctp_tmit_chunk *at, *tp1, *last; 7376 struct sctp_strseq *strseq; 7377 unsigned int cnt_of_space, i, ovh; 7378 unsigned int space_needed; 7379 unsigned int cnt_of_skipped = 0; 7380 7381 TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) { 7382 if (at->sent != SCTP_FORWARD_TSN_SKIP) { 7383 /* no more to look at */ 7384 break; 7385 } 7386 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) { 7387 /* We don't report these */ 7388 continue; 7389 } 7390 cnt_of_skipped++; 7391 } 7392 space_needed = (sizeof(struct sctp_forward_tsn_chunk) + 7393 (cnt_of_skipped * sizeof(struct sctp_strseq))); 7394 7395 cnt_of_space = M_TRAILINGSPACE(chk->data); 7396 7397 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 7398 ovh = SCTP_MIN_OVERHEAD; 7399 } else { 7400 ovh = SCTP_MIN_V4_OVERHEAD; 7401 } 7402 if (cnt_of_space > (asoc->smallest_mtu - ovh)) { 7403 /* trim to a mtu size */ 7404 cnt_of_space = asoc->smallest_mtu - ovh; 7405 } 7406 if (cnt_of_space < space_needed) { 7407 /* 7408 * ok we must trim down the chunk by lowering the 7409 * advance peer ack point. 7410 */ 7411 cnt_of_skipped = (cnt_of_space - 7412 ((sizeof(struct sctp_forward_tsn_chunk)) / 7413 sizeof(struct sctp_strseq))); 7414 /* 7415 * Go through and find the TSN that will be the one 7416 * we report. 
7417 */ 7418 at = TAILQ_FIRST(&asoc->sent_queue); 7419 for (i = 0; i < cnt_of_skipped; i++) { 7420 tp1 = TAILQ_NEXT(at, sctp_next); 7421 at = tp1; 7422 } 7423 last = at; 7424 /* 7425 * last now points to last one I can report, update 7426 * peer ack point 7427 */ 7428 asoc->advanced_peer_ack_point = last->rec.data.TSN_seq; 7429 space_needed -= (cnt_of_skipped * sizeof(struct sctp_strseq)); 7430 } 7431 chk->send_size = space_needed; 7432 /* Setup the chunk */ 7433 fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *); 7434 fwdtsn->ch.chunk_length = htons(chk->send_size); 7435 fwdtsn->ch.chunk_flags = 0; 7436 fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN; 7437 fwdtsn->new_cumulative_tsn = htonl(asoc->advanced_peer_ack_point); 7438 chk->send_size = (sizeof(struct sctp_forward_tsn_chunk) + 7439 (cnt_of_skipped * sizeof(struct sctp_strseq))); 7440 SCTP_BUF_LEN(chk->data) = chk->send_size; 7441 fwdtsn++; 7442 /* 7443 * Move pointer to after the fwdtsn and transfer to the 7444 * strseq pointer. 7445 */ 7446 strseq = (struct sctp_strseq *)fwdtsn; 7447 /* 7448 * Now populate the strseq list. This is done blindly 7449 * without pulling out duplicate stream info. This is 7450 * inefficent but won't harm the process since the peer will 7451 * look at these in sequence and will thus release anything. 7452 * It could mean we exceed the PMTU and chop off some that 7453 * we could have included.. but this is unlikely (aka 1432/4 7454 * would mean 300+ stream seq's would have to be reported in 7455 * one FWD-TSN. With a bit of work we can later FIX this to 7456 * optimize and pull out duplcates.. but it does add more 7457 * overhead. So for now... not! 
7458 */ 7459 at = TAILQ_FIRST(&asoc->sent_queue); 7460 for (i = 0; i < cnt_of_skipped; i++) { 7461 tp1 = TAILQ_NEXT(at, sctp_next); 7462 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) { 7463 /* We don't report these */ 7464 i--; 7465 at = tp1; 7466 continue; 7467 } 7468 strseq->stream = ntohs(at->rec.data.stream_number); 7469 strseq->sequence = ntohs(at->rec.data.stream_seq); 7470 strseq++; 7471 at = tp1; 7472 } 7473 } 7474 return; 7475 7476 } 7477 7478 void 7479 sctp_send_sack(struct sctp_tcb *stcb) 7480 { 7481 /* 7482 * Queue up a SACK in the control queue. We must first check to see 7483 * if a SACK is somehow on the control queue. If so, we will take 7484 * and and remove the old one. 7485 */ 7486 struct sctp_association *asoc; 7487 struct sctp_tmit_chunk *chk, *a_chk; 7488 struct sctp_sack_chunk *sack; 7489 struct sctp_gap_ack_block *gap_descriptor; 7490 struct sack_track *selector; 7491 int mergeable = 0; 7492 int offset; 7493 caddr_t limit; 7494 uint32_t *dup; 7495 int limit_reached = 0; 7496 unsigned int i, jstart, siz, j; 7497 unsigned int num_gap_blocks = 0, space; 7498 int num_dups = 0; 7499 int space_req; 7500 7501 7502 a_chk = NULL; 7503 asoc = &stcb->asoc; 7504 SCTP_TCB_LOCK_ASSERT(stcb); 7505 if (asoc->last_data_chunk_from == NULL) { 7506 /* Hmm we never received anything */ 7507 return; 7508 } 7509 sctp_set_rwnd(stcb, asoc); 7510 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { 7511 if (chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) { 7512 /* Hmm, found a sack already on queue, remove it */ 7513 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next); 7514 asoc->ctrl_queue_cnt++; 7515 a_chk = chk; 7516 if (a_chk->data) { 7517 sctp_m_freem(a_chk->data); 7518 a_chk->data = NULL; 7519 } 7520 sctp_free_remote_addr(a_chk->whoTo); 7521 a_chk->whoTo = NULL; 7522 break; 7523 } 7524 } 7525 if (a_chk == NULL) { 7526 sctp_alloc_a_chunk(stcb, a_chk); 7527 if (a_chk == NULL) { 7528 /* No memory so we drop the idea, and set a timer */ 7529 
sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
                stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5);
            sctp_timer_start(SCTP_TIMER_TYPE_RECV,
                stcb->sctp_ep, stcb, NULL);
            return;
        }
        a_chk->copy_by_ref = 0;
        a_chk->rec.chunk_id.id = SCTP_SELECTIVE_ACK;
        a_chk->rec.chunk_id.can_take_data = 1;
    }
    /* Common setup for both a reused and a freshly allocated chunk. */
    a_chk->asoc = asoc;
    a_chk->snd_count = 0;
    a_chk->send_size = 0;   /* fill in later */
    a_chk->sent = SCTP_DATAGRAM_UNSENT;

    if ((asoc->numduptsns) ||
        (asoc->last_data_chunk_from->dest_state & SCTP_ADDR_NOT_REACHABLE)
        ) {
        /*
         * Ok, we have some duplicates or the destination for the
         * sack is unreachable, lets see if we can select an
         * alternate than asoc->last_data_chunk_from
         */
        if ((!(asoc->last_data_chunk_from->dest_state &
            SCTP_ADDR_NOT_REACHABLE)) &&
            (asoc->used_alt_onsack > asoc->numnets)) {
            /* We used an alt last time, don't this time */
            a_chk->whoTo = NULL;
        } else {
            asoc->used_alt_onsack++;
            a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0);
        }
        if (a_chk->whoTo == NULL) {
            /* Nope, no alternate */
            a_chk->whoTo = asoc->last_data_chunk_from;
            asoc->used_alt_onsack = 0;
        }
    } else {
        /*
         * No duplicates so we use the last place we received data
         * from.
         */
        asoc->used_alt_onsack = 0;
        a_chk->whoTo = asoc->last_data_chunk_from;
    }
    if (a_chk->whoTo) {
        atomic_add_int(&a_chk->whoTo->ref_count, 1);
    }
    if (asoc->highest_tsn_inside_map == asoc->cumulative_tsn) {
        /* no gaps */
        space_req = sizeof(struct sctp_sack_chunk);
    } else {
        /* gaps get a cluster */
        space_req = MCLBYTES;
    }
    /* Ok now lets formulate a MBUF with our sack */
    a_chk->data = sctp_get_mbuf_for_msg(space_req, 0, M_DONTWAIT, 1, MT_DATA);
    if ((a_chk->data == NULL) ||
        (a_chk->whoTo == NULL)) {
        /* rats, no mbuf memory */
        if (a_chk->data) {
            /* was a problem with the destination */
            sctp_m_freem(a_chk->data);
            a_chk->data = NULL;
        }
        if (a_chk->whoTo)
            atomic_subtract_int(&a_chk->whoTo->ref_count, 1);
        sctp_free_a_chunk(stcb, a_chk);
        /* Retry later: re-arm the RECV timer so a SACK still goes out. */
        sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
            stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_6);
        sctp_timer_start(SCTP_TIMER_TYPE_RECV,
            stcb->sctp_ep, stcb, NULL);
        return;
    }
    /* ok, lets go through and fill it in */
    SCTP_BUF_RESV_UF(a_chk->data, SCTP_MIN_OVERHEAD);
    space = M_TRAILINGSPACE(a_chk->data);
    if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) {
        space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD);
    }
    /* limit = first byte past the usable space in the mbuf */
    limit = mtod(a_chk->data, caddr_t);
    limit += space;

    sack = mtod(a_chk->data, struct sctp_sack_chunk *);
    sack->ch.chunk_type = SCTP_SELECTIVE_ACK;
    /* 0x01 is used by nonce for ecn */
    if ((sctp_ecn_enable) &&
        (sctp_ecn_nonce) &&
        (asoc->peer_supports_ecn_nonce))
        sack->ch.chunk_flags = (asoc->receiver_nonce_sum & SCTP_SACK_NONCE_SUM);
    else
        sack->ch.chunk_flags = 0;

    if (sctp_cmt_on_off && sctp_cmt_use_dac) {
        /*
         * CMT DAC algorithm: If 2 (i.e., 0b10) packets have been
         * received, then set high bit to 1, else 0. Reset
         * pkts_rcvd.
         */
        sack->ch.chunk_flags |= (asoc->cmt_dac_pkts_rcvd << 6);
        asoc->cmt_dac_pkts_rcvd = 0;
    }
    sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
    sack->sack.a_rwnd = htonl(asoc->my_rwnd);
    asoc->my_last_reported_rwnd = asoc->my_rwnd;

    /* reset the readers interpretation */
    stcb->freed_by_sorcv_sincelast = 0;

    /* Gap-ack blocks start right after the fixed SACK header. */
    gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk));

    /* siz = number of mapping-array bytes covering base..highest TSN */
    siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
    if (asoc->cumulative_tsn < asoc->mapping_array_base_tsn) {
        offset = 1;
        /*
         * cum-ack behind the mapping array, so we start and use all
         * entries.
         */
        jstart = 0;
    } else {
        offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
        /*
         * we skip the first one when the cum-ack is at or above the
         * mapping array base.
         */
        jstart = 1;
    }
    if (compare_with_wrap(asoc->highest_tsn_inside_map, asoc->cumulative_tsn, MAX_TSN)) {
        /* we have a gap .. maybe */
        for (i = 0; i < siz; i++) {
            /*
             * Each mapping-array byte indexes a precomputed
             * sack_track entry describing its gap runs and
             * whether they can merge across byte boundaries.
             */
            selector = &sack_array[asoc->mapping_array[i]];
            if (mergeable && selector->right_edge) {
                /*
                 * Backup, left and right edges were ok to
                 * merge.
                 */
                num_gap_blocks--;
                gap_descriptor--;
            }
            if (selector->num_entries == 0)
                mergeable = 0;
            else {
                for (j = jstart; j < selector->num_entries; j++) {
                    if (mergeable && selector->right_edge) {
                        /*
                         * do a merge by NOT setting
                         * the left side
                         */
                        mergeable = 0;
                    } else {
                        /*
                         * no merge, set the left
                         * side
                         */
                        mergeable = 0;
                        gap_descriptor->start = htons((selector->gaps[j].start + offset));
                    }
                    gap_descriptor->end = htons((selector->gaps[j].end + offset));
                    num_gap_blocks++;
                    gap_descriptor++;
                    if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
                        /* no more room */
                        limit_reached = 1;
                        break;
                    }
                }
                if (selector->left_edge) {
                    mergeable = 1;
                }
            }
            if (limit_reached) {
                /* Reached the limit stop */
                break;
            }
            jstart = 0;
            offset += 8;
        }
        if (num_gap_blocks == 0) {
            /* reneged all chunks */
            asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
        }
    }
    /* now we must add any dups we are going to report. */
    if ((limit_reached == 0) && (asoc->numduptsns)) {
        dup = (uint32_t *) gap_descriptor;
        for (i = 0; i < asoc->numduptsns; i++) {
            *dup = htonl(asoc->dup_tsns[i]);
            dup++;
            num_dups++;
            if (((caddr_t)dup + sizeof(uint32_t)) > limit) {
                /* no more room */
                break;
            }
        }
        asoc->numduptsns = 0;
    }
    /*
     * now that the chunk is prepared queue it to the control chunk
     * queue.
     */
    a_chk->send_size = (sizeof(struct sctp_sack_chunk) +
        (num_gap_blocks * sizeof(struct sctp_gap_ack_block)) +
        (num_dups * sizeof(int32_t)));
    SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
    sack->sack.num_gap_ack_blks = htons(num_gap_blocks);
    sack->sack.num_dup_tsns = htons(num_dups);
    sack->ch.chunk_length = htons(a_chk->send_size);
    TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next);
    asoc->ctrl_queue_cnt++;
    SCTP_STAT_INCR(sctps_sendsacks);
    return;
}


/*
 * Build an ABORT chunk (optionally AUTH-protected and carrying the
 * operational-error mbuf chain 'operr') for this association and send it
 * directly via sctp_lowlevel_chunk_output().  Ownership of 'operr' is
 * taken by the built chain.
 */
void
sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr)
{
    struct mbuf *m_abort;
    struct mbuf *m_out = NULL, *m_end = NULL;
    struct sctp_abort_chunk *abort = NULL;
    int sz;
    uint32_t auth_offset = 0;
    struct sctp_auth_chunk *auth = NULL;
    struct sctphdr *shdr;

    /*
     * Add an AUTH chunk, if chunk requires it and save the offset into
     * the chain for AUTH
     */
    if (sctp_auth_is_required_chunk(SCTP_ABORT_ASSOCIATION,
        stcb->asoc.peer_auth_chunks)) {
        m_out = sctp_add_auth_chunk(m_out, &m_end, &auth, &auth_offset,
            stcb, SCTP_ABORT_ASSOCIATION);
    }
    SCTP_TCB_LOCK_ASSERT(stcb);
    m_abort = sctp_get_mbuf_for_msg(sizeof(struct sctp_abort_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
    if (m_abort == NULL) {
        /* no mbuf's */
        if (m_out)
            sctp_m_freem(m_out);
        return;
    }
    /* link in any error */
    SCTP_BUF_NEXT(m_abort) = operr;
    /* sz = total length of the attached error cause chain */
    sz = 0;
    if (operr) {
        struct mbuf *n;

        n = operr;
        while (n) {
            sz += SCTP_BUF_LEN(n);
            n = SCTP_BUF_NEXT(n);
        }
    }
    SCTP_BUF_LEN(m_abort) = sizeof(*abort);
    if (m_out == NULL) {
        /* NO Auth chunk prepended, so reserve space in front */
        SCTP_BUF_RESV_UF(m_abort, SCTP_MIN_OVERHEAD);
        m_out = m_abort;
    } else {
        /* Put AUTH chunk at the front of the chain */
        SCTP_BUF_NEXT(m_end) = m_abort;
    }

    /* fill in the ABORT
chunk */
    abort = mtod(m_abort, struct sctp_abort_chunk *);
    abort->ch.chunk_type = SCTP_ABORT_ASSOCIATION;
    abort->ch.chunk_flags = 0;
    abort->ch.chunk_length = htons(sizeof(*abort) + sz);

    /* prepend and fill in the SCTP header */
    SCTP_BUF_PREPEND(m_out, sizeof(struct sctphdr), M_DONTWAIT);
    if (m_out == NULL) {
        /* TSNH: no memory */
        return;
    }
    shdr = mtod(m_out, struct sctphdr *);
    shdr->src_port = stcb->sctp_ep->sctp_lport;
    shdr->dest_port = stcb->rport;
    shdr->v_tag = htonl(stcb->asoc.peer_vtag);
    shdr->checksum = 0;
    auth_offset += sizeof(struct sctphdr);

    /* Send directly to the primary destination, bypassing the queues. */
    sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb,
        stcb->asoc.primary_destination,
        (struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr,
        m_out, auth_offset, auth, 1, 0, NULL, 0);
    SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
}

/*
 * Build and immediately transmit a SHUTDOWN-COMPLETE for an existing
 * association to the given destination.  Returns 0 on success, -1 if no
 * mbuf could be allocated.
 */
int
sctp_send_shutdown_complete(struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
    /* formulate and SEND a SHUTDOWN-COMPLETE */
    struct mbuf *m_shutdown_comp;
    struct sctp_shutdown_complete_msg *comp_cp;

    m_shutdown_comp = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_complete_msg), 0, M_DONTWAIT, 1, MT_HEADER);
    if (m_shutdown_comp == NULL) {
        /* no mbuf's */
        return (-1);
    }
    comp_cp = mtod(m_shutdown_comp, struct sctp_shutdown_complete_msg *);
    comp_cp->shut_cmp.ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
    comp_cp->shut_cmp.ch.chunk_flags = 0;
    comp_cp->shut_cmp.ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
    comp_cp->sh.src_port = stcb->sctp_ep->sctp_lport;
    comp_cp->sh.dest_port = stcb->rport;
    comp_cp->sh.v_tag = htonl(stcb->asoc.peer_vtag);
    comp_cp->sh.checksum = 0;

    SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_msg);
    sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
        (struct sockaddr *)&net->ro._l_addr,
        m_shutdown_comp, 0, NULL, 1, 0, NULL, 0);
    SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
    return (0);
}

/*
 * "Out of the blue" SHUTDOWN-COMPLETE: we got a SHUTDOWN-ACK but have no
 * TCB, so build an IP(v4/v6) + SCTP + SHUTDOWN-COMPLETE packet by echoing
 * back the addresses/ports/v_tag of the received packet 'm' and push it
 * straight out via ip_output()/ip6_output().  Returns 0 on success, -1 on
 * allocation failure or unknown IP version.
 * NOTE(review): the unknown-IP-version branch returns without freeing
 * o_pak — looks like a potential mbuf leak; confirm against
 * SCTP_GET_HEADER_FOR_OUTPUT semantics.
 */
int
sctp_send_shutdown_complete2(struct mbuf *m, int iphlen, struct sctphdr *sh)
{
    /* formulate and SEND a SHUTDOWN-COMPLETE */
    struct mbuf *o_pak;
    struct mbuf *mout;
    struct ip *iph, *iph_out;
    struct ip6_hdr *ip6, *ip6_out;
    int offset_out, len;
    struct sctp_shutdown_complete_msg *comp_cp;

    /* Get room for the largest message */
    len = (sizeof(struct ip6_hdr) + sizeof(struct sctp_shutdown_complete_msg));

    o_pak = SCTP_GET_HEADER_FOR_OUTPUT(len);
    if (o_pak == NULL) {
        /* no mbuf's */
        return (-1);
    }
    mout = SCTP_HEADER_TO_CHAIN(o_pak);
    iph = mtod(m, struct ip *);
    iph_out = NULL;
    ip6_out = NULL;
    offset_out = 0;
    if (iph->ip_v == IPVERSION) {
        SCTP_BUF_LEN(mout) = sizeof(struct ip) +
            sizeof(struct sctp_shutdown_complete_msg);
        SCTP_BUF_NEXT(mout) = NULL;
        iph_out = mtod(mout, struct ip *);

        /* Fill in the IP header for the ABORT */
        iph_out->ip_v = IPVERSION;
        iph_out->ip_hl = (sizeof(struct ip) / 4);
        iph_out->ip_tos = (u_char)0;
        iph_out->ip_id = 0;
        iph_out->ip_off = 0;
        iph_out->ip_ttl = MAXTTL;
        iph_out->ip_p = IPPROTO_SCTP;
        /* swap src/dst: reply goes back where the packet came from */
        iph_out->ip_src.s_addr = iph->ip_dst.s_addr;
        iph_out->ip_dst.s_addr = iph->ip_src.s_addr;

        /* let IP layer calculate this */
        iph_out->ip_sum = 0;
        offset_out += sizeof(*iph_out);
        comp_cp = (struct sctp_shutdown_complete_msg *)(
            (caddr_t)iph_out + offset_out);
    } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
        ip6 = (struct ip6_hdr *)iph;
        SCTP_BUF_LEN(mout) = sizeof(struct ip6_hdr) +
            sizeof(struct sctp_shutdown_complete_msg);
        SCTP_BUF_NEXT(mout) = NULL;
        ip6_out = mtod(mout, struct ip6_hdr *);

        /* Fill in the IPv6 header for the ABORT */
        ip6_out->ip6_flow = ip6->ip6_flow;
        ip6_out->ip6_hlim = ip6_defhlim;
        ip6_out->ip6_nxt = IPPROTO_SCTP;
        ip6_out->ip6_src = ip6->ip6_dst;
        ip6_out->ip6_dst = ip6->ip6_src;
        /*
         * ?? The old code had both the iph len + payload, I think
         * this is wrong and would never have worked
         */
        ip6_out->ip6_plen = sizeof(struct sctp_shutdown_complete_msg);
        offset_out += sizeof(*ip6_out);
        comp_cp = (struct sctp_shutdown_complete_msg *)(
            (caddr_t)ip6_out + offset_out);
    } else {
        /* Currently not supported. */
        return (-1);
    }

    SCTP_HEADER_LEN(o_pak) = SCTP_BUF_LEN(mout);
    /* Now copy in and fill in the ABORT tags etc. */
    comp_cp->sh.src_port = sh->dest_port;
    comp_cp->sh.dest_port = sh->src_port;
    comp_cp->sh.checksum = 0;
    comp_cp->sh.v_tag = sh->v_tag;
    /* T-bit: we are replying without a TCB of our own */
    comp_cp->shut_cmp.ch.chunk_flags = SCTP_HAD_NO_TCB;
    comp_cp->shut_cmp.ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
    comp_cp->shut_cmp.ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));

    /* add checksum */
    if ((sctp_no_csum_on_loopback) && SCTP_IS_IT_LOOPBACK(o_pak)) {
        comp_cp->sh.checksum = 0;
    } else {
        comp_cp->sh.checksum = sctp_calculate_sum(mout, NULL, offset_out);
    }
    if (iph_out != NULL) {
        struct route ro;

        bzero(&ro, sizeof ro);
        /* set IPv4 length */
        iph_out->ip_len = SCTP_HEADER_LEN(o_pak);
        /* out it goes */
        ip_output(o_pak, 0, &ro, IP_RAWOUTPUT, NULL
            ,NULL
            );
        /* Free the route if we got one back */
        if (ro.ro_rt)
            RTFREE(ro.ro_rt);
    } else if (ip6_out != NULL) {
        struct route_in6 ro;

        bzero(&ro, sizeof(ro));
        ip6_output(o_pak, NULL, &ro, 0, NULL, NULL
            ,NULL
            );
        /* Free the route if we got one back */
        if (ro.ro_rt)
            RTFREE(ro.ro_rt);
    }
    SCTP_STAT_INCR(sctps_sendpackets);
    SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
    SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
    return (0);
}

/*
 * Pick the destination most in need of a heartbeat: the reachable,
 * HB-enabled net with the longest time since last send (unconfirmed nets
 * override the RTO gate so they are probed faster).  Also stores the
 * current time in *now.  Returns NULL if no destination needs a HB.
 */
static struct sctp_nets *
sctp_select_hb_destination(struct sctp_tcb *stcb, struct timeval *now)
{
    struct sctp_nets *net, *hnet;
    int ms_goneby, highest_ms, state_overide = 0;

    SCTP_GETTIME_TIMEVAL(now);
    highest_ms = 0;
    hnet = NULL;
    SCTP_TCB_LOCK_ASSERT(stcb);
    TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
        if (
            ((net->dest_state & SCTP_ADDR_NOHB) && ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0)) ||
            (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)
            ) {
            /*
             * Skip this guy from consideration if HB is off AND
             * its confirmed
             */
            continue;
        }
        if (sctp_destination_is_reachable(stcb, (struct sockaddr *)&net->ro._l_addr) == 0) {
            /* skip this dest net from consideration */
            continue;
        }
        if (net->last_sent_time.tv_sec) {
            /* Sent to so we subtract */
            ms_goneby = (now->tv_sec - net->last_sent_time.tv_sec) * 1000;
        } else
            /* Never been sent to */
            ms_goneby = 0x7fffffff;
        /*
         * When the address state is unconfirmed but still
         * considered reachable, we HB at a higher rate. Once it
         * goes confirmed OR reaches the "unreachable" state, then
         * we cut it back to HB at a more normal pace.
8005 */ 8006 if ((net->dest_state & (SCTP_ADDR_UNCONFIRMED | SCTP_ADDR_NOT_REACHABLE)) == SCTP_ADDR_UNCONFIRMED) { 8007 state_overide = 1; 8008 } else { 8009 state_overide = 0; 8010 } 8011 8012 if ((((unsigned int)ms_goneby >= net->RTO) || (state_overide)) && 8013 (ms_goneby > highest_ms)) { 8014 highest_ms = ms_goneby; 8015 hnet = net; 8016 } 8017 } 8018 if (hnet && 8019 ((hnet->dest_state & (SCTP_ADDR_UNCONFIRMED | SCTP_ADDR_NOT_REACHABLE)) == SCTP_ADDR_UNCONFIRMED)) { 8020 state_overide = 1; 8021 } else { 8022 state_overide = 0; 8023 } 8024 8025 if (highest_ms && (((unsigned int)highest_ms >= hnet->RTO) || state_overide)) { 8026 /* 8027 * Found the one with longest delay bounds OR it is 8028 * unconfirmed and still not marked unreachable. 8029 */ 8030 #ifdef SCTP_DEBUG 8031 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) { 8032 printf("net:%p is the hb winner -", 8033 hnet); 8034 if (hnet) 8035 sctp_print_address((struct sockaddr *)&hnet->ro._l_addr); 8036 else 8037 printf(" none\n"); 8038 } 8039 #endif 8040 /* update the timer now */ 8041 hnet->last_sent_time = *now; 8042 return (hnet); 8043 } 8044 /* Nothing to HB */ 8045 return (NULL); 8046 } 8047 8048 int 8049 sctp_send_hb(struct sctp_tcb *stcb, int user_req, struct sctp_nets *u_net) 8050 { 8051 struct sctp_tmit_chunk *chk; 8052 struct sctp_nets *net; 8053 struct sctp_heartbeat_chunk *hb; 8054 struct timeval now; 8055 struct sockaddr_in *sin; 8056 struct sockaddr_in6 *sin6; 8057 8058 SCTP_TCB_LOCK_ASSERT(stcb); 8059 if (user_req == 0) { 8060 net = sctp_select_hb_destination(stcb, &now); 8061 if (net == NULL) { 8062 /* 8063 * All our busy none to send to, just start the 8064 * timer again. 
8065 */ 8066 if (stcb->asoc.state == 0) { 8067 return (0); 8068 } 8069 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, 8070 stcb->sctp_ep, 8071 stcb, 8072 net); 8073 return (0); 8074 } 8075 } else { 8076 net = u_net; 8077 if (net == NULL) { 8078 return (0); 8079 } 8080 SCTP_GETTIME_TIMEVAL(&now); 8081 } 8082 sin = (struct sockaddr_in *)&net->ro._l_addr; 8083 if (sin->sin_family != AF_INET) { 8084 if (sin->sin_family != AF_INET6) { 8085 /* huh */ 8086 return (0); 8087 } 8088 } 8089 sctp_alloc_a_chunk(stcb, chk); 8090 if (chk == NULL) { 8091 #ifdef SCTP_DEBUG 8092 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) { 8093 printf("Gak, can't get a chunk for hb\n"); 8094 } 8095 #endif 8096 return (0); 8097 } 8098 chk->copy_by_ref = 0; 8099 chk->rec.chunk_id.id = SCTP_HEARTBEAT_REQUEST; 8100 chk->rec.chunk_id.can_take_data = 1; 8101 chk->asoc = &stcb->asoc; 8102 chk->send_size = sizeof(struct sctp_heartbeat_chunk); 8103 8104 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER); 8105 if (chk->data == NULL) { 8106 sctp_free_a_chunk(stcb, chk); 8107 return (0); 8108 } 8109 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); 8110 SCTP_BUF_LEN(chk->data) = chk->send_size; 8111 chk->sent = SCTP_DATAGRAM_UNSENT; 8112 chk->snd_count = 0; 8113 chk->whoTo = net; 8114 atomic_add_int(&chk->whoTo->ref_count, 1); 8115 /* Now we have a mbuf that we can fill in with the details */ 8116 hb = mtod(chk->data, struct sctp_heartbeat_chunk *); 8117 8118 /* fill out chunk header */ 8119 hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST; 8120 hb->ch.chunk_flags = 0; 8121 hb->ch.chunk_length = htons(chk->send_size); 8122 /* Fill out hb parameter */ 8123 hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO); 8124 hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param)); 8125 hb->heartbeat.hb_info.time_value_1 = now.tv_sec; 8126 hb->heartbeat.hb_info.time_value_2 = now.tv_usec; 8127 /* Did our user request this one, put it in */ 8128 
hb->heartbeat.hb_info.user_req = user_req; 8129 hb->heartbeat.hb_info.addr_family = sin->sin_family; 8130 hb->heartbeat.hb_info.addr_len = sin->sin_len; 8131 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) { 8132 /* 8133 * we only take from the entropy pool if the address is not 8134 * confirmed. 8135 */ 8136 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep); 8137 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep); 8138 } else { 8139 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = 0; 8140 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = 0; 8141 } 8142 if (sin->sin_family == AF_INET) { 8143 memcpy(hb->heartbeat.hb_info.address, &sin->sin_addr, sizeof(sin->sin_addr)); 8144 } else if (sin->sin_family == AF_INET6) { 8145 /* We leave the scope the way it is in our lookup table. */ 8146 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr; 8147 memcpy(hb->heartbeat.hb_info.address, &sin6->sin6_addr, sizeof(sin6->sin6_addr)); 8148 } else { 8149 /* huh compiler bug */ 8150 return (0); 8151 } 8152 /* ok we have a destination that needs a beat */ 8153 /* lets do the theshold management Qiaobing style */ 8154 8155 if (sctp_threshold_management(stcb->sctp_ep, stcb, net, 8156 stcb->asoc.max_send_times)) { 8157 /* 8158 * we have lost the association, in a way this is quite bad 8159 * since we really are one less time since we really did not 8160 * send yet. This is the down side to the Q's style as 8161 * defined in the RFC and not my alternate style defined in 8162 * the RFC. 
8163 */ 8164 atomic_subtract_int(&chk->whoTo->ref_count, 1); 8165 if (chk->data != NULL) { 8166 sctp_m_freem(chk->data); 8167 chk->data = NULL; 8168 } 8169 sctp_free_a_chunk(stcb, chk); 8170 return (-1); 8171 } 8172 net->hb_responded = 0; 8173 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next); 8174 stcb->asoc.ctrl_queue_cnt++; 8175 SCTP_STAT_INCR(sctps_sendheartbeat); 8176 /* 8177 * Call directly med level routine to put out the chunk. It will 8178 * always tumble out control chunks aka HB but it may even tumble 8179 * out data too. 8180 */ 8181 return (1); 8182 } 8183 8184 void 8185 sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net, 8186 uint32_t high_tsn) 8187 { 8188 struct sctp_association *asoc; 8189 struct sctp_ecne_chunk *ecne; 8190 struct sctp_tmit_chunk *chk; 8191 8192 asoc = &stcb->asoc; 8193 SCTP_TCB_LOCK_ASSERT(stcb); 8194 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { 8195 if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) { 8196 /* found a previous ECN_ECHO update it if needed */ 8197 ecne = mtod(chk->data, struct sctp_ecne_chunk *); 8198 ecne->tsn = htonl(high_tsn); 8199 return; 8200 } 8201 } 8202 /* nope could not find one to update so we must build one */ 8203 sctp_alloc_a_chunk(stcb, chk); 8204 if (chk == NULL) { 8205 return; 8206 } 8207 chk->copy_by_ref = 0; 8208 SCTP_STAT_INCR(sctps_sendecne); 8209 chk->rec.chunk_id.id = SCTP_ECN_ECHO; 8210 chk->rec.chunk_id.can_take_data = 0; 8211 chk->asoc = &stcb->asoc; 8212 chk->send_size = sizeof(struct sctp_ecne_chunk); 8213 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER); 8214 if (chk->data == NULL) { 8215 sctp_free_a_chunk(stcb, chk); 8216 return; 8217 } 8218 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); 8219 SCTP_BUF_LEN(chk->data) = chk->send_size; 8220 chk->sent = SCTP_DATAGRAM_UNSENT; 8221 chk->snd_count = 0; 8222 chk->whoTo = net; 8223 atomic_add_int(&chk->whoTo->ref_count, 1); 8224 stcb->asoc.ecn_echo_cnt_onq++; 8225 ecne = 
mtod(chk->data, struct sctp_ecne_chunk *); 8226 ecne->ch.chunk_type = SCTP_ECN_ECHO; 8227 ecne->ch.chunk_flags = 0; 8228 ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk)); 8229 ecne->tsn = htonl(high_tsn); 8230 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next); 8231 asoc->ctrl_queue_cnt++; 8232 } 8233 8234 void 8235 sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net, 8236 struct mbuf *m, int iphlen, int bad_crc) 8237 { 8238 struct sctp_association *asoc; 8239 struct sctp_pktdrop_chunk *drp; 8240 struct sctp_tmit_chunk *chk; 8241 uint8_t *datap; 8242 int len; 8243 unsigned int small_one; 8244 struct ip *iph; 8245 8246 long spc; 8247 8248 asoc = &stcb->asoc; 8249 SCTP_TCB_LOCK_ASSERT(stcb); 8250 if (asoc->peer_supports_pktdrop == 0) { 8251 /* 8252 * peer must declare support before I send one. 8253 */ 8254 return; 8255 } 8256 if (stcb->sctp_socket == NULL) { 8257 return; 8258 } 8259 sctp_alloc_a_chunk(stcb, chk); 8260 if (chk == NULL) { 8261 return; 8262 } 8263 chk->copy_by_ref = 0; 8264 iph = mtod(m, struct ip *); 8265 if (iph == NULL) { 8266 return; 8267 } 8268 if (iph->ip_v == IPVERSION) { 8269 /* IPv4 */ 8270 len = chk->send_size = iph->ip_len; 8271 } else { 8272 struct ip6_hdr *ip6h; 8273 8274 /* IPv6 */ 8275 ip6h = mtod(m, struct ip6_hdr *); 8276 len = chk->send_size = htons(ip6h->ip6_plen); 8277 } 8278 chk->asoc = &stcb->asoc; 8279 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA); 8280 if (chk->data == NULL) { 8281 jump_out: 8282 sctp_free_a_chunk(stcb, chk); 8283 return; 8284 } 8285 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); 8286 drp = mtod(chk->data, struct sctp_pktdrop_chunk *); 8287 if (drp == NULL) { 8288 sctp_m_freem(chk->data); 8289 chk->data = NULL; 8290 goto jump_out; 8291 } 8292 small_one = asoc->smallest_mtu; 8293 if (small_one > MCLBYTES) { 8294 /* Only one cluster worth of data MAX */ 8295 small_one = MCLBYTES; 8296 } 8297 chk->book_size = SCTP_SIZE32((chk->send_size + 
sizeof(struct sctp_pktdrop_chunk) + 8298 sizeof(struct sctphdr) + SCTP_MED_OVERHEAD)); 8299 chk->book_size_scale = 0; 8300 if (chk->book_size > small_one) { 8301 drp->ch.chunk_flags = SCTP_PACKET_TRUNCATED; 8302 drp->trunc_len = htons(chk->send_size); 8303 chk->send_size = small_one - (SCTP_MED_OVERHEAD + 8304 sizeof(struct sctp_pktdrop_chunk) + 8305 sizeof(struct sctphdr)); 8306 len = chk->send_size; 8307 } else { 8308 /* no truncation needed */ 8309 drp->ch.chunk_flags = 0; 8310 drp->trunc_len = htons(0); 8311 } 8312 if (bad_crc) { 8313 drp->ch.chunk_flags |= SCTP_BADCRC; 8314 } 8315 chk->send_size += sizeof(struct sctp_pktdrop_chunk); 8316 SCTP_BUF_LEN(chk->data) = chk->send_size; 8317 chk->sent = SCTP_DATAGRAM_UNSENT; 8318 chk->snd_count = 0; 8319 if (net) { 8320 /* we should hit here */ 8321 chk->whoTo = net; 8322 } else { 8323 chk->whoTo = asoc->primary_destination; 8324 } 8325 atomic_add_int(&chk->whoTo->ref_count, 1); 8326 chk->rec.chunk_id.id = SCTP_PACKET_DROPPED; 8327 chk->rec.chunk_id.can_take_data = 1; 8328 drp->ch.chunk_type = SCTP_PACKET_DROPPED; 8329 drp->ch.chunk_length = htons(chk->send_size); 8330 spc = stcb->sctp_socket->so_rcv.sb_hiwat; 8331 if (spc < 0) { 8332 spc = 0; 8333 } 8334 drp->bottle_bw = htonl(spc); 8335 if (asoc->my_rwnd) { 8336 drp->current_onq = htonl(asoc->size_on_reasm_queue + 8337 asoc->size_on_all_streams + 8338 asoc->my_rwnd_control_len + 8339 stcb->sctp_socket->so_rcv.sb_cc); 8340 } else { 8341 /* 8342 * If my rwnd is 0, possibly from mbuf depletion as well as 8343 * space used, tell the peer there is NO space aka onq == bw 8344 */ 8345 drp->current_onq = htonl(spc); 8346 } 8347 drp->reserved = 0; 8348 datap = drp->data; 8349 m_copydata(m, iphlen, len, (caddr_t)datap); 8350 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next); 8351 asoc->ctrl_queue_cnt++; 8352 } 8353 8354 void 8355 sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn) 8356 { 8357 struct sctp_association *asoc; 8358 
	struct sctp_cwr_chunk *cwr;
	struct sctp_tmit_chunk *chk;

	asoc = &stcb->asoc;
	SCTP_TCB_LOCK_ASSERT(stcb);
	/*
	 * If a CWR is already queued, just raise its TSN instead of
	 * queueing a second one.
	 */
	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
		if (chk->rec.chunk_id.id == SCTP_ECN_CWR) {
			/* found a previous ECN_CWR update it if needed */
			cwr = mtod(chk->data, struct sctp_cwr_chunk *);
			if (compare_with_wrap(high_tsn, ntohl(cwr->tsn),
			    MAX_TSN)) {
				cwr->tsn = htonl(high_tsn);
			}
			return;
		}
	}
	/* nope could not find one to update so we must build one */
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		return;
	}
	chk->copy_by_ref = 0;
	chk->rec.chunk_id.id = SCTP_ECN_CWR;
	chk->rec.chunk_id.can_take_data = 1;
	chk->asoc = &stcb->asoc;
	chk->send_size = sizeof(struct sctp_cwr_chunk);
	chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
	if (chk->data == NULL) {
		sctp_free_a_chunk(stcb, chk);
		return;
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
	SCTP_BUF_LEN(chk->data) = chk->send_size;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->whoTo = net;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	cwr = mtod(chk->data, struct sctp_cwr_chunk *);
	cwr->ch.chunk_type = SCTP_ECN_CWR;
	cwr->ch.chunk_flags = 0;
	cwr->ch.chunk_length = htons(sizeof(struct sctp_cwr_chunk));
	cwr->tsn = htonl(high_tsn);
	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
	asoc->ctrl_queue_cnt++;
}

/*
 * Append an "outgoing stream reset" request parameter to the stream-reset
 * chunk already started in chk->data, and grow the chunk/book sizes to
 * match.  'list' holds 'number_entries' stream ids (host order).
 */
void
sctp_add_stream_reset_out(struct sctp_tmit_chunk *chk,
    int number_entries, uint16_t * list,
    uint32_t seq, uint32_t resp_seq, uint32_t last_sent)
{
	int len, old_len, i;
	struct sctp_stream_reset_out_request *req_out;
	struct sctp_chunkhdr *ch;

	ch = mtod(chk->data, struct sctp_chunkhdr *);

	/* current (pre-append) chunk length, padded to 4 bytes */
	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));

	/* get to new offset for the param. */
	req_out = (struct sctp_stream_reset_out_request *)((caddr_t)ch + len);
	/* now how long will this param be? */
	len = (sizeof(struct sctp_stream_reset_out_request) + (sizeof(uint16_t) * number_entries));
	req_out->ph.param_type = htons(SCTP_STR_RESET_OUT_REQUEST);
	req_out->ph.param_length = htons(len);
	req_out->request_seq = htonl(seq);
	req_out->response_seq = htonl(resp_seq);
	req_out->send_reset_at_tsn = htonl(last_sent);
	if (number_entries) {
		for (i = 0; i < number_entries; i++) {
			req_out->list_of_streams[i] = htons(list[i]);
		}
	}
	if (SCTP_SIZE32(len) > len) {
		/*
		 * Need to worry about the pad we may end up adding to the
		 * end. This is easy since the struct is either aligned to 4
		 * bytes or 2 bytes off.
		 */
		req_out->list_of_streams[number_entries] = 0;
	}
	/* now fix the chunk length */
	ch->chunk_length = htons(len + old_len);
	chk->book_size = len + old_len;
	chk->book_size_scale = 0;
	chk->send_size = SCTP_SIZE32(chk->book_size);
	SCTP_BUF_LEN(chk->data) = chk->send_size;
	return;
}


/*
 * Append an "incoming stream reset" request parameter to the stream-reset
 * chunk in chk->data; same length/padding bookkeeping as the _out variant.
 */
void
sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk,
    int number_entries, uint16_t * list,
    uint32_t seq)
{
	int len, old_len, i;
	struct sctp_stream_reset_in_request *req_in;
	struct sctp_chunkhdr *ch;

	ch = mtod(chk->data, struct sctp_chunkhdr *);

	/* current (pre-append) chunk length, padded to 4 bytes */
	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));

	/* get to new offset for the param. */
	req_in = (struct sctp_stream_reset_in_request *)((caddr_t)ch + len);
	/* now how long will this param be?
*/ 8467 len = (sizeof(struct sctp_stream_reset_in_request) + (sizeof(uint16_t) * number_entries)); 8468 req_in->ph.param_type = htons(SCTP_STR_RESET_IN_REQUEST); 8469 req_in->ph.param_length = htons(len); 8470 req_in->request_seq = htonl(seq); 8471 if (number_entries) { 8472 for (i = 0; i < number_entries; i++) { 8473 req_in->list_of_streams[i] = htons(list[i]); 8474 } 8475 } 8476 if (SCTP_SIZE32(len) > len) { 8477 /* 8478 * Need to worry about the pad we may end up adding to the 8479 * end. This is easy since the struct is either aligned to 4 8480 * bytes or 2 bytes off. 8481 */ 8482 req_in->list_of_streams[number_entries] = 0; 8483 } 8484 /* now fix the chunk length */ 8485 ch->chunk_length = htons(len + old_len); 8486 chk->book_size = len + old_len; 8487 chk->book_size_scale = 0; 8488 chk->send_size = SCTP_SIZE32(chk->book_size); 8489 SCTP_BUF_LEN(chk->data) = chk->send_size; 8490 return; 8491 } 8492 8493 8494 void 8495 sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk, 8496 uint32_t seq) 8497 { 8498 int len, old_len; 8499 struct sctp_stream_reset_tsn_request *req_tsn; 8500 struct sctp_chunkhdr *ch; 8501 8502 ch = mtod(chk->data, struct sctp_chunkhdr *); 8503 8504 8505 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length)); 8506 8507 /* get to new offset for the param. */ 8508 req_tsn = (struct sctp_stream_reset_tsn_request *)((caddr_t)ch + len); 8509 /* now how long will this param be? 
*/ 8510 len = sizeof(struct sctp_stream_reset_tsn_request); 8511 req_tsn->ph.param_type = htons(SCTP_STR_RESET_TSN_REQUEST); 8512 req_tsn->ph.param_length = htons(len); 8513 req_tsn->request_seq = htonl(seq); 8514 8515 /* now fix the chunk length */ 8516 ch->chunk_length = htons(len + old_len); 8517 chk->send_size = len + old_len; 8518 chk->book_size = SCTP_SIZE32(chk->send_size); 8519 chk->book_size_scale = 0; 8520 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size); 8521 return; 8522 } 8523 8524 void 8525 sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk, 8526 uint32_t resp_seq, uint32_t result) 8527 { 8528 int len, old_len; 8529 struct sctp_stream_reset_response *resp; 8530 struct sctp_chunkhdr *ch; 8531 8532 ch = mtod(chk->data, struct sctp_chunkhdr *); 8533 8534 8535 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length)); 8536 8537 /* get to new offset for the param. */ 8538 resp = (struct sctp_stream_reset_response *)((caddr_t)ch + len); 8539 /* now how long will this param be? */ 8540 len = sizeof(struct sctp_stream_reset_response); 8541 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE); 8542 resp->ph.param_length = htons(len); 8543 resp->response_seq = htonl(resp_seq); 8544 resp->result = ntohl(result); 8545 8546 /* now fix the chunk length */ 8547 ch->chunk_length = htons(len + old_len); 8548 chk->book_size = len + old_len; 8549 chk->book_size_scale = 0; 8550 chk->send_size = SCTP_SIZE32(chk->book_size); 8551 SCTP_BUF_LEN(chk->data) = chk->send_size; 8552 return; 8553 8554 } 8555 8556 8557 void 8558 sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk, 8559 uint32_t resp_seq, uint32_t result, 8560 uint32_t send_una, uint32_t recv_next) 8561 { 8562 int len, old_len; 8563 struct sctp_stream_reset_response_tsn *resp; 8564 struct sctp_chunkhdr *ch; 8565 8566 ch = mtod(chk->data, struct sctp_chunkhdr *); 8567 8568 8569 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length)); 8570 8571 /* get to new offset for the param. 
*/ 8572 resp = (struct sctp_stream_reset_response_tsn *)((caddr_t)ch + len); 8573 /* now how long will this param be? */ 8574 len = sizeof(struct sctp_stream_reset_response_tsn); 8575 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE); 8576 resp->ph.param_length = htons(len); 8577 resp->response_seq = htonl(resp_seq); 8578 resp->result = htonl(result); 8579 resp->senders_next_tsn = htonl(send_una); 8580 resp->receivers_next_tsn = htonl(recv_next); 8581 8582 /* now fix the chunk length */ 8583 ch->chunk_length = htons(len + old_len); 8584 chk->book_size = len + old_len; 8585 chk->send_size = SCTP_SIZE32(chk->book_size); 8586 chk->book_size_scale = 0; 8587 SCTP_BUF_LEN(chk->data) = chk->send_size; 8588 return; 8589 } 8590 8591 8592 int 8593 sctp_send_str_reset_req(struct sctp_tcb *stcb, 8594 int number_entries, uint16_t * list, 8595 uint8_t send_out_req, uint32_t resp_seq, 8596 uint8_t send_in_req, 8597 uint8_t send_tsn_req) 8598 { 8599 8600 struct sctp_association *asoc; 8601 struct sctp_tmit_chunk *chk; 8602 struct sctp_chunkhdr *ch; 8603 uint32_t seq; 8604 8605 asoc = &stcb->asoc; 8606 if (asoc->stream_reset_outstanding) { 8607 /* 8608 * Already one pending, must get ACK back to clear the flag. 
		 */
		return (EBUSY);
	}
	if ((send_out_req == 0) && (send_in_req == 0) && (send_tsn_req == 0)) {
		/* nothing to do */
		return (EINVAL);
	}
	if (send_tsn_req && (send_out_req || send_in_req)) {
		/* error, can't do that */
		return (EINVAL);
	}
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		return (ENOMEM);
	}
	chk->copy_by_ref = 0;
	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
	chk->rec.chunk_id.can_take_data = 0;
	chk->asoc = &stcb->asoc;
	/* start with a bare chunk header; params are appended below */
	chk->book_size = sizeof(struct sctp_chunkhdr);
	chk->send_size = SCTP_SIZE32(chk->book_size);
	chk->book_size_scale = 0;

	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
	if (chk->data == NULL) {
		sctp_free_a_chunk(stcb, chk);
		return (ENOMEM);
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);

	/* setup chunk parameters */
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->whoTo = asoc->primary_destination;
	atomic_add_int(&chk->whoTo->ref_count, 1);

	ch = mtod(chk->data, struct sctp_chunkhdr *);
	ch->chunk_type = SCTP_STREAM_RESET;
	ch->chunk_flags = 0;
	ch->chunk_length = htons(chk->book_size);
	SCTP_BUF_LEN(chk->data) = chk->send_size;

	/* append the requested parameters, bumping the outstanding count */
	seq = stcb->asoc.str_reset_seq_out;
	if (send_out_req) {
		sctp_add_stream_reset_out(chk, number_entries, list,
		    seq, resp_seq, (stcb->asoc.sending_seq - 1));
		asoc->stream_reset_out_is_outstanding = 1;
		seq++;
		asoc->stream_reset_outstanding++;
	}
	if (send_in_req) {
		sctp_add_stream_reset_in(chk, number_entries, list, seq);
		asoc->stream_reset_outstanding++;
	}
	if (send_tsn_req) {
		sctp_add_stream_reset_tsn(chk, seq);
		asoc->stream_reset_outstanding++;
	}
	asoc->str_reset = chk;

	/* insert the chunk for sending */
	TAILQ_INSERT_TAIL(&asoc->control_send_queue,
	    chk,
	    sctp_next);
	asoc->ctrl_queue_cnt++;
	sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
	return (0);
}

/*
 * Build and transmit an out-of-the-blue ABORT in response to packet 'm'
 * (IP header at offset 0, SCTP header at 'iphlen'), optionally carrying
 * the error-cause chain 'err_cause'.  Consumes err_cause.
 */
void
sctp_send_abort(struct mbuf *m, int iphlen, struct sctphdr *sh, uint32_t vtag,
    struct mbuf *err_cause)
{
	/*
	 * Formulate the abort message, and send it back down.
	 */
	struct mbuf *o_pak;
	struct mbuf *mout;
	struct sctp_abort_msg *abm;
	struct ip *iph, *iph_out;
	struct ip6_hdr *ip6, *ip6_out;
	int iphlen_out;

	/* don't respond to ABORT with ABORT */
	if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) {
		if (err_cause)
			sctp_m_freem(err_cause);
		return;
	}
	o_pak = SCTP_GET_HEADER_FOR_OUTPUT((sizeof(struct ip6_hdr) + sizeof(struct sctp_abort_msg)));
	if (o_pak == NULL) {
		if (err_cause)
			sctp_m_freem(err_cause);
		return;
	}
	mout = SCTP_HEADER_TO_CHAIN(o_pak);
	iph = mtod(m, struct ip *);
	iph_out = NULL;
	ip6_out = NULL;
	if (iph->ip_v == IPVERSION) {
		iph_out = mtod(mout, struct ip *);
		SCTP_BUF_LEN(mout) = sizeof(*iph_out) + sizeof(*abm);
		SCTP_BUF_NEXT(mout) = err_cause;

		/* Fill in the IP header for the ABORT */
		iph_out->ip_v = IPVERSION;
		iph_out->ip_hl = (sizeof(struct ip) / 4);
		iph_out->ip_tos = (u_char)0;
		iph_out->ip_id = 0;
		iph_out->ip_off = 0;
		iph_out->ip_ttl = MAXTTL;
		iph_out->ip_p = IPPROTO_SCTP;
		/* swap src/dst so the ABORT goes back to the sender */
		iph_out->ip_src.s_addr = iph->ip_dst.s_addr;
		iph_out->ip_dst.s_addr = iph->ip_src.s_addr;
		/* let IP layer calculate this */
		iph_out->ip_sum = 0;

		iphlen_out = sizeof(*iph_out);
		abm = (struct sctp_abort_msg *)((caddr_t)iph_out + iphlen_out);
	} else if (iph->ip_v == (IPV6_VERSION >> 4)) {
		ip6 = (struct ip6_hdr *)iph;
		ip6_out = mtod(mout, struct ip6_hdr *);
		SCTP_BUF_LEN(mout) = sizeof(*ip6_out) + sizeof(*abm);
		SCTP_BUF_NEXT(mout) = err_cause;

		/* Fill in the IP6 header for the
ABORT */ 8735 ip6_out->ip6_flow = ip6->ip6_flow; 8736 ip6_out->ip6_hlim = ip6_defhlim; 8737 ip6_out->ip6_nxt = IPPROTO_SCTP; 8738 ip6_out->ip6_src = ip6->ip6_dst; 8739 ip6_out->ip6_dst = ip6->ip6_src; 8740 8741 iphlen_out = sizeof(*ip6_out); 8742 abm = (struct sctp_abort_msg *)((caddr_t)ip6_out + iphlen_out); 8743 } else { 8744 /* Currently not supported */ 8745 return; 8746 } 8747 8748 abm->sh.src_port = sh->dest_port; 8749 abm->sh.dest_port = sh->src_port; 8750 abm->sh.checksum = 0; 8751 if (vtag == 0) { 8752 abm->sh.v_tag = sh->v_tag; 8753 abm->msg.ch.chunk_flags = SCTP_HAD_NO_TCB; 8754 } else { 8755 abm->sh.v_tag = htonl(vtag); 8756 abm->msg.ch.chunk_flags = 0; 8757 } 8758 abm->msg.ch.chunk_type = SCTP_ABORT_ASSOCIATION; 8759 8760 if (err_cause) { 8761 struct mbuf *m_tmp = err_cause; 8762 int err_len = 0; 8763 8764 /* get length of the err_cause chain */ 8765 while (m_tmp != NULL) { 8766 err_len += SCTP_BUF_LEN(m_tmp); 8767 m_tmp = SCTP_BUF_NEXT(m_tmp); 8768 } 8769 SCTP_HEADER_LEN(o_pak) = SCTP_BUF_LEN(mout) + err_len; 8770 if (err_len % 4) { 8771 /* need pad at end of chunk */ 8772 uint32_t cpthis = 0; 8773 int padlen; 8774 8775 padlen = 4 - (SCTP_HEADER_LEN(o_pak) % 4); 8776 m_copyback(mout, SCTP_HEADER_LEN(o_pak), padlen, (caddr_t)&cpthis); 8777 SCTP_HEADER_LEN(o_pak) += padlen; 8778 } 8779 abm->msg.ch.chunk_length = htons(sizeof(abm->msg.ch) + err_len); 8780 } else { 8781 SCTP_HEADER_LEN(mout) = SCTP_BUF_LEN(mout); 8782 abm->msg.ch.chunk_length = htons(sizeof(abm->msg.ch)); 8783 } 8784 8785 /* add checksum */ 8786 if ((sctp_no_csum_on_loopback) && SCTP_IS_IT_LOOPBACK(m)) { 8787 abm->sh.checksum = 0; 8788 } else { 8789 abm->sh.checksum = sctp_calculate_sum(mout, NULL, iphlen_out); 8790 } 8791 if (iph_out != NULL) { 8792 struct route ro; 8793 8794 /* zap the stack pointer to the route */ 8795 bzero(&ro, sizeof ro); 8796 #ifdef SCTP_DEBUG 8797 if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) { 8798 printf("sctp_send_abort calling ip_output:\n"); 8799 
sctp_print_address_pkt(iph_out, &abm->sh); 8800 } 8801 #endif 8802 /* set IPv4 length */ 8803 iph_out->ip_len = SCTP_HEADER_LEN(o_pak); 8804 /* out it goes */ 8805 (void)ip_output(o_pak, 0, &ro, IP_RAWOUTPUT, NULL 8806 ,NULL 8807 ); 8808 /* Free the route if we got one back */ 8809 if (ro.ro_rt) 8810 RTFREE(ro.ro_rt); 8811 } else if (ip6_out != NULL) { 8812 struct route_in6 ro; 8813 8814 /* zap the stack pointer to the route */ 8815 bzero(&ro, sizeof(ro)); 8816 #ifdef SCTP_DEBUG 8817 if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) { 8818 printf("sctp_send_abort calling ip6_output:\n"); 8819 sctp_print_address_pkt((struct ip *)ip6_out, &abm->sh); 8820 } 8821 #endif 8822 ip6_out->ip6_plen = SCTP_HEADER_LEN(o_pak) - sizeof(*ip6_out); 8823 ip6_output(o_pak, NULL, &ro, 0, NULL, NULL 8824 ,NULL 8825 ); 8826 /* Free the route if we got one back */ 8827 if (ro.ro_rt) 8828 RTFREE(ro.ro_rt); 8829 } 8830 SCTP_STAT_INCR(sctps_sendpackets); 8831 SCTP_STAT_INCR_COUNTER64(sctps_outpackets); 8832 } 8833 8834 void 8835 sctp_send_operr_to(struct mbuf *m, int iphlen, 8836 struct mbuf *scm, 8837 uint32_t vtag) 8838 { 8839 struct mbuf *o_pak; 8840 struct sctphdr *ihdr; 8841 int retcode; 8842 struct sctphdr *ohdr; 8843 struct sctp_chunkhdr *ophdr; 8844 8845 struct ip *iph; 8846 8847 #ifdef SCTP_DEBUG 8848 struct sockaddr_in6 lsa6, fsa6; 8849 8850 #endif 8851 uint32_t val; 8852 struct mbuf *at; 8853 int len; 8854 8855 iph = mtod(m, struct ip *); 8856 ihdr = (struct sctphdr *)((caddr_t)iph + iphlen); 8857 8858 SCTP_BUF_PREPEND(scm, (sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr)), M_DONTWAIT); 8859 if (scm == NULL) { 8860 /* can't send because we can't add a mbuf */ 8861 return; 8862 } 8863 ohdr = mtod(scm, struct sctphdr *); 8864 ohdr->src_port = ihdr->dest_port; 8865 ohdr->dest_port = ihdr->src_port; 8866 ohdr->v_tag = vtag; 8867 ohdr->checksum = 0; 8868 ophdr = (struct sctp_chunkhdr *)(ohdr + 1); 8869 ophdr->chunk_type = SCTP_OPERATION_ERROR; 8870 ophdr->chunk_flags = 0; 8871 len = 0; 
	at = scm;
	/* total length of the chunk chain (headers already prepended) */
	while (at) {
		len += SCTP_BUF_LEN(at);
		at = SCTP_BUF_NEXT(at);
	}

	ophdr->chunk_length = htons(len - sizeof(struct sctphdr));
	if (len % 4) {
		/* need padding */
		uint32_t cpthis = 0;
		int padlen;

		padlen = 4 - (len % 4);
		m_copyback(scm, len, padlen, (caddr_t)&cpthis);
		len += padlen;
	}
	if ((sctp_no_csum_on_loopback) && SCTP_IS_IT_LOOPBACK(m)) {
		val = 0;
	} else {
		val = sctp_calculate_sum(scm, NULL, 0);
	}
	ohdr->checksum = val;
	if (iph->ip_v == IPVERSION) {
		/* V4 */
		struct ip *out;
		struct route ro;

		o_pak = SCTP_GET_HEADER_FOR_OUTPUT(sizeof(struct ip));
		if (o_pak == NULL) {
			sctp_m_freem(scm);
			return;
		}
		SCTP_BUF_LEN(SCTP_HEADER_TO_CHAIN(o_pak)) = sizeof(struct ip);
		len += sizeof(struct ip);
		SCTP_ATTACH_CHAIN(o_pak, scm, len);
		bzero(&ro, sizeof ro);
		out = mtod(SCTP_HEADER_TO_CHAIN(o_pak), struct ip *);
		/* reply goes back the way the bad packet came */
		out->ip_v = iph->ip_v;
		out->ip_hl = (sizeof(struct ip) / 4);
		out->ip_tos = iph->ip_tos;
		out->ip_id = iph->ip_id;
		out->ip_off = 0;
		out->ip_ttl = MAXTTL;
		out->ip_p = IPPROTO_SCTP;
		out->ip_sum = 0;
		out->ip_src = iph->ip_dst;
		out->ip_dst = iph->ip_src;
		out->ip_len = SCTP_HEADER_LEN(o_pak);
		/* NOTE(review): retcode is never examined — best-effort send */
		retcode = ip_output(o_pak, 0, &ro, IP_RAWOUTPUT, NULL
		    ,NULL
		    );
		SCTP_STAT_INCR(sctps_sendpackets);
		SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
		/* Free the route if we got one back */
		if (ro.ro_rt)
			RTFREE(ro.ro_rt);
	} else {
		/* V6 */
		struct route_in6 ro;
		struct ip6_hdr *out6, *in6;

		o_pak = SCTP_GET_HEADER_FOR_OUTPUT(sizeof(struct ip6_hdr));
		if (o_pak == NULL) {
			sctp_m_freem(scm);
			return;
		}
		SCTP_BUF_LEN(SCTP_HEADER_TO_CHAIN(o_pak)) = sizeof(struct ip6_hdr);
		len += sizeof(struct ip6_hdr);
		SCTP_ATTACH_CHAIN(o_pak, scm, len);

		bzero(&ro, sizeof
		    ro);
		in6 = mtod(m, struct ip6_hdr *);
		out6 = mtod(SCTP_HEADER_TO_CHAIN(o_pak), struct ip6_hdr *);
		out6->ip6_flow = in6->ip6_flow;
		out6->ip6_hlim = ip6_defhlim;
		out6->ip6_nxt = IPPROTO_SCTP;
		out6->ip6_src = in6->ip6_dst;
		out6->ip6_dst = in6->ip6_src;
		out6->ip6_plen = len - sizeof(struct ip6_hdr);
#ifdef SCTP_DEBUG
		bzero(&lsa6, sizeof(lsa6));
		lsa6.sin6_len = sizeof(lsa6);
		lsa6.sin6_family = AF_INET6;
		lsa6.sin6_addr = out6->ip6_src;
		bzero(&fsa6, sizeof(fsa6));
		fsa6.sin6_len = sizeof(fsa6);
		fsa6.sin6_family = AF_INET6;
		fsa6.sin6_addr = out6->ip6_dst;
		if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) {
			printf("sctp_operr_to calling ipv6 output:\n");
			printf("src: ");
			sctp_print_address((struct sockaddr *)&lsa6);
			printf("dst ");
			sctp_print_address((struct sockaddr *)&fsa6);
		}
#endif				/* SCTP_DEBUG */
		ip6_output(o_pak, NULL, &ro, 0, NULL, NULL
		    ,NULL
		    );
		SCTP_STAT_INCR(sctps_sendpackets);
		SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
		/* Free the route if we got one back */
		if (ro.ro_rt)
			RTFREE(ro.ro_rt);
	}
}



/*
 * Copy up to max_send_len bytes of user data from 'uio' into a fresh mbuf
 * chain; returns the head (NULL + *error on failure), updates *sndout with
 * the byte count and *new_tail with the last mbuf.
 */
static struct mbuf *
sctp_copy_resume(struct sctp_stream_queue_pending *sp,
    struct uio *uio,
    struct sctp_sndrcvinfo *srcv,
    int max_send_len,
    int user_marks_eor,
    int *error,
    uint32_t * sndout,
    struct mbuf **new_tail)
{
	int left, cancpy, willcpy;
	struct mbuf *m, *prev, *head;

	left = min(uio->uio_resid, max_send_len);
	/* Always get a header just in case */
	head = sctp_get_mbuf_for_msg(left, 0, M_WAIT, 0, MT_DATA);
	/*
	 * NOTE(review): no NULL check on head before M_TRAILINGSPACE —
	 * presumably M_WAIT cannot fail here; confirm.
	 */
	cancpy = M_TRAILINGSPACE(head);
	willcpy = min(cancpy, left);
	*error = uiomove(mtod(head, caddr_t), willcpy, uio);
	if (*error) {
		sctp_m_freem(head);
		return (NULL);
	}
	*sndout += willcpy;
	left -= willcpy;
	SCTP_BUF_LEN(head) = willcpy;
	m = head;
	*new_tail =
head; 9009 while (left > 0) { 9010 /* move in user data */ 9011 SCTP_BUF_NEXT(m) = sctp_get_mbuf_for_msg(left, 0, M_WAIT, 0, MT_DATA); 9012 if (SCTP_BUF_NEXT(m) == NULL) { 9013 sctp_m_freem(head); 9014 *new_tail = NULL; 9015 *error = ENOMEM; 9016 return (NULL); 9017 } 9018 prev = m; 9019 m = SCTP_BUF_NEXT(m); 9020 cancpy = M_TRAILINGSPACE(m); 9021 willcpy = min(cancpy, left); 9022 *error = uiomove(mtod(m, caddr_t), willcpy, uio); 9023 if (*error) { 9024 sctp_m_freem(head); 9025 *new_tail = NULL; 9026 *error = EFAULT; 9027 return (NULL); 9028 } 9029 SCTP_BUF_LEN(m) = willcpy; 9030 left -= willcpy; 9031 *sndout += willcpy; 9032 *new_tail = m; 9033 if (left == 0) { 9034 SCTP_BUF_NEXT(m) = NULL; 9035 } 9036 } 9037 return (head); 9038 } 9039 9040 static int 9041 sctp_copy_one(struct sctp_stream_queue_pending *sp, 9042 struct uio *uio, 9043 int resv_upfront) 9044 { 9045 int left, cancpy, willcpy, error; 9046 struct mbuf *m, *head; 9047 int cpsz = 0; 9048 9049 /* First one gets a header */ 9050 left = sp->length; 9051 head = m = sctp_get_mbuf_for_msg((left + resv_upfront), 0, M_WAIT, 0, MT_DATA); 9052 if (m == NULL) { 9053 return (ENOMEM); 9054 } 9055 /* 9056 * Add this one for m in now, that way if the alloc fails we won't 9057 * have a bad cnt. 
9058 */ 9059 SCTP_BUF_RESV_UF(m, resv_upfront); 9060 cancpy = M_TRAILINGSPACE(m); 9061 willcpy = min(cancpy, left); 9062 while (left > 0) { 9063 /* move in user data */ 9064 error = uiomove(mtod(m, caddr_t), willcpy, uio); 9065 if (error) { 9066 sctp_m_freem(head); 9067 return (error); 9068 } 9069 SCTP_BUF_LEN(m) = willcpy; 9070 left -= willcpy; 9071 cpsz += willcpy; 9072 if (left > 0) { 9073 SCTP_BUF_NEXT(m) = sctp_get_mbuf_for_msg(left, 0, M_WAIT, 0, MT_DATA); 9074 if (SCTP_BUF_NEXT(m) == NULL) { 9075 /* 9076 * the head goes back to caller, he can free 9077 * the rest 9078 */ 9079 sctp_m_freem(head); 9080 return (ENOMEM); 9081 } 9082 m = SCTP_BUF_NEXT(m); 9083 cancpy = M_TRAILINGSPACE(m); 9084 willcpy = min(cancpy, left); 9085 } else { 9086 sp->tail_mbuf = m; 9087 SCTP_BUF_NEXT(m) = NULL; 9088 } 9089 } 9090 sp->data = head; 9091 sp->length = cpsz; 9092 return (0); 9093 } 9094 9095 9096 9097 static struct sctp_stream_queue_pending * 9098 sctp_copy_it_in(struct sctp_tcb *stcb, 9099 struct sctp_association *asoc, 9100 struct sctp_sndrcvinfo *srcv, 9101 struct uio *uio, 9102 struct sctp_nets *net, 9103 int max_send_len, 9104 int user_marks_eor, 9105 int *errno, 9106 int non_blocking) 9107 { 9108 /* 9109 * This routine must be very careful in its work. Protocol 9110 * processing is up and running so care must be taken to spl...() 9111 * when you need to do something that may effect the stcb/asoc. The 9112 * sb is locked however. When data is copied the protocol processing 9113 * should be enabled since this is a slower operation... 9114 */ 9115 struct sctp_stream_queue_pending *sp = NULL; 9116 int resv_in_first; 9117 9118 *errno = 0; 9119 /* 9120 * Unless E_EOR mode is on, we must make a send FIT in one call. 9121 */ 9122 if (((user_marks_eor == 0) && non_blocking) && 9123 (uio->uio_resid > stcb->sctp_socket->so_snd.sb_hiwat)) { 9124 /* It will NEVER fit */ 9125 *errno = EMSGSIZE; 9126 goto out_now; 9127 } 9128 /* Now can we send this? 
*/ 9129 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) || 9130 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) || 9131 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) || 9132 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) { 9133 /* got data while shutting down */ 9134 *errno = ECONNRESET; 9135 goto out_now; 9136 } 9137 sp = SCTP_ZONE_GET(sctppcbinfo.ipi_zone_strmoq, struct sctp_stream_queue_pending); 9138 if (sp == NULL) { 9139 *errno = ENOMEM; 9140 goto out_now; 9141 } 9142 SCTP_INCR_STRMOQ_COUNT(); 9143 sp->act_flags = 0; 9144 sp->sinfo_flags = srcv->sinfo_flags; 9145 sp->timetolive = srcv->sinfo_timetolive; 9146 sp->ppid = srcv->sinfo_ppid; 9147 sp->context = srcv->sinfo_context; 9148 sp->strseq = 0; 9149 SCTP_GETTIME_TIMEVAL(&sp->ts); 9150 9151 sp->stream = srcv->sinfo_stream; 9152 sp->length = min(uio->uio_resid, max_send_len); 9153 if ((sp->length == uio->uio_resid) && 9154 ((user_marks_eor == 0) || 9155 (srcv->sinfo_flags & SCTP_EOF) || 9156 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR))) 9157 ) { 9158 sp->msg_is_complete = 1; 9159 } else { 9160 sp->msg_is_complete = 0; 9161 } 9162 sp->some_taken = 0; 9163 resv_in_first = sizeof(struct sctp_data_chunk); 9164 sp->data = sp->tail_mbuf = NULL; 9165 *errno = sctp_copy_one(sp, uio, resv_in_first); 9166 if (*errno) { 9167 sctp_free_a_strmoq(stcb, sp); 9168 sp->data = NULL; 9169 sp->net = NULL; 9170 sp = NULL; 9171 } else { 9172 if (sp->sinfo_flags & SCTP_ADDR_OVER) { 9173 sp->net = net; 9174 sp->addr_over = 1; 9175 } else { 9176 sp->net = asoc->primary_destination; 9177 sp->addr_over = 0; 9178 } 9179 atomic_add_int(&sp->net->ref_count, 1); 9180 sctp_set_prsctp_policy(stcb, sp); 9181 } 9182 out_now: 9183 return (sp); 9184 } 9185 9186 9187 int 9188 sctp_sosend(struct socket *so, 9189 struct sockaddr *addr, 9190 struct uio *uio, 9191 struct mbuf *top, 9192 struct mbuf *control, 9193 int flags 9194 , 9195 struct thread *p 9196 ) 9197 { 9198 struct sctp_inpcb *inp; 9199 int error, use_rcvinfo = 
0; 9200 struct sctp_sndrcvinfo srcv; 9201 9202 inp = (struct sctp_inpcb *)so->so_pcb; 9203 if (control) { 9204 /* process cmsg snd/rcv info (maybe a assoc-id) */ 9205 if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&srcv, control, 9206 sizeof(srcv))) { 9207 /* got one */ 9208 use_rcvinfo = 1; 9209 } 9210 } 9211 error = sctp_lower_sosend(so, addr, uio, top, control, flags, 9212 use_rcvinfo, &srcv, p); 9213 return (error); 9214 } 9215 9216 9217 extern unsigned int sctp_add_more_threshold; 9218 int 9219 sctp_lower_sosend(struct socket *so, 9220 struct sockaddr *addr, 9221 struct uio *uio, 9222 struct mbuf *i_pak, 9223 struct mbuf *control, 9224 int flags, 9225 int use_rcvinfo, 9226 struct sctp_sndrcvinfo *srcv, 9227 struct thread *p 9228 ) 9229 { 9230 unsigned int sndlen, max_len; 9231 int error, len; 9232 struct mbuf *top = NULL; 9233 9234 #if defined(__NetBSD__) || defined(__OpenBSD_) 9235 int s; 9236 9237 #endif 9238 int queue_only = 0, queue_only_for_init = 0; 9239 int free_cnt_applied = 0; 9240 int un_sent = 0; 9241 int now_filled = 0; 9242 struct sctp_block_entry be; 9243 struct sctp_inpcb *inp; 9244 struct sctp_tcb *stcb = NULL; 9245 struct timeval now; 9246 struct sctp_nets *net; 9247 struct sctp_association *asoc; 9248 struct sctp_inpcb *t_inp; 9249 int create_lock_applied = 0; 9250 int nagle_applies = 0; 9251 int some_on_control = 0; 9252 int got_all_of_the_send = 0; 9253 int hold_tcblock = 0; 9254 int non_blocking = 0; 9255 9256 error = 0; 9257 net = NULL; 9258 stcb = NULL; 9259 asoc = NULL; 9260 t_inp = inp = (struct sctp_inpcb *)so->so_pcb; 9261 if (inp == NULL) { 9262 error = EFAULT; 9263 goto out_unlocked; 9264 } 9265 atomic_add_int(&inp->total_sends, 1); 9266 if (uio) 9267 sndlen = uio->uio_resid; 9268 else { 9269 sndlen = SCTP_HEADER_LEN(i_pak); 9270 top = SCTP_HEADER_TO_CHAIN(i_pak); 9271 } 9272 9273 hold_tcblock = 0; 9274 9275 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 9276 (inp->sctp_socket->so_qlimit)) { 9277 /* The listener can NOT send */ 9278 
error = EFAULT; 9279 goto out_unlocked; 9280 } 9281 if ((use_rcvinfo) && srcv) { 9282 if (INVALID_SINFO_FLAG(srcv->sinfo_flags) || PR_SCTP_INVALID_POLICY(srcv->sinfo_flags)) { 9283 error = EINVAL; 9284 goto out_unlocked; 9285 } 9286 if (srcv->sinfo_flags) 9287 SCTP_STAT_INCR(sctps_sends_with_flags); 9288 9289 if (srcv->sinfo_flags & SCTP_SENDALL) { 9290 /* its a sendall */ 9291 error = sctp_sendall(inp, uio, top, srcv); 9292 top = NULL; 9293 goto out_unlocked; 9294 } 9295 } 9296 /* now we must find the assoc */ 9297 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { 9298 SCTP_INP_RLOCK(inp); 9299 stcb = LIST_FIRST(&inp->sctp_asoc_list); 9300 if (stcb == NULL) { 9301 SCTP_INP_RUNLOCK(inp); 9302 error = ENOTCONN; 9303 goto out_unlocked; 9304 } 9305 hold_tcblock = 0; 9306 SCTP_INP_RUNLOCK(inp); 9307 if (addr) 9308 /* Must locate the net structure if addr given */ 9309 net = sctp_findnet(stcb, addr); 9310 else 9311 net = stcb->asoc.primary_destination; 9312 9313 } else if (use_rcvinfo && srcv && srcv->sinfo_assoc_id) { 9314 stcb = sctp_findassociation_ep_asocid(inp, srcv->sinfo_assoc_id, 0); 9315 if (stcb) { 9316 if (addr) 9317 /* 9318 * Must locate the net structure if addr 9319 * given 9320 */ 9321 net = sctp_findnet(stcb, addr); 9322 else 9323 net = stcb->asoc.primary_destination; 9324 } 9325 hold_tcblock = 0; 9326 } else if (addr) { 9327 /* 9328 * Since we did not use findep we must increment it, and if 9329 * we don't find a tcb decrement it. 9330 */ 9331 SCTP_INP_WLOCK(inp); 9332 SCTP_INP_INCR_REF(inp); 9333 SCTP_INP_WUNLOCK(inp); 9334 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL); 9335 if (stcb == NULL) { 9336 SCTP_INP_WLOCK(inp); 9337 SCTP_INP_DECR_REF(inp); 9338 SCTP_INP_WUNLOCK(inp); 9339 } else { 9340 hold_tcblock = 1; 9341 } 9342 } 9343 if ((stcb == NULL) && (addr)) { 9344 /* Possible implicit send? 
*/ 9345 SCTP_ASOC_CREATE_LOCK(inp); 9346 create_lock_applied = 1; 9347 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 9348 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 9349 /* Should I really unlock ? */ 9350 error = EFAULT; 9351 goto out_unlocked; 9352 9353 } 9354 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) && 9355 (addr->sa_family == AF_INET6)) { 9356 error = EINVAL; 9357 goto out_unlocked; 9358 } 9359 SCTP_INP_WLOCK(inp); 9360 SCTP_INP_INCR_REF(inp); 9361 SCTP_INP_WUNLOCK(inp); 9362 /* With the lock applied look again */ 9363 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL); 9364 if (stcb == NULL) { 9365 SCTP_INP_WLOCK(inp); 9366 SCTP_INP_DECR_REF(inp); 9367 SCTP_INP_WUNLOCK(inp); 9368 } else { 9369 hold_tcblock = 1; 9370 } 9371 } 9372 if (stcb == NULL) { 9373 if (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { 9374 error = ENOTCONN; 9375 goto out_unlocked; 9376 } else if (addr == NULL) { 9377 error = ENOENT; 9378 goto out_unlocked; 9379 } else { 9380 /* 9381 * UDP style, we must go ahead and start the INIT 9382 * process 9383 */ 9384 if ((use_rcvinfo) && (srcv) && 9385 ((srcv->sinfo_flags & SCTP_ABORT) || 9386 ((srcv->sinfo_flags & SCTP_EOF) && 9387 (uio->uio_resid == 0)))) { 9388 /* 9389 * User asks to abort a non-existant assoc, 9390 * or EOF a non-existant assoc with no data 9391 */ 9392 error = ENOENT; 9393 goto out_unlocked; 9394 } 9395 /* get an asoc/stcb struct */ 9396 stcb = sctp_aloc_assoc(inp, addr, 1, &error, 0); 9397 if (stcb == NULL) { 9398 /* Error is setup for us in the call */ 9399 goto out_unlocked; 9400 } 9401 if (create_lock_applied) { 9402 SCTP_ASOC_CREATE_UNLOCK(inp); 9403 create_lock_applied = 0; 9404 } else { 9405 printf("Huh-3? 
create lock should have been on??\n"); 9406 } 9407 /* 9408 * Turn on queue only flag to prevent data from 9409 * being sent 9410 */ 9411 queue_only = 1; 9412 asoc = &stcb->asoc; 9413 asoc->state = SCTP_STATE_COOKIE_WAIT; 9414 SCTP_GETTIME_TIMEVAL(&asoc->time_entered); 9415 9416 /* initialize authentication params for the assoc */ 9417 sctp_initialize_auth_params(inp, stcb); 9418 9419 if (control) { 9420 /* 9421 * see if a init structure exists in cmsg 9422 * headers 9423 */ 9424 struct sctp_initmsg initm; 9425 int i; 9426 9427 if (sctp_find_cmsg(SCTP_INIT, (void *)&initm, control, 9428 sizeof(initm))) { 9429 /* 9430 * we have an INIT override of the 9431 * default 9432 */ 9433 if (initm.sinit_max_attempts) 9434 asoc->max_init_times = initm.sinit_max_attempts; 9435 if (initm.sinit_num_ostreams) 9436 asoc->pre_open_streams = initm.sinit_num_ostreams; 9437 if (initm.sinit_max_instreams) 9438 asoc->max_inbound_streams = initm.sinit_max_instreams; 9439 if (initm.sinit_max_init_timeo) 9440 asoc->initial_init_rto_max = initm.sinit_max_init_timeo; 9441 if (asoc->streamoutcnt < asoc->pre_open_streams) { 9442 /* Default is NOT correct */ 9443 #ifdef SCTP_DEBUG 9444 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) { 9445 printf("Ok, defout:%d pre_open:%d\n", 9446 asoc->streamoutcnt, asoc->pre_open_streams); 9447 } 9448 #endif 9449 SCTP_FREE(asoc->strmout); 9450 asoc->strmout = NULL; 9451 asoc->streamoutcnt = asoc->pre_open_streams; 9452 /* 9453 * What happens if this 9454 * fails? .. we panic ... 
9455 */ 9456 { 9457 struct sctp_stream_out *tmp_str; 9458 int had_lock = 0; 9459 9460 if (hold_tcblock) { 9461 had_lock = 1; 9462 SCTP_TCB_UNLOCK(stcb); 9463 } 9464 SCTP_MALLOC(tmp_str, 9465 struct sctp_stream_out *, 9466 asoc->streamoutcnt * 9467 sizeof(struct sctp_stream_out), 9468 "StreamsOut"); 9469 if (had_lock) { 9470 SCTP_TCB_LOCK(stcb); 9471 } 9472 if (asoc->strmout == NULL) { 9473 asoc->strmout = tmp_str; 9474 } else { 9475 SCTP_FREE(asoc->strmout); 9476 asoc->strmout = tmp_str; 9477 } 9478 } 9479 for (i = 0; i < asoc->streamoutcnt; i++) { 9480 /* 9481 * inbound side must 9482 * be set to 0xffff, 9483 * also NOTE when we 9484 * get the INIT-ACK 9485 * back (for INIT 9486 * sender) we MUST 9487 * reduce the count 9488 * (streamoutcnt) 9489 * but first check 9490 * if we sent to any 9491 * of the upper 9492 * streams that were 9493 * dropped (if some 9494 * were). Those that 9495 * were dropped must 9496 * be notified to 9497 * the upper layer 9498 * as failed to 9499 * send. 9500 */ 9501 asoc->strmout[i].next_sequence_sent = 0x0; 9502 TAILQ_INIT(&asoc->strmout[i].outqueue); 9503 asoc->strmout[i].stream_no = i; 9504 asoc->strmout[i].last_msg_incomplete = 0; 9505 asoc->strmout[i].next_spoke.tqe_next = 0; 9506 asoc->strmout[i].next_spoke.tqe_prev = 0; 9507 } 9508 } 9509 } 9510 } 9511 hold_tcblock = 1; 9512 /* out with the INIT */ 9513 queue_only_for_init = 1; 9514 /* 9515 * we may want to dig in after this call and adjust 9516 * the MTU value. It defaulted to 1500 (constant) 9517 * but the ro structure may now have an update and 9518 * thus we may need to change it BEFORE we append 9519 * the message. 9520 */ 9521 net = stcb->asoc.primary_destination; 9522 asoc = &stcb->asoc; 9523 } 9524 } 9525 if (((so->so_state & SS_NBIO) 9526 || (flags & MSG_NBIO) 9527 )) { 9528 non_blocking = 1; 9529 } 9530 asoc = &stcb->asoc; 9531 /* would we block? 
*/ 9532 if (non_blocking) { 9533 if ((so->so_snd.sb_hiwat < 9534 (sndlen + stcb->asoc.total_output_queue_size)) || 9535 (stcb->asoc.chunks_on_out_queue > 9536 sctp_max_chunks_on_queue)) { 9537 error = EWOULDBLOCK; 9538 atomic_add_int(&stcb->sctp_ep->total_nospaces, 1); 9539 goto out_unlocked; 9540 } 9541 } 9542 /* Keep the stcb from being freed under our feet */ 9543 atomic_add_int(&stcb->asoc.refcnt, 1); 9544 free_cnt_applied = 1; 9545 9546 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 9547 error = ECONNRESET; 9548 goto out_unlocked; 9549 } 9550 if (create_lock_applied) { 9551 SCTP_ASOC_CREATE_UNLOCK(inp); 9552 create_lock_applied = 0; 9553 } 9554 if (asoc->stream_reset_outstanding) { 9555 /* 9556 * Can't queue any data while stream reset is underway. 9557 */ 9558 error = EAGAIN; 9559 goto out_unlocked; 9560 } 9561 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) || 9562 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) { 9563 queue_only = 1; 9564 } 9565 if ((use_rcvinfo == 0) || (srcv == NULL)) { 9566 /* Grab the default stuff from the asoc */ 9567 srcv = &stcb->asoc.def_send; 9568 } 9569 /* we are now done with all control */ 9570 if (control) { 9571 sctp_m_freem(control); 9572 control = NULL; 9573 } 9574 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) || 9575 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) || 9576 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) || 9577 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) { 9578 if ((use_rcvinfo) && 9579 (srcv->sinfo_flags & SCTP_ABORT)) { 9580 ; 9581 } else { 9582 error = ECONNRESET; 9583 goto out_unlocked; 9584 } 9585 } 9586 /* Ok, we will attempt a msgsnd :> */ 9587 if (p) { 9588 p->td_proc->p_stats->p_ru.ru_msgsnd++; 9589 } 9590 if (stcb) { 9591 if (net && ((srcv->sinfo_flags & SCTP_ADDR_OVER))) { 9592 /* we take the override or the unconfirmed */ 9593 ; 9594 } else { 9595 net = stcb->asoc.primary_destination; 9596 } 9597 } 9598 if ((net->flight_size > net->cwnd) && 
(sctp_cmt_on_off == 0)) { 9599 /* 9600 * CMT: Added check for CMT above. net above is the primary 9601 * dest. If CMT is ON, sender should always attempt to send 9602 * with the output routine sctp_fill_outqueue() that loops 9603 * through all destination addresses. Therefore, if CMT is 9604 * ON, queue_only is NOT set to 1 here, so that 9605 * sctp_chunk_output() can be called below. 9606 */ 9607 queue_only = 1; 9608 9609 } else if (asoc->ifp_had_enobuf) { 9610 SCTP_STAT_INCR(sctps_ifnomemqueued); 9611 if (net->flight_size > (net->mtu * 2)) 9612 queue_only = 1; 9613 asoc->ifp_had_enobuf = 0; 9614 } else { 9615 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) + 9616 ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) * sizeof(struct sctp_data_chunk))); 9617 } 9618 /* Are we aborting? */ 9619 if (srcv->sinfo_flags & SCTP_ABORT) { 9620 struct mbuf *mm; 9621 int tot_demand, tot_out, max; 9622 9623 SCTP_STAT_INCR(sctps_sends_with_abort); 9624 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) || 9625 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) { 9626 /* It has to be up before we abort */ 9627 /* how big is the user initiated abort? 
*/ 9628 error = EINVAL; 9629 goto out; 9630 } 9631 if (hold_tcblock) { 9632 SCTP_TCB_UNLOCK(stcb); 9633 hold_tcblock = 0; 9634 } 9635 if (top) { 9636 struct mbuf *cntm; 9637 9638 mm = sctp_get_mbuf_for_msg(1, 0, M_WAIT, 1, MT_DATA); 9639 9640 tot_out = 0; 9641 cntm = top; 9642 while (cntm) { 9643 tot_out += SCTP_BUF_LEN(cntm); 9644 cntm = SCTP_BUF_NEXT(cntm); 9645 } 9646 tot_demand = (tot_out + sizeof(struct sctp_paramhdr)); 9647 } else { 9648 /* Must fit in a MTU */ 9649 tot_out = uio->uio_resid; 9650 tot_demand = (tot_out + sizeof(struct sctp_paramhdr)); 9651 mm = sctp_get_mbuf_for_msg(tot_demand, 0, M_WAIT, 1, MT_DATA); 9652 } 9653 if (mm == NULL) { 9654 error = ENOMEM; 9655 goto out; 9656 } 9657 max = asoc->smallest_mtu - sizeof(struct sctp_paramhdr); 9658 max -= sizeof(struct sctp_abort_msg); 9659 if (tot_out > max) { 9660 tot_out = max; 9661 } 9662 if (mm) { 9663 struct sctp_paramhdr *ph; 9664 9665 /* now move forward the data pointer */ 9666 ph = mtod(mm, struct sctp_paramhdr *); 9667 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); 9668 ph->param_length = htons((sizeof(struct sctp_paramhdr) + tot_out)); 9669 ph++; 9670 SCTP_BUF_LEN(mm) = tot_out + sizeof(struct sctp_paramhdr); 9671 if (top == NULL) { 9672 error = uiomove((caddr_t)ph, (int)tot_out, uio); 9673 if (error) { 9674 /* 9675 * Here if we can't get his data we 9676 * still abort we just don't get to 9677 * send the users note :-0 9678 */ 9679 sctp_m_freem(mm); 9680 mm = NULL; 9681 } 9682 } else { 9683 SCTP_BUF_NEXT(mm) = top; 9684 } 9685 } 9686 if (hold_tcblock == 0) { 9687 SCTP_TCB_LOCK(stcb); 9688 hold_tcblock = 1; 9689 } 9690 atomic_add_int(&stcb->asoc.refcnt, -1); 9691 free_cnt_applied = 0; 9692 /* release this lock, otherwise we hang on ourselves */ 9693 sctp_abort_an_association(stcb->sctp_ep, stcb, 9694 SCTP_RESPONSE_TO_USER_REQ, 9695 mm); 9696 /* now relock the stcb so everything is sane */ 9697 hold_tcblock = 0; 9698 stcb = NULL; 9699 goto out_unlocked; 9700 } 9701 /* Calculate the 
maximum we can send */ 9702 if (so->so_snd.sb_hiwat > stcb->asoc.total_output_queue_size) { 9703 max_len = so->so_snd.sb_hiwat - stcb->asoc.total_output_queue_size; 9704 } else { 9705 max_len = 0; 9706 } 9707 if (hold_tcblock) { 9708 SCTP_TCB_UNLOCK(stcb); 9709 hold_tcblock = 0; 9710 } 9711 /* Is the stream no. valid? */ 9712 if (srcv->sinfo_stream >= asoc->streamoutcnt) { 9713 /* Invalid stream number */ 9714 error = EINVAL; 9715 goto out_unlocked; 9716 } 9717 if (asoc->strmout == NULL) { 9718 /* huh? software error */ 9719 error = EFAULT; 9720 goto out_unlocked; 9721 } 9722 len = 0; 9723 if (max_len < sctp_add_more_threshold) { 9724 /* No room right no ! */ 9725 SOCKBUF_LOCK(&so->so_snd); 9726 while (so->so_snd.sb_hiwat < (stcb->asoc.total_output_queue_size + sctp_add_more_threshold)) { 9727 #ifdef SCTP_BLK_LOGGING 9728 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA, 9729 so, asoc, uio->uio_resid); 9730 #endif 9731 be.error = 0; 9732 stcb->block_entry = &be; 9733 error = sbwait(&so->so_snd); 9734 stcb->block_entry = NULL; 9735 if (error || so->so_error || be.error) { 9736 if (error == 0) { 9737 if (so->so_error) 9738 error = so->so_error; 9739 if (be.error) { 9740 error = be.error; 9741 } 9742 } 9743 SOCKBUF_UNLOCK(&so->so_snd); 9744 goto out_unlocked; 9745 } 9746 #ifdef SCTP_BLK_LOGGING 9747 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK, 9748 so, asoc, stcb->asoc.total_output_queue_size); 9749 #endif 9750 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 9751 goto out_unlocked; 9752 } 9753 } 9754 if (so->so_snd.sb_hiwat > stcb->asoc.total_output_queue_size) { 9755 max_len = so->so_snd.sb_hiwat - stcb->asoc.total_output_queue_size; 9756 } else { 9757 max_len = 0; 9758 } 9759 SOCKBUF_UNLOCK(&so->so_snd); 9760 } 9761 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 9762 goto out_unlocked; 9763 } 9764 atomic_add_int(&stcb->total_sends, 1); 9765 if (top == NULL) { 9766 struct sctp_stream_queue_pending *sp; 9767 struct sctp_stream_out *strm; 9768 uint32_t sndout, 
initial_out; 9769 int user_marks_eor; 9770 9771 if (uio->uio_resid == 0) { 9772 if (srcv->sinfo_flags & SCTP_EOF) { 9773 got_all_of_the_send = 1; 9774 goto dataless_eof; 9775 } else { 9776 error = EINVAL; 9777 goto out; 9778 } 9779 } 9780 initial_out = uio->uio_resid; 9781 9782 if ((asoc->stream_locked) && 9783 (asoc->stream_locked_on != srcv->sinfo_stream)) { 9784 error = EAGAIN; 9785 goto out; 9786 } 9787 strm = &stcb->asoc.strmout[srcv->sinfo_stream]; 9788 user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 9789 if (strm->last_msg_incomplete == 0) { 9790 do_a_copy_in: 9791 sp = sctp_copy_it_in(stcb, asoc, srcv, uio, net, max_len, user_marks_eor, &error, non_blocking); 9792 if ((sp == NULL) || (error)) { 9793 goto out; 9794 } 9795 SCTP_TCB_SEND_LOCK(stcb); 9796 if (sp->msg_is_complete) { 9797 strm->last_msg_incomplete = 0; 9798 asoc->stream_locked = 0; 9799 } else { 9800 /* 9801 * Just got locked to this guy in case of an 9802 * interupt. 9803 */ 9804 strm->last_msg_incomplete = 1; 9805 asoc->stream_locked = 1; 9806 asoc->stream_locked_on = srcv->sinfo_stream; 9807 } 9808 sctp_snd_sb_alloc(stcb, sp->length); 9809 9810 asoc->stream_queue_cnt++; 9811 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next); 9812 if ((srcv->sinfo_flags & SCTP_UNORDERED) == 0) { 9813 sp->strseq = strm->next_sequence_sent; 9814 #ifdef SCTP_LOG_SENDING_STR 9815 sctp_misc_ints(SCTP_STRMOUT_LOG_ASSIGN, 9816 (uintptr_t) stcb, (uintptr_t) sp, 9817 (uint32_t) ((srcv->sinfo_stream << 16) | sp->strseq), 0); 9818 #endif 9819 strm->next_sequence_sent++; 9820 } else { 9821 SCTP_STAT_INCR(sctps_sends_with_unord); 9822 } 9823 9824 if ((strm->next_spoke.tqe_next == NULL) && 9825 (strm->next_spoke.tqe_prev == NULL)) { 9826 /* Not on wheel, insert */ 9827 sctp_insert_on_wheel(stcb, asoc, strm, 1); 9828 } 9829 SCTP_TCB_SEND_UNLOCK(stcb); 9830 } else { 9831 sp = TAILQ_LAST(&strm->outqueue, sctp_streamhead); 9832 if (sp == NULL) { 9833 /* ???? Huh ??? 
last msg is gone */ 9834 #ifdef INVARIANTS 9835 panic("Warning: Last msg marked incomplete, yet nothing left?"); 9836 #else 9837 printf("Warning: Last msg marked incomplete, yet nothing left?\n"); 9838 strm->last_msg_incomplete = 0; 9839 #endif 9840 goto do_a_copy_in; 9841 9842 } 9843 } 9844 while (uio->uio_resid > 0) { 9845 /* How much room do we have? */ 9846 struct mbuf *new_tail, *mm; 9847 9848 if (so->so_snd.sb_hiwat > stcb->asoc.total_output_queue_size) 9849 max_len = so->so_snd.sb_hiwat - stcb->asoc.total_output_queue_size; 9850 else 9851 max_len = 0; 9852 9853 if ((max_len > sctp_add_more_threshold) || 9854 (uio->uio_resid && (uio->uio_resid < max_len))) { 9855 sndout = 0; 9856 new_tail = NULL; 9857 if (hold_tcblock) { 9858 SCTP_TCB_UNLOCK(stcb); 9859 hold_tcblock = 0; 9860 } 9861 mm = sctp_copy_resume(sp, uio, srcv, max_len, user_marks_eor, &error, &sndout, &new_tail); 9862 if ((mm == NULL) || error) { 9863 if (mm) { 9864 sctp_m_freem(mm); 9865 } 9866 goto out; 9867 } 9868 /* Update the mbuf and count */ 9869 SCTP_TCB_SEND_LOCK(stcb); 9870 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 9871 /* 9872 * we need to get out. Peer probably 9873 * aborted. 9874 */ 9875 sctp_m_freem(mm); 9876 if (stcb->asoc.state & SCTP_PCB_FLAGS_WAS_ABORTED) 9877 error = ECONNRESET; 9878 goto out; 9879 } 9880 if (sp->tail_mbuf) { 9881 /* tack it to the end */ 9882 SCTP_BUF_NEXT(sp->tail_mbuf) = mm; 9883 sp->tail_mbuf = new_tail; 9884 } else { 9885 /* A stolen mbuf */ 9886 sp->data = mm; 9887 sp->tail_mbuf = new_tail; 9888 } 9889 sctp_snd_sb_alloc(stcb, sndout); 9890 sp->length += sndout; 9891 len += sndout; 9892 /* Did we reach EOR? */ 9893 if ((uio->uio_resid == 0) && 9894 ((user_marks_eor == 0) || 9895 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR))) 9896 ) { 9897 sp->msg_is_complete = 1; 9898 } else { 9899 sp->msg_is_complete = 0; 9900 } 9901 SCTP_TCB_SEND_UNLOCK(stcb); 9902 } 9903 if (uio->uio_resid == 0) { 9904 /* got it all? 
*/ 9905 continue; 9906 } 9907 /* PR-SCTP? */ 9908 if ((asoc->peer_supports_prsctp) && (asoc->sent_queue_cnt_removeable > 0)) { 9909 /* 9910 * This is ugly but we must assure locking 9911 * order 9912 */ 9913 if (hold_tcblock == 0) { 9914 SCTP_TCB_LOCK(stcb); 9915 hold_tcblock = 1; 9916 } 9917 sctp_prune_prsctp(stcb, asoc, srcv, sndlen); 9918 if (so->so_snd.sb_hiwat > stcb->asoc.total_output_queue_size) 9919 max_len = so->so_snd.sb_hiwat - stcb->asoc.total_output_queue_size; 9920 else 9921 max_len = 0; 9922 if (max_len > 0) { 9923 continue; 9924 } 9925 SCTP_TCB_UNLOCK(stcb); 9926 hold_tcblock = 0; 9927 } 9928 /* wait for space now */ 9929 if (non_blocking) { 9930 /* Non-blocking io in place out */ 9931 goto skip_out_eof; 9932 } 9933 if ((net->flight_size > net->cwnd) && 9934 (sctp_cmt_on_off == 0)) { 9935 queue_only = 1; 9936 9937 } else if (asoc->ifp_had_enobuf) { 9938 SCTP_STAT_INCR(sctps_ifnomemqueued); 9939 if (net->flight_size > (net->mtu * 2)) { 9940 queue_only = 1; 9941 } else { 9942 queue_only = 0; 9943 } 9944 asoc->ifp_had_enobuf = 0; 9945 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) + 9946 ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) * 9947 sizeof(struct sctp_data_chunk))); 9948 } else { 9949 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) + 9950 ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) * 9951 sizeof(struct sctp_data_chunk))); 9952 queue_only = 0; 9953 } 9954 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) && 9955 (stcb->asoc.total_flight > 0) && 9956 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) 9957 ) { 9958 9959 /* 9960 * Ok, Nagle is set on and we have data 9961 * outstanding. Don't send anything and let 9962 * SACKs drive out the data unless wen have 9963 * a "full" segment to send. 
9964 */ 9965 #ifdef SCTP_NAGLE_LOGGING 9966 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED); 9967 #endif 9968 SCTP_STAT_INCR(sctps_naglequeued); 9969 nagle_applies = 1; 9970 } else { 9971 #ifdef SCTP_NAGLE_LOGGING 9972 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) 9973 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED); 9974 #endif 9975 SCTP_STAT_INCR(sctps_naglesent); 9976 nagle_applies = 0; 9977 } 9978 /* What about the INIT, send it maybe */ 9979 #ifdef SCTP_BLK_LOGGING 9980 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only, nagle_applies, un_sent); 9981 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size, stcb->asoc.total_flight, 9982 stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count); 9983 #endif 9984 if (queue_only_for_init) { 9985 if (hold_tcblock == 0) { 9986 SCTP_TCB_LOCK(stcb); 9987 hold_tcblock = 1; 9988 } 9989 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) { 9990 /* a collision took us forward? */ 9991 queue_only_for_init = 0; 9992 queue_only = 0; 9993 } else { 9994 sctp_send_initiate(inp, stcb); 9995 stcb->asoc.state = SCTP_STATE_COOKIE_WAIT; 9996 queue_only_for_init = 0; 9997 queue_only = 1; 9998 } 9999 } 10000 if ((queue_only == 0) && (nagle_applies == 0) 10001 ) { 10002 /* 10003 * need to start chunk output before 10004 * blocking.. note that if a lock is already 10005 * applied, then the input via the net is 10006 * happening and I don't need to start 10007 * output :-D 10008 */ 10009 if (hold_tcblock == 0) { 10010 if (SCTP_TCB_TRYLOCK(stcb)) { 10011 hold_tcblock = 1; 10012 sctp_chunk_output(inp, 10013 stcb, 10014 SCTP_OUTPUT_FROM_USR_SEND); 10015 10016 } 10017 } else { 10018 sctp_chunk_output(inp, 10019 stcb, 10020 SCTP_OUTPUT_FROM_USR_SEND); 10021 } 10022 if (hold_tcblock == 1) { 10023 SCTP_TCB_UNLOCK(stcb); 10024 hold_tcblock = 0; 10025 } 10026 } 10027 SOCKBUF_LOCK(&so->so_snd); 10028 /* 10029 * This is a bit strange, but I think it will work. 
10030 * The total_output_queue_size is locked and 10031 * protected by the TCB_LOCK, which we just 10032 * released. There is a race that can occur between 10033 * releasing it above, and me getting the socket 10034 * lock, where sacks come in but we have not put the 10035 * SB_WAIT on the so_snd buffer to get the wakeup. 10036 * After the LOCK is applied the sack_processing 10037 * will also need to LOCK the so->so_snd to do the 10038 * actual sowwakeup(). So once we have the socket 10039 * buffer lock if we recheck the size we KNOW we 10040 * will get to sleep safely with the wakeup flag in 10041 * place. 10042 */ 10043 if (so->so_snd.sb_hiwat < (stcb->asoc.total_output_queue_size + sctp_add_more_threshold)) { 10044 #ifdef SCTP_BLK_LOGGING 10045 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK, 10046 so, asoc, uio->uio_resid); 10047 #endif 10048 be.error = 0; 10049 stcb->block_entry = &be; 10050 error = sbwait(&so->so_snd); 10051 stcb->block_entry = NULL; 10052 10053 if (error || so->so_error || be.error) { 10054 if (error == 0) { 10055 if (so->so_error) 10056 error = so->so_error; 10057 if (be.error) { 10058 error = be.error; 10059 } 10060 } 10061 SOCKBUF_UNLOCK(&so->so_snd); 10062 goto out_unlocked; 10063 } 10064 #ifdef SCTP_BLK_LOGGING 10065 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK, 10066 so, asoc, stcb->asoc.total_output_queue_size); 10067 #endif 10068 } 10069 SOCKBUF_UNLOCK(&so->so_snd); 10070 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 10071 goto out_unlocked; 10072 } 10073 } 10074 SCTP_TCB_SEND_LOCK(stcb); 10075 if (sp->msg_is_complete == 0) { 10076 strm->last_msg_incomplete = 1; 10077 asoc->stream_locked = 1; 10078 asoc->stream_locked_on = srcv->sinfo_stream; 10079 } else { 10080 strm->last_msg_incomplete = 0; 10081 asoc->stream_locked = 0; 10082 } 10083 SCTP_TCB_SEND_UNLOCK(stcb); 10084 if (uio->uio_resid == 0) { 10085 got_all_of_the_send = 1; 10086 } 10087 } else if (top) { 10088 /* We send in a 0, since we do NOT have any locks */ 10089 error = 
sctp_msg_append(stcb, net, top, srcv, 0); 10090 top = NULL; 10091 } 10092 if (error) { 10093 goto out; 10094 } 10095 dataless_eof: 10096 /* EOF thing ? */ 10097 if ((srcv->sinfo_flags & SCTP_EOF) && 10098 (got_all_of_the_send == 1) && 10099 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) 10100 ) { 10101 SCTP_STAT_INCR(sctps_sends_with_eof); 10102 error = 0; 10103 if (hold_tcblock == 0) { 10104 SCTP_TCB_LOCK(stcb); 10105 hold_tcblock = 1; 10106 } 10107 if (TAILQ_EMPTY(&asoc->send_queue) && 10108 TAILQ_EMPTY(&asoc->sent_queue) && 10109 (asoc->stream_queue_cnt == 0)) { 10110 if (asoc->locked_on_sending) { 10111 goto abort_anyway; 10112 } 10113 /* there is nothing queued to send, so I'm done... */ 10114 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) && 10115 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) && 10116 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { 10117 /* only send SHUTDOWN the first time through */ 10118 sctp_send_shutdown(stcb, stcb->asoc.primary_destination); 10119 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) { 10120 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 10121 } 10122 asoc->state = SCTP_STATE_SHUTDOWN_SENT; 10123 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, 10124 asoc->primary_destination); 10125 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, 10126 asoc->primary_destination); 10127 } 10128 } else { 10129 /* 10130 * we still got (or just got) data to send, so set 10131 * SHUTDOWN_PENDING 10132 */ 10133 /* 10134 * XXX sockets draft says that SCTP_EOF should be 10135 * sent with no data. 
currently, we will allow user 10136 * data to be sent first and move to 10137 * SHUTDOWN-PENDING 10138 */ 10139 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) && 10140 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) && 10141 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { 10142 if (hold_tcblock == 0) { 10143 SCTP_TCB_LOCK(stcb); 10144 hold_tcblock = 1; 10145 } 10146 if (asoc->locked_on_sending) { 10147 /* Locked to send out the data */ 10148 struct sctp_stream_queue_pending *sp; 10149 10150 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead); 10151 if (sp) { 10152 if ((sp->length == 0) && (sp->msg_is_complete == 0)) 10153 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 10154 } 10155 } 10156 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING; 10157 if (TAILQ_EMPTY(&asoc->send_queue) && 10158 TAILQ_EMPTY(&asoc->sent_queue) && 10159 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { 10160 abort_anyway: 10161 if (free_cnt_applied) { 10162 atomic_add_int(&stcb->asoc.refcnt, -1); 10163 free_cnt_applied = 0; 10164 } 10165 sctp_abort_an_association(stcb->sctp_ep, stcb, 10166 SCTP_RESPONSE_TO_USER_REQ, 10167 NULL); 10168 /* 10169 * now relock the stcb so everything 10170 * is sane 10171 */ 10172 hold_tcblock = 0; 10173 stcb = NULL; 10174 goto out; 10175 } 10176 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, 10177 asoc->primary_destination); 10178 } 10179 } 10180 } 10181 skip_out_eof: 10182 if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) { 10183 some_on_control = 1; 10184 } 10185 if ((net->flight_size > net->cwnd) && 10186 (sctp_cmt_on_off == 0)) { 10187 queue_only = 1; 10188 } else if (asoc->ifp_had_enobuf) { 10189 SCTP_STAT_INCR(sctps_ifnomemqueued); 10190 if (net->flight_size > (net->mtu * 2)) { 10191 queue_only = 1; 10192 } else { 10193 queue_only = 0; 10194 } 10195 asoc->ifp_had_enobuf = 0; 10196 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) + 10197 ((stcb->asoc.chunks_on_out_queue - 
stcb->asoc.total_flight_count) * 10198 sizeof(struct sctp_data_chunk))); 10199 } else { 10200 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) + 10201 ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) * 10202 sizeof(struct sctp_data_chunk))); 10203 queue_only = 0; 10204 } 10205 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) && 10206 (stcb->asoc.total_flight > 0) && 10207 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) 10208 ) { 10209 10210 /* 10211 * Ok, Nagle is set on and we have data outstanding. Don't 10212 * send anything and let SACKs drive out the data unless wen 10213 * have a "full" segment to send. 10214 */ 10215 #ifdef SCTP_NAGLE_LOGGING 10216 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED); 10217 #endif 10218 SCTP_STAT_INCR(sctps_naglequeued); 10219 nagle_applies = 1; 10220 } else { 10221 #ifdef SCTP_NAGLE_LOGGING 10222 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) 10223 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED); 10224 #endif 10225 SCTP_STAT_INCR(sctps_naglesent); 10226 nagle_applies = 0; 10227 } 10228 if (queue_only_for_init) { 10229 if (hold_tcblock == 0) { 10230 SCTP_TCB_LOCK(stcb); 10231 hold_tcblock = 1; 10232 } 10233 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) { 10234 /* a collision took us forward? */ 10235 queue_only_for_init = 0; 10236 queue_only = 0; 10237 } else { 10238 sctp_send_initiate(inp, stcb); 10239 if (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING) 10240 stcb->asoc.state = SCTP_STATE_COOKIE_WAIT | 10241 SCTP_STATE_SHUTDOWN_PENDING; 10242 else 10243 stcb->asoc.state = SCTP_STATE_COOKIE_WAIT; 10244 queue_only_for_init = 0; 10245 queue_only = 1; 10246 } 10247 } 10248 if ((queue_only == 0) && (nagle_applies == 0) && (stcb->asoc.peers_rwnd && un_sent)) { 10249 /* we can attempt to send too. 
*/ 10250 if (hold_tcblock == 0) { 10251 /* 10252 * If there is activity recv'ing sacks no need to 10253 * send 10254 */ 10255 if (SCTP_TCB_TRYLOCK(stcb)) { 10256 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND); 10257 hold_tcblock = 1; 10258 } 10259 } else { 10260 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND); 10261 } 10262 } else if ((queue_only == 0) && 10263 (stcb->asoc.peers_rwnd == 0) && 10264 (stcb->asoc.total_flight == 0)) { 10265 /* We get to have a probe outstanding */ 10266 if (hold_tcblock == 0) { 10267 hold_tcblock = 1; 10268 SCTP_TCB_LOCK(stcb); 10269 } 10270 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND); 10271 } else if (some_on_control) { 10272 int num_out, reason, cwnd_full, frag_point; 10273 10274 /* Here we do control only */ 10275 if (hold_tcblock == 0) { 10276 hold_tcblock = 1; 10277 SCTP_TCB_LOCK(stcb); 10278 } 10279 frag_point = sctp_get_frag_point(stcb, &stcb->asoc); 10280 sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out, 10281 &reason, 1, &cwnd_full, 1, &now, &now_filled, frag_point); 10282 } 10283 #ifdef SCTP_DEBUG 10284 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) { 10285 printf("USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d \n", 10286 queue_only, stcb->asoc.peers_rwnd, un_sent, 10287 stcb->asoc.total_flight, stcb->asoc.chunks_on_out_queue, 10288 stcb->asoc.total_output_queue_size); 10289 } 10290 #endif 10291 out: 10292 out_unlocked: 10293 10294 if (create_lock_applied) { 10295 SCTP_ASOC_CREATE_UNLOCK(inp); 10296 create_lock_applied = 0; 10297 } 10298 if ((stcb) && hold_tcblock) { 10299 SCTP_TCB_UNLOCK(stcb); 10300 } 10301 if (stcb && free_cnt_applied) { 10302 atomic_add_int(&stcb->asoc.refcnt, -1); 10303 } 10304 #ifdef INVARIANTS 10305 if (stcb) { 10306 if (mtx_owned(&stcb->tcb_mtx)) { 10307 panic("Leaving with tcb mtx owned?"); 10308 } 10309 if (mtx_owned(&stcb->tcb_send_mtx)) { 10310 panic("Leaving with tcb send mtx owned?"); 10311 } 10312 } 10313 #endif 10314 if (top) { 10315 
sctp_m_freem(top); 10316 } 10317 if (control) { 10318 sctp_m_freem(control); 10319 } 10320 return (error); 10321 } 10322 10323 10324 /* 10325 * generate an AUTHentication chunk, if required 10326 */ 10327 struct mbuf * 10328 sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end, 10329 struct sctp_auth_chunk **auth_ret, uint32_t * offset, 10330 struct sctp_tcb *stcb, uint8_t chunk) 10331 { 10332 struct mbuf *m_auth; 10333 struct sctp_auth_chunk *auth; 10334 int chunk_len; 10335 10336 if ((m_end == NULL) || (auth_ret == NULL) || (offset == NULL) || 10337 (stcb == NULL)) 10338 return (m); 10339 10340 /* sysctl disabled auth? */ 10341 if (sctp_auth_disable) 10342 return (m); 10343 10344 /* peer doesn't do auth... */ 10345 if (!stcb->asoc.peer_supports_auth) { 10346 return (m); 10347 } 10348 /* does the requested chunk require auth? */ 10349 if (!sctp_auth_is_required_chunk(chunk, stcb->asoc.peer_auth_chunks)) { 10350 return (m); 10351 } 10352 m_auth = sctp_get_mbuf_for_msg(sizeof(*auth), 0, M_DONTWAIT, 1, MT_HEADER); 10353 if (m_auth == NULL) { 10354 /* no mbuf's */ 10355 return (m); 10356 } 10357 /* reserve some space if this will be the first mbuf */ 10358 if (m == NULL) 10359 SCTP_BUF_RESV_UF(m_auth, SCTP_MIN_OVERHEAD); 10360 /* fill in the AUTH chunk details */ 10361 auth = mtod(m_auth, struct sctp_auth_chunk *); 10362 bzero(auth, sizeof(*auth)); 10363 auth->ch.chunk_type = SCTP_AUTHENTICATION; 10364 auth->ch.chunk_flags = 0; 10365 chunk_len = sizeof(*auth) + 10366 sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id); 10367 auth->ch.chunk_length = htons(chunk_len); 10368 auth->hmac_id = htons(stcb->asoc.peer_hmac_id); 10369 /* key id and hmac digest will be computed and filled in upon send */ 10370 10371 /* save the offset where the auth was inserted into the chain */ 10372 if (m != NULL) { 10373 struct mbuf *cn; 10374 10375 *offset = 0; 10376 cn = m; 10377 while (cn) { 10378 *offset += SCTP_BUF_LEN(cn); 10379 cn = SCTP_BUF_NEXT(cn); 10380 } 10381 } else 10382 
*offset = 0; 10383 10384 /* update length and return pointer to the auth chunk */ 10385 SCTP_BUF_LEN(m_auth) = chunk_len; 10386 m = sctp_copy_mbufchain(m_auth, m, m_end, 1, chunk_len, 0); 10387 if (auth_ret != NULL) 10388 *auth_ret = auth; 10389 10390 return (m); 10391 } 10392