/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * All rights reserved.
 * Copyright (c) 2019 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	$KAME: frag6.c,v 1.33 2002/01/07 11:34:48 kjc Exp $
 */

#include <sys/cdefs.h>
#include "opt_rss.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/hash.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_private.h>
#include <net/netisr.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/icmp6.h>
#include <netinet/in_systm.h>	/* For ECN definitions. */
#include <netinet/ip.h>		/* For ECN definitions. */

#ifdef MAC
#include <security/mac/mac_framework.h>
#endif

/*
 * A "big picture" of how IPv6 fragment queues are all linked together.
 *
 * struct ip6qbucket ip6qb[...];			hashed buckets
 * ||||||||
 * |
 * +--- TAILQ(struct ip6q, packets) *q6;	tailq entries holding
 *      ||||||||				fragmented packets
 *      |					(1 per original packet)
 *      |
 *      +--- TAILQ(struct ip6asfrag, ip6q_frags) *af6;	tailq entries of IPv6
 *           |                                   *ip6af;fragment packets
 *           |					for one original packet
 *           + *mbuf
 */

/* Reassembly headers are stored in hash buckets. */
Looney #define IP6REASS_NHASH_LOG2 10 8980d7a853SJonathan T. Looney #define IP6REASS_NHASH (1 << IP6REASS_NHASH_LOG2) 9080d7a853SJonathan T. Looney #define IP6REASS_HMASK (IP6REASS_NHASH - 1) 9180d7a853SJonathan T. Looney 9221f08a07SBjoern A. Zeeb TAILQ_HEAD(ip6qhead, ip6q); 9380d7a853SJonathan T. Looney struct ip6qbucket { 9421f08a07SBjoern A. Zeeb struct ip6qhead packets; 9580d7a853SJonathan T. Looney struct mtx lock; 961e9f3b73SJonathan T. Looney int count; 9780d7a853SJonathan T. Looney }; 9880d7a853SJonathan T. Looney 991540a98eSBjoern A. Zeeb struct ip6asfrag { 10021f08a07SBjoern A. Zeeb TAILQ_ENTRY(ip6asfrag) ip6af_tq; 1011540a98eSBjoern A. Zeeb struct mbuf *ip6af_m; 102f1664f32SBjoern A. Zeeb int ip6af_offset; /* Offset in ip6af_m to next header. */ 103f1664f32SBjoern A. Zeeb int ip6af_frglen; /* Fragmentable part length. */ 104f1664f32SBjoern A. Zeeb int ip6af_off; /* Fragment offset. */ 105f1664f32SBjoern A. Zeeb bool ip6af_mff; /* More fragment bit in frag off. */ 1061540a98eSBjoern A. Zeeb }; 1071540a98eSBjoern A. Zeeb 108487a161cSBjoern A. Zeeb static MALLOC_DEFINE(M_FRAG6, "frag6", "IPv6 fragment reassembly header"); 109487a161cSBjoern A. Zeeb 11067a10c46SBjoern A. Zeeb #ifdef VIMAGE 11167a10c46SBjoern A. Zeeb /* A flag to indicate if IPv6 fragmentation is initialized. */ 11267a10c46SBjoern A. Zeeb VNET_DEFINE_STATIC(bool, frag6_on); 11367a10c46SBjoern A. Zeeb #define V_frag6_on VNET(frag6_on) 11467a10c46SBjoern A. Zeeb #endif 11567a10c46SBjoern A. Zeeb 116757cb678SBjoern A. Zeeb /* System wide (global) maximum and count of packets in reassembly queues. */ 117757cb678SBjoern A. Zeeb static int ip6_maxfrags; 118c17ae180SMateusz Guzik static u_int __exclusive_cache_line frag6_nfrags; 119757cb678SBjoern A. Zeeb 120757cb678SBjoern A. Zeeb /* Maximum and current packets in per-VNET reassembly queue. */ 121757cb678SBjoern A. Zeeb VNET_DEFINE_STATIC(int, ip6_maxfragpackets); 12280d7a853SJonathan T. 
Looney VNET_DEFINE_STATIC(volatile u_int, frag6_nfragpackets); 123757cb678SBjoern A. Zeeb #define V_ip6_maxfragpackets VNET(ip6_maxfragpackets) 124757cb678SBjoern A. Zeeb #define V_frag6_nfragpackets VNET(frag6_nfragpackets) 125757cb678SBjoern A. Zeeb 126e32221a1SAlexander V. Chernikov /* Maximum per-VNET reassembly timeout (milliseconds) */ 127e32221a1SAlexander V. Chernikov VNET_DEFINE_STATIC(u_int, ip6_fraglifetime) = IPV6_DEFFRAGTTL; 128e32221a1SAlexander V. Chernikov #define V_ip6_fraglifetime VNET(ip6_fraglifetime) 129e32221a1SAlexander V. Chernikov 130757cb678SBjoern A. Zeeb /* Maximum per-VNET reassembly queues per bucket and fragments per packet. */ 131757cb678SBjoern A. Zeeb VNET_DEFINE_STATIC(int, ip6_maxfragbucketsize); 132757cb678SBjoern A. Zeeb VNET_DEFINE_STATIC(int, ip6_maxfragsperpacket); 133757cb678SBjoern A. Zeeb #define V_ip6_maxfragbucketsize VNET(ip6_maxfragbucketsize) 134757cb678SBjoern A. Zeeb #define V_ip6_maxfragsperpacket VNET(ip6_maxfragsperpacket) 135757cb678SBjoern A. Zeeb 136757cb678SBjoern A. Zeeb /* Per-VNET reassembly queue buckets. */ 1379cb1a47aSBjoern A. Zeeb VNET_DEFINE_STATIC(struct ip6qbucket, ip6qb[IP6REASS_NHASH]); 1389cb1a47aSBjoern A. Zeeb VNET_DEFINE_STATIC(uint32_t, ip6qb_hashseed); 1399cb1a47aSBjoern A. Zeeb #define V_ip6qb VNET(ip6qb) 1409cb1a47aSBjoern A. Zeeb #define V_ip6qb_hashseed VNET(ip6qb_hashseed) 14182cd038dSYoshinobu Inoue 1429cb1a47aSBjoern A. Zeeb #define IP6QB_LOCK(_b) mtx_lock(&V_ip6qb[(_b)].lock) 1439cb1a47aSBjoern A. Zeeb #define IP6QB_TRYLOCK(_b) mtx_trylock(&V_ip6qb[(_b)].lock) 1449cb1a47aSBjoern A. Zeeb #define IP6QB_LOCK_ASSERT(_b) mtx_assert(&V_ip6qb[(_b)].lock, MA_OWNED) 1459cb1a47aSBjoern A. Zeeb #define IP6QB_UNLOCK(_b) mtx_unlock(&V_ip6qb[(_b)].lock) 14621f08a07SBjoern A. Zeeb #define IP6QB_HEAD(_b) (&V_ip6qb[(_b)].packets) 1479888c401SHajimu UMEMOTO 14882cd038dSYoshinobu Inoue /* 1492ceeacbeSJonathan T. 
Looney * By default, limit the number of IP6 fragments across all reassembly 1502ceeacbeSJonathan T. Looney * queues to 1/32 of the total number of mbuf clusters. 1512ceeacbeSJonathan T. Looney * 1522ceeacbeSJonathan T. Looney * Limit the total number of reassembly queues per VNET to the 1532ceeacbeSJonathan T. Looney * IP6 fragment limit, but ensure the limit will not allow any bucket 1542ceeacbeSJonathan T. Looney * to grow above 100 items. (The bucket limit is 1552ceeacbeSJonathan T. Looney * IP_MAXFRAGPACKETS / (IPREASS_NHASH / 2), so the 50 is the correct 1562ceeacbeSJonathan T. Looney * multiplier to reach a 100-item limit.) 1572ceeacbeSJonathan T. Looney * The 100-item limit was chosen as brief testing seems to show that 1582ceeacbeSJonathan T. Looney * this produces "reasonable" performance on some subset of systems 1592ceeacbeSJonathan T. Looney * under DoS attack. 1602ceeacbeSJonathan T. Looney */ 1612ceeacbeSJonathan T. Looney #define IP6_MAXFRAGS (nmbclusters / 32) 1622ceeacbeSJonathan T. Looney #define IP6_MAXFRAGPACKETS (imin(IP6_MAXFRAGS, IP6REASS_NHASH * 50)) 1632ceeacbeSJonathan T. Looney 164e32221a1SAlexander V. Chernikov /* Interval between periodic reassembly queue inspections */ 165e32221a1SAlexander V. Chernikov #define IP6_CALLOUT_INTERVAL_MS 500 166e32221a1SAlexander V. Chernikov 1672ceeacbeSJonathan T. Looney /* 168757cb678SBjoern A. Zeeb * Sysctls and helper function. 16982cd038dSYoshinobu Inoue */ 170757cb678SBjoern A. Zeeb SYSCTL_DECL(_net_inet6_ip6); 171757cb678SBjoern A. Zeeb 17265456706SBjoern A. Zeeb SYSCTL_UINT(_net_inet6_ip6, OID_AUTO, frag6_nfrags, 173c17ae180SMateusz Guzik CTLFLAG_RD, &frag6_nfrags, 0, 17465456706SBjoern A. Zeeb "Global number of IPv6 fragments across all reassembly queues."); 17565456706SBjoern A. Zeeb 176757cb678SBjoern A. Zeeb static void 17709b361c7SBjoern A. Zeeb frag6_set_bucketsize(void) 1781e9f3b73SJonathan T. Looney { 1791e9f3b73SJonathan T. Looney int i; 1801e9f3b73SJonathan T. 
Looney 1811e9f3b73SJonathan T. Looney if ((i = V_ip6_maxfragpackets) > 0) 1821e9f3b73SJonathan T. Looney V_ip6_maxfragbucketsize = imax(i / (IP6REASS_NHASH / 2), 1); 1831e9f3b73SJonathan T. Looney } 1841e9f3b73SJonathan T. Looney 185757cb678SBjoern A. Zeeb SYSCTL_INT(_net_inet6_ip6, IPV6CTL_MAXFRAGS, maxfrags, 186757cb678SBjoern A. Zeeb CTLFLAG_RW, &ip6_maxfrags, 0, 187757cb678SBjoern A. Zeeb "Maximum allowed number of outstanding IPv6 packet fragments. " 1883cf59750SGordon Bergling "A value of 0 means no fragmented packets will be accepted, while " 189757cb678SBjoern A. Zeeb "a value of -1 means no limit"); 190757cb678SBjoern A. Zeeb 191757cb678SBjoern A. Zeeb static int 192757cb678SBjoern A. Zeeb sysctl_ip6_maxfragpackets(SYSCTL_HANDLER_ARGS) 193757cb678SBjoern A. Zeeb { 194757cb678SBjoern A. Zeeb int error, val; 195757cb678SBjoern A. Zeeb 196757cb678SBjoern A. Zeeb val = V_ip6_maxfragpackets; 197757cb678SBjoern A. Zeeb error = sysctl_handle_int(oidp, &val, 0, req); 198757cb678SBjoern A. Zeeb if (error != 0 || !req->newptr) 199757cb678SBjoern A. Zeeb return (error); 200757cb678SBjoern A. Zeeb V_ip6_maxfragpackets = val; 201757cb678SBjoern A. Zeeb frag6_set_bucketsize(); 202757cb678SBjoern A. Zeeb return (0); 203757cb678SBjoern A. Zeeb } 204757cb678SBjoern A. Zeeb SYSCTL_PROC(_net_inet6_ip6, IPV6CTL_MAXFRAGPACKETS, maxfragpackets, 2057029da5cSPawel Biernacki CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2067029da5cSPawel Biernacki NULL, 0, sysctl_ip6_maxfragpackets, "I", 207757cb678SBjoern A. Zeeb "Default maximum number of outstanding fragmented IPv6 packets. " 208757cb678SBjoern A. Zeeb "A value of 0 means no fragmented packets will be accepted, while a " 209757cb678SBjoern A. Zeeb "a value of -1 means no limit"); 21053707abdSBjoern A. Zeeb SYSCTL_UINT(_net_inet6_ip6, OID_AUTO, frag6_nfragpackets, 21153707abdSBjoern A. Zeeb CTLFLAG_VNET | CTLFLAG_RD, 21253707abdSBjoern A. 
Zeeb __DEVOLATILE(u_int *, &VNET_NAME(frag6_nfragpackets)), 0, 21353707abdSBjoern A. Zeeb "Per-VNET number of IPv6 fragments across all reassembly queues."); 214757cb678SBjoern A. Zeeb SYSCTL_INT(_net_inet6_ip6, IPV6CTL_MAXFRAGSPERPACKET, maxfragsperpacket, 215757cb678SBjoern A. Zeeb CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_maxfragsperpacket), 0, 216757cb678SBjoern A. Zeeb "Maximum allowed number of fragments per packet"); 217757cb678SBjoern A. Zeeb SYSCTL_INT(_net_inet6_ip6, IPV6CTL_MAXFRAGBUCKETSIZE, maxfragbucketsize, 218757cb678SBjoern A. Zeeb CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_maxfragbucketsize), 0, 219757cb678SBjoern A. Zeeb "Maximum number of reassembly queues per hash bucket"); 220757cb678SBjoern A. Zeeb 221e32221a1SAlexander V. Chernikov static int 222e32221a1SAlexander V. Chernikov frag6_milli_to_callout_ticks(int ms) 223e32221a1SAlexander V. Chernikov { 224e32221a1SAlexander V. Chernikov return (ms / IP6_CALLOUT_INTERVAL_MS); 225e32221a1SAlexander V. Chernikov } 226e32221a1SAlexander V. Chernikov 227e32221a1SAlexander V. Chernikov static int 228e32221a1SAlexander V. Chernikov frag6_callout_ticks_to_milli(int ms) 229e32221a1SAlexander V. Chernikov { 230e32221a1SAlexander V. Chernikov return (ms * IP6_CALLOUT_INTERVAL_MS); 231e32221a1SAlexander V. Chernikov } 232e32221a1SAlexander V. Chernikov 233e32221a1SAlexander V. Chernikov _Static_assert(sizeof(((struct ip6q *)NULL)->ip6q_ttl) >= 2, 234e32221a1SAlexander V. Chernikov "ip6q_ttl field is not large enough"); 235e32221a1SAlexander V. Chernikov 236e32221a1SAlexander V. Chernikov static int 237e32221a1SAlexander V. Chernikov sysctl_ip6_fraglifetime(SYSCTL_HANDLER_ARGS) 238e32221a1SAlexander V. Chernikov { 239e32221a1SAlexander V. Chernikov int error, val; 240e32221a1SAlexander V. Chernikov 241e32221a1SAlexander V. Chernikov val = V_ip6_fraglifetime; 242e32221a1SAlexander V. Chernikov error = sysctl_handle_int(oidp, &val, 0, req); 243e32221a1SAlexander V. 
Chernikov if (error != 0 || !req->newptr) 244e32221a1SAlexander V. Chernikov return (error); 245e32221a1SAlexander V. Chernikov if (val <= 0) 246e32221a1SAlexander V. Chernikov val = IPV6_DEFFRAGTTL; 247e32221a1SAlexander V. Chernikov 248e32221a1SAlexander V. Chernikov if (frag6_milli_to_callout_ticks(val) >= 65536) 249e32221a1SAlexander V. Chernikov val = frag6_callout_ticks_to_milli(65535); 250e32221a1SAlexander V. Chernikov #ifdef VIMAGE 251e32221a1SAlexander V. Chernikov if (!IS_DEFAULT_VNET(curvnet)) { 252e32221a1SAlexander V. Chernikov CURVNET_SET(vnet0); 253e32221a1SAlexander V. Chernikov int host_val = V_ip6_fraglifetime; 254e32221a1SAlexander V. Chernikov CURVNET_RESTORE(); 255e32221a1SAlexander V. Chernikov 256e32221a1SAlexander V. Chernikov if (val > host_val) 257e32221a1SAlexander V. Chernikov val = host_val; 258e32221a1SAlexander V. Chernikov } 259e32221a1SAlexander V. Chernikov #endif 260e32221a1SAlexander V. Chernikov V_ip6_fraglifetime = val; 261e32221a1SAlexander V. Chernikov return (0); 262e32221a1SAlexander V. Chernikov } 263e32221a1SAlexander V. Chernikov SYSCTL_PROC(_net_inet6_ip6, OID_AUTO, fraglifetime_ms, 264e32221a1SAlexander V. Chernikov CTLFLAG_VNET | CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 265e32221a1SAlexander V. Chernikov NULL, 0, sysctl_ip6_fraglifetime, "I", 266e32221a1SAlexander V. Chernikov "Fragment lifetime, in milliseconds"); 267e32221a1SAlexander V. Chernikov 268757cb678SBjoern A. Zeeb /* 269c00464a2SBjoern A. Zeeb * Remove the IPv6 fragmentation header from the mbuf. 270c00464a2SBjoern A. Zeeb */ 271c00464a2SBjoern A. Zeeb int 272a61b5cfbSBjoern A. Zeeb ip6_deletefraghdr(struct mbuf *m, int offset, int wait __unused) 273c00464a2SBjoern A. Zeeb { 2745778b399SBjoern A. Zeeb struct ip6_hdr *ip6; 275a61b5cfbSBjoern A. Zeeb 276a61b5cfbSBjoern A. Zeeb KASSERT(m->m_len >= offset + sizeof(struct ip6_frag), 277a61b5cfbSBjoern A. Zeeb ("%s: ext headers not contigous in mbuf %p m_len %d >= " 278a61b5cfbSBjoern A. 
Zeeb "offset %d + %zu\n", __func__, m, m->m_len, offset, 279a61b5cfbSBjoern A. Zeeb sizeof(struct ip6_frag))); 280c00464a2SBjoern A. Zeeb 281c00464a2SBjoern A. Zeeb /* Delete frag6 header. */ 2825778b399SBjoern A. Zeeb ip6 = mtod(m, struct ip6_hdr *); 283a61b5cfbSBjoern A. Zeeb bcopy(ip6, (char *)ip6 + sizeof(struct ip6_frag), offset); 284c00464a2SBjoern A. Zeeb m->m_data += sizeof(struct ip6_frag); 285c00464a2SBjoern A. Zeeb m->m_len -= sizeof(struct ip6_frag); 286c00464a2SBjoern A. Zeeb m->m_flags |= M_FRAGMENTED; 287a61b5cfbSBjoern A. Zeeb 288c00464a2SBjoern A. Zeeb return (0); 289c00464a2SBjoern A. Zeeb } 290c00464a2SBjoern A. Zeeb 291c00464a2SBjoern A. Zeeb /* 29223d374aaSBjoern A. Zeeb * Free a fragment reassembly header and all associated datagrams. 293757cb678SBjoern A. Zeeb */ 2944f590175SPaul Saab static void 295c00464a2SBjoern A. Zeeb frag6_freef(struct ip6q *q6, uint32_t bucket) 2964f590175SPaul Saab { 2975778b399SBjoern A. Zeeb struct ip6_hdr *ip6; 29821f08a07SBjoern A. Zeeb struct ip6asfrag *af6; 2995778b399SBjoern A. Zeeb struct mbuf *m; 3004f590175SPaul Saab 3019cb1a47aSBjoern A. Zeeb IP6QB_LOCK_ASSERT(bucket); 302c00464a2SBjoern A. Zeeb 30321f08a07SBjoern A. Zeeb while ((af6 = TAILQ_FIRST(&q6->ip6q_frags)) != NULL) { 304da89a0feSBjoern A. Zeeb m = af6->ip6af_m; 30521f08a07SBjoern A. Zeeb TAILQ_REMOVE(&q6->ip6q_frags, af6, ip6af_tq); 306c00464a2SBjoern A. Zeeb 307c00464a2SBjoern A. Zeeb /* 308c00464a2SBjoern A. Zeeb * Return ICMP time exceeded error for the 1st fragment. 309c00464a2SBjoern A. Zeeb * Just free other fragments. 310c00464a2SBjoern A. Zeeb */ 311a55383e7SHans Petter Selasky if (af6->ip6af_off == 0 && m->m_pkthdr.rcvif != NULL) { 31223d374aaSBjoern A. Zeeb /* Adjust pointer. */ 313c00464a2SBjoern A. Zeeb ip6 = mtod(m, struct ip6_hdr *); 314c00464a2SBjoern A. Zeeb 31523d374aaSBjoern A. Zeeb /* Restore source and destination addresses. */ 316c00464a2SBjoern A. Zeeb ip6->ip6_src = q6->ip6q_src; 317c00464a2SBjoern A. 
Zeeb ip6->ip6_dst = q6->ip6q_dst; 318c00464a2SBjoern A. Zeeb 319c00464a2SBjoern A. Zeeb icmp6_error(m, ICMP6_TIME_EXCEEDED, 320c00464a2SBjoern A. Zeeb ICMP6_TIME_EXCEED_REASSEMBLY, 0); 321c00464a2SBjoern A. Zeeb } else 322c00464a2SBjoern A. Zeeb m_freem(m); 32323d374aaSBjoern A. Zeeb 324c00464a2SBjoern A. Zeeb free(af6, M_FRAG6); 3252adfd64fSJonathan T. Looney } 32621f08a07SBjoern A. Zeeb 32721f08a07SBjoern A. Zeeb TAILQ_REMOVE(IP6QB_HEAD(bucket), q6, ip6q_tq); 32821f08a07SBjoern A. Zeeb V_ip6qb[bucket].count--; 329c00464a2SBjoern A. Zeeb atomic_subtract_int(&frag6_nfrags, q6->ip6q_nfrag); 330c00464a2SBjoern A. Zeeb #ifdef MAC 331c00464a2SBjoern A. Zeeb mac_ip6q_destroy(q6); 332c00464a2SBjoern A. Zeeb #endif 333c00464a2SBjoern A. Zeeb free(q6, M_FRAG6); 334c00464a2SBjoern A. Zeeb atomic_subtract_int(&V_frag6_nfragpackets, 1); 33582cd038dSYoshinobu Inoue } 33682cd038dSYoshinobu Inoue 33782cd038dSYoshinobu Inoue /* 338a55383e7SHans Petter Selasky * Drain off all datagram fragments belonging to 339a55383e7SHans Petter Selasky * the given network interface. 340a55383e7SHans Petter Selasky */ 341a55383e7SHans Petter Selasky static void 342a55383e7SHans Petter Selasky frag6_cleanup(void *arg __unused, struct ifnet *ifp) 343a55383e7SHans Petter Selasky { 34421f08a07SBjoern A. Zeeb struct ip6qhead *head; 34521f08a07SBjoern A. Zeeb struct ip6q *q6; 346a55383e7SHans Petter Selasky struct ip6asfrag *af6; 34721f08a07SBjoern A. Zeeb uint32_t bucket; 348a55383e7SHans Petter Selasky 349a55383e7SHans Petter Selasky KASSERT(ifp != NULL, ("%s: ifp is NULL", __func__)); 350a55383e7SHans Petter Selasky 3516e6b5143SBjoern A. Zeeb CURVNET_SET_QUIET(ifp->if_vnet); 35267a10c46SBjoern A. Zeeb #ifdef VIMAGE 35367a10c46SBjoern A. Zeeb /* 35467a10c46SBjoern A. Zeeb * Skip processing if IPv6 reassembly is not initialised or 35567a10c46SBjoern A. Zeeb * torn down by frag6_destroy(). 35667a10c46SBjoern A. Zeeb */ 3576e6b5143SBjoern A. Zeeb if (!V_frag6_on) { 3586e6b5143SBjoern A. 
Zeeb CURVNET_RESTORE(); 35967a10c46SBjoern A. Zeeb return; 3606e6b5143SBjoern A. Zeeb } 36167a10c46SBjoern A. Zeeb #endif 36267a10c46SBjoern A. Zeeb 36321f08a07SBjoern A. Zeeb for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) { 36421f08a07SBjoern A. Zeeb IP6QB_LOCK(bucket); 36521f08a07SBjoern A. Zeeb head = IP6QB_HEAD(bucket); 366a55383e7SHans Petter Selasky /* Scan fragment list. */ 36721f08a07SBjoern A. Zeeb TAILQ_FOREACH(q6, head, ip6q_tq) { 36821f08a07SBjoern A. Zeeb TAILQ_FOREACH(af6, &q6->ip6q_frags, ip6af_tq) { 369f1664f32SBjoern A. Zeeb /* Clear no longer valid rcvif pointer. */ 370da89a0feSBjoern A. Zeeb if (af6->ip6af_m->m_pkthdr.rcvif == ifp) 371da89a0feSBjoern A. Zeeb af6->ip6af_m->m_pkthdr.rcvif = NULL; 372a55383e7SHans Petter Selasky } 373a55383e7SHans Petter Selasky } 37421f08a07SBjoern A. Zeeb IP6QB_UNLOCK(bucket); 375a55383e7SHans Petter Selasky } 376a55383e7SHans Petter Selasky CURVNET_RESTORE(); 377a55383e7SHans Petter Selasky } 378a55383e7SHans Petter Selasky EVENTHANDLER_DEFINE(ifnet_departure_event, frag6_cleanup, NULL, 0); 379a55383e7SHans Petter Selasky 380a55383e7SHans Petter Selasky /* 38123d374aaSBjoern A. Zeeb * Like in RFC2460, in RFC8200, fragment and reassembly rules do not agree with 38223d374aaSBjoern A. Zeeb * each other, in terms of next header field handling in fragment header. 383686cdd19SJun-ichiro itojun Hagino * While the sender will use the same value for all of the fragmented packets, 38423d374aaSBjoern A. Zeeb * receiver is suggested not to check for consistency. 385686cdd19SJun-ichiro itojun Hagino * 38623d374aaSBjoern A. Zeeb * Fragment rules (p18,p19): 387686cdd19SJun-ichiro itojun Hagino * (2) A Fragment header containing: 38823d374aaSBjoern A. Zeeb * The Next Header value that identifies the first header 38923d374aaSBjoern A. Zeeb * after the Per-Fragment headers of the original packet. 
390686cdd19SJun-ichiro itojun Hagino * -> next header field is same for all fragments 391686cdd19SJun-ichiro itojun Hagino * 39223d374aaSBjoern A. Zeeb * Reassembly rule (p20): 39323d374aaSBjoern A. Zeeb * The Next Header field of the last header of the Per-Fragment 39423d374aaSBjoern A. Zeeb * headers is obtained from the Next Header field of the first 395686cdd19SJun-ichiro itojun Hagino * fragment's Fragment header. 396686cdd19SJun-ichiro itojun Hagino * -> should grab it from the first fragment only 397686cdd19SJun-ichiro itojun Hagino * 398686cdd19SJun-ichiro itojun Hagino * The following note also contradicts with fragment rule - no one is going to 399686cdd19SJun-ichiro itojun Hagino * send different fragment with different next header field. 400686cdd19SJun-ichiro itojun Hagino * 40123d374aaSBjoern A. Zeeb * Additional note (p22) [not an error]: 402686cdd19SJun-ichiro itojun Hagino * The Next Header values in the Fragment headers of different 403686cdd19SJun-ichiro itojun Hagino * fragments of the same original packet may differ. Only the value 404686cdd19SJun-ichiro itojun Hagino * from the Offset zero fragment packet is used for reassembly. 405686cdd19SJun-ichiro itojun Hagino * -> should grab it from the first fragment only 406686cdd19SJun-ichiro itojun Hagino * 407686cdd19SJun-ichiro itojun Hagino * There is no explicit reason given in the RFC. Historical reason maybe? 408686cdd19SJun-ichiro itojun Hagino */ 409686cdd19SJun-ichiro itojun Hagino /* 41023d374aaSBjoern A. Zeeb * Fragment input. 41182cd038dSYoshinobu Inoue */ 41282cd038dSYoshinobu Inoue int 4131272577eSXin LI frag6_input(struct mbuf **mp, int *offp, int proto) 41482cd038dSYoshinobu Inoue { 41521f08a07SBjoern A. Zeeb struct mbuf *m, *t; 41682cd038dSYoshinobu Inoue struct ip6_hdr *ip6; 41782cd038dSYoshinobu Inoue struct ip6_frag *ip6f; 41821f08a07SBjoern A. Zeeb struct ip6qhead *head; 41921f08a07SBjoern A. Zeeb struct ip6q *q6; 42021f08a07SBjoern A. 
Zeeb struct ip6asfrag *af6, *ip6af, *af6tmp; 42121f08a07SBjoern A. Zeeb struct in6_ifaddr *ia6; 42221f08a07SBjoern A. Zeeb struct ifnet *dstifp, *srcifp; 423505e91f5SKristof Provost uint32_t hashkey[(sizeof(struct in6_addr) * 2 + 424505e91f5SKristof Provost sizeof(ip6f->ip6f_ident)) / sizeof(uint32_t)]; 4259cb1a47aSBjoern A. Zeeb uint32_t bucket, *hashkeyp; 4265778b399SBjoern A. Zeeb int fragoff, frgpartlen; /* Must be larger than uint16_t. */ 4275778b399SBjoern A. Zeeb int nxt, offset, plen; 4285778b399SBjoern A. Zeeb uint8_t ecn, ecn0; 4295778b399SBjoern A. Zeeb bool only_frag; 430aaa46574SAdrian Chadd #ifdef RSS 431aaa46574SAdrian Chadd struct ip6_direct_ctx *ip6dc; 4325778b399SBjoern A. Zeeb struct m_tag *mtag; 433aaa46574SAdrian Chadd #endif 434aaa46574SAdrian Chadd 4355778b399SBjoern A. Zeeb m = *mp; 4365778b399SBjoern A. Zeeb offset = *offp; 4375778b399SBjoern A. Zeeb 438c1131de6SBjoern A. Zeeb M_ASSERTPKTHDR(m); 439c1131de6SBjoern A. Zeeb 440a4adf6ccSBjoern A. Zeeb if (m->m_len < offset + sizeof(struct ip6_frag)) { 441a61b5cfbSBjoern A. Zeeb m = m_pullup(m, offset + sizeof(struct ip6_frag)); 442a61b5cfbSBjoern A. Zeeb if (m == NULL) { 443a61b5cfbSBjoern A. Zeeb IP6STAT_INC(ip6s_exthdrtoolong); 444a61b5cfbSBjoern A. Zeeb *mp = NULL; 44540e39bbbSHajimu UMEMOTO return (IPPROTO_DONE); 446a61b5cfbSBjoern A. Zeeb } 447a4adf6ccSBjoern A. Zeeb } 448a61b5cfbSBjoern A. Zeeb ip6 = mtod(m, struct ip6_hdr *); 44982cd038dSYoshinobu Inoue 45082cd038dSYoshinobu Inoue dstifp = NULL; 45123d374aaSBjoern A. Zeeb /* Find the destination interface of the packet. */ 4528268d82cSAlexander V. Chernikov ia6 = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */, false); 4538268d82cSAlexander V. Chernikov if (ia6 != NULL) 4545778b399SBjoern A. Zeeb dstifp = ia6->ia_ifp; 45523d374aaSBjoern A. Zeeb 45623d374aaSBjoern A. Zeeb /* Jumbo payload cannot contain a fragment header. 
*/ 45782cd038dSYoshinobu Inoue if (ip6->ip6_plen == 0) { 45882cd038dSYoshinobu Inoue icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, offset); 45982cd038dSYoshinobu Inoue in6_ifstat_inc(dstifp, ifs6_reass_fail); 460a8fe77d8SBjoern A. Zeeb *mp = NULL; 4615778b399SBjoern A. Zeeb return (IPPROTO_DONE); 46282cd038dSYoshinobu Inoue } 46382cd038dSYoshinobu Inoue 46482cd038dSYoshinobu Inoue /* 46523d374aaSBjoern A. Zeeb * Check whether fragment packet's fragment length is a 46623d374aaSBjoern A. Zeeb * multiple of 8 octets (unless it is the last one). 46782cd038dSYoshinobu Inoue * sizeof(struct ip6_frag) == 8 46882cd038dSYoshinobu Inoue * sizeof(struct ip6_hdr) = 40 46982cd038dSYoshinobu Inoue */ 470a61b5cfbSBjoern A. Zeeb ip6f = (struct ip6_frag *)((caddr_t)ip6 + offset); 47182cd038dSYoshinobu Inoue if ((ip6f->ip6f_offlg & IP6F_MORE_FRAG) && 47282cd038dSYoshinobu Inoue (((ntohs(ip6->ip6_plen) - offset) & 0x7) != 0)) { 47306cd0a3fSHajimu UMEMOTO icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, 474686cdd19SJun-ichiro itojun Hagino offsetof(struct ip6_hdr, ip6_plen)); 47582cd038dSYoshinobu Inoue in6_ifstat_inc(dstifp, ifs6_reass_fail); 476a8fe77d8SBjoern A. Zeeb *mp = NULL; 4775778b399SBjoern A. Zeeb return (IPPROTO_DONE); 47882cd038dSYoshinobu Inoue } 47982cd038dSYoshinobu Inoue 4809cb8d207SAndrey V. Elsukov IP6STAT_INC(ip6s_fragments); 48182cd038dSYoshinobu Inoue in6_ifstat_inc(dstifp, ifs6_reass_reqd); 48282cd038dSYoshinobu Inoue 4834018ea9aSBjoern A. Zeeb /* 4842946a941STom Jones * Handle "atomic" fragments (offset and m bit set to 0) upfront, 485c1131de6SBjoern A. Zeeb * unrelated to any reassembly. We need to remove the frag hdr 486c1131de6SBjoern A. Zeeb * which is ugly. 48723d374aaSBjoern A. Zeeb * See RFC 6946 and section 4.5 of RFC 8200. 4884018ea9aSBjoern A. Zeeb */ 4894018ea9aSBjoern A. Zeeb if ((ip6f->ip6f_offlg & ~IP6F_RESERVED_MASK) == 0) { 4902946a941STom Jones IP6STAT_INC(ip6s_atomicfrags); 491c1131de6SBjoern A. 
Zeeb nxt = ip6f->ip6f_nxt; 492c1131de6SBjoern A. Zeeb /* 493c1131de6SBjoern A. Zeeb * Set nxt(-hdr field value) to the original value. 494c1131de6SBjoern A. Zeeb * We cannot just set ip6->ip6_nxt as there might be 495c1131de6SBjoern A. Zeeb * an unfragmentable part with extension headers and 496c1131de6SBjoern A. Zeeb * we must update the last one. 497c1131de6SBjoern A. Zeeb */ 498c1131de6SBjoern A. Zeeb m_copyback(m, ip6_get_prevhdr(m, offset), sizeof(uint8_t), 499c1131de6SBjoern A. Zeeb (caddr_t)&nxt); 500c1131de6SBjoern A. Zeeb ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - 501c1131de6SBjoern A. Zeeb sizeof(struct ip6_frag)); 502c1131de6SBjoern A. Zeeb if (ip6_deletefraghdr(m, offset, M_NOWAIT) != 0) 503c1131de6SBjoern A. Zeeb goto dropfrag2; 504c1131de6SBjoern A. Zeeb m->m_pkthdr.len -= sizeof(struct ip6_frag); 5054018ea9aSBjoern A. Zeeb in6_ifstat_inc(dstifp, ifs6_reass_ok); 506c1131de6SBjoern A. Zeeb *mp = m; 507c1131de6SBjoern A. Zeeb return (nxt); 5084018ea9aSBjoern A. Zeeb } 5094018ea9aSBjoern A. Zeeb 510c1131de6SBjoern A. Zeeb /* Offset now points to data portion. */ 511c1131de6SBjoern A. Zeeb offset += sizeof(struct ip6_frag); 512c1131de6SBjoern A. Zeeb 5135f9f192dSJonathan T. Looney /* Get fragment length and discard 0-byte fragments. */ 5145f9f192dSJonathan T. Looney frgpartlen = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - offset; 5155f9f192dSJonathan T. Looney if (frgpartlen == 0) { 5165f9f192dSJonathan T. Looney icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, 5175f9f192dSJonathan T. Looney offsetof(struct ip6_hdr, ip6_plen)); 5185f9f192dSJonathan T. Looney in6_ifstat_inc(dstifp, ifs6_reass_fail); 5195f9f192dSJonathan T. Looney IP6STAT_INC(ip6s_fragdropped); 520a8fe77d8SBjoern A. Zeeb *mp = NULL; 5215778b399SBjoern A. Zeeb return (IPPROTO_DONE); 5225f9f192dSJonathan T. Looney } 5235f9f192dSJonathan T. Looney 5247715d794SBjoern A. Zeeb /* 5257715d794SBjoern A. Zeeb * Enforce upper bound on number of fragments for the entire system. 
5267715d794SBjoern A. Zeeb * If maxfrag is 0, never accept fragments. 5277715d794SBjoern A. Zeeb * If maxfrag is -1, accept all fragments without limitation. 5287715d794SBjoern A. Zeeb */ 5297715d794SBjoern A. Zeeb if (ip6_maxfrags < 0) 5307715d794SBjoern A. Zeeb ; 5317715d794SBjoern A. Zeeb else if (atomic_load_int(&frag6_nfrags) >= (u_int)ip6_maxfrags) 5327715d794SBjoern A. Zeeb goto dropfrag2; 5337715d794SBjoern A. Zeeb 53430809ba9SBjoern A. Zeeb /* 53530809ba9SBjoern A. Zeeb * Validate that a full header chain to the ULP is present in the 53630809ba9SBjoern A. Zeeb * packet containing the first fragment as per RFC RFC7112 and 53730809ba9SBjoern A. Zeeb * RFC 8200 pages 18,19: 53830809ba9SBjoern A. Zeeb * The first fragment packet is composed of: 53930809ba9SBjoern A. Zeeb * (3) Extension headers, if any, and the Upper-Layer header. These 54030809ba9SBjoern A. Zeeb * headers must be in the first fragment. ... 54130809ba9SBjoern A. Zeeb */ 54230809ba9SBjoern A. Zeeb fragoff = ntohs(ip6f->ip6f_offlg & IP6F_OFF_MASK); 54330809ba9SBjoern A. Zeeb /* XXX TODO. thj has D16851 open for this. */ 54430809ba9SBjoern A. Zeeb /* Send ICMPv6 4,3 in case of violation. */ 54530809ba9SBjoern A. Zeeb 546efdfee93SBjoern A. Zeeb /* Store receive network interface pointer for later. */ 547efdfee93SBjoern A. Zeeb srcifp = m->m_pkthdr.rcvif; 548efdfee93SBjoern A. Zeeb 54923d374aaSBjoern A. Zeeb /* Generate a hash value for fragment bucket selection. */ 55080d7a853SJonathan T. Looney hashkeyp = hashkey; 55180d7a853SJonathan T. Looney memcpy(hashkeyp, &ip6->ip6_src, sizeof(struct in6_addr)); 55280d7a853SJonathan T. Looney hashkeyp += sizeof(struct in6_addr) / sizeof(*hashkeyp); 55380d7a853SJonathan T. Looney memcpy(hashkeyp, &ip6->ip6_dst, sizeof(struct in6_addr)); 55480d7a853SJonathan T. Looney hashkeyp += sizeof(struct in6_addr) / sizeof(*hashkeyp); 55580d7a853SJonathan T. Looney *hashkeyp = ip6f->ip6f_ident; 5569cb1a47aSBjoern A. 
Zeeb bucket = jenkins_hash32(hashkey, nitems(hashkey), V_ip6qb_hashseed); 5579cb1a47aSBjoern A. Zeeb bucket &= IP6REASS_HMASK; 5589cb1a47aSBjoern A. Zeeb IP6QB_LOCK(bucket); 55921f08a07SBjoern A. Zeeb head = IP6QB_HEAD(bucket); 5609888c401SHajimu UMEMOTO 56121f08a07SBjoern A. Zeeb TAILQ_FOREACH(q6, head, ip6q_tq) 56282cd038dSYoshinobu Inoue if (ip6f->ip6f_ident == q6->ip6q_ident && 56382cd038dSYoshinobu Inoue IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &q6->ip6q_src) && 5644b908c8bSRobert Watson IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &q6->ip6q_dst) 5654b908c8bSRobert Watson #ifdef MAC 5664b908c8bSRobert Watson && mac_ip6q_match(m, q6) 5674b908c8bSRobert Watson #endif 5684b908c8bSRobert Watson ) 56982cd038dSYoshinobu Inoue break; 57082cd038dSYoshinobu Inoue 5715778b399SBjoern A. Zeeb only_frag = false; 57221f08a07SBjoern A. Zeeb if (q6 == NULL) { 5735778b399SBjoern A. Zeeb /* A first fragment to arrive creates a reassembly queue. */ 5745778b399SBjoern A. Zeeb only_frag = true; 57582cd038dSYoshinobu Inoue 57682cd038dSYoshinobu Inoue /* 57782cd038dSYoshinobu Inoue * Enforce upper bound on number of fragmented packets 57882cd038dSYoshinobu Inoue * for which we attempt reassembly; 5799888c401SHajimu UMEMOTO * If maxfragpackets is 0, never accept fragments. 5809888c401SHajimu UMEMOTO * If maxfragpackets is -1, accept all fragments without 5819888c401SHajimu UMEMOTO * limitation. 58282cd038dSYoshinobu Inoue */ 583603724d3SBjoern A. Zeeb if (V_ip6_maxfragpackets < 0) 58433841545SHajimu UMEMOTO ; 5859cb1a47aSBjoern A. Zeeb else if (V_ip6qb[bucket].count >= V_ip6_maxfragbucketsize || 5861e9f3b73SJonathan T. Looney atomic_load_int(&V_frag6_nfragpackets) >= 58780d7a853SJonathan T. Looney (u_int)V_ip6_maxfragpackets) 58833841545SHajimu UMEMOTO goto dropfrag; 58923d374aaSBjoern A. Zeeb 59023d374aaSBjoern A. Zeeb /* Allocate IPv6 fragement packet queue entry. 
*/ 591f12a9a4cSMark Johnston q6 = malloc(sizeof(struct ip6q), M_FRAG6, M_NOWAIT | M_ZERO); 59282cd038dSYoshinobu Inoue if (q6 == NULL) 59382cd038dSYoshinobu Inoue goto dropfrag; 5944b908c8bSRobert Watson #ifdef MAC 5954b908c8bSRobert Watson if (mac_ip6q_init(q6, M_NOWAIT) != 0) { 596487a161cSBjoern A. Zeeb free(q6, M_FRAG6); 5974b908c8bSRobert Watson goto dropfrag; 5984b908c8bSRobert Watson } 5994b908c8bSRobert Watson mac_ip6q_create(m, q6); 6004b908c8bSRobert Watson #endif 601702828f6SBjoern A. Zeeb atomic_add_int(&V_frag6_nfragpackets, 1); 60282cd038dSYoshinobu Inoue 60323d374aaSBjoern A. Zeeb /* ip6q_nxt will be filled afterwards, from 1st fragment. */ 60421f08a07SBjoern A. Zeeb TAILQ_INIT(&q6->ip6q_frags); 60582cd038dSYoshinobu Inoue q6->ip6q_ident = ip6f->ip6f_ident; 606e32221a1SAlexander V. Chernikov q6->ip6q_ttl = frag6_milli_to_callout_ticks(V_ip6_fraglifetime); 60782cd038dSYoshinobu Inoue q6->ip6q_src = ip6->ip6_src; 60882cd038dSYoshinobu Inoue q6->ip6q_dst = ip6->ip6_dst; 609bb4a7d94SKristof Provost q6->ip6q_ecn = IPV6_ECN(ip6); 61082cd038dSYoshinobu Inoue q6->ip6q_unfrglen = -1; /* The 1st fragment has not arrived. */ 6119888c401SHajimu UMEMOTO 61221f08a07SBjoern A. Zeeb /* Add the fragemented packet to the bucket. */ 61321f08a07SBjoern A. Zeeb TAILQ_INSERT_HEAD(head, q6, ip6q_tq); 61421f08a07SBjoern A. Zeeb V_ip6qb[bucket].count++; 61582cd038dSYoshinobu Inoue } 61682cd038dSYoshinobu Inoue 61782cd038dSYoshinobu Inoue /* 61823d374aaSBjoern A. Zeeb * If it is the 1st fragment, record the length of the 61982cd038dSYoshinobu Inoue * unfragmentable part and the next header of the fragment header. 620619456bbSBjoern A. Zeeb * Assume the first 1st fragement to arrive will be correct. 621619456bbSBjoern A. Zeeb * We do not have any duplicate checks here yet so another packet 622619456bbSBjoern A. Zeeb * with fragoff == 0 could come and overwrite the ip6q_unfrglen 623619456bbSBjoern A. Zeeb * and worse, the next header, at any time. 
62482cd038dSYoshinobu Inoue */ 625619456bbSBjoern A. Zeeb if (fragoff == 0 && q6->ip6q_unfrglen == -1) { 62606cd0a3fSHajimu UMEMOTO q6->ip6q_unfrglen = offset - sizeof(struct ip6_hdr) - 62706cd0a3fSHajimu UMEMOTO sizeof(struct ip6_frag); 62882cd038dSYoshinobu Inoue q6->ip6q_nxt = ip6f->ip6f_nxt; 629619456bbSBjoern A. Zeeb /* XXX ECN? */ 63082cd038dSYoshinobu Inoue } 63182cd038dSYoshinobu Inoue 63282cd038dSYoshinobu Inoue /* 63382cd038dSYoshinobu Inoue * Check that the reassembled packet would not exceed 65535 bytes 63482cd038dSYoshinobu Inoue * in size. 63582cd038dSYoshinobu Inoue * If it would exceed, discard the fragment and return an ICMP error. 63682cd038dSYoshinobu Inoue */ 63782cd038dSYoshinobu Inoue if (q6->ip6q_unfrglen >= 0) { 63882cd038dSYoshinobu Inoue /* The 1st fragment has already arrived. */ 63982cd038dSYoshinobu Inoue if (q6->ip6q_unfrglen + fragoff + frgpartlen > IPV6_MAXPACKET) { 640e5fffe9aSBjoern A. Zeeb if (only_frag) { 641e5fffe9aSBjoern A. Zeeb TAILQ_REMOVE(head, q6, ip6q_tq); 642e5fffe9aSBjoern A. Zeeb V_ip6qb[bucket].count--; 643e5fffe9aSBjoern A. Zeeb atomic_subtract_int(&V_frag6_nfragpackets, 1); 644e5fffe9aSBjoern A. Zeeb #ifdef MAC 645e5fffe9aSBjoern A. Zeeb mac_ip6q_destroy(q6); 646e5fffe9aSBjoern A. Zeeb #endif 647e5fffe9aSBjoern A. Zeeb free(q6, M_FRAG6); 648e5fffe9aSBjoern A. Zeeb } 649e5fffe9aSBjoern A. Zeeb IP6QB_UNLOCK(bucket); 65082cd038dSYoshinobu Inoue icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, 651686cdd19SJun-ichiro itojun Hagino offset - sizeof(struct ip6_frag) + 652686cdd19SJun-ichiro itojun Hagino offsetof(struct ip6_frag, ip6f_offlg)); 653a8fe77d8SBjoern A. Zeeb *mp = NULL; 65482cd038dSYoshinobu Inoue return (IPPROTO_DONE); 65582cd038dSYoshinobu Inoue } 65606cd0a3fSHajimu UMEMOTO } else if (fragoff + frgpartlen > IPV6_MAXPACKET) { 657e5fffe9aSBjoern A. Zeeb if (only_frag) { 658e5fffe9aSBjoern A. Zeeb TAILQ_REMOVE(head, q6, ip6q_tq); 659e5fffe9aSBjoern A. Zeeb V_ip6qb[bucket].count--; 660e5fffe9aSBjoern A. 
Zeeb atomic_subtract_int(&V_frag6_nfragpackets, 1); 661e5fffe9aSBjoern A. Zeeb #ifdef MAC 662e5fffe9aSBjoern A. Zeeb mac_ip6q_destroy(q6); 663e5fffe9aSBjoern A. Zeeb #endif 664e5fffe9aSBjoern A. Zeeb free(q6, M_FRAG6); 665e5fffe9aSBjoern A. Zeeb } 666e5fffe9aSBjoern A. Zeeb IP6QB_UNLOCK(bucket); 66782cd038dSYoshinobu Inoue icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, 668686cdd19SJun-ichiro itojun Hagino offset - sizeof(struct ip6_frag) + 669686cdd19SJun-ichiro itojun Hagino offsetof(struct ip6_frag, ip6f_offlg)); 670a8fe77d8SBjoern A. Zeeb *mp = NULL; 67182cd038dSYoshinobu Inoue return (IPPROTO_DONE); 67282cd038dSYoshinobu Inoue } 673f1664f32SBjoern A. Zeeb 67482cd038dSYoshinobu Inoue /* 67523d374aaSBjoern A. Zeeb * If it is the first fragment, do the above check for each 67682cd038dSYoshinobu Inoue * fragment already stored in the reassembly queue. 67782cd038dSYoshinobu Inoue */ 678dda02192SBjoern A. Zeeb if (fragoff == 0 && !only_frag) { 67921f08a07SBjoern A. Zeeb TAILQ_FOREACH_SAFE(af6, &q6->ip6q_frags, ip6af_tq, af6tmp) { 680dda02192SBjoern A. Zeeb if (q6->ip6q_unfrglen + af6->ip6af_off + 681dda02192SBjoern A. Zeeb af6->ip6af_frglen > IPV6_MAXPACKET) { 68282cd038dSYoshinobu Inoue struct ip6_hdr *ip6err; 6835778b399SBjoern A. Zeeb struct mbuf *merr; 6845778b399SBjoern A. Zeeb int erroff; 6855778b399SBjoern A. Zeeb 686da89a0feSBjoern A. Zeeb merr = af6->ip6af_m; 6875778b399SBjoern A. Zeeb erroff = af6->ip6af_offset; 68882cd038dSYoshinobu Inoue 68923d374aaSBjoern A. Zeeb /* Dequeue the fragment. */ 69021f08a07SBjoern A. Zeeb TAILQ_REMOVE(&q6->ip6q_frags, af6, ip6af_tq); 691dda02192SBjoern A. Zeeb q6->ip6q_nfrag--; 692dda02192SBjoern A. Zeeb atomic_subtract_int(&frag6_nfrags, 1); 693487a161cSBjoern A. Zeeb free(af6, M_FRAG6); 69482cd038dSYoshinobu Inoue 695a55383e7SHans Petter Selasky /* Set a valid receive interface pointer. */ 696a55383e7SHans Petter Selasky merr->m_pkthdr.rcvif = srcifp; 697a55383e7SHans Petter Selasky 69823d374aaSBjoern A. 
Zeeb /* Adjust pointer. */ 69982cd038dSYoshinobu Inoue ip6err = mtod(merr, struct ip6_hdr *); 70082cd038dSYoshinobu Inoue 70182cd038dSYoshinobu Inoue /* 70282cd038dSYoshinobu Inoue * Restore source and destination addresses 70382cd038dSYoshinobu Inoue * in the erroneous IPv6 header. 70482cd038dSYoshinobu Inoue */ 70582cd038dSYoshinobu Inoue ip6err->ip6_src = q6->ip6q_src; 70682cd038dSYoshinobu Inoue ip6err->ip6_dst = q6->ip6q_dst; 70782cd038dSYoshinobu Inoue 70882cd038dSYoshinobu Inoue icmp6_error(merr, ICMP6_PARAM_PROB, 70982cd038dSYoshinobu Inoue ICMP6_PARAMPROB_HEADER, 710686cdd19SJun-ichiro itojun Hagino erroff - sizeof(struct ip6_frag) + 711686cdd19SJun-ichiro itojun Hagino offsetof(struct ip6_frag, ip6f_offlg)); 71282cd038dSYoshinobu Inoue } 71382cd038dSYoshinobu Inoue } 71482cd038dSYoshinobu Inoue } 71582cd038dSYoshinobu Inoue 71623d374aaSBjoern A. Zeeb /* Allocate an IPv6 fragement queue entry for this fragmented part. */ 717f12a9a4cSMark Johnston ip6af = malloc(sizeof(struct ip6asfrag), M_FRAG6, M_NOWAIT | M_ZERO); 718686cdd19SJun-ichiro itojun Hagino if (ip6af == NULL) 719686cdd19SJun-ichiro itojun Hagino goto dropfrag; 72021f08a07SBjoern A. Zeeb ip6af->ip6af_mff = (ip6f->ip6f_offlg & IP6F_MORE_FRAG) ? true : false; 72182cd038dSYoshinobu Inoue ip6af->ip6af_off = fragoff; 72282cd038dSYoshinobu Inoue ip6af->ip6af_frglen = frgpartlen; 72382cd038dSYoshinobu Inoue ip6af->ip6af_offset = offset; 724da89a0feSBjoern A. Zeeb ip6af->ip6af_m = m; 72582cd038dSYoshinobu Inoue 7265778b399SBjoern A. Zeeb if (only_frag) { 72721f08a07SBjoern A. Zeeb /* 72821f08a07SBjoern A. Zeeb * Do a manual insert rather than a hard-to-understand cast 72921f08a07SBjoern A. Zeeb * to a different type relying on data structure order to work. 73021f08a07SBjoern A. Zeeb */ 73121f08a07SBjoern A. Zeeb TAILQ_INSERT_HEAD(&q6->ip6q_frags, ip6af, ip6af_tq); 73221f08a07SBjoern A. Zeeb goto postinsert; 73382cd038dSYoshinobu Inoue } 73482cd038dSYoshinobu Inoue 73523d374aaSBjoern A. 
Zeeb /* Do duplicate, condition, and boundry checks. */ 73682cd038dSYoshinobu Inoue /* 73759dfcba4SHajimu UMEMOTO * Handle ECN by comparing this segment with the first one; 73859dfcba4SHajimu UMEMOTO * if CE is set, do not lose CE. 73923d374aaSBjoern A. Zeeb * Drop if CE and not-ECT are mixed for the same packet. 74059dfcba4SHajimu UMEMOTO */ 741bb4a7d94SKristof Provost ecn = IPV6_ECN(ip6); 7425e9510e3SJINMEI Tatuya ecn0 = q6->ip6q_ecn; 74359dfcba4SHajimu UMEMOTO if (ecn == IPTOS_ECN_CE) { 74459dfcba4SHajimu UMEMOTO if (ecn0 == IPTOS_ECN_NOTECT) { 745487a161cSBjoern A. Zeeb free(ip6af, M_FRAG6); 74659dfcba4SHajimu UMEMOTO goto dropfrag; 74759dfcba4SHajimu UMEMOTO } 74859dfcba4SHajimu UMEMOTO if (ecn0 != IPTOS_ECN_CE) 7495e9510e3SJINMEI Tatuya q6->ip6q_ecn = IPTOS_ECN_CE; 75059dfcba4SHajimu UMEMOTO } 75159dfcba4SHajimu UMEMOTO if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT) { 752487a161cSBjoern A. Zeeb free(ip6af, M_FRAG6); 75359dfcba4SHajimu UMEMOTO goto dropfrag; 75459dfcba4SHajimu UMEMOTO } 75559dfcba4SHajimu UMEMOTO 75623d374aaSBjoern A. Zeeb /* Find a fragmented part which begins after this one does. */ 75721f08a07SBjoern A. Zeeb TAILQ_FOREACH(af6, &q6->ip6q_frags, ip6af_tq) 75882cd038dSYoshinobu Inoue if (af6->ip6af_off > ip6af->ip6af_off) 75982cd038dSYoshinobu Inoue break; 76082cd038dSYoshinobu Inoue 76182cd038dSYoshinobu Inoue /* 76282cd038dSYoshinobu Inoue * If the incoming framgent overlaps some existing fragments in 76323d374aaSBjoern A. Zeeb * the reassembly queue, drop both the new fragment and the 76423d374aaSBjoern A. Zeeb * entire reassembly queue. However, if the new fragment 76523d374aaSBjoern A. Zeeb * is an exact duplicate of an existing fragment, only silently 76623d374aaSBjoern A. Zeeb * drop the existing fragment and leave the fragmentation queue 76723d374aaSBjoern A. Zeeb * unchanged, as allowed by the RFC. (RFC 8200, 4.5) 76882cd038dSYoshinobu Inoue */ 76921f08a07SBjoern A. Zeeb if (af6 != NULL) 77021f08a07SBjoern A. 
Zeeb af6tmp = TAILQ_PREV(af6, ip6fraghead, ip6af_tq); 77121f08a07SBjoern A. Zeeb else 77221f08a07SBjoern A. Zeeb af6tmp = TAILQ_LAST(&q6->ip6q_frags, ip6fraghead); 77321f08a07SBjoern A. Zeeb if (af6tmp != NULL) { 77421f08a07SBjoern A. Zeeb if (af6tmp->ip6af_off + af6tmp->ip6af_frglen - 7755778b399SBjoern A. Zeeb ip6af->ip6af_off > 0) { 776cd188da2SBjoern A. Zeeb if (af6tmp->ip6af_off != ip6af->ip6af_off || 777cd188da2SBjoern A. Zeeb af6tmp->ip6af_frglen != ip6af->ip6af_frglen) 778cd188da2SBjoern A. Zeeb frag6_freef(q6, bucket); 779487a161cSBjoern A. Zeeb free(ip6af, M_FRAG6); 78082cd038dSYoshinobu Inoue goto dropfrag; 78182cd038dSYoshinobu Inoue } 78282cd038dSYoshinobu Inoue } 78321f08a07SBjoern A. Zeeb if (af6 != NULL) { 7845778b399SBjoern A. Zeeb if (ip6af->ip6af_off + ip6af->ip6af_frglen - 7855778b399SBjoern A. Zeeb af6->ip6af_off > 0) { 786cd188da2SBjoern A. Zeeb if (af6->ip6af_off != ip6af->ip6af_off || 787cd188da2SBjoern A. Zeeb af6->ip6af_frglen != ip6af->ip6af_frglen) 788cd188da2SBjoern A. Zeeb frag6_freef(q6, bucket); 789487a161cSBjoern A. Zeeb free(ip6af, M_FRAG6); 79082cd038dSYoshinobu Inoue goto dropfrag; 79182cd038dSYoshinobu Inoue } 79282cd038dSYoshinobu Inoue } 79382cd038dSYoshinobu Inoue 7944b908c8bSRobert Watson #ifdef MAC 7954b908c8bSRobert Watson mac_ip6q_update(m, q6); 7964b908c8bSRobert Watson #endif 79782cd038dSYoshinobu Inoue 79882cd038dSYoshinobu Inoue /* 79923d374aaSBjoern A. Zeeb * Stick new segment in its place; check for complete reassembly. 80023d374aaSBjoern A. Zeeb * If not complete, check fragment limit. Move to front of packet 80123d374aaSBjoern A. Zeeb * queue, as we are the most recently active fragmented packet. 80282cd038dSYoshinobu Inoue */ 80321f08a07SBjoern A. Zeeb if (af6 != NULL) 80421f08a07SBjoern A. Zeeb TAILQ_INSERT_BEFORE(af6, ip6af, ip6af_tq); 80521f08a07SBjoern A. Zeeb else 80621f08a07SBjoern A. Zeeb TAILQ_INSERT_TAIL(&q6->ip6q_frags, ip6af, ip6af_tq); 80721f08a07SBjoern A. Zeeb postinsert: 8082adfd64fSJonathan T. 
Looney atomic_add_int(&frag6_nfrags, 1); 8099888c401SHajimu UMEMOTO q6->ip6q_nfrag++; 8103c7165b3SBjoern A. Zeeb 8115778b399SBjoern A. Zeeb plen = 0; 81221f08a07SBjoern A. Zeeb TAILQ_FOREACH(af6, &q6->ip6q_frags, ip6af_tq) { 8135778b399SBjoern A. Zeeb if (af6->ip6af_off != plen) { 81403c99d76SJonathan T. Looney if (q6->ip6q_nfrag > V_ip6_maxfragsperpacket) { 815198fdaedSTom Jones IP6STAT_ADD(ip6s_fragdropped, q6->ip6q_nfrag); 8169cb1a47aSBjoern A. Zeeb frag6_freef(q6, bucket); 81703c99d76SJonathan T. Looney } 8189cb1a47aSBjoern A. Zeeb IP6QB_UNLOCK(bucket); 819a8fe77d8SBjoern A. Zeeb *mp = NULL; 8205778b399SBjoern A. Zeeb return (IPPROTO_DONE); 82182cd038dSYoshinobu Inoue } 8225778b399SBjoern A. Zeeb plen += af6->ip6af_frglen; 82382cd038dSYoshinobu Inoue } 82421f08a07SBjoern A. Zeeb af6 = TAILQ_LAST(&q6->ip6q_frags, ip6fraghead); 82521f08a07SBjoern A. Zeeb if (af6->ip6af_mff) { 82603c99d76SJonathan T. Looney if (q6->ip6q_nfrag > V_ip6_maxfragsperpacket) { 827198fdaedSTom Jones IP6STAT_ADD(ip6s_fragdropped, q6->ip6q_nfrag); 8289cb1a47aSBjoern A. Zeeb frag6_freef(q6, bucket); 82903c99d76SJonathan T. Looney } 8309cb1a47aSBjoern A. Zeeb IP6QB_UNLOCK(bucket); 831a8fe77d8SBjoern A. Zeeb *mp = NULL; 8325778b399SBjoern A. Zeeb return (IPPROTO_DONE); 83382cd038dSYoshinobu Inoue } 83482cd038dSYoshinobu Inoue 83523d374aaSBjoern A. Zeeb /* Reassembly is complete; concatenate fragments. */ 83621f08a07SBjoern A. Zeeb ip6af = TAILQ_FIRST(&q6->ip6q_frags); 837da89a0feSBjoern A. Zeeb t = m = ip6af->ip6af_m; 83821f08a07SBjoern A. Zeeb TAILQ_REMOVE(&q6->ip6q_frags, ip6af, ip6af_tq); 83921f08a07SBjoern A. Zeeb while ((af6 = TAILQ_FIRST(&q6->ip6q_frags)) != NULL) { 8409907aba3SAndrey V. Elsukov m->m_pkthdr.csum_flags &= 841da89a0feSBjoern A. Zeeb af6->ip6af_m->m_pkthdr.csum_flags; 8429907aba3SAndrey V. Elsukov m->m_pkthdr.csum_data += 843da89a0feSBjoern A. Zeeb af6->ip6af_m->m_pkthdr.csum_data; 8449907aba3SAndrey V. Elsukov 84521f08a07SBjoern A. 
Zeeb TAILQ_REMOVE(&q6->ip6q_frags, af6, ip6af_tq); 846efdfee93SBjoern A. Zeeb t = m_last(t); 847da89a0feSBjoern A. Zeeb m_adj(af6->ip6af_m, af6->ip6af_offset); 848da89a0feSBjoern A. Zeeb m_demote_pkthdr(af6->ip6af_m); 849da89a0feSBjoern A. Zeeb m_cat(t, af6->ip6af_m); 850487a161cSBjoern A. Zeeb free(af6, M_FRAG6); 85182cd038dSYoshinobu Inoue } 85282cd038dSYoshinobu Inoue 8539907aba3SAndrey V. Elsukov while (m->m_pkthdr.csum_data & 0xffff0000) 8549907aba3SAndrey V. Elsukov m->m_pkthdr.csum_data = (m->m_pkthdr.csum_data & 0xffff) + 8559907aba3SAndrey V. Elsukov (m->m_pkthdr.csum_data >> 16); 8569907aba3SAndrey V. Elsukov 85723d374aaSBjoern A. Zeeb /* Adjust offset to point where the original next header starts. */ 85882cd038dSYoshinobu Inoue offset = ip6af->ip6af_offset - sizeof(struct ip6_frag); 859487a161cSBjoern A. Zeeb free(ip6af, M_FRAG6); 860ff3d1a3fSJonathan T. Looney if ((u_int)plen + (u_int)offset - sizeof(struct ip6_hdr) > 861ff3d1a3fSJonathan T. Looney IPV6_MAXPACKET) { 862ff3d1a3fSJonathan T. Looney frag6_freef(q6, bucket); 863ff3d1a3fSJonathan T. Looney goto dropfrag; 864ff3d1a3fSJonathan T. Looney } 865686cdd19SJun-ichiro itojun Hagino ip6 = mtod(m, struct ip6_hdr *); 8665778b399SBjoern A. Zeeb ip6->ip6_plen = htons((u_short)plen + offset - sizeof(struct ip6_hdr)); 8675e9510e3SJINMEI Tatuya if (q6->ip6q_ecn == IPTOS_ECN_CE) 8685e9510e3SJINMEI Tatuya ip6->ip6_flow |= htonl(IPTOS_ECN_CE << 20); 86982cd038dSYoshinobu Inoue nxt = q6->ip6q_nxt; 87082cd038dSYoshinobu Inoue 87121f08a07SBjoern A. Zeeb TAILQ_REMOVE(head, q6, ip6q_tq); 87221f08a07SBjoern A. Zeeb V_ip6qb[bucket].count--; 8732adfd64fSJonathan T. Looney atomic_subtract_int(&frag6_nfrags, q6->ip6q_nfrag); 87421f08a07SBjoern A. Zeeb 875a61b5cfbSBjoern A. Zeeb ip6_deletefraghdr(m, offset, M_NOWAIT); 87682cd038dSYoshinobu Inoue 87723d374aaSBjoern A. Zeeb /* Set nxt(-hdr field value) to the original value. */ 87868e0e5a6SAndrey V. 
Elsukov m_copyback(m, ip6_get_prevhdr(m, offset), sizeof(uint8_t), 87968e0e5a6SAndrey V. Elsukov (caddr_t)&nxt); 88082cd038dSYoshinobu Inoue 8814b908c8bSRobert Watson #ifdef MAC 8824b908c8bSRobert Watson mac_ip6q_reassemble(q6, m); 8834b908c8bSRobert Watson mac_ip6q_destroy(q6); 8844b908c8bSRobert Watson #endif 885487a161cSBjoern A. Zeeb free(q6, M_FRAG6); 88680d7a853SJonathan T. Looney atomic_subtract_int(&V_frag6_nfragpackets, 1); 88782cd038dSYoshinobu Inoue 88882cd038dSYoshinobu Inoue if (m->m_flags & M_PKTHDR) { /* Isn't it always true? */ 8895778b399SBjoern A. Zeeb 8905778b399SBjoern A. Zeeb plen = 0; 89182cd038dSYoshinobu Inoue for (t = m; t; t = t->m_next) 89282cd038dSYoshinobu Inoue plen += t->m_len; 89382cd038dSYoshinobu Inoue m->m_pkthdr.len = plen; 894a55383e7SHans Petter Selasky /* Set a valid receive interface pointer. */ 895a55383e7SHans Petter Selasky m->m_pkthdr.rcvif = srcifp; 89682cd038dSYoshinobu Inoue } 89782cd038dSYoshinobu Inoue 898aaa46574SAdrian Chadd #ifdef RSS 899aaa46574SAdrian Chadd mtag = m_tag_alloc(MTAG_ABI_IPV6, IPV6_TAG_DIRECT, sizeof(*ip6dc), 900aaa46574SAdrian Chadd M_NOWAIT); 901aaa46574SAdrian Chadd if (mtag == NULL) 902aaa46574SAdrian Chadd goto dropfrag; 903aaa46574SAdrian Chadd 904aaa46574SAdrian Chadd ip6dc = (struct ip6_direct_ctx *)(mtag + 1); 905aaa46574SAdrian Chadd ip6dc->ip6dc_nxt = nxt; 906aaa46574SAdrian Chadd ip6dc->ip6dc_off = offset; 907aaa46574SAdrian Chadd 908aaa46574SAdrian Chadd m_tag_prepend(m, mtag); 909aaa46574SAdrian Chadd #endif 910aaa46574SAdrian Chadd 9119cb1a47aSBjoern A. Zeeb IP6QB_UNLOCK(bucket); 9129cb8d207SAndrey V. Elsukov IP6STAT_INC(ip6s_reassembled); 91382cd038dSYoshinobu Inoue in6_ifstat_inc(dstifp, ifs6_reass_ok); 91482cd038dSYoshinobu Inoue 915aaa46574SAdrian Chadd #ifdef RSS 91623d374aaSBjoern A. Zeeb /* Queue/dispatch for reprocessing. */ 917aaa46574SAdrian Chadd netisr_dispatch(NETISR_IPV6_DIRECT, m); 918a8fe77d8SBjoern A. Zeeb *mp = NULL; 9195778b399SBjoern A. 
Zeeb return (IPPROTO_DONE); 920aaa46574SAdrian Chadd #endif 921aaa46574SAdrian Chadd 92223d374aaSBjoern A. Zeeb /* Tell launch routine the next header. */ 92382cd038dSYoshinobu Inoue *mp = m; 92482cd038dSYoshinobu Inoue *offp = offset; 92582cd038dSYoshinobu Inoue 9265778b399SBjoern A. Zeeb return (nxt); 92782cd038dSYoshinobu Inoue 92882cd038dSYoshinobu Inoue dropfrag: 9299cb1a47aSBjoern A. Zeeb IP6QB_UNLOCK(bucket); 9307715d794SBjoern A. Zeeb dropfrag2: 93182cd038dSYoshinobu Inoue in6_ifstat_inc(dstifp, ifs6_reass_fail); 9329cb8d207SAndrey V. Elsukov IP6STAT_INC(ip6s_fragdropped); 93382cd038dSYoshinobu Inoue m_freem(m); 934a8fe77d8SBjoern A. Zeeb *mp = NULL; 9355778b399SBjoern A. Zeeb return (IPPROTO_DONE); 93682cd038dSYoshinobu Inoue } 93782cd038dSYoshinobu Inoue 93882cd038dSYoshinobu Inoue /* 93933841545SHajimu UMEMOTO * IPv6 reassembling timer processing; 94023d374aaSBjoern A. Zeeb * if a timer expires on a reassembly queue, discard it. 94182cd038dSYoshinobu Inoue */ 942a0d7d247SGleb Smirnoff static struct callout frag6_callout; 943a0d7d247SGleb Smirnoff static void 944a0d7d247SGleb Smirnoff frag6_slowtimo(void *arg __unused) 94582cd038dSYoshinobu Inoue { 9468b615593SMarko Zec VNET_ITERATOR_DECL(vnet_iter); 94721f08a07SBjoern A. Zeeb struct ip6qhead *head; 94821f08a07SBjoern A. Zeeb struct ip6q *q6, *q6tmp; 9499cb1a47aSBjoern A. Zeeb uint32_t bucket; 95082cd038dSYoshinobu Inoue 9518afe9481SMateusz Guzik if (atomic_load_int(&frag6_nfrags) == 0) 952a0d7d247SGleb Smirnoff goto done; 9538afe9481SMateusz Guzik 9545ee847d3SRobert Watson VNET_LIST_RLOCK_NOSLEEP(); 9558b615593SMarko Zec VNET_FOREACH(vnet_iter) { 9568b615593SMarko Zec CURVNET_SET(vnet_iter); 9579cb1a47aSBjoern A. Zeeb for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) { 9588afe9481SMateusz Guzik if (V_ip6qb[bucket].count == 0) 9598afe9481SMateusz Guzik continue; 9609cb1a47aSBjoern A. Zeeb IP6QB_LOCK(bucket); 9619cb1a47aSBjoern A. Zeeb head = IP6QB_HEAD(bucket); 96221f08a07SBjoern A. 
Zeeb TAILQ_FOREACH_SAFE(q6, head, ip6q_tq, q6tmp) 96321f08a07SBjoern A. Zeeb if (--q6->ip6q_ttl == 0) { 964198fdaedSTom Jones IP6STAT_ADD(ip6s_fragtimeout, 96521f08a07SBjoern A. Zeeb q6->ip6q_nfrag); 96682cd038dSYoshinobu Inoue /* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */ 96721f08a07SBjoern A. Zeeb frag6_freef(q6, bucket); 96882cd038dSYoshinobu Inoue } 96982cd038dSYoshinobu Inoue /* 97082cd038dSYoshinobu Inoue * If we are over the maximum number of fragments 97182cd038dSYoshinobu Inoue * (due to the limit being lowered), drain off 97282cd038dSYoshinobu Inoue * enough to get down to the new limit. 9731e9f3b73SJonathan T. Looney * Note that we drain all reassembly queues if 9741e9f3b73SJonathan T. Looney * maxfragpackets is 0 (fragmentation is disabled), 97523d374aaSBjoern A. Zeeb * and do not enforce a limit when maxfragpackets 9761e9f3b73SJonathan T. Looney * is negative. 97782cd038dSYoshinobu Inoue */ 9781e9f3b73SJonathan T. Looney while ((V_ip6_maxfragpackets == 0 || 9791e9f3b73SJonathan T. Looney (V_ip6_maxfragpackets > 0 && 9809cb1a47aSBjoern A. Zeeb V_ip6qb[bucket].count > V_ip6_maxfragbucketsize)) && 98121f08a07SBjoern A. Zeeb (q6 = TAILQ_LAST(head, ip6qhead)) != NULL) { 98221f08a07SBjoern A. Zeeb IP6STAT_ADD(ip6s_fragoverflow, q6->ip6q_nfrag); 98382cd038dSYoshinobu Inoue /* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */ 98421f08a07SBjoern A. Zeeb frag6_freef(q6, bucket); 98580d7a853SJonathan T. Looney } 9869cb1a47aSBjoern A. Zeeb IP6QB_UNLOCK(bucket); 98782cd038dSYoshinobu Inoue } 9881e9f3b73SJonathan T. Looney /* 9891e9f3b73SJonathan T. Looney * If we are still over the maximum number of fragmented 9901e9f3b73SJonathan T. Looney * packets, drain off enough to get down to the new limit. 9911e9f3b73SJonathan T. Looney */ 9929cb1a47aSBjoern A. Zeeb bucket = 0; 9931e9f3b73SJonathan T. Looney while (V_ip6_maxfragpackets >= 0 && 9941e9f3b73SJonathan T. Looney atomic_load_int(&V_frag6_nfragpackets) > 9951e9f3b73SJonathan T. 
Looney (u_int)V_ip6_maxfragpackets) { 9969cb1a47aSBjoern A. Zeeb IP6QB_LOCK(bucket); 99721f08a07SBjoern A. Zeeb q6 = TAILQ_LAST(IP6QB_HEAD(bucket), ip6qhead); 99821f08a07SBjoern A. Zeeb if (q6 != NULL) { 99921f08a07SBjoern A. Zeeb IP6STAT_ADD(ip6s_fragoverflow, q6->ip6q_nfrag); 10001e9f3b73SJonathan T. Looney /* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */ 100121f08a07SBjoern A. Zeeb frag6_freef(q6, bucket); 10021e9f3b73SJonathan T. Looney } 10039cb1a47aSBjoern A. Zeeb IP6QB_UNLOCK(bucket); 10049cb1a47aSBjoern A. Zeeb bucket = (bucket + 1) % IP6REASS_NHASH; 10051e9f3b73SJonathan T. Looney } 10068b615593SMarko Zec CURVNET_RESTORE(); 10078b615593SMarko Zec } 10085ee847d3SRobert Watson VNET_LIST_RUNLOCK_NOSLEEP(); 1009a0d7d247SGleb Smirnoff done: 1010e32221a1SAlexander V. Chernikov callout_reset_sbt(&frag6_callout, SBT_1MS * IP6_CALLOUT_INTERVAL_MS, 1011e32221a1SAlexander V. Chernikov SBT_1MS * 10, frag6_slowtimo, NULL, 0); 101282cd038dSYoshinobu Inoue } 101382cd038dSYoshinobu Inoue 1014a0d7d247SGleb Smirnoff static void 1015a0d7d247SGleb Smirnoff frag6_slowtimo_init(void *arg __unused) 1016a0d7d247SGleb Smirnoff { 1017a0d7d247SGleb Smirnoff 1018a0d7d247SGleb Smirnoff callout_init(&frag6_callout, 1); 1019e32221a1SAlexander V. Chernikov callout_reset_sbt(&frag6_callout, SBT_1MS * IP6_CALLOUT_INTERVAL_MS, 1020e32221a1SAlexander V. Chernikov SBT_1MS * 10, frag6_slowtimo, NULL, 0); 1021a0d7d247SGleb Smirnoff } 1022a0d7d247SGleb Smirnoff SYSINIT(frag6, SI_SUB_VNET_DONE, SI_ORDER_ANY, frag6_slowtimo_init, NULL); 1023a0d7d247SGleb Smirnoff 102423d374aaSBjoern A. Zeeb /* 102523d374aaSBjoern A. Zeeb * Eventhandler to adjust limits in case nmbclusters change. 102623d374aaSBjoern A. Zeeb */ 1027c00464a2SBjoern A. Zeeb static void 1028c00464a2SBjoern A. Zeeb frag6_change(void *tag) 1029c00464a2SBjoern A. Zeeb { 1030c00464a2SBjoern A. Zeeb VNET_ITERATOR_DECL(vnet_iter); 1031c00464a2SBjoern A. Zeeb 1032c00464a2SBjoern A. Zeeb ip6_maxfrags = IP6_MAXFRAGS; 1033c00464a2SBjoern A. 
Zeeb VNET_LIST_RLOCK_NOSLEEP(); 1034c00464a2SBjoern A. Zeeb VNET_FOREACH(vnet_iter) { 1035c00464a2SBjoern A. Zeeb CURVNET_SET(vnet_iter); 1036c00464a2SBjoern A. Zeeb V_ip6_maxfragpackets = IP6_MAXFRAGPACKETS; 1037c00464a2SBjoern A. Zeeb frag6_set_bucketsize(); 1038c00464a2SBjoern A. Zeeb CURVNET_RESTORE(); 1039c00464a2SBjoern A. Zeeb } 1040c00464a2SBjoern A. Zeeb VNET_LIST_RUNLOCK_NOSLEEP(); 1041c00464a2SBjoern A. Zeeb } 1042c00464a2SBjoern A. Zeeb 1043c00464a2SBjoern A. Zeeb /* 1044c00464a2SBjoern A. Zeeb * Initialise reassembly queue and fragment identifier. 1045c00464a2SBjoern A. Zeeb */ 1046c00464a2SBjoern A. Zeeb void 1047c00464a2SBjoern A. Zeeb frag6_init(void) 1048c00464a2SBjoern A. Zeeb { 10499cb1a47aSBjoern A. Zeeb uint32_t bucket; 1050c00464a2SBjoern A. Zeeb 1051c00464a2SBjoern A. Zeeb V_ip6_maxfragpackets = IP6_MAXFRAGPACKETS; 1052c00464a2SBjoern A. Zeeb frag6_set_bucketsize(); 10539cb1a47aSBjoern A. Zeeb for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) { 105421f08a07SBjoern A. Zeeb TAILQ_INIT(IP6QB_HEAD(bucket)); 1055efdfee93SBjoern A. Zeeb mtx_init(&V_ip6qb[bucket].lock, "ip6qb", NULL, MTX_DEF); 10569cb1a47aSBjoern A. Zeeb V_ip6qb[bucket].count = 0; 1057c00464a2SBjoern A. Zeeb } 10589cb1a47aSBjoern A. Zeeb V_ip6qb_hashseed = arc4random(); 1059c00464a2SBjoern A. Zeeb V_ip6_maxfragsperpacket = 64; 106067a10c46SBjoern A. Zeeb #ifdef VIMAGE 106167a10c46SBjoern A. Zeeb V_frag6_on = true; 106267a10c46SBjoern A. Zeeb #endif 1063c00464a2SBjoern A. Zeeb if (!IS_DEFAULT_VNET(curvnet)) 1064c00464a2SBjoern A. Zeeb return; 1065c00464a2SBjoern A. Zeeb 1066c00464a2SBjoern A. Zeeb ip6_maxfrags = IP6_MAXFRAGS; 1067c00464a2SBjoern A. Zeeb EVENTHANDLER_REGISTER(nmbclusters_change, 1068c00464a2SBjoern A. Zeeb frag6_change, NULL, EVENTHANDLER_PRI_ANY); 1069c00464a2SBjoern A. Zeeb } 1070c00464a2SBjoern A. Zeeb 107182cd038dSYoshinobu Inoue /* 107282cd038dSYoshinobu Inoue * Drain off all datagram fragments. 107382cd038dSYoshinobu Inoue */ 107467a10c46SBjoern A. 
Zeeb static void 107567a10c46SBjoern A. Zeeb frag6_drain_one(void) 107682cd038dSYoshinobu Inoue { 107721f08a07SBjoern A. Zeeb struct ip6q *q6; 10789cb1a47aSBjoern A. Zeeb uint32_t bucket; 10799888c401SHajimu UMEMOTO 10809cb1a47aSBjoern A. Zeeb for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) { 108167a10c46SBjoern A. Zeeb IP6QB_LOCK(bucket); 108221f08a07SBjoern A. Zeeb while ((q6 = TAILQ_FIRST(IP6QB_HEAD(bucket))) != NULL) { 10839cb8d207SAndrey V. Elsukov IP6STAT_INC(ip6s_fragdropped); 108482cd038dSYoshinobu Inoue /* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */ 108521f08a07SBjoern A. Zeeb frag6_freef(q6, bucket); 108680d7a853SJonathan T. Looney } 10879cb1a47aSBjoern A. Zeeb IP6QB_UNLOCK(bucket); 108882cd038dSYoshinobu Inoue } 108967a10c46SBjoern A. Zeeb } 109067a10c46SBjoern A. Zeeb 109167a10c46SBjoern A. Zeeb void 109267a10c46SBjoern A. Zeeb frag6_drain(void) 109367a10c46SBjoern A. Zeeb { 109467a10c46SBjoern A. Zeeb VNET_ITERATOR_DECL(vnet_iter); 109567a10c46SBjoern A. Zeeb 109667a10c46SBjoern A. Zeeb VNET_LIST_RLOCK_NOSLEEP(); 109767a10c46SBjoern A. Zeeb VNET_FOREACH(vnet_iter) { 109867a10c46SBjoern A. Zeeb CURVNET_SET(vnet_iter); 109967a10c46SBjoern A. Zeeb frag6_drain_one(); 11008b615593SMarko Zec CURVNET_RESTORE(); 11018b615593SMarko Zec } 11025ee847d3SRobert Watson VNET_LIST_RUNLOCK_NOSLEEP(); 110382cd038dSYoshinobu Inoue } 1104e5ee7060SGleb Smirnoff 110567a10c46SBjoern A. Zeeb #ifdef VIMAGE 110667a10c46SBjoern A. Zeeb /* 110767a10c46SBjoern A. Zeeb * Clear up IPv6 reassembly structures. 110867a10c46SBjoern A. Zeeb */ 110967a10c46SBjoern A. Zeeb void 111067a10c46SBjoern A. Zeeb frag6_destroy(void) 111167a10c46SBjoern A. Zeeb { 111267a10c46SBjoern A. Zeeb uint32_t bucket; 111367a10c46SBjoern A. Zeeb 111467a10c46SBjoern A. Zeeb frag6_drain_one(); 111567a10c46SBjoern A. Zeeb V_frag6_on = false; 111667a10c46SBjoern A. Zeeb for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) { 111767a10c46SBjoern A. Zeeb KASSERT(V_ip6qb[bucket].count == 0, 111867a10c46SBjoern A. 
Zeeb ("%s: V_ip6qb[%d] (%p) count not 0 (%d)", __func__, 111967a10c46SBjoern A. Zeeb bucket, &V_ip6qb[bucket], V_ip6qb[bucket].count)); 112067a10c46SBjoern A. Zeeb mtx_destroy(&V_ip6qb[bucket].lock); 112167a10c46SBjoern A. Zeeb } 112267a10c46SBjoern A. Zeeb } 112367a10c46SBjoern A. Zeeb #endif 1124