1caf43b02SWarner Losh /*- 251369649SPedro F. Giffuni * SPDX-License-Identifier: BSD-3-Clause 351369649SPedro F. Giffuni * 482cd038dSYoshinobu Inoue * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. 582cd038dSYoshinobu Inoue * All rights reserved. 682cd038dSYoshinobu Inoue * 782cd038dSYoshinobu Inoue * Redistribution and use in source and binary forms, with or without 882cd038dSYoshinobu Inoue * modification, are permitted provided that the following conditions 982cd038dSYoshinobu Inoue * are met: 1082cd038dSYoshinobu Inoue * 1. Redistributions of source code must retain the above copyright 1182cd038dSYoshinobu Inoue * notice, this list of conditions and the following disclaimer. 1282cd038dSYoshinobu Inoue * 2. Redistributions in binary form must reproduce the above copyright 1382cd038dSYoshinobu Inoue * notice, this list of conditions and the following disclaimer in the 1482cd038dSYoshinobu Inoue * documentation and/or other materials provided with the distribution. 1582cd038dSYoshinobu Inoue * 3. Neither the name of the project nor the names of its contributors 1682cd038dSYoshinobu Inoue * may be used to endorse or promote products derived from this software 1782cd038dSYoshinobu Inoue * without specific prior written permission. 1882cd038dSYoshinobu Inoue * 1982cd038dSYoshinobu Inoue * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND 2082cd038dSYoshinobu Inoue * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 2182cd038dSYoshinobu Inoue * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 2282cd038dSYoshinobu Inoue * ARE DISCLAIMED. 
IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE 2382cd038dSYoshinobu Inoue * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 2482cd038dSYoshinobu Inoue * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 2582cd038dSYoshinobu Inoue * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 2682cd038dSYoshinobu Inoue * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 2782cd038dSYoshinobu Inoue * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 2882cd038dSYoshinobu Inoue * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 2982cd038dSYoshinobu Inoue * SUCH DAMAGE. 30b48287a3SDavid E. O'Brien * 31b48287a3SDavid E. O'Brien * $KAME: frag6.c,v 1.33 2002/01/07 11:34:48 kjc Exp $ 3282cd038dSYoshinobu Inoue */ 3382cd038dSYoshinobu Inoue 34b48287a3SDavid E. O'Brien #include <sys/cdefs.h> 35b48287a3SDavid E. O'Brien __FBSDID("$FreeBSD$"); 36b48287a3SDavid E. O'Brien 37aaa46574SAdrian Chadd #include "opt_rss.h" 38aaa46574SAdrian Chadd 3982cd038dSYoshinobu Inoue #include <sys/param.h> 40f349c821SBjoern A. Zeeb #include <sys/systm.h> 411a3044faSBjoern A. Zeeb #include <sys/domain.h> 421a3044faSBjoern A. Zeeb #include <sys/eventhandler.h> 4380d7a853SJonathan T. Looney #include <sys/hash.h> 441a3044faSBjoern A. Zeeb #include <sys/kernel.h> 4582cd038dSYoshinobu Inoue #include <sys/malloc.h> 4682cd038dSYoshinobu Inoue #include <sys/mbuf.h> 4782cd038dSYoshinobu Inoue #include <sys/protosw.h> 4882cd038dSYoshinobu Inoue #include <sys/socket.h> 49757cb678SBjoern A. 
Zeeb #include <sys/sysctl.h> 5082cd038dSYoshinobu Inoue #include <sys/syslog.h> 5182cd038dSYoshinobu Inoue 5282cd038dSYoshinobu Inoue #include <net/if.h> 5376039bc8SGleb Smirnoff #include <net/if_var.h> 54aaa46574SAdrian Chadd #include <net/netisr.h> 5582cd038dSYoshinobu Inoue #include <net/route.h> 56eddfbb76SRobert Watson #include <net/vnet.h> 5782cd038dSYoshinobu Inoue 5882cd038dSYoshinobu Inoue #include <netinet/in.h> 5982cd038dSYoshinobu Inoue #include <netinet/in_var.h> 60686cdd19SJun-ichiro itojun Hagino #include <netinet/ip6.h> 6182cd038dSYoshinobu Inoue #include <netinet6/ip6_var.h> 62686cdd19SJun-ichiro itojun Hagino #include <netinet/icmp6.h> 6359dfcba4SHajimu UMEMOTO #include <netinet/in_systm.h> /* for ECN definitions */ 6459dfcba4SHajimu UMEMOTO #include <netinet/ip.h> /* for ECN definitions */ 6582cd038dSYoshinobu Inoue 661a3044faSBjoern A. Zeeb #ifdef MAC 674b908c8bSRobert Watson #include <security/mac/mac_framework.h> 681a3044faSBjoern A. Zeeb #endif 694b908c8bSRobert Watson 7031e8f7e5SHajimu UMEMOTO /* 7180d7a853SJonathan T. Looney * Reassembly headers are stored in hash buckets. 7231e8f7e5SHajimu UMEMOTO */ 732ceeacbeSJonathan T. Looney #define IP6REASS_NHASH_LOG2 10 7480d7a853SJonathan T. Looney #define IP6REASS_NHASH (1 << IP6REASS_NHASH_LOG2) 7580d7a853SJonathan T. Looney #define IP6REASS_HMASK (IP6REASS_NHASH - 1) 7680d7a853SJonathan T. Looney 7780d7a853SJonathan T. Looney static void frag6_enq(struct ip6asfrag *, struct ip6asfrag *, 7880d7a853SJonathan T. Looney uint32_t bucket __unused); 7980d7a853SJonathan T. Looney static void frag6_deq(struct ip6asfrag *, uint32_t bucket __unused); 8080d7a853SJonathan T. Looney static void frag6_insque_head(struct ip6q *, struct ip6q *, 811e9f3b73SJonathan T. Looney uint32_t bucket); 821e9f3b73SJonathan T. Looney static void frag6_remque(struct ip6q *, uint32_t bucket); 836bbdbbb8SHans Petter Selasky static void frag6_freef(struct ip6q *, uint32_t bucket); 8480d7a853SJonathan T. 
Looney 8580d7a853SJonathan T. Looney struct ip6qbucket { 8680d7a853SJonathan T. Looney struct ip6q ip6q; 8780d7a853SJonathan T. Looney struct mtx lock; 881e9f3b73SJonathan T. Looney int count; 8980d7a853SJonathan T. Looney }; 9080d7a853SJonathan T. Looney 91487a161cSBjoern A. Zeeb static MALLOC_DEFINE(M_FRAG6, "frag6", "IPv6 fragment reassembly header"); 92487a161cSBjoern A. Zeeb 93757cb678SBjoern A. Zeeb /* System wide (global) maximum and count of packets in reassembly queues. */ 94757cb678SBjoern A. Zeeb static int ip6_maxfrags; 95757cb678SBjoern A. Zeeb static volatile u_int frag6_nfrags = 0; 96757cb678SBjoern A. Zeeb 97757cb678SBjoern A. Zeeb /* Maximum and current packets in per-VNET reassembly queue. */ 98757cb678SBjoern A. Zeeb VNET_DEFINE_STATIC(int, ip6_maxfragpackets); 9980d7a853SJonathan T. Looney VNET_DEFINE_STATIC(volatile u_int, frag6_nfragpackets); 100757cb678SBjoern A. Zeeb #define V_ip6_maxfragpackets VNET(ip6_maxfragpackets) 101757cb678SBjoern A. Zeeb #define V_frag6_nfragpackets VNET(frag6_nfragpackets) 102757cb678SBjoern A. Zeeb 103757cb678SBjoern A. Zeeb /* Maximum per-VNET reassembly queues per bucket and fragments per packet. */ 104757cb678SBjoern A. Zeeb VNET_DEFINE_STATIC(int, ip6_maxfragbucketsize); 105757cb678SBjoern A. Zeeb VNET_DEFINE_STATIC(int, ip6_maxfragsperpacket); 106757cb678SBjoern A. Zeeb #define V_ip6_maxfragbucketsize VNET(ip6_maxfragbucketsize) 107757cb678SBjoern A. Zeeb #define V_ip6_maxfragsperpacket VNET(ip6_maxfragsperpacket) 108757cb678SBjoern A. Zeeb 109757cb678SBjoern A. Zeeb /* Per-VNET reassembly queue buckets. */ 11080d7a853SJonathan T. Looney VNET_DEFINE_STATIC(struct ip6qbucket, ip6q[IP6REASS_NHASH]); 11180d7a853SJonathan T. Looney VNET_DEFINE_STATIC(uint32_t, ip6q_hashseed); 1121e77c105SRobert Watson #define V_ip6q VNET(ip6q) 11380d7a853SJonathan T. Looney #define V_ip6q_hashseed VNET(ip6q_hashseed) 11482cd038dSYoshinobu Inoue 11580d7a853SJonathan T. 
Looney #define IP6Q_LOCK(i) mtx_lock(&V_ip6q[(i)].lock) 11680d7a853SJonathan T. Looney #define IP6Q_TRYLOCK(i) mtx_trylock(&V_ip6q[(i)].lock) 11780d7a853SJonathan T. Looney #define IP6Q_LOCK_ASSERT(i) mtx_assert(&V_ip6q[(i)].lock, MA_OWNED) 11880d7a853SJonathan T. Looney #define IP6Q_UNLOCK(i) mtx_unlock(&V_ip6q[(i)].lock) 11980d7a853SJonathan T. Looney #define IP6Q_HEAD(i) (&V_ip6q[(i)].ip6q) 1209888c401SHajimu UMEMOTO 12182cd038dSYoshinobu Inoue /* 1222ceeacbeSJonathan T. Looney * By default, limit the number of IP6 fragments across all reassembly 1232ceeacbeSJonathan T. Looney * queues to 1/32 of the total number of mbuf clusters. 1242ceeacbeSJonathan T. Looney * 1252ceeacbeSJonathan T. Looney * Limit the total number of reassembly queues per VNET to the 1262ceeacbeSJonathan T. Looney * IP6 fragment limit, but ensure the limit will not allow any bucket 1272ceeacbeSJonathan T. Looney * to grow above 100 items. (The bucket limit is 1282ceeacbeSJonathan T. Looney * IP_MAXFRAGPACKETS / (IPREASS_NHASH / 2), so the 50 is the correct 1292ceeacbeSJonathan T. Looney * multiplier to reach a 100-item limit.) 1302ceeacbeSJonathan T. Looney * The 100-item limit was chosen as brief testing seems to show that 1312ceeacbeSJonathan T. Looney * this produces "reasonable" performance on some subset of systems 1322ceeacbeSJonathan T. Looney * under DoS attack. 1332ceeacbeSJonathan T. Looney */ 1342ceeacbeSJonathan T. Looney #define IP6_MAXFRAGS (nmbclusters / 32) 1352ceeacbeSJonathan T. Looney #define IP6_MAXFRAGPACKETS (imin(IP6_MAXFRAGS, IP6REASS_NHASH * 50)) 1362ceeacbeSJonathan T. Looney 137757cb678SBjoern A. Zeeb 1382ceeacbeSJonathan T. Looney /* 139757cb678SBjoern A. Zeeb * Sysctls and helper function. 14082cd038dSYoshinobu Inoue */ 141757cb678SBjoern A. Zeeb SYSCTL_DECL(_net_inet6_ip6); 142757cb678SBjoern A. Zeeb 143757cb678SBjoern A. Zeeb static void 14409b361c7SBjoern A. Zeeb frag6_set_bucketsize(void) 1451e9f3b73SJonathan T. Looney { 1461e9f3b73SJonathan T. 
Looney int i; 1471e9f3b73SJonathan T. Looney 1481e9f3b73SJonathan T. Looney if ((i = V_ip6_maxfragpackets) > 0) 1491e9f3b73SJonathan T. Looney V_ip6_maxfragbucketsize = imax(i / (IP6REASS_NHASH / 2), 1); 1501e9f3b73SJonathan T. Looney } 1511e9f3b73SJonathan T. Looney 152757cb678SBjoern A. Zeeb SYSCTL_INT(_net_inet6_ip6, IPV6CTL_MAXFRAGS, maxfrags, 153757cb678SBjoern A. Zeeb CTLFLAG_RW, &ip6_maxfrags, 0, 154757cb678SBjoern A. Zeeb "Maximum allowed number of outstanding IPv6 packet fragments. " 155757cb678SBjoern A. Zeeb "A value of 0 means no fragmented packets will be accepted, while a " 156757cb678SBjoern A. Zeeb "a value of -1 means no limit"); 157757cb678SBjoern A. Zeeb 158757cb678SBjoern A. Zeeb static int 159757cb678SBjoern A. Zeeb sysctl_ip6_maxfragpackets(SYSCTL_HANDLER_ARGS) 160757cb678SBjoern A. Zeeb { 161757cb678SBjoern A. Zeeb int error, val; 162757cb678SBjoern A. Zeeb 163757cb678SBjoern A. Zeeb val = V_ip6_maxfragpackets; 164757cb678SBjoern A. Zeeb error = sysctl_handle_int(oidp, &val, 0, req); 165757cb678SBjoern A. Zeeb if (error != 0 || !req->newptr) 166757cb678SBjoern A. Zeeb return (error); 167757cb678SBjoern A. Zeeb V_ip6_maxfragpackets = val; 168757cb678SBjoern A. Zeeb frag6_set_bucketsize(); 169757cb678SBjoern A. Zeeb return (0); 170757cb678SBjoern A. Zeeb } 171757cb678SBjoern A. Zeeb SYSCTL_PROC(_net_inet6_ip6, IPV6CTL_MAXFRAGPACKETS, maxfragpackets, 172757cb678SBjoern A. Zeeb CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW, NULL, 0, 173757cb678SBjoern A. Zeeb sysctl_ip6_maxfragpackets, "I", 174757cb678SBjoern A. Zeeb "Default maximum number of outstanding fragmented IPv6 packets. " 175757cb678SBjoern A. Zeeb "A value of 0 means no fragmented packets will be accepted, while a " 176757cb678SBjoern A. Zeeb "a value of -1 means no limit"); 177757cb678SBjoern A. Zeeb SYSCTL_INT(_net_inet6_ip6, IPV6CTL_MAXFRAGSPERPACKET, maxfragsperpacket, 178757cb678SBjoern A. Zeeb CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_maxfragsperpacket), 0, 179757cb678SBjoern A. 
Zeeb "Maximum allowed number of fragments per packet"); 180757cb678SBjoern A. Zeeb SYSCTL_INT(_net_inet6_ip6, IPV6CTL_MAXFRAGBUCKETSIZE, maxfragbucketsize, 181757cb678SBjoern A. Zeeb CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_maxfragbucketsize), 0, 182757cb678SBjoern A. Zeeb "Maximum number of reassembly queues per hash bucket"); 183757cb678SBjoern A. Zeeb 184757cb678SBjoern A. Zeeb 185757cb678SBjoern A. Zeeb /* 186c00464a2SBjoern A. Zeeb * Remove the IPv6 fragmentation header from the mbuf. 187c00464a2SBjoern A. Zeeb */ 188c00464a2SBjoern A. Zeeb int 189c00464a2SBjoern A. Zeeb ip6_deletefraghdr(struct mbuf *m, int offset, int wait) 190c00464a2SBjoern A. Zeeb { 191c00464a2SBjoern A. Zeeb struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *); 192c00464a2SBjoern A. Zeeb struct mbuf *t; 193c00464a2SBjoern A. Zeeb 194c00464a2SBjoern A. Zeeb /* Delete frag6 header. */ 195c00464a2SBjoern A. Zeeb if (m->m_len >= offset + sizeof(struct ip6_frag)) { 196c00464a2SBjoern A. Zeeb /* This is the only possible case with !PULLDOWN_TEST. */ 197c00464a2SBjoern A. Zeeb bcopy(ip6, (char *)ip6 + sizeof(struct ip6_frag), 198c00464a2SBjoern A. Zeeb offset); 199c00464a2SBjoern A. Zeeb m->m_data += sizeof(struct ip6_frag); 200c00464a2SBjoern A. Zeeb m->m_len -= sizeof(struct ip6_frag); 201c00464a2SBjoern A. Zeeb } else { 202c00464a2SBjoern A. Zeeb /* This comes with no copy if the boundary is on cluster. */ 203c00464a2SBjoern A. Zeeb if ((t = m_split(m, offset, wait)) == NULL) 204c00464a2SBjoern A. Zeeb return (ENOMEM); 205c00464a2SBjoern A. Zeeb m_adj(t, sizeof(struct ip6_frag)); 206c00464a2SBjoern A. Zeeb m_cat(m, t); 207c00464a2SBjoern A. Zeeb } 208c00464a2SBjoern A. Zeeb 209c00464a2SBjoern A. Zeeb m->m_flags |= M_FRAGMENTED; 210c00464a2SBjoern A. Zeeb return (0); 211c00464a2SBjoern A. Zeeb } 212c00464a2SBjoern A. Zeeb 213c00464a2SBjoern A. Zeeb /* 214c00464a2SBjoern A. Zeeb * Free a fragment reassembly header and all 215c00464a2SBjoern A. Zeeb * associated datagrams. 216757cb678SBjoern A. 
Zeeb */ 2174f590175SPaul Saab static void 218c00464a2SBjoern A. Zeeb frag6_freef(struct ip6q *q6, uint32_t bucket) 2194f590175SPaul Saab { 220c00464a2SBjoern A. Zeeb struct ip6asfrag *af6, *down6; 2214f590175SPaul Saab 222c00464a2SBjoern A. Zeeb IP6Q_LOCK_ASSERT(bucket); 223c00464a2SBjoern A. Zeeb 224c00464a2SBjoern A. Zeeb for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6; 225c00464a2SBjoern A. Zeeb af6 = down6) { 226c00464a2SBjoern A. Zeeb struct mbuf *m = IP6_REASS_MBUF(af6); 227c00464a2SBjoern A. Zeeb 228c00464a2SBjoern A. Zeeb down6 = af6->ip6af_down; 229c00464a2SBjoern A. Zeeb frag6_deq(af6, bucket); 230c00464a2SBjoern A. Zeeb 231c00464a2SBjoern A. Zeeb /* 232c00464a2SBjoern A. Zeeb * Return ICMP time exceeded error for the 1st fragment. 233c00464a2SBjoern A. Zeeb * Just free other fragments. 234c00464a2SBjoern A. Zeeb */ 235c00464a2SBjoern A. Zeeb if (af6->ip6af_off == 0) { 236c00464a2SBjoern A. Zeeb struct ip6_hdr *ip6; 237c00464a2SBjoern A. Zeeb 238c00464a2SBjoern A. Zeeb /* adjust pointer */ 239c00464a2SBjoern A. Zeeb ip6 = mtod(m, struct ip6_hdr *); 240c00464a2SBjoern A. Zeeb 241c00464a2SBjoern A. Zeeb /* restore source and destination addresses */ 242c00464a2SBjoern A. Zeeb ip6->ip6_src = q6->ip6q_src; 243c00464a2SBjoern A. Zeeb ip6->ip6_dst = q6->ip6q_dst; 244c00464a2SBjoern A. Zeeb 245c00464a2SBjoern A. Zeeb icmp6_error(m, ICMP6_TIME_EXCEEDED, 246c00464a2SBjoern A. Zeeb ICMP6_TIME_EXCEED_REASSEMBLY, 0); 247c00464a2SBjoern A. Zeeb } else 248c00464a2SBjoern A. Zeeb m_freem(m); 249c00464a2SBjoern A. Zeeb free(af6, M_FRAG6); 2502adfd64fSJonathan T. Looney } 251c00464a2SBjoern A. Zeeb frag6_remque(q6, bucket); 252c00464a2SBjoern A. Zeeb atomic_subtract_int(&frag6_nfrags, q6->ip6q_nfrag); 253c00464a2SBjoern A. Zeeb #ifdef MAC 254c00464a2SBjoern A. Zeeb mac_ip6q_destroy(q6); 255c00464a2SBjoern A. Zeeb #endif 256c00464a2SBjoern A. Zeeb free(q6, M_FRAG6); 257c00464a2SBjoern A. 
Zeeb atomic_subtract_int(&V_frag6_nfragpackets, 1); 25882cd038dSYoshinobu Inoue } 25982cd038dSYoshinobu Inoue 26082cd038dSYoshinobu Inoue /* 261686cdd19SJun-ichiro itojun Hagino * In RFC2460, fragment and reassembly rule do not agree with each other, 262686cdd19SJun-ichiro itojun Hagino * in terms of next header field handling in fragment header. 263686cdd19SJun-ichiro itojun Hagino * While the sender will use the same value for all of the fragmented packets, 264686cdd19SJun-ichiro itojun Hagino * receiver is suggested not to check the consistency. 265686cdd19SJun-ichiro itojun Hagino * 266686cdd19SJun-ichiro itojun Hagino * fragment rule (p20): 267686cdd19SJun-ichiro itojun Hagino * (2) A Fragment header containing: 268686cdd19SJun-ichiro itojun Hagino * The Next Header value that identifies the first header of 269686cdd19SJun-ichiro itojun Hagino * the Fragmentable Part of the original packet. 270686cdd19SJun-ichiro itojun Hagino * -> next header field is same for all fragments 271686cdd19SJun-ichiro itojun Hagino * 272686cdd19SJun-ichiro itojun Hagino * reassembly rule (p21): 273686cdd19SJun-ichiro itojun Hagino * The Next Header field of the last header of the Unfragmentable 274686cdd19SJun-ichiro itojun Hagino * Part is obtained from the Next Header field of the first 275686cdd19SJun-ichiro itojun Hagino * fragment's Fragment header. 276686cdd19SJun-ichiro itojun Hagino * -> should grab it from the first fragment only 277686cdd19SJun-ichiro itojun Hagino * 278686cdd19SJun-ichiro itojun Hagino * The following note also contradicts with fragment rule - no one is going to 279686cdd19SJun-ichiro itojun Hagino * send different fragment with different next header field. 280686cdd19SJun-ichiro itojun Hagino * 281686cdd19SJun-ichiro itojun Hagino * additional note (p22): 282686cdd19SJun-ichiro itojun Hagino * The Next Header values in the Fragment headers of different 283686cdd19SJun-ichiro itojun Hagino * fragments of the same original packet may differ. 
Only the value 284686cdd19SJun-ichiro itojun Hagino * from the Offset zero fragment packet is used for reassembly. 285686cdd19SJun-ichiro itojun Hagino * -> should grab it from the first fragment only 286686cdd19SJun-ichiro itojun Hagino * 287686cdd19SJun-ichiro itojun Hagino * There is no explicit reason given in the RFC. Historical reason maybe? 288686cdd19SJun-ichiro itojun Hagino */ 289686cdd19SJun-ichiro itojun Hagino /* 29082cd038dSYoshinobu Inoue * Fragment input 29182cd038dSYoshinobu Inoue */ 29282cd038dSYoshinobu Inoue int 2931272577eSXin LI frag6_input(struct mbuf **mp, int *offp, int proto) 29482cd038dSYoshinobu Inoue { 29582cd038dSYoshinobu Inoue struct mbuf *m = *mp, *t; 29682cd038dSYoshinobu Inoue struct ip6_hdr *ip6; 29782cd038dSYoshinobu Inoue struct ip6_frag *ip6f; 29880d7a853SJonathan T. Looney struct ip6q *head, *q6; 299686cdd19SJun-ichiro itojun Hagino struct ip6asfrag *af6, *ip6af, *af6dwn; 3002a5aafceSHajimu UMEMOTO struct in6_ifaddr *ia; 30182cd038dSYoshinobu Inoue int offset = *offp, nxt, i, next; 30282cd038dSYoshinobu Inoue int first_frag = 0; 303686cdd19SJun-ichiro itojun Hagino int fragoff, frgpartlen; /* must be larger than u_int16_t */ 304505e91f5SKristof Provost uint32_t hashkey[(sizeof(struct in6_addr) * 2 + 305505e91f5SKristof Provost sizeof(ip6f->ip6f_ident)) / sizeof(uint32_t)]; 306505e91f5SKristof Provost uint32_t hash, *hashkeyp; 30782cd038dSYoshinobu Inoue struct ifnet *dstifp; 30859dfcba4SHajimu UMEMOTO u_int8_t ecn, ecn0; 309aaa46574SAdrian Chadd #ifdef RSS 310aaa46574SAdrian Chadd struct m_tag *mtag; 311aaa46574SAdrian Chadd struct ip6_direct_ctx *ip6dc; 312aaa46574SAdrian Chadd #endif 313aaa46574SAdrian Chadd 31482cd038dSYoshinobu Inoue ip6 = mtod(m, struct ip6_hdr *); 315686cdd19SJun-ichiro itojun Hagino #ifndef PULLDOWN_TEST 316686cdd19SJun-ichiro itojun Hagino IP6_EXTHDR_CHECK(m, offset, sizeof(struct ip6_frag), IPPROTO_DONE); 31782cd038dSYoshinobu Inoue ip6f = (struct ip6_frag *)((caddr_t)ip6 + offset); 
318686cdd19SJun-ichiro itojun Hagino #else 319686cdd19SJun-ichiro itojun Hagino IP6_EXTHDR_GET(ip6f, struct ip6_frag *, m, offset, sizeof(*ip6f)); 320686cdd19SJun-ichiro itojun Hagino if (ip6f == NULL) 32140e39bbbSHajimu UMEMOTO return (IPPROTO_DONE); 322686cdd19SJun-ichiro itojun Hagino #endif 32382cd038dSYoshinobu Inoue 32482cd038dSYoshinobu Inoue dstifp = NULL; 32582cd038dSYoshinobu Inoue /* find the destination interface of the packet. */ 3263e88eb90SAndrey V. Elsukov ia = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */); 3273e88eb90SAndrey V. Elsukov if (ia != NULL) { 3282a5aafceSHajimu UMEMOTO dstifp = ia->ia_ifp; 3298c0fec80SRobert Watson ifa_free(&ia->ia_ifa); 3308c0fec80SRobert Watson } 33182cd038dSYoshinobu Inoue /* jumbo payload can't contain a fragment header */ 33282cd038dSYoshinobu Inoue if (ip6->ip6_plen == 0) { 33382cd038dSYoshinobu Inoue icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, offset); 33482cd038dSYoshinobu Inoue in6_ifstat_inc(dstifp, ifs6_reass_fail); 33582cd038dSYoshinobu Inoue return IPPROTO_DONE; 33682cd038dSYoshinobu Inoue } 33782cd038dSYoshinobu Inoue 33882cd038dSYoshinobu Inoue /* 33982cd038dSYoshinobu Inoue * check whether fragment packet's fragment length is 34082cd038dSYoshinobu Inoue * multiple of 8 octets. 34182cd038dSYoshinobu Inoue * sizeof(struct ip6_frag) == 8 34282cd038dSYoshinobu Inoue * sizeof(struct ip6_hdr) = 40 34382cd038dSYoshinobu Inoue */ 34482cd038dSYoshinobu Inoue if ((ip6f->ip6f_offlg & IP6F_MORE_FRAG) && 34582cd038dSYoshinobu Inoue (((ntohs(ip6->ip6_plen) - offset) & 0x7) != 0)) { 34606cd0a3fSHajimu UMEMOTO icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, 347686cdd19SJun-ichiro itojun Hagino offsetof(struct ip6_hdr, ip6_plen)); 34882cd038dSYoshinobu Inoue in6_ifstat_inc(dstifp, ifs6_reass_fail); 34982cd038dSYoshinobu Inoue return IPPROTO_DONE; 35082cd038dSYoshinobu Inoue } 35182cd038dSYoshinobu Inoue 3529cb8d207SAndrey V. 
Elsukov IP6STAT_INC(ip6s_fragments); 35382cd038dSYoshinobu Inoue in6_ifstat_inc(dstifp, ifs6_reass_reqd); 35482cd038dSYoshinobu Inoue 355686cdd19SJun-ichiro itojun Hagino /* offset now points to data portion */ 35682cd038dSYoshinobu Inoue offset += sizeof(struct ip6_frag); 35782cd038dSYoshinobu Inoue 3584018ea9aSBjoern A. Zeeb /* 3592946a941STom Jones * Handle "atomic" fragments (offset and m bit set to 0) upfront, 3602946a941STom Jones * unrelated to any reassembly (see RFC 6946 and section 4.5 of RFC 3612946a941STom Jones * 8200). Just skip the fragment header. 3624018ea9aSBjoern A. Zeeb */ 3634018ea9aSBjoern A. Zeeb if ((ip6f->ip6f_offlg & ~IP6F_RESERVED_MASK) == 0) { 3642946a941STom Jones IP6STAT_INC(ip6s_atomicfrags); 3654018ea9aSBjoern A. Zeeb in6_ifstat_inc(dstifp, ifs6_reass_ok); 3664018ea9aSBjoern A. Zeeb *offp = offset; 367a4061289SAndrey V. Elsukov m->m_flags |= M_FRAGMENTED; 3684018ea9aSBjoern A. Zeeb return (ip6f->ip6f_nxt); 3694018ea9aSBjoern A. Zeeb } 3704018ea9aSBjoern A. Zeeb 3715f9f192dSJonathan T. Looney /* Get fragment length and discard 0-byte fragments. */ 3725f9f192dSJonathan T. Looney frgpartlen = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - offset; 3735f9f192dSJonathan T. Looney if (frgpartlen == 0) { 3745f9f192dSJonathan T. Looney icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, 3755f9f192dSJonathan T. Looney offsetof(struct ip6_hdr, ip6_plen)); 3765f9f192dSJonathan T. Looney in6_ifstat_inc(dstifp, ifs6_reass_fail); 3775f9f192dSJonathan T. Looney IP6STAT_INC(ip6s_fragdropped); 3785f9f192dSJonathan T. Looney return IPPROTO_DONE; 3795f9f192dSJonathan T. Looney } 3805f9f192dSJonathan T. Looney 38180d7a853SJonathan T. Looney hashkeyp = hashkey; 38280d7a853SJonathan T. Looney memcpy(hashkeyp, &ip6->ip6_src, sizeof(struct in6_addr)); 38380d7a853SJonathan T. Looney hashkeyp += sizeof(struct in6_addr) / sizeof(*hashkeyp); 38480d7a853SJonathan T. Looney memcpy(hashkeyp, &ip6->ip6_dst, sizeof(struct in6_addr)); 38580d7a853SJonathan T. 
Looney hashkeyp += sizeof(struct in6_addr) / sizeof(*hashkeyp); 38680d7a853SJonathan T. Looney *hashkeyp = ip6f->ip6f_ident; 38780d7a853SJonathan T. Looney hash = jenkins_hash32(hashkey, nitems(hashkey), V_ip6q_hashseed); 38880d7a853SJonathan T. Looney hash &= IP6REASS_HMASK; 38980d7a853SJonathan T. Looney head = IP6Q_HEAD(hash); 39080d7a853SJonathan T. Looney IP6Q_LOCK(hash); 3919888c401SHajimu UMEMOTO 3929888c401SHajimu UMEMOTO /* 3939888c401SHajimu UMEMOTO * Enforce upper bound on number of fragments. 3949888c401SHajimu UMEMOTO * If maxfrag is 0, never accept fragments. 3959888c401SHajimu UMEMOTO * If maxfrag is -1, accept all fragments without limitation. 3969888c401SHajimu UMEMOTO */ 3972adfd64fSJonathan T. Looney if (ip6_maxfrags < 0) 3989888c401SHajimu UMEMOTO ; 3992adfd64fSJonathan T. Looney else if (atomic_load_int(&frag6_nfrags) >= (u_int)ip6_maxfrags) 4009888c401SHajimu UMEMOTO goto dropfrag; 40133841545SHajimu UMEMOTO 40280d7a853SJonathan T. Looney for (q6 = head->ip6q_next; q6 != head; q6 = q6->ip6q_next) 40382cd038dSYoshinobu Inoue if (ip6f->ip6f_ident == q6->ip6q_ident && 40482cd038dSYoshinobu Inoue IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &q6->ip6q_src) && 4054b908c8bSRobert Watson IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &q6->ip6q_dst) 4064b908c8bSRobert Watson #ifdef MAC 4074b908c8bSRobert Watson && mac_ip6q_match(m, q6) 4084b908c8bSRobert Watson #endif 4094b908c8bSRobert Watson ) 41082cd038dSYoshinobu Inoue break; 41182cd038dSYoshinobu Inoue 41280d7a853SJonathan T. Looney if (q6 == head) { 41382cd038dSYoshinobu Inoue /* 41482cd038dSYoshinobu Inoue * the first fragment to arrive, create a reassembly queue. 41582cd038dSYoshinobu Inoue */ 41682cd038dSYoshinobu Inoue first_frag = 1; 41782cd038dSYoshinobu Inoue 41882cd038dSYoshinobu Inoue /* 41982cd038dSYoshinobu Inoue * Enforce upper bound on number of fragmented packets 42082cd038dSYoshinobu Inoue * for which we attempt reassembly; 4219888c401SHajimu UMEMOTO * If maxfragpackets is 0, never accept fragments. 
4229888c401SHajimu UMEMOTO * If maxfragpackets is -1, accept all fragments without 4239888c401SHajimu UMEMOTO * limitation. 42482cd038dSYoshinobu Inoue */ 425603724d3SBjoern A. Zeeb if (V_ip6_maxfragpackets < 0) 42633841545SHajimu UMEMOTO ; 4271e9f3b73SJonathan T. Looney else if (V_ip6q[hash].count >= V_ip6_maxfragbucketsize || 4281e9f3b73SJonathan T. Looney atomic_load_int(&V_frag6_nfragpackets) >= 42980d7a853SJonathan T. Looney (u_int)V_ip6_maxfragpackets) 43033841545SHajimu UMEMOTO goto dropfrag; 43180d7a853SJonathan T. Looney atomic_add_int(&V_frag6_nfragpackets, 1); 432487a161cSBjoern A. Zeeb q6 = (struct ip6q *)malloc(sizeof(struct ip6q), M_FRAG6, 433487a161cSBjoern A. Zeeb M_NOWAIT | M_ZERO); 43482cd038dSYoshinobu Inoue if (q6 == NULL) 43582cd038dSYoshinobu Inoue goto dropfrag; 4364b908c8bSRobert Watson #ifdef MAC 4374b908c8bSRobert Watson if (mac_ip6q_init(q6, M_NOWAIT) != 0) { 438487a161cSBjoern A. Zeeb free(q6, M_FRAG6); 4394b908c8bSRobert Watson goto dropfrag; 4404b908c8bSRobert Watson } 4414b908c8bSRobert Watson mac_ip6q_create(m, q6); 4424b908c8bSRobert Watson #endif 44380d7a853SJonathan T. Looney frag6_insque_head(q6, head, hash); 44482cd038dSYoshinobu Inoue 445686cdd19SJun-ichiro itojun Hagino /* ip6q_nxt will be filled afterwards, from 1st fragment */ 44682cd038dSYoshinobu Inoue q6->ip6q_down = q6->ip6q_up = (struct ip6asfrag *)q6; 447686cdd19SJun-ichiro itojun Hagino #ifdef notyet 448686cdd19SJun-ichiro itojun Hagino q6->ip6q_nxtp = (u_char *)nxtp; 449686cdd19SJun-ichiro itojun Hagino #endif 45082cd038dSYoshinobu Inoue q6->ip6q_ident = ip6f->ip6f_ident; 45182cd038dSYoshinobu Inoue q6->ip6q_ttl = IPV6_FRAGTTL; 45282cd038dSYoshinobu Inoue q6->ip6q_src = ip6->ip6_src; 45382cd038dSYoshinobu Inoue q6->ip6q_dst = ip6->ip6_dst; 4545e9510e3SJINMEI Tatuya q6->ip6q_ecn = 4555e9510e3SJINMEI Tatuya (ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK; 45682cd038dSYoshinobu Inoue q6->ip6q_unfrglen = -1; /* The 1st fragment has not arrived. 
*/ 4579888c401SHajimu UMEMOTO 4589888c401SHajimu UMEMOTO q6->ip6q_nfrag = 0; 45982cd038dSYoshinobu Inoue } 46082cd038dSYoshinobu Inoue 46182cd038dSYoshinobu Inoue /* 46282cd038dSYoshinobu Inoue * If it's the 1st fragment, record the length of the 46382cd038dSYoshinobu Inoue * unfragmentable part and the next header of the fragment header. 46482cd038dSYoshinobu Inoue */ 46582cd038dSYoshinobu Inoue fragoff = ntohs(ip6f->ip6f_offlg & IP6F_OFF_MASK); 46682cd038dSYoshinobu Inoue if (fragoff == 0) { 46706cd0a3fSHajimu UMEMOTO q6->ip6q_unfrglen = offset - sizeof(struct ip6_hdr) - 46806cd0a3fSHajimu UMEMOTO sizeof(struct ip6_frag); 46982cd038dSYoshinobu Inoue q6->ip6q_nxt = ip6f->ip6f_nxt; 47082cd038dSYoshinobu Inoue } 47182cd038dSYoshinobu Inoue 47282cd038dSYoshinobu Inoue /* 47382cd038dSYoshinobu Inoue * Check that the reassembled packet would not exceed 65535 bytes 47482cd038dSYoshinobu Inoue * in size. 47582cd038dSYoshinobu Inoue * If it would exceed, discard the fragment and return an ICMP error. 47682cd038dSYoshinobu Inoue */ 47782cd038dSYoshinobu Inoue if (q6->ip6q_unfrglen >= 0) { 47882cd038dSYoshinobu Inoue /* The 1st fragment has already arrived. */ 47982cd038dSYoshinobu Inoue if (q6->ip6q_unfrglen + fragoff + frgpartlen > IPV6_MAXPACKET) { 48082cd038dSYoshinobu Inoue icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, 481686cdd19SJun-ichiro itojun Hagino offset - sizeof(struct ip6_frag) + 482686cdd19SJun-ichiro itojun Hagino offsetof(struct ip6_frag, ip6f_offlg)); 48380d7a853SJonathan T. Looney IP6Q_UNLOCK(hash); 48482cd038dSYoshinobu Inoue return (IPPROTO_DONE); 48582cd038dSYoshinobu Inoue } 48606cd0a3fSHajimu UMEMOTO } else if (fragoff + frgpartlen > IPV6_MAXPACKET) { 48782cd038dSYoshinobu Inoue icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, 488686cdd19SJun-ichiro itojun Hagino offset - sizeof(struct ip6_frag) + 489686cdd19SJun-ichiro itojun Hagino offsetof(struct ip6_frag, ip6f_offlg)); 49080d7a853SJonathan T. 
Looney IP6Q_UNLOCK(hash); 49182cd038dSYoshinobu Inoue return (IPPROTO_DONE); 49282cd038dSYoshinobu Inoue } 49382cd038dSYoshinobu Inoue /* 49482cd038dSYoshinobu Inoue * If it's the first fragment, do the above check for each 49582cd038dSYoshinobu Inoue * fragment already stored in the reassembly queue. 49682cd038dSYoshinobu Inoue */ 49782cd038dSYoshinobu Inoue if (fragoff == 0) { 49882cd038dSYoshinobu Inoue for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6; 49982cd038dSYoshinobu Inoue af6 = af6dwn) { 50082cd038dSYoshinobu Inoue af6dwn = af6->ip6af_down; 50182cd038dSYoshinobu Inoue 50282cd038dSYoshinobu Inoue if (q6->ip6q_unfrglen + af6->ip6af_off + af6->ip6af_frglen > 50382cd038dSYoshinobu Inoue IPV6_MAXPACKET) { 50482cd038dSYoshinobu Inoue struct mbuf *merr = IP6_REASS_MBUF(af6); 50582cd038dSYoshinobu Inoue struct ip6_hdr *ip6err; 50682cd038dSYoshinobu Inoue int erroff = af6->ip6af_offset; 50782cd038dSYoshinobu Inoue 50882cd038dSYoshinobu Inoue /* dequeue the fragment. */ 50980d7a853SJonathan T. Looney frag6_deq(af6, hash); 510487a161cSBjoern A. Zeeb free(af6, M_FRAG6); 51182cd038dSYoshinobu Inoue 51282cd038dSYoshinobu Inoue /* adjust pointer. */ 51382cd038dSYoshinobu Inoue ip6err = mtod(merr, struct ip6_hdr *); 51482cd038dSYoshinobu Inoue 51582cd038dSYoshinobu Inoue /* 51682cd038dSYoshinobu Inoue * Restore source and destination addresses 51782cd038dSYoshinobu Inoue * in the erroneous IPv6 header. 
51882cd038dSYoshinobu Inoue */ 51982cd038dSYoshinobu Inoue ip6err->ip6_src = q6->ip6q_src; 52082cd038dSYoshinobu Inoue ip6err->ip6_dst = q6->ip6q_dst; 52182cd038dSYoshinobu Inoue 52282cd038dSYoshinobu Inoue icmp6_error(merr, ICMP6_PARAM_PROB, 52382cd038dSYoshinobu Inoue ICMP6_PARAMPROB_HEADER, 524686cdd19SJun-ichiro itojun Hagino erroff - sizeof(struct ip6_frag) + 525686cdd19SJun-ichiro itojun Hagino offsetof(struct ip6_frag, ip6f_offlg)); 52682cd038dSYoshinobu Inoue } 52782cd038dSYoshinobu Inoue } 52882cd038dSYoshinobu Inoue } 52982cd038dSYoshinobu Inoue 530487a161cSBjoern A. Zeeb ip6af = (struct ip6asfrag *)malloc(sizeof(struct ip6asfrag), M_FRAG6, 531487a161cSBjoern A. Zeeb M_NOWAIT | M_ZERO); 532686cdd19SJun-ichiro itojun Hagino if (ip6af == NULL) 533686cdd19SJun-ichiro itojun Hagino goto dropfrag; 53482cd038dSYoshinobu Inoue ip6af->ip6af_mff = ip6f->ip6f_offlg & IP6F_MORE_FRAG; 53582cd038dSYoshinobu Inoue ip6af->ip6af_off = fragoff; 53682cd038dSYoshinobu Inoue ip6af->ip6af_frglen = frgpartlen; 53782cd038dSYoshinobu Inoue ip6af->ip6af_offset = offset; 53882cd038dSYoshinobu Inoue IP6_REASS_MBUF(ip6af) = m; 53982cd038dSYoshinobu Inoue 54082cd038dSYoshinobu Inoue if (first_frag) { 54182cd038dSYoshinobu Inoue af6 = (struct ip6asfrag *)q6; 54282cd038dSYoshinobu Inoue goto insert; 54382cd038dSYoshinobu Inoue } 54482cd038dSYoshinobu Inoue 54582cd038dSYoshinobu Inoue /* 54659dfcba4SHajimu UMEMOTO * Handle ECN by comparing this segment with the first one; 54759dfcba4SHajimu UMEMOTO * if CE is set, do not lose CE. 54859dfcba4SHajimu UMEMOTO * drop if CE and not-ECT are mixed for the same packet. 54959dfcba4SHajimu UMEMOTO */ 55059dfcba4SHajimu UMEMOTO ecn = (ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK; 5515e9510e3SJINMEI Tatuya ecn0 = q6->ip6q_ecn; 55259dfcba4SHajimu UMEMOTO if (ecn == IPTOS_ECN_CE) { 55359dfcba4SHajimu UMEMOTO if (ecn0 == IPTOS_ECN_NOTECT) { 554487a161cSBjoern A. 
Zeeb free(ip6af, M_FRAG6); 55559dfcba4SHajimu UMEMOTO goto dropfrag; 55659dfcba4SHajimu UMEMOTO } 55759dfcba4SHajimu UMEMOTO if (ecn0 != IPTOS_ECN_CE) 5585e9510e3SJINMEI Tatuya q6->ip6q_ecn = IPTOS_ECN_CE; 55959dfcba4SHajimu UMEMOTO } 56059dfcba4SHajimu UMEMOTO if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT) { 561487a161cSBjoern A. Zeeb free(ip6af, M_FRAG6); 56259dfcba4SHajimu UMEMOTO goto dropfrag; 56359dfcba4SHajimu UMEMOTO } 56459dfcba4SHajimu UMEMOTO 56559dfcba4SHajimu UMEMOTO /* 56682cd038dSYoshinobu Inoue * Find a segment which begins after this one does. 56782cd038dSYoshinobu Inoue */ 56882cd038dSYoshinobu Inoue for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6; 56982cd038dSYoshinobu Inoue af6 = af6->ip6af_down) 57082cd038dSYoshinobu Inoue if (af6->ip6af_off > ip6af->ip6af_off) 57182cd038dSYoshinobu Inoue break; 57282cd038dSYoshinobu Inoue 57382cd038dSYoshinobu Inoue /* 57482cd038dSYoshinobu Inoue * If the incoming framgent overlaps some existing fragments in 57582cd038dSYoshinobu Inoue * the reassembly queue, drop it, since it is dangerous to override 57682cd038dSYoshinobu Inoue * existing fragments from a security point of view. 5779888c401SHajimu UMEMOTO * We don't know which fragment is the bad guy - here we trust 5789888c401SHajimu UMEMOTO * fragment that came in earlier, with no real reason. 5795e9510e3SJINMEI Tatuya * 5805e9510e3SJINMEI Tatuya * Note: due to changes after disabling this part, mbuf passed to 5815e9510e3SJINMEI Tatuya * m_adj() below now does not meet the requirement. 58282cd038dSYoshinobu Inoue */ 58382cd038dSYoshinobu Inoue if (af6->ip6af_up != (struct ip6asfrag *)q6) { 58482cd038dSYoshinobu Inoue i = af6->ip6af_up->ip6af_off + af6->ip6af_up->ip6af_frglen 58582cd038dSYoshinobu Inoue - ip6af->ip6af_off; 58682cd038dSYoshinobu Inoue if (i > 0) { 587487a161cSBjoern A. 
Zeeb free(ip6af, M_FRAG6); 58882cd038dSYoshinobu Inoue goto dropfrag; 58982cd038dSYoshinobu Inoue } 59082cd038dSYoshinobu Inoue } 59182cd038dSYoshinobu Inoue if (af6 != (struct ip6asfrag *)q6) { 59282cd038dSYoshinobu Inoue i = (ip6af->ip6af_off + ip6af->ip6af_frglen) - af6->ip6af_off; 59382cd038dSYoshinobu Inoue if (i > 0) { 594487a161cSBjoern A. Zeeb free(ip6af, M_FRAG6); 59582cd038dSYoshinobu Inoue goto dropfrag; 59682cd038dSYoshinobu Inoue } 59782cd038dSYoshinobu Inoue } 59882cd038dSYoshinobu Inoue 59982cd038dSYoshinobu Inoue insert: 6004b908c8bSRobert Watson #ifdef MAC 6014b908c8bSRobert Watson if (!first_frag) 6024b908c8bSRobert Watson mac_ip6q_update(m, q6); 6034b908c8bSRobert Watson #endif 60482cd038dSYoshinobu Inoue 60582cd038dSYoshinobu Inoue /* 60682cd038dSYoshinobu Inoue * Stick new segment in its place; 60782cd038dSYoshinobu Inoue * check for complete reassembly. 60803c99d76SJonathan T. Looney * If not complete, check fragment limit. 60982cd038dSYoshinobu Inoue * Move to front of packet queue, as we are 61082cd038dSYoshinobu Inoue * the most recently active fragmented packet. 61182cd038dSYoshinobu Inoue */ 61280d7a853SJonathan T. Looney frag6_enq(ip6af, af6->ip6af_up, hash); 6132adfd64fSJonathan T. Looney atomic_add_int(&frag6_nfrags, 1); 6149888c401SHajimu UMEMOTO q6->ip6q_nfrag++; 61582cd038dSYoshinobu Inoue next = 0; 61682cd038dSYoshinobu Inoue for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6; 61782cd038dSYoshinobu Inoue af6 = af6->ip6af_down) { 61882cd038dSYoshinobu Inoue if (af6->ip6af_off != next) { 61903c99d76SJonathan T. Looney if (q6->ip6q_nfrag > V_ip6_maxfragsperpacket) { 620198fdaedSTom Jones IP6STAT_ADD(ip6s_fragdropped, q6->ip6q_nfrag); 6216bbdbbb8SHans Petter Selasky frag6_freef(q6, hash); 62203c99d76SJonathan T. Looney } 62380d7a853SJonathan T. 
Looney IP6Q_UNLOCK(hash); 62482cd038dSYoshinobu Inoue return IPPROTO_DONE; 62582cd038dSYoshinobu Inoue } 62682cd038dSYoshinobu Inoue next += af6->ip6af_frglen; 62782cd038dSYoshinobu Inoue } 62882cd038dSYoshinobu Inoue if (af6->ip6af_up->ip6af_mff) { 62903c99d76SJonathan T. Looney if (q6->ip6q_nfrag > V_ip6_maxfragsperpacket) { 630198fdaedSTom Jones IP6STAT_ADD(ip6s_fragdropped, q6->ip6q_nfrag); 6316bbdbbb8SHans Petter Selasky frag6_freef(q6, hash); 63203c99d76SJonathan T. Looney } 63380d7a853SJonathan T. Looney IP6Q_UNLOCK(hash); 63482cd038dSYoshinobu Inoue return IPPROTO_DONE; 63582cd038dSYoshinobu Inoue } 63682cd038dSYoshinobu Inoue 63782cd038dSYoshinobu Inoue /* 63882cd038dSYoshinobu Inoue * Reassembly is complete; concatenate fragments. 63982cd038dSYoshinobu Inoue */ 64082cd038dSYoshinobu Inoue ip6af = q6->ip6q_down; 64182cd038dSYoshinobu Inoue t = m = IP6_REASS_MBUF(ip6af); 64282cd038dSYoshinobu Inoue af6 = ip6af->ip6af_down; 64380d7a853SJonathan T. Looney frag6_deq(ip6af, hash); 64482cd038dSYoshinobu Inoue while (af6 != (struct ip6asfrag *)q6) { 6459907aba3SAndrey V. Elsukov m->m_pkthdr.csum_flags &= 6469907aba3SAndrey V. Elsukov IP6_REASS_MBUF(af6)->m_pkthdr.csum_flags; 6479907aba3SAndrey V. Elsukov m->m_pkthdr.csum_data += 6489907aba3SAndrey V. Elsukov IP6_REASS_MBUF(af6)->m_pkthdr.csum_data; 6499907aba3SAndrey V. Elsukov 650686cdd19SJun-ichiro itojun Hagino af6dwn = af6->ip6af_down; 65180d7a853SJonathan T. Looney frag6_deq(af6, hash); 65282cd038dSYoshinobu Inoue while (t->m_next) 65382cd038dSYoshinobu Inoue t = t->m_next; 654ba99cc0bSAlexander V. Chernikov m_adj(IP6_REASS_MBUF(af6), af6->ip6af_offset); 65509b0b8c0SNavdeep Parhar m_demote_pkthdr(IP6_REASS_MBUF(af6)); 656ba99cc0bSAlexander V. Chernikov m_cat(t, IP6_REASS_MBUF(af6)); 657487a161cSBjoern A. Zeeb free(af6, M_FRAG6); 658686cdd19SJun-ichiro itojun Hagino af6 = af6dwn; 65982cd038dSYoshinobu Inoue } 66082cd038dSYoshinobu Inoue 6619907aba3SAndrey V. 
Elsukov while (m->m_pkthdr.csum_data & 0xffff0000) 6629907aba3SAndrey V. Elsukov m->m_pkthdr.csum_data = (m->m_pkthdr.csum_data & 0xffff) + 6639907aba3SAndrey V. Elsukov (m->m_pkthdr.csum_data >> 16); 6649907aba3SAndrey V. Elsukov 66582cd038dSYoshinobu Inoue /* adjust offset to point where the original next header starts */ 66682cd038dSYoshinobu Inoue offset = ip6af->ip6af_offset - sizeof(struct ip6_frag); 667487a161cSBjoern A. Zeeb free(ip6af, M_FRAG6); 668686cdd19SJun-ichiro itojun Hagino ip6 = mtod(m, struct ip6_hdr *); 66982cd038dSYoshinobu Inoue ip6->ip6_plen = htons((u_short)next + offset - sizeof(struct ip6_hdr)); 6705e9510e3SJINMEI Tatuya if (q6->ip6q_ecn == IPTOS_ECN_CE) 6715e9510e3SJINMEI Tatuya ip6->ip6_flow |= htonl(IPTOS_ECN_CE << 20); 67282cd038dSYoshinobu Inoue nxt = q6->ip6q_nxt; 67382cd038dSYoshinobu Inoue 6740b438b0fSGleb Smirnoff if (ip6_deletefraghdr(m, offset, M_NOWAIT) != 0) { 67580d7a853SJonathan T. Looney frag6_remque(q6, hash); 6762adfd64fSJonathan T. Looney atomic_subtract_int(&frag6_nfrags, q6->ip6q_nfrag); 6774b908c8bSRobert Watson #ifdef MAC 6784b908c8bSRobert Watson mac_ip6q_destroy(q6); 6794b908c8bSRobert Watson #endif 680487a161cSBjoern A. Zeeb free(q6, M_FRAG6); 68180d7a853SJonathan T. Looney atomic_subtract_int(&V_frag6_nfragpackets, 1); 6820b438b0fSGleb Smirnoff 683686cdd19SJun-ichiro itojun Hagino goto dropfrag; 68482cd038dSYoshinobu Inoue } 68582cd038dSYoshinobu Inoue 68682cd038dSYoshinobu Inoue /* 68782cd038dSYoshinobu Inoue * Store NXT to the original. 68882cd038dSYoshinobu Inoue */ 68968e0e5a6SAndrey V. Elsukov m_copyback(m, ip6_get_prevhdr(m, offset), sizeof(uint8_t), 69068e0e5a6SAndrey V. Elsukov (caddr_t)&nxt); 69182cd038dSYoshinobu Inoue 69280d7a853SJonathan T. Looney frag6_remque(q6, hash); 6932adfd64fSJonathan T. 
Looney atomic_subtract_int(&frag6_nfrags, q6->ip6q_nfrag); 6944b908c8bSRobert Watson #ifdef MAC 6954b908c8bSRobert Watson mac_ip6q_reassemble(q6, m); 6964b908c8bSRobert Watson mac_ip6q_destroy(q6); 6974b908c8bSRobert Watson #endif 698487a161cSBjoern A. Zeeb free(q6, M_FRAG6); 69980d7a853SJonathan T. Looney atomic_subtract_int(&V_frag6_nfragpackets, 1); 70082cd038dSYoshinobu Inoue 70182cd038dSYoshinobu Inoue if (m->m_flags & M_PKTHDR) { /* Isn't it always true? */ 70282cd038dSYoshinobu Inoue int plen = 0; 70382cd038dSYoshinobu Inoue for (t = m; t; t = t->m_next) 70482cd038dSYoshinobu Inoue plen += t->m_len; 70582cd038dSYoshinobu Inoue m->m_pkthdr.len = plen; 70682cd038dSYoshinobu Inoue } 70782cd038dSYoshinobu Inoue 708aaa46574SAdrian Chadd #ifdef RSS 709aaa46574SAdrian Chadd mtag = m_tag_alloc(MTAG_ABI_IPV6, IPV6_TAG_DIRECT, sizeof(*ip6dc), 710aaa46574SAdrian Chadd M_NOWAIT); 711aaa46574SAdrian Chadd if (mtag == NULL) 712aaa46574SAdrian Chadd goto dropfrag; 713aaa46574SAdrian Chadd 714aaa46574SAdrian Chadd ip6dc = (struct ip6_direct_ctx *)(mtag + 1); 715aaa46574SAdrian Chadd ip6dc->ip6dc_nxt = nxt; 716aaa46574SAdrian Chadd ip6dc->ip6dc_off = offset; 717aaa46574SAdrian Chadd 718aaa46574SAdrian Chadd m_tag_prepend(m, mtag); 719aaa46574SAdrian Chadd #endif 720aaa46574SAdrian Chadd 72180d7a853SJonathan T. Looney IP6Q_UNLOCK(hash); 7229cb8d207SAndrey V. Elsukov IP6STAT_INC(ip6s_reassembled); 72382cd038dSYoshinobu Inoue in6_ifstat_inc(dstifp, ifs6_reass_ok); 72482cd038dSYoshinobu Inoue 725aaa46574SAdrian Chadd #ifdef RSS 726aaa46574SAdrian Chadd /* 727aaa46574SAdrian Chadd * Queue/dispatch for reprocessing. 
728aaa46574SAdrian Chadd */ 729aaa46574SAdrian Chadd netisr_dispatch(NETISR_IPV6_DIRECT, m); 730aaa46574SAdrian Chadd return IPPROTO_DONE; 731aaa46574SAdrian Chadd #endif 732aaa46574SAdrian Chadd 73382cd038dSYoshinobu Inoue /* 73482cd038dSYoshinobu Inoue * Tell launch routine the next header 73582cd038dSYoshinobu Inoue */ 73682cd038dSYoshinobu Inoue 73782cd038dSYoshinobu Inoue *mp = m; 73882cd038dSYoshinobu Inoue *offp = offset; 73982cd038dSYoshinobu Inoue 74082cd038dSYoshinobu Inoue return nxt; 74182cd038dSYoshinobu Inoue 74282cd038dSYoshinobu Inoue dropfrag: 74380d7a853SJonathan T. Looney IP6Q_UNLOCK(hash); 74482cd038dSYoshinobu Inoue in6_ifstat_inc(dstifp, ifs6_reass_fail); 7459cb8d207SAndrey V. Elsukov IP6STAT_INC(ip6s_fragdropped); 74682cd038dSYoshinobu Inoue m_freem(m); 74782cd038dSYoshinobu Inoue return IPPROTO_DONE; 74882cd038dSYoshinobu Inoue } 74982cd038dSYoshinobu Inoue 75082cd038dSYoshinobu Inoue /* 75133841545SHajimu UMEMOTO * IPv6 reassembling timer processing; 75282cd038dSYoshinobu Inoue * if a timer expires on a reassembly 75382cd038dSYoshinobu Inoue * queue, discard it. 75482cd038dSYoshinobu Inoue */ 75582cd038dSYoshinobu Inoue void 7561272577eSXin LI frag6_slowtimo(void) 75782cd038dSYoshinobu Inoue { 7588b615593SMarko Zec VNET_ITERATOR_DECL(vnet_iter); 75980d7a853SJonathan T. Looney struct ip6q *head, *q6; 76080d7a853SJonathan T. Looney int i; 76182cd038dSYoshinobu Inoue 7625ee847d3SRobert Watson VNET_LIST_RLOCK_NOSLEEP(); 7638b615593SMarko Zec VNET_FOREACH(vnet_iter) { 7648b615593SMarko Zec CURVNET_SET(vnet_iter); 76580d7a853SJonathan T. Looney for (i = 0; i < IP6REASS_NHASH; i++) { 76680d7a853SJonathan T. Looney IP6Q_LOCK(i); 76780d7a853SJonathan T. Looney head = IP6Q_HEAD(i); 76880d7a853SJonathan T. Looney q6 = head->ip6q_next; 7691e9f3b73SJonathan T. Looney if (q6 == NULL) { 7701e9f3b73SJonathan T. Looney /* 7711e9f3b73SJonathan T. Looney * XXXJTL: This should never happen. This 7721e9f3b73SJonathan T. Looney * should turn into an assertion. 
7731e9f3b73SJonathan T. Looney */ 7741e9f3b73SJonathan T. Looney IP6Q_UNLOCK(i); 7751e9f3b73SJonathan T. Looney continue; 7761e9f3b73SJonathan T. Looney } 77780d7a853SJonathan T. Looney while (q6 != head) { 77882cd038dSYoshinobu Inoue --q6->ip6q_ttl; 77982cd038dSYoshinobu Inoue q6 = q6->ip6q_next; 78082cd038dSYoshinobu Inoue if (q6->ip6q_prev->ip6q_ttl == 0) { 781198fdaedSTom Jones IP6STAT_ADD(ip6s_fragtimeout, 782198fdaedSTom Jones q6->ip6q_prev->ip6q_nfrag); 78382cd038dSYoshinobu Inoue /* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */ 7846bbdbbb8SHans Petter Selasky frag6_freef(q6->ip6q_prev, i); 78582cd038dSYoshinobu Inoue } 78682cd038dSYoshinobu Inoue } 78782cd038dSYoshinobu Inoue /* 78882cd038dSYoshinobu Inoue * If we are over the maximum number of fragments 78982cd038dSYoshinobu Inoue * (due to the limit being lowered), drain off 79082cd038dSYoshinobu Inoue * enough to get down to the new limit. 7911e9f3b73SJonathan T. Looney * Note that we drain all reassembly queues if 7921e9f3b73SJonathan T. Looney * maxfragpackets is 0 (fragmentation is disabled), 7931e9f3b73SJonathan T. Looney * and don't enforce a limit when maxfragpackets 7941e9f3b73SJonathan T. Looney * is negative. 79582cd038dSYoshinobu Inoue */ 7961e9f3b73SJonathan T. Looney while ((V_ip6_maxfragpackets == 0 || 7971e9f3b73SJonathan T. Looney (V_ip6_maxfragpackets > 0 && 7981e9f3b73SJonathan T. Looney V_ip6q[i].count > V_ip6_maxfragbucketsize)) && 79980d7a853SJonathan T. Looney head->ip6q_prev != head) { 800198fdaedSTom Jones IP6STAT_ADD(ip6s_fragoverflow, 801198fdaedSTom Jones q6->ip6q_prev->ip6q_nfrag); 80282cd038dSYoshinobu Inoue /* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */ 8036bbdbbb8SHans Petter Selasky frag6_freef(head->ip6q_prev, i); 80480d7a853SJonathan T. Looney } 80580d7a853SJonathan T. Looney IP6Q_UNLOCK(i); 80682cd038dSYoshinobu Inoue } 8071e9f3b73SJonathan T. Looney /* 8081e9f3b73SJonathan T. Looney * If we are still over the maximum number of fragmented 8091e9f3b73SJonathan T. 
Looney * packets, drain off enough to get down to the new limit. 8101e9f3b73SJonathan T. Looney */ 8111e9f3b73SJonathan T. Looney i = 0; 8121e9f3b73SJonathan T. Looney while (V_ip6_maxfragpackets >= 0 && 8131e9f3b73SJonathan T. Looney atomic_load_int(&V_frag6_nfragpackets) > 8141e9f3b73SJonathan T. Looney (u_int)V_ip6_maxfragpackets) { 8151e9f3b73SJonathan T. Looney IP6Q_LOCK(i); 8161e9f3b73SJonathan T. Looney head = IP6Q_HEAD(i); 8171e9f3b73SJonathan T. Looney if (head->ip6q_prev != head) { 818198fdaedSTom Jones IP6STAT_ADD(ip6s_fragoverflow, 819198fdaedSTom Jones q6->ip6q_prev->ip6q_nfrag); 8201e9f3b73SJonathan T. Looney /* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */ 8216bbdbbb8SHans Petter Selasky frag6_freef(head->ip6q_prev, i); 8221e9f3b73SJonathan T. Looney } 8231e9f3b73SJonathan T. Looney IP6Q_UNLOCK(i); 8241e9f3b73SJonathan T. Looney i = (i + 1) % IP6REASS_NHASH; 8251e9f3b73SJonathan T. Looney } 8268b615593SMarko Zec CURVNET_RESTORE(); 8278b615593SMarko Zec } 8285ee847d3SRobert Watson VNET_LIST_RUNLOCK_NOSLEEP(); 82982cd038dSYoshinobu Inoue } 83082cd038dSYoshinobu Inoue 831c00464a2SBjoern A. Zeeb static void 832c00464a2SBjoern A. Zeeb frag6_change(void *tag) 833c00464a2SBjoern A. Zeeb { 834c00464a2SBjoern A. Zeeb VNET_ITERATOR_DECL(vnet_iter); 835c00464a2SBjoern A. Zeeb 836c00464a2SBjoern A. Zeeb ip6_maxfrags = IP6_MAXFRAGS; 837c00464a2SBjoern A. Zeeb VNET_LIST_RLOCK_NOSLEEP(); 838c00464a2SBjoern A. Zeeb VNET_FOREACH(vnet_iter) { 839c00464a2SBjoern A. Zeeb CURVNET_SET(vnet_iter); 840c00464a2SBjoern A. Zeeb V_ip6_maxfragpackets = IP6_MAXFRAGPACKETS; 841c00464a2SBjoern A. Zeeb frag6_set_bucketsize(); 842c00464a2SBjoern A. Zeeb CURVNET_RESTORE(); 843c00464a2SBjoern A. Zeeb } 844c00464a2SBjoern A. Zeeb VNET_LIST_RUNLOCK_NOSLEEP(); 845c00464a2SBjoern A. Zeeb } 846c00464a2SBjoern A. Zeeb 847c00464a2SBjoern A. Zeeb /* 848c00464a2SBjoern A. Zeeb * Initialise reassembly queue and fragment identifier. 849c00464a2SBjoern A. Zeeb */ 850c00464a2SBjoern A. 
void
frag6_init(void)
{
	struct ip6q *q6;
	int i;

	/* Per-vnet state: limits, empty circular queues, per-bucket locks. */
	V_ip6_maxfragpackets = IP6_MAXFRAGPACKETS;
	frag6_set_bucketsize();
	for (i = 0; i < IP6REASS_NHASH; i++) {
		q6 = IP6Q_HEAD(i);
		/* Each bucket head is a self-linked sentinel when empty. */
		q6->ip6q_next = q6->ip6q_prev = q6;
		mtx_init(&V_ip6q[i].lock, "ip6qlock", NULL, MTX_DEF);
		V_ip6q[i].count = 0;
	}
	/* Random seed keeps the bucket hash unpredictable to attackers. */
	V_ip6q_hashseed = arc4random();
	V_ip6_maxfragsperpacket = 64;
	if (!IS_DEFAULT_VNET(curvnet))
		return;

	/* Global (non-vnet) state is set up once, by the default vnet. */
	ip6_maxfrags = IP6_MAXFRAGS;
	EVENTHANDLER_REGISTER(nmbclusters_change,
	    frag6_change, NULL, EVENTHANDLER_PRI_ANY);
}

/*
 * Drain off all datagram fragments.
 */
void
frag6_drain(void)
{
	VNET_ITERATOR_DECL(vnet_iter);
	struct ip6q *head;
	int i;

	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		for (i = 0; i < IP6REASS_NHASH; i++) {
			/*
			 * Best-effort: called from contexts where sleeping
			 * on a contended bucket lock is not acceptable, so
			 * skip busy buckets rather than block.
			 */
			if (IP6Q_TRYLOCK(i) == 0)
				continue;
			head = IP6Q_HEAD(i);
			while (head->ip6q_next != head) {
				IP6STAT_INC(ip6s_fragdropped);
				/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
				frag6_freef(head->ip6q_next, i);
			}
			IP6Q_UNLOCK(i);
		}
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}

/*
 * Put an ip fragment on a reassembly chain.
 * Like insque, but pointers in middle of structure.
 *
 * Inserts af6 immediately after up6 in the per-packet fragment list.
 * The caller must hold the bucket lock (asserted below).
 */
static void
frag6_enq(struct ip6asfrag *af6, struct ip6asfrag *up6,
    uint32_t bucket __unused)
{

	IP6Q_LOCK_ASSERT(bucket);

	af6->ip6af_up = up6;
	af6->ip6af_down = up6->ip6af_down;
	up6->ip6af_down->ip6af_up = af6;
	up6->ip6af_down = af6;
}

/*
 * To frag6_enq as remque is to insque.
 *
 * Unlinks af6 from its fragment list; does not free it.  Caller must
 * hold the bucket lock.
 */
static void
frag6_deq(struct ip6asfrag *af6, uint32_t bucket __unused)
{

	IP6Q_LOCK_ASSERT(bucket);

	af6->ip6af_up->ip6af_down = af6->ip6af_down;
	af6->ip6af_down->ip6af_up = af6->ip6af_up;
}
/*
 * Insert 'new' at the head of the given reassembly bucket, i.e. right
 * after the bucket's sentinel 'old', and bump the bucket's queue count.
 * Caller must hold the bucket lock.
 */
static void
frag6_insque_head(struct ip6q *new, struct ip6q *old, uint32_t bucket)
{
	struct ip6q *succ;

	IP6Q_LOCK_ASSERT(bucket);
	KASSERT(IP6Q_HEAD(bucket) == old,
	    ("%s: attempt to insert at head of wrong bucket"
	    " (bucket=%u, old=%p)", __func__, bucket, old));

	/* Splice 'new' between the sentinel and its current first entry. */
	succ = old->ip6q_next;
	new->ip6q_next = succ;
	new->ip6q_prev = old;
	succ->ip6q_prev = new;
	old->ip6q_next = new;
	V_ip6q[bucket].count++;
}

/*
 * Unlink a reassembly queue from its bucket and drop the bucket's
 * queue count.  Caller must hold the bucket lock; p6 is not freed.
 */
static void
frag6_remque(struct ip6q *p6, uint32_t bucket)
{
	struct ip6q *before, *after;

	IP6Q_LOCK_ASSERT(bucket);

	before = p6->ip6q_prev;
	after = p6->ip6q_next;
	before->ip6q_next = after;
	after->ip6q_prev = before;
	V_ip6q[bucket].count--;
}