1caf43b02SWarner Losh /*- 251369649SPedro F. Giffuni * SPDX-License-Identifier: BSD-3-Clause 351369649SPedro F. Giffuni * 482cd038dSYoshinobu Inoue * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. 582cd038dSYoshinobu Inoue * All rights reserved. 682cd038dSYoshinobu Inoue * 782cd038dSYoshinobu Inoue * Redistribution and use in source and binary forms, with or without 882cd038dSYoshinobu Inoue * modification, are permitted provided that the following conditions 982cd038dSYoshinobu Inoue * are met: 1082cd038dSYoshinobu Inoue * 1. Redistributions of source code must retain the above copyright 1182cd038dSYoshinobu Inoue * notice, this list of conditions and the following disclaimer. 1282cd038dSYoshinobu Inoue * 2. Redistributions in binary form must reproduce the above copyright 1382cd038dSYoshinobu Inoue * notice, this list of conditions and the following disclaimer in the 1482cd038dSYoshinobu Inoue * documentation and/or other materials provided with the distribution. 1582cd038dSYoshinobu Inoue * 3. Neither the name of the project nor the names of its contributors 1682cd038dSYoshinobu Inoue * may be used to endorse or promote products derived from this software 1782cd038dSYoshinobu Inoue * without specific prior written permission. 1882cd038dSYoshinobu Inoue * 1982cd038dSYoshinobu Inoue * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND 2082cd038dSYoshinobu Inoue * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 2182cd038dSYoshinobu Inoue * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 2282cd038dSYoshinobu Inoue * ARE DISCLAIMED. 
IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE 2382cd038dSYoshinobu Inoue * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 2482cd038dSYoshinobu Inoue * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 2582cd038dSYoshinobu Inoue * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 2682cd038dSYoshinobu Inoue * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 2782cd038dSYoshinobu Inoue * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 2882cd038dSYoshinobu Inoue * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 2982cd038dSYoshinobu Inoue * SUCH DAMAGE. 30b48287a3SDavid E. O'Brien * 31b48287a3SDavid E. O'Brien * $KAME: frag6.c,v 1.33 2002/01/07 11:34:48 kjc Exp $ 3282cd038dSYoshinobu Inoue */ 3382cd038dSYoshinobu Inoue 34b48287a3SDavid E. O'Brien #include <sys/cdefs.h> 35b48287a3SDavid E. O'Brien __FBSDID("$FreeBSD$"); 36b48287a3SDavid E. O'Brien 37aaa46574SAdrian Chadd #include "opt_rss.h" 38aaa46574SAdrian Chadd 3982cd038dSYoshinobu Inoue #include <sys/param.h> 401a3044faSBjoern A. Zeeb #include <sys/domain.h> 411a3044faSBjoern A. Zeeb #include <sys/errno.h> 421a3044faSBjoern A. Zeeb #include <sys/eventhandler.h> 4380d7a853SJonathan T. Looney #include <sys/hash.h> 441a3044faSBjoern A. Zeeb #include <sys/kernel.h> 4582cd038dSYoshinobu Inoue #include <sys/malloc.h> 4682cd038dSYoshinobu Inoue #include <sys/mbuf.h> 4782cd038dSYoshinobu Inoue #include <sys/protosw.h> 4882cd038dSYoshinobu Inoue #include <sys/socket.h> 4982cd038dSYoshinobu Inoue #include <sys/time.h> 50757cb678SBjoern A. Zeeb #include <sys/sysctl.h> 5182cd038dSYoshinobu Inoue #include <sys/syslog.h> 5282cd038dSYoshinobu Inoue 5380d7a853SJonathan T. Looney #include <machine/atomic.h> 5480d7a853SJonathan T. 
Looney 5582cd038dSYoshinobu Inoue #include <net/if.h> 5676039bc8SGleb Smirnoff #include <net/if_var.h> 57aaa46574SAdrian Chadd #include <net/netisr.h> 5882cd038dSYoshinobu Inoue #include <net/route.h> 59eddfbb76SRobert Watson #include <net/vnet.h> 6082cd038dSYoshinobu Inoue 6182cd038dSYoshinobu Inoue #include <netinet/in.h> 6282cd038dSYoshinobu Inoue #include <netinet/in_var.h> 63686cdd19SJun-ichiro itojun Hagino #include <netinet/ip6.h> 6482cd038dSYoshinobu Inoue #include <netinet6/ip6_var.h> 65686cdd19SJun-ichiro itojun Hagino #include <netinet/icmp6.h> 6659dfcba4SHajimu UMEMOTO #include <netinet/in_systm.h> /* for ECN definitions */ 6759dfcba4SHajimu UMEMOTO #include <netinet/ip.h> /* for ECN definitions */ 6882cd038dSYoshinobu Inoue 691a3044faSBjoern A. Zeeb #ifdef MAC 704b908c8bSRobert Watson #include <security/mac/mac_framework.h> 711a3044faSBjoern A. Zeeb #endif 724b908c8bSRobert Watson 7331e8f7e5SHajimu UMEMOTO /* 7480d7a853SJonathan T. Looney * Reassembly headers are stored in hash buckets. 7531e8f7e5SHajimu UMEMOTO */ 762ceeacbeSJonathan T. Looney #define IP6REASS_NHASH_LOG2 10 7780d7a853SJonathan T. Looney #define IP6REASS_NHASH (1 << IP6REASS_NHASH_LOG2) 7880d7a853SJonathan T. Looney #define IP6REASS_HMASK (IP6REASS_NHASH - 1) 7980d7a853SJonathan T. Looney 8080d7a853SJonathan T. Looney static void frag6_enq(struct ip6asfrag *, struct ip6asfrag *, 8180d7a853SJonathan T. Looney uint32_t bucket __unused); 8280d7a853SJonathan T. Looney static void frag6_deq(struct ip6asfrag *, uint32_t bucket __unused); 8380d7a853SJonathan T. Looney static void frag6_insque_head(struct ip6q *, struct ip6q *, 841e9f3b73SJonathan T. Looney uint32_t bucket); 851e9f3b73SJonathan T. Looney static void frag6_remque(struct ip6q *, uint32_t bucket); 866bbdbbb8SHans Petter Selasky static void frag6_freef(struct ip6q *, uint32_t bucket); 8780d7a853SJonathan T. Looney 8880d7a853SJonathan T. Looney struct ip6qbucket { 8980d7a853SJonathan T. 
Looney struct ip6q ip6q; 9080d7a853SJonathan T. Looney struct mtx lock; 911e9f3b73SJonathan T. Looney int count; 9280d7a853SJonathan T. Looney }; 9380d7a853SJonathan T. Looney 94487a161cSBjoern A. Zeeb static MALLOC_DEFINE(M_FRAG6, "frag6", "IPv6 fragment reassembly header"); 95487a161cSBjoern A. Zeeb 96757cb678SBjoern A. Zeeb /* System wide (global) maximum and count of packets in reassembly queues. */ 97757cb678SBjoern A. Zeeb static int ip6_maxfrags; 98757cb678SBjoern A. Zeeb static volatile u_int frag6_nfrags = 0; 99757cb678SBjoern A. Zeeb 100757cb678SBjoern A. Zeeb /* Maximum and current packets in per-VNET reassembly queue. */ 101757cb678SBjoern A. Zeeb VNET_DEFINE_STATIC(int, ip6_maxfragpackets); 10280d7a853SJonathan T. Looney VNET_DEFINE_STATIC(volatile u_int, frag6_nfragpackets); 103757cb678SBjoern A. Zeeb #define V_ip6_maxfragpackets VNET(ip6_maxfragpackets) 104757cb678SBjoern A. Zeeb #define V_frag6_nfragpackets VNET(frag6_nfragpackets) 105757cb678SBjoern A. Zeeb 106757cb678SBjoern A. Zeeb /* Maximum per-VNET reassembly queues per bucket and fragments per packet. */ 107757cb678SBjoern A. Zeeb VNET_DEFINE_STATIC(int, ip6_maxfragbucketsize); 108757cb678SBjoern A. Zeeb VNET_DEFINE_STATIC(int, ip6_maxfragsperpacket); 109757cb678SBjoern A. Zeeb #define V_ip6_maxfragbucketsize VNET(ip6_maxfragbucketsize) 110757cb678SBjoern A. Zeeb #define V_ip6_maxfragsperpacket VNET(ip6_maxfragsperpacket) 111757cb678SBjoern A. Zeeb 112757cb678SBjoern A. Zeeb /* Per-VNET reassembly queue buckets. */ 11380d7a853SJonathan T. Looney VNET_DEFINE_STATIC(struct ip6qbucket, ip6q[IP6REASS_NHASH]); 11480d7a853SJonathan T. Looney VNET_DEFINE_STATIC(uint32_t, ip6q_hashseed); 1151e77c105SRobert Watson #define V_ip6q VNET(ip6q) 11680d7a853SJonathan T. Looney #define V_ip6q_hashseed VNET(ip6q_hashseed) 11782cd038dSYoshinobu Inoue 11880d7a853SJonathan T. Looney #define IP6Q_LOCK(i) mtx_lock(&V_ip6q[(i)].lock) 11980d7a853SJonathan T. 
Looney #define IP6Q_TRYLOCK(i) mtx_trylock(&V_ip6q[(i)].lock) 12080d7a853SJonathan T. Looney #define IP6Q_LOCK_ASSERT(i) mtx_assert(&V_ip6q[(i)].lock, MA_OWNED) 12180d7a853SJonathan T. Looney #define IP6Q_UNLOCK(i) mtx_unlock(&V_ip6q[(i)].lock) 12280d7a853SJonathan T. Looney #define IP6Q_HEAD(i) (&V_ip6q[(i)].ip6q) 1239888c401SHajimu UMEMOTO 12482cd038dSYoshinobu Inoue /* 1252ceeacbeSJonathan T. Looney * By default, limit the number of IP6 fragments across all reassembly 1262ceeacbeSJonathan T. Looney * queues to 1/32 of the total number of mbuf clusters. 1272ceeacbeSJonathan T. Looney * 1282ceeacbeSJonathan T. Looney * Limit the total number of reassembly queues per VNET to the 1292ceeacbeSJonathan T. Looney * IP6 fragment limit, but ensure the limit will not allow any bucket 1302ceeacbeSJonathan T. Looney * to grow above 100 items. (The bucket limit is 1312ceeacbeSJonathan T. Looney * IP_MAXFRAGPACKETS / (IPREASS_NHASH / 2), so the 50 is the correct 1322ceeacbeSJonathan T. Looney * multiplier to reach a 100-item limit.) 1332ceeacbeSJonathan T. Looney * The 100-item limit was chosen as brief testing seems to show that 1342ceeacbeSJonathan T. Looney * this produces "reasonable" performance on some subset of systems 1352ceeacbeSJonathan T. Looney * under DoS attack. 1362ceeacbeSJonathan T. Looney */ 1372ceeacbeSJonathan T. Looney #define IP6_MAXFRAGS (nmbclusters / 32) 1382ceeacbeSJonathan T. Looney #define IP6_MAXFRAGPACKETS (imin(IP6_MAXFRAGS, IP6REASS_NHASH * 50)) 1392ceeacbeSJonathan T. Looney 140757cb678SBjoern A. Zeeb 1412ceeacbeSJonathan T. Looney /* 142757cb678SBjoern A. Zeeb * Sysctls and helper function. 14382cd038dSYoshinobu Inoue */ 144757cb678SBjoern A. Zeeb SYSCTL_DECL(_net_inet6_ip6); 145757cb678SBjoern A. Zeeb 146757cb678SBjoern A. Zeeb static void 14709b361c7SBjoern A. Zeeb frag6_set_bucketsize(void) 1481e9f3b73SJonathan T. Looney { 1491e9f3b73SJonathan T. Looney int i; 1501e9f3b73SJonathan T. Looney 1511e9f3b73SJonathan T. 
Looney if ((i = V_ip6_maxfragpackets) > 0) 1521e9f3b73SJonathan T. Looney V_ip6_maxfragbucketsize = imax(i / (IP6REASS_NHASH / 2), 1); 1531e9f3b73SJonathan T. Looney } 1541e9f3b73SJonathan T. Looney 155757cb678SBjoern A. Zeeb SYSCTL_INT(_net_inet6_ip6, IPV6CTL_MAXFRAGS, maxfrags, 156757cb678SBjoern A. Zeeb CTLFLAG_RW, &ip6_maxfrags, 0, 157757cb678SBjoern A. Zeeb "Maximum allowed number of outstanding IPv6 packet fragments. " 158757cb678SBjoern A. Zeeb "A value of 0 means no fragmented packets will be accepted, while a " 159757cb678SBjoern A. Zeeb "a value of -1 means no limit"); 160757cb678SBjoern A. Zeeb 161757cb678SBjoern A. Zeeb static int 162757cb678SBjoern A. Zeeb sysctl_ip6_maxfragpackets(SYSCTL_HANDLER_ARGS) 163757cb678SBjoern A. Zeeb { 164757cb678SBjoern A. Zeeb int error, val; 165757cb678SBjoern A. Zeeb 166757cb678SBjoern A. Zeeb val = V_ip6_maxfragpackets; 167757cb678SBjoern A. Zeeb error = sysctl_handle_int(oidp, &val, 0, req); 168757cb678SBjoern A. Zeeb if (error != 0 || !req->newptr) 169757cb678SBjoern A. Zeeb return (error); 170757cb678SBjoern A. Zeeb V_ip6_maxfragpackets = val; 171757cb678SBjoern A. Zeeb frag6_set_bucketsize(); 172757cb678SBjoern A. Zeeb return (0); 173757cb678SBjoern A. Zeeb } 174757cb678SBjoern A. Zeeb SYSCTL_PROC(_net_inet6_ip6, IPV6CTL_MAXFRAGPACKETS, maxfragpackets, 175757cb678SBjoern A. Zeeb CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW, NULL, 0, 176757cb678SBjoern A. Zeeb sysctl_ip6_maxfragpackets, "I", 177757cb678SBjoern A. Zeeb "Default maximum number of outstanding fragmented IPv6 packets. " 178757cb678SBjoern A. Zeeb "A value of 0 means no fragmented packets will be accepted, while a " 179757cb678SBjoern A. Zeeb "a value of -1 means no limit"); 180757cb678SBjoern A. Zeeb SYSCTL_INT(_net_inet6_ip6, IPV6CTL_MAXFRAGSPERPACKET, maxfragsperpacket, 181757cb678SBjoern A. Zeeb CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_maxfragsperpacket), 0, 182757cb678SBjoern A. 
Zeeb "Maximum allowed number of fragments per packet"); 183757cb678SBjoern A. Zeeb SYSCTL_INT(_net_inet6_ip6, IPV6CTL_MAXFRAGBUCKETSIZE, maxfragbucketsize, 184757cb678SBjoern A. Zeeb CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_maxfragbucketsize), 0, 185757cb678SBjoern A. Zeeb "Maximum number of reassembly queues per hash bucket"); 186757cb678SBjoern A. Zeeb 187757cb678SBjoern A. Zeeb 188757cb678SBjoern A. Zeeb /* 189757cb678SBjoern A. Zeeb * Initialise reassembly queue and fragment identifier. 190757cb678SBjoern A. Zeeb */ 1914f590175SPaul Saab static void 1924f590175SPaul Saab frag6_change(void *tag) 1934f590175SPaul Saab { 1942adfd64fSJonathan T. Looney VNET_ITERATOR_DECL(vnet_iter); 1954f590175SPaul Saab 1962ceeacbeSJonathan T. Looney ip6_maxfrags = IP6_MAXFRAGS; 1972adfd64fSJonathan T. Looney VNET_LIST_RLOCK_NOSLEEP(); 1982adfd64fSJonathan T. Looney VNET_FOREACH(vnet_iter) { 1992adfd64fSJonathan T. Looney CURVNET_SET(vnet_iter); 2002ceeacbeSJonathan T. Looney V_ip6_maxfragpackets = IP6_MAXFRAGPACKETS; 2011e9f3b73SJonathan T. Looney frag6_set_bucketsize(); 2022adfd64fSJonathan T. Looney CURVNET_RESTORE(); 2032adfd64fSJonathan T. Looney } 2042adfd64fSJonathan T. Looney VNET_LIST_RUNLOCK_NOSLEEP(); 2054f590175SPaul Saab } 2064f590175SPaul Saab 20782cd038dSYoshinobu Inoue void 2081272577eSXin LI frag6_init(void) 20982cd038dSYoshinobu Inoue { 21080d7a853SJonathan T. Looney struct ip6q *q6; 21180d7a853SJonathan T. Looney int i; 21282cd038dSYoshinobu Inoue 2132ceeacbeSJonathan T. Looney V_ip6_maxfragpackets = IP6_MAXFRAGPACKETS; 2141e9f3b73SJonathan T. Looney frag6_set_bucketsize(); 21580d7a853SJonathan T. Looney for (i = 0; i < IP6REASS_NHASH; i++) { 21680d7a853SJonathan T. Looney q6 = IP6Q_HEAD(i); 21780d7a853SJonathan T. Looney q6->ip6q_next = q6->ip6q_prev = q6; 21880d7a853SJonathan T. Looney mtx_init(&V_ip6q[i].lock, "ip6qlock", NULL, MTX_DEF); 2191e9f3b73SJonathan T. Looney V_ip6q[i].count = 0; 22080d7a853SJonathan T. Looney } 22180d7a853SJonathan T. 
Looney V_ip6q_hashseed = arc4random(); 22203c99d76SJonathan T. Looney V_ip6_maxfragsperpacket = 64; 2231ed81b73SMarko Zec if (!IS_DEFAULT_VNET(curvnet)) 2241ed81b73SMarko Zec return; 22596c2b042SJesper Skriver 2262ceeacbeSJonathan T. Looney ip6_maxfrags = IP6_MAXFRAGS; 2271ed81b73SMarko Zec EVENTHANDLER_REGISTER(nmbclusters_change, 2281ed81b73SMarko Zec frag6_change, NULL, EVENTHANDLER_PRI_ANY); 22982cd038dSYoshinobu Inoue } 23082cd038dSYoshinobu Inoue 23182cd038dSYoshinobu Inoue /* 232686cdd19SJun-ichiro itojun Hagino * In RFC2460, fragment and reassembly rule do not agree with each other, 233686cdd19SJun-ichiro itojun Hagino * in terms of next header field handling in fragment header. 234686cdd19SJun-ichiro itojun Hagino * While the sender will use the same value for all of the fragmented packets, 235686cdd19SJun-ichiro itojun Hagino * receiver is suggested not to check the consistency. 236686cdd19SJun-ichiro itojun Hagino * 237686cdd19SJun-ichiro itojun Hagino * fragment rule (p20): 238686cdd19SJun-ichiro itojun Hagino * (2) A Fragment header containing: 239686cdd19SJun-ichiro itojun Hagino * The Next Header value that identifies the first header of 240686cdd19SJun-ichiro itojun Hagino * the Fragmentable Part of the original packet. 241686cdd19SJun-ichiro itojun Hagino * -> next header field is same for all fragments 242686cdd19SJun-ichiro itojun Hagino * 243686cdd19SJun-ichiro itojun Hagino * reassembly rule (p21): 244686cdd19SJun-ichiro itojun Hagino * The Next Header field of the last header of the Unfragmentable 245686cdd19SJun-ichiro itojun Hagino * Part is obtained from the Next Header field of the first 246686cdd19SJun-ichiro itojun Hagino * fragment's Fragment header. 
247686cdd19SJun-ichiro itojun Hagino * -> should grab it from the first fragment only 248686cdd19SJun-ichiro itojun Hagino * 249686cdd19SJun-ichiro itojun Hagino * The following note also contradicts with fragment rule - no one is going to 250686cdd19SJun-ichiro itojun Hagino * send different fragment with different next header field. 251686cdd19SJun-ichiro itojun Hagino * 252686cdd19SJun-ichiro itojun Hagino * additional note (p22): 253686cdd19SJun-ichiro itojun Hagino * The Next Header values in the Fragment headers of different 254686cdd19SJun-ichiro itojun Hagino * fragments of the same original packet may differ. Only the value 255686cdd19SJun-ichiro itojun Hagino * from the Offset zero fragment packet is used for reassembly. 256686cdd19SJun-ichiro itojun Hagino * -> should grab it from the first fragment only 257686cdd19SJun-ichiro itojun Hagino * 258686cdd19SJun-ichiro itojun Hagino * There is no explicit reason given in the RFC. Historical reason maybe? 259686cdd19SJun-ichiro itojun Hagino */ 260686cdd19SJun-ichiro itojun Hagino /* 26182cd038dSYoshinobu Inoue * Fragment input 26282cd038dSYoshinobu Inoue */ 26382cd038dSYoshinobu Inoue int 2641272577eSXin LI frag6_input(struct mbuf **mp, int *offp, int proto) 26582cd038dSYoshinobu Inoue { 26682cd038dSYoshinobu Inoue struct mbuf *m = *mp, *t; 26782cd038dSYoshinobu Inoue struct ip6_hdr *ip6; 26882cd038dSYoshinobu Inoue struct ip6_frag *ip6f; 26980d7a853SJonathan T. 
Looney struct ip6q *head, *q6; 270686cdd19SJun-ichiro itojun Hagino struct ip6asfrag *af6, *ip6af, *af6dwn; 2712a5aafceSHajimu UMEMOTO struct in6_ifaddr *ia; 27282cd038dSYoshinobu Inoue int offset = *offp, nxt, i, next; 27382cd038dSYoshinobu Inoue int first_frag = 0; 274686cdd19SJun-ichiro itojun Hagino int fragoff, frgpartlen; /* must be larger than u_int16_t */ 275505e91f5SKristof Provost uint32_t hashkey[(sizeof(struct in6_addr) * 2 + 276505e91f5SKristof Provost sizeof(ip6f->ip6f_ident)) / sizeof(uint32_t)]; 277505e91f5SKristof Provost uint32_t hash, *hashkeyp; 27882cd038dSYoshinobu Inoue struct ifnet *dstifp; 27959dfcba4SHajimu UMEMOTO u_int8_t ecn, ecn0; 280aaa46574SAdrian Chadd #ifdef RSS 281aaa46574SAdrian Chadd struct m_tag *mtag; 282aaa46574SAdrian Chadd struct ip6_direct_ctx *ip6dc; 283aaa46574SAdrian Chadd #endif 284aaa46574SAdrian Chadd 28582cd038dSYoshinobu Inoue ip6 = mtod(m, struct ip6_hdr *); 286686cdd19SJun-ichiro itojun Hagino #ifndef PULLDOWN_TEST 287686cdd19SJun-ichiro itojun Hagino IP6_EXTHDR_CHECK(m, offset, sizeof(struct ip6_frag), IPPROTO_DONE); 28882cd038dSYoshinobu Inoue ip6f = (struct ip6_frag *)((caddr_t)ip6 + offset); 289686cdd19SJun-ichiro itojun Hagino #else 290686cdd19SJun-ichiro itojun Hagino IP6_EXTHDR_GET(ip6f, struct ip6_frag *, m, offset, sizeof(*ip6f)); 291686cdd19SJun-ichiro itojun Hagino if (ip6f == NULL) 29240e39bbbSHajimu UMEMOTO return (IPPROTO_DONE); 293686cdd19SJun-ichiro itojun Hagino #endif 29482cd038dSYoshinobu Inoue 29582cd038dSYoshinobu Inoue dstifp = NULL; 29682cd038dSYoshinobu Inoue /* find the destination interface of the packet. */ 2973e88eb90SAndrey V. Elsukov ia = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */); 2983e88eb90SAndrey V. 
Elsukov if (ia != NULL) { 2992a5aafceSHajimu UMEMOTO dstifp = ia->ia_ifp; 3008c0fec80SRobert Watson ifa_free(&ia->ia_ifa); 3018c0fec80SRobert Watson } 30282cd038dSYoshinobu Inoue /* jumbo payload can't contain a fragment header */ 30382cd038dSYoshinobu Inoue if (ip6->ip6_plen == 0) { 30482cd038dSYoshinobu Inoue icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, offset); 30582cd038dSYoshinobu Inoue in6_ifstat_inc(dstifp, ifs6_reass_fail); 30682cd038dSYoshinobu Inoue return IPPROTO_DONE; 30782cd038dSYoshinobu Inoue } 30882cd038dSYoshinobu Inoue 30982cd038dSYoshinobu Inoue /* 31082cd038dSYoshinobu Inoue * check whether fragment packet's fragment length is 31182cd038dSYoshinobu Inoue * multiple of 8 octets. 31282cd038dSYoshinobu Inoue * sizeof(struct ip6_frag) == 8 31382cd038dSYoshinobu Inoue * sizeof(struct ip6_hdr) = 40 31482cd038dSYoshinobu Inoue */ 31582cd038dSYoshinobu Inoue if ((ip6f->ip6f_offlg & IP6F_MORE_FRAG) && 31682cd038dSYoshinobu Inoue (((ntohs(ip6->ip6_plen) - offset) & 0x7) != 0)) { 31706cd0a3fSHajimu UMEMOTO icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, 318686cdd19SJun-ichiro itojun Hagino offsetof(struct ip6_hdr, ip6_plen)); 31982cd038dSYoshinobu Inoue in6_ifstat_inc(dstifp, ifs6_reass_fail); 32082cd038dSYoshinobu Inoue return IPPROTO_DONE; 32182cd038dSYoshinobu Inoue } 32282cd038dSYoshinobu Inoue 3239cb8d207SAndrey V. Elsukov IP6STAT_INC(ip6s_fragments); 32482cd038dSYoshinobu Inoue in6_ifstat_inc(dstifp, ifs6_reass_reqd); 32582cd038dSYoshinobu Inoue 326686cdd19SJun-ichiro itojun Hagino /* offset now points to data portion */ 32782cd038dSYoshinobu Inoue offset += sizeof(struct ip6_frag); 32882cd038dSYoshinobu Inoue 3294018ea9aSBjoern A. Zeeb /* 3302946a941STom Jones * Handle "atomic" fragments (offset and m bit set to 0) upfront, 3312946a941STom Jones * unrelated to any reassembly (see RFC 6946 and section 4.5 of RFC 3322946a941STom Jones * 8200). Just skip the fragment header. 3334018ea9aSBjoern A. Zeeb */ 3344018ea9aSBjoern A. 
Zeeb if ((ip6f->ip6f_offlg & ~IP6F_RESERVED_MASK) == 0) { 3352946a941STom Jones IP6STAT_INC(ip6s_atomicfrags); 3364018ea9aSBjoern A. Zeeb in6_ifstat_inc(dstifp, ifs6_reass_ok); 3374018ea9aSBjoern A. Zeeb *offp = offset; 338a4061289SAndrey V. Elsukov m->m_flags |= M_FRAGMENTED; 3394018ea9aSBjoern A. Zeeb return (ip6f->ip6f_nxt); 3404018ea9aSBjoern A. Zeeb } 3414018ea9aSBjoern A. Zeeb 3425f9f192dSJonathan T. Looney /* Get fragment length and discard 0-byte fragments. */ 3435f9f192dSJonathan T. Looney frgpartlen = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - offset; 3445f9f192dSJonathan T. Looney if (frgpartlen == 0) { 3455f9f192dSJonathan T. Looney icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, 3465f9f192dSJonathan T. Looney offsetof(struct ip6_hdr, ip6_plen)); 3475f9f192dSJonathan T. Looney in6_ifstat_inc(dstifp, ifs6_reass_fail); 3485f9f192dSJonathan T. Looney IP6STAT_INC(ip6s_fragdropped); 3495f9f192dSJonathan T. Looney return IPPROTO_DONE; 3505f9f192dSJonathan T. Looney } 3515f9f192dSJonathan T. Looney 35280d7a853SJonathan T. Looney hashkeyp = hashkey; 35380d7a853SJonathan T. Looney memcpy(hashkeyp, &ip6->ip6_src, sizeof(struct in6_addr)); 35480d7a853SJonathan T. Looney hashkeyp += sizeof(struct in6_addr) / sizeof(*hashkeyp); 35580d7a853SJonathan T. Looney memcpy(hashkeyp, &ip6->ip6_dst, sizeof(struct in6_addr)); 35680d7a853SJonathan T. Looney hashkeyp += sizeof(struct in6_addr) / sizeof(*hashkeyp); 35780d7a853SJonathan T. Looney *hashkeyp = ip6f->ip6f_ident; 35880d7a853SJonathan T. Looney hash = jenkins_hash32(hashkey, nitems(hashkey), V_ip6q_hashseed); 35980d7a853SJonathan T. Looney hash &= IP6REASS_HMASK; 36080d7a853SJonathan T. Looney head = IP6Q_HEAD(hash); 36180d7a853SJonathan T. Looney IP6Q_LOCK(hash); 3629888c401SHajimu UMEMOTO 3639888c401SHajimu UMEMOTO /* 3649888c401SHajimu UMEMOTO * Enforce upper bound on number of fragments. 3659888c401SHajimu UMEMOTO * If maxfrag is 0, never accept fragments. 
3669888c401SHajimu UMEMOTO * If maxfrag is -1, accept all fragments without limitation. 3679888c401SHajimu UMEMOTO */ 3682adfd64fSJonathan T. Looney if (ip6_maxfrags < 0) 3699888c401SHajimu UMEMOTO ; 3702adfd64fSJonathan T. Looney else if (atomic_load_int(&frag6_nfrags) >= (u_int)ip6_maxfrags) 3719888c401SHajimu UMEMOTO goto dropfrag; 37233841545SHajimu UMEMOTO 37380d7a853SJonathan T. Looney for (q6 = head->ip6q_next; q6 != head; q6 = q6->ip6q_next) 37482cd038dSYoshinobu Inoue if (ip6f->ip6f_ident == q6->ip6q_ident && 37582cd038dSYoshinobu Inoue IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &q6->ip6q_src) && 3764b908c8bSRobert Watson IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &q6->ip6q_dst) 3774b908c8bSRobert Watson #ifdef MAC 3784b908c8bSRobert Watson && mac_ip6q_match(m, q6) 3794b908c8bSRobert Watson #endif 3804b908c8bSRobert Watson ) 38182cd038dSYoshinobu Inoue break; 38282cd038dSYoshinobu Inoue 38380d7a853SJonathan T. Looney if (q6 == head) { 38482cd038dSYoshinobu Inoue /* 38582cd038dSYoshinobu Inoue * the first fragment to arrive, create a reassembly queue. 38682cd038dSYoshinobu Inoue */ 38782cd038dSYoshinobu Inoue first_frag = 1; 38882cd038dSYoshinobu Inoue 38982cd038dSYoshinobu Inoue /* 39082cd038dSYoshinobu Inoue * Enforce upper bound on number of fragmented packets 39182cd038dSYoshinobu Inoue * for which we attempt reassembly; 3929888c401SHajimu UMEMOTO * If maxfragpackets is 0, never accept fragments. 3939888c401SHajimu UMEMOTO * If maxfragpackets is -1, accept all fragments without 3949888c401SHajimu UMEMOTO * limitation. 39582cd038dSYoshinobu Inoue */ 396603724d3SBjoern A. Zeeb if (V_ip6_maxfragpackets < 0) 39733841545SHajimu UMEMOTO ; 3981e9f3b73SJonathan T. Looney else if (V_ip6q[hash].count >= V_ip6_maxfragbucketsize || 3991e9f3b73SJonathan T. Looney atomic_load_int(&V_frag6_nfragpackets) >= 40080d7a853SJonathan T. Looney (u_int)V_ip6_maxfragpackets) 40133841545SHajimu UMEMOTO goto dropfrag; 40280d7a853SJonathan T. 
Looney atomic_add_int(&V_frag6_nfragpackets, 1); 403487a161cSBjoern A. Zeeb q6 = (struct ip6q *)malloc(sizeof(struct ip6q), M_FRAG6, 404487a161cSBjoern A. Zeeb M_NOWAIT | M_ZERO); 40582cd038dSYoshinobu Inoue if (q6 == NULL) 40682cd038dSYoshinobu Inoue goto dropfrag; 4074b908c8bSRobert Watson #ifdef MAC 4084b908c8bSRobert Watson if (mac_ip6q_init(q6, M_NOWAIT) != 0) { 409487a161cSBjoern A. Zeeb free(q6, M_FRAG6); 4104b908c8bSRobert Watson goto dropfrag; 4114b908c8bSRobert Watson } 4124b908c8bSRobert Watson mac_ip6q_create(m, q6); 4134b908c8bSRobert Watson #endif 41480d7a853SJonathan T. Looney frag6_insque_head(q6, head, hash); 41582cd038dSYoshinobu Inoue 416686cdd19SJun-ichiro itojun Hagino /* ip6q_nxt will be filled afterwards, from 1st fragment */ 41782cd038dSYoshinobu Inoue q6->ip6q_down = q6->ip6q_up = (struct ip6asfrag *)q6; 418686cdd19SJun-ichiro itojun Hagino #ifdef notyet 419686cdd19SJun-ichiro itojun Hagino q6->ip6q_nxtp = (u_char *)nxtp; 420686cdd19SJun-ichiro itojun Hagino #endif 42182cd038dSYoshinobu Inoue q6->ip6q_ident = ip6f->ip6f_ident; 42282cd038dSYoshinobu Inoue q6->ip6q_ttl = IPV6_FRAGTTL; 42382cd038dSYoshinobu Inoue q6->ip6q_src = ip6->ip6_src; 42482cd038dSYoshinobu Inoue q6->ip6q_dst = ip6->ip6_dst; 4255e9510e3SJINMEI Tatuya q6->ip6q_ecn = 4265e9510e3SJINMEI Tatuya (ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK; 42782cd038dSYoshinobu Inoue q6->ip6q_unfrglen = -1; /* The 1st fragment has not arrived. */ 4289888c401SHajimu UMEMOTO 4299888c401SHajimu UMEMOTO q6->ip6q_nfrag = 0; 43082cd038dSYoshinobu Inoue } 43182cd038dSYoshinobu Inoue 43282cd038dSYoshinobu Inoue /* 43382cd038dSYoshinobu Inoue * If it's the 1st fragment, record the length of the 43482cd038dSYoshinobu Inoue * unfragmentable part and the next header of the fragment header. 
43582cd038dSYoshinobu Inoue */ 43682cd038dSYoshinobu Inoue fragoff = ntohs(ip6f->ip6f_offlg & IP6F_OFF_MASK); 43782cd038dSYoshinobu Inoue if (fragoff == 0) { 43806cd0a3fSHajimu UMEMOTO q6->ip6q_unfrglen = offset - sizeof(struct ip6_hdr) - 43906cd0a3fSHajimu UMEMOTO sizeof(struct ip6_frag); 44082cd038dSYoshinobu Inoue q6->ip6q_nxt = ip6f->ip6f_nxt; 44182cd038dSYoshinobu Inoue } 44282cd038dSYoshinobu Inoue 44382cd038dSYoshinobu Inoue /* 44482cd038dSYoshinobu Inoue * Check that the reassembled packet would not exceed 65535 bytes 44582cd038dSYoshinobu Inoue * in size. 44682cd038dSYoshinobu Inoue * If it would exceed, discard the fragment and return an ICMP error. 44782cd038dSYoshinobu Inoue */ 44882cd038dSYoshinobu Inoue if (q6->ip6q_unfrglen >= 0) { 44982cd038dSYoshinobu Inoue /* The 1st fragment has already arrived. */ 45082cd038dSYoshinobu Inoue if (q6->ip6q_unfrglen + fragoff + frgpartlen > IPV6_MAXPACKET) { 45182cd038dSYoshinobu Inoue icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, 452686cdd19SJun-ichiro itojun Hagino offset - sizeof(struct ip6_frag) + 453686cdd19SJun-ichiro itojun Hagino offsetof(struct ip6_frag, ip6f_offlg)); 45480d7a853SJonathan T. Looney IP6Q_UNLOCK(hash); 45582cd038dSYoshinobu Inoue return (IPPROTO_DONE); 45682cd038dSYoshinobu Inoue } 45706cd0a3fSHajimu UMEMOTO } else if (fragoff + frgpartlen > IPV6_MAXPACKET) { 45882cd038dSYoshinobu Inoue icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, 459686cdd19SJun-ichiro itojun Hagino offset - sizeof(struct ip6_frag) + 460686cdd19SJun-ichiro itojun Hagino offsetof(struct ip6_frag, ip6f_offlg)); 46180d7a853SJonathan T. Looney IP6Q_UNLOCK(hash); 46282cd038dSYoshinobu Inoue return (IPPROTO_DONE); 46382cd038dSYoshinobu Inoue } 46482cd038dSYoshinobu Inoue /* 46582cd038dSYoshinobu Inoue * If it's the first fragment, do the above check for each 46682cd038dSYoshinobu Inoue * fragment already stored in the reassembly queue. 
46782cd038dSYoshinobu Inoue */ 46882cd038dSYoshinobu Inoue if (fragoff == 0) { 46982cd038dSYoshinobu Inoue for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6; 47082cd038dSYoshinobu Inoue af6 = af6dwn) { 47182cd038dSYoshinobu Inoue af6dwn = af6->ip6af_down; 47282cd038dSYoshinobu Inoue 47382cd038dSYoshinobu Inoue if (q6->ip6q_unfrglen + af6->ip6af_off + af6->ip6af_frglen > 47482cd038dSYoshinobu Inoue IPV6_MAXPACKET) { 47582cd038dSYoshinobu Inoue struct mbuf *merr = IP6_REASS_MBUF(af6); 47682cd038dSYoshinobu Inoue struct ip6_hdr *ip6err; 47782cd038dSYoshinobu Inoue int erroff = af6->ip6af_offset; 47882cd038dSYoshinobu Inoue 47982cd038dSYoshinobu Inoue /* dequeue the fragment. */ 48080d7a853SJonathan T. Looney frag6_deq(af6, hash); 481487a161cSBjoern A. Zeeb free(af6, M_FRAG6); 48282cd038dSYoshinobu Inoue 48382cd038dSYoshinobu Inoue /* adjust pointer. */ 48482cd038dSYoshinobu Inoue ip6err = mtod(merr, struct ip6_hdr *); 48582cd038dSYoshinobu Inoue 48682cd038dSYoshinobu Inoue /* 48782cd038dSYoshinobu Inoue * Restore source and destination addresses 48882cd038dSYoshinobu Inoue * in the erroneous IPv6 header. 48982cd038dSYoshinobu Inoue */ 49082cd038dSYoshinobu Inoue ip6err->ip6_src = q6->ip6q_src; 49182cd038dSYoshinobu Inoue ip6err->ip6_dst = q6->ip6q_dst; 49282cd038dSYoshinobu Inoue 49382cd038dSYoshinobu Inoue icmp6_error(merr, ICMP6_PARAM_PROB, 49482cd038dSYoshinobu Inoue ICMP6_PARAMPROB_HEADER, 495686cdd19SJun-ichiro itojun Hagino erroff - sizeof(struct ip6_frag) + 496686cdd19SJun-ichiro itojun Hagino offsetof(struct ip6_frag, ip6f_offlg)); 49782cd038dSYoshinobu Inoue } 49882cd038dSYoshinobu Inoue } 49982cd038dSYoshinobu Inoue } 50082cd038dSYoshinobu Inoue 501487a161cSBjoern A. Zeeb ip6af = (struct ip6asfrag *)malloc(sizeof(struct ip6asfrag), M_FRAG6, 502487a161cSBjoern A. 
Zeeb M_NOWAIT | M_ZERO); 503686cdd19SJun-ichiro itojun Hagino if (ip6af == NULL) 504686cdd19SJun-ichiro itojun Hagino goto dropfrag; 50582cd038dSYoshinobu Inoue ip6af->ip6af_mff = ip6f->ip6f_offlg & IP6F_MORE_FRAG; 50682cd038dSYoshinobu Inoue ip6af->ip6af_off = fragoff; 50782cd038dSYoshinobu Inoue ip6af->ip6af_frglen = frgpartlen; 50882cd038dSYoshinobu Inoue ip6af->ip6af_offset = offset; 50982cd038dSYoshinobu Inoue IP6_REASS_MBUF(ip6af) = m; 51082cd038dSYoshinobu Inoue 51182cd038dSYoshinobu Inoue if (first_frag) { 51282cd038dSYoshinobu Inoue af6 = (struct ip6asfrag *)q6; 51382cd038dSYoshinobu Inoue goto insert; 51482cd038dSYoshinobu Inoue } 51582cd038dSYoshinobu Inoue 51682cd038dSYoshinobu Inoue /* 51759dfcba4SHajimu UMEMOTO * Handle ECN by comparing this segment with the first one; 51859dfcba4SHajimu UMEMOTO * if CE is set, do not lose CE. 51959dfcba4SHajimu UMEMOTO * drop if CE and not-ECT are mixed for the same packet. 52059dfcba4SHajimu UMEMOTO */ 52159dfcba4SHajimu UMEMOTO ecn = (ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK; 5225e9510e3SJINMEI Tatuya ecn0 = q6->ip6q_ecn; 52359dfcba4SHajimu UMEMOTO if (ecn == IPTOS_ECN_CE) { 52459dfcba4SHajimu UMEMOTO if (ecn0 == IPTOS_ECN_NOTECT) { 525487a161cSBjoern A. Zeeb free(ip6af, M_FRAG6); 52659dfcba4SHajimu UMEMOTO goto dropfrag; 52759dfcba4SHajimu UMEMOTO } 52859dfcba4SHajimu UMEMOTO if (ecn0 != IPTOS_ECN_CE) 5295e9510e3SJINMEI Tatuya q6->ip6q_ecn = IPTOS_ECN_CE; 53059dfcba4SHajimu UMEMOTO } 53159dfcba4SHajimu UMEMOTO if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT) { 532487a161cSBjoern A. Zeeb free(ip6af, M_FRAG6); 53359dfcba4SHajimu UMEMOTO goto dropfrag; 53459dfcba4SHajimu UMEMOTO } 53559dfcba4SHajimu UMEMOTO 53659dfcba4SHajimu UMEMOTO /* 53782cd038dSYoshinobu Inoue * Find a segment which begins after this one does. 
53882cd038dSYoshinobu Inoue */ 53982cd038dSYoshinobu Inoue for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6; 54082cd038dSYoshinobu Inoue af6 = af6->ip6af_down) 54182cd038dSYoshinobu Inoue if (af6->ip6af_off > ip6af->ip6af_off) 54282cd038dSYoshinobu Inoue break; 54382cd038dSYoshinobu Inoue 54482cd038dSYoshinobu Inoue /* 54582cd038dSYoshinobu Inoue * If the incoming framgent overlaps some existing fragments in 54682cd038dSYoshinobu Inoue * the reassembly queue, drop it, since it is dangerous to override 54782cd038dSYoshinobu Inoue * existing fragments from a security point of view. 5489888c401SHajimu UMEMOTO * We don't know which fragment is the bad guy - here we trust 5499888c401SHajimu UMEMOTO * fragment that came in earlier, with no real reason. 5505e9510e3SJINMEI Tatuya * 5515e9510e3SJINMEI Tatuya * Note: due to changes after disabling this part, mbuf passed to 5525e9510e3SJINMEI Tatuya * m_adj() below now does not meet the requirement. 55382cd038dSYoshinobu Inoue */ 55482cd038dSYoshinobu Inoue if (af6->ip6af_up != (struct ip6asfrag *)q6) { 55582cd038dSYoshinobu Inoue i = af6->ip6af_up->ip6af_off + af6->ip6af_up->ip6af_frglen 55682cd038dSYoshinobu Inoue - ip6af->ip6af_off; 55782cd038dSYoshinobu Inoue if (i > 0) { 558487a161cSBjoern A. Zeeb free(ip6af, M_FRAG6); 55982cd038dSYoshinobu Inoue goto dropfrag; 56082cd038dSYoshinobu Inoue } 56182cd038dSYoshinobu Inoue } 56282cd038dSYoshinobu Inoue if (af6 != (struct ip6asfrag *)q6) { 56382cd038dSYoshinobu Inoue i = (ip6af->ip6af_off + ip6af->ip6af_frglen) - af6->ip6af_off; 56482cd038dSYoshinobu Inoue if (i > 0) { 565487a161cSBjoern A. 
Zeeb free(ip6af, M_FRAG6); 56682cd038dSYoshinobu Inoue goto dropfrag; 56782cd038dSYoshinobu Inoue } 56882cd038dSYoshinobu Inoue } 56982cd038dSYoshinobu Inoue 57082cd038dSYoshinobu Inoue insert: 5714b908c8bSRobert Watson #ifdef MAC 5724b908c8bSRobert Watson if (!first_frag) 5734b908c8bSRobert Watson mac_ip6q_update(m, q6); 5744b908c8bSRobert Watson #endif 57582cd038dSYoshinobu Inoue 57682cd038dSYoshinobu Inoue /* 57782cd038dSYoshinobu Inoue * Stick new segment in its place; 57882cd038dSYoshinobu Inoue * check for complete reassembly. 57903c99d76SJonathan T. Looney * If not complete, check fragment limit. 58082cd038dSYoshinobu Inoue * Move to front of packet queue, as we are 58182cd038dSYoshinobu Inoue * the most recently active fragmented packet. 58282cd038dSYoshinobu Inoue */ 58380d7a853SJonathan T. Looney frag6_enq(ip6af, af6->ip6af_up, hash); 5842adfd64fSJonathan T. Looney atomic_add_int(&frag6_nfrags, 1); 5859888c401SHajimu UMEMOTO q6->ip6q_nfrag++; 58682cd038dSYoshinobu Inoue next = 0; 58782cd038dSYoshinobu Inoue for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6; 58882cd038dSYoshinobu Inoue af6 = af6->ip6af_down) { 58982cd038dSYoshinobu Inoue if (af6->ip6af_off != next) { 59003c99d76SJonathan T. Looney if (q6->ip6q_nfrag > V_ip6_maxfragsperpacket) { 591198fdaedSTom Jones IP6STAT_ADD(ip6s_fragdropped, q6->ip6q_nfrag); 5926bbdbbb8SHans Petter Selasky frag6_freef(q6, hash); 59303c99d76SJonathan T. Looney } 59480d7a853SJonathan T. Looney IP6Q_UNLOCK(hash); 59582cd038dSYoshinobu Inoue return IPPROTO_DONE; 59682cd038dSYoshinobu Inoue } 59782cd038dSYoshinobu Inoue next += af6->ip6af_frglen; 59882cd038dSYoshinobu Inoue } 59982cd038dSYoshinobu Inoue if (af6->ip6af_up->ip6af_mff) { 60003c99d76SJonathan T. Looney if (q6->ip6q_nfrag > V_ip6_maxfragsperpacket) { 601198fdaedSTom Jones IP6STAT_ADD(ip6s_fragdropped, q6->ip6q_nfrag); 6026bbdbbb8SHans Petter Selasky frag6_freef(q6, hash); 60303c99d76SJonathan T. Looney } 60480d7a853SJonathan T. 
Looney IP6Q_UNLOCK(hash); 60582cd038dSYoshinobu Inoue return IPPROTO_DONE; 60682cd038dSYoshinobu Inoue } 60782cd038dSYoshinobu Inoue 60882cd038dSYoshinobu Inoue /* 60982cd038dSYoshinobu Inoue * Reassembly is complete; concatenate fragments. 61082cd038dSYoshinobu Inoue */ 61182cd038dSYoshinobu Inoue ip6af = q6->ip6q_down; 61282cd038dSYoshinobu Inoue t = m = IP6_REASS_MBUF(ip6af); 61382cd038dSYoshinobu Inoue af6 = ip6af->ip6af_down; 61480d7a853SJonathan T. Looney frag6_deq(ip6af, hash); 61582cd038dSYoshinobu Inoue while (af6 != (struct ip6asfrag *)q6) { 6169907aba3SAndrey V. Elsukov m->m_pkthdr.csum_flags &= 6179907aba3SAndrey V. Elsukov IP6_REASS_MBUF(af6)->m_pkthdr.csum_flags; 6189907aba3SAndrey V. Elsukov m->m_pkthdr.csum_data += 6199907aba3SAndrey V. Elsukov IP6_REASS_MBUF(af6)->m_pkthdr.csum_data; 6209907aba3SAndrey V. Elsukov 621686cdd19SJun-ichiro itojun Hagino af6dwn = af6->ip6af_down; 62280d7a853SJonathan T. Looney frag6_deq(af6, hash); 62382cd038dSYoshinobu Inoue while (t->m_next) 62482cd038dSYoshinobu Inoue t = t->m_next; 625ba99cc0bSAlexander V. Chernikov m_adj(IP6_REASS_MBUF(af6), af6->ip6af_offset); 62609b0b8c0SNavdeep Parhar m_demote_pkthdr(IP6_REASS_MBUF(af6)); 627ba99cc0bSAlexander V. Chernikov m_cat(t, IP6_REASS_MBUF(af6)); 628487a161cSBjoern A. Zeeb free(af6, M_FRAG6); 629686cdd19SJun-ichiro itojun Hagino af6 = af6dwn; 63082cd038dSYoshinobu Inoue } 63182cd038dSYoshinobu Inoue 6329907aba3SAndrey V. Elsukov while (m->m_pkthdr.csum_data & 0xffff0000) 6339907aba3SAndrey V. Elsukov m->m_pkthdr.csum_data = (m->m_pkthdr.csum_data & 0xffff) + 6349907aba3SAndrey V. Elsukov (m->m_pkthdr.csum_data >> 16); 6359907aba3SAndrey V. Elsukov 63682cd038dSYoshinobu Inoue /* adjust offset to point where the original next header starts */ 63782cd038dSYoshinobu Inoue offset = ip6af->ip6af_offset - sizeof(struct ip6_frag); 638487a161cSBjoern A. 
Zeeb free(ip6af, M_FRAG6); 639686cdd19SJun-ichiro itojun Hagino ip6 = mtod(m, struct ip6_hdr *); 64082cd038dSYoshinobu Inoue ip6->ip6_plen = htons((u_short)next + offset - sizeof(struct ip6_hdr)); 6415e9510e3SJINMEI Tatuya if (q6->ip6q_ecn == IPTOS_ECN_CE) 6425e9510e3SJINMEI Tatuya ip6->ip6_flow |= htonl(IPTOS_ECN_CE << 20); 64382cd038dSYoshinobu Inoue nxt = q6->ip6q_nxt; 64482cd038dSYoshinobu Inoue 6450b438b0fSGleb Smirnoff if (ip6_deletefraghdr(m, offset, M_NOWAIT) != 0) { 64680d7a853SJonathan T. Looney frag6_remque(q6, hash); 6472adfd64fSJonathan T. Looney atomic_subtract_int(&frag6_nfrags, q6->ip6q_nfrag); 6484b908c8bSRobert Watson #ifdef MAC 6494b908c8bSRobert Watson mac_ip6q_destroy(q6); 6504b908c8bSRobert Watson #endif 651487a161cSBjoern A. Zeeb free(q6, M_FRAG6); 65280d7a853SJonathan T. Looney atomic_subtract_int(&V_frag6_nfragpackets, 1); 6530b438b0fSGleb Smirnoff 654686cdd19SJun-ichiro itojun Hagino goto dropfrag; 65582cd038dSYoshinobu Inoue } 65682cd038dSYoshinobu Inoue 65782cd038dSYoshinobu Inoue /* 65882cd038dSYoshinobu Inoue * Store NXT to the original. 65982cd038dSYoshinobu Inoue */ 66068e0e5a6SAndrey V. Elsukov m_copyback(m, ip6_get_prevhdr(m, offset), sizeof(uint8_t), 66168e0e5a6SAndrey V. Elsukov (caddr_t)&nxt); 66282cd038dSYoshinobu Inoue 66380d7a853SJonathan T. Looney frag6_remque(q6, hash); 6642adfd64fSJonathan T. Looney atomic_subtract_int(&frag6_nfrags, q6->ip6q_nfrag); 6654b908c8bSRobert Watson #ifdef MAC 6664b908c8bSRobert Watson mac_ip6q_reassemble(q6, m); 6674b908c8bSRobert Watson mac_ip6q_destroy(q6); 6684b908c8bSRobert Watson #endif 669487a161cSBjoern A. Zeeb free(q6, M_FRAG6); 67080d7a853SJonathan T. Looney atomic_subtract_int(&V_frag6_nfragpackets, 1); 67182cd038dSYoshinobu Inoue 67282cd038dSYoshinobu Inoue if (m->m_flags & M_PKTHDR) { /* Isn't it always true? 
*/ 67382cd038dSYoshinobu Inoue int plen = 0; 67482cd038dSYoshinobu Inoue for (t = m; t; t = t->m_next) 67582cd038dSYoshinobu Inoue plen += t->m_len; 67682cd038dSYoshinobu Inoue m->m_pkthdr.len = plen; 67782cd038dSYoshinobu Inoue } 67882cd038dSYoshinobu Inoue 679aaa46574SAdrian Chadd #ifdef RSS 680aaa46574SAdrian Chadd mtag = m_tag_alloc(MTAG_ABI_IPV6, IPV6_TAG_DIRECT, sizeof(*ip6dc), 681aaa46574SAdrian Chadd M_NOWAIT); 682aaa46574SAdrian Chadd if (mtag == NULL) 683aaa46574SAdrian Chadd goto dropfrag; 684aaa46574SAdrian Chadd 685aaa46574SAdrian Chadd ip6dc = (struct ip6_direct_ctx *)(mtag + 1); 686aaa46574SAdrian Chadd ip6dc->ip6dc_nxt = nxt; 687aaa46574SAdrian Chadd ip6dc->ip6dc_off = offset; 688aaa46574SAdrian Chadd 689aaa46574SAdrian Chadd m_tag_prepend(m, mtag); 690aaa46574SAdrian Chadd #endif 691aaa46574SAdrian Chadd 69280d7a853SJonathan T. Looney IP6Q_UNLOCK(hash); 6939cb8d207SAndrey V. Elsukov IP6STAT_INC(ip6s_reassembled); 69482cd038dSYoshinobu Inoue in6_ifstat_inc(dstifp, ifs6_reass_ok); 69582cd038dSYoshinobu Inoue 696aaa46574SAdrian Chadd #ifdef RSS 697aaa46574SAdrian Chadd /* 698aaa46574SAdrian Chadd * Queue/dispatch for reprocessing. 699aaa46574SAdrian Chadd */ 700aaa46574SAdrian Chadd netisr_dispatch(NETISR_IPV6_DIRECT, m); 701aaa46574SAdrian Chadd return IPPROTO_DONE; 702aaa46574SAdrian Chadd #endif 703aaa46574SAdrian Chadd 70482cd038dSYoshinobu Inoue /* 70582cd038dSYoshinobu Inoue * Tell launch routine the next header 70682cd038dSYoshinobu Inoue */ 70782cd038dSYoshinobu Inoue 70882cd038dSYoshinobu Inoue *mp = m; 70982cd038dSYoshinobu Inoue *offp = offset; 71082cd038dSYoshinobu Inoue 71182cd038dSYoshinobu Inoue return nxt; 71282cd038dSYoshinobu Inoue 71382cd038dSYoshinobu Inoue dropfrag: 71480d7a853SJonathan T. Looney IP6Q_UNLOCK(hash); 71582cd038dSYoshinobu Inoue in6_ifstat_inc(dstifp, ifs6_reass_fail); 7169cb8d207SAndrey V. 
	IP6STAT_INC(ip6s_fragdropped);
	m_freem(m);
	return IPPROTO_DONE;
}

/*
 * Free a fragment reassembly header and all
 * associated datagrams.
 *
 * The caller must hold the reassembly lock for 'bucket' (asserted
 * below).  Queue q6 is unlinked from its bucket and freed together
 * with every fragment still chained to it; the global and per-VNET
 * fragment accounting is adjusted accordingly.
 */
static void
frag6_freef(struct ip6q *q6, uint32_t bucket)
{
	struct ip6asfrag *af6, *down6;

	IP6Q_LOCK_ASSERT(bucket);

	for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
	    af6 = down6) {
		struct mbuf *m = IP6_REASS_MBUF(af6);

		/* Save the successor before af6 is unlinked and freed. */
		down6 = af6->ip6af_down;
		frag6_deq(af6, bucket);

		/*
		 * Return ICMP time exceeded error for the 1st fragment.
		 * Just free other fragments.
		 */
		if (af6->ip6af_off == 0) {
			struct ip6_hdr *ip6;

			/* adjust pointer */
			ip6 = mtod(m, struct ip6_hdr *);

			/* restore source and destination addresses */
			ip6->ip6_src = q6->ip6q_src;
			ip6->ip6_dst = q6->ip6q_dst;

			icmp6_error(m, ICMP6_TIME_EXCEEDED,
			    ICMP6_TIME_EXCEED_REASSEMBLY, 0);
		} else
			m_freem(m);
		free(af6, M_FRAG6);
	}
	frag6_remque(q6, bucket);
	atomic_subtract_int(&frag6_nfrags, q6->ip6q_nfrag);
#ifdef MAC
	mac_ip6q_destroy(q6);
#endif
	free(q6, M_FRAG6);
	atomic_subtract_int(&V_frag6_nfragpackets, 1);
}

/*
 * Put an ip fragment on a reassembly chain.
 * Like insque, but pointers in middle of structure.
 */
static void
frag6_enq(struct ip6asfrag *af6, struct ip6asfrag *up6,
    uint32_t bucket __unused)
{

	/* 'bucket' is used only for the lock assertion. */
	IP6Q_LOCK_ASSERT(bucket);

	/* Link af6 directly below up6 in the doubly-linked chain. */
	af6->ip6af_up = up6;
	af6->ip6af_down = up6->ip6af_down;
	up6->ip6af_down->ip6af_up = af6;
	up6->ip6af_down = af6;
}

/*
 * To frag6_enq as remque is to insque.
 */
static void
frag6_deq(struct ip6asfrag *af6, uint32_t bucket __unused)
{

	/* 'bucket' is used only for the lock assertion. */
	IP6Q_LOCK_ASSERT(bucket);

	/* Unlink af6; the node itself is left intact for the caller. */
	af6->ip6af_up->ip6af_down = af6->ip6af_down;
	af6->ip6af_down->ip6af_up = af6->ip6af_up;
}

static void
Looney frag6_insque_head(struct ip6q *new, struct ip6q *old, uint32_t bucket) 80082cd038dSYoshinobu Inoue { 8019888c401SHajimu UMEMOTO 80280d7a853SJonathan T. Looney IP6Q_LOCK_ASSERT(bucket); 80380d7a853SJonathan T. Looney KASSERT(IP6Q_HEAD(bucket) == old, 80480d7a853SJonathan T. Looney ("%s: attempt to insert at head of wrong bucket" 80580d7a853SJonathan T. Looney " (bucket=%u, old=%p)", __func__, bucket, old)); 8069888c401SHajimu UMEMOTO 80782cd038dSYoshinobu Inoue new->ip6q_prev = old; 80882cd038dSYoshinobu Inoue new->ip6q_next = old->ip6q_next; 80982cd038dSYoshinobu Inoue old->ip6q_next->ip6q_prev= new; 81082cd038dSYoshinobu Inoue old->ip6q_next = new; 8111e9f3b73SJonathan T. Looney V_ip6q[bucket].count++; 81282cd038dSYoshinobu Inoue } 81382cd038dSYoshinobu Inoue 81480d7a853SJonathan T. Looney static void 8151e9f3b73SJonathan T. Looney frag6_remque(struct ip6q *p6, uint32_t bucket) 81682cd038dSYoshinobu Inoue { 8179888c401SHajimu UMEMOTO 81880d7a853SJonathan T. Looney IP6Q_LOCK_ASSERT(bucket); 8199888c401SHajimu UMEMOTO 82082cd038dSYoshinobu Inoue p6->ip6q_prev->ip6q_next = p6->ip6q_next; 82182cd038dSYoshinobu Inoue p6->ip6q_next->ip6q_prev = p6->ip6q_prev; 8221e9f3b73SJonathan T. Looney V_ip6q[bucket].count--; 82382cd038dSYoshinobu Inoue } 82482cd038dSYoshinobu Inoue 82582cd038dSYoshinobu Inoue /* 82633841545SHajimu UMEMOTO * IPv6 reassembling timer processing; 82782cd038dSYoshinobu Inoue * if a timer expires on a reassembly 82882cd038dSYoshinobu Inoue * queue, discard it. 82982cd038dSYoshinobu Inoue */ 83082cd038dSYoshinobu Inoue void 8311272577eSXin LI frag6_slowtimo(void) 83282cd038dSYoshinobu Inoue { 8338b615593SMarko Zec VNET_ITERATOR_DECL(vnet_iter); 83480d7a853SJonathan T. Looney struct ip6q *head, *q6; 83580d7a853SJonathan T. Looney int i; 83682cd038dSYoshinobu Inoue 8375ee847d3SRobert Watson VNET_LIST_RLOCK_NOSLEEP(); 8388b615593SMarko Zec VNET_FOREACH(vnet_iter) { 8398b615593SMarko Zec CURVNET_SET(vnet_iter); 84080d7a853SJonathan T. 
Looney for (i = 0; i < IP6REASS_NHASH; i++) { 84180d7a853SJonathan T. Looney IP6Q_LOCK(i); 84280d7a853SJonathan T. Looney head = IP6Q_HEAD(i); 84380d7a853SJonathan T. Looney q6 = head->ip6q_next; 8441e9f3b73SJonathan T. Looney if (q6 == NULL) { 8451e9f3b73SJonathan T. Looney /* 8461e9f3b73SJonathan T. Looney * XXXJTL: This should never happen. This 8471e9f3b73SJonathan T. Looney * should turn into an assertion. 8481e9f3b73SJonathan T. Looney */ 8491e9f3b73SJonathan T. Looney IP6Q_UNLOCK(i); 8501e9f3b73SJonathan T. Looney continue; 8511e9f3b73SJonathan T. Looney } 85280d7a853SJonathan T. Looney while (q6 != head) { 85382cd038dSYoshinobu Inoue --q6->ip6q_ttl; 85482cd038dSYoshinobu Inoue q6 = q6->ip6q_next; 85582cd038dSYoshinobu Inoue if (q6->ip6q_prev->ip6q_ttl == 0) { 856198fdaedSTom Jones IP6STAT_ADD(ip6s_fragtimeout, 857198fdaedSTom Jones q6->ip6q_prev->ip6q_nfrag); 85882cd038dSYoshinobu Inoue /* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */ 8596bbdbbb8SHans Petter Selasky frag6_freef(q6->ip6q_prev, i); 86082cd038dSYoshinobu Inoue } 86182cd038dSYoshinobu Inoue } 86282cd038dSYoshinobu Inoue /* 86382cd038dSYoshinobu Inoue * If we are over the maximum number of fragments 86482cd038dSYoshinobu Inoue * (due to the limit being lowered), drain off 86582cd038dSYoshinobu Inoue * enough to get down to the new limit. 8661e9f3b73SJonathan T. Looney * Note that we drain all reassembly queues if 8671e9f3b73SJonathan T. Looney * maxfragpackets is 0 (fragmentation is disabled), 8681e9f3b73SJonathan T. Looney * and don't enforce a limit when maxfragpackets 8691e9f3b73SJonathan T. Looney * is negative. 87082cd038dSYoshinobu Inoue */ 8711e9f3b73SJonathan T. Looney while ((V_ip6_maxfragpackets == 0 || 8721e9f3b73SJonathan T. Looney (V_ip6_maxfragpackets > 0 && 8731e9f3b73SJonathan T. Looney V_ip6q[i].count > V_ip6_maxfragbucketsize)) && 87480d7a853SJonathan T. 
Looney head->ip6q_prev != head) { 875198fdaedSTom Jones IP6STAT_ADD(ip6s_fragoverflow, 876198fdaedSTom Jones q6->ip6q_prev->ip6q_nfrag); 87782cd038dSYoshinobu Inoue /* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */ 8786bbdbbb8SHans Petter Selasky frag6_freef(head->ip6q_prev, i); 87980d7a853SJonathan T. Looney } 88080d7a853SJonathan T. Looney IP6Q_UNLOCK(i); 88182cd038dSYoshinobu Inoue } 8821e9f3b73SJonathan T. Looney /* 8831e9f3b73SJonathan T. Looney * If we are still over the maximum number of fragmented 8841e9f3b73SJonathan T. Looney * packets, drain off enough to get down to the new limit. 8851e9f3b73SJonathan T. Looney */ 8861e9f3b73SJonathan T. Looney i = 0; 8871e9f3b73SJonathan T. Looney while (V_ip6_maxfragpackets >= 0 && 8881e9f3b73SJonathan T. Looney atomic_load_int(&V_frag6_nfragpackets) > 8891e9f3b73SJonathan T. Looney (u_int)V_ip6_maxfragpackets) { 8901e9f3b73SJonathan T. Looney IP6Q_LOCK(i); 8911e9f3b73SJonathan T. Looney head = IP6Q_HEAD(i); 8921e9f3b73SJonathan T. Looney if (head->ip6q_prev != head) { 893198fdaedSTom Jones IP6STAT_ADD(ip6s_fragoverflow, 894198fdaedSTom Jones q6->ip6q_prev->ip6q_nfrag); 8951e9f3b73SJonathan T. Looney /* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */ 8966bbdbbb8SHans Petter Selasky frag6_freef(head->ip6q_prev, i); 8971e9f3b73SJonathan T. Looney } 8981e9f3b73SJonathan T. Looney IP6Q_UNLOCK(i); 8991e9f3b73SJonathan T. Looney i = (i + 1) % IP6REASS_NHASH; 9001e9f3b73SJonathan T. Looney } 9018b615593SMarko Zec CURVNET_RESTORE(); 9028b615593SMarko Zec } 9035ee847d3SRobert Watson VNET_LIST_RUNLOCK_NOSLEEP(); 90482cd038dSYoshinobu Inoue } 90582cd038dSYoshinobu Inoue 90682cd038dSYoshinobu Inoue /* 90782cd038dSYoshinobu Inoue * Drain off all datagram fragments. 90882cd038dSYoshinobu Inoue */ 90982cd038dSYoshinobu Inoue void 9101272577eSXin LI frag6_drain(void) 91182cd038dSYoshinobu Inoue { 9128b615593SMarko Zec VNET_ITERATOR_DECL(vnet_iter); 91380d7a853SJonathan T. Looney struct ip6q *head; 91480d7a853SJonathan T. 
	int i;

	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		for (i = 0; i < IP6REASS_NHASH; i++) {
			/*
			 * Best effort: skip buckets whose lock cannot be
			 * taken immediately rather than blocking here.
			 */
			if (IP6Q_TRYLOCK(i) == 0)
				continue;
			head = IP6Q_HEAD(i);
			while (head->ip6q_next != head) {
				IP6STAT_INC(ip6s_fragdropped);
				/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
				frag6_freef(head->ip6q_next, i);
			}
			IP6Q_UNLOCK(i);
		}
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}

/*
 * Remove the IPv6 fragment header found at byte position 'offset' in
 * mbuf chain 'm', splicing the preceding headers to the data that
 * follows it.  'wait' is the mbuf allocation wait flag handed to
 * m_split() (the reassembly caller passes M_NOWAIT).
 *
 * Returns 0 on success or ENOMEM if the chain could not be split.
 */
int
ip6_deletefraghdr(struct mbuf *m, int offset, int wait)
{
	struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
	struct mbuf *t;

	/* Delete frag6 header. */
	if (m->m_len >= offset + sizeof(struct ip6_frag)) {
		/* This is the only possible case with !PULLDOWN_TEST. */
		/*
		 * Slide the leading headers forward over the fragment
		 * header in place; bcopy() copes with the overlap.
		 */
		bcopy(ip6, (char *)ip6 + sizeof(struct ip6_frag),
		    offset);
		m->m_data += sizeof(struct ip6_frag);
		m->m_len -= sizeof(struct ip6_frag);
	} else {
		/* This comes with no copy if the boundary is on cluster. */
		if ((t = m_split(m, offset, wait)) == NULL)
			return (ENOMEM);
		/* Trim the fragment header off the tail and rejoin. */
		m_adj(t, sizeof(struct ip6_frag));
		m_cat(m, t);
	}

	m->m_flags |= M_FRAGMENTED;
	return (0);
}