1 /* 2 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 * 26 * $DragonFly: src/sys/netinet/in_cksum.c,v 1.4 2004/02/14 21:12:39 dillon Exp $ 27 */ 28 29 #include <sys/param.h> 30 #include <sys/systm.h> 31 #include <sys/mbuf.h> 32 #include <sys/in_cksum.h> 33 34 #include <netinet/in.h> 35 #include <netinet/in_systm.h> 36 #include <netinet/ip.h> 37 38 #include <machine/endian.h> 39 40 /* 41 * Return the 16 bit 1's complement checksum in network byte order. Devolve 42 * the mbuf into 32 bit aligned segments that we can pass to assembly and 43 * do the rest manually. 
Even though we return a 16 bit unsigned value,
 * we declare it as a 32 bit unsigned value to reduce unnecessary assembly
 * conversions.
 *
 * Byte ordering issues.  Note two things.  First, no secondary carry occurs,
 * and second, a one's complement checksum is endian-independent.  If we are
 * given a data buffer in network byte order, our checksum will be in network
 * byte order.
 *
 * 0xffff + 0xffff = 0xfffe + C = 0xffff (so no second carry occurs).
 *
 * 0x8142 + 0x8243 = 0x0385 + C = 0x0386 (checksum is in same byte order
 * 0x4281 + 0x4382 = 0x8603              as the data regardless of arch)
 *
 * This works with 16, 32, 64, etc... bits as long as we deal with the
 * carry when collapsing it back down to 16 bits.
 */
__uint32_t
in_cksum_range(struct mbuf *m, int offset, int bytes)
{
	__uint8_t *ptr;		/* current position within the current mbuf */
	__uint32_t sum0;	/* accumulator for even-parity (aligned) data */
	__uint32_t sum1;	/* accumulator for odd-parity (byte-shifted) data */
	int n;			/* bytes to process from the current mbuf */
	int flip;		/* 1 when we are odd-byte-offset in the packet */

	/*
	 * Skip fully engulfed mbufs.  Branch predict optimal.
	 */
	while (m && offset >= m->m_len) {
		offset -= m->m_len;
		m = m->m_next;
	}

	/*
	 * Process the checksum for each segment.  Note that the code below is
	 * branch-predict optimal, so it's faster than you might otherwise
	 * believe.  When we are buffer-aligned but also odd-byte-aligned from
	 * the point of view of the IP packet, we accumulate to sum1 instead of
	 * sum0.
	 *
	 * Initial offsets do not pre-set flip (assert that offset is even?)
	 */
	sum0 = 0;
	sum1 = 0;
	flip = 0;
	while (bytes > 0 && m) {
		/*
		 * Calculate pointer base and number of bytes to snarf, account
		 * for snarfed bytes.
		 */
		ptr = mtod(m, __uint8_t *) + offset;
		if ((n = m->m_len - offset) > bytes)
			n = bytes;
		bytes -= n;

		/*
		 * First 16-bit-align our buffer by eating a byte if necessary,
		 * then 32-bit-align our buffer by eating a word if necessary.
		 *
		 * We are endian-sensitive when chomping a byte.  WARNING!  Be
		 * careful optimizing this!  16 and 32 bit words must be aligned
		 * for this to be generic code.
		 */
		if (((intptr_t)ptr & 1) && n) {
			/*
			 * A lone byte contributes to the low half of the 16-bit
			 * word on little-endian and to the high half on
			 * big-endian, hence the opposite sum0/sum1 choices:
			 * sum1 is later shifted left 8 before being folded in.
			 */
#if BYTE_ORDER == LITTLE_ENDIAN
			if (flip)
				sum1 += ptr[0];
			else
				sum0 += ptr[0];
#else
			if (flip)
				sum0 += ptr[0];
			else
				sum1 += ptr[0];
#endif
			++ptr;
			--n;
			/* consumed an odd byte: toggle packet byte parity */
			flip = 1 - flip;
		}
		if (((intptr_t)ptr & 2) && n > 1) {
			if (flip)
				sum1 += *(__uint16_t *)ptr;
			else
				sum0 += *(__uint16_t *)ptr;
			ptr += 2;
			n -= 2;
		}

		/*
		 * Process a 32-bit aligned data buffer and accumulate the result
		 * in sum0 or sum1.  Allow only one 16 bit overflow carry.
		 */
		if (n >= 4) {
			__uint32_t sum32;

			/*
			 * asm_ones32 is an external (assembly) helper that
			 * one's-complement-sums n>>2 32-bit words; fold its
			 * result once to 16 bits + carry before accumulating.
			 */
			sum32 = asm_ones32((void *)ptr, n >> 2);
			sum32 = (sum32 >> 16) + (sum32 & 0xffff);
			if (flip)
				sum1 += sum32;
			else
				sum0 += sum32;
			ptr += n & ~3;
			/* n &= 3; dontcare */
		}

		/*
		 * Handle oddly-sized buffers.  Handle word issues first while
		 * ptr is still aligned.
		 */
		if (n & 2) {
			if (flip)
				sum1 += *(__uint16_t *)ptr;
			else
				sum0 += *(__uint16_t *)ptr;
			ptr += 2;
			/* n -= 2; dontcare */
		}
		if (n & 1) {
			/* trailing byte: same endian-sensitive placement as above */
#if BYTE_ORDER == LITTLE_ENDIAN
			if (flip)
				sum1 += ptr[0];
			else
				sum0 += ptr[0];
#else
			if (flip)
				sum0 += ptr[0];
			else
				sum1 += ptr[0];
#endif
			/* ++ptr; dontcare */
			/* --n; dontcare */
			flip = 1 - flip;
		}
		m = m->m_next;
		offset = 0;
	}

	/*
	 * Due to byte aligned or oddly-sized buffers we may have a checksum
	 * in sum1 which needs to be shifted and added to our main sum.  There
	 * is a presumption here that no more than 255 overflows occurred which
	 * is 255/3 byte aligned mbufs in the worst case.
	 *
	 * The << 8 byte-swaps sum1's contribution back into sum0's byte
	 * order; the fold plus conditional increment collapses the 32-bit
	 * accumulator to 16 bits (the final mask absorbs any residual carry).
	 */
	sum0 += sum1 << 8;
	sum0 = (sum0 >> 16) + (sum0 & 0xffff);
	if (sum0 > 0xffff)
		++sum0;
	return(~sum0 & 0xffff);
}