/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from tahoe:	in_cksum.c	1.2	86/01/05
 *	from:		@(#)in_cksum.c	1.3 (Berkeley) 1/19/91
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>

#include <machine/in_cksum.h>

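/*
 * machine/in_cksum.h wires the generic checksum entry points to this
 * routine; in FreeBSD's i386 header the mapping is (approximately):
 *
 *	#define	in_cksum(m, len)	in_cksum_skip(m, len, 0)
 */
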
/*
 * Checksum routine for Internet Protocol family headers.
 *
 * This routine is very heavily used in the network
 * code and should be modified for each CPU to be as fast as possible.
 *
 * This implementation is the 386 version.
 */

#undef	ADDCARRY
#define ADDCARRY(x)     if ((x) > 0xffff) (x) -= 0xffff
#define REDUCE          {sum = (sum & 0xffff) + (sum >> 16); ADDCARRY(sum);}
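
/*
 * Worked example: for sum == 0x2f0f3, REDUCE computes
 * (0xf0f3 + 0x2) == 0xf0f5; for sum == 0x1fffe it computes
 * (0xfffe + 0x1) == 0xffff, which ADDCARRY leaves alone since it only
 * fires for values strictly greater than 0xffff.  This is
 * ones-complement (mod 0xffff) folding of the 32-bit accumulator back
 * into 16 bits.
 */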

/*
 * These asm statements require __volatile because they pass information
 * via the condition codes.  GCC does not currently provide a way to
 * specify the condition codes as an input or output operand.
 *
 * The LOAD below (the "mov" of w[8] into %eax in the main unrolled
 * loop) is effectively a prefetch into cache: the value is loaded into
 * a register and never used.  Since modern CPUs reorder operations,
 * the load generally takes place in parallel with other calculations.
 */
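/*
 * For reference, the recurring add-with-carry idiom below reduces to
 * this minimal form (an illustrative sketch, not used by the build):
 *
 *	__asm __volatile (
 *		"addl %1, %0\n"		(32-bit add; may set carry)
 *		"adcl $0, %0"		(fold the carry back into the sum)
 *		: "+r" (sum)
 *		: "g" (word));
 *
 * Folding the final carry back in is what makes a series of 32-bit
 * adds compute a ones-complement sum once REDUCE combines the two
 * 16-bit halves.
 */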
u_short
in_cksum_skip(struct mbuf *m, int len, int skip)
{
	u_short *w;
	unsigned sum = 0;
	int mlen = 0;
	int byte_swapped = 0;
	union { char	c[2]; u_short	s; } su;

	len -= skip;
	for (; skip && m; m = m->m_next) {
		if (m->m_len > skip) {
			mlen = m->m_len - skip;
			w = (u_short *)(mtod(m, u_char *) + skip);
			goto skip_start;
		} else {
			skip -= m->m_len;
		}
	}

	for (; m && len; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		w = mtod(m, u_short *);
		if (mlen == -1) {
			/*
			 * The first byte of this mbuf is the continuation
			 * of a word that spans this mbuf and the previous
			 * one.
			 */

			/*
			 * su.c[0] was saved while scanning the previous
			 * mbuf; sum was REDUCEd when we found mlen == -1.
			 */
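			/*
			 * Example: mbuf lengths 3 and 4 give the byte
			 * stream b0 b1 b2 | b3 b4 b5 b6.  The word (b2,b3)
			 * straddles the boundary: b2 was stashed in
			 * su.c[0] at the bottom of the loop, b3 is picked
			 * up here, and su.s supplies the reassembled word.
			 */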
			su.c[1] = *(u_char *)w;
			sum += su.s;
			w = (u_short *)((char *)w + 1);
			mlen = m->m_len - 1;
			len--;
		} else
			mlen = m->m_len;
skip_start:
		if (len < mlen)
			mlen = len;
		len -= mlen;
		/*
		 * Force w to a long boundary so we do longword-aligned
		 * memory operations.
		 */
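		/*
		 * When w is odd-aligned, the sum <<= 8 below is, in
		 * combination with REDUCE, a rotation of the 16-bit
		 * ones-complement sum by 8 bits, so words read from the
		 * misaligned stream accumulate into the proper byte
		 * lanes; the matching sum <<= 8 in the byte_swapped
		 * handling at the bottom of the loop rotates the sum back.
		 */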
		if (3 & (int) w) {
			REDUCE;
			if ((1 & (int) w) && (mlen > 0)) {
				sum <<= 8;
				su.c[0] = *(char *)w;
				w = (u_short *)((char *)w + 1);
				mlen--;
				byte_swapped = 1;
			}
			if ((2 & (int) w) && (mlen >= 2)) {
				sum += *w++;
				mlen -= 2;
			}
		}
		/*
		 * Advance to a 486 cache line (16 byte) boundary.
		 */
		if (4 & (int) w && mlen >= 4) {
			__asm __volatile (
				"addl %1, %0\n"
				"adcl $0, %0"
				: "+r" (sum)
				: "g" (((const u_int32_t *)w)[0])
			);
			w += 2;
			mlen -= 4;
		}
		if (8 & (int) w && mlen >= 8) {
			__asm __volatile (
				"addl %1, %0\n"
				"adcl %2, %0\n"
				"adcl $0, %0"
				: "+r" (sum)
				: "g" (((const u_int32_t *)w)[0]),
				  "g" (((const u_int32_t *)w)[1])
			);
			w += 4;
			mlen -= 8;
		}
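		/* w is now 16-byte aligned whenever mlen allowed it. */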
		/*
		 * Do as much of the checksum as possible 32 bits at a time.
		 * In fact, this loop is unrolled to make overhead from
		 * branches &c small.
		 */
		mlen -= 1;
		while ((mlen -= 32) >= 0) {
			/*
			 * Add with carry 16 words and fold in the last
			 * carry by adding a 0 with carry.
			 *
			 * The early ADD(16) (the add of w[4]) and the
			 * LOAD(32) (the mov of w[8]) are to load the
			 * next 2 cache lines in advance on 486's.  The
			 * 486 has a penalty of 2 clock cycles for loading
			 * a cache line, plus whatever time the external
			 * memory takes to load the first word(s) addressed.
			 * These penalties are unavoidable.  Subsequent
			 * accesses to a cache line being loaded (and to
			 * other external memory?) are delayed until the
			 * whole load finishes.  These penalties are mostly
			 * avoided by not accessing external memory for
			 * 8 cycles after the ADD(16) and 12 cycles after
			 * the LOAD(32).  The loop requires mlen to start
			 * at 33 (not 32), via the mlen -= 1 above, to
			 * guarantee that the LOAD(32) is within bounds.
			 */
			__asm __volatile (
				"addl %1, %0\n"
				"adcl %2, %0\n"
				"adcl %3, %0\n"
				"adcl %4, %0\n"
				"adcl %5, %0\n"
				"mov  %6, %%eax\n"
				"adcl %7, %0\n"
				"adcl %8, %0\n"
				"adcl %9, %0\n"
				"adcl $0, %0"
				: "+r" (sum)
				: "g" (((const u_int32_t *)w)[4]),
				  "g" (((const u_int32_t *)w)[0]),
				  "g" (((const u_int32_t *)w)[1]),
				  "g" (((const u_int32_t *)w)[2]),
				  "g" (((const u_int32_t *)w)[3]),
				  "g" (((const u_int32_t *)w)[8]),
				  "g" (((const u_int32_t *)w)[5]),
				  "g" (((const u_int32_t *)w)[6]),
				  "g" (((const u_int32_t *)w)[7])
				: "eax"
			);
			w += 16;
		}
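		/* Undo the earlier mlen -= 1 and the loop's final -= 32. */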
		mlen += 32 + 1;
		if (mlen >= 32) {
			__asm __volatile (
				"addl %1, %0\n"
				"adcl %2, %0\n"
				"adcl %3, %0\n"
				"adcl %4, %0\n"
				"adcl %5, %0\n"
				"adcl %6, %0\n"
				"adcl %7, %0\n"
				"adcl %8, %0\n"
				"adcl $0, %0"
				: "+r" (sum)
				: "g" (((const u_int32_t *)w)[4]),
				  "g" (((const u_int32_t *)w)[0]),
				  "g" (((const u_int32_t *)w)[1]),
				  "g" (((const u_int32_t *)w)[2]),
				  "g" (((const u_int32_t *)w)[3]),
				  "g" (((const u_int32_t *)w)[5]),
				  "g" (((const u_int32_t *)w)[6]),
				  "g" (((const u_int32_t *)w)[7])
			);
			w += 16;
			mlen -= 32;
		}
		if (mlen >= 16) {
			__asm __volatile (
				"addl %1, %0\n"
				"adcl %2, %0\n"
				"adcl %3, %0\n"
				"adcl %4, %0\n"
				"adcl $0, %0"
				: "+r" (sum)
				: "g" (((const u_int32_t *)w)[0]),
				  "g" (((const u_int32_t *)w)[1]),
				  "g" (((const u_int32_t *)w)[2]),
				  "g" (((const u_int32_t *)w)[3])
			);
			w += 8;
			mlen -= 16;
		}
		if (mlen >= 8) {
			__asm __volatile (
				"addl %1, %0\n"
				"adcl %2, %0\n"
				"adcl $0, %0"
				: "+r" (sum)
				: "g" (((const u_int32_t *)w)[0]),
				  "g" (((const u_int32_t *)w)[1])
			);
			w += 4;
			mlen -= 8;
		}
		if (mlen == 0 && byte_swapped == 0)
			continue;       /* worth 1% maybe ?? */
		REDUCE;
		while ((mlen -= 2) >= 0) {
			sum += *w++;
		}
		if (byte_swapped) {
			sum <<= 8;
			byte_swapped = 0;
			if (mlen == -1) {
				su.c[1] = *(char *)w;
				sum += su.s;
				mlen = 0;
			} else
				mlen = -1;
		} else if (mlen == -1)
			/*
			 * This mbuf has an odd number of bytes.
			 * There could be a word split between
			 * this mbuf and the next one, so save the
			 * last byte to prepend to the next mbuf.
			 */
			su.c[0] = *(char *)w;
	}

	if (len)
		printf("%s: out of data by %d\n", __func__, len);
	if (mlen == -1) {
		/*
		 * The last mbuf has an odd number of bytes.  Follow the
		 * standard and pad with a zero byte (in network byte
		 * order the odd byte is shifted left by 8 bits).
		 */
		su.c[1] = 0;
		sum += su.s;
	}
	REDUCE;
	return (~sum & 0xffff);
}
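
/*
 * Typical use (sketch): checksumming an IPv4 header that starts at
 * the front of an mbuf chain.
 *
 *	struct ip *ip = mtod(m, struct ip *);
 *
 *	ip->ip_sum = 0;
 *	ip->ip_sum = in_cksum_skip(m, ip->ip_hl << 2, 0);
 */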