xref: /linux/lib/checksum.c (revision 6e41c585)
1*2874c5fdSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
226a28fa4SArnd Bergmann /*
326a28fa4SArnd Bergmann  *
426a28fa4SArnd Bergmann  * INET		An implementation of the TCP/IP protocol suite for the LINUX
526a28fa4SArnd Bergmann  *		operating system.  INET is implemented using the  BSD Socket
626a28fa4SArnd Bergmann  *		interface as the means of communication with the user level.
726a28fa4SArnd Bergmann  *
826a28fa4SArnd Bergmann  *		IP/TCP/UDP checksumming routines
926a28fa4SArnd Bergmann  *
1026a28fa4SArnd Bergmann  * Authors:	Jorge Cwik, <jorge@laser.satlink.net>
1126a28fa4SArnd Bergmann  *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
1226a28fa4SArnd Bergmann  *		Tom May, <ftom@netcom.com>
1326a28fa4SArnd Bergmann  *		Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de>
1426a28fa4SArnd Bergmann  *		Lots of code moved from tcp.c and ip.c; see those files
1526a28fa4SArnd Bergmann  *		for more names.
1626a28fa4SArnd Bergmann  *
1726a28fa4SArnd Bergmann  * 03/02/96	Jes Sorensen, Andreas Schwab, Roman Hodek:
1826a28fa4SArnd Bergmann  *		Fixed some nasty bugs, causing some horrible crashes.
1926a28fa4SArnd Bergmann  *		A: At some points, the sum (%0) was used as
2026a28fa4SArnd Bergmann  *		length-counter instead of the length counter
2126a28fa4SArnd Bergmann  *		(%1). Thanks to Roman Hodek for pointing this out.
2226a28fa4SArnd Bergmann  *		B: GCC seems to mess up if one uses too many
2326a28fa4SArnd Bergmann  *		data-registers to hold input values and one tries to
2426a28fa4SArnd Bergmann  *		specify d0 and d1 as scratch registers. Letting gcc
2526a28fa4SArnd Bergmann  *		choose these registers itself solves the problem.
2626a28fa4SArnd Bergmann  */
2726a28fa4SArnd Bergmann 
2826a28fa4SArnd Bergmann /* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access
2926a28fa4SArnd Bergmann  kills, so most of the assembly has to go. */
3026a28fa4SArnd Bergmann 
318bc3bcc9SPaul Gortmaker #include <linux/export.h>
3226a28fa4SArnd Bergmann #include <net/checksum.h>
3326a28fa4SArnd Bergmann 
3426a28fa4SArnd Bergmann #include <asm/byteorder.h>
3526a28fa4SArnd Bergmann 
3620c1f641SArnd Bergmann #ifndef do_csum
static inline unsigned short from32to16(unsigned int x)
{
	/* Fold the 32-bit one's-complement sum into 16 bits.  The first
	 * fold can itself produce a carry into bit 16, so fold twice to
	 * absorb it. */
	unsigned int folded = (x >> 16) + (x & 0xffff);

	folded = (folded >> 16) + (folded & 0xffff);
	return folded;
}
4526a28fa4SArnd Bergmann 
/*
 * Core 16-bit one's-complement sum over an arbitrary buffer.
 * Works on machines where unaligned word loads are not allowed:
 * it peels leading bytes/half-words until buff is 4-byte aligned,
 * sums whole 32-bit words, then mops up the 2- and 1-byte tails.
 * Returns the folded 16-bit sum (not yet complemented).
 */
static unsigned int do_csum(const unsigned char *buff, int len)
{
	int odd;
	unsigned int result = 0;

	if (len <= 0)
		goto out;
	/* Remember whether buff starts on an odd address: the one's-
	 * complement sum of a byte-shifted buffer equals the byte-swap
	 * of the straight sum, so we swap back at the end. */
	odd = 1 & (unsigned long) buff;
	if (odd) {
		/* Leading odd byte occupies the high lane on LE, low on BE. */
#ifdef __LITTLE_ENDIAN
		result += (*buff << 8);
#else
		result = *buff;
#endif
		len--;
		buff++;
	}
	if (len >= 2) {
		/* Consume one half-word if needed to reach 4-byte alignment. */
		if (2 & (unsigned long) buff) {
			result += *(unsigned short *) buff;
			len -= 2;
			buff += 2;
		}
		if (len >= 4) {
			/* Main loop over aligned 32-bit words.  An unsigned
			 * add that wraps leaves result < w, which is how the
			 * carry out of each addition is detected; the carry
			 * is folded back in on the next iteration (end-around
			 * carry, as one's-complement arithmetic requires). */
			const unsigned char *end = buff + ((unsigned)len & ~3);
			unsigned int carry = 0;
			do {
				unsigned int w = *(unsigned int *) buff;
				buff += 4;
				result += carry;
				result += w;
				carry = (w > result);
			} while (buff < end);
			result += carry;
			/* Partial fold keeps result comfortably below 2^17. */
			result = (result & 0xffff) + (result >> 16);
		}
		/* Trailing half-word, if any. */
		if (len & 2) {
			result += *(unsigned short *) buff;
			buff += 2;
		}
	}
	/* Trailing lone byte goes in the low lane on LE, high on BE. */
	if (len & 1)
#ifdef __LITTLE_ENDIAN
		result += *buff;
#else
		result += (*buff << 8);
#endif
	result = from32to16(result);
	/* Undo the byte-lane shift introduced by the odd start address. */
	if (odd)
		result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
out:
	return result;
}
9920c1f641SArnd Bergmann #endif
10026a28fa4SArnd Bergmann 
10164e69073SVineet Gupta #ifndef ip_fast_csum
10226a28fa4SArnd Bergmann /*
10326a28fa4SArnd Bergmann  *	This is a version of ip_compute_csum() optimized for IP headers,
10426a28fa4SArnd Bergmann  *	which always checksum on 4 octet boundaries.
10526a28fa4SArnd Bergmann  */
ip_fast_csum(const void * iph,unsigned int ihl)10626a28fa4SArnd Bergmann __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
10726a28fa4SArnd Bergmann {
10826a28fa4SArnd Bergmann 	return (__force __sum16)~do_csum(iph, ihl*4);
10926a28fa4SArnd Bergmann }
11026a28fa4SArnd Bergmann EXPORT_SYMBOL(ip_fast_csum);
11164e69073SVineet Gupta #endif
11226a28fa4SArnd Bergmann 
11326a28fa4SArnd Bergmann /*
11426a28fa4SArnd Bergmann  * computes the checksum of a memory block at buff, length len,
11526a28fa4SArnd Bergmann  * and adds in "sum" (32-bit)
11626a28fa4SArnd Bergmann  *
11726a28fa4SArnd Bergmann  * returns a 32-bit number suitable for feeding into itself
11826a28fa4SArnd Bergmann  * or csum_tcpudp_magic
11926a28fa4SArnd Bergmann  *
12026a28fa4SArnd Bergmann  * this function must be called with even lengths, except
12126a28fa4SArnd Bergmann  * for the last fragment, which may be odd
12226a28fa4SArnd Bergmann  *
12326a28fa4SArnd Bergmann  * it's best to have buff aligned on a 32-bit boundary
12426a28fa4SArnd Bergmann  */
__wsum csum_partial(const void *buff, int len, __wsum wsum)
{
	/*
	 * Sum the buffer, then fold the caller's running sum in with an
	 * end-around carry: if the unsigned add wraps, result drops below
	 * sum, and the lost carry bit is added back.
	 */
	unsigned int sum = (__force unsigned int)wsum;
	unsigned int result = do_csum(buff, len);

	result += sum;
	result += (sum > result);
	return (__force __wsum)result;
}
13626a28fa4SArnd Bergmann EXPORT_SYMBOL(csum_partial);
13726a28fa4SArnd Bergmann 
13826a28fa4SArnd Bergmann /*
13926a28fa4SArnd Bergmann  * this routine is used for miscellaneous IP-like checksums, mainly
14026a28fa4SArnd Bergmann  * in icmp.c
14126a28fa4SArnd Bergmann  */
ip_compute_csum(const void * buff,int len)14226a28fa4SArnd Bergmann __sum16 ip_compute_csum(const void *buff, int len)
14326a28fa4SArnd Bergmann {
14426a28fa4SArnd Bergmann 	return (__force __sum16)~do_csum(buff, len);
14526a28fa4SArnd Bergmann }
14626a28fa4SArnd Bergmann EXPORT_SYMBOL(ip_compute_csum);
14726a28fa4SArnd Bergmann 
14826a28fa4SArnd Bergmann #ifndef csum_tcpudp_nofold
from64to32(u64 x)1499ce35779Skarl beldan static inline u32 from64to32(u64 x)
1509ce35779Skarl beldan {
1519ce35779Skarl beldan 	/* add up 32-bit and 32-bit for 32+c bit */
1529ce35779Skarl beldan 	x = (x & 0xffffffff) + (x >> 32);
1539ce35779Skarl beldan 	/* add up carry.. */
1549ce35779Skarl beldan 	x = (x & 0xffffffff) + (x >> 32);
1559ce35779Skarl beldan 	return (u32)x;
1569ce35779Skarl beldan }
1579ce35779Skarl beldan 
__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
			  __u32 len, __u8 proto, __wsum sum)
{
	/* Sum the TCP/UDP pseudo-header fields into a 64-bit accumulator
	 * so no intermediate carries are lost, then fold back to 32 bits. */
	unsigned long long acc = (__force u32)sum;

	acc += (__force u32)saddr;
	acc += (__force u32)daddr;
#ifdef __BIG_ENDIAN
	/* proto and len already sit in the correct byte lanes. */
	acc += proto + len;
#else
	/* On little-endian the 16-bit lanes are swapped, hence the << 8. */
	acc += (proto + len) << 8;
#endif
	return (__force __wsum)from64to32(acc);
}
17226a28fa4SArnd Bergmann EXPORT_SYMBOL(csum_tcpudp_nofold);
17326a28fa4SArnd Bergmann #endif
174