/*
 * include/asm-xtensa/checksum.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_CHECKSUM_H
#define _XTENSA_CHECKSUM_H

#include <linux/in6.h>
#include <linux/uaccess.h>
#include <asm/core.h>

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
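
/*
 * Illustrative sketch only (hypothetical fragments frag1/frag2): the
 * running 32-bit sum can be fed back in for the next fragment and is
 * finished with csum_fold():
 *
 *	__wsum sum = csum_partial(frag1, len1, 0);
 *	sum = csum_partial(frag2, len2, sum);
 *	__sum16 csum = csum_fold(sum);
 */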

/*
 * the same as csum_partial, but copies from src while it
 * checksums, and handles user-space pointer exceptions correctly, when needed.
 *
 * here it is even more important to align src and dst on a 32-bit (or even
 * better 64-bit) boundary
 */

asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, int len);

#define _HAVE_ARCH_CSUM_AND_COPY
/*
 *	Note: a NULL pointer exception here means someone passed an
 *	incorrect kernel address to one of these functions.
 */
static inline
__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len)
{
	return csum_partial_copy_generic(src, dst, len);
}

#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
static inline
__wsum csum_and_copy_from_user(const void __user *src, void *dst,
			       int len)
{
	if (!access_ok(src, len))
		return 0;
	return csum_partial_copy_generic((__force const void *)src, dst, len);
}
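
/*
 * A return value of 0 from the helper above means access_ok() rejected
 * the user pointer and nothing was copied, so callers treat a zero sum
 * as a fault.  Illustrative sketch only (hypothetical ubuf/kbuf):
 *
 *	__wsum sum = csum_and_copy_from_user(ubuf, kbuf, len);
 *	if (!sum)
 *		return -EFAULT;
 */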

/*
 *	Fold a partial checksum
 */

static __inline__ __sum16 csum_fold(__wsum sum)
{
	unsigned int __dummy;
	__asm__("extui	%1, %0, 16, 16\n\t"
		"extui	%0, %0, 0, 16\n\t"
		"add	%0, %0, %1\n\t"
		"slli	%1, %0, 16\n\t"
		"add	%0, %0, %1\n\t"
		"extui	%0, %0, 16, 16\n\t"
		"neg	%0, %0\n\t"
		"addi	%0, %0, -1\n\t"
		"extui	%0, %0, 0, 16\n\t"
		: "=r" (sum), "=&r" (__dummy)
		: "0" (sum));
	return (__force __sum16)sum;
}
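
/*
 * For reference, a plain-C sketch of what the assembly above computes
 * (illustrative only, not part of the kernel interface): fold the high
 * halfword into the low one, fold the carry back in, then take the ones'
 * complement of the low 16 bits.
 *
 *	u32 s = (__force u32)sum;
 *	s = (s & 0xffff) + (s >> 16);
 *	s += s >> 16;
 *	return (__force __sum16)~s;
 */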

/*
 *	This is a version of ip_compute_csum() optimized for IP headers,
 *	which always checksum on 4-octet boundaries.
 */
static __inline__ __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	unsigned int sum, tmp, endaddr;

	__asm__ __volatile__(
		"sub		%0, %0, %0\n\t"
#if XCHAL_HAVE_LOOPS
		"loopgtz	%2, 2f\n\t"
#else
		"beqz		%2, 2f\n\t"
		"slli		%4, %2, 2\n\t"
		"add		%4, %4, %1\n\t"
		"0:\t"
#endif
		"l32i		%3, %1, 0\n\t"
		"add		%0, %0, %3\n\t"
		"bgeu		%0, %3, 1f\n\t"
		"addi		%0, %0, 1\n\t"
		"1:\t"
		"addi		%1, %1, 4\n\t"
#if !XCHAL_HAVE_LOOPS
		"blt		%1, %4, 0b\n\t"
#endif
		"2:\t"
	/*
	 * Since the input registers which are loaded with iph and ihl
	 * are modified, we must also specify them as outputs, or gcc
	 * will assume they contain their original values.
	 */
		: "=r" (sum), "=r" (iph), "=r" (ihl), "=&r" (tmp),
		  "=&r" (endaddr)
		: "1" (iph), "2" (ihl)
		: "memory");

	return csum_fold(sum);
}
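
/*
 * A portable C sketch of what the loop above does (illustrative only):
 * sum the ihl 32-bit words of the header with end-around carry, then
 * fold the 32-bit result down to the 16-bit ones' complement checksum.
 *
 *	const u32 *word = iph;
 *	u32 s = 0;
 *	unsigned int i;
 *
 *	for (i = 0; i < ihl; i++) {
 *		s += word[i];
 *		if (s < word[i])	// carry out of bit 31
 *			s++;
 *	}
 *	return csum_fold((__force __wsum)s);
 */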

static __inline__ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
					    __u32 len, __u8 proto,
					    __wsum sum)
{
#ifdef __XTENSA_EL__
	unsigned long len_proto = (len + proto) << 8;
#elif defined(__XTENSA_EB__)
	unsigned long len_proto = len + proto;
#else
# error processor byte order undefined!
#endif
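	/*
	 * Editorial note: len and proto are host-order values that must
	 * contribute to the pseudo-header sum in network byte order.  On a
	 * big-endian core they can be added directly; on a little-endian
	 * core the combined value is shifted left by 8 so that, after the
	 * end-around-carry fold, its bytes line up with network order.
	 */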
	__asm__("add	%0, %0, %1\n\t"
		"bgeu	%0, %1, 1f\n\t"
		"addi	%0, %0, 1\n\t"
		"1:\t"
		"add	%0, %0, %2\n\t"
		"bgeu	%0, %2, 1f\n\t"
		"addi	%0, %0, 1\n\t"
		"1:\t"
		"add	%0, %0, %3\n\t"
		"bgeu	%0, %3, 1f\n\t"
		"addi	%0, %0, 1\n\t"
		"1:\t"
		: "=r" (sum), "=r" (len_proto)
		: "r" (daddr), "r" (saddr), "1" (len_proto), "0" (sum));
	return sum;
}

/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static __inline__ __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
					    __u32 len, __u8 proto,
					    __wsum sum)
{
	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
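
/*
 * Typical use when checking a received TCP segment (illustrative only,
 * hypothetical iph/tcp pointers and tcp_len): checksum the segment first,
 * then mix in the pseudo-header; a packet whose checksum field was left
 * in place folds to zero.
 *
 *	__wsum psum = csum_partial(tcp, tcp_len, 0);
 *	if (csum_tcpudp_magic(iph->saddr, iph->daddr,
 *			      tcp_len, IPPROTO_TCP, psum))
 *		goto csum_error;
 */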

/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */

static __inline__ __sum16 ip_compute_csum(const void *buff, int len)
{
	return csum_fold(csum_partial(buff, len, 0));
}
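
/*
 * Illustrative use only (hypothetical icmp_hdr pointer and icmp_len): a
 * whole ICMP message, with its checksum field zeroed, can be checksummed
 * in one pass:
 *
 *	icmp_hdr->checksum = 0;
 *	icmp_hdr->checksum = ip_compute_csum(icmp_hdr, icmp_len);
 */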

#define _HAVE_ARCH_IPV6_CSUM
static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
					  const struct in6_addr *daddr,
					  __u32 len, __u8 proto,
					  __wsum sum)
{
	unsigned int __dummy;
	__asm__("l32i	%1, %2, 0\n\t"
		"add	%0, %0, %1\n\t"
		"bgeu	%0, %1, 1f\n\t"
		"addi	%0, %0, 1\n\t"
		"1:\t"
		"l32i	%1, %2, 4\n\t"
		"add	%0, %0, %1\n\t"
		"bgeu	%0, %1, 1f\n\t"
		"addi	%0, %0, 1\n\t"
		"1:\t"
		"l32i	%1, %2, 8\n\t"
		"add	%0, %0, %1\n\t"
		"bgeu	%0, %1, 1f\n\t"
		"addi	%0, %0, 1\n\t"
		"1:\t"
		"l32i	%1, %2, 12\n\t"
		"add	%0, %0, %1\n\t"
		"bgeu	%0, %1, 1f\n\t"
		"addi	%0, %0, 1\n\t"
		"1:\t"
		"l32i	%1, %3, 0\n\t"
		"add	%0, %0, %1\n\t"
		"bgeu	%0, %1, 1f\n\t"
		"addi	%0, %0, 1\n\t"
		"1:\t"
		"l32i	%1, %3, 4\n\t"
		"add	%0, %0, %1\n\t"
		"bgeu	%0, %1, 1f\n\t"
		"addi	%0, %0, 1\n\t"
		"1:\t"
		"l32i	%1, %3, 8\n\t"
		"add	%0, %0, %1\n\t"
		"bgeu	%0, %1, 1f\n\t"
		"addi	%0, %0, 1\n\t"
		"1:\t"
		"l32i	%1, %3, 12\n\t"
		"add	%0, %0, %1\n\t"
		"bgeu	%0, %1, 1f\n\t"
		"addi	%0, %0, 1\n\t"
		"1:\t"
		"add	%0, %0, %4\n\t"
		"bgeu	%0, %4, 1f\n\t"
		"addi	%0, %0, 1\n\t"
		"1:\t"
		"add	%0, %0, %5\n\t"
		"bgeu	%0, %5, 1f\n\t"
		"addi	%0, %0, 1\n\t"
		"1:\t"
		: "=r" (sum), "=&r" (__dummy)
		: "r" (saddr), "r" (daddr),
		  "r" (htonl(len)), "r" (htonl(proto)), "0" (sum)
		: "memory");

	return csum_fold(sum);
}

/*
 *	Copy and checksum to user
 */
#define HAVE_CSUM_COPY_USER
static __inline__ __wsum csum_and_copy_to_user(const void *src,
					       void __user *dst, int len)
{
	if (!access_ok(dst, len))
		return 0;
	return csum_partial_copy_generic(src, (__force void *)dst, len);
}
#endif	/* _XTENSA_CHECKSUM_H */