/* xref: /reactos/drivers/network/tcpip/include/linux.h (revision c2c66aff) */
1 #pragma once
2 
3 #include <ntddk.h>
4 
5 #ifndef NULL
6 #define NULL ((void*)0)
7 #endif
8 
9 typedef struct page {
10   int x;
11 } mem_map_t;
12 
13 
14 
15 
16 
17 
18 
19 
20 
21 
22 
23 
24 
25 /* i386 */
26 
27 typedef unsigned short umode_t;
28 
29 /*
30  * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
31  * header files exported to user space
32  */
33 
34 typedef __signed__ char __s8;
35 typedef unsigned char __u8;
36 
37 typedef __signed__ short __s16;
38 typedef unsigned short __u16;
39 
40 typedef __signed__ int __s32;
41 typedef unsigned int __u32;
42 
43 #if defined(__GNUC__) && !defined(__STRICT_ANSI__)
44 typedef __signed__ long long __s64;
45 typedef unsigned long long __u64;
46 #endif
47 
48 /*
49  * These aren't exported outside the kernel to avoid name space clashes
50  */
51 typedef signed char s8;
52 typedef unsigned char u8;
53 
54 typedef signed short s16;
55 typedef unsigned short u16;
56 
57 typedef signed int s32;
58 typedef unsigned int u32;
59 
60 typedef signed long long s64;
61 typedef unsigned long long u64;
62 
63 #define BITS_PER_LONG 32
64 
65 /* DMA addresses come in generic and 64-bit flavours.  */
66 
67 #ifdef CONFIG_HIGHMEM64G
68 typedef u64 dma_addr_t;
69 #else
70 typedef u32 dma_addr_t;
71 #endif
72 typedef u64 dma64_addr_t;
73 
74 
75 
76 /*
77  * This allows for 1024 file descriptors: if NR_OPEN is ever grown
78  * beyond that you'll have to change this too. But 1024 fd's seem to be
79  * enough even for such "real" unices as OSF/1, so hopefully this is
80  * one limit that doesn't have to be changed [again].
81  *
82  * Note that POSIX wants the FD_CLEAR(fd,fdsetp) defines to be in
83  * <sys/time.h> (and thus <linux/time.h>) - but this is a more logical
84  * place for them. Solved by having dummy defines in <sys/time.h>.
85  */
86 
87 /*
88  * Those macros may have been defined in <gnu/types.h>. But we always
89  * use the ones here.
90  */
91 #undef __NFDBITS
92 #define __NFDBITS	(8 * sizeof(unsigned long))
93 
94 #undef __FD_SETSIZE
95 #define __FD_SETSIZE	1024
96 
97 #undef __FDSET_LONGS
98 #define __FDSET_LONGS	(__FD_SETSIZE/__NFDBITS)
99 
100 #undef __FDELT
101 #define	__FDELT(d)	((d) / __NFDBITS)
102 
103 #undef __FDMASK
104 #define	__FDMASK(d)	(1UL << ((d) % __NFDBITS))
105 
106 typedef struct {
107 	unsigned long fds_bits [__FDSET_LONGS];
108 } __kernel_fd_set;
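/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * __FDELT() selects the unsigned long that holds a descriptor's bit and
 * __FDMASK() builds the mask within that word.  A plain-C equivalent of the
 * asm __FD_SET defined further below would therefore look like this
 * (hypothetical helper name, shown only to clarify the macros above):
 */
#if 0
static __inline__ void __portable_fd_set(int fd, __kernel_fd_set *fdsetp)
{
	/* e.g. fd 70 with 32-bit longs: word 70/32 == 2, bit 70%32 == 6 */
	fdsetp->fds_bits[__FDELT(fd)] |= __FDMASK(fd);
}
#endif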
109 
110 /* Type of a signal handler.  */
111 typedef void (*__kernel_sighandler_t)(int);
112 
113 /* Type of a SYSV IPC key.  */
114 typedef int __kernel_key_t;
115 
116 
117 /*
118  * This file is generally used by user-level software, so you need to
119  * be a little careful about namespace pollution etc.  Also, we cannot
120  * assume GCC is being used.
121  */
122 
123 typedef unsigned short	__kernel_dev_t;
124 typedef unsigned long	__kernel_ino_t;
125 typedef unsigned short	__kernel_mode_t;
126 typedef unsigned short	__kernel_nlink_t;
127 typedef long		__kernel_off_t;
128 typedef int		__kernel_pid_t;
129 typedef unsigned short	__kernel_ipc_pid_t;
130 typedef unsigned short	__kernel_uid_t;
131 typedef unsigned short	__kernel_gid_t;
132 typedef unsigned int	__kernel_size_t;
133 typedef int		__kernel_ssize_t;
134 typedef int		__kernel_ptrdiff_t;
135 typedef long		__kernel_time_t;
136 typedef long		__kernel_suseconds_t;
137 typedef long		__kernel_clock_t;
138 typedef int		__kernel_daddr_t;
139 typedef char *		__kernel_caddr_t;
140 typedef unsigned short	__kernel_uid16_t;
141 typedef unsigned short	__kernel_gid16_t;
142 typedef unsigned int	__kernel_uid32_t;
143 typedef unsigned int	__kernel_gid32_t;
144 
145 typedef unsigned short	__kernel_old_uid_t;
146 typedef unsigned short	__kernel_old_gid_t;
147 
148 #ifdef __GNUC__
149 typedef long long	__kernel_loff_t;
150 #endif
151 
152 typedef struct {
153 #if defined(__KERNEL__) || defined(__USE_ALL)
154 	int	val[2];
155 #else /* !defined(__KERNEL__) && !defined(__USE_ALL) */
156 	int	__val[2];
157 #endif /* !defined(__KERNEL__) && !defined(__USE_ALL) */
158 } __kernel_fsid_t;
159 
160 #if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2)
161 
162 #undef	__FD_SET
163 #define __FD_SET(fd,fdsetp) \
164 		__asm__ __volatile__("btsl %1,%0": \
165 			"=m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd)))
166 
167 #undef	__FD_CLR
168 #define __FD_CLR(fd,fdsetp) \
169 		__asm__ __volatile__("btrl %1,%0": \
170 			"=m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd)))
171 
172 #undef	__FD_ISSET
173 #define __FD_ISSET(fd,fdsetp) (__extension__ ({ \
174 		unsigned char __result; \
175 		__asm__ __volatile__("btl %1,%2 ; setb %0" \
176 			:"=q" (__result) :"r" ((int) (fd)), \
177 			"m" (*(__kernel_fd_set *) (fdsetp))); \
178 		__result; }))
179 
180 #undef	__FD_ZERO
181 #define __FD_ZERO(fdsetp) \
182 do { \
183 	int __d0, __d1; \
184 	__asm__ __volatile__("cld ; rep ; stosl" \
185 			:"=m" (*(__kernel_fd_set *) (fdsetp)), \
186 			  "=&c" (__d0), "=&D" (__d1) \
187 			:"a" (0), "1" (__FDSET_LONGS), \
188 			"2" ((__kernel_fd_set *) (fdsetp)) : "memory"); \
189 } while (0)
190 
191 #endif /* defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) */
192 
193 
194 #ifndef __KERNEL_STRICT_NAMES
195 
196 typedef __kernel_fd_set		fd_set;
197 typedef __kernel_dev_t		dev_t;
198 typedef __kernel_ino_t		ino_t;
199 typedef __kernel_mode_t		mode_t;
200 typedef __kernel_nlink_t	nlink_t;
201 typedef __kernel_off_t		off_t;
202 typedef __kernel_pid_t		pid_t;
203 typedef __kernel_daddr_t	daddr_t;
204 typedef __kernel_key_t		key_t;
205 typedef __kernel_suseconds_t	suseconds_t;
206 
207 #ifdef __KERNEL__
208 typedef __kernel_uid32_t	uid_t;
209 typedef __kernel_gid32_t	gid_t;
210 typedef __kernel_uid16_t        uid16_t;
211 typedef __kernel_gid16_t        gid16_t;
212 
213 #ifdef CONFIG_UID16
214 /* This is defined by include/asm-{arch}/posix_types.h */
215 typedef __kernel_old_uid_t	old_uid_t;
216 typedef __kernel_old_gid_t	old_gid_t;
217 #endif /* CONFIG_UID16 */
218 
219 /* libc5 includes this file to define uid_t, thus uid_t can never change
220  * when it is included by non-kernel code
221  */
222 #else
223 typedef __kernel_uid_t		uid_t;
224 typedef __kernel_gid_t		gid_t;
225 #endif /* __KERNEL__ */
226 
227 #if defined(__GNUC__)
228 typedef __kernel_loff_t		loff_t;
229 #endif
230 
231 /*
232  * The following typedefs are also protected by individual ifdefs for
233  * historical reasons:
234  */
235 #ifndef _SIZE_T
236 #define _SIZE_T
237 typedef __kernel_size_t		size_t;
238 #endif
239 
240 #ifndef _SSIZE_T
241 #define _SSIZE_T
242 typedef __kernel_ssize_t	ssize_t;
243 #endif
244 
245 #ifndef _PTRDIFF_T
246 #define _PTRDIFF_T
247 typedef __kernel_ptrdiff_t	ptrdiff_t;
248 #endif
249 
250 #ifndef _TIME_T
251 #define _TIME_T
252 typedef __kernel_time_t		time_t;
253 #endif
254 
255 #ifndef _CLOCK_T
256 #define _CLOCK_T
257 typedef __kernel_clock_t	clock_t;
258 #endif
259 
260 #ifndef _CADDR_T
261 #define _CADDR_T
262 typedef __kernel_caddr_t	caddr_t;
263 #endif
264 
265 /* bsd */
266 typedef unsigned char		u_char;
267 typedef unsigned short		u_short;
268 typedef unsigned int		u_int;
269 typedef unsigned long		u_long;
270 
271 /* sysv */
272 typedef unsigned char		unchar;
273 typedef unsigned short		ushort;
274 typedef unsigned int		uint;
275 typedef unsigned long		ulong;
276 
277 #ifndef __BIT_TYPES_DEFINED__
278 #define __BIT_TYPES_DEFINED__
279 
280 typedef		__u8		u_int8_t;
281 typedef		__s8		int8_t;
282 typedef		__u16		u_int16_t;
283 typedef		__s16		int16_t;
284 typedef		__u32		u_int32_t;
285 typedef		__s32		int32_t;
286 
287 #endif /* !(__BIT_TYPES_DEFINED__) */
288 
289 typedef		__u8		uint8_t;
290 typedef		__u16		uint16_t;
291 typedef		__u32		uint32_t;
292 
293 #if defined(__GNUC__) && !defined(__STRICT_ANSI__)
294 typedef		__u64		uint64_t;
295 typedef		__u64		u_int64_t;
296 typedef		__s64		int64_t;
297 #endif
298 
299 #endif /* __KERNEL_STRICT_NAMES */
300 
301 /*
302  * Below are truly Linux-specific types that should never collide with
303  * any application/library that wants linux/types.h.
304  */
305 
306 struct ustat {
307 	__kernel_daddr_t	f_tfree;
308 	__kernel_ino_t		f_tinode;
309 	char			f_fname[6];
310 	char			f_fpack[6];
311 };
312 
313 
314 
315 
316 
317 
318 
319 
320 
321 
322 
323 
324 
325 
326 #ifndef __LITTLE_ENDIAN
327 #define __LITTLE_ENDIAN 1234
328 #endif
329 #ifndef __LITTLE_ENDIAN_BITFIELD
330 #define __LITTLE_ENDIAN_BITFIELD
331 #endif
332 
333 #if 1 /* swab */
334 
335 /*
336  * linux/byteorder/swab.h
337  * Byte-swapping, independently from CPU endianness
338  *	swabXX[ps]?(foo)
339  *
340  * Francois-Rene Rideau <fare@tunes.org> 19971205
341  *    separated swab functions from cpu_to_XX,
342  *    to clean up support for bizarre-endian architectures.
343  *
344  * See asm-i386/byteorder.h and such for examples of how to provide
345  * architecture-dependent optimized versions
346  *
347  */
348 
349 /* casts are necessary for constants, because we never know for sure
350  * how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way.
351  */
352 #define ___swab16(x) \
353 ({ \
354 	__u16 __x = (x); \
355 	((__u16)( \
356 		(((__u16)(__x) & (__u16)0x00ffU) << 8) | \
357 		(((__u16)(__x) & (__u16)0xff00U) >> 8) )); \
358 })
359 
360 #define ___swab24(x) \
361 ({ \
362 	__u32 __x = (x); \
363 	((__u32)( \
364 		((__x & (__u32)0x000000ffUL) << 16) | \
365 		 (__x & (__u32)0x0000ff00UL)        | \
366 		((__x & (__u32)0x00ff0000UL) >> 16) )); \
367 })
368 
369 #define ___swab32(x) \
370 ({ \
371 	__u32 __x = (x); \
372 	((__u32)( \
373 		(((__u32)(__x) & (__u32)0x000000ffUL) << 24) | \
374 		(((__u32)(__x) & (__u32)0x0000ff00UL) <<  8) | \
375 		(((__u32)(__x) & (__u32)0x00ff0000UL) >>  8) | \
376 		(((__u32)(__x) & (__u32)0xff000000UL) >> 24) )); \
377 })
378 
379 #define ___swab64(x) \
380 ({ \
381 	__u64 __x = (x); \
382 	((__u64)( \
383 		(__u64)(((__u64)(__x) & (__u64)0x00000000000000ffULL) << 56) | \
384 		(__u64)(((__u64)(__x) & (__u64)0x000000000000ff00ULL) << 40) | \
385 		(__u64)(((__u64)(__x) & (__u64)0x0000000000ff0000ULL) << 24) | \
386 		(__u64)(((__u64)(__x) & (__u64)0x00000000ff000000ULL) <<  8) | \
387 	        (__u64)(((__u64)(__x) & (__u64)0x000000ff00000000ULL) >>  8) | \
388 		(__u64)(((__u64)(__x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
389 		(__u64)(((__u64)(__x) & (__u64)0x00ff000000000000ULL) >> 40) | \
390 		(__u64)(((__u64)(__x) & (__u64)0xff00000000000000ULL) >> 56) )); \
391 })
392 
393 #define ___constant_swab16(x) \
394 	((__u16)( \
395 		(((__u16)(x) & (__u16)0x00ffU) << 8) | \
396 		(((__u16)(x) & (__u16)0xff00U) >> 8) ))
397 #define ___constant_swab24(x) \
398 	((__u32)( \
399 		(((__u32)(x) & (__u32)0x000000ffU) << 16) | \
400 		 ((__u32)(x) & (__u32)0x0000ff00U)	   | \
401 		(((__u32)(x) & (__u32)0x00ff0000U) >> 16) ))
402 #define ___constant_swab32(x) \
403 	((__u32)( \
404 		(((__u32)(x) & (__u32)0x000000ffUL) << 24) | \
405 		(((__u32)(x) & (__u32)0x0000ff00UL) <<  8) | \
406 		(((__u32)(x) & (__u32)0x00ff0000UL) >>  8) | \
407 		(((__u32)(x) & (__u32)0xff000000UL) >> 24) ))
408 #define ___constant_swab64(x) \
409 	((__u64)( \
410 		(__u64)(((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) | \
411 		(__u64)(((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) | \
412 		(__u64)(((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) | \
413 		(__u64)(((__u64)(x) & (__u64)0x00000000ff000000ULL) <<  8) | \
414 	        (__u64)(((__u64)(x) & (__u64)0x000000ff00000000ULL) >>  8) | \
415 		(__u64)(((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
416 		(__u64)(((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) | \
417 		(__u64)(((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56) ))
418 
419 /*
420  * provide defaults when no architecture-specific optimization is detected
421  */
422 #ifndef __arch__swab16
423 #  define __arch__swab16(x) ({ __u16 __tmp = (x) ; ___swab16(__tmp); })
424 #endif
425 #ifndef __arch__swab24
426 #  define __arch__swab24(x) ({ __u32 __tmp = (x) ; ___swab24(__tmp); })
427 #endif
428 #ifndef __arch__swab32
429 #  define __arch__swab32(x) ({ __u32 __tmp = (x) ; ___swab32(__tmp); })
430 #endif
431 #ifndef __arch__swab64
432 #  define __arch__swab64(x) ({ __u64 __tmp = (x) ; ___swab64(__tmp); })
433 #endif
434 
435 #ifndef __arch__swab16p
436 #  define __arch__swab16p(x) __arch__swab16(*(x))
437 #endif
438 #ifndef __arch__swab24p
439 #  define __arch__swab24p(x) __arch__swab24(*(x))
440 #endif
441 #ifndef __arch__swab32p
442 #  define __arch__swab32p(x) __arch__swab32(*(x))
443 #endif
444 #ifndef __arch__swab64p
445 #  define __arch__swab64p(x) __arch__swab64(*(x))
446 #endif
447 
448 #ifndef __arch__swab16s
449 #  define __arch__swab16s(x) do { *(x) = __arch__swab16p((x)); } while (0)
450 #endif
451 #ifndef __arch__swab24s
452 #  define __arch__swab24s(x) do { *(x) = __arch__swab24p((x)); } while (0)
453 #endif
454 #ifndef __arch__swab32s
455 #  define __arch__swab32s(x) do { *(x) = __arch__swab32p((x)); } while (0)
456 #endif
457 #ifndef __arch__swab64s
458 #  define __arch__swab64s(x) do { *(x) = __arch__swab64p((x)); } while (0)
459 #endif
460 
461 
462 /*
463  * Allow constant folding
464  */
465 #if defined(__GNUC__) && (__GNUC__ >= 2) && defined(__OPTIMIZE__)
466 #  define __swab16(x) \
467 (__builtin_constant_p((__u16)(x)) ? \
468  ___swab16((x)) : \
469  __fswab16((x)))
470 #  define __swab24(x) \
471 (__builtin_constant_p((__u32)(x)) ? \
472  ___swab24((x)) : \
473  __fswab24((x)))
474 #  define __swab32(x) \
475 (__builtin_constant_p((__u32)(x)) ? \
476  ___swab32((x)) : \
477  __fswab32((x)))
478 #  define __swab64(x) \
479 (__builtin_constant_p((__u64)(x)) ? \
480  ___swab64((x)) : \
481  __fswab64((x)))
482 #else
483 #  define __swab16(x) __fswab16(x)
484 #  define __swab24(x) __fswab24(x)
485 #  define __swab32(x) __fswab32(x)
486 #  define __swab64(x) __fswab64(x)
487 #endif /* OPTIMIZE */
488 
489 
490 static __inline__ __const__ __u16 __fswab16(__u16 x)
491 {
492 	return __arch__swab16(x);
493 }
494 static __inline__ __u16 __swab16p(__u16 *x)
495 {
496 	return __arch__swab16p(x);
497 }
498 static __inline__ void __swab16s(__u16 *addr)
499 {
500 	__arch__swab16s(addr);
501 }
502 
503 static __inline__ __const__ __u32 __fswab24(__u32 x)
504 {
505 	return __arch__swab24(x);
506 }
507 static __inline__ __u32 __swab24p(__u32 *x)
508 {
509 	return __arch__swab24p(x);
510 }
511 static __inline__ void __swab24s(__u32 *addr)
512 {
513 	__arch__swab24s(addr);
514 }
515 
516 static __inline__ __const__ __u32 __fswab32(__u32 x)
517 {
518 	return __arch__swab32(x);
519 }
520 static __inline__ __u32 __swab32p(__u32 *x)
521 {
522 	return __arch__swab32p(x);
523 }
524 static __inline__ void __swab32s(__u32 *addr)
525 {
526 	__arch__swab32s(addr);
527 }
528 
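/*
 * Illustrative sketch (editor's addition): the naming scheme documented in
 * the swab.h comment above gives three flavours per width -- by value,
 * through a pointer, and in place.  For 32 bits, assuming the definitions
 * above, the three forms behave as follows:
 */
#if 0
static void __swab_example(void)
{
	__u32 v = 0x12345678;
	__u32 a = __swab32(v);      /* yields 0x78563412, v untouched      */
	__u32 b = __swab32p(&v);    /* same result, reads through pointer  */
	__swab32s(&v);              /* v itself becomes 0x78563412         */
	(void)a; (void)b;
}
#endif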
529 #ifdef __BYTEORDER_HAS_U64__
530 static __inline__ __const__ __u64 __fswab64(__u64 x)
531 {
532 #  ifdef __SWAB_64_THRU_32__
533 	__u32 h = x >> 32;
534         __u32 l = x & ((1ULL<<32)-1);
535         return (((__u64)__swab32(l)) << 32) | ((__u64)(__swab32(h)));
536 #  else
537 	return __arch__swab64(x);
538 #  endif
539 }
540 static __inline__ __u64 __swab64p(__u64 *x)
541 {
542 	return __arch__swab64p(x);
543 }
544 static __inline__ void __swab64s(__u64 *addr)
545 {
546 	__arch__swab64s(addr);
547 }
548 #endif /* __BYTEORDER_HAS_U64__ */
549 
550 #if defined(__KERNEL__)
551 #define swab16 __swab16
552 #define swab24 __swab24
553 #define swab32 __swab32
554 #define swab64 __swab64
555 #define swab16p __swab16p
556 #define swab24p __swab24p
557 #define swab32p __swab32p
558 #define swab64p __swab64p
559 #define swab16s __swab16s
560 #define swab24s __swab24s
561 #define swab32s __swab32s
562 #define swab64s __swab64s
563 #endif
564 
565 #endif /* swab */
566 
567 
568 
569 #if 1 /* generic */
570 
571 /*
572  * linux/byteorder_generic.h
573  * Generic Byte-reordering support
574  *
575  * Francois-Rene Rideau <fare@tunes.org> 19970707
576  *    gathered all the good ideas from all asm-foo/byteorder.h into one file,
577  *    cleaned them up.
578  *    I hope it is compliant with non-GCC compilers.
579  *    I decided to put __BYTEORDER_HAS_U64__ in byteorder.h,
580  *    because I wasn't sure it would be ok to put it in types.h
581  *    Upgraded it to 2.1.43
582  * Francois-Rene Rideau <fare@tunes.org> 19971012
583  *    Upgraded it to 2.1.57
584  *    to please Linus T., replaced huge #ifdef's between little/big endian
585  *    by nestedly #include'd files.
586  * Francois-Rene Rideau <fare@tunes.org> 19971205
587  *    Made it to 2.1.71; now a facelift:
588  *    Put files under include/linux/byteorder/
589  *    Split swab from generic support.
590  *
591  * TODO:
592  *   = Regular kernel maintainers could also replace all these manual
593  *    byteswap macros that remain, disseminated among drivers,
594  *    after some grep of the sources...
595  *   = Linus might want to rename all these macros and files to fit his taste,
596  *    to fit his personal naming scheme.
597  *   = it seems that a few drivers would also appreciate
598  *    nybble swapping support...
599  *   = every architecture could add their byteswap macro in asm/byteorder.h
600  *    see how some architectures already do (i386, alpha, ppc, etc)
601  *   = cpu_to_beXX and beXX_to_cpu might some day need to be well
602  *    distinguished throughout the kernel. This is not the case currently,
603  *    since little endian, big endian, and pdp endian machines don't need it.
604  *    But this might be the case for, say, a port of Linux to 20/21 bit
605  *    architectures (any F21 Linux addict around?).
606  */
607 
608 /*
609  * The following macros are to be defined by <asm/byteorder.h>:
610  *
611  * Conversion of long and short int between network and host format
612  *	ntohl(__u32 x)
613  *	ntohs(__u16 x)
614  *	htonl(__u32 x)
615  *	htons(__u16 x)
616  * It seems that some programs (which? where? or perhaps a standard? POSIX?)
617  * might like the above to be functions, not macros (why?).
618  * If that's true, then detect them, and take measures.
619  * Anyway, the measure is: define only ___ntohl as a macro instead,
620  * and in a separate file, have
621  * unsigned long inline ntohl(x){return ___ntohl(x);}
622  *
623  * The same for constant arguments
624  *	__constant_ntohl(__u32 x)
625  *	__constant_ntohs(__u16 x)
626  *	__constant_htonl(__u32 x)
627  *	__constant_htons(__u16 x)
628  *
629  * Conversion of XX-bit integers (16- 32- or 64-)
630  * between native CPU format and little/big endian format
631  * 64-bit stuff only defined for proper architectures
632  *	cpu_to_[bl]eXX(__uXX x)
633  *	[bl]eXX_to_cpu(__uXX x)
634  *
635  * The same, but takes a pointer to the value to convert
636  *	cpu_to_[bl]eXXp(__uXX x)
637  *	[bl]eXX_to_cpup(__uXX x)
638  *
639  * The same, but change in situ
640  *	cpu_to_[bl]eXXs(__uXX x)
641  *	[bl]eXX_to_cpus(__uXX x)
642  *
643  * See asm-foo/byteorder.h for examples of how to provide
644  * architecture-optimized versions
645  *
646  */
647 
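/*
 * Illustrative sketch (editor's addition): typical use of the macro families
 * listed above, e.g. when filling a big-endian (network order) protocol
 * field on this little-endian configuration.  Assumes __KERNEL__ so that the
 * short cpu_to_be*/be*_to_cpu* nicknames defined just below are available;
 * "struct proto_hdr" is made up for the illustration.
 */
#if 0
struct proto_hdr { __u16 len; __u32 seq; };

static void __byteorder_example(struct proto_hdr *hdr, __u32 host_seq)
{
	hdr->len = cpu_to_be16(sizeof(struct proto_hdr)); /* value form    */
	hdr->seq = cpu_to_be32(host_seq);                 /* value form    */
	host_seq = be32_to_cpup(&hdr->seq);               /* pointer form  */
	be32_to_cpus(&hdr->seq);                          /* in-place form */
	(void)host_seq;
}
#endif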
648 
649 #if defined(__KERNEL__)
650 /*
651  * inside the kernel, we can use nicknames;
652  * outside of it, we must avoid POSIX namespace pollution...
653  */
654 #define cpu_to_le64 __cpu_to_le64
655 #define le64_to_cpu __le64_to_cpu
656 #define cpu_to_le32 __cpu_to_le32
657 #define le32_to_cpu __le32_to_cpu
658 #define cpu_to_le16 __cpu_to_le16
659 #define le16_to_cpu __le16_to_cpu
660 #define cpu_to_be64 __cpu_to_be64
661 #define be64_to_cpu __be64_to_cpu
662 #define cpu_to_be32 __cpu_to_be32
663 #define be32_to_cpu __be32_to_cpu
664 #define cpu_to_be16 __cpu_to_be16
665 #define be16_to_cpu __be16_to_cpu
666 #define cpu_to_le64p __cpu_to_le64p
667 #define le64_to_cpup __le64_to_cpup
668 #define cpu_to_le32p __cpu_to_le32p
669 #define le32_to_cpup __le32_to_cpup
670 #define cpu_to_le16p __cpu_to_le16p
671 #define le16_to_cpup __le16_to_cpup
672 #define cpu_to_be64p __cpu_to_be64p
673 #define be64_to_cpup __be64_to_cpup
674 #define cpu_to_be32p __cpu_to_be32p
675 #define be32_to_cpup __be32_to_cpup
676 #define cpu_to_be16p __cpu_to_be16p
677 #define be16_to_cpup __be16_to_cpup
678 #define cpu_to_le64s __cpu_to_le64s
679 #define le64_to_cpus __le64_to_cpus
680 #define cpu_to_le32s __cpu_to_le32s
681 #define le32_to_cpus __le32_to_cpus
682 #define cpu_to_le16s __cpu_to_le16s
683 #define le16_to_cpus __le16_to_cpus
684 #define cpu_to_be64s __cpu_to_be64s
685 #define be64_to_cpus __be64_to_cpus
686 #define cpu_to_be32s __cpu_to_be32s
687 #define be32_to_cpus __be32_to_cpus
688 #define cpu_to_be16s __cpu_to_be16s
689 #define be16_to_cpus __be16_to_cpus
690 #endif
691 
692 
693 /*
694  * Handle ntohl and suches. These have various compatibility
695  * issues - like we want to give the prototype even though we
696  * also have a macro for them in case some strange program
697  * wants to take the address of the thing or something..
698  *
699  * Note that these used to return a "long" in libc5, even though
700  * long is often 64-bit these days.. Thus the casts.
701  *
702  * They have to be macros in order to do the constant folding
703  * correctly - if the argument passed into a inline function
704  * it is no longer constant according to gcc..
705  */
706 
707 #undef ntohl
708 #undef ntohs
709 #undef htonl
710 #undef htons
711 
712 /*
713  * Do the prototypes. Somebody might want to take the
714  * address or some such sick thing..
715  */
716 #if defined(__KERNEL__) || (defined (__GLIBC__) && __GLIBC__ >= 2)
717 extern __u32			ntohl(__u32);
718 extern __u32			htonl(__u32);
719 #else
720 extern unsigned long int	ntohl(unsigned long int);
721 extern unsigned long int	htonl(unsigned long int);
722 #endif
723 extern unsigned short int	ntohs(unsigned short int);
724 extern unsigned short int	htons(unsigned short int);
725 
726 
727 #if defined(__GNUC__) && (__GNUC__ >= 2) && defined(__OPTIMIZE__) && !defined(__STRICT_ANSI__)
728 
729 #define ___htonl(x) __cpu_to_be32(x)
730 #define ___htons(x) __cpu_to_be16(x)
731 #define ___ntohl(x) __be32_to_cpu(x)
732 #define ___ntohs(x) __be16_to_cpu(x)
733 
734 #if defined(__KERNEL__) || (defined (__GLIBC__) && __GLIBC__ >= 2)
735 #define htonl(x) ___htonl(x)
736 #define ntohl(x) ___ntohl(x)
737 #else
738 #define htonl(x) ((unsigned long)___htonl(x))
739 #define ntohl(x) ((unsigned long)___ntohl(x))
740 #endif
741 #define htons(x) ___htons(x)
742 #define ntohs(x) ___ntohs(x)
743 
744 #endif /* OPTIMIZE */
745 
746 #endif /* generic */
747 
748 
749 #define __constant_htonl(x) ___constant_swab32((x))
750 #define __constant_ntohl(x) ___constant_swab32((x))
751 #define __constant_htons(x) ___constant_swab16((x))
752 #define __constant_ntohs(x) ___constant_swab16((x))
753 #define __constant_cpu_to_le64(x) ((__u64)(x))
754 #define __constant_le64_to_cpu(x) ((__u64)(x))
755 #define __constant_cpu_to_le32(x) ((__u32)(x))
756 #define __constant_le32_to_cpu(x) ((__u32)(x))
757 #define __constant_cpu_to_le24(x) ((__u32)(x))
758 #define __constant_le24_to_cpu(x) ((__u32)(x))
759 #define __constant_cpu_to_le16(x) ((__u16)(x))
760 #define __constant_le16_to_cpu(x) ((__u16)(x))
761 #define __constant_cpu_to_be64(x) ___constant_swab64((x))
762 #define __constant_be64_to_cpu(x) ___constant_swab64((x))
763 #define __constant_cpu_to_be32(x) ___constant_swab32((x))
764 #define __constant_be32_to_cpu(x) ___constant_swab32((x))
765 #define __constant_cpu_to_be24(x) ___constant_swab24((x))
766 #define __constant_be24_to_cpu(x) ___constant_swab24((x))
767 #define __constant_cpu_to_be16(x) ___constant_swab16((x))
768 #define __constant_be16_to_cpu(x) ___constant_swab16((x))
769 #define __cpu_to_le64(x) ((__u64)(x))
770 #define __le64_to_cpu(x) ((__u64)(x))
771 #define __cpu_to_le32(x) ((__u32)(x))
772 #define __le32_to_cpu(x) ((__u32)(x))
773 #define __cpu_to_le24(x) ((__u32)(x))
774 #define __le24_to_cpu(x) ((__u32)(x))
775 #define __cpu_to_le16(x) ((__u16)(x))
776 #define __le16_to_cpu(x) ((__u16)(x))
777 #define __cpu_to_be64(x) __swab64((x))
778 #define __be64_to_cpu(x) __swab64((x))
779 #define __cpu_to_be32(x) __swab32((x))
780 #define __be32_to_cpu(x) __swab32((x))
781 #define __cpu_to_be24(x) __swab24((x))
782 #define __be24_to_cpu(x) __swab24((x))
783 #define __cpu_to_be16(x) __swab16((x))
784 #define __be16_to_cpu(x) __swab16((x))
785 #define __cpu_to_le64p(x) (*(__u64*)(x))
786 #define __le64_to_cpup(x) (*(__u64*)(x))
787 #define __cpu_to_le32p(x) (*(__u32*)(x))
788 #define __le32_to_cpup(x) (*(__u32*)(x))
789 #define __cpu_to_le24p(x) (*(__u32*)(x))
790 #define __le24_to_cpup(x) (*(__u32*)(x))
791 #define __cpu_to_le16p(x) (*(__u16*)(x))
792 #define __le16_to_cpup(x) (*(__u16*)(x))
793 #define __cpu_to_be64p(x) __swab64p((x))
794 #define __be64_to_cpup(x) __swab64p((x))
795 #define __cpu_to_be32p(x) __swab32p((x))
796 #define __be32_to_cpup(x) __swab32p((x))
797 #define __cpu_to_be24p(x) __swab24p((x))
798 #define __be24_to_cpup(x) __swab24p((x))
799 #define __cpu_to_be16p(x) __swab16p((x))
800 #define __be16_to_cpup(x) __swab16p((x))
801 #define __cpu_to_le64s(x) do {} while (0)
802 #define __le64_to_cpus(x) do {} while (0)
803 #define __cpu_to_le32s(x) do {} while (0)
804 #define __le32_to_cpus(x) do {} while (0)
805 #define __cpu_to_le24s(x) do {} while (0)
806 #define __le24_to_cpus(x) do {} while (0)
807 #define __cpu_to_le16s(x) do {} while (0)
808 #define __le16_to_cpus(x) do {} while (0)
809 #define __cpu_to_be64s(x) __swab64s((x))
810 #define __be64_to_cpus(x) __swab64s((x))
811 #define __cpu_to_be32s(x) __swab32s((x))
812 #define __be32_to_cpus(x) __swab32s((x))
813 #define __cpu_to_be24s(x) __swab24s((x))
814 #define __be24_to_cpus(x) __swab24s((x))
815 #define __cpu_to_be16s(x) __swab16s((x))
816 #define __be16_to_cpus(x) __swab16s((x))
817 
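/*
 * Illustrative sketch (editor's addition): the __constant_* variants above
 * are intended for compile-time constants, e.g. in static initializers where
 * the statement-expression forms could not be used.  0x0800 is the EtherType
 * for IPv4 in host order; the variable name is made up.
 */
#if 0
static const __u16 __example_ethertype_ip_be = __constant_htons(0x0800);
#endif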
818 
819 
820 
821 
822 
823 
824 
825 #if 1
826 
827 /* Dummy types */
828 
829 #define ____cacheline_aligned
830 
831 typedef struct
832 {
833   volatile unsigned int lock;
834 } rwlock_t;
835 
836 typedef struct {
837 	volatile unsigned int lock;
838 } spinlock_t;
839 
840 struct task_struct;
841 
842 
843 
844 
845 
846 #if 1 /* atomic */
847 
848 /*
849  * Atomic operations that C can't guarantee us.  Useful for
850  * resource counting etc..
851  */
852 
853 #ifdef CONFIG_SMP
854 #define LOCK "lock ; "
855 #else
856 #define LOCK ""
857 #endif
858 
859 /*
860  * Make sure gcc doesn't try to be clever and move things around
861  * on us. We need to use _exactly_ the address the user gave us,
862  * not some alias that contains the same information.
863  */
864 typedef struct { volatile int counter; } atomic_t;
865 
866 #define ATOMIC_INIT(i)	{ (i) }
867 
868 /**
869  * atomic_read - read atomic variable
870  * @v: pointer of type atomic_t
871  *
872  * Atomically reads the value of @v.  Note that the guaranteed
873  * useful range of an atomic_t is only 24 bits.
874  */
875 #define atomic_read(v)		((v)->counter)
876 
877 /**
878  * atomic_set - set atomic variable
879  * @v: pointer of type atomic_t
880  * @i: required value
881  *
882  * Atomically sets the value of @v to @i.  Note that the guaranteed
883  * useful range of an atomic_t is only 24 bits.
884  */
885 #define atomic_set(v,i)		(((v)->counter) = (i))
886 
887 /**
888  * atomic_add - add integer to atomic variable
889  * @i: integer value to add
890  * @v: pointer of type atomic_t
891  *
892  * Atomically adds @i to @v.  Note that the guaranteed useful range
893  * of an atomic_t is only 24 bits.
894  */
895 static __inline__ void atomic_add(int i, atomic_t *v)
896 {
897 #if 0
898 	__asm__ __volatile__(
899 		LOCK "addl %1,%0"
900 		:"=m" (v->counter)
901 		:"ir" (i), "m" (v->counter));
902 #endif
903 }
904 
905 /**
906  * atomic_sub - subtract the atomic variable
907  * @i: integer value to subtract
908  * @v: pointer of type atomic_t
909  *
910  * Atomically subtracts @i from @v.  Note that the guaranteed
911  * useful range of an atomic_t is only 24 bits.
912  */
913 static __inline__ void atomic_sub(int i, atomic_t *v)
914 {
915 #if 0
916 	__asm__ __volatile__(
917 		LOCK "subl %1,%0"
918 		:"=m" (v->counter)
919 		:"ir" (i), "m" (v->counter));
920 #endif
921 }
922 
923 /**
924  * atomic_sub_and_test - subtract value from variable and test result
925  * @i: integer value to subtract
926  * @v: pointer of type atomic_t
927  *
928  * Atomically subtracts @i from @v and returns
929  * true if the result is zero, or false for all
930  * other cases.  Note that the guaranteed
931  * useful range of an atomic_t is only 24 bits.
932  */
933 static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
934 {
935 #if 0
936 	unsigned char c;
937 
938 	__asm__ __volatile__(
939 		LOCK "subl %2,%0; sete %1"
940 		:"=m" (v->counter), "=qm" (c)
941 		:"ir" (i), "m" (v->counter) : "memory");
942 	return c;
943 #else
	return 0;
#endif
944 }
945 
946 /**
947  * atomic_inc - increment atomic variable
948  * @v: pointer of type atomic_t
949  *
950  * Atomically increments @v by 1.  Note that the guaranteed
951  * useful range of an atomic_t is only 24 bits.
952  */
953 static __inline__ void atomic_inc(atomic_t *v)
954 {
955 #if 0
956 	__asm__ __volatile__(
957 		LOCK "incl %0"
958 		:"=m" (v->counter)
959 		:"m" (v->counter));
960 #endif
961 }
962 
963 /**
964  * atomic_dec - decrement atomic variable
965  * @v: pointer of type atomic_t
966  *
967  * Atomically decrements @v by 1.  Note that the guaranteed
968  * useful range of an atomic_t is only 24 bits.
969  */
970 static __inline__ void atomic_dec(atomic_t *v)
971 {
972 #if 0
973 	__asm__ __volatile__(
974 		LOCK "decl %0"
975 		:"=m" (v->counter)
976 		:"m" (v->counter));
977 #endif
978 }
979 
980 /**
981  * atomic_dec_and_test - decrement and test
982  * @v: pointer of type atomic_t
983  *
984  * Atomically decrements @v by 1 and
985  * returns true if the result is 0, or false for all other
986  * cases.  Note that the guaranteed
987  * useful range of an atomic_t is only 24 bits.
988  */
989 static __inline__ int atomic_dec_and_test(atomic_t *v)
990 {
991 #if 0
992 	unsigned char c;
993 
994 	__asm__ __volatile__(
995 		LOCK "decl %0; sete %1"
996 		:"=m" (v->counter), "=qm" (c)
997 		:"m" (v->counter) : "memory");
998 	return c != 0;
999 #else
1000   return 1;
1001 #endif
1002 }
1003 
1004 /**
1005  * atomic_inc_and_test - increment and test
1006  * @v: pointer of type atomic_t
1007  *
1008  * Atomically increments @v by 1
1009  * and returns true if the result is zero, or false for all
1010  * other cases.  Note that the guaranteed
1011  * useful range of an atomic_t is only 24 bits.
1012  */
1013 static __inline__ int atomic_inc_and_test(atomic_t *v)
1014 {
1015 #if 0
1016 	unsigned char c;
1017 
1018 	__asm__ __volatile__(
1019 		LOCK "incl %0; sete %1"
1020 		:"=m" (v->counter), "=qm" (c)
1021 		:"m" (v->counter) : "memory");
1022 	return c != 0;
1023 #else
1024   return 1;
1025 #endif
1026 }
1027 
1028 /**
1029  * atomic_add_negative - add and test if negative
1030  * @v: pointer of type atomic_t
1031  * @i: integer value to add
1032  *
1033  * Atomically adds @i to @v and returns true
1034  * if the result is negative, or false when
1035  * result is greater than or equal to zero.  Note that the guaranteed
1036  * useful range of an atomic_t is only 24 bits.
1037  */
1038 static __inline__ int atomic_add_negative(int i, atomic_t *v)
1039 {
1040 #if 0
1041 	unsigned char c;
1042 
1043 	__asm__ __volatile__(
1044 		LOCK "addl %2,%0; sets %1"
1045 		:"=m" (v->counter), "=qm" (c)
1046 		:"ir" (i), "m" (v->counter) : "memory");
1047 	return c;
1048 #else
1049   return 0;
1050 #endif
1051 }
1052 
1053 /* These are x86-specific, used by some header files */
1054 #define atomic_clear_mask(mask, addr)
1055 #if 0
1056 __asm__ __volatile__(LOCK "andl %0,%1" \
1057 : : "r" (~(mask)),"m" (*addr) : "memory")
1058 #endif
1059 
1060 #define atomic_set_mask(mask, addr)
1061 #if 0
1062 __asm__ __volatile__(LOCK "orl %0,%1" \
1063 : : "r" (mask),"m" (*addr) : "memory")
1064 #endif
1065 
1066 /* Atomic operations are already serializing on x86 */
1067 #define smp_mb__before_atomic_dec()
1068 #define smp_mb__after_atomic_dec()
1069 #define smp_mb__before_atomic_inc()
1070 #define smp_mb__after_atomic_inc()
1071 
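/*
 * Illustrative sketch (editor's addition): the classic reference-count
 * pattern the atomic_* operations above are documented for.  Note that in
 * this stub header most of the operation bodies are compiled out, so this
 * only shows the intended Linux-style usage; "struct example_object" is
 * made up, and kfree() is declared further down in this header.
 */
#if 0
struct example_object {
	atomic_t refcount;
};

static void example_get(struct example_object *obj)
{
	atomic_inc(&obj->refcount);
}

static void example_put(struct example_object *obj)
{
	if (atomic_dec_and_test(&obj->refcount))
		kfree(obj);     /* last reference dropped */
}
#endif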
1072 
1073 
1074 #endif /* atomic */
1075 
1076 
1077 
1078 
1079 
1080 #if 1 /* list */
1081 
1082 struct list_head {
1083 	struct list_head *next, *prev;
1084 };
1085 
1086 #define LIST_HEAD_INIT(name) { &(name), &(name) }
1087 
1088 #define LIST_HEAD(name) \
1089 	struct list_head name = LIST_HEAD_INIT(name)
1090 
1091 #define INIT_LIST_HEAD(ptr) do { \
1092 	(ptr)->next = (ptr); (ptr)->prev = (ptr); \
1093 } while (0)
1094 
1095 /*
1096  * Insert a new entry between two known consecutive entries.
1097  *
1098  * This is only for internal list manipulation where we know
1099  * the prev/next entries already!
1100  */
1101 static inline void __list_add(struct list_head *new,
1102 			      struct list_head *prev,
1103 			      struct list_head *next)
1104 {
1105 #if 0
1106 	next->prev = new;
1107 	new->next = next;
1108 	new->prev = prev;
1109 	prev->next = new;
1110 #endif
1111 }
1112 
1113 /**
1114  * list_add - add a new entry
1115  * @new: new entry to be added
1116  * @head: list head to add it after
1117  *
1118  * Insert a new entry after the specified head.
1119  * This is good for implementing stacks.
1120  */
1121 static inline void list_add(struct list_head *new, struct list_head *head)
1122 {
1123 #if 0
1124 	__list_add(new, head, head->next);
1125 #endif
1126 }
1127 
1128 /**
1129  * list_add_tail - add a new entry
1130  * @new: new entry to be added
1131  * @head: list head to add it before
1132  *
1133  * Insert a new entry before the specified head.
1134  * This is useful for implementing queues.
1135  */
1136 static inline void list_add_tail(struct list_head *new, struct list_head *head)
1137 {
1138 #if 0
1139 	__list_add(new, head->prev, head);
1140 #endif
1141 }
1142 
1143 /*
1144  * Delete a list entry by making the prev/next entries
1145  * point to each other.
1146  *
1147  * This is only for internal list manipulation where we know
1148  * the prev/next entries already!
1149  */
1150 static inline void __list_del(struct list_head *prev, struct list_head *next)
1151 {
1152 	next->prev = prev;
1153 	prev->next = next;
1154 }
1155 
1156 /**
1157  * list_del - deletes entry from list.
1158  * @entry: the element to delete from the list.
1159  * Note: list_empty on entry does not return true after this, the entry is in an undefined state.
1160  */
1161 static inline void list_del(struct list_head *entry)
1162 {
1163 #if 0
1164 	__list_del(entry->prev, entry->next);
1165 	entry->next = (void *) 0;
1166 	entry->prev = (void *) 0;
1167 #endif
1168 }
1169 
1170 /**
1171  * list_del_init - deletes entry from list and reinitialize it.
1172  * @entry: the element to delete from the list.
1173  */
1174 static inline void list_del_init(struct list_head *entry)
1175 {
1176 #if 0
1177 	__list_del(entry->prev, entry->next);
1178 	INIT_LIST_HEAD(entry);
1179 #endif
1180 }
1181 
1182 /**
1183  * list_move - delete from one list and add as another's head
1184  * @list: the entry to move
1185  * @head: the head that will precede our entry
1186  */
1187 static inline void list_move(struct list_head *list, struct list_head *head)
1188 {
1189 #if 0
1190         __list_del(list->prev, list->next);
1191         list_add(list, head);
1192 #endif
1193 }
1194 
1195 /**
1196  * list_move_tail - delete from one list and add as another's tail
1197  * @list: the entry to move
1198  * @head: the head that will follow our entry
1199  */
1200 static inline void list_move_tail(struct list_head *list,
1201 				  struct list_head *head)
1202 {
1203 #if 0
1204         __list_del(list->prev, list->next);
1205         list_add_tail(list, head);
1206 #endif
1207 }
1208 
1209 /**
1210  * list_empty - tests whether a list is empty
1211  * @head: the list to test.
1212  */
1213 static inline int list_empty(struct list_head *head)
1214 {
1215 	return head->next == head;
1216 }
1217 
1218 static inline void __list_splice(struct list_head *list,
1219 				 struct list_head *head)
1220 {
1221 #if 0
1222 	struct list_head *first = list->next;
1223 	struct list_head *last = list->prev;
1224 	struct list_head *at = head->next;
1225 
1226 	first->prev = head;
1227 	head->next = first;
1228 
1229 	last->next = at;
1230 	at->prev = last;
1231 #endif
1232 }
1233 
1234 /**
1235  * list_splice - join two lists
1236  * @list: the new list to add.
1237  * @head: the place to add it in the first list.
1238  */
1239 static inline void list_splice(struct list_head *list, struct list_head *head)
1240 {
1241 #if 0
1242 	if (!list_empty(list))
1243 		__list_splice(list, head);
1244 #endif
1245 }
1246 
1247 /**
1248  * list_splice_init - join two lists and reinitialise the emptied list.
1249  * @list: the new list to add.
1250  * @head: the place to add it in the first list.
1251  *
1252  * The list at @list is reinitialised
1253  */
1254 static inline void list_splice_init(struct list_head *list,
1255 				    struct list_head *head)
1256 {
1257 #if 0
1258 	if (!list_empty(list)) {
1259 		__list_splice(list, head);
1260 		INIT_LIST_HEAD(list);
1261 	}
1262 #endif
1263 }
1264 
1265 /**
1266  * list_entry - get the struct for this entry
1267  * @ptr:	the &struct list_head pointer.
1268  * @type:	the type of the struct this is embedded in.
1269  * @member:	the name of the list_struct within the struct.
1270  */
1271 #define list_entry(ptr, type, member)
1272 #if 0
1273 	((type *)((char *)(ptr)-(unsigned long)(&((type *)0)->member)))
1274 #endif
1275 
1276 /**
1277  * list_for_each	-	iterate over a list
1278  * @pos:	the &struct list_head to use as a loop counter.
1279  * @head:	the head for your list.
1280  */
1281 #define list_for_each(pos, head)
1282 #if 0
1283 	for (pos = (head)->next, prefetch(pos->next); pos != (head); \
1284         	pos = pos->next, prefetch(pos->next))
1285 #endif
1286 
1287 /**
1288  * list_for_each_prev	-	iterate over a list backwards
1289  * @pos:	the &struct list_head to use as a loop counter.
1290  * @head:	the head for your list.
1291  */
1292 #define list_for_each_prev(pos, head)
1293 #if 0
1294 	for (pos = (head)->prev, prefetch(pos->prev); pos != (head); \
1295         	pos = pos->prev, prefetch(pos->prev))
1296 #endif
1297 
1298 /**
1299  * list_for_each_safe	-	iterate over a list safe against removal of list entry
1300  * @pos:	the &struct list_head to use as a loop counter.
1301  * @n:		another &struct list_head to use as temporary storage
1302  * @head:	the head for your list.
1303  */
1304 #define list_for_each_safe(pos, n, head)
1305 #if 0
1306 	for (pos = (head)->next, n = pos->next; pos != (head); \
1307 		pos = n, n = pos->next)
1308 #endif
1309 
1310 /**
1311  * list_for_each_entry	-	iterate over list of given type
1312  * @pos:	the type * to use as a loop counter.
1313  * @head:	the head for your list.
1314  * @member:	the name of the list_struct within the struct.
1315  */
1316 #define list_for_each_entry(pos, head, member)
1317 #if 0
1318 	for (pos = list_entry((head)->next, typeof(*pos), member),	\
1319 		     prefetch(pos->member.next);			\
1320 	     &pos->member != (head); 					\
1321 	     pos = list_entry(pos->member.next, typeof(*pos), member),	\
1322 		     prefetch(pos->member.next))
1323 #endif
1324 
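/*
 * Illustrative sketch (editor's addition): struct list_head is embedded
 * inside the element type and the element is recovered with list_entry().
 * list_entry() and the list_for_each* iterators are stubbed out in this
 * header, so the sketch below assumes the full Linux definitions shown in
 * the disabled bodies above; "struct example_item" is made up.
 */
#if 0
struct example_item {
	int value;
	struct list_head link;
};

static LIST_HEAD(example_list);

static void example_list_use(struct example_item *item)
{
	struct list_head *pos;

	list_add_tail(&item->link, &example_list);       /* enqueue       */

	list_for_each(pos, &example_list) {              /* walk the list */
		struct example_item *cur =
			list_entry(pos, struct example_item, link);
		(void)cur->value;
	}

	list_del(&item->link);                           /* unlink again  */
}
#endif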
1325 #endif /* list */
1326 
1327 
1328 
1329 
1330 
1331 #if 1 /* wait */
1332 
1333 #define WNOHANG		0x00000001
1334 #define WUNTRACED	0x00000002
1335 
1336 #define __WNOTHREAD	0x20000000	/* Don't wait on children of other threads in this group */
1337 #define __WALL		0x40000000	/* Wait on all children, regardless of type */
1338 #define __WCLONE	0x80000000	/* Wait only on non-SIGCHLD children */
1339 
1340 #if 0
1341 #include <linux/kernel.h>
1342 #include <linux/list.h>
1343 #include <linux/stddef.h>
1344 #include <linux/spinlock.h>
1345 #include <linux/config.h>
1346 
1347 #include <asm/page.h>
1348 #include <asm/processor.h>
1349 #endif
1350 
1351 /*
1352  * Debug control.  Slow but useful.
1353  */
1354 #if defined(CONFIG_DEBUG_WAITQ)
1355 #define WAITQUEUE_DEBUG 1
1356 #else
1357 #define WAITQUEUE_DEBUG 0
1358 #endif
1359 
1360 struct __wait_queue {
1361 	unsigned int flags;
1362 #define WQ_FLAG_EXCLUSIVE	0x01
1363 	struct task_struct * task;
1364 	struct list_head task_list;
1365 #if WAITQUEUE_DEBUG
1366 	long __magic;
1367 	long __waker;
1368 #endif
1369 };
1370 typedef struct __wait_queue wait_queue_t;
1371 
1372 /*
1373  * 'dual' spinlock architecture. Can be switched between spinlock_t and
1374  * rwlock_t locks via changing this define. Since waitqueues are quite
1375  * decoupled in the new architecture, lightweight 'simple' spinlocks give
1376  * us slightly better latencies and smaller waitqueue structure size.
1377  */
1378 #define USE_RW_WAIT_QUEUE_SPINLOCK 0
1379 
1380 #if USE_RW_WAIT_QUEUE_SPINLOCK
1381 # define wq_lock_t rwlock_t
1382 # define WAITQUEUE_RW_LOCK_UNLOCKED RW_LOCK_UNLOCKED
1383 
1384 # define wq_read_lock read_lock
1385 # define wq_read_lock_irqsave read_lock_irqsave
1386 # define wq_read_unlock_irqrestore read_unlock_irqrestore
1387 # define wq_read_unlock read_unlock
1388 # define wq_write_lock_irq write_lock_irq
1389 # define wq_write_lock_irqsave write_lock_irqsave
1390 # define wq_write_unlock_irqrestore write_unlock_irqrestore
1391 # define wq_write_unlock write_unlock
1392 #else
1393 # define wq_lock_t spinlock_t
1394 # define WAITQUEUE_RW_LOCK_UNLOCKED SPIN_LOCK_UNLOCKED
1395 
1396 # define wq_read_lock spin_lock
1397 # define wq_read_lock_irqsave spin_lock_irqsave
1398 # define wq_read_unlock spin_unlock
1399 # define wq_read_unlock_irqrestore spin_unlock_irqrestore
1400 # define wq_write_lock_irq spin_lock_irq
1401 # define wq_write_lock_irqsave spin_lock_irqsave
1402 # define wq_write_unlock_irqrestore spin_unlock_irqrestore
1403 # define wq_write_unlock spin_unlock
1404 #endif
1405 
1406 struct __wait_queue_head {
1407 	wq_lock_t lock;
1408 	struct list_head task_list;
1409 #if WAITQUEUE_DEBUG
1410 	long __magic;
1411 	long __creator;
1412 #endif
1413 };
1414 typedef struct __wait_queue_head wait_queue_head_t;
1415 
1416 
1417 /*
1418  * Debugging macros.  We eschew `do { } while (0)' because gcc can generate
1419  * spurious .aligns.
1420  */
1421 #if WAITQUEUE_DEBUG
1422 #define WQ_BUG()	BUG()
1423 #define CHECK_MAGIC(x)
1424 #if 0
1425 	do {									\
1426 		if ((x) != (long)&(x)) {					\
1427 			printk("bad magic %lx (should be %lx), ",		\
1428 				(long)x, (long)&(x));				\
1429 			WQ_BUG();						\
1430 		}								\
1431 	} while (0)
1432 #endif
1433 
1434 #define CHECK_MAGIC_WQHEAD(x)
1435 #if 0
1436 	do {									\
1437 		if ((x)->__magic != (long)&((x)->__magic)) {			\
1438 			printk("bad magic %lx (should be %lx, creator %lx), ",	\
1439 			(x)->__magic, (long)&((x)->__magic), (x)->__creator);	\
1440 			WQ_BUG();						\
1441 		}								\
1442 	} while (0)
1443 #endif
1444 
1445 #define WQ_CHECK_LIST_HEAD(list)
1446 #if 0
1447 	do {									\
1448 		if (!(list)->next || !(list)->prev)				\
1449 			WQ_BUG();						\
1450 	} while(0)
1451 #endif
1452 
1453 #define WQ_NOTE_WAKER(tsk)
1454 #if 0
1455 	do {									\
1456 		(tsk)->__waker = (long)__builtin_return_address(0);		\
1457 	} while (0)
1458 #endif
1459 #else
1460 #define WQ_BUG()
1461 #define CHECK_MAGIC(x)
1462 #define CHECK_MAGIC_WQHEAD(x)
1463 #define WQ_CHECK_LIST_HEAD(list)
1464 #define WQ_NOTE_WAKER(tsk)
1465 #endif
1466 
1467 /*
1468  * Macros for declaration and initialisation of the datatypes
1469  */
1470 
1471 #if WAITQUEUE_DEBUG
1472 # define __WAITQUEUE_DEBUG_INIT(name) //(long)&(name).__magic, 0
1473 # define __WAITQUEUE_HEAD_DEBUG_INIT(name) //(long)&(name).__magic, (long)&(name).__magic
1474 #else
1475 # define __WAITQUEUE_DEBUG_INIT(name)
1476 # define __WAITQUEUE_HEAD_DEBUG_INIT(name)
1477 #endif
1478 
1479 #define __WAITQUEUE_INITIALIZER(name, tsk)
1480 #if 0
1481 {
1482 	task:		tsk,						\
1483 	task_list:	{ NULL, NULL },					\
1484 			 __WAITQUEUE_DEBUG_INIT(name)}
1485 #endif
1486 
1487 #define DECLARE_WAITQUEUE(name, tsk)
1488 #if 0
1489 	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)
1490 #endif
1491 
1492 #define __WAIT_QUEUE_HEAD_INITIALIZER(name)
1493 #if 0
1494 {
1495 	lock:		WAITQUEUE_RW_LOCK_UNLOCKED,			\
1496 	task_list:	{ &(name).task_list, &(name).task_list },	\
1497 			__WAITQUEUE_HEAD_DEBUG_INIT(name)}
1498 #endif
1499 
1500 #define DECLARE_WAIT_QUEUE_HEAD(name)
1501 #if 0
1502 	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
1503 #endif
1504 
1505 static inline void init_waitqueue_head(wait_queue_head_t *q)
1506 {
1507 #if 0
1508 #if WAITQUEUE_DEBUG
1509 	if (!q)
1510 		WQ_BUG();
1511 #endif
1512 	q->lock = WAITQUEUE_RW_LOCK_UNLOCKED;
1513 	INIT_LIST_HEAD(&q->task_list);
1514 #if WAITQUEUE_DEBUG
1515 	q->__magic = (long)&q->__magic;
1516 	q->__creator = (long)current_text_addr();
1517 #endif
1518 #endif
1519 }
1520 
1521 static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
1522 {
1523 #if 0
1524 #if WAITQUEUE_DEBUG
1525 	if (!q || !p)
1526 		WQ_BUG();
1527 #endif
1528 	q->flags = 0;
1529 	q->task = p;
1530 #if WAITQUEUE_DEBUG
1531 	q->__magic = (long)&q->__magic;
1532 #endif
1533 #endif
1534 }
1535 
1536 static inline int waitqueue_active(wait_queue_head_t *q)
1537 {
1538 #if 0
1539 #if WAITQUEUE_DEBUG
1540 	if (!q)
1541 		WQ_BUG();
1542 	CHECK_MAGIC_WQHEAD(q);
1543 #endif
1544 
1545 	return !list_empty(&q->task_list);
1546 #else
	return 0;
#endif
1547 }
1548 
1549 static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
1550 {
1551 #if 0
1552 #if WAITQUEUE_DEBUG
1553 	if (!head || !new)
1554 		WQ_BUG();
1555 	CHECK_MAGIC_WQHEAD(head);
1556 	CHECK_MAGIC(new->__magic);
1557 	if (!head->task_list.next || !head->task_list.prev)
1558 		WQ_BUG();
1559 #endif
1560 	list_add(&new->task_list, &head->task_list);
1561 #endif
1562 }
1563 
1564 /*
1565  * Used for wake-one threads:
1566  */
1567 static inline void __add_wait_queue_tail(wait_queue_head_t *head,
1568 						wait_queue_t *new)
1569 {
1570 #if 0
1571 #if WAITQUEUE_DEBUG
1572 	if (!head || !new)
1573 		WQ_BUG();
1574 	CHECK_MAGIC_WQHEAD(head);
1575 	CHECK_MAGIC(new->__magic);
1576 	if (!head->task_list.next || !head->task_list.prev)
1577 		WQ_BUG();
1578 #endif
1579 	list_add_tail(&new->task_list, &head->task_list);
1580 #endif
1581 }
1582 
1583 static inline void __remove_wait_queue(wait_queue_head_t *head,
1584 							wait_queue_t *old)
1585 {
1586 #if 0
1587 #if WAITQUEUE_DEBUG
1588 	if (!old)
1589 		WQ_BUG();
1590 	CHECK_MAGIC(old->__magic);
1591 #endif
1592 	list_del(&old->task_list);
1593 #endif
1594 }
1595 
1596 
1597 
1598 
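/*
 * Illustrative sketch (editor's addition): the intended Linux-style use of
 * the wait-queue types above.  The add/remove helpers are stubbed out in
 * this header and "current" (the running task) is not declared here at all,
 * so this is purely an illustration of the declare/init/add pattern.
 */
#if 0
static wait_queue_head_t example_wq;

static void example_wait_setup(void)
{
	wait_queue_t wait;

	init_waitqueue_head(&example_wq);
	init_waitqueue_entry(&wait, current);
	__add_wait_queue(&example_wq, &wait);
	/* ... sleep until woken, then: ... */
	__remove_wait_queue(&example_wq, &wait);
}
#endif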
1599 #endif /* wait */
1600 
1601 
1602 #endif
1603 
1604 
1605 
1606 
1607 #if 1 /* slab */
1608 
1609 struct kmem_cache_s
1610 {
1611  int x;
1612 };
1613 
1614 typedef struct kmem_cache_s kmem_cache_t;
1615 
1616 #if 0
1617 #include	<linux/mm.h>
1618 #include	<linux/cache.h>
1619 #endif
1620 
1621 /* flags for kmem_cache_alloc() */
1622 #define	SLAB_NOFS		GFP_NOFS
1623 #define	SLAB_NOIO		GFP_NOIO
1624 #define SLAB_NOHIGHIO		GFP_NOHIGHIO
1625 #define	SLAB_ATOMIC		GFP_ATOMIC
1626 #define	SLAB_USER		GFP_USER
1627 #define	SLAB_KERNEL		GFP_KERNEL
1628 #define	SLAB_NFS		GFP_NFS
1629 #define	SLAB_DMA		GFP_DMA
1630 
1631 #define SLAB_LEVEL_MASK		(__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_HIGHIO|__GFP_FS)
1632 #define	SLAB_NO_GROW		0x00001000UL	/* don't grow a cache */
1633 
1634 /* flags to pass to kmem_cache_create().
1635  * The first 3 are only valid when the allocator has been built with
1636  * SLAB_DEBUG_SUPPORT.
1637  */
1638 #define	SLAB_DEBUG_FREE		0x00000100UL	/* Perform (expensive) checks on free */
1639 #define	SLAB_DEBUG_INITIAL	0x00000200UL	/* Call constructor (as verifier) */
1640 #define	SLAB_RED_ZONE		0x00000400UL	/* Red zone objs in a cache */
1641 #define	SLAB_POISON		0x00000800UL	/* Poison objects */
1642 #define	SLAB_NO_REAP		0x00001000UL	/* never reap from the cache */
1643 #define	SLAB_HWCACHE_ALIGN	0x00002000UL	/* align objs on a h/w cache lines */
1644 #define SLAB_CACHE_DMA		0x00004000UL	/* use GFP_DMA memory */
1645 #define SLAB_MUST_HWCACHE_ALIGN	0x00008000UL	/* force alignment */
1646 
1647 /* flags passed to a constructor func */
1648 #define	SLAB_CTOR_CONSTRUCTOR	0x001UL		/* if not set, then deconstructor */
1649 #define	SLAB_CTOR_CONSTRUCTOR	0x001UL		/* if not set, then destructor */
1650 #define	SLAB_CTOR_VERIFY	0x004UL		/* tell constructor it's a verify call */
1651 
1652 /* prototypes */
1653 extern void kmem_cache_init(void);
1654 extern void kmem_cache_sizes_init(void);
1655 
1656 extern kmem_cache_t *kmem_find_general_cachep(size_t, int gfpflags);
1657 extern kmem_cache_t *kmem_cache_create(const char *, size_t, size_t, unsigned long,
1658 				       void (*)(void *, kmem_cache_t *, unsigned long),
1659 				       void (*)(void *, kmem_cache_t *, unsigned long));
1660 extern int kmem_cache_destroy(kmem_cache_t *);
1661 extern int kmem_cache_shrink(kmem_cache_t *);
1662 extern void *kmem_cache_alloc(kmem_cache_t *, int);
1663 extern void kmem_cache_free(kmem_cache_t *, void *);
1664 extern unsigned int kmem_cache_size(kmem_cache_t *);
1665 
1666 extern void *kmalloc(size_t, int);
1667 extern void kfree(const void *);
1668 
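/*
 * Illustrative sketch (editor's addition): the usual create/alloc/free
 * sequence for the cache API declared above, using flags from this header.
 * "struct example_pcb" and the function names are made up.
 */
#if 0
struct example_pcb { int state; };

static kmem_cache_t *example_cachep;

static int example_cache_setup(void)
{
	example_cachep = kmem_cache_create("example_pcb",
					   sizeof(struct example_pcb), 0,
					   SLAB_HWCACHE_ALIGN, NULL, NULL);
	return example_cachep ? 0 : -1;
}

static struct example_pcb *example_pcb_alloc(void)
{
	return kmem_cache_alloc(example_cachep, SLAB_KERNEL);
}

static void example_pcb_free(struct example_pcb *pcb)
{
	kmem_cache_free(example_cachep, pcb);
}
#endif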
1669 //extern int FASTCALL(kmem_cache_reap(int));
1670 
1671 /* System wide caches */
1672 extern kmem_cache_t	*vm_area_cachep;
1673 extern kmem_cache_t	*mm_cachep;
1674 extern kmem_cache_t	*names_cachep;
1675 extern kmem_cache_t	*files_cachep;
1676 extern kmem_cache_t	*filp_cachep;
1677 extern kmem_cache_t	*dquot_cachep;
1678 extern kmem_cache_t	*bh_cachep;
1679 extern kmem_cache_t	*fs_cachep;
1680 extern kmem_cache_t	*sigact_cachep;
1681 
1682 #endif /* slab */
1683 
1684 
1685 
1686 /*
1687  *	Berkeley style UIO structures	-	Alan Cox 1994.
1688  *
1689  *		This program is free software; you can redistribute it and/or
1690  *		modify it under the terms of the GNU General Public License
1691  *		as published by the Free Software Foundation; either version
1692  *		2 of the License, or (at your option) any later version.
1693  */
1694 
1695 
1696 /* A word of warning: Our uio structure will clash with the C library one (which is now obsolete). Remove the C
1697    library one from sys/uio.h if you have a very old library set */
1698 
1699 struct iovec
1700 {
1701 	void *iov_base;		/* BSD uses caddr_t (1003.1g requires void *) */
1702 	__kernel_size_t iov_len; /* Must be size_t (1003.1g) */
1703 };
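/*
 * Illustrative sketch (editor's addition): an iovec array describes a
 * scatter/gather buffer list, one {base, length} pair per segment, as used
 * by writev()-style interfaces.  The helper below is made up.
 */
#if 0
static void example_iovec(char *hdr, __kernel_size_t hdrlen,
			  char *payload, __kernel_size_t paylen)
{
	struct iovec iov[2];

	iov[0].iov_base = hdr;      /* first segment: the header */
	iov[0].iov_len  = hdrlen;
	iov[1].iov_base = payload;  /* second segment: the data  */
	iov[1].iov_len  = paylen;
	/* pass iov and a count of 2 to a writev-style routine */
}
#endif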
1704 
1705 /*
1706  *	UIO_MAXIOV shall be at least 16 1003.1g (5.4.1.1)
1707  */
1708 
1709 #define UIO_FASTIOV	8
1710 #define UIO_MAXIOV	1024
1711 #if 0
1712 #define UIO_MAXIOV	16	/* Maximum iovec's in one operation
1713 				   16 matches BSD */
1714                                 /* Beg pardon: BSD has 1024 --ANK */
1715 #endif
1716 
1717 
1718 
1719 /*
1720  * In Linux 2.4, static timers have been removed from the kernel.
1721  * Timers may be dynamically created and destroyed, and should be initialized
1722  * by a call to init_timer() upon creation.
1723  *
1724  * The "data" field enables use of a common timeout function for several
1725  * timeouts. You can use this field to distinguish between the different
1726  * invocations.
1727  */
1728 struct timer_list {
1729 	struct list_head list;
1730 	unsigned long expires;
1731 	unsigned long data;
1732 	void (*function)(unsigned long);
1733 };
1734 
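/*
 * Illustrative sketch (editor's addition): the usual Linux pattern the timer
 * comment above describes.  init_timer()/add_timer() are Linux APIs that are
 * not declared in this header; they and "struct example_conn" appear only to
 * show how the "data" field selects the object inside a shared timeout
 * handler.
 */
#if 0
static void example_timeout(unsigned long data)
{
	struct example_conn *conn = (struct example_conn *)data;
	/* handle the expiry for this particular connection */
}

static void example_arm_timer(struct timer_list *timer,
			      struct example_conn *conn,
			      unsigned long expires)
{
	init_timer(timer);
	timer->function = example_timeout;
	timer->data     = (unsigned long)conn;
	timer->expires  = expires;            /* absolute time, in jiffies */
	add_timer(timer);
}
#endif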
1735 
1736 
1737 struct timeval {
1738   unsigned long tv_sec;
1739   unsigned long tv_usec;
1740 //	time_t		tv_sec;		/* seconds */
1741 //	suseconds_t	tv_usec;	/* microseconds */
1742 };
1743 
1744 
1745 
1746 
1747 
1748 
1749 
1750 #if 1 /* poll */
1751 
1752 struct file;
1753 
1754 struct poll_table_page;
1755 
1756 typedef struct poll_table_struct {
1757 	int error;
1758 	struct poll_table_page * table;
1759 } poll_table;
1760 
1761 extern void __pollwait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p);
1762 
1763 static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p)
1764 {
1765 	if (p && wait_address)
1766 		__pollwait(filp, wait_address, p);
1767 }
1768 
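/*
 * Illustrative sketch (editor's addition): how a driver-style poll method
 * uses poll_wait() -- register the wait queue, then report readiness with
 * the POLL* bits defined further down.  "struct example_dev" and its members
 * are made up, and the extra device parameter is only for illustration.
 */
#if 0
static unsigned int example_poll(struct file *filp, poll_table *wait,
				 struct example_dev *dev)
{
	unsigned int mask = 0;

	poll_wait(filp, &dev->read_wq, wait);   /* never blocks by itself */

	if (dev->rx_ready)
		mask |= POLLIN | POLLRDNORM;
	if (dev->tx_space)
		mask |= POLLOUT | POLLWRNORM;

	return mask;
}
#endif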
1769 static inline void poll_initwait(poll_table* pt)
1770 {
1771 	pt->error = 0;
1772 	pt->table = NULL;
1773 }
1774 extern void poll_freewait(poll_table* pt);
1775 
1776 
1777 /*
1778  * Scalable version of the fd_set.
1779  */
1780 
1781 typedef struct {
1782 	unsigned long *in, *out, *ex;
1783 	unsigned long *res_in, *res_out, *res_ex;
1784 } fd_set_bits;
1785 
1786 /*
1787  * How many longwords for "nr" bits?
1788  */
1789 #define FDS_BITPERLONG	(8*sizeof(long))
1790 #define FDS_LONGS(nr)	(((nr)+FDS_BITPERLONG-1)/FDS_BITPERLONG)
1791 #define FDS_BYTES(nr)	(FDS_LONGS(nr)*sizeof(long))
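/*
 * Editor's note (illustrative): with a 32-bit long, FDS_BITPERLONG is 32,
 * so FDS_LONGS(1024) == 32 and FDS_BYTES(1024) == 128, while an odd size
 * such as FDS_LONGS(100) rounds up to 4 longwords.
 */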
1792 
1793 /*
1794  * We do a VERIFY_WRITE here even though we are only reading this time:
1795  * we'll write to it eventually..
1796  *
1797  * Use "unsigned long" accesses to let user-mode fd_set's be long-aligned.
1798  */
1799 static inline
1800 int get_fd_set(unsigned long nr, void *ufdset, unsigned long *fdset)
1801 {
1802 #if 0
1803 	nr = FDS_BYTES(nr);
1804 	if (ufdset) {
1805 		int error;
1806 		error = verify_area(VERIFY_WRITE, ufdset, nr);
1807 		if (!error && __copy_from_user(fdset, ufdset, nr))
1808 			error = -EFAULT;
1809 		return error;
1810 	}
1811 	memset(fdset, 0, nr);
1812 	return 0;
1813 #else
1814 	return 0;
1815 #endif
1816 }
1817 
1818 static inline
1819 void set_fd_set(unsigned long nr, void *ufdset, unsigned long *fdset)
1820 {
1821 #if 0
1822 	if (ufdset)
1823 		__copy_to_user(ufdset, fdset, FDS_BYTES(nr));
1824 #endif
1825 }
1826 
1827 static inline
1828 void zero_fd_set(unsigned long nr, unsigned long *fdset)
1829 {
1830 #if 0
1831 	memset(fdset, 0, FDS_BYTES(nr));
1832 #endif
1833 }
1834 
1835 extern int do_select(int n, fd_set_bits *fds, long *timeout);
1836 
1837 #endif /* poll */
1838 
1839 
1840 
1841 typedef struct
1842 {
1843   int x;
1844 } read_descriptor_t;
1845 
1846 
1847 
1848 
1849 
1850 #if 1 /* poll */
1851 
1852 /* These are specified by iBCS2 */
1853 #define POLLIN		0x0001
1854 #define POLLPRI		0x0002
1855 #define POLLOUT		0x0004
1856 #define POLLERR		0x0008
1857 #define POLLHUP		0x0010
1858 #define POLLNVAL	0x0020
1859 
1860 /* The rest seem to be more-or-less nonstandard. Check them! */
1861 #define POLLRDNORM	0x0040
1862 #define POLLRDBAND	0x0080
1863 #define POLLWRNORM	0x0100
1864 #define POLLWRBAND	0x0200
1865 #define POLLMSG		0x0400
1866 
1867 struct pollfd {
1868 	int fd;
1869 	short events;
1870 	short revents;
1871 };
1872 
1873 #endif /* poll */
1874