1 /*
2  * COPYRIGHT:        See COPYRIGHT.TXT
3  * PROJECT:          Ext2 File System Driver for WinNT/2K/XP
4  * FILE:             Modules.h
5  * PURPOSE:          Header file: nls structures & linux kernel ...
6  * PROGRAMMER:       Matt Wu <mattwu@163.com>
7  * HOMEPAGE:         http://www.ext2fsd.com
8  * UPDATE HISTORY:
9  */
10 
11 #ifndef _EXT2_MODULE_HEADER_
12 #define _EXT2_MODULE_HEADER_
13 
14 /* INCLUDES *************************************************************/
15 
16 #include <linux/types.h>
17 #include <linux/errno.h>
18 #include <linux/rbtree.h>
19 #include <linux/fs.h>
20 #include <linux/log2.h>
21 
22 #if _WIN32_WINNT <= 0x500
23 #define _WIN2K_TARGET_ 1
24 #endif
25 
26 /* STRUCTS ******************************************************/
27 
#ifndef offsetof
/* Offset in bytes of 'member' within 'type' (classic null-pointer idiom). */
# define offsetof(type, member) ((ULONG_PTR)&(((type *)0)->member))
#endif

#ifndef container_of
/*
 * Recover a pointer to the enclosing structure from a pointer to one of
 * its members.  The member pointer is parenthesized so expression
 * arguments expand safely, and the offset is subtracted as a plain byte
 * count (pointer - integer) rather than being cast to a pointer first.
 */
#define container_of(ptr, type, member)                  \
                ((type *)((char *)(ptr) - offsetof(type, member)))
#endif
36 
37 //
38 // Byte order swapping routines
39 //
40 
41 /* use the runtime routine or compiler's implementation */
42 #if (defined(_M_IX86) && (_MSC_FULL_VER > 13009037)) || \
43     ((defined(_M_AMD64) || defined(_M_IA64)) &&         \
44      (_MSC_FULL_VER > 13009175))
45 #ifdef __cplusplus
46 extern "C" {
47 #endif
48     unsigned short __cdecl _byteswap_ushort(unsigned short);
49     unsigned long  __cdecl _byteswap_ulong (unsigned long);
50     unsigned __int64 __cdecl _byteswap_uint64(unsigned __int64);
51 #ifdef __cplusplus
52 }
53 #endif
54 #pragma intrinsic(_byteswap_ushort)
55 #pragma intrinsic(_byteswap_ulong)
56 #pragma intrinsic(_byteswap_uint64)
57 
58 #define RtlUshortByteSwap(_x)    _byteswap_ushort((USHORT)(_x))
59 #define RtlUlongByteSwap(_x)     _byteswap_ulong((_x))
60 #define RtlUlonglongByteSwap(_x) _byteswap_uint64((_x))
61 
62 #else
63 #ifndef __REACTOS__
64 
65 USHORT
66 FASTCALL
67 RtlUshortByteSwap(
68     IN USHORT Source
69 );
70 
71 ULONG
72 FASTCALL
73 RtlUlongByteSwap(
74     IN ULONG Source
75 );
76 
77 ULONGLONG
78 FASTCALL
79 RtlUlonglongByteSwap(
80     IN ULONGLONG Source
81 );
82 #endif //#ifndef __REACTOS__
83 #endif
84 
/* Map the Linux byte-swap helpers onto the NT RTL byte-swap routines. */
#define __swab16(x) RtlUshortByteSwap(x)
#define __swab32(x) RtlUlongByteSwap(x)
#define __swab64(x) RtlUlonglongByteSwap(x)

/*
 * The "__constant" variants are identical to the runtime ones here.
 * The 16-bit alias was previously missing, so __constant_htons(),
 * __constant_ntohs(), __constant_cpu_to_be16() and
 * __constant_be16_to_cpu() expanded to an undefined symbol when used.
 */
#define __constant_swab16  __swab16
#define __constant_swab32  __swab32
#define __constant_swab64  __swab64

#define __constant_htonl(x) __constant_swab32((x))
#define __constant_ntohl(x) __constant_swab32((x))
#define __constant_htons(x) __constant_swab16((x))
#define __constant_ntohs(x) __constant_swab16((x))
/* The target CPUs are little-endian: cpu<->le conversions are identity
 * casts, cpu<->be conversions byte-swap. */
#define __constant_cpu_to_le64(x) ((__u64)(x))
#define __constant_le64_to_cpu(x) ((__u64)(x))
#define __constant_cpu_to_le32(x) ((__u32)(x))
#define __constant_le32_to_cpu(x) ((__u32)(x))
#define __constant_cpu_to_le16(x) ((__u16)(x))
#define __constant_le16_to_cpu(x) ((__u16)(x))
#define __constant_cpu_to_be64(x) __constant_swab64((x))
#define __constant_be64_to_cpu(x) __constant_swab64((x))
#define __constant_cpu_to_be32(x) __constant_swab32((x))
#define __constant_be32_to_cpu(x) __constant_swab32((x))
#define __constant_cpu_to_be16(x) __constant_swab16((x))
#define __constant_be16_to_cpu(x) __constant_swab16((x))
/*
 * Runtime endianness conversions.  On these (little-endian) targets the
 * LE conversions are identity casts and only the BE forms byte-swap.
 */
#define __cpu_to_le64(x) ((__u64)(x))
#define __le64_to_cpu(x) ((__u64)(x))
#define __cpu_to_le32(x) ((__u32)(x))
#define __le32_to_cpu(x) ((__u32)(x))
#define __cpu_to_le16(x) ((__u16)(x))
#define __le16_to_cpu(x) ((__u16)(x))
#define __cpu_to_be64(x) __swab64((x))
#define __be64_to_cpu(x) __swab64((x))
#define __cpu_to_be32(x) __swab32((x))
#define __be32_to_cpu(x) __swab32((x))
#define __cpu_to_be16(x) __swab16((x))
#define __be16_to_cpu(x) __swab16((x))
/* "p" variants take a pointer and convert the pointed-to value. */
#define __cpu_to_le64p(x) (*(__u64*)(x))
#define __le64_to_cpup(x) (*(__u64*)(x))
#define __cpu_to_le32p(x) (*(__u32*)(x))
#define __le32_to_cpup(x) (*(__u32*)(x))
#define __cpu_to_le16p(x) (*(__u16*)(x))
#define __le16_to_cpup(x) (*(__u16*)(x))
#define __cpu_to_be64p(x) __swab64p((x))
#define __be64_to_cpup(x) __swab64p((x))
#define __cpu_to_be32p(x) __swab32p((x))
#define __be32_to_cpup(x) __swab32p((x))
#define __cpu_to_be16p(x) __swab16p((x))
#define __be16_to_cpup(x) __swab16p((x))
/* NOTE(review): in Linux the "s" variants swap in place through a
 * pointer; here the LE ones are plain value casts to the signed types.
 * Presumably no caller relies on the in-place semantics -- verify. */
#define __cpu_to_le64s(x) ((__s64)(x))
#define __le64_to_cpus(x) ((__s64)(x))
#define __cpu_to_le32s(x) ((__s32)(x))
#define __le32_to_cpus(x) ((__s32)(x))
#define __cpu_to_le16s(x) ((__s16)(x))
#define __le16_to_cpus(x) ((__s16)(x))
#define __cpu_to_be64s(x) __swab64s((x))
#define __be64_to_cpus(x) __swab64s((x))
#define __cpu_to_be32s(x) __swab32s((x))
#define __be32_to_cpus(x) __swab32s((x))
#define __cpu_to_be16s(x) __swab16s((x))
#define __be16_to_cpus(x) __swab16s((x))
144 
/* Public (unprefixed) aliases for the conversion helpers above. */
#ifndef cpu_to_le64
#define cpu_to_le64 __cpu_to_le64
#define le64_to_cpu __le64_to_cpu
#define cpu_to_le32 __cpu_to_le32
#define le32_to_cpu __le32_to_cpu
#define cpu_to_le16 __cpu_to_le16
#define le16_to_cpu __le16_to_cpu
#endif

#define cpu_to_be64 __cpu_to_be64
#define be64_to_cpu __be64_to_cpu
#define cpu_to_be32 __cpu_to_be32
#define be32_to_cpu __be32_to_cpu
#define cpu_to_be16 __cpu_to_be16
#define be16_to_cpu __be16_to_cpu
#define cpu_to_le64p __cpu_to_le64p
#define le64_to_cpup __le64_to_cpup
#define cpu_to_le32p __cpu_to_le32p
#define le32_to_cpup __le32_to_cpup
#define cpu_to_le16p __cpu_to_le16p
#define le16_to_cpup __le16_to_cpup
#define cpu_to_be64p __cpu_to_be64p
#define be64_to_cpup __be64_to_cpup
#define cpu_to_be32p __cpu_to_be32p
#define be32_to_cpup __be32_to_cpup
#define cpu_to_be16p __cpu_to_be16p
#define be16_to_cpup __be16_to_cpup
#define cpu_to_le64s __cpu_to_le64s
#define le64_to_cpus __le64_to_cpus
#define cpu_to_le32s __cpu_to_le32s
#define le32_to_cpus __le32_to_cpus
#define cpu_to_le16s __cpu_to_le16s
#define le16_to_cpus __le16_to_cpus
#define cpu_to_be64s __cpu_to_be64s
#define be64_to_cpus __be64_to_cpus
#define cpu_to_be32s __cpu_to_be32s
#define be32_to_cpus __be32_to_cpus
#define cpu_to_be16s __cpu_to_be16s
#define be16_to_cpus __be16_to_cpus
184 
185 
/* Add a native-endian value to a little-endian 16-bit field in place. */
static inline void le16_add_cpu(__le16 *var, u16 val)
{
	u16 host = le16_to_cpu(*var);
	*var = cpu_to_le16(host + val);
}
190 
/* Add a native-endian value to a little-endian 32-bit field in place. */
static inline void le32_add_cpu(__le32 *var, u32 val)
{
	u32 host = le32_to_cpu(*var);
	*var = cpu_to_le32(host + val);
}
195 
/* Add a native-endian value to a little-endian 64-bit field in place. */
static inline void le64_add_cpu(__le64 *var, u64 val)
{
	u64 host = le64_to_cpu(*var);
	*var = cpu_to_le64(host + val);
}
200 
201 //
202 // Network to host byte swap functions
203 //
204 
/*
 * Byte-order reversal for 32- and 16-bit values.  The masks are unsigned
 * so the left shifts never move a set bit into the sign bit of a signed
 * int, which would be undefined behavior (e.g. for byte values >= 0x80).
 * NOTE: the argument is evaluated several times -- do not pass
 * expressions with side effects.
 */
#define ntohl(x)           ( ( ( ( x ) & 0x000000ffUL ) << 24 ) | \
                             ( ( ( x ) & 0x0000ff00UL ) << 8 ) | \
                             ( ( ( x ) & 0x00ff0000UL ) >> 8 ) | \
                             ( ( ( x ) & 0xff000000UL ) >> 24 )   )

#define ntohs(x)           ( ( ( ( x ) & 0xff00u ) >> 8 ) | \
                             ( ( ( x ) & 0x00ffu ) << 8 ) )


#define htonl(x)           ntohl(x)
#define htons(x)           ntohs(x)
216 
217 
218 //
219 // kernel printk flags
220 //
221 
222 #define KERN_EMERG      "<0>"   /* system is unusable                   */
223 #define KERN_ALERT      "<1>"   /* action must be taken immediately     */
224 #define KERN_CRIT       "<2>"   /* critical conditions                  */
225 #define KERN_ERR        "<3>"   /* error conditions                     */
226 #define KERN_WARNING    "<4>"   /* warning conditions                   */
227 #define KERN_NOTICE     "<5>"   /* normal but significant condition     */
228 #define KERN_INFO       "<6>"   /* informational                        */
229 #define KERN_DEBUG      "<7>"   /* debug-level messages                 */
230 
231 #define printk  DbgPrint
232 
233 /*
234  * error pointer
235  */
236 #define MAX_ERRNO	4095
237 #define IS_ERR_VALUE(x) ((x) >= (unsigned long)-MAX_ERRNO)
238 
ERR_PTR(long error)239 static inline void *ERR_PTR(long error)
240 {
241 	return (void *)(long_ptr_t) error;
242 }
243 
PTR_ERR(const void * ptr)244 static inline long PTR_ERR(const void *ptr)
245 {
246 	return (long)(long_ptr_t) ptr;
247 }
248 
IS_ERR(const void * ptr)249 static inline long IS_ERR(const void *ptr)
250 {
251 	return IS_ERR_VALUE((unsigned long)(long_ptr_t)ptr);
252 }
253 
254 
255 #define BUG_ON(c) assert(!(c))
256 
257 #define WARN_ON(c) BUG_ON(c)
258 
259 //
260 // Linux module definitions
261 //
262 
263 #define likely
264 #define unlikely
265 
266 #define __init
267 #define __exit
268 
269 #define THIS_MODULE NULL
270 #define MODULE_LICENSE(x)
271 #define MODULE_ALIAS_NLS(x)
272 #define EXPORT_SYMBOL(x)
273 
274 
275 #define try_module_get(x) (TRUE)
276 #define module_put(x)
277 
278 #define module_init(X) int  __init module_##X() {return X();}
279 #define module_exit(X) void __exit module_##X() {X();}
280 
281 #define DECLARE_INIT(X) int  __init  module_##X(void)
282 #define DECLARE_EXIT(X) void __exit  module_##X(void)
283 
284 #define LOAD_MODULE(X) do {                             \
285                             rc = module_##X();          \
286                        } while(0)
287 
288 #define UNLOAD_MODULE(X) do {                           \
289                             module_##X();               \
290                          } while(0)
291 
292 #define LOAD_NLS    LOAD_MODULE
293 #define UNLOAD_NLS  UNLOAD_MODULE
294 
295 //
296 // spinlocks .....
297 //
298 
/* Spin lock bundled with the IRQL saved at acquire time, so that
 * spin_unlock() can restore it on release. */
typedef struct _spinlock_t {

    KSPIN_LOCK  lock;   /* the NT spin lock itself */
    KIRQL       irql;   /* IRQL to restore when the lock is released */
} spinlock_t;
304 
/* Map the Linux spinlock API onto KSPIN_LOCK.  The saved IRQL lives in
 * the lock structure itself, so a given spinlock_t must not be acquired
 * recursively or concurrently re-saved. */
#define spin_lock_init(sl)    KeInitializeSpinLock(&((sl)->lock))
#define spin_lock(sl)         KeAcquireSpinLock(&((sl)->lock), &((sl)->irql))
#define spin_unlock(sl)       KeReleaseSpinLock(&((sl)->lock), (sl)->irql)
/* "flags" merely mirrors the IRQL stored in the lock; restore asserts
 * that the caller passes back the same value. */
#define spin_lock_irqsave(sl, flags) do {spin_lock(sl); flags=(sl)->irql;} while(0)
#define spin_unlock_irqrestore(sl, flags) do {ASSERT((KIRQL)(flags)==(sl)->irql); spin_unlock(sl);} while(0)

/* No-op: lock ownership is not tracked here. */
#define assert_spin_locked(x)   do {} while(0)
312 
313 /*
314  * Does a critical section need to be broken due to another
315  * task waiting?: (technically does not depend on CONFIG_PREEMPT,
316  * but a general need for low latency)
317  */
/* Returns nonzero when the holder of @lock should drop it to let a
 * waiter in; without CONFIG_PREEMPT there is never a need to break. */
static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPT
    return spin_is_contended(lock);
#else
    return 0;
#endif
}
326 
327 //
328 // bit operations
329 //
330 
331 /**
332  * __set_bit - Set a bit in memory
333  * @nr: the bit to set
334  * @addr: the address to start counting from
335  *
336  * Unlike set_bit(), this function is non-atomic and may be reordered.
337  * If it's called on the same region of memory simultaneously, the effect
338  * may be that only one operation succeeds.
339  */
set_bit(int nr,volatile unsigned long * addr)340 static inline int set_bit(int nr, volatile unsigned long *addr)
341 {
342     addr += (nr >> ORDER_PER_LONG);
343     nr &= (BITS_PER_LONG - 1);
344 
345     return !!(InterlockedOr(addr, (1 << nr)) & (1 << nr));
346 }
347 
348 
349 /**
350  * clear_bit - Clears a bit in memory
351  * @nr: Bit to clear
352  * @addr: Address to start counting from
353  *
354  * clear_bit() is atomic and may not be reordered.  However, it does
355  * not contain a memory barrier, so if it is used for locking purposes,
356  * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
357  * in order to ensure changes are visible on other processors.
358  */
clear_bit(int nr,volatile unsigned long * addr)359 static inline int clear_bit(int nr, volatile unsigned long *addr)
360 {
361     addr += (nr >> ORDER_PER_LONG);
362     nr &= (BITS_PER_LONG - 1);
363 
364     return !!(InterlockedAnd(addr, ~(1 << nr)) & (1 << nr));
365 }
366 
367 /**
368  * test_and_clear_bit - Clear a bit and return its old value
369  * @nr: Bit to clear
370  * @addr: Address to count from
371  *
372  * This operation is atomic and cannot be reordered.
373  * It also implies a memory barrier.
374  */
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
    /* clear_bit() already returns the previous value of the bit, so it
     * doubles directly as the test-and-clear primitive. */
    return clear_bit(nr, addr);
}
379 
380 /*
381  *  test
382  */
test_bit(int nr,volatile const unsigned long * addr)383 static int test_bit(int nr, volatile const unsigned long *addr)
384 {
385     return !!((1 << (nr & (BITS_PER_LONG - 1))) &
386               (addr[nr >> ORDER_PER_LONG]));
387 }
388 
389 /**
390  * test_and_set_bit - Set a bit and return its old value
391  * @nr: Bit to set
392  * @addr: Address to count from
393  *
394  * This operation is atomic and cannot be reordered.
395  * It also implies a memory barrier.
396  */
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
    /* set_bit() already returns the previous value of the bit, so it
     * doubles directly as the test-and-set primitive. */
    return set_bit(nr, addr);
}
401 
402 //
403 // list definition ...
404 //
405 
406 #include <linux/list.h>
407 
408 
409 /*********************************************
410  *  linux scheduler related structures      *
411 *********************************************/
412 
413 //
414 // task structure
415 //
416 
417 #define TASK_INTERRUPTIBLE      1
418 #define TASK_UNINTERRUPTIBLE    2
419 
/* Minimal stand-in for the Linux task descriptor; only the fields the
 * ported filesystem/journal code touches are kept. */
struct task_struct {
    pid_t pid;              /* process id */
    pid_t tid;              /* thread id */
    char comm[32];          /* command (process) name */
    void * journal_info;    /* per-task journal state; presumably used as
                               in Linux -- verify against jbd callers */
};
426 
427 extern struct task_struct *current;
428 
429 //
430 // scheduler routines
431 //
432 
433 
434 #ifdef __REACTOS__
cond_resched()435 static inline int cond_resched() {
436 #else
437 static inline cond_resched() {
438 #endif
439     return FALSE;
440 }
441 #ifdef __REACTOS__
442 static inline int need_resched() {
443 #else
444 static inline need_resched() {
445 #endif
446     return FALSE;
447 }
448 
449 #define yield()        do {} while(0)
450 #define might_sleep()  do {} while(0)
451 
452 //
453 // mutex
454 //
455 
/* Linux-style mutex mapped directly onto an NT FAST_MUTEX. */
typedef struct mutex {
    FAST_MUTEX  lock;   /* the underlying fast mutex */
} mutex_t;

#define mutex_init(x)   ExInitializeFastMutex(&((x)->lock))
#define mutex_lock(x)   ExAcquireFastMutex(&((x)->lock))
#define mutex_unlock(x) ExReleaseFastMutex(&((x)->lock))
463 
464 
465 //
466 // wait_queue
467 //
468 
469 
470 typedef PVOID wait_queue_t;
471 
472 #define WQ_FLAG_EXCLUSIVE	    0x01
473 #define WQ_FLAG_AUTO_REMOVAL	0x02
474 
/* A single waiter: an NT event plus linkage into the queue head's list.
 * NOTE(review): the member name "private" is a C++ keyword, so this
 * header must only be included from C translation units. */
struct __wait_queue {
    unsigned int    flags;          /* WQ_FLAG_* bits */
    void *          private;        /* opaque owner/context pointer */
    KEVENT          event;          /* signaled to wake this waiter */
    struct list_head task_list;     /* entry in wait_queue_head's task_list */
};
481 
482 
483 #define DEFINE_WAIT(name) \
484 	wait_queue_t name = (PVOID)wait_queue_create();
485 
486 /*
487 struct wait_bit_key {
488 	void *flags;
489 	int bit_nr;
490 };
491 
492 struct wait_bit_queue {
493 	struct wait_bit_key key;
494 	wait_queue_t wait;
495 };
496 */
497 
/* Head of a wait queue: a spinlock protecting the list of waiters. */
struct __wait_queue_head {
    spinlock_t lock;                /* guards task_list */
    struct list_head task_list;     /* list of struct __wait_queue waiters */
};
typedef struct __wait_queue_head wait_queue_head_t;
503 
504 #define is_sync_wait(wait)  (TRUE)
505 #define set_current_state(state) do {} while(0)
506 #define __set_current_state(state)  do {} while(0)
507 
508 void init_waitqueue_head(wait_queue_head_t *q);
509 int wake_up(wait_queue_head_t *queue);
510 
511 
512 /*
513  * Waitqueues which are removed from the waitqueue_head at wakeup time
514  */
515 struct __wait_queue * wait_queue_create();
516 void wait_queue_destroy(struct __wait_queue *);
517 
518 void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
519 void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
520 void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
521 int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
522 int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
523 
524 
525 //
526 // timer structure
527 //
528 
/* Linux-compatible timer descriptor (data layout only; no timer engine
 * is visible in this header). */
struct timer_list {
    struct list_head entry;         /* linkage into a timer list */
    unsigned long expires;          /* expiry time -- presumably jiffies,
                                       as in Linux; verify at call sites */

    void (*function)(unsigned long);/* callback invoked on expiry */
    unsigned long data;             /* argument passed to the callback */

#ifdef CONFIG_TIMER_STATS
    void *start_site;               /* timer-stats bookkeeping */
    char start_comm[16];
    int start_pid;
#endif
};
542 
543 
544 typedef struct kmem_cache kmem_cache_t;
545 
/* Per-volume block device: couples the NT device/volume objects with the
 * buffer-head cache (rb-tree + reaper list) used by the ported code. */
struct block_device {

    unsigned long           bd_flags;   /* flags */
    atomic_t		        bd_count;   /* reference count */
    PDEVICE_OBJECT          bd_dev;     /* device object */
    ANSI_STRING             bd_name;    /* name in ansi string */
    DISK_GEOMETRY           bd_geo;     /* disk geometry */
    PARTITION_INFORMATION   bd_part;    /* partition information */
    void *                  bd_priv;    /* pointers to EXT2_VCB
                                           NULL if it's a journal dev */
    PFILE_OBJECT            bd_volume;  /* streaming object file */
    LARGE_MCB               bd_extents; /* dirty extents */

    kmem_cache_t *          bd_bh_cache;/* memory cache for buffer_head */
    ERESOURCE               bd_bh_lock; /* lock for bh tree and reaper list */
    struct rb_root          bd_bh_root; /* buffer_head red-black tree root */
    LIST_ENTRY              bd_bh_free; /* reaper list */
    KEVENT                  bd_bh_notify; /* notification event for cleanup */
};
565 
566 //
567 // page information
568 //
569 
// We treat pages as follows: when allocating a page structure we allocate
// an extra PAGE_SIZE bytes at the end of the structure - that is where
// all the buffer heads go.
// Hence -> page_address(page) = page + sizeof(page)
574 #define page_address(_page) ((char*)_page + sizeof(struct page))
575 
/* Minimal stand-in for Linux's struct page; the data buffer is allocated
 * immediately after this header (see page_address() above). */
typedef struct page {
    void           *addr;       /* data buffer address */
    void           *mapping;    /* owning mapping, if any -- verify usage */
    void           *private;    /* private data (e.g. buffer heads) */
    atomic_t        count;      /* reference count (see get_page) */
    __u32           index;      /* index within the owning mapping */
    __u32           flags;      /* PG_* state bits */
} mem_map_t;
584 
585 #define get_page(p) atomic_inc(&(p)->count)
586 
587 #define PG_locked		 0	/* Page is locked. Don't touch. */
588 #define PG_error		 1
589 #define PG_referenced		 2
590 #define PG_uptodate		 3
591 #define PG_dirty		 4
592 #define PG_unused		 5
593 #define PG_lru			 6
594 #define PG_active		 7
595 #define PG_slab			 8
596 #define PG_skip			10
597 #define PG_highmem		11
598 #define PG_checked		12	/* kill me in 2.5.<early>. */
599 #define PG_arch_1		13
600 #define PG_reserved		14
601 #define PG_launder		15	/* written out by VM pressure.. */
602 #define PG_fs_1			16	/* Filesystem specific */
603 
604 #ifndef arch_set_page_uptodate
605 #define arch_set_page_uptodate(page)
606 #endif
607 
608 /* Make it prettier to test the above... */
609 #define UnlockPage(page)        unlock_page(page)
610 #define Page_Uptodate(page)     test_bit(PG_uptodate, &(page)->flags)
611 #define SetPageUptodate(page) \
612 	do {								\
613 		arch_set_page_uptodate(page);				\
614 		set_bit(PG_uptodate, &(page)->flags);			\
615 	} while (0)
616 #define ClearPageUptodate(page) clear_bit(PG_uptodate, &(page)->flags)
617 #define PageDirty(page)         test_bit(PG_dirty, &(page)->flags)
618 #define SetPageDirty(page)      set_bit(PG_dirty, &(page)->flags)
619 #define ClearPageDirty(page)    clear_bit(PG_dirty, &(page)->flags)
620 #define PageLocked(page)        test_bit(PG_locked, &(page)->flags)
621 #define LockPage(page)          set_bit(PG_locked, &(page)->flags)
622 #define TryLockPage(page)       test_and_set_bit(PG_locked, &(page)->flags)
623 #define PageChecked(page)       test_bit(PG_checked, &(page)->flags)
624 #define SetPageChecked(page)    set_bit(PG_checked, &(page)->flags)
625 #define ClearPageChecked(page)  clear_bit(PG_checked, &(page)->flags)
626 #define PageLaunder(page)       test_bit(PG_launder, &(page)->flags)
627 #define SetPageLaunder(page)    set_bit(PG_launder, &(page)->flags)
628 #define ClearPageLaunder(page)  clear_bit(PG_launder, &(page)->flags)
629 #define ClearPageArch1(page)    clear_bit(PG_arch_1, &(page)->flags)
630 
631 #define PageError(page)		test_bit(PG_error, &(page)->flags)
632 #define SetPageError(page)	set_bit(PG_error, &(page)->flags)
633 #define ClearPageError(page)	clear_bit(PG_error, &(page)->flags)
634 #define PageReferenced(page)    test_bit(PG_referenced, &(page)->flags)
635 #define SetPageReferenced(page) set_bit(PG_referenced, &(page)->flags)
636 #define ClearPageReferenced(page)       clear_bit(PG_referenced, &(page)->flags)
637 
638 #define PageActive(page)        test_bit(PG_active, &(page)->flags)
639 #define SetPageActive(page)     set_bit(PG_active, &(page)->flags)
640 #define ClearPageActive(page)   clear_bit(PG_active, &(page)->flags)
641 
642 
643 extern unsigned long __get_free_pages(unsigned int gfp_mask, unsigned int order);
644 #define __get_free_page(gfp_mask) \
645 		__get_free_pages((gfp_mask),0)
646 
647 extern void __free_pages(struct page *page, unsigned int order);
648 extern void free_pages(unsigned long addr, unsigned int order);
649 
650 #define __free_page(page) __free_pages((page), 0)
651 #define free_page(addr) free_pages((addr),0)
652 
653 #ifndef __REACTOS__
654 extern void truncate_inode_pages(struct address_space *, loff_t);
655 #endif
656 
657 #define __GFP_HIGHMEM   0x02
658 
659 #define __GFP_WAIT	0x10	/* Can wait and reschedule? */
660 #define __GFP_HIGH	0x20	/* Should access emergency pools? */
661 #define __GFP_IO	0x40	/* Can start low memory physical IO? */
662 #define __GFP_HIGHIO	0x80	/* Can start high mem physical IO? */
663 #define __GFP_FS	0x100	/* Can call down to low-level FS? */
664 
665 #define GFP_ATOMIC	(__GFP_HIGH)
666 #define GFP_USER	(             __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
667 #define GFP_HIGHUSER    (             __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS | __GFP_HIGHMEM)
668 #define GFP_KERNEL	(__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
669 #define GFP_NOFS    0
670 #define __GFP_NOFAIL 0
671 
672 
673 #define KM_USER0 0
674 
675 //
676 // buffer head definitions
677 //
678 
679 enum bh_state_bits {
680     BH_Uptodate,	        /* Contains valid data */
681     BH_Dirty,	            /* Is dirty */
682     BH_Verified,	 /* Is verified */
683     BH_Lock,	            /* Is locked */
684     BH_Req,		            /* Has been submitted for I/O */
685     BH_Uptodate_Lock,       /* Used by the first bh in a page, to serialise
686 			                 * IO completion of other buffers in the page
687 			                 */
688 
689     BH_Mapped,	            /* Has a disk mapping */
690     BH_New,		            /* Disk mapping was newly created by get_block */
691     BH_Async_Read,	        /* Is under end_buffer_async_read I/O */
692     BH_Async_Write,	        /* Is under end_buffer_async_write I/O */
693     BH_Delay,	            /* Buffer is not yet allocated on disk */
694     BH_Boundary,	        /* Block is followed by a discontiguity */
695     BH_Write_EIO,	        /* I/O error on write */
696     BH_Ordered,	            /* ordered write */
697     BH_Eopnotsupp,	        /* operation not supported (barrier) */
698     BH_Unwritten,	        /* Buffer is allocated on disk but not written */
699 
700     BH_PrivateStart,        /* not a state bit, but the first bit available
701 			                 * for private allocation by other entities
702 			                 */
703 };
704 
705 #define PAGE_CACHE_SIZE  (PAGE_SIZE)
706 #define PAGE_CACHE_SHIFT (12)
707 #define MAX_BUF_PER_PAGE (PAGE_CACHE_SIZE / 512)
708 
709 #ifdef __REACTOS__
710 struct buffer_head;
711 #endif
712 typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);
713 
714 /*
715  * Historically, a buffer_head was used to map a single block
716  * within a page, and of course as the unit of I/O through the
717  * filesystem and block layers.  Nowadays the basic I/O unit
718  * is the bio, and buffer_heads are used for extracting block
719  * mappings (via a get_block_t call), for tracking state within
720  * a page (via a page_mapping) and for wrapping bio submission
721  * for backward compatibility reasons (e.g. submit_bh).
722  */
723 struct buffer_head {
724     LIST_ENTRY    b_link;                   /* to be added to reaper list */
725     unsigned long b_state;		            /* buffer state bitmap (see above) */
726     struct page *b_page;                    /* the page this bh is mapped to */
727     PMDL         b_mdl;                     /* MDL of the locked buffer */
728     void	    *b_bcb;                     /* BCB of the buffer */
729 
730     // kdev_t b_dev;                        /* device (B_FREE = free) */
731     struct block_device *b_bdev;            /* block device object */
732 
733     blkcnt_t b_blocknr;		        /* start block number */
734     size_t        b_size;			        /* size of mapping */
735     char *        b_data;			        /* pointer to data within the page */
736     bh_end_io_t *b_end_io;		        /* I/O completion */
737     void *b_private;		                /* reserved for b_end_io */
738     // struct list_head b_assoc_buffers;    /* associated with another mapping */
739     // struct address_space *b_assoc_map;   /* mapping this buffer is associated with */
740     atomic_t b_count;		                /* users using this buffer_head */
741     struct rb_node b_rb_node;               /* Red-black tree node entry */
742 
743     LARGE_INTEGER  b_ts_creat;              /* creation time*/
744     LARGE_INTEGER  b_ts_drop;               /* drop time (to be released) */
745 };
746 
747 
748 /*
749  * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
750  * and buffer_foo() functions.
751  */
752 #define BUFFER_FNS(bit, name)						\
753 static inline void set_buffer_##name(struct buffer_head *bh)		\
754 {									\
755 	set_bit(BH_##bit, &(bh)->b_state);				\
756 }									\
757 static inline void clear_buffer_##name(struct buffer_head *bh)		\
758 {									\
759 	clear_bit(BH_##bit, &(bh)->b_state);				\
760 }									\
761 static inline int buffer_##name(const struct buffer_head *bh)		\
762 {									\
763 	return test_bit(BH_##bit, &(bh)->b_state);			\
764 }
765 
766 /*
767  * test_set_buffer_foo() and test_clear_buffer_foo()
768  */
/* Emits test_set_buffer_foo()/test_clear_buffer_foo() pairs.  The stray
 * line-continuation backslash after the final brace has been removed; it
 * silently extended the macro definition onto the following line. */
#define TAS_BUFFER_FNS(bit, name)					\
static inline int test_set_buffer_##name(struct buffer_head *bh)	\
{									\
	return test_and_set_bit(BH_##bit, &(bh)->b_state);		\
}									\
static inline int test_clear_buffer_##name(struct buffer_head *bh)	\
{									\
	return test_and_clear_bit(BH_##bit, &(bh)->b_state);		\
}
778 
779 /*
780  * Emit the buffer bitops functions.   Note that there are also functions
781  * of the form "mark_buffer_foo()".  These are higher-level functions which
782  * do something in addition to setting a b_state bit.
783  */
784 BUFFER_FNS(Uptodate, uptodate)
785 BUFFER_FNS(Dirty, dirty)
786 TAS_BUFFER_FNS(Dirty, dirty)
787 BUFFER_FNS(Verified, verified)
788 BUFFER_FNS(Lock, locked)
789 TAS_BUFFER_FNS(Lock, locked)
790 BUFFER_FNS(Req, req)
791 TAS_BUFFER_FNS(Req, req)
792 BUFFER_FNS(Mapped, mapped)
793 BUFFER_FNS(New, new)
794 BUFFER_FNS(Async_Read, async_read)
795 BUFFER_FNS(Async_Write, async_write)
796 BUFFER_FNS(Delay, delay)
797 BUFFER_FNS(Boundary, boundary)
798 BUFFER_FNS(Write_EIO, write_io_error)
799 BUFFER_FNS(Ordered, ordered)
800 BUFFER_FNS(Eopnotsupp, eopnotsupp)
801 BUFFER_FNS(Unwritten, unwritten)
802 
803 #define bh_offset(bh)		((unsigned long)(bh)->b_data & ~PAGE_MASK)
804 #define touch_buffer(bh)	mark_page_accessed(bh->b_page)
805 
806 /* If we *know* page->private refers to buffer_heads */
807 
808 #define page_buffers(page)					\
809 	(                                       \
810 		BUG_ON(!PagePrivate(page)),			\
811 		((struct buffer_head *)page_private(page))	\
812 	)
813 #define page_has_buffers(page)	PagePrivate(page)
814 
815 
816 /*
817  * Declarations
818  */
819 
820 void mark_buffer_dirty(struct buffer_head *bh);
821 void init_buffer(struct buffer_head *, bh_end_io_t *, void *);
822 void set_bh_page(struct buffer_head *bh,
823                  struct page *page, unsigned long offset);
824 int try_to_free_buffers(struct page *);
825 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
826                                                    int retry);
827 void create_empty_buffers(struct page *, unsigned long,
828                           unsigned long b_state);
829 
830 /* Things to do with buffers at mapping->private_list */
831 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
832 int inode_has_buffers(struct inode *);
833 void invalidate_inode_buffers(struct inode *);
834 int remove_inode_buffers(struct inode *inode);
835 #ifndef __REACTOS__
836 int sync_mapping_buffers(struct address_space *mapping);
837 #endif
838 void unmap_underlying_metadata(struct block_device *bdev, sector_t block);
839 
840 void mark_buffer_async_write(struct buffer_head *bh);
841 void invalidate_bdev(struct block_device *);
842 int sync_blockdev(struct block_device *bdev);
843 void __wait_on_buffer(struct buffer_head *);
844 wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
845 int fsync_bdev(struct block_device *);
846 struct super_block *freeze_bdev(struct block_device *);
847 void thaw_bdev(struct block_device *, struct super_block *);
848 int fsync_super(struct super_block *);
849 int fsync_no_super(struct block_device *);
850 struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
851                                                  unsigned long size);
852 struct buffer_head *get_block_bh(struct block_device *bdev, sector_t block,
853                                  unsigned long size, int zero);
854 struct buffer_head *__getblk(struct block_device *bdev, sector_t block,
855                                          unsigned long size);
856 void __brelse(struct buffer_head *);
857 void __bforget(struct buffer_head *);
858 void __breadahead(struct block_device *, sector_t block, unsigned int size);
859 struct buffer_head *__bread(struct block_device *, sector_t block, unsigned size);
860 void invalidate_bh_lrus(void);
861 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
862 void free_buffer_head(struct buffer_head * bh);
863 void unlock_buffer(struct buffer_head *bh);
864 void __lock_buffer(struct buffer_head *bh);
865 void ll_rw_block(int, int, struct buffer_head * bh[]);
866 int sync_dirty_buffer(struct buffer_head *bh);
867 int submit_bh(int, struct buffer_head *);
868 void write_boundary_block(struct block_device *bdev,
869                           sector_t bblock, unsigned blocksize);
870 int bh_uptodate_or_lock(struct buffer_head *bh);
871 int bh_submit_read(struct buffer_head *bh);
872 /* They are separately managed  */
873 struct buffer_head *extents_bread(struct super_block *sb, sector_t block);
874 struct buffer_head *extents_bwrite(struct super_block *sb, sector_t block);
875 void extents_mark_buffer_dirty(struct buffer_head *bh);
876 void extents_brelse(struct buffer_head *bh);
877 void extents_bforget(struct buffer_head *bh);
878 void buffer_head_remove(struct block_device *bdev, struct buffer_head *bh);
879 
880 extern int buffer_heads_over_limit;
881 
882 /*
883  * Generic address_space_operations implementations for buffer_head-backed
884  * address_spaces.
885  */
886 
887 #if 0
888 
889 int block_write_full_page(struct page *page, get_block_t *get_block,
890                           struct writeback_control *wbc);
891 int block_read_full_page(struct page*, get_block_t*);
892 int block_write_begin(struct file *, struct address_space *,
893                       loff_t, unsigned, unsigned,
894                       struct page **, void **, get_block_t*);
895 int block_write_end(struct file *, struct address_space *,
896                     loff_t, unsigned, unsigned,
897                     struct page *, void *);
898 int generic_write_end(struct file *, struct address_space *,
899                       loff_t, unsigned, unsigned,
900                       struct page *, void *);
901 
902 int block_prepare_write(struct page*, unsigned, unsigned, get_block_t*);
903 int cont_write_begin(struct file *, struct address_space *, loff_t,
904                      unsigned, unsigned, struct page **, void **,
905                      get_block_t *, loff_t *);
906 int block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
907                        get_block_t get_block);
908 sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
909 int generic_commit_write(struct file *, struct page *, unsigned, unsigned);
910 int block_truncate_page(struct address_space *, loff_t, get_block_t *);
911 int file_fsync(struct file *, struct dentry *, int);
912 int nobh_write_begin(struct file *, struct address_space *,
913                      loff_t, unsigned, unsigned,
914                      struct page **, void **, get_block_t*);
915 int nobh_write_end(struct file *, struct address_space *,
916                    loff_t, unsigned, unsigned,
917                    struct page *, void *);
918 int nobh_truncate_page(struct address_space *, loff_t, get_block_t *);
919 int nobh_writepage(struct page *page, get_block_t *get_block,
920                    struct writeback_control *wbc);
921 int generic_cont_expand_simple(struct inode *inode, loff_t size);
922 #endif
923 
924 void block_invalidatepage(struct page *page, unsigned long offset);
925 void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
926 int  block_commit_write(struct page *page, unsigned from, unsigned to);
927 void block_sync_page(struct page *);
928 
929 void buffer_init(void);
930 
931 /*
932  * inline definitions
933  */
934 #if 0
935 static inline void attach_page_buffers(struct page *page,
936                                        struct buffer_head *head)
937 {
938     page_cache_get(page);
939     SetPagePrivate(page);
940     set_page_private(page, (unsigned long)head);
941 }
942 #endif
943 
/* Take an extra reference on bh by bumping its b_count.  Pair each call
 * with put_bh()/brelse(). */
static inline void get_bh(struct buffer_head *bh)
{
    atomic_inc(&bh->b_count);
}
948 
949 static inline void put_bh(struct buffer_head *bh)
950 {
951     if (bh)
952         __brelse(bh);
953 }
954 
955 static inline void brelse(struct buffer_head *bh)
956 {
957     if (bh)
958         __brelse(bh);
959 }
960 
961 static inline void fini_bh(struct buffer_head **bh)
962 {
963     if (bh && *bh) {
964         brelse(*bh);
965         *bh = NULL;
966     }
967 }
968 
969 static inline void bforget(struct buffer_head *bh)
970 {
971     if (bh)
972         __bforget(bh);
973 }
974 
975 static inline struct buffer_head *
976             sb_getblk(struct super_block *sb, sector_t block)
977 {
978     return get_block_bh(sb->s_bdev, block, sb->s_blocksize, 0);
979 }
980 
981 static inline struct buffer_head *
982             sb_getblk_zero(struct super_block *sb, sector_t block)
983 {
984     return get_block_bh(sb->s_bdev, block, sb->s_blocksize, 1);
985 }
986 
987 static inline struct buffer_head *
988             sb_bread(struct super_block *sb, sector_t block)
989 {
990     struct buffer_head *bh = __getblk(sb->s_bdev, block, sb->s_blocksize);
991     if (!bh)
992 	    return NULL;
993     if (!buffer_uptodate(bh) && (bh_submit_read(bh) < 0)) {
994         brelse(bh);
995 	return NULL;
996     }
997     return bh;
998 }
999 
1000 static inline struct buffer_head *
1001             sb_find_get_block(struct super_block *sb, sector_t block)
1002 {
1003     return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
1004 }
1005 
/*
 * Mark bh as mapped and record the device, block number, and block size
 * it corresponds to.  NOTE(review): the mapped bit is set before the
 * fields are filled in — original ordering preserved in case any caller
 * observes the flag concurrently.
 */
static inline void
map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
{
    set_buffer_mapped(bh);
    bh->b_bdev = sb->s_bdev;
    bh->b_blocknr = block;
    bh->b_size = sb->s_blocksize;
}
1014 
1015 /*
1016  * Calling wait_on_buffer() for a zero-ref buffer is illegal, so we call into
1017  * __wait_on_buffer() just to trip a debug check.  Because debug code in inline
1018  * functions is bloaty.
1019  */
1020 
1021 static inline void wait_on_buffer(struct buffer_head *bh)
1022 {
1023     might_sleep();
1024     if (buffer_locked(bh) || atomic_read(&bh->b_count) == 0)
1025         __wait_on_buffer(bh);
1026 }
1027 
/* Acquire the buffer lock, sleeping in __lock_buffer() when the
 * test-and-set reports the lock bit was already held. */
static inline void lock_buffer(struct buffer_head *bh)
{
    might_sleep();
    if (test_set_buffer_locked(bh))
        __lock_buffer(bh);
}
1034 
extern int __set_page_dirty_buffers(struct page *page);

//
// unicode character
//

/* NLS (native language support) codepage table, mirroring Linux's
 * struct nls_table. */
struct nls_table {
    char *charset;                  /* canonical charset name */
    char *alias;                    /* alternative name, if any */
    int (*uni2char) (wchar_t uni, unsigned char *out, int boundlen);
    int (*char2uni) (const unsigned char *rawstring, int boundlen,
                     wchar_t *uni);
    unsigned char *charset2lower;   /* per-byte lowercase map */
    unsigned char *charset2upper;   /* per-byte uppercase map */
    struct module *owner;
    struct nls_table *next;         /* next table in the registration list */
};

/* Maximum number of octets one character may occupy in any charset. */
#define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */

/* nls.c: codepage table registration and lookup. */
extern int register_nls(struct nls_table *);
extern int unregister_nls(struct nls_table *);
extern struct nls_table *load_nls(char *);
extern void unload_nls(struct nls_table *);
extern struct nls_table *load_nls_default(void);

/* UTF-8 <-> wide-character conversion helpers. */
extern int utf8_mbtowc(wchar_t *, const __u8 *, int);
extern int utf8_mbstowcs(wchar_t *, const __u8 *, int);
extern int utf8_wctomb(__u8 *, wchar_t, int);
extern int utf8_wcstombs(__u8 *, const wchar_t *, int);

//
//  kernel jiffies
//

/* Emulated jiffies tick rate: 100 Hz, i.e. one jiffy per 10 ms. */
#define HZ  (100)
1073 
1074 static inline __u32 JIFFIES()
1075 {
1076     LARGE_INTEGER Tick;
1077 
1078     KeQueryTickCount(&Tick);
1079     Tick.QuadPart *= KeQueryTimeIncrement();
1080     Tick.QuadPart /= (10000000 / HZ);
1081 
1082     return Tick.LowPart;
1083 }
1084 
1085 #define jiffies JIFFIES()
1086 
1087 //
1088 // memory routines
1089 //
1090 
1091 #ifdef _WIN2K_TARGET_
1092 
1093 typedef GUID UUID;
1094 NTKERNELAPI
1095 NTSTATUS
1096 ExUuidCreate(
1097     OUT UUID *Uuid
1098 );
1099 
1100 NTKERNELAPI
1101 PVOID
1102 NTAPI
1103 ExAllocatePoolWithTag(
1104     IN POOL_TYPE PoolType,
1105     IN SIZE_T NumberOfBytes,
1106     IN ULONG Tag
1107 );
1108 
1109 #define  ExFreePoolWithTag(_P, _T) ExFreePool(_P)
1110 #endif
1111 
1112 PVOID Ext2AllocatePool(
1113     IN POOL_TYPE PoolType,
1114     IN SIZE_T NumberOfBytes,
1115     IN ULONG Tag
1116 );
1117 
1118 VOID
1119 Ext2FreePool(
1120     IN PVOID P,
1121     IN ULONG Tag
1122 );
1123 
1124 void *kzalloc(int size, int flags);
1125 #define kmalloc(size, gfp) Ext2AllocatePool(NonPagedPool, size, 'JBDM')
1126 #define kfree(p) Ext2FreePool(p, 'JBDM')
1127 
1128 
/* memory slab: Linux kmem_cache API emulated on NT lookaside lists. */

#define	SLAB_HWCACHE_ALIGN	0x00002000U	/* align objs on a h/w cache lines */
#define SLAB_KERNEL         0x00000001U
#define SLAB_TEMPORARY      0x00000002U

/* Per-object constructor callback (Linux 2.6-era signature:
 * object pointer, owning cache, flags). */
typedef void (*kmem_cache_cb_t)(void*, kmem_cache_t *, unsigned long);

struct kmem_cache {
    CHAR                    name[32];    /* cache name */
    ULONG                   flags;       /* SLAB_* flags */
    ULONG                   size;        /* object size in bytes */
    atomic_t                count;       /* allocation counter — presumably live objects; confirm in the .c */
    atomic_t                acount;      /* second counter — TODO confirm semantics in the .c */
    NPAGED_LOOKASIDE_LIST   la;          /* backing non-paged lookaside list */
    kmem_cache_cb_t         constructor; /* optional per-object constructor */
};


kmem_cache_t *
kmem_cache_create(
    const char *name,
    size_t size,
    size_t offset,
    unsigned long flags,
    kmem_cache_cb_t ctor
);

void* kmem_cache_alloc(kmem_cache_t *kc, int flags);
void  kmem_cache_free(kmem_cache_t *kc, void *p);
int   kmem_cache_destroy(kmem_cache_t *kc);
1160 
1161 
1162 //
1163 // block device
1164 //
1165 
1166 #define BDEVNAME_SIZE      32      /* Largest string for a blockdev identifier */
1167 
1168 //
1169 // ll_rw_block ....
1170 //
1171 
1172 
1173 #define RW_MASK         1
1174 #define RWA_MASK        2
1175 #define READ 0
1176 #define WRITE 1
1177 #define READA 2         /* read-ahead  - don't block if no resources */
1178 #define SWRITE 3        /* for ll_rw_block() - wait for buffer lock */
1179 #define READ_SYNC       (READ | (1 << BIO_RW_SYNC))
1180 #define READ_META       (READ | (1 << BIO_RW_META))
1181 #define WRITE_SYNC      (WRITE | (1 << BIO_RW_SYNC))
1182 #define WRITE_BARRIER   ((1 << BIO_RW) | (1 << BIO_RW_BARRIER))
1183 
1184 //
1185 // timer routines
1186 //
1187 
1188 /*
1189  *      These inlines deal with timer wrapping correctly. You are
1190  *      strongly encouraged to use them
1191  *      1. Because people otherwise forget
1192  *      2. Because if the timer wrap changes in future you won't have to
1193  *         alter your driver code.
1194  *
1195  * time_after(a,b) returns true if the time a is after time b.
1196  *
1197  * Do this with "<0" and ">=0" to only test the sign of the result. A
1198  * good compiler would generate better code (and a really good compiler
1199  * wouldn't care). Gcc is currently neither.
1200  */
1201 #define typecheck(x, y) (TRUE)
1202 
1203 #define time_after(a,b)         \
1204         (typecheck(unsigned long, a) && \
1205          typecheck(unsigned long, b) && \
1206          ((long)(b) - (long)(a) < 0))
1207 #define time_before(a,b)        time_after(b,a)
1208 
1209 #define time_after_eq(a,b)      \
1210         (typecheck(unsigned long, a) && \
1211          typecheck(unsigned long, b) && \
1212          ((long)(a) - (long)(b) >= 0))
1213 #define time_before_eq(a,b)     time_after_eq(b,a)
1214 
1215 #define time_in_range(a,b,c) \
1216         (time_after_eq(a,b) && \
1217          time_before_eq(a,c))
1218 
1219 #define smp_rmb()  do {}while(0)
1220 
1221 
1222 static inline __u32 do_div64 (__u64 * n, __u64 b)
1223 {
1224     __u64 mod;
1225 
1226     mod = *n % b;
1227     *n = *n / b;
1228     return (__u32) mod;
1229 }
1230 #define do_div(n, b) do_div64(&(n), (__u64)b)
1231 
1232 #endif // _EXT2_MODULE_HEADER_
1233