1 /*
2  * COPYRIGHT:        See COPYRIGHT.TXT
3  * PROJECT:          Ext2 File System Driver for WinNT/2K/XP
4  * FILE:             Modules.h
5  * PURPOSE:          Header file: nls structures & linux kernel ...
6  * PROGRAMMER:       Matt Wu <mattwu@163.com>
7  * HOMEPAGE:         http://www.ext2fsd.com
8  * UPDATE HISTORY:
9  */
10 
11 #ifndef _EXT2_MODULE_HEADER_
12 #define _EXT2_MODULE_HEADER_
13 
14 /* INCLUDES *************************************************************/
15 
16 #include <linux/types.h>
17 #include <linux/errno.h>
18 #include <linux/rbtree.h>
19 #include <linux/fs.h>
20 #include <linux/log2.h>
21 
22 #if _WIN32_WINNT <= 0x500
23 #define _WIN2K_TARGET_ 1
24 #endif
25 
26 /* STRUCTS ******************************************************/
27 
28 #ifndef offsetof
29 # define offsetof(type, member) ((ULONG_PTR)&(((type *)0)->member))
30 #endif
31 
32 #ifndef container_of
#define container_of(ptr, type, member)                  \
                ((type *)((char *)(ptr) - offsetof(type, member)))
35 #endif
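
/*
 * Illustrative sketch (not part of the driver): container_of() recovers the
 * address of an enclosing structure from a pointer to one of its members.
 * The demo_entry type below is hypothetical.
 */
#if 0
struct demo_entry {
    int   key;
    ULONG flags;
};

static struct demo_entry *demo_entry_from_flags(ULONG *fp)
{
    /* fp points at demo_entry.flags; step back to the containing struct */
    return container_of(fp, struct demo_entry, flags);
}
#endif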
36 
37 //
38 // Byte order swapping routines
39 //
40 
41 /* use the runtime routine or compiler's implementation */
42 #if (defined(_M_IX86) && (_MSC_FULL_VER > 13009037)) || \
43     ((defined(_M_AMD64) || defined(_M_IA64)) &&         \
44      (_MSC_FULL_VER > 13009175))
45 #ifdef __cplusplus
46 extern "C" {
47 #endif
48     unsigned short __cdecl _byteswap_ushort(unsigned short);
49     unsigned long  __cdecl _byteswap_ulong (unsigned long);
50     unsigned __int64 __cdecl _byteswap_uint64(unsigned __int64);
51 #ifdef __cplusplus
52 }
53 #endif
54 #pragma intrinsic(_byteswap_ushort)
55 #pragma intrinsic(_byteswap_ulong)
56 #pragma intrinsic(_byteswap_uint64)
57 
58 #define RtlUshortByteSwap(_x)    _byteswap_ushort((USHORT)(_x))
59 #define RtlUlongByteSwap(_x)     _byteswap_ulong((_x))
60 #define RtlUlonglongByteSwap(_x) _byteswap_uint64((_x))
61 
62 #elif !defined(__REACTOS__)
63 
64 USHORT
65 FASTCALL
66 RtlUshortByteSwap(
67     IN USHORT Source
68 );
69 
70 ULONG
71 FASTCALL
72 RtlUlongByteSwap(
73     IN ULONG Source
74 );
75 
76 ULONGLONG
77 FASTCALL
78 RtlUlonglongByteSwap(
79     IN ULONGLONG Source
80 );
81 #endif
82 
83 #define __swab16(x) RtlUshortByteSwap(x)
84 #define __swab32(x) RtlUlongByteSwap(x)
85 #define __swab64(x) RtlUlonglongByteSwap(x)
86 
87 #define __constant_swab32  __swab32
88 #define __constant_swab64  __swab64
89 
90 #define __constant_htonl(x) __constant_swab32((x))
91 #define __constant_ntohl(x) __constant_swab32((x))
92 #define __constant_htons(x) __constant_swab16((x))
93 #define __constant_ntohs(x) __constant_swab16((x))
94 #define __constant_cpu_to_le64(x) ((__u64)(x))
95 #define __constant_le64_to_cpu(x) ((__u64)(x))
96 #define __constant_cpu_to_le32(x) ((__u32)(x))
97 #define __constant_le32_to_cpu(x) ((__u32)(x))
98 #define __constant_cpu_to_le16(x) ((__u16)(x))
99 #define __constant_le16_to_cpu(x) ((__u16)(x))
100 #define __constant_cpu_to_be64(x) __constant_swab64((x))
101 #define __constant_be64_to_cpu(x) __constant_swab64((x))
102 #define __constant_cpu_to_be32(x) __constant_swab32((x))
103 #define __constant_be32_to_cpu(x) __constant_swab32((x))
104 #define __constant_cpu_to_be16(x) __constant_swab16((x))
105 #define __constant_be16_to_cpu(x) __constant_swab16((x))
106 #define __cpu_to_le64(x) ((__u64)(x))
107 #define __le64_to_cpu(x) ((__u64)(x))
108 #define __cpu_to_le32(x) ((__u32)(x))
109 #define __le32_to_cpu(x) ((__u32)(x))
110 #define __cpu_to_le16(x) ((__u16)(x))
111 #define __le16_to_cpu(x) ((__u16)(x))
112 #define __cpu_to_be64(x) __swab64((x))
113 #define __be64_to_cpu(x) __swab64((x))
114 #define __cpu_to_be32(x) __swab32((x))
115 #define __be32_to_cpu(x) __swab32((x))
116 #define __cpu_to_be16(x) __swab16((x))
117 #define __be16_to_cpu(x) __swab16((x))
118 #define __cpu_to_le64p(x) (*(__u64*)(x))
119 #define __le64_to_cpup(x) (*(__u64*)(x))
120 #define __cpu_to_le32p(x) (*(__u32*)(x))
121 #define __le32_to_cpup(x) (*(__u32*)(x))
122 #define __cpu_to_le16p(x) (*(__u16*)(x))
123 #define __le16_to_cpup(x) (*(__u16*)(x))
124 #define __cpu_to_be64p(x) __swab64p((x))
125 #define __be64_to_cpup(x) __swab64p((x))
126 #define __cpu_to_be32p(x) __swab32p((x))
127 #define __be32_to_cpup(x) __swab32p((x))
128 #define __cpu_to_be16p(x) __swab16p((x))
129 #define __be16_to_cpup(x) __swab16p((x))
130 #define __cpu_to_le64s(x) ((__s64)(x))
131 #define __le64_to_cpus(x) ((__s64)(x))
132 #define __cpu_to_le32s(x) ((__s32)(x))
133 #define __le32_to_cpus(x) ((__s32)(x))
134 #define __cpu_to_le16s(x) ((__s16)(x))
135 #define __le16_to_cpus(x) ((__s16)(x))
136 #define __cpu_to_be64s(x) __swab64s((x))
137 #define __be64_to_cpus(x) __swab64s((x))
138 #define __cpu_to_be32s(x) __swab32s((x))
139 #define __be32_to_cpus(x) __swab32s((x))
140 #define __cpu_to_be16s(x) __swab16s((x))
141 #define __be16_to_cpus(x) __swab16s((x))
142 
143 #ifndef cpu_to_le64
144 #define cpu_to_le64 __cpu_to_le64
145 #define le64_to_cpu __le64_to_cpu
146 #define cpu_to_le32 __cpu_to_le32
147 #define le32_to_cpu __le32_to_cpu
148 #define cpu_to_le16 __cpu_to_le16
149 #define le16_to_cpu __le16_to_cpu
150 #endif
151 
152 #define cpu_to_be64 __cpu_to_be64
153 #define be64_to_cpu __be64_to_cpu
154 #define cpu_to_be32 __cpu_to_be32
155 #define be32_to_cpu __be32_to_cpu
156 #define cpu_to_be16 __cpu_to_be16
157 #define be16_to_cpu __be16_to_cpu
158 #define cpu_to_le64p __cpu_to_le64p
159 #define le64_to_cpup __le64_to_cpup
160 #define cpu_to_le32p __cpu_to_le32p
161 #define le32_to_cpup __le32_to_cpup
162 #define cpu_to_le16p __cpu_to_le16p
163 #define le16_to_cpup __le16_to_cpup
164 #define cpu_to_be64p __cpu_to_be64p
165 #define be64_to_cpup __be64_to_cpup
166 #define cpu_to_be32p __cpu_to_be32p
167 #define be32_to_cpup __be32_to_cpup
168 #define cpu_to_be16p __cpu_to_be16p
169 #define be16_to_cpup __be16_to_cpup
170 #define cpu_to_le64s __cpu_to_le64s
171 #define le64_to_cpus __le64_to_cpus
172 #define cpu_to_le32s __cpu_to_le32s
173 #define le32_to_cpus __le32_to_cpus
174 #define cpu_to_le16s __cpu_to_le16s
175 #define le16_to_cpus __le16_to_cpus
176 #define cpu_to_be64s __cpu_to_be64s
177 #define be64_to_cpus __be64_to_cpus
178 #define cpu_to_be32s __cpu_to_be32s
179 #define be32_to_cpus __be32_to_cpus
180 #define cpu_to_be16s __cpu_to_be16s
181 #define be16_to_cpus __be16_to_cpus
182 
183 
184 static inline void le16_add_cpu(__le16 *var, u16 val)
185 {
186 	*var = cpu_to_le16(le16_to_cpu(*var) + val);
187 }
188 
189 static inline void le32_add_cpu(__le32 *var, u32 val)
190 {
191 	*var = cpu_to_le32(le32_to_cpu(*var) + val);
192 }
193 
194 static inline void le64_add_cpu(__le64 *var, u64 val)
195 {
196 	*var = cpu_to_le64(le64_to_cpu(*var) + val);
197 }
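
/*
 * Illustrative sketch (assumes a little-endian host, as the identity macros
 * above do): ext2 stores its on-disk fields little-endian, so values are
 * wrapped with cpu_to_le*() on the way out and le*_to_cpu() on the way in.
 * The demo names are hypothetical.
 */
#if 0
static void demo_endian(void)
{
    __le16 disk_links = cpu_to_le16(1);             /* on-disk (LE) encoding */
    __u16  cpu_links  = le16_to_cpu(disk_links);    /* native-endian value   */

    le16_add_cpu(&disk_links, 1);                   /* on-disk counter += 1  */
}
#endif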
198 
199 //
200 // Network to host byte swap functions
201 //
202 
203 #define ntohl(x)           ( ( ( ( x ) & 0x000000ff ) << 24 ) | \
204                              ( ( ( x ) & 0x0000ff00 ) << 8 ) | \
205                              ( ( ( x ) & 0x00ff0000 ) >> 8 ) | \
206                              ( ( ( x ) & 0xff000000 ) >> 24 )   )
207 
208 #define ntohs(x)           ( ( ( ( x ) & 0xff00 ) >> 8 ) | \
209                              ( ( ( x ) & 0x00ff ) << 8 ) )
210 
211 
212 #define htonl(x)           ntohl(x)
213 #define htons(x)           ntohs(x)
214 
215 
216 //
217 // kernel printk flags
218 //
219 
220 #define KERN_EMERG      "<0>"   /* system is unusable                   */
221 #define KERN_ALERT      "<1>"   /* action must be taken immediately     */
222 #define KERN_CRIT       "<2>"   /* critical conditions                  */
223 #define KERN_ERR        "<3>"   /* error conditions                     */
224 #define KERN_WARNING    "<4>"   /* warning conditions                   */
225 #define KERN_NOTICE     "<5>"   /* normal but significant condition     */
226 #define KERN_INFO       "<6>"   /* informational                        */
227 #define KERN_DEBUG      "<7>"   /* debug-level messages                 */
228 
229 #define printk  DbgPrint
230 
231 /*
232  * error pointer
233  */
234 #define MAX_ERRNO	4095
235 #define IS_ERR_VALUE(x) ((x) >= (unsigned long)-MAX_ERRNO)
236 
237 static inline void *ERR_PTR(long error)
238 {
239 	return (void *)(long_ptr_t) error;
240 }
241 
242 static inline long PTR_ERR(const void *ptr)
243 {
244 	return (long)(long_ptr_t) ptr;
245 }
246 
247 static inline long IS_ERR(const void *ptr)
248 {
249 	return IS_ERR_VALUE((unsigned long)(long_ptr_t)ptr);
250 }
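
/*
 * Illustrative sketch: the error-pointer helpers encode a negative errno
 * inside a pointer value, mirroring the Linux convention.  The demo
 * functions are hypothetical.
 */
#if 0
static void *demo_lookup(int id)
{
    if (id < 0)
        return ERR_PTR(-EINVAL);        /* failure: errno hidden in the pointer */
    return NULL;                        /* stands in for a real object pointer  */
}

static long demo_caller(void)
{
    void *obj = demo_lookup(-1);
    if (IS_ERR(obj))
        return PTR_ERR(obj);            /* yields -EINVAL */
    return 0;
}
#endif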
251 
252 
253 #define BUG_ON(c) assert(!(c))
254 
255 #define WARN_ON(c) BUG_ON(c)
256 
257 //
258 // Linux module definitions
259 //
260 
261 #define likely
262 #define unlikely
263 
264 #define __init
265 #define __exit
266 
267 #define THIS_MODULE NULL
268 #define MODULE_LICENSE(x)
269 #define MODULE_ALIAS_NLS(x)
270 #define EXPORT_SYMBOL(x)
271 
272 
273 #define try_module_get(x) (TRUE)
274 #define module_put(x)
275 
276 #define module_init(X) int  __init module_##X() {return X();}
277 #define module_exit(X) void __exit module_##X() {X();}
278 
279 #define DECLARE_INIT(X) int  __init  module_##X(void)
280 #define DECLARE_EXIT(X) void __exit  module_##X(void)
281 
282 #define LOAD_MODULE(X) do {                             \
283                             rc = module_##X();          \
284                        } while(0)
285 
286 #define UNLOAD_MODULE(X) do {                           \
287                             module_##X();               \
288                          } while(0)
289 
290 #define LOAD_NLS    LOAD_MODULE
291 #define UNLOAD_NLS  UNLOAD_MODULE
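
/*
 * Illustrative sketch: module_init()/module_exit() wrap a Linux-style
 * init/exit pair into module_<name>() routines, which the driver invokes
 * through LOAD_MODULE()/UNLOAD_MODULE().  The demo module is hypothetical.
 */
#if 0
static int  __init init_nls_demo(void) { return 0; }
static void __exit exit_nls_demo(void) { }

module_init(init_nls_demo)              /* emits module_init_nls_demo() */
module_exit(exit_nls_demo)              /* emits module_exit_nls_demo() */

static int demo_load(void)
{
    int rc = 0;
    LOAD_MODULE(init_nls_demo);         /* rc = module_init_nls_demo(); */
    UNLOAD_MODULE(exit_nls_demo);       /* module_exit_nls_demo();      */
    return rc;
}
#endif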
292 
293 //
294 // spinlocks .....
295 //
296 
297 typedef struct _spinlock_t {
298 
299     KSPIN_LOCK  lock;
300     KIRQL       irql;
301 } spinlock_t;
302 
303 #define spin_lock_init(sl)    KeInitializeSpinLock(&((sl)->lock))
304 #define spin_lock(sl)         KeAcquireSpinLock(&((sl)->lock), &((sl)->irql))
305 #define spin_unlock(sl)       KeReleaseSpinLock(&((sl)->lock), (sl)->irql)
306 #define spin_lock_irqsave(sl, flags) do {spin_lock(sl); flags=(sl)->irql;} while(0)
307 #define spin_unlock_irqrestore(sl, flags) do {ASSERT((KIRQL)(flags)==(sl)->irql); spin_unlock(sl);} while(0)
308 
309 #define assert_spin_locked(x)   do {} while(0)
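
/*
 * Illustrative sketch: the spinlock shims map the Linux API onto a
 * KSPIN_LOCK and remember the previous IRQL inside the lock itself.
 * The demo names are hypothetical; spin_lock_init() must run once before
 * the lock is used.
 */
#if 0
static spinlock_t    demo_lock;
static unsigned long demo_count = 0;

static void demo_bump(void)
{
    unsigned long flags;

    spin_lock_irqsave(&demo_lock, flags);       /* raises IRQL, saves the old one */
    demo_count++;
    spin_unlock_irqrestore(&demo_lock, flags);  /* restores the saved IRQL        */
}
#endif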
310 
311 /*
312  * Does a critical section need to be broken due to another
313  * task waiting?: (technically does not depend on CONFIG_PREEMPT,
314  * but a general need for low latency)
315  */
316 static inline int spin_needbreak(spinlock_t *lock)
317 {
318 #ifdef CONFIG_PREEMPT
319     return spin_is_contended(lock);
320 #else
321     return 0;
322 #endif
323 }
324 
325 //
326 // bit operations
327 //
328 
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This implementation is atomic (it uses InterlockedOr) and returns the
 * previous value of the bit, which is why test_and_set_bit() can simply
 * call it.
 */
338 static inline int set_bit(int nr, volatile unsigned long *addr)
339 {
340     addr += (nr >> ORDER_PER_LONG);
341     nr &= (BITS_PER_LONG - 1);
342 
343     return !!(InterlockedOr(addr, (1 << nr)) & (1 << nr));
344 }
345 
346 
/**
 * clear_bit - Atomically clear a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * This implementation is atomic (it uses InterlockedAnd) and returns the
 * previous value of the bit, which is why test_and_clear_bit() can simply
 * call it.
 */
357 static inline int clear_bit(int nr, volatile unsigned long *addr)
358 {
359     addr += (nr >> ORDER_PER_LONG);
360     nr &= (BITS_PER_LONG - 1);
361 
362     return !!(InterlockedAnd(addr, ~(1 << nr)) & (1 << nr));
363 }
364 
365 /**
366  * test_and_clear_bit - Clear a bit and return its old value
367  * @nr: Bit to clear
368  * @addr: Address to count from
369  *
370  * This operation is atomic and cannot be reordered.
371  * It also implies a memory barrier.
372  */
373 static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
374 {
375     return clear_bit(nr, addr);
376 }
377 
/*
 *  test_bit - return whether a bit is currently set (non-atomic read)
 */
static inline int test_bit(int nr, volatile const unsigned long *addr)
382 {
383     return !!((1 << (nr & (BITS_PER_LONG - 1))) &
384               (addr[nr >> ORDER_PER_LONG]));
385 }
386 
387 /**
388  * test_and_set_bit - Set a bit and return its old value
389  * @nr: Bit to set
390  * @addr: Address to count from
391  *
392  * This operation is atomic and cannot be reordered.
393  * It also implies a memory barrier.
394  */
395 static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
396 {
397     return set_bit(nr, addr);
398 }
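
/*
 * Illustrative sketch: the bit helpers above operate on an array of
 * unsigned longs, the same layout ext2 uses for its block/inode bitmaps.
 * The demo bitmap is hypothetical.
 */
#if 0
static unsigned long demo_bitmap[4];    /* 4 * BITS_PER_LONG bits */

static int demo_claim_slot(int nr)
{
    if (test_and_set_bit(nr, demo_bitmap))
        return 0;                       /* bit already set: slot taken  */
    /* ... use the slot ... */
    clear_bit(nr, demo_bitmap);         /* atomically release it again  */
    return 1;
}
#endif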
399 
400 //
401 // list definition ...
402 //
403 
404 #include <linux/list.h>
405 
406 
/*********************************************
 *  linux scheduler related structures       *
 *********************************************/
410 
411 //
412 // task structure
413 //
414 
415 #define TASK_INTERRUPTIBLE      1
416 #define TASK_UNINTERRUPTIBLE    2
417 
418 struct task_struct {
419     pid_t pid;
420     pid_t tid;
421     char comm[32];
422     void * journal_info;
423 };
424 
425 extern struct task_struct *current;
426 
427 //
428 // scheduler routines
429 //
430 
431 
static inline int cond_resched(void) {
    return FALSE;
}
static inline int need_resched(void) {
    return FALSE;
}
438 
439 #define yield()        do {} while(0)
440 #define might_sleep()  do {} while(0)
441 
442 //
443 // mutex
444 //
445 
446 typedef struct mutex {
447     FAST_MUTEX  lock;
448 } mutex_t;
449 
450 #define mutex_init(x)   ExInitializeFastMutex(&((x)->lock))
451 #define mutex_lock(x)   ExAcquireFastMutex(&((x)->lock))
452 #define mutex_unlock(x) ExReleaseFastMutex(&((x)->lock))
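
/*
 * Illustrative sketch: the mutex shim is a FAST_MUTEX, so lock/unlock must
 * happen at IRQL <= APC_LEVEL and mutex_init() must run once beforehand.
 * The demo names are hypothetical.
 */
#if 0
static struct mutex demo_mutex;         /* set up once with mutex_init(&demo_mutex) */

static void demo_locked_update(void)
{
    mutex_lock(&demo_mutex);            /* ExAcquireFastMutex */
    /* ... touch shared state ... */
    mutex_unlock(&demo_mutex);          /* ExReleaseFastMutex */
}
#endif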
453 
454 
455 //
456 // wait_queue
457 //
458 
459 
460 typedef PVOID wait_queue_t;
461 
462 #define WQ_FLAG_EXCLUSIVE	    0x01
463 #define WQ_FLAG_AUTO_REMOVAL	0x02
464 
465 struct __wait_queue {
466     unsigned int    flags;
467     void *          private;
468     KEVENT          event;
469     struct list_head task_list;
470 };
471 
472 
473 #define DEFINE_WAIT(name) \
474 	wait_queue_t name = (PVOID)wait_queue_create();
475 
476 /*
477 struct wait_bit_key {
478 	void *flags;
479 	int bit_nr;
480 };
481 
482 struct wait_bit_queue {
483 	struct wait_bit_key key;
484 	wait_queue_t wait;
485 };
486 */
487 
488 struct __wait_queue_head {
489     spinlock_t lock;
490     struct list_head task_list;
491 };
492 typedef struct __wait_queue_head wait_queue_head_t;
493 
494 #define is_sync_wait(wait)  (TRUE)
495 #define set_current_state(state) do {} while(0)
496 #define __set_current_state(state)  do {} while(0)
497 
498 void init_waitqueue_head(wait_queue_head_t *q);
499 int wake_up(wait_queue_head_t *queue);
500 
501 
502 /*
503  * Waitqueues which are removed from the waitqueue_head at wakeup time
504  */
505 struct __wait_queue * wait_queue_create();
506 void wait_queue_destroy(struct __wait_queue *);
507 
508 void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
509 void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
510 void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
511 int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
512 int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
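
/*
 * Illustrative sketch of the waiter/waker pattern these declarations
 * support: a waiter allocates a __wait_queue entry with DEFINE_WAIT(),
 * parks on a wait_queue_head_t, and a waker releases it with wake_up().
 * The queue must first be set up with init_waitqueue_head(); the demo
 * names are hypothetical.
 */
#if 0
static wait_queue_head_t demo_waitq;

static void demo_wait_for_event(void)
{
    DEFINE_WAIT(wait);

    prepare_to_wait(&demo_waitq, &wait, TASK_UNINTERRUPTIBLE);
    /* the implementation blocks on the KEVENT embedded in the entry
       until a waker calls wake_up(&demo_waitq) */
    finish_wait(&demo_waitq, &wait);
}

static void demo_signal_event(void)
{
    wake_up(&demo_waitq);
}
#endif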
513 
514 
515 //
516 // timer structure
517 //
518 
519 struct timer_list {
520     struct list_head entry;
521     unsigned long expires;
522 
523     void (*function)(unsigned long);
524     unsigned long data;
525 
526 #ifdef CONFIG_TIMER_STATS
527     void *start_site;
528     char start_comm[16];
529     int start_pid;
530 #endif
531 };
532 
533 
534 typedef struct kmem_cache kmem_cache_t;
535 
536 struct block_device {
537 
538     unsigned long           bd_flags;   /* flags */
539     atomic_t		        bd_count;   /* reference count */
540     PDEVICE_OBJECT          bd_dev;     /* device object */
541     ANSI_STRING             bd_name;    /* name in ansi string */
542     DISK_GEOMETRY           bd_geo;     /* disk geometry */
543     PARTITION_INFORMATION   bd_part;    /* partition information */
    void *                  bd_priv;    /* pointer to EXT2_VCB;
                                           NULL if it's a journal dev */
    PFILE_OBJECT            bd_volume;  /* stream file object of the volume */
547     LARGE_MCB               bd_extents; /* dirty extents */
548 
549     kmem_cache_t *          bd_bh_cache;/* memory cache for buffer_head */
550     ERESOURCE               bd_bh_lock; /* lock for bh tree and reaper list */
551     struct rb_root          bd_bh_root; /* buffer_head red-black tree root */
552     LIST_ENTRY              bd_bh_free; /* reaper list */
553     KEVENT                  bd_bh_notify; /* notification event for cleanup */
554 };
555 
556 //
557 // page information
558 //
559 
// Pages are handled as follows: when a struct page is allocated, an extra
// PAGE_SIZE bytes are allocated at the end of the structure - that is where
// all the buffer heads go,
// hence -> page_address(page) = page + sizeof(page)
564 #define page_address(_page) ((char*)_page + sizeof(struct page))
565 
566 typedef struct page {
567     void           *addr;
568     void           *mapping;
569     void           *private;
570     atomic_t        count;
571     __u32           index;
572     __u32           flags;
573 } mem_map_t;
574 
575 #define get_page(p) atomic_inc(&(p)->count)
576 
577 #define PG_locked		 0	/* Page is locked. Don't touch. */
578 #define PG_error		 1
579 #define PG_referenced		 2
580 #define PG_uptodate		 3
581 #define PG_dirty		 4
582 #define PG_unused		 5
583 #define PG_lru			 6
584 #define PG_active		 7
585 #define PG_slab			 8
586 #define PG_skip			10
587 #define PG_highmem		11
588 #define PG_checked		12	/* kill me in 2.5.<early>. */
589 #define PG_arch_1		13
590 #define PG_reserved		14
591 #define PG_launder		15	/* written out by VM pressure.. */
592 #define PG_fs_1			16	/* Filesystem specific */
593 
594 #ifndef arch_set_page_uptodate
595 #define arch_set_page_uptodate(page)
596 #endif
597 
598 /* Make it prettier to test the above... */
599 #define UnlockPage(page)        unlock_page(page)
600 #define Page_Uptodate(page)     test_bit(PG_uptodate, &(page)->flags)
601 #define SetPageUptodate(page) \
602 	do {								\
603 		arch_set_page_uptodate(page);				\
604 		set_bit(PG_uptodate, &(page)->flags);			\
605 	} while (0)
606 #define ClearPageUptodate(page) clear_bit(PG_uptodate, &(page)->flags)
607 #define PageDirty(page)         test_bit(PG_dirty, &(page)->flags)
608 #define SetPageDirty(page)      set_bit(PG_dirty, &(page)->flags)
609 #define ClearPageDirty(page)    clear_bit(PG_dirty, &(page)->flags)
610 #define PageLocked(page)        test_bit(PG_locked, &(page)->flags)
611 #define LockPage(page)          set_bit(PG_locked, &(page)->flags)
612 #define TryLockPage(page)       test_and_set_bit(PG_locked, &(page)->flags)
613 #define PageChecked(page)       test_bit(PG_checked, &(page)->flags)
614 #define SetPageChecked(page)    set_bit(PG_checked, &(page)->flags)
615 #define ClearPageChecked(page)  clear_bit(PG_checked, &(page)->flags)
616 #define PageLaunder(page)       test_bit(PG_launder, &(page)->flags)
617 #define SetPageLaunder(page)    set_bit(PG_launder, &(page)->flags)
618 #define ClearPageLaunder(page)  clear_bit(PG_launder, &(page)->flags)
619 #define ClearPageArch1(page)    clear_bit(PG_arch_1, &(page)->flags)
620 
621 #define PageError(page)		test_bit(PG_error, &(page)->flags)
622 #define SetPageError(page)	set_bit(PG_error, &(page)->flags)
623 #define ClearPageError(page)	clear_bit(PG_error, &(page)->flags)
624 #define PageReferenced(page)    test_bit(PG_referenced, &(page)->flags)
625 #define SetPageReferenced(page) set_bit(PG_referenced, &(page)->flags)
626 #define ClearPageReferenced(page)       clear_bit(PG_referenced, &(page)->flags)
627 
628 #define PageActive(page)        test_bit(PG_active, &(page)->flags)
629 #define SetPageActive(page)     set_bit(PG_active, &(page)->flags)
630 #define ClearPageActive(page)   clear_bit(PG_active, &(page)->flags)
631 
632 
633 extern unsigned long __get_free_pages(unsigned int gfp_mask, unsigned int order);
634 #define __get_free_page(gfp_mask) \
635 		__get_free_pages((gfp_mask),0)
636 
637 extern void __free_pages(struct page *page, unsigned int order);
638 extern void free_pages(unsigned long addr, unsigned int order);
639 
640 #define __free_page(page) __free_pages((page), 0)
641 #define free_page(addr) free_pages((addr),0)
642 
643 #ifndef __REACTOS__
644 extern void truncate_inode_pages(struct address_space *, loff_t);
645 #endif
646 
647 #define __GFP_HIGHMEM   0x02
648 
649 #define __GFP_WAIT	0x10	/* Can wait and reschedule? */
650 #define __GFP_HIGH	0x20	/* Should access emergency pools? */
651 #define __GFP_IO	0x40	/* Can start low memory physical IO? */
652 #define __GFP_HIGHIO	0x80	/* Can start high mem physical IO? */
653 #define __GFP_FS	0x100	/* Can call down to low-level FS? */
654 
655 #define GFP_ATOMIC	(__GFP_HIGH)
656 #define GFP_USER	(             __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
657 #define GFP_HIGHUSER    (             __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS | __GFP_HIGHMEM)
658 #define GFP_KERNEL	(__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
659 #define GFP_NOFS    0
660 #define __GFP_NOFAIL 0
661 
662 
663 #define KM_USER0 0
664 
665 //
666 // buffer head definitions
667 //
668 
669 enum bh_state_bits {
670     BH_Uptodate,	        /* Contains valid data */
671     BH_Dirty,	            /* Is dirty */
672     BH_Verified,	 /* Is verified */
673     BH_Lock,	            /* Is locked */
674     BH_Req,		            /* Has been submitted for I/O */
675     BH_Uptodate_Lock,       /* Used by the first bh in a page, to serialise
676 			                 * IO completion of other buffers in the page
677 			                 */
678 
679     BH_Mapped,	            /* Has a disk mapping */
680     BH_New,		            /* Disk mapping was newly created by get_block */
681     BH_Async_Read,	        /* Is under end_buffer_async_read I/O */
682     BH_Async_Write,	        /* Is under end_buffer_async_write I/O */
683     BH_Delay,	            /* Buffer is not yet allocated on disk */
684     BH_Boundary,	        /* Block is followed by a discontiguity */
685     BH_Write_EIO,	        /* I/O error on write */
686     BH_Ordered,	            /* ordered write */
687     BH_Eopnotsupp,	        /* operation not supported (barrier) */
688     BH_Unwritten,	        /* Buffer is allocated on disk but not written */
689 
690     BH_PrivateStart,        /* not a state bit, but the first bit available
691 			                 * for private allocation by other entities
692 			                 */
693 };
694 
695 #define PAGE_CACHE_SIZE  (PAGE_SIZE)
696 #define PAGE_CACHE_SHIFT (12)
697 #define MAX_BUF_PER_PAGE (PAGE_CACHE_SIZE / 512)
698 
699 #ifdef __REACTOS__
700 struct buffer_head;
701 #endif
702 typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);
703 
704 /*
705  * Historically, a buffer_head was used to map a single block
706  * within a page, and of course as the unit of I/O through the
707  * filesystem and block layers.  Nowadays the basic I/O unit
708  * is the bio, and buffer_heads are used for extracting block
709  * mappings (via a get_block_t call), for tracking state within
710  * a page (via a page_mapping) and for wrapping bio submission
711  * for backward compatibility reasons (e.g. submit_bh).
712  */
713 struct buffer_head {
714     LIST_ENTRY    b_link;                   /* to be added to reaper list */
715     unsigned long b_state;		            /* buffer state bitmap (see above) */
716     struct page *b_page;                    /* the page this bh is mapped to */
717     PMDL         b_mdl;                     /* MDL of the locked buffer */
718     void	    *b_bcb;                     /* BCB of the buffer */
719 
720     // kdev_t b_dev;                        /* device (B_FREE = free) */
721     struct block_device *b_bdev;            /* block device object */
722 
723     blkcnt_t b_blocknr;		        /* start block number */
724     size_t        b_size;			        /* size of mapping */
725     char *        b_data;			        /* pointer to data within the page */
726     bh_end_io_t *b_end_io;		        /* I/O completion */
727     void *b_private;		                /* reserved for b_end_io */
728     // struct list_head b_assoc_buffers;    /* associated with another mapping */
729     // struct address_space *b_assoc_map;   /* mapping this buffer is associated with */
730     atomic_t b_count;		                /* users using this buffer_head */
731     struct rb_node b_rb_node;               /* Red-black tree node entry */
732 
733     LARGE_INTEGER  b_ts_creat;              /* creation time*/
734     LARGE_INTEGER  b_ts_drop;               /* drop time (to be released) */
735 };
736 
737 
738 /*
739  * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
740  * and buffer_foo() functions.
741  */
742 #define BUFFER_FNS(bit, name)						\
743 static inline void set_buffer_##name(struct buffer_head *bh)		\
744 {									\
745 	set_bit(BH_##bit, &(bh)->b_state);				\
746 }									\
747 static inline void clear_buffer_##name(struct buffer_head *bh)		\
748 {									\
749 	clear_bit(BH_##bit, &(bh)->b_state);				\
750 }									\
751 static inline int buffer_##name(const struct buffer_head *bh)		\
752 {									\
753 	return test_bit(BH_##bit, &(bh)->b_state);			\
754 }
755 
756 /*
757  * test_set_buffer_foo() and test_clear_buffer_foo()
758  */
759 #define TAS_BUFFER_FNS(bit, name)					\
760 static inline int test_set_buffer_##name(struct buffer_head *bh)	\
761 {									\
762 	return test_and_set_bit(BH_##bit, &(bh)->b_state);		\
763 }									\
764 static inline int test_clear_buffer_##name(struct buffer_head *bh)	\
765 {									\
766 	return test_and_clear_bit(BH_##bit, &(bh)->b_state);		\
}
768 
769 /*
770  * Emit the buffer bitops functions.   Note that there are also functions
771  * of the form "mark_buffer_foo()".  These are higher-level functions which
772  * do something in addition to setting a b_state bit.
773  */
774 BUFFER_FNS(Uptodate, uptodate)
775 BUFFER_FNS(Dirty, dirty)
776 TAS_BUFFER_FNS(Dirty, dirty)
777 BUFFER_FNS(Verified, verified)
778 BUFFER_FNS(Lock, locked)
779 TAS_BUFFER_FNS(Lock, locked)
780 BUFFER_FNS(Req, req)
781 TAS_BUFFER_FNS(Req, req)
782 BUFFER_FNS(Mapped, mapped)
783 BUFFER_FNS(New, new)
784 BUFFER_FNS(Async_Read, async_read)
785 BUFFER_FNS(Async_Write, async_write)
786 BUFFER_FNS(Delay, delay)
787 BUFFER_FNS(Boundary, boundary)
788 BUFFER_FNS(Write_EIO, write_io_error)
789 BUFFER_FNS(Ordered, ordered)
790 BUFFER_FNS(Eopnotsupp, eopnotsupp)
791 BUFFER_FNS(Unwritten, unwritten)
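
/*
 * Illustrative sketch: BUFFER_FNS(Dirty, dirty), for instance, expands to
 * set_buffer_dirty(), clear_buffer_dirty() and buffer_dirty(), all toggling
 * or testing BH_Dirty in bh->b_state; TAS_BUFFER_FNS adds the
 * test_set/test_clear variants.  The demo function is hypothetical.
 */
#if 0
static void demo_mark_valid(struct buffer_head *bh)
{
    if (!buffer_uptodate(bh))           /* test BH_Uptodate        */
        set_buffer_uptodate(bh);        /* set  BH_Uptodate        */
    set_buffer_dirty(bh);               /* schedule for write-back */
}
#endif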
792 
793 #define bh_offset(bh)		((unsigned long)(bh)->b_data & ~PAGE_MASK)
794 #define touch_buffer(bh)	mark_page_accessed(bh->b_page)
795 
796 /* If we *know* page->private refers to buffer_heads */
797 
798 #define page_buffers(page)					\
799 	(                                       \
800 		BUG_ON(!PagePrivate(page)),			\
801 		((struct buffer_head *)page_private(page))	\
802 	)
803 #define page_has_buffers(page)	PagePrivate(page)
804 
805 
806 /*
807  * Declarations
808  */
809 
810 void mark_buffer_dirty(struct buffer_head *bh);
811 void init_buffer(struct buffer_head *, bh_end_io_t *, void *);
812 void set_bh_page(struct buffer_head *bh,
813                  struct page *page, unsigned long offset);
814 int try_to_free_buffers(struct page *);
815 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
816                                                    int retry);
817 void create_empty_buffers(struct page *, unsigned long,
818                           unsigned long b_state);
819 
820 /* Things to do with buffers at mapping->private_list */
821 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
822 int inode_has_buffers(struct inode *);
823 void invalidate_inode_buffers(struct inode *);
824 int remove_inode_buffers(struct inode *inode);
825 #ifndef __REACTOS__
826 int sync_mapping_buffers(struct address_space *mapping);
827 #endif
828 void unmap_underlying_metadata(struct block_device *bdev, sector_t block);
829 
830 void mark_buffer_async_write(struct buffer_head *bh);
831 void invalidate_bdev(struct block_device *);
832 int sync_blockdev(struct block_device *bdev);
833 void __wait_on_buffer(struct buffer_head *);
834 wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
835 int fsync_bdev(struct block_device *);
836 struct super_block *freeze_bdev(struct block_device *);
837 void thaw_bdev(struct block_device *, struct super_block *);
838 int fsync_super(struct super_block *);
839 int fsync_no_super(struct block_device *);
840 struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
841                                                  unsigned long size);
842 struct buffer_head *get_block_bh(struct block_device *bdev, sector_t block,
843                                  unsigned long size, int zero);
844 struct buffer_head *__getblk(struct block_device *bdev, sector_t block,
845                                          unsigned long size);
846 void __brelse(struct buffer_head *);
847 void __bforget(struct buffer_head *);
848 void __breadahead(struct block_device *, sector_t block, unsigned int size);
849 struct buffer_head *__bread(struct block_device *, sector_t block, unsigned size);
850 void invalidate_bh_lrus(void);
851 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
852 void free_buffer_head(struct buffer_head * bh);
853 void unlock_buffer(struct buffer_head *bh);
854 void __lock_buffer(struct buffer_head *bh);
855 void ll_rw_block(int, int, struct buffer_head * bh[]);
856 int sync_dirty_buffer(struct buffer_head *bh);
857 int submit_bh(int, struct buffer_head *);
858 void write_boundary_block(struct block_device *bdev,
859                           sector_t bblock, unsigned blocksize);
860 int bh_uptodate_or_lock(struct buffer_head *bh);
861 int bh_submit_read(struct buffer_head *bh);
/* the extents-cache buffer heads below are managed separately */
863 struct buffer_head *extents_bread(struct super_block *sb, sector_t block);
864 struct buffer_head *extents_bwrite(struct super_block *sb, sector_t block);
865 void extents_mark_buffer_dirty(struct buffer_head *bh);
866 void extents_brelse(struct buffer_head *bh);
867 void extents_bforget(struct buffer_head *bh);
868 void buffer_head_remove(struct block_device *bdev, struct buffer_head *bh);
869 
870 extern int buffer_heads_over_limit;
871 
872 /*
873  * Generic address_space_operations implementations for buffer_head-backed
874  * address_spaces.
875  */
876 
877 #if 0
878 
879 int block_write_full_page(struct page *page, get_block_t *get_block,
880                           struct writeback_control *wbc);
881 int block_read_full_page(struct page*, get_block_t*);
882 int block_write_begin(struct file *, struct address_space *,
883                       loff_t, unsigned, unsigned,
884                       struct page **, void **, get_block_t*);
885 int block_write_end(struct file *, struct address_space *,
886                     loff_t, unsigned, unsigned,
887                     struct page *, void *);
888 int generic_write_end(struct file *, struct address_space *,
889                       loff_t, unsigned, unsigned,
890                       struct page *, void *);
891 
892 int block_prepare_write(struct page*, unsigned, unsigned, get_block_t*);
893 int cont_write_begin(struct file *, struct address_space *, loff_t,
894                      unsigned, unsigned, struct page **, void **,
895                      get_block_t *, loff_t *);
896 int block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
897                        get_block_t get_block);
898 sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
899 int generic_commit_write(struct file *, struct page *, unsigned, unsigned);
900 int block_truncate_page(struct address_space *, loff_t, get_block_t *);
901 int file_fsync(struct file *, struct dentry *, int);
902 int nobh_write_begin(struct file *, struct address_space *,
903                      loff_t, unsigned, unsigned,
904                      struct page **, void **, get_block_t*);
905 int nobh_write_end(struct file *, struct address_space *,
906                    loff_t, unsigned, unsigned,
907                    struct page *, void *);
908 int nobh_truncate_page(struct address_space *, loff_t, get_block_t *);
909 int nobh_writepage(struct page *page, get_block_t *get_block,
910                    struct writeback_control *wbc);
911 int generic_cont_expand_simple(struct inode *inode, loff_t size);
912 #endif
913 
914 void block_invalidatepage(struct page *page, unsigned long offset);
915 void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
916 int  block_commit_write(struct page *page, unsigned from, unsigned to);
917 void block_sync_page(struct page *);
918 
919 void buffer_init(void);
920 
921 /*
922  * inline definitions
923  */
924 #if 0
925 static inline void attach_page_buffers(struct page *page,
926                                        struct buffer_head *head)
927 {
928     page_cache_get(page);
929     SetPagePrivate(page);
930     set_page_private(page, (unsigned long)head);
931 }
932 #endif
933 
934 static inline void get_bh(struct buffer_head *bh)
935 {
936     atomic_inc(&bh->b_count);
937 }
938 
939 static inline void put_bh(struct buffer_head *bh)
940 {
941     if (bh)
942         __brelse(bh);
943 }
944 
945 static inline void brelse(struct buffer_head *bh)
946 {
947     if (bh)
948         __brelse(bh);
949 }
950 
951 static inline void fini_bh(struct buffer_head **bh)
952 {
953     if (bh && *bh) {
954         brelse(*bh);
955         *bh = NULL;
956     }
957 }
958 
959 static inline void bforget(struct buffer_head *bh)
960 {
961     if (bh)
962         __bforget(bh);
963 }
964 
965 static inline struct buffer_head *
966             sb_getblk(struct super_block *sb, sector_t block)
967 {
968     return get_block_bh(sb->s_bdev, block, sb->s_blocksize, 0);
969 }
970 
971 static inline struct buffer_head *
972             sb_getblk_zero(struct super_block *sb, sector_t block)
973 {
974     return get_block_bh(sb->s_bdev, block, sb->s_blocksize, 1);
975 }
976 
977 static inline struct buffer_head *
978             sb_bread(struct super_block *sb, sector_t block)
979 {
980     struct buffer_head *bh = __getblk(sb->s_bdev, block, sb->s_blocksize);
981     if (!bh)
982 	    return NULL;
983     if (!buffer_uptodate(bh) && (bh_submit_read(bh) < 0)) {
984         brelse(bh);
985 	return NULL;
986     }
987     return bh;
988 }
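
/*
 * Illustrative sketch: reading and releasing one metadata block through the
 * buffer-head layer.  The demo function and block number are hypothetical.
 */
#if 0
static int demo_read_block(struct super_block *sb, sector_t blk)
{
    struct buffer_head *bh = sb_bread(sb, blk);
    if (!bh)
        return -EIO;                    /* read failed or out of memory   */

    /* ... inspect or modify bh->b_data (sb->s_blocksize bytes) ... */

    mark_buffer_dirty(bh);              /* only if the block was modified */
    brelse(bh);                         /* drop our reference             */
    return 0;
}
#endif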
989 
990 static inline struct buffer_head *
991             sb_find_get_block(struct super_block *sb, sector_t block)
992 {
993     return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
994 }
995 
996 static inline void
997 map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
998 {
999     set_buffer_mapped(bh);
1000     bh->b_bdev = sb->s_bdev;
1001     bh->b_blocknr = block;
1002     bh->b_size = sb->s_blocksize;
1003 }
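
/*
 * Illustrative sketch: a get_block-style routine fills in the mapping of a
 * file-relative block with map_bh().  The fixed offset used here is a
 * hypothetical stand-in for a real block lookup.
 */
#if 0
static int demo_get_block(struct inode *inode, sector_t iblock,
                          struct buffer_head *bh_result, int create)
{
    sector_t disk_block = iblock + 100; /* hypothetical on-disk location */

    map_bh(bh_result, inode->i_sb, disk_block);
    return 0;
}
#endif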
1004 
1005 /*
1006  * Calling wait_on_buffer() for a zero-ref buffer is illegal, so we call into
1007  * __wait_on_buffer() just to trip a debug check.  Because debug code in inline
1008  * functions is bloaty.
1009  */
1010 
1011 static inline void wait_on_buffer(struct buffer_head *bh)
1012 {
1013     might_sleep();
1014     if (buffer_locked(bh) || atomic_read(&bh->b_count) == 0)
1015         __wait_on_buffer(bh);
1016 }
1017 
1018 static inline void lock_buffer(struct buffer_head *bh)
1019 {
1020     might_sleep();
1021     if (test_set_buffer_locked(bh))
1022         __lock_buffer(bh);
1023 }
1024 
1025 extern int __set_page_dirty_buffers(struct page *page);
1026 
1027 //
1028 // unicode character
1029 //
1030 
1031 struct nls_table {
1032     char *charset;
1033     char *alias;
1034     int (*uni2char) (wchar_t uni, unsigned char *out, int boundlen);
1035     int (*char2uni) (const unsigned char *rawstring, int boundlen,
1036                      wchar_t *uni);
1037     unsigned char *charset2lower;
1038     unsigned char *charset2upper;
1039     struct module *owner;
1040     struct nls_table *next;
1041 };
1042 
/* the maximum number of octets a single character can occupy in any charset */
1044 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
1045 
1046 /* nls.c */
1047 extern int register_nls(struct nls_table *);
1048 extern int unregister_nls(struct nls_table *);
1049 extern struct nls_table *load_nls(char *);
1050 extern void unload_nls(struct nls_table *);
1051 extern struct nls_table *load_nls_default(void);
1052 
1053 extern int utf8_mbtowc(wchar_t *, const __u8 *, int);
1054 extern int utf8_mbstowcs(wchar_t *, const __u8 *, int);
1055 extern int utf8_wctomb(__u8 *, wchar_t, int);
1056 extern int utf8_wcstombs(__u8 *, const wchar_t *, int);
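
/*
 * Illustrative sketch: loading a codepage table and converting one
 * multibyte character to a Unicode code point.  The codepage name and byte
 * sequence are hypothetical.
 */
#if 0
static void demo_nls(void)
{
    struct nls_table *nls = load_nls("cp936");
    if (nls) {
        wchar_t       uni   = 0;
        unsigned char mb[2] = { 0xb0, 0xa1 };

        /* char2uni returns the number of bytes consumed, or < 0 on error */
        int consumed = nls->char2uni(mb, sizeof(mb), &uni);

        unload_nls(nls);
    }
}
#endif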
1057 
1058 //
1059 //  kernel jiffies
1060 //
1061 
1062 #define HZ  (100)
1063 
static inline __u32 JIFFIES(void)
{
    LARGE_INTEGER Tick;

    KeQueryTickCount(&Tick);
    Tick.QuadPart *= KeQueryTimeIncrement();    /* tick count -> 100ns units  */
    Tick.QuadPart /= (10000000 / HZ);           /* 100ns units -> 1/HZ ticks  */

    return Tick.LowPart;
}
1074 
1075 #define jiffies JIFFIES()
1076 
1077 //
1078 // memory routines
1079 //
1080 
1081 #ifdef _WIN2K_TARGET_
1082 
1083 typedef GUID UUID;
1084 NTKERNELAPI
1085 NTSTATUS
1086 ExUuidCreate(
1087     OUT UUID *Uuid
1088 );
1089 
1090 NTKERNELAPI
1091 PVOID
1092 NTAPI
1093 ExAllocatePoolWithTag(
1094     IN POOL_TYPE PoolType,
1095     IN SIZE_T NumberOfBytes,
1096     IN ULONG Tag
1097 );
1098 
1099 #define  ExFreePoolWithTag(_P, _T) ExFreePool(_P)
1100 #endif
1101 
1102 PVOID Ext2AllocatePool(
1103     IN POOL_TYPE PoolType,
1104     IN SIZE_T NumberOfBytes,
1105     IN ULONG Tag
1106 );
1107 
1108 VOID
1109 Ext2FreePool(
1110     IN PVOID P,
1111     IN ULONG Tag
1112 );
1113 
1114 void *kzalloc(int size, int flags);
1115 #define kmalloc(size, gfp) Ext2AllocatePool(NonPagedPool, size, 'JBDM')
1116 #define kfree(p) Ext2FreePool(p, 'JBDM')
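
/*
 * Illustrative sketch: kmalloc/kzalloc/kfree are thin wrappers over the
 * non-paged pool; the gfp flags are accepted for source compatibility but
 * otherwise ignored.  The demo structure is hypothetical.
 */
#if 0
struct demo_ctx {
    int   id;
    ULONG flags;
};

static int demo_alloc(void)
{
    struct demo_ctx *ctx = kzalloc(sizeof(struct demo_ctx), GFP_NOFS);
    if (!ctx)
        return -ENOMEM;
    /* ... */
    kfree(ctx);
    return 0;
}
#endif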
1117 
1118 
1119 /* memory slab */
1120 
#define SLAB_HWCACHE_ALIGN  0x00002000U /* align objects on hardware cache lines */
1122 #define SLAB_KERNEL         0x00000001U
1123 #define SLAB_TEMPORARY      0x00000002U
1124 
1125 typedef void (*kmem_cache_cb_t)(void*, kmem_cache_t *, unsigned long);
1126 
1127 struct kmem_cache {
1128     CHAR                    name[32];
1129     ULONG                   flags;
1130     ULONG                   size;
1131     atomic_t                count;
1132     atomic_t                acount;
1133     NPAGED_LOOKASIDE_LIST   la;
1134     kmem_cache_cb_t         constructor;
1135 };
1136 
1137 
1138 kmem_cache_t *
1139 kmem_cache_create(
1140     const char *name,
1141     size_t size,
1142     size_t offset,
1143     unsigned long flags,
1144     kmem_cache_cb_t ctor
1145 );
1146 
1147 void* kmem_cache_alloc(kmem_cache_t *kc, int flags);
1148 void  kmem_cache_free(kmem_cache_t *kc, void *p);
1149 int   kmem_cache_destroy(kmem_cache_t *kc);
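
/*
 * Illustrative sketch: the kmem_cache shims sit on top of a non-paged
 * lookaside list.  The cache name and object type are hypothetical.
 */
#if 0
struct demo_obj {
    LIST_ENTRY link;
    ULONG      state;
};

static void demo_cache(void)
{
    kmem_cache_t *cachep = kmem_cache_create("demo_objs",
                                             sizeof(struct demo_obj),
                                             0, SLAB_TEMPORARY, NULL);
    if (cachep) {
        struct demo_obj *obj = kmem_cache_alloc(cachep, GFP_NOFS);
        if (obj)
            kmem_cache_free(cachep, obj);
        kmem_cache_destroy(cachep);
    }
}
#endif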
1150 
1151 
1152 //
1153 // block device
1154 //
1155 
1156 #define BDEVNAME_SIZE      32      /* Largest string for a blockdev identifier */
1157 
1158 //
1159 // ll_rw_block ....
1160 //
1161 
1162 
1163 #define RW_MASK         1
1164 #define RWA_MASK        2
1165 #define READ 0
1166 #define WRITE 1
1167 #define READA 2         /* read-ahead  - don't block if no resources */
1168 #define SWRITE 3        /* for ll_rw_block() - wait for buffer lock */
1169 #define READ_SYNC       (READ | (1 << BIO_RW_SYNC))
1170 #define READ_META       (READ | (1 << BIO_RW_META))
1171 #define WRITE_SYNC      (WRITE | (1 << BIO_RW_SYNC))
1172 #define WRITE_BARRIER   ((1 << BIO_RW) | (1 << BIO_RW_BARRIER))
1173 
1174 //
1175 // timer routines
1176 //
1177 
1178 /*
1179  *      These inlines deal with timer wrapping correctly. You are
1180  *      strongly encouraged to use them
1181  *      1. Because people otherwise forget
1182  *      2. Because if the timer wrap changes in future you won't have to
1183  *         alter your driver code.
1184  *
1185  * time_after(a,b) returns true if the time a is after time b.
1186  *
1187  * Do this with "<0" and ">=0" to only test the sign of the result. A
1188  * good compiler would generate better code (and a really good compiler
1189  * wouldn't care). Gcc is currently neither.
1190  */
1191 #define typecheck(x, y) (TRUE)
1192 
1193 #define time_after(a,b)         \
1194         (typecheck(unsigned long, a) && \
1195          typecheck(unsigned long, b) && \
1196          ((long)(b) - (long)(a) < 0))
1197 #define time_before(a,b)        time_after(b,a)
1198 
1199 #define time_after_eq(a,b)      \
1200         (typecheck(unsigned long, a) && \
1201          typecheck(unsigned long, b) && \
1202          ((long)(a) - (long)(b) >= 0))
1203 #define time_before_eq(a,b)     time_after_eq(b,a)
1204 
1205 #define time_in_range(a,b,c) \
1206         (time_after_eq(a,b) && \
1207          time_before_eq(a,c))
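
/*
 * Illustrative sketch: a wrap-safe timeout test built from jiffies and
 * time_after().  HZ is 100 here, so 5 * HZ ticks is five seconds.
 */
#if 0
static int demo_timed_out(unsigned long deadline)
{
    /* deadline was computed earlier as:  deadline = jiffies + 5 * HZ; */
    return time_after(jiffies, deadline);
}
#endif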
1208 
1209 #define smp_rmb()  do {}while(0)
1210 
1211 
1212 static inline __u32 do_div64 (__u64 * n, __u64 b)
1213 {
1214     __u64 mod;
1215 
1216     mod = *n % b;
1217     *n = *n / b;
1218     return (__u32) mod;
1219 }
1220 #define do_div(n, b) do_div64(&(n), (__u64)b)
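
/*
 * Illustrative sketch: do_div() divides the 64-bit variable in place and
 * returns the remainder, following the Linux calling convention.
 */
#if 0
static void demo_div(void)
{
    __u64 bytes = 1000000;
    __u32 rem   = do_div(bytes, 4096);  /* bytes == 244, rem == 576 */
}
#endif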
1221 
1222 #endif // _EXT2_MODULE_HEADER_
1223