xref: /reactos/drivers/filesystems/ext2/src/linux.c (revision ba3f0743)
1 /*
2  * COPYRIGHT:        See COPYRIGHT.TXT
3  * PROJECT:          Ext2 File System Driver for WinNT/2K/XP
4  * FILE:             linux.c
5  * PROGRAMMER:       Matt Wu <mattwu@163.com>
6  * HOMEPAGE:         http://www.ext2fsd.com
7  * UPDATE HISTORY:
8  */
9 
10 /* INCLUDES *****************************************************************/
11 
12 #include <ext2fs.h>
13 #include <linux/jbd.h>
14 #include <linux/errno.h>
15 
16 /* GLOBALS ***************************************************************/
17 
18 extern PEXT2_GLOBAL Ext2Global;
19 
20 /* DEFINITIONS *************************************************************/
21 
22 #ifdef ALLOC_PRAGMA
23 #pragma alloc_text(PAGE, kzalloc)
24 #endif
25 
26 struct task_struct current_task = {
27     /* pid  */ 0,
28     /* tid  */ 1,
29     /* comm */ "current\0",
30     /* journal_info */ NULL
31 };
32 struct task_struct *current = &current_task;
33 
34 void *kzalloc(int size, int flags)
35 {
36     void *buffer = kmalloc(size, flags);
37     if (buffer) {
38         memset(buffer, 0, size);
39     }
40     return buffer;
41 }
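/*
 * Illustrative sketch (not part of the driver): kzalloc() simply pairs
 * kmalloc() with memset(), so a caller that needs zero-filled memory can
 * request it in one step. The demo helper below is hypothetical.
 */
#if 0
static int demo_kzalloc_usage(void)
{
    void *ctx = kzalloc(128, GFP_KERNEL);   /* zero-filled 128-byte buffer */
    if (!ctx)
        return -ENOMEM;

    /* ... use ctx ... */

    kfree(ctx);
    return 0;
}
#endif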
42 
43 //
44 // slab routines
45 //
46 
47 kmem_cache_t *
48 kmem_cache_create(
49     const char *    name,
50     size_t          size,
51     size_t          offset,
52     unsigned long   flags,
53     kmem_cache_cb_t ctor
54 )
55 {
56     kmem_cache_t *kc = NULL;
57 
58     kc = kmalloc(sizeof(kmem_cache_t), GFP_KERNEL);
59     if (kc == NULL) {
60         goto errorout;
61     }
62 
63     memset(kc, 0, sizeof(kmem_cache_t));
64     ExInitializeNPagedLookasideList(
65         &kc->la,
66         NULL,
67         NULL,
68         0,
69         size,
70         'JBKC',
71         0);
72 
73     kc->size = size;
74     strncpy(kc->name, name, 31);
75     kc->constructor = ctor;
76 
77 errorout:
78 
79     return kc;
80 }
81 
82 int kmem_cache_destroy(kmem_cache_t * kc)
83 {
84     ASSERT(kc != NULL);
85 
86     ExDeleteNPagedLookasideList(&(kc->la));
87     kfree(kc);
88 
89     return 0;
90 }
91 
92 void* kmem_cache_alloc(kmem_cache_t *kc, int flags)
93 {
94     PVOID  ptr = NULL;
95     ptr = ExAllocateFromNPagedLookasideList(&(kc->la));
96     if (ptr) {
97         atomic_inc(&kc->count);
98         atomic_inc(&kc->acount);
99     }
100     return ptr;
101 }
102 
103 void kmem_cache_free(kmem_cache_t *kc, void *p)
104 {
105     if (p) {
106         atomic_dec(&kc->count);
107         ExFreeToNPagedLookasideList(&(kc->la), p);
108     }
109 }
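/*
 * Illustrative sketch (not part of the driver): typical lifecycle of the
 * lookaside-backed slab emulation above. A cache is created once, objects
 * are taken and returned with kmem_cache_alloc()/kmem_cache_free(), and the
 * cache is torn down with kmem_cache_destroy(). The demo function, cache
 * name and object size are hypothetical.
 */
#if 0
static int demo_slab_usage(void)
{
    kmem_cache_t *cache;
    void         *obj;

    cache = kmem_cache_create("demo", 64, 0, SLAB_TEMPORARY, NULL);
    if (!cache)
        return -ENOMEM;

    obj = kmem_cache_alloc(cache, GFP_NOFS);
    if (obj) {
        /* ... use the 64-byte object ... */
        kmem_cache_free(cache, obj);
    }

    kmem_cache_destroy(cache);
    return 0;
}
#endif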
110 
111 //
112 // wait queue routines
113 //
114 
115 void init_waitqueue_head(wait_queue_head_t *q)
116 {
117     spin_lock_init(&q->lock);
118     INIT_LIST_HEAD(&q->task_list);
119 }
120 
121 struct __wait_queue *
122 wait_queue_create()
123 {
124     struct __wait_queue * wait = NULL;
125     wait = kmalloc(sizeof(struct __wait_queue), GFP_KERNEL);
126     if (!wait) {
127         return NULL;
128     }
129 
130     memset(wait, 0, sizeof(struct __wait_queue));
131     wait->flags = WQ_FLAG_AUTO_REMOVAL;
132     wait->private = (void *)KeGetCurrentThread();
133     INIT_LIST_HEAD(&wait->task_list);
134     KeInitializeEvent(&(wait->event),
135                       SynchronizationEvent,
136                       FALSE);
137 
138     return wait;
139 }
140 
141 void
142 wait_queue_destroy(struct __wait_queue * wait)
143 {
144     kfree(wait);
145 }
146 
147 static inline void __add_wait_queue(wait_queue_head_t *head, struct __wait_queue *new)
148 {
149     list_add(&new->task_list, &head->task_list);
150 }
151 
152 /*
153  * Used for wake-one threads:
154  */
155 static inline void __add_wait_queue_tail(wait_queue_head_t *head,
156         struct __wait_queue *new)
157 {
158     list_add_tail(&new->task_list, &head->task_list);
159 }
160 
161 static inline void __remove_wait_queue(wait_queue_head_t *head,
162                                        struct __wait_queue *old)
163 {
164     list_del(&old->task_list);
165 }
166 
167 void add_wait_queue(wait_queue_head_t *q, wait_queue_t *waiti)
168 {
169     unsigned long flags;
170     struct __wait_queue *wait = *waiti;
171 
172     wait->flags &= ~WQ_FLAG_EXCLUSIVE;
173     spin_lock_irqsave(&q->lock, flags);
174     __add_wait_queue(q, wait);
175     spin_unlock_irqrestore(&q->lock, flags);
176 }
177 
178 void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *waiti)
179 {
180     unsigned long flags;
181     struct __wait_queue *wait = *waiti;
182 
183     wait->flags |= WQ_FLAG_EXCLUSIVE;
184     spin_lock_irqsave(&q->lock, flags);
185     __add_wait_queue_tail(q, wait);
186     spin_unlock_irqrestore(&q->lock, flags);
187 }
188 
189 void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *waiti)
190 {
191     unsigned long flags;
192     struct __wait_queue *wait = *waiti;
193 
194     spin_lock_irqsave(&q->lock, flags);
195     __remove_wait_queue(q, wait);
196     spin_unlock_irqrestore(&q->lock, flags);
197 }
198 
199 /*
200  * Note: we use "set_current_state()" _after_ the wait-queue add,
201  * because we need a memory barrier there on SMP, so that any
202  * wake-function that tests for the wait-queue being active
203  * will be guaranteed to see waitqueue addition _or_ subsequent
204  * tests in this thread will see the wakeup having taken place.
205  *
206  * The spin_unlock() itself is semi-permeable and only protects
207  * one way (it only protects stuff inside the critical region and
208  * stops them from bleeding out - it would still allow subsequent
209  * loads to move into the critical region).
210  */
211 void
212 prepare_to_wait(wait_queue_head_t *q, wait_queue_t *waiti, int state)
213 {
214     unsigned long flags;
215     struct __wait_queue *wait = *waiti;
216 
217     wait->flags &= ~WQ_FLAG_EXCLUSIVE;
218     spin_lock_irqsave(&q->lock, flags);
219     if (list_empty(&wait->task_list))
220         __add_wait_queue(q, wait);
221     /*
222      * don't alter the task state if this is just going to
223      * queue an async wait queue callback
224      */
225     if (is_sync_wait(wait))
226         set_current_state(state);
227     spin_unlock_irqrestore(&q->lock, flags);
228 }
229 
230 void
231 prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *waiti, int state)
232 {
233     unsigned long flags;
234     struct __wait_queue *wait = *waiti;
235 
236     wait->flags |= WQ_FLAG_EXCLUSIVE;
237     spin_lock_irqsave(&q->lock, flags);
238     if (list_empty(&wait->task_list))
239         __add_wait_queue_tail(q, wait);
240     /*
241      * don't alter the task state if this is just going to
242      * queue an async wait queue callback
243      */
244     if (is_sync_wait(wait))
245         set_current_state(state);
246     spin_unlock_irqrestore(&q->lock, flags);
247 }
248 EXPORT_SYMBOL(prepare_to_wait_exclusive);
249 
250 void finish_wait(wait_queue_head_t *q, wait_queue_t *waiti)
251 {
252     unsigned long flags;
253     struct __wait_queue *wait = *waiti;
254 
255     __set_current_state(TASK_RUNNING);
256     /*
257      * We can check for list emptiness outside the lock
258      * IFF:
259      *  - we use the "careful" check that verifies both
260      *    the next and prev pointers, so that there cannot
261      *    be any half-pending updates in progress on other
262      *    CPU's that we haven't seen yet (and that might
263      *    still change the stack area).
264      * and
265      *  - all other users take the lock (ie we can only
266      *    have _one_ other CPU that looks at or modifies
267      *    the list).
268      */
269     if (!list_empty_careful(&wait->task_list)) {
270         spin_lock_irqsave(&q->lock, flags);
271         list_del_init(&wait->task_list);
272         spin_unlock_irqrestore(&q->lock, flags);
273     }
274 
275     /* free wait */
276     wait_queue_destroy(wait);
277 }
278 
279 int wake_up(wait_queue_head_t *queue)
280 {
281     return 0; /* KeSetEvent(&wait->event, 0, FALSE); */
282 }
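/*
 * Illustrative sketch (not part of the driver): the intended calling
 * pattern for the wait-queue emulation above. A waiter allocates a
 * __wait_queue with wait_queue_create(), parks it on a wait_queue_head_t
 * with prepare_to_wait(), re-checks its condition, and finishes with
 * finish_wait(), which also frees the entry. The demo function, the head
 * and the "done" flag are hypothetical; TASK_UNINTERRUPTIBLE is assumed to
 * come from the Linux emulation headers. Note that wake_up() above is
 * currently a stub that always returns 0.
 */
#if 0
static void demo_wait_usage(wait_queue_head_t *wq, volatile int *done)
{
    wait_queue_t wait = wait_queue_create();

    if (!wait)
        return;

    while (!*done) {
        prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
        if (*done)
            break;
        /* a real waiter would block on wait->event here (assumed, not shown in this file) */
    }
    finish_wait(wq, &wait);    /* removes the entry and frees it */
}
#endif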
283 
284 
285 //
286 // kernel timer routines
287 //
288 
289 //
290 // buffer head routines
291 //
292 
293 struct _EXT2_BUFFER_HEAD {
294     kmem_cache_t *  bh_cache;
295     atomic_t        bh_count;
296     atomic_t        bh_acount;
297 } g_jbh = {NULL, ATOMIC_INIT(0), ATOMIC_INIT(0)};
298 
299 int
300 ext2_init_bh()
301 {
302     g_jbh.bh_count.counter = 0;
303     g_jbh.bh_acount.counter = 0;
304     g_jbh.bh_cache = kmem_cache_create(
305                          "ext2_bh",   /* bh */
306                          sizeof(struct buffer_head),
307                          0,		        /* offset */
308                          SLAB_TEMPORARY,	/* flags */
309                          NULL);		    /* ctor */
310     if (g_jbh.bh_cache == NULL) {
311         printk(KERN_EMERG "JBD: failed to create buffer head cache\n");
312         return -ENOMEM;
313     }
314     return 0;
315 }
316 
317 void
318 ext2_destroy_bh()
319 {
320     if (g_jbh.bh_cache) {
321         kmem_cache_destroy(g_jbh.bh_cache);
322         g_jbh.bh_cache = NULL;
323     }
324 }
325 
326 struct buffer_head *
327 new_buffer_head()
328 {
329     struct buffer_head * bh = NULL;
330     bh = kmem_cache_alloc(g_jbh.bh_cache, GFP_NOFS);
331     if (bh) {
332         atomic_inc(&g_jbh.bh_count);
333         atomic_inc(&g_jbh.bh_acount);
334 
335         memset(bh, 0, sizeof(struct buffer_head));
336         InitializeListHead(&bh->b_link);
337         KeQuerySystemTime(&bh->b_ts_creat);
338         DEBUG(DL_BH, ("bh=%p allocated.\n", bh));
339         INC_MEM_COUNT(PS_BUFF_HEAD, bh, sizeof(struct buffer_head));
340     }
341 
342     return bh;
343 }
344 
345 void
346 free_buffer_head(struct buffer_head * bh)
347 {
348     if (bh) {
349         if (bh->b_mdl) {
350 
351             DEBUG(DL_BH, ("bh=%p mdl=%p (Flags:%xh VA:%p) released.\n", bh, bh->b_mdl,
352                           bh->b_mdl->MdlFlags, bh->b_mdl->MappedSystemVa));
353             if (IsFlagOn(bh->b_mdl->MdlFlags, MDL_MAPPED_TO_SYSTEM_VA)) {
354                 MmUnmapLockedPages(bh->b_mdl->MappedSystemVa, bh->b_mdl);
355             }
356             Ext2DestroyMdl(bh->b_mdl);
357         }
358         if (bh->b_bcb) {
359             CcUnpinDataForThread(bh->b_bcb, (ERESOURCE_THREAD)bh | 0x3);
360         }
361 
362         DEBUG(DL_BH, ("bh=%p freed.\n", bh));
363         DEC_MEM_COUNT(PS_BUFF_HEAD, bh, sizeof(struct buffer_head));
364         kmem_cache_free(g_jbh.bh_cache, bh);
365         atomic_dec(&g_jbh.bh_count);
366     }
367 }
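/*
 * Note: g_jbh tracks the global buffer_head cache. bh_count follows the
 * number of live buffer_heads (incremented in new_buffer_head(), dropped
 * in free_buffer_head()), while bh_acount counts every allocation ever
 * made; both are reset in ext2_init_bh(), and the lookaside-backed cache
 * itself is released in ext2_destroy_bh().
 */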
368 
369 //
370 // Red-black tree insert routine.
371 //
372 
373 static struct buffer_head *__buffer_head_search(struct rb_root *root,
374                        sector_t blocknr)
375 {
376     struct rb_node *new = root->rb_node;
377 
378     /* Figure out where to put new node */
379     while (new) {
380         struct buffer_head *bh =
381             container_of(new, struct buffer_head, b_rb_node);
382         s64 result = blocknr - bh->b_blocknr;
383 
384         if (result < 0)
385             new = new->rb_left;
386         else if (result > 0)
387             new = new->rb_right;
388         else
389             return bh;
390 
391     }
392 
393     return NULL;
394 }
395 
396 static int buffer_head_blocknr_cmp(struct rb_node *a, struct rb_node *b)
397 {
398     struct buffer_head *a_bh, *b_bh;
399     s64 result;
400     a_bh = container_of(a, struct buffer_head, b_rb_node);
401     b_bh = container_of(b, struct buffer_head, b_rb_node);
402     result = a_bh->b_blocknr - b_bh->b_blocknr;
403 
404     if (result < 0)
405         return -1;
406     if (result > 0)
407         return 1;
408     return 0;
409 }
410 
411 static struct buffer_head *buffer_head_search(struct block_device *bdev,
412                      sector_t blocknr)
413 {
414     struct rb_root *root;
415     root = &bdev->bd_bh_root;
416     return __buffer_head_search(root, blocknr);
417 }
418 
419 static void buffer_head_insert(struct block_device *bdev, struct buffer_head *bh)
420 {
421     rb_insert(&bdev->bd_bh_root, &bh->b_rb_node, buffer_head_blocknr_cmp);
422 }
423 
424 void buffer_head_remove(struct block_device *bdev, struct buffer_head *bh)
425 {
426     rb_erase(&bh->b_rb_node, &bdev->bd_bh_root);
427 }
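/*
 * The helpers above keep every cached buffer_head in a per-device
 * red-black tree (bdev->bd_bh_root) keyed by b_blocknr. Lookups and
 * updates are serialized by bdev->bd_bh_lock in the get_block_bh_*()
 * routines below, which first search the tree and only allocate and
 * insert a new buffer_head on a miss.
 */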
428 
429 struct buffer_head *
430 get_block_bh_mdl(
431     struct block_device *   bdev,
432     sector_t                block,
433     unsigned long           size,
434     int                     zero
435 )
436 {
437     PEXT2_VCB Vcb = bdev->bd_priv;
438     LARGE_INTEGER offset;
439     PVOID         bcb = NULL;
440     PVOID         ptr = NULL;
441 
442     struct list_head *entry;
443 
444     /* allocate buffer_head and initialize it */
445     struct buffer_head *bh = NULL, *tbh = NULL;
446 
447     /* check whether the block number is valid */
448     if (block >= TOTAL_BLOCKS) {
449         DbgBreak();
450         goto errorout;
451     }
452 
453     /* search the bdev's cached buffer_head tree */
454     ExAcquireSharedStarveExclusive(&bdev->bd_bh_lock, TRUE);
455     tbh = buffer_head_search(bdev, block);
456     if (tbh) {
457         bh = tbh;
458         get_bh(bh);
459         ExReleaseResourceLite(&bdev->bd_bh_lock);
460         goto errorout;
461     }
462     ExReleaseResourceLite(&bdev->bd_bh_lock);
463 
464     bh = new_buffer_head();
465     if (!bh) {
466         goto errorout;
467     }
468     bh->b_bdev = bdev;
469     bh->b_blocknr = block;
470     bh->b_size = size;
471     bh->b_data = NULL;
472 #ifdef __REACTOS__
473     InitializeListHead(&bh->b_link);
474 #endif
475 
476 again:
477 
478     offset.QuadPart = (s64) bh->b_blocknr;
479     offset.QuadPart <<= BLOCK_BITS;
480 
481     if (zero) {
482         /* PIN_EXCLUSIVE disabled, likely to deadlock with volume operations */
483         if (!CcPreparePinWrite(Vcb->Volume,
484                             &offset,
485                             bh->b_size,
486                             FALSE,
487                             PIN_WAIT /* | PIN_EXCLUSIVE */,
488                             &bcb,
489                             &ptr)) {
490             Ext2Sleep(100);
491             goto again;
492         }
493     } else {
494         if (!CcPinRead( Vcb->Volume,
495                         &offset,
496                         bh->b_size,
497                         PIN_WAIT,
498                         &bcb,
499                         &ptr)) {
500             Ext2Sleep(100);
501             goto again;
502         }
503         set_buffer_uptodate(bh);
504     }
505 
506     bh->b_mdl = Ext2CreateMdl(ptr, bh->b_size, IoModifyAccess);
507     if (bh->b_mdl) {
508         /* must map the pages as NonCached: journal recovery will
509            access this buffer under a spinlock at DISPATCH_LEVEL IRQL */
510         bh->b_data = MmMapLockedPagesSpecifyCache(
511                          bh->b_mdl, KernelMode, MmNonCached,
512                          NULL,FALSE, HighPagePriority);
513         /* bh->b_data = MmMapLockedPages(bh->b_mdl, KernelMode); */
514     }
515     if (!bh->b_mdl || !bh->b_data) {
516         free_buffer_head(bh);
517         bh = NULL;
518         goto errorout;
519     }
520 
521     get_bh(bh);
522 
523     DEBUG(DL_BH, ("getblk: Vcb=%p bhcount=%u block=%u bh=%p mdl=%p (Flags:%xh VA:%p)\n",
524                   Vcb, atomic_read(&g_jbh.bh_count), block, bh, bh->b_mdl, bh->b_mdl->MdlFlags, bh->b_data));
525 
526     ExAcquireResourceExclusiveLite(&bdev->bd_bh_lock, TRUE);
527     /* do search again here */
528     tbh = buffer_head_search(bdev, block);
529     if (tbh) {
530         free_buffer_head(bh);
531         bh = tbh;
532         get_bh(bh);
533         RemoveEntryList(&bh->b_link);
534         InitializeListHead(&bh->b_link);
535         ExReleaseResourceLite(&bdev->bd_bh_lock);
536         goto errorout;
537     } else {
538         buffer_head_insert(bdev, bh);
539     }
540     ExReleaseResourceLite(&bdev->bd_bh_lock);
541 
542     /* we get it */
543 errorout:
544 
545     if (bcb)
546         CcUnpinData(bcb);
547 
548     return bh;
549 }
550 
551 int submit_bh_mdl(int rw, struct buffer_head *bh)
552 {
553     struct block_device *bdev = bh->b_bdev;
554     PEXT2_VCB            Vcb  = bdev->bd_priv;
555     PBCB                 Bcb;
556     PVOID                Buffer;
557     LARGE_INTEGER        Offset;
558 
559     ASSERT(Vcb->Identifier.Type == EXT2VCB);
560     ASSERT(bh->b_data);
561 
562     if (rw == WRITE) {
563 
564         if (IsVcbReadOnly(Vcb)) {
565             goto errorout;
566         }
567 
568         SetFlag(Vcb->Volume->Flags, FO_FILE_MODIFIED);
569         Offset.QuadPart = ((LONGLONG)bh->b_blocknr) << BLOCK_BITS;
570 
571         /* PIN_EXCLUSIVE disabled, likely to deadlock with volume operations */
572         if (CcPreparePinWrite(
573                     Vcb->Volume,
574                     &Offset,
575                     BLOCK_SIZE,
576                     FALSE,
577                     PIN_WAIT /* | PIN_EXCLUSIVE */,
578                     &Bcb,
579                     &Buffer )) {
580 #if 0
581             if (memcmp(Buffer, bh->b_data, BLOCK_SIZE) != 0) {
582                 DbgBreak();
583             }
584             memmove(Buffer, bh->b_data, BLOCK_SIZE);
585 #endif
586             CcSetDirtyPinnedData(Bcb, NULL);
587             Ext2AddBlockExtent( Vcb, NULL,
588                                 (ULONG)bh->b_blocknr,
589                                 (ULONG)bh->b_blocknr,
590                                 (bh->b_size >> BLOCK_BITS));
591             CcUnpinData(Bcb);
592         } else {
593 
594             Ext2AddBlockExtent( Vcb, NULL,
595                                 (ULONG)bh->b_blocknr,
596                                 (ULONG)bh->b_blocknr,
597                                 (bh->b_size >> BLOCK_BITS));
598         }
599 
600     } else {
601     }
602 
603 errorout:
604 
605     unlock_buffer(bh);
606     put_bh(bh);
607     return 0;
608 }
609 
610 struct buffer_head *
611 get_block_bh_pin(
612     struct block_device *   bdev,
613     sector_t                block,
614     unsigned long           size,
615     int                     zero
616 )
617 {
618     PEXT2_VCB Vcb = bdev->bd_priv;
619     LARGE_INTEGER offset;
620 
621     struct list_head *entry;
622 
623     /* allocate buffer_head and initialize it */
624     struct buffer_head *bh = NULL, *tbh = NULL;
625 
626     /* check whether the block number is valid */
627     if (block >= TOTAL_BLOCKS) {
628         DbgBreak();
629         goto errorout;
630     }
631 
632     /* search the bdev's cached buffer_head tree */
633     ExAcquireSharedStarveExclusive(&bdev->bd_bh_lock, TRUE);
634     tbh = buffer_head_search(bdev, block);
635     if (tbh) {
636         bh = tbh;
637         get_bh(bh);
638         ExReleaseResourceLite(&bdev->bd_bh_lock);
639         goto errorout;
640     }
641     ExReleaseResourceLite(&bdev->bd_bh_lock);
642 
643     bh = new_buffer_head();
644     if (!bh) {
645         goto errorout;
646     }
647     bh->b_bdev = bdev;
648     bh->b_blocknr = block;
649     bh->b_size = size;
650     bh->b_data = NULL;
651 #ifdef __REACTOS__
652     InitializeListHead(&bh->b_link);
653 #endif
654 
655 again:
656 
657     offset.QuadPart = (s64) bh->b_blocknr;
658     offset.QuadPart <<= BLOCK_BITS;
659 
660     if (zero) {
661         if (!CcPreparePinWrite(Vcb->Volume,
662                             &offset,
663                             bh->b_size,
664                             FALSE,
665                             PIN_WAIT,
666                             &bh->b_bcb,
667                             (PVOID *)&bh->b_data)) {
668             Ext2Sleep(100);
669             goto again;
670         }
671     } else {
672         if (!CcPinRead( Vcb->Volume,
673                         &offset,
674                         bh->b_size,
675                         PIN_WAIT,
676                         &bh->b_bcb,
677                         (PVOID *)&bh->b_data)) {
678             Ext2Sleep(100);
679             goto again;
680         }
681         set_buffer_uptodate(bh);
682     }
683 
684     if (bh->b_bcb)
685         CcSetBcbOwnerPointer(bh->b_bcb, (PVOID)((ERESOURCE_THREAD)bh | 0x3));
686 
687     if (!bh->b_data) {
688         free_buffer_head(bh);
689         bh = NULL;
690         goto errorout;
691     }
692     get_bh(bh);
693 
694     DEBUG(DL_BH, ("getblk: Vcb=%p bhcount=%u block=%u bh=%p ptr=%p.\n",
695                   Vcb, atomic_read(&g_jbh.bh_count), block, bh, bh->b_data));
696 
697     ExAcquireResourceExclusiveLite(&bdev->bd_bh_lock, TRUE);
698     /* do search again here */
699     tbh = buffer_head_search(bdev, block);
700     if (tbh) {
701         get_bh(tbh);
702         free_buffer_head(bh);
703         bh = tbh;
704         RemoveEntryList(&bh->b_link);
705         InitializeListHead(&bh->b_link);
706         ExReleaseResourceLite(&bdev->bd_bh_lock);
707         goto errorout;
708     } else {
709         buffer_head_insert(bdev, bh);
710     }
711     ExReleaseResourceLite(&bdev->bd_bh_lock);
712 
713     /* we get it */
714 errorout:
715 
716     return bh;
717 }
718 
719 int submit_bh_pin(int rw, struct buffer_head *bh)
720 {
721     struct block_device *bdev = bh->b_bdev;
722     PEXT2_VCB            Vcb  = bdev->bd_priv;
723     PVOID                Buffer;
724     LARGE_INTEGER        Offset;
725 
726     ASSERT(Vcb->Identifier.Type == EXT2VCB);
727     ASSERT(bh->b_data && bh->b_bcb);
728 
729     if (rw == WRITE) {
730 
731         if (IsVcbReadOnly(Vcb)) {
732             goto errorout;
733         }
734 
735         SetFlag(Vcb->Volume->Flags, FO_FILE_MODIFIED);
736         Offset.QuadPart = ((LONGLONG)bh->b_blocknr) << BLOCK_BITS;
737 
738         CcSetDirtyPinnedData(bh->b_bcb, NULL);
739         Ext2AddBlockExtent( Vcb, NULL,
740                             (ULONG)bh->b_blocknr,
741                             (ULONG)bh->b_blocknr,
742                             (bh->b_size >> BLOCK_BITS));
743     } else {
744     }
745 
746 errorout:
747 
748     unlock_buffer(bh);
749     put_bh(bh);
750     return 0;
751 }
752 
753 #if 0
754 
755 struct buffer_head *
756 get_block_bh(
757     struct block_device *   bdev,
758     sector_t                block,
759     unsigned long           size,
760     int                     zero
761 )
762 {
763     return get_block_bh_mdl(bdev, block, size, zero);
764 }
765 
766 int submit_bh(int rw, struct buffer_head *bh)
767 {
768     return submit_bh_mdl(rw, bh);
769 }
770 
771 #else
772 
773 struct buffer_head *
774 get_block_bh(
775     struct block_device *   bdev,
776     sector_t                block,
777     unsigned long           size,
778     int                     zero
779 )
780 {
781     return get_block_bh_pin(bdev, block, size, zero);
782 }
783 
784 int submit_bh(int rw, struct buffer_head *bh)
785 {
786     return submit_bh_pin(rw, bh);
787 }
788 #endif
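/*
 * Two interchangeable back-ends are provided: the MDL-based pair
 * (get_block_bh_mdl/submit_bh_mdl) maps the pinned cache pages through an
 * MDL so journal recovery can touch them at DISPATCH_LEVEL, while the
 * pin-based pair (get_block_bh_pin/submit_bh_pin) works directly on the
 * cache manager's pinned buffer. The #else branch above selects the
 * pin-based implementation for get_block_bh()/submit_bh().
 */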
789 
790 struct buffer_head *
791 __getblk(
792     struct block_device *   bdev,
793     sector_t                block,
794     unsigned long           size
795 )
796 {
797     return get_block_bh(bdev, block, size, 0);
798 }
799 
800 void __brelse(struct buffer_head *bh)
801 {
802     struct block_device *bdev = bh->b_bdev;
803     PEXT2_VCB Vcb = (PEXT2_VCB)bdev->bd_priv;
804 
805     ASSERT(Vcb->Identifier.Type == EXT2VCB);
806 
807     /* write data in case it's dirty */
808     while (buffer_dirty(bh)) {
809         ll_rw_block(WRITE, 1, &bh);
810     }
811 
812     ExAcquireResourceExclusiveLite(&bdev->bd_bh_lock, TRUE);
813     if (atomic_dec_and_test(&bh->b_count)) {
814         ASSERT(0 == atomic_read(&bh->b_count));
815     } else {
816         ExReleaseResourceLite(&bdev->bd_bh_lock);
817         return;
818     }
819     KeQuerySystemTime(&bh->b_ts_drop);
820 #ifdef __REACTOS__
821     if (!IsListEmpty(&bh->b_link))
822 #endif
823     RemoveEntryList(&bh->b_link);
824     InsertTailList(&Vcb->bd.bd_bh_free, &bh->b_link);
825     KeClearEvent(&Vcb->bd.bd_bh_notify);
826     ExReleaseResourceLite(&bdev->bd_bh_lock);
827     KeSetEvent(&Ext2Global->bhReaper.Wait, 0, FALSE);
828 
829     DEBUG(DL_BH, ("brelse: cnt=%u size=%u blk=%10.10xh bh=%p ptr=%p\n",
830                   atomic_read(&g_jbh.bh_count) - 1, bh->b_size,
831                   bh->b_blocknr, bh, bh->b_data ));
832 }
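/*
 * Note: __brelse() does not free the buffer_head directly. Dirty buffers
 * are first flushed via ll_rw_block(WRITE, ...); once the last reference
 * is dropped, the entry is moved onto Vcb->bd.bd_bh_free and
 * Ext2Global->bhReaper is signalled, and the reaper is then expected to
 * perform the actual teardown.
 */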
833 
834 
835 void __bforget(struct buffer_head *bh)
836 {
837     clear_buffer_dirty(bh);
838     __brelse(bh);
839 }
840 
841 void __lock_buffer(struct buffer_head *bh)
842 {
843 }
844 
845 void unlock_buffer(struct buffer_head *bh)
846 {
847     clear_buffer_locked(bh);
848 }
849 
850 void __wait_on_buffer(struct buffer_head *bh)
851 {
852 }
853 
854 void ll_rw_block(int rw, int nr, struct buffer_head * bhs[])
855 {
856     int i;
857 
858     for (i = 0; i < nr; i++) {
859 
860         struct buffer_head *bh = bhs[i];
861 
862         if (rw == SWRITE)
863             lock_buffer(bh);
864         else if (test_set_buffer_locked(bh))
865             continue;
866 
867         if (rw == WRITE || rw == SWRITE) {
868             if (test_clear_buffer_dirty(bh)) {
869                 get_bh(bh);
870                 submit_bh(WRITE, bh);
871                 continue;
872             }
873         } else {
874             if (!buffer_uptodate(bh)) {
875                 get_bh(bh);
876                 submit_bh(rw, bh);
877                 continue;
878             }
879         }
880         unlock_buffer(bh);
881     }
882 }
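/*
 * Note: in this port ll_rw_block() does not queue real I/O requests; it
 * only forwards dirty buffers (for WRITE/SWRITE) and not-yet-uptodate
 * buffers (for READ) to submit_bh(), which hands the data to the cache
 * manager. Buffers that need no work are simply unlocked; SWRITE waits
 * for the buffer lock, while the other modes skip already-locked buffers.
 */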
883 
884 int bh_submit_read(struct buffer_head *bh)
885 {
886     ll_rw_block(READ, 1, &bh);
887     return 0;
888 }
889 
890 int sync_dirty_buffer(struct buffer_head *bh)
891 {
892     int ret = 0;
893 
894     ASSERT(atomic_read(&bh->b_count) <= 1);
895     lock_buffer(bh);
896     if (test_clear_buffer_dirty(bh)) {
897         get_bh(bh);
898         ret = submit_bh(WRITE, bh);
899         wait_on_buffer(bh);
900     } else {
901         unlock_buffer(bh);
902     }
903     return ret;
904 }
905 
906 void mark_buffer_dirty(struct buffer_head *bh)
907 {
908     set_buffer_dirty(bh);
909 }
910 
911 int sync_blockdev(struct block_device *bdev)
912 {
913     PEXT2_VCB Vcb = (PEXT2_VCB) bdev->bd_priv;
914     Ext2FlushVolume(NULL, Vcb, FALSE);
915     return 0;
916 }
917 
918 /*
919  * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
920  * it in the LRU and mark it as accessed.  If it is not present then return
921  * NULL.
922  */
923 struct buffer_head *
924 __find_get_block(struct block_device *bdev, sector_t block, unsigned long size)
925 {
926     return __getblk(bdev, block, size);
927 }
928 
929 
930 //
931 // inode block mapping
932 //
933 
934 ULONGLONG bmap(struct inode *i, ULONGLONG b)
935 {
936     ULONGLONG lcn = 0;
937     struct super_block *s = i->i_sb;
938 
939     PEXT2_MCB  Mcb = (PEXT2_MCB)i->i_priv;
940     PEXT2_VCB  Vcb = (PEXT2_VCB)s->s_priv;
941     PEXT2_EXTENT extent = NULL;
942     ULONGLONG  offset = (ULONGLONG)b;
943     NTSTATUS   status;
944 
945     if (!Mcb || !Vcb) {
946         goto errorout;
947     }
948 
949     offset <<= BLOCK_BITS;
950     status = Ext2BuildExtents(
951                  NULL,
952                  Vcb,
953                  Mcb,
954                  offset,
955                  BLOCK_SIZE,
956                  FALSE,
957                  &extent
958              );
959 
960     if (!NT_SUCCESS(status)) {
961         goto errorout;
962     }
963 
964     if (extent == NULL) {
965         goto errorout;
966     }
967 
968     lcn = (unsigned long)(extent->Lba >> BLOCK_BITS);
969 
970 errorout:
971 
972     if (extent) {
973         Ext2FreeExtent(extent);
974     }
975 
976     return lcn;
977 }
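/*
 * Illustrative sketch (not part of the driver): bmap() maps a file block
 * number to a volume block number by building a one-block extent with
 * Ext2BuildExtents(), returning 0 when the block cannot be mapped. The
 * demo function and its arguments are hypothetical.
 */
#if 0
static ULONGLONG demo_bmap_usage(struct inode *inode)
{
    ULONGLONG vol_block = bmap(inode, 10);   /* map file block 10 */
    if (vol_block == 0) {
        /* hole, or the mapping failed */
    }
    return vol_block;
}
#endif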
978 
979 void iget(struct inode *inode)
980 {
981     atomic_inc(&inode->i_count);
982 }
983 
984 void iput(struct inode *inode)
985 {
986     if (atomic_dec_and_test(&inode->i_count)) {
987         kfree(inode);
988     }
989 }
990 
991 //
992 // initializer and destructor
993 //
994 
995 int
996 ext2_init_linux()
997 {
998     int rc = 0;
999 
1000     rc = ext2_init_bh();
1001     if (rc != 0) {
1002         goto errorout;
1003     }
1004 
1005 errorout:
1006 
1007     return rc;
1008 }
1009 
1010 void
1011 ext2_destroy_linux()
1012 {
1013     ext2_destroy_bh();
1014 }
1015