/*
 * COPYRIGHT:        See COPYRIGHT.TXT
 * PROJECT:          Ext2 File System Driver for WinNT/2K/XP
 * FILE:             linux.c
 * PROGRAMMER:       Matt Wu <mattwu@163.com>
 * HOMEPAGE:         http://www.ext2fsd.com
 * UPDATE HISTORY:
 */

/* INCLUDES *****************************************************************/

#include <ext2fs.h>
#include <linux/jbd.h>
#include <linux/errno.h>

/* GLOBALS ***************************************************************/

extern PEXT2_GLOBAL Ext2Global;

/* DEFINITIONS *************************************************************/

#ifdef ALLOC_PRAGMA
#pragma alloc_text(PAGE, kzalloc)
#endif

struct task_struct current_task = {
    /* pid  */ 0,
    /* tid  */ 1,
    /* comm */ "current\0",
    /* journal_info */ NULL
};
struct task_struct *current = &current_task;

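/*
 * kzalloc: allocate-and-zero helper mirroring the Linux API, implemented
 * as kmalloc() followed by memset() on success.
 */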
void *kzalloc(int size, int flags)
{
    void *buffer = kmalloc(size, flags);
    if (buffer) {
        memset(buffer, 0, size);
    }
    return buffer;
}

//
// slab routines
//

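/*
 * The kmem_cache_* routines below emulate the Linux slab allocator on top
 * of an NT non-paged lookaside list (one list per cache).  A typical
 * caller looks like the buffer-head cache set up in ext2_init_bh():
 *
 *     kmem_cache_t *cache;
 *     void *obj;
 *     cache = kmem_cache_create("ext2_bh", sizeof(struct buffer_head),
 *                               0, SLAB_TEMPORARY, NULL);
 *     obj = kmem_cache_alloc(cache, GFP_NOFS);
 *     ...
 *     kmem_cache_free(cache, obj);
 *     kmem_cache_destroy(cache);
 *
 * Unlike the Linux slab, the constructor callback is stored but never
 * invoked by kmem_cache_alloc(), and the 'offset'/'flags' arguments are
 * ignored here.
 */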
kmem_cache_t *
kmem_cache_create(
    const char *    name,
    size_t          size,
    size_t          offset,
    unsigned long   flags,
    kmem_cache_cb_t ctor
)
{
    kmem_cache_t *kc = NULL;

    kc = kmalloc(sizeof(kmem_cache_t), GFP_KERNEL);
    if (kc == NULL) {
        goto errorout;
    }

    memset(kc, 0, sizeof(kmem_cache_t));
    ExInitializeNPagedLookasideList(
        &kc->la,
        NULL,
        NULL,
        0,
        size,
        'JBKC',
        0);

    kc->size = size;
    strncpy(kc->name, name, 31);
    kc->constructor = ctor;

errorout:

    return kc;
}

int kmem_cache_destroy(kmem_cache_t *kc)
{
    ASSERT(kc != NULL);

    ExDeleteNPagedLookasideList(&(kc->la));
    kfree(kc);

    return 0;
}

void *kmem_cache_alloc(kmem_cache_t *kc, int flags)
{
    PVOID ptr = NULL;
    ptr = ExAllocateFromNPagedLookasideList(&(kc->la));
    if (ptr) {
        atomic_inc(&kc->count);
        atomic_inc(&kc->acount);
    }
    return ptr;
}

void kmem_cache_free(kmem_cache_t *kc, void *p)
{
    if (p) {
        atomic_dec(&kc->count);
        ExFreeToNPagedLookasideList(&(kc->la), p);
    }
}

//
// wait queue routines
//

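/*
 * Wait queues are emulated with one heap-allocated __wait_queue per
 * waiter: each carries a SynchronizationEvent plus a list link, and the
 * queue head is a spinlock-protected list.  prepare_to_wait() and
 * finish_wait() follow the Linux calling convention; the actual wait on
 * the embedded KEVENT is expected to happen elsewhere, and wake_up()
 * below is currently a stub that just returns 0 (its KeSetEvent call is
 * commented out).
 */
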
void init_waitqueue_head(wait_queue_head_t *q)
{
    spin_lock_init(&q->lock);
    INIT_LIST_HEAD(&q->task_list);
}

struct __wait_queue *
wait_queue_create()
{
    struct __wait_queue *wait = NULL;
    wait = kmalloc(sizeof(struct __wait_queue), GFP_KERNEL);
    if (!wait) {
        return NULL;
    }

    memset(wait, 0, sizeof(struct __wait_queue));
    wait->flags = WQ_FLAG_AUTO_REMOVAL;
    wait->private = (void *)KeGetCurrentThread();
    INIT_LIST_HEAD(&wait->task_list);
    KeInitializeEvent(&(wait->event),
                      SynchronizationEvent,
                      FALSE);

    return wait;
}

void
wait_queue_destroy(struct __wait_queue *wait)
{
    kfree(wait);
}

static inline void __add_wait_queue(wait_queue_head_t *head, struct __wait_queue *new)
{
    list_add(&new->task_list, &head->task_list);
}

/*
 * Used for wake-one threads:
 */
static inline void __add_wait_queue_tail(wait_queue_head_t *head,
                                         struct __wait_queue *new)
{
    list_add_tail(&new->task_list, &head->task_list);
}

static inline void __remove_wait_queue(wait_queue_head_t *head,
                                       struct __wait_queue *old)
{
    list_del(&old->task_list);
}

void add_wait_queue(wait_queue_head_t *q, wait_queue_t *waiti)
{
    unsigned long flags;
    struct __wait_queue *wait = *waiti;

    wait->flags &= ~WQ_FLAG_EXCLUSIVE;
    spin_lock_irqsave(&q->lock, flags);
    __add_wait_queue(q, wait);
    spin_unlock_irqrestore(&q->lock, flags);
}

void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *waiti)
{
    unsigned long flags;
    struct __wait_queue *wait = *waiti;

    wait->flags |= WQ_FLAG_EXCLUSIVE;
    spin_lock_irqsave(&q->lock, flags);
    __add_wait_queue_tail(q, wait);
    spin_unlock_irqrestore(&q->lock, flags);
}

void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *waiti)
{
    unsigned long flags;
    struct __wait_queue *wait = *waiti;

    spin_lock_irqsave(&q->lock, flags);
    __remove_wait_queue(q, wait);
    spin_unlock_irqrestore(&q->lock, flags);
}

/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(wait_queue_head_t *q, wait_queue_t *waiti, int state)
{
    unsigned long flags;
    struct __wait_queue *wait = *waiti;

    wait->flags &= ~WQ_FLAG_EXCLUSIVE;
    spin_lock_irqsave(&q->lock, flags);
    if (list_empty(&wait->task_list))
        __add_wait_queue(q, wait);
    /*
     * don't alter the task state if this is just going to
     * queue an async wait queue callback
     */
    if (is_sync_wait(wait))
        set_current_state(state);
    spin_unlock_irqrestore(&q->lock, flags);
}

void
prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *waiti, int state)
{
    unsigned long flags;
    struct __wait_queue *wait = *waiti;

    wait->flags |= WQ_FLAG_EXCLUSIVE;
    spin_lock_irqsave(&q->lock, flags);
    if (list_empty(&wait->task_list))
        __add_wait_queue_tail(q, wait);
    /*
     * don't alter the task state if this is just going to
     * queue an async wait queue callback
     */
    if (is_sync_wait(wait))
        set_current_state(state);
    spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);

void finish_wait(wait_queue_head_t *q, wait_queue_t *waiti)
{
    unsigned long flags;
    struct __wait_queue *wait = *waiti;

    __set_current_state(TASK_RUNNING);
    /*
     * We can check for list emptiness outside the lock
     * IFF:
     *  - we use the "careful" check that verifies both
     *    the next and prev pointers, so that there cannot
     *    be any half-pending updates in progress on other
     *    CPU's that we haven't seen yet (and that might
     *    still change the stack area).
     * and
     *  - all other users take the lock (ie we can only
     *    have _one_ other CPU that looks at or modifies
     *    the list).
     */
    if (!list_empty_careful(&wait->task_list)) {
        spin_lock_irqsave(&q->lock, flags);
        list_del_init(&wait->task_list);
        spin_unlock_irqrestore(&q->lock, flags);
    }

    /* free wait */
    wait_queue_destroy(wait);
}

int wake_up(wait_queue_head_t *queue)
{
    return 0; /* KeSetEvent(&wait->event, 0, FALSE); */
}


//
// kernel timer routines
//

//
// buffer head routines
//

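/*
 * Buffer heads are carved from the g_jbh lookaside cache, refcounted via
 * b_count, and indexed per block_device in a red-black tree keyed by
 * b_blocknr (bd_bh_root), protected by bd_bh_lock.  The block data itself
 * is backed by the NT cache manager: a block is either pinned and mapped
 * through an MDL (get_block_bh_mdl/submit_bh_mdl) or kept pinned via a
 * BCB (get_block_bh_pin/submit_bh_pin); the #if 0 switch near the end of
 * this file selects which pair backs get_block_bh()/submit_bh().
 */
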
struct _EXT2_BUFFER_HEAD {
    kmem_cache_t *  bh_cache;
    atomic_t        bh_count;
    atomic_t        bh_acount;
} g_jbh = {NULL, ATOMIC_INIT(0)};

int
ext2_init_bh()
{
    g_jbh.bh_count.counter = 0;
    g_jbh.bh_acount.counter = 0;
    g_jbh.bh_cache = kmem_cache_create(
                         "ext2_bh",   /* bh */
                         sizeof(struct buffer_head),
                         0,                  /* offset */
                         SLAB_TEMPORARY,     /* flags */
                         NULL);              /* ctor */
    if (g_jbh.bh_cache == NULL) {
        printk(KERN_EMERG "JBD: failed to create handle cache\n");
        return -ENOMEM;
    }
    return 0;
}

void
ext2_destroy_bh()
{
    if (g_jbh.bh_cache) {
        kmem_cache_destroy(g_jbh.bh_cache);
        g_jbh.bh_cache = NULL;
    }
}

struct buffer_head *
new_buffer_head()
{
    struct buffer_head *bh = NULL;
    bh = kmem_cache_alloc(g_jbh.bh_cache, GFP_NOFS);
    if (bh) {
        atomic_inc(&g_jbh.bh_count);
        atomic_inc(&g_jbh.bh_acount);

        memset(bh, 0, sizeof(struct buffer_head));
        InitializeListHead(&bh->b_link);
        KeQuerySystemTime(&bh->b_ts_creat);
        DEBUG(DL_BH, ("bh=%p allocated.\n", bh));
        INC_MEM_COUNT(PS_BUFF_HEAD, bh, sizeof(struct buffer_head));
    }

    return bh;
}

void
free_buffer_head(struct buffer_head *bh)
{
    if (bh) {
        if (bh->b_mdl) {

            DEBUG(DL_BH, ("bh=%p mdl=%p (Flags:%xh VA:%p) released.\n", bh, bh->b_mdl,
                          bh->b_mdl->MdlFlags, bh->b_mdl->MappedSystemVa));
            if (IsFlagOn(bh->b_mdl->MdlFlags, MDL_MAPPED_TO_SYSTEM_VA)) {
                MmUnmapLockedPages(bh->b_mdl->MappedSystemVa, bh->b_mdl);
            }
            Ext2DestroyMdl(bh->b_mdl);
        }
        if (bh->b_bcb) {
            CcUnpinDataForThread(bh->b_bcb, (ERESOURCE_THREAD)bh | 0x3);
        }

        DEBUG(DL_BH, ("bh=%p freed.\n", bh));
        DEC_MEM_COUNT(PS_BUFF_HEAD, bh, sizeof(struct buffer_head));
        kmem_cache_free(g_jbh.bh_cache, bh);
        atomic_dec(&g_jbh.bh_count);
    }
}

//
// Red-black tree insert routine.
//

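/*
 * Per-device lookup helpers: __buffer_head_search() walks bdev->bd_bh_root
 * comparing the requested sector number against b_blocknr, and
 * buffer_head_blocknr_cmp() provides the same ordering for rb_insert().
 * Callers are expected to hold bd_bh_lock (shared for lookup, exclusive
 * for insert/remove), as the get_block_bh_* routines below do.
 */
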
static struct buffer_head *__buffer_head_search(struct rb_root *root,
                                                sector_t blocknr)
{
    struct rb_node *new = root->rb_node;

    /* Figure out where to put new node */
    while (new) {
        struct buffer_head *bh =
            container_of(new, struct buffer_head, b_rb_node);
        s64 result = blocknr - bh->b_blocknr;

        if (result < 0)
            new = new->rb_left;
        else if (result > 0)
            new = new->rb_right;
        else
            return bh;

    }

    return NULL;
}

static int buffer_head_blocknr_cmp(struct rb_node *a, struct rb_node *b)
{
    struct buffer_head *a_bh, *b_bh;
    s64 result;
    a_bh = container_of(a, struct buffer_head, b_rb_node);
    b_bh = container_of(b, struct buffer_head, b_rb_node);
    result = a_bh->b_blocknr - b_bh->b_blocknr;

    if (result < 0)
        return -1;
    if (result > 0)
        return 1;
    return 0;
}

static struct buffer_head *buffer_head_search(struct block_device *bdev,
                                              sector_t blocknr)
{
    struct rb_root *root;
    root = &bdev->bd_bh_root;
    return __buffer_head_search(root, blocknr);
}

static void buffer_head_insert(struct block_device *bdev, struct buffer_head *bh)
{
    rb_insert(&bdev->bd_bh_root, &bh->b_rb_node, buffer_head_blocknr_cmp);
}

void buffer_head_remove(struct block_device *bdev, struct buffer_head *bh)
{
    rb_erase(&bh->b_rb_node, &bdev->bd_bh_root);
}

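/*
 * get_block_bh_mdl: look the block up in the per-device tree and, on a
 * miss, pin it through the cache manager (CcPreparePinWrite for blocks
 * about to be overwritten, CcPinRead otherwise), then wrap the mapping
 * in an MDL remapped as non-cached so journal recovery can touch it at
 * DISPATCH_LEVEL.  The tree is re-checked under the exclusive lock before
 * insertion to resolve races with a concurrent caller for the same block.
 */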
struct buffer_head *
get_block_bh_mdl(
    struct block_device *   bdev,
    sector_t                block,
    unsigned long           size,
    int                     zero
)
{
    PEXT2_VCB Vcb = bdev->bd_priv;
    LARGE_INTEGER offset;
    PVOID         bcb = NULL;
    PVOID         ptr = NULL;

    struct list_head *entry;

    /* allocate buffer_head and initialize it */
    struct buffer_head *bh = NULL, *tbh = NULL;

    /* check the block is valid or not */
    if (block >= TOTAL_BLOCKS) {
        DbgBreak();
        goto errorout;
    }

    /* search the bdev bh list */
    ExAcquireSharedStarveExclusive(&bdev->bd_bh_lock, TRUE);
    tbh = buffer_head_search(bdev, block);
    if (tbh) {
        bh = tbh;
        get_bh(bh);
        ExReleaseResourceLite(&bdev->bd_bh_lock);
        goto errorout;
    }
    ExReleaseResourceLite(&bdev->bd_bh_lock);

    bh = new_buffer_head();
    if (!bh) {
        goto errorout;
    }
    bh->b_bdev = bdev;
    bh->b_blocknr = block;
    bh->b_size = size;
    bh->b_data = NULL;
#ifdef __REACTOS__
    InitializeListHead(&bh->b_link);
#endif

again:

    offset.QuadPart = (s64) bh->b_blocknr;
    offset.QuadPart <<= BLOCK_BITS;

    if (zero) {
        /* PIN_EXCLUSIVE disabled, likely to deadlock with volume operations */
        if (!CcPreparePinWrite(Vcb->Volume,
                               &offset,
                               bh->b_size,
                               FALSE,
                               PIN_WAIT /* | PIN_EXCLUSIVE */,
                               &bcb,
                               &ptr)) {
            Ext2Sleep(100);
            goto again;
        }
    } else {
        if (!CcPinRead( Vcb->Volume,
                        &offset,
                        bh->b_size,
                        PIN_WAIT,
                        &bcb,
                        &ptr)) {
            Ext2Sleep(100);
            goto again;
        }
        set_buffer_uptodate(bh);
    }

    bh->b_mdl = Ext2CreateMdl(ptr, bh->b_size, IoModifyAccess);
    if (bh->b_mdl) {
        /* must map the PTE to NonCached zone. journal recovery will
           access the PTE under spinlock: DISPATCH_LEVEL IRQL */
        bh->b_data = MmMapLockedPagesSpecifyCache(
                         bh->b_mdl, KernelMode, MmNonCached,
                         NULL, FALSE, HighPagePriority);
        /* bh->b_data = MmMapLockedPages(bh->b_mdl, KernelMode); */
    }
    if (!bh->b_mdl || !bh->b_data) {
        free_buffer_head(bh);
        bh = NULL;
        goto errorout;
    }

    get_bh(bh);

    DEBUG(DL_BH, ("getblk: Vcb=%p bhcount=%u block=%u bh=%p mdl=%p (Flags:%xh VA:%p)\n",
                  Vcb, atomic_read(&g_jbh.bh_count), block, bh, bh->b_mdl, bh->b_mdl->MdlFlags, bh->b_data));

    ExAcquireResourceExclusiveLite(&bdev->bd_bh_lock, TRUE);
    /* do search again here */
    tbh = buffer_head_search(bdev, block);
    if (tbh) {
        free_buffer_head(bh);
        bh = tbh;
        get_bh(bh);
        RemoveEntryList(&bh->b_link);
        InitializeListHead(&bh->b_link);
        ExReleaseResourceLite(&bdev->bd_bh_lock);
        goto errorout;
    } else {
        buffer_head_insert(bdev, bh);
    }
    ExReleaseResourceLite(&bdev->bd_bh_lock);

    /* we get it */
errorout:

    if (bcb)
        CcUnpinData(bcb);

    return bh;
}

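/*
 * submit_bh_mdl: for WRITE requests, re-pin the block, mark the pinned
 * data dirty and record the block extent via Ext2AddBlockExtent() (the
 * extent is recorded even if the pin attempt fails); READ requests are a
 * no-op since get_block_bh_mdl() already read and mapped the data.  The
 * buffer lock and the caller's reference are dropped on the way out.
 */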
int submit_bh_mdl(int rw, struct buffer_head *bh)
{
    struct block_device *bdev = bh->b_bdev;
    PEXT2_VCB            Vcb  = bdev->bd_priv;
    PBCB                 Bcb;
    PVOID                Buffer;
    LARGE_INTEGER        Offset;

    ASSERT(Vcb->Identifier.Type == EXT2VCB);
    ASSERT(bh->b_data);

    if (rw == WRITE) {

        if (IsVcbReadOnly(Vcb)) {
            goto errorout;
        }

        SetFlag(Vcb->Volume->Flags, FO_FILE_MODIFIED);
        Offset.QuadPart = ((LONGLONG)bh->b_blocknr) << BLOCK_BITS;

        /* PIN_EXCLUSIVE disabled, likely to deadlock with volume operations */
        if (CcPreparePinWrite(
                Vcb->Volume,
                &Offset,
                BLOCK_SIZE,
                FALSE,
                PIN_WAIT /* | PIN_EXCLUSIVE */,
                &Bcb,
                &Buffer )) {
#if 0
            if (memcmp(Buffer, bh->b_data, BLOCK_SIZE) != 0) {
                DbgBreak();
            }
            memmove(Buffer, bh->b_data, BLOCK_SIZE);
#endif
            CcSetDirtyPinnedData(Bcb, NULL);
            Ext2AddBlockExtent( Vcb, NULL,
                                (ULONG)bh->b_blocknr,
                                (ULONG)bh->b_blocknr,
                                (bh->b_size >> BLOCK_BITS));
            CcUnpinData(Bcb);
        } else {

            Ext2AddBlockExtent( Vcb, NULL,
                                (ULONG)bh->b_blocknr,
                                (ULONG)bh->b_blocknr,
                                (bh->b_size >> BLOCK_BITS));
        }

    } else {
    }

errorout:

    unlock_buffer(bh);
    put_bh(bh);
    return 0;
}

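/*
 * get_block_bh_pin: same lookup/insert protocol as get_block_bh_mdl, but
 * instead of building an MDL it keeps the cache manager pin (BCB) for the
 * life of the buffer head; b_data points straight at the pinned cache
 * view and the BCB owner is set to the buffer head so free_buffer_head()
 * can release it with CcUnpinDataForThread().
 */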
struct buffer_head *
get_block_bh_pin(
    struct block_device *   bdev,
    sector_t                block,
    unsigned long           size,
    int                     zero
)
{
    PEXT2_VCB Vcb = bdev->bd_priv;
    LARGE_INTEGER offset;

    struct list_head *entry;

    /* allocate buffer_head and initialize it */
    struct buffer_head *bh = NULL, *tbh = NULL;

    /* check the block is valid or not */
    if (block >= TOTAL_BLOCKS) {
        DbgBreak();
        goto errorout;
    }

    /* search the bdev bh list */
    ExAcquireSharedStarveExclusive(&bdev->bd_bh_lock, TRUE);
    tbh = buffer_head_search(bdev, block);
    if (tbh) {
        bh = tbh;
        get_bh(bh);
        ExReleaseResourceLite(&bdev->bd_bh_lock);
        goto errorout;
    }
    ExReleaseResourceLite(&bdev->bd_bh_lock);

    bh = new_buffer_head();
    if (!bh) {
        goto errorout;
    }
    bh->b_bdev = bdev;
    bh->b_blocknr = block;
    bh->b_size = size;
    bh->b_data = NULL;
#ifdef __REACTOS__
    InitializeListHead(&bh->b_link);
#endif

again:

    offset.QuadPart = (s64) bh->b_blocknr;
    offset.QuadPart <<= BLOCK_BITS;

    if (zero) {
        if (!CcPreparePinWrite(Vcb->Volume,
                               &offset,
                               bh->b_size,
                               FALSE,
                               PIN_WAIT,
                               &bh->b_bcb,
#ifdef __REACTOS__
                               (PVOID *)&bh->b_data)) {
#else
                               &bh->b_data)) {
#endif
            Ext2Sleep(100);
            goto again;
        }
    } else {
        if (!CcPinRead( Vcb->Volume,
                        &offset,
                        bh->b_size,
                        PIN_WAIT,
                        &bh->b_bcb,
#ifdef __REACTOS__
                        (PVOID *)&bh->b_data)) {
#else
                        &bh->b_data)) {
#endif
            Ext2Sleep(100);
            goto again;
        }
        set_buffer_uptodate(bh);
    }

    if (bh->b_bcb)
        CcSetBcbOwnerPointer(bh->b_bcb, (PVOID)((ERESOURCE_THREAD)bh | 0x3));

    if (!bh->b_data) {
        free_buffer_head(bh);
        bh = NULL;
        goto errorout;
    }
    get_bh(bh);

    DEBUG(DL_BH, ("getblk: Vcb=%p bhcount=%u block=%u bh=%p ptr=%p.\n",
                  Vcb, atomic_read(&g_jbh.bh_count), block, bh, bh->b_data));

    ExAcquireResourceExclusiveLite(&bdev->bd_bh_lock, TRUE);
    /* do search again here */
    tbh = buffer_head_search(bdev, block);
    if (tbh) {
        get_bh(tbh);
        free_buffer_head(bh);
        bh = tbh;
        RemoveEntryList(&bh->b_link);
        InitializeListHead(&bh->b_link);
        ExReleaseResourceLite(&bdev->bd_bh_lock);
        goto errorout;
    } else {
        buffer_head_insert(bdev, bh);
    }
    ExReleaseResourceLite(&bdev->bd_bh_lock);

    /* we get it */
errorout:

    return bh;
}

int submit_bh_pin(int rw, struct buffer_head *bh)
{
    struct block_device *bdev = bh->b_bdev;
    PEXT2_VCB            Vcb  = bdev->bd_priv;
    PVOID                Buffer;
    LARGE_INTEGER        Offset;

    ASSERT(Vcb->Identifier.Type == EXT2VCB);
    ASSERT(bh->b_data && bh->b_bcb);

    if (rw == WRITE) {

        if (IsVcbReadOnly(Vcb)) {
            goto errorout;
        }

        SetFlag(Vcb->Volume->Flags, FO_FILE_MODIFIED);
        Offset.QuadPart = ((LONGLONG)bh->b_blocknr) << BLOCK_BITS;

        CcSetDirtyPinnedData(bh->b_bcb, NULL);
        Ext2AddBlockExtent( Vcb, NULL,
                            (ULONG)bh->b_blocknr,
                            (ULONG)bh->b_blocknr,
                            (bh->b_size >> BLOCK_BITS));
    } else {
    }

errorout:

    unlock_buffer(bh);
    put_bh(bh);
    return 0;
}

#if 0

struct buffer_head *
get_block_bh(
    struct block_device *   bdev,
    sector_t                block,
    unsigned long           size,
    int                     zero
)
{
    return get_block_bh_mdl(bdev, block, size, zero);
}

int submit_bh(int rw, struct buffer_head *bh)
{
    return submit_bh_mdl(rw, bh);
}

#else

struct buffer_head *
get_block_bh(
    struct block_device *   bdev,
    sector_t                block,
    unsigned long           size,
    int                     zero
)
{
    return get_block_bh_pin(bdev, block, size, zero);
}

int submit_bh(int rw, struct buffer_head *bh)
{
    return submit_bh_pin(rw, bh);
}
#endif

struct buffer_head *
__getblk(
    struct block_device *   bdev,
    sector_t                block,
    unsigned long           size
)
{
    return get_block_bh(bdev, block, size, 0);
}

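/*
 * __brelse: flush the buffer if it is still dirty, then drop one
 * reference.  When the count reaches zero the head is moved onto the
 * per-volume bd_bh_free list and the bhReaper thread is signalled to do
 * the actual teardown asynchronously.
 */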
void __brelse(struct buffer_head *bh)
{
    struct block_device *bdev = bh->b_bdev;
    PEXT2_VCB Vcb = (PEXT2_VCB)bdev->bd_priv;

    ASSERT(Vcb->Identifier.Type == EXT2VCB);

    /* write data in case it's dirty */
    while (buffer_dirty(bh)) {
        ll_rw_block(WRITE, 1, &bh);
    }

    ExAcquireResourceExclusiveLite(&bdev->bd_bh_lock, TRUE);
    if (atomic_dec_and_test(&bh->b_count)) {
        ASSERT(0 == atomic_read(&bh->b_count));
    } else {
        ExReleaseResourceLite(&bdev->bd_bh_lock);
        return;
    }
    KeQuerySystemTime(&bh->b_ts_drop);
#ifdef __REACTOS__
    if (!IsListEmpty(&bh->b_link))
#endif
    RemoveEntryList(&bh->b_link);
    InsertTailList(&Vcb->bd.bd_bh_free, &bh->b_link);
    KeClearEvent(&Vcb->bd.bd_bh_notify);
    ExReleaseResourceLite(&bdev->bd_bh_lock);
    KeSetEvent(&Ext2Global->bhReaper.Wait, 0, FALSE);

    DEBUG(DL_BH, ("brelse: cnt=%u size=%u blk=%10.10xh bh=%p ptr=%p\n",
                  atomic_read(&g_jbh.bh_count) - 1, bh->b_size,
                  bh->b_blocknr, bh, bh->b_data ));
}


void __bforget(struct buffer_head *bh)
{
    clear_buffer_dirty(bh);
    __brelse(bh);
}

void __lock_buffer(struct buffer_head *bh)
{
}

void unlock_buffer(struct buffer_head *bh)
{
    clear_buffer_locked(bh);
}

void __wait_on_buffer(struct buffer_head *bh)
{
}

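/*
 * ll_rw_block: Linux-style block submission.  WRITE/SWRITE flushes any
 * buffer that is still dirty; READ only issues a request when the buffer
 * is not already up to date.  Since submit_bh() works against data that
 * is already pinned by the cache manager, submission effectively
 * completes within the call, which is why __wait_on_buffer() above can
 * stay a no-op.
 */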
void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
{
    int i;

    for (i = 0; i < nr; i++) {

        struct buffer_head *bh = bhs[i];

        if (rw == SWRITE)
            lock_buffer(bh);
        else if (test_set_buffer_locked(bh))
            continue;

        if (rw == WRITE || rw == SWRITE) {
            if (test_clear_buffer_dirty(bh)) {
                get_bh(bh);
                submit_bh(WRITE, bh);
                continue;
            }
        } else {
            if (!buffer_uptodate(bh)) {
                get_bh(bh);
                submit_bh(rw, bh);
                continue;
            }
        }
        unlock_buffer(bh);
    }
}

int bh_submit_read(struct buffer_head *bh)
{
    ll_rw_block(READ, 1, &bh);
    return 0;
}

int sync_dirty_buffer(struct buffer_head *bh)
{
    int ret = 0;

    ASSERT(atomic_read(&bh->b_count) <= 1);
    lock_buffer(bh);
    if (test_clear_buffer_dirty(bh)) {
        get_bh(bh);
        ret = submit_bh(WRITE, bh);
        wait_on_buffer(bh);
    } else {
        unlock_buffer(bh);
    }
    return ret;
}

void mark_buffer_dirty(struct buffer_head *bh)
{
    set_buffer_dirty(bh);
}

int sync_blockdev(struct block_device *bdev)
{
    PEXT2_VCB Vcb = (PEXT2_VCB) bdev->bd_priv;
    Ext2FlushVolume(NULL, Vcb, FALSE);
    return 0;
}

/*
 * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
 * it in the LRU and mark it as accessed.  If it is not present then return
 * NULL.
 */
struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, unsigned long size)
{
    return __getblk(bdev, block, size);
}


//
// inode block mapping
//

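/*
 * bmap: translate a file-relative block number into an on-disk block
 * (LCN) by asking Ext2BuildExtents for the extent covering that offset.
 * Returns 0 when the block cannot be mapped (unmapped hole or error),
 * matching the Linux bmap() convention.
 */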
ULONGLONG bmap(struct inode *i, ULONGLONG b)
{
    ULONGLONG lcn = 0;
    struct super_block *s = i->i_sb;

    PEXT2_MCB  Mcb = (PEXT2_MCB)i->i_priv;
    PEXT2_VCB  Vcb = (PEXT2_VCB)s->s_priv;
    PEXT2_EXTENT extent = NULL;
    ULONGLONG  offset = (ULONGLONG)b;
    NTSTATUS   status;

    if (!Mcb || !Vcb) {
        goto errorout;
    }

    offset <<= BLOCK_BITS;
    status = Ext2BuildExtents(
                 NULL,
                 Vcb,
                 Mcb,
                 offset,
                 BLOCK_SIZE,
                 FALSE,
                 &extent
             );

    if (!NT_SUCCESS(status)) {
        goto errorout;
    }

    if (extent == NULL) {
        goto errorout;
    }

    lcn = (unsigned long)(extent->Lba >> BLOCK_BITS);

errorout:

    if (extent) {
        Ext2FreeExtent(extent);
    }

    return lcn;
}

void iget(struct inode *inode)
{
    atomic_inc(&inode->i_count);
}

void iput(struct inode *inode)
{
    if (atomic_dec_and_test(&inode->i_count)) {
        kfree(inode);
    }
}

//
// initializer and destructor
//

int
ext2_init_linux()
{
    int rc = 0;

    rc = ext2_init_bh();
    if (rc != 0) {
        goto errorout;
    }

errorout:

    return rc;
}

void
ext2_destroy_linux()
{
    ext2_destroy_bh();
}