////////////////////////////////////////////////////////////////////
// Copyright (C) Alexander Telyatnikov, Ivan Keliukh, Yegor Anchishkin, SKIF Software, 1999-2013. Kiev, Ukraine
// All rights reserved
// This file was released under the GPLv2 on June 2015.
////////////////////////////////////////////////////////////////////
/*
 Module name:

    alloc.cpp

 Abstract:

    This file contains filesystem-specific routines
    responsible for disk space management

*/

#include "udf.h"

#define UDF_BUG_CHECK_ID        UDF_FILE_UDF_INFO_ALLOC

static const int8 bit_count_tab[] = {
    0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
    1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
    1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
    1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
    3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,

    1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
    3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
    3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
    3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
    4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8
};
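
/*
    bit_count_tab[x] is the number of set bits in the byte x (a 256-entry
    population-count table); UDFGetPartFreeSpace() below sums it over the
    free-space bitmap. Illustrative sketch only (not part of the driver,
    helper name is hypothetical):

    static uint32 CountSetBits(const uint8* buf, uint32 bytes) {
        uint32 total = 0;
        for(uint32 i=0; i<bytes; i++) {
            total += bit_count_tab[buf[i]];   // e.g. bit_count_tab[0xB5] == 5
        }
        return total;
    }
*/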

/*
    This routine converts a physical address to the corresponding
    partition-relative (logical) address in the specified partition
*/
uint32
UDFPhysLbaToPart(
    IN PVCB Vcb,
    IN uint32 PartNum,
    IN uint32 Addr
    )
{
    PUDFPartMap pm = Vcb->Partitions;
#if defined (_X86_) && defined (_MSC_VER) && !defined(__clang__)
    uint32 retval;
    __asm {
        push  ebx
        push  ecx
        push  edx

        mov   ebx,Vcb
        mov   edx,[ebx]Vcb.PartitionMaps
        mov   ebx,pm
        mov   ecx,PartNum
        xor   eax,eax
loop_pl2p:
        cmp   ecx,edx
        jae   short EO_pl2p
        cmp   [ebx]pm.PartitionNum,cx
        jne   short cont_pl2p
        mov   eax,Addr
        sub   eax,[ebx]pm.PartitionRoot
        mov   ecx,Vcb
        mov   ecx,[ecx]Vcb.LB2B_Bits
        shr   eax,cl
        jmp   short EO_pl2p
cont_pl2p:
        add   ebx,size UDFPartMap
        inc   ecx
        jmp   short loop_pl2p
EO_pl2p:
        mov   retval,eax

        pop   edx
        pop   ecx
        pop   ebx
    }
#ifdef UDF_DBG
    {
        // validate return value
        lb_addr locAddr;
        locAddr.logicalBlockNum = retval;
        locAddr.partitionReferenceNum = (uint16)PartNum;
        UDFPartLbaToPhys(Vcb, &locAddr);
    }
#endif // UDF_DBG
    return retval;
#else // no X86 optimization, use generic C/C++
    uint32 i;
    // walk through the partition maps to find a suitable one...
    for(i=PartNum; i<Vcb->PartitionMaps; i++, pm++) {
        if(pm->PartitionNum == PartNum)
            // found it! return the partition-relative address
            return (Addr - pm->PartitionRoot) >> Vcb->LB2B_Bits;
    }
    return 0;
#endif // _X86_
} // end UDFPhysLbaToPart()
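
/*
    Illustrative note (not part of the driver): for the matching partition map
    entry pm the conversion above reduces to

        PartRelativeLbn = (PhysAddr - pm->PartitionRoot) >> Vcb->LB2B_Bits;

    i.e. the physical LBA is rebased to the partition start and then scaled
    from physical blocks to logical blocks.
*/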

/*
    This routine returns the physical Lba for a partition-relative address
*/
uint32
__fastcall
UDFPartLbaToPhys(
    IN PVCB Vcb,
    IN lb_addr* Addr
    )
{
    uint32 i, a;
    if(Addr->partitionReferenceNum >= Vcb->PartitionMaps) {
        AdPrint(("UDFPartLbaToPhys: part %x, lbn %x (err)\n",
            Addr->partitionReferenceNum, Addr->logicalBlockNum));
        if(Vcb->PartitionMaps &&
           (Vcb->CompatFlags & UDF_VCB_IC_INSTANT_COMPAT_ALLOC_DESCS)) {
            AdPrint(("UDFPartLbaToPhys: try to recover: part %x -> %x\n",
                Addr->partitionReferenceNum, Vcb->PartitionMaps-1));
            Addr->partitionReferenceNum = (USHORT)(Vcb->PartitionMaps-1);
        } else {
            return LBA_OUT_OF_EXTENT;
        }
    }
    // walk through the partition maps & transform the relative address
    // to a physical one
    for(i=Addr->partitionReferenceNum; i<Vcb->PartitionMaps; i++) {
        if(Vcb->Partitions[i].PartitionNum == Addr->partitionReferenceNum) {
            a = Vcb->Partitions[i].PartitionRoot +
                (Addr->logicalBlockNum << Vcb->LB2B_Bits);
            if(a > Vcb->LastPossibleLBA) {
                AdPrint(("UDFPartLbaToPhys: root %x, lbn %x, lba %x (err1)\n",
                    Vcb->Partitions[i].PartitionRoot, Addr->logicalBlockNum, a));
                BrutePoint();
                return LBA_OUT_OF_EXTENT;
            }
            return a;
        }
    }
    a = Vcb->Partitions[i-1].PartitionRoot +
        (Addr->logicalBlockNum << Vcb->LB2B_Bits);
    if(a > Vcb->LastPossibleLBA) {
        AdPrint(("UDFPartLbaToPhys: i %x, root %x, lbn %x, lba %x (err2)\n",
            i, Vcb->Partitions[i-1].PartitionRoot, Addr->logicalBlockNum, a));
        BrutePoint();
        return LBA_OUT_OF_EXTENT;
    }
    return a;
} // end UDFPartLbaToPhys()
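
/*
    Illustrative note (not part of the driver): this is the inverse of
    UDFPhysLbaToPart(); for the matching partition map entry the translation is

        PhysLba = PartitionRoot + (Addr->logicalBlockNum << Vcb->LB2B_Bits);

    followed by a LastPossibleLBA bounds check.
*/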


/*
    This routine returns the physical Lba for a partition-relative address.
    No partition bounds check is performed.
    This routine only checks whether the requested partition exists.
    It is introduced for 'Adaptec DirectCD' compatibility,
    because it uses negative values as an extent terminator (against the standard).
*/
/*uint32
__fastcall
UDFPartLbaToPhysCompat(
    IN PVCB Vcb,
    IN lb_addr* Addr
    )
{
    uint32 i, a;
    if(Addr->partitionReferenceNum >= Vcb->PartitionMaps) return LBA_NOT_ALLOCATED;
    // walk through partition maps & transform relative address
    // to physical
    for(i=Addr->partitionReferenceNum; i<Vcb->PartitionMaps; i++) {
        if(Vcb->Partitions[i].PartitionNum == Addr->partitionReferenceNum) {
            a = Vcb->Partitions[i].PartitionRoot +
                (Addr->logicalBlockNum << Vcb->LB2B_Bits);
            if(a > Vcb->LastPossibleLBA) {
                BrutePoint();
            }
            return a;
        }
    }
    a = Vcb->Partitions[i-1].PartitionRoot +
        (Addr->logicalBlockNum << Vcb->LB2B_Bits);
    if(a > Vcb->LastPossibleLBA) {
        BrutePoint();
    }
    return a;
} // end UDFPartLbaToPhysCompat()*/


/*
    This routine looks for the partition containing the given physical sector
*/
uint32
__fastcall
UDFGetPartNumByPhysLba(
    IN PVCB Vcb,
    IN uint32 Lba
    )
{
    uint32 i=Vcb->PartitionMaps-1, root;
    PUDFPartMap pm = &(Vcb->Partitions[i]);
    // walk through the partition maps to find a suitable one
    for(;i!=0xffffffff;i--,pm--) {
        if( ((root = pm->PartitionRoot) <= Lba) &&
            ((root + pm->PartitionLen) > Lba) ) return (uint16)pm->PartitionNum;
    }
    return LBA_OUT_OF_EXTENT; // Lba doesn't belong to any partition
} // end UDFGetPartNumByPhysLba()

/*
    Very simple routine. It walks through the Partition Maps & returns
    the 1st Lba of the 1st suitable one
*/
uint32
__fastcall
UDFPartStart(
    PVCB Vcb,
    uint32 PartNum
    )
{
    uint32 i;
    if(PartNum == (uint32)-1) return 0;
    if(PartNum == (uint32)-2) return Vcb->Partitions[0].PartitionRoot;
    for(i=PartNum; i<Vcb->PartitionMaps; i++) {
        if(Vcb->Partitions[i].PartitionNum == PartNum) return Vcb->Partitions[i].PartitionRoot;
    }
    return 0;
} // end UDFPartStart()

/*
    This routine does almost the same as the previous one.
    The only difference is that it returns the last Lba instead of the first one...
*/
uint32
__fastcall
UDFPartEnd(
    PVCB Vcb,
    uint32 PartNum
    )
{
    uint32 i;
    if(PartNum == (uint32)-1) return Vcb->LastLBA;
    if(PartNum == (uint32)-2) PartNum = Vcb->PartitionMaps-1;
    for(i=PartNum; i<Vcb->PartitionMaps; i++) {
        if(Vcb->Partitions[i].PartitionNum == PartNum)
            return (Vcb->Partitions[i].PartitionRoot +
                    Vcb->Partitions[i].PartitionLen);
    }
    return (Vcb->Partitions[i-1].PartitionRoot +
            Vcb->Partitions[i-1].PartitionLen);
} // end UDFPartEnd()

/*
    Very simple routine. It walks through the Partition Maps & returns
    the length of the 1st suitable one
*/
uint32
__fastcall
UDFPartLen(
    PVCB Vcb,
    uint32 PartNum
    )
{

    if(PartNum == (uint32)-2) return UDFPartEnd(Vcb, -2) - UDFPartStart(Vcb, -2);
/*#ifdef _X86_
    uint32 ret_val;
    __asm {
        mov   ebx,Vcb
        mov   eax,PartNum
        cmp   eax,-1
        jne   short NOT_last_gpl
        mov   eax,[ebx]Vcb.LastLBA
        jmp   short EO_gpl
NOT_last_gpl:
        mov   esi,eax
        xor   eax,eax
        mov   ecx,[ebx]Vcb.PartitionMaps
        jecxz EO_gpl

        mov   eax,esi
        mov   edx,size UDFTrackMap
        mul   edx
        add   ebx,eax
        mov   eax,esi
gpl_loop:
        cmp   [ebx]Vcb.PartitionMaps.PartitionNum,ax
        je    short EO_gpl_1
        add   ebx,size UDFTrackMap
        inc   eax
        cmp   eax,ecx
        jb    short gpl_loop
        sub   ebx,size UDFTrackMap
EO_gpl_1:
        mov   eax,[ebx]Vcb.PartitionMaps.PartitionLen
        add   eax,[ebx]Vcb.PartitionMaps.PartitionRoot
EO_gpl:
        mov   ret_val,eax
    }
    return ret_val;
#else // no X86 optimization, use generic C/C++*/
    uint32 i;
    if(PartNum == (uint32)-1) return Vcb->LastLBA;
    for(i=PartNum; i<Vcb->PartitionMaps; i++) {
        if(Vcb->Partitions[i].PartitionNum == PartNum)
            return Vcb->Partitions[i].PartitionLen;
    }
    return (Vcb->Partitions[i-1].PartitionRoot +
            Vcb->Partitions[i-1].PartitionLen);
/*#endif // _X86_*/
} // end UDFPartLen()
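
/*
    Illustrative note (not part of the driver): for an existing partition
    PartNum the three helpers above satisfy

        UDFPartEnd(Vcb, PartNum) == UDFPartStart(Vcb, PartNum) + UDFPartLen(Vcb, PartNum)

    (the pseudo-partitions -1 and -2 are handled by the special cases at the
    top of each routine).
*/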

/*
    This routine returns the length of the run of equal bits that starts at
    bit Offs in the array Bitmap. The scan is limited by Lim (not included).
*/

#if defined (_X86_) && defined (_MSC_VER)

__declspec (naked)
SIZE_T
__stdcall
UDFGetBitmapLen(
    uint32* Bitmap,
    SIZE_T Offs,
    SIZE_T Lim          // NOT included
    )
{
    _asm {
        push  ebp
        mov   ebp, esp

        push  ebx
        push  ecx
        push  edx
        push  esi
        push  edi

        xor   edx,edx           // init bit-counter
        mov   ebx,[ebp+0x08]    // set base pointer in EBX (Bitmap)
        mov   esi,[ebp+0x0c]    // set Offs in ESI
        mov   edi,[ebp+0x10]    // set Lim in EDI

        // check if Lim <= Offs
        cmp   esi,edi
//        jb    start_count
//        ja    exit_count
//        inc   edx
//        jmp   exit_count
        jae   exit_count

//start_count:

        // set 1st bit number in CL
        mov   ecx,esi
        and   cl,0x1f
        // make ESI a uint32-index
        shr   esi,5

        // save last bit number in CH
        mov   eax,edi
        and   al,0x1f
        mov   ch,al
        // make EDI the uint32-index of the last uint32
        shr   edi,5

        mov   eax,[ebx+esi*4]
        shr   eax,cl
        test  eax,1

        jz    Loop_0

/* COUNT 1-BITS SECTION */
Loop_1:

        cmp   esi,edi
        ja    exit_count     // must never happen
        jb    non_last_1

Loop_last_1:

        cmp   cl,ch
        jae   exit_count
        // have we hit a 0 ?
        test  eax,1
        jz    exit_count
        shr   eax,1
        inc   edx
        inc   cl
        jmp   Loop_last_1

non_last_1:

        or    cl,cl
        jnz   std_count_1
        cmp   eax,-1
        je    quick_count_1

std_count_1:

        cmp   cl,0x1f
        ja    next_uint32_1
        // have we hit a 0 ?
        test  eax,1
        jz    exit_count
        shr   eax,1
        inc   edx
        inc   cl
        jmp   std_count_1

quick_count_1:

        add   edx,0x20

next_uint32_1:

        inc   esi
        mov   eax,[ebx+esi*4]
        xor   cl,cl
        jmp   Loop_1

/* COUNT 0-BITS SECTION */
Loop_0:

        cmp   esi,edi
        ja    exit_count     // must never happen
        jb    non_last_0

Loop_last_0:

        cmp   cl,ch
        jae   exit_count
        // have we hit a 1 ?
        test  eax,1
        jnz   exit_count
        shr   eax,1
        inc   edx
        inc   cl
        jmp   Loop_last_0

non_last_0:

        or    cl,cl
        jnz   std_count_0
        or    eax,eax
        jz    quick_count_0

std_count_0:

        cmp   cl,0x1f
        ja    next_uint32_0
        // have we hit a 1 ?
        test  eax,1
        jnz   exit_count
        shr   eax,1
        inc   edx
        inc   cl
        jmp   std_count_0

quick_count_0:

        add   edx,0x20

next_uint32_0:

        inc   esi
        mov   eax,[ebx+esi*4]
        xor   cl,cl
        jmp   Loop_0

exit_count:

        mov   eax,edx

        pop   edi
        pop   esi
        pop   edx
        pop   ecx
        pop   ebx

        pop   ebp

        ret   0x0c
    }

#else // no X86 optimization, use generic C/C++

SIZE_T
__stdcall
UDFGetBitmapLen(
    uint32* Bitmap,
    SIZE_T Offs,
    SIZE_T Lim          // NOT included
    )
{
    ASSERT(Offs <= Lim);
    if(Offs >= Lim) {
        return 0;//(Offs == Lim);
    }

    BOOLEAN bit = UDFGetBit(Bitmap, Offs);
    SIZE_T i=Offs>>5;
    SIZE_T len=0;
    uint8 j=(uint8)(Offs&31);
    uint8 lLim=(uint8)(Lim&31);

    Lim = Lim>>5;

    ASSERT((bit == 0) || (bit == 1));

    uint32 a;

    a = Bitmap[i] >> j;

    while(i<=Lim) {

        while( j < ((i<Lim) ? 32 : lLim) ) {
            if( ((BOOLEAN)(a&1)) != bit)
                return len;
            len++;
            a>>=1;
            j++;
        }
        j=0;
While_3:
        i++;
        a = Bitmap[i];

        if(i<Lim) {
            if((bit && (a==0xffffffff)) ||
               (!bit && !a)) {
                len+=32;
                goto While_3;
            }
        }
    }

    return len;

#endif // _X86_

} // end UDFGetBitmapLen()
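
/*
    Illustrative sketch (not part of the driver): a bitmap is typically walked
    in whole runs with UDFGetBitmapLen(), which is exactly what
    UDFFindMinSuitableExtent() below does:

        SIZE_T i = SearchStart;
        while(i < SearchLim) {
            SIZE_T len = UDFGetBitmapLen(Bitmap, i, SearchLim);
            if(UDFGetFreeBit(Bitmap, i)) {
                // blocks i .. i+len-1 form a free run of len blocks
            }
            i += len;
        }
*/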

#ifndef UDF_READ_ONLY_BUILD
/*
    This routine scans the disc free-space Bitmap for the minimal suitable
    extent. If no sufficiently long extent is found, it returns the maximal
    available one instead.
*/
SIZE_T
UDFFindMinSuitableExtent(
    IN PVCB Vcb,
    IN uint32 Length,       // in blocks
    IN uint32 SearchStart,
    IN uint32 SearchLim,    // NOT included
    OUT uint32* MaxExtLen,
    IN uint8  AllocFlags
    )
{
    SIZE_T i, len;
    uint32* cur;
    SIZE_T best_lba=0;
    SIZE_T best_len=0;
    SIZE_T max_lba=0;
    SIZE_T max_len=0;
    BOOLEAN align = FALSE;
    SIZE_T PS = Vcb->WriteBlockSize >> Vcb->BlockSizeBits;

    UDF_CHECK_BITMAP_RESOURCE(Vcb);

    // try to allocate a packet-aligned block first
    if(!(Length & (PS-1)) && !Vcb->CDR_Mode && (Length >= PS*2))
        align = TRUE;
    if(AllocFlags & EXTENT_FLAG_ALLOC_SEQUENTIAL)
        align = TRUE;
    if(Length > (uint32)(UDF_MAX_EXTENT_LENGTH >> Vcb->BlockSizeBits))
        Length = (UDF_MAX_EXTENT_LENGTH >> Vcb->BlockSizeBits);
    // align Length according to the _Logical_ block size & convert it to a block count
    i = (1<<Vcb->LB2B_Bits)-1;
    Length = (Length+i) & ~i;
    cur = (uint32*)(Vcb->FSBM_Bitmap);

retry_no_align:

    i=SearchStart;
    // scan Bitmap
    while(i<SearchLim) {
        ASSERT(i <= SearchLim);
        if(align) {
            i = (i+PS-1) & ~(PS-1);
            ASSERT(i <= SearchLim);
            if(i >= SearchLim)
                break;
        }
        len = UDFGetBitmapLen(cur, i, SearchLim);
        if(UDFGetFreeBit(cur, i)) { // is the extent found free or used ?
            // wow! it is free!
            if(len >= Length) {
                // minimize extent length
                if(!best_len || (best_len > len)) {
                    best_lba = i;
                    best_len = len;
                }
                if(len == Length)
                    break;
            } else {
                // remember max extent
                if(max_len < len) {
                    max_lba = i;
                    max_len = len;
                }
            }
            // in CD-R mode we should not think about fragmentation:
            // due to the CD-R nature the file will be fragmented in any case
            if(Vcb->CDR_Mode) break;
        }
        i += len;
    }
    // if we can't find a suitable packet-size aligned block,
    // retry without any alignment requirements
    if(!best_len && align) {
        align = FALSE;
        goto retry_no_align;
    }
    if(best_len) {
        // minimal suitable block
        (*MaxExtLen) = best_len;
        return best_lba;
    }
    // maximal available
    (*MaxExtLen) = max_len;
    return max_lba;
} // end UDFFindMinSuitableExtent()
#endif //UDF_READ_ONLY_BUILD
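
/*
    Illustrative sketch (not part of the driver): interpreting the result of
    UDFFindMinSuitableExtent(). The returned LBA is only as long as *MaxExtLen
    says, so a caller checks that value against the requested length:

        uint32 got;
        uint32 lba = UDFFindMinSuitableExtent(Vcb, want, start, lim, &got, 0);
        if(got >= want) {
            // a single extent of 'want' blocks is available at 'lba'
        } else if(got) {
            // only 'got' blocks at 'lba'; the rest must be allocated elsewhere
        } else {
            // no free space left in [start, lim)
        }
*/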

#ifdef UDF_CHECK_DISK_ALLOCATION
/*
    This routine verifies that the space described by Mapping is marked
    as Used/Free (as requested) in the free-space bitmap
*/
void
UDFCheckSpaceAllocation_(
    IN PVCB Vcb,
    IN PEXTENT_MAP Map,
    IN uint32 asXXX
#ifdef UDF_TRACK_ONDISK_ALLOCATION
   ,IN uint32 FE_lba,
    IN uint32 BugCheckId,
    IN uint32 Line
#endif //UDF_TRACK_ONDISK_ALLOCATION
    )
{
    uint32 i=0;
    uint32 lba, j, len, BS, BSh;
    BOOLEAN asUsed = (asXXX == AS_USED);

    if(!Map) return;

    BS = Vcb->BlockSize;
    BSh = Vcb->BlockSizeBits;

    UDFAcquireResourceShared(&(Vcb->BitMapResource1),TRUE);
    // walk through all frags in the data area specified
#ifdef UDF_TRACK_ONDISK_ALLOCATION
    AdPrint(("ChkAlloc:Map:%x:File:%x:Line:%d\n",
        Map,
        BugCheckId,
        Line
        ));
#endif //UDF_TRACK_ONDISK_ALLOCATION
    while(Map[i].extLength & UDF_EXTENT_LENGTH_MASK) {

#ifdef UDF_TRACK_ONDISK_ALLOCATION
        AdPrint(("ChkAlloc:%x:%s:%x:@:%x:(%x):File:%x:Line:%d\n",
            FE_lba,
            asUsed ? "U" : "F",
            (Map[i].extLength & UDF_EXTENT_LENGTH_MASK) >> BSh,
            Map[i].extLocation,
            (Map[i].extLength >> 30),
            BugCheckId,
            Line
            ));
#endif //UDF_TRACK_ONDISK_ALLOCATION
        if(asUsed) {
            UDFCheckUsedBitOwner(Vcb, (Map[i].extLength & UDF_EXTENT_LENGTH_MASK) >> BSh, FE_lba);
        } else {
            UDFCheckFreeBitOwner(Vcb, (Map[i].extLength & UDF_EXTENT_LENGTH_MASK) >> BSh);
        }

        if((Map[i].extLength >> 30) == EXTENT_NOT_RECORDED_NOT_ALLOCATED) {
            // skip unallocated frags
//            ASSERT(!(Map[i].extLength & UDF_EXTENT_LENGTH_MASK));
            ASSERT(!Map[i].extLocation);
            i++;
            continue;
        } else {
//            ASSERT(!(Map[i].extLength & UDF_EXTENT_LENGTH_MASK));
            ASSERT(Map[i].extLocation);
        }

#ifdef UDF_CHECK_EXTENT_SIZE_ALIGNMENT
        ASSERT(!(Map[i].extLength & (BS-1)));
#endif //UDF_CHECK_EXTENT_SIZE_ALIGNMENT
        len = ((Map[i].extLength & UDF_EXTENT_LENGTH_MASK)+BS-1) >> BSh;
        lba = Map[i].extLocation;
        if((lba+len) > Vcb->LastPossibleLBA) {
            // skip blocks beyond the media boundary
            if(lba > Vcb->LastPossibleLBA) {
                ASSERT(FALSE);
                i++;
                continue;
            }
            len = Vcb->LastPossibleLBA - lba;
        }

        // check that the frag is marked as XXX (see asUsed parameter)
        if(asUsed) {

            ASSERT(len);
            for(j=0;j<len;j++) {
                if(lba+j > Vcb->LastPossibleLBA) {
                    BrutePoint();
                    AdPrint(("USED Mapping covers block(s) beyond media @%x\n",lba+j));
                    break;
                }
                if(!UDFGetUsedBit(Vcb->FSBM_Bitmap, lba+j)) {
                    BrutePoint();
                    AdPrint(("USED Mapping covers FREE block(s) @%x\n",lba+j));
                    break;
                }
            }

        } else {

            ASSERT(len);
            for(j=0;j<len;j++) {
                if(lba+j > Vcb->LastPossibleLBA) {
                    BrutePoint();
                    AdPrint(("FREE Mapping covers block(s) beyond media @%x\n",lba+j));
                    break;
                }
                if(!UDFGetFreeBit(Vcb->FSBM_Bitmap, lba+j)) {
                    BrutePoint();
                    AdPrint(("FREE Mapping covers USED block(s) @%x\n",lba+j));
                    break;
                }
            }
        }

        i++;
    }
    UDFReleaseResource(&(Vcb->BitMapResource1));
} // end UDFCheckSpaceAllocation_()
#endif //UDF_CHECK_DISK_ALLOCATION

void
UDFMarkBadSpaceAsUsed(
    IN PVCB Vcb,
    IN lba_t lba,
    IN ULONG len
    )
{
    uint32 j;
#define BIT_C   (sizeof(Vcb->BSBM_Bitmap[0])*8)
    len = (lba+len+BIT_C-1)/BIT_C;
    if(Vcb->BSBM_Bitmap) {
        for(j=lba/BIT_C; j<len; j++) {
            Vcb->FSBM_Bitmap[j] &= ~Vcb->BSBM_Bitmap[j];
        }
    }
#undef BIT_C
} // UDFMarkBadSpaceAsUsed()
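
/*
    Illustrative note (not part of the driver): BIT_C above is the number of
    bits in one element of the bitmap array, so the loop walks the elements
    lba/BIT_C .. (lba+len+BIT_C-1)/BIT_C - 1 and clears in the free-space
    bitmap every bit that is set in the bad-block bitmap, i.e. known-bad
    blocks are forced to look "used".
*/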

/*
    This routine marks the space described by Mapping as Used/Free (optionally)
*/
void
UDFMarkSpaceAsXXXNoProtect_(
    IN PVCB Vcb,
    IN PEXTENT_MAP Map,
    IN uint32 asXXX
#ifdef UDF_TRACK_ONDISK_ALLOCATION
   ,IN uint32 FE_lba,
    IN uint32 BugCheckId,
    IN uint32 Line
#endif //UDF_TRACK_ONDISK_ALLOCATION
    )
{
    uint32 i=0;
    uint32 lba, j, len, BS, BSh;
    uint32 root;
    BOOLEAN asUsed = (asXXX == AS_USED || (asXXX & AS_BAD));
#ifdef UDF_TRACK_ONDISK_ALLOCATION
    BOOLEAN bit_before, bit_after;
#endif //UDF_TRACK_ONDISK_ALLOCATION

    UDF_CHECK_BITMAP_RESOURCE(Vcb);

    if(!Map) return;

    BS = Vcb->BlockSize;
    BSh = Vcb->BlockSizeBits;
    Vcb->BitmapModified = TRUE;
    UDFSetModified(Vcb);
    // walk through all frags in the data area specified
    while(Map[i].extLength & UDF_EXTENT_LENGTH_MASK) {
        if((Map[i].extLength >> 30) == EXTENT_NOT_RECORDED_NOT_ALLOCATED) {
            // skip unallocated frags
            i++;
            continue;
        }
        ASSERT(Map[i].extLocation);

#ifdef UDF_TRACK_ONDISK_ALLOCATION
        AdPrint(("Alloc:%x:%s:%x:@:%x:File:%x:Line:%d\n",
            FE_lba,
            asUsed ? ((asXXX & AS_BAD) ? "B" : "U") : "F",
            (Map[i].extLength & UDF_EXTENT_LENGTH_MASK) >> Vcb->BlockSizeBits,
            Map[i].extLocation,
            BugCheckId,
            Line
            ));
#endif //UDF_TRACK_ONDISK_ALLOCATION

#ifdef UDF_DBG
#ifdef UDF_CHECK_EXTENT_SIZE_ALIGNMENT
        ASSERT(!(Map[i].extLength & (BS-1)));
#endif //UDF_CHECK_EXTENT_SIZE_ALIGNMENT
//        len = ((Map[i].extLength & UDF_EXTENT_LENGTH_MASK)+BS-1) >> BSh;
#else // UDF_DBG
//        len = (Map[i].extLength & UDF_EXTENT_LENGTH_MASK) >> BSh;
#endif // UDF_DBG
        len = ((Map[i].extLength & UDF_EXTENT_LENGTH_MASK)+BS-1) >> BSh;
        lba = Map[i].extLocation;
        if((lba+len) > Vcb->LastPossibleLBA) {
            // skip blocks beyond the media boundary
            if(lba > Vcb->LastPossibleLBA) {
                ASSERT(FALSE);
                i++;
                continue;
            }
            len = Vcb->LastPossibleLBA - lba;
        }

#ifdef UDF_TRACK_ONDISK_ALLOCATION
        if(lba)
            bit_before = UDFGetBit(Vcb->FSBM_Bitmap, lba-1);
        bit_after = UDFGetBit(Vcb->FSBM_Bitmap, lba+len);
#endif //UDF_TRACK_ONDISK_ALLOCATION

        // mark the frag as XXX (see asUsed parameter)
        if(asUsed) {
/*            for(j=0;j<len;j++) {
                UDFSetUsedBit(Vcb->FSBM_Bitmap, lba+j);
            }*/
            ASSERT(len);
            UDFSetUsedBits(Vcb->FSBM_Bitmap, lba, len);
#ifdef UDF_TRACK_ONDISK_ALLOCATION
            for(j=0;j<len;j++) {
                ASSERT(UDFGetUsedBit(Vcb->FSBM_Bitmap, lba+j));
            }
#endif //UDF_TRACK_ONDISK_ALLOCATION

            if(Vcb->Vat) {
                // mark logical blocks in VAT as used
                for(j=0;j<len;j++) {
                    root = UDFPartStart(Vcb, UDFGetPartNumByPhysLba(Vcb, lba));
                    if((Vcb->Vat[lba-root+j] == UDF_VAT_FREE_ENTRY) &&
                       (lba > Vcb->LastLBA)) {
                         Vcb->Vat[lba-root+j] = 0x7fffffff;
                    }
                }
            }
        } else {
/*            for(j=0;j<len;j++) {
                UDFSetFreeBit(Vcb->FSBM_Bitmap, lba+j);
            }*/
            ASSERT(len);
            UDFSetFreeBits(Vcb->FSBM_Bitmap, lba, len);
#ifdef UDF_TRACK_ONDISK_ALLOCATION
            for(j=0;j<len;j++) {
                ASSERT(UDFGetFreeBit(Vcb->FSBM_Bitmap, lba+j));
            }
#endif //UDF_TRACK_ONDISK_ALLOCATION
            if(asXXX & AS_BAD) {
                UDFSetBits(Vcb->BSBM_Bitmap, lba, len);
            }
            UDFMarkBadSpaceAsUsed(Vcb, lba, len);

            if(asXXX & AS_DISCARDED) {
                UDFUnmapRange(Vcb, lba, len);
                WCacheDiscardBlocks__(&(Vcb->FastCache), Vcb, lba, len);
                UDFSetZeroBits(Vcb->ZSBM_Bitmap, lba, len);
            }
            if(Vcb->Vat) {
                // mark logical blocks in VAT as free;
                // this operation can decrease the resulting VAT size
                for(j=0;j<len;j++) {
                    root = UDFPartStart(Vcb, UDFGetPartNumByPhysLba(Vcb, lba));
                    Vcb->Vat[lba-root+j] = UDF_VAT_FREE_ENTRY;
                }
            }
            // mark the discarded extent as Not-Alloc-Not-Rec to
            // prevent writes there
            Map[i].extLength = (len << BSh) | (EXTENT_NOT_RECORDED_NOT_ALLOCATED << 30);
            Map[i].extLocation = 0;
        }

#ifdef UDF_TRACK_ONDISK_ALLOCATION
        if(lba)
            ASSERT(bit_before == UDFGetBit(Vcb->FSBM_Bitmap, lba-1));
        ASSERT(bit_after == UDFGetBit(Vcb->FSBM_Bitmap, lba+len));
#endif //UDF_TRACK_ONDISK_ALLOCATION

        i++;
    }
} // end UDFMarkSpaceAsXXXNoProtect_()

/*
    This routine marks the space described by Mapping as Used/Free (optionally).
    It protects the data with a sync Resource.
*/
void
UDFMarkSpaceAsXXX_(
    IN PVCB Vcb,
    IN PEXTENT_MAP Map,
    IN uint32 asXXX
#ifdef UDF_TRACK_ONDISK_ALLOCATION
   ,IN uint32 FE_lba,
    IN uint32 BugCheckId,
    IN uint32 Line
#endif //UDF_TRACK_ONDISK_ALLOCATION
    )
{
    if(!Map) return;
    if(!Map[0].extLength) {
#ifdef UDF_DBG
        ASSERT(!Map[0].extLocation);
#endif // UDF_DBG
        return;
    }

    UDFAcquireResourceExclusive(&(Vcb->BitMapResource1),TRUE);
#ifdef UDF_TRACK_ONDISK_ALLOCATION
    UDFMarkSpaceAsXXXNoProtect_(Vcb, Map, asXXX, FE_lba, BugCheckId, Line);
#else //UDF_TRACK_ONDISK_ALLOCATION
    UDFMarkSpaceAsXXXNoProtect_(Vcb, Map, asXXX);
#endif //UDF_TRACK_ONDISK_ALLOCATION
    UDFReleaseResource(&(Vcb->BitMapResource1));

} // end UDFMarkSpaceAsXXX_()

#ifndef UDF_READ_ONLY_BUILD
/*
    This routine builds a mapping for Length bytes in free space.
    It should be used when the IN_ICB method is unavailable.
*/
OSSTATUS
UDFAllocFreeExtent_(
    IN PVCB   Vcb,
    IN int64  Length,
    IN uint32 SearchStart,
    IN uint32 SearchLim,      // NOT included
    OUT PEXTENT_INFO ExtInfo,
    IN uint8  AllocFlags
#ifdef UDF_TRACK_ALLOC_FREE_EXTENT
   ,IN uint32 src,
    IN uint32 line
#endif //UDF_TRACK_ALLOC_FREE_EXTENT
    )
{
    EXTENT_AD Ext;
    PEXTENT_MAP Map = NULL;
    uint32 len, LBS, BSh, blen;

    LBS = Vcb->LBlockSize;
    BSh = Vcb->BlockSizeBits;
    blen = (uint32)(((Length+LBS-1) & ~((int64)LBS-1)) >> BSh);
    ExtInfo->Mapping = NULL;
    ExtInfo->Offset = 0;

    ASSERT(blen <= (uint32)(UDF_MAX_EXTENT_LENGTH >> BSh));

    UDFAcquireResourceExclusive(&(Vcb->BitMapResource1),TRUE);

    if(blen > (SearchLim - SearchStart)) {
        goto no_free_space_err;
    }
    // walk through the free-space bitmap & find a single extent or a set of
    // frags giving in sum the Length specified
    while(blen) {
        Ext.extLocation = UDFFindMinSuitableExtent(Vcb, blen, SearchStart,
                                                   SearchLim, &len, AllocFlags);

//        ASSERT(len <= (uint32)(UDF_MAX_EXTENT_LENGTH >> BSh));
        if(len >= blen) {
            // complete the search
            Ext.extLength = blen<<BSh;
            blen = 0;
        } else if(len) {
            // we still need some frags to complete the request &
            // probably we have the opportunity to do it
            Ext.extLength = len<<BSh;
            blen -= len;
        } else {
no_free_space_err:
            // no more free space. abort
            if(ExtInfo->Mapping) {
                UDFMarkSpaceAsXXXNoProtect(Vcb, 0, ExtInfo->Mapping, AS_DISCARDED); // free
                MyFreePool__(ExtInfo->Mapping);
                ExtInfo->Mapping = NULL;
            }
            UDFReleaseResource(&(Vcb->BitMapResource1));
            ExtInfo->Length = 0;//UDFGetExtentLength(ExtInfo->Mapping);
            AdPrint(("  DISK_FULL\n"));
            return STATUS_DISK_FULL;
        }
        // append the frag found to the mapping
        ASSERT(!(Ext.extLength >> 30));
        ASSERT(Ext.extLocation);

        // mark newly allocated blocks as zero-filled
        UDFSetZeroBits(Vcb->ZSBM_Bitmap, Ext.extLocation, (Ext.extLength & UDF_EXTENT_LENGTH_MASK) >> BSh);

        if(AllocFlags & EXTENT_FLAG_VERIFY) {
            if(!UDFCheckArea(Vcb, Ext.extLocation, Ext.extLength >> BSh)) {
                AdPrint(("newly allocated extent contains BB\n"));
                UDFMarkSpaceAsXXXNoProtect(Vcb, 0, ExtInfo->Mapping, AS_DISCARDED); // free
                UDFMarkBadSpaceAsUsed(Vcb, Ext.extLocation, Ext.extLength >> BSh); // bad -> bad+used
                // roll back
                blen += Ext.extLength>>BSh;
                continue;
            }
        }

        Ext.extLength |= EXTENT_NOT_RECORDED_ALLOCATED << 30;
        if(!(ExtInfo->Mapping)) {
            // create a new mapping
#ifdef UDF_TRACK_ALLOC_FREE_EXTENT
            ExtInfo->Mapping = UDFExtentToMapping_(&Ext, src, line);
#else // UDF_TRACK_ALLOC_FREE_EXTENT
            ExtInfo->Mapping = UDFExtentToMapping(&Ext);
#endif // UDF_TRACK_ALLOC_FREE_EXTENT
            if(!ExtInfo->Mapping) {
                BrutePoint();
                UDFReleaseResource(&(Vcb->BitMapResource1));
                ExtInfo->Length = 0;
                return STATUS_INSUFFICIENT_RESOURCES;
            }
            UDFMarkSpaceAsXXXNoProtect(Vcb, 0, ExtInfo->Mapping, AS_USED); // used
        } else {
            // update the existing mapping
            Map = UDFExtentToMapping(&Ext);
            if(!Map) {
                BrutePoint();
                UDFReleaseResource(&(Vcb->BitMapResource1));
                ExtInfo->Length = UDFGetExtentLength(ExtInfo->Mapping);
                return STATUS_INSUFFICIENT_RESOURCES;
            }
            UDFMarkSpaceAsXXXNoProtect(Vcb, 0, Map, AS_USED); // used
            ExtInfo->Mapping = UDFMergeMappings(ExtInfo->Mapping, Map);
            MyFreePool__(Map);
        }
        if(!ExtInfo->Mapping) {
            BrutePoint();
            UDFReleaseResource(&(Vcb->BitMapResource1));
            ExtInfo->Length = 0;
            return STATUS_INSUFFICIENT_RESOURCES;
        }
    }
    UDFReleaseResource(&(Vcb->BitMapResource1));
    ExtInfo->Length = Length;
    return STATUS_SUCCESS;
} // end UDFAllocFreeExtent_()
#endif //UDF_READ_ONLY_BUILD
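
/*
    Illustrative sketch (not part of the driver; the UDFAllocFreeExtent wrapper
    and the partition number are assumptions): a typical caller allocates
    Length bytes inside one partition and receives the resulting extent map
    in ExtInfo:

        EXTENT_INFO ei;
        OSSTATUS rc = UDFAllocFreeExtent(Vcb, Length,
                                         UDFPartStart(Vcb, PartNum),
                                         UDFPartEnd(Vcb, PartNum),
                                         &ei, 0);
        if(rc == STATUS_SUCCESS) {
            // ei.Mapping lists the allocated frags, ei.Length == Length
        }
*/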

/*
    Returns the free-space block count for the given partition
*/
uint32
__fastcall
UDFGetPartFreeSpace(
    IN PVCB Vcb,
    IN uint32 partNum
    )
{
    uint32 lim/*, len=1*/;
    uint32 s=0;
    uint32 j;
    PUCHAR cur = (PUCHAR)(Vcb->FSBM_Bitmap);

    lim = (UDFPartEnd(Vcb,partNum)+7)/8;
    for(j=(UDFPartStart(Vcb,partNum)+7)/8; j<lim/* && len*/; j++) {
        s+=bit_count_tab[cur[j]];
    }
    return s;
} // end UDFGetPartFreeSpace()

int64
__fastcall
UDFGetFreeSpace(
    IN PVCB Vcb
    )
{
    int64 s=0;
    uint32 i;
//    uint32* cur = (uint32*)(Vcb->FSBM_Bitmap);

    if(!Vcb->CDR_Mode &&
       !(Vcb->VCBFlags & UDF_VCB_FLAGS_RAW_DISK)) {
        for(i=0;i<Vcb->PartitionMaps;i++) {
/*            lim = UDFPartEnd(Vcb,i);
            for(j=UDFPartStart(Vcb,i); j<lim && len; ) {
                len = UDFGetBitmapLen(cur, j, lim);
                if(UDFGetFreeBit(cur, j)) // is the extent found free or used ?
                    s+=len;
                j+=len;
            }*/
            s += UDFGetPartFreeSpace(Vcb, i);
        }
    } else {
        ASSERT(Vcb->LastPossibleLBA >= max(Vcb->NWA, Vcb->LastLBA));
        s = Vcb->LastPossibleLBA - max(Vcb->NWA, Vcb->LastLBA);
        //if(s & ((int64)1 << 64)) s=0;
    }
    return s >> Vcb->LB2B_Bits;
} // end UDFGetFreeSpace()

/*
    Returns the total block count
*/
int64
__fastcall
UDFGetTotalSpace(
    IN PVCB Vcb
    )
{
    int64 s=0;
    uint32 i;

    if(Vcb->VCBFlags & UDF_VCB_FLAGS_RAW_DISK) {
        s= Vcb->LastPossibleLBA;
    } else if(!Vcb->CDR_Mode) {
        for(i=0;i<Vcb->PartitionMaps;i++) {
            s+=Vcb->Partitions[i].PartitionLen;
        }
    } else {
        if(s & ((int64)1 << 63)) s=0; /* FIXME ReactOS: this shift value was 64, which is undefined behavior. */
        s= Vcb->LastPossibleLBA - Vcb->Partitions[0].PartitionRoot;
    }
    return s >> Vcb->LB2B_Bits;
} // end UDFGetTotalSpace()

/*
    Callback for WCache.
    Returns the Allocated and Zero-filled flags for the given block;
    any data in 'unallocated' blocks may be changed during the flush process.
*/
uint32
UDFIsBlockAllocated(
    IN void* _Vcb,
    IN uint32 Lba
    )
{
    ULONG ret_val = 0;
    uint32* bm;
//    return TRUE;
    if(!(((PVCB)_Vcb)->VCBFlags & UDF_VCB_ASSUME_ALL_USED)) {
        // check used
        if((bm = (uint32*)(((PVCB)_Vcb)->FSBM_Bitmap)))
            ret_val = (UDFGetUsedBit(bm, Lba) ? WCACHE_BLOCK_USED : 0);
        // check zero-filled
        if((bm = (uint32*)(((PVCB)_Vcb)->ZSBM_Bitmap)))
            ret_val |= (UDFGetZeroBit(bm, Lba) ? WCACHE_BLOCK_ZERO : 0);
    } else {
        ret_val = WCACHE_BLOCK_USED;
    }
    // check bad block

    // WCache works with LOGICAL addresses, not PHYSICAL; the BB check must be performed UNDER the cache
/*
    if(bm = (uint32*)(((PVCB)_Vcb)->BSBM_Bitmap)) {
        ret_val |= (UDFGetBadBit(bm, Lba) ? WCACHE_BLOCK_BAD : 0);
        if(ret_val & WCACHE_BLOCK_BAD) {
            UDFPrint(("Marked BB @ %#x\n", Lba));
        }
    }
*/
    return ret_val;
} // end UDFIsBlockAllocated()

#ifdef _X86_

#ifdef _MSC_VER
#pragma warning(disable:4035)               // re-enable below
#endif

#ifdef _MSC_VER
__declspec (naked)
#endif
BOOLEAN
__fastcall
UDFGetBit__(
    IN uint32* arr, // ECX
    IN uint32 bit   // EDX
    )
{
//    CheckAddr(arr);
//    ASSERT(bit < 300000);
#ifdef _MSC_VER
    __asm {
        push  ebx
        push  ecx
//        mov   eax,bit
        mov   eax,edx
        shr   eax,3
        and   al,0fch
        add   eax,ecx  // eax+arr
        mov   eax,[eax]
        mov   cl,dl
        ror   eax,cl
        and   eax,1

        pop   ecx
        pop   ebx
        ret
    }
#else
    /* FIXME ReactOS */
    return ((BOOLEAN)(((((uint32*)(arr))[(bit)>>5]) >> ((bit)&31)) &1));
#endif
} // end UDFGetBit__()
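
/*
    Illustrative note (not part of the driver): both the asm and the C branches
    above address bit 'bit' as the word arr[bit >> 5] at bit position (bit & 31);
    the asm computes the byte offset of that word as (bit >> 3) & ~3.
    E.g. bit = 70 -> word index 2 (byte offset 8), bit position 6.
*/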

#ifdef _MSC_VER
__declspec (naked)
#endif
void
__fastcall
UDFSetBit__(
    IN uint32* arr, // ECX
    IN uint32 bit   // EDX
    )
{
//    CheckAddr(arr);
//    ASSERT(bit < 300000);
#ifdef _MSC_VER
    __asm {
        push  eax
        push  ebx
        push  ecx
//        mov   eax,bit
        mov   eax,edx
        shr   eax,3
        and   al,0fch
        add   eax,ecx  // eax+arr
        mov   ebx,1
        mov   cl,dl
        rol   ebx,cl
        or    [eax],ebx

        pop   ecx
        pop   ebx
        pop   eax
        ret
    }
#else
    /* FIXME ReactOS */
    (((uint32*)(arr))[(bit)>>5]) |= (((uint32)1) << ((bit)&31));
#endif
} // end UDFSetBit__()

void
UDFSetBits__(
    IN uint32* arr,
    IN uint32 bit,
    IN uint32 bc
    )
{
#if defined(_MSC_VER) && !defined(__clang__)
    __asm {
        push  eax
        push  ebx
        push  ecx
        push  edx
        push  esi

        mov   edx,bc
        or    edx,edx
        jz    short EO_sb_loop

        mov   ecx,bit
        mov   esi,arr

        mov   ebx,1
        rol   ebx,cl

        mov   eax,ecx
        shr   eax,3
        and   al,0fch

        test  cl, 0x1f
        jnz   short sb_loop_cont
sb_loop_2:
        cmp   edx,0x20
        jb    short sb_loop_cont

        mov   [dword ptr esi+eax],0xffffffff
        sub   edx,0x20
        jz    short EO_sb_loop
        add   eax,4
        add   ecx,0x20
        jmp   short sb_loop_2

sb_loop_cont:
        or    [esi+eax],ebx

        rol   ebx,1
        inc   ecx
        dec   edx
        jz    short EO_sb_loop

        test  cl, 0x1f
        jnz   short sb_loop_cont
        add   eax,4
        jmp   short sb_loop_2
EO_sb_loop:
        pop   esi
        pop   edx
        pop   ecx
        pop   ebx
        pop   eax
    }
#else
    /* FIXME ReactOS */
    uint32 j;
    for(j=0;j<bc;j++) {
        UDFSetBit(arr, bit+j);
    }
#endif
} // end UDFSetBits__()

#ifdef _MSC_VER
__declspec (naked)
#endif
void
__fastcall
UDFClrBit__(
    IN uint32* arr, // ECX
    IN uint32 bit   // EDX
    )
{
//    CheckAddr(arr);
//    ASSERT(bit < 300000);
#ifdef _MSC_VER
    __asm {
        push  eax
        push  ebx
        push  ecx
//        mov   eax,bit
        mov   eax,edx
        shr   eax,3
        and   al,0fch
        add   eax,ecx  // eax+arr
        mov   ebx,0fffffffeh
        mov   cl,dl
        rol   ebx,cl
        and   [eax],ebx

        pop   ecx
        pop   ebx
        pop   eax
        ret
    }
#else
    /* FIXME ReactOS */
    (((uint32*)(arr))[(bit)>>5]) &= (~(((uint32)1) << ((bit)&31)));
#endif
} // end UDFClrBit__()

void
UDFClrBits__(
    IN uint32* arr,
    IN uint32 bit,
    IN uint32 bc
    )
{
#if defined(_MSC_VER) && !defined(__clang__)
    __asm {
        push  eax
        push  ebx
        push  ecx
        push  edx
        push  esi

        mov   edx,bc
        or    edx,edx
        jz    short EO_cp_loop

        mov   ecx,bit
        mov   esi,arr

        mov   ebx,0xfffffffe
        rol   ebx,cl

        mov   eax,ecx
        shr   eax,3
        and   al,0fch

        test  cl, 0x1f
        jnz   short cp_loop_cont
cp_loop_2:
        cmp   edx,0x20
        jb    short cp_loop_cont

        mov   [dword ptr esi+eax],0x00000000
        sub   edx,0x20
        jz    short EO_cp_loop
        add   eax,4
        add   ecx,0x20
        jmp   short cp_loop_2

cp_loop_cont:
        and   [esi+eax],ebx

        rol   ebx,1
        inc   ecx
        dec   edx
        jz    short EO_cp_loop

        test  cl, 0x1f
        jnz   short cp_loop_cont
        add   eax,4
        jmp   short cp_loop_2
EO_cp_loop:
        pop   esi
        pop   edx
        pop   ecx
        pop   ebx
        pop   eax
    }
#else
    /* FIXME ReactOS */
    uint32 j;
    for(j=0;j<bc;j++) {
        UDFClrBit(arr, bit+j);
    }
#endif
} // end UDFClrBits__()

#ifdef _MSC_VER
#pragma warning(default:4035)
#endif
#endif // _X86_