#define _SYSTEM

#include <assert.h>
#include <string.h>
#include <errno.h>
#include <math.h>
#include <stdlib.h>

#include <machine/vmparam.h>

#include <sys/param.h>
#include <sys/mman.h>

#include <minix/dmap.h>
#include <minix/libminixfs.h>
#include <minix/syslib.h>
#include <minix/sysutil.h>
#include <minix/u64.h>
#include <minix/bdev.h>
#include <minix/bitmap.h>

#include "inc.h"

/* Buffer (block) cache. To acquire a block, a routine calls lmfs_get_block(),
 * telling which block it wants. The block is then regarded as "in use" and
 * has its reference count incremented. All the blocks that are not in use are
 * chained together in an LRU list, with 'front' pointing to the least
 * recently used block, and 'rear' to the most recently used block. A reverse
 * chain is also maintained. Usage for LRU is measured by the time the
 * put_block() is done. The second parameter to put_block() can violate the
 * LRU order and put a block on the front of the list, if it will probably not
 * be needed again. This is used internally only; the lmfs_put_block() API
 * call has no second parameter. If a block is modified, the modifying routine
 * must mark the block as dirty, so the block will eventually be rewritten to
 * the disk.
 */

/* Flags to put_block(). */
#define ONE_SHOT	0x1	/* set if block will not be needed again */

#define BUFHASH(b)	((unsigned int)((b) % nr_bufs))
#define MARKCLEAN	lmfs_markclean

#define MINBUFS		6	/* minimal no of bufs for sanity check */

static struct buf *front;	/* points to least recently used free block */
static struct buf *rear;	/* points to most recently used free block */
static unsigned int bufs_in_use; /* # bufs currently in use (not on free list) */

static void rm_lru(struct buf *bp);
static int read_block(struct buf *bp, size_t size);
static void freeblock(struct buf *bp);
static void cache_heuristic_check(void);
static void put_block(struct buf *bp, int put_flags);

static int vmcache = 0; /* are we using vm's secondary cache? (initially not) */

static struct buf *buf;
static struct buf **buf_hash;	/* the buffer hash table */
static unsigned int nr_bufs;
static int may_use_vmcache;

static size_t fs_block_size = PAGE_SIZE;	/* raw i/o block size */

static fsblkcnt_t fs_btotal = 0, fs_bused = 0;

static int quiet = 0;

typedef struct buf *noxfer_buf_ptr_t;	/* annotation for temporary buf ptrs */

void lmfs_setquiet(int q) { quiet = q; }

static int fs_bufs_heuristic(int minbufs, fsblkcnt_t btotal,
	fsblkcnt_t bused, int blocksize)
{
  struct vm_stats_info vsi;
  int bufs;
  u32_t kbytes_used_fs, kbytes_total_fs, kbcache, kb_fsmax;
  u32_t kbytes_remain_mem;

  /* Set a reasonable cache size: cache at most a certain portion of the used
   * file system, and at most a certain percentage of the remaining memory.
   */
  if(vm_info_stats(&vsi) != OK) {
	bufs = 1024;
	if(!quiet)
		printf("fslib: heuristic info fail: default to %d bufs\n",
			bufs);
	return bufs;
  }

  /* Remaining free memory is unused memory plus memory in use for the cache,
   * as the cache can be evicted.
   */
  kbytes_remain_mem = (u64_t)(vsi.vsi_free + vsi.vsi_cached) *
	vsi.vsi_pagesize / 1024;

  /* Check file system usage. */
  kbytes_used_fs  = (unsigned long)(((u64_t)bused * blocksize) / 1024);
  kbytes_total_fs = (unsigned long)(((u64_t)btotal * blocksize) / 1024);

  /* Heuristic for a desired cache size based on FS usage;
   * but never bigger than half of the total file system.
   */
  kb_fsmax = sqrt_approx(kbytes_used_fs)*40;
  kb_fsmax = MIN(kb_fsmax, kbytes_total_fs/2);

  /* Heuristic for a maximum usage: 10% of remaining memory. */
  kbcache = MIN(kbytes_remain_mem/10, kb_fsmax);
  bufs = kbcache * 1024 / blocksize;

  /* But we simply need MINBUFS no matter what. */
  if(bufs < minbufs)
	bufs = minbufs;

  return bufs;
}
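/* Worked example of the heuristic above (illustrative numbers only): with
 * 4 kB blocks, a file system with 2 GB in use gives kbytes_used_fs = 2097152,
 * so kb_fsmax = sqrt_approx(2097152) * 40, roughly 1448 * 40 = 57920 kB.
 * With 256 MB of free plus cached memory, kbytes_remain_mem / 10 = 26214 kB,
 * which is the smaller of the two, yielding about 26214 * 1024 / 4096 = 6553
 * buffers.
 */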
void lmfs_change_blockusage(int delta)
{
  /* Change the number of allocated blocks by 'delta.'
   * Also accumulate the delta since the last cache re-evaluation.
   * If it is outside a certain band, ask the cache library to
   * re-evaluate the cache size.
   */
  static int bitdelta = 0, warn_low = TRUE, warn_high = TRUE;

  /* Adjust the file system block usage counter accordingly. Do bounds
   * checking, and report file system misbehavior.
   */
  if (delta > 0 && (fsblkcnt_t)delta > fs_btotal - fs_bused) {
	if (warn_high) {
		printf("libminixfs: block usage overflow\n");
		warn_high = FALSE;
	}
	delta = (int)(fs_btotal - fs_bused);
  } else if (delta < 0 && (fsblkcnt_t)-delta > fs_bused) {
	if (warn_low) {
		printf("libminixfs: block usage underflow\n");
		warn_low = FALSE;
	}
	delta = -(int)fs_bused;
  }
  fs_bused += delta;

  bitdelta += delta;

#define BAND_KB (10*1024)	/* recheck cache every 10MB change */

  /* If the accumulated delta exceeds the configured threshold, resize
   * the cache, but only if the cache isn't in use any more. In order to
   * avoid that the latter case blocks a resize forever, we also call
   * this function from lmfs_flushall(). Since lmfs_buf_pool() may call
   * lmfs_flushall(), reset 'bitdelta' before doing the heuristics check.
   */
  if (bufs_in_use == 0 &&
	(bitdelta*(int)fs_block_size/1024 > BAND_KB ||
	 bitdelta*(int)fs_block_size/1024 < -BAND_KB)) {
	bitdelta = 0;
	cache_heuristic_check();
  }
}
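/* Example of the band check above (illustrative numbers only): with 4 kB
 * blocks, BAND_KB corresponds to 10240 kB, so a net usage change of more than
 * 2560 blocks in either direction since the last re-evaluation triggers a
 * cache size recheck, provided no buffers are currently held.
 */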
void lmfs_markdirty(struct buf *bp)
{
	bp->lmfs_flags |= VMMC_DIRTY;
}

void lmfs_markclean(struct buf *bp)
{
	bp->lmfs_flags &= ~VMMC_DIRTY;
}

int lmfs_isclean(struct buf *bp)
{
	return !(bp->lmfs_flags & VMMC_DIRTY);
}

static void free_unused_blocks(void)
{
	struct buf *bp;

	int freed = 0, bytes = 0;
	printf("libminixfs: freeing; %d blocks in use\n", bufs_in_use);
	for(bp = &buf[0]; bp < &buf[nr_bufs]; bp++) {
		if(bp->lmfs_bytes > 0 && bp->lmfs_count == 0) {
			freed++;
			bytes += bp->lmfs_bytes;
			freeblock(bp);
		}
	}
	printf("libminixfs: freeing; %d blocks, %d bytes\n", freed, bytes);
}

static void lmfs_alloc_block(struct buf *bp, size_t block_size)
{
	ASSERT(!bp->data);
	ASSERT(bp->lmfs_bytes == 0);

	if((bp->data = mmap(0, block_size, PROT_READ|PROT_WRITE,
		MAP_PREALLOC|MAP_ANON, -1, 0)) == MAP_FAILED) {
		free_unused_blocks();
		if((bp->data = mmap(0, block_size, PROT_READ|PROT_WRITE,
			MAP_PREALLOC|MAP_ANON, -1, 0)) == MAP_FAILED) {
			panic("libminixfs: could not allocate block");
		}
	}
	assert(bp->data);
	bp->lmfs_bytes = block_size;
	bp->lmfs_needsetcache = 1;
}

/*===========================================================================*
 *				lmfs_get_block				     *
 *===========================================================================*/
int lmfs_get_block(struct buf **bpp, dev_t dev, block64_t block, int how)
{
	return lmfs_get_block_ino(bpp, dev, block, how, VMC_NO_INODE, 0);
}

static void munmap_t(void *a, int len)
{
	assert(a);
	assert(a != MAP_FAILED);
	assert(!((vir_bytes)a % PAGE_SIZE));
	assert(len > 0);

	len = roundup(len, PAGE_SIZE);

	assert(!(len % PAGE_SIZE));

	if(munmap(a, len) < 0)
		panic("libminixfs cache: munmap failed");
}

static void raisecount(struct buf *bp)
{
	ASSERT(bp->lmfs_count < CHAR_MAX);
	bp->lmfs_count++;
	if(bp->lmfs_count == 1) bufs_in_use++;
	assert(bufs_in_use > 0);
}

static void lowercount(struct buf *bp)
{
	assert(bufs_in_use > 0);
	ASSERT(bp->lmfs_count > 0);
	bp->lmfs_count--;
	if(bp->lmfs_count == 0) bufs_in_use--;
}

static void freeblock(struct buf *bp)
{
	ASSERT(bp->lmfs_count == 0);
	/* If the block taken is dirty, make it clean by writing it to the
	 * disk. Avoid hysteresis by flushing all other dirty blocks for the
	 * same device.
	 */
	if (bp->lmfs_dev != NO_DEV) {
		if (!lmfs_isclean(bp)) lmfs_flushdev(bp->lmfs_dev);
		assert(bp->lmfs_bytes > 0);
		bp->lmfs_dev = NO_DEV;
	}

	/* Release the block's data pages, if any. */
	MARKCLEAN(bp);		/* NO_DEV blocks may be marked dirty */
	if(bp->lmfs_bytes > 0) {
		assert(bp->data);
		munmap_t(bp->data, bp->lmfs_bytes);
		bp->lmfs_bytes = 0;
		bp->data = NULL;
	} else assert(!bp->data);
}
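/* For reference, a typical caller's view of the API above (an illustrative
 * sketch only; 'dev', 'blocknr', 'data' and 'size' are hypothetical, the
 * lmfs_* calls are the real interface):
 *
 *	struct buf *bp;
 *	int r;
 *
 *	if ((r = lmfs_get_block(&bp, dev, blocknr, NORMAL)) != OK)
 *		return r;
 *	memcpy(bp->data, data, size);	// modify the block's contents
 *	lmfs_markdirty(bp);		// ensure eventual write-back
 *	lmfs_put_block(bp);		// release; block moves to the LRU
 */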
/*===========================================================================*
 *				find_block				     *
 *===========================================================================*/
static struct buf *find_block(dev_t dev, block64_t block)
{
/* Search the hash chain for (dev, block). Return the buffer structure if
 * found, or NULL otherwise.
 */
  struct buf *bp;
  int b;

  assert(dev != NO_DEV);

  b = BUFHASH(block);
  for (bp = buf_hash[b]; bp != NULL; bp = bp->lmfs_hash)
	if (bp->lmfs_blocknr == block && bp->lmfs_dev == dev)
		return bp;

  return NULL;
}

/*===========================================================================*
 *				get_block_ino				     *
 *===========================================================================*/
static int get_block_ino(struct buf **bpp, dev_t dev, block64_t block, int how,
	ino_t ino, u64_t ino_off, size_t block_size)
{
/* Check to see if the requested block is in the block cache. The requested
 * block is identified by the block number in 'block' on device 'dev', counted
 * in the file system block size. The amount of data requested for this block
 * is given in 'block_size', which may be less than the file system block size
 * iff the requested block is the last (partial) block on a device. Note that
 * the given block size does *not* affect the conversion of 'block' to a byte
 * offset! Either way, if the block could be obtained, either from the cache
 * or by reading from the device, return OK, with a pointer to the buffer
 * structure stored in 'bpp'. If not, return a negative error code (and no
 * buffer). If necessary, evict some other block and fetch the contents from
 * disk (if 'how' is NORMAL). If 'how' is NO_READ, the caller intends to
 * overwrite the requested block in its entirety, so it is only necessary to
 * see if it is in the cache; if it is not, any free buffer will do. If 'how'
 * is PEEK, the function returns the block if it is in the cache or the VM
 * cache, and an ENOENT error code otherwise.
 * In addition to the LRU chain, there is also a hash chain to link together
 * blocks whose block numbers end with the same bit strings, for fast lookup.
 */
  int b, r;
  static struct buf *bp;
  uint64_t dev_off;
  struct buf *prev_ptr;

  assert(buf_hash);
  assert(buf);
  assert(nr_bufs > 0);

  ASSERT(fs_block_size > 0);

  assert(dev != NO_DEV);

  assert(block <= UINT64_MAX / fs_block_size);

  dev_off = block * fs_block_size;

  if((ino_off % fs_block_size)) {
	printf("cache: unaligned lmfs_get_block_ino ino_off %llu\n",
		ino_off);
	util_stacktrace();
  }

  /* See if the block is in the cache. If so, we can return it right away. */
  bp = find_block(dev, block);
  if (bp != NULL && !(bp->lmfs_flags & VMMC_EVICTED)) {
	ASSERT(bp->lmfs_dev == dev);
	ASSERT(bp->lmfs_dev != NO_DEV);

	/* The block must have exactly the requested number of bytes. */
	if (bp->lmfs_bytes != block_size)
		return EIO;

	/* The block needed has been found. */
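	/* If nobody is currently holding the block, it is still on the LRU
	 * chain; take it off and lock it into the cache so that VM cannot
	 * evict it while it is in use.
	 */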
	if (bp->lmfs_count == 0) {
		rm_lru(bp);
		ASSERT(bp->lmfs_needsetcache == 0);
		ASSERT(!(bp->lmfs_flags & VMMC_BLOCK_LOCKED));
		/* FIXME: race condition against the VMMC_EVICTED check */
		bp->lmfs_flags |= VMMC_BLOCK_LOCKED;
	}
	raisecount(bp);
	ASSERT(bp->lmfs_flags & VMMC_BLOCK_LOCKED);
	ASSERT(bp->data);

	if(ino != VMC_NO_INODE) {
		if(bp->lmfs_inode == VMC_NO_INODE
		|| bp->lmfs_inode != ino
		|| bp->lmfs_inode_offset != ino_off) {
			bp->lmfs_inode = ino;
			bp->lmfs_inode_offset = ino_off;
			bp->lmfs_needsetcache = 1;
		}
	}

	*bpp = bp;
	return OK;
  }

  /* We had the block in the cache but VM evicted it; invalidate it. */
  if (bp != NULL) {
	assert(bp->lmfs_flags & VMMC_EVICTED);
	ASSERT(bp->lmfs_count == 0);
	ASSERT(!(bp->lmfs_flags & VMMC_BLOCK_LOCKED));
	ASSERT(!(bp->lmfs_flags & VMMC_DIRTY));
	bp->lmfs_dev = NO_DEV;
	bp->lmfs_bytes = 0;
	bp->data = NULL;
  }

  /* Desired block is not on available chain. Find a free block to use. */
  if(bp) {
	ASSERT(bp->lmfs_flags & VMMC_EVICTED);
  } else {
	if ((bp = front) == NULL) panic("all buffers in use: %d", nr_bufs);
  }
  assert(bp);

  rm_lru(bp);

  /* Remove the block that was just taken from its hash chain. */
  b = BUFHASH(bp->lmfs_blocknr);
  prev_ptr = buf_hash[b];
  if (prev_ptr == bp) {
	buf_hash[b] = bp->lmfs_hash;
  } else {
	/* The block just taken is not on the front of its hash chain. */
	while (prev_ptr->lmfs_hash != NULL)
		if (prev_ptr->lmfs_hash == bp) {
			prev_ptr->lmfs_hash = bp->lmfs_hash; /* found it */
			break;
		} else {
			prev_ptr = prev_ptr->lmfs_hash;	/* keep looking */
		}
  }

  freeblock(bp);

  bp->lmfs_inode = ino;
  bp->lmfs_inode_offset = ino_off;

  bp->lmfs_flags = VMMC_BLOCK_LOCKED;
  bp->lmfs_needsetcache = 0;
  bp->lmfs_dev = dev;		/* fill in device number */
  bp->lmfs_blocknr = block;	/* fill in block number */
  ASSERT(bp->lmfs_count == 0);
  raisecount(bp);
  b = BUFHASH(bp->lmfs_blocknr);
  bp->lmfs_hash = buf_hash[b];

  buf_hash[b] = bp;		/* add to hash list */

  assert(dev != NO_DEV);

  /* The block is not found in our cache, but we do want it if it's in the VM
   * cache. The exception is NO_READ, purely for context switching performance
   * reasons. NO_READ is used for 1) newly allocated blocks, 2) blocks being
   * prefetched, and 3) blocks about to be fully overwritten. In the first two
   * cases, VM will not have the block in its cache anyway, and for the third
   * we save on one VM call only if the block is in the VM cache.
   */
  assert(!bp->data);
  assert(!bp->lmfs_bytes);
  if (how != NO_READ && vmcache) {
	if((bp->data = vm_map_cacheblock(dev, dev_off, ino, ino_off,
		&bp->lmfs_flags, roundup(block_size, PAGE_SIZE))) != MAP_FAILED) {
		bp->lmfs_bytes = block_size;
		ASSERT(!bp->lmfs_needsetcache);
		*bpp = bp;
		return OK;
	}
  }
  bp->data = NULL;

  /* The block is not in the cache, and VM does not know about it. If we were
   * requested to search for the block only, we can now return failure to the
   * caller. Return the block to the pool without allocating data pages, since
   * these would be freed upon recycling the block anyway.
   */
  if (how == PEEK) {
	bp->lmfs_dev = NO_DEV;

	put_block(bp, ONE_SHOT);

	return ENOENT;
  }

  /* Not in the cache; reserve memory for its contents. */
  lmfs_alloc_block(bp, block_size);

  assert(bp->data);

  if (how == NORMAL) {
	/* Try to read the block. Return an error code on failure. */
	if ((r = read_block(bp, block_size)) != OK) {
		put_block(bp, 0);

		return r;
	}
  } else if(how == NO_READ) {
	/* This block will be overwritten by new contents. */
  } else
	panic("unexpected 'how' value: %d", how);

  assert(bp->data);

  *bpp = bp;	/* return the newly acquired block */
  return OK;
}

/*===========================================================================*
 *				lmfs_get_block_ino			     *
 *===========================================================================*/
int lmfs_get_block_ino(struct buf **bpp, dev_t dev, block64_t block, int how,
	ino_t ino, u64_t ino_off)
{
	return get_block_ino(bpp, dev, block, how, ino, ino_off,
		fs_block_size);
}

/*===========================================================================*
 *				lmfs_get_partial_block			     *
 *===========================================================================*/
int lmfs_get_partial_block(struct buf **bpp, dev_t dev, block64_t block,
	int how, size_t block_size)
{
	return get_block_ino(bpp, dev, block, how, VMC_NO_INODE, 0,
		block_size);
}

/*===========================================================================*
 *				put_block				     *
 *===========================================================================*/
static void put_block(struct buf *bp, int put_flags)
{
/* Return a block to the list of available blocks. Depending on 'put_flags'
 * it may be put on the front or rear of the LRU chain. Blocks that are
 * expected to be needed again at some point go on the rear; blocks that are
 * unlikely to be needed again at all go on the front.
 */
  dev_t dev;
  uint64_t dev_off;
  int r, setflags;

  assert(bp != NULL);

  dev = bp->lmfs_dev;

  dev_off = bp->lmfs_blocknr * fs_block_size;

  lowercount(bp);
  if (bp->lmfs_count != 0) return;	/* block is still in use */

  /* Put this block back on the LRU chain. */
  if (dev == NO_DEV || dev == DEV_RAM || (put_flags & ONE_SHOT)) {
	/* Block will not be needed again. Put it on the front of the chain.
	 * It will be the next block to be evicted from the cache.
	 */
	bp->lmfs_prev = NULL;
	bp->lmfs_next = front;
	if (front == NULL)
		rear = bp;	/* LRU chain was empty */
	else
		front->lmfs_prev = bp;
	front = bp;
  }
  else {
	/* Block may be needed again. Put it on the rear of the chain.
	 * It will not be evicted from the cache for a long time.
	 */
	bp->lmfs_prev = rear;
	bp->lmfs_next = NULL;
	if (rear == NULL)
		front = bp;
	else
		rear->lmfs_next = bp;
	rear = bp;
  }

  assert(bp->lmfs_flags & VMMC_BLOCK_LOCKED);
  bp->lmfs_flags &= ~VMMC_BLOCK_LOCKED;

  /* The block has sensible content; if necessary, identify it to VM. */
  if(vmcache && bp->lmfs_needsetcache && dev != NO_DEV) {
	assert(bp->data);

	setflags = (put_flags & ONE_SHOT) ? VMSF_ONCE : 0;

	if ((r = vm_set_cacheblock(bp->data, dev, dev_off, bp->lmfs_inode,
		bp->lmfs_inode_offset, &bp->lmfs_flags,
		roundup(bp->lmfs_bytes, PAGE_SIZE), setflags)) != OK) {
		if(r == ENOSYS) {
			printf("libminixfs: ENOSYS, disabling VM calls\n");
			vmcache = 0;
		} else if (r == ENOMEM) {
			/* Do not panic in this case. Running out of memory
			 * is bad, especially since it may lead to
			 * applications crashing when trying to access
			 * memory-mapped pages we haven't been able to pass
			 * off to the VM cache, but the entire file system
			 * crashing is always worse.
			 */
			printf("libminixfs: no memory for cache block!\n");
		} else {
			panic("libminixfs: setblock of %p dev 0x%llx off "
				"0x%llx failed\n", bp->data, dev, dev_off);
		}
	}
  }
  bp->lmfs_needsetcache = 0;

  /* Now that we (may) have given the block to VM, invalidate the block if it
   * is a one-shot block. Otherwise, it may still be reobtained immediately
   * after, which could be a problem if VM already forgot the block and we
   * are expected to pass it to VM again, which then wouldn't happen.
   */
  if (put_flags & ONE_SHOT)
	bp->lmfs_dev = NO_DEV;
}

/*===========================================================================*
 *				lmfs_put_block				     *
 *===========================================================================*/
void lmfs_put_block(struct buf *bp)
{
/* User interface to put_block(). */

  if (bp == NULL) return;	/* for poorly written file systems */

  put_block(bp, 0);
}

/*===========================================================================*
 *				lmfs_free_block				     *
 *===========================================================================*/
void lmfs_free_block(dev_t dev, block64_t block)
{
/* The file system has just freed the given block. The block may previously
 * have been in use as a data block for an inode. Therefore, we now need to
 * tell VM that the block is no longer associated with an inode. If we fail
 * to do so and the inode now has a hole at this location, mapping in the
 * hole would yield the old block contents rather than a zeroed page. In
 * addition, if the block is in the cache, it will be removed, even if it
 * was dirty.
 */
  struct buf *bp;
  int r;

  /* Tell VM to forget about the block. The primary purpose of this call is
   * to break the inode association, but since the block is part of a mounted
   * file system, it is not expected to be accessed directly anyway. So, save
   * some cache memory by throwing it out of the VM cache altogether.
   */
  if (vmcache) {
	if ((r = vm_forget_cacheblock(dev, block * fs_block_size,
		fs_block_size)) != OK)
		printf("libminixfs: vm_forget_cacheblock failed (%d)\n", r);
  }

  if ((bp = find_block(dev, block)) != NULL) {
	lmfs_markclean(bp);

	/* Invalidate the block. The block may or may not be in use right
	 * now, so don't be smart about freeing memory or repositioning in
	 * the LRU.
	 */
	bp->lmfs_dev = NO_DEV;
  }

  /* Note that this is *not* the right place to implement TRIM support. Even
   * though the block is freed, on the device it may still be part of a
   * previous checkpoint or snapshot of some sort. Only the file system can
   * be trusted to decide which blocks can be reused on the device!
   */
}
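/* A sketch of how a file system might combine the call above with the one
 * below when punching a hole in a file (illustrative only; the bookkeeping
 * around the lmfs_* calls is hypothetical and FS-specific):
 *
 *	blocknr = fs_truncate_zone(rip, off);	// FS-specific deallocation
 *	lmfs_free_block(dev, blocknr);		// break the VM inode link
 *	lmfs_zero_block_ino(dev, ino, off);	// let mmap() see zeroes
 */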
/*===========================================================================*
 *				lmfs_zero_block_ino			     *
 *===========================================================================*/
void lmfs_zero_block_ino(dev_t dev, ino_t ino, u64_t ino_off)
{
/* Files may have holes. From an application perspective, these are just file
 * regions filled with zeroes. From a file system perspective however, holes
 * may represent unallocated regions on disk. Thus, these holes do not have
 * corresponding blocks on the disk, and therefore also no block number.
 * Therefore, we cannot simply use lmfs_get_block_ino() for them. For reads,
 * this is not a problem, since the file system can just zero out the target
 * application buffer instead. For mapped pages however, this *is* a problem,
 * since the VM cache needs to be told about the corresponding block, and VM
 * does not accept blocks without a device offset. The role of this function
 * is therefore to tell VM about the hole using a fake device offset. The
 * device offsets are picked so that the VM cache will see a block
 * memory-mapped for the hole in the file, while the same block is not
 * visible when memory-mapping the block device.
 */
  struct buf *bp;
  static block64_t fake_block = 0;
  int r;

  if (!vmcache)
	return;

  assert(fs_block_size > 0);

  /* Pick a block number which is above the threshold of what can possibly be
   * mapped in by mmap'ing the device, since off_t is signed, and it is safe
   * to say that it will take a while before we have 8-exabyte devices. For
   * example, with 4 kB blocks the first fake block number is 2^63 / 2^12 =
   * 2^51, i.e., device offset 2^63. Pick a different block number each time
   * to avoid possible concurrency issues.
   * FIXME: it does not seem like VM actually verifies mmap offsets though..
   */
  if (fake_block == 0 || ++fake_block >= UINT64_MAX / fs_block_size)
	fake_block = ((uint64_t)INT64_MAX + 1) / fs_block_size;

  /* Obtain a block. */
  if ((r = lmfs_get_block_ino(&bp, dev, fake_block, NO_READ, ino,
	ino_off)) != OK)
	panic("libminixfs: getting a NO_READ block failed: %d", r);
  assert(bp != NULL);
  assert(bp->lmfs_dev != NO_DEV);

  /* The block is already zeroed, as it has just been allocated with mmap.
   * File systems do not rely on this assumption yet, so if VM ever gets
   * changed to not clear the blocks we allocate (e.g., by recycling pages in
   * the VM cache for the same process, which would be safe), we need to add
   * a memset here.
   */

  /* Release the block. We don't expect it to be accessed ever again.
   * Moreover, if we keep the block around in the VM cache, it may
   * erroneously be mapped in beyond the file end later. Hence, use VMSF_ONCE
   * when passing it to VM. TODO: tell VM that it is an all-zeroes block, so
   * that VM can deduplicate all such pages in its cache.
   */
  put_block(bp, ONE_SHOT);
}

void lmfs_set_blockusage(fsblkcnt_t btotal, fsblkcnt_t bused)
{
  assert(bused <= btotal);
  fs_btotal = btotal;
  fs_bused = bused;

  /* If the cache isn't in use, we could resize it now. */
  if (bufs_in_use == 0)
	cache_heuristic_check();
}

/*===========================================================================*
 *				read_block				     *
 *===========================================================================*/
static int read_block(struct buf *bp, size_t block_size)
{
/* Read a disk block of 'block_size' bytes. The given size is always the FS
 * block size, except for the last block of a device. If an I/O error occurs,
 * invalidate the block and return an error code.
 */
  ssize_t r;
  off_t pos;
  dev_t dev = bp->lmfs_dev;

  assert(dev != NO_DEV);

  ASSERT(bp->lmfs_bytes == block_size);
  ASSERT(fs_block_size > 0);

  pos = (off_t)bp->lmfs_blocknr * fs_block_size;
  if (block_size > PAGE_SIZE) {
#define MAXPAGES 20
	vir_bytes blockrem, vaddr = (vir_bytes) bp->data;
	int p = 0;
	static iovec_t iovec[MAXPAGES];
	blockrem = block_size;
	while(blockrem > 0) {
		vir_bytes chunk = blockrem >= PAGE_SIZE ? PAGE_SIZE : blockrem;
		assert(p < MAXPAGES);	/* do not overrun the static iovec */
		iovec[p].iov_addr = vaddr;
		iovec[p].iov_size = chunk;
		vaddr += chunk;
		blockrem -= chunk;
		p++;
	}
	r = bdev_gather(dev, pos, iovec, p, BDEV_NOFLAGS);
  } else {
	r = bdev_read(dev, pos, bp->data, block_size, BDEV_NOFLAGS);
  }
  if (r != (ssize_t)block_size) {
	printf("fs cache: I/O error on device %d/%d, block %"PRIu64" (%zd)\n",
		major(dev), minor(dev), bp->lmfs_blocknr, r);
	if (r >= 0)
		r = EIO; /* TODO: retry retrieving (just) the remaining part */

	bp->lmfs_dev = NO_DEV;	/* invalidate block */

	return r;
  }

  return OK;
}

/*===========================================================================*
 *				lmfs_invalidate				     *
 *===========================================================================*/
void lmfs_invalidate(
  dev_t device			/* device whose blocks are to be purged */
)
{
/* Remove all the blocks belonging to some device from the cache. */

  register struct buf *bp;

  assert(device != NO_DEV);

  for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++) {
	if (bp->lmfs_dev == device) {
		assert(bp->data);
		assert(bp->lmfs_bytes > 0);
		munmap_t(bp->data, bp->lmfs_bytes);
		bp->lmfs_dev = NO_DEV;
		bp->lmfs_bytes = 0;
		bp->data = NULL;
	}
  }

  /* Clear the cache even if VM caching is disabled for the file system:
   * caching may be disabled as a side effect of an error, leaving blocks
   * behind in the actual VM cache.
   */
  vm_clear_cache(device);
}

/*===========================================================================*
 *				sort_blocks				     *
 *===========================================================================*/
static void sort_blocks(struct buf **bufq, unsigned int bufqsize)
{
/* Shell sort the buffers on block number, using Knuth's 3x+1 gap sequence. */
  struct buf *bp;
  int i, j, gap;

  gap = 1;
  do
	gap = 3 * gap + 1;
  while ((unsigned int)gap <= bufqsize);

  while (gap != 1) {
	gap /= 3;
	for (j = gap; (unsigned int)j < bufqsize; j++) {
		for (i = j - gap; i >= 0 &&
		   bufq[i]->lmfs_blocknr > bufq[i + gap]->lmfs_blocknr;
		   i -= gap) {
			bp = bufq[i];
			bufq[i] = bufq[i + gap];
			bufq[i + gap] = bp;
		}
	}
  }
}
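/* Example of the gap sequence above (illustrative): for a queue of 10
 * buffers, the do-while loop yields an initial gap of 13, and the sorting
 * passes then run with gaps 4 and 1.
 */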
/*===========================================================================*
 *				rw_scattered				     *
 *===========================================================================*/
static void rw_scattered(
  dev_t dev,			/* major-minor device number */
  struct buf **bufq,		/* pointer to array of buffers */
  unsigned int bufqsize,	/* number of buffers */
  int rw_flag			/* READING or WRITING */
)
{
/* Read or write scattered data from a device. */

  register struct buf *bp;
  register iovec_t *iop;
  static iovec_t iovec[NR_IOREQS];
  off_t pos;
  unsigned int i, iov_per_block;
#if !defined(NDEBUG)
  unsigned int start_in_use = bufs_in_use, start_bufqsize = bufqsize;
#endif /* !defined(NDEBUG) */

  if(bufqsize == 0) return;

#if !defined(NDEBUG)
  /* for READING, check all buffers on the list are obtained and held
   * (count > 0)
   */
  if (rw_flag == READING) {
	assert(bufqsize <= LMFS_MAX_PREFETCH);

	for(i = 0; i < bufqsize; i++) {
		assert(bufq[i] != NULL);
		assert(bufq[i]->lmfs_count > 0);
	}

	/* therefore they are all 'in use' and must be at least this many */
	assert(start_in_use >= start_bufqsize);
  }

  assert(dev != NO_DEV);
  assert(fs_block_size > 0);
  assert(howmany(fs_block_size, PAGE_SIZE) <= NR_IOREQS);
#endif /* !defined(NDEBUG) */

  /* For WRITING, (Shell) sort buffers on lmfs_blocknr.
   * For READING, the buffers are already sorted.
   */
  if (rw_flag == WRITING)
	sort_blocks(bufq, bufqsize);

  /* Set up I/O vector and do I/O. The result of bdev I/O is OK if everything
   * went fine, otherwise the error code for the first failed transfer.
   */
  while (bufqsize > 0) {
	unsigned int p, nblocks = 0, niovecs = 0;
	int r;
	for (iop = iovec; nblocks < bufqsize; nblocks++) {
		vir_bytes vdata, blockrem;
		bp = bufq[nblocks];
		if (bp->lmfs_blocknr != bufq[0]->lmfs_blocknr + nblocks)
			break;
		blockrem = bp->lmfs_bytes;
		iov_per_block = howmany(blockrem, PAGE_SIZE);
		if (niovecs > NR_IOREQS - iov_per_block) break;
		vdata = (vir_bytes) bp->data;
		for(p = 0; p < iov_per_block; p++) {
			vir_bytes chunk =
			    blockrem < PAGE_SIZE ? blockrem : PAGE_SIZE;
			iop->iov_addr = vdata;
			iop->iov_size = chunk;
			vdata += PAGE_SIZE;
			blockrem -= chunk;
			iop++;
			niovecs++;
		}
		assert(p == iov_per_block);
		assert(blockrem == 0);
	}

	assert(nblocks > 0);
	assert(niovecs > 0 && niovecs <= NR_IOREQS);

	pos = (off_t)bufq[0]->lmfs_blocknr * fs_block_size;
	if (rw_flag == READING)
		r = bdev_gather(dev, pos, iovec, niovecs, BDEV_NOFLAGS);
	else
		r = bdev_scatter(dev, pos, iovec, niovecs, BDEV_NOFLAGS);

	/* Harvest the results. The driver may have returned an error, or it
	 * may have done less than what we asked for.
	 */
	if (r < 0) {
		printf("fs cache: I/O error %d on device %d/%d, "
			"block %"PRIu64"\n",
			r, major(dev), minor(dev), bufq[0]->lmfs_blocknr);
	}
	for (i = 0; i < nblocks; i++) {
		bp = bufq[i];
		if (r < (ssize_t)bp->lmfs_bytes) {
			/* Transfer failed. */
			if (i == 0) {
				bp->lmfs_dev = NO_DEV;	/* invalidate block */
			}
			break;
		}
		if (rw_flag == READING) {
			lmfs_put_block(bp);
		} else {
			MARKCLEAN(bp);
		}
		r -= bp->lmfs_bytes;
	}

	bufq += i;
	bufqsize -= i;

	if (rw_flag == READING) {
		/* Don't bother reading more than the device is willing to
		 * give at this time. Don't forget to release those extras.
		 */
		while (bufqsize > 0) {
			bp = *bufq++;
			bp->lmfs_dev = NO_DEV;	/* invalidate block */
			lmfs_put_block(bp);
			bufqsize--;
		}
	}
	if (rw_flag == WRITING && i == 0) {
		/* We're not making progress, this means we might keep
		 * looping. Buffers remain dirty if un-written. Buffers are
		 * lost if invalidate()d or LRU-removed while dirty. This is
		 * better than keeping unwritable blocks around forever..
		 */
		break;
	}
  }

#if !defined(NDEBUG)
  if(rw_flag == READING) {
	assert(start_in_use >= start_bufqsize);

	/* READING callers assume all bufs are released. */
	assert(start_in_use - start_bufqsize == bufs_in_use);
  }
#endif /* !defined(NDEBUG) */
}

/*===========================================================================*
 *				lmfs_readahead				     *
 *===========================================================================*/
void lmfs_readahead(dev_t dev, block64_t base_block, unsigned int nblocks,
	size_t last_size)
{
/* Read ahead 'nblocks' blocks starting from the block 'base_block' on device
 * 'dev'. The number of blocks must be between 1 and LMFS_MAX_PREFETCH,
 * inclusive. All blocks have the file system's block size, possibly except
 * the last block in the range, which is of size 'last_size'. The caller must
 * ensure that none of the blocks in the range are already in the cache.
 * However, the caller must also not rely on all or even any of the blocks to
 * be present in the cache afterwards--failures are (deliberately!) ignored.
 */
  static noxfer_buf_ptr_t bufq[LMFS_MAX_PREFETCH]; /* static for size only */
  struct buf *bp;
  unsigned int count;
  int r;

  assert(nblocks >= 1 && nblocks <= LMFS_MAX_PREFETCH);

  for (count = 0; count < nblocks; count++) {
	if (count == nblocks - 1)
		r = lmfs_get_partial_block(&bp, dev, base_block + count,
			NO_READ, last_size);
	else
		r = lmfs_get_block(&bp, dev, base_block + count, NO_READ);

	if (r != OK)
		break;

	/* We could add a flag that makes the get_block() calls fail if the
	 * block is already in the cache, but it is not a major concern if it
	 * is: we just perform a useless read in that case. However, if the
	 * block is cached *and* dirty, we are about to lose its new contents.
	 */
	assert(lmfs_isclean(bp));

	bufq[count] = bp;
  }

  rw_scattered(dev, bufq, count, READING);
}

/*===========================================================================*
 *				lmfs_readahead_limit			     *
 *===========================================================================*/
unsigned int lmfs_readahead_limit(void)
{
/* Return the maximum number of blocks that should be read ahead at once. The
 * return value is guaranteed to be between 1 and LMFS_MAX_PREFETCH,
 * inclusive.
 */
  unsigned int max_transfer, max_bufs;

  /* The returned value is the minimum of two factors: the maximum number of
   * blocks that can be transferred in a single I/O gather request (see how
   * rw_scattered() generates I/O requests), and a policy limit on the number
   * of buffers that any read-ahead operation may use (that is, thrash).
   */
  max_transfer = NR_IOREQS / MAX(fs_block_size / PAGE_SIZE, 1);

  /* The constants have been imported from MFS as is, and may need tuning. */
  if (nr_bufs < 50)
	max_bufs = 18;
  else
	max_bufs = nr_bufs - 4;

  return MIN(max_transfer, max_bufs);
}
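/* Worked example for the limit above (illustrative; assumes NR_IOREQS is 64):
 * with 4 kB blocks and 4 kB pages, max_transfer = 64 / 1 = 64. With a pool of
 * 1000 buffers, max_bufs = 996, so the read-ahead limit would be 64 blocks.
 */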
/*===========================================================================*
 *				lmfs_prefetch				     *
 *===========================================================================*/
void lmfs_prefetch(dev_t dev, const block64_t *blockset, unsigned int nblocks)
{
/* The given set of blocks is expected to be needed soon, so prefetch a
 * convenient subset. The blocks are expected to be sorted by likelihood of
 * being accessed soon, making the first block of the set the most important
 * block to prefetch right now. The caller must have made sure that the
 * blocks are not in the cache already. The array may have duplicate block
 * numbers.
 */
  bitchunk_t blocks_before[BITMAP_CHUNKS(LMFS_MAX_PREFETCH)];
  bitchunk_t blocks_after[BITMAP_CHUNKS(LMFS_MAX_PREFETCH)];
  block64_t block, base_block;
  unsigned int i, bit, nr_before, nr_after, span, limit, nr_blocks;

  if (nblocks == 0)
	return;

  /* Here is the deal. We are going to prefetch one range only, because
   * seeking is too expensive for just prefetching. The range we select
   * should at least include the first ("base") block of the given set, since
   * that is the block the caller is primarily interested in. Thus, the rest
   * of the range is going to have to be directly around this base block. We
   * first check which blocks from the set fall just before and after the
   * base block, which then allows us to construct a contiguous range of
   * desired blocks directly around the base block, in O(n) time. As a
   * natural part of this, we ignore duplicate blocks in the given set. We
   * then read from the beginning of this range, in order to maximize the
   * chance that a next prefetch request will continue from the last disk
   * position without requiring a seek. However, we do correct for the
   * maximum number of blocks we can (or should) read in at once, such that
   * we will still end up reading the base block.
   */
  base_block = blockset[0];

  memset(blocks_before, 0, sizeof(blocks_before));
  memset(blocks_after, 0, sizeof(blocks_after));

  for (i = 1; i < nblocks; i++) {
	block = blockset[i];

	if (block < base_block && block + LMFS_MAX_PREFETCH >= base_block) {
		bit = base_block - block - 1;
		assert(bit < LMFS_MAX_PREFETCH);
		SET_BIT(blocks_before, bit);
	} else if (block > base_block &&
		block - LMFS_MAX_PREFETCH <= base_block) {
		bit = block - base_block - 1;
		assert(bit < LMFS_MAX_PREFETCH);
		SET_BIT(blocks_after, bit);
	}
  }

  for (nr_before = 0; nr_before < LMFS_MAX_PREFETCH; nr_before++)
	if (!GET_BIT(blocks_before, nr_before))
		break;

  for (nr_after = 0; nr_after < LMFS_MAX_PREFETCH; nr_after++)
	if (!GET_BIT(blocks_after, nr_after))
		break;
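  /* Worked example (illustrative numbers): for blockset {100, 97, 98, 99,
   * 101}, the base block is 100, the scans above find nr_before = 3 and
   * nr_after = 1, and the candidate range is thus the five contiguous blocks
   * 97..101.
   */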
  /* The number of blocks to prefetch is the minimum of two factors: the
   * number of blocks in the range around the base block, and the maximum
   * number of blocks that should be read ahead at once at all.
   */
  span = nr_before + 1 + nr_after;
  limit = lmfs_readahead_limit();

  nr_blocks = MIN(span, limit);
  assert(nr_blocks >= 1 && nr_blocks <= LMFS_MAX_PREFETCH);

  /* Start prefetching from the lowest block within the contiguous range, but
   * make sure that we read at least the original base block itself, too.
   */
  base_block -= MIN(nr_before, nr_blocks - 1);

  lmfs_readahead(dev, base_block, nr_blocks, fs_block_size);
}

/*===========================================================================*
 *				lmfs_flushdev				     *
 *===========================================================================*/
void lmfs_flushdev(dev_t dev)
{
/* Flush all dirty blocks for one device. */

  register struct buf *bp;
  static noxfer_buf_ptr_t *dirty;
  static unsigned int dirtylistsize = 0;
  unsigned int ndirty;

  if(dirtylistsize != nr_bufs) {
	if(dirtylistsize > 0) {
		assert(dirty != NULL);
		free(dirty);
	}
	if(!(dirty = malloc(sizeof(dirty[0])*nr_bufs)))
		panic("couldn't allocate dirty buf list");
	dirtylistsize = nr_bufs;
  }

  for (bp = &buf[0], ndirty = 0; bp < &buf[nr_bufs]; bp++) {
	/* Do not flush dirty blocks that are in use (lmfs_count > 0): the
	 * file system may mark the block as dirty before changing its
	 * contents, in which case the new contents could end up being lost.
	 */
	if (!lmfs_isclean(bp) && bp->lmfs_dev == dev && bp->lmfs_count == 0) {
		dirty[ndirty++] = bp;
	}
  }

  rw_scattered(dev, dirty, ndirty, WRITING);
}

/*===========================================================================*
 *				rm_lru					     *
 *===========================================================================*/
static void rm_lru(struct buf *bp)
{
/* Remove a block from its LRU chain. */
  struct buf *next_ptr, *prev_ptr;

  next_ptr = bp->lmfs_next;	/* successor on LRU chain */
  prev_ptr = bp->lmfs_prev;	/* predecessor on LRU chain */
  if (prev_ptr != NULL)
	prev_ptr->lmfs_next = next_ptr;
  else
	front = next_ptr;	/* this block was at front of chain */

  if (next_ptr != NULL)
	next_ptr->lmfs_prev = prev_ptr;
  else
	rear = prev_ptr;	/* this block was at rear of chain */
}

/*===========================================================================*
 *				cache_resize				     *
 *===========================================================================*/
static void cache_resize(size_t blocksize, unsigned int bufs)
{
  struct buf *bp;

  assert(blocksize > 0);
  assert(bufs >= MINBUFS);

  for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++)
	if(bp->lmfs_count != 0) panic("change blocksize with buffer in use");

  lmfs_buf_pool(bufs);

  fs_block_size = blocksize;
}

static void cache_heuristic_check(void)
{
  int bufs, d;

  bufs = fs_bufs_heuristic(MINBUFS, fs_btotal, fs_bused, fs_block_size);

  /* Set the cache to the new heuristic size if the new one is more than 10%
   * off from the current one.
   */
1213 */ 1214 d = bufs-nr_bufs; 1215 if(d < 0) d = -d; 1216 if(d*100/nr_bufs > 10) { 1217 cache_resize(fs_block_size, bufs); 1218 } 1219 } 1220 1221 /*===========================================================================* 1222 * lmfs_set_blocksize * 1223 *===========================================================================*/ 1224 void lmfs_set_blocksize(size_t new_block_size) 1225 { 1226 cache_resize(new_block_size, MINBUFS); 1227 cache_heuristic_check(); 1228 1229 /* Decide whether to use seconday cache or not. 1230 * Only do this if the block size is a multiple of the page size, and using 1231 * the VM cache has been enabled for this FS. 1232 */ 1233 1234 vmcache = 0; 1235 1236 if(may_use_vmcache && !(new_block_size % PAGE_SIZE)) 1237 vmcache = 1; 1238 } 1239 1240 /*===========================================================================* 1241 * lmfs_buf_pool * 1242 *===========================================================================*/ 1243 void lmfs_buf_pool(int new_nr_bufs) 1244 { 1245 /* Initialize the buffer pool. */ 1246 register struct buf *bp; 1247 1248 assert(new_nr_bufs >= MINBUFS); 1249 1250 if(nr_bufs > 0) { 1251 assert(buf); 1252 lmfs_flushall(); 1253 for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++) { 1254 if(bp->data) { 1255 assert(bp->lmfs_bytes > 0); 1256 munmap_t(bp->data, bp->lmfs_bytes); 1257 } 1258 } 1259 } 1260 1261 if(buf) 1262 free(buf); 1263 1264 if(!(buf = calloc(sizeof(buf[0]), new_nr_bufs))) 1265 panic("couldn't allocate buf list (%d)", new_nr_bufs); 1266 1267 if(buf_hash) 1268 free(buf_hash); 1269 if(!(buf_hash = calloc(sizeof(buf_hash[0]), new_nr_bufs))) 1270 panic("couldn't allocate buf hash list (%d)", new_nr_bufs); 1271 1272 nr_bufs = new_nr_bufs; 1273 1274 bufs_in_use = 0; 1275 front = &buf[0]; 1276 rear = &buf[nr_bufs - 1]; 1277 1278 for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++) { 1279 bp->lmfs_blocknr = NO_BLOCK; 1280 bp->lmfs_dev = NO_DEV; 1281 bp->lmfs_next = bp + 1; 1282 bp->lmfs_prev = bp - 1; 1283 bp->data = NULL; 1284 bp->lmfs_bytes = 0; 1285 } 1286 front->lmfs_prev = NULL; 1287 rear->lmfs_next = NULL; 1288 1289 for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++) bp->lmfs_hash = bp->lmfs_next; 1290 buf_hash[0] = front; 1291 } 1292 1293 void lmfs_flushall(void) 1294 { 1295 struct buf *bp; 1296 for(bp = &buf[0]; bp < &buf[nr_bufs]; bp++) 1297 if(bp->lmfs_dev != NO_DEV && !lmfs_isclean(bp)) 1298 lmfs_flushdev(bp->lmfs_dev); 1299 1300 /* This is the moment where it is least likely (although certainly not 1301 * impossible!) that there are buffers in use, since buffers should not 1302 * be held across file system syncs. See if we already intended to 1303 * resize the buffer cache, but couldn't. Be aware that we may be 1304 * called indirectly from within lmfs_change_blockusage(), so care must 1305 * be taken not to recurse infinitely. TODO: see if it is better to 1306 * resize the cache from here *only*, thus guaranteeing a clean cache. 1307 */ 1308 lmfs_change_blockusage(0); 1309 } 1310 1311 size_t lmfs_fs_block_size(void) 1312 { 1313 return fs_block_size; 1314 } 1315 1316 void lmfs_may_use_vmcache(int ok) 1317 { 1318 may_use_vmcache = ok; 1319 } 1320