#define _SYSTEM

#include <assert.h>
#include <string.h>
#include <errno.h>
#include <math.h>
#include <stdlib.h>

#include <machine/vmparam.h>

#include <sys/param.h>
#include <sys/mman.h>

#include <minix/dmap.h>
#include <minix/libminixfs.h>
#include <minix/syslib.h>
#include <minix/sysutil.h>
#include <minix/u64.h>
#include <minix/bdev.h>

#include "inc.h"

/* Buffer (block) cache. To acquire a block, a routine calls lmfs_get_block(),
 * telling which block it wants. The block is then regarded as "in use" and
 * has its reference count incremented. All the blocks that are not in use are
 * chained together in an LRU list, with 'front' pointing to the least recently
 * used block, and 'rear' to the most recently used block. A reverse chain is
 * also maintained. Usage for LRU is measured by the time at which put_block()
 * is called. The second parameter to put_block() can violate the LRU order
 * and put a block on the front of the list, if it will probably not be needed
 * again. This is used internally only; the lmfs_put_block() API call has no
 * second parameter. If a block is modified, the modifying routine must mark
 * the block as dirty, so the block will eventually be rewritten to the disk.
 */

/* Flags to put_block(). */
#define ONE_SHOT 0x1	/* set if block will not be needed again */

#define BUFHASH(b) ((unsigned int)((b) % nr_bufs))
#define MARKCLEAN  lmfs_markclean

#define MINBUFS 6	/* minimal no of bufs for sanity check */

static struct buf *front;	/* points to least recently used free block */
static struct buf *rear;	/* points to most recently used free block */
static unsigned int bufs_in_use;/* # bufs currently in use (not on free list)*/

static void rm_lru(struct buf *bp);
static int read_block(struct buf *bp, size_t block_size);
static void freeblock(struct buf *bp);
static void cache_heuristic_check(void);
static void put_block(struct buf *bp, int put_flags);

static int vmcache = 0; /* are we using vm's secondary cache? (initially not) */

static struct buf *buf;
static struct buf **buf_hash;	/* the buffer hash table */
static unsigned int nr_bufs;
static int may_use_vmcache;

static size_t fs_block_size = PAGE_SIZE;	/* raw i/o block size */

static fsblkcnt_t fs_btotal = 0, fs_bused = 0;

static int quiet = 0;

void lmfs_setquiet(int q) { quiet = q; }

static int fs_bufs_heuristic(int minbufs, fsblkcnt_t btotal,
	fsblkcnt_t bused, int blocksize)
{
	struct vm_stats_info vsi;
	int bufs;
	u32_t kbytes_used_fs, kbytes_total_fs, kbcache, kb_fsmax;
	u32_t kbytes_remain_mem;

	/* set a reasonable cache size; cache at most a certain portion of
	 * the used FS, and at most a certain percentage of remaining memory
	 */
	if(vm_info_stats(&vsi) != OK) {
		bufs = 1024;
		if(!quiet)
			printf("fslib: heuristic info fail: default to %d bufs\n",
				bufs);
		return bufs;
	}

	/* remaining free memory is unused memory plus memory in use for the
	 * cache, as the cache can be evicted
	 */
	kbytes_remain_mem = (u64_t)(vsi.vsi_free + vsi.vsi_cached) *
		vsi.vsi_pagesize / 1024;

	/* check fs usage. */
	kbytes_used_fs  = (unsigned long)(((u64_t)bused * blocksize) / 1024);
	kbytes_total_fs = (unsigned long)(((u64_t)btotal * blocksize) / 1024);

	/* heuristic for a desired cache size based on FS usage;
	 * but never bigger than half of the total filesystem
	 */
	kb_fsmax = sqrt_approx(kbytes_used_fs)*40;
	kb_fsmax = MIN(kb_fsmax, kbytes_total_fs/2);

	/* heuristic for a maximum usage - 10% of remaining memory */
	kbcache = MIN(kbytes_remain_mem/10, kb_fsmax);
	bufs = kbcache * 1024 / blocksize;

	/* but we simply need MINBUFS no matter what */
	if(bufs < minbufs)
		bufs = minbufs;

	return bufs;
}
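/* A worked example of the heuristic above, with illustrative numbers only:
 * for a file system with btotal = 1048576 and bused = 262144 blocks of 4096
 * bytes, kbytes_used_fs = 1048576 kB and kbytes_total_fs = 4194304 kB, so
 * kb_fsmax = sqrt_approx(1048576) * 40 = 1024 * 40 = 40960 kB, well under
 * the 2097152 kB half-filesystem cap. With 256 MB (262144 kB) of evictable
 * memory remaining, kbcache = MIN(262144 / 10, 40960) = 26214 kB, which
 * yields bufs = 26214 * 1024 / 4096 = 6553 buffers.
 */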
void lmfs_change_blockusage(int delta)
{
	/* Change the number of allocated blocks by 'delta.'
	 * Also accumulate the delta since the last cache re-evaluation.
	 * If it is outside a certain band, ask the cache library to
	 * re-evaluate the cache size.
	 */
	static int bitdelta = 0, warn_low = TRUE, warn_high = TRUE;

	/* Adjust the file system block usage counter accordingly. Do bounds
	 * checking, and report file system misbehavior.
	 */
	if (delta > 0 && (fsblkcnt_t)delta > fs_btotal - fs_bused) {
		if (warn_high) {
			printf("libminixfs: block usage overflow\n");
			warn_high = FALSE;
		}
		delta = (int)(fs_btotal - fs_bused);
	} else if (delta < 0 && (fsblkcnt_t)-delta > fs_bused) {
		if (warn_low) {
			printf("libminixfs: block usage underflow\n");
			warn_low = FALSE;
		}
		delta = -(int)fs_bused;
	}
	fs_bused += delta;

	bitdelta += delta;

#define BAND_KB (10*1024)	/* recheck cache every 10MB change */

	/* If the accumulated delta exceeds the configured threshold, resize
	 * the cache, but only if the cache isn't in use any more. To avoid
	 * that the latter case blocks a resize forever, we also call this
	 * function from lmfs_flushall(). Since lmfs_buf_pool() may call
	 * lmfs_flushall(), reset 'bitdelta' before doing the heuristics
	 * check.
	 */
	if (bufs_in_use == 0 &&
	    (bitdelta*(int)fs_block_size/1024 > BAND_KB ||
	    bitdelta*(int)fs_block_size/1024 < -BAND_KB)) {
		bitdelta = 0;
		cache_heuristic_check();
	}
}
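/* A sketch of the intended call pattern from a file system; alloc_bit() and
 * free_bit() are hypothetical FS-side bitmap helpers, not part of this
 * library:
 *
 *	b = alloc_bit(sp, ...);		// FS allocates a data block
 *	lmfs_change_blockusage(1);	// one more block in use
 *	...
 *	free_bit(sp, b);		// FS releases the block again
 *	lmfs_change_blockusage(-1);	// one block less in use
 */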
void lmfs_markdirty(struct buf *bp)
{
	bp->lmfs_flags |= VMMC_DIRTY;
}

void lmfs_markclean(struct buf *bp)
{
	bp->lmfs_flags &= ~VMMC_DIRTY;
}

int lmfs_isclean(struct buf *bp)
{
	return !(bp->lmfs_flags & VMMC_DIRTY);
}

dev_t lmfs_dev(struct buf *bp)
{
	return bp->lmfs_dev;
}

static void free_unused_blocks(void)
{
	struct buf *bp;

	int freed = 0, bytes = 0;
	printf("libminixfs: freeing; %u blocks in use\n", bufs_in_use);
	for(bp = &buf[0]; bp < &buf[nr_bufs]; bp++) {
		if(bp->lmfs_bytes > 0 && bp->lmfs_count == 0) {
			freed++;
			bytes += bp->lmfs_bytes;
			freeblock(bp);
		}
	}
	printf("libminixfs: freeing; %d blocks, %d bytes\n", freed, bytes);
}

static void lmfs_alloc_block(struct buf *bp, size_t block_size)
{
	ASSERT(!bp->data);
	ASSERT(bp->lmfs_bytes == 0);

	/* mmap is page-granular, so the allocation is rounded up to a page
	 * multiple by the system; munmap_t() rounds up the same way.
	 */
	if((bp->data = mmap(0, block_size, PROT_READ|PROT_WRITE,
	    MAP_PREALLOC|MAP_ANON, -1, 0)) == MAP_FAILED) {
		free_unused_blocks();
		if((bp->data = mmap(0, block_size, PROT_READ|PROT_WRITE,
		    MAP_PREALLOC|MAP_ANON, -1, 0)) == MAP_FAILED) {
			panic("libminixfs: could not allocate block");
		}
	}
	assert(bp->data);
	bp->lmfs_bytes = block_size;
	bp->lmfs_needsetcache = 1;
}

/*===========================================================================*
 *				lmfs_get_block				     *
 *===========================================================================*/
int lmfs_get_block(struct buf **bpp, dev_t dev, block64_t block, int how)
{
	return lmfs_get_block_ino(bpp, dev, block, how, VMC_NO_INODE, 0);
}

static void munmap_t(void *a, int len)
{
	vir_bytes av = (vir_bytes) a;
	assert(a);
	assert(a != MAP_FAILED);
	assert(len > 0);
	assert(!(av % PAGE_SIZE));

	len = roundup(len, PAGE_SIZE);

	assert(!(len % PAGE_SIZE));

	if(munmap(a, len) < 0)
		panic("libminixfs cache: munmap failed");
}

static void raisecount(struct buf *bp)
{
	ASSERT(bp->lmfs_count >= 0);
	bp->lmfs_count++;
	if(bp->lmfs_count == 1) bufs_in_use++;
	assert(bufs_in_use > 0);
}

static void lowercount(struct buf *bp)
{
	assert(bufs_in_use > 0);
	ASSERT(bp->lmfs_count > 0);
	bp->lmfs_count--;
	if(bp->lmfs_count == 0) bufs_in_use--;
}

static void freeblock(struct buf *bp)
{
	ASSERT(bp->lmfs_count == 0);
	/* If the block taken is dirty, make it clean by writing it to the
	 * disk. Avoid hysteresis by flushing all other dirty blocks for the
	 * same device.
	 */
	if (bp->lmfs_dev != NO_DEV) {
		if (!lmfs_isclean(bp)) lmfs_flushdev(bp->lmfs_dev);
		assert(bp->lmfs_bytes > 0);
		bp->lmfs_dev = NO_DEV;
	}

	/* Fill in block's parameters and add it to the hash chain where it
	 * goes.
	 */
	MARKCLEAN(bp);		/* NO_DEV blocks may be marked dirty */
	if(bp->lmfs_bytes > 0) {
		assert(bp->data);
		munmap_t(bp->data, bp->lmfs_bytes);
		bp->lmfs_bytes = 0;
		bp->data = NULL;
	} else assert(!bp->data);
}
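/* The usual lifecycle of a cached block, as a sketch (error handling mostly
 * omitted; 'dev', 'block' and 'data' are assumed to come from the caller):
 *
 *	struct buf *bp;
 *	int r;
 *
 *	if ((r = lmfs_get_block(&bp, dev, block, NORMAL)) != OK)
 *		return r;			// fetch and lock the block
 *	memcpy(bp->data, data, lmfs_fs_block_size());
 *	lmfs_markdirty(bp);			// schedule it for writeback
 *	lmfs_put_block(bp);			// unlock; block stays cached
 */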
/*===========================================================================*
 *				find_block				     *
 *===========================================================================*/
static struct buf *find_block(dev_t dev, block64_t block)
{
	/* Search the hash chain for (dev, block). Return the buffer structure
	 * if found, or NULL otherwise.
	 */
	struct buf *bp;
	int b;

	assert(dev != NO_DEV);

	b = BUFHASH(block);
	for (bp = buf_hash[b]; bp != NULL; bp = bp->lmfs_hash)
		if (bp->lmfs_blocknr == block && bp->lmfs_dev == dev)
			return bp;

	return NULL;
}
/*===========================================================================*
 *				get_block_ino				     *
 *===========================================================================*/
static int get_block_ino(struct buf **bpp, dev_t dev, block64_t block, int how,
	ino_t ino, u64_t ino_off, size_t block_size)
{
	/* Check to see if the requested block is in the block cache. The
	 * requested block is identified by the block number in 'block' on
	 * device 'dev', counted in the file system block size. The amount of
	 * data requested for this block is given in 'block_size', which may
	 * be less than the file system block size iff the requested block is
	 * the last (partial) block on a device. Note that the given block
	 * size does *not* affect the conversion of 'block' to a byte offset!
	 * Either way, if the block could be obtained, either from the cache
	 * or by reading from the device, return OK, with a pointer to the
	 * buffer structure stored in 'bpp'. If not, return a negative error
	 * code (and no buffer). If necessary, evict some other block and
	 * fetch the contents from disk (if 'how' is NORMAL). If 'how' is
	 * NO_READ, the caller intends to overwrite the requested block in its
	 * entirety, so it is only necessary to see if it is in the cache; if
	 * it is not, any free buffer will do. If 'how' is PREFETCH, the block
	 * need not be read from the disk, and the device is not to be marked
	 * on the block (i.e., set to NO_DEV), so callers can tell if the
	 * block returned is valid. If 'how' is PEEK, the function returns the
	 * block if it is in the cache or the VM cache, and an ENOENT error
	 * code otherwise. In addition to the LRU chain, there is also a hash
	 * chain to link together blocks whose block numbers end with the same
	 * bit strings, for fast lookup.
	 */
	int b, r;
	static struct buf *bp;
	uint64_t dev_off;
	struct buf *prev_ptr;

	assert(buf_hash);
	assert(buf);
	assert(nr_bufs > 0);

	ASSERT(fs_block_size > 0);

	assert(dev != NO_DEV);

	assert(block <= UINT64_MAX / fs_block_size);

	dev_off = block * fs_block_size;

	if((ino_off % fs_block_size)) {
		printf("cache: unaligned lmfs_get_block_ino ino_off %llu\n",
			ino_off);
		util_stacktrace();
	}

	/* See if the block is in the cache. If so, we can return it right
	 * away.
	 */
	bp = find_block(dev, block);
	if (bp != NULL && !(bp->lmfs_flags & VMMC_EVICTED)) {
		ASSERT(bp->lmfs_dev == dev);
		ASSERT(bp->lmfs_dev != NO_DEV);

		/* The block must have exactly the requested number of
		 * bytes.
		 */
		if (bp->lmfs_bytes != block_size)
			return EIO;

		/* Block needed has been found. */
		if (bp->lmfs_count == 0) {
			rm_lru(bp);
			ASSERT(bp->lmfs_needsetcache == 0);
			ASSERT(!(bp->lmfs_flags & VMMC_BLOCK_LOCKED));
			/* FIXME: race condition against the VMMC_EVICTED
			 * check
			 */
			bp->lmfs_flags |= VMMC_BLOCK_LOCKED;
		}
		raisecount(bp);
		ASSERT(bp->lmfs_flags & VMMC_BLOCK_LOCKED);
		ASSERT(bp->data);

		if(ino != VMC_NO_INODE) {
			if(bp->lmfs_inode == VMC_NO_INODE
			    || bp->lmfs_inode != ino
			    || bp->lmfs_inode_offset != ino_off) {
				bp->lmfs_inode = ino;
				bp->lmfs_inode_offset = ino_off;
				bp->lmfs_needsetcache = 1;
			}
		}

		*bpp = bp;
		return OK;
	}

	/* We had the block in the cache but VM evicted it; invalidate it. */
	if (bp != NULL) {
		assert(bp->lmfs_flags & VMMC_EVICTED);
		ASSERT(bp->lmfs_count == 0);
		ASSERT(!(bp->lmfs_flags & VMMC_BLOCK_LOCKED));
		ASSERT(!(bp->lmfs_flags & VMMC_DIRTY));
		bp->lmfs_dev = NO_DEV;
		bp->lmfs_bytes = 0;
		bp->data = NULL;
	}

	/* Desired block is not on available chain. Find a free block to
	 * use.
	 */
	if(bp) {
		ASSERT(bp->lmfs_flags & VMMC_EVICTED);
	} else {
		if ((bp = front) == NULL)
			panic("all buffers in use: %d", nr_bufs);
	}
	assert(bp);

	rm_lru(bp);

	/* Remove the block that was just taken from its hash chain. */
	b = BUFHASH(bp->lmfs_blocknr);
	prev_ptr = buf_hash[b];
	if (prev_ptr == bp) {
		buf_hash[b] = bp->lmfs_hash;
	} else {
		/* The block just taken is not on the front of its hash
		 * chain.
		 */
		while (prev_ptr->lmfs_hash != NULL)
			if (prev_ptr->lmfs_hash == bp) {
				prev_ptr->lmfs_hash = bp->lmfs_hash; /* found it */
				break;
			} else {
				prev_ptr = prev_ptr->lmfs_hash;	/* keep looking */
			}
	}

	freeblock(bp);

	bp->lmfs_inode = ino;
	bp->lmfs_inode_offset = ino_off;

	bp->lmfs_flags = VMMC_BLOCK_LOCKED;
	bp->lmfs_needsetcache = 0;
	bp->lmfs_dev = dev;		/* fill in device number */
	bp->lmfs_blocknr = block;	/* fill in block number */
	ASSERT(bp->lmfs_count == 0);
	raisecount(bp);
	b = BUFHASH(bp->lmfs_blocknr);
	bp->lmfs_hash = buf_hash[b];

	buf_hash[b] = bp;		/* add to hash list */

	assert(dev != NO_DEV);

	/* The block is not found in our cache, but we do want it if it's in
	 * the vm cache.
	 */
	assert(!bp->data);
	assert(!bp->lmfs_bytes);
	if(vmcache) {
		if((bp->data = vm_map_cacheblock(dev, dev_off, ino, ino_off,
		    &bp->lmfs_flags,
		    roundup(block_size, PAGE_SIZE))) != MAP_FAILED) {
			bp->lmfs_bytes = block_size;
			ASSERT(!bp->lmfs_needsetcache);
			*bpp = bp;
			return OK;
		}
	}
	bp->data = NULL;

	/* The block is not in the cache, and VM does not know about it. If
	 * we were requested to search for the block only, we can now return
	 * failure to the caller. Return the block to the pool without
	 * allocating data pages, since these would be freed upon recycling
	 * the block anyway.
	 */
	if (how == PEEK) {
		bp->lmfs_dev = NO_DEV;

		put_block(bp, ONE_SHOT);

		return ENOENT;
	}

	/* Not in the cache; reserve memory for its contents. */
	lmfs_alloc_block(bp, block_size);

	assert(bp->data);

	if(how == PREFETCH) {
		/* PREFETCH: don't do i/o. */
		bp->lmfs_dev = NO_DEV;
	} else if (how == NORMAL) {
		/* Try to read the block. Return an error code on failure. */
		if ((r = read_block(bp, block_size)) != OK) {
			put_block(bp, 0);

			return r;
		}
	} else if(how == NO_READ) {
		/* This block will be overwritten by new contents. */
	} else
		panic("unexpected 'how' value: %d", how);

	assert(bp->data);

	*bpp = bp;	/* return the newly acquired block */
	return OK;
}

/*===========================================================================*
 *				lmfs_get_block_ino			     *
 *===========================================================================*/
int lmfs_get_block_ino(struct buf **bpp, dev_t dev, block64_t block, int how,
	ino_t ino, u64_t ino_off)
{
	return get_block_ino(bpp, dev, block, how, ino, ino_off,
		fs_block_size);
}

/*===========================================================================*
 *				lmfs_get_partial_block			     *
 *===========================================================================*/
int lmfs_get_partial_block(struct buf **bpp, dev_t dev, block64_t block,
	int how, size_t block_size)
{
	return get_block_ino(bpp, dev, block, how, VMC_NO_INODE, 0,
		block_size);
}
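/* A sketch of how a caller might read a device whose size is not a multiple
 * of the block size, assuming 'dev_size' holds the device size in bytes
 * (each obtained block must still be released with lmfs_put_block()):
 *
 *	block64_t nfull = dev_size / fs_block_size;
 *	size_t tail = (size_t)(dev_size % fs_block_size);
 *
 *	for (b = 0; b < nfull; b++)
 *		r = lmfs_get_block(&bp, dev, b, NORMAL);	// full blocks
 *	if (tail > 0)				// final, partial block
 *		r = lmfs_get_partial_block(&bp, dev, nfull, NORMAL, tail);
 */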
/*===========================================================================*
 *				put_block				     *
 *===========================================================================*/
static void put_block(struct buf *bp, int put_flags)
{
	/* Return a block to the list of available blocks. Depending on
	 * 'put_flags' it may be put on the front or rear of the LRU chain.
	 * Blocks that are expected to be needed again at some point go on the
	 * rear; blocks that are unlikely to be needed again at all go on the
	 * front.
	 */
	dev_t dev;
	uint64_t dev_off;
	int r, setflags;

	assert(bp != NULL);

	dev = bp->lmfs_dev;

	dev_off = bp->lmfs_blocknr * fs_block_size;

	lowercount(bp);
	if (bp->lmfs_count != 0) return;	/* block is still in use */

	/* Put this block back on the LRU chain. */
	if (dev == NO_DEV || dev == DEV_RAM || (put_flags & ONE_SHOT)) {
		/* Block will not be needed again. Put it on front of chain.
		 * It will be the next block to be evicted from the cache.
		 */
		bp->lmfs_prev = NULL;
		bp->lmfs_next = front;
		if (front == NULL)
			rear = bp;	/* LRU chain was empty */
		else
			front->lmfs_prev = bp;
		front = bp;
	}
	else {
		/* Block may be needed again. Put it on rear of chain.
		 * It will not be evicted from the cache for a long time.
		 */
		bp->lmfs_prev = rear;
		bp->lmfs_next = NULL;
		if (rear == NULL)
			front = bp;
		else
			rear->lmfs_next = bp;
		rear = bp;
	}

	assert(bp->lmfs_flags & VMMC_BLOCK_LOCKED);
	bp->lmfs_flags &= ~VMMC_BLOCK_LOCKED;

	/* block has sensible content - if necessary, identify it to VM */
	if(vmcache && bp->lmfs_needsetcache && dev != NO_DEV) {
		assert(bp->data);

		setflags = (put_flags & ONE_SHOT) ? VMSF_ONCE : 0;

		if ((r = vm_set_cacheblock(bp->data, dev, dev_off,
		    bp->lmfs_inode, bp->lmfs_inode_offset, &bp->lmfs_flags,
		    roundup(bp->lmfs_bytes, PAGE_SIZE), setflags)) != OK) {
			if(r == ENOSYS) {
				printf("libminixfs: ENOSYS, disabling VM calls\n");
				vmcache = 0;
			} else if (r == ENOMEM) {
				/* Do not panic in this case. Running out of
				 * memory is bad, especially since it may lead
				 * to applications crashing when trying to
				 * access memory-mapped pages we haven't been
				 * able to pass off to the VM cache, but the
				 * entire file system crashing is always
				 * worse.
				 */
				printf("libminixfs: no memory for cache block!\n");
			} else {
				panic("libminixfs: setblock of %p dev 0x%llx off "
					"0x%llx failed\n", bp->data, dev,
					dev_off);
			}
		}
	}
	bp->lmfs_needsetcache = 0;

	/* Now that we (may) have given the block to VM, invalidate the block
	 * if it is a one-shot block. Otherwise, it may still be reobtained
	 * immediately afterwards, which could be a problem if VM has already
	 * forgotten the block: we would then be expected to pass it to VM
	 * again, which would not happen.
	 */
	if (put_flags & ONE_SHOT)
		bp->lmfs_dev = NO_DEV;
}
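/* The effect of the two LRU ends, in short: a one-shot or RAM disk block is
 * prepended and thus evicted first, while a normal block is appended and
 * stays cached longest. Internal callers choose with the second parameter:
 *
 *	put_block(bp, ONE_SHOT);	// front of LRU; VM gets VMSF_ONCE
 *	put_block(bp, 0);		// rear of LRU; keep cached
 */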
/*===========================================================================*
 *				lmfs_put_block				     *
 *===========================================================================*/
void lmfs_put_block(struct buf *bp)
{
	/* User interface to put_block(). */

	if (bp == NULL) return;	/* for poorly written file systems */

	put_block(bp, 0);
}

/*===========================================================================*
 *				lmfs_free_block				     *
 *===========================================================================*/
void lmfs_free_block(dev_t dev, block64_t block)
{
	/* The file system has just freed the given block. The block may
	 * previously have been in use as data block for an inode. Therefore,
	 * we now need to tell VM that the block is no longer associated with
	 * an inode. If we fail to do so and the inode now has a hole at this
	 * location, mapping in the hole would yield the old block contents
	 * rather than a zeroed page. In addition, if the block is in the
	 * cache, it will be removed, even if it was dirty.
	 */
	struct buf *bp;
	int r;

	/* Tell VM to forget about the block. The primary purpose of this
	 * call is to break the inode association, but since the block is
	 * part of a mounted file system, it is not expected to be accessed
	 * directly anyway. So, save some cache memory by throwing it out of
	 * the VM cache altogether.
	 */
	if (vmcache) {
		if ((r = vm_forget_cacheblock(dev, block * fs_block_size,
		    fs_block_size)) != OK)
			printf("libminixfs: vm_forget_cacheblock failed (%d)\n",
				r);
	}

	if ((bp = find_block(dev, block)) != NULL) {
		lmfs_markclean(bp);

		/* Invalidate the block. The block may or may not be in use
		 * right now, so don't be smart about freeing memory or
		 * repositioning in the LRU.
		 */
		bp->lmfs_dev = NO_DEV;
	}

	/* Note that this is *not* the right place to implement TRIM support.
	 * Even though the block is freed, on the device it may still be part
	 * of a previous checkpoint or snapshot of some sort. Only the file
	 * system can be trusted to decide which blocks can be reused on the
	 * device!
	 */
}
/*===========================================================================*
 *				lmfs_zero_block_ino			     *
 *===========================================================================*/
void lmfs_zero_block_ino(dev_t dev, ino_t ino, u64_t ino_off)
{
	/* Files may have holes. From an application perspective, these are
	 * just file regions filled with zeroes. From a file system
	 * perspective however, holes may represent unallocated regions on
	 * disk. Such holes do not have corresponding blocks on the disk, and
	 * therefore also no block number, so we cannot simply use
	 * lmfs_get_block_ino() for them. For reads this is not a problem,
	 * since the file system can just zero out the target application
	 * buffer instead. For mapped pages however, this *is* a problem,
	 * since the VM cache needs to be told about the corresponding block,
	 * and VM does not accept blocks without a device offset. The role of
	 * this function is therefore to tell VM about the hole using a fake
	 * device offset. The device offsets are picked so that the VM cache
	 * will see a block memory-mapped for the hole in the file, while the
	 * same block is not visible when memory-mapping the block device.
	 */
	struct buf *bp;
	static block64_t fake_block = 0;
	int r;

	if (!vmcache)
		return;

	assert(fs_block_size > 0);

	/* Pick a block number above the threshold of what can possibly be
	 * mapped in by mmap'ing the device, since off_t is signed, and it is
	 * safe to say that it will take a while before we have 8-exabyte
	 * devices. Pick a different block number each time to avoid possible
	 * concurrency issues. FIXME: it does not seem like VM actually
	 * verifies mmap offsets though..
	 */
	if (fake_block == 0 || ++fake_block >= UINT64_MAX / fs_block_size)
		fake_block = ((uint64_t)INT64_MAX + 1) / fs_block_size;

	/* Obtain a block. */
	if ((r = lmfs_get_block_ino(&bp, dev, fake_block, NO_READ, ino,
	    ino_off)) != OK)
		panic("libminixfs: getting a NO_READ block failed: %d", r);
	assert(bp != NULL);
	assert(bp->lmfs_dev != NO_DEV);

	/* The block is already zeroed, as it has just been allocated with
	 * mmap. File systems do not rely on this assumption yet, so if VM
	 * ever gets changed to not clear the blocks we allocate (e.g., by
	 * recycling pages in the VM cache for the same process, which would
	 * be safe), we need to add a memset here.
	 */

	/* Release the block. We don't expect it to be accessed ever again.
	 * Moreover, if we keep the block around in the VM cache, it may
	 * erroneously be mapped in beyond the file end later. Hence, use
	 * VMSF_ONCE when passing it to VM. TODO: tell VM that it is an
	 * all-zeroes block, so that VM can deduplicate all such pages in its
	 * cache.
	 */
	put_block(bp, ONE_SHOT);
}

void lmfs_set_blockusage(fsblkcnt_t btotal, fsblkcnt_t bused)
{
	assert(bused <= btotal);
	fs_btotal = btotal;
	fs_bused = bused;

	/* if the cache isn't in use, we could resize it. */
	if (bufs_in_use == 0)
		cache_heuristic_check();
}
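/* At mount time, a file system typically seeds these counters straight from
 * its superblock; a sketch, where 'sp', s_blocks_total and s_blocks_free are
 * hypothetical superblock fields:
 *
 *	lmfs_set_blockusage(sp->s_blocks_total,
 *	    sp->s_blocks_total - sp->s_blocks_free);
 */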
/*===========================================================================*
 *				read_block				     *
 *===========================================================================*/
static int read_block(struct buf *bp, size_t block_size)
{
	/* Read a disk block of 'block_size' bytes. The given size is always
	 * the FS block size, except for the last block of a device. If an
	 * I/O error occurs, invalidate the block and return an error code.
	 */
	ssize_t r;
	off_t pos;
	dev_t dev = bp->lmfs_dev;

	assert(dev != NO_DEV);

	ASSERT(bp->lmfs_bytes == block_size);
	ASSERT(fs_block_size > 0);

	pos = (off_t)bp->lmfs_blocknr * fs_block_size;
	if (block_size > PAGE_SIZE) {
#define MAXPAGES 20
		vir_bytes blockrem, vaddr = (vir_bytes) bp->data;
		int p = 0;
		static iovec_t iovec[MAXPAGES];
		blockrem = block_size;
		while(blockrem > 0) {
			vir_bytes chunk =
				blockrem >= PAGE_SIZE ? PAGE_SIZE : blockrem;
			assert(p < MAXPAGES);	/* do not overrun the vector */
			iovec[p].iov_addr = vaddr;
			iovec[p].iov_size = chunk;
			vaddr += chunk;
			blockrem -= chunk;
			p++;
		}
		r = bdev_gather(dev, pos, iovec, p, BDEV_NOFLAGS);
	} else {
		r = bdev_read(dev, pos, bp->data, block_size, BDEV_NOFLAGS);
	}
	if (r != (ssize_t)block_size) {
		printf("fs cache: I/O error on device %d/%d, block %"PRIu64
			" (%zd)\n", major(dev), minor(dev), bp->lmfs_blocknr,
			r);
		if (r >= 0)
			r = EIO; /* TODO: retry retrieving (just) the
				  * remaining part
				  */

		bp->lmfs_dev = NO_DEV;	/* invalidate block */

		return r;
	}

	return OK;
}
/*===========================================================================*
 *				lmfs_invalidate				     *
 *===========================================================================*/
void lmfs_invalidate(
  dev_t device			/* device whose blocks are to be purged */
)
{
	/* Remove all the blocks belonging to some device from the cache. */

	register struct buf *bp;

	assert(device != NO_DEV);

	for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++) {
		if (bp->lmfs_dev == device) {
			assert(bp->data);
			assert(bp->lmfs_bytes > 0);
			munmap_t(bp->data, bp->lmfs_bytes);
			bp->lmfs_dev = NO_DEV;
			bp->lmfs_bytes = 0;
			bp->data = NULL;
		}
	}

	/* Clear the cache even if VM caching is disabled for the file
	 * system: caching may be disabled as a side effect of an error,
	 * leaving blocks behind in the actual VM cache.
	 */
	vm_clear_cache(device);
}

/*===========================================================================*
 *				lmfs_flushdev				     *
 *===========================================================================*/
void lmfs_flushdev(dev_t dev)
{
	/* Flush all dirty blocks for one device. */

	register struct buf *bp;
	static struct buf **dirty;
	static unsigned int dirtylistsize = 0;
	int ndirty;

	if(dirtylistsize != nr_bufs) {
		if(dirtylistsize > 0) {
			assert(dirty != NULL);
			free(dirty);
		}
		if(!(dirty = malloc(sizeof(dirty[0])*nr_bufs)))
			panic("couldn't allocate dirty buf list");
		dirtylistsize = nr_bufs;
	}

	for (bp = &buf[0], ndirty = 0; bp < &buf[nr_bufs]; bp++) {
		/* Do not flush dirty blocks that are in use (lmfs_count>0):
		 * the file system may mark the block as dirty before changing
		 * its contents, in which case the new contents could end up
		 * being lost.
		 */
		if (!lmfs_isclean(bp) && bp->lmfs_dev == dev &&
		    bp->lmfs_count == 0) {
			dirty[ndirty++] = bp;
		}
	}

	lmfs_rw_scattered(dev, dirty, ndirty, WRITING);
}
/*===========================================================================*
 *				lmfs_rw_scattered			     *
 *===========================================================================*/
void lmfs_rw_scattered(
  dev_t dev,			/* major-minor device number */
  struct buf **bufq,		/* pointer to array of buffers */
  int bufqsize,			/* number of buffers */
  int rw_flag			/* READING or WRITING */
)
{
	/* Read or write scattered data from a device. */

	register struct buf *bp;
	int gap;
	register int i;
	register iovec_t *iop;
	static iovec_t iovec[NR_IOREQS];
	off_t pos;
	int iov_per_block;
	unsigned int start_in_use = bufs_in_use, start_bufqsize = bufqsize;

	assert(bufqsize >= 0);
	if(bufqsize == 0) return;

	/* for READING, check that all buffers on the list are obtained and
	 * held (count > 0)
	 */
	if (rw_flag == READING) {
		for(i = 0; i < bufqsize; i++) {
			assert(bufq[i] != NULL);
			assert(bufq[i]->lmfs_count > 0);
		}

		/* therefore they are all 'in use' and must be at least this
		 * many
		 */
		assert(start_in_use >= start_bufqsize);
	}

	assert(dev != NO_DEV);
	assert(fs_block_size > 0);
	assert(howmany(fs_block_size, PAGE_SIZE) <= NR_IOREQS);

	/* (Shell) sort buffers on lmfs_blocknr. */
	gap = 1;
	do
		gap = 3 * gap + 1;
	while (gap <= bufqsize);
	while (gap != 1) {
		int j;
		gap /= 3;
		for (j = gap; j < bufqsize; j++) {
			for (i = j - gap;
			    i >= 0 &&
			    bufq[i]->lmfs_blocknr > bufq[i + gap]->lmfs_blocknr;
			    i -= gap) {
				bp = bufq[i];
				bufq[i] = bufq[i + gap];
				bufq[i + gap] = bp;
			}
		}
	}

	/* Set up I/O vector and do I/O. The result of bdev I/O is OK if
	 * everything went fine, otherwise the error code for the first failed
	 * transfer.
	 */
	while (bufqsize > 0) {
		int nblocks = 0, niovecs = 0;
		int r;
		for (iop = iovec; nblocks < bufqsize; nblocks++) {
			int p;
			vir_bytes vdata, blockrem;
			bp = bufq[nblocks];
			if (bp->lmfs_blocknr !=
			    bufq[0]->lmfs_blocknr + nblocks)
				break;
			blockrem = bp->lmfs_bytes;
			iov_per_block = howmany(blockrem, PAGE_SIZE);
			if(niovecs >= NR_IOREQS-iov_per_block) break;
			vdata = (vir_bytes) bp->data;
			for(p = 0; p < iov_per_block; p++) {
				vir_bytes chunk =
				    blockrem < PAGE_SIZE ? blockrem : PAGE_SIZE;
				iop->iov_addr = vdata;
				iop->iov_size = chunk;
				vdata += PAGE_SIZE;
				blockrem -= chunk;
				iop++;
				niovecs++;
			}
			assert(p == iov_per_block);
			assert(blockrem == 0);
		}

		assert(nblocks > 0);
		assert(niovecs > 0);

		pos = (off_t)bufq[0]->lmfs_blocknr * fs_block_size;
		if (rw_flag == READING)
			r = bdev_gather(dev, pos, iovec, niovecs, BDEV_NOFLAGS);
		else
			r = bdev_scatter(dev, pos, iovec, niovecs, BDEV_NOFLAGS);

		/* Harvest the results. The driver may have returned an error,
		 * or it may have done less than what we asked for.
		 */
		if (r < 0) {
			printf("fs cache: I/O error %d on device %d/%d, "
				"block %"PRIu64"\n",
				r, major(dev), minor(dev),
				bufq[0]->lmfs_blocknr);
		}
		for (i = 0; i < nblocks; i++) {
			bp = bufq[i];
			if (r < (ssize_t)bp->lmfs_bytes) {
				/* Transfer failed. */
				if (i == 0) {
					bp->lmfs_dev = NO_DEV;	/* invalidate block */
				}
				break;
			}
			if (rw_flag == READING) {
				bp->lmfs_dev = dev;	/* validate block */
				lmfs_put_block(bp);
			} else {
				MARKCLEAN(bp);
			}
			r -= bp->lmfs_bytes;
		}

		bufq += i;
		bufqsize -= i;

		if (rw_flag == READING) {
			/* Don't bother reading more than the device is
			 * willing to give at this time. Don't forget to
			 * release those extras.
			 */
			while (bufqsize > 0) {
				lmfs_put_block(*bufq++);
				bufqsize--;
			}
		}
		if (rw_flag == WRITING && i == 0) {
			/* We're not making progress, this means we might keep
			 * looping. Buffers remain dirty if un-written. Buffers
			 * are lost if invalidate()d or LRU-removed while
			 * dirty. This is better than keeping unwritable blocks
			 * around forever..
			 */
			break;
		}
	}

	if(rw_flag == READING) {
		assert(start_in_use >= start_bufqsize);

		/* READING callers assume all bufs are released. */
		assert(start_in_use - start_bufqsize == bufs_in_use);
	}
}
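/* A sketch of the read-ahead pattern this function is designed for: collect
 * consecutive blocks with PREFETCH (buffers only, no I/O) and issue a single
 * scattered read for all of them; 'base' and 'count' are assumed inputs:
 *
 *	struct buf *bufq[NR_IOREQS];
 *	int n = 0;
 *	block64_t b;
 *
 *	for (b = base; b < base + count && n < NR_IOREQS; b++) {
 *		if (lmfs_get_block(&bufq[n], dev, b, PREFETCH) != OK)
 *			break;
 *		if (bufq[n]->lmfs_dev != NO_DEV) {	// already valid
 *			lmfs_put_block(bufq[n]);
 *			break;
 *		}
 *		n++;
 *	}
 *	if (n > 0)	// releases all the buffers before returning
 *		lmfs_rw_scattered(dev, bufq, n, READING);
 */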
/*===========================================================================*
 *				rm_lru					     *
 *===========================================================================*/
static void rm_lru(struct buf *bp)
{
	/* Remove a block from its LRU chain. */
	struct buf *next_ptr, *prev_ptr;

	next_ptr = bp->lmfs_next;	/* successor on LRU chain */
	prev_ptr = bp->lmfs_prev;	/* predecessor on LRU chain */
	if (prev_ptr != NULL)
		prev_ptr->lmfs_next = next_ptr;
	else
		front = next_ptr;	/* this block was at front of chain */

	if (next_ptr != NULL)
		next_ptr->lmfs_prev = prev_ptr;
	else
		rear = prev_ptr;	/* this block was at rear of chain */
}

/*===========================================================================*
 *				cache_resize				     *
 *===========================================================================*/
static void cache_resize(size_t blocksize, unsigned int bufs)
{
	struct buf *bp;

	assert(blocksize > 0);
	assert(bufs >= MINBUFS);

	for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++)
		if(bp->lmfs_count != 0)
			panic("change blocksize with buffer in use");

	lmfs_buf_pool(bufs);

	fs_block_size = blocksize;
}

static void cache_heuristic_check(void)
{
	int bufs, d;

	bufs = fs_bufs_heuristic(MINBUFS, fs_btotal, fs_bused, fs_block_size);

	/* set the cache to the new heuristic size if the new one
	 * is more than 10% off from the current one.
	 */
	d = bufs-nr_bufs;
	if(d < 0) d = -d;
	if(d*100/nr_bufs > 10) {
		cache_resize(fs_block_size, bufs);
	}
}

/*===========================================================================*
 *				lmfs_set_blocksize			     *
 *===========================================================================*/
void lmfs_set_blocksize(size_t new_block_size)
{
	cache_resize(new_block_size, MINBUFS);
	cache_heuristic_check();

	/* Decide whether to use the secondary (VM) cache or not. Only do
	 * this if the block size is a multiple of the page size, and using
	 * the VM cache has been enabled for this FS.
	 */
	vmcache = 0;

	if(may_use_vmcache && !(new_block_size % PAGE_SIZE))
		vmcache = 1;
}

/*===========================================================================*
 *				lmfs_buf_pool				     *
 *===========================================================================*/
void lmfs_buf_pool(int new_nr_bufs)
{
	/* Initialize the buffer pool. */
	register struct buf *bp;

	assert(new_nr_bufs >= MINBUFS);

	if(nr_bufs > 0) {
		assert(buf);
		lmfs_flushall();
		for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++) {
			if(bp->data) {
				assert(bp->lmfs_bytes > 0);
				munmap_t(bp->data, bp->lmfs_bytes);
			}
		}
	}

	if(buf)
		free(buf);

	if(!(buf = calloc(sizeof(buf[0]), new_nr_bufs)))
		panic("couldn't allocate buf list (%d)", new_nr_bufs);

	if(buf_hash)
		free(buf_hash);
	if(!(buf_hash = calloc(sizeof(buf_hash[0]), new_nr_bufs)))
		panic("couldn't allocate buf hash list (%d)", new_nr_bufs);

	nr_bufs = new_nr_bufs;

	bufs_in_use = 0;
	front = &buf[0];
	rear = &buf[nr_bufs - 1];

	for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++) {
		bp->lmfs_blocknr = NO_BLOCK;
		bp->lmfs_dev = NO_DEV;
		bp->lmfs_next = bp + 1;
		bp->lmfs_prev = bp - 1;
		bp->data = NULL;
		bp->lmfs_bytes = 0;
	}
	front->lmfs_prev = NULL;
	rear->lmfs_next = NULL;

	for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++)
		bp->lmfs_hash = bp->lmfs_next;
	buf_hash[0] = front;
}

int lmfs_bufs_in_use(void)
{
	return bufs_in_use;
}

int lmfs_nr_bufs(void)
{
	return nr_bufs;
}

void lmfs_flushall(void)
{
	struct buf *bp;
	for(bp = &buf[0]; bp < &buf[nr_bufs]; bp++)
		if(bp->lmfs_dev != NO_DEV && !lmfs_isclean(bp))
			lmfs_flushdev(bp->lmfs_dev);

	/* This is the moment where it is least likely (although certainly
	 * not impossible!) that there are buffers in use, since buffers
	 * should not be held across file system syncs. See if we already
	 * intended to resize the buffer cache, but couldn't. Be aware that we
	 * may be called indirectly from within lmfs_change_blockusage(), so
	 * care must be taken not to recurse infinitely. TODO: see if it is
	 * better to resize the cache from here *only*, thus guaranteeing a
	 * clean cache.
	 */
	lmfs_change_blockusage(0);
}

size_t lmfs_fs_block_size(void)
{
	return fs_block_size;
}

void lmfs_may_use_vmcache(int ok)
{
	may_use_vmcache = ok;
}
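/* A typical initialization sequence at mount time, as a sketch. The order
 * matters: create a pool before anything that may trigger the sizing
 * heuristic, enable the VM cache before lmfs_set_blocksize() (which decides
 * whether the secondary cache will actually be used), and seed the usage
 * counters first so the heuristic sees real numbers. 'btotal', 'bused' and
 * 'sb_block_size' are assumed to come from the superblock:
 *
 *	lmfs_buf_pool(MINBUFS);			// create an initial pool
 *	lmfs_may_use_vmcache(1);
 *	lmfs_set_blockusage(btotal, bused);
 *	lmfs_set_blocksize(sb_block_size);	// resizes pool via heuristic
 */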