/*
 * Copyright 1998, 2000 Marshall Kirk McKusick. All Rights Reserved.
 *
 * The soft updates code is derived from the appendix of a University
 * of Michigan technical report (Gregory R. Ganger and Yale N. Patt,
 * "Soft Updates: A Solution to the Metadata Update Problem in File
 * Systems", CSE-TR-254-95, August 1995).
 *
 * Further information about soft updates can be obtained from:
 *
 *	Marshall Kirk McKusick		http://www.mckusick.com/softdep/
 *	1614 Oxford Street		mckusick@mckusick.com
 *	Berkeley, CA 94709-1608		+1-510-843-9542
 *	USA
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY MARSHALL KIRK MCKUSICK ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL MARSHALL KIRK MCKUSICK BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)ffs_softdep.c	9.59 (McKusick) 6/21/00
 * $FreeBSD: src/sys/ufs/ffs/ffs_softdep.c,v 1.57.2.11 2002/02/05 18:46:53 dillon Exp $
 */

/*
 * For now we want the safety net that the DIAGNOSTIC and DEBUG flags provide.
 */
#ifndef DIAGNOSTIC
#define DIAGNOSTIC
#endif
#ifndef DEBUG
#define DEBUG
#endif

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/spinlock2.h>
#include <sys/syslog.h>
#include <sys/vnode.h>
#include <sys/conf.h>
#include <machine/inttypes.h>
#include "dir.h"
#include "quota.h"
#include "inode.h"
#include "ufsmount.h"
#include "fs.h"
#include "softdep.h"
#include "ffs_extern.h"
#include "ufs_extern.h"

#include <sys/buf2.h>
#include <sys/lock.h>

/*
 * These definitions need to be adapted to the system to which
 * this file is being ported.
 */
/*
 * malloc types defined for the softdep system.
 */
MALLOC_DEFINE(M_PAGEDEP, "pagedep", "File page dependencies");
MALLOC_DEFINE(M_INODEDEP, "inodedep", "Inode dependencies");
MALLOC_DEFINE(M_NEWBLK, "newblk", "New block allocation");
MALLOC_DEFINE(M_BMSAFEMAP, "bmsafemap", "Block or frag allocated from cyl group map");
MALLOC_DEFINE(M_ALLOCDIRECT, "allocdirect", "Block or frag dependency for an inode");
MALLOC_DEFINE(M_INDIRDEP, "indirdep", "Indirect block dependencies");
MALLOC_DEFINE(M_ALLOCINDIR, "allocindir", "Block dependency for an indirect block");
MALLOC_DEFINE(M_FREEFRAG, "freefrag", "Previously used frag for an inode");
MALLOC_DEFINE(M_FREEBLKS, "freeblks", "Blocks freed from an inode");
MALLOC_DEFINE(M_FREEFILE, "freefile", "Inode deallocated");
MALLOC_DEFINE(M_DIRADD, "diradd", "New directory entry");
MALLOC_DEFINE(M_MKDIR, "mkdir", "New directory");
MALLOC_DEFINE(M_DIRREM, "dirrem", "Directory entry deleted");

#define M_SOFTDEP_FLAGS		(M_WAITOK | M_USE_RESERVE)

#define	D_PAGEDEP	0
#define	D_INODEDEP	1
#define	D_NEWBLK	2
#define	D_BMSAFEMAP	3
#define	D_ALLOCDIRECT	4
#define	D_INDIRDEP	5
#define	D_ALLOCINDIR	6
#define	D_FREEFRAG	7
#define	D_FREEBLKS	8
#define	D_FREEFILE	9
#define	D_DIRADD	10
#define	D_MKDIR		11
#define	D_DIRREM	12
#define	D_LAST		D_DIRREM

/*
 * translate from workitem type to memory type
 * MUST match the defines above, such that memtype[D_XXX] == M_XXX
 */
static struct malloc_type *memtype[] = {
	M_PAGEDEP,
	M_INODEDEP,
	M_NEWBLK,
	M_BMSAFEMAP,
	M_ALLOCDIRECT,
	M_INDIRDEP,
	M_ALLOCINDIR,
	M_FREEFRAG,
	M_FREEBLKS,
	M_FREEFILE,
	M_DIRADD,
	M_MKDIR,
	M_DIRREM
};

#define DtoM(type) (memtype[type])

/*
 * Names of malloc types.
 */
#define TYPENAME(type)	\
	((unsigned)(type) <= D_LAST ? memtype[type]->ks_shortdesc : "???")
/*
 * End system adaptation definitions.
 */
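/*
 * Illustrative sketch (not part of the original code): the table above
 * lets generic workitem code recover a malloc type from a workitem's
 * D_XXX tag, which is how workitem_free() below releases storage:
 *
 *	struct worklist *item;			(some workitem)
 *	kfree(item, DtoM(item->wk_type));	(DtoM(D_PAGEDEP) == M_PAGEDEP)
 *
 * Adding a new dependency type therefore means extending the D_XXX
 * defines and memtype[] in lock step.
 */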
/*
 * Internal function prototypes.
 */
static	void softdep_error(char *, int);
static	void drain_output(struct vnode *, int);
static	int getdirtybuf(struct buf **, int);
static	void clear_remove(struct thread *);
static	void clear_inodedeps(struct thread *);
static	int flush_pagedep_deps(struct vnode *, struct mount *,
	    struct diraddhd *);
static	int flush_inodedep_deps(struct fs *, ino_t);
static	int handle_written_filepage(struct pagedep *, struct buf *);
static	void diradd_inode_written(struct diradd *, struct inodedep *);
static	int handle_written_inodeblock(struct inodedep *, struct buf *);
static	void handle_allocdirect_partdone(struct allocdirect *);
static	void handle_allocindir_partdone(struct allocindir *);
static	void initiate_write_filepage(struct pagedep *, struct buf *);
static	void handle_written_mkdir(struct mkdir *, int);
static	void initiate_write_inodeblock(struct inodedep *, struct buf *);
static	void handle_workitem_freefile(struct freefile *);
static	void handle_workitem_remove(struct dirrem *);
static	struct dirrem *newdirrem(struct buf *, struct inode *,
	    struct inode *, int, struct dirrem **);
static	void free_diradd(struct diradd *);
static	void free_allocindir(struct allocindir *, struct inodedep *);
static	int indir_trunc(struct inode *, off_t, int, ufs_lbn_t, long *);
static	void deallocate_dependencies(struct buf *, struct inodedep *);
static	void free_allocdirect(struct allocdirectlst *,
	    struct allocdirect *, int);
static	int check_inode_unwritten(struct inodedep *);
static	int free_inodedep(struct inodedep *);
static	void handle_workitem_freeblocks(struct freeblks *);
static	void merge_inode_lists(struct inodedep *);
static	void setup_allocindir_phase2(struct buf *, struct inode *,
	    struct allocindir *);
static	struct allocindir *newallocindir(struct inode *, int, ufs_daddr_t,
	    ufs_daddr_t);
static	void handle_workitem_freefrag(struct freefrag *);
static	struct freefrag *newfreefrag(struct inode *, ufs_daddr_t, long);
static	void allocdirect_merge(struct allocdirectlst *,
	    struct allocdirect *, struct allocdirect *);
static	struct bmsafemap *bmsafemap_lookup(struct buf *);
static	int newblk_lookup(struct fs *, ufs_daddr_t, int,
	    struct newblk **);
static	int inodedep_lookup(struct fs *, ino_t, int, struct inodedep **);
static	int pagedep_lookup(struct inode *, ufs_lbn_t, int,
	    struct pagedep **);
static	int request_cleanup(int);
static	int process_worklist_item(struct mount *, int);
static	void add_to_worklist(struct worklist *);

/*
 * Exported softdep operations.
 */
static	void softdep_disk_io_initiation(struct buf *);
static	void softdep_disk_write_complete(struct buf *);
static	void softdep_deallocate_dependencies(struct buf *);
static	int softdep_fsync(struct vnode *);
static	int softdep_process_worklist(struct mount *);
static	void softdep_move_dependencies(struct buf *, struct buf *);
static	int softdep_count_dependencies(struct buf *bp, int);
static	int softdep_checkread(struct buf *bp);
static	int softdep_checkwrite(struct buf *bp);

static struct bio_ops softdep_bioops = {
	.io_start = softdep_disk_io_initiation,
	.io_complete = softdep_disk_write_complete,
	.io_deallocate = softdep_deallocate_dependencies,
	.io_fsync = softdep_fsync,
	.io_sync = softdep_process_worklist,
	.io_movedeps = softdep_move_dependencies,
	.io_countdeps = softdep_count_dependencies,
	.io_checkread = softdep_checkread,
	.io_checkwrite = softdep_checkwrite
};

/*
 * Locking primitives.
 */
static	void acquire_lock(struct lock *);
static	void free_lock(struct lock *);
#ifdef INVARIANTS
static	int lock_held(struct lock *);
#endif

static struct lock lk;

#define ACQUIRE_LOCK(lkp)	acquire_lock(lkp)
#define FREE_LOCK(lkp)		free_lock(lkp)

static void
acquire_lock(struct lock *lkp)
{
	lockmgr(lkp, LK_EXCLUSIVE);
}

static void
free_lock(struct lock *lkp)
{
	lockmgr(lkp, LK_RELEASE);
}

#ifdef INVARIANTS
static int
lock_held(struct lock *lkp)
{
	return lockinuse(lkp);
}
#endif
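/*
 * Illustrative sketch (not part of the original code): virtually every
 * routine below brackets its work with the single global lock and must
 * drop it around anything that can block, re-validating afterwards:
 *
 *	ACQUIRE_LOCK(&lk);
 *	... examine/modify dependency structures ...
 *	FREE_LOCK(&lk);
 *	p = kmalloc(sizeof(*p), M_PAGEDEP, M_SOFTDEP_FLAGS);  (may block)
 *	ACQUIRE_LOCK(&lk);
 *	... recheck anything that may have changed while unlocked ...
 *	FREE_LOCK(&lk);
 */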
/*
 * Place holder for real semaphores.
 */
struct sema {
	int	value;
	thread_t holder;
	char	*name;
	int	timo;
	struct spinlock spin;
};
static	void sema_init(struct sema *, char *, int);
static	int sema_get(struct sema *, struct lock *);
static	void sema_release(struct sema *, struct lock *);

#define NOHOLDER	((struct thread *) -1)

static void
sema_init(struct sema *semap, char *name, int timo)
{
	semap->holder = NOHOLDER;
	semap->value = 0;
	semap->name = name;
	semap->timo = timo;
	spin_init(&semap->spin, "ufssema");
}

/*
 * Obtain exclusive access, semaphore is protected by the interlock.
 * If interlock is NULL we must protect the semaphore ourselves.
 */
static int
sema_get(struct sema *semap, struct lock *interlock)
{
	int rv;

	if (interlock) {
		if (semap->value > 0) {
			++semap->value;		/* serves as wakeup flag */
			lksleep(semap, interlock, 0,
				semap->name, semap->timo);
			rv = 0;
		} else {
			semap->value = 1;	/* serves as owned flag */
			semap->holder = curthread;
			rv = 1;
		}
	} else {
		spin_lock(&semap->spin);
		if (semap->value > 0) {
			++semap->value;		/* serves as wakeup flag */
			ssleep(semap, &semap->spin, 0,
			       semap->name, semap->timo);
			spin_unlock(&semap->spin);
			rv = 0;
		} else {
			semap->value = 1;	/* serves as owned flag */
			semap->holder = curthread;
			spin_unlock(&semap->spin);
			rv = 1;
		}
	}
	return (rv);
}

static void
sema_release(struct sema *semap, struct lock *lk)
{
	if (semap->value <= 0 || semap->holder != curthread)
		panic("sema_release: not held");
	if (lk) {
		semap->holder = NOHOLDER;
		if (--semap->value > 0) {
			semap->value = 0;
			wakeup(semap);
		}
	} else {
		spin_lock(&semap->spin);
		semap->holder = NOHOLDER;
		if (--semap->value > 0) {
			semap->value = 0;
			spin_unlock(&semap->spin);
			wakeup(semap);
		} else {
			spin_unlock(&semap->spin);
		}
	}
}
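/*
 * Illustrative sketch (not part of the original code): the semaphore
 * single-threads the allocate-and-insert step of the hash lookups
 * below. A loser of the race sleeps in sema_get() and retries from the
 * top, as pagedep_lookup() does (hash_find() here is hypothetical):
 *
 * top:
 *	if ((*itempp = hash_find(...)) != NULL)
 *		return (1);
 *	if (sema_get(&in_progress, &lk) == 0)
 *		goto top;			(another thread beat us)
 *	... unlock, allocate, relock, recheck, insert ...
 *	sema_release(&in_progress, &lk);
 */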
/*
 * Worklist queue management.
 * These routines require that the lock be held.
 */
static	void worklist_insert(struct workhead *, struct worklist *);
static	void worklist_remove(struct worklist *);
static	void workitem_free(struct worklist *, int);

#define WORKLIST_INSERT_BP(bp, item) do {	\
	(bp)->b_ops = &softdep_bioops;		\
	worklist_insert(&(bp)->b_dep, item);	\
} while (0)

#define WORKLIST_INSERT(head, item) worklist_insert(head, item)
#define WORKLIST_REMOVE(item) worklist_remove(item)
#define WORKITEM_FREE(item, type) workitem_free((struct worklist *)item, type)

static void
worklist_insert(struct workhead *head, struct worklist *item)
{
	KKASSERT(lock_held(&lk));

	if (item->wk_state & ONWORKLIST) {
		panic("worklist_insert: already on list");
	}
	item->wk_state |= ONWORKLIST;
	LIST_INSERT_HEAD(head, item, wk_list);
}

static void
worklist_remove(struct worklist *item)
{
	KKASSERT(lock_held(&lk));
	if ((item->wk_state & ONWORKLIST) == 0)
		panic("worklist_remove: not on list");

	item->wk_state &= ~ONWORKLIST;
	LIST_REMOVE(item, wk_list);
}

static void
workitem_free(struct worklist *item, int type)
{
	if (item->wk_state & ONWORKLIST)
		panic("workitem_free: still on list");
	if (item->wk_type != type)
		panic("workitem_free: type mismatch");

	kfree(item, DtoM(type));
}

/*
 * Workitem queue management
 */
static struct workhead softdep_workitem_pending;
static int num_on_worklist;	/* number of worklist items to be processed */
static int softdep_worklist_busy; /* 1 => trying to do unmount */
static int softdep_worklist_req; /* serialized waiters */
static int max_softdeps;	/* maximum number of structs before slowdown */
static int tickdelay = 2;	/* number of ticks to pause during slowdown */
static int *stat_countp;	/* statistic to count in proc_waiting timeout */
static int proc_waiting;	/* tracks whether we have a timeout posted */
static struct thread *filesys_syncer; /* proc of filesystem syncer process */
static int req_clear_inodedeps;	/* syncer process flush some inodedeps */
#define FLUSH_INODES	1
static int req_clear_remove;	/* syncer process flush some freeblks */
#define FLUSH_REMOVE	2
/*
 * runtime statistics
 */
static int stat_worklist_push;	/* number of worklist cleanups */
static int stat_blk_limit_push;	/* number of times block limit neared */
static int stat_ino_limit_push;	/* number of times inode limit neared */
static int stat_blk_limit_hit;	/* number of times block slowdown imposed */
static int stat_ino_limit_hit;	/* number of times inode slowdown imposed */
static int stat_sync_limit_hit;	/* number of synchronous slowdowns imposed */
static int stat_indir_blk_ptrs;	/* bufs redirtied as indir ptrs not written */
static int stat_inode_bitmap;	/* bufs redirtied as inode bitmap not written */
static int stat_direct_blk_ptrs;/* bufs redirtied as direct ptrs not written */
static int stat_dir_entry;	/* bufs redirtied as dir entry cannot write */
#ifdef DEBUG
#include <vm/vm.h>
#include <sys/sysctl.h>
SYSCTL_INT(_debug, OID_AUTO, max_softdeps, CTLFLAG_RW, &max_softdeps, 0,
    "Maximum soft dependencies before slowdown occurs");
SYSCTL_INT(_debug, OID_AUTO, tickdelay, CTLFLAG_RW, &tickdelay, 0,
    "Ticks to delay before allocating during slowdown");
SYSCTL_INT(_debug, OID_AUTO, worklist_push, CTLFLAG_RW, &stat_worklist_push, 0,
    "Number of worklist cleanups");
SYSCTL_INT(_debug, OID_AUTO, blk_limit_push, CTLFLAG_RW, &stat_blk_limit_push, 0,
    "Number of times block limit neared");
SYSCTL_INT(_debug, OID_AUTO, ino_limit_push, CTLFLAG_RW, &stat_ino_limit_push, 0,
    "Number of times inode limit neared");
SYSCTL_INT(_debug, OID_AUTO, blk_limit_hit, CTLFLAG_RW, &stat_blk_limit_hit, 0,
    "Number of times block slowdown imposed");
SYSCTL_INT(_debug, OID_AUTO, ino_limit_hit, CTLFLAG_RW, &stat_ino_limit_hit, 0,
    "Number of times inode slowdown imposed");
SYSCTL_INT(_debug, OID_AUTO, sync_limit_hit, CTLFLAG_RW, &stat_sync_limit_hit, 0,
    "Number of synchronous slowdowns imposed");
SYSCTL_INT(_debug, OID_AUTO, indir_blk_ptrs, CTLFLAG_RW, &stat_indir_blk_ptrs, 0,
    "Bufs redirtied as indir ptrs not written");
SYSCTL_INT(_debug, OID_AUTO, inode_bitmap, CTLFLAG_RW, &stat_inode_bitmap, 0,
    "Bufs redirtied as inode bitmap not written");
SYSCTL_INT(_debug, OID_AUTO, direct_blk_ptrs, CTLFLAG_RW, &stat_direct_blk_ptrs, 0,
    "Bufs redirtied as direct ptrs not written");
SYSCTL_INT(_debug, OID_AUTO, dir_entry, CTLFLAG_RW, &stat_dir_entry, 0,
    "Bufs redirtied as dir entry cannot write");
#endif /* DEBUG */

/*
 * Add an item to the end of the work queue.
 * This routine requires that the lock be held.
 * This is the only routine that adds items to the list.
 * The following routine is the only one that removes items
 * and does so in order from first to last.
 */
static void
add_to_worklist(struct worklist *wk)
{
	static struct worklist *worklist_tail;

	if (wk->wk_state & ONWORKLIST) {
		panic("add_to_worklist: already on list");
	}
	wk->wk_state |= ONWORKLIST;
	if (LIST_FIRST(&softdep_workitem_pending) == NULL)
		LIST_INSERT_HEAD(&softdep_workitem_pending, wk, wk_list);
	else
		LIST_INSERT_AFTER(worklist_tail, wk, wk_list);
	worklist_tail = wk;
	num_on_worklist += 1;
}
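/*
 * Illustrative example (not part of the original code): because items
 * are appended after worklist_tail and consumed from the head, the
 * queue is strictly FIFO. A file removal thus queues, e.g.:
 *
 *	add_to_worklist(&freeblks->fb_list);	(free the blocks first)
 *	...later...
 *	add_to_worklist(&freefile->fx_list);	(then free the inode)
 *
 * so the inode cannot be reused while stale <vfsid, inum, lbn> triples
 * still reference its old blocks.
 */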
/*
 * Process that runs once per second to handle items in the background queue.
 *
 * Note that we ensure that everything is done in the order in which it
 * appears in the queue. The code below depends on this property to ensure
 * that blocks of a file are freed before the inode itself is freed. This
 * ordering ensures that no new <vfsid, inum, lbn> triples will be generated
 * until all the old ones have been purged from the dependency lists.
 *
 * bioops callback - hold io_token
 */
static int
softdep_process_worklist(struct mount *matchmnt)
{
	thread_t td = curthread;
	int matchcnt, loopcount;
	int starttime;

	ACQUIRE_LOCK(&lk);

	/*
	 * Record the process identifier of our caller so that we can give
	 * this process preferential treatment in request_cleanup below.
	 */
	filesys_syncer = td;
	matchcnt = 0;

	/*
	 * There is no danger of having multiple processes run this
	 * code, but we have to single-thread it when softdep_flushfiles()
	 * is in operation to get an accurate count of the number of items
	 * related to its mount point that are in the list.
	 */
	if (matchmnt == NULL) {
		if (softdep_worklist_busy < 0) {
			matchcnt = -1;
			goto done;
		}
		softdep_worklist_busy += 1;
	}

	/*
	 * If requested, try removing inode or removal dependencies.
	 */
	if (req_clear_inodedeps) {
		clear_inodedeps(td);
		req_clear_inodedeps -= 1;
		wakeup_one(&proc_waiting);
	}
	if (req_clear_remove) {
		clear_remove(td);
		req_clear_remove -= 1;
		wakeup_one(&proc_waiting);
	}
	loopcount = 1;
	starttime = ticks;
	while (num_on_worklist > 0) {
		matchcnt += process_worklist_item(matchmnt, 0);

		/*
		 * If a umount operation wants to run the worklist
		 * accurately, abort.
		 */
		if (softdep_worklist_req && matchmnt == NULL) {
			matchcnt = -1;
			break;
		}

		/*
		 * If requested, try removing inode or removal dependencies.
		 */
		if (req_clear_inodedeps) {
			clear_inodedeps(td);
			req_clear_inodedeps -= 1;
			wakeup_one(&proc_waiting);
		}
		if (req_clear_remove) {
			clear_remove(td);
			req_clear_remove -= 1;
			wakeup_one(&proc_waiting);
		}
		/*
		 * We do not generally want to stop for buffer space, but if
		 * we are really being a buffer hog, we will stop and wait.
		 */
		if (loopcount++ % 128 == 0) {
			FREE_LOCK(&lk);
			bwillinode(1);
			ACQUIRE_LOCK(&lk);
		}

		/*
		 * Never allow processing to run for more than one
		 * second. Otherwise the other syncer tasks may get
		 * excessively backlogged.
		 *
		 * Use ticks to avoid boundary condition w/time_second or
		 * time_uptime.
		 */
		if ((ticks - starttime) > hz && matchmnt == NULL) {
			matchcnt = -1;
			break;
		}
	}
	if (matchmnt == NULL) {
		--softdep_worklist_busy;
		if (softdep_worklist_req && softdep_worklist_busy == 0)
			wakeup(&softdep_worklist_req);
	}
done:
	FREE_LOCK(&lk);
	return (matchcnt);
}
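/*
 * Illustrative sketch (not part of the original code): this routine is
 * wired up as io_sync in softdep_bioops above, so the once-a-second
 * syncer drives the queue with the moral equivalent of:
 *
 *	(void) softdep_process_worklist(NULL);	(no mount point filter)
 *
 * while softdep_flushfiles() below passes its mount point and uses the
 * returned match count to decide whether any work remained for it.
 */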
/*
 * Process one item on the worklist.
 */
static int
process_worklist_item(struct mount *matchmnt, int flags)
{
	struct ufsmount *ump;
	struct worklist *wk;
	struct dirrem *dirrem;
	struct fs *matchfs;
	struct vnode *vp;
	int matchcnt = 0;

	KKASSERT(lock_held(&lk));

	matchfs = NULL;
	if (matchmnt != NULL)
		matchfs = VFSTOUFS(matchmnt)->um_fs;

	/*
	 * Normally we just process each item on the worklist in order.
	 * However, if we are in a situation where we cannot lock any
	 * inodes, we have to skip over any dirrem requests whose
	 * vnodes are resident and locked.
	 */
	LIST_FOREACH(wk, &softdep_workitem_pending, wk_list) {
		if ((flags & LK_NOWAIT) == 0 || wk->wk_type != D_DIRREM)
			break;
		dirrem = WK_DIRREM(wk);
		ump = VFSTOUFS(dirrem->dm_mnt);
		lwkt_gettoken(&ump->um_mountp->mnt_token);
		vp = ufs_ihashlookup(ump, ump->um_dev, dirrem->dm_oldinum);
		lwkt_reltoken(&ump->um_mountp->mnt_token);
		if (vp == NULL || !vn_islocked(vp))
			break;
	}
	if (wk == NULL) {
		return (0);
	}
	WORKLIST_REMOVE(wk);
	num_on_worklist -= 1;
	FREE_LOCK(&lk);
	switch (wk->wk_type) {
	case D_DIRREM:
		/* removal of a directory entry */
		if (WK_DIRREM(wk)->dm_mnt == matchmnt)
			matchcnt += 1;
		handle_workitem_remove(WK_DIRREM(wk));
		break;

	case D_FREEBLKS:
		/* releasing blocks and/or fragments from a file */
		if (WK_FREEBLKS(wk)->fb_fs == matchfs)
			matchcnt += 1;
		handle_workitem_freeblocks(WK_FREEBLKS(wk));
		break;

	case D_FREEFRAG:
		/* releasing a fragment when replaced as a file grows */
		if (WK_FREEFRAG(wk)->ff_fs == matchfs)
			matchcnt += 1;
		handle_workitem_freefrag(WK_FREEFRAG(wk));
		break;

	case D_FREEFILE:
		/* releasing an inode when its link count drops to 0 */
		if (WK_FREEFILE(wk)->fx_fs == matchfs)
			matchcnt += 1;
		handle_workitem_freefile(WK_FREEFILE(wk));
		break;

	default:
		panic("%s_process_worklist: Unknown type %s",
		    "softdep", TYPENAME(wk->wk_type));
		/* NOTREACHED */
	}
	ACQUIRE_LOCK(&lk);
	return (matchcnt);
}

/*
 * Move dependencies from one buffer to another.
 *
 * bioops callback - hold io_token
 */
static void
softdep_move_dependencies(struct buf *oldbp, struct buf *newbp)
{
	struct worklist *wk, *wktail;

	if (LIST_FIRST(&newbp->b_dep) != NULL)
		panic("softdep_move_dependencies: need merge code");
	wktail = NULL;
	ACQUIRE_LOCK(&lk);
	while ((wk = LIST_FIRST(&oldbp->b_dep)) != NULL) {
		LIST_REMOVE(wk, wk_list);
		if (wktail == NULL)
			LIST_INSERT_HEAD(&newbp->b_dep, wk, wk_list);
		else
			LIST_INSERT_AFTER(wktail, wk, wk_list);
		wktail = wk;
		newbp->b_ops = &softdep_bioops;
	}
	FREE_LOCK(&lk);
}
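/*
 * Illustrative sketch (not part of the original code): the buffer cache
 * invokes io_movedeps when the data tracked by oldbp ends up in another
 * buffer (e.g. when writes are clustered), so the dependency list has
 * to follow the data:
 *
 *	if (oldbp->b_ops)
 *		(*oldbp->b_ops->io_movedeps)(oldbp, newbp);
 *
 * Appending after wktail preserves the original list order.
 */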
/*
 * Purge the work list of all items associated with a particular mount point.
 */
int
softdep_flushfiles(struct mount *oldmnt, int flags)
{
	struct vnode *devvp;
	int error, loopcnt;

	/*
	 * Await our turn to clear out the queue, then serialize access.
	 */
	ACQUIRE_LOCK(&lk);
	while (softdep_worklist_busy != 0) {
		softdep_worklist_req += 1;
		lksleep(&softdep_worklist_req, &lk, 0, "softflush", 0);
		softdep_worklist_req -= 1;
	}
	softdep_worklist_busy = -1;
	FREE_LOCK(&lk);

	if ((error = ffs_flushfiles(oldmnt, flags)) != 0) {
		softdep_worklist_busy = 0;
		if (softdep_worklist_req)
			wakeup(&softdep_worklist_req);
		return (error);
	}
	/*
	 * Alternately flush the block device associated with the mount
	 * point and process any dependencies that the flushing
	 * creates. In theory, this loop can happen at most twice,
	 * but we give it a few extra just to be sure.
	 */
	devvp = VFSTOUFS(oldmnt)->um_devvp;
	for (loopcnt = 10; loopcnt > 0; ) {
		if (softdep_process_worklist(oldmnt) == 0) {
			loopcnt--;
			/*
			 * Do another flush in case any vnodes were brought in
			 * as part of the cleanup operations.
			 */
			if ((error = ffs_flushfiles(oldmnt, flags)) != 0)
				break;
			/*
			 * If we still found nothing to do, we are really done.
			 */
			if (softdep_process_worklist(oldmnt) == 0)
				break;
		}
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(devvp, MNT_WAIT, 0);
		vn_unlock(devvp);
		if (error)
			break;
	}
	ACQUIRE_LOCK(&lk);
	softdep_worklist_busy = 0;
	if (softdep_worklist_req)
		wakeup(&softdep_worklist_req);
	FREE_LOCK(&lk);

	/*
	 * If we are unmounting then it is an error to fail. If we
	 * are simply trying to downgrade to read-only, then filesystem
	 * activity can keep us busy forever, so we just fail with EBUSY.
	 */
	if (loopcnt == 0) {
		if (oldmnt->mnt_kern_flag & MNTK_UNMOUNT)
			panic("softdep_flushfiles: looping");
		error = EBUSY;
	}
	return (error);
}

/*
 * Structure hashing.
 *
 * There are three types of structures that can be looked up:
 *	1) pagedep structures identified by mount point, inode number,
 *	   and logical block.
 *	2) inodedep structures identified by mount point and inode number.
 *	3) newblk structures identified by mount point and
 *	   physical block number.
 *
 * The "pagedep" and "inodedep" dependency structures are hashed
 * separately from the file blocks and inodes to which they correspond.
 * This separation helps when the in-memory copy of an inode or
 * file block must be replaced. It also obviates the need to access
 * an inode or file page when simply updating (or de-allocating)
 * dependency structures. Lookup of newblk structures is needed to
 * find newly allocated blocks when trying to associate them with
 * their allocdirect or allocindir structure.
 *
 * The lookup routines optionally create and hash a new instance when
 * an existing entry is not found.
 */
#define DEPALLOC	0x0001	/* allocate structure if lookup fails */
#define NODELAY		0x0002	/* cannot do background work */

/*
 * Structures and routines associated with pagedep caching.
 */
LIST_HEAD(pagedep_hashhead, pagedep) *pagedep_hashtbl;
u_long	pagedep_hash;		/* size of hash table - 1 */
#define	PAGEDEP_HASH(mp, inum, lbn) \
	(&pagedep_hashtbl[((((register_t)(mp)) >> 13) + (inum) + (lbn)) & \
	    pagedep_hash])
static struct sema pagedep_in_progress;

/*
 * Helper routine for pagedep_lookup()
 */
static __inline
struct pagedep *
pagedep_find(struct pagedep_hashhead *pagedephd, ino_t ino, ufs_lbn_t lbn,
	     struct mount *mp)
{
	struct pagedep *pagedep;

	LIST_FOREACH(pagedep, pagedephd, pd_hash) {
		if (ino == pagedep->pd_ino &&
		    lbn == pagedep->pd_lbn &&
		    mp == pagedep->pd_mnt) {
			return (pagedep);
		}
	}
	return(NULL);
}
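/*
 * Illustrative example (not part of the original code): callers
 * distinguish "found" from "freshly created" by the return value of
 * the lookup routines, e.g. later in this file:
 *
 *	if ((ip->i_mode & IFMT) == IFDIR &&
 *	    pagedep_lookup(ip, lbn, DEPALLOC, &pagedep) == 0)
 *		WORKLIST_INSERT_BP(bp, &pagedep->pd_list);
 *
 * i.e. a new pagedep (return 0) still has to be hung off its buffer.
 */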
/*
 * Look up a pagedep. Return 1 if found, 0 if not found.
 * If not found, allocate if DEPALLOC flag is passed.
 * Found or allocated entry is returned in pagedeppp.
 * This routine must be called with splbio interrupts blocked.
 */
static int
pagedep_lookup(struct inode *ip, ufs_lbn_t lbn, int flags,
	       struct pagedep **pagedeppp)
{
	struct pagedep *pagedep;
	struct pagedep_hashhead *pagedephd;
	struct mount *mp;
	int i;

	KKASSERT(lock_held(&lk));

	mp = ITOV(ip)->v_mount;
	pagedephd = PAGEDEP_HASH(mp, ip->i_number, lbn);
top:
	*pagedeppp = pagedep_find(pagedephd, ip->i_number, lbn, mp);
	if (*pagedeppp)
		return(1);
	if ((flags & DEPALLOC) == 0)
		return (0);
	if (sema_get(&pagedep_in_progress, &lk) == 0)
		goto top;

	FREE_LOCK(&lk);
	pagedep = kmalloc(sizeof(struct pagedep), M_PAGEDEP,
			  M_SOFTDEP_FLAGS | M_ZERO);
	ACQUIRE_LOCK(&lk);
	if (pagedep_find(pagedephd, ip->i_number, lbn, mp)) {
		kprintf("pagedep_lookup: blocking race avoided\n");
		sema_release(&pagedep_in_progress, &lk);
		kfree(pagedep, M_PAGEDEP);
		goto top;
	}

	pagedep->pd_list.wk_type = D_PAGEDEP;
	pagedep->pd_mnt = mp;
	pagedep->pd_ino = ip->i_number;
	pagedep->pd_lbn = lbn;
	LIST_INIT(&pagedep->pd_dirremhd);
	LIST_INIT(&pagedep->pd_pendinghd);
	for (i = 0; i < DAHASHSZ; i++)
		LIST_INIT(&pagedep->pd_diraddhd[i]);
	LIST_INSERT_HEAD(pagedephd, pagedep, pd_hash);
	sema_release(&pagedep_in_progress, &lk);
	*pagedeppp = pagedep;
	return (0);
}

/*
 * Structures and routines associated with inodedep caching.
 */
LIST_HEAD(inodedep_hashhead, inodedep) *inodedep_hashtbl;
static u_long	inodedep_hash;	/* size of hash table - 1 */
static long	num_inodedep;	/* number of inodedep allocated */
#define	INODEDEP_HASH(fs, inum) \
	(&inodedep_hashtbl[((((register_t)(fs)) >> 13) + (inum)) & inodedep_hash])
static struct sema inodedep_in_progress;

/*
 * Helper routine for inodedep_lookup()
 */
static __inline
struct inodedep *
inodedep_find(struct inodedep_hashhead *inodedephd, struct fs *fs, ino_t inum)
{
	struct inodedep *inodedep;

	LIST_FOREACH(inodedep, inodedephd, id_hash) {
		if (inum == inodedep->id_ino && fs == inodedep->id_fs)
			return(inodedep);
	}
	return (NULL);
}

/*
 * Look up an inodedep. Return 1 if found, 0 if not found.
 * If not found, allocate if DEPALLOC flag is passed.
 * Found or allocated entry is returned in inodedeppp.
 * This routine must be called with splbio interrupts blocked.
 */
static int
inodedep_lookup(struct fs *fs, ino_t inum, int flags,
		struct inodedep **inodedeppp)
{
	struct inodedep *inodedep;
	struct inodedep_hashhead *inodedephd;

	KKASSERT(lock_held(&lk));

	inodedephd = INODEDEP_HASH(fs, inum);
top:
	*inodedeppp = inodedep_find(inodedephd, fs, inum);
	if (*inodedeppp)
		return (1);
	if ((flags & DEPALLOC) == 0)
		return (0);

	/*
	 * If we are over our limit, try to improve the situation.
	 */
	if (num_inodedep > max_softdeps / 2)
		speedup_syncer(NULL);
	if (num_inodedep > max_softdeps &&
	    (flags & NODELAY) == 0 &&
	    request_cleanup(FLUSH_INODES)) {
		goto top;
	}
	if (sema_get(&inodedep_in_progress, &lk) == 0)
		goto top;

	FREE_LOCK(&lk);
	inodedep = kmalloc(sizeof(struct inodedep), M_INODEDEP,
			   M_SOFTDEP_FLAGS | M_ZERO);
	ACQUIRE_LOCK(&lk);
	if (inodedep_find(inodedephd, fs, inum)) {
		kprintf("inodedep_lookup: blocking race avoided\n");
		sema_release(&inodedep_in_progress, &lk);
		kfree(inodedep, M_INODEDEP);
		goto top;
	}
	inodedep->id_list.wk_type = D_INODEDEP;
	inodedep->id_fs = fs;
	inodedep->id_ino = inum;
	inodedep->id_state = ALLCOMPLETE;
	inodedep->id_nlinkdelta = 0;
	inodedep->id_savedino = NULL;
	inodedep->id_savedsize = -1;
	inodedep->id_buf = NULL;
	LIST_INIT(&inodedep->id_pendinghd);
	LIST_INIT(&inodedep->id_inowait);
	LIST_INIT(&inodedep->id_bufwait);
	TAILQ_INIT(&inodedep->id_inoupdt);
	TAILQ_INIT(&inodedep->id_newinoupdt);
	num_inodedep += 1;
	LIST_INSERT_HEAD(inodedephd, inodedep, id_hash);
	sema_release(&inodedep_in_progress, &lk);
	*inodedeppp = inodedep;
	return (0);
}

/*
 * Structures and routines associated with newblk caching.
 */
LIST_HEAD(newblk_hashhead, newblk) *newblk_hashtbl;
u_long	newblk_hash;		/* size of hash table - 1 */
#define	NEWBLK_HASH(fs, inum) \
	(&newblk_hashtbl[((((register_t)(fs)) >> 13) + (inum)) & newblk_hash])
static struct sema newblk_in_progress;

/*
 * Helper routine for newblk_lookup()
 */
static __inline
struct newblk *
newblk_find(struct newblk_hashhead *newblkhd, struct fs *fs,
	    ufs_daddr_t newblkno)
{
	struct newblk *newblk;

	LIST_FOREACH(newblk, newblkhd, nb_hash) {
		if (newblkno == newblk->nb_newblkno && fs == newblk->nb_fs)
			return (newblk);
	}
	return(NULL);
}

/*
 * Look up a newblk. Return 1 if found, 0 if not found.
 * If not found, allocate if DEPALLOC flag is passed.
 * Found or allocated entry is returned in newblkpp.
 */
static int
newblk_lookup(struct fs *fs, ufs_daddr_t newblkno, int flags,
	      struct newblk **newblkpp)
{
	struct newblk *newblk;
	struct newblk_hashhead *newblkhd;

	newblkhd = NEWBLK_HASH(fs, newblkno);
top:
	*newblkpp = newblk_find(newblkhd, fs, newblkno);
	if (*newblkpp)
		return(1);
	if ((flags & DEPALLOC) == 0)
		return (0);
	if (sema_get(&newblk_in_progress, NULL) == 0)
		goto top;

	newblk = kmalloc(sizeof(struct newblk), M_NEWBLK,
			 M_SOFTDEP_FLAGS | M_ZERO);

	if (newblk_find(newblkhd, fs, newblkno)) {
		kprintf("newblk_lookup: blocking race avoided\n");
		sema_release(&newblk_in_progress, NULL);
		kfree(newblk, M_NEWBLK);
		goto top;
	}
	newblk->nb_state = 0;
	newblk->nb_fs = fs;
	newblk->nb_newblkno = newblkno;
	LIST_INSERT_HEAD(newblkhd, newblk, nb_hash);
	sema_release(&newblk_in_progress, NULL);
	*newblkpp = newblk;
	return (0);
}
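/*
 * Illustrative sketch (not part of the original code): a newblk entry
 * only exists between bitmap update and pointer assignment:
 *
 *	softdep_setup_blkmapdep()		creates it with DEPALLOC and
 *						ties it to the cg's bmsafemap;
 *	softdep_setup_allocdirect() and		look it up with flags == 0,
 *	setup_allocindir_phase2()		inherit its bitmap dependency,
 *						then kfree() it.
 *
 * That is why a failed lookup in the consumers is a "lost block" panic.
 */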
/*
 * Executed during filesystem initialization before
 * mounting any filesystems.
 */
void
softdep_initialize(void)
{
	size_t idsize = sizeof(struct inodedep);
	int hsize = vfs_inodehashsize();

	LIST_INIT(&mkdirlisthd);
	LIST_INIT(&softdep_workitem_pending);
	max_softdeps = min(maxvnodes * 8, M_INODEDEP->ks_limit / (2 * idsize));

	/*
	 * Cap it at 100,000, having more just gets kinda silly.
	 */
	max_softdeps = min(max_softdeps, 100000);

	pagedep_hashtbl = hashinit(hsize / 4, M_PAGEDEP, &pagedep_hash);
	lockinit(&lk, "ffs_softdep", 0, LK_CANRECURSE);
	sema_init(&pagedep_in_progress, "pagedep", 0);
	inodedep_hashtbl = hashinit(hsize, M_INODEDEP, &inodedep_hash);
	sema_init(&inodedep_in_progress, "inodedep", 0);
	newblk_hashtbl = hashinit(64, M_NEWBLK, &newblk_hash);
	sema_init(&newblk_in_progress, "newblk", 0);
	add_bio_ops(&softdep_bioops);
}

/*
 * Called at mount time to notify the dependency code that a
 * filesystem wishes to use it.
 */
int
softdep_mount(struct vnode *devvp, struct mount *mp, struct fs *fs)
{
	struct csum cstotal;
	struct cg *cgp;
	struct buf *bp;
	int error, cyl;

	mp->mnt_flag &= ~MNT_ASYNC;
	mp->mnt_flag |= MNT_SOFTDEP;
	mp->mnt_bioops = &softdep_bioops;
	/*
	 * When doing soft updates, the counters in the
	 * superblock may have gotten out of sync, so we have
	 * to scan the cylinder groups and recalculate them.
	 */
	if (fs->fs_clean != 0)
		return (0);
	bzero(&cstotal, sizeof cstotal);
	for (cyl = 0; cyl < fs->fs_ncg; cyl++) {
		if ((error = bread(devvp, fsbtodoff(fs, cgtod(fs, cyl)),
				   fs->fs_cgsize, &bp)) != 0) {
			brelse(bp);
			return (error);
		}
		cgp = (struct cg *)bp->b_data;
		cstotal.cs_nffree += cgp->cg_cs.cs_nffree;
		cstotal.cs_nbfree += cgp->cg_cs.cs_nbfree;
		cstotal.cs_nifree += cgp->cg_cs.cs_nifree;
		cstotal.cs_ndir += cgp->cg_cs.cs_ndir;
		fs->fs_cs(fs, cyl) = cgp->cg_cs;
		brelse(bp);
	}
#ifdef DEBUG
	if (bcmp(&cstotal, &fs->fs_cstotal, sizeof cstotal))
		kprintf("ffs_mountfs: superblock updated for soft updates\n");
#endif
	bcopy(&cstotal, &fs->fs_cstotal, sizeof cstotal);
	return (0);
}
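/*
 * Illustrative sketch (not part of the original code): a typical mount
 * of an uncleanly shut down filesystem would run the recount above as:
 *
 *	if ((error = softdep_mount(devvp, mp, fs)) != 0)
 *		return (error);		(a cylinder group read failed)
 *
 * after which fs_cstotal and the per-cg summaries again agree with the
 * bitmaps, without a full fsck.
 */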
/*
 * Protecting the freemaps (or bitmaps).
 *
 * To eliminate the need to execute fsck before mounting a filesystem
 * after a power failure, one must (conservatively) guarantee that the
 * on-disk copy of the bitmaps never indicates that a live inode or block is
 * free. So, when a block or inode is allocated, the bitmap should be
 * updated (on disk) before any new pointers. When a block or inode is
 * freed, the bitmap should not be updated until all pointers have been
 * reset. The latter dependency is handled by the delayed de-allocation
 * approach described below for block and inode de-allocation. The former
 * dependency is handled by calling the following procedure when a block or
 * inode is allocated. When an inode is allocated an "inodedep" is created
 * with its DEPCOMPLETE flag cleared until its bitmap is written to disk.
 * Each "inodedep" is also inserted into the hash indexing structure so
 * that any additional link additions can be made dependent on the inode
 * allocation.
 *
 * The ufs filesystem maintains a number of free block counts (e.g., per
 * cylinder group, per cylinder and per <cylinder, rotational position> pair)
 * in addition to the bitmaps. These counts are used to improve efficiency
 * during allocation and therefore must be consistent with the bitmaps.
 * There is no convenient way to guarantee post-crash consistency of these
 * counts with simple update ordering, for two main reasons: (1) The counts
 * and bitmaps for a single cylinder group block are not in the same disk
 * sector. If a disk write is interrupted (e.g., by power failure), one may
 * be written and the other not. (2) Some of the counts are located in the
 * superblock rather than the cylinder group block. So, we focus our soft
 * updates implementation on protecting the bitmaps. When mounting a
 * filesystem, we recompute the auxiliary counts from the bitmaps.
 */

/*
 * Called just after updating the cylinder group block to allocate an inode.
 *
 * Parameters:
 *	bp:		buffer for cylgroup block with inode map
 *	ip:		inode related to allocation
 *	newinum:	new inode number being allocated
 */
void
softdep_setup_inomapdep(struct buf *bp, struct inode *ip, ino_t newinum)
{
	struct inodedep *inodedep;
	struct bmsafemap *bmsafemap;

	/*
	 * Create a dependency for the newly allocated inode.
	 * Panic if it already exists as something is seriously wrong.
	 * Otherwise add it to the dependency list for the buffer holding
	 * the cylinder group map from which it was allocated.
	 */
	ACQUIRE_LOCK(&lk);
	if ((inodedep_lookup(ip->i_fs, newinum, DEPALLOC|NODELAY, &inodedep))) {
		panic("softdep_setup_inomapdep: found inode");
	}
	inodedep->id_buf = bp;
	inodedep->id_state &= ~DEPCOMPLETE;
	bmsafemap = bmsafemap_lookup(bp);
	LIST_INSERT_HEAD(&bmsafemap->sm_inodedephd, inodedep, id_deps);
	FREE_LOCK(&lk);
}

/*
 * Called just after updating the cylinder group block to
 * allocate block or fragment.
 *
 * Parameters:
 *	bp:		buffer for cylgroup block with block map
 *	fs:		filesystem doing allocation
 *	newblkno:	number of newly allocated block
 */
void
softdep_setup_blkmapdep(struct buf *bp, struct fs *fs,
			ufs_daddr_t newblkno)
{
	struct newblk *newblk;
	struct bmsafemap *bmsafemap;

	/*
	 * Create a dependency for the newly allocated block.
	 * Add it to the dependency list for the buffer holding
	 * the cylinder group map from which it was allocated.
	 */
	if (newblk_lookup(fs, newblkno, DEPALLOC, &newblk) != 0)
		panic("softdep_setup_blkmapdep: found block");
	ACQUIRE_LOCK(&lk);
	newblk->nb_bmsafemap = bmsafemap = bmsafemap_lookup(bp);
	LIST_INSERT_HEAD(&bmsafemap->sm_newblkhd, newblk, nb_deps);
	FREE_LOCK(&lk);
}
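/*
 * Illustrative sketch (not part of the original code): the two setup
 * routines above are called immediately after the in-core cylinder
 * group bitmap is marked, roughly:
 *
 *	... set the bits in the cg bitmap in bp->b_data ...
 *	softdep_setup_blkmapdep(bp, fs, newblkno);
 *	bdwrite(bp);
 *
 * so the new dependency rides on the cylinder group buffer and is only
 * considered DEPCOMPLETE once that buffer has been written.
 */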
/*
 * Find the bmsafemap associated with a cylinder group buffer.
 * If none exists, create one. The buffer must be locked when
 * this routine is called and this routine must be called with
 * splbio interrupts blocked.
 */
static struct bmsafemap *
bmsafemap_lookup(struct buf *bp)
{
	struct bmsafemap *bmsafemap;
	struct worklist *wk;

	KKASSERT(lock_held(&lk));

	LIST_FOREACH(wk, &bp->b_dep, wk_list) {
		if (wk->wk_type == D_BMSAFEMAP)
			return (WK_BMSAFEMAP(wk));
	}
	FREE_LOCK(&lk);
	bmsafemap = kmalloc(sizeof(struct bmsafemap), M_BMSAFEMAP,
			    M_SOFTDEP_FLAGS);
	bmsafemap->sm_list.wk_type = D_BMSAFEMAP;
	bmsafemap->sm_list.wk_state = 0;
	bmsafemap->sm_buf = bp;
	LIST_INIT(&bmsafemap->sm_allocdirecthd);
	LIST_INIT(&bmsafemap->sm_allocindirhd);
	LIST_INIT(&bmsafemap->sm_inodedephd);
	LIST_INIT(&bmsafemap->sm_newblkhd);
	ACQUIRE_LOCK(&lk);
	WORKLIST_INSERT_BP(bp, &bmsafemap->sm_list);
	return (bmsafemap);
}
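/*
 * Illustrative sketch (not part of the original code): one bmsafemap per
 * dirty cylinder group buffer collects everything waiting on that bitmap
 * write, as in softdep_setup_inomapdep() above:
 *
 *	bmsafemap = bmsafemap_lookup(bp);
 *	LIST_INSERT_HEAD(&bmsafemap->sm_inodedephd, inodedep, id_deps);
 *
 * When the buffer write completes, the four sm_*hd lists identify every
 * dependent structure whose bitmap half is now safely on disk.
 */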
/*
 * Direct block allocation dependencies.
 *
 * When a new block is allocated, the corresponding disk locations must be
 * initialized (with zeros or new data) before the on-disk inode points to
 * them. Also, the freemap from which the block was allocated must be
 * updated (on disk) before the inode's pointer. These two dependencies are
 * independent of each other and are needed for all file blocks and indirect
 * blocks that are pointed to directly by the inode. Just before the
 * "in-core" version of the inode is updated with a newly allocated block
 * number, a procedure (below) is called to setup allocation dependency
 * structures. These structures are removed when the corresponding
 * dependencies are satisfied or when the block allocation becomes obsolete
 * (i.e., the file is deleted, the block is de-allocated, or the block is a
 * fragment that gets upgraded). All of these cases are handled in
 * procedures described later.
 *
 * When a file extension causes a fragment to be upgraded, either to a larger
 * fragment or to a full block, the on-disk location may change (if the
 * previous fragment could not simply be extended). In this case, the old
 * fragment must be de-allocated, but not until after the inode's pointer has
 * been updated. In most cases, this is handled by later procedures, which
 * will construct a "freefrag" structure to be added to the workitem queue
 * when the inode update is complete (or obsolete). The main exception to
 * this is when an allocation occurs while a pending allocation dependency
 * (for the same block pointer) remains. This case is handled in the main
 * allocation dependency setup procedure by immediately freeing the
 * unreferenced fragments.
 *
 * Parameters:
 *	ip:		inode to which block is being added
 *	lbn:		block pointer within inode
 *	newblkno:	disk block number being added
 *	oldblkno:	previous block number, 0 unless frag
 *	newsize:	size of new block
 *	oldsize:	size of old block
 *	bp:		bp for allocated block
 */
void
softdep_setup_allocdirect(struct inode *ip, ufs_lbn_t lbn, ufs_daddr_t newblkno,
			  ufs_daddr_t oldblkno, long newsize, long oldsize,
			  struct buf *bp)
{
	struct allocdirect *adp, *oldadp;
	struct allocdirectlst *adphead;
	struct bmsafemap *bmsafemap;
	struct inodedep *inodedep;
	struct pagedep *pagedep;
	struct newblk *newblk;

	adp = kmalloc(sizeof(struct allocdirect), M_ALLOCDIRECT,
		      M_SOFTDEP_FLAGS | M_ZERO);
	adp->ad_list.wk_type = D_ALLOCDIRECT;
	adp->ad_lbn = lbn;
	adp->ad_newblkno = newblkno;
	adp->ad_oldblkno = oldblkno;
	adp->ad_newsize = newsize;
	adp->ad_oldsize = oldsize;
	adp->ad_state = ATTACHED;
	if (newblkno == oldblkno)
		adp->ad_freefrag = NULL;
	else
		adp->ad_freefrag = newfreefrag(ip, oldblkno, oldsize);

	if (newblk_lookup(ip->i_fs, newblkno, 0, &newblk) == 0)
		panic("softdep_setup_allocdirect: lost block");

	ACQUIRE_LOCK(&lk);
	inodedep_lookup(ip->i_fs, ip->i_number, DEPALLOC | NODELAY, &inodedep);
	adp->ad_inodedep = inodedep;

	if (newblk->nb_state == DEPCOMPLETE) {
		adp->ad_state |= DEPCOMPLETE;
		adp->ad_buf = NULL;
	} else {
		bmsafemap = newblk->nb_bmsafemap;
		adp->ad_buf = bmsafemap->sm_buf;
		LIST_REMOVE(newblk, nb_deps);
		LIST_INSERT_HEAD(&bmsafemap->sm_allocdirecthd, adp, ad_deps);
	}
	LIST_REMOVE(newblk, nb_hash);
	kfree(newblk, M_NEWBLK);

	WORKLIST_INSERT_BP(bp, &adp->ad_list);
	if (lbn >= UFS_NDADDR) {
		/* allocating an indirect block */
		if (oldblkno != 0) {
			panic("softdep_setup_allocdirect: non-zero indir");
		}
	} else {
		/*
		 * Allocating a direct block.
		 *
		 * If we are allocating a directory block, then we must
		 * allocate an associated pagedep to track additions and
		 * deletions.
		 */
		if ((ip->i_mode & IFMT) == IFDIR &&
		    pagedep_lookup(ip, lbn, DEPALLOC, &pagedep) == 0) {
			WORKLIST_INSERT_BP(bp, &pagedep->pd_list);
		}
	}
	/*
	 * The list of allocdirects must be kept in sorted and ascending
	 * order so that the rollback routines can quickly determine the
	 * first uncommitted block (the size of the file stored on disk
	 * ends at the end of the lowest committed fragment, or if there
	 * are no fragments, at the end of the highest committed block).
	 * Since files generally grow, the typical case is that the new
	 * block is to be added at the end of the list. We speed this
	 * special case by checking against the last allocdirect in the
	 * list before laboriously traversing the list looking for the
	 * insertion point.
	 */
	adphead = &inodedep->id_newinoupdt;
	oldadp = TAILQ_LAST(adphead, allocdirectlst);
	if (oldadp == NULL || oldadp->ad_lbn <= lbn) {
		/* insert at end of list */
		TAILQ_INSERT_TAIL(adphead, adp, ad_next);
		if (oldadp != NULL && oldadp->ad_lbn == lbn)
			allocdirect_merge(adphead, adp, oldadp);
		FREE_LOCK(&lk);
		return;
	}
	TAILQ_FOREACH(oldadp, adphead, ad_next) {
		if (oldadp->ad_lbn >= lbn)
			break;
	}
	if (oldadp == NULL) {
		panic("softdep_setup_allocdirect: lost entry");
	}
	/* insert in middle of list */
	TAILQ_INSERT_BEFORE(oldadp, adp, ad_next);
	if (oldadp->ad_lbn == lbn)
		allocdirect_merge(adphead, adp, oldadp);
	FREE_LOCK(&lk);
}

/*
 * Replace an old allocdirect dependency with a newer one.
 * This routine must be called with splbio interrupts blocked.
 *
 * Parameters:
 *	adphead:	head of list holding allocdirects
 *	newadp:		allocdirect being added
 *	oldadp:		existing allocdirect being checked
 */
static void
allocdirect_merge(struct allocdirectlst *adphead,
		  struct allocdirect *newadp,
		  struct allocdirect *oldadp)
{
	struct freefrag *freefrag;

	KKASSERT(lock_held(&lk));

	if (newadp->ad_oldblkno != oldadp->ad_newblkno ||
	    newadp->ad_oldsize != oldadp->ad_newsize ||
	    newadp->ad_lbn >= UFS_NDADDR) {
		panic("allocdirect_merge: old %d != new %d || lbn %ld >= %d",
		    newadp->ad_oldblkno, oldadp->ad_newblkno, newadp->ad_lbn,
		    UFS_NDADDR);
	}
	newadp->ad_oldblkno = oldadp->ad_oldblkno;
	newadp->ad_oldsize = oldadp->ad_oldsize;
	/*
	 * If the old dependency had a fragment to free or had never
	 * previously had a block allocated, then the new dependency
	 * can immediately post its freefrag and adopt the old freefrag.
	 * This action is done by swapping the freefrag dependencies.
	 * The new dependency gains the old one's freefrag, and the
	 * old one gets the new one and then immediately puts it on
	 * the worklist when it is freed by free_allocdirect. It is
	 * not possible to do this swap when the old dependency had a
	 * non-zero size but no previous fragment to free. This condition
	 * arises when the new block is an extension of the old block.
	 * Here, the first part of the fragment allocated to the new
	 * dependency is part of the block currently claimed on disk by
	 * the old dependency, so cannot legitimately be freed until the
	 * conditions for the new dependency are fulfilled.
	 */
	if (oldadp->ad_freefrag != NULL || oldadp->ad_oldblkno == 0) {
		freefrag = newadp->ad_freefrag;
		newadp->ad_freefrag = oldadp->ad_freefrag;
		oldadp->ad_freefrag = freefrag;
	}
	free_allocdirect(adphead, oldadp, 0);
}
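/*
 * Illustrative example (not part of the original code): suppose a file's
 * last fragment is reallocated from a 2KB fragment to 4KB and then to
 * 8KB before either inode update is committed. The merge above makes
 * the newest allocdirect adopt the oldest freefrag (the 2KB fragment,
 * releasable only once the surviving dependency completes), while the
 * intermediate 4KB fragment is handed to the discarded allocdirect and
 * queued for release by free_allocdirect(), since no committed on-disk
 * pointer ever referenced it.
 */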
/*
 * Allocate a new freefrag structure if needed.
 */
static struct freefrag *
newfreefrag(struct inode *ip, ufs_daddr_t blkno, long size)
{
	struct freefrag *freefrag;
	struct fs *fs;

	if (blkno == 0)
		return (NULL);
	fs = ip->i_fs;
	if (fragnum(fs, blkno) + numfrags(fs, size) > fs->fs_frag)
		panic("newfreefrag: frag size");
	freefrag = kmalloc(sizeof(struct freefrag), M_FREEFRAG,
			   M_SOFTDEP_FLAGS);
	freefrag->ff_list.wk_type = D_FREEFRAG;
	freefrag->ff_state = ip->i_uid & ~ONWORKLIST;	/* XXX - used below */
	freefrag->ff_inum = ip->i_number;
	freefrag->ff_fs = fs;
	freefrag->ff_devvp = ip->i_devvp;
	freefrag->ff_blkno = blkno;
	freefrag->ff_fragsize = size;
	return (freefrag);
}

/*
 * This workitem de-allocates fragments that were replaced during
 * file block allocation.
 */
static void
handle_workitem_freefrag(struct freefrag *freefrag)
{
	struct inode tip;

	tip.i_fs = freefrag->ff_fs;
	tip.i_devvp = freefrag->ff_devvp;
	tip.i_dev = freefrag->ff_devvp->v_rdev;
	tip.i_number = freefrag->ff_inum;
	tip.i_uid = freefrag->ff_state & ~ONWORKLIST;	/* XXX - set above */
	ffs_blkfree(&tip, freefrag->ff_blkno, freefrag->ff_fragsize);
	kfree(freefrag, M_FREEFRAG);
}

/*
 * Indirect block allocation dependencies.
 *
 * The same dependencies that exist for a direct block also exist when
 * a new block is allocated and pointed to by an entry in a block of
 * indirect pointers. The undo/redo states described above are also
 * used here. Because an indirect block contains many pointers that
 * may have dependencies, a second copy of the entire in-memory indirect
 * block is kept. The buffer cache copy is always completely up-to-date.
 * The second copy, which is used only as a source for disk writes,
 * contains only the safe pointers (i.e., those that have no remaining
 * update dependencies). The second copy is freed when all pointers
 * are safe. The cache is not allowed to replace indirect blocks with
 * pending update dependencies. If a buffer containing an indirect
 * block with dependencies is written, these routines will mark it
 * dirty again. It can only be successfully written once all the
 * dependencies are removed. The ffs_fsync routine in conjunction with
 * softdep_sync_metadata work together to get all the dependencies
 * removed so that a file can be successfully written to disk. Three
 * procedures are used when setting up indirect block pointer
 * dependencies. The division is necessary because of the organization
 * of the "balloc" routine and because of the distinction between file
 * pages and file metadata blocks.
 */
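/*
 * Illustrative sketch (not part of the original code): for an indirect
 * block with one pointer still unsafe, the two copies differ only in
 * that slot:
 *
 *	buffer cache copy (bp):		[ ..., 500, 612, ... ]
 *	saved copy (ir_savebp):		[ ..., 500,   0, ... ]
 *
 * The saved copy is what goes to disk; the old value (here 0) in the
 * unsafe slot is replaced once block 612's own dependencies complete.
 */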
/*
 * Allocate a new allocindir structure.
 *
 * Parameters:
 *	ip:		inode for file being extended
 *	ptrno:		offset of pointer in indirect block
 *	newblkno:	disk block number being added
 *	oldblkno:	previous block number, 0 if none
 */
static struct allocindir *
newallocindir(struct inode *ip, int ptrno, ufs_daddr_t newblkno,
	      ufs_daddr_t oldblkno)
{
	struct allocindir *aip;

	aip = kmalloc(sizeof(struct allocindir), M_ALLOCINDIR,
		      M_SOFTDEP_FLAGS | M_ZERO);
	aip->ai_list.wk_type = D_ALLOCINDIR;
	aip->ai_state = ATTACHED;
	aip->ai_offset = ptrno;
	aip->ai_newblkno = newblkno;
	aip->ai_oldblkno = oldblkno;
	aip->ai_freefrag = newfreefrag(ip, oldblkno, ip->i_fs->fs_bsize);
	return (aip);
}

/*
 * Called just before setting an indirect block pointer
 * to a newly allocated file page.
 *
 * Parameters:
 *	ip:		inode for file being extended
 *	lbn:		allocated block number within file
 *	bp:		buffer with indirect blk referencing page
 *	ptrno:		offset of pointer in indirect block
 *	newblkno:	disk block number being added
 *	oldblkno:	previous block number, 0 if none
 *	nbp:		buffer holding allocated page
 */
void
softdep_setup_allocindir_page(struct inode *ip, ufs_lbn_t lbn,
			      struct buf *bp, int ptrno,
			      ufs_daddr_t newblkno, ufs_daddr_t oldblkno,
			      struct buf *nbp)
{
	struct allocindir *aip;
	struct pagedep *pagedep;

	aip = newallocindir(ip, ptrno, newblkno, oldblkno);
	ACQUIRE_LOCK(&lk);
	/*
	 * If we are allocating a directory page, then we must
	 * allocate an associated pagedep to track additions and
	 * deletions.
	 */
	if ((ip->i_mode & IFMT) == IFDIR &&
	    pagedep_lookup(ip, lbn, DEPALLOC, &pagedep) == 0)
		WORKLIST_INSERT_BP(nbp, &pagedep->pd_list);
	WORKLIST_INSERT_BP(nbp, &aip->ai_list);
	FREE_LOCK(&lk);
	setup_allocindir_phase2(bp, ip, aip);
}

/*
 * Called just before setting an indirect block pointer to a
 * newly allocated indirect block.
 *
 * Parameters:
 *	nbp:		newly allocated indirect block
 *	ip:		inode for file being extended
 *	bp:		indirect block referencing allocated block
 *	ptrno:		offset of pointer in indirect block
 *	newblkno:	disk block number being added
 */
void
softdep_setup_allocindir_meta(struct buf *nbp, struct inode *ip,
			      struct buf *bp, int ptrno,
			      ufs_daddr_t newblkno)
{
	struct allocindir *aip;

	aip = newallocindir(ip, ptrno, newblkno, 0);
	ACQUIRE_LOCK(&lk);
	WORKLIST_INSERT_BP(nbp, &aip->ai_list);
	FREE_LOCK(&lk);
	setup_allocindir_phase2(bp, ip, aip);
}
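/*
 * Illustrative example (not part of the original code): the balloc path
 * chooses the variant above by what the newly allocated block contains:
 *
 *	softdep_setup_allocindir_page(ip, lbn, ibp, ptrno, new, old, nbp);
 *		(file data page; directories also get a pagedep)
 *	softdep_setup_allocindir_meta(nbp, ip, ibp, ptrno, new);
 *		(a deeper indirect block; never replaces an old pointer)
 *
 * Both funnel into setup_allocindir_phase2() below.
 */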
1585 * 1586 * Parameters: 1587 * bp: in-memory copy of the indirect block 1588 * ip: inode for file being extended 1589 * aip: allocindir allocated by the above routines 1590 */ 1591 static void 1592 setup_allocindir_phase2(struct buf *bp, struct inode *ip, 1593 struct allocindir *aip) 1594 { 1595 struct worklist *wk; 1596 struct indirdep *indirdep, *newindirdep; 1597 struct bmsafemap *bmsafemap; 1598 struct allocindir *oldaip; 1599 struct freefrag *freefrag; 1600 struct newblk *newblk; 1601 1602 if (bp->b_loffset >= 0) 1603 panic("setup_allocindir_phase2: not indir blk"); 1604 for (indirdep = NULL, newindirdep = NULL; ; ) { 1605 ACQUIRE_LOCK(&lk); 1606 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 1607 if (wk->wk_type != D_INDIRDEP) 1608 continue; 1609 indirdep = WK_INDIRDEP(wk); 1610 break; 1611 } 1612 if (indirdep == NULL && newindirdep) { 1613 indirdep = newindirdep; 1614 WORKLIST_INSERT_BP(bp, &indirdep->ir_list); 1615 newindirdep = NULL; 1616 } 1617 FREE_LOCK(&lk); 1618 if (indirdep) { 1619 if (newblk_lookup(ip->i_fs, aip->ai_newblkno, 0, 1620 &newblk) == 0) 1621 panic("setup_allocindir: lost block"); 1622 ACQUIRE_LOCK(&lk); 1623 if (newblk->nb_state == DEPCOMPLETE) { 1624 aip->ai_state |= DEPCOMPLETE; 1625 aip->ai_buf = NULL; 1626 } else { 1627 bmsafemap = newblk->nb_bmsafemap; 1628 aip->ai_buf = bmsafemap->sm_buf; 1629 LIST_REMOVE(newblk, nb_deps); 1630 LIST_INSERT_HEAD(&bmsafemap->sm_allocindirhd, 1631 aip, ai_deps); 1632 } 1633 LIST_REMOVE(newblk, nb_hash); 1634 kfree(newblk, M_NEWBLK); 1635 aip->ai_indirdep = indirdep; 1636 /* 1637 * Check to see if there is an existing dependency 1638 * for this block. If there is, merge the old 1639 * dependency into the new one. 1640 */ 1641 if (aip->ai_oldblkno == 0) 1642 oldaip = NULL; 1643 else 1644 1645 LIST_FOREACH(oldaip, &indirdep->ir_deplisthd, ai_next) 1646 if (oldaip->ai_offset == aip->ai_offset) 1647 break; 1648 if (oldaip != NULL) { 1649 if (oldaip->ai_newblkno != aip->ai_oldblkno) { 1650 panic("setup_allocindir_phase2: blkno"); 1651 } 1652 aip->ai_oldblkno = oldaip->ai_oldblkno; 1653 freefrag = oldaip->ai_freefrag; 1654 oldaip->ai_freefrag = aip->ai_freefrag; 1655 aip->ai_freefrag = freefrag; 1656 free_allocindir(oldaip, NULL); 1657 } 1658 LIST_INSERT_HEAD(&indirdep->ir_deplisthd, aip, ai_next); 1659 ((ufs_daddr_t *)indirdep->ir_savebp->b_data) 1660 [aip->ai_offset] = aip->ai_oldblkno; 1661 FREE_LOCK(&lk); 1662 } 1663 if (newindirdep) { 1664 /* 1665 * Avoid any possibility of data corruption by 1666 * ensuring that our old version is thrown away. 1667 */ 1668 newindirdep->ir_savebp->b_flags |= B_INVAL | B_NOCACHE; 1669 brelse(newindirdep->ir_savebp); 1670 WORKITEM_FREE((caddr_t)newindirdep, D_INDIRDEP); 1671 } 1672 if (indirdep) 1673 break; 1674 newindirdep = kmalloc(sizeof(struct indirdep), M_INDIRDEP, 1675 M_SOFTDEP_FLAGS); 1676 newindirdep->ir_list.wk_type = D_INDIRDEP; 1677 newindirdep->ir_state = ATTACHED; 1678 LIST_INIT(&newindirdep->ir_deplisthd); 1679 LIST_INIT(&newindirdep->ir_donehd); 1680 if (bp->b_bio2.bio_offset == NOOFFSET) { 1681 VOP_BMAP(bp->b_vp, bp->b_bio1.bio_offset, 1682 &bp->b_bio2.bio_offset, NULL, NULL, 1683 BUF_CMD_WRITE); 1684 } 1685 KKASSERT(bp->b_bio2.bio_offset != NOOFFSET); 1686 newindirdep->ir_savebp = getblk(ip->i_devvp, 1687 bp->b_bio2.bio_offset, 1688 bp->b_bcount, 0, 0); 1689 BUF_KERNPROC(newindirdep->ir_savebp); 1690 bcopy(bp->b_data, newindirdep->ir_savebp->b_data, bp->b_bcount); 1691 } 1692 } 1693 1694 /* 1695 * Block de-allocation dependencies. 
1696  *
1697  * When blocks are de-allocated, the on-disk pointers must be nullified before
1698  * the blocks are made available for use by other files. (The true
1699  * requirement is that old pointers must be nullified before new on-disk
1700  * pointers are set. We chose this slightly more stringent requirement to
1701  * reduce complexity.) Our implementation handles this dependency by updating
1702  * the inode (or indirect block) appropriately but delaying the actual block
1703  * de-allocation (i.e., freemap and free space count manipulation) until
1704  * after the updated versions reach stable storage. After the disk is
1705  * updated, the blocks can be safely de-allocated whenever it is convenient.
1706  * This implementation handles only the common case of reducing a file's
1707  * length to zero. Other cases are handled by the conventional synchronous
1708  * write approach.
1709  *
1710  * The ffs implementation with which we worked double-checks
1711  * the state of the block pointers and file size as it reduces
1712  * a file's length. Some of this code is replicated here in our
1713  * soft updates implementation. The freeblks->fb_chkcnt field is
1714  * used to transfer a part of this information to the procedure
1715  * that eventually de-allocates the blocks.
1716  *
1717  * This routine should be called from the routine that shortens
1718  * a file's length, before the inode's size or block pointers
1719  * are modified. It will save the block pointer information for
1720  * later release and zero the inode so that the calling routine
1721  * can release it.
1722  */
1723 struct softdep_setup_freeblocks_info {
1724 	struct fs *fs;
1725 	struct inode *ip;
1726 };
1727 
1728 static int softdep_setup_freeblocks_bp(struct buf *bp, void *data);
1729 
1730 /*
1731  * Parameters:
1732  *	ip: The inode whose length is to be reduced
1733  *	length: The new length for the file
1734  */
1735 void
1736 softdep_setup_freeblocks(struct inode *ip, off_t length)
1737 {
1738 	struct softdep_setup_freeblocks_info info;
1739 	struct freeblks *freeblks;
1740 	struct inodedep *inodedep;
1741 	struct allocdirect *adp;
1742 	struct vnode *vp;
1743 	struct buf *bp;
1744 	struct fs *fs;
1745 	int i, error, delay;
1746 	int count;
1747 
1748 	fs = ip->i_fs;
1749 	if (length != 0)
1750 		panic("softdep_setup_freeblocks: non-zero length");
1751 	freeblks = kmalloc(sizeof(struct freeblks), M_FREEBLKS,
1752 			   M_SOFTDEP_FLAGS | M_ZERO);
1753 	freeblks->fb_list.wk_type = D_FREEBLKS;
1754 	freeblks->fb_state = ATTACHED;
1755 	freeblks->fb_uid = ip->i_uid;
1756 	freeblks->fb_previousinum = ip->i_number;
1757 	freeblks->fb_devvp = ip->i_devvp;
1758 	freeblks->fb_fs = fs;
1759 	freeblks->fb_oldsize = ip->i_size;
1760 	freeblks->fb_newsize = length;
1761 	freeblks->fb_chkcnt = ip->i_blocks;
1762 	for (i = 0; i < UFS_NDADDR; i++) {
1763 		freeblks->fb_dblks[i] = ip->i_db[i];
1764 		ip->i_db[i] = 0;
1765 	}
1766 	for (i = 0; i < UFS_NIADDR; i++) {
1767 		freeblks->fb_iblks[i] = ip->i_ib[i];
1768 		ip->i_ib[i] = 0;
1769 	}
1770 	ip->i_blocks = 0;
1771 	ip->i_size = 0;
1772 	/*
1773 	 * Push the zero'ed inode to its disk buffer so that we are free
1774 	 * to delete its dependencies below. Once the dependencies are gone
1775 	 * the buffer can be safely released.
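	 *
	 * (A worked example of the mapping used below, with an assumed
	 * INOPB(fs) of 64 inodes per block and a hypothetical
	 * ip->i_number of 70: ino_to_fsba() picks the filesystem block
	 * holding inodes 64-127 and ino_to_fsbo() yields slot
	 * 70 % 64 == 6, so the assignment below stores ip->i_din into
	 * dinode slot 6 of the buffer obtained from bread().)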
1776 */ 1777 if ((error = bread(ip->i_devvp, 1778 fsbtodoff(fs, ino_to_fsba(fs, ip->i_number)), 1779 (int)fs->fs_bsize, &bp)) != 0) 1780 softdep_error("softdep_setup_freeblocks", error); 1781 *((struct ufs1_dinode *)bp->b_data + ino_to_fsbo(fs, ip->i_number)) = 1782 ip->i_din; 1783 /* 1784 * Find and eliminate any inode dependencies. 1785 */ 1786 ACQUIRE_LOCK(&lk); 1787 (void) inodedep_lookup(fs, ip->i_number, DEPALLOC, &inodedep); 1788 if ((inodedep->id_state & IOSTARTED) != 0) { 1789 panic("softdep_setup_freeblocks: inode busy"); 1790 } 1791 /* 1792 * Add the freeblks structure to the list of operations that 1793 * must await the zero'ed inode being written to disk. If we 1794 * still have a bitmap dependency (delay == 0), then the inode 1795 * has never been written to disk, so we can process the 1796 * freeblks below once we have deleted the dependencies. 1797 */ 1798 delay = (inodedep->id_state & DEPCOMPLETE); 1799 if (delay) 1800 WORKLIST_INSERT(&inodedep->id_bufwait, &freeblks->fb_list); 1801 /* 1802 * Because the file length has been truncated to zero, any 1803 * pending block allocation dependency structures associated 1804 * with this inode are obsolete and can simply be de-allocated. 1805 * We must first merge the two dependency lists to get rid of 1806 * any duplicate freefrag structures, then purge the merged list. 1807 */ 1808 merge_inode_lists(inodedep); 1809 while ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != NULL) 1810 free_allocdirect(&inodedep->id_inoupdt, adp, 1); 1811 FREE_LOCK(&lk); 1812 bdwrite(bp); 1813 /* 1814 * We must wait for any I/O in progress to finish so that 1815 * all potential buffers on the dirty list will be visible. 1816 * Once they are all there, walk the list and get rid of 1817 * any dependencies. 1818 */ 1819 vp = ITOV(ip); 1820 ACQUIRE_LOCK(&lk); 1821 drain_output(vp, 1); 1822 1823 info.fs = fs; 1824 info.ip = ip; 1825 lwkt_gettoken(&vp->v_token); 1826 do { 1827 count = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL, 1828 softdep_setup_freeblocks_bp, &info); 1829 } while (count != 0); 1830 lwkt_reltoken(&vp->v_token); 1831 1832 if (inodedep_lookup(fs, ip->i_number, 0, &inodedep) != 0) 1833 (void)free_inodedep(inodedep); 1834 1835 if (delay) { 1836 freeblks->fb_state |= DEPCOMPLETE; 1837 /* 1838 * If the inode with zeroed block pointers is now on disk 1839 * we can start freeing blocks. Add freeblks to the worklist 1840 * instead of calling handle_workitem_freeblocks directly as 1841 * it is more likely that additional IO is needed to complete 1842 * the request here than in the !delay case. 1843 */ 1844 if ((freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE) 1845 add_to_worklist(&freeblks->fb_list); 1846 } 1847 1848 FREE_LOCK(&lk); 1849 /* 1850 * If the inode has never been written to disk (delay == 0), 1851 * then we can process the freeblks now that we have deleted 1852 * the dependencies. 
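	 *
	 * (Restating the two cases handled below:
	 *	delay != 0: DEPCOMPLETE was already set, so the on-disk
	 *		bitmap shows the inode allocated and the freeblks
	 *		must wait on id_bufwait until the zero'ed inode
	 *		reaches the disk.
	 *	delay == 0: the bitmap dependency is still outstanding,
	 *		no version of the inode has ever been written,
	 *		and the blocks are freed directly below.)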
1853 */ 1854 if (!delay) 1855 handle_workitem_freeblocks(freeblks); 1856 } 1857 1858 static int 1859 softdep_setup_freeblocks_bp(struct buf *bp, void *data) 1860 { 1861 struct softdep_setup_freeblocks_info *info = data; 1862 struct inodedep *inodedep; 1863 1864 if (getdirtybuf(&bp, MNT_WAIT) == 0) { 1865 kprintf("softdep_setup_freeblocks_bp(1): caught bp %p going away\n", bp); 1866 return(-1); 1867 } 1868 if (bp->b_vp != ITOV(info->ip) || (bp->b_flags & B_DELWRI) == 0) { 1869 kprintf("softdep_setup_freeblocks_bp(2): caught bp %p going away\n", bp); 1870 BUF_UNLOCK(bp); 1871 return(-1); 1872 } 1873 (void) inodedep_lookup(info->fs, info->ip->i_number, 0, &inodedep); 1874 deallocate_dependencies(bp, inodedep); 1875 bp->b_flags |= B_INVAL | B_NOCACHE; 1876 FREE_LOCK(&lk); 1877 brelse(bp); 1878 ACQUIRE_LOCK(&lk); 1879 return(1); 1880 } 1881 1882 /* 1883 * Reclaim any dependency structures from a buffer that is about to 1884 * be reallocated to a new vnode. The buffer must be locked, thus, 1885 * no I/O completion operations can occur while we are manipulating 1886 * its associated dependencies. The mutex is held so that other I/O's 1887 * associated with related dependencies do not occur. 1888 */ 1889 static void 1890 deallocate_dependencies(struct buf *bp, struct inodedep *inodedep) 1891 { 1892 struct worklist *wk; 1893 struct indirdep *indirdep; 1894 struct allocindir *aip; 1895 struct pagedep *pagedep; 1896 struct dirrem *dirrem; 1897 struct diradd *dap; 1898 int i; 1899 1900 while ((wk = LIST_FIRST(&bp->b_dep)) != NULL) { 1901 switch (wk->wk_type) { 1902 1903 case D_INDIRDEP: 1904 indirdep = WK_INDIRDEP(wk); 1905 /* 1906 * None of the indirect pointers will ever be visible, 1907 * so they can simply be tossed. GOINGAWAY ensures 1908 * that allocated pointers will be saved in the buffer 1909 * cache until they are freed. Note that they will 1910 * only be able to be found by their physical address 1911 * since the inode mapping the logical address will 1912 * be gone. The save buffer used for the safe copy 1913 * was allocated in setup_allocindir_phase2 using 1914 * the physical address so it could be used for this 1915 * purpose. Hence we swap the safe copy with the real 1916 * copy, allowing the safe copy to be freed and holding 1917 * on to the real copy for later use in indir_trunc. 1918 * 1919 * NOTE: ir_savebp is relative to the block device 1920 * so b_bio1 contains the device block number. 1921 */ 1922 if (indirdep->ir_state & GOINGAWAY) { 1923 panic("deallocate_dependencies: already gone"); 1924 } 1925 indirdep->ir_state |= GOINGAWAY; 1926 while ((aip = LIST_FIRST(&indirdep->ir_deplisthd)) != NULL) 1927 free_allocindir(aip, inodedep); 1928 if (bp->b_bio1.bio_offset >= 0 || 1929 bp->b_bio2.bio_offset != indirdep->ir_savebp->b_bio1.bio_offset) { 1930 panic("deallocate_dependencies: not indir"); 1931 } 1932 bcopy(bp->b_data, indirdep->ir_savebp->b_data, 1933 bp->b_bcount); 1934 WORKLIST_REMOVE(wk); 1935 WORKLIST_INSERT_BP(indirdep->ir_savebp, wk); 1936 continue; 1937 1938 case D_PAGEDEP: 1939 pagedep = WK_PAGEDEP(wk); 1940 /* 1941 * None of the directory additions will ever be 1942 * visible, so they can simply be tossed. 1943 */ 1944 for (i = 0; i < DAHASHSZ; i++) 1945 while ((dap = 1946 LIST_FIRST(&pagedep->pd_diraddhd[i]))) 1947 free_diradd(dap); 1948 while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL) 1949 free_diradd(dap); 1950 /* 1951 * Copy any directory remove dependencies to the list 1952 * to be processed after the zero'ed inode is written. 
1953  * If the inode has already been written, then they
1954  * can be dumped directly onto the work list.
1955  */
1956 			while ((dirrem = LIST_FIRST(&pagedep->pd_dirremhd)) != NULL) {
1957 				LIST_REMOVE(dirrem, dm_next);
1958 				dirrem->dm_dirinum = pagedep->pd_ino;
1959 				if (inodedep == NULL ||
1960 				    (inodedep->id_state & ALLCOMPLETE) ==
1961 				     ALLCOMPLETE)
1962 					add_to_worklist(&dirrem->dm_list);
1963 				else
1964 					WORKLIST_INSERT(&inodedep->id_bufwait,
1965 					    &dirrem->dm_list);
1966 			}
1967 			WORKLIST_REMOVE(&pagedep->pd_list);
1968 			LIST_REMOVE(pagedep, pd_hash);
1969 			WORKITEM_FREE(pagedep, D_PAGEDEP);
1970 			continue;
1971 
1972 		case D_ALLOCINDIR:
1973 			free_allocindir(WK_ALLOCINDIR(wk), inodedep);
1974 			continue;
1975 
1976 		case D_ALLOCDIRECT:
1977 		case D_INODEDEP:
1978 			panic("deallocate_dependencies: Unexpected type %s",
1979 			    TYPENAME(wk->wk_type));
1980 			/* NOTREACHED */
1981 
1982 		default:
1983 			panic("deallocate_dependencies: Unknown type %s",
1984 			    TYPENAME(wk->wk_type));
1985 			/* NOTREACHED */
1986 		}
1987 	}
1988 }
1989 
1990 /*
1991  * Free an allocdirect. Generate a new freefrag work request if appropriate.
1992  * This routine must be called with splbio interrupts blocked.
1993  */
1994 static void
1995 free_allocdirect(struct allocdirectlst *adphead,
1996 		 struct allocdirect *adp, int delay)
1997 {
1998 	KKASSERT(lock_held(&lk));
1999 
2000 	if ((adp->ad_state & DEPCOMPLETE) == 0)
2001 		LIST_REMOVE(adp, ad_deps);
2002 	TAILQ_REMOVE(adphead, adp, ad_next);
2003 	if ((adp->ad_state & COMPLETE) == 0)
2004 		WORKLIST_REMOVE(&adp->ad_list);
2005 	if (adp->ad_freefrag != NULL) {
2006 		if (delay)
2007 			WORKLIST_INSERT(&adp->ad_inodedep->id_bufwait,
2008 			    &adp->ad_freefrag->ff_list);
2009 		else
2010 			add_to_worklist(&adp->ad_freefrag->ff_list);
2011 	}
2012 	WORKITEM_FREE(adp, D_ALLOCDIRECT);
2013 }
2014 
2015 /*
2016  * Prepare an inode to be freed. The actual free operation is not
2017  * done until the zero'ed inode has been written to disk.
2018  */
2019 void
2020 softdep_freefile(struct vnode *pvp, ino_t ino, int mode)
2021 {
2022 	struct inode *ip = VTOI(pvp);
2023 	struct inodedep *inodedep;
2024 	struct freefile *freefile;
2025 
2026 	/*
2027 	 * This sets up the inode de-allocation dependency.
2028 	 */
2029 	freefile = kmalloc(sizeof(struct freefile), M_FREEFILE,
2030 			   M_SOFTDEP_FLAGS);
2031 	freefile->fx_list.wk_type = D_FREEFILE;
2032 	freefile->fx_list.wk_state = 0;
2033 	freefile->fx_mode = mode;
2034 	freefile->fx_oldinum = ino;
2035 	freefile->fx_devvp = ip->i_devvp;
2036 	freefile->fx_fs = ip->i_fs;
2037 
2038 	/*
2039 	 * If the inodedep does not exist, then the zero'ed inode has
2040 	 * been written to disk. If the allocated inode has never been
2041 	 * written to disk, then the on-disk inode is zero'ed. In either
2042 	 * case we can free the file immediately.
2043 	 */
2044 	ACQUIRE_LOCK(&lk);
2045 	if (inodedep_lookup(ip->i_fs, ino, 0, &inodedep) == 0 ||
2046 	    check_inode_unwritten(inodedep)) {
2047 		FREE_LOCK(&lk);
2048 		handle_workitem_freefile(freefile);
2049 		return;
2050 	}
2051 	WORKLIST_INSERT(&inodedep->id_inowait, &freefile->fx_list);
2052 	FREE_LOCK(&lk);
2053 }
2054 
2055 /*
2056  * Check to see if an inode has never been written to disk. If
2057  * so free the inodedep and return success, otherwise return failure.
2058  * This routine must be called with splbio interrupts blocked.
2059  *
2060  * If we still have a bitmap dependency, then the inode has never
2061  * been written to disk. Drop the dependency as it is no longer
2062  * necessary since the inode is being deallocated.
We set the 2063 * ALLCOMPLETE flags since the bitmap now properly shows that the 2064 * inode is not allocated. Even if the inode is actively being 2065 * written, it has been rolled back to its zero'ed state, so we 2066 * are ensured that a zero inode is what is on the disk. For short 2067 * lived files, this change will usually result in removing all the 2068 * dependencies from the inode so that it can be freed immediately. 2069 */ 2070 static int 2071 check_inode_unwritten(struct inodedep *inodedep) 2072 { 2073 2074 if ((inodedep->id_state & DEPCOMPLETE) != 0 || 2075 LIST_FIRST(&inodedep->id_pendinghd) != NULL || 2076 LIST_FIRST(&inodedep->id_bufwait) != NULL || 2077 LIST_FIRST(&inodedep->id_inowait) != NULL || 2078 TAILQ_FIRST(&inodedep->id_inoupdt) != NULL || 2079 TAILQ_FIRST(&inodedep->id_newinoupdt) != NULL || 2080 inodedep->id_nlinkdelta != 0) 2081 return (0); 2082 2083 /* 2084 * Another process might be in initiate_write_inodeblock 2085 * trying to allocate memory without holding "Softdep Lock". 2086 */ 2087 if ((inodedep->id_state & IOSTARTED) != 0 && 2088 inodedep->id_savedino == NULL) 2089 return(0); 2090 2091 inodedep->id_state |= ALLCOMPLETE; 2092 LIST_REMOVE(inodedep, id_deps); 2093 inodedep->id_buf = NULL; 2094 if (inodedep->id_state & ONWORKLIST) 2095 WORKLIST_REMOVE(&inodedep->id_list); 2096 if (inodedep->id_savedino != NULL) { 2097 kfree(inodedep->id_savedino, M_INODEDEP); 2098 inodedep->id_savedino = NULL; 2099 } 2100 if (free_inodedep(inodedep) == 0) { 2101 panic("check_inode_unwritten: busy inode"); 2102 } 2103 return (1); 2104 } 2105 2106 /* 2107 * Try to free an inodedep structure. Return 1 if it could be freed. 2108 */ 2109 static int 2110 free_inodedep(struct inodedep *inodedep) 2111 { 2112 2113 if ((inodedep->id_state & ONWORKLIST) != 0 || 2114 (inodedep->id_state & ALLCOMPLETE) != ALLCOMPLETE || 2115 LIST_FIRST(&inodedep->id_pendinghd) != NULL || 2116 LIST_FIRST(&inodedep->id_bufwait) != NULL || 2117 LIST_FIRST(&inodedep->id_inowait) != NULL || 2118 TAILQ_FIRST(&inodedep->id_inoupdt) != NULL || 2119 TAILQ_FIRST(&inodedep->id_newinoupdt) != NULL || 2120 inodedep->id_nlinkdelta != 0 || inodedep->id_savedino != NULL) 2121 return (0); 2122 LIST_REMOVE(inodedep, id_hash); 2123 WORKITEM_FREE(inodedep, D_INODEDEP); 2124 num_inodedep -= 1; 2125 return (1); 2126 } 2127 2128 /* 2129 * This workitem routine performs the block de-allocation. 2130 * The workitem is added to the pending list after the updated 2131 * inode block has been written to disk. As mentioned above, 2132 * checks regarding the number of blocks de-allocated (compared 2133 * to the number of blocks allocated for the file) are also 2134 * performed in this function. 2135 */ 2136 static void 2137 handle_workitem_freeblocks(struct freeblks *freeblks) 2138 { 2139 struct inode tip; 2140 ufs_daddr_t bn; 2141 struct fs *fs; 2142 int i, level, bsize; 2143 long nblocks, blocksreleased = 0; 2144 int error, allerror = 0; 2145 ufs_lbn_t baselbns[UFS_NIADDR], tmpval; 2146 2147 tip.i_number = freeblks->fb_previousinum; 2148 tip.i_devvp = freeblks->fb_devvp; 2149 tip.i_dev = freeblks->fb_devvp->v_rdev; 2150 tip.i_fs = freeblks->fb_fs; 2151 tip.i_size = freeblks->fb_oldsize; 2152 tip.i_uid = freeblks->fb_uid; 2153 fs = freeblks->fb_fs; 2154 tmpval = 1; 2155 baselbns[0] = UFS_NDADDR; 2156 for (i = 1; i < UFS_NIADDR; i++) { 2157 tmpval *= NINDIR(fs); 2158 baselbns[i] = baselbns[i - 1] + tmpval; 2159 } 2160 nblocks = btodb(fs->fs_bsize); 2161 blocksreleased = 0; 2162 /* 2163 * Indirect blocks first. 
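	 *
	 * (Worked example of the baselbns[] values computed above,
	 * assuming NINDIR(fs) == 2048 pointers per indirect block:
	 * since UFS_NDADDR is 12, the single, double and triple
	 * indirect trees start at logical blocks 12, 12 + 2048, and
	 * 12 + 2048 + 2048*2048 respectively.)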
2164 */
2165 	for (level = (UFS_NIADDR - 1); level >= 0; level--) {
2166 		if ((bn = freeblks->fb_iblks[level]) == 0)
2167 			continue;
2168 		if ((error = indir_trunc(&tip, fsbtodoff(fs, bn), level,
2169 		    baselbns[level], &blocksreleased)) != 0)
2170 			allerror = error;
2171 		ffs_blkfree(&tip, bn, fs->fs_bsize);
2172 		blocksreleased += nblocks;
2173 	}
2174 	/*
2175 	 * All direct blocks or frags.
2176 	 */
2177 	for (i = (UFS_NDADDR - 1); i >= 0; i--) {
2178 		if ((bn = freeblks->fb_dblks[i]) == 0)
2179 			continue;
2180 		bsize = blksize(fs, &tip, i);
2181 		ffs_blkfree(&tip, bn, bsize);
2182 		blocksreleased += btodb(bsize);
2183 	}
2184 
2185 #ifdef DIAGNOSTIC
2186 	if (freeblks->fb_chkcnt != blocksreleased)
2187 		kprintf("handle_workitem_freeblocks: block count\n");
2188 	if (allerror)
2189 		softdep_error("handle_workitem_freeblks", allerror);
2190 #endif /* DIAGNOSTIC */
2191 	WORKITEM_FREE(freeblks, D_FREEBLKS);
2192 }
2193 
2194 /*
2195  * Release blocks associated with the inode ip and stored in the indirect
2196  * block at doffset. If level is greater than SINGLE, the block is an
2197  * indirect block and recursive calls to indir_trunc must be used to
2198  * cleanse other indirect blocks.
2199  */
2200 static int
2201 indir_trunc(struct inode *ip, off_t doffset, int level, ufs_lbn_t lbn,
2202 	    long *countp)
2203 {
2204 	struct buf *bp;
2205 	ufs_daddr_t *bap;
2206 	ufs_daddr_t nb;
2207 	struct fs *fs;
2208 	struct worklist *wk;
2209 	struct indirdep *indirdep;
2210 	int i, lbnadd, nblocks;
2211 	int error, allerror = 0;
2212 
2213 	fs = ip->i_fs;
2214 	lbnadd = 1;
2215 	for (i = level; i > 0; i--)
2216 		lbnadd *= NINDIR(fs);
2217 	/*
2218 	 * Get buffer of block pointers to be freed. This routine is not
2219 	 * called until the zero'ed inode has been written, so it is safe
2220 	 * to free blocks as they are encountered. Because the inode has
2221 	 * been zero'ed, calls to bmap on these blocks will fail. So, we
2222 	 * have to use the on-disk address and the block device for the
2223 	 * filesystem to look them up. If the file was deleted before its
2224 	 * indirect blocks were all written to disk, the routine that set
2225 	 * us up (deallocate_dependencies) will have arranged to leave
2226 	 * a complete copy of the indirect block in memory for our use.
2227 	 * Otherwise we have to read the blocks in from the disk.
2228 	 */
2229 	ACQUIRE_LOCK(&lk);
2230 	if ((bp = findblk(ip->i_devvp, doffset, FINDBLK_TEST)) != NULL &&
2231 	    (wk = LIST_FIRST(&bp->b_dep)) != NULL) {
2232 		/*
2233 		 * bp must be ir_savebp, which is held locked for our use.
2234 		 */
2235 		if (wk->wk_type != D_INDIRDEP ||
2236 		    (indirdep = WK_INDIRDEP(wk))->ir_savebp != bp ||
2237 		    (indirdep->ir_state & GOINGAWAY) == 0) {
2238 			panic("indir_trunc: lost indirdep");
2239 		}
2240 		WORKLIST_REMOVE(wk);
2241 		WORKITEM_FREE(indirdep, D_INDIRDEP);
2242 		if (LIST_FIRST(&bp->b_dep) != NULL) {
2243 			panic("indir_trunc: dangling dep");
2244 		}
2245 		FREE_LOCK(&lk);
2246 	} else {
2247 		FREE_LOCK(&lk);
2248 		error = bread(ip->i_devvp, doffset, (int)fs->fs_bsize, &bp);
2249 		if (error)
2250 			return (error);
2251 	}
2252 	/*
2253 	 * Recursively free indirect blocks.
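	 *
	 * (Illustrative arithmetic for the loop below: lbnadd was set
	 * above to NINDIR(fs) raised to the power "level", so entry
	 * bap[i] roots a subtree whose logical blocks start at
	 * lbn + i * lbnadd.  With an assumed NINDIR(fs) of 2048 and
	 * level == 1, bap[3] is a level-0 indirect block whose data
	 * pointers begin at logical block lbn + 3 * 2048.)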
2254 */ 2255 bap = (ufs_daddr_t *)bp->b_data; 2256 nblocks = btodb(fs->fs_bsize); 2257 for (i = NINDIR(fs) - 1; i >= 0; i--) { 2258 if ((nb = bap[i]) == 0) 2259 continue; 2260 if (level != 0) { 2261 if ((error = indir_trunc(ip, fsbtodoff(fs, nb), 2262 level - 1, lbn + (i * lbnadd), countp)) != 0) 2263 allerror = error; 2264 } 2265 ffs_blkfree(ip, nb, fs->fs_bsize); 2266 *countp += nblocks; 2267 } 2268 bp->b_flags |= B_INVAL | B_NOCACHE; 2269 brelse(bp); 2270 return (allerror); 2271 } 2272 2273 /* 2274 * Free an allocindir. 2275 * This routine must be called with splbio interrupts blocked. 2276 */ 2277 static void 2278 free_allocindir(struct allocindir *aip, struct inodedep *inodedep) 2279 { 2280 struct freefrag *freefrag; 2281 2282 KKASSERT(lock_held(&lk)); 2283 2284 if ((aip->ai_state & DEPCOMPLETE) == 0) 2285 LIST_REMOVE(aip, ai_deps); 2286 if (aip->ai_state & ONWORKLIST) 2287 WORKLIST_REMOVE(&aip->ai_list); 2288 LIST_REMOVE(aip, ai_next); 2289 if ((freefrag = aip->ai_freefrag) != NULL) { 2290 if (inodedep == NULL) 2291 add_to_worklist(&freefrag->ff_list); 2292 else 2293 WORKLIST_INSERT(&inodedep->id_bufwait, 2294 &freefrag->ff_list); 2295 } 2296 WORKITEM_FREE(aip, D_ALLOCINDIR); 2297 } 2298 2299 /* 2300 * Directory entry addition dependencies. 2301 * 2302 * When adding a new directory entry, the inode (with its incremented link 2303 * count) must be written to disk before the directory entry's pointer to it. 2304 * Also, if the inode is newly allocated, the corresponding freemap must be 2305 * updated (on disk) before the directory entry's pointer. These requirements 2306 * are met via undo/redo on the directory entry's pointer, which consists 2307 * simply of the inode number. 2308 * 2309 * As directory entries are added and deleted, the free space within a 2310 * directory block can become fragmented. The ufs filesystem will compact 2311 * a fragmented directory block to make space for a new entry. When this 2312 * occurs, the offsets of previously added entries change. Any "diradd" 2313 * dependency structures corresponding to these entries must be updated with 2314 * the new offsets. 2315 */ 2316 2317 /* 2318 * This routine is called after the in-memory inode's link 2319 * count has been incremented, but before the directory entry's 2320 * pointer to the inode has been set. 2321 * 2322 * Parameters: 2323 * bp: buffer containing directory block 2324 * dp: inode for directory 2325 * diroffset: offset of new entry in directory 2326 * newinum: inode referenced by new directory entry 2327 * newdirbp: non-NULL => contents of new mkdir 2328 */ 2329 void 2330 softdep_setup_directory_add(struct buf *bp, struct inode *dp, off_t diroffset, 2331 ino_t newinum, struct buf *newdirbp) 2332 { 2333 int offset; /* offset of new entry within directory block */ 2334 ufs_lbn_t lbn; /* block in directory containing new entry */ 2335 struct fs *fs; 2336 struct diradd *dap; 2337 struct pagedep *pagedep; 2338 struct inodedep *inodedep; 2339 struct mkdir *mkdir1, *mkdir2; 2340 2341 /* 2342 * Whiteouts have no dependencies. 
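	 *
	 * (Orientation for the mkdir handling below: when newdirbp is
	 * non-NULL the code records two extra workitems, mkdir1
	 * (MKDIR_BODY), which waits for the new block holding "." and
	 * ".." to be written, and mkdir2 (MKDIR_PARENT), which waits
	 * for the parent's increased link count to reach the disk.
	 * The new entry cannot be committed until both complete.)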
2343 */ 2344 if (newinum == UFS_WINO) { 2345 if (newdirbp != NULL) 2346 bdwrite(newdirbp); 2347 return; 2348 } 2349 2350 fs = dp->i_fs; 2351 lbn = lblkno(fs, diroffset); 2352 offset = blkoff(fs, diroffset); 2353 dap = kmalloc(sizeof(struct diradd), M_DIRADD, 2354 M_SOFTDEP_FLAGS | M_ZERO); 2355 dap->da_list.wk_type = D_DIRADD; 2356 dap->da_offset = offset; 2357 dap->da_newinum = newinum; 2358 dap->da_state = ATTACHED; 2359 if (newdirbp == NULL) { 2360 dap->da_state |= DEPCOMPLETE; 2361 ACQUIRE_LOCK(&lk); 2362 } else { 2363 dap->da_state |= MKDIR_BODY | MKDIR_PARENT; 2364 mkdir1 = kmalloc(sizeof(struct mkdir), M_MKDIR, 2365 M_SOFTDEP_FLAGS); 2366 mkdir1->md_list.wk_type = D_MKDIR; 2367 mkdir1->md_state = MKDIR_BODY; 2368 mkdir1->md_diradd = dap; 2369 mkdir2 = kmalloc(sizeof(struct mkdir), M_MKDIR, 2370 M_SOFTDEP_FLAGS); 2371 mkdir2->md_list.wk_type = D_MKDIR; 2372 mkdir2->md_state = MKDIR_PARENT; 2373 mkdir2->md_diradd = dap; 2374 /* 2375 * Dependency on "." and ".." being written to disk. 2376 */ 2377 mkdir1->md_buf = newdirbp; 2378 ACQUIRE_LOCK(&lk); 2379 LIST_INSERT_HEAD(&mkdirlisthd, mkdir1, md_mkdirs); 2380 WORKLIST_INSERT_BP(newdirbp, &mkdir1->md_list); 2381 FREE_LOCK(&lk); 2382 bdwrite(newdirbp); 2383 /* 2384 * Dependency on link count increase for parent directory 2385 */ 2386 ACQUIRE_LOCK(&lk); 2387 if (inodedep_lookup(dp->i_fs, dp->i_number, 0, &inodedep) == 0 2388 || (inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) { 2389 dap->da_state &= ~MKDIR_PARENT; 2390 WORKITEM_FREE(mkdir2, D_MKDIR); 2391 } else { 2392 LIST_INSERT_HEAD(&mkdirlisthd, mkdir2, md_mkdirs); 2393 WORKLIST_INSERT(&inodedep->id_bufwait,&mkdir2->md_list); 2394 } 2395 } 2396 /* 2397 * Link into parent directory pagedep to await its being written. 2398 */ 2399 if (pagedep_lookup(dp, lbn, DEPALLOC, &pagedep) == 0) 2400 WORKLIST_INSERT_BP(bp, &pagedep->pd_list); 2401 dap->da_pagedep = pagedep; 2402 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)], dap, 2403 da_pdlist); 2404 /* 2405 * Link into its inodedep. Put it on the id_bufwait list if the inode 2406 * is not yet written. If it is written, do the post-inode write 2407 * processing to put it on the id_pendinghd list. 2408 */ 2409 (void) inodedep_lookup(fs, newinum, DEPALLOC, &inodedep); 2410 if ((inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) 2411 diradd_inode_written(dap, inodedep); 2412 else 2413 WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list); 2414 FREE_LOCK(&lk); 2415 } 2416 2417 /* 2418 * This procedure is called to change the offset of a directory 2419 * entry when compacting a directory block which must be owned 2420 * exclusively by the caller. Note that the actual entry movement 2421 * must be done in this procedure to ensure that no I/O completions 2422 * occur while the move is in progress. 
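 *
 * (Illustrative arithmetic, with hypothetical values: the dependency
 * is keyed by the entry's offset within its directory block, so an
 * entry moved within the block ends up at
 *
 *	newoffset = blkoff(fs, dp->i_offset) + (newloc - base)
 *
 * and is migrated between pd_diraddhd[] hash chains below whenever
 * DIRADDHASH(newoffset) differs from DIRADDHASH(oldoffset).)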
2423 * 2424 * Parameters: 2425 * dp: inode for directory 2426 * base: address of dp->i_offset 2427 * oldloc: address of old directory location 2428 * newloc: address of new directory location 2429 * entrysize: size of directory entry 2430 */ 2431 void 2432 softdep_change_directoryentry_offset(struct inode *dp, caddr_t base, 2433 caddr_t oldloc, caddr_t newloc, 2434 int entrysize) 2435 { 2436 int offset, oldoffset, newoffset; 2437 struct pagedep *pagedep; 2438 struct diradd *dap; 2439 ufs_lbn_t lbn; 2440 2441 ACQUIRE_LOCK(&lk); 2442 lbn = lblkno(dp->i_fs, dp->i_offset); 2443 offset = blkoff(dp->i_fs, dp->i_offset); 2444 if (pagedep_lookup(dp, lbn, 0, &pagedep) == 0) 2445 goto done; 2446 oldoffset = offset + (oldloc - base); 2447 newoffset = offset + (newloc - base); 2448 2449 LIST_FOREACH(dap, &pagedep->pd_diraddhd[DIRADDHASH(oldoffset)], da_pdlist) { 2450 if (dap->da_offset != oldoffset) 2451 continue; 2452 dap->da_offset = newoffset; 2453 if (DIRADDHASH(newoffset) == DIRADDHASH(oldoffset)) 2454 break; 2455 LIST_REMOVE(dap, da_pdlist); 2456 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(newoffset)], 2457 dap, da_pdlist); 2458 break; 2459 } 2460 if (dap == NULL) { 2461 2462 LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist) { 2463 if (dap->da_offset == oldoffset) { 2464 dap->da_offset = newoffset; 2465 break; 2466 } 2467 } 2468 } 2469 done: 2470 bcopy(oldloc, newloc, entrysize); 2471 FREE_LOCK(&lk); 2472 } 2473 2474 /* 2475 * Free a diradd dependency structure. This routine must be called 2476 * with splbio interrupts blocked. 2477 */ 2478 static void 2479 free_diradd(struct diradd *dap) 2480 { 2481 struct dirrem *dirrem; 2482 struct pagedep *pagedep; 2483 struct inodedep *inodedep; 2484 struct mkdir *mkdir, *nextmd; 2485 2486 KKASSERT(lock_held(&lk)); 2487 2488 WORKLIST_REMOVE(&dap->da_list); 2489 LIST_REMOVE(dap, da_pdlist); 2490 if ((dap->da_state & DIRCHG) == 0) { 2491 pagedep = dap->da_pagedep; 2492 } else { 2493 dirrem = dap->da_previous; 2494 pagedep = dirrem->dm_pagedep; 2495 dirrem->dm_dirinum = pagedep->pd_ino; 2496 add_to_worklist(&dirrem->dm_list); 2497 } 2498 if (inodedep_lookup(VFSTOUFS(pagedep->pd_mnt)->um_fs, dap->da_newinum, 2499 0, &inodedep) != 0) 2500 (void) free_inodedep(inodedep); 2501 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) { 2502 for (mkdir = LIST_FIRST(&mkdirlisthd); mkdir; mkdir = nextmd) { 2503 nextmd = LIST_NEXT(mkdir, md_mkdirs); 2504 if (mkdir->md_diradd != dap) 2505 continue; 2506 dap->da_state &= ~mkdir->md_state; 2507 WORKLIST_REMOVE(&mkdir->md_list); 2508 LIST_REMOVE(mkdir, md_mkdirs); 2509 WORKITEM_FREE(mkdir, D_MKDIR); 2510 } 2511 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) { 2512 panic("free_diradd: unfound ref"); 2513 } 2514 } 2515 WORKITEM_FREE(dap, D_DIRADD); 2516 } 2517 2518 /* 2519 * Directory entry removal dependencies. 2520 * 2521 * When removing a directory entry, the entry's inode pointer must be 2522 * zero'ed on disk before the corresponding inode's link count is decremented 2523 * (possibly freeing the inode for re-use). This dependency is handled by 2524 * updating the directory entry but delaying the inode count reduction until 2525 * after the directory block has been written to disk. After this point, the 2526 * inode count can be decremented whenever it is convenient. 2527 */ 2528 2529 /* 2530 * This routine should be called immediately after removing 2531 * a directory entry. 
The inode's link count should not be 2532 * decremented by the calling procedure -- the soft updates 2533 * code will do this task when it is safe. 2534 * 2535 * Parameters: 2536 * bp: buffer containing directory block 2537 * dp: inode for the directory being modified 2538 * ip: inode for directory entry being removed 2539 * isrmdir: indicates if doing RMDIR 2540 */ 2541 void 2542 softdep_setup_remove(struct buf *bp, struct inode *dp, struct inode *ip, 2543 int isrmdir) 2544 { 2545 struct dirrem *dirrem, *prevdirrem; 2546 2547 /* 2548 * Allocate a new dirrem if appropriate and ACQUIRE_LOCK. 2549 */ 2550 dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem); 2551 2552 /* 2553 * If the COMPLETE flag is clear, then there were no active 2554 * entries and we want to roll back to a zeroed entry until 2555 * the new inode is committed to disk. If the COMPLETE flag is 2556 * set then we have deleted an entry that never made it to 2557 * disk. If the entry we deleted resulted from a name change, 2558 * then the old name still resides on disk. We cannot delete 2559 * its inode (returned to us in prevdirrem) until the zeroed 2560 * directory entry gets to disk. The new inode has never been 2561 * referenced on the disk, so can be deleted immediately. 2562 */ 2563 if ((dirrem->dm_state & COMPLETE) == 0) { 2564 LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd, dirrem, 2565 dm_next); 2566 FREE_LOCK(&lk); 2567 } else { 2568 if (prevdirrem != NULL) 2569 LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd, 2570 prevdirrem, dm_next); 2571 dirrem->dm_dirinum = dirrem->dm_pagedep->pd_ino; 2572 FREE_LOCK(&lk); 2573 handle_workitem_remove(dirrem); 2574 } 2575 } 2576 2577 /* 2578 * Allocate a new dirrem if appropriate and return it along with 2579 * its associated pagedep. Called without a lock, returns with lock. 2580 */ 2581 static long num_dirrem; /* number of dirrem allocated */ 2582 2583 /* 2584 * Parameters: 2585 * bp: buffer containing directory block 2586 * dp: inode for the directory being modified 2587 * ip: inode for directory entry being removed 2588 * isrmdir: indicates if doing RMDIR 2589 * prevdirremp: previously referenced inode, if any 2590 */ 2591 static struct dirrem * 2592 newdirrem(struct buf *bp, struct inode *dp, struct inode *ip, 2593 int isrmdir, struct dirrem **prevdirremp) 2594 { 2595 int offset; 2596 ufs_lbn_t lbn; 2597 struct diradd *dap; 2598 struct dirrem *dirrem; 2599 struct pagedep *pagedep; 2600 2601 /* 2602 * Whiteouts have no deletion dependencies. 2603 */ 2604 if (ip == NULL) 2605 panic("newdirrem: whiteout"); 2606 /* 2607 * If we are over our limit, try to improve the situation. 2608 * Limiting the number of dirrem structures will also limit 2609 * the number of freefile and freeblks structures. 2610 */ 2611 if (num_dirrem > max_softdeps / 4) 2612 speedup_syncer(NULL); 2613 if (num_dirrem > max_softdeps / 2) { 2614 ACQUIRE_LOCK(&lk); 2615 request_cleanup(FLUSH_REMOVE); 2616 FREE_LOCK(&lk); 2617 } 2618 2619 num_dirrem += 1; 2620 dirrem = kmalloc(sizeof(struct dirrem), M_DIRREM, 2621 M_SOFTDEP_FLAGS | M_ZERO); 2622 dirrem->dm_list.wk_type = D_DIRREM; 2623 dirrem->dm_state = isrmdir ? 
RMDIR : 0;
2624 	dirrem->dm_mnt = ITOV(ip)->v_mount;
2625 	dirrem->dm_oldinum = ip->i_number;
2626 	*prevdirremp = NULL;
2627 
2628 	ACQUIRE_LOCK(&lk);
2629 	lbn = lblkno(dp->i_fs, dp->i_offset);
2630 	offset = blkoff(dp->i_fs, dp->i_offset);
2631 	if (pagedep_lookup(dp, lbn, DEPALLOC, &pagedep) == 0)
2632 		WORKLIST_INSERT_BP(bp, &pagedep->pd_list);
2633 	dirrem->dm_pagedep = pagedep;
2634 	/*
2635 	 * Check for a diradd dependency for the same directory entry.
2636 	 * If present, then both dependencies become obsolete and can
2637 	 * be de-allocated. Check for an entry on both the pd_diraddhd
2638 	 * list and the pd_pendinghd list.
2639 	 */
2640 
2641 	LIST_FOREACH(dap, &pagedep->pd_diraddhd[DIRADDHASH(offset)], da_pdlist)
2642 		if (dap->da_offset == offset)
2643 			break;
2644 	if (dap == NULL) {
2645 
2646 		LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist)
2647 			if (dap->da_offset == offset)
2648 				break;
2649 		if (dap == NULL)
2650 			return (dirrem);
2651 	}
2652 	/*
2653 	 * Must be ATTACHED at this point.
2654 	 */
2655 	if ((dap->da_state & ATTACHED) == 0) {
2656 		panic("newdirrem: not ATTACHED");
2657 	}
2658 	if (dap->da_newinum != ip->i_number) {
2659 		panic("newdirrem: inum %"PRId64" should be %"PRId64,
2660 		    ip->i_number, dap->da_newinum);
2661 	}
2662 	/*
2663 	 * If we are deleting a changed name that never made it to disk,
2664 	 * then return the dirrem describing the previous inode (which
2665 	 * represents the inode currently referenced from this entry on disk).
2666 	 */
2667 	if ((dap->da_state & DIRCHG) != 0) {
2668 		*prevdirremp = dap->da_previous;
2669 		dap->da_state &= ~DIRCHG;
2670 		dap->da_pagedep = pagedep;
2671 	}
2672 	/*
2673 	 * We are deleting an entry that never made it to disk.
2674 	 * Mark it COMPLETE so we can delete its inode immediately.
2675 	 */
2676 	dirrem->dm_state |= COMPLETE;
2677 	free_diradd(dap);
2678 	return (dirrem);
2679 }
2680 
2681 /*
2682  * Directory entry change dependencies.
2683  *
2684  * Changing an existing directory entry requires that an add operation
2685  * be completed first followed by a deletion. The semantics for the addition
2686  * are identical to the description of adding a new entry above except
2687  * that the rollback is to the old inode number rather than zero. Once
2688  * the addition dependency is completed, the removal is done as described
2689  * in the removal routine above.
2690  */
2691 
2692 /*
2693  * This routine should be called immediately after changing
2694  * a directory entry. The inode's link count should not be
2695  * decremented by the calling procedure -- the soft updates
2696  * code will perform this task when it is safe.
2697  *
2698  * Parameters:
2699  *	bp: buffer containing directory block
2700  *	dp: inode for the directory being modified
2701  *	ip: inode for directory entry being removed
2702  *	newinum: new inode number for changed entry
2703  *	isrmdir: indicates if doing RMDIR
2704  */
2705 void
2706 softdep_setup_directory_change(struct buf *bp, struct inode *dp,
2707 			       struct inode *ip, ino_t newinum,
2708 			       int isrmdir)
2709 {
2710 	int offset;
2711 	struct diradd *dap = NULL;
2712 	struct dirrem *dirrem, *prevdirrem;
2713 	struct pagedep *pagedep;
2714 	struct inodedep *inodedep;
2715 
2716 	offset = blkoff(dp->i_fs, dp->i_offset);
2717 
2718 	/*
2719 	 * Whiteouts do not need diradd dependencies.
2720 */ 2721 if (newinum != UFS_WINO) { 2722 dap = kmalloc(sizeof(struct diradd), M_DIRADD, 2723 M_SOFTDEP_FLAGS | M_ZERO); 2724 dap->da_list.wk_type = D_DIRADD; 2725 dap->da_state = DIRCHG | ATTACHED | DEPCOMPLETE; 2726 dap->da_offset = offset; 2727 dap->da_newinum = newinum; 2728 } 2729 2730 /* 2731 * Allocate a new dirrem and ACQUIRE_LOCK. 2732 */ 2733 dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem); 2734 pagedep = dirrem->dm_pagedep; 2735 /* 2736 * The possible values for isrmdir: 2737 * 0 - non-directory file rename 2738 * 1 - directory rename within same directory 2739 * inum - directory rename to new directory of given inode number 2740 * When renaming to a new directory, we are both deleting and 2741 * creating a new directory entry, so the link count on the new 2742 * directory should not change. Thus we do not need the followup 2743 * dirrem which is usually done in handle_workitem_remove. We set 2744 * the DIRCHG flag to tell handle_workitem_remove to skip the 2745 * followup dirrem. 2746 */ 2747 if (isrmdir > 1) 2748 dirrem->dm_state |= DIRCHG; 2749 2750 /* 2751 * Whiteouts have no additional dependencies, 2752 * so just put the dirrem on the correct list. 2753 */ 2754 if (newinum == UFS_WINO) { 2755 if ((dirrem->dm_state & COMPLETE) == 0) { 2756 LIST_INSERT_HEAD(&pagedep->pd_dirremhd, dirrem, 2757 dm_next); 2758 } else { 2759 dirrem->dm_dirinum = pagedep->pd_ino; 2760 add_to_worklist(&dirrem->dm_list); 2761 } 2762 FREE_LOCK(&lk); 2763 return; 2764 } 2765 2766 /* 2767 * If the COMPLETE flag is clear, then there were no active 2768 * entries and we want to roll back to the previous inode until 2769 * the new inode is committed to disk. If the COMPLETE flag is 2770 * set, then we have deleted an entry that never made it to disk. 2771 * If the entry we deleted resulted from a name change, then the old 2772 * inode reference still resides on disk. Any rollback that we do 2773 * needs to be to that old inode (returned to us in prevdirrem). If 2774 * the entry we deleted resulted from a create, then there is 2775 * no entry on the disk, so we want to roll back to zero rather 2776 * than the uncommitted inode. In either of the COMPLETE cases we 2777 * want to immediately free the unwritten and unreferenced inode. 2778 */ 2779 if ((dirrem->dm_state & COMPLETE) == 0) { 2780 dap->da_previous = dirrem; 2781 } else { 2782 if (prevdirrem != NULL) { 2783 dap->da_previous = prevdirrem; 2784 } else { 2785 dap->da_state &= ~DIRCHG; 2786 dap->da_pagedep = pagedep; 2787 } 2788 dirrem->dm_dirinum = pagedep->pd_ino; 2789 add_to_worklist(&dirrem->dm_list); 2790 } 2791 /* 2792 * Link into its inodedep. Put it on the id_bufwait list if the inode 2793 * is not yet written. If it is written, do the post-inode write 2794 * processing to put it on the id_pendinghd list. 2795 */ 2796 if (inodedep_lookup(dp->i_fs, newinum, DEPALLOC, &inodedep) == 0 || 2797 (inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) { 2798 dap->da_state |= COMPLETE; 2799 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist); 2800 WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list); 2801 } else { 2802 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)], 2803 dap, da_pdlist); 2804 WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list); 2805 } 2806 FREE_LOCK(&lk); 2807 } 2808 2809 /* 2810 * Called whenever the link count on an inode is changed. 2811 * It creates an inode dependency so that the new reference(s) 2812 * to the inode cannot be committed to disk until the updated 2813 * inode has been written. 
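 *
 * (A concrete example, not from the original source: after unlink(2)
 * removes one of two names for a file, ip->i_effnlink drops to 1
 * immediately while ip->i_nlink stays 2 until the cleared directory
 * entry is on disk, so the id_nlinkdelta recorded below is 1; the
 * delta returns to zero when handle_workitem_remove() later applies
 * the decrement.)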
2814  *
2815  * Parameters:
2816  *	ip: the inode with the increased link count
2817  */
2818 void
2819 softdep_change_linkcnt(struct inode *ip)
2820 {
2821 	struct inodedep *inodedep;
2822 
2823 	ACQUIRE_LOCK(&lk);
2824 	(void) inodedep_lookup(ip->i_fs, ip->i_number, DEPALLOC, &inodedep);
2825 	if (ip->i_nlink < ip->i_effnlink) {
2826 		panic("softdep_change_linkcnt: bad delta");
2827 	}
2828 	inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
2829 	FREE_LOCK(&lk);
2830 }
2831 
2832 /*
2833  * This workitem decrements the inode's link count.
2834  * If the link count reaches zero, the file is removed.
2835  */
2836 static void
2837 handle_workitem_remove(struct dirrem *dirrem)
2838 {
2839 	struct inodedep *inodedep;
2840 	struct vnode *vp;
2841 	struct inode *ip;
2842 	ino_t oldinum;
2843 	int error;
2844 
2845 	error = VFS_VGET(dirrem->dm_mnt, NULL, dirrem->dm_oldinum, &vp);
2846 	if (error) {
2847 		softdep_error("handle_workitem_remove: vget", error);
2848 		return;
2849 	}
2850 	ip = VTOI(vp);
2851 	ACQUIRE_LOCK(&lk);
2852 	if ((inodedep_lookup(ip->i_fs, dirrem->dm_oldinum, 0, &inodedep)) == 0) {
2853 		panic("handle_workitem_remove: lost inodedep");
2854 	}
2855 	/*
2856 	 * Normal file deletion.
2857 	 */
2858 	if ((dirrem->dm_state & RMDIR) == 0) {
2859 		ip->i_nlink--;
2860 		ip->i_flag |= IN_CHANGE;
2861 		if (ip->i_nlink < ip->i_effnlink) {
2862 			panic("handle_workitem_remove: bad file delta");
2863 		}
2864 		inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
2865 		FREE_LOCK(&lk);
2866 		vput(vp);
2867 		num_dirrem -= 1;
2868 		WORKITEM_FREE(dirrem, D_DIRREM);
2869 		return;
2870 	}
2871 	/*
2872 	 * Directory deletion. Decrement reference count for both the
2873 	 * just deleted parent directory entry and the reference for ".".
2874 	 * Next truncate the directory to length zero. When the
2875 	 * truncation completes, arrange to have the reference count on
2876 	 * the parent decremented to account for the loss of "..".
2877 	 */
2878 	ip->i_nlink -= 2;
2879 	ip->i_flag |= IN_CHANGE;
2880 	if (ip->i_nlink < ip->i_effnlink) {
2881 		panic("handle_workitem_remove: bad dir delta");
2882 	}
2883 	inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
2884 	FREE_LOCK(&lk);
2885 	if ((error = ffs_truncate(vp, (off_t)0, 0, proc0.p_ucred)) != 0)
2886 		softdep_error("handle_workitem_remove: truncate", error);
2887 	/*
2888 	 * Rename a directory to a new parent. Since we are both deleting
2889 	 * and creating a new directory entry, the link count on the new
2890 	 * directory should not change. Thus we skip the followup dirrem.
2891 	 */
2892 	if (dirrem->dm_state & DIRCHG) {
2893 		vput(vp);
2894 		num_dirrem -= 1;
2895 		WORKITEM_FREE(dirrem, D_DIRREM);
2896 		return;
2897 	}
2898 	/*
2899 	 * If the inodedep does not exist, then the zero'ed inode has
2900 	 * been written to disk. If the allocated inode has never been
2901 	 * written to disk, then the on-disk inode is zero'ed. In either
2902 	 * case we can remove the file immediately.
2903 	 */
2904 	ACQUIRE_LOCK(&lk);
2905 	dirrem->dm_state = 0;
2906 	oldinum = dirrem->dm_oldinum;
2907 	dirrem->dm_oldinum = dirrem->dm_dirinum;
2908 	if (inodedep_lookup(ip->i_fs, oldinum, 0, &inodedep) == 0 ||
2909 	    check_inode_unwritten(inodedep)) {
2910 		FREE_LOCK(&lk);
2911 		vput(vp);
2912 		handle_workitem_remove(dirrem);
2913 		return;
2914 	}
2915 	WORKLIST_INSERT(&inodedep->id_inowait, &dirrem->dm_list);
2916 	FREE_LOCK(&lk);
2917 	ip->i_flag |= IN_CHANGE;
2918 	ffs_update(vp, 0);
2919 	vput(vp);
2920 }
2921 
2922 /*
2923  * Inode de-allocation dependencies.
2924  *
2925  * When an inode's link count is reduced to zero, it can be de-allocated.
We 2926 * found it convenient to postpone de-allocation until after the inode is 2927 * written to disk with its new link count (zero). At this point, all of the 2928 * on-disk inode's block pointers are nullified and, with careful dependency 2929 * list ordering, all dependencies related to the inode will be satisfied and 2930 * the corresponding dependency structures de-allocated. So, if/when the 2931 * inode is reused, there will be no mixing of old dependencies with new 2932 * ones. This artificial dependency is set up by the block de-allocation 2933 * procedure above (softdep_setup_freeblocks) and completed by the 2934 * following procedure. 2935 */ 2936 static void 2937 handle_workitem_freefile(struct freefile *freefile) 2938 { 2939 struct vnode vp; 2940 struct inode tip; 2941 struct inodedep *idp; 2942 int error; 2943 2944 #ifdef DEBUG 2945 ACQUIRE_LOCK(&lk); 2946 error = inodedep_lookup(freefile->fx_fs, freefile->fx_oldinum, 0, &idp); 2947 FREE_LOCK(&lk); 2948 if (error) 2949 panic("handle_workitem_freefile: inodedep survived"); 2950 #endif 2951 tip.i_devvp = freefile->fx_devvp; 2952 tip.i_dev = freefile->fx_devvp->v_rdev; 2953 tip.i_fs = freefile->fx_fs; 2954 vp.v_data = &tip; 2955 if ((error = ffs_freefile(&vp, freefile->fx_oldinum, freefile->fx_mode)) != 0) 2956 softdep_error("handle_workitem_freefile", error); 2957 WORKITEM_FREE(freefile, D_FREEFILE); 2958 } 2959 2960 /* 2961 * Helper function which unlinks marker element from work list and returns 2962 * the next element on the list. 2963 */ 2964 static __inline struct worklist * 2965 markernext(struct worklist *marker) 2966 { 2967 struct worklist *next; 2968 2969 next = LIST_NEXT(marker, wk_list); 2970 LIST_REMOVE(marker, wk_list); 2971 return next; 2972 } 2973 2974 /* 2975 * checkread, checkwrite 2976 * 2977 * bioops callback - hold io_token 2978 */ 2979 static int 2980 softdep_checkread(struct buf *bp) 2981 { 2982 /* nothing to do, mp lock not needed */ 2983 return(0); 2984 } 2985 2986 /* 2987 * bioops callback - hold io_token 2988 */ 2989 static int 2990 softdep_checkwrite(struct buf *bp) 2991 { 2992 /* nothing to do, mp lock not needed */ 2993 return(0); 2994 } 2995 2996 /* 2997 * Disk writes. 2998 * 2999 * The dependency structures constructed above are most actively used when file 3000 * system blocks are written to disk. No constraints are placed on when a 3001 * block can be written, but unsatisfied update dependencies are made safe by 3002 * modifying (or replacing) the source memory for the duration of the disk 3003 * write. When the disk write completes, the memory block is again brought 3004 * up-to-date. 3005 * 3006 * In-core inode structure reclamation. 3007 * 3008 * Because there are a finite number of "in-core" inode structures, they are 3009 * reused regularly. By transferring all inode-related dependencies to the 3010 * in-memory inode block and indexing them separately (via "inodedep"s), we 3011 * can allow "in-core" inode structures to be reused at any time and avoid 3012 * any increase in contention. 3013 * 3014 * Called just before entering the device driver to initiate a new disk I/O. 3015 * The buffer must be locked, thus, no I/O completion operations can occur 3016 * while we are manipulating its associated dependencies. 
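 *
 * (A note on the traversal below: a handler may remove and free the
 * workitem it is given -- the D_INDIRDEP case does exactly that -- so
 * the loop parks a marker entry after the current item and resumes
 * from it via markernext(), keeping the walk of bp->b_dep safe.)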
3017 * 3018 * bioops callback - hold io_token 3019 * 3020 * Parameters: 3021 * bp: structure describing disk write to occur 3022 */ 3023 static void 3024 softdep_disk_io_initiation(struct buf *bp) 3025 { 3026 struct worklist *wk; 3027 struct worklist marker; 3028 struct indirdep *indirdep; 3029 3030 /* 3031 * We only care about write operations. There should never 3032 * be dependencies for reads. 3033 */ 3034 if (bp->b_cmd == BUF_CMD_READ) 3035 panic("softdep_disk_io_initiation: read"); 3036 3037 ACQUIRE_LOCK(&lk); 3038 marker.wk_type = D_LAST + 1; /* Not a normal workitem */ 3039 3040 /* 3041 * Do any necessary pre-I/O processing. 3042 */ 3043 for (wk = LIST_FIRST(&bp->b_dep); wk; wk = markernext(&marker)) { 3044 LIST_INSERT_AFTER(wk, &marker, wk_list); 3045 3046 switch (wk->wk_type) { 3047 case D_PAGEDEP: 3048 initiate_write_filepage(WK_PAGEDEP(wk), bp); 3049 continue; 3050 3051 case D_INODEDEP: 3052 initiate_write_inodeblock(WK_INODEDEP(wk), bp); 3053 continue; 3054 3055 case D_INDIRDEP: 3056 indirdep = WK_INDIRDEP(wk); 3057 if (indirdep->ir_state & GOINGAWAY) 3058 panic("disk_io_initiation: indirdep gone"); 3059 /* 3060 * If there are no remaining dependencies, this 3061 * will be writing the real pointers, so the 3062 * dependency can be freed. 3063 */ 3064 if (LIST_FIRST(&indirdep->ir_deplisthd) == NULL) { 3065 indirdep->ir_savebp->b_flags |= B_INVAL | B_NOCACHE; 3066 brelse(indirdep->ir_savebp); 3067 /* inline expand WORKLIST_REMOVE(wk); */ 3068 wk->wk_state &= ~ONWORKLIST; 3069 LIST_REMOVE(wk, wk_list); 3070 WORKITEM_FREE(indirdep, D_INDIRDEP); 3071 continue; 3072 } 3073 /* 3074 * Replace up-to-date version with safe version. 3075 */ 3076 indirdep->ir_saveddata = kmalloc(bp->b_bcount, 3077 M_INDIRDEP, 3078 M_SOFTDEP_FLAGS); 3079 ACQUIRE_LOCK(&lk); 3080 indirdep->ir_state &= ~ATTACHED; 3081 indirdep->ir_state |= UNDONE; 3082 bcopy(bp->b_data, indirdep->ir_saveddata, bp->b_bcount); 3083 bcopy(indirdep->ir_savebp->b_data, bp->b_data, 3084 bp->b_bcount); 3085 FREE_LOCK(&lk); 3086 continue; 3087 3088 case D_MKDIR: 3089 case D_BMSAFEMAP: 3090 case D_ALLOCDIRECT: 3091 case D_ALLOCINDIR: 3092 continue; 3093 3094 default: 3095 panic("handle_disk_io_initiation: Unexpected type %s", 3096 TYPENAME(wk->wk_type)); 3097 /* NOTREACHED */ 3098 } 3099 } 3100 FREE_LOCK(&lk); 3101 } 3102 3103 /* 3104 * Called from within the procedure above to deal with unsatisfied 3105 * allocation dependencies in a directory. The buffer must be locked, 3106 * thus, no I/O completion operations can occur while we are 3107 * manipulating its associated dependencies. 3108 */ 3109 static void 3110 initiate_write_filepage(struct pagedep *pagedep, struct buf *bp) 3111 { 3112 struct diradd *dap; 3113 struct direct *ep; 3114 int i; 3115 3116 if (pagedep->pd_state & IOSTARTED) { 3117 /* 3118 * This can only happen if there is a driver that does not 3119 * understand chaining. Here biodone will reissue the call 3120 * to strategy for the incomplete buffers. 
3121 */ 3122 kprintf("initiate_write_filepage: already started\n"); 3123 return; 3124 } 3125 pagedep->pd_state |= IOSTARTED; 3126 ACQUIRE_LOCK(&lk); 3127 for (i = 0; i < DAHASHSZ; i++) { 3128 LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) { 3129 ep = (struct direct *) 3130 ((char *)bp->b_data + dap->da_offset); 3131 if (ep->d_ino != dap->da_newinum) { 3132 panic("%s: dir inum %d != new %"PRId64, 3133 "initiate_write_filepage", 3134 ep->d_ino, dap->da_newinum); 3135 } 3136 if (dap->da_state & DIRCHG) 3137 ep->d_ino = dap->da_previous->dm_oldinum; 3138 else 3139 ep->d_ino = 0; 3140 dap->da_state &= ~ATTACHED; 3141 dap->da_state |= UNDONE; 3142 } 3143 } 3144 FREE_LOCK(&lk); 3145 } 3146 3147 /* 3148 * Called from within the procedure above to deal with unsatisfied 3149 * allocation dependencies in an inodeblock. The buffer must be 3150 * locked, thus, no I/O completion operations can occur while we 3151 * are manipulating its associated dependencies. 3152 * 3153 * Parameters: 3154 * bp: The inode block 3155 */ 3156 static void 3157 initiate_write_inodeblock(struct inodedep *inodedep, struct buf *bp) 3158 { 3159 struct allocdirect *adp, *lastadp; 3160 struct ufs1_dinode *dp; 3161 struct ufs1_dinode *sip; 3162 struct fs *fs; 3163 ufs_lbn_t prevlbn = 0; 3164 int i, deplist; 3165 3166 if (inodedep->id_state & IOSTARTED) 3167 panic("initiate_write_inodeblock: already started"); 3168 inodedep->id_state |= IOSTARTED; 3169 fs = inodedep->id_fs; 3170 dp = (struct ufs1_dinode *)bp->b_data + 3171 ino_to_fsbo(fs, inodedep->id_ino); 3172 /* 3173 * If the bitmap is not yet written, then the allocated 3174 * inode cannot be written to disk. 3175 */ 3176 if ((inodedep->id_state & DEPCOMPLETE) == 0) { 3177 if (inodedep->id_savedino != NULL) 3178 panic("initiate_write_inodeblock: already doing I/O"); 3179 sip = kmalloc(sizeof(struct ufs1_dinode), M_INODEDEP, 3180 M_SOFTDEP_FLAGS); 3181 inodedep->id_savedino = sip; 3182 *inodedep->id_savedino = *dp; 3183 bzero((caddr_t)dp, sizeof(struct ufs1_dinode)); 3184 dp->di_gen = inodedep->id_savedino->di_gen; 3185 return; 3186 } 3187 /* 3188 * If no dependencies, then there is nothing to roll back. 3189 */ 3190 inodedep->id_savedsize = dp->di_size; 3191 if (TAILQ_FIRST(&inodedep->id_inoupdt) == NULL) 3192 return; 3193 /* 3194 * Set the dependencies to busy. 3195 */ 3196 ACQUIRE_LOCK(&lk); 3197 for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; 3198 adp = TAILQ_NEXT(adp, ad_next)) { 3199 #ifdef DIAGNOSTIC 3200 if (deplist != 0 && prevlbn >= adp->ad_lbn) { 3201 panic("softdep_write_inodeblock: lbn order"); 3202 } 3203 prevlbn = adp->ad_lbn; 3204 if (adp->ad_lbn < UFS_NDADDR && 3205 dp->di_db[adp->ad_lbn] != adp->ad_newblkno) { 3206 panic("%s: direct pointer #%ld mismatch %d != %d", 3207 "softdep_write_inodeblock", adp->ad_lbn, 3208 dp->di_db[adp->ad_lbn], adp->ad_newblkno); 3209 } 3210 if (adp->ad_lbn >= UFS_NDADDR && 3211 dp->di_ib[adp->ad_lbn - UFS_NDADDR] != adp->ad_newblkno) { 3212 panic("%s: indirect pointer #%ld mismatch %d != %d", 3213 "softdep_write_inodeblock", 3214 adp->ad_lbn - UFS_NDADDR, 3215 dp->di_ib[adp->ad_lbn - UFS_NDADDR], 3216 adp->ad_newblkno); 3217 } 3218 deplist |= 1 << adp->ad_lbn; 3219 if ((adp->ad_state & ATTACHED) == 0) { 3220 panic("softdep_write_inodeblock: Unknown state 0x%x", 3221 adp->ad_state); 3222 } 3223 #endif /* DIAGNOSTIC */ 3224 adp->ad_state &= ~ATTACHED; 3225 adp->ad_state |= UNDONE; 3226 } 3227 /* 3228 * The on-disk inode cannot claim to be any larger than the last 3229 * fragment that has been written. 
Otherwise, the on-disk inode
3230  * might have fragments that were not the last block in the file
3231  * which would corrupt the filesystem.
3232  */
3233 	for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
3234 	     lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) {
3235 		if (adp->ad_lbn >= UFS_NDADDR)
3236 			break;
3237 		dp->di_db[adp->ad_lbn] = adp->ad_oldblkno;
3238 		/* keep going until hitting a rollback to a frag */
3239 		if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize)
3240 			continue;
3241 		dp->di_size = fs->fs_bsize * adp->ad_lbn + adp->ad_oldsize;
3242 		for (i = adp->ad_lbn + 1; i < UFS_NDADDR; i++) {
3243 #ifdef DIAGNOSTIC
3244 			if (dp->di_db[i] != 0 && (deplist & (1 << i)) == 0) {
3245 				panic("softdep_write_inodeblock: lost dep1");
3246 			}
3247 #endif /* DIAGNOSTIC */
3248 			dp->di_db[i] = 0;
3249 		}
3250 		for (i = 0; i < UFS_NIADDR; i++) {
3251 #ifdef DIAGNOSTIC
3252 			if (dp->di_ib[i] != 0 &&
3253 			    (deplist & ((1 << UFS_NDADDR) << i)) == 0) {
3254 				panic("softdep_write_inodeblock: lost dep2");
3255 			}
3256 #endif /* DIAGNOSTIC */
3257 			dp->di_ib[i] = 0;
3258 		}
3259 		FREE_LOCK(&lk);
3260 		return;
3261 	}
3262 	/*
3263 	 * If we have zero'ed out the last allocated block of the file,
3264 	 * roll back the size to the last currently allocated block.
3265 	 * We know that this last allocated block is full-sized, as
3266 	 * we already checked for fragments in the loop above.
3267 	 */
3268 	if (lastadp != NULL &&
3269 	    dp->di_size <= (lastadp->ad_lbn + 1) * fs->fs_bsize) {
3270 		for (i = lastadp->ad_lbn; i >= 0; i--)
3271 			if (dp->di_db[i] != 0)
3272 				break;
3273 		dp->di_size = (i + 1) * fs->fs_bsize;
3274 	}
3275 	/*
3276 	 * The only dependencies are for indirect blocks.
3277 	 *
3278 	 * The file size for indirect block additions is not guaranteed.
3279 	 * Such a guarantee would be non-trivial to achieve. The conventional
3280 	 * synchronous write implementation also does not make this guarantee.
3281 	 * Fsck should catch and fix discrepancies. Arguably, the file size
3282 	 * can be over-estimated without destroying integrity when the file
3283 	 * moves into the indirect blocks (i.e., is large). If we want to
3284 	 * postpone fsck, we are stuck with this argument.
3285 	 */
3286 	for (; adp; adp = TAILQ_NEXT(adp, ad_next))
3287 		dp->di_ib[adp->ad_lbn - UFS_NDADDR] = 0;
3288 	FREE_LOCK(&lk);
3289 }
3290 
3291 /*
3292  * This routine is called during the completion interrupt
3293  * service routine for a disk write (from the procedure called
3294  * by the device driver to inform the filesystem caches of
3295  * a request completion). It should be called early in this
3296  * procedure, before the block is made available to other
3297  * processes or other routines are called.
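 *
 * (Orientation for the dispatch below: each dependency hanging off
 * bp->b_dep is either retired outright or, when its handler reports
 * that rolled-back contents were restored and must be rewritten,
 * moved to a local "reattach" list and hooked back onto the buffer
 * before the lock is released.)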
3298 * 3299 * bioops callback - hold io_token 3300 * 3301 * Parameters: 3302 * bp: describes the completed disk write 3303 */ 3304 static void 3305 softdep_disk_write_complete(struct buf *bp) 3306 { 3307 struct worklist *wk; 3308 struct workhead reattach; 3309 struct newblk *newblk; 3310 struct allocindir *aip; 3311 struct allocdirect *adp; 3312 struct indirdep *indirdep; 3313 struct inodedep *inodedep; 3314 struct bmsafemap *bmsafemap; 3315 3316 ACQUIRE_LOCK(&lk); 3317 3318 LIST_INIT(&reattach); 3319 while ((wk = LIST_FIRST(&bp->b_dep)) != NULL) { 3320 WORKLIST_REMOVE(wk); 3321 switch (wk->wk_type) { 3322 3323 case D_PAGEDEP: 3324 if (handle_written_filepage(WK_PAGEDEP(wk), bp)) 3325 WORKLIST_INSERT(&reattach, wk); 3326 continue; 3327 3328 case D_INODEDEP: 3329 if (handle_written_inodeblock(WK_INODEDEP(wk), bp)) 3330 WORKLIST_INSERT(&reattach, wk); 3331 continue; 3332 3333 case D_BMSAFEMAP: 3334 bmsafemap = WK_BMSAFEMAP(wk); 3335 while ((newblk = LIST_FIRST(&bmsafemap->sm_newblkhd))) { 3336 newblk->nb_state |= DEPCOMPLETE; 3337 newblk->nb_bmsafemap = NULL; 3338 LIST_REMOVE(newblk, nb_deps); 3339 } 3340 while ((adp = 3341 LIST_FIRST(&bmsafemap->sm_allocdirecthd))) { 3342 adp->ad_state |= DEPCOMPLETE; 3343 adp->ad_buf = NULL; 3344 LIST_REMOVE(adp, ad_deps); 3345 handle_allocdirect_partdone(adp); 3346 } 3347 while ((aip = 3348 LIST_FIRST(&bmsafemap->sm_allocindirhd))) { 3349 aip->ai_state |= DEPCOMPLETE; 3350 aip->ai_buf = NULL; 3351 LIST_REMOVE(aip, ai_deps); 3352 handle_allocindir_partdone(aip); 3353 } 3354 while ((inodedep = 3355 LIST_FIRST(&bmsafemap->sm_inodedephd)) != NULL) { 3356 inodedep->id_state |= DEPCOMPLETE; 3357 LIST_REMOVE(inodedep, id_deps); 3358 inodedep->id_buf = NULL; 3359 } 3360 WORKITEM_FREE(bmsafemap, D_BMSAFEMAP); 3361 continue; 3362 3363 case D_MKDIR: 3364 handle_written_mkdir(WK_MKDIR(wk), MKDIR_BODY); 3365 continue; 3366 3367 case D_ALLOCDIRECT: 3368 adp = WK_ALLOCDIRECT(wk); 3369 adp->ad_state |= COMPLETE; 3370 handle_allocdirect_partdone(adp); 3371 continue; 3372 3373 case D_ALLOCINDIR: 3374 aip = WK_ALLOCINDIR(wk); 3375 aip->ai_state |= COMPLETE; 3376 handle_allocindir_partdone(aip); 3377 continue; 3378 3379 case D_INDIRDEP: 3380 indirdep = WK_INDIRDEP(wk); 3381 if (indirdep->ir_state & GOINGAWAY) { 3382 panic("disk_write_complete: indirdep gone"); 3383 } 3384 bcopy(indirdep->ir_saveddata, bp->b_data, bp->b_bcount); 3385 kfree(indirdep->ir_saveddata, M_INDIRDEP); 3386 indirdep->ir_saveddata = NULL; 3387 indirdep->ir_state &= ~UNDONE; 3388 indirdep->ir_state |= ATTACHED; 3389 while ((aip = LIST_FIRST(&indirdep->ir_donehd)) != NULL) { 3390 handle_allocindir_partdone(aip); 3391 if (aip == LIST_FIRST(&indirdep->ir_donehd)) { 3392 panic("disk_write_complete: not gone"); 3393 } 3394 } 3395 WORKLIST_INSERT(&reattach, wk); 3396 if ((bp->b_flags & B_DELWRI) == 0) 3397 stat_indir_blk_ptrs++; 3398 bdirty(bp); 3399 continue; 3400 3401 default: 3402 panic("handle_disk_write_complete: Unknown type %s", 3403 TYPENAME(wk->wk_type)); 3404 /* NOTREACHED */ 3405 } 3406 } 3407 /* 3408 * Reattach any requests that must be redone. 3409 */ 3410 while ((wk = LIST_FIRST(&reattach)) != NULL) { 3411 WORKLIST_REMOVE(wk); 3412 WORKLIST_INSERT_BP(bp, wk); 3413 } 3414 3415 FREE_LOCK(&lk); 3416 } 3417 3418 /* 3419 * Called from within softdep_disk_write_complete above. Note that 3420 * this routine is always called from interrupt level with further 3421 * splbio interrupts blocked. 
3422 * 3423 * Parameters: 3424 * adp: the completed allocdirect 3425 */ 3426 static void 3427 handle_allocdirect_partdone(struct allocdirect *adp) 3428 { 3429 struct allocdirect *listadp; 3430 struct inodedep *inodedep; 3431 long bsize; 3432 3433 if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE) 3434 return; 3435 if (adp->ad_buf != NULL) 3436 panic("handle_allocdirect_partdone: dangling dep"); 3437 3438 /* 3439 * The on-disk inode cannot claim to be any larger than the last 3440 * fragment that has been written. Otherwise, the on-disk inode 3441 * might have fragments that were not the last block in the file 3442 * which would corrupt the filesystem. Thus, we cannot free any 3443 * allocdirects after one whose ad_oldblkno claims a fragment as 3444 * these blocks must be rolled back to zero before writing the inode. 3445 * We check the currently active set of allocdirects in id_inoupdt. 3446 */ 3447 inodedep = adp->ad_inodedep; 3448 bsize = inodedep->id_fs->fs_bsize; 3449 TAILQ_FOREACH(listadp, &inodedep->id_inoupdt, ad_next) { 3450 /* found our block */ 3451 if (listadp == adp) 3452 break; 3453 /* continue if ad_oldlbn is not a fragment */ 3454 if (listadp->ad_oldsize == 0 || 3455 listadp->ad_oldsize == bsize) 3456 continue; 3457 /* hit a fragment */ 3458 return; 3459 } 3460 /* 3461 * If we have reached the end of the current list without 3462 * finding the just finished dependency, then it must be 3463 * on the future dependency list. Future dependencies cannot 3464 * be freed until they are moved to the current list. 3465 */ 3466 if (listadp == NULL) { 3467 #ifdef DEBUG 3468 TAILQ_FOREACH(listadp, &inodedep->id_newinoupdt, ad_next) 3469 /* found our block */ 3470 if (listadp == adp) 3471 break; 3472 if (listadp == NULL) 3473 panic("handle_allocdirect_partdone: lost dep"); 3474 #endif /* DEBUG */ 3475 return; 3476 } 3477 /* 3478 * If we have found the just finished dependency, then free 3479 * it along with anything that follows it that is complete. 3480 */ 3481 for (; adp; adp = listadp) { 3482 listadp = TAILQ_NEXT(adp, ad_next); 3483 if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE) 3484 return; 3485 free_allocdirect(&inodedep->id_inoupdt, adp, 1); 3486 } 3487 } 3488 3489 /* 3490 * Called from within softdep_disk_write_complete above. Note that 3491 * this routine is always called from interrupt level with further 3492 * splbio interrupts blocked. 3493 * 3494 * Parameters: 3495 * aip: the completed allocindir 3496 */ 3497 static void 3498 handle_allocindir_partdone(struct allocindir *aip) 3499 { 3500 struct indirdep *indirdep; 3501 3502 if ((aip->ai_state & ALLCOMPLETE) != ALLCOMPLETE) 3503 return; 3504 if (aip->ai_buf != NULL) 3505 panic("handle_allocindir_partdone: dangling dependency"); 3506 3507 indirdep = aip->ai_indirdep; 3508 if (indirdep->ir_state & UNDONE) { 3509 LIST_REMOVE(aip, ai_next); 3510 LIST_INSERT_HEAD(&indirdep->ir_donehd, aip, ai_next); 3511 return; 3512 } 3513 ((ufs_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] = 3514 aip->ai_newblkno; 3515 LIST_REMOVE(aip, ai_next); 3516 if (aip->ai_freefrag != NULL) 3517 add_to_worklist(&aip->ai_freefrag->ff_list); 3518 WORKITEM_FREE(aip, D_ALLOCINDIR); 3519 } 3520 3521 /* 3522 * Called from within softdep_disk_write_complete above to restore 3523 * in-memory inode block contents to their most up-to-date state. Note 3524 * that this routine is always called from interrupt level with further 3525 * splbio interrupts blocked. 
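 *
 * Returns non-zero if rollbacks were applied and the buffer must be
 * redirtied and reattached to the work list; returns zero when the
 * dependencies recorded on this buffer have been fully processed.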
3526 *
3527 * Parameters:
3528 * bp: buffer containing the inode block
3529 */
3530 static int
3531 handle_written_inodeblock(struct inodedep *inodedep, struct buf *bp)
3532 {
3533 struct worklist *wk, *filefree;
3534 struct allocdirect *adp, *nextadp;
3535 struct ufs1_dinode *dp;
3536 int hadchanges;
3537
3538 if ((inodedep->id_state & IOSTARTED) == 0)
3539 panic("handle_written_inodeblock: not started");
3540
3541 inodedep->id_state &= ~IOSTARTED;
3542 dp = (struct ufs1_dinode *)bp->b_data +
3543 ino_to_fsbo(inodedep->id_fs, inodedep->id_ino);
3544 /*
3545 * If we had to roll back the inode allocation because of
3546 * bitmaps being incomplete, then simply restore it.
3547 * Keep the block dirty so that it will not be reclaimed until
3548 * all associated dependencies have been cleared and the
3549 * corresponding updates written to disk.
3550 */
3551 if (inodedep->id_savedino != NULL) {
3552 *dp = *inodedep->id_savedino;
3553 kfree(inodedep->id_savedino, M_INODEDEP);
3554 inodedep->id_savedino = NULL;
3555 if ((bp->b_flags & B_DELWRI) == 0)
3556 stat_inode_bitmap++;
3557 bdirty(bp);
3558 return (1);
3559 }
3560 inodedep->id_state |= COMPLETE;
3561 /*
3562 * Roll forward anything that had to be rolled back before
3563 * the inode could be updated.
3564 */
3565 hadchanges = 0;
3566 for (adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; adp = nextadp) {
3567 nextadp = TAILQ_NEXT(adp, ad_next);
3568 if (adp->ad_state & ATTACHED)
3569 panic("handle_written_inodeblock: new entry");
3570
3571 if (adp->ad_lbn < UFS_NDADDR) {
3572 if (dp->di_db[adp->ad_lbn] != adp->ad_oldblkno) {
3573 panic("%s: %s #%ld mismatch %d != %d",
3574 "handle_written_inodeblock",
3575 "direct pointer", adp->ad_lbn,
3576 dp->di_db[adp->ad_lbn], adp->ad_oldblkno);
3577 }
3578 dp->di_db[adp->ad_lbn] = adp->ad_newblkno;
3579 } else {
3580 if (dp->di_ib[adp->ad_lbn - UFS_NDADDR] != 0) {
3581 panic("%s: %s #%ld allocated as %d",
3582 "handle_written_inodeblock",
3583 "indirect pointer",
3584 adp->ad_lbn - UFS_NDADDR,
3585 dp->di_ib[adp->ad_lbn - UFS_NDADDR]);
3586 }
3587 dp->di_ib[adp->ad_lbn - UFS_NDADDR] = adp->ad_newblkno;
3588 }
3589 adp->ad_state &= ~UNDONE;
3590 adp->ad_state |= ATTACHED;
3591 hadchanges = 1;
3592 }
3593 if (hadchanges && (bp->b_flags & B_DELWRI) == 0)
3594 stat_direct_blk_ptrs++;
3595 /*
3596 * Reset the file size to its most up-to-date value.
3597 */
3598 if (inodedep->id_savedsize == -1) {
3599 panic("handle_written_inodeblock: bad size");
3600 }
3601 if (dp->di_size != inodedep->id_savedsize) {
3602 dp->di_size = inodedep->id_savedsize;
3603 hadchanges = 1;
3604 }
3605 inodedep->id_savedsize = -1;
3606 /*
3607 * If there were any rollbacks in the inode block, then it must be
3608 * marked dirty so that it will eventually get written back in
3609 * its correct form.
3610 */
3611 if (hadchanges)
3612 bdirty(bp);
3613 /*
3614 * Process any allocdirects that completed during the update.
3615 */
3616 if ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != NULL)
3617 handle_allocdirect_partdone(adp);
3618 /*
3619 * Process deallocations that were held pending until the
3620 * inode had been written to disk. Freeing of the inode
3621 * is delayed until after all blocks have been freed to
3622 * avoid creation of new <vfsid, inum, lbn> triples
3623 * before the old ones have been deleted.
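 * Deferring the freefile item until all other work has been queued
 * (see the filefree handling below) guarantees that the inode is not
 * returned to its free map, and so cannot be reallocated, before all
 * of its old blocks have been queued for release.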
3624 */ 3625 filefree = NULL; 3626 while ((wk = LIST_FIRST(&inodedep->id_bufwait)) != NULL) { 3627 WORKLIST_REMOVE(wk); 3628 switch (wk->wk_type) { 3629 3630 case D_FREEFILE: 3631 /* 3632 * We defer adding filefree to the worklist until 3633 * all other additions have been made to ensure 3634 * that it will be done after all the old blocks 3635 * have been freed. 3636 */ 3637 if (filefree != NULL) { 3638 panic("handle_written_inodeblock: filefree"); 3639 } 3640 filefree = wk; 3641 continue; 3642 3643 case D_MKDIR: 3644 handle_written_mkdir(WK_MKDIR(wk), MKDIR_PARENT); 3645 continue; 3646 3647 case D_DIRADD: 3648 diradd_inode_written(WK_DIRADD(wk), inodedep); 3649 continue; 3650 3651 case D_FREEBLKS: 3652 wk->wk_state |= COMPLETE; 3653 if ((wk->wk_state & ALLCOMPLETE) != ALLCOMPLETE) 3654 continue; 3655 /* -- fall through -- */ 3656 case D_FREEFRAG: 3657 case D_DIRREM: 3658 add_to_worklist(wk); 3659 continue; 3660 3661 default: 3662 panic("handle_written_inodeblock: Unknown type %s", 3663 TYPENAME(wk->wk_type)); 3664 /* NOTREACHED */ 3665 } 3666 } 3667 if (filefree != NULL) { 3668 if (free_inodedep(inodedep) == 0) { 3669 panic("handle_written_inodeblock: live inodedep"); 3670 } 3671 add_to_worklist(filefree); 3672 return (0); 3673 } 3674 3675 /* 3676 * If no outstanding dependencies, free it. 3677 */ 3678 if (free_inodedep(inodedep) || TAILQ_FIRST(&inodedep->id_inoupdt) == NULL) 3679 return (0); 3680 return (hadchanges); 3681 } 3682 3683 /* 3684 * Process a diradd entry after its dependent inode has been written. 3685 * This routine must be called with splbio interrupts blocked. 3686 */ 3687 static void 3688 diradd_inode_written(struct diradd *dap, struct inodedep *inodedep) 3689 { 3690 struct pagedep *pagedep; 3691 3692 dap->da_state |= COMPLETE; 3693 if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) { 3694 if (dap->da_state & DIRCHG) 3695 pagedep = dap->da_previous->dm_pagedep; 3696 else 3697 pagedep = dap->da_pagedep; 3698 LIST_REMOVE(dap, da_pdlist); 3699 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist); 3700 } 3701 WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list); 3702 } 3703 3704 /* 3705 * Handle the completion of a mkdir dependency. 3706 */ 3707 static void 3708 handle_written_mkdir(struct mkdir *mkdir, int type) 3709 { 3710 struct diradd *dap; 3711 struct pagedep *pagedep; 3712 3713 if (mkdir->md_state != type) { 3714 panic("handle_written_mkdir: bad type"); 3715 } 3716 dap = mkdir->md_diradd; 3717 dap->da_state &= ~type; 3718 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) == 0) 3719 dap->da_state |= DEPCOMPLETE; 3720 if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) { 3721 if (dap->da_state & DIRCHG) 3722 pagedep = dap->da_previous->dm_pagedep; 3723 else 3724 pagedep = dap->da_pagedep; 3725 LIST_REMOVE(dap, da_pdlist); 3726 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist); 3727 } 3728 LIST_REMOVE(mkdir, md_mkdirs); 3729 WORKITEM_FREE(mkdir, D_MKDIR); 3730 } 3731 3732 /* 3733 * Called from within softdep_disk_write_complete above. 3734 * A write operation was just completed. Removed inodes can 3735 * now be freed and associated block pointers may be committed. 3736 * Note that this routine is always called from interrupt level 3737 * with further splbio interrupts blocked. 
3738 *
3739 * Parameters:
3740 * bp: buffer containing the written page
3741 */
3742 static int
3743 handle_written_filepage(struct pagedep *pagedep, struct buf *bp)
3744 {
3745 struct dirrem *dirrem;
3746 struct diradd *dap, *nextdap;
3747 struct direct *ep;
3748 int i, chgs;
3749
3750 if ((pagedep->pd_state & IOSTARTED) == 0) {
3751 panic("handle_written_filepage: not started");
3752 }
3753 pagedep->pd_state &= ~IOSTARTED;
3754 /*
3755 * Process any directory removals that have been committed.
3756 */
3757 while ((dirrem = LIST_FIRST(&pagedep->pd_dirremhd)) != NULL) {
3758 LIST_REMOVE(dirrem, dm_next);
3759 dirrem->dm_dirinum = pagedep->pd_ino;
3760 add_to_worklist(&dirrem->dm_list);
3761 }
3762 /*
3763 * Free any directory additions that have been committed.
3764 */
3765 while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL)
3766 free_diradd(dap);
3767 /*
3768 * Uncommitted directory entries must be restored.
3769 */
3770 for (chgs = 0, i = 0; i < DAHASHSZ; i++) {
3771 for (dap = LIST_FIRST(&pagedep->pd_diraddhd[i]); dap;
3772 dap = nextdap) {
3773 nextdap = LIST_NEXT(dap, da_pdlist);
3774 if (dap->da_state & ATTACHED) {
3775 panic("handle_written_filepage: attached");
3776 }
3777 ep = (struct direct *)
3778 ((char *)bp->b_data + dap->da_offset);
3779 ep->d_ino = dap->da_newinum;
3780 dap->da_state &= ~UNDONE;
3781 dap->da_state |= ATTACHED;
3782 chgs = 1;
3783 /*
3784 * If the inode referenced by the directory has
3785 * been written out, then the dependency can be
3786 * moved to the pending list.
3787 */
3788 if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) {
3789 LIST_REMOVE(dap, da_pdlist);
3790 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap,
3791 da_pdlist);
3792 }
3793 }
3794 }
3795 /*
3796 * If there were any rollbacks in the directory, then it must be
3797 * marked dirty so that it will eventually get written back in
3798 * its correct form.
3799 */
3800 if (chgs) {
3801 if ((bp->b_flags & B_DELWRI) == 0)
3802 stat_dir_entry++;
3803 bdirty(bp);
3804 }
3805 /*
3806 * If no dependencies remain, the pagedep will be freed.
3807 * Otherwise it will remain to update the page before it
3808 * is written back to disk.
3809 */
3810 if (LIST_FIRST(&pagedep->pd_pendinghd) == NULL) {
3811 for (i = 0; i < DAHASHSZ; i++)
3812 if (LIST_FIRST(&pagedep->pd_diraddhd[i]) != NULL)
3813 break;
3814 if (i == DAHASHSZ) {
3815 LIST_REMOVE(pagedep, pd_hash);
3816 WORKITEM_FREE(pagedep, D_PAGEDEP);
3817 return (0);
3818 }
3819 }
3820 return (1);
3821 }
3822
3823 /*
3824 * Writing back in-core inode structures.
3825 *
3826 * The filesystem only accesses an inode's contents when it occupies an
3827 * "in-core" inode structure. These "in-core" structures are separate from
3828 * the page frames used to cache inode blocks. Only the latter are
3829 * transferred to/from the disk. So, when the updated contents of the
3830 * "in-core" inode structure are copied to the corresponding in-memory inode
3831 * block, the dependencies are also transferred. The following procedure is
3832 * called when copying a dirty "in-core" inode to a cached inode block.
3833 */
3834
3835 /*
3836 * Called when an inode is loaded from disk. If the effective link count
3837 * differed from the actual link count when it was last flushed, then we
3838 * need to ensure that the correct effective link count is put back.
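 *
 * The delta is kept in the inodedep as id_nlinkdelta while the inode
 * is out of core, so the recovery below is simply
 * i_effnlink = i_nlink - id_nlinkdelta.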
3839 *
3840 * Parameters:
3841 * ip: the "in_core" copy of the inode
3842 */
3843 void
3844 softdep_load_inodeblock(struct inode *ip)
3845 {
3846 struct inodedep *inodedep;
3847
3848 /*
3849 * Check for alternate nlink count.
3850 */
3851 ip->i_effnlink = ip->i_nlink;
3852 ACQUIRE_LOCK(&lk);
3853 if (inodedep_lookup(ip->i_fs, ip->i_number, 0, &inodedep) == 0) {
3854 FREE_LOCK(&lk);
3855 return;
3856 }
3857 ip->i_effnlink -= inodedep->id_nlinkdelta;
3858 FREE_LOCK(&lk);
3859 }
3860
3861 /*
3862 * This routine is called just before the "in-core" inode
3863 * information is to be copied to the in-memory inode block.
3864 * Recall that an inode block contains several inodes. If
3865 * the force flag is set, then the dependencies will be
3866 * cleared so that the update can always be made. Note that
3867 * the buffer is locked when this routine is called, so we
3868 * will never be in the middle of writing the inode block
3869 * to disk.
3870 *
3871 * Parameters:
3872 * ip: the "in_core" copy of the inode
3873 * bp: the buffer containing the inode block
3874 * waitfor: nonzero => update must be allowed
3875 */
3876 void
3877 softdep_update_inodeblock(struct inode *ip, struct buf *bp,
3878 int waitfor)
3879 {
3880 struct inodedep *inodedep;
3881 struct worklist *wk;
3882 struct buf *ibp;
3883 int error, gotit;
3884
3885 /*
3886 * If the effective link count is not equal to the actual link
3887 * count, then we must track the difference in an inodedep while
3888 * the inode is (potentially) tossed out of the cache. Otherwise,
3889 * if there is no existing inodedep, then there are no dependencies
3890 * to track.
3891 */
3892 ACQUIRE_LOCK(&lk);
3893 if (inodedep_lookup(ip->i_fs, ip->i_number, 0, &inodedep) == 0) {
3894 FREE_LOCK(&lk);
3895 if (ip->i_effnlink != ip->i_nlink)
3896 panic("softdep_update_inodeblock: bad link count");
3897 return;
3898 }
3899 if (inodedep->id_nlinkdelta != ip->i_nlink - ip->i_effnlink) {
3900 panic("softdep_update_inodeblock: bad delta");
3901 }
3902 /*
3903 * Changes have been initiated. Anything depending on these
3904 * changes cannot occur until this inode has been written.
3905 */
3906 inodedep->id_state &= ~COMPLETE;
3907 if ((inodedep->id_state & ONWORKLIST) == 0)
3908 WORKLIST_INSERT_BP(bp, &inodedep->id_list);
3909 /*
3910 * Any new dependencies associated with the incore inode must
3911 * now be moved to the list associated with the buffer holding
3912 * the in-memory copy of the inode. Once merged, process any
3913 * allocdirects that are completed by the merger.
3914 */
3915 merge_inode_lists(inodedep);
3916 if (TAILQ_FIRST(&inodedep->id_inoupdt) != NULL)
3917 handle_allocdirect_partdone(TAILQ_FIRST(&inodedep->id_inoupdt));
3918 /*
3919 * Now that the inode has been pushed into the buffer, the
3920 * operations dependent on the inode being written to disk
3921 * can be moved to the id_bufwait so that they will be
3922 * processed when the buffer I/O completes.
3923 */
3924 while ((wk = LIST_FIRST(&inodedep->id_inowait)) != NULL) {
3925 WORKLIST_REMOVE(wk);
3926 WORKLIST_INSERT(&inodedep->id_bufwait, wk);
3927 }
3928 /*
3929 * Newly allocated inodes cannot be written until the bitmap
3930 * that allocates them has been written (indicated by
3931 * DEPCOMPLETE being set in id_state). If we are doing a
3932 * forced sync (e.g., an fsync on a file), we force the bitmap
3933 * to be written so that the update can be done.
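 *
 * Note that getdirtybuf() with MNT_WAIT may temporarily drop the
 * softdep lock; if it fails, the inodedep must be looked up again
 * before retrying, since it may have been resolved and freed while
 * the lock was released.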
3934 */ 3935 if (waitfor == 0) { 3936 FREE_LOCK(&lk); 3937 return; 3938 } 3939 retry: 3940 if ((inodedep->id_state & DEPCOMPLETE) != 0) { 3941 FREE_LOCK(&lk); 3942 return; 3943 } 3944 gotit = getdirtybuf(&inodedep->id_buf, MNT_WAIT); 3945 if (gotit == 0) { 3946 if (inodedep_lookup(ip->i_fs, ip->i_number, 0, &inodedep) != 0) 3947 goto retry; 3948 FREE_LOCK(&lk); 3949 return; 3950 } 3951 ibp = inodedep->id_buf; 3952 FREE_LOCK(&lk); 3953 if ((error = bwrite(ibp)) != 0) 3954 softdep_error("softdep_update_inodeblock: bwrite", error); 3955 } 3956 3957 /* 3958 * Merge the new inode dependency list (id_newinoupdt) into the old 3959 * inode dependency list (id_inoupdt). This routine must be called 3960 * with splbio interrupts blocked. 3961 */ 3962 static void 3963 merge_inode_lists(struct inodedep *inodedep) 3964 { 3965 struct allocdirect *listadp, *newadp; 3966 3967 newadp = TAILQ_FIRST(&inodedep->id_newinoupdt); 3968 for (listadp = TAILQ_FIRST(&inodedep->id_inoupdt); listadp && newadp;) { 3969 if (listadp->ad_lbn < newadp->ad_lbn) { 3970 listadp = TAILQ_NEXT(listadp, ad_next); 3971 continue; 3972 } 3973 TAILQ_REMOVE(&inodedep->id_newinoupdt, newadp, ad_next); 3974 TAILQ_INSERT_BEFORE(listadp, newadp, ad_next); 3975 if (listadp->ad_lbn == newadp->ad_lbn) { 3976 allocdirect_merge(&inodedep->id_inoupdt, newadp, 3977 listadp); 3978 listadp = newadp; 3979 } 3980 newadp = TAILQ_FIRST(&inodedep->id_newinoupdt); 3981 } 3982 while ((newadp = TAILQ_FIRST(&inodedep->id_newinoupdt)) != NULL) { 3983 TAILQ_REMOVE(&inodedep->id_newinoupdt, newadp, ad_next); 3984 TAILQ_INSERT_TAIL(&inodedep->id_inoupdt, newadp, ad_next); 3985 } 3986 } 3987 3988 /* 3989 * If we are doing an fsync, then we must ensure that any directory 3990 * entries for the inode have been written after the inode gets to disk. 3991 * 3992 * bioops callback - hold io_token 3993 * 3994 * Parameters: 3995 * vp: the "in_core" copy of the inode 3996 */ 3997 static int 3998 softdep_fsync(struct vnode *vp) 3999 { 4000 struct inodedep *inodedep; 4001 struct pagedep *pagedep; 4002 struct worklist *wk; 4003 struct diradd *dap; 4004 struct mount *mnt; 4005 struct vnode *pvp; 4006 struct inode *ip; 4007 struct buf *bp; 4008 struct fs *fs; 4009 int error, flushparent; 4010 ino_t parentino; 4011 ufs_lbn_t lbn; 4012 4013 /* 4014 * Move check from original kernel code, possibly not needed any 4015 * more with the per-mount bioops. 4016 */ 4017 if ((vp->v_mount->mnt_flag & MNT_SOFTDEP) == 0) 4018 return (0); 4019 4020 ip = VTOI(vp); 4021 fs = ip->i_fs; 4022 ACQUIRE_LOCK(&lk); 4023 if (inodedep_lookup(fs, ip->i_number, 0, &inodedep) == 0) { 4024 FREE_LOCK(&lk); 4025 return (0); 4026 } 4027 if (LIST_FIRST(&inodedep->id_inowait) != NULL || 4028 LIST_FIRST(&inodedep->id_bufwait) != NULL || 4029 TAILQ_FIRST(&inodedep->id_inoupdt) != NULL || 4030 TAILQ_FIRST(&inodedep->id_newinoupdt) != NULL) { 4031 panic("softdep_fsync: pending ops"); 4032 } 4033 for (error = 0, flushparent = 0; ; ) { 4034 if ((wk = LIST_FIRST(&inodedep->id_pendinghd)) == NULL) 4035 break; 4036 if (wk->wk_type != D_DIRADD) { 4037 panic("softdep_fsync: Unexpected type %s", 4038 TYPENAME(wk->wk_type)); 4039 } 4040 dap = WK_DIRADD(wk); 4041 /* 4042 * Flush our parent if this directory entry 4043 * has a MKDIR_PARENT dependency. 
4044 */ 4045 if (dap->da_state & DIRCHG) 4046 pagedep = dap->da_previous->dm_pagedep; 4047 else 4048 pagedep = dap->da_pagedep; 4049 mnt = pagedep->pd_mnt; 4050 parentino = pagedep->pd_ino; 4051 lbn = pagedep->pd_lbn; 4052 if ((dap->da_state & (MKDIR_BODY | COMPLETE)) != COMPLETE) { 4053 panic("softdep_fsync: dirty"); 4054 } 4055 flushparent = dap->da_state & MKDIR_PARENT; 4056 /* 4057 * If we are being fsync'ed as part of vgone'ing this vnode, 4058 * then we will not be able to release and recover the 4059 * vnode below, so we just have to give up on writing its 4060 * directory entry out. It will eventually be written, just 4061 * not now, but then the user was not asking to have it 4062 * written, so we are not breaking any promises. 4063 */ 4064 if (vp->v_flag & VRECLAIMED) 4065 break; 4066 /* 4067 * We prevent deadlock by always fetching inodes from the 4068 * root, moving down the directory tree. Thus, when fetching 4069 * our parent directory, we must unlock ourselves before 4070 * requesting the lock on our parent. See the comment in 4071 * ufs_lookup for details on possible races. 4072 */ 4073 FREE_LOCK(&lk); 4074 vn_unlock(vp); 4075 error = VFS_VGET(mnt, NULL, parentino, &pvp); 4076 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 4077 if (error != 0) { 4078 return (error); 4079 } 4080 if (flushparent) { 4081 if ((error = ffs_update(pvp, 1)) != 0) { 4082 vput(pvp); 4083 return (error); 4084 } 4085 } 4086 /* 4087 * Flush directory page containing the inode's name. 4088 */ 4089 error = bread(pvp, lblktodoff(fs, lbn), blksize(fs, VTOI(pvp), lbn), &bp); 4090 if (error == 0) 4091 error = bwrite(bp); 4092 vput(pvp); 4093 if (error != 0) { 4094 return (error); 4095 } 4096 ACQUIRE_LOCK(&lk); 4097 if (inodedep_lookup(fs, ip->i_number, 0, &inodedep) == 0) 4098 break; 4099 } 4100 FREE_LOCK(&lk); 4101 return (0); 4102 } 4103 4104 /* 4105 * Flush all the dirty bitmaps associated with the block device 4106 * before flushing the rest of the dirty blocks so as to reduce 4107 * the number of dependencies that will have to be rolled back. 4108 */ 4109 static int softdep_fsync_mountdev_bp(struct buf *bp, void *data); 4110 4111 void 4112 softdep_fsync_mountdev(struct vnode *vp) 4113 { 4114 if (!vn_isdisk(vp, NULL)) 4115 panic("softdep_fsync_mountdev: vnode not a disk"); 4116 ACQUIRE_LOCK(&lk); 4117 lwkt_gettoken(&vp->v_token); 4118 RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL, 4119 softdep_fsync_mountdev_bp, vp); 4120 lwkt_reltoken(&vp->v_token); 4121 drain_output(vp, 1); 4122 FREE_LOCK(&lk); 4123 } 4124 4125 static int 4126 softdep_fsync_mountdev_bp(struct buf *bp, void *data) 4127 { 4128 struct worklist *wk; 4129 struct vnode *vp = data; 4130 4131 /* 4132 * If it is already scheduled, skip to the next buffer. 4133 */ 4134 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) 4135 return(0); 4136 if (bp->b_vp != vp || (bp->b_flags & B_DELWRI) == 0) { 4137 BUF_UNLOCK(bp); 4138 kprintf("softdep_fsync_mountdev_bp: warning, buffer %p ripped out from under vnode %p\n", bp, vp); 4139 return(0); 4140 } 4141 /* 4142 * We are only interested in bitmaps with outstanding 4143 * dependencies. 4144 */ 4145 if ((wk = LIST_FIRST(&bp->b_dep)) == NULL || 4146 wk->wk_type != D_BMSAFEMAP) { 4147 BUF_UNLOCK(bp); 4148 return(0); 4149 } 4150 bremfree(bp); 4151 FREE_LOCK(&lk); 4152 (void) bawrite(bp); 4153 ACQUIRE_LOCK(&lk); 4154 return(0); 4155 } 4156 4157 /* 4158 * This routine is called when we are trying to synchronously flush a 4159 * file. 
This routine must eliminate any filesystem metadata dependencies 4160 * so that the syncing routine can succeed by pushing the dirty blocks 4161 * associated with the file. If any I/O errors occur, they are returned. 4162 */ 4163 struct softdep_sync_metadata_info { 4164 struct vnode *vp; 4165 int waitfor; 4166 }; 4167 4168 static int softdep_sync_metadata_bp(struct buf *bp, void *data); 4169 4170 int 4171 softdep_sync_metadata(struct vnode *vp, struct thread *td) 4172 { 4173 struct softdep_sync_metadata_info info; 4174 int error, waitfor; 4175 4176 /* 4177 * Check whether this vnode is involved in a filesystem 4178 * that is doing soft dependency processing. 4179 */ 4180 if (!vn_isdisk(vp, NULL)) { 4181 if (!DOINGSOFTDEP(vp)) 4182 return (0); 4183 } else 4184 if (vp->v_rdev->si_mountpoint == NULL || 4185 (vp->v_rdev->si_mountpoint->mnt_flag & MNT_SOFTDEP) == 0) 4186 return (0); 4187 /* 4188 * Ensure that any direct block dependencies have been cleared. 4189 */ 4190 ACQUIRE_LOCK(&lk); 4191 if ((error = flush_inodedep_deps(VTOI(vp)->i_fs, VTOI(vp)->i_number))) { 4192 FREE_LOCK(&lk); 4193 return (error); 4194 } 4195 /* 4196 * For most files, the only metadata dependencies are the 4197 * cylinder group maps that allocate their inode or blocks. 4198 * The block allocation dependencies can be found by traversing 4199 * the dependency lists for any buffers that remain on their 4200 * dirty buffer list. The inode allocation dependency will 4201 * be resolved when the inode is updated with MNT_WAIT. 4202 * This work is done in two passes. The first pass grabs most 4203 * of the buffers and begins asynchronously writing them. The 4204 * only way to wait for these asynchronous writes is to sleep 4205 * on the filesystem vnode which may stay busy for a long time 4206 * if the filesystem is active. So, instead, we make a second 4207 * pass over the dependencies blocking on each write. In the 4208 * usual case we will be blocking against a write that we 4209 * initiated, so when it is done the dependency will have been 4210 * resolved. Thus the second pass is expected to end quickly. 4211 */ 4212 waitfor = MNT_NOWAIT; 4213 top: 4214 /* 4215 * We must wait for any I/O in progress to finish so that 4216 * all potential buffers on the dirty list will be visible. 4217 */ 4218 drain_output(vp, 1); 4219 4220 info.vp = vp; 4221 info.waitfor = waitfor; 4222 lwkt_gettoken(&vp->v_token); 4223 error = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL, 4224 softdep_sync_metadata_bp, &info); 4225 lwkt_reltoken(&vp->v_token); 4226 if (error < 0) { 4227 FREE_LOCK(&lk); 4228 return(-error); /* error code */ 4229 } 4230 4231 /* 4232 * The brief unlock is to allow any pent up dependency 4233 * processing to be done. Then proceed with the second pass. 4234 */ 4235 if (waitfor & MNT_NOWAIT) { 4236 waitfor = MNT_WAIT; 4237 FREE_LOCK(&lk); 4238 ACQUIRE_LOCK(&lk); 4239 goto top; 4240 } 4241 4242 /* 4243 * If we have managed to get rid of all the dirty buffers, 4244 * then we are done. For certain directories and block 4245 * devices, we may need to do further work. 4246 * 4247 * We must wait for any I/O in progress to finish so that 4248 * all potential buffers on the dirty list will be visible. 
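 * Note that drain_output() releases and reacquires the softdep lock
 * around each wait, so the set of dirty buffers can change while we
 * sleep and must be re-scanned afterwards.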
4249 */ 4250 drain_output(vp, 1); 4251 if (RB_EMPTY(&vp->v_rbdirty_tree)) { 4252 FREE_LOCK(&lk); 4253 return (0); 4254 } 4255 4256 FREE_LOCK(&lk); 4257 /* 4258 * If we are trying to sync a block device, some of its buffers may 4259 * contain metadata that cannot be written until the contents of some 4260 * partially written files have been written to disk. The only easy 4261 * way to accomplish this is to sync the entire filesystem (luckily 4262 * this happens rarely). 4263 */ 4264 if (vn_isdisk(vp, NULL) && 4265 vp->v_rdev && 4266 vp->v_rdev->si_mountpoint && !vn_islocked(vp) && 4267 (error = VFS_SYNC(vp->v_rdev->si_mountpoint, MNT_WAIT)) != 0) 4268 return (error); 4269 return (0); 4270 } 4271 4272 static int 4273 softdep_sync_metadata_bp(struct buf *bp, void *data) 4274 { 4275 struct softdep_sync_metadata_info *info = data; 4276 struct pagedep *pagedep; 4277 struct allocdirect *adp; 4278 struct allocindir *aip; 4279 struct worklist *wk; 4280 struct buf *nbp; 4281 int error; 4282 int i; 4283 4284 if (getdirtybuf(&bp, MNT_WAIT) == 0) { 4285 kprintf("softdep_sync_metadata_bp(1): caught buf %p going away\n", bp); 4286 return (1); 4287 } 4288 if (bp->b_vp != info->vp || (bp->b_flags & B_DELWRI) == 0) { 4289 kprintf("softdep_sync_metadata_bp(2): caught buf %p going away vp %p\n", bp, info->vp); 4290 BUF_UNLOCK(bp); 4291 return(1); 4292 } 4293 4294 /* 4295 * As we hold the buffer locked, none of its dependencies 4296 * will disappear. 4297 */ 4298 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 4299 switch (wk->wk_type) { 4300 4301 case D_ALLOCDIRECT: 4302 adp = WK_ALLOCDIRECT(wk); 4303 if (adp->ad_state & DEPCOMPLETE) 4304 break; 4305 nbp = adp->ad_buf; 4306 if (getdirtybuf(&nbp, info->waitfor) == 0) 4307 break; 4308 FREE_LOCK(&lk); 4309 if (info->waitfor & MNT_NOWAIT) { 4310 bawrite(nbp); 4311 } else if ((error = bwrite(nbp)) != 0) { 4312 bawrite(bp); 4313 ACQUIRE_LOCK(&lk); 4314 return (-error); 4315 } 4316 ACQUIRE_LOCK(&lk); 4317 break; 4318 4319 case D_ALLOCINDIR: 4320 aip = WK_ALLOCINDIR(wk); 4321 if (aip->ai_state & DEPCOMPLETE) 4322 break; 4323 nbp = aip->ai_buf; 4324 if (getdirtybuf(&nbp, info->waitfor) == 0) 4325 break; 4326 FREE_LOCK(&lk); 4327 if (info->waitfor & MNT_NOWAIT) { 4328 bawrite(nbp); 4329 } else if ((error = bwrite(nbp)) != 0) { 4330 bawrite(bp); 4331 ACQUIRE_LOCK(&lk); 4332 return (-error); 4333 } 4334 ACQUIRE_LOCK(&lk); 4335 break; 4336 4337 case D_INDIRDEP: 4338 restart: 4339 4340 LIST_FOREACH(aip, &WK_INDIRDEP(wk)->ir_deplisthd, ai_next) { 4341 if (aip->ai_state & DEPCOMPLETE) 4342 continue; 4343 nbp = aip->ai_buf; 4344 if (getdirtybuf(&nbp, MNT_WAIT) == 0) 4345 goto restart; 4346 FREE_LOCK(&lk); 4347 if ((error = bwrite(nbp)) != 0) { 4348 bawrite(bp); 4349 ACQUIRE_LOCK(&lk); 4350 return (-error); 4351 } 4352 ACQUIRE_LOCK(&lk); 4353 goto restart; 4354 } 4355 break; 4356 4357 case D_INODEDEP: 4358 if ((error = flush_inodedep_deps(WK_INODEDEP(wk)->id_fs, 4359 WK_INODEDEP(wk)->id_ino)) != 0) { 4360 FREE_LOCK(&lk); 4361 bawrite(bp); 4362 ACQUIRE_LOCK(&lk); 4363 return (-error); 4364 } 4365 break; 4366 4367 case D_PAGEDEP: 4368 /* 4369 * We are trying to sync a directory that may 4370 * have dependencies on both its own metadata 4371 * and/or dependencies on the inodes of any 4372 * recently allocated files. We walk its diradd 4373 * lists pushing out the associated inode. 
4374 */ 4375 pagedep = WK_PAGEDEP(wk); 4376 for (i = 0; i < DAHASHSZ; i++) { 4377 if (LIST_FIRST(&pagedep->pd_diraddhd[i]) == NULL) 4378 continue; 4379 if ((error = 4380 flush_pagedep_deps(info->vp, 4381 pagedep->pd_mnt, 4382 &pagedep->pd_diraddhd[i]))) { 4383 FREE_LOCK(&lk); 4384 bawrite(bp); 4385 ACQUIRE_LOCK(&lk); 4386 return (-error); 4387 } 4388 } 4389 break; 4390 4391 case D_MKDIR: 4392 /* 4393 * This case should never happen if the vnode has 4394 * been properly sync'ed. However, if this function 4395 * is used at a place where the vnode has not yet 4396 * been sync'ed, this dependency can show up. So, 4397 * rather than panic, just flush it. 4398 */ 4399 nbp = WK_MKDIR(wk)->md_buf; 4400 if (getdirtybuf(&nbp, info->waitfor) == 0) 4401 break; 4402 FREE_LOCK(&lk); 4403 if (info->waitfor & MNT_NOWAIT) { 4404 bawrite(nbp); 4405 } else if ((error = bwrite(nbp)) != 0) { 4406 bawrite(bp); 4407 ACQUIRE_LOCK(&lk); 4408 return (-error); 4409 } 4410 ACQUIRE_LOCK(&lk); 4411 break; 4412 4413 case D_BMSAFEMAP: 4414 /* 4415 * This case should never happen if the vnode has 4416 * been properly sync'ed. However, if this function 4417 * is used at a place where the vnode has not yet 4418 * been sync'ed, this dependency can show up. So, 4419 * rather than panic, just flush it. 4420 * 4421 * nbp can wind up == bp if a device node for the 4422 * same filesystem is being fsynced at the same time, 4423 * leading to a panic if we don't catch the case. 4424 */ 4425 nbp = WK_BMSAFEMAP(wk)->sm_buf; 4426 if (nbp == bp) 4427 break; 4428 if (getdirtybuf(&nbp, info->waitfor) == 0) 4429 break; 4430 FREE_LOCK(&lk); 4431 if (info->waitfor & MNT_NOWAIT) { 4432 bawrite(nbp); 4433 } else if ((error = bwrite(nbp)) != 0) { 4434 bawrite(bp); 4435 ACQUIRE_LOCK(&lk); 4436 return (-error); 4437 } 4438 ACQUIRE_LOCK(&lk); 4439 break; 4440 4441 default: 4442 panic("softdep_sync_metadata: Unknown type %s", 4443 TYPENAME(wk->wk_type)); 4444 /* NOTREACHED */ 4445 } 4446 } 4447 FREE_LOCK(&lk); 4448 bawrite(bp); 4449 ACQUIRE_LOCK(&lk); 4450 return(0); 4451 } 4452 4453 /* 4454 * Flush the dependencies associated with an inodedep. 4455 * Called with splbio blocked. 4456 */ 4457 static int 4458 flush_inodedep_deps(struct fs *fs, ino_t ino) 4459 { 4460 struct inodedep *inodedep; 4461 struct allocdirect *adp; 4462 int error, waitfor; 4463 struct buf *bp; 4464 4465 /* 4466 * This work is done in two passes. The first pass grabs most 4467 * of the buffers and begins asynchronously writing them. The 4468 * only way to wait for these asynchronous writes is to sleep 4469 * on the filesystem vnode which may stay busy for a long time 4470 * if the filesystem is active. So, instead, we make a second 4471 * pass over the dependencies blocking on each write. In the 4472 * usual case we will be blocking against a write that we 4473 * initiated, so when it is done the dependency will have been 4474 * resolved. Thus the second pass is expected to end quickly. 4475 * We give a brief window at the top of the loop to allow 4476 * any pending I/O to complete. 
4477 */ 4478 for (waitfor = MNT_NOWAIT; ; ) { 4479 FREE_LOCK(&lk); 4480 ACQUIRE_LOCK(&lk); 4481 if (inodedep_lookup(fs, ino, 0, &inodedep) == 0) 4482 return (0); 4483 TAILQ_FOREACH(adp, &inodedep->id_inoupdt, ad_next) { 4484 if (adp->ad_state & DEPCOMPLETE) 4485 continue; 4486 bp = adp->ad_buf; 4487 if (getdirtybuf(&bp, waitfor) == 0) { 4488 if (waitfor & MNT_NOWAIT) 4489 continue; 4490 break; 4491 } 4492 FREE_LOCK(&lk); 4493 if (waitfor & MNT_NOWAIT) { 4494 bawrite(bp); 4495 } else if ((error = bwrite(bp)) != 0) { 4496 ACQUIRE_LOCK(&lk); 4497 return (error); 4498 } 4499 ACQUIRE_LOCK(&lk); 4500 break; 4501 } 4502 if (adp != NULL) 4503 continue; 4504 TAILQ_FOREACH(adp, &inodedep->id_newinoupdt, ad_next) { 4505 if (adp->ad_state & DEPCOMPLETE) 4506 continue; 4507 bp = adp->ad_buf; 4508 if (getdirtybuf(&bp, waitfor) == 0) { 4509 if (waitfor & MNT_NOWAIT) 4510 continue; 4511 break; 4512 } 4513 FREE_LOCK(&lk); 4514 if (waitfor & MNT_NOWAIT) { 4515 bawrite(bp); 4516 } else if ((error = bwrite(bp)) != 0) { 4517 ACQUIRE_LOCK(&lk); 4518 return (error); 4519 } 4520 ACQUIRE_LOCK(&lk); 4521 break; 4522 } 4523 if (adp != NULL) 4524 continue; 4525 /* 4526 * If pass2, we are done, otherwise do pass 2. 4527 */ 4528 if (waitfor == MNT_WAIT) 4529 break; 4530 waitfor = MNT_WAIT; 4531 } 4532 /* 4533 * Try freeing inodedep in case all dependencies have been removed. 4534 */ 4535 if (inodedep_lookup(fs, ino, 0, &inodedep) != 0) 4536 (void) free_inodedep(inodedep); 4537 return (0); 4538 } 4539 4540 /* 4541 * Eliminate a pagedep dependency by flushing out all its diradd dependencies. 4542 * Called with splbio blocked. 4543 */ 4544 static int 4545 flush_pagedep_deps(struct vnode *pvp, struct mount *mp, 4546 struct diraddhd *diraddhdp) 4547 { 4548 struct inodedep *inodedep; 4549 struct ufsmount *ump; 4550 struct diradd *dap; 4551 struct worklist *wk; 4552 struct vnode *vp; 4553 int gotit, error = 0; 4554 struct buf *bp; 4555 ino_t inum; 4556 4557 ump = VFSTOUFS(mp); 4558 while ((dap = LIST_FIRST(diraddhdp)) != NULL) { 4559 /* 4560 * Flush ourselves if this directory entry 4561 * has a MKDIR_PARENT dependency. 4562 */ 4563 if (dap->da_state & MKDIR_PARENT) { 4564 FREE_LOCK(&lk); 4565 if ((error = ffs_update(pvp, 1)) != 0) 4566 break; 4567 ACQUIRE_LOCK(&lk); 4568 /* 4569 * If that cleared dependencies, go on to next. 4570 */ 4571 if (dap != LIST_FIRST(diraddhdp)) 4572 continue; 4573 if (dap->da_state & MKDIR_PARENT) { 4574 panic("flush_pagedep_deps: MKDIR_PARENT"); 4575 } 4576 } 4577 /* 4578 * A newly allocated directory must have its "." and 4579 * ".." entries written out before its name can be 4580 * committed in its parent. We do not want or need 4581 * the full semantics of a synchronous VOP_FSYNC as 4582 * that may end up here again, once for each directory 4583 * level in the filesystem. Instead, we push the blocks 4584 * and wait for them to clear. We have to fsync twice 4585 * because the first call may choose to defer blocks 4586 * that still have dependencies, but deferral will 4587 * happen at most once. 4588 */ 4589 inum = dap->da_newinum; 4590 if (dap->da_state & MKDIR_BODY) { 4591 FREE_LOCK(&lk); 4592 if ((error = VFS_VGET(mp, NULL, inum, &vp)) != 0) 4593 break; 4594 if ((error=VOP_FSYNC(vp, MNT_NOWAIT, 0)) || 4595 (error=VOP_FSYNC(vp, MNT_NOWAIT, 0))) { 4596 vput(vp); 4597 break; 4598 } 4599 drain_output(vp, 0); 4600 /* 4601 * If first block is still dirty with a D_MKDIR 4602 * dependency then it needs to be written now. 
4603 */ 4604 error = 0; 4605 ACQUIRE_LOCK(&lk); 4606 bp = findblk(vp, 0, FINDBLK_TEST); 4607 if (bp == NULL) { 4608 FREE_LOCK(&lk); 4609 goto mkdir_body_continue; 4610 } 4611 LIST_FOREACH(wk, &bp->b_dep, wk_list) 4612 if (wk->wk_type == D_MKDIR) { 4613 gotit = getdirtybuf(&bp, MNT_WAIT); 4614 FREE_LOCK(&lk); 4615 if (gotit && (error = bwrite(bp)) != 0) 4616 goto mkdir_body_continue; 4617 break; 4618 } 4619 if (wk == NULL) 4620 FREE_LOCK(&lk); 4621 mkdir_body_continue: 4622 vput(vp); 4623 /* Flushing of first block failed. */ 4624 if (error) 4625 break; 4626 ACQUIRE_LOCK(&lk); 4627 /* 4628 * If that cleared dependencies, go on to next. 4629 */ 4630 if (dap != LIST_FIRST(diraddhdp)) 4631 continue; 4632 if (dap->da_state & MKDIR_BODY) { 4633 panic("flush_pagedep_deps: %p MKDIR_BODY", dap); 4634 } 4635 } 4636 /* 4637 * Flush the inode on which the directory entry depends. 4638 * Having accounted for MKDIR_PARENT and MKDIR_BODY above, 4639 * the only remaining dependency is that the updated inode 4640 * count must get pushed to disk. The inode has already 4641 * been pushed into its inode buffer (via VOP_UPDATE) at 4642 * the time of the reference count change. So we need only 4643 * locate that buffer, ensure that there will be no rollback 4644 * caused by a bitmap dependency, then write the inode buffer. 4645 */ 4646 retry_lookup: 4647 if (inodedep_lookup(ump->um_fs, inum, 0, &inodedep) == 0) { 4648 panic("flush_pagedep_deps: lost inode"); 4649 } 4650 /* 4651 * If the inode still has bitmap dependencies, 4652 * push them to disk. 4653 */ 4654 if ((inodedep->id_state & DEPCOMPLETE) == 0) { 4655 gotit = getdirtybuf(&inodedep->id_buf, MNT_WAIT); 4656 if (gotit == 0) 4657 goto retry_lookup; 4658 FREE_LOCK(&lk); 4659 if (gotit && (error = bwrite(inodedep->id_buf)) != 0) 4660 break; 4661 ACQUIRE_LOCK(&lk); 4662 if (dap != LIST_FIRST(diraddhdp)) 4663 continue; 4664 } 4665 /* 4666 * If the inode is still sitting in a buffer waiting 4667 * to be written, push it to disk. 4668 */ 4669 FREE_LOCK(&lk); 4670 if ((error = bread(ump->um_devvp, 4671 fsbtodoff(ump->um_fs, ino_to_fsba(ump->um_fs, inum)), 4672 (int)ump->um_fs->fs_bsize, &bp)) != 0) 4673 break; 4674 if ((error = bwrite(bp)) != 0) 4675 break; 4676 ACQUIRE_LOCK(&lk); 4677 /* 4678 * If we have failed to get rid of all the dependencies 4679 * then something is seriously wrong. 4680 */ 4681 if (dap == LIST_FIRST(diraddhdp)) { 4682 panic("flush_pagedep_deps: flush failed"); 4683 } 4684 } 4685 if (error) 4686 ACQUIRE_LOCK(&lk); 4687 return (error); 4688 } 4689 4690 /* 4691 * A large burst of file addition or deletion activity can drive the 4692 * memory load excessively high. First attempt to slow things down 4693 * using the techniques below. If that fails, this routine requests 4694 * the offending operations to fall back to running synchronously 4695 * until the memory load returns to a reasonable level. 4696 */ 4697 int 4698 softdep_slowdown(struct vnode *vp) 4699 { 4700 int max_softdeps_hard; 4701 4702 max_softdeps_hard = max_softdeps * 11 / 10; 4703 if (num_dirrem < max_softdeps_hard / 2 && 4704 num_inodedep < max_softdeps_hard) 4705 return (0); 4706 stat_sync_limit_hit += 1; 4707 return (1); 4708 } 4709 4710 /* 4711 * If memory utilization has gotten too high, deliberately slow things 4712 * down and speed up the I/O processing. 4713 */ 4714 static int 4715 request_cleanup(int resource) 4716 { 4717 struct thread *td = curthread; /* XXX */ 4718 4719 KKASSERT(lock_held(&lk)); 4720 4721 /* 4722 * We never hold up the filesystem syncer process. 
4723 */ 4724 if (td == filesys_syncer) 4725 return (0); 4726 /* 4727 * First check to see if the work list has gotten backlogged. 4728 * If it has, co-opt this process to help clean up two entries. 4729 * Because this process may hold inodes locked, we cannot 4730 * handle any remove requests that might block on a locked 4731 * inode as that could lead to deadlock. 4732 */ 4733 if (num_on_worklist > max_softdeps / 10) { 4734 process_worklist_item(NULL, LK_NOWAIT); 4735 process_worklist_item(NULL, LK_NOWAIT); 4736 stat_worklist_push += 2; 4737 return(1); 4738 } 4739 4740 /* 4741 * If we are resource constrained on inode dependencies, try 4742 * flushing some dirty inodes. Otherwise, we are constrained 4743 * by file deletions, so try accelerating flushes of directories 4744 * with removal dependencies. We would like to do the cleanup 4745 * here, but we probably hold an inode locked at this point and 4746 * that might deadlock against one that we try to clean. So, 4747 * the best that we can do is request the syncer daemon to do 4748 * the cleanup for us. 4749 */ 4750 switch (resource) { 4751 4752 case FLUSH_INODES: 4753 stat_ino_limit_push += 1; 4754 req_clear_inodedeps += 1; 4755 stat_countp = &stat_ino_limit_hit; 4756 break; 4757 4758 case FLUSH_REMOVE: 4759 stat_blk_limit_push += 1; 4760 req_clear_remove += 1; 4761 stat_countp = &stat_blk_limit_hit; 4762 break; 4763 4764 default: 4765 panic("request_cleanup: unknown type"); 4766 } 4767 /* 4768 * Hopefully the syncer daemon will catch up and awaken us. 4769 * We wait at most tickdelay before proceeding in any case. 4770 */ 4771 lksleep(&proc_waiting, &lk, 0, "softupdate", 4772 tickdelay > 2 ? tickdelay : 2); 4773 return (1); 4774 } 4775 4776 /* 4777 * Flush out a directory with at least one removal dependency in an effort to 4778 * reduce the number of dirrem, freefile, and freeblks dependency structures. 4779 */ 4780 static void 4781 clear_remove(struct thread *td) 4782 { 4783 struct pagedep_hashhead *pagedephd; 4784 struct pagedep *pagedep; 4785 static int next = 0; 4786 struct mount *mp; 4787 struct vnode *vp; 4788 int error, cnt; 4789 ino_t ino; 4790 4791 ACQUIRE_LOCK(&lk); 4792 for (cnt = 0; cnt < pagedep_hash; cnt++) { 4793 pagedephd = &pagedep_hashtbl[next++]; 4794 if (next >= pagedep_hash) 4795 next = 0; 4796 LIST_FOREACH(pagedep, pagedephd, pd_hash) { 4797 if (LIST_FIRST(&pagedep->pd_dirremhd) == NULL) 4798 continue; 4799 mp = pagedep->pd_mnt; 4800 ino = pagedep->pd_ino; 4801 FREE_LOCK(&lk); 4802 if ((error = VFS_VGET(mp, NULL, ino, &vp)) != 0) { 4803 softdep_error("clear_remove: vget", error); 4804 return; 4805 } 4806 if ((error = VOP_FSYNC(vp, MNT_NOWAIT, 0))) 4807 softdep_error("clear_remove: fsync", error); 4808 drain_output(vp, 0); 4809 vput(vp); 4810 return; 4811 } 4812 } 4813 FREE_LOCK(&lk); 4814 } 4815 4816 /* 4817 * Clear out a block of dirty inodes in an effort to reduce 4818 * the number of inodedep dependency structures. 
4819 */ 4820 struct clear_inodedeps_info { 4821 struct fs *fs; 4822 struct mount *mp; 4823 }; 4824 4825 static int 4826 clear_inodedeps_mountlist_callback(struct mount *mp, void *data) 4827 { 4828 struct clear_inodedeps_info *info = data; 4829 4830 if ((mp->mnt_flag & MNT_SOFTDEP) && info->fs == VFSTOUFS(mp)->um_fs) { 4831 info->mp = mp; 4832 return(-1); 4833 } 4834 return(0); 4835 } 4836 4837 static void 4838 clear_inodedeps(struct thread *td) 4839 { 4840 struct clear_inodedeps_info info; 4841 struct inodedep_hashhead *inodedephd; 4842 struct inodedep *inodedep; 4843 static int next = 0; 4844 struct vnode *vp; 4845 struct fs *fs; 4846 int error, cnt; 4847 ino_t firstino, lastino, ino; 4848 4849 ACQUIRE_LOCK(&lk); 4850 /* 4851 * Pick a random inode dependency to be cleared. 4852 * We will then gather up all the inodes in its block 4853 * that have dependencies and flush them out. 4854 */ 4855 inodedep = NULL; /* avoid gcc warnings */ 4856 for (cnt = 0; cnt < inodedep_hash; cnt++) { 4857 inodedephd = &inodedep_hashtbl[next++]; 4858 if (next >= inodedep_hash) 4859 next = 0; 4860 if ((inodedep = LIST_FIRST(inodedephd)) != NULL) 4861 break; 4862 } 4863 if (inodedep == NULL) { 4864 FREE_LOCK(&lk); 4865 return; 4866 } 4867 /* 4868 * Ugly code to find mount point given pointer to superblock. 4869 */ 4870 fs = inodedep->id_fs; 4871 info.mp = NULL; 4872 info.fs = fs; 4873 mountlist_scan(clear_inodedeps_mountlist_callback, 4874 &info, MNTSCAN_FORWARD|MNTSCAN_NOBUSY); 4875 /* 4876 * Find the last inode in the block with dependencies. 4877 */ 4878 firstino = rounddown2(inodedep->id_ino, INOPB(fs)); 4879 for (lastino = firstino + INOPB(fs) - 1; lastino > firstino; lastino--) 4880 if (inodedep_lookup(fs, lastino, 0, &inodedep) != 0) 4881 break; 4882 /* 4883 * Asynchronously push all but the last inode with dependencies. 4884 * Synchronously push the last inode with dependencies to ensure 4885 * that the inode block gets written to free up the inodedeps. 4886 */ 4887 for (ino = firstino; ino <= lastino; ino++) { 4888 if (inodedep_lookup(fs, ino, 0, &inodedep) == 0) 4889 continue; 4890 FREE_LOCK(&lk); 4891 if ((error = VFS_VGET(info.mp, NULL, ino, &vp)) != 0) { 4892 softdep_error("clear_inodedeps: vget", error); 4893 return; 4894 } 4895 if (ino == lastino) { 4896 if ((error = VOP_FSYNC(vp, MNT_WAIT, 0))) 4897 softdep_error("clear_inodedeps: fsync1", error); 4898 } else { 4899 if ((error = VOP_FSYNC(vp, MNT_NOWAIT, 0))) 4900 softdep_error("clear_inodedeps: fsync2", error); 4901 drain_output(vp, 0); 4902 } 4903 vput(vp); 4904 ACQUIRE_LOCK(&lk); 4905 } 4906 FREE_LOCK(&lk); 4907 } 4908 4909 /* 4910 * Function to determine if the buffer has outstanding dependencies 4911 * that will cause a roll-back if the buffer is written. If wantcount 4912 * is set, return number of dependencies, otherwise just yes or no. 
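 * Only dependency types that can force a rollback are counted below:
 * inodedeps with an incomplete bitmap or pending direct block
 * pointers, allocindirs still listed on an indirdep, and unattached
 * diradds hashed on a pagedep.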
4913 * 4914 * bioops callback - hold io_token 4915 */ 4916 static int 4917 softdep_count_dependencies(struct buf *bp, int wantcount) 4918 { 4919 struct worklist *wk; 4920 struct inodedep *inodedep; 4921 struct indirdep *indirdep; 4922 struct allocindir *aip; 4923 struct pagedep *pagedep; 4924 struct diradd *dap; 4925 int i, retval; 4926 4927 retval = 0; 4928 ACQUIRE_LOCK(&lk); 4929 4930 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 4931 switch (wk->wk_type) { 4932 4933 case D_INODEDEP: 4934 inodedep = WK_INODEDEP(wk); 4935 if ((inodedep->id_state & DEPCOMPLETE) == 0) { 4936 /* bitmap allocation dependency */ 4937 retval += 1; 4938 if (!wantcount) 4939 goto out; 4940 } 4941 if (TAILQ_FIRST(&inodedep->id_inoupdt)) { 4942 /* direct block pointer dependency */ 4943 retval += 1; 4944 if (!wantcount) 4945 goto out; 4946 } 4947 continue; 4948 4949 case D_INDIRDEP: 4950 indirdep = WK_INDIRDEP(wk); 4951 4952 LIST_FOREACH(aip, &indirdep->ir_deplisthd, ai_next) { 4953 /* indirect block pointer dependency */ 4954 retval += 1; 4955 if (!wantcount) 4956 goto out; 4957 } 4958 continue; 4959 4960 case D_PAGEDEP: 4961 pagedep = WK_PAGEDEP(wk); 4962 for (i = 0; i < DAHASHSZ; i++) { 4963 4964 LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) { 4965 /* directory entry dependency */ 4966 retval += 1; 4967 if (!wantcount) 4968 goto out; 4969 } 4970 } 4971 continue; 4972 4973 case D_BMSAFEMAP: 4974 case D_ALLOCDIRECT: 4975 case D_ALLOCINDIR: 4976 case D_MKDIR: 4977 /* never a dependency on these blocks */ 4978 continue; 4979 4980 default: 4981 panic("softdep_check_for_rollback: Unexpected type %s", 4982 TYPENAME(wk->wk_type)); 4983 /* NOTREACHED */ 4984 } 4985 } 4986 out: 4987 FREE_LOCK(&lk); 4988 4989 return retval; 4990 } 4991 4992 /* 4993 * Acquire exclusive access to a buffer. Requires softdep lock 4994 * to be held on entry. If waitfor is MNT_WAIT, may release/reacquire 4995 * softdep lock. 4996 * 4997 * Returns 1 if the buffer was locked, 0 if it was not locked or 4998 * if we had to block. 4999 * 5000 * NOTE! In order to return 1 we must acquire the buffer lock prior 5001 * to any release of &lk. Once we release &lk it's all over. 5002 * We may still have to block on the (type-stable) bp in that 5003 * case, but we must then unlock it and return 0. 5004 */ 5005 static int 5006 getdirtybuf(struct buf **bpp, int waitfor) 5007 { 5008 struct buf *bp; 5009 int error; 5010 5011 /* 5012 * If the contents of *bpp is NULL the caller presumably lost a race. 5013 */ 5014 bp = *bpp; 5015 if (bp == NULL) 5016 return (0); 5017 5018 /* 5019 * Try to obtain the buffer lock without deadlocking on &lk. 5020 */ 5021 KKASSERT(lock_held(&lk)); 5022 error = BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT); 5023 if (error == 0) { 5024 /* 5025 * If the buffer is no longer dirty the OS already wrote it 5026 * out, return failure. 5027 */ 5028 if ((bp->b_flags & B_DELWRI) == 0) { 5029 BUF_UNLOCK(bp); 5030 return (0); 5031 } 5032 5033 /* 5034 * Finish nominal buffer locking sequence return success. 5035 * 5036 * Since we are not using a normal getblk(), and UFS 5037 * isn't KVABIO aware, we must make sure that the bp 5038 * is synchronized before returning it. 5039 */ 5040 bremfree(bp); 5041 bkvasync_all(bp); 5042 return (1); 5043 } 5044 5045 /* 5046 * Failure case. 5047 * 5048 * If we are not being asked to wait, return 0 immediately. 
5049 */ 5050 if (waitfor != MNT_WAIT) 5051 return (0); 5052 5053 /* 5054 * Once we release the softdep lock we can never return success, 5055 * but we still have to block on the type-stable buf for the caller 5056 * to be able to retry without livelocking the system. 5057 * 5058 * The caller will normally retry in this case. 5059 */ 5060 FREE_LOCK(&lk); 5061 error = BUF_LOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL); 5062 ACQUIRE_LOCK(&lk); 5063 if (error == 0) 5064 BUF_UNLOCK(bp); 5065 return (0); 5066 } 5067 5068 /* 5069 * Wait for pending output on a vnode to complete. 5070 * Must be called with vnode locked. 5071 */ 5072 static void 5073 drain_output(struct vnode *vp, int islocked) 5074 { 5075 5076 if (!islocked) 5077 ACQUIRE_LOCK(&lk); 5078 while (bio_track_active(&vp->v_track_write)) { 5079 FREE_LOCK(&lk); 5080 bio_track_wait(&vp->v_track_write, 0, 0); 5081 ACQUIRE_LOCK(&lk); 5082 } 5083 if (!islocked) 5084 FREE_LOCK(&lk); 5085 } 5086 5087 /* 5088 * Called whenever a buffer that is being invalidated or reallocated 5089 * contains dependencies. This should only happen if an I/O error has 5090 * occurred. The routine is called with the buffer locked. 5091 * 5092 * bioops callback - hold io_token 5093 */ 5094 static void 5095 softdep_deallocate_dependencies(struct buf *bp) 5096 { 5097 /* nothing to do, mp lock not needed */ 5098 if ((bp->b_flags & B_ERROR) == 0) 5099 panic("softdep_deallocate_dependencies: dangling deps"); 5100 softdep_error(bp->b_vp->v_mount->mnt_stat.f_mntfromname, bp->b_error); 5101 panic("softdep_deallocate_dependencies: unrecovered I/O error"); 5102 } 5103 5104 /* 5105 * Function to handle asynchronous write errors in the filesystem. 5106 */ 5107 void 5108 softdep_error(char *func, int error) 5109 { 5110 /* XXX should do something better! */ 5111 kprintf("%s: got error %d while accessing filesystem\n", func, error); 5112 } 5113