/*
 * Copyright 1998, 2000 Marshall Kirk McKusick. All Rights Reserved.
 *
 * The soft updates code is derived from the appendix of a University
 * of Michigan technical report (Gregory R. Ganger and Yale N. Patt,
 * "Soft Updates: A Solution to the Metadata Update Problem in File
 * Systems", CSE-TR-254-95, August 1995).
 *
 * Further information about soft updates can be obtained from:
 *
 *	Marshall Kirk McKusick		http://www.mckusick.com/softdep/
 *	1614 Oxford Street		mckusick@mckusick.com
 *	Berkeley, CA 94709-1608		+1-510-843-9542
 *	USA
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY MARSHALL KIRK MCKUSICK ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL MARSHALL KIRK MCKUSICK BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)ffs_softdep.c	9.59 (McKusick) 6/21/00
 * $FreeBSD: src/sys/ufs/ffs/ffs_softdep.c,v 1.57.2.11 2002/02/05 18:46:53 dillon Exp $
 * $DragonFly: src/sys/vfs/ufs/ffs_softdep.c,v 1.31 2005/11/16 17:55:22 dillon Exp $
 */

/*
 * For now we want the safety net that the DIAGNOSTIC and DEBUG flags provide.
 */
#ifndef DIAGNOSTIC
#define DIAGNOSTIC
#endif
#ifndef DEBUG
#define DEBUG
#endif

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/syslog.h>
#include <sys/vnode.h>
#include <sys/conf.h>
#include <sys/buf2.h>
#include <machine/inttypes.h>
#include "dir.h"
#include "quota.h"
#include "inode.h"
#include "ufsmount.h"
#include "fs.h"
#include "softdep.h"
#include "ffs_extern.h"
#include "ufs_extern.h"

#include <sys/thread2.h>

/*
 * These definitions need to be adapted to the system to which
 * this file is being ported.
 */
/*
 * malloc types defined for the softdep system.
 */
MALLOC_DEFINE(M_PAGEDEP, "pagedep", "File page dependencies");
MALLOC_DEFINE(M_INODEDEP, "inodedep", "Inode dependencies");
MALLOC_DEFINE(M_NEWBLK, "newblk", "New block allocation");
MALLOC_DEFINE(M_BMSAFEMAP, "bmsafemap", "Block or frag allocated from cyl group map");
MALLOC_DEFINE(M_ALLOCDIRECT, "allocdirect", "Block or frag dependency for an inode");
MALLOC_DEFINE(M_INDIRDEP, "indirdep", "Indirect block dependencies");
MALLOC_DEFINE(M_ALLOCINDIR, "allocindir", "Block dependency for an indirect block");
MALLOC_DEFINE(M_FREEFRAG, "freefrag", "Previously used frag for an inode");
MALLOC_DEFINE(M_FREEBLKS, "freeblks", "Blocks freed from an inode");
MALLOC_DEFINE(M_FREEFILE, "freefile", "Inode deallocated");
MALLOC_DEFINE(M_DIRADD, "diradd", "New directory entry");
MALLOC_DEFINE(M_MKDIR, "mkdir", "New directory");
MALLOC_DEFINE(M_DIRREM, "dirrem", "Directory entry deleted");

#define M_SOFTDEP_FLAGS		(M_WAITOK | M_USE_RESERVE)

#define	D_PAGEDEP	0
#define	D_INODEDEP	1
#define	D_NEWBLK	2
#define	D_BMSAFEMAP	3
#define	D_ALLOCDIRECT	4
#define	D_INDIRDEP	5
#define	D_ALLOCINDIR	6
#define	D_FREEFRAG	7
#define	D_FREEBLKS	8
#define	D_FREEFILE	9
#define	D_DIRADD	10
#define	D_MKDIR		11
#define	D_DIRREM	12
#define	D_LAST		D_DIRREM

/*
 * translate from workitem type to memory type
 * MUST match the defines above, such that memtype[D_XXX] == M_XXX
 */
static struct malloc_type *memtype[] = {
	M_PAGEDEP,
	M_INODEDEP,
	M_NEWBLK,
	M_BMSAFEMAP,
	M_ALLOCDIRECT,
	M_INDIRDEP,
	M_ALLOCINDIR,
	M_FREEFRAG,
	M_FREEBLKS,
	M_FREEFILE,
	M_DIRADD,
	M_MKDIR,
	M_DIRREM
};

#define DtoM(type) (memtype[type])

/*
 * Names of malloc types.  Note that D_LAST itself is a valid index
 * (memtype[] has D_LAST + 1 entries), hence the inclusive comparison.
 */
#define TYPENAME(type)	\
	((unsigned)(type) <= D_LAST ? memtype[type]->ks_shortdesc : "???")
/*
 * End system adaptation definitions.
 */
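/*
 * Illustrative sketch (not built): how the tables above are intended to
 * be used.  A workitem type D_XXX indexes memtype[] to recover the malloc
 * type M_XXX it was allocated from, so generic code such as the
 * WORKITEM_FREE() macro below can release an item without knowing its
 * concrete structure.  report_workitem() is hypothetical, not part of
 * this file.
 */
#if 0
static void
report_workitem(struct worklist *wk)
{
	/* TYPENAME falls back to "???" for out-of-range types. */
	printf("workitem %p type %s from malloc type %p\n",
	    wk, TYPENAME(wk->wk_type), DtoM(wk->wk_type));
}
#endif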
/*
 * Internal function prototypes.
 */
static	void softdep_error(char *, int);
static	void drain_output(struct vnode *, int);
static	int getdirtybuf(struct buf **, int);
static	void clear_remove(struct thread *);
static	void clear_inodedeps(struct thread *);
static	int flush_pagedep_deps(struct vnode *, struct mount *,
	    struct diraddhd *);
static	int flush_inodedep_deps(struct fs *, ino_t);
static	int handle_written_filepage(struct pagedep *, struct buf *);
static	void diradd_inode_written(struct diradd *, struct inodedep *);
static	int handle_written_inodeblock(struct inodedep *, struct buf *);
static	void handle_allocdirect_partdone(struct allocdirect *);
static	void handle_allocindir_partdone(struct allocindir *);
static	void initiate_write_filepage(struct pagedep *, struct buf *);
static	void handle_written_mkdir(struct mkdir *, int);
static	void initiate_write_inodeblock(struct inodedep *, struct buf *);
static	void handle_workitem_freefile(struct freefile *);
static	void handle_workitem_remove(struct dirrem *);
static	struct dirrem *newdirrem(struct buf *, struct inode *,
	    struct inode *, int, struct dirrem **);
static	void free_diradd(struct diradd *);
static	void free_allocindir(struct allocindir *, struct inodedep *);
static	int indir_trunc(struct inode *, ufs_daddr_t, int, ufs_lbn_t,
	    long *);
static	void deallocate_dependencies(struct buf *, struct inodedep *);
static	void free_allocdirect(struct allocdirectlst *,
	    struct allocdirect *, int);
static	int check_inode_unwritten(struct inodedep *);
static	int free_inodedep(struct inodedep *);
static	void handle_workitem_freeblocks(struct freeblks *);
static	void merge_inode_lists(struct inodedep *);
static	void setup_allocindir_phase2(struct buf *, struct inode *,
	    struct allocindir *);
static	struct allocindir *newallocindir(struct inode *, int, ufs_daddr_t,
	    ufs_daddr_t);
static	void handle_workitem_freefrag(struct freefrag *);
static	struct freefrag *newfreefrag(struct inode *, ufs_daddr_t, long);
static	void allocdirect_merge(struct allocdirectlst *,
	    struct allocdirect *, struct allocdirect *);
static	struct bmsafemap *bmsafemap_lookup(struct buf *);
static	int newblk_lookup(struct fs *, ufs_daddr_t, int,
	    struct newblk **);
static	int inodedep_lookup(struct fs *, ino_t, int, struct inodedep **);
static	int pagedep_lookup(struct inode *, ufs_lbn_t, int,
	    struct pagedep **);
static	void pause_timer(void *);
static	int request_cleanup(int, int);
static	int process_worklist_item(struct mount *, int);
static	void add_to_worklist(struct worklist *);

/*
 * Exported softdep operations.
 */
static	void softdep_disk_io_initiation(struct buf *);
static	void softdep_disk_write_complete(struct buf *);
static	void softdep_deallocate_dependencies(struct buf *);
static	int softdep_fsync(struct vnode *);
static	int softdep_process_worklist(struct mount *);
static	void softdep_move_dependencies(struct buf *, struct buf *);
static	int softdep_count_dependencies(struct buf *bp, int);

static struct bio_ops softdep_bioops = {
	softdep_disk_io_initiation,		/* io_start */
	softdep_disk_write_complete,		/* io_complete */
	softdep_deallocate_dependencies,	/* io_deallocate */
	softdep_fsync,				/* io_fsync */
	softdep_process_worklist,		/* io_sync */
	softdep_move_dependencies,		/* io_movedeps */
	softdep_count_dependencies,		/* io_countdeps */
};

/*
 * Locking primitives.
 *
 * For a uniprocessor, all we need to do is protect against disk
 * interrupts.  For a multiprocessor, this lock would have to be
 * a mutex.  A single mutex is used throughout this file, though
 * finer grain locking could be used if contention warranted it.
 *
 * For a multiprocessor, the sleep call would accept a lock and
 * release it after the sleep processing was complete.  In a uniprocessor
 * implementation there is no such interlock, so we simply mark
 * the places where it needs to be done with the `interlocked' form
 * of the lock calls.  Since the uniprocessor sleep already interlocks
 * the spl, there is nothing that really needs to be done.
 */
#ifndef /* NOT */ DEBUG
static struct lockit {
} lk = { 0 };
#define ACQUIRE_LOCK(lk)		crit_enter_id("softupdates");
#define FREE_LOCK(lk)			crit_exit_id("softupdates");

#else /* DEBUG */
#define NOHOLDER	((struct thread *)-1)
#define SPECIAL_FLAG	((struct thread *)-2)
static struct lockit {
	int	lkt_spl;
	struct thread *lkt_held;
} lk = { 0, NOHOLDER };
static int lockcnt;

static	void acquire_lock(struct lockit *);
static	void free_lock(struct lockit *);
void	softdep_panic(char *);

#define ACQUIRE_LOCK(lk)		acquire_lock(lk)
#define FREE_LOCK(lk)			free_lock(lk)

static void
acquire_lock(lk)
	struct lockit *lk;
{
	thread_t holder;

	if (lk->lkt_held != NOHOLDER) {
		holder = lk->lkt_held;
		FREE_LOCK(lk);
		if (holder == curthread)
			panic("softdep_lock: locking against myself");
		else
			panic("softdep_lock: lock held by %p", holder);
	}
	crit_enter_id("softupdates");
	lk->lkt_held = curthread;
	lockcnt++;
}

static void
free_lock(lk)
	struct lockit *lk;
{

	if (lk->lkt_held == NOHOLDER)
		panic("softdep_unlock: lock not held");
	lk->lkt_held = NOHOLDER;
	crit_exit_id("softupdates");
}

/*
 * Function to release soft updates lock and panic.
 */
void
softdep_panic(msg)
	char *msg;
{

	if (lk.lkt_held != NOHOLDER)
		FREE_LOCK(&lk);
	panic(msg);
}
#endif /* DEBUG */

static	int interlocked_sleep(struct lockit *, int, void *, int,
	    const char *, int);

/*
 * When going to sleep, we must save our SPL so that it does
 * not get lost if some other process uses the lock while we
 * are sleeping.  We restore it after we have slept.  This routine
 * wraps the interlocking with functions that sleep.  The list
 * below enumerates the available set of operations.
 */
#define UNKNOWN		0
#define SLEEP		1
#define LOCKBUF		2

static int
interlocked_sleep(lk, op, ident, flags, wmesg, timo)
	struct lockit *lk;
	int op;
	void *ident;
	int flags;
	const char *wmesg;
	int timo;
{
	thread_t holder;
	int s, retval;

	s = lk->lkt_spl;
#	ifdef DEBUG
	if (lk->lkt_held == NOHOLDER)
		panic("interlocked_sleep: lock not held");
	lk->lkt_held = NOHOLDER;
#	endif /* DEBUG */
	switch (op) {
	case SLEEP:
		retval = tsleep(ident, flags, wmesg, timo);
		break;
	case LOCKBUF:
		retval = BUF_LOCK((struct buf *)ident, flags);
		break;
	default:
		panic("interlocked_sleep: unknown operation");
	}
#	ifdef DEBUG
	if (lk->lkt_held != NOHOLDER) {
		holder = lk->lkt_held;
		FREE_LOCK(lk);
		if (holder == curthread)
			panic("interlocked_sleep: locking against self");
		else
			panic("interlocked_sleep: lock held by %p", holder);
	}
	lk->lkt_held = curthread;
	lockcnt++;
#	endif /* DEBUG */
	lk->lkt_spl = s;
	return (retval);
}

/*
 * Place holder for real semaphores.
 */
struct sema {
	int	value;
	thread_t holder;
	char	*name;
	int	prio;
	int	timo;
};
static	void sema_init(struct sema *, char *, int, int);
static	int sema_get(struct sema *, struct lockit *);
static	void sema_release(struct sema *);

static void
sema_init(semap, name, prio, timo)
	struct sema *semap;
	char *name;
	int prio, timo;
{

	semap->holder = NOHOLDER;
	semap->value = 0;
	semap->name = name;
	semap->prio = prio;
	semap->timo = timo;
}

static int
sema_get(semap, interlock)
	struct sema *semap;
	struct lockit *interlock;
{

	if (semap->value++ > 0) {
		if (interlock != NULL) {
			interlocked_sleep(interlock, SLEEP, (caddr_t)semap,
			    semap->prio, semap->name, semap->timo);
			FREE_LOCK(interlock);
		} else {
			tsleep((caddr_t)semap, semap->prio, semap->name,
			    semap->timo);
		}
		return (0);
	}
	semap->holder = curthread;
	if (interlock != NULL)
		FREE_LOCK(interlock);
	return (1);
}

static void
sema_release(semap)
	struct sema *semap;
{

	if (semap->value <= 0 || semap->holder != curthread) {
		if (lk.lkt_held != NOHOLDER)
			FREE_LOCK(&lk);
		panic("sema_release: not held");
	}
	if (--semap->value > 0) {
		semap->value = 0;
		wakeup(semap);
	}
	semap->holder = NOHOLDER;
}
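/*
 * Illustrative sketch (not built): the calling pattern the primitives
 * above are designed for, as used by the lookup routines later in this
 * file.  A routine that may sleep passes the softdep lock as the
 * interlock; sema_get() either acquires the semaphore and drops the
 * interlock (returns 1), or sleeps until the holder releases it and
 * returns 0 so the caller can retry.  example_sema and
 * hash_search_example() are hypothetical.
 */
#if 0
static struct sema example_sema;	/* hypothetical */

static int
lookup_retry_example(void)
{
	ACQUIRE_LOCK(&lk);
top:
	if (hash_search_example() != NULL) {	/* hypothetical search */
		FREE_LOCK(&lk);
		return (1);
	}
	/*
	 * 0 means we slept while another thread held the semaphore;
	 * the interlock was released for us, so retake it and retry.
	 */
	if (sema_get(&example_sema, &lk) == 0) {
		ACQUIRE_LOCK(&lk);
		goto top;
	}
	/* Semaphore held, interlock dropped: safe to sleep in MALLOC. */
	/* ... allocate, initialize, ACQUIRE_LOCK(&lk), insert ... */
	sema_release(&example_sema);
	return (0);
}
#endif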
/*
 * Worklist queue management.
 * These routines require that the lock be held.
 */
#ifndef /* NOT */ DEBUG
#define WORKLIST_INSERT(head, item) do {	\
	(item)->wk_state |= ONWORKLIST;		\
	LIST_INSERT_HEAD(head, item, wk_list);	\
} while (0)
#define WORKLIST_REMOVE(item) do {		\
	(item)->wk_state &= ~ONWORKLIST;	\
	LIST_REMOVE(item, wk_list);		\
} while (0)
#define WORKITEM_FREE(item, type) FREE(item, DtoM(type))

#else /* DEBUG */
static	void worklist_insert(struct workhead *, struct worklist *);
static	void worklist_remove(struct worklist *);
static	void workitem_free(struct worklist *, int);

#define WORKLIST_INSERT(head, item) worklist_insert(head, item)
#define WORKLIST_REMOVE(item) worklist_remove(item)
#define WORKITEM_FREE(item, type) workitem_free((struct worklist *)item, type)

static void
worklist_insert(head, item)
	struct workhead *head;
	struct worklist *item;
{

	if (lk.lkt_held == NOHOLDER)
		panic("worklist_insert: lock not held");
	if (item->wk_state & ONWORKLIST) {
		FREE_LOCK(&lk);
		panic("worklist_insert: already on list");
	}
	item->wk_state |= ONWORKLIST;
	LIST_INSERT_HEAD(head, item, wk_list);
}

static void
worklist_remove(item)
	struct worklist *item;
{

	if (lk.lkt_held == NOHOLDER)
		panic("worklist_remove: lock not held");
	if ((item->wk_state & ONWORKLIST) == 0) {
		FREE_LOCK(&lk);
		panic("worklist_remove: not on list");
	}
	item->wk_state &= ~ONWORKLIST;
	LIST_REMOVE(item, wk_list);
}

static void
workitem_free(item, type)
	struct worklist *item;
	int type;
{

	if (item->wk_state & ONWORKLIST) {
		if (lk.lkt_held != NOHOLDER)
			FREE_LOCK(&lk);
		panic("workitem_free: still on list");
	}
	if (item->wk_type != type) {
		if (lk.lkt_held != NOHOLDER)
			FREE_LOCK(&lk);
		panic("workitem_free: type mismatch");
	}
	FREE(item, DtoM(type));
}
#endif /* DEBUG */
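/*
 * Illustrative sketch (not built): the life cycle the macros above
 * implement.  Dependency structures hang off a buffer's b_dep list while
 * the softdep lock is held; once a dependency is satisfied it is unhooked
 * and returned to its malloc type.  attach_and_free_example() is
 * hypothetical; the same pattern appears throughout the real routines
 * below.
 */
#if 0
static void
attach_and_free_example(struct buf *bp, struct allocdirect *adp)
{
	ACQUIRE_LOCK(&lk);
	WORKLIST_INSERT(&bp->b_dep, &adp->ad_list);
	/* ... later, once the dependency has been satisfied ... */
	WORKLIST_REMOVE(&adp->ad_list);
	WORKITEM_FREE(adp, D_ALLOCDIRECT);
	FREE_LOCK(&lk);
}
#endif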
/*
 * Workitem queue management
 */
static struct workhead softdep_workitem_pending;
static int num_on_worklist;	/* number of worklist items to be processed */
static int softdep_worklist_busy; /* 1 => trying to do unmount */
static int softdep_worklist_req; /* serialized waiters */
static int max_softdeps;	/* maximum number of structs before slowdown */
static int tickdelay = 2;	/* number of ticks to pause during slowdown */
static int *stat_countp;	/* statistic to count in proc_waiting timeout */
static int proc_waiting;	/* tracks whether we have a timeout posted */
static struct callout handle;	/* handle on posted proc_waiting timeout */
static struct thread *filesys_syncer; /* proc of filesystem syncer process */
static int req_clear_inodedeps;	/* syncer process flush some inodedeps */
#define FLUSH_INODES	1
static int req_clear_remove;	/* syncer process flush some freeblks */
#define FLUSH_REMOVE	2
/*
 * runtime statistics
 */
static int stat_worklist_push;	/* number of worklist cleanups */
static int stat_blk_limit_push;	/* number of times block limit neared */
static int stat_ino_limit_push;	/* number of times inode limit neared */
static int stat_blk_limit_hit;	/* number of times block slowdown imposed */
static int stat_ino_limit_hit;	/* number of times inode slowdown imposed */
static int stat_sync_limit_hit;	/* number of synchronous slowdowns imposed */
static int stat_indir_blk_ptrs;	/* bufs redirtied as indir ptrs not written */
static int stat_inode_bitmap;	/* bufs redirtied as inode bitmap not written */
static int stat_direct_blk_ptrs; /* bufs redirtied as direct ptrs not written */
static int stat_dir_entry;	/* bufs redirtied as dir entry cannot write */
#ifdef DEBUG
#include <vm/vm.h>
#include <sys/sysctl.h>
SYSCTL_INT(_debug, OID_AUTO, max_softdeps, CTLFLAG_RW, &max_softdeps, 0, "");
SYSCTL_INT(_debug, OID_AUTO, tickdelay, CTLFLAG_RW, &tickdelay, 0, "");
SYSCTL_INT(_debug, OID_AUTO, worklist_push, CTLFLAG_RW, &stat_worklist_push, 0, "");
SYSCTL_INT(_debug, OID_AUTO, blk_limit_push, CTLFLAG_RW, &stat_blk_limit_push, 0, "");
SYSCTL_INT(_debug, OID_AUTO, ino_limit_push, CTLFLAG_RW, &stat_ino_limit_push, 0, "");
SYSCTL_INT(_debug, OID_AUTO, blk_limit_hit, CTLFLAG_RW, &stat_blk_limit_hit, 0, "");
SYSCTL_INT(_debug, OID_AUTO, ino_limit_hit, CTLFLAG_RW, &stat_ino_limit_hit, 0, "");
SYSCTL_INT(_debug, OID_AUTO, sync_limit_hit, CTLFLAG_RW, &stat_sync_limit_hit, 0, "");
SYSCTL_INT(_debug, OID_AUTO, indir_blk_ptrs, CTLFLAG_RW, &stat_indir_blk_ptrs, 0, "");
SYSCTL_INT(_debug, OID_AUTO, inode_bitmap, CTLFLAG_RW, &stat_inode_bitmap, 0, "");
SYSCTL_INT(_debug, OID_AUTO, direct_blk_ptrs, CTLFLAG_RW, &stat_direct_blk_ptrs, 0, "");
SYSCTL_INT(_debug, OID_AUTO, dir_entry, CTLFLAG_RW, &stat_dir_entry, 0, "");
#endif /* DEBUG */

/*
 * Add an item to the end of the work queue.
 * This routine requires that the lock be held.
 * This is the only routine that adds items to the list.
 * The following routine is the only one that removes items
 * and does so in order from first to last.
 */
static void
add_to_worklist(wk)
	struct worklist *wk;
{
	static struct worklist *worklist_tail;

	if (wk->wk_state & ONWORKLIST) {
		if (lk.lkt_held != NOHOLDER)
			FREE_LOCK(&lk);
		panic("add_to_worklist: already on list");
	}
	wk->wk_state |= ONWORKLIST;
	if (LIST_FIRST(&softdep_workitem_pending) == NULL)
		LIST_INSERT_HEAD(&softdep_workitem_pending, wk, wk_list);
	else
		LIST_INSERT_AFTER(worklist_tail, wk, wk_list);
	worklist_tail = wk;
	num_on_worklist += 1;
}

/*
 * Process that runs once per second to handle items in the background queue.
 *
 * Note that we ensure that everything is done in the order in which it
 * appears in the queue.  The code below depends on this property to ensure
 * that blocks of a file are freed before the inode itself is freed.  This
 * ordering ensures that no new <vfsid, inum, lbn> triples will be generated
 * until all the old ones have been purged from the dependency lists.
 */
static int
softdep_process_worklist(matchmnt)
	struct mount *matchmnt;
{
	thread_t td = curthread;
	int matchcnt, loopcount;
	long starttime;

	/*
	 * Record the process identifier of our caller so that we can give
	 * this process preferential treatment in request_cleanup below.
	 */
	filesys_syncer = td;
	matchcnt = 0;

	/*
	 * There is no danger of having multiple processes run this
	 * code, but we have to single-thread it when softdep_flushfiles()
	 * is in operation to get an accurate count of the number of items
	 * related to its mount point that are in the list.
	 */
	if (matchmnt == NULL) {
		if (softdep_worklist_busy < 0)
			return (-1);
		softdep_worklist_busy += 1;
	}

	/*
	 * If requested, try removing inode or removal dependencies.
	 */
	if (req_clear_inodedeps) {
		clear_inodedeps(td);
		req_clear_inodedeps -= 1;
		wakeup_one(&proc_waiting);
	}
	if (req_clear_remove) {
		clear_remove(td);
		req_clear_remove -= 1;
		wakeup_one(&proc_waiting);
	}
	loopcount = 1;
	starttime = time_second;
	while (num_on_worklist > 0) {
		matchcnt += process_worklist_item(matchmnt, 0);

		/*
		 * If a umount operation wants to run the worklist
		 * accurately, abort.
		 */
		if (softdep_worklist_req && matchmnt == NULL) {
			matchcnt = -1;
			break;
		}

		/*
		 * If requested, try removing inode or removal dependencies.
		 */
		if (req_clear_inodedeps) {
			clear_inodedeps(td);
			req_clear_inodedeps -= 1;
			wakeup_one(&proc_waiting);
		}
		if (req_clear_remove) {
			clear_remove(td);
			req_clear_remove -= 1;
			wakeup_one(&proc_waiting);
		}
		/*
		 * We do not generally want to stop for buffer space, but if
		 * we are really being a buffer hog, we will stop and wait.
		 */
		if (loopcount++ % 128 == 0)
			bwillwrite();
		/*
		 * Never allow processing to run for more than one
		 * second.  Otherwise the other syncer tasks may get
		 * excessively backlogged.
		 */
		if (starttime != time_second && matchmnt == NULL) {
			matchcnt = -1;
			break;
		}
	}
	if (matchmnt == NULL) {
		--softdep_worklist_busy;
		if (softdep_worklist_req && softdep_worklist_busy == 0)
			wakeup(&softdep_worklist_req);
	}
	return (matchcnt);
}
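/*
 * Illustrative sketch (not built): the two ways the routine above is
 * driven.  The syncer calls it once per second with a NULL mount for
 * background processing; softdep_flushfiles() below calls it with a
 * specific mount and uses the returned match count to decide when that
 * mount's queue has drained.  drive_worklist_example() is hypothetical.
 */
#if 0
static void
drive_worklist_example(struct mount *mp)
{
	/* Background pass: returns -1 if an unmount wants the queue. */
	if (softdep_process_worklist(NULL) == -1)
		return;
	/* Targeted pass: count of items that matched mp and were run. */
	while (softdep_process_worklist(mp) > 0)
		;
}
#endif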
/*
 * Process one item on the worklist.
 */
static int
process_worklist_item(matchmnt, flags)
	struct mount *matchmnt;
	int flags;
{
	struct worklist *wk;
	struct dirrem *dirrem;
	struct fs *matchfs;
	struct vnode *vp;
	int matchcnt = 0;

	matchfs = NULL;
	if (matchmnt != NULL)
		matchfs = VFSTOUFS(matchmnt)->um_fs;
	ACQUIRE_LOCK(&lk);
	/*
	 * Normally we just process each item on the worklist in order.
	 * However, if we are in a situation where we cannot lock any
	 * inodes, we have to skip over any dirrem requests whose
	 * vnodes are resident and locked.
	 */
	LIST_FOREACH(wk, &softdep_workitem_pending, wk_list) {
		if ((flags & LK_NOWAIT) == 0 || wk->wk_type != D_DIRREM)
			break;
		dirrem = WK_DIRREM(wk);
		vp = ufs_ihashlookup(VFSTOUFS(dirrem->dm_mnt)->um_dev,
		    dirrem->dm_oldinum);
		if (vp == NULL || !VOP_ISLOCKED(vp, curthread))
			break;
	}
	if (wk == NULL) {
		FREE_LOCK(&lk);
		return (0);
	}
	WORKLIST_REMOVE(wk);
	num_on_worklist -= 1;
	FREE_LOCK(&lk);
	switch (wk->wk_type) {

	case D_DIRREM:
		/* removal of a directory entry */
		if (WK_DIRREM(wk)->dm_mnt == matchmnt)
			matchcnt += 1;
		handle_workitem_remove(WK_DIRREM(wk));
		break;

	case D_FREEBLKS:
		/* releasing blocks and/or fragments from a file */
		if (WK_FREEBLKS(wk)->fb_fs == matchfs)
			matchcnt += 1;
		handle_workitem_freeblocks(WK_FREEBLKS(wk));
		break;

	case D_FREEFRAG:
		/* releasing a fragment when replaced as a file grows */
		if (WK_FREEFRAG(wk)->ff_fs == matchfs)
			matchcnt += 1;
		handle_workitem_freefrag(WK_FREEFRAG(wk));
		break;

	case D_FREEFILE:
		/* releasing an inode when its link count drops to 0 */
		if (WK_FREEFILE(wk)->fx_fs == matchfs)
			matchcnt += 1;
		handle_workitem_freefile(WK_FREEFILE(wk));
		break;

	default:
		panic("%s_process_worklist: Unknown type %s",
		    "softdep", TYPENAME(wk->wk_type));
		/* NOTREACHED */
	}
	return (matchcnt);
}

/*
 * Move dependencies from one buffer to another.
 */
static void
softdep_move_dependencies(oldbp, newbp)
	struct buf *oldbp;
	struct buf *newbp;
{
	struct worklist *wk, *wktail;

	if (LIST_FIRST(&newbp->b_dep) != NULL)
		panic("softdep_move_dependencies: need merge code");
	wktail = NULL;
	ACQUIRE_LOCK(&lk);
	while ((wk = LIST_FIRST(&oldbp->b_dep)) != NULL) {
		LIST_REMOVE(wk, wk_list);
		if (wktail == NULL)
			LIST_INSERT_HEAD(&newbp->b_dep, wk, wk_list);
		else
			LIST_INSERT_AFTER(wktail, wk, wk_list);
		wktail = wk;
	}
	FREE_LOCK(&lk);
}

/*
 * Purge the work list of all items associated with a particular mount point.
 */
int
softdep_flushfiles(struct mount *oldmnt, int flags, struct thread *td)
{
	struct vnode *devvp;
	int error, loopcnt;

	/*
	 * Await our turn to clear out the queue, then serialize access.
	 */
	while (softdep_worklist_busy != 0) {
		softdep_worklist_req += 1;
		tsleep(&softdep_worklist_req, 0, "softflush", 0);
		softdep_worklist_req -= 1;
	}
	softdep_worklist_busy = -1;

	if ((error = ffs_flushfiles(oldmnt, flags, td)) != 0) {
		softdep_worklist_busy = 0;
		if (softdep_worklist_req)
			wakeup(&softdep_worklist_req);
		return (error);
	}
	/*
	 * Alternately flush the block device associated with the mount
	 * point and process any dependencies that the flushing
	 * creates.  In theory, this loop can happen at most twice,
	 * but we give it a few extra just to be sure.
	 */
	devvp = VFSTOUFS(oldmnt)->um_devvp;
	for (loopcnt = 10; loopcnt > 0; ) {
		if (softdep_process_worklist(oldmnt) == 0) {
			loopcnt--;
			/*
			 * Do another flush in case any vnodes were brought in
			 * as part of the cleanup operations.
			 */
			if ((error = ffs_flushfiles(oldmnt, flags, td)) != 0)
				break;
			/*
			 * If we still found nothing to do, we are really done.
			 */
			if (softdep_process_worklist(oldmnt) == 0)
				break;
		}
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, td);
		error = VOP_FSYNC(devvp, MNT_WAIT, td);
		VOP_UNLOCK(devvp, 0, td);
		if (error)
			break;
	}
	softdep_worklist_busy = 0;
	if (softdep_worklist_req)
		wakeup(&softdep_worklist_req);

	/*
	 * If we are unmounting then it is an error to fail.  If we
	 * are simply trying to downgrade to read-only, then filesystem
	 * activity can keep us busy forever, so we just fail with EBUSY.
	 */
	if (loopcnt == 0) {
		if (oldmnt->mnt_kern_flag & MNTK_UNMOUNT)
			panic("softdep_flushfiles: looping");
		error = EBUSY;
	}
	return (error);
}

/*
 * Structure hashing.
 *
 * There are three types of structures that can be looked up:
 *	1) pagedep structures identified by mount point, inode number,
 *	   and logical block.
 *	2) inodedep structures identified by mount point and inode number.
 *	3) newblk structures identified by mount point and
 *	   physical block number.
 *
 * The "pagedep" and "inodedep" dependency structures are hashed
 * separately from the file blocks and inodes to which they correspond.
 * This separation helps when the in-memory copy of an inode or
 * file block must be replaced.  It also obviates the need to access
 * an inode or file page when simply updating (or de-allocating)
 * dependency structures.  Lookup of newblk structures is needed to
 * find newly allocated blocks when trying to associate them with
 * their allocdirect or allocindir structure.
 *
 * The lookup routines optionally create and hash a new instance when
 * an existing entry is not found.
 */
#define DEPALLOC	0x0001	/* allocate structure if lookup fails */
#define NODELAY		0x0002	/* cannot do background work */
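/*
 * Illustrative sketch (not built): the calling convention shared by
 * pagedep_lookup(), inodedep_lookup() and newblk_lookup() below.  A
 * return of 1 means an existing entry was found; 0 with DEPALLOC means a
 * fresh entry was created and hashed; 0 without DEPALLOC means no entry
 * exists.  find_or_create_example() is hypothetical.
 */
#if 0
static void
find_or_create_example(struct fs *fs, ino_t ino)
{
	struct inodedep *inodedep;

	ACQUIRE_LOCK(&lk);
	if (inodedep_lookup(fs, ino, 0, &inodedep) == 0) {
		/* Not present; DEPALLOC creates and hashes a new entry. */
		(void) inodedep_lookup(fs, ino, DEPALLOC, &inodedep);
	}
	FREE_LOCK(&lk);
}
#endif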
/*
 * Structures and routines associated with pagedep caching.
 */
LIST_HEAD(pagedep_hashhead, pagedep) *pagedep_hashtbl;
u_long	pagedep_hash;		/* size of hash table - 1 */
#define PAGEDEP_HASH(mp, inum, lbn) \
	(&pagedep_hashtbl[((((register_t)(mp)) >> 13) + (inum) + (lbn)) & \
	    pagedep_hash])
static struct sema pagedep_in_progress;

/*
 * Look up a pagedep.  Return 1 if found, 0 if not found.
 * If not found, allocate if DEPALLOC flag is passed.
 * Found or allocated entry is returned in pagedeppp.
 * This routine must be called with splbio interrupts blocked.
 */
static int
pagedep_lookup(ip, lbn, flags, pagedeppp)
	struct inode *ip;
	ufs_lbn_t lbn;
	int flags;
	struct pagedep **pagedeppp;
{
	struct pagedep *pagedep;
	struct pagedep_hashhead *pagedephd;
	struct mount *mp;
	int i;

#ifdef DEBUG
	if (lk.lkt_held == NOHOLDER)
		panic("pagedep_lookup: lock not held");
#endif
	mp = ITOV(ip)->v_mount;
	pagedephd = PAGEDEP_HASH(mp, ip->i_number, lbn);
top:
	LIST_FOREACH(pagedep, pagedephd, pd_hash)
		if (ip->i_number == pagedep->pd_ino &&
		    lbn == pagedep->pd_lbn &&
		    mp == pagedep->pd_mnt)
			break;
	if (pagedep) {
		*pagedeppp = pagedep;
		return (1);
	}
	if ((flags & DEPALLOC) == 0) {
		*pagedeppp = NULL;
		return (0);
	}
	if (sema_get(&pagedep_in_progress, &lk) == 0) {
		ACQUIRE_LOCK(&lk);
		goto top;
	}
	MALLOC(pagedep, struct pagedep *, sizeof(struct pagedep), M_PAGEDEP,
	    M_SOFTDEP_FLAGS);
	bzero(pagedep, sizeof(struct pagedep));
	pagedep->pd_list.wk_type = D_PAGEDEP;
	pagedep->pd_mnt = mp;
	pagedep->pd_ino = ip->i_number;
	pagedep->pd_lbn = lbn;
	LIST_INIT(&pagedep->pd_dirremhd);
	LIST_INIT(&pagedep->pd_pendinghd);
	for (i = 0; i < DAHASHSZ; i++)
		LIST_INIT(&pagedep->pd_diraddhd[i]);
	ACQUIRE_LOCK(&lk);
	LIST_INSERT_HEAD(pagedephd, pagedep, pd_hash);
	sema_release(&pagedep_in_progress);
	*pagedeppp = pagedep;
	return (0);
}

/*
 * Structures and routines associated with inodedep caching.
 */
LIST_HEAD(inodedep_hashhead, inodedep) *inodedep_hashtbl;
static u_long	inodedep_hash;	/* size of hash table - 1 */
static long	num_inodedep;	/* number of inodedep allocated */
#define INODEDEP_HASH(fs, inum) \
	(&inodedep_hashtbl[((((register_t)(fs)) >> 13) + (inum)) & inodedep_hash])
static struct sema inodedep_in_progress;

/*
 * Look up an inodedep.  Return 1 if found, 0 if not found.
 * If not found, allocate if DEPALLOC flag is passed.
 * Found or allocated entry is returned in inodedeppp.
 * This routine must be called with splbio interrupts blocked.
 */
static int
inodedep_lookup(fs, inum, flags, inodedeppp)
	struct fs *fs;
	ino_t inum;
	int flags;
	struct inodedep **inodedeppp;
{
	struct inodedep *inodedep;
	struct inodedep_hashhead *inodedephd;
	int firsttry;

#ifdef DEBUG
	if (lk.lkt_held == NOHOLDER)
		panic("inodedep_lookup: lock not held");
#endif
	firsttry = 1;
	inodedephd = INODEDEP_HASH(fs, inum);
top:
	LIST_FOREACH(inodedep, inodedephd, id_hash)
		if (inum == inodedep->id_ino && fs == inodedep->id_fs)
			break;
	if (inodedep) {
		*inodedeppp = inodedep;
		return (1);
	}
	if ((flags & DEPALLOC) == 0) {
		*inodedeppp = NULL;
		return (0);
	}
	/*
	 * If we are over our limit, try to improve the situation.
	 */
	if (num_inodedep > max_softdeps && firsttry &&
	    speedup_syncer() == 0 && (flags & NODELAY) == 0 &&
	    request_cleanup(FLUSH_INODES, 1)) {
		firsttry = 0;
		goto top;
	}
	if (sema_get(&inodedep_in_progress, &lk) == 0) {
		ACQUIRE_LOCK(&lk);
		goto top;
	}
	num_inodedep += 1;
	MALLOC(inodedep, struct inodedep *, sizeof(struct inodedep),
	    M_INODEDEP, M_SOFTDEP_FLAGS);
	inodedep->id_list.wk_type = D_INODEDEP;
	inodedep->id_fs = fs;
	inodedep->id_ino = inum;
	inodedep->id_state = ALLCOMPLETE;
	inodedep->id_nlinkdelta = 0;
	inodedep->id_savedino = NULL;
	inodedep->id_savedsize = -1;
	inodedep->id_buf = NULL;
	LIST_INIT(&inodedep->id_pendinghd);
	LIST_INIT(&inodedep->id_inowait);
	LIST_INIT(&inodedep->id_bufwait);
	TAILQ_INIT(&inodedep->id_inoupdt);
	TAILQ_INIT(&inodedep->id_newinoupdt);
	ACQUIRE_LOCK(&lk);
	LIST_INSERT_HEAD(inodedephd, inodedep, id_hash);
	sema_release(&inodedep_in_progress);
	*inodedeppp = inodedep;
	return (0);
}

/*
 * Structures and routines associated with newblk caching.
 */
LIST_HEAD(newblk_hashhead, newblk) *newblk_hashtbl;
u_long	newblk_hash;		/* size of hash table - 1 */
#define NEWBLK_HASH(fs, inum) \
	(&newblk_hashtbl[((((register_t)(fs)) >> 13) + (inum)) & newblk_hash])
static struct sema newblk_in_progress;

/*
 * Look up a newblk.  Return 1 if found, 0 if not found.
 * If not found, allocate if DEPALLOC flag is passed.
 * Found or allocated entry is returned in newblkpp.
 */
static int
newblk_lookup(fs, newblkno, flags, newblkpp)
	struct fs *fs;
	ufs_daddr_t newblkno;
	int flags;
	struct newblk **newblkpp;
{
	struct newblk *newblk;
	struct newblk_hashhead *newblkhd;

	newblkhd = NEWBLK_HASH(fs, newblkno);
top:
	LIST_FOREACH(newblk, newblkhd, nb_hash)
		if (newblkno == newblk->nb_newblkno && fs == newblk->nb_fs)
			break;
	if (newblk) {
		*newblkpp = newblk;
		return (1);
	}
	if ((flags & DEPALLOC) == 0) {
		*newblkpp = NULL;
		return (0);
	}
	if (sema_get(&newblk_in_progress, NULL) == 0)
		goto top;
	MALLOC(newblk, struct newblk *, sizeof(struct newblk),
	    M_NEWBLK, M_SOFTDEP_FLAGS);
	newblk->nb_state = 0;
	newblk->nb_fs = fs;
	newblk->nb_newblkno = newblkno;
	LIST_INSERT_HEAD(newblkhd, newblk, nb_hash);
	sema_release(&newblk_in_progress);
	*newblkpp = newblk;
	return (0);
}

/*
 * Executed during filesystem initialization before
 * mounting any filesystems.
 */
void
softdep_initialize()
{
	callout_init(&handle);
	bioops = softdep_bioops;	/* XXX hack */

	LIST_INIT(&mkdirlisthd);
	LIST_INIT(&softdep_workitem_pending);
	max_softdeps = min(desiredvnodes * 8,
	    M_INODEDEP->ks_limit / (2 * sizeof(struct inodedep)));
	pagedep_hashtbl = hashinit(desiredvnodes / 5, M_PAGEDEP,
	    &pagedep_hash);
	sema_init(&pagedep_in_progress, "pagedep", 0, 0);
	inodedep_hashtbl = hashinit(desiredvnodes, M_INODEDEP, &inodedep_hash);
	sema_init(&inodedep_in_progress, "inodedep", 0, 0);
	newblk_hashtbl = hashinit(64, M_NEWBLK, &newblk_hash);
	sema_init(&newblk_in_progress, "newblk", 0, 0);
}
/*
 * Called at mount time to notify the dependency code that a
 * filesystem wishes to use it.
 */
int
softdep_mount(devvp, mp, fs)
	struct vnode *devvp;
	struct mount *mp;
	struct fs *fs;
{
	struct csum cstotal;
	struct cg *cgp;
	struct buf *bp;
	int error, cyl;

	mp->mnt_flag &= ~MNT_ASYNC;
	mp->mnt_flag |= MNT_SOFTDEP;
	/*
	 * When doing soft updates, the counters in the
	 * superblock may have gotten out of sync, so we have
	 * to scan the cylinder groups and recalculate them.
	 */
	if (fs->fs_clean != 0)
		return (0);
	bzero(&cstotal, sizeof cstotal);
	for (cyl = 0; cyl < fs->fs_ncg; cyl++) {
		if ((error = bread(devvp, fsbtodb(fs, cgtod(fs, cyl)),
		    fs->fs_cgsize, &bp)) != 0) {
			brelse(bp);
			return (error);
		}
		cgp = (struct cg *)bp->b_data;
		cstotal.cs_nffree += cgp->cg_cs.cs_nffree;
		cstotal.cs_nbfree += cgp->cg_cs.cs_nbfree;
		cstotal.cs_nifree += cgp->cg_cs.cs_nifree;
		cstotal.cs_ndir += cgp->cg_cs.cs_ndir;
		fs->fs_cs(fs, cyl) = cgp->cg_cs;
		brelse(bp);
	}
#ifdef DEBUG
	if (bcmp(&cstotal, &fs->fs_cstotal, sizeof cstotal))
		printf("ffs_mountfs: superblock updated for soft updates\n");
#endif
	bcopy(&cstotal, &fs->fs_cstotal, sizeof cstotal);
	return (0);
}

/*
 * Protecting the freemaps (or bitmaps).
 *
 * To eliminate the need to execute fsck before mounting a filesystem
 * after a power failure, one must (conservatively) guarantee that the
 * on-disk copy of the bitmaps never indicates that a live inode or block is
 * free.  So, when a block or inode is allocated, the bitmap should be
 * updated (on disk) before any new pointers.  When a block or inode is
 * freed, the bitmap should not be updated until all pointers have been
 * reset.  The latter dependency is handled by the delayed de-allocation
 * approach described below for block and inode de-allocation.  The former
 * dependency is handled by calling the following procedure when a block or
 * inode is allocated.  When an inode is allocated an "inodedep" is created
 * with its DEPCOMPLETE flag cleared until its bitmap is written to disk.
 * Each "inodedep" is also inserted into the hash indexing structure so
 * that any additional link additions can be made dependent on the inode
 * allocation.
 *
 * The ufs filesystem maintains a number of free block counts (e.g., per
 * cylinder group, per cylinder and per <cylinder, rotational position> pair)
 * in addition to the bitmaps.  These counts are used to improve efficiency
 * during allocation and therefore must be consistent with the bitmaps.
 * There is no convenient way to guarantee post-crash consistency of these
 * counts with simple update ordering, for two main reasons: (1) The counts
 * and bitmaps for a single cylinder group block are not in the same disk
 * sector.  If a disk write is interrupted (e.g., by power failure), one may
 * be written and the other not.  (2) Some of the counts are located in the
 * superblock rather than the cylinder group block.  So, we focus our soft
 * updates implementation on protecting the bitmaps.  When mounting a
 * filesystem, we recompute the auxiliary counts from the bitmaps.
 */
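/*
 * Illustrative sketch (not built): the ordering rule stated above,
 * expressed in terms of the structures used below.  A newly allocated
 * inode starts with DEPCOMPLETE clear; only the completed write of the
 * cylinder group bitmap buffer sets it, and dependent structures are
 * held back until then.  bitmap_safe_example() is hypothetical.
 */
#if 0
static int
bitmap_safe_example(struct inodedep *inodedep)
{
	/*
	 * A buffer that references the new inode may be written only
	 * once the bitmap allocating that inode is safely on disk.
	 */
	return ((inodedep->id_state & DEPCOMPLETE) != 0);
}
#endif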
/*
 * Called just after updating the cylinder group block to allocate an inode.
 */
void
softdep_setup_inomapdep(bp, ip, newinum)
	struct buf *bp;		/* buffer for cylgroup block with inode map */
	struct inode *ip;	/* inode related to allocation */
	ino_t newinum;		/* new inode number being allocated */
{
	struct inodedep *inodedep;
	struct bmsafemap *bmsafemap;

	/*
	 * Create a dependency for the newly allocated inode.
	 * Panic if it already exists as something is seriously wrong.
	 * Otherwise add it to the dependency list for the buffer holding
	 * the cylinder group map from which it was allocated.
	 */
	ACQUIRE_LOCK(&lk);
	if (inodedep_lookup(ip->i_fs, newinum, DEPALLOC | NODELAY, &inodedep)) {
		FREE_LOCK(&lk);
		panic("softdep_setup_inomapdep: found inode");
	}
	inodedep->id_buf = bp;
	inodedep->id_state &= ~DEPCOMPLETE;
	bmsafemap = bmsafemap_lookup(bp);
	LIST_INSERT_HEAD(&bmsafemap->sm_inodedephd, inodedep, id_deps);
	FREE_LOCK(&lk);
}

/*
 * Called just after updating the cylinder group block to
 * allocate block or fragment.
 */
void
softdep_setup_blkmapdep(bp, fs, newblkno)
	struct buf *bp;		/* buffer for cylgroup block with block map */
	struct fs *fs;		/* filesystem doing allocation */
	ufs_daddr_t newblkno;	/* number of newly allocated block */
{
	struct newblk *newblk;
	struct bmsafemap *bmsafemap;

	/*
	 * Create a dependency for the newly allocated block.
	 * Add it to the dependency list for the buffer holding
	 * the cylinder group map from which it was allocated.
	 */
	if (newblk_lookup(fs, newblkno, DEPALLOC, &newblk) != 0)
		panic("softdep_setup_blkmapdep: found block");
	ACQUIRE_LOCK(&lk);
	newblk->nb_bmsafemap = bmsafemap = bmsafemap_lookup(bp);
	LIST_INSERT_HEAD(&bmsafemap->sm_newblkhd, newblk, nb_deps);
	FREE_LOCK(&lk);
}

/*
 * Find the bmsafemap associated with a cylinder group buffer.
 * If none exists, create one.  The buffer must be locked when
 * this routine is called and this routine must be called with
 * splbio interrupts blocked.
 */
static struct bmsafemap *
bmsafemap_lookup(bp)
	struct buf *bp;
{
	struct bmsafemap *bmsafemap;
	struct worklist *wk;

#ifdef DEBUG
	if (lk.lkt_held == NOHOLDER)
		panic("bmsafemap_lookup: lock not held");
#endif
	LIST_FOREACH(wk, &bp->b_dep, wk_list)
		if (wk->wk_type == D_BMSAFEMAP)
			return (WK_BMSAFEMAP(wk));
	FREE_LOCK(&lk);
	MALLOC(bmsafemap, struct bmsafemap *, sizeof(struct bmsafemap),
	    M_BMSAFEMAP, M_SOFTDEP_FLAGS);
	bmsafemap->sm_list.wk_type = D_BMSAFEMAP;
	bmsafemap->sm_list.wk_state = 0;
	bmsafemap->sm_buf = bp;
	LIST_INIT(&bmsafemap->sm_allocdirecthd);
	LIST_INIT(&bmsafemap->sm_allocindirhd);
	LIST_INIT(&bmsafemap->sm_inodedephd);
	LIST_INIT(&bmsafemap->sm_newblkhd);
	ACQUIRE_LOCK(&lk);
	WORKLIST_INSERT(&bp->b_dep, &bmsafemap->sm_list);
	return (bmsafemap);
}

/*
 * Direct block allocation dependencies.
 *
 * When a new block is allocated, the corresponding disk locations must be
 * initialized (with zeros or new data) before the on-disk inode points to
 * them.  Also, the freemap from which the block was allocated must be
 * updated (on disk) before the inode's pointer.  These two dependencies are
 * independent of each other and are needed for all file blocks and indirect
 * blocks that are pointed to directly by the inode.  Just before the
 * "in-core" version of the inode is updated with a newly allocated block
 * number, a procedure (below) is called to setup allocation dependency
 * structures.  These structures are removed when the corresponding
 * dependencies are satisfied or when the block allocation becomes obsolete
 * (i.e., the file is deleted, the block is de-allocated, or the block is a
 * fragment that gets upgraded).  All of these cases are handled in
 * procedures described later.
 *
 * When a file extension causes a fragment to be upgraded, either to a larger
 * fragment or to a full block, the on-disk location may change (if the
 * previous fragment could not simply be extended).  In this case, the old
 * fragment must be de-allocated, but not until after the inode's pointer has
 * been updated.  In most cases, this is handled by later procedures, which
 * will construct a "freefrag" structure to be added to the workitem queue
 * when the inode update is complete (or obsolete).  The main exception to
 * this is when an allocation occurs while a pending allocation dependency
 * (for the same block pointer) remains.  This case is handled in the main
 * allocation dependency setup procedure by immediately freeing the
 * unreferenced fragments.
 */
void
softdep_setup_allocdirect(ip, lbn, newblkno, oldblkno, newsize, oldsize, bp)
	struct inode *ip;	/* inode to which block is being added */
	ufs_lbn_t lbn;		/* block pointer within inode */
	ufs_daddr_t newblkno;	/* disk block number being added */
	ufs_daddr_t oldblkno;	/* previous block number, 0 unless frag */
	long newsize;		/* size of new block */
	long oldsize;		/* size of old block */
	struct buf *bp;		/* bp for allocated block */
{
	struct allocdirect *adp, *oldadp;
	struct allocdirectlst *adphead;
	struct bmsafemap *bmsafemap;
	struct inodedep *inodedep;
	struct pagedep *pagedep;
	struct newblk *newblk;

	MALLOC(adp, struct allocdirect *, sizeof(struct allocdirect),
	    M_ALLOCDIRECT, M_SOFTDEP_FLAGS);
	bzero(adp, sizeof(struct allocdirect));
	adp->ad_list.wk_type = D_ALLOCDIRECT;
	adp->ad_lbn = lbn;
	adp->ad_newblkno = newblkno;
	adp->ad_oldblkno = oldblkno;
	adp->ad_newsize = newsize;
	adp->ad_oldsize = oldsize;
	adp->ad_state = ATTACHED;
	if (newblkno == oldblkno)
		adp->ad_freefrag = NULL;
	else
		adp->ad_freefrag = newfreefrag(ip, oldblkno, oldsize);

	if (newblk_lookup(ip->i_fs, newblkno, 0, &newblk) == 0)
		panic("softdep_setup_allocdirect: lost block");

	ACQUIRE_LOCK(&lk);
	inodedep_lookup(ip->i_fs, ip->i_number, DEPALLOC | NODELAY, &inodedep);
	adp->ad_inodedep = inodedep;

	if (newblk->nb_state == DEPCOMPLETE) {
		adp->ad_state |= DEPCOMPLETE;
		adp->ad_buf = NULL;
	} else {
		bmsafemap = newblk->nb_bmsafemap;
		adp->ad_buf = bmsafemap->sm_buf;
		LIST_REMOVE(newblk, nb_deps);
		LIST_INSERT_HEAD(&bmsafemap->sm_allocdirecthd, adp, ad_deps);
	}
	LIST_REMOVE(newblk, nb_hash);
	FREE(newblk, M_NEWBLK);

	WORKLIST_INSERT(&bp->b_dep, &adp->ad_list);
	if (lbn >= NDADDR) {
		/* allocating an indirect block */
		if (oldblkno != 0) {
			FREE_LOCK(&lk);
			panic("softdep_setup_allocdirect: non-zero indir");
		}
	} else {
		/*
		 * Allocating a direct block.
		 *
		 * If we are allocating a directory block, then we must
		 * allocate an associated pagedep to track additions and
		 * deletions.
		 */
		if ((ip->i_mode & IFMT) == IFDIR &&
		    pagedep_lookup(ip, lbn, DEPALLOC, &pagedep) == 0)
			WORKLIST_INSERT(&bp->b_dep, &pagedep->pd_list);
	}
	/*
	 * The list of allocdirects must be kept in sorted and ascending
	 * order so that the rollback routines can quickly determine the
	 * first uncommitted block (the size of the file stored on disk
	 * ends at the end of the lowest committed fragment, or if there
	 * are no fragments, at the end of the highest committed block).
	 * Since files generally grow, the typical case is that the new
	 * block is to be added at the end of the list.  We speed this
	 * special case by checking against the last allocdirect in the
	 * list before laboriously traversing the list looking for the
	 * insertion point.
	 */
	adphead = &inodedep->id_newinoupdt;
	oldadp = TAILQ_LAST(adphead, allocdirectlst);
	if (oldadp == NULL || oldadp->ad_lbn <= lbn) {
		/* insert at end of list */
		TAILQ_INSERT_TAIL(adphead, adp, ad_next);
		if (oldadp != NULL && oldadp->ad_lbn == lbn)
			allocdirect_merge(adphead, adp, oldadp);
		FREE_LOCK(&lk);
		return;
	}
	TAILQ_FOREACH(oldadp, adphead, ad_next) {
		if (oldadp->ad_lbn >= lbn)
			break;
	}
	if (oldadp == NULL) {
		FREE_LOCK(&lk);
		panic("softdep_setup_allocdirect: lost entry");
	}
	/* insert in middle of list */
	TAILQ_INSERT_BEFORE(oldadp, adp, ad_next);
	if (oldadp->ad_lbn == lbn)
		allocdirect_merge(adphead, adp, oldadp);
	FREE_LOCK(&lk);
}

/*
 * Replace an old allocdirect dependency with a newer one.
 * This routine must be called with splbio interrupts blocked.
 */
static void
allocdirect_merge(adphead, newadp, oldadp)
	struct allocdirectlst *adphead;	/* head of list holding allocdirects */
	struct allocdirect *newadp;	/* allocdirect being added */
	struct allocdirect *oldadp;	/* existing allocdirect being checked */
{
	struct freefrag *freefrag;

#ifdef DEBUG
	if (lk.lkt_held == NOHOLDER)
		panic("allocdirect_merge: lock not held");
#endif
	if (newadp->ad_oldblkno != oldadp->ad_newblkno ||
	    newadp->ad_oldsize != oldadp->ad_newsize ||
	    newadp->ad_lbn >= NDADDR) {
		FREE_LOCK(&lk);
		panic("allocdirect_merge: old %d != new %d || lbn %ld >= %d",
		    newadp->ad_oldblkno, oldadp->ad_newblkno, newadp->ad_lbn,
		    NDADDR);
	}
	newadp->ad_oldblkno = oldadp->ad_oldblkno;
	newadp->ad_oldsize = oldadp->ad_oldsize;
	/*
	 * If the old dependency had a fragment to free or had never
	 * previously had a block allocated, then the new dependency
	 * can immediately post its freefrag and adopt the old freefrag.
	 * This action is done by swapping the freefrag dependencies.
	 * The new dependency gains the old one's freefrag, and the
	 * old one gets the new one and then immediately puts it on
	 * the worklist when it is freed by free_allocdirect.  It is
	 * not possible to do this swap when the old dependency had a
	 * non-zero size but no previous fragment to free.  This condition
	 * arises when the new block is an extension of the old block.
	 * Here, the first part of the fragment allocated to the new
	 * dependency is part of the block currently claimed on disk by
	 * the old dependency, so cannot legitimately be freed until the
	 * conditions for the new dependency are fulfilled.
	 */
	if (oldadp->ad_freefrag != NULL || oldadp->ad_oldblkno == 0) {
		freefrag = newadp->ad_freefrag;
		newadp->ad_freefrag = oldadp->ad_freefrag;
		oldadp->ad_freefrag = freefrag;
	}
	free_allocdirect(adphead, oldadp, 0);
}

/*
 * Allocate a new freefrag structure if needed.
 */
static struct freefrag *
newfreefrag(ip, blkno, size)
	struct inode *ip;
	ufs_daddr_t blkno;
	long size;
{
	struct freefrag *freefrag;
	struct fs *fs;

	if (blkno == 0)
		return (NULL);
	fs = ip->i_fs;
	if (fragnum(fs, blkno) + numfrags(fs, size) > fs->fs_frag)
		panic("newfreefrag: frag size");
	MALLOC(freefrag, struct freefrag *, sizeof(struct freefrag),
	    M_FREEFRAG, M_SOFTDEP_FLAGS);
	freefrag->ff_list.wk_type = D_FREEFRAG;
	freefrag->ff_state = ip->i_uid & ~ONWORKLIST;	/* XXX - used below */
	freefrag->ff_inum = ip->i_number;
	freefrag->ff_fs = fs;
	freefrag->ff_devvp = ip->i_devvp;
	freefrag->ff_blkno = blkno;
	freefrag->ff_fragsize = size;
	return (freefrag);
}

/*
 * This workitem de-allocates fragments that were replaced during
 * file block allocation.
 */
static void
handle_workitem_freefrag(freefrag)
	struct freefrag *freefrag;
{
	struct inode tip;

	tip.i_fs = freefrag->ff_fs;
	tip.i_devvp = freefrag->ff_devvp;
	tip.i_dev = freefrag->ff_devvp->v_rdev;
	tip.i_number = freefrag->ff_inum;
	tip.i_uid = freefrag->ff_state & ~ONWORKLIST;	/* XXX - set above */
	ffs_blkfree(&tip, freefrag->ff_blkno, freefrag->ff_fragsize);
	FREE(freefrag, M_FREEFRAG);
}

/*
 * Indirect block allocation dependencies.
 *
 * The same dependencies that exist for a direct block also exist when
 * a new block is allocated and pointed to by an entry in a block of
 * indirect pointers.  The undo/redo states described above are also
 * used here.  Because an indirect block contains many pointers that
 * may have dependencies, a second copy of the entire in-memory indirect
 * block is kept.  The buffer cache copy is always completely up-to-date.
 * The second copy, which is used only as a source for disk writes,
 * contains only the safe pointers (i.e., those that have no remaining
 * update dependencies).  The second copy is freed when all pointers
 * are safe.  The cache is not allowed to replace indirect blocks with
 * pending update dependencies.  If a buffer containing an indirect
 * block with dependencies is written, these routines will mark it
 * dirty again.  It can only be successfully written once all the
 * dependencies are removed.  The ffs_fsync routine and
 * softdep_sync_metadata work together to get all the dependencies
 * removed so that a file can be successfully written to disk.  Three
 * procedures are used when setting up indirect block pointer
 * dependencies.  The division is necessary because of the organization
 * of the "balloc" routine and because of the distinction between file
 * pages and file metadata blocks.
 */

/*
 * Allocate a new allocindir structure.
 */
static struct allocindir *
newallocindir(ip, ptrno, newblkno, oldblkno)
	struct inode *ip;	/* inode for file being extended */
	int ptrno;		/* offset of pointer in indirect block */
	ufs_daddr_t newblkno;	/* disk block number being added */
	ufs_daddr_t oldblkno;	/* previous block number, 0 if none */
{
	struct allocindir *aip;

	MALLOC(aip, struct allocindir *, sizeof(struct allocindir),
	    M_ALLOCINDIR, M_SOFTDEP_FLAGS);
	bzero(aip, sizeof(struct allocindir));
	aip->ai_list.wk_type = D_ALLOCINDIR;
	aip->ai_state = ATTACHED;
	aip->ai_offset = ptrno;
	aip->ai_newblkno = newblkno;
	aip->ai_oldblkno = oldblkno;
	aip->ai_freefrag = newfreefrag(ip, oldblkno, ip->i_fs->fs_bsize);
	return (aip);
}

/*
 * Called just before setting an indirect block pointer
 * to a newly allocated file page.
 */
void
softdep_setup_allocindir_page(ip, lbn, bp, ptrno, newblkno, oldblkno, nbp)
	struct inode *ip;	/* inode for file being extended */
	ufs_lbn_t lbn;		/* allocated block number within file */
	struct buf *bp;		/* buffer with indirect blk referencing page */
	int ptrno;		/* offset of pointer in indirect block */
	ufs_daddr_t newblkno;	/* disk block number being added */
	ufs_daddr_t oldblkno;	/* previous block number, 0 if none */
	struct buf *nbp;	/* buffer holding allocated page */
{
	struct allocindir *aip;
	struct pagedep *pagedep;

	aip = newallocindir(ip, ptrno, newblkno, oldblkno);
	ACQUIRE_LOCK(&lk);
	/*
	 * If we are allocating a directory page, then we must
	 * allocate an associated pagedep to track additions and
	 * deletions.
	 */
	if ((ip->i_mode & IFMT) == IFDIR &&
	    pagedep_lookup(ip, lbn, DEPALLOC, &pagedep) == 0)
		WORKLIST_INSERT(&nbp->b_dep, &pagedep->pd_list);
	WORKLIST_INSERT(&nbp->b_dep, &aip->ai_list);
	FREE_LOCK(&lk);
	setup_allocindir_phase2(bp, ip, aip);
}

/*
 * Called just before setting an indirect block pointer to a
 * newly allocated indirect block.
 */
void
softdep_setup_allocindir_meta(nbp, ip, bp, ptrno, newblkno)
	struct buf *nbp;	/* newly allocated indirect block */
	struct inode *ip;	/* inode for file being extended */
	struct buf *bp;		/* indirect block referencing allocated block */
	int ptrno;		/* offset of pointer in indirect block */
	ufs_daddr_t newblkno;	/* disk block number being added */
{
	struct allocindir *aip;

	aip = newallocindir(ip, ptrno, newblkno, 0);
	ACQUIRE_LOCK(&lk);
	WORKLIST_INSERT(&nbp->b_dep, &aip->ai_list);
	FREE_LOCK(&lk);
	setup_allocindir_phase2(bp, ip, aip);
}

/*
 * Called to finish the allocation of the "aip" allocated
 * by one of the two routines above.
 */
static void
setup_allocindir_phase2(bp, ip, aip)
	struct buf *bp;		/* in-memory copy of the indirect block */
	struct inode *ip;	/* inode for file being extended */
	struct allocindir *aip;	/* allocindir allocated by the above routines */
{
	struct worklist *wk;
	struct indirdep *indirdep, *newindirdep;
	struct bmsafemap *bmsafemap;
	struct allocindir *oldaip;
	struct freefrag *freefrag;
	struct newblk *newblk;

	if (bp->b_lblkno >= 0)
		panic("setup_allocindir_phase2: not indir blk");
	for (indirdep = NULL, newindirdep = NULL; ; ) {
		ACQUIRE_LOCK(&lk);
		LIST_FOREACH(wk, &bp->b_dep, wk_list) {
			if (wk->wk_type != D_INDIRDEP)
				continue;
			indirdep = WK_INDIRDEP(wk);
			break;
		}
		if (indirdep == NULL && newindirdep) {
			indirdep = newindirdep;
			WORKLIST_INSERT(&bp->b_dep, &indirdep->ir_list);
			newindirdep = NULL;
		}
		FREE_LOCK(&lk);
		if (indirdep) {
			if (newblk_lookup(ip->i_fs, aip->ai_newblkno, 0,
			    &newblk) == 0)
				panic("setup_allocindir: lost block");
			ACQUIRE_LOCK(&lk);
			if (newblk->nb_state == DEPCOMPLETE) {
				aip->ai_state |= DEPCOMPLETE;
				aip->ai_buf = NULL;
			} else {
				bmsafemap = newblk->nb_bmsafemap;
				aip->ai_buf = bmsafemap->sm_buf;
				LIST_REMOVE(newblk, nb_deps);
				LIST_INSERT_HEAD(&bmsafemap->sm_allocindirhd,
				    aip, ai_deps);
			}
			LIST_REMOVE(newblk, nb_hash);
			FREE(newblk, M_NEWBLK);
			aip->ai_indirdep = indirdep;
			/*
			 * Check to see if there is an existing dependency
			 * for this block.  If there is, merge the old
			 * dependency into the new one.
			 */
			if (aip->ai_oldblkno == 0)
				oldaip = NULL;
			else
				LIST_FOREACH(oldaip, &indirdep->ir_deplisthd, ai_next)
					if (oldaip->ai_offset == aip->ai_offset)
						break;
			if (oldaip != NULL) {
				if (oldaip->ai_newblkno != aip->ai_oldblkno) {
					FREE_LOCK(&lk);
					panic("setup_allocindir_phase2: blkno");
				}
				aip->ai_oldblkno = oldaip->ai_oldblkno;
				freefrag = oldaip->ai_freefrag;
				oldaip->ai_freefrag = aip->ai_freefrag;
				aip->ai_freefrag = freefrag;
				free_allocindir(oldaip, NULL);
			}
			LIST_INSERT_HEAD(&indirdep->ir_deplisthd, aip, ai_next);
			((ufs_daddr_t *)indirdep->ir_savebp->b_data)
			    [aip->ai_offset] = aip->ai_oldblkno;
			FREE_LOCK(&lk);
		}
		if (newindirdep) {
			/*
			 * Avoid any possibility of data corruption by
			 * ensuring that our old version is thrown away.
			 */
			newindirdep->ir_savebp->b_flags |= B_INVAL | B_NOCACHE;
			brelse(newindirdep->ir_savebp);
			WORKITEM_FREE((caddr_t)newindirdep, D_INDIRDEP);
		}
		if (indirdep)
			break;
		MALLOC(newindirdep, struct indirdep *, sizeof(struct indirdep),
		    M_INDIRDEP, M_SOFTDEP_FLAGS);
		newindirdep->ir_list.wk_type = D_INDIRDEP;
		newindirdep->ir_state = ATTACHED;
		LIST_INIT(&newindirdep->ir_deplisthd);
		LIST_INIT(&newindirdep->ir_donehd);
		if (bp->b_blkno == bp->b_lblkno) {
			VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno,
			    NULL, NULL);
		}
		newindirdep->ir_savebp =
		    getblk(ip->i_devvp, bp->b_blkno, bp->b_bcount, 0, 0);
		BUF_KERNPROC(newindirdep->ir_savebp);
		bcopy(bp->b_data, newindirdep->ir_savebp->b_data, bp->b_bcount);
	}
}
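/*
 * Illustrative sketch (not built): the rollback that the saved indirect
 * block (ir_savebp, set up above) makes possible.  The buffer cache copy
 * always holds the new pointer; the save copy keeps the old pointer
 * until the dependency completes, so a write of the indirect block never
 * exposes a pointer to an uninitialized block.
 * choose_safe_pointer_example() is hypothetical.
 */
#if 0
static ufs_daddr_t
choose_safe_pointer_example(struct indirdep *indirdep,
			    struct allocindir *aip)
{
	ufs_daddr_t *savep;

	savep = (ufs_daddr_t *)indirdep->ir_savebp->b_data;
	/* Until DEPCOMPLETE, disk writes see the old (safe) pointer. */
	if ((aip->ai_state & DEPCOMPLETE) == 0)
		return (savep[aip->ai_offset]);
	return (aip->ai_newblkno);
}
#endif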
1712 * 1713 * When blocks are de-allocated, the on-disk pointers must be nullified before 1714 * the blocks are made available for use by other files. (The true 1715 * requirement is that old pointers must be nullified before new on-disk 1716 * pointers are set. We chose this slightly more stringent requirement to 1717 * reduce complexity.) Our implementation handles this dependency by updating 1718 * the inode (or indirect block) appropriately but delaying the actual block 1719 * de-allocation (i.e., freemap and free space count manipulation) until 1720 * after the updated versions reach stable storage. After the disk is 1721 * updated, the blocks can be safely de-allocated whenever it is convenient. 1722 * This implementation handles only the common case of reducing a file's 1723 * length to zero. Other cases are handled by the conventional synchronous 1724 * write approach. 1725 * 1726 * The ffs implementation with which we worked double-checks 1727 * the state of the block pointers and file size as it reduces 1728 * a file's length. Some of this code is replicated here in our 1729 * soft updates implementation. The freeblks->fb_chkcnt field is 1730 * used to transfer a part of this information to the procedure 1731 * that eventually de-allocates the blocks. 1732 * 1733 * This routine should be called from the routine that shortens 1734 * a file's length, before the inode's size or block pointers 1735 * are modified. It will save the block pointer information for 1736 * later release and zero the inode so that the calling routine 1737 * can release it. 1738 */ 1739 struct softdep_setup_freeblocks_info { 1740 struct fs *fs; 1741 struct inode *ip; 1742 }; 1743 1744 static int softdep_setup_freeblocks_bp(struct buf *bp, void *data); 1745 1746 void 1747 softdep_setup_freeblocks(ip, length) 1748 struct inode *ip; /* The inode whose length is to be reduced */ 1749 off_t length; /* The new length for the file */ 1750 { 1751 struct softdep_setup_freeblocks_info info; 1752 struct freeblks *freeblks; 1753 struct inodedep *inodedep; 1754 struct allocdirect *adp; 1755 struct vnode *vp; 1756 struct buf *bp; 1757 struct fs *fs; 1758 int i, error, delay; 1759 int count; 1760 1761 fs = ip->i_fs; 1762 if (length != 0) 1763 panic("softdep_setup_freeblocks: non-zero length"); 1764 MALLOC(freeblks, struct freeblks *, sizeof(struct freeblks), 1765 M_FREEBLKS, M_SOFTDEP_FLAGS); 1766 bzero(freeblks, sizeof(struct freeblks)); 1767 freeblks->fb_list.wk_type = D_FREEBLKS; 1768 freeblks->fb_state = ATTACHED; 1769 freeblks->fb_uid = ip->i_uid; 1770 freeblks->fb_previousinum = ip->i_number; 1771 freeblks->fb_devvp = ip->i_devvp; 1772 freeblks->fb_fs = fs; 1773 freeblks->fb_oldsize = ip->i_size; 1774 freeblks->fb_newsize = length; 1775 freeblks->fb_chkcnt = ip->i_blocks; 1776 for (i = 0; i < NDADDR; i++) { 1777 freeblks->fb_dblks[i] = ip->i_db[i]; 1778 ip->i_db[i] = 0; 1779 } 1780 for (i = 0; i < NIADDR; i++) { 1781 freeblks->fb_iblks[i] = ip->i_ib[i]; 1782 ip->i_ib[i] = 0; 1783 } 1784 ip->i_blocks = 0; 1785 ip->i_size = 0; 1786 /* 1787 * Push the zero'ed inode to its disk buffer so that we are free 1788 * to delete its dependencies below. Once the dependencies are gone 1789 * the buffer can be safely released.
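 *
 * (The assignment below copies the in-memory dinode -- with all of its
 * block pointers now cleared -- into the inode block's buffer; the
 * bdwrite() further down then marks that buffer dirty, ensuring the
 * zero'ed inode reaches the disk ahead of any block de-allocation.)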
1790 */ 1791 if ((error = bread(ip->i_devvp, 1792 fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), 1793 (int)fs->fs_bsize, &bp)) != 0) 1794 softdep_error("softdep_setup_freeblocks", error); 1795 *((struct dinode *)bp->b_data + ino_to_fsbo(fs, ip->i_number)) = 1796 ip->i_din; 1797 /* 1798 * Find and eliminate any inode dependencies. 1799 */ 1800 ACQUIRE_LOCK(&lk); 1801 (void) inodedep_lookup(fs, ip->i_number, DEPALLOC, &inodedep); 1802 if ((inodedep->id_state & IOSTARTED) != 0) { 1803 FREE_LOCK(&lk); 1804 panic("softdep_setup_freeblocks: inode busy"); 1805 } 1806 /* 1807 * Add the freeblks structure to the list of operations that 1808 * must await the zero'ed inode being written to disk. If we 1809 * still have a bitmap dependency (delay == 0), then the inode 1810 * has never been written to disk, so we can process the 1811 * freeblks below once we have deleted the dependencies. 1812 */ 1813 delay = (inodedep->id_state & DEPCOMPLETE); 1814 if (delay) 1815 WORKLIST_INSERT(&inodedep->id_bufwait, &freeblks->fb_list); 1816 /* 1817 * Because the file length has been truncated to zero, any 1818 * pending block allocation dependency structures associated 1819 * with this inode are obsolete and can simply be de-allocated. 1820 * We must first merge the two dependency lists to get rid of 1821 * any duplicate freefrag structures, then purge the merged list. 1822 */ 1823 merge_inode_lists(inodedep); 1824 while ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != 0) 1825 free_allocdirect(&inodedep->id_inoupdt, adp, 1); 1826 FREE_LOCK(&lk); 1827 bdwrite(bp); 1828 /* 1829 * We must wait for any I/O in progress to finish so that 1830 * all potential buffers on the dirty list will be visible. 1831 * Once they are all there, walk the list and get rid of 1832 * any dependencies. 1833 */ 1834 vp = ITOV(ip); 1835 ACQUIRE_LOCK(&lk); 1836 drain_output(vp, 1); 1837 1838 info.fs = fs; 1839 info.ip = ip; 1840 do { 1841 count = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL, 1842 softdep_setup_freeblocks_bp, &info); 1843 } while (count > 0); 1844 if (inodedep_lookup(fs, ip->i_number, 0, &inodedep) != 0) 1845 (void)free_inodedep(inodedep); 1846 1847 if (delay) { 1848 freeblks->fb_state |= DEPCOMPLETE; 1849 /* 1850 * If the inode with zeroed block pointers is now on disk 1851 * we can start freeing blocks. Add freeblks to the worklist 1852 * instead of calling handle_workitem_freeblocks directly as 1853 * it is more likely that additional IO is needed to complete 1854 * the request here than in the !delay case. 1855 */ 1856 if ((freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE) 1857 add_to_worklist(&freeblks->fb_list); 1858 } 1859 1860 FREE_LOCK(&lk); 1861 /* 1862 * If the inode has never been written to disk (delay == 0), 1863 * then we can process the freeblks now that we have deleted 1864 * the dependencies. 1865 */ 1866 if (!delay) 1867 handle_workitem_freeblocks(freeblks); 1868 } 1869 1870 static int 1871 softdep_setup_freeblocks_bp(struct buf *bp, void *data) 1872 { 1873 struct softdep_setup_freeblocks_info *info = data; 1874 struct inodedep *inodedep; 1875 1876 if (getdirtybuf(&bp, MNT_WAIT) == 0) 1877 return(-1); 1878 (void) inodedep_lookup(info->fs, info->ip->i_number, 0, &inodedep); 1879 deallocate_dependencies(bp, inodedep); 1880 bp->b_flags |= B_INVAL | B_NOCACHE; 1881 FREE_LOCK(&lk); 1882 brelse(bp); 1883 ACQUIRE_LOCK(&lk); 1884 return(1); 1885 } 1886 1887 /* 1888 * Reclaim any dependency structures from a buffer that is about to 1889 * be reallocated to a new vnode. 
The buffer must be locked, thus, 1890 * no I/O completion operations can occur while we are manipulating 1891 * its associated dependencies. The mutex is held so that other I/O's 1892 * associated with related dependencies do not occur. 1893 */ 1894 static void 1895 deallocate_dependencies(bp, inodedep) 1896 struct buf *bp; 1897 struct inodedep *inodedep; 1898 { 1899 struct worklist *wk; 1900 struct indirdep *indirdep; 1901 struct allocindir *aip; 1902 struct pagedep *pagedep; 1903 struct dirrem *dirrem; 1904 struct diradd *dap; 1905 int i; 1906 1907 while ((wk = LIST_FIRST(&bp->b_dep)) != NULL) { 1908 switch (wk->wk_type) { 1909 1910 case D_INDIRDEP: 1911 indirdep = WK_INDIRDEP(wk); 1912 /* 1913 * None of the indirect pointers will ever be visible, 1914 * so they can simply be tossed. GOINGAWAY ensures 1915 * that allocated pointers will be saved in the buffer 1916 * cache until they are freed. Note that they will 1917 * only be able to be found by their physical address 1918 * since the inode mapping the logical address will 1919 * be gone. The save buffer used for the safe copy 1920 * was allocated in setup_allocindir_phase2 using 1921 * the physical address so it could be used for this 1922 * purpose. Hence we swap the safe copy with the real 1923 * copy, allowing the safe copy to be freed and holding 1924 * on to the real copy for later use in indir_trunc. 1925 */ 1926 if (indirdep->ir_state & GOINGAWAY) { 1927 FREE_LOCK(&lk); 1928 panic("deallocate_dependencies: already gone"); 1929 } 1930 indirdep->ir_state |= GOINGAWAY; 1931 while ((aip = LIST_FIRST(&indirdep->ir_deplisthd)) != 0) 1932 free_allocindir(aip, inodedep); 1933 if (bp->b_lblkno >= 0 || 1934 bp->b_blkno != indirdep->ir_savebp->b_lblkno) { 1935 FREE_LOCK(&lk); 1936 panic("deallocate_dependencies: not indir"); 1937 } 1938 bcopy(bp->b_data, indirdep->ir_savebp->b_data, 1939 bp->b_bcount); 1940 WORKLIST_REMOVE(wk); 1941 WORKLIST_INSERT(&indirdep->ir_savebp->b_dep, wk); 1942 continue; 1943 1944 case D_PAGEDEP: 1945 pagedep = WK_PAGEDEP(wk); 1946 /* 1947 * None of the directory additions will ever be 1948 * visible, so they can simply be tossed. 1949 */ 1950 for (i = 0; i < DAHASHSZ; i++) 1951 while ((dap = 1952 LIST_FIRST(&pagedep->pd_diraddhd[i]))) 1953 free_diradd(dap); 1954 while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != 0) 1955 free_diradd(dap); 1956 /* 1957 * Copy any directory remove dependencies to the list 1958 * to be processed after the zero'ed inode is written. 1959 * If the inode has already been written, then they 1960 * can be dumped directly onto the work list. 
1961 */ 1962 while ((dirrem = LIST_FIRST(&pagedep->pd_dirremhd)) != NULL) { 1963 LIST_REMOVE(dirrem, dm_next); 1964 dirrem->dm_dirinum = pagedep->pd_ino; 1965 if (inodedep == NULL || 1966 (inodedep->id_state & ALLCOMPLETE) == 1967 ALLCOMPLETE) 1968 add_to_worklist(&dirrem->dm_list); 1969 else 1970 WORKLIST_INSERT(&inodedep->id_bufwait, 1971 &dirrem->dm_list); 1972 } 1973 WORKLIST_REMOVE(&pagedep->pd_list); 1974 LIST_REMOVE(pagedep, pd_hash); 1975 WORKITEM_FREE(pagedep, D_PAGEDEP); 1976 continue; 1977 1978 case D_ALLOCINDIR: 1979 free_allocindir(WK_ALLOCINDIR(wk), inodedep); 1980 continue; 1981 1982 case D_ALLOCDIRECT: 1983 case D_INODEDEP: 1984 FREE_LOCK(&lk); 1985 panic("deallocate_dependencies: Unexpected type %s", 1986 TYPENAME(wk->wk_type)); 1987 /* NOTREACHED */ 1988 1989 default: 1990 FREE_LOCK(&lk); 1991 panic("deallocate_dependencies: Unknown type %s", 1992 TYPENAME(wk->wk_type)); 1993 /* NOTREACHED */ 1994 } 1995 } 1996 } 1997 1998 /* 1999 * Free an allocdirect. Generate a new freefrag work request if appropriate. 2000 * This routine must be called with splbio interrupts blocked. 2001 */ 2002 static void 2003 free_allocdirect(adphead, adp, delay) 2004 struct allocdirectlst *adphead; 2005 struct allocdirect *adp; 2006 int delay; 2007 { 2008 2009 #ifdef DEBUG 2010 if (lk.lkt_held == NOHOLDER) 2011 panic("free_allocdirect: lock not held"); 2012 #endif 2013 if ((adp->ad_state & DEPCOMPLETE) == 0) 2014 LIST_REMOVE(adp, ad_deps); 2015 TAILQ_REMOVE(adphead, adp, ad_next); 2016 if ((adp->ad_state & COMPLETE) == 0) 2017 WORKLIST_REMOVE(&adp->ad_list); 2018 if (adp->ad_freefrag != NULL) { 2019 if (delay) 2020 WORKLIST_INSERT(&adp->ad_inodedep->id_bufwait, 2021 &adp->ad_freefrag->ff_list); 2022 else 2023 add_to_worklist(&adp->ad_freefrag->ff_list); 2024 } 2025 WORKITEM_FREE(adp, D_ALLOCDIRECT); 2026 } 2027 2028 /* 2029 * Prepare an inode to be freed. The actual free operation is not 2030 * done until the zero'ed inode has been written to disk. 2031 */ 2032 void 2033 softdep_freefile(pvp, ino, mode) 2034 struct vnode *pvp; 2035 ino_t ino; 2036 int mode; 2037 { 2038 struct inode *ip = VTOI(pvp); 2039 struct inodedep *inodedep; 2040 struct freefile *freefile; 2041 2042 /* 2043 * This sets up the inode de-allocation dependency. 2044 */ 2045 MALLOC(freefile, struct freefile *, sizeof(struct freefile), 2046 M_FREEFILE, M_SOFTDEP_FLAGS); 2047 freefile->fx_list.wk_type = D_FREEFILE; 2048 freefile->fx_list.wk_state = 0; 2049 freefile->fx_mode = mode; 2050 freefile->fx_oldinum = ino; 2051 freefile->fx_devvp = ip->i_devvp; 2052 freefile->fx_fs = ip->i_fs; 2053 2054 /* 2055 * If the inodedep does not exist, then the zero'ed inode has 2056 * been written to disk. If the allocated inode has never been 2057 * written to disk, then the on-disk inode is zero'ed. In either 2058 * case we can free the file immediately. 2059 */ 2060 ACQUIRE_LOCK(&lk); 2061 if (inodedep_lookup(ip->i_fs, ino, 0, &inodedep) == 0 || 2062 check_inode_unwritten(inodedep)) { 2063 FREE_LOCK(&lk); 2064 handle_workitem_freefile(freefile); 2065 return; 2066 } 2067 WORKLIST_INSERT(&inodedep->id_inowait, &freefile->fx_list); 2068 FREE_LOCK(&lk); 2069 } 2070 2071 /* 2072 * Check to see if an inode has never been written to disk. If 2073 * so, free the inodedep and return success; otherwise return failure. 2074 * This routine must be called with splbio interrupts blocked. 2075 * 2076 * If we still have a bitmap dependency, then the inode has never 2077 * been written to disk.
Drop the dependency as it is no longer 2078 * necessary since the inode is being deallocated. We set the 2079 * ALLCOMPLETE flags since the bitmap now properly shows that the 2080 * inode is not allocated. Even if the inode is actively being 2081 * written, it has been rolled back to its zero'ed state, so we 2082 * are ensured that a zero inode is what is on the disk. For short 2083 * lived files, this change will usually result in removing all the 2084 * dependencies from the inode so that it can be freed immediately. 2085 */ 2086 static int 2087 check_inode_unwritten(inodedep) 2088 struct inodedep *inodedep; 2089 { 2090 2091 if ((inodedep->id_state & DEPCOMPLETE) != 0 || 2092 LIST_FIRST(&inodedep->id_pendinghd) != NULL || 2093 LIST_FIRST(&inodedep->id_bufwait) != NULL || 2094 LIST_FIRST(&inodedep->id_inowait) != NULL || 2095 TAILQ_FIRST(&inodedep->id_inoupdt) != NULL || 2096 TAILQ_FIRST(&inodedep->id_newinoupdt) != NULL || 2097 inodedep->id_nlinkdelta != 0) 2098 return (0); 2099 2100 /* 2101 * Another process might be in initiate_write_inodeblock 2102 * trying to allocate memory without holding "Softdep Lock". 2103 */ 2104 if ((inodedep->id_state & IOSTARTED) != 0 && 2105 inodedep->id_savedino == NULL) 2106 return(0); 2107 2108 inodedep->id_state |= ALLCOMPLETE; 2109 LIST_REMOVE(inodedep, id_deps); 2110 inodedep->id_buf = NULL; 2111 if (inodedep->id_state & ONWORKLIST) 2112 WORKLIST_REMOVE(&inodedep->id_list); 2113 if (inodedep->id_savedino != NULL) { 2114 FREE(inodedep->id_savedino, M_INODEDEP); 2115 inodedep->id_savedino = NULL; 2116 } 2117 if (free_inodedep(inodedep) == 0) { 2118 FREE_LOCK(&lk); 2119 panic("check_inode_unwritten: busy inode"); 2120 } 2121 return (1); 2122 } 2123 2124 /* 2125 * Try to free an inodedep structure. Return 1 if it could be freed. 2126 */ 2127 static int 2128 free_inodedep(inodedep) 2129 struct inodedep *inodedep; 2130 { 2131 2132 if ((inodedep->id_state & ONWORKLIST) != 0 || 2133 (inodedep->id_state & ALLCOMPLETE) != ALLCOMPLETE || 2134 LIST_FIRST(&inodedep->id_pendinghd) != NULL || 2135 LIST_FIRST(&inodedep->id_bufwait) != NULL || 2136 LIST_FIRST(&inodedep->id_inowait) != NULL || 2137 TAILQ_FIRST(&inodedep->id_inoupdt) != NULL || 2138 TAILQ_FIRST(&inodedep->id_newinoupdt) != NULL || 2139 inodedep->id_nlinkdelta != 0 || inodedep->id_savedino != NULL) 2140 return (0); 2141 LIST_REMOVE(inodedep, id_hash); 2142 WORKITEM_FREE(inodedep, D_INODEDEP); 2143 num_inodedep -= 1; 2144 return (1); 2145 } 2146 2147 /* 2148 * This workitem routine performs the block de-allocation. 2149 * The workitem is added to the pending list after the updated 2150 * inode block has been written to disk. As mentioned above, 2151 * checks regarding the number of blocks de-allocated (compared 2152 * to the number of blocks allocated for the file) are also 2153 * performed in this function. 
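 *
 * For example, on an 8K-block filesystem with the usual 512-byte
 * DEV_BSIZE, releasing one full direct block adds btodb(8192) == 16
 * to blocksreleased; the final total must match the fb_chkcnt value
 * captured from ip->i_blocks when the freeblks was created, and any
 * mismatch is reported below when DIAGNOSTIC is enabled.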
2154 */ 2155 static void 2156 handle_workitem_freeblocks(freeblks) 2157 struct freeblks *freeblks; 2158 { 2159 struct inode tip; 2160 ufs_daddr_t bn; 2161 struct fs *fs; 2162 int i, level, bsize; 2163 long nblocks, blocksreleased = 0; 2164 int error, allerror = 0; 2165 ufs_lbn_t baselbns[NIADDR], tmpval; 2166 2167 tip.i_number = freeblks->fb_previousinum; 2168 tip.i_devvp = freeblks->fb_devvp; 2169 tip.i_dev = freeblks->fb_devvp->v_rdev; 2170 tip.i_fs = freeblks->fb_fs; 2171 tip.i_size = freeblks->fb_oldsize; 2172 tip.i_uid = freeblks->fb_uid; 2173 fs = freeblks->fb_fs; 2174 tmpval = 1; 2175 baselbns[0] = NDADDR; 2176 for (i = 1; i < NIADDR; i++) { 2177 tmpval *= NINDIR(fs); 2178 baselbns[i] = baselbns[i - 1] + tmpval; 2179 } 2180 nblocks = btodb(fs->fs_bsize); 2181 blocksreleased = 0; 2182 /* 2183 * Indirect blocks first. 2184 */ 2185 for (level = (NIADDR - 1); level >= 0; level--) { 2186 if ((bn = freeblks->fb_iblks[level]) == 0) 2187 continue; 2188 if ((error = indir_trunc(&tip, fsbtodb(fs, bn), level, 2189 baselbns[level], &blocksreleased)) != 0) 2190 allerror = error; 2191 ffs_blkfree(&tip, bn, fs->fs_bsize); 2192 blocksreleased += nblocks; 2193 } 2194 /* 2195 * All direct blocks or frags. 2196 */ 2197 for (i = (NDADDR - 1); i >= 0; i--) { 2198 if ((bn = freeblks->fb_dblks[i]) == 0) 2199 continue; 2200 bsize = blksize(fs, &tip, i); 2201 ffs_blkfree(&tip, bn, bsize); 2202 blocksreleased += btodb(bsize); 2203 } 2204 2205 #ifdef DIAGNOSTIC 2206 if (freeblks->fb_chkcnt != blocksreleased) 2207 printf("handle_workitem_freeblocks: block count\n"); 2208 if (allerror) 2209 softdep_error("handle_workitem_freeblocks", allerror); 2210 #endif /* DIAGNOSTIC */ 2211 WORKITEM_FREE(freeblks, D_FREEBLKS); 2212 } 2213 2214 /* 2215 * Release blocks associated with the inode ip and stored in the indirect 2216 * block dbn. If level is greater than SINGLE, the block is an indirect block 2217 * and recursive calls to indir_trunc must be used to cleanse other indirect 2218 * blocks. 2219 */ 2220 static int 2221 indir_trunc(ip, dbn, level, lbn, countp) 2222 struct inode *ip; 2223 ufs_daddr_t dbn; 2224 int level; 2225 ufs_lbn_t lbn; 2226 long *countp; 2227 { 2228 struct buf *bp; 2229 ufs_daddr_t *bap; 2230 ufs_daddr_t nb; 2231 struct fs *fs; 2232 struct worklist *wk; 2233 struct indirdep *indirdep; 2234 int i, lbnadd, nblocks; 2235 int error, allerror = 0; 2236 2237 fs = ip->i_fs; 2238 lbnadd = 1; 2239 for (i = level; i > 0; i--) 2240 lbnadd *= NINDIR(fs); 2241 /* 2242 * Get buffer of block pointers to be freed. This routine is not 2243 * called until the zero'ed inode has been written, so it is safe 2244 * to free blocks as they are encountered. Because the inode has 2245 * been zero'ed, calls to bmap on these blocks will fail. So, we 2246 * have to use the on-disk address and the block device for the 2247 * filesystem to look them up. If the file was deleted before its 2248 * indirect blocks were all written to disk, the routine that set 2249 * us up (deallocate_dependencies) will have arranged to leave 2250 * a complete copy of the indirect block in memory for our use. 2251 * Otherwise we have to read the blocks in from the disk.
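 *
 * The logical range covered by each pointer follows from lbnadd,
 * which is NINDIR(fs) raised to the indirection level: entry i of
 * this block maps logical blocks starting at lbn + i * lbnadd. With
 * 8K blocks and 4-byte pointers, for instance, NINDIR(fs) is 2048,
 * so at level 1 entry 3 covers logical blocks from lbn + 6144 on.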
2252 */ 2253 ACQUIRE_LOCK(&lk); 2254 if ((bp = incore(ip->i_devvp, dbn)) != NULL && 2255 (wk = LIST_FIRST(&bp->b_dep)) != NULL) { 2256 if (wk->wk_type != D_INDIRDEP || 2257 (indirdep = WK_INDIRDEP(wk))->ir_savebp != bp || 2258 (indirdep->ir_state & GOINGAWAY) == 0) { 2259 FREE_LOCK(&lk); 2260 panic("indir_trunc: lost indirdep"); 2261 } 2262 WORKLIST_REMOVE(wk); 2263 WORKITEM_FREE(indirdep, D_INDIRDEP); 2264 if (LIST_FIRST(&bp->b_dep) != NULL) { 2265 FREE_LOCK(&lk); 2266 panic("indir_trunc: dangling dep"); 2267 } 2268 FREE_LOCK(&lk); 2269 } else { 2270 FREE_LOCK(&lk); 2271 error = bread(ip->i_devvp, dbn, (int)fs->fs_bsize, &bp); 2272 if (error) 2273 return (error); 2274 } 2275 /* 2276 * Recursively free indirect blocks. 2277 */ 2278 bap = (ufs_daddr_t *)bp->b_data; 2279 nblocks = btodb(fs->fs_bsize); 2280 for (i = NINDIR(fs) - 1; i >= 0; i--) { 2281 if ((nb = bap[i]) == 0) 2282 continue; 2283 if (level != 0) { 2284 if ((error = indir_trunc(ip, fsbtodb(fs, nb), 2285 level - 1, lbn + (i * lbnadd), countp)) != 0) 2286 allerror = error; 2287 } 2288 ffs_blkfree(ip, nb, fs->fs_bsize); 2289 *countp += nblocks; 2290 } 2291 bp->b_flags |= B_INVAL | B_NOCACHE; 2292 brelse(bp); 2293 return (allerror); 2294 } 2295 2296 /* 2297 * Free an allocindir. 2298 * This routine must be called with splbio interrupts blocked. 2299 */ 2300 static void 2301 free_allocindir(aip, inodedep) 2302 struct allocindir *aip; 2303 struct inodedep *inodedep; 2304 { 2305 struct freefrag *freefrag; 2306 2307 #ifdef DEBUG 2308 if (lk.lkt_held == NOHOLDER) 2309 panic("free_allocindir: lock not held"); 2310 #endif 2311 if ((aip->ai_state & DEPCOMPLETE) == 0) 2312 LIST_REMOVE(aip, ai_deps); 2313 if (aip->ai_state & ONWORKLIST) 2314 WORKLIST_REMOVE(&aip->ai_list); 2315 LIST_REMOVE(aip, ai_next); 2316 if ((freefrag = aip->ai_freefrag) != NULL) { 2317 if (inodedep == NULL) 2318 add_to_worklist(&freefrag->ff_list); 2319 else 2320 WORKLIST_INSERT(&inodedep->id_bufwait, 2321 &freefrag->ff_list); 2322 } 2323 WORKITEM_FREE(aip, D_ALLOCINDIR); 2324 } 2325 2326 /* 2327 * Directory entry addition dependencies. 2328 * 2329 * When adding a new directory entry, the inode (with its incremented link 2330 * count) must be written to disk before the directory entry's pointer to it. 2331 * Also, if the inode is newly allocated, the corresponding freemap must be 2332 * updated (on disk) before the directory entry's pointer. These requirements 2333 * are met via undo/redo on the directory entry's pointer, which consists 2334 * simply of the inode number. 2335 * 2336 * As directory entries are added and deleted, the free space within a 2337 * directory block can become fragmented. The ufs filesystem will compact 2338 * a fragmented directory block to make space for a new entry. When this 2339 * occurs, the offsets of previously added entries change. Any "diradd" 2340 * dependency structures corresponding to these entries must be updated with 2341 * the new offsets. 2342 */ 2343 2344 /* 2345 * This routine is called after the in-memory inode's link 2346 * count has been incremented, but before the directory entry's 2347 * pointer to the inode has been set. 
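 *
 * A condensed sketch of the ordering the caller must follow (modeled
 * on the stock ufs_direnter() path, not a verbatim copy of it):
 *
 *	ip->i_effnlink++;			done by the VOP layer
 *	softdep_change_linkcnt(ip);
 *	(entry bytes are copied into bp at diroffset)
 *	softdep_setup_directory_add(bp, dp, diroffset,
 *	    ip->i_number, newdirbp);
 *	bdwrite(bp);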
2348 */ 2349 void 2350 softdep_setup_directory_add(bp, dp, diroffset, newinum, newdirbp) 2351 struct buf *bp; /* buffer containing directory block */ 2352 struct inode *dp; /* inode for directory */ 2353 off_t diroffset; /* offset of new entry in directory */ 2354 ino_t newinum; /* inode referenced by new directory entry */ 2355 struct buf *newdirbp; /* non-NULL => contents of new mkdir */ 2356 { 2357 int offset; /* offset of new entry within directory block */ 2358 ufs_lbn_t lbn; /* block in directory containing new entry */ 2359 struct fs *fs; 2360 struct diradd *dap; 2361 struct pagedep *pagedep; 2362 struct inodedep *inodedep; 2363 struct mkdir *mkdir1, *mkdir2; 2364 2365 /* 2366 * Whiteouts have no dependencies. 2367 */ 2368 if (newinum == WINO) { 2369 if (newdirbp != NULL) 2370 bdwrite(newdirbp); 2371 return; 2372 } 2373 2374 fs = dp->i_fs; 2375 lbn = lblkno(fs, diroffset); 2376 offset = blkoff(fs, diroffset); 2377 MALLOC(dap, struct diradd *, sizeof(struct diradd), M_DIRADD, 2378 M_SOFTDEP_FLAGS); 2379 bzero(dap, sizeof(struct diradd)); 2380 dap->da_list.wk_type = D_DIRADD; 2381 dap->da_offset = offset; 2382 dap->da_newinum = newinum; 2383 dap->da_state = ATTACHED; 2384 if (newdirbp == NULL) { 2385 dap->da_state |= DEPCOMPLETE; 2386 ACQUIRE_LOCK(&lk); 2387 } else { 2388 dap->da_state |= MKDIR_BODY | MKDIR_PARENT; 2389 MALLOC(mkdir1, struct mkdir *, sizeof(struct mkdir), M_MKDIR, 2390 M_SOFTDEP_FLAGS); 2391 mkdir1->md_list.wk_type = D_MKDIR; 2392 mkdir1->md_state = MKDIR_BODY; 2393 mkdir1->md_diradd = dap; 2394 MALLOC(mkdir2, struct mkdir *, sizeof(struct mkdir), M_MKDIR, 2395 M_SOFTDEP_FLAGS); 2396 mkdir2->md_list.wk_type = D_MKDIR; 2397 mkdir2->md_state = MKDIR_PARENT; 2398 mkdir2->md_diradd = dap; 2399 /* 2400 * Dependency on "." and ".." being written to disk. 2401 */ 2402 mkdir1->md_buf = newdirbp; 2403 ACQUIRE_LOCK(&lk); 2404 LIST_INSERT_HEAD(&mkdirlisthd, mkdir1, md_mkdirs); 2405 WORKLIST_INSERT(&newdirbp->b_dep, &mkdir1->md_list); 2406 FREE_LOCK(&lk); 2407 bdwrite(newdirbp); 2408 /* 2409 * Dependency on link count increase for parent directory 2410 */ 2411 ACQUIRE_LOCK(&lk); 2412 if (inodedep_lookup(dp->i_fs, dp->i_number, 0, &inodedep) == 0 2413 || (inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) { 2414 dap->da_state &= ~MKDIR_PARENT; 2415 WORKITEM_FREE(mkdir2, D_MKDIR); 2416 } else { 2417 LIST_INSERT_HEAD(&mkdirlisthd, mkdir2, md_mkdirs); 2418 WORKLIST_INSERT(&inodedep->id_bufwait,&mkdir2->md_list); 2419 } 2420 } 2421 /* 2422 * Link into parent directory pagedep to await its being written. 2423 */ 2424 if (pagedep_lookup(dp, lbn, DEPALLOC, &pagedep) == 0) 2425 WORKLIST_INSERT(&bp->b_dep, &pagedep->pd_list); 2426 dap->da_pagedep = pagedep; 2427 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)], dap, 2428 da_pdlist); 2429 /* 2430 * Link into its inodedep. Put it on the id_bufwait list if the inode 2431 * is not yet written. If it is written, do the post-inode write 2432 * processing to put it on the id_pendinghd list. 2433 */ 2434 (void) inodedep_lookup(fs, newinum, DEPALLOC, &inodedep); 2435 if ((inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) 2436 diradd_inode_written(dap, inodedep); 2437 else 2438 WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list); 2439 FREE_LOCK(&lk); 2440 } 2441 2442 /* 2443 * This procedure is called to change the offset of a directory 2444 * entry when compacting a directory block which must be owned 2445 * exclusively by the caller. 
Note that the actual entry movement 2446 * must be done in this procedure to ensure that no I/O completions 2447 * occur while the move is in progress. 2448 */ 2449 void 2450 softdep_change_directoryentry_offset(dp, base, oldloc, newloc, entrysize) 2451 struct inode *dp; /* inode for directory */ 2452 caddr_t base; /* address of dp->i_offset */ 2453 caddr_t oldloc; /* address of old directory location */ 2454 caddr_t newloc; /* address of new directory location */ 2455 int entrysize; /* size of directory entry */ 2456 { 2457 int offset, oldoffset, newoffset; 2458 struct pagedep *pagedep; 2459 struct diradd *dap; 2460 ufs_lbn_t lbn; 2461 2462 ACQUIRE_LOCK(&lk); 2463 lbn = lblkno(dp->i_fs, dp->i_offset); 2464 offset = blkoff(dp->i_fs, dp->i_offset); 2465 if (pagedep_lookup(dp, lbn, 0, &pagedep) == 0) 2466 goto done; 2467 oldoffset = offset + (oldloc - base); 2468 newoffset = offset + (newloc - base); 2469 2470 LIST_FOREACH(dap, &pagedep->pd_diraddhd[DIRADDHASH(oldoffset)], da_pdlist) { 2471 if (dap->da_offset != oldoffset) 2472 continue; 2473 dap->da_offset = newoffset; 2474 if (DIRADDHASH(newoffset) == DIRADDHASH(oldoffset)) 2475 break; 2476 LIST_REMOVE(dap, da_pdlist); 2477 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(newoffset)], 2478 dap, da_pdlist); 2479 break; 2480 } 2481 if (dap == NULL) { 2482 2483 LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist) { 2484 if (dap->da_offset == oldoffset) { 2485 dap->da_offset = newoffset; 2486 break; 2487 } 2488 } 2489 } 2490 done: 2491 bcopy(oldloc, newloc, entrysize); 2492 FREE_LOCK(&lk); 2493 } 2494 2495 /* 2496 * Free a diradd dependency structure. This routine must be called 2497 * with splbio interrupts blocked. 2498 */ 2499 static void 2500 free_diradd(dap) 2501 struct diradd *dap; 2502 { 2503 struct dirrem *dirrem; 2504 struct pagedep *pagedep; 2505 struct inodedep *inodedep; 2506 struct mkdir *mkdir, *nextmd; 2507 2508 #ifdef DEBUG 2509 if (lk.lkt_held == NOHOLDER) 2510 panic("free_diradd: lock not held"); 2511 #endif 2512 WORKLIST_REMOVE(&dap->da_list); 2513 LIST_REMOVE(dap, da_pdlist); 2514 if ((dap->da_state & DIRCHG) == 0) { 2515 pagedep = dap->da_pagedep; 2516 } else { 2517 dirrem = dap->da_previous; 2518 pagedep = dirrem->dm_pagedep; 2519 dirrem->dm_dirinum = pagedep->pd_ino; 2520 add_to_worklist(&dirrem->dm_list); 2521 } 2522 if (inodedep_lookup(VFSTOUFS(pagedep->pd_mnt)->um_fs, dap->da_newinum, 2523 0, &inodedep) != 0) 2524 (void) free_inodedep(inodedep); 2525 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) { 2526 for (mkdir = LIST_FIRST(&mkdirlisthd); mkdir; mkdir = nextmd) { 2527 nextmd = LIST_NEXT(mkdir, md_mkdirs); 2528 if (mkdir->md_diradd != dap) 2529 continue; 2530 dap->da_state &= ~mkdir->md_state; 2531 WORKLIST_REMOVE(&mkdir->md_list); 2532 LIST_REMOVE(mkdir, md_mkdirs); 2533 WORKITEM_FREE(mkdir, D_MKDIR); 2534 } 2535 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) { 2536 FREE_LOCK(&lk); 2537 panic("free_diradd: unfound ref"); 2538 } 2539 } 2540 WORKITEM_FREE(dap, D_DIRADD); 2541 } 2542 2543 /* 2544 * Directory entry removal dependencies. 2545 * 2546 * When removing a directory entry, the entry's inode pointer must be 2547 * zero'ed on disk before the corresponding inode's link count is decremented 2548 * (possibly freeing the inode for re-use). This dependency is handled by 2549 * updating the directory entry but delaying the inode count reduction until 2550 * after the directory block has been written to disk. After this point, the 2551 * inode count can be decremented whenever it is convenient. 
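 *
 * The resulting write ordering for a simple unlink, sketched:
 *
 *	1) the directory block with the entry's d_ino zero'ed is written;
 *	2) the dirrem workitem then decrements the inode's link count;
 *	3) the inode block with the reduced count is written;
 *	4) only after that may the inode be freed and reused.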
2552 */ 2553 2554 /* 2555 * This routine should be called immediately after removing 2556 * a directory entry. The inode's link count should not be 2557 * decremented by the calling procedure -- the soft updates 2558 * code will do this task when it is safe. 2559 */ 2560 void 2561 softdep_setup_remove(bp, dp, ip, isrmdir) 2562 struct buf *bp; /* buffer containing directory block */ 2563 struct inode *dp; /* inode for the directory being modified */ 2564 struct inode *ip; /* inode for directory entry being removed */ 2565 int isrmdir; /* indicates if doing RMDIR */ 2566 { 2567 struct dirrem *dirrem, *prevdirrem; 2568 2569 /* 2570 * Allocate a new dirrem if appropriate and ACQUIRE_LOCK. 2571 */ 2572 dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem); 2573 2574 /* 2575 * If the COMPLETE flag is clear, then there were no active 2576 * entries and we want to roll back to a zeroed entry until 2577 * the new inode is committed to disk. If the COMPLETE flag is 2578 * set then we have deleted an entry that never made it to 2579 * disk. If the entry we deleted resulted from a name change, 2580 * then the old name still resides on disk. We cannot delete 2581 * its inode (returned to us in prevdirrem) until the zeroed 2582 * directory entry gets to disk. The new inode has never been 2583 * referenced on the disk, so can be deleted immediately. 2584 */ 2585 if ((dirrem->dm_state & COMPLETE) == 0) { 2586 LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd, dirrem, 2587 dm_next); 2588 FREE_LOCK(&lk); 2589 } else { 2590 if (prevdirrem != NULL) 2591 LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd, 2592 prevdirrem, dm_next); 2593 dirrem->dm_dirinum = dirrem->dm_pagedep->pd_ino; 2594 FREE_LOCK(&lk); 2595 handle_workitem_remove(dirrem); 2596 } 2597 } 2598 2599 /* 2600 * Allocate a new dirrem if appropriate and return it along with 2601 * its associated pagedep. Called without a lock, returns with lock. 2602 */ 2603 static long num_dirrem; /* number of dirrem allocated */ 2604 static struct dirrem * 2605 newdirrem(bp, dp, ip, isrmdir, prevdirremp) 2606 struct buf *bp; /* buffer containing directory block */ 2607 struct inode *dp; /* inode for the directory being modified */ 2608 struct inode *ip; /* inode for directory entry being removed */ 2609 int isrmdir; /* indicates if doing RMDIR */ 2610 struct dirrem **prevdirremp; /* previously referenced inode, if any */ 2611 { 2612 int offset; 2613 ufs_lbn_t lbn; 2614 struct diradd *dap; 2615 struct dirrem *dirrem; 2616 struct pagedep *pagedep; 2617 2618 /* 2619 * Whiteouts have no deletion dependencies. 2620 */ 2621 if (ip == NULL) 2622 panic("newdirrem: whiteout"); 2623 /* 2624 * If we are over our limit, try to improve the situation. 2625 * Limiting the number of dirrem structures will also limit 2626 * the number of freefile and freeblks structures. 2627 */ 2628 if (num_dirrem > max_softdeps / 2 && speedup_syncer() == 0) 2629 (void) request_cleanup(FLUSH_REMOVE, 0); 2630 num_dirrem += 1; 2631 MALLOC(dirrem, struct dirrem *, sizeof(struct dirrem), 2632 M_DIRREM, M_SOFTDEP_FLAGS); 2633 bzero(dirrem, sizeof(struct dirrem)); 2634 dirrem->dm_list.wk_type = D_DIRREM; 2635 dirrem->dm_state = isrmdir ? 
RMDIR : 0; 2636 dirrem->dm_mnt = ITOV(ip)->v_mount; 2637 dirrem->dm_oldinum = ip->i_number; 2638 *prevdirremp = NULL; 2639 2640 ACQUIRE_LOCK(&lk); 2641 lbn = lblkno(dp->i_fs, dp->i_offset); 2642 offset = blkoff(dp->i_fs, dp->i_offset); 2643 if (pagedep_lookup(dp, lbn, DEPALLOC, &pagedep) == 0) 2644 WORKLIST_INSERT(&bp->b_dep, &pagedep->pd_list); 2645 dirrem->dm_pagedep = pagedep; 2646 /* 2647 * Check for a diradd dependency for the same directory entry. 2648 * If present, then both dependencies become obsolete and can 2649 * be de-allocated. Check for an entry on both the pd_diraddhd 2650 * list and the pd_pendinghd list. 2651 */ 2652 2653 LIST_FOREACH(dap, &pagedep->pd_diraddhd[DIRADDHASH(offset)], da_pdlist) 2654 if (dap->da_offset == offset) 2655 break; 2656 if (dap == NULL) { 2657 2658 LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist) 2659 if (dap->da_offset == offset) 2660 break; 2661 if (dap == NULL) 2662 return (dirrem); 2663 } 2664 /* 2665 * Must be ATTACHED at this point. 2666 */ 2667 if ((dap->da_state & ATTACHED) == 0) { 2668 FREE_LOCK(&lk); 2669 panic("newdirrem: not ATTACHED"); 2670 } 2671 if (dap->da_newinum != ip->i_number) { 2672 FREE_LOCK(&lk); 2673 panic("newdirrem: inum %"PRId64" should be %"PRId64, 2674 ip->i_number, dap->da_newinum); 2675 } 2676 /* 2677 * If we are deleting a changed name that never made it to disk, 2678 * then return the dirrem describing the previous inode (which 2679 * represents the inode currently referenced from this entry on disk). 2680 */ 2681 if ((dap->da_state & DIRCHG) != 0) { 2682 *prevdirremp = dap->da_previous; 2683 dap->da_state &= ~DIRCHG; 2684 dap->da_pagedep = pagedep; 2685 } 2686 /* 2687 * We are deleting an entry that never made it to disk. 2688 * Mark it COMPLETE so we can delete its inode immediately. 2689 */ 2690 dirrem->dm_state |= COMPLETE; 2691 free_diradd(dap); 2692 return (dirrem); 2693 } 2694 2695 /* 2696 * Directory entry change dependencies. 2697 * 2698 * Changing an existing directory entry requires that an add operation 2699 * be completed first, followed by a deletion. The semantics for the addition 2700 * are identical to the description of adding a new entry above except 2701 * that the rollback is to the old inode number rather than zero. Once 2702 * the addition dependency is completed, the removal is done as described 2703 * in the removal routine above. 2704 */ 2705 2706 /* 2707 * This routine should be called immediately after changing 2708 * a directory entry. The inode's link count should not be 2709 * decremented by the calling procedure -- the soft updates 2710 * code will perform this task when it is safe. 2711 */ 2712 void 2713 softdep_setup_directory_change(bp, dp, ip, newinum, isrmdir) 2714 struct buf *bp; /* buffer containing directory block */ 2715 struct inode *dp; /* inode for the directory being modified */ 2716 struct inode *ip; /* inode for directory entry being removed */ 2717 ino_t newinum; /* new inode number for changed entry */ 2718 int isrmdir; /* indicates if doing RMDIR */ 2719 { 2720 int offset; 2721 struct diradd *dap = NULL; 2722 struct dirrem *dirrem, *prevdirrem; 2723 struct pagedep *pagedep; 2724 struct inodedep *inodedep; 2725 2726 offset = blkoff(dp->i_fs, dp->i_offset); 2727 2728 /* 2729 * Whiteouts do not need diradd dependencies.
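 * (A whiteout entry carries the reserved whiteout inode number WINO
 * rather than a reference to a real, allocated inode, so there is no
 * inode rollback to track for it.)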
2730 */ 2731 if (newinum != WINO) { 2732 MALLOC(dap, struct diradd *, sizeof(struct diradd), 2733 M_DIRADD, M_SOFTDEP_FLAGS); 2734 bzero(dap, sizeof(struct diradd)); 2735 dap->da_list.wk_type = D_DIRADD; 2736 dap->da_state = DIRCHG | ATTACHED | DEPCOMPLETE; 2737 dap->da_offset = offset; 2738 dap->da_newinum = newinum; 2739 } 2740 2741 /* 2742 * Allocate a new dirrem and ACQUIRE_LOCK. 2743 */ 2744 dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem); 2745 pagedep = dirrem->dm_pagedep; 2746 /* 2747 * The possible values for isrmdir: 2748 * 0 - non-directory file rename 2749 * 1 - directory rename within same directory 2750 * inum - directory rename to new directory of given inode number 2751 * When renaming to a new directory, we are both deleting and 2752 * creating a new directory entry, so the link count on the new 2753 * directory should not change. Thus we do not need the followup 2754 * dirrem which is usually done in handle_workitem_remove. We set 2755 * the DIRCHG flag to tell handle_workitem_remove to skip the 2756 * followup dirrem. 2757 */ 2758 if (isrmdir > 1) 2759 dirrem->dm_state |= DIRCHG; 2760 2761 /* 2762 * Whiteouts have no additional dependencies, 2763 * so just put the dirrem on the correct list. 2764 */ 2765 if (newinum == WINO) { 2766 if ((dirrem->dm_state & COMPLETE) == 0) { 2767 LIST_INSERT_HEAD(&pagedep->pd_dirremhd, dirrem, 2768 dm_next); 2769 } else { 2770 dirrem->dm_dirinum = pagedep->pd_ino; 2771 add_to_worklist(&dirrem->dm_list); 2772 } 2773 FREE_LOCK(&lk); 2774 return; 2775 } 2776 2777 /* 2778 * If the COMPLETE flag is clear, then there were no active 2779 * entries and we want to roll back to the previous inode until 2780 * the new inode is committed to disk. If the COMPLETE flag is 2781 * set, then we have deleted an entry that never made it to disk. 2782 * If the entry we deleted resulted from a name change, then the old 2783 * inode reference still resides on disk. Any rollback that we do 2784 * needs to be to that old inode (returned to us in prevdirrem). If 2785 * the entry we deleted resulted from a create, then there is 2786 * no entry on the disk, so we want to roll back to zero rather 2787 * than the uncommitted inode. In either of the COMPLETE cases we 2788 * want to immediately free the unwritten and unreferenced inode. 2789 */ 2790 if ((dirrem->dm_state & COMPLETE) == 0) { 2791 dap->da_previous = dirrem; 2792 } else { 2793 if (prevdirrem != NULL) { 2794 dap->da_previous = prevdirrem; 2795 } else { 2796 dap->da_state &= ~DIRCHG; 2797 dap->da_pagedep = pagedep; 2798 } 2799 dirrem->dm_dirinum = pagedep->pd_ino; 2800 add_to_worklist(&dirrem->dm_list); 2801 } 2802 /* 2803 * Link into its inodedep. Put it on the id_bufwait list if the inode 2804 * is not yet written. If it is written, do the post-inode write 2805 * processing to put it on the id_pendinghd list. 2806 */ 2807 if (inodedep_lookup(dp->i_fs, newinum, DEPALLOC, &inodedep) == 0 || 2808 (inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) { 2809 dap->da_state |= COMPLETE; 2810 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist); 2811 WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list); 2812 } else { 2813 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)], 2814 dap, da_pdlist); 2815 WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list); 2816 } 2817 FREE_LOCK(&lk); 2818 } 2819 2820 /* 2821 * Called whenever the link count on an inode is changed. 
2822 * It creates an inode dependency so that the new reference(s) 2823 * to the inode cannot be committed to disk until the updated 2824 * inode has been written. 2825 */ 2826 void 2827 softdep_change_linkcnt(ip) 2828 struct inode *ip; /* the inode with the increased link count */ 2829 { 2830 struct inodedep *inodedep; 2831 2832 ACQUIRE_LOCK(&lk); 2833 (void) inodedep_lookup(ip->i_fs, ip->i_number, DEPALLOC, &inodedep); 2834 if (ip->i_nlink < ip->i_effnlink) { 2835 FREE_LOCK(&lk); 2836 panic("softdep_change_linkcnt: bad delta"); 2837 } 2838 inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink; 2839 FREE_LOCK(&lk); 2840 } 2841 2842 /* 2843 * This workitem decrements the inode's link count. 2844 * If the link count reaches zero, the file is removed. 2845 */ 2846 static void 2847 handle_workitem_remove(dirrem) 2848 struct dirrem *dirrem; 2849 { 2850 struct thread *td = curthread; /* XXX */ 2851 struct inodedep *inodedep; 2852 struct vnode *vp; 2853 struct inode *ip; 2854 ino_t oldinum; 2855 int error; 2856 2857 if ((error = VFS_VGET(dirrem->dm_mnt, dirrem->dm_oldinum, &vp)) != 0) { 2858 softdep_error("handle_workitem_remove: vget", error); 2859 return; 2860 } 2861 ip = VTOI(vp); 2862 ACQUIRE_LOCK(&lk); 2863 if ((inodedep_lookup(ip->i_fs, dirrem->dm_oldinum, 0, &inodedep)) == 0){ 2864 FREE_LOCK(&lk); 2865 panic("handle_workitem_remove: lost inodedep"); 2866 } 2867 /* 2868 * Normal file deletion. 2869 */ 2870 if ((dirrem->dm_state & RMDIR) == 0) { 2871 ip->i_nlink--; 2872 ip->i_flag |= IN_CHANGE; 2873 if (ip->i_nlink < ip->i_effnlink) { 2874 FREE_LOCK(&lk); 2875 panic("handle_workitem_remove: bad file delta"); 2876 } 2877 inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink; 2878 FREE_LOCK(&lk); 2879 vput(vp); 2880 num_dirrem -= 1; 2881 WORKITEM_FREE(dirrem, D_DIRREM); 2882 return; 2883 } 2884 /* 2885 * Directory deletion. Decrement reference count for both the 2886 * just deleted parent directory entry and the reference for ".". 2887 * Next truncate the directory to length zero. When the 2888 * truncation completes, arrange to have the reference count on 2889 * the parent decremented to account for the loss of "..". 2890 */ 2891 ip->i_nlink -= 2; 2892 ip->i_flag |= IN_CHANGE; 2893 if (ip->i_nlink < ip->i_effnlink) { 2894 FREE_LOCK(&lk); 2895 panic("handle_workitem_remove: bad dir delta"); 2896 } 2897 inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink; 2898 FREE_LOCK(&lk); 2899 if ((error = UFS_TRUNCATE(vp, (off_t)0, 0, proc0.p_ucred, td)) != 0) 2900 softdep_error("handle_workitem_remove: truncate", error); 2901 /* 2902 * Rename a directory to a new parent. Since we are both deleting 2903 * and creating a new directory entry, the link count on the new 2904 * directory should not change. Thus we skip the followup dirrem. 2905 */ 2906 if (dirrem->dm_state & DIRCHG) { 2907 vput(vp); 2908 num_dirrem -= 1; 2909 WORKITEM_FREE(dirrem, D_DIRREM); 2910 return; 2911 } 2912 /* 2913 * If the inodedep does not exist, then the zero'ed inode has 2914 * been written to disk. If the allocated inode has never been 2915 * written to disk, then the on-disk inode is zero'ed. In either 2916 * case we can remove the file immediately.
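 * Note that the dirrem is recycled below to do the ".." accounting:
 * dm_oldinum is overwritten with dm_dirinum (the parent directory),
 * so the recursive call to handle_workitem_remove(), or the queued
 * copy of the workitem, decrements the parent's link count for the
 * ".." reference lost with this directory.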
2917 */ 2918 ACQUIRE_LOCK(&lk); 2919 dirrem->dm_state = 0; 2920 oldinum = dirrem->dm_oldinum; 2921 dirrem->dm_oldinum = dirrem->dm_dirinum; 2922 if (inodedep_lookup(ip->i_fs, oldinum, 0, &inodedep) == 0 || 2923 check_inode_unwritten(inodedep)) { 2924 FREE_LOCK(&lk); 2925 vput(vp); 2926 handle_workitem_remove(dirrem); 2927 return; 2928 } 2929 WORKLIST_INSERT(&inodedep->id_inowait, &dirrem->dm_list); 2930 FREE_LOCK(&lk); 2931 ip->i_flag |= IN_CHANGE; 2932 ffs_update(vp, 0); 2933 vput(vp); 2934 } 2935 2936 /* 2937 * Inode de-allocation dependencies. 2938 * 2939 * When an inode's link count is reduced to zero, it can be de-allocated. We 2940 * found it convenient to postpone de-allocation until after the inode is 2941 * written to disk with its new link count (zero). At this point, all of the 2942 * on-disk inode's block pointers are nullified and, with careful dependency 2943 * list ordering, all dependencies related to the inode will be satisfied and 2944 * the corresponding dependency structures de-allocated. So, if/when the 2945 * inode is reused, there will be no mixing of old dependencies with new 2946 * ones. This artificial dependency is set up by the block de-allocation 2947 * procedure above (softdep_setup_freeblocks) and completed by the 2948 * following procedure. 2949 */ 2950 static void 2951 handle_workitem_freefile(freefile) 2952 struct freefile *freefile; 2953 { 2954 struct vnode vp; 2955 struct inode tip; 2956 struct inodedep *idp; 2957 int error; 2958 2959 #ifdef DEBUG 2960 ACQUIRE_LOCK(&lk); 2961 error = inodedep_lookup(freefile->fx_fs, freefile->fx_oldinum, 0, &idp); 2962 FREE_LOCK(&lk); 2963 if (error) 2964 panic("handle_workitem_freefile: inodedep survived"); 2965 #endif 2966 tip.i_devvp = freefile->fx_devvp; 2967 tip.i_dev = freefile->fx_devvp->v_rdev; 2968 tip.i_fs = freefile->fx_fs; 2969 vp.v_data = &tip; 2970 if ((error = ffs_freefile(&vp, freefile->fx_oldinum, freefile->fx_mode)) != 0) 2971 softdep_error("handle_workitem_freefile", error); 2972 WORKITEM_FREE(freefile, D_FREEFILE); 2973 } 2974 2975 /* 2976 * Helper function which unlinks marker element from work list and returns 2977 * the next element on the list. 2978 */ 2979 static __inline struct worklist * 2980 markernext(struct worklist *marker) 2981 { 2982 struct worklist *next; 2983 2984 next = LIST_NEXT(marker, wk_list); 2985 LIST_REMOVE(marker, wk_list); 2986 return next; 2987 } 2988 2989 /* 2990 * Disk writes. 2991 * 2992 * The dependency structures constructed above are most actively used when file 2993 * system blocks are written to disk. No constraints are placed on when a 2994 * block can be written, but unsatisfied update dependencies are made safe by 2995 * modifying (or replacing) the source memory for the duration of the disk 2996 * write. When the disk write completes, the memory block is again brought 2997 * up-to-date. 2998 * 2999 * In-core inode structure reclamation. 3000 * 3001 * Because there are a finite number of "in-core" inode structures, they are 3002 * reused regularly. By transferring all inode-related dependencies to the 3003 * in-memory inode block and indexing them separately (via "inodedep"s), we 3004 * can allow "in-core" inode structures to be reused at any time and avoid 3005 * any increase in contention. 3006 * 3007 * Called just before entering the device driver to initiate a new disk I/O. 3008 * The buffer must be locked, thus, no I/O completion operations can occur 3009 * while we are manipulating its associated dependencies. 
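 * (This routine is reached through the buffer cache's bio_ops
 * io_start hook rather than by a direct call from the filesystem
 * code proper.)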
3010 */ 3011 static void 3012 softdep_disk_io_initiation(bp) 3013 struct buf *bp; /* structure describing disk write to occur */ 3014 { 3015 struct worklist *wk; 3016 struct worklist marker; 3017 struct indirdep *indirdep; 3018 3019 /* 3020 * We only care about write operations. There should never 3021 * be dependencies for reads. 3022 */ 3023 if (bp->b_flags & B_READ) 3024 panic("softdep_disk_io_initiation: read"); 3025 3026 marker.wk_type = D_LAST + 1; /* Not a normal workitem */ 3027 3028 /* 3029 * Do any necessary pre-I/O processing. 3030 */ 3031 for (wk = LIST_FIRST(&bp->b_dep); wk; wk = markernext(&marker)) { 3032 LIST_INSERT_AFTER(wk, &marker, wk_list); 3033 3034 switch (wk->wk_type) { 3035 3036 case D_PAGEDEP: 3037 initiate_write_filepage(WK_PAGEDEP(wk), bp); 3038 continue; 3039 3040 case D_INODEDEP: 3041 initiate_write_inodeblock(WK_INODEDEP(wk), bp); 3042 continue; 3043 3044 case D_INDIRDEP: 3045 indirdep = WK_INDIRDEP(wk); 3046 if (indirdep->ir_state & GOINGAWAY) 3047 panic("disk_io_initiation: indirdep gone"); 3048 /* 3049 * If there are no remaining dependencies, this 3050 * will be writing the real pointers, so the 3051 * dependency can be freed. 3052 */ 3053 if (LIST_FIRST(&indirdep->ir_deplisthd) == NULL) { 3054 indirdep->ir_savebp->b_flags |= B_INVAL | B_NOCACHE; 3055 brelse(indirdep->ir_savebp); 3056 /* inline expand WORKLIST_REMOVE(wk); */ 3057 wk->wk_state &= ~ONWORKLIST; 3058 LIST_REMOVE(wk, wk_list); 3059 WORKITEM_FREE(indirdep, D_INDIRDEP); 3060 continue; 3061 } 3062 /* 3063 * Replace up-to-date version with safe version. 3064 */ 3065 MALLOC(indirdep->ir_saveddata, caddr_t, bp->b_bcount, 3066 M_INDIRDEP, M_SOFTDEP_FLAGS); 3067 ACQUIRE_LOCK(&lk); 3068 indirdep->ir_state &= ~ATTACHED; 3069 indirdep->ir_state |= UNDONE; 3070 bcopy(bp->b_data, indirdep->ir_saveddata, bp->b_bcount); 3071 bcopy(indirdep->ir_savebp->b_data, bp->b_data, 3072 bp->b_bcount); 3073 FREE_LOCK(&lk); 3074 continue; 3075 3076 case D_MKDIR: 3077 case D_BMSAFEMAP: 3078 case D_ALLOCDIRECT: 3079 case D_ALLOCINDIR: 3080 continue; 3081 3082 default: 3083 panic("handle_disk_io_initiation: Unexpected type %s", 3084 TYPENAME(wk->wk_type)); 3085 /* NOTREACHED */ 3086 } 3087 } 3088 } 3089 3090 /* 3091 * Called from within the procedure above to deal with unsatisfied 3092 * allocation dependencies in a directory. The buffer must be locked, 3093 * thus, no I/O completion operations can occur while we are 3094 * manipulating its associated dependencies. 3095 */ 3096 static void 3097 initiate_write_filepage(pagedep, bp) 3098 struct pagedep *pagedep; 3099 struct buf *bp; 3100 { 3101 struct diradd *dap; 3102 struct direct *ep; 3103 int i; 3104 3105 if (pagedep->pd_state & IOSTARTED) { 3106 /* 3107 * This can only happen if there is a driver that does not 3108 * understand chaining. Here biodone will reissue the call 3109 * to strategy for the incomplete buffers. 
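 * (The rollback below has already been applied in that case, so the
 * routine just notes the condition and returns rather than undoing
 * the entries a second time.)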
3110 */ 3111 printf("initiate_write_filepage: already started\n"); 3112 return; 3113 } 3114 pagedep->pd_state |= IOSTARTED; 3115 ACQUIRE_LOCK(&lk); 3116 for (i = 0; i < DAHASHSZ; i++) { 3117 LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) { 3118 ep = (struct direct *) 3119 ((char *)bp->b_data + dap->da_offset); 3120 if (ep->d_ino != dap->da_newinum) { 3121 FREE_LOCK(&lk); 3122 panic("%s: dir inum %d != new %"PRId64, 3123 "initiate_write_filepage", 3124 ep->d_ino, dap->da_newinum); 3125 } 3126 if (dap->da_state & DIRCHG) 3127 ep->d_ino = dap->da_previous->dm_oldinum; 3128 else 3129 ep->d_ino = 0; 3130 dap->da_state &= ~ATTACHED; 3131 dap->da_state |= UNDONE; 3132 } 3133 } 3134 FREE_LOCK(&lk); 3135 } 3136 3137 /* 3138 * Called from within the procedure above to deal with unsatisfied 3139 * allocation dependencies in an inodeblock. The buffer must be 3140 * locked, thus, no I/O completion operations can occur while we 3141 * are manipulating its associated dependencies. 3142 */ 3143 static void 3144 initiate_write_inodeblock(inodedep, bp) 3145 struct inodedep *inodedep; 3146 struct buf *bp; /* The inode block */ 3147 { 3148 struct allocdirect *adp, *lastadp; 3149 struct dinode *dp; 3150 struct dinode *sip; 3151 struct fs *fs; 3152 ufs_lbn_t prevlbn = 0; 3153 int i, deplist; 3154 3155 if (inodedep->id_state & IOSTARTED) 3156 panic("initiate_write_inodeblock: already started"); 3157 inodedep->id_state |= IOSTARTED; 3158 fs = inodedep->id_fs; 3159 dp = (struct dinode *)bp->b_data + 3160 ino_to_fsbo(fs, inodedep->id_ino); 3161 /* 3162 * If the bitmap is not yet written, then the allocated 3163 * inode cannot be written to disk. 3164 */ 3165 if ((inodedep->id_state & DEPCOMPLETE) == 0) { 3166 if (inodedep->id_savedino != NULL) 3167 panic("initiate_write_inodeblock: already doing I/O"); 3168 MALLOC(sip, struct dinode *, 3169 sizeof(struct dinode), M_INODEDEP, M_SOFTDEP_FLAGS); 3170 inodedep->id_savedino = sip; 3171 *inodedep->id_savedino = *dp; 3172 bzero((caddr_t)dp, sizeof(struct dinode)); 3173 dp->di_gen = inodedep->id_savedino->di_gen; 3174 return; 3175 } 3176 /* 3177 * If no dependencies, then there is nothing to roll back. 3178 */ 3179 inodedep->id_savedsize = dp->di_size; 3180 if (TAILQ_FIRST(&inodedep->id_inoupdt) == NULL) 3181 return; 3182 /* 3183 * Set the dependencies to busy. 
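 * ("Busy" here means each allocdirect is flipped from ATTACHED to
 * UNDONE, so that the write-completion side knows a rollback is in
 * effect and must be rolled forward again.)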
3184 */ 3185 ACQUIRE_LOCK(&lk); 3186 for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; 3187 adp = TAILQ_NEXT(adp, ad_next)) { 3188 #ifdef DIAGNOSTIC 3189 if (deplist != 0 && prevlbn >= adp->ad_lbn) { 3190 FREE_LOCK(&lk); 3191 panic("softdep_write_inodeblock: lbn order"); 3192 } 3193 prevlbn = adp->ad_lbn; 3194 if (adp->ad_lbn < NDADDR && 3195 dp->di_db[adp->ad_lbn] != adp->ad_newblkno) { 3196 FREE_LOCK(&lk); 3197 panic("%s: direct pointer #%ld mismatch %d != %d", 3198 "softdep_write_inodeblock", adp->ad_lbn, 3199 dp->di_db[adp->ad_lbn], adp->ad_newblkno); 3200 } 3201 if (adp->ad_lbn >= NDADDR && 3202 dp->di_ib[adp->ad_lbn - NDADDR] != adp->ad_newblkno) { 3203 FREE_LOCK(&lk); 3204 panic("%s: indirect pointer #%ld mismatch %d != %d", 3205 "softdep_write_inodeblock", adp->ad_lbn - NDADDR, 3206 dp->di_ib[adp->ad_lbn - NDADDR], adp->ad_newblkno); 3207 } 3208 deplist |= 1 << adp->ad_lbn; 3209 if ((adp->ad_state & ATTACHED) == 0) { 3210 FREE_LOCK(&lk); 3211 panic("softdep_write_inodeblock: Unknown state 0x%x", 3212 adp->ad_state); 3213 } 3214 #endif /* DIAGNOSTIC */ 3215 adp->ad_state &= ~ATTACHED; 3216 adp->ad_state |= UNDONE; 3217 } 3218 /* 3219 * The on-disk inode cannot claim to be any larger than the last 3220 * fragment that has been written. Otherwise, the on-disk inode 3221 * might have fragments that were not the last block in the file 3222 * which would corrupt the filesystem. 3223 */ 3224 for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; 3225 lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) { 3226 if (adp->ad_lbn >= NDADDR) 3227 break; 3228 dp->di_db[adp->ad_lbn] = adp->ad_oldblkno; 3229 /* keep going until hitting a rollback to a frag */ 3230 if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize) 3231 continue; 3232 dp->di_size = fs->fs_bsize * adp->ad_lbn + adp->ad_oldsize; 3233 for (i = adp->ad_lbn + 1; i < NDADDR; i++) { 3234 #ifdef DIAGNOSTIC 3235 if (dp->di_db[i] != 0 && (deplist & (1 << i)) == 0) { 3236 FREE_LOCK(&lk); 3237 panic("softdep_write_inodeblock: lost dep1"); 3238 } 3239 #endif /* DIAGNOSTIC */ 3240 dp->di_db[i] = 0; 3241 } 3242 for (i = 0; i < NIADDR; i++) { 3243 #ifdef DIAGNOSTIC 3244 if (dp->di_ib[i] != 0 && 3245 (deplist & ((1 << NDADDR) << i)) == 0) { 3246 FREE_LOCK(&lk); 3247 panic("softdep_write_inodeblock: lost dep2"); 3248 } 3249 #endif /* DIAGNOSTIC */ 3250 dp->di_ib[i] = 0; 3251 } 3252 FREE_LOCK(&lk); 3253 return; 3254 } 3255 /* 3256 * If we have zero'ed out the last allocated block of the file, 3257 * roll back the size to the last currently allocated block. 3258 * We know that this last allocated block is full-sized, as 3259 * we already checked for fragments in the loop above. 3260 */ 3261 if (lastadp != NULL && 3262 dp->di_size <= (lastadp->ad_lbn + 1) * fs->fs_bsize) { 3263 for (i = lastadp->ad_lbn; i >= 0; i--) 3264 if (dp->di_db[i] != 0) 3265 break; 3266 dp->di_size = (i + 1) * fs->fs_bsize; 3267 } 3268 /* 3269 * The only dependencies are for indirect blocks. 3270 * 3271 * The file size for indirect block additions is not guaranteed. 3272 * Such a guarantee would be non-trivial to achieve. The conventional 3273 * synchronous write implementation also does not make this guarantee. 3274 * Fsck should catch and fix discrepancies. Arguably, the file size 3275 * can be over-estimated without destroying integrity when the file 3276 * moves into the indirect blocks (i.e., is large). If we want to 3277 * postpone fsck, we are stuck with this argument.
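 * (The zeroing below is the rollback for indirect block pointers
 * that still have pending dependencies; handle_written_inodeblock()
 * rolls them forward once the write completes.)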
3278 */ 3279 for (; adp; adp = TAILQ_NEXT(adp, ad_next)) 3280 dp->di_ib[adp->ad_lbn - NDADDR] = 0; 3281 FREE_LOCK(&lk); 3282 } 3283 3284 /* 3285 * This routine is called during the completion interrupt 3286 * service routine for a disk write (from the procedure called 3287 * by the device driver to inform the filesystem caches of 3288 * a request completion). It should be called early in this 3289 * procedure, before the block is made available to other 3290 * processes or other routines are called. 3291 */ 3292 static void 3293 softdep_disk_write_complete(bp) 3294 struct buf *bp; /* describes the completed disk write */ 3295 { 3296 struct worklist *wk; 3297 struct workhead reattach; 3298 struct newblk *newblk; 3299 struct allocindir *aip; 3300 struct allocdirect *adp; 3301 struct indirdep *indirdep; 3302 struct inodedep *inodedep; 3303 struct bmsafemap *bmsafemap; 3304 3305 #ifdef DEBUG 3306 if (lk.lkt_held != NOHOLDER) 3307 panic("softdep_disk_write_complete: lock is held"); 3308 lk.lkt_held = SPECIAL_FLAG; 3309 #endif 3310 LIST_INIT(&reattach); 3311 while ((wk = LIST_FIRST(&bp->b_dep)) != NULL) { 3312 WORKLIST_REMOVE(wk); 3313 switch (wk->wk_type) { 3314 3315 case D_PAGEDEP: 3316 if (handle_written_filepage(WK_PAGEDEP(wk), bp)) 3317 WORKLIST_INSERT(&reattach, wk); 3318 continue; 3319 3320 case D_INODEDEP: 3321 if (handle_written_inodeblock(WK_INODEDEP(wk), bp)) 3322 WORKLIST_INSERT(&reattach, wk); 3323 continue; 3324 3325 case D_BMSAFEMAP: 3326 bmsafemap = WK_BMSAFEMAP(wk); 3327 while ((newblk = LIST_FIRST(&bmsafemap->sm_newblkhd))) { 3328 newblk->nb_state |= DEPCOMPLETE; 3329 newblk->nb_bmsafemap = NULL; 3330 LIST_REMOVE(newblk, nb_deps); 3331 } 3332 while ((adp = 3333 LIST_FIRST(&bmsafemap->sm_allocdirecthd))) { 3334 adp->ad_state |= DEPCOMPLETE; 3335 adp->ad_buf = NULL; 3336 LIST_REMOVE(adp, ad_deps); 3337 handle_allocdirect_partdone(adp); 3338 } 3339 while ((aip = 3340 LIST_FIRST(&bmsafemap->sm_allocindirhd))) { 3341 aip->ai_state |= DEPCOMPLETE; 3342 aip->ai_buf = NULL; 3343 LIST_REMOVE(aip, ai_deps); 3344 handle_allocindir_partdone(aip); 3345 } 3346 while ((inodedep = 3347 LIST_FIRST(&bmsafemap->sm_inodedephd)) != NULL) { 3348 inodedep->id_state |= DEPCOMPLETE; 3349 LIST_REMOVE(inodedep, id_deps); 3350 inodedep->id_buf = NULL; 3351 } 3352 WORKITEM_FREE(bmsafemap, D_BMSAFEMAP); 3353 continue; 3354 3355 case D_MKDIR: 3356 handle_written_mkdir(WK_MKDIR(wk), MKDIR_BODY); 3357 continue; 3358 3359 case D_ALLOCDIRECT: 3360 adp = WK_ALLOCDIRECT(wk); 3361 adp->ad_state |= COMPLETE; 3362 handle_allocdirect_partdone(adp); 3363 continue; 3364 3365 case D_ALLOCINDIR: 3366 aip = WK_ALLOCINDIR(wk); 3367 aip->ai_state |= COMPLETE; 3368 handle_allocindir_partdone(aip); 3369 continue; 3370 3371 case D_INDIRDEP: 3372 indirdep = WK_INDIRDEP(wk); 3373 if (indirdep->ir_state & GOINGAWAY) { 3374 lk.lkt_held = NOHOLDER; 3375 panic("disk_write_complete: indirdep gone"); 3376 } 3377 bcopy(indirdep->ir_saveddata, bp->b_data, bp->b_bcount); 3378 FREE(indirdep->ir_saveddata, M_INDIRDEP); 3379 indirdep->ir_saveddata = 0; 3380 indirdep->ir_state &= ~UNDONE; 3381 indirdep->ir_state |= ATTACHED; 3382 while ((aip = LIST_FIRST(&indirdep->ir_donehd)) != 0) { 3383 handle_allocindir_partdone(aip); 3384 if (aip == LIST_FIRST(&indirdep->ir_donehd)) { 3385 lk.lkt_held = NOHOLDER; 3386 panic("disk_write_complete: not gone"); 3387 } 3388 } 3389 WORKLIST_INSERT(&reattach, wk); 3390 if ((bp->b_flags & B_DELWRI) == 0) 3391 stat_indir_blk_ptrs++; 3392 bdirty(bp); 3393 continue; 3394 3395 default: 3396 lk.lkt_held = NOHOLDER; 
3397 panic("handle_disk_write_complete: Unknown type %s", 3398 TYPENAME(wk->wk_type)); 3399 /* NOTREACHED */ 3400 } 3401 } 3402 /* 3403 * Reattach any requests that must be redone. 3404 */ 3405 while ((wk = LIST_FIRST(&reattach)) != NULL) { 3406 WORKLIST_REMOVE(wk); 3407 WORKLIST_INSERT(&bp->b_dep, wk); 3408 } 3409 #ifdef DEBUG 3410 if (lk.lkt_held != SPECIAL_FLAG) 3411 panic("softdep_disk_write_complete: lock lost"); 3412 lk.lkt_held = NOHOLDER; 3413 #endif 3414 } 3415 3416 /* 3417 * Called from within softdep_disk_write_complete above. Note that 3418 * this routine is always called from interrupt level with further 3419 * splbio interrupts blocked. 3420 */ 3421 static void 3422 handle_allocdirect_partdone(adp) 3423 struct allocdirect *adp; /* the completed allocdirect */ 3424 { 3425 struct allocdirect *listadp; 3426 struct inodedep *inodedep; 3427 long bsize; 3428 3429 if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE) 3430 return; 3431 if (adp->ad_buf != NULL) { 3432 lk.lkt_held = NOHOLDER; 3433 panic("handle_allocdirect_partdone: dangling dep"); 3434 } 3435 /* 3436 * The on-disk inode cannot claim to be any larger than the last 3437 * fragment that has been written. Otherwise, the on-disk inode 3438 * might have fragments that were not the last block in the file 3439 * which would corrupt the filesystem. Thus, we cannot free any 3440 * allocdirects after one whose ad_oldblkno claims a fragment as 3441 * these blocks must be rolled back to zero before writing the inode. 3442 * We check the currently active set of allocdirects in id_inoupdt. 3443 */ 3444 inodedep = adp->ad_inodedep; 3445 bsize = inodedep->id_fs->fs_bsize; 3446 TAILQ_FOREACH(listadp, &inodedep->id_inoupdt, ad_next) { 3447 /* found our block */ 3448 if (listadp == adp) 3449 break; 3450 /* continue if ad_oldlbn is not a fragment */ 3451 if (listadp->ad_oldsize == 0 || 3452 listadp->ad_oldsize == bsize) 3453 continue; 3454 /* hit a fragment */ 3455 return; 3456 } 3457 /* 3458 * If we have reached the end of the current list without 3459 * finding the just finished dependency, then it must be 3460 * on the future dependency list. Future dependencies cannot 3461 * be freed until they are moved to the current list. 3462 */ 3463 if (listadp == NULL) { 3464 #ifdef DEBUG 3465 TAILQ_FOREACH(listadp, &inodedep->id_newinoupdt, ad_next) 3466 /* found our block */ 3467 if (listadp == adp) 3468 break; 3469 if (listadp == NULL) { 3470 lk.lkt_held = NOHOLDER; 3471 panic("handle_allocdirect_partdone: lost dep"); 3472 } 3473 #endif /* DEBUG */ 3474 return; 3475 } 3476 /* 3477 * If we have found the just finished dependency, then free 3478 * it along with anything that follows it that is complete. 3479 */ 3480 for (; adp; adp = listadp) { 3481 listadp = TAILQ_NEXT(adp, ad_next); 3482 if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE) 3483 return; 3484 free_allocdirect(&inodedep->id_inoupdt, adp, 1); 3485 } 3486 } 3487 3488 /* 3489 * Called from within softdep_disk_write_complete above. Note that 3490 * this routine is always called from interrupt level with further 3491 * splbio interrupts blocked. 
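 *
 * "Partdone" means the allocindir is only retired once its state
 * carries all of ALLCOMPLETE (ATTACHED | COMPLETE | DEPCOMPLETE):
 * COMPLETE says the new block itself has been written, DEPCOMPLETE
 * says the bitmap it was allocated from has been written. A finished
 * item whose indirect block is still rolled back (UNDONE) is parked
 * on ir_donehd rather than being committed.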
3492 */ 3493 static void 3494 handle_allocindir_partdone(aip) 3495 struct allocindir *aip; /* the completed allocindir */ 3496 { 3497 struct indirdep *indirdep; 3498 3499 if ((aip->ai_state & ALLCOMPLETE) != ALLCOMPLETE) 3500 return; 3501 if (aip->ai_buf != NULL) { 3502 lk.lkt_held = NOHOLDER; 3503 panic("handle_allocindir_partdone: dangling dependency"); 3504 } 3505 indirdep = aip->ai_indirdep; 3506 if (indirdep->ir_state & UNDONE) { 3507 LIST_REMOVE(aip, ai_next); 3508 LIST_INSERT_HEAD(&indirdep->ir_donehd, aip, ai_next); 3509 return; 3510 } 3511 ((ufs_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] = 3512 aip->ai_newblkno; 3513 LIST_REMOVE(aip, ai_next); 3514 if (aip->ai_freefrag != NULL) 3515 add_to_worklist(&aip->ai_freefrag->ff_list); 3516 WORKITEM_FREE(aip, D_ALLOCINDIR); 3517 } 3518 3519 /* 3520 * Called from within softdep_disk_write_complete above to restore 3521 * in-memory inode block contents to their most up-to-date state. Note 3522 * that this routine is always called from interrupt level with further 3523 * splbio interrupts blocked. 3524 */ 3525 static int 3526 handle_written_inodeblock(inodedep, bp) 3527 struct inodedep *inodedep; 3528 struct buf *bp; /* buffer containing the inode block */ 3529 { 3530 struct worklist *wk, *filefree; 3531 struct allocdirect *adp, *nextadp; 3532 struct dinode *dp; 3533 int hadchanges; 3534 3535 if ((inodedep->id_state & IOSTARTED) == 0) { 3536 lk.lkt_held = NOHOLDER; 3537 panic("handle_written_inodeblock: not started"); 3538 } 3539 inodedep->id_state &= ~IOSTARTED; 3540 inodedep->id_state |= COMPLETE; 3541 dp = (struct dinode *)bp->b_data + 3542 ino_to_fsbo(inodedep->id_fs, inodedep->id_ino); 3543 /* 3544 * If we had to rollback the inode allocation because of 3545 * bitmaps being incomplete, then simply restore it. 3546 * Keep the block dirty so that it will not be reclaimed until 3547 * all associated dependencies have been cleared and the 3548 * corresponding updates written to disk. 3549 */ 3550 if (inodedep->id_savedino != NULL) { 3551 *dp = *inodedep->id_savedino; 3552 FREE(inodedep->id_savedino, M_INODEDEP); 3553 inodedep->id_savedino = NULL; 3554 if ((bp->b_flags & B_DELWRI) == 0) 3555 stat_inode_bitmap++; 3556 bdirty(bp); 3557 return (1); 3558 } 3559 /* 3560 * Roll forward anything that had to be rolled back before 3561 * the inode could be updated. 3562 */ 3563 hadchanges = 0; 3564 for (adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; adp = nextadp) { 3565 nextadp = TAILQ_NEXT(adp, ad_next); 3566 if (adp->ad_state & ATTACHED) { 3567 lk.lkt_held = NOHOLDER; 3568 panic("handle_written_inodeblock: new entry"); 3569 } 3570 if (adp->ad_lbn < NDADDR) { 3571 if (dp->di_db[adp->ad_lbn] != adp->ad_oldblkno) { 3572 lk.lkt_held = NOHOLDER; 3573 panic("%s: %s #%ld mismatch %d != %d", 3574 "handle_written_inodeblock", 3575 "direct pointer", adp->ad_lbn, 3576 dp->di_db[adp->ad_lbn], adp->ad_oldblkno); 3577 } 3578 dp->di_db[adp->ad_lbn] = adp->ad_newblkno; 3579 } else { 3580 if (dp->di_ib[adp->ad_lbn - NDADDR] != 0) { 3581 lk.lkt_held = NOHOLDER; 3582 panic("%s: %s #%ld allocated as %d", 3583 "handle_written_inodeblock", 3584 "indirect pointer", adp->ad_lbn - NDADDR, 3585 dp->di_ib[adp->ad_lbn - NDADDR]); 3586 } 3587 dp->di_ib[adp->ad_lbn - NDADDR] = adp->ad_newblkno; 3588 } 3589 adp->ad_state &= ~UNDONE; 3590 adp->ad_state |= ATTACHED; 3591 hadchanges = 1; 3592 } 3593 if (hadchanges && (bp->b_flags & B_DELWRI) == 0) 3594 stat_direct_blk_ptrs++; 3595 /* 3596 * Reset the file size to its most up-to-date value. 
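 *
 * (id_savedsize is the size stashed when the inode write was started;
 * -1 means no value was saved, which at this point indicates a lost
 * update and is treated as a panic below.)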
3597 */
3598 	if (inodedep->id_savedsize == -1) {
3599 		lk.lkt_held = NOHOLDER;
3600 		panic("handle_written_inodeblock: bad size");
3601 	}
3602 	if (dp->di_size != inodedep->id_savedsize) {
3603 		dp->di_size = inodedep->id_savedsize;
3604 		hadchanges = 1;
3605 	}
3606 	inodedep->id_savedsize = -1;
3607 	/*
3608 	 * If there were any rollbacks in the inode block, then it must be
3609 	 * marked dirty so that it will eventually get written back in
3610 	 * its correct form.
3611 	 */
3612 	if (hadchanges)
3613 		bdirty(bp);
3614 	/*
3615 	 * Process any allocdirects that completed during the update.
3616 	 */
3617 	if ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != NULL)
3618 		handle_allocdirect_partdone(adp);
3619 	/*
3620 	 * Process deallocations that were held pending until the
3621 	 * inode had been written to disk. Freeing of the inode
3622 	 * is delayed until after all blocks have been freed to
3623 	 * avoid creation of new <vfsid, inum, lbn> triples
3624 	 * before the old ones have been deleted.
3625 	 */
3626 	filefree = NULL;
3627 	while ((wk = LIST_FIRST(&inodedep->id_bufwait)) != NULL) {
3628 		WORKLIST_REMOVE(wk);
3629 		switch (wk->wk_type) {
3630 
3631 		case D_FREEFILE:
3632 			/*
3633 			 * We defer adding filefree to the worklist until
3634 			 * all other additions have been made to ensure
3635 			 * that it will be done after all the old blocks
3636 			 * have been freed.
3637 			 */
3638 			if (filefree != NULL) {
3639 				lk.lkt_held = NOHOLDER;
3640 				panic("handle_written_inodeblock: filefree");
3641 			}
3642 			filefree = wk;
3643 			continue;
3644 
3645 		case D_MKDIR:
3646 			handle_written_mkdir(WK_MKDIR(wk), MKDIR_PARENT);
3647 			continue;
3648 
3649 		case D_DIRADD:
3650 			diradd_inode_written(WK_DIRADD(wk), inodedep);
3651 			continue;
3652 
3653 		case D_FREEBLKS:
3654 			wk->wk_state |= COMPLETE;
3655 			if ((wk->wk_state & ALLCOMPLETE) != ALLCOMPLETE)
3656 				continue;
3657 			/* -- fall through -- */
3658 		case D_FREEFRAG:
3659 		case D_DIRREM:
3660 			add_to_worklist(wk);
3661 			continue;
3662 
3663 		default:
3664 			lk.lkt_held = NOHOLDER;
3665 			panic("handle_written_inodeblock: Unknown type %s",
3666 			    TYPENAME(wk->wk_type));
3667 			/* NOTREACHED */
3668 		}
3669 	}
3670 	if (filefree != NULL) {
3671 		if (free_inodedep(inodedep) == 0) {
3672 			lk.lkt_held = NOHOLDER;
3673 			panic("handle_written_inodeblock: live inodedep");
3674 		}
3675 		add_to_worklist(filefree);
3676 		return (0);
3677 	}
3678 
3679 	/*
3680 	 * If no outstanding dependencies, free it.
3681 	 */
3682 	if (free_inodedep(inodedep) || TAILQ_FIRST(&inodedep->id_inoupdt) == 0)
3683 		return (0);
3684 	return (hadchanges);
3685 }
3686 
3687 /*
3688  * Process a diradd entry after its dependent inode has been written.
3689  * This routine must be called with splbio interrupts blocked.
3690  */
3691 static void
3692 diradd_inode_written(dap, inodedep)
3693 	struct diradd *dap;
3694 	struct inodedep *inodedep;
3695 {
3696 	struct pagedep *pagedep;
3697 
3698 	dap->da_state |= COMPLETE;
3699 	if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) {
3700 		if (dap->da_state & DIRCHG)
3701 			pagedep = dap->da_previous->dm_pagedep;
3702 		else
3703 			pagedep = dap->da_pagedep;
3704 		LIST_REMOVE(dap, da_pdlist);
3705 		LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist);
3706 	}
3707 	WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list);
3708 }
3709 
3710 /*
3711  * Handle the completion of a mkdir dependency.
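 *
 * A mkdir work item comes in two flavors for one new directory:
 * MKDIR_BODY tracks the write of the directory block holding the
 * "." and ".." entries, while MKDIR_PARENT tracks the write of the
 * parent inode with its incremented link count. Each completion
 * clears its own bit in the shared diradd; only when both are gone
 * is the diradd marked DEPCOMPLETE and, once ALLCOMPLETE, moved to
 * the pending list.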
3712 */
3713 static void
3714 handle_written_mkdir(mkdir, type)
3715 	struct mkdir *mkdir;
3716 	int type;
3717 {
3718 	struct diradd *dap;
3719 	struct pagedep *pagedep;
3720 
3721 	if (mkdir->md_state != type) {
3722 		lk.lkt_held = NOHOLDER;
3723 		panic("handle_written_mkdir: bad type");
3724 	}
3725 	dap = mkdir->md_diradd;
3726 	dap->da_state &= ~type;
3727 	if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) == 0)
3728 		dap->da_state |= DEPCOMPLETE;
3729 	if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) {
3730 		if (dap->da_state & DIRCHG)
3731 			pagedep = dap->da_previous->dm_pagedep;
3732 		else
3733 			pagedep = dap->da_pagedep;
3734 		LIST_REMOVE(dap, da_pdlist);
3735 		LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist);
3736 	}
3737 	LIST_REMOVE(mkdir, md_mkdirs);
3738 	WORKITEM_FREE(mkdir, D_MKDIR);
3739 }
3740 
3741 /*
3742  * Called from within softdep_disk_write_complete above.
3743  * A write operation was just completed. Removed inodes can
3744  * now be freed and associated block pointers may be committed.
3745  * Note that this routine is always called from interrupt level
3746  * with further splbio interrupts blocked.
3747  */
3748 static int
3749 handle_written_filepage(pagedep, bp)
3750 	struct pagedep *pagedep;
3751 	struct buf *bp;		/* buffer containing the written page */
3752 {
3753 	struct dirrem *dirrem;
3754 	struct diradd *dap, *nextdap;
3755 	struct direct *ep;
3756 	int i, chgs;
3757 
3758 	if ((pagedep->pd_state & IOSTARTED) == 0) {
3759 		lk.lkt_held = NOHOLDER;
3760 		panic("handle_written_filepage: not started");
3761 	}
3762 	pagedep->pd_state &= ~IOSTARTED;
3763 	/*
3764 	 * Process any directory removals that have been committed.
3765 	 */
3766 	while ((dirrem = LIST_FIRST(&pagedep->pd_dirremhd)) != NULL) {
3767 		LIST_REMOVE(dirrem, dm_next);
3768 		dirrem->dm_dirinum = pagedep->pd_ino;
3769 		add_to_worklist(&dirrem->dm_list);
3770 	}
3771 	/*
3772 	 * Free any directory additions that have been committed.
3773 	 */
3774 	while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL)
3775 		free_diradd(dap);
3776 	/*
3777 	 * Uncommitted directory entries must be restored.
3778 	 */
3779 	for (chgs = 0, i = 0; i < DAHASHSZ; i++) {
3780 		for (dap = LIST_FIRST(&pagedep->pd_diraddhd[i]); dap;
3781 		     dap = nextdap) {
3782 			nextdap = LIST_NEXT(dap, da_pdlist);
3783 			if (dap->da_state & ATTACHED) {
3784 				lk.lkt_held = NOHOLDER;
3785 				panic("handle_written_filepage: attached");
3786 			}
3787 			ep = (struct direct *)
3788 			    ((char *)bp->b_data + dap->da_offset);
3789 			ep->d_ino = dap->da_newinum;
3790 			dap->da_state &= ~UNDONE;
3791 			dap->da_state |= ATTACHED;
3792 			chgs = 1;
3793 			/*
3794 			 * If the inode referenced by the directory has
3795 			 * been written out, then the dependency can be
3796 			 * moved to the pending list.
3797 			 */
3798 			if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) {
3799 				LIST_REMOVE(dap, da_pdlist);
3800 				LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap,
3801 				    da_pdlist);
3802 			}
3803 		}
3804 	}
3805 	/*
3806 	 * If there were any rollbacks in the directory, then it must be
3807 	 * marked dirty so that it will eventually get written back in
3808 	 * its correct form.
3809 	 */
3810 	if (chgs) {
3811 		if ((bp->b_flags & B_DELWRI) == 0)
3812 			stat_dir_entry++;
3813 		bdirty(bp);
3814 	}
3815 	/*
3816 	 * If no dependencies remain, the pagedep will be freed.
3817 	 * Otherwise it will remain to update the page before it
3818 	 * is written back to disk.
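 *
 * "No dependencies" means the pending list is empty and all of the
 * DAHASHSZ diradd hash chains have drained. The return value tells
 * the caller whether the pagedep was reclaimed (0) or still has
 * work outstanding and must be reattached to the buffer (1).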
3819 */ 3820 if (LIST_FIRST(&pagedep->pd_pendinghd) == 0) { 3821 for (i = 0; i < DAHASHSZ; i++) 3822 if (LIST_FIRST(&pagedep->pd_diraddhd[i]) != NULL) 3823 break; 3824 if (i == DAHASHSZ) { 3825 LIST_REMOVE(pagedep, pd_hash); 3826 WORKITEM_FREE(pagedep, D_PAGEDEP); 3827 return (0); 3828 } 3829 } 3830 return (1); 3831 } 3832 3833 /* 3834 * Writing back in-core inode structures. 3835 * 3836 * The filesystem only accesses an inode's contents when it occupies an 3837 * "in-core" inode structure. These "in-core" structures are separate from 3838 * the page frames used to cache inode blocks. Only the latter are 3839 * transferred to/from the disk. So, when the updated contents of the 3840 * "in-core" inode structure are copied to the corresponding in-memory inode 3841 * block, the dependencies are also transferred. The following procedure is 3842 * called when copying a dirty "in-core" inode to a cached inode block. 3843 */ 3844 3845 /* 3846 * Called when an inode is loaded from disk. If the effective link count 3847 * differed from the actual link count when it was last flushed, then we 3848 * need to ensure that the correct effective link count is put back. 3849 */ 3850 void 3851 softdep_load_inodeblock(ip) 3852 struct inode *ip; /* the "in_core" copy of the inode */ 3853 { 3854 struct inodedep *inodedep; 3855 3856 /* 3857 * Check for alternate nlink count. 3858 */ 3859 ip->i_effnlink = ip->i_nlink; 3860 ACQUIRE_LOCK(&lk); 3861 if (inodedep_lookup(ip->i_fs, ip->i_number, 0, &inodedep) == 0) { 3862 FREE_LOCK(&lk); 3863 return; 3864 } 3865 ip->i_effnlink -= inodedep->id_nlinkdelta; 3866 FREE_LOCK(&lk); 3867 } 3868 3869 /* 3870 * This routine is called just before the "in-core" inode 3871 * information is to be copied to the in-memory inode block. 3872 * Recall that an inode block contains several inodes. If 3873 * the force flag is set, then the dependencies will be 3874 * cleared so that the update can always be made. Note that 3875 * the buffer is locked when this routine is called, so we 3876 * will never be in the middle of writing the inode block 3877 * to disk. 3878 */ 3879 void 3880 softdep_update_inodeblock(ip, bp, waitfor) 3881 struct inode *ip; /* the "in_core" copy of the inode */ 3882 struct buf *bp; /* the buffer containing the inode block */ 3883 int waitfor; /* nonzero => update must be allowed */ 3884 { 3885 struct inodedep *inodedep; 3886 struct worklist *wk; 3887 int error, gotit; 3888 3889 /* 3890 * If the effective link count is not equal to the actual link 3891 * count, then we must track the difference in an inodedep while 3892 * the inode is (potentially) tossed out of the cache. Otherwise, 3893 * if there is no existing inodedep, then there are no dependencies 3894 * to track. 3895 */ 3896 ACQUIRE_LOCK(&lk); 3897 if (inodedep_lookup(ip->i_fs, ip->i_number, 0, &inodedep) == 0) { 3898 FREE_LOCK(&lk); 3899 if (ip->i_effnlink != ip->i_nlink) 3900 panic("softdep_update_inodeblock: bad link count"); 3901 return; 3902 } 3903 if (inodedep->id_nlinkdelta != ip->i_nlink - ip->i_effnlink) { 3904 FREE_LOCK(&lk); 3905 panic("softdep_update_inodeblock: bad delta"); 3906 } 3907 /* 3908 * Changes have been initiated. Anything depending on these 3909 * changes cannot occur until this inode has been written. 
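 *
 * Clearing COMPLETE from id_state re-arms the dependency, and putting
 * the inodedep on the buffer's b_dep list (unless it is on a worklist
 * already) is what lets softdep_disk_write_complete find it when the
 * inode block write finishes.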
3910 */
3911 	inodedep->id_state &= ~COMPLETE;
3912 	if ((inodedep->id_state & ONWORKLIST) == 0)
3913 		WORKLIST_INSERT(&bp->b_dep, &inodedep->id_list);
3914 	/*
3915 	 * Any new dependencies associated with the incore inode must
3916 	 * now be moved to the list associated with the buffer holding
3917 	 * the in-memory copy of the inode. Once merged, process any
3918 	 * allocdirects that are completed by the merger.
3919 	 */
3920 	merge_inode_lists(inodedep);
3921 	if (TAILQ_FIRST(&inodedep->id_inoupdt) != NULL)
3922 		handle_allocdirect_partdone(TAILQ_FIRST(&inodedep->id_inoupdt));
3923 	/*
3924 	 * Now that the inode has been pushed into the buffer, the
3925 	 * operations dependent on the inode being written to disk
3926 	 * can be moved to the id_bufwait so that they will be
3927 	 * processed when the buffer I/O completes.
3928 	 */
3929 	while ((wk = LIST_FIRST(&inodedep->id_inowait)) != NULL) {
3930 		WORKLIST_REMOVE(wk);
3931 		WORKLIST_INSERT(&inodedep->id_bufwait, wk);
3932 	}
3933 	/*
3934 	 * Newly allocated inodes cannot be written until the bitmap
3935 	 * that allocates them has been written (indicated by
3936 	 * DEPCOMPLETE being set in id_state). If we are doing a
3937 	 * forced sync (e.g., an fsync on a file), we force the bitmap
3938 	 * to be written so that the update can be done.
3939 	 */
3940 	if ((inodedep->id_state & DEPCOMPLETE) != 0 || waitfor == 0) {
3941 		FREE_LOCK(&lk);
3942 		return;
3943 	}
3944 	gotit = getdirtybuf(&inodedep->id_buf, MNT_WAIT);
3945 	FREE_LOCK(&lk);
3946 	if (gotit &&
3947 	    (error = VOP_BWRITE(inodedep->id_buf->b_vp, inodedep->id_buf)) != 0)
3948 		softdep_error("softdep_update_inodeblock: bwrite", error);
3949 	if ((inodedep->id_state & DEPCOMPLETE) == 0)
3950 		panic("softdep_update_inodeblock: update failed");
3951 }
3952 
3953 /*
3954  * Merge the new inode dependency list (id_newinoupdt) into the old
3955  * inode dependency list (id_inoupdt). This routine must be called
3956  * with splbio interrupts blocked.
3957  */
3958 static void
3959 merge_inode_lists(inodedep)
3960 	struct inodedep *inodedep;
3961 {
3962 	struct allocdirect *listadp, *newadp;
3963 
3964 	newadp = TAILQ_FIRST(&inodedep->id_newinoupdt);
3965 	for (listadp = TAILQ_FIRST(&inodedep->id_inoupdt); listadp && newadp;) {
3966 		if (listadp->ad_lbn < newadp->ad_lbn) {
3967 			listadp = TAILQ_NEXT(listadp, ad_next);
3968 			continue;
3969 		}
3970 		TAILQ_REMOVE(&inodedep->id_newinoupdt, newadp, ad_next);
3971 		TAILQ_INSERT_BEFORE(listadp, newadp, ad_next);
3972 		if (listadp->ad_lbn == newadp->ad_lbn) {
3973 			allocdirect_merge(&inodedep->id_inoupdt, newadp,
3974 			    listadp);
3975 			listadp = newadp;
3976 		}
3977 		newadp = TAILQ_FIRST(&inodedep->id_newinoupdt);
3978 	}
3979 	while ((newadp = TAILQ_FIRST(&inodedep->id_newinoupdt)) != NULL) {
3980 		TAILQ_REMOVE(&inodedep->id_newinoupdt, newadp, ad_next);
3981 		TAILQ_INSERT_TAIL(&inodedep->id_inoupdt, newadp, ad_next);
3982 	}
3983 }
3984 
3985 /*
3986  * If we are doing an fsync, then we must ensure that any directory
3987  * entries for the inode have been written after the inode gets to disk.
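 *
 * A minimal sketch of the intended calling pattern (the caller below
 * is hypothetical, only the ordering matters): the inode itself must
 * be on disk before this routine runs, since it panics if any inode
 * dependencies are still pending:
 *
 *	if ((error = UFS_UPDATE(vp, 1)) == 0)
 *		error = softdep_fsync(vp);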
3988 */ 3989 static int 3990 softdep_fsync(vp) 3991 struct vnode *vp; /* the "in_core" copy of the inode */ 3992 { 3993 struct inodedep *inodedep; 3994 struct pagedep *pagedep; 3995 struct worklist *wk; 3996 struct diradd *dap; 3997 struct mount *mnt; 3998 struct vnode *pvp; 3999 struct inode *ip; 4000 struct buf *bp; 4001 struct fs *fs; 4002 struct thread *td = curthread; /* XXX */ 4003 int error, flushparent; 4004 ino_t parentino; 4005 ufs_lbn_t lbn; 4006 4007 ip = VTOI(vp); 4008 fs = ip->i_fs; 4009 ACQUIRE_LOCK(&lk); 4010 if (inodedep_lookup(fs, ip->i_number, 0, &inodedep) == 0) { 4011 FREE_LOCK(&lk); 4012 return (0); 4013 } 4014 if (LIST_FIRST(&inodedep->id_inowait) != NULL || 4015 LIST_FIRST(&inodedep->id_bufwait) != NULL || 4016 TAILQ_FIRST(&inodedep->id_inoupdt) != NULL || 4017 TAILQ_FIRST(&inodedep->id_newinoupdt) != NULL) { 4018 FREE_LOCK(&lk); 4019 panic("softdep_fsync: pending ops"); 4020 } 4021 for (error = 0, flushparent = 0; ; ) { 4022 if ((wk = LIST_FIRST(&inodedep->id_pendinghd)) == NULL) 4023 break; 4024 if (wk->wk_type != D_DIRADD) { 4025 FREE_LOCK(&lk); 4026 panic("softdep_fsync: Unexpected type %s", 4027 TYPENAME(wk->wk_type)); 4028 } 4029 dap = WK_DIRADD(wk); 4030 /* 4031 * Flush our parent if this directory entry 4032 * has a MKDIR_PARENT dependency. 4033 */ 4034 if (dap->da_state & DIRCHG) 4035 pagedep = dap->da_previous->dm_pagedep; 4036 else 4037 pagedep = dap->da_pagedep; 4038 mnt = pagedep->pd_mnt; 4039 parentino = pagedep->pd_ino; 4040 lbn = pagedep->pd_lbn; 4041 if ((dap->da_state & (MKDIR_BODY | COMPLETE)) != COMPLETE) { 4042 FREE_LOCK(&lk); 4043 panic("softdep_fsync: dirty"); 4044 } 4045 flushparent = dap->da_state & MKDIR_PARENT; 4046 /* 4047 * If we are being fsync'ed as part of vgone'ing this vnode, 4048 * then we will not be able to release and recover the 4049 * vnode below, so we just have to give up on writing its 4050 * directory entry out. It will eventually be written, just 4051 * not now, but then the user was not asking to have it 4052 * written, so we are not breaking any promises. 4053 */ 4054 if (vp->v_flag & VRECLAIMED) 4055 break; 4056 /* 4057 * We prevent deadlock by always fetching inodes from the 4058 * root, moving down the directory tree. Thus, when fetching 4059 * our parent directory, we must unlock ourselves before 4060 * requesting the lock on our parent. See the comment in 4061 * ufs_lookup for details on possible races. 4062 */ 4063 FREE_LOCK(&lk); 4064 VOP_UNLOCK(vp, 0, td); 4065 error = VFS_VGET(mnt, parentino, &pvp); 4066 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td); 4067 if (error != 0) 4068 return (error); 4069 if (flushparent) { 4070 if ((error = UFS_UPDATE(pvp, 1)) != 0) { 4071 vput(pvp); 4072 return (error); 4073 } 4074 } 4075 /* 4076 * Flush directory page containing the inode's name. 4077 */ 4078 error = bread(pvp, lbn, blksize(fs, VTOI(pvp), lbn), &bp); 4079 if (error == 0) 4080 error = VOP_BWRITE(bp->b_vp, bp); 4081 vput(pvp); 4082 if (error != 0) 4083 return (error); 4084 ACQUIRE_LOCK(&lk); 4085 if (inodedep_lookup(fs, ip->i_number, 0, &inodedep) == 0) 4086 break; 4087 } 4088 FREE_LOCK(&lk); 4089 return (0); 4090 } 4091 4092 /* 4093 * Flush all the dirty bitmaps associated with the block device 4094 * before flushing the rest of the dirty blocks so as to reduce 4095 * the number of dependencies that will have to be rolled back. 
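 *
 * The walk over the device vnode's dirty buffers is expressed as an
 * RB_SCAN callback (softdep_fsync_mountdev_bp below) rather than an
 * open-coded list walk, since the callback drops lk to issue writes
 * and the dirty tree can change underneath the scan.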
4096 */ 4097 static int softdep_fsync_mountdev_bp(struct buf *bp, void *data); 4098 4099 void 4100 softdep_fsync_mountdev(vp) 4101 struct vnode *vp; 4102 { 4103 if (!vn_isdisk(vp, NULL)) 4104 panic("softdep_fsync_mountdev: vnode not a disk"); 4105 ACQUIRE_LOCK(&lk); 4106 RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL, 4107 softdep_fsync_mountdev_bp, NULL); 4108 drain_output(vp, 1); 4109 FREE_LOCK(&lk); 4110 } 4111 4112 static int 4113 softdep_fsync_mountdev_bp(struct buf *bp, void *data) 4114 { 4115 struct worklist *wk; 4116 4117 /* 4118 * If it is already scheduled, skip to the next buffer. 4119 */ 4120 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) 4121 return(0); 4122 if ((bp->b_flags & B_DELWRI) == 0) { 4123 FREE_LOCK(&lk); 4124 panic("softdep_fsync_mountdev: not dirty"); 4125 } 4126 /* 4127 * We are only interested in bitmaps with outstanding 4128 * dependencies. 4129 */ 4130 if ((wk = LIST_FIRST(&bp->b_dep)) == NULL || 4131 wk->wk_type != D_BMSAFEMAP || 4132 (bp->b_xflags & BX_BKGRDINPROG)) { 4133 BUF_UNLOCK(bp); 4134 return(0); 4135 } 4136 bremfree(bp); 4137 FREE_LOCK(&lk); 4138 (void) bawrite(bp); 4139 ACQUIRE_LOCK(&lk); 4140 return(0); 4141 } 4142 4143 /* 4144 * This routine is called when we are trying to synchronously flush a 4145 * file. This routine must eliminate any filesystem metadata dependencies 4146 * so that the syncing routine can succeed by pushing the dirty blocks 4147 * associated with the file. If any I/O errors occur, they are returned. 4148 */ 4149 struct softdep_sync_metadata_info { 4150 struct vnode *vp; 4151 int waitfor; 4152 }; 4153 4154 static int softdep_sync_metadata_bp(struct buf *bp, void *data); 4155 4156 int 4157 softdep_sync_metadata(struct vnode *vp, struct thread *td) 4158 { 4159 struct softdep_sync_metadata_info info; 4160 int error, waitfor; 4161 4162 /* 4163 * Check whether this vnode is involved in a filesystem 4164 * that is doing soft dependency processing. 4165 */ 4166 if (!vn_isdisk(vp, NULL)) { 4167 if (!DOINGSOFTDEP(vp)) 4168 return (0); 4169 } else 4170 if (vp->v_rdev->si_mountpoint == NULL || 4171 (vp->v_rdev->si_mountpoint->mnt_flag & MNT_SOFTDEP) == 0) 4172 return (0); 4173 /* 4174 * Ensure that any direct block dependencies have been cleared. 4175 */ 4176 ACQUIRE_LOCK(&lk); 4177 if ((error = flush_inodedep_deps(VTOI(vp)->i_fs, VTOI(vp)->i_number))) { 4178 FREE_LOCK(&lk); 4179 return (error); 4180 } 4181 /* 4182 * For most files, the only metadata dependencies are the 4183 * cylinder group maps that allocate their inode or blocks. 4184 * The block allocation dependencies can be found by traversing 4185 * the dependency lists for any buffers that remain on their 4186 * dirty buffer list. The inode allocation dependency will 4187 * be resolved when the inode is updated with MNT_WAIT. 4188 * This work is done in two passes. The first pass grabs most 4189 * of the buffers and begins asynchronously writing them. The 4190 * only way to wait for these asynchronous writes is to sleep 4191 * on the filesystem vnode which may stay busy for a long time 4192 * if the filesystem is active. So, instead, we make a second 4193 * pass over the dependencies blocking on each write. In the 4194 * usual case we will be blocking against a write that we 4195 * initiated, so when it is done the dependency will have been 4196 * resolved. Thus the second pass is expected to end quickly. 4197 */ 4198 waitfor = MNT_NOWAIT; 4199 top: 4200 /* 4201 * We must wait for any I/O in progress to finish so that 4202 * all potential buffers on the dirty list will be visible. 
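 *
 * (drain_output sleeps until the vnode's v_numoutput count reaches
 * zero; the second argument of 1 says the softdep lock is already
 * held.)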
4203 */ 4204 drain_output(vp, 1); 4205 info.vp = vp; 4206 info.waitfor = waitfor; 4207 error = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL, 4208 softdep_sync_metadata_bp, &info); 4209 if (error < 0) { 4210 FREE_LOCK(&lk); 4211 return(-error); /* error code */ 4212 } 4213 4214 /* 4215 * The brief unlock is to allow any pent up dependency 4216 * processing to be done. Then proceed with the second pass. 4217 */ 4218 if (waitfor == MNT_NOWAIT) { 4219 waitfor = MNT_WAIT; 4220 FREE_LOCK(&lk); 4221 ACQUIRE_LOCK(&lk); 4222 goto top; 4223 } 4224 4225 /* 4226 * If we have managed to get rid of all the dirty buffers, 4227 * then we are done. For certain directories and block 4228 * devices, we may need to do further work. 4229 * 4230 * We must wait for any I/O in progress to finish so that 4231 * all potential buffers on the dirty list will be visible. 4232 */ 4233 drain_output(vp, 1); 4234 if (RB_EMPTY(&vp->v_rbdirty_tree)) { 4235 FREE_LOCK(&lk); 4236 return (0); 4237 } 4238 4239 FREE_LOCK(&lk); 4240 /* 4241 * If we are trying to sync a block device, some of its buffers may 4242 * contain metadata that cannot be written until the contents of some 4243 * partially written files have been written to disk. The only easy 4244 * way to accomplish this is to sync the entire filesystem (luckily 4245 * this happens rarely). 4246 */ 4247 if (vn_isdisk(vp, NULL) && 4248 vp->v_rdev && 4249 vp->v_rdev->si_mountpoint && !VOP_ISLOCKED(vp, NULL) && 4250 (error = VFS_SYNC(vp->v_rdev->si_mountpoint, MNT_WAIT, td)) != 0) 4251 return (error); 4252 return (0); 4253 } 4254 4255 static int 4256 softdep_sync_metadata_bp(struct buf *bp, void *data) 4257 { 4258 struct softdep_sync_metadata_info *info = data; 4259 struct pagedep *pagedep; 4260 struct allocdirect *adp; 4261 struct allocindir *aip; 4262 struct worklist *wk; 4263 struct buf *nbp; 4264 int error; 4265 int i; 4266 4267 if (getdirtybuf(&bp, MNT_WAIT) == 0) 4268 return (0); 4269 4270 /* 4271 * As we hold the buffer locked, none of its dependencies 4272 * will disappear. 
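 *
 * Errors from this callback are returned negated (-error) so that the
 * RB_SCAN caller can tell a genuine failure from the usual "continue
 * the scan" zero; softdep_sync_metadata flips the sign back before
 * handing the error up.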
4273 */ 4274 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 4275 switch (wk->wk_type) { 4276 4277 case D_ALLOCDIRECT: 4278 adp = WK_ALLOCDIRECT(wk); 4279 if (adp->ad_state & DEPCOMPLETE) 4280 break; 4281 nbp = adp->ad_buf; 4282 if (getdirtybuf(&nbp, info->waitfor) == 0) 4283 break; 4284 FREE_LOCK(&lk); 4285 if (info->waitfor == MNT_NOWAIT) { 4286 bawrite(nbp); 4287 } else if ((error = VOP_BWRITE(nbp->b_vp, nbp)) != 0) { 4288 bawrite(bp); 4289 ACQUIRE_LOCK(&lk); 4290 return (-error); 4291 } 4292 ACQUIRE_LOCK(&lk); 4293 break; 4294 4295 case D_ALLOCINDIR: 4296 aip = WK_ALLOCINDIR(wk); 4297 if (aip->ai_state & DEPCOMPLETE) 4298 break; 4299 nbp = aip->ai_buf; 4300 if (getdirtybuf(&nbp, info->waitfor) == 0) 4301 break; 4302 FREE_LOCK(&lk); 4303 if (info->waitfor == MNT_NOWAIT) { 4304 bawrite(nbp); 4305 } else if ((error = VOP_BWRITE(nbp->b_vp, nbp)) != 0) { 4306 bawrite(bp); 4307 ACQUIRE_LOCK(&lk); 4308 return (-error); 4309 } 4310 ACQUIRE_LOCK(&lk); 4311 break; 4312 4313 case D_INDIRDEP: 4314 restart: 4315 4316 LIST_FOREACH(aip, &WK_INDIRDEP(wk)->ir_deplisthd, ai_next) { 4317 if (aip->ai_state & DEPCOMPLETE) 4318 continue; 4319 nbp = aip->ai_buf; 4320 if (getdirtybuf(&nbp, MNT_WAIT) == 0) 4321 goto restart; 4322 FREE_LOCK(&lk); 4323 if ((error = VOP_BWRITE(nbp->b_vp, nbp)) != 0) { 4324 bawrite(bp); 4325 ACQUIRE_LOCK(&lk); 4326 return (-error); 4327 } 4328 ACQUIRE_LOCK(&lk); 4329 goto restart; 4330 } 4331 break; 4332 4333 case D_INODEDEP: 4334 if ((error = flush_inodedep_deps(WK_INODEDEP(wk)->id_fs, 4335 WK_INODEDEP(wk)->id_ino)) != 0) { 4336 FREE_LOCK(&lk); 4337 bawrite(bp); 4338 ACQUIRE_LOCK(&lk); 4339 return (-error); 4340 } 4341 break; 4342 4343 case D_PAGEDEP: 4344 /* 4345 * We are trying to sync a directory that may 4346 * have dependencies on both its own metadata 4347 * and/or dependencies on the inodes of any 4348 * recently allocated files. We walk its diradd 4349 * lists pushing out the associated inode. 4350 */ 4351 pagedep = WK_PAGEDEP(wk); 4352 for (i = 0; i < DAHASHSZ; i++) { 4353 if (LIST_FIRST(&pagedep->pd_diraddhd[i]) == 0) 4354 continue; 4355 if ((error = 4356 flush_pagedep_deps(info->vp, 4357 pagedep->pd_mnt, 4358 &pagedep->pd_diraddhd[i]))) { 4359 FREE_LOCK(&lk); 4360 bawrite(bp); 4361 ACQUIRE_LOCK(&lk); 4362 return (-error); 4363 } 4364 } 4365 break; 4366 4367 case D_MKDIR: 4368 /* 4369 * This case should never happen if the vnode has 4370 * been properly sync'ed. However, if this function 4371 * is used at a place where the vnode has not yet 4372 * been sync'ed, this dependency can show up. So, 4373 * rather than panic, just flush it. 4374 */ 4375 nbp = WK_MKDIR(wk)->md_buf; 4376 if (getdirtybuf(&nbp, info->waitfor) == 0) 4377 break; 4378 FREE_LOCK(&lk); 4379 if (info->waitfor == MNT_NOWAIT) { 4380 bawrite(nbp); 4381 } else if ((error = VOP_BWRITE(nbp->b_vp, nbp)) != 0) { 4382 bawrite(bp); 4383 ACQUIRE_LOCK(&lk); 4384 return (-error); 4385 } 4386 ACQUIRE_LOCK(&lk); 4387 break; 4388 4389 case D_BMSAFEMAP: 4390 /* 4391 * This case should never happen if the vnode has 4392 * been properly sync'ed. However, if this function 4393 * is used at a place where the vnode has not yet 4394 * been sync'ed, this dependency can show up. So, 4395 * rather than panic, just flush it. 4396 * 4397 * nbp can wind up == bp if a device node for the 4398 * same filesystem is being fsynced at the same time, 4399 * leading to a panic if we don't catch the case. 
4400 */
4401 			nbp = WK_BMSAFEMAP(wk)->sm_buf;
4402 			if (nbp == bp)
4403 				break;
4404 			if (getdirtybuf(&nbp, info->waitfor) == 0)
4405 				break;
4406 			FREE_LOCK(&lk);
4407 			if (info->waitfor == MNT_NOWAIT) {
4408 				bawrite(nbp);
4409 			} else if ((error = VOP_BWRITE(nbp->b_vp, nbp)) != 0) {
4410 				bawrite(bp);
4411 				ACQUIRE_LOCK(&lk);
4412 				return (-error);
4413 			}
4414 			ACQUIRE_LOCK(&lk);
4415 			break;
4416 
4417 		default:
4418 			FREE_LOCK(&lk);
4419 			panic("softdep_sync_metadata: Unknown type %s",
4420 			    TYPENAME(wk->wk_type));
4421 			/* NOTREACHED */
4422 		}
4423 	}
4424 	FREE_LOCK(&lk);
4425 	bawrite(bp);
4426 	ACQUIRE_LOCK(&lk);
4427 	return(0);
4428 }
4429 
4430 /*
4431  * Flush the dependencies associated with an inodedep.
4432  * Called with splbio blocked.
4433  */
4434 static int
4435 flush_inodedep_deps(fs, ino)
4436 	struct fs *fs;
4437 	ino_t ino;
4438 {
4439 	struct inodedep *inodedep;
4440 	struct allocdirect *adp;
4441 	int error, waitfor;
4442 	struct buf *bp;
4443 
4444 	/*
4445 	 * This work is done in two passes. The first pass grabs most
4446 	 * of the buffers and begins asynchronously writing them. The
4447 	 * only way to wait for these asynchronous writes is to sleep
4448 	 * on the filesystem vnode which may stay busy for a long time
4449 	 * if the filesystem is active. So, instead, we make a second
4450 	 * pass over the dependencies blocking on each write. In the
4451 	 * usual case we will be blocking against a write that we
4452 	 * initiated, so when it is done the dependency will have been
4453 	 * resolved. Thus the second pass is expected to end quickly.
4454 	 * We give a brief window at the top of the loop to allow
4455 	 * any pending I/O to complete.
4456 	 */
4457 	for (waitfor = MNT_NOWAIT; ; ) {
4458 		FREE_LOCK(&lk);
4459 		ACQUIRE_LOCK(&lk);
4460 		if (inodedep_lookup(fs, ino, 0, &inodedep) == 0)
4461 			return (0);
4462 		TAILQ_FOREACH(adp, &inodedep->id_inoupdt, ad_next) {
4463 			if (adp->ad_state & DEPCOMPLETE)
4464 				continue;
4465 			bp = adp->ad_buf;
4466 			if (getdirtybuf(&bp, waitfor) == 0) {
4467 				if (waitfor == MNT_NOWAIT)
4468 					continue;
4469 				break;
4470 			}
4471 			FREE_LOCK(&lk);
4472 			if (waitfor == MNT_NOWAIT) {
4473 				bawrite(bp);
4474 			} else if ((error = VOP_BWRITE(bp->b_vp, bp)) != 0) {
4475 				ACQUIRE_LOCK(&lk);
4476 				return (error);
4477 			}
4478 			ACQUIRE_LOCK(&lk);
4479 			break;
4480 		}
4481 		if (adp != NULL)
4482 			continue;
4483 		TAILQ_FOREACH(adp, &inodedep->id_newinoupdt, ad_next) {
4484 			if (adp->ad_state & DEPCOMPLETE)
4485 				continue;
4486 			bp = adp->ad_buf;
4487 			if (getdirtybuf(&bp, waitfor) == 0) {
4488 				if (waitfor == MNT_NOWAIT)
4489 					continue;
4490 				break;
4491 			}
4492 			FREE_LOCK(&lk);
4493 			if (waitfor == MNT_NOWAIT) {
4494 				bawrite(bp);
4495 			} else if ((error = VOP_BWRITE(bp->b_vp, bp)) != 0) {
4496 				ACQUIRE_LOCK(&lk);
4497 				return (error);
4498 			}
4499 			ACQUIRE_LOCK(&lk);
4500 			break;
4501 		}
4502 		if (adp != NULL)
4503 			continue;
4504 		/*
4505 		 * If this was pass 2, we are done; otherwise do pass 2.
4506 		 */
4507 		if (waitfor == MNT_WAIT)
4508 			break;
4509 		waitfor = MNT_WAIT;
4510 	}
4511 	/*
4512 	 * Try freeing inodedep in case all dependencies have been removed.
4513 	 */
4514 	if (inodedep_lookup(fs, ino, 0, &inodedep) != 0)
4515 		(void) free_inodedep(inodedep);
4516 	return (0);
4517 }
4518 
4519 /*
4520  * Eliminate a pagedep dependency by flushing out all its diradd dependencies.
4521  * Called with splbio blocked.
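 *
 * In outline, each diradd on the list may be pushed through three
 * stages: a MKDIR_PARENT dependency is cleared by synchronously
 * updating the parent inode (UFS_UPDATE), a MKDIR_BODY dependency by
 * fsyncing the new directory itself, and finally the inode buffer,
 * plus any bitmap it still depends on, is written so that the updated
 * link count reaches the disk.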
4522 */ 4523 static int 4524 flush_pagedep_deps(pvp, mp, diraddhdp) 4525 struct vnode *pvp; 4526 struct mount *mp; 4527 struct diraddhd *diraddhdp; 4528 { 4529 struct thread *td = curthread; /* XXX */ 4530 struct inodedep *inodedep; 4531 struct ufsmount *ump; 4532 struct diradd *dap; 4533 struct vnode *vp; 4534 int gotit, error = 0; 4535 struct buf *bp; 4536 ino_t inum; 4537 4538 ump = VFSTOUFS(mp); 4539 while ((dap = LIST_FIRST(diraddhdp)) != NULL) { 4540 /* 4541 * Flush ourselves if this directory entry 4542 * has a MKDIR_PARENT dependency. 4543 */ 4544 if (dap->da_state & MKDIR_PARENT) { 4545 FREE_LOCK(&lk); 4546 if ((error = UFS_UPDATE(pvp, 1)) != 0) 4547 break; 4548 ACQUIRE_LOCK(&lk); 4549 /* 4550 * If that cleared dependencies, go on to next. 4551 */ 4552 if (dap != LIST_FIRST(diraddhdp)) 4553 continue; 4554 if (dap->da_state & MKDIR_PARENT) { 4555 FREE_LOCK(&lk); 4556 panic("flush_pagedep_deps: MKDIR_PARENT"); 4557 } 4558 } 4559 /* 4560 * A newly allocated directory must have its "." and 4561 * ".." entries written out before its name can be 4562 * committed in its parent. We do not want or need 4563 * the full semantics of a synchronous VOP_FSYNC as 4564 * that may end up here again, once for each directory 4565 * level in the filesystem. Instead, we push the blocks 4566 * and wait for them to clear. We have to fsync twice 4567 * because the first call may choose to defer blocks 4568 * that still have dependencies, but deferral will 4569 * happen at most once. 4570 */ 4571 inum = dap->da_newinum; 4572 if (dap->da_state & MKDIR_BODY) { 4573 FREE_LOCK(&lk); 4574 if ((error = VFS_VGET(mp, inum, &vp)) != 0) 4575 break; 4576 if ((error=VOP_FSYNC(vp, MNT_NOWAIT, td)) || 4577 (error=VOP_FSYNC(vp, MNT_NOWAIT, td))) { 4578 vput(vp); 4579 break; 4580 } 4581 drain_output(vp, 0); 4582 vput(vp); 4583 ACQUIRE_LOCK(&lk); 4584 /* 4585 * If that cleared dependencies, go on to next. 4586 */ 4587 if (dap != LIST_FIRST(diraddhdp)) 4588 continue; 4589 if (dap->da_state & MKDIR_BODY) { 4590 FREE_LOCK(&lk); 4591 panic("flush_pagedep_deps: MKDIR_BODY"); 4592 } 4593 } 4594 /* 4595 * Flush the inode on which the directory entry depends. 4596 * Having accounted for MKDIR_PARENT and MKDIR_BODY above, 4597 * the only remaining dependency is that the updated inode 4598 * count must get pushed to disk. The inode has already 4599 * been pushed into its inode buffer (via VOP_UPDATE) at 4600 * the time of the reference count change. So we need only 4601 * locate that buffer, ensure that there will be no rollback 4602 * caused by a bitmap dependency, then write the inode buffer. 4603 */ 4604 if (inodedep_lookup(ump->um_fs, inum, 0, &inodedep) == 0) { 4605 FREE_LOCK(&lk); 4606 panic("flush_pagedep_deps: lost inode"); 4607 } 4608 /* 4609 * If the inode still has bitmap dependencies, 4610 * push them to disk. 4611 */ 4612 if ((inodedep->id_state & DEPCOMPLETE) == 0) { 4613 gotit = getdirtybuf(&inodedep->id_buf, MNT_WAIT); 4614 FREE_LOCK(&lk); 4615 if (gotit && 4616 (error = VOP_BWRITE(inodedep->id_buf->b_vp, 4617 inodedep->id_buf)) != 0) 4618 break; 4619 ACQUIRE_LOCK(&lk); 4620 if (dap != LIST_FIRST(diraddhdp)) 4621 continue; 4622 } 4623 /* 4624 * If the inode is still sitting in a buffer waiting 4625 * to be written, push it to disk. 
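 *
 * (The inode block is fetched back through the device vnode with
 * bread() and then written synchronously; in the common case the
 * block is still in the buffer cache, so the read is cheap.)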
4626 */ 4627 FREE_LOCK(&lk); 4628 if ((error = bread(ump->um_devvp, 4629 fsbtodb(ump->um_fs, ino_to_fsba(ump->um_fs, inum)), 4630 (int)ump->um_fs->fs_bsize, &bp)) != 0) 4631 break; 4632 if ((error = VOP_BWRITE(bp->b_vp, bp)) != 0) 4633 break; 4634 ACQUIRE_LOCK(&lk); 4635 /* 4636 * If we have failed to get rid of all the dependencies 4637 * then something is seriously wrong. 4638 */ 4639 if (dap == LIST_FIRST(diraddhdp)) { 4640 FREE_LOCK(&lk); 4641 panic("flush_pagedep_deps: flush failed"); 4642 } 4643 } 4644 if (error) 4645 ACQUIRE_LOCK(&lk); 4646 return (error); 4647 } 4648 4649 /* 4650 * A large burst of file addition or deletion activity can drive the 4651 * memory load excessively high. First attempt to slow things down 4652 * using the techniques below. If that fails, this routine requests 4653 * the offending operations to fall back to running synchronously 4654 * until the memory load returns to a reasonable level. 4655 */ 4656 int 4657 softdep_slowdown(vp) 4658 struct vnode *vp; 4659 { 4660 int max_softdeps_hard; 4661 4662 max_softdeps_hard = max_softdeps * 11 / 10; 4663 if (num_dirrem < max_softdeps_hard / 2 && 4664 num_inodedep < max_softdeps_hard) 4665 return (0); 4666 stat_sync_limit_hit += 1; 4667 return (1); 4668 } 4669 4670 /* 4671 * If memory utilization has gotten too high, deliberately slow things 4672 * down and speed up the I/O processing. 4673 */ 4674 static int 4675 request_cleanup(resource, islocked) 4676 int resource; 4677 int islocked; 4678 { 4679 struct thread *td = curthread; /* XXX */ 4680 4681 /* 4682 * We never hold up the filesystem syncer process. 4683 */ 4684 if (td == filesys_syncer) 4685 return (0); 4686 /* 4687 * First check to see if the work list has gotten backlogged. 4688 * If it has, co-opt this process to help clean up two entries. 4689 * Because this process may hold inodes locked, we cannot 4690 * handle any remove requests that might block on a locked 4691 * inode as that could lead to deadlock. 4692 */ 4693 if (num_on_worklist > max_softdeps / 10) { 4694 if (islocked) 4695 FREE_LOCK(&lk); 4696 process_worklist_item(NULL, LK_NOWAIT); 4697 process_worklist_item(NULL, LK_NOWAIT); 4698 stat_worklist_push += 2; 4699 if (islocked) 4700 ACQUIRE_LOCK(&lk); 4701 return(1); 4702 } 4703 4704 /* 4705 * If we are resource constrained on inode dependencies, try 4706 * flushing some dirty inodes. Otherwise, we are constrained 4707 * by file deletions, so try accelerating flushes of directories 4708 * with removal dependencies. We would like to do the cleanup 4709 * here, but we probably hold an inode locked at this point and 4710 * that might deadlock against one that we try to clean. So, 4711 * the best that we can do is request the syncer daemon to do 4712 * the cleanup for us. 4713 */ 4714 switch (resource) { 4715 4716 case FLUSH_INODES: 4717 stat_ino_limit_push += 1; 4718 req_clear_inodedeps += 1; 4719 stat_countp = &stat_ino_limit_hit; 4720 break; 4721 4722 case FLUSH_REMOVE: 4723 stat_blk_limit_push += 1; 4724 req_clear_remove += 1; 4725 stat_countp = &stat_blk_limit_hit; 4726 break; 4727 4728 default: 4729 if (islocked) 4730 FREE_LOCK(&lk); 4731 panic("request_cleanup: unknown type"); 4732 } 4733 /* 4734 * Hopefully the syncer daemon will catch up and awaken us. 4735 * We wait at most tickdelay before proceeding in any case. 4736 */ 4737 if (islocked == 0) 4738 ACQUIRE_LOCK(&lk); 4739 crit_enter(); 4740 proc_waiting += 1; 4741 if (!callout_active(&handle)) 4742 callout_reset(&handle, tickdelay > 2 ? 
tickdelay : 2, 4743 pause_timer, NULL); 4744 interlocked_sleep(&lk, SLEEP, (caddr_t)&proc_waiting, 0, 4745 "softupdate", 0); 4746 proc_waiting -= 1; 4747 crit_exit(); 4748 if (islocked == 0) 4749 FREE_LOCK(&lk); 4750 return (1); 4751 } 4752 4753 /* 4754 * Awaken processes pausing in request_cleanup and clear proc_waiting 4755 * to indicate that there is no longer a timer running. 4756 */ 4757 void 4758 pause_timer(arg) 4759 void *arg; 4760 { 4761 *stat_countp += 1; 4762 wakeup_one(&proc_waiting); 4763 if (proc_waiting > 0) 4764 callout_reset(&handle, tickdelay > 2 ? tickdelay : 2, 4765 pause_timer, NULL); 4766 else 4767 callout_deactivate(&handle); 4768 } 4769 4770 /* 4771 * Flush out a directory with at least one removal dependency in an effort to 4772 * reduce the number of dirrem, freefile, and freeblks dependency structures. 4773 */ 4774 static void 4775 clear_remove(struct thread *td) 4776 { 4777 struct pagedep_hashhead *pagedephd; 4778 struct pagedep *pagedep; 4779 static int next = 0; 4780 struct mount *mp; 4781 struct vnode *vp; 4782 int error, cnt; 4783 ino_t ino; 4784 4785 ACQUIRE_LOCK(&lk); 4786 for (cnt = 0; cnt < pagedep_hash; cnt++) { 4787 pagedephd = &pagedep_hashtbl[next++]; 4788 if (next >= pagedep_hash) 4789 next = 0; 4790 LIST_FOREACH(pagedep, pagedephd, pd_hash) { 4791 if (LIST_FIRST(&pagedep->pd_dirremhd) == NULL) 4792 continue; 4793 mp = pagedep->pd_mnt; 4794 ino = pagedep->pd_ino; 4795 FREE_LOCK(&lk); 4796 if ((error = VFS_VGET(mp, ino, &vp)) != 0) { 4797 softdep_error("clear_remove: vget", error); 4798 return; 4799 } 4800 if ((error = VOP_FSYNC(vp, MNT_NOWAIT, td))) 4801 softdep_error("clear_remove: fsync", error); 4802 drain_output(vp, 0); 4803 vput(vp); 4804 return; 4805 } 4806 } 4807 FREE_LOCK(&lk); 4808 } 4809 4810 /* 4811 * Clear out a block of dirty inodes in an effort to reduce 4812 * the number of inodedep dependency structures. 4813 */ 4814 struct clear_inodedeps_info { 4815 struct fs *fs; 4816 struct mount *mp; 4817 }; 4818 4819 static int 4820 clear_inodedeps_mountlist_callback(struct mount *mp, void *data) 4821 { 4822 struct clear_inodedeps_info *info = data; 4823 4824 if ((mp->mnt_flag & MNT_SOFTDEP) && info->fs == VFSTOUFS(mp)->um_fs) { 4825 info->mp = mp; 4826 return(-1); 4827 } 4828 return(0); 4829 } 4830 4831 static void 4832 clear_inodedeps(struct thread *td) 4833 { 4834 struct clear_inodedeps_info info; 4835 struct inodedep_hashhead *inodedephd; 4836 struct inodedep *inodedep; 4837 static int next = 0; 4838 struct vnode *vp; 4839 struct fs *fs; 4840 int error, cnt; 4841 ino_t firstino, lastino, ino; 4842 4843 ACQUIRE_LOCK(&lk); 4844 /* 4845 * Pick a random inode dependency to be cleared. 4846 * We will then gather up all the inodes in its block 4847 * that have dependencies and flush them out. 4848 */ 4849 for (cnt = 0; cnt < inodedep_hash; cnt++) { 4850 inodedephd = &inodedep_hashtbl[next++]; 4851 if (next >= inodedep_hash) 4852 next = 0; 4853 if ((inodedep = LIST_FIRST(inodedephd)) != NULL) 4854 break; 4855 } 4856 if (inodedep == NULL) { 4857 FREE_LOCK(&lk); 4858 return; 4859 } 4860 /* 4861 * Ugly code to find mount point given pointer to superblock. 4862 */ 4863 fs = inodedep->id_fs; 4864 info.mp = NULL; 4865 info.fs = fs; 4866 mountlist_scan(clear_inodedeps_mountlist_callback, 4867 &info, MNTSCAN_FORWARD|MNTSCAN_NOBUSY); 4868 /* 4869 * Find the last inode in the block with dependencies. 
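 *
 * INOPB(fs) (inodes per block) is a power of two, so masking with
 * ~(INOPB(fs) - 1) rounds the inode number down to the first inode
 * of its block. As an illustrative example (values assumed, not
 * taken from this code): with 64 inodes per block and id_ino 1000,
 * firstino is 1000 & ~63 == 960 and the lastino scan starts at 1023.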
4870 */ 4871 firstino = inodedep->id_ino & ~(INOPB(fs) - 1); 4872 for (lastino = firstino + INOPB(fs) - 1; lastino > firstino; lastino--) 4873 if (inodedep_lookup(fs, lastino, 0, &inodedep) != 0) 4874 break; 4875 /* 4876 * Asynchronously push all but the last inode with dependencies. 4877 * Synchronously push the last inode with dependencies to ensure 4878 * that the inode block gets written to free up the inodedeps. 4879 */ 4880 for (ino = firstino; ino <= lastino; ino++) { 4881 if (inodedep_lookup(fs, ino, 0, &inodedep) == 0) 4882 continue; 4883 FREE_LOCK(&lk); 4884 if ((error = VFS_VGET(info.mp, ino, &vp)) != 0) { 4885 softdep_error("clear_inodedeps: vget", error); 4886 return; 4887 } 4888 if (ino == lastino) { 4889 if ((error = VOP_FSYNC(vp, MNT_WAIT, td))) 4890 softdep_error("clear_inodedeps: fsync1", error); 4891 } else { 4892 if ((error = VOP_FSYNC(vp, MNT_NOWAIT, td))) 4893 softdep_error("clear_inodedeps: fsync2", error); 4894 drain_output(vp, 0); 4895 } 4896 vput(vp); 4897 ACQUIRE_LOCK(&lk); 4898 } 4899 FREE_LOCK(&lk); 4900 } 4901 4902 /* 4903 * Function to determine if the buffer has outstanding dependencies 4904 * that will cause a roll-back if the buffer is written. If wantcount 4905 * is set, return number of dependencies, otherwise just yes or no. 4906 */ 4907 static int 4908 softdep_count_dependencies(bp, wantcount) 4909 struct buf *bp; 4910 int wantcount; 4911 { 4912 struct worklist *wk; 4913 struct inodedep *inodedep; 4914 struct indirdep *indirdep; 4915 struct allocindir *aip; 4916 struct pagedep *pagedep; 4917 struct diradd *dap; 4918 int i, retval; 4919 4920 retval = 0; 4921 ACQUIRE_LOCK(&lk); 4922 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 4923 switch (wk->wk_type) { 4924 4925 case D_INODEDEP: 4926 inodedep = WK_INODEDEP(wk); 4927 if ((inodedep->id_state & DEPCOMPLETE) == 0) { 4928 /* bitmap allocation dependency */ 4929 retval += 1; 4930 if (!wantcount) 4931 goto out; 4932 } 4933 if (TAILQ_FIRST(&inodedep->id_inoupdt)) { 4934 /* direct block pointer dependency */ 4935 retval += 1; 4936 if (!wantcount) 4937 goto out; 4938 } 4939 continue; 4940 4941 case D_INDIRDEP: 4942 indirdep = WK_INDIRDEP(wk); 4943 4944 LIST_FOREACH(aip, &indirdep->ir_deplisthd, ai_next) { 4945 /* indirect block pointer dependency */ 4946 retval += 1; 4947 if (!wantcount) 4948 goto out; 4949 } 4950 continue; 4951 4952 case D_PAGEDEP: 4953 pagedep = WK_PAGEDEP(wk); 4954 for (i = 0; i < DAHASHSZ; i++) { 4955 4956 LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) { 4957 /* directory entry dependency */ 4958 retval += 1; 4959 if (!wantcount) 4960 goto out; 4961 } 4962 } 4963 continue; 4964 4965 case D_BMSAFEMAP: 4966 case D_ALLOCDIRECT: 4967 case D_ALLOCINDIR: 4968 case D_MKDIR: 4969 /* never a dependency on these blocks */ 4970 continue; 4971 4972 default: 4973 FREE_LOCK(&lk); 4974 panic("softdep_check_for_rollback: Unexpected type %s", 4975 TYPENAME(wk->wk_type)); 4976 /* NOTREACHED */ 4977 } 4978 } 4979 out: 4980 FREE_LOCK(&lk); 4981 return retval; 4982 } 4983 4984 /* 4985 * Acquire exclusive access to a buffer. 4986 * Must be called with splbio blocked. 4987 * Return 1 if buffer was acquired. 
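 *
 * On success the buffer comes back locked and removed from its free
 * queue (via bremfree), ready for bawrite or VOP_BWRITE. A return of
 * 0 means the buffer went away, turned out not to be dirty, or could
 * not be locked without sleeping under MNT_NOWAIT.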
4988 */ 4989 static int 4990 getdirtybuf(bpp, waitfor) 4991 struct buf **bpp; 4992 int waitfor; 4993 { 4994 struct buf *bp; 4995 int error; 4996 4997 for (;;) { 4998 if ((bp = *bpp) == NULL) 4999 return (0); 5000 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) == 0) { 5001 if ((bp->b_xflags & BX_BKGRDINPROG) == 0) 5002 break; 5003 BUF_UNLOCK(bp); 5004 if (waitfor != MNT_WAIT) 5005 return (0); 5006 bp->b_xflags |= BX_BKGRDWAIT; 5007 interlocked_sleep(&lk, SLEEP, &bp->b_xflags, 0, 5008 "getbuf", 0); 5009 continue; 5010 } 5011 if (waitfor != MNT_WAIT) 5012 return (0); 5013 error = interlocked_sleep(&lk, LOCKBUF, bp, 5014 LK_EXCLUSIVE | LK_SLEEPFAIL, 0, 0); 5015 if (error != ENOLCK) { 5016 FREE_LOCK(&lk); 5017 panic("getdirtybuf: inconsistent lock"); 5018 } 5019 } 5020 if ((bp->b_flags & B_DELWRI) == 0) { 5021 BUF_UNLOCK(bp); 5022 return (0); 5023 } 5024 bremfree(bp); 5025 return (1); 5026 } 5027 5028 /* 5029 * Wait for pending output on a vnode to complete. 5030 * Must be called with vnode locked. 5031 */ 5032 static void 5033 drain_output(vp, islocked) 5034 struct vnode *vp; 5035 int islocked; 5036 { 5037 5038 if (!islocked) 5039 ACQUIRE_LOCK(&lk); 5040 while (vp->v_numoutput) { 5041 vp->v_flag |= VBWAIT; 5042 interlocked_sleep(&lk, SLEEP, (caddr_t)&vp->v_numoutput, 5043 0, "drainvp", 0); 5044 } 5045 if (!islocked) 5046 FREE_LOCK(&lk); 5047 } 5048 5049 /* 5050 * Called whenever a buffer that is being invalidated or reallocated 5051 * contains dependencies. This should only happen if an I/O error has 5052 * occurred. The routine is called with the buffer locked. 5053 */ 5054 static void 5055 softdep_deallocate_dependencies(bp) 5056 struct buf *bp; 5057 { 5058 5059 if ((bp->b_flags & B_ERROR) == 0) 5060 panic("softdep_deallocate_dependencies: dangling deps"); 5061 softdep_error(bp->b_vp->v_mount->mnt_stat.f_mntfromname, bp->b_error); 5062 panic("softdep_deallocate_dependencies: unrecovered I/O error"); 5063 } 5064 5065 /* 5066 * Function to handle asynchronous write errors in the filesystem. 5067 */ 5068 void 5069 softdep_error(func, error) 5070 char *func; 5071 int error; 5072 { 5073 5074 /* XXX should do something better! */ 5075 printf("%s: got error %d while accessing filesystem\n", func, error); 5076 } 5077