/*
 * Copyright 1998, 2000 Marshall Kirk McKusick. All Rights Reserved.
 *
 * The soft updates code is derived from the appendix of a University
 * of Michigan technical report (Gregory R. Ganger and Yale N. Patt,
 * "Soft Updates: A Solution to the Metadata Update Problem in File
 * Systems", CSE-TR-254-95, August 1995).
 *
 * Further information about soft updates can be obtained from:
 *
 *	Marshall Kirk McKusick		http://www.mckusick.com/softdep/
 *	1614 Oxford Street		mckusick@mckusick.com
 *	Berkeley, CA 94709-1608		+1-510-843-9542
 *	USA
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY MARSHALL KIRK MCKUSICK ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL MARSHALL KIRK MCKUSICK BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)ffs_softdep.c	9.59 (McKusick) 6/21/00
 * $FreeBSD: src/sys/ufs/ffs/ffs_softdep.c,v 1.57.2.11 2002/02/05 18:46:53 dillon Exp $
 * $DragonFly: src/sys/vfs/ufs/ffs_softdep.c,v 1.50 2006/12/23 00:41:30 swildner Exp $
 */

/*
 * For now we want the safety net that the DIAGNOSTIC and DEBUG flags provide.
 */
#ifndef DIAGNOSTIC
#define DIAGNOSTIC
#endif
#ifndef DEBUG
#define DEBUG
#endif

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/syslog.h>
#include <sys/vnode.h>
#include <sys/conf.h>
#include <sys/buf2.h>
#include <machine/inttypes.h>
#include "dir.h"
#include "quota.h"
#include "inode.h"
#include "ufsmount.h"
#include "fs.h"
#include "softdep.h"
#include "ffs_extern.h"
#include "ufs_extern.h"

#include <sys/thread2.h>

/*
 * These definitions need to be adapted to the system to which
 * this file is being ported.
 */

/*
 * malloc types defined for the softdep system.
 */
MALLOC_DEFINE(M_PAGEDEP, "pagedep", "File page dependencies");
MALLOC_DEFINE(M_INODEDEP, "inodedep", "Inode dependencies");
MALLOC_DEFINE(M_NEWBLK, "newblk", "New block allocation");
MALLOC_DEFINE(M_BMSAFEMAP, "bmsafemap", "Block or frag allocated from cyl group map");
MALLOC_DEFINE(M_ALLOCDIRECT, "allocdirect", "Block or frag dependency for an inode");
MALLOC_DEFINE(M_INDIRDEP, "indirdep", "Indirect block dependencies");
MALLOC_DEFINE(M_ALLOCINDIR, "allocindir", "Block dependency for an indirect block");
MALLOC_DEFINE(M_FREEFRAG, "freefrag", "Previously used frag for an inode");
MALLOC_DEFINE(M_FREEBLKS, "freeblks", "Blocks freed from an inode");
MALLOC_DEFINE(M_FREEFILE, "freefile", "Inode deallocated");
MALLOC_DEFINE(M_DIRADD, "diradd", "New directory entry");
MALLOC_DEFINE(M_MKDIR, "mkdir", "New directory");
MALLOC_DEFINE(M_DIRREM, "dirrem", "Directory entry deleted");

#define M_SOFTDEP_FLAGS		(M_WAITOK | M_USE_RESERVE)

#define	D_PAGEDEP	0
#define	D_INODEDEP	1
#define	D_NEWBLK	2
#define	D_BMSAFEMAP	3
#define	D_ALLOCDIRECT	4
#define	D_INDIRDEP	5
#define	D_ALLOCINDIR	6
#define	D_FREEFRAG	7
#define	D_FREEBLKS	8
#define	D_FREEFILE	9
#define	D_DIRADD	10
#define	D_MKDIR		11
#define	D_DIRREM	12
#define	D_LAST		D_DIRREM

/*
 * Translate from workitem type to memory type.
 * MUST match the defines above, such that memtype[D_XXX] == M_XXX.
 */
static struct malloc_type *memtype[] = {
	M_PAGEDEP,
	M_INODEDEP,
	M_NEWBLK,
	M_BMSAFEMAP,
	M_ALLOCDIRECT,
	M_INDIRDEP,
	M_ALLOCINDIR,
	M_FREEFRAG,
	M_FREEBLKS,
	M_FREEFILE,
	M_DIRADD,
	M_MKDIR,
	M_DIRREM
};

#define DtoM(type) (memtype[type])

/*
 * Names of malloc types.  Note that D_LAST itself is a valid index,
 * so the bound check must be inclusive.
 */
#define TYPENAME(type)	\
	((unsigned)(type) <= D_LAST ? memtype[type]->ks_shortdesc : "???")
/*
 * End system adaptation definitions.
 */
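
/*
 * Editorial sketch (not part of the original file): memtype[] must stay in
 * lock-step with the D_* defines above.  On kernels that provide a
 * compile-time assertion macro (CTASSERT here is an assumption), the
 * invariant could be checked mechanically:
 */
#if 0
CTASSERT(sizeof(memtype) / sizeof(memtype[0]) == D_LAST + 1);
#endif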
/*
 * Internal function prototypes.
 */
static	void softdep_error(char *, int);
static	void drain_output(struct vnode *, int);
static	int getdirtybuf(struct buf **, int);
static	void clear_remove(struct thread *);
static	void clear_inodedeps(struct thread *);
static	int flush_pagedep_deps(struct vnode *, struct mount *,
	    struct diraddhd *);
static	int flush_inodedep_deps(struct fs *, ino_t);
static	int handle_written_filepage(struct pagedep *, struct buf *);
static	void diradd_inode_written(struct diradd *, struct inodedep *);
static	int handle_written_inodeblock(struct inodedep *, struct buf *);
static	void handle_allocdirect_partdone(struct allocdirect *);
static	void handle_allocindir_partdone(struct allocindir *);
static	void initiate_write_filepage(struct pagedep *, struct buf *);
static	void handle_written_mkdir(struct mkdir *, int);
static	void initiate_write_inodeblock(struct inodedep *, struct buf *);
static	void handle_workitem_freefile(struct freefile *);
static	void handle_workitem_remove(struct dirrem *);
static	struct dirrem *newdirrem(struct buf *, struct inode *,
	    struct inode *, int, struct dirrem **);
static	void free_diradd(struct diradd *);
static	void free_allocindir(struct allocindir *, struct inodedep *);
static	int indir_trunc(struct inode *, off_t, int, ufs_lbn_t, long *);
static	void deallocate_dependencies(struct buf *, struct inodedep *);
static	void free_allocdirect(struct allocdirectlst *,
	    struct allocdirect *, int);
static	int check_inode_unwritten(struct inodedep *);
static	int free_inodedep(struct inodedep *);
static	void handle_workitem_freeblocks(struct freeblks *);
static	void merge_inode_lists(struct inodedep *);
static	void setup_allocindir_phase2(struct buf *, struct inode *,
	    struct allocindir *);
static	struct allocindir *newallocindir(struct inode *, int, ufs_daddr_t,
	    ufs_daddr_t);
static	void handle_workitem_freefrag(struct freefrag *);
static	struct freefrag *newfreefrag(struct inode *, ufs_daddr_t, long);
static	void allocdirect_merge(struct allocdirectlst *,
	    struct allocdirect *, struct allocdirect *);
static	struct bmsafemap *bmsafemap_lookup(struct buf *);
static	int newblk_lookup(struct fs *, ufs_daddr_t, int,
	    struct newblk **);
static	int inodedep_lookup(struct fs *, ino_t, int, struct inodedep **);
static	int pagedep_lookup(struct inode *, ufs_lbn_t, int,
	    struct pagedep **);
static	void pause_timer(void *);
static	int request_cleanup(int, int);
static	int process_worklist_item(struct mount *, int);
static	void add_to_worklist(struct worklist *);
/*
 * Exported softdep operations.
 */
static	void softdep_disk_io_initiation(struct buf *);
static	void softdep_disk_write_complete(struct buf *);
static	void softdep_deallocate_dependencies(struct buf *);
static	int softdep_fsync(struct vnode *);
static	int softdep_process_worklist(struct mount *);
static	void softdep_move_dependencies(struct buf *, struct buf *);
static	int softdep_count_dependencies(struct buf *bp, int);

static struct bio_ops softdep_bioops = {
	softdep_disk_io_initiation,		/* io_start */
	softdep_disk_write_complete,		/* io_complete */
	softdep_deallocate_dependencies,	/* io_deallocate */
	softdep_fsync,				/* io_fsync */
	softdep_process_worklist,		/* io_sync */
	softdep_move_dependencies,		/* io_movedeps */
	softdep_count_dependencies,		/* io_countdeps */
};

/*
 * Locking primitives.
 *
 * For a uniprocessor, all we need to do is protect against disk
 * interrupts.  For a multiprocessor, this lock would have to be
 * a mutex.  A single mutex is used throughout this file, though
 * finer grain locking could be used if contention warranted it.
 *
 * For a multiprocessor, the sleep call would accept a lock and
 * release it after the sleep processing was complete.  In a uniprocessor
 * implementation there is no such interlock, so we simply mark
 * the places where it needs to be done with the `interlocked' form
 * of the lock calls.  Since the uniprocessor sleep already interlocks
 * the spl, there is nothing that really needs to be done.
 */
#ifndef /* NOT */ DEBUG
static struct lockit {
} lk = { 0 };
#define ACQUIRE_LOCK(lk)		crit_enter_id("softupdates");
#define FREE_LOCK(lk)			crit_exit_id("softupdates");

#else /* DEBUG */
#define NOHOLDER	((struct thread *)-1)
#define SPECIAL_FLAG	((struct thread *)-2)
static struct lockit {
	int	lkt_spl;
	struct thread *lkt_held;
} lk = { 0, NOHOLDER };
static int lockcnt;

static	void acquire_lock(struct lockit *);
static	void free_lock(struct lockit *);
void	softdep_panic(char *);

#define ACQUIRE_LOCK(lk)		acquire_lock(lk)
#define FREE_LOCK(lk)			free_lock(lk)

static void
acquire_lock(lk)
	struct lockit *lk;
{
	thread_t holder;

	if (lk->lkt_held != NOHOLDER) {
		holder = lk->lkt_held;
		FREE_LOCK(lk);
		if (holder == curthread)
			panic("softdep_lock: locking against myself");
		else
			panic("softdep_lock: lock held by %p", holder);
	}
	crit_enter_id("softupdates");
	lk->lkt_held = curthread;
	lockcnt++;
}

static void
free_lock(lk)
	struct lockit *lk;
{

	if (lk->lkt_held == NOHOLDER)
		panic("softdep_unlock: lock not held");
	lk->lkt_held = NOHOLDER;
	crit_exit_id("softupdates");
}

/*
 * Function to release soft updates lock and panic.
 */
void
softdep_panic(msg)
	char *msg;
{

	if (lk.lkt_held != NOHOLDER)
		FREE_LOCK(&lk);
	panic(msg);
}
#endif /* DEBUG */
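
/*
 * Editorial usage sketch (not part of the original file): all dependency
 * list manipulation in this file follows the same discipline -- take the
 * single softdep lock, and drop it before any panic so the DEBUG lock
 * checks do not fire recursively:
 */
#if 0
	ACQUIRE_LOCK(&lk);
	if (inconsistency_detected) {		/* hypothetical condition */
		FREE_LOCK(&lk);			/* never panic holding lk */
		panic("example: inconsistent dependency state");
	}
	/* ... manipulate dependency structures ... */
	FREE_LOCK(&lk);
#endif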
static int interlocked_sleep(struct lockit *, int, void *, int,
	    const char *, int);

/*
 * When going to sleep, we must save our SPL so that it does
 * not get lost if some other process uses the lock while we
 * are sleeping.  We restore it after we have slept.  This routine
 * wraps the interlocking with functions that sleep.  The list
 * below enumerates the available set of operations.
 */
#define	UNKNOWN		0
#define	SLEEP		1
#define	LOCKBUF		2

static int
interlocked_sleep(lk, op, ident, flags, wmesg, timo)
	struct lockit *lk;
	int op;
	void *ident;
	int flags;
	const char *wmesg;
	int timo;
{
	thread_t holder;
	int s, retval;

	s = lk->lkt_spl;
#	ifdef DEBUG
	if (lk->lkt_held == NOHOLDER)
		panic("interlocked_sleep: lock not held");
	lk->lkt_held = NOHOLDER;
#	endif /* DEBUG */
	switch (op) {
	case SLEEP:
		retval = tsleep(ident, flags, wmesg, timo);
		break;
	case LOCKBUF:
		retval = BUF_LOCK((struct buf *)ident, flags);
		break;
	default:
		panic("interlocked_sleep: unknown operation");
	}
#	ifdef DEBUG
	if (lk->lkt_held != NOHOLDER) {
		holder = lk->lkt_held;
		FREE_LOCK(lk);
		if (holder == curthread)
			panic("interlocked_sleep: locking against self");
		else
			panic("interlocked_sleep: lock held by %p", holder);
	}
	lk->lkt_held = curthread;
	lockcnt++;
#	endif /* DEBUG */
	lk->lkt_spl = s;
	return (retval);
}

/*
 * Placeholder for real semaphores.
 */
struct sema {
	int	value;
	thread_t holder;
	char	*name;
	int	prio;
	int	timo;
};
static	void sema_init(struct sema *, char *, int, int);
static	int sema_get(struct sema *, struct lockit *);
static	void sema_release(struct sema *);

static void
sema_init(semap, name, prio, timo)
	struct sema *semap;
	char *name;
	int prio, timo;
{

	semap->holder = NOHOLDER;
	semap->value = 0;
	semap->name = name;
	semap->prio = prio;
	semap->timo = timo;
}

static int
sema_get(semap, interlock)
	struct sema *semap;
	struct lockit *interlock;
{

	if (semap->value++ > 0) {
		if (interlock != NULL) {
			interlocked_sleep(interlock, SLEEP, (caddr_t)semap,
			    semap->prio, semap->name, semap->timo);
			FREE_LOCK(interlock);
		} else {
			tsleep((caddr_t)semap, semap->prio, semap->name,
			    semap->timo);
		}
		return (0);
	}
	semap->holder = curthread;
	if (interlock != NULL)
		FREE_LOCK(interlock);
	return (1);
}

static void
sema_release(semap)
	struct sema *semap;
{

	if (semap->value <= 0 || semap->holder != curthread) {
		if (lk.lkt_held != NOHOLDER)
			FREE_LOCK(&lk);
		panic("sema_release: not held");
	}
	if (--semap->value > 0) {
		semap->value = 0;
		wakeup(semap);
	}
	semap->holder = NOHOLDER;
}
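
/*
 * Editorial usage sketch (not part of the original file): sema_get()
 * returns 0 when the caller had to sleep, in which case the interlock has
 * been dropped and the lookup must be retried from the top, as the lookup
 * routines below do:
 */
#if 0
top:
	/* ... search the hash chain while holding the softdep lock ... */
	if (sema_get(&pagedep_in_progress, &lk) == 0) {
		ACQUIRE_LOCK(&lk);	/* slept; state may have changed */
		goto top;
	}
	/* ... allocate and insert the new entry ... */
	sema_release(&pagedep_in_progress);
#endif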
/*
 * Worklist queue management.
 * These routines require that the lock be held.
 */
#ifndef /* NOT */ DEBUG
#define WORKLIST_INSERT(head, item) do {	\
	(item)->wk_state |= ONWORKLIST;		\
	LIST_INSERT_HEAD(head, item, wk_list);	\
} while (0)
#define WORKLIST_REMOVE(item) do {		\
	(item)->wk_state &= ~ONWORKLIST;	\
	LIST_REMOVE(item, wk_list);		\
} while (0)
#define WORKITEM_FREE(item, type) FREE(item, DtoM(type))

#else /* DEBUG */
static	void worklist_insert(struct workhead *, struct worklist *);
static	void worklist_remove(struct worklist *);
static	void workitem_free(struct worklist *, int);

#define WORKLIST_INSERT(head, item) worklist_insert(head, item)
#define WORKLIST_REMOVE(item) worklist_remove(item)
#define WORKITEM_FREE(item, type) workitem_free((struct worklist *)item, type)

static void
worklist_insert(head, item)
	struct workhead *head;
	struct worklist *item;
{

	if (lk.lkt_held == NOHOLDER)
		panic("worklist_insert: lock not held");
	if (item->wk_state & ONWORKLIST) {
		FREE_LOCK(&lk);
		panic("worklist_insert: already on list");
	}
	item->wk_state |= ONWORKLIST;
	LIST_INSERT_HEAD(head, item, wk_list);
}

static void
worklist_remove(item)
	struct worklist *item;
{

	if (lk.lkt_held == NOHOLDER)
		panic("worklist_remove: lock not held");
	if ((item->wk_state & ONWORKLIST) == 0) {
		FREE_LOCK(&lk);
		panic("worklist_remove: not on list");
	}
	item->wk_state &= ~ONWORKLIST;
	LIST_REMOVE(item, wk_list);
}

static void
workitem_free(item, type)
	struct worklist *item;
	int type;
{

	if (item->wk_state & ONWORKLIST) {
		if (lk.lkt_held != NOHOLDER)
			FREE_LOCK(&lk);
		panic("workitem_free: still on list");
	}
	if (item->wk_type != type) {
		if (lk.lkt_held != NOHOLDER)
			FREE_LOCK(&lk);
		panic("workitem_free: type mismatch");
	}
	FREE(item, DtoM(type));
}
#endif /* DEBUG */

/*
 * Workitem queue management
 */
static struct workhead softdep_workitem_pending;
static int num_on_worklist;	/* number of worklist items to be processed */
static int softdep_worklist_busy; /* 1 => trying to do unmount */
static int softdep_worklist_req; /* serialized waiters */
static int max_softdeps;	/* maximum number of structs before slowdown */
static int tickdelay = 2;	/* number of ticks to pause during slowdown */
static int *stat_countp;	/* statistic to count in proc_waiting timeout */
static int proc_waiting;	/* tracks whether we have a timeout posted */
static struct callout handle;	/* handle on posted proc_waiting timeout */
static struct thread *filesys_syncer; /* proc of filesystem syncer process */
static int req_clear_inodedeps;	/* syncer process flush some inodedeps */
#define FLUSH_INODES	1
static int req_clear_remove;	/* syncer process flush some freeblks */
#define FLUSH_REMOVE	2
/*
 * runtime statistics
 */
static int stat_worklist_push;	/* number of worklist cleanups */
static int stat_blk_limit_push;	/* number of times block limit neared */
static int stat_ino_limit_push;	/* number of times inode limit neared */
static int stat_blk_limit_hit;	/* number of times block slowdown imposed */
static int stat_ino_limit_hit;	/* number of times inode slowdown imposed */
static int stat_sync_limit_hit;	/* number of synchronous slowdowns imposed */
static int stat_indir_blk_ptrs;	/* bufs redirtied as indir ptrs not written */
static int stat_inode_bitmap;	/* bufs redirtied as inode bitmap not written */
static int stat_direct_blk_ptrs; /* bufs redirtied as direct ptrs not written */
static int stat_dir_entry;	/* bufs redirtied as dir entry cannot write */
#ifdef DEBUG
#include <vm/vm.h>
#include <sys/sysctl.h>
SYSCTL_INT(_debug, OID_AUTO, max_softdeps, CTLFLAG_RW, &max_softdeps, 0, "");
SYSCTL_INT(_debug, OID_AUTO, tickdelay, CTLFLAG_RW, &tickdelay, 0, "");
SYSCTL_INT(_debug, OID_AUTO, worklist_push, CTLFLAG_RW, &stat_worklist_push, 0, "");
SYSCTL_INT(_debug, OID_AUTO, blk_limit_push, CTLFLAG_RW, &stat_blk_limit_push, 0, "");
SYSCTL_INT(_debug, OID_AUTO, ino_limit_push, CTLFLAG_RW, &stat_ino_limit_push, 0, "");
SYSCTL_INT(_debug, OID_AUTO, blk_limit_hit, CTLFLAG_RW, &stat_blk_limit_hit, 0, "");
SYSCTL_INT(_debug, OID_AUTO, ino_limit_hit, CTLFLAG_RW, &stat_ino_limit_hit, 0, "");
SYSCTL_INT(_debug, OID_AUTO, sync_limit_hit, CTLFLAG_RW, &stat_sync_limit_hit, 0, "");
SYSCTL_INT(_debug, OID_AUTO, indir_blk_ptrs, CTLFLAG_RW, &stat_indir_blk_ptrs, 0, "");
SYSCTL_INT(_debug, OID_AUTO, inode_bitmap, CTLFLAG_RW, &stat_inode_bitmap, 0, "");
SYSCTL_INT(_debug, OID_AUTO, direct_blk_ptrs, CTLFLAG_RW, &stat_direct_blk_ptrs, 0, "");
SYSCTL_INT(_debug, OID_AUTO, dir_entry, CTLFLAG_RW, &stat_dir_entry, 0, "");
#endif /* DEBUG */

/*
 * Add an item to the end of the work queue.
 * This routine requires that the lock be held.
 * This is the only routine that adds items to the list.
 * The following routine is the only one that removes items
 * and does so in order from first to last.
 */
static void
add_to_worklist(wk)
	struct worklist *wk;
{
	static struct worklist *worklist_tail;

	if (wk->wk_state & ONWORKLIST) {
		if (lk.lkt_held != NOHOLDER)
			FREE_LOCK(&lk);
		panic("add_to_worklist: already on list");
	}
	wk->wk_state |= ONWORKLIST;
	if (LIST_FIRST(&softdep_workitem_pending) == NULL)
		LIST_INSERT_HEAD(&softdep_workitem_pending, wk, wk_list);
	else
		LIST_INSERT_AFTER(worklist_tail, wk, wk_list);
	worklist_tail = wk;
	num_on_worklist += 1;
}

/*
 * Process that runs once per second to handle items in the background queue.
 *
 * Note that we ensure that items are processed in the order in which they
 * appear in the queue.  The code below depends on this property to ensure
 * that blocks of a file are freed before the inode itself is freed.  This
 * ordering ensures that no new <vfsid, inum, lbn> triples will be generated
 * until all the old ones have been purged from the dependency lists.
 */
static int
softdep_process_worklist(matchmnt)
	struct mount *matchmnt;
{
	thread_t td = curthread;
	int matchcnt, loopcount;
	long starttime;

	/*
	 * Record the process identifier of our caller so that we can give
	 * this process preferential treatment in request_cleanup below.
	 */
	filesys_syncer = td;
	matchcnt = 0;

	/*
	 * There is no danger of having multiple processes run this
	 * code, but we have to single-thread it when softdep_flushfiles()
	 * is in operation to get an accurate count of the number of items
	 * related to its mount point that are in the list.
	 */
	if (matchmnt == NULL) {
		if (softdep_worklist_busy < 0)
			return (-1);
		softdep_worklist_busy += 1;
	}

	/*
	 * If requested, try removing inode or removal dependencies.
	 */
	if (req_clear_inodedeps) {
		clear_inodedeps(td);
		req_clear_inodedeps -= 1;
		wakeup_one(&proc_waiting);
	}
	if (req_clear_remove) {
		clear_remove(td);
		req_clear_remove -= 1;
		wakeup_one(&proc_waiting);
	}
	loopcount = 1;
	starttime = time_second;
	while (num_on_worklist > 0) {
		matchcnt += process_worklist_item(matchmnt, 0);

		/*
		 * If a umount operation wants to run the worklist
		 * accurately, abort.
		 */
		if (softdep_worklist_req && matchmnt == NULL) {
			matchcnt = -1;
			break;
		}

		/*
		 * If requested, try removing inode or removal dependencies.
		 */
		if (req_clear_inodedeps) {
			clear_inodedeps(td);
			req_clear_inodedeps -= 1;
			wakeup_one(&proc_waiting);
		}
		if (req_clear_remove) {
			clear_remove(td);
			req_clear_remove -= 1;
			wakeup_one(&proc_waiting);
		}
		/*
		 * We do not generally want to stop for buffer space, but if
		 * we are really being a buffer hog, we will stop and wait.
		 */
		if (loopcount++ % 128 == 0)
			bwillwrite();
		/*
		 * Never allow processing to run for more than one
		 * second.  Otherwise the other syncer tasks may get
		 * excessively backlogged.
		 */
		if (starttime != time_second && matchmnt == NULL) {
			matchcnt = -1;
			break;
		}
	}
	if (matchmnt == NULL) {
		--softdep_worklist_busy;
		if (softdep_worklist_req && softdep_worklist_busy == 0)
			wakeup(&softdep_worklist_req);
	}
	return (matchcnt);
}

/*
 * Process one item on the worklist.
 */
static int
process_worklist_item(matchmnt, flags)
	struct mount *matchmnt;
	int flags;
{
	struct worklist *wk;
	struct dirrem *dirrem;
	struct fs *matchfs;
	struct vnode *vp;
	int matchcnt = 0;

	matchfs = NULL;
	if (matchmnt != NULL)
		matchfs = VFSTOUFS(matchmnt)->um_fs;
	ACQUIRE_LOCK(&lk);
	/*
	 * Normally we just process each item on the worklist in order.
	 * However, if we are in a situation where we cannot lock any
	 * inodes, we have to skip over any dirrem requests whose
	 * vnodes are resident and locked.
	 */
	LIST_FOREACH(wk, &softdep_workitem_pending, wk_list) {
		if ((flags & LK_NOWAIT) == 0 || wk->wk_type != D_DIRREM)
			break;
		dirrem = WK_DIRREM(wk);
		vp = ufs_ihashlookup(VFSTOUFS(dirrem->dm_mnt)->um_dev,
		    dirrem->dm_oldinum);
		if (vp == NULL || !vn_islocked(vp))
			break;
	}
	if (wk == NULL) {
		FREE_LOCK(&lk);
		return (0);
	}
	WORKLIST_REMOVE(wk);
	num_on_worklist -= 1;
	FREE_LOCK(&lk);
	switch (wk->wk_type) {
	case D_DIRREM:
		/* removal of a directory entry */
		if (WK_DIRREM(wk)->dm_mnt == matchmnt)
			matchcnt += 1;
		handle_workitem_remove(WK_DIRREM(wk));
		break;

	case D_FREEBLKS:
		/* releasing blocks and/or fragments from a file */
		if (WK_FREEBLKS(wk)->fb_fs == matchfs)
			matchcnt += 1;
		handle_workitem_freeblocks(WK_FREEBLKS(wk));
		break;

	case D_FREEFRAG:
		/* releasing a fragment when replaced as a file grows */
		if (WK_FREEFRAG(wk)->ff_fs == matchfs)
			matchcnt += 1;
		handle_workitem_freefrag(WK_FREEFRAG(wk));
		break;

	case D_FREEFILE:
		/* releasing an inode when its link count drops to 0 */
		if (WK_FREEFILE(wk)->fx_fs == matchfs)
			matchcnt += 1;
		handle_workitem_freefile(WK_FREEFILE(wk));
		break;

	default:
		panic("%s_process_worklist: Unknown type %s",
		    "softdep", TYPENAME(wk->wk_type));
		/* NOTREACHED */
	}
	return (matchcnt);
}

/*
 * Move dependencies from one buffer to another.
 */
static void
softdep_move_dependencies(oldbp, newbp)
	struct buf *oldbp;
	struct buf *newbp;
{
	struct worklist *wk, *wktail;

	if (LIST_FIRST(&newbp->b_dep) != NULL)
		panic("softdep_move_dependencies: need merge code");
	wktail = NULL;
	ACQUIRE_LOCK(&lk);
	while ((wk = LIST_FIRST(&oldbp->b_dep)) != NULL) {
		LIST_REMOVE(wk, wk_list);
		if (wktail == NULL)
			LIST_INSERT_HEAD(&newbp->b_dep, wk, wk_list);
		else
			LIST_INSERT_AFTER(wktail, wk, wk_list);
		wktail = wk;
	}
	FREE_LOCK(&lk);
}

/*
 * Purge the work list of all items associated with a particular mount point.
 */
int
softdep_flushfiles(struct mount *oldmnt, int flags)
{
	struct vnode *devvp;
	int error, loopcnt;

	/*
	 * Await our turn to clear out the queue, then serialize access.
	 */
	while (softdep_worklist_busy != 0) {
		softdep_worklist_req += 1;
		tsleep(&softdep_worklist_req, 0, "softflush", 0);
		softdep_worklist_req -= 1;
	}
	softdep_worklist_busy = -1;

	if ((error = ffs_flushfiles(oldmnt, flags)) != 0) {
		softdep_worklist_busy = 0;
		if (softdep_worklist_req)
			wakeup(&softdep_worklist_req);
		return (error);
	}
	/*
	 * Alternately flush the block device associated with the mount
	 * point and process any dependencies that the flushing
	 * creates.  In theory, this loop can happen at most twice,
	 * but we give it a few extra iterations just to be sure.
	 */
	devvp = VFSTOUFS(oldmnt)->um_devvp;
	for (loopcnt = 10; loopcnt > 0; ) {
		if (softdep_process_worklist(oldmnt) == 0) {
			loopcnt--;
			/*
			 * Do another flush in case any vnodes were brought in
			 * as part of the cleanup operations.
			 */
			if ((error = ffs_flushfiles(oldmnt, flags)) != 0)
				break;
			/*
			 * If we still found nothing to do, we are really done.
			 */
			if (softdep_process_worklist(oldmnt) == 0)
				break;
		}
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(devvp, MNT_WAIT);
		vn_unlock(devvp);
		if (error)
			break;
	}
	softdep_worklist_busy = 0;
	if (softdep_worklist_req)
		wakeup(&softdep_worklist_req);

	/*
	 * If we are unmounting then it is an error to fail.  If we
	 * are simply trying to downgrade to read-only, then filesystem
	 * activity can keep us busy forever, so we just fail with EBUSY.
	 */
	if (loopcnt == 0) {
		if (oldmnt->mnt_kern_flag & MNTK_UNMOUNT)
			panic("softdep_flushfiles: looping");
		error = EBUSY;
	}
	return (error);
}

/*
 * Structure hashing.
 *
 * There are three types of structures that can be looked up:
 *	1) pagedep structures identified by mount point, inode number,
 *	   and logical block.
 *	2) inodedep structures identified by mount point and inode number.
 *	3) newblk structures identified by mount point and
 *	   physical block number.
 *
 * The "pagedep" and "inodedep" dependency structures are hashed
 * separately from the file blocks and inodes to which they correspond.
 * This separation helps when the in-memory copy of an inode or
 * file block must be replaced.  It also obviates the need to access
 * an inode or file page when simply updating (or de-allocating)
 * dependency structures.  Lookup of newblk structures is needed to
 * find newly allocated blocks when trying to associate them with
 * their allocdirect or allocindir structure.
 *
 * The lookup routines optionally create and hash a new instance when
 * an existing entry is not found.
 */
#define DEPALLOC	0x0001	/* allocate structure if lookup fails */
#define NODELAY		0x0002	/* cannot do background work */

/*
 * Structures and routines associated with pagedep caching.
 */
LIST_HEAD(pagedep_hashhead, pagedep) *pagedep_hashtbl;
u_long	pagedep_hash;		/* size of hash table - 1 */
#define	PAGEDEP_HASH(mp, inum, lbn) \
	(&pagedep_hashtbl[((((register_t)(mp)) >> 13) + (inum) + (lbn)) & \
	    pagedep_hash])
static struct sema pagedep_in_progress;

/*
 * Helper routine for pagedep_lookup()
 */
static __inline
struct pagedep *
pagedep_find(struct pagedep_hashhead *pagedephd, ino_t ino, ufs_lbn_t lbn,
	     struct mount *mp)
{
	struct pagedep *pagedep;

	LIST_FOREACH(pagedep, pagedephd, pd_hash) {
		if (ino == pagedep->pd_ino &&
		    lbn == pagedep->pd_lbn &&
		    mp == pagedep->pd_mnt) {
			return (pagedep);
		}
	}
	return (NULL);
}

/*
 * Look up a pagedep.  Return 1 if found, 0 if not found.
 * If not found, allocate if DEPALLOC flag is passed.
 * Found or allocated entry is returned in pagedeppp.
 * This routine must be called with splbio interrupts blocked.
 */
static int
pagedep_lookup(ip, lbn, flags, pagedeppp)
	struct inode *ip;
	ufs_lbn_t lbn;
	int flags;
	struct pagedep **pagedeppp;
{
	struct pagedep *pagedep;
	struct pagedep_hashhead *pagedephd;
	struct mount *mp;
	int i;

#ifdef DEBUG
	if (lk.lkt_held == NOHOLDER)
		panic("pagedep_lookup: lock not held");
#endif
	mp = ITOV(ip)->v_mount;
	pagedephd = PAGEDEP_HASH(mp, ip->i_number, lbn);
top:
	*pagedeppp = pagedep_find(pagedephd, ip->i_number, lbn, mp);
	if (*pagedeppp)
		return (1);
	if ((flags & DEPALLOC) == 0)
		return (0);
	if (sema_get(&pagedep_in_progress, &lk) == 0) {
		ACQUIRE_LOCK(&lk);
		goto top;
	}
	MALLOC(pagedep, struct pagedep *, sizeof(struct pagedep), M_PAGEDEP,
	    M_SOFTDEP_FLAGS | M_ZERO);

	if (pagedep_find(pagedephd, ip->i_number, lbn, mp)) {
		kprintf("pagedep_lookup: blocking race avoided\n");
		ACQUIRE_LOCK(&lk);
		sema_release(&pagedep_in_progress);
		kfree(pagedep, M_PAGEDEP);
		goto top;
	}

	pagedep->pd_list.wk_type = D_PAGEDEP;
	pagedep->pd_mnt = mp;
	pagedep->pd_ino = ip->i_number;
	pagedep->pd_lbn = lbn;
	LIST_INIT(&pagedep->pd_dirremhd);
	LIST_INIT(&pagedep->pd_pendinghd);
	for (i = 0; i < DAHASHSZ; i++)
		LIST_INIT(&pagedep->pd_diraddhd[i]);
	ACQUIRE_LOCK(&lk);
	LIST_INSERT_HEAD(pagedephd, pagedep, pd_hash);
	sema_release(&pagedep_in_progress);
	*pagedeppp = pagedep;
	return (0);
}

/*
 * Structures and routines associated with inodedep caching.
 */
LIST_HEAD(inodedep_hashhead, inodedep) *inodedep_hashtbl;
static u_long	inodedep_hash;	/* size of hash table - 1 */
static long	num_inodedep;	/* number of inodedep allocated */
#define INODEDEP_HASH(fs, inum) \
	(&inodedep_hashtbl[((((register_t)(fs)) >> 13) + (inum)) & inodedep_hash])
static struct sema inodedep_in_progress;

/*
 * Helper routine for inodedep_lookup()
 */
static __inline
struct inodedep *
inodedep_find(struct inodedep_hashhead *inodedephd, struct fs *fs, ino_t inum)
{
	struct inodedep *inodedep;

	LIST_FOREACH(inodedep, inodedephd, id_hash) {
		if (inum == inodedep->id_ino && fs == inodedep->id_fs)
			return (inodedep);
	}
	return (NULL);
}

/*
 * Look up an inodedep.  Return 1 if found, 0 if not found.
 * If not found, allocate if DEPALLOC flag is passed.
 * Found or allocated entry is returned in inodedeppp.
 * This routine must be called with splbio interrupts blocked.
 */
static int
inodedep_lookup(fs, inum, flags, inodedeppp)
	struct fs *fs;
	ino_t inum;
	int flags;
	struct inodedep **inodedeppp;
{
	struct inodedep *inodedep;
	struct inodedep_hashhead *inodedephd;
	int firsttry;

#ifdef DEBUG
	if (lk.lkt_held == NOHOLDER)
		panic("inodedep_lookup: lock not held");
#endif
	firsttry = 1;
	inodedephd = INODEDEP_HASH(fs, inum);
top:
	*inodedeppp = inodedep_find(inodedephd, fs, inum);
	if (*inodedeppp)
		return (1);
	if ((flags & DEPALLOC) == 0)
		return (0);
	/*
	 * If we are over our limit, try to improve the situation.
	 */
	if (num_inodedep > max_softdeps && firsttry &&
	    speedup_syncer() == 0 && (flags & NODELAY) == 0 &&
	    request_cleanup(FLUSH_INODES, 1)) {
		firsttry = 0;
		goto top;
	}
	if (sema_get(&inodedep_in_progress, &lk) == 0) {
		ACQUIRE_LOCK(&lk);
		goto top;
	}
	MALLOC(inodedep, struct inodedep *, sizeof(struct inodedep),
	    M_INODEDEP, M_SOFTDEP_FLAGS | M_ZERO);
	if (inodedep_find(inodedephd, fs, inum)) {
		kprintf("inodedep_lookup: blocking race avoided\n");
		ACQUIRE_LOCK(&lk);
		sema_release(&inodedep_in_progress);
		kfree(inodedep, M_INODEDEP);
		goto top;
	}
	inodedep->id_list.wk_type = D_INODEDEP;
	inodedep->id_fs = fs;
	inodedep->id_ino = inum;
	inodedep->id_state = ALLCOMPLETE;
	inodedep->id_nlinkdelta = 0;
	inodedep->id_savedino = NULL;
	inodedep->id_savedsize = -1;
	inodedep->id_buf = NULL;
	LIST_INIT(&inodedep->id_pendinghd);
	LIST_INIT(&inodedep->id_inowait);
	LIST_INIT(&inodedep->id_bufwait);
	TAILQ_INIT(&inodedep->id_inoupdt);
	TAILQ_INIT(&inodedep->id_newinoupdt);
	ACQUIRE_LOCK(&lk);
	num_inodedep += 1;
	LIST_INSERT_HEAD(inodedephd, inodedep, id_hash);
	sema_release(&inodedep_in_progress);
	*inodedeppp = inodedep;
	return (0);
}

/*
 * Structures and routines associated with newblk caching.
 */
LIST_HEAD(newblk_hashhead, newblk) *newblk_hashtbl;
u_long	newblk_hash;		/* size of hash table - 1 */
#define	NEWBLK_HASH(fs, inum) \
	(&newblk_hashtbl[((((register_t)(fs)) >> 13) + (inum)) & newblk_hash])
static struct sema newblk_in_progress;

/*
 * Helper routine for newblk_lookup()
 */
static __inline
struct newblk *
newblk_find(struct newblk_hashhead *newblkhd, struct fs *fs,
	    ufs_daddr_t newblkno)
{
	struct newblk *newblk;

	LIST_FOREACH(newblk, newblkhd, nb_hash) {
		if (newblkno == newblk->nb_newblkno && fs == newblk->nb_fs)
			return (newblk);
	}
	return (NULL);
}

/*
 * Look up a newblk.  Return 1 if found, 0 if not found.
 * If not found, allocate if DEPALLOC flag is passed.
 * Found or allocated entry is returned in newblkpp.
 */
static int
newblk_lookup(fs, newblkno, flags, newblkpp)
	struct fs *fs;
	ufs_daddr_t newblkno;
	int flags;
	struct newblk **newblkpp;
{
	struct newblk *newblk;
	struct newblk_hashhead *newblkhd;

	newblkhd = NEWBLK_HASH(fs, newblkno);
top:
	*newblkpp = newblk_find(newblkhd, fs, newblkno);
	if (*newblkpp)
		return (1);
	if ((flags & DEPALLOC) == 0)
		return (0);
	if (sema_get(&newblk_in_progress, NULL) == 0)
		goto top;
	MALLOC(newblk, struct newblk *, sizeof(struct newblk),
	    M_NEWBLK, M_SOFTDEP_FLAGS | M_ZERO);

	if (newblk_find(newblkhd, fs, newblkno)) {
		kprintf("newblk_lookup: blocking race avoided\n");
		/* release the newblk semaphore we hold, not pagedep's */
		sema_release(&newblk_in_progress);
		kfree(newblk, M_NEWBLK);
		goto top;
	}
	newblk->nb_state = 0;
	newblk->nb_fs = fs;
	newblk->nb_newblkno = newblkno;
	LIST_INSERT_HEAD(newblkhd, newblk, nb_hash);
	sema_release(&newblk_in_progress);
	*newblkpp = newblk;
	return (0);
}
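
/*
 * Editorial sketch (not part of the original file): the three lookup
 * routines above share one "lookup, allocate, re-check" shape, distilled
 * here with hypothetical names (hash_find, head, key, in_progress, item,
 * mtype):
 */
#if 0
top:
	if ((*resultp = hash_find(head, key)) != NULL)
		return (1);		/* existing entry found */
	if ((flags & DEPALLOC) == 0)
		return (0);		/* caller did not ask for allocation */
	if (sema_get(&in_progress, &lk) == 0) {
		ACQUIRE_LOCK(&lk);	/* slept; state may have changed */
		goto top;
	}
	item = kmalloc(sizeof(*item), mtype, M_SOFTDEP_FLAGS | M_ZERO);
	if (hash_find(head, key) != NULL) {	/* re-check after blocking */
		sema_release(&in_progress);
		kfree(item, mtype);
		goto top;
	}
	/* ... initialize item, insert into the hash chain ... */
	sema_release(&in_progress);
	return (0);
#endif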
/*
 * Executed during filesystem initialization, before mounting any
 * filesystems.
 */
void
softdep_initialize()
{
	callout_init(&handle);
	bioops = softdep_bioops;	/* XXX hack */

	LIST_INIT(&mkdirlisthd);
	LIST_INIT(&softdep_workitem_pending);
	max_softdeps = min(desiredvnodes * 8,
	    M_INODEDEP->ks_limit / (2 * sizeof(struct inodedep)));
	pagedep_hashtbl = hashinit(desiredvnodes / 5, M_PAGEDEP,
	    &pagedep_hash);
	sema_init(&pagedep_in_progress, "pagedep", 0, 0);
	inodedep_hashtbl = hashinit(desiredvnodes, M_INODEDEP, &inodedep_hash);
	sema_init(&inodedep_in_progress, "inodedep", 0, 0);
	newblk_hashtbl = hashinit(64, M_NEWBLK, &newblk_hash);
	sema_init(&newblk_in_progress, "newblk", 0, 0);
}

/*
 * Called at mount time to notify the dependency code that a
 * filesystem wishes to use it.
 */
int
softdep_mount(devvp, mp, fs)
	struct vnode *devvp;
	struct mount *mp;
	struct fs *fs;
{
	struct csum cstotal;
	struct cg *cgp;
	struct buf *bp;
	int error, cyl;

	mp->mnt_flag &= ~MNT_ASYNC;
	mp->mnt_flag |= MNT_SOFTDEP;
	/*
	 * When doing soft updates, the counters in the
	 * superblock may have gotten out of sync, so we have
	 * to scan the cylinder groups and recalculate them.
	 */
	if (fs->fs_clean != 0)
		return (0);
	bzero(&cstotal, sizeof cstotal);
	for (cyl = 0; cyl < fs->fs_ncg; cyl++) {
		if ((error = bread(devvp, fsbtodoff(fs, cgtod(fs, cyl)),
		    fs->fs_cgsize, &bp)) != 0) {
			brelse(bp);
			return (error);
		}
		cgp = (struct cg *)bp->b_data;
		cstotal.cs_nffree += cgp->cg_cs.cs_nffree;
		cstotal.cs_nbfree += cgp->cg_cs.cs_nbfree;
		cstotal.cs_nifree += cgp->cg_cs.cs_nifree;
		cstotal.cs_ndir += cgp->cg_cs.cs_ndir;
		fs->fs_cs(fs, cyl) = cgp->cg_cs;
		brelse(bp);
	}
#ifdef DEBUG
	if (bcmp(&cstotal, &fs->fs_cstotal, sizeof cstotal))
		kprintf("ffs_mountfs: superblock updated for soft updates\n");
#endif
	bcopy(&cstotal, &fs->fs_cstotal, sizeof cstotal);
	return (0);
}

/*
 * Protecting the freemaps (or bitmaps).
 *
 * To eliminate the need to execute fsck before mounting a filesystem
 * after a power failure, one must (conservatively) guarantee that the
 * on-disk copy of the bitmaps never indicates that a live inode or block is
 * free.  So, when a block or inode is allocated, the bitmap should be
 * updated (on disk) before any new pointers.  When a block or inode is
 * freed, the bitmap should not be updated until all pointers have been
 * reset.  The latter dependency is handled by the delayed de-allocation
 * approach described below for block and inode de-allocation.  The former
 * dependency is handled by calling the following procedure when a block or
 * inode is allocated.  When an inode is allocated an "inodedep" is created
 * with its DEPCOMPLETE flag cleared until its bitmap is written to disk.
 * Each "inodedep" is also inserted into the hash indexing structure so
 * that any additional link additions can be made dependent on the inode
 * allocation.
 *
 * The ufs filesystem maintains a number of free block counts (e.g., per
 * cylinder group, per cylinder and per <cylinder, rotational position> pair)
 * in addition to the bitmaps.  These counts are used to improve efficiency
 * during allocation and therefore must be consistent with the bitmaps.
 * There is no convenient way to guarantee post-crash consistency of these
 * counts with simple update ordering, for two main reasons: (1) The counts
 * and bitmaps for a single cylinder group block are not in the same disk
 * sector.  If a disk write is interrupted (e.g., by power failure), one may
 * be written and the other not.  (2) Some of the counts are located in the
 * superblock rather than the cylinder group block.  So, we focus our soft
 * updates implementation on protecting the bitmaps.  When mounting a
 * filesystem, we recompute the auxiliary counts from the bitmaps.
 */

/*
 * Called just after updating the cylinder group block to allocate an inode.
 */
void
softdep_setup_inomapdep(bp, ip, newinum)
	struct buf *bp;		/* buffer for cylgroup block with inode map */
	struct inode *ip;	/* inode related to allocation */
	ino_t newinum;		/* new inode number being allocated */
{
	struct inodedep *inodedep;
	struct bmsafemap *bmsafemap;

	/*
	 * Create a dependency for the newly allocated inode.
	 * Panic if it already exists as something is seriously wrong.
	 * Otherwise add it to the dependency list for the buffer holding
	 * the cylinder group map from which it was allocated.
	 */
	ACQUIRE_LOCK(&lk);
	if ((inodedep_lookup(ip->i_fs, newinum, DEPALLOC|NODELAY, &inodedep))) {
		FREE_LOCK(&lk);
		panic("softdep_setup_inomapdep: found inode");
	}
	inodedep->id_buf = bp;
	inodedep->id_state &= ~DEPCOMPLETE;
	bmsafemap = bmsafemap_lookup(bp);
	LIST_INSERT_HEAD(&bmsafemap->sm_inodedephd, inodedep, id_deps);
	FREE_LOCK(&lk);
}

/*
 * Called just after updating the cylinder group block to
 * allocate a block or fragment.
 */
void
softdep_setup_blkmapdep(bp, fs, newblkno)
	struct buf *bp;		/* buffer for cylgroup block with block map */
	struct fs *fs;		/* filesystem doing allocation */
	ufs_daddr_t newblkno;	/* number of newly allocated block */
{
	struct newblk *newblk;
	struct bmsafemap *bmsafemap;

	/*
	 * Create a dependency for the newly allocated block.
	 * Add it to the dependency list for the buffer holding
	 * the cylinder group map from which it was allocated.
	 */
	if (newblk_lookup(fs, newblkno, DEPALLOC, &newblk) != 0)
		panic("softdep_setup_blkmapdep: found block");
	ACQUIRE_LOCK(&lk);
	newblk->nb_bmsafemap = bmsafemap = bmsafemap_lookup(bp);
	LIST_INSERT_HEAD(&bmsafemap->sm_newblkhd, newblk, nb_deps);
	FREE_LOCK(&lk);
}
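
/*
 * Editorial call-site sketch (not part of the original file): the FFS
 * block and inode allocators invoke the two hooks above immediately after
 * setting the corresponding bit in the in-core cylinder group buffer,
 * along the lines of (see ffs_alloc.c):
 */
#if 0
	setbit(inosused, ino);			/* inode now marked in use */
	if (DOINGSOFTDEP(ITOV(ip)))
		softdep_setup_inomapdep(bp, ip, cg * fs->fs_ipg + ino);
#endif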
/*
 * Find the bmsafemap associated with a cylinder group buffer.
 * If none exists, create one.  The buffer must be locked when
 * this routine is called and this routine must be called with
 * splbio interrupts blocked.
 */
static struct bmsafemap *
bmsafemap_lookup(bp)
	struct buf *bp;
{
	struct bmsafemap *bmsafemap;
	struct worklist *wk;

#ifdef DEBUG
	if (lk.lkt_held == NOHOLDER)
		panic("bmsafemap_lookup: lock not held");
#endif
	LIST_FOREACH(wk, &bp->b_dep, wk_list)
		if (wk->wk_type == D_BMSAFEMAP)
			return (WK_BMSAFEMAP(wk));
	FREE_LOCK(&lk);
	MALLOC(bmsafemap, struct bmsafemap *, sizeof(struct bmsafemap),
	    M_BMSAFEMAP, M_SOFTDEP_FLAGS);
	bmsafemap->sm_list.wk_type = D_BMSAFEMAP;
	bmsafemap->sm_list.wk_state = 0;
	bmsafemap->sm_buf = bp;
	LIST_INIT(&bmsafemap->sm_allocdirecthd);
	LIST_INIT(&bmsafemap->sm_allocindirhd);
	LIST_INIT(&bmsafemap->sm_inodedephd);
	LIST_INIT(&bmsafemap->sm_newblkhd);
	ACQUIRE_LOCK(&lk);
	WORKLIST_INSERT(&bp->b_dep, &bmsafemap->sm_list);
	return (bmsafemap);
}

/*
 * Direct block allocation dependencies.
 *
 * When a new block is allocated, the corresponding disk locations must be
 * initialized (with zeros or new data) before the on-disk inode points to
 * them.  Also, the freemap from which the block was allocated must be
 * updated (on disk) before the inode's pointer.  These two dependencies are
 * independent of each other and are needed for all file blocks and indirect
 * blocks that are pointed to directly by the inode.  Just before the
 * "in-core" version of the inode is updated with a newly allocated block
 * number, a procedure (below) is called to set up allocation dependency
 * structures.  These structures are removed when the corresponding
 * dependencies are satisfied or when the block allocation becomes obsolete
 * (i.e., the file is deleted, the block is de-allocated, or the block is a
 * fragment that gets upgraded).  All of these cases are handled in
 * procedures described later.
 *
 * When a file extension causes a fragment to be upgraded, either to a larger
 * fragment or to a full block, the on-disk location may change (if the
 * previous fragment could not simply be extended).  In this case, the old
 * fragment must be de-allocated, but not until after the inode's pointer has
 * been updated.  In most cases, this is handled by later procedures, which
 * will construct a "freefrag" structure to be added to the workitem queue
 * when the inode update is complete (or obsolete).  The main exception to
 * this is when an allocation occurs while a pending allocation dependency
 * (for the same block pointer) remains.  This case is handled in the main
 * allocation dependency setup procedure by immediately freeing the
 * unreferenced fragments.
 */
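
/*
 * Editorial worked example (not part of the original file): when a write
 * extends a file into a newly allocated block, the safe on-disk ordering
 * implied by the comment above is
 *
 *	1) the cylinder group bitmap marking the block in use,
 *	2) the contents of the new block itself,
 *	3) the inode (or indirect block) pointer to the new block.
 *
 * The allocdirect created below tracks (1) via its DEPCOMPLETE flag and
 * lets the write-initiation code roll an uncommitted pointer back to
 * ad_oldblkno until the dependencies are satisfied.
 */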
void
softdep_setup_allocdirect(ip, lbn, newblkno, oldblkno, newsize, oldsize, bp)
	struct inode *ip;	/* inode to which block is being added */
	ufs_lbn_t lbn;		/* block pointer within inode */
	ufs_daddr_t newblkno;	/* disk block number being added */
	ufs_daddr_t oldblkno;	/* previous block number, 0 unless frag */
	long newsize;		/* size of new block */
	long oldsize;		/* size of old block */
	struct buf *bp;		/* bp for allocated block */
{
	struct allocdirect *adp, *oldadp;
	struct allocdirectlst *adphead;
	struct bmsafemap *bmsafemap;
	struct inodedep *inodedep;
	struct pagedep *pagedep;
	struct newblk *newblk;

	MALLOC(adp, struct allocdirect *, sizeof(struct allocdirect),
	    M_ALLOCDIRECT, M_SOFTDEP_FLAGS);
	bzero(adp, sizeof(struct allocdirect));
	adp->ad_list.wk_type = D_ALLOCDIRECT;
	adp->ad_lbn = lbn;
	adp->ad_newblkno = newblkno;
	adp->ad_oldblkno = oldblkno;
	adp->ad_newsize = newsize;
	adp->ad_oldsize = oldsize;
	adp->ad_state = ATTACHED;
	if (newblkno == oldblkno)
		adp->ad_freefrag = NULL;
	else
		adp->ad_freefrag = newfreefrag(ip, oldblkno, oldsize);

	if (newblk_lookup(ip->i_fs, newblkno, 0, &newblk) == 0)
		panic("softdep_setup_allocdirect: lost block");

	ACQUIRE_LOCK(&lk);
	inodedep_lookup(ip->i_fs, ip->i_number, DEPALLOC | NODELAY, &inodedep);
	adp->ad_inodedep = inodedep;

	if (newblk->nb_state == DEPCOMPLETE) {
		adp->ad_state |= DEPCOMPLETE;
		adp->ad_buf = NULL;
	} else {
		bmsafemap = newblk->nb_bmsafemap;
		adp->ad_buf = bmsafemap->sm_buf;
		LIST_REMOVE(newblk, nb_deps);
		LIST_INSERT_HEAD(&bmsafemap->sm_allocdirecthd, adp, ad_deps);
	}
	LIST_REMOVE(newblk, nb_hash);
	FREE(newblk, M_NEWBLK);

	WORKLIST_INSERT(&bp->b_dep, &adp->ad_list);
	if (lbn >= NDADDR) {
		/* allocating an indirect block */
		if (oldblkno != 0) {
			FREE_LOCK(&lk);
			panic("softdep_setup_allocdirect: non-zero indir");
		}
	} else {
		/*
		 * Allocating a direct block.
		 *
		 * If we are allocating a directory block, then we must
		 * allocate an associated pagedep to track additions and
		 * deletions.
		 */
		if ((ip->i_mode & IFMT) == IFDIR &&
		    pagedep_lookup(ip, lbn, DEPALLOC, &pagedep) == 0)
			WORKLIST_INSERT(&bp->b_dep, &pagedep->pd_list);
	}
	/*
	 * The list of allocdirects must be kept in sorted and ascending
	 * order so that the rollback routines can quickly determine the
	 * first uncommitted block (the size of the file stored on disk
	 * ends at the end of the lowest committed fragment, or if there
	 * are no fragments, at the end of the highest committed block).
	 * Since files generally grow, the typical case is that the new
	 * block is to be added at the end of the list.  We speed this
	 * special case by checking against the last allocdirect in the
	 * list before laboriously traversing the list looking for the
	 * insertion point.
	 */
	adphead = &inodedep->id_newinoupdt;
	oldadp = TAILQ_LAST(adphead, allocdirectlst);
	if (oldadp == NULL || oldadp->ad_lbn <= lbn) {
		/* insert at end of list */
		TAILQ_INSERT_TAIL(adphead, adp, ad_next);
		if (oldadp != NULL && oldadp->ad_lbn == lbn)
			allocdirect_merge(adphead, adp, oldadp);
		FREE_LOCK(&lk);
		return;
	}
	TAILQ_FOREACH(oldadp, adphead, ad_next) {
		if (oldadp->ad_lbn >= lbn)
			break;
	}
	if (oldadp == NULL) {
		FREE_LOCK(&lk);
		panic("softdep_setup_allocdirect: lost entry");
	}
	/* insert in middle of list */
	TAILQ_INSERT_BEFORE(oldadp, adp, ad_next);
	if (oldadp->ad_lbn == lbn)
		allocdirect_merge(adphead, adp, oldadp);
	FREE_LOCK(&lk);
}

/*
 * Replace an old allocdirect dependency with a newer one.
 * This routine must be called with splbio interrupts blocked.
 */
static void
allocdirect_merge(adphead, newadp, oldadp)
	struct allocdirectlst *adphead;	/* head of list holding allocdirects */
	struct allocdirect *newadp;	/* allocdirect being added */
	struct allocdirect *oldadp;	/* existing allocdirect being checked */
{
	struct freefrag *freefrag;

#ifdef DEBUG
	if (lk.lkt_held == NOHOLDER)
		panic("allocdirect_merge: lock not held");
#endif
	if (newadp->ad_oldblkno != oldadp->ad_newblkno ||
	    newadp->ad_oldsize != oldadp->ad_newsize ||
	    newadp->ad_lbn >= NDADDR) {
		FREE_LOCK(&lk);
		panic("allocdirect_merge: old %d != new %d || lbn %ld >= %d",
		    newadp->ad_oldblkno, oldadp->ad_newblkno, newadp->ad_lbn,
		    NDADDR);
	}
	newadp->ad_oldblkno = oldadp->ad_oldblkno;
	newadp->ad_oldsize = oldadp->ad_oldsize;
	/*
	 * If the old dependency had a fragment to free or had never
	 * previously had a block allocated, then the new dependency
	 * can immediately post its freefrag and adopt the old freefrag.
	 * This action is done by swapping the freefrag dependencies.
	 * The new dependency gains the old one's freefrag, and the
	 * old one gets the new one and then immediately puts it on
	 * the worklist when it is freed by free_allocdirect.  It is
	 * not possible to do this swap when the old dependency had a
	 * non-zero size but no previous fragment to free.  This condition
	 * arises when the new block is an extension of the old block.
	 * Here, the first part of the fragment allocated to the new
	 * dependency is part of the block currently claimed on disk by
	 * the old dependency, so cannot legitimately be freed until the
	 * conditions for the new dependency are fulfilled.
	 */
	if (oldadp->ad_freefrag != NULL || oldadp->ad_oldblkno == 0) {
		freefrag = newadp->ad_freefrag;
		newadp->ad_freefrag = oldadp->ad_freefrag;
		oldadp->ad_freefrag = freefrag;
	}
	free_allocdirect(adphead, oldadp, 0);
}
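
/*
 * Editorial worked example (not part of the original file): suppose lbn 2
 * held a 2KB fragment at block 100 and the file grows, so a full block at
 * 200 is allocated.  The newer allocdirect arrives as (old 100, new 200)
 * and merges with the pending (old 0, new 100) into (old 0, new 200); the
 * freefrag for fragment 100 is handed to the old dependency, which posts
 * it to the worklist when free_allocdirect() releases it.
 */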
/*
 * Allocate a new freefrag structure if needed.
 */
static struct freefrag *
newfreefrag(ip, blkno, size)
	struct inode *ip;
	ufs_daddr_t blkno;
	long size;
{
	struct freefrag *freefrag;
	struct fs *fs;

	if (blkno == 0)
		return (NULL);
	fs = ip->i_fs;
	if (fragnum(fs, blkno) + numfrags(fs, size) > fs->fs_frag)
		panic("newfreefrag: frag size");
	MALLOC(freefrag, struct freefrag *, sizeof(struct freefrag),
	    M_FREEFRAG, M_SOFTDEP_FLAGS);
	freefrag->ff_list.wk_type = D_FREEFRAG;
	freefrag->ff_state = ip->i_uid & ~ONWORKLIST;	/* XXX - used below */
	freefrag->ff_inum = ip->i_number;
	freefrag->ff_fs = fs;
	freefrag->ff_devvp = ip->i_devvp;
	freefrag->ff_blkno = blkno;
	freefrag->ff_fragsize = size;
	return (freefrag);
}

/*
 * This workitem de-allocates fragments that were replaced during
 * file block allocation.
 */
static void
handle_workitem_freefrag(freefrag)
	struct freefrag *freefrag;
{
	struct inode tip;

	tip.i_fs = freefrag->ff_fs;
	tip.i_devvp = freefrag->ff_devvp;
	tip.i_dev = freefrag->ff_devvp->v_rdev;
	tip.i_number = freefrag->ff_inum;
	tip.i_uid = freefrag->ff_state & ~ONWORKLIST;	/* XXX - set above */
	ffs_blkfree(&tip, freefrag->ff_blkno, freefrag->ff_fragsize);
	FREE(freefrag, M_FREEFRAG);
}

/*
 * Indirect block allocation dependencies.
 *
 * The same dependencies that exist for a direct block also exist when
 * a new block is allocated and pointed to by an entry in a block of
 * indirect pointers.  The undo/redo states described above are also
 * used here.  Because an indirect block contains many pointers that
 * may have dependencies, a second copy of the entire in-memory indirect
 * block is kept.  The buffer cache copy is always completely up-to-date.
 * The second copy, which is used only as a source for disk writes,
 * contains only the safe pointers (i.e., those that have no remaining
 * update dependencies).  The second copy is freed when all pointers
 * are safe.  The cache is not allowed to replace indirect blocks with
 * pending update dependencies.  If a buffer containing an indirect
 * block with dependencies is written, these routines will mark it
 * dirty again.  It can only be successfully written once all the
 * dependencies are removed.  The ffs_fsync routine and
 * softdep_sync_metadata work together to get all the dependencies
 * removed so that a file can be successfully written to disk.  Three
 * procedures are used when setting up indirect block pointer
 * dependencies.  The division is necessary because of the organization
 * of the "balloc" routine and because of the distinction between file
 * pages and file metadata blocks.
 */
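
/*
 * Editorial note (not part of the original file): the "second copy"
 * described above is ir_savebp.  Concretely, if slot 5 of an indirect
 * block points at a freshly allocated block whose bitmap write is still
 * pending, the buffer cache copy holds the new pointer while
 * setup_allocindir_phase2() below stores the old, safe value in
 * ir_savebp; disk writes are sourced from the safe copy, so the on-disk
 * indirect block never references an unallocated block.
 */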
/*
 * Allocate a new allocindir structure.
 */
static struct allocindir *
newallocindir(ip, ptrno, newblkno, oldblkno)
	struct inode *ip;	/* inode for file being extended */
	int ptrno;		/* offset of pointer in indirect block */
	ufs_daddr_t newblkno;	/* disk block number being added */
	ufs_daddr_t oldblkno;	/* previous block number, 0 if none */
{
	struct allocindir *aip;

	MALLOC(aip, struct allocindir *, sizeof(struct allocindir),
	    M_ALLOCINDIR, M_SOFTDEP_FLAGS);
	bzero(aip, sizeof(struct allocindir));
	aip->ai_list.wk_type = D_ALLOCINDIR;
	aip->ai_state = ATTACHED;
	aip->ai_offset = ptrno;
	aip->ai_newblkno = newblkno;
	aip->ai_oldblkno = oldblkno;
	aip->ai_freefrag = newfreefrag(ip, oldblkno, ip->i_fs->fs_bsize);
	return (aip);
}

/*
 * Called just before setting an indirect block pointer
 * to a newly allocated file page.
 */
void
softdep_setup_allocindir_page(ip, lbn, bp, ptrno, newblkno, oldblkno, nbp)
	struct inode *ip;	/* inode for file being extended */
	ufs_lbn_t lbn;		/* allocated block number within file */
	struct buf *bp;		/* buffer with indirect blk referencing page */
	int ptrno;		/* offset of pointer in indirect block */
	ufs_daddr_t newblkno;	/* disk block number being added */
	ufs_daddr_t oldblkno;	/* previous block number, 0 if none */
	struct buf *nbp;	/* buffer holding allocated page */
{
	struct allocindir *aip;
	struct pagedep *pagedep;

	aip = newallocindir(ip, ptrno, newblkno, oldblkno);
	ACQUIRE_LOCK(&lk);
	/*
	 * If we are allocating a directory page, then we must
	 * allocate an associated pagedep to track additions and
	 * deletions.
	 */
	if ((ip->i_mode & IFMT) == IFDIR &&
	    pagedep_lookup(ip, lbn, DEPALLOC, &pagedep) == 0)
		WORKLIST_INSERT(&nbp->b_dep, &pagedep->pd_list);
	WORKLIST_INSERT(&nbp->b_dep, &aip->ai_list);
	FREE_LOCK(&lk);
	setup_allocindir_phase2(bp, ip, aip);
}

/*
 * Called just before setting an indirect block pointer to a
 * newly allocated indirect block.
 */
void
softdep_setup_allocindir_meta(nbp, ip, bp, ptrno, newblkno)
	struct buf *nbp;	/* newly allocated indirect block */
	struct inode *ip;	/* inode for file being extended */
	struct buf *bp;		/* indirect block referencing allocated block */
	int ptrno;		/* offset of pointer in indirect block */
	ufs_daddr_t newblkno;	/* disk block number being added */
{
	struct allocindir *aip;

	aip = newallocindir(ip, ptrno, newblkno, 0);
	ACQUIRE_LOCK(&lk);
	WORKLIST_INSERT(&nbp->b_dep, &aip->ai_list);
	FREE_LOCK(&lk);
	setup_allocindir_phase2(bp, ip, aip);
}

/*
 * Called to finish the allocation of the "aip" allocated
 * by one of the two routines above.
 */
1660 */ 1661 static void 1662 setup_allocindir_phase2(bp, ip, aip) 1663 struct buf *bp; /* in-memory copy of the indirect block */ 1664 struct inode *ip; /* inode for file being extended */ 1665 struct allocindir *aip; /* allocindir allocated by the above routines */ 1666 { 1667 struct worklist *wk; 1668 struct indirdep *indirdep, *newindirdep; 1669 struct bmsafemap *bmsafemap; 1670 struct allocindir *oldaip; 1671 struct freefrag *freefrag; 1672 struct newblk *newblk; 1673 1674 if (bp->b_loffset >= 0) 1675 panic("setup_allocindir_phase2: not indir blk"); 1676 for (indirdep = NULL, newindirdep = NULL; ; ) { 1677 ACQUIRE_LOCK(&lk); 1678 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 1679 if (wk->wk_type != D_INDIRDEP) 1680 continue; 1681 indirdep = WK_INDIRDEP(wk); 1682 break; 1683 } 1684 if (indirdep == NULL && newindirdep) { 1685 indirdep = newindirdep; 1686 WORKLIST_INSERT(&bp->b_dep, &indirdep->ir_list); 1687 newindirdep = NULL; 1688 } 1689 FREE_LOCK(&lk); 1690 if (indirdep) { 1691 if (newblk_lookup(ip->i_fs, aip->ai_newblkno, 0, 1692 &newblk) == 0) 1693 panic("setup_allocindir: lost block"); 1694 ACQUIRE_LOCK(&lk); 1695 if (newblk->nb_state == DEPCOMPLETE) { 1696 aip->ai_state |= DEPCOMPLETE; 1697 aip->ai_buf = NULL; 1698 } else { 1699 bmsafemap = newblk->nb_bmsafemap; 1700 aip->ai_buf = bmsafemap->sm_buf; 1701 LIST_REMOVE(newblk, nb_deps); 1702 LIST_INSERT_HEAD(&bmsafemap->sm_allocindirhd, 1703 aip, ai_deps); 1704 } 1705 LIST_REMOVE(newblk, nb_hash); 1706 FREE(newblk, M_NEWBLK); 1707 aip->ai_indirdep = indirdep; 1708 /* 1709 * Check to see if there is an existing dependency 1710 * for this block. If there is, merge the old 1711 * dependency into the new one. 1712 */ 1713 if (aip->ai_oldblkno == 0) 1714 oldaip = NULL; 1715 else 1716 1717 LIST_FOREACH(oldaip, &indirdep->ir_deplisthd, ai_next) 1718 if (oldaip->ai_offset == aip->ai_offset) 1719 break; 1720 if (oldaip != NULL) { 1721 if (oldaip->ai_newblkno != aip->ai_oldblkno) { 1722 FREE_LOCK(&lk); 1723 panic("setup_allocindir_phase2: blkno"); 1724 } 1725 aip->ai_oldblkno = oldaip->ai_oldblkno; 1726 freefrag = oldaip->ai_freefrag; 1727 oldaip->ai_freefrag = aip->ai_freefrag; 1728 aip->ai_freefrag = freefrag; 1729 free_allocindir(oldaip, NULL); 1730 } 1731 LIST_INSERT_HEAD(&indirdep->ir_deplisthd, aip, ai_next); 1732 ((ufs_daddr_t *)indirdep->ir_savebp->b_data) 1733 [aip->ai_offset] = aip->ai_oldblkno; 1734 FREE_LOCK(&lk); 1735 } 1736 if (newindirdep) { 1737 /* 1738 * Avoid any possibility of data corruption by 1739 * ensuring that our old version is thrown away. 1740 */ 1741 newindirdep->ir_savebp->b_flags |= B_INVAL | B_NOCACHE; 1742 brelse(newindirdep->ir_savebp); 1743 WORKITEM_FREE((caddr_t)newindirdep, D_INDIRDEP); 1744 } 1745 if (indirdep) 1746 break; 1747 MALLOC(newindirdep, struct indirdep *, sizeof(struct indirdep), 1748 M_INDIRDEP, M_SOFTDEP_FLAGS); 1749 newindirdep->ir_list.wk_type = D_INDIRDEP; 1750 newindirdep->ir_state = ATTACHED; 1751 LIST_INIT(&newindirdep->ir_deplisthd); 1752 LIST_INIT(&newindirdep->ir_donehd); 1753 if (bp->b_bio2.bio_offset == NOOFFSET) { 1754 VOP_BMAP(bp->b_vp, bp->b_bio1.bio_offset, 1755 NULL, &bp->b_bio2.bio_offset, 1756 NULL, NULL); 1757 } 1758 KKASSERT(bp->b_bio2.bio_offset != NOOFFSET); 1759 newindirdep->ir_savebp = getblk(ip->i_devvp, 1760 bp->b_bio2.bio_offset, 1761 bp->b_bcount, 0, 0); 1762 BUF_KERNPROC(newindirdep->ir_savebp); 1763 bcopy(bp->b_data, newindirdep->ir_savebp->b_data, bp->b_bcount); 1764 } 1765 } 1766 1767 /* 1768 * Block de-allocation dependencies. 
1769 * 1770 * When blocks are de-allocated, the on-disk pointers must be nullified before 1771 * the blocks are made available for use by other files. (The true 1772 * requirement is that old pointers must be nullified before new on-disk 1773 * pointers are set. We chose this slightly more stringent requirement to 1774 * reduce complexity.) Our implementation handles this dependency by updating 1775 * the inode (or indirect block) appropriately but delaying the actual block 1776 * de-allocation (i.e., freemap and free space count manipulation) until 1777 * after the updated versions reach stable storage. After the disk is 1778 * updated, the blocks can be safely de-allocated whenever it is convenient. 1779 * This implementation handles only the common case of reducing a file's 1780 * length to zero. Other cases are handled by the conventional synchronous 1781 * write approach. 1782 * 1783 * The ffs implementation with which we worked double-checks 1784 * the state of the block pointers and file size as it reduces 1785 * a file's length. Some of this code is replicated here in our 1786 * soft updates implementation. The freeblks->fb_chkcnt field is 1787 * used to transfer a part of this information to the procedure 1788 * that eventually de-allocates the blocks. 1789 * 1790 * This routine should be called from the routine that shortens 1791 * a file's length, before the inode's size or block pointers 1792 * are modified. It will save the block pointer information for 1793 * later release and zero the inode so that the calling routine 1794 * can release it. 1795 */ 1796 struct softdep_setup_freeblocks_info { 1797 struct fs *fs; 1798 struct inode *ip; 1799 }; 1800 1801 static int softdep_setup_freeblocks_bp(struct buf *bp, void *data); 1802 1803 void 1804 softdep_setup_freeblocks(ip, length) 1805 struct inode *ip; /* The inode whose length is to be reduced */ 1806 off_t length; /* The new length for the file */ 1807 { 1808 struct softdep_setup_freeblocks_info info; 1809 struct freeblks *freeblks; 1810 struct inodedep *inodedep; 1811 struct allocdirect *adp; 1812 struct vnode *vp; 1813 struct buf *bp; 1814 struct fs *fs; 1815 int i, error, delay; 1816 int count; 1817 1818 fs = ip->i_fs; 1819 if (length != 0) 1820 panic("softdep_setup_freeblocks: non-zero length"); 1821 MALLOC(freeblks, struct freeblks *, sizeof(struct freeblks), 1822 M_FREEBLKS, M_SOFTDEP_FLAGS); 1823 bzero(freeblks, sizeof(struct freeblks)); 1824 freeblks->fb_list.wk_type = D_FREEBLKS; 1825 freeblks->fb_state = ATTACHED; 1826 freeblks->fb_uid = ip->i_uid; 1827 freeblks->fb_previousinum = ip->i_number; 1828 freeblks->fb_devvp = ip->i_devvp; 1829 freeblks->fb_fs = fs; 1830 freeblks->fb_oldsize = ip->i_size; 1831 freeblks->fb_newsize = length; 1832 freeblks->fb_chkcnt = ip->i_blocks; 1833 for (i = 0; i < NDADDR; i++) { 1834 freeblks->fb_dblks[i] = ip->i_db[i]; 1835 ip->i_db[i] = 0; 1836 } 1837 for (i = 0; i < NIADDR; i++) { 1838 freeblks->fb_iblks[i] = ip->i_ib[i]; 1839 ip->i_ib[i] = 0; 1840 } 1841 ip->i_blocks = 0; 1842 ip->i_size = 0; 1843 /* 1844 * Push the zero'ed inode to its disk buffer so that we are free 1845 * to delete its dependencies below. Once the dependencies are gone 1846 * the buffer can be safely released.
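 *
 * In outline (a sketch of the code that follows; the slot computation
 * is the real ino_to_fsbo() arithmetic used below):
 *
 *	bread(ip->i_devvp, ...inode block..., &bp);
 *	((struct ufs1_dinode *)bp->b_data)[ino_to_fsbo(fs, ip->i_number)]
 *	    = ip->i_din;
 *	...find and delete the inode's dependencies...
 *	bdwrite(bp);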
1847 */ 1848 if ((error = bread(ip->i_devvp, 1849 fsbtodoff(fs, ino_to_fsba(fs, ip->i_number)), 1850 (int)fs->fs_bsize, &bp)) != 0) 1851 softdep_error("softdep_setup_freeblocks", error); 1852 *((struct ufs1_dinode *)bp->b_data + ino_to_fsbo(fs, ip->i_number)) = 1853 ip->i_din; 1854 /* 1855 * Find and eliminate any inode dependencies. 1856 */ 1857 ACQUIRE_LOCK(&lk); 1858 (void) inodedep_lookup(fs, ip->i_number, DEPALLOC, &inodedep); 1859 if ((inodedep->id_state & IOSTARTED) != 0) { 1860 FREE_LOCK(&lk); 1861 panic("softdep_setup_freeblocks: inode busy"); 1862 } 1863 /* 1864 * Add the freeblks structure to the list of operations that 1865 * must await the zero'ed inode being written to disk. If we 1866 * still have a bitmap dependency (delay == 0), then the inode 1867 * has never been written to disk, so we can process the 1868 * freeblks below once we have deleted the dependencies. 1869 */ 1870 delay = (inodedep->id_state & DEPCOMPLETE); 1871 if (delay) 1872 WORKLIST_INSERT(&inodedep->id_bufwait, &freeblks->fb_list); 1873 /* 1874 * Because the file length has been truncated to zero, any 1875 * pending block allocation dependency structures associated 1876 * with this inode are obsolete and can simply be de-allocated. 1877 * We must first merge the two dependency lists to get rid of 1878 * any duplicate freefrag structures, then purge the merged list. 1879 */ 1880 merge_inode_lists(inodedep); 1881 while ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != 0) 1882 free_allocdirect(&inodedep->id_inoupdt, adp, 1); 1883 FREE_LOCK(&lk); 1884 bdwrite(bp); 1885 /* 1886 * We must wait for any I/O in progress to finish so that 1887 * all potential buffers on the dirty list will be visible. 1888 * Once they are all there, walk the list and get rid of 1889 * any dependencies. 1890 */ 1891 vp = ITOV(ip); 1892 ACQUIRE_LOCK(&lk); 1893 drain_output(vp, 1); 1894 1895 info.fs = fs; 1896 info.ip = ip; 1897 do { 1898 count = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL, 1899 softdep_setup_freeblocks_bp, &info); 1900 } while (count != 0); 1901 if (inodedep_lookup(fs, ip->i_number, 0, &inodedep) != 0) 1902 (void)free_inodedep(inodedep); 1903 1904 if (delay) { 1905 freeblks->fb_state |= DEPCOMPLETE; 1906 /* 1907 * If the inode with zeroed block pointers is now on disk 1908 * we can start freeing blocks. Add freeblks to the worklist 1909 * instead of calling handle_workitem_freeblocks directly as 1910 * it is more likely that additional IO is needed to complete 1911 * the request here than in the !delay case. 1912 */ 1913 if ((freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE) 1914 add_to_worklist(&freeblks->fb_list); 1915 } 1916 1917 FREE_LOCK(&lk); 1918 /* 1919 * If the inode has never been written to disk (delay == 0), 1920 * then we can process the freeblks now that we have deleted 1921 * the dependencies. 
1922 */ 1923 if (!delay) 1924 handle_workitem_freeblocks(freeblks); 1925 } 1926 1927 static int 1928 softdep_setup_freeblocks_bp(struct buf *bp, void *data) 1929 { 1930 struct softdep_setup_freeblocks_info *info = data; 1931 struct inodedep *inodedep; 1932 1933 if (getdirtybuf(&bp, MNT_WAIT) == 0) { 1934 kprintf("softdep_setup_freeblocks_bp(1): caught bp %p going away\n", bp); 1935 return(-1); 1936 } 1937 if (bp->b_vp != ITOV(info->ip) || (bp->b_flags & B_DELWRI) == 0) { 1938 kprintf("softdep_setup_freeblocks_bp(2): caught bp %p going away\n", bp); 1939 BUF_UNLOCK(bp); 1940 return(-1); 1941 } 1942 (void) inodedep_lookup(info->fs, info->ip->i_number, 0, &inodedep); 1943 deallocate_dependencies(bp, inodedep); 1944 bp->b_flags |= B_INVAL | B_NOCACHE; 1945 FREE_LOCK(&lk); 1946 brelse(bp); 1947 ACQUIRE_LOCK(&lk); 1948 return(1); 1949 } 1950 1951 /* 1952 * Reclaim any dependency structures from a buffer that is about to 1953 * be reallocated to a new vnode. The buffer must be locked, thus, 1954 * no I/O completion operations can occur while we are manipulating 1955 * its associated dependencies. The mutex is held so that other I/O's 1956 * associated with related dependencies do not occur. 1957 */ 1958 static void 1959 deallocate_dependencies(bp, inodedep) 1960 struct buf *bp; 1961 struct inodedep *inodedep; 1962 { 1963 struct worklist *wk; 1964 struct indirdep *indirdep; 1965 struct allocindir *aip; 1966 struct pagedep *pagedep; 1967 struct dirrem *dirrem; 1968 struct diradd *dap; 1969 int i; 1970 1971 while ((wk = LIST_FIRST(&bp->b_dep)) != NULL) { 1972 switch (wk->wk_type) { 1973 1974 case D_INDIRDEP: 1975 indirdep = WK_INDIRDEP(wk); 1976 /* 1977 * None of the indirect pointers will ever be visible, 1978 * so they can simply be tossed. GOINGAWAY ensures 1979 * that allocated pointers will be saved in the buffer 1980 * cache until they are freed. Note that they will 1981 * only be able to be found by their physical address 1982 * since the inode mapping the logical address will 1983 * be gone. The save buffer used for the safe copy 1984 * was allocated in setup_allocindir_phase2 using 1985 * the physical address so it could be used for this 1986 * purpose. Hence we swap the safe copy with the real 1987 * copy, allowing the safe copy to be freed and holding 1988 * on to the real copy for later use in indir_trunc. 1989 * 1990 * NOTE: ir_savebp is relative to the block device 1991 * so b_bio1 contains the device block number. 1992 */ 1993 if (indirdep->ir_state & GOINGAWAY) { 1994 FREE_LOCK(&lk); 1995 panic("deallocate_dependencies: already gone"); 1996 } 1997 indirdep->ir_state |= GOINGAWAY; 1998 while ((aip = LIST_FIRST(&indirdep->ir_deplisthd)) != 0) 1999 free_allocindir(aip, inodedep); 2000 if (bp->b_bio1.bio_offset >= 0 || 2001 bp->b_bio2.bio_offset != indirdep->ir_savebp->b_bio1.bio_offset) { 2002 FREE_LOCK(&lk); 2003 panic("deallocate_dependencies: not indir"); 2004 } 2005 bcopy(bp->b_data, indirdep->ir_savebp->b_data, 2006 bp->b_bcount); 2007 WORKLIST_REMOVE(wk); 2008 WORKLIST_INSERT(&indirdep->ir_savebp->b_dep, wk); 2009 continue; 2010 2011 case D_PAGEDEP: 2012 pagedep = WK_PAGEDEP(wk); 2013 /* 2014 * None of the directory additions will ever be 2015 * visible, so they can simply be tossed. 
2016 */ 2017 for (i = 0; i < DAHASHSZ; i++) 2018 while ((dap = 2019 LIST_FIRST(&pagedep->pd_diraddhd[i]))) 2020 free_diradd(dap); 2021 while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != 0) 2022 free_diradd(dap); 2023 /* 2024 * Copy any directory remove dependencies to the list 2025 * to be processed after the zero'ed inode is written. 2026 * If the inode has already been written, then they 2027 * can be dumped directly onto the work list. 2028 */ 2029 LIST_FOREACH(dirrem, &pagedep->pd_dirremhd, dm_next) { 2030 LIST_REMOVE(dirrem, dm_next); 2031 dirrem->dm_dirinum = pagedep->pd_ino; 2032 if (inodedep == NULL || 2033 (inodedep->id_state & ALLCOMPLETE) == 2034 ALLCOMPLETE) 2035 add_to_worklist(&dirrem->dm_list); 2036 else 2037 WORKLIST_INSERT(&inodedep->id_bufwait, 2038 &dirrem->dm_list); 2039 } 2040 WORKLIST_REMOVE(&pagedep->pd_list); 2041 LIST_REMOVE(pagedep, pd_hash); 2042 WORKITEM_FREE(pagedep, D_PAGEDEP); 2043 continue; 2044 2045 case D_ALLOCINDIR: 2046 free_allocindir(WK_ALLOCINDIR(wk), inodedep); 2047 continue; 2048 2049 case D_ALLOCDIRECT: 2050 case D_INODEDEP: 2051 FREE_LOCK(&lk); 2052 panic("deallocate_dependencies: Unexpected type %s", 2053 TYPENAME(wk->wk_type)); 2054 /* NOTREACHED */ 2055 2056 default: 2057 FREE_LOCK(&lk); 2058 panic("deallocate_dependencies: Unknown type %s", 2059 TYPENAME(wk->wk_type)); 2060 /* NOTREACHED */ 2061 } 2062 } 2063 } 2064 2065 /* 2066 * Free an allocdirect. Generate a new freefrag work request if appropriate. 2067 * This routine must be called with splbio interrupts blocked. 2068 */ 2069 static void 2070 free_allocdirect(adphead, adp, delay) 2071 struct allocdirectlst *adphead; 2072 struct allocdirect *adp; 2073 int delay; 2074 { 2075 2076 #ifdef DEBUG 2077 if (lk.lkt_held == NOHOLDER) 2078 panic("free_allocdirect: lock not held"); 2079 #endif 2080 if ((adp->ad_state & DEPCOMPLETE) == 0) 2081 LIST_REMOVE(adp, ad_deps); 2082 TAILQ_REMOVE(adphead, adp, ad_next); 2083 if ((adp->ad_state & COMPLETE) == 0) 2084 WORKLIST_REMOVE(&adp->ad_list); 2085 if (adp->ad_freefrag != NULL) { 2086 if (delay) 2087 WORKLIST_INSERT(&adp->ad_inodedep->id_bufwait, 2088 &adp->ad_freefrag->ff_list); 2089 else 2090 add_to_worklist(&adp->ad_freefrag->ff_list); 2091 } 2092 WORKITEM_FREE(adp, D_ALLOCDIRECT); 2093 } 2094 2095 /* 2096 * Prepare an inode to be freed. The actual free operation is not 2097 * done until the zero'ed inode has been written to disk. 2098 */ 2099 void 2100 softdep_freefile(pvp, ino, mode) 2101 struct vnode *pvp; 2102 ino_t ino; 2103 int mode; 2104 { 2105 struct inode *ip = VTOI(pvp); 2106 struct inodedep *inodedep; 2107 struct freefile *freefile; 2108 2109 /* 2110 * This sets up the inode de-allocation dependency. 2111 */ 2112 MALLOC(freefile, struct freefile *, sizeof(struct freefile), 2113 M_FREEFILE, M_SOFTDEP_FLAGS); 2114 freefile->fx_list.wk_type = D_FREEFILE; 2115 freefile->fx_list.wk_state = 0; 2116 freefile->fx_mode = mode; 2117 freefile->fx_oldinum = ino; 2118 freefile->fx_devvp = ip->i_devvp; 2119 freefile->fx_fs = ip->i_fs; 2120 2121 /* 2122 * If the inodedep does not exist, then the zero'ed inode has 2123 * been written to disk. If the allocated inode has never been 2124 * written to disk, then the on-disk inode is zero'ed. In either 2125 * case we can free the file immediately. 
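 *
 * Summarized, the two paths below are:
 *
 *	no inodedep found, or check_inode_unwritten() succeeds
 *		-> handle_workitem_freefile(freefile) runs immediately
 *	otherwise
 *		-> freefile is queued on inodedep->id_inowait and is
 *		   processed once the zero'ed inode block is written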
2126 */ 2127 ACQUIRE_LOCK(&lk); 2128 if (inodedep_lookup(ip->i_fs, ino, 0, &inodedep) == 0 || 2129 check_inode_unwritten(inodedep)) { 2130 FREE_LOCK(&lk); 2131 handle_workitem_freefile(freefile); 2132 return; 2133 } 2134 WORKLIST_INSERT(&inodedep->id_inowait, &freefile->fx_list); 2135 FREE_LOCK(&lk); 2136 } 2137 2138 /* 2139 * Check to see if an inode has never been written to disk. If 2140 * so free the inodedep and return success, otherwise return failure. 2141 * This routine must be called with splbio interrupts blocked. 2142 * 2143 * If we still have a bitmap dependency, then the inode has never 2144 * been written to disk. Drop the dependency as it is no longer 2145 * necessary since the inode is being deallocated. We set the 2146 * ALLCOMPLETE flags since the bitmap now properly shows that the 2147 * inode is not allocated. Even if the inode is actively being 2148 * written, it has been rolled back to its zero'ed state, so we 2149 * are ensured that a zero inode is what is on the disk. For short 2150 * lived files, this change will usually result in removing all the 2151 * dependencies from the inode so that it can be freed immediately. 2152 */ 2153 static int 2154 check_inode_unwritten(inodedep) 2155 struct inodedep *inodedep; 2156 { 2157 2158 if ((inodedep->id_state & DEPCOMPLETE) != 0 || 2159 LIST_FIRST(&inodedep->id_pendinghd) != NULL || 2160 LIST_FIRST(&inodedep->id_bufwait) != NULL || 2161 LIST_FIRST(&inodedep->id_inowait) != NULL || 2162 TAILQ_FIRST(&inodedep->id_inoupdt) != NULL || 2163 TAILQ_FIRST(&inodedep->id_newinoupdt) != NULL || 2164 inodedep->id_nlinkdelta != 0) 2165 return (0); 2166 2167 /* 2168 * Another process might be in initiate_write_inodeblock 2169 * trying to allocate memory without holding "Softdep Lock". 2170 */ 2171 if ((inodedep->id_state & IOSTARTED) != 0 && 2172 inodedep->id_savedino == NULL) 2173 return(0); 2174 2175 inodedep->id_state |= ALLCOMPLETE; 2176 LIST_REMOVE(inodedep, id_deps); 2177 inodedep->id_buf = NULL; 2178 if (inodedep->id_state & ONWORKLIST) 2179 WORKLIST_REMOVE(&inodedep->id_list); 2180 if (inodedep->id_savedino != NULL) { 2181 FREE(inodedep->id_savedino, M_INODEDEP); 2182 inodedep->id_savedino = NULL; 2183 } 2184 if (free_inodedep(inodedep) == 0) { 2185 FREE_LOCK(&lk); 2186 panic("check_inode_unwritten: busy inode"); 2187 } 2188 return (1); 2189 } 2190 2191 /* 2192 * Try to free an inodedep structure. Return 1 if it could be freed. 2193 */ 2194 static int 2195 free_inodedep(inodedep) 2196 struct inodedep *inodedep; 2197 { 2198 2199 if ((inodedep->id_state & ONWORKLIST) != 0 || 2200 (inodedep->id_state & ALLCOMPLETE) != ALLCOMPLETE || 2201 LIST_FIRST(&inodedep->id_pendinghd) != NULL || 2202 LIST_FIRST(&inodedep->id_bufwait) != NULL || 2203 LIST_FIRST(&inodedep->id_inowait) != NULL || 2204 TAILQ_FIRST(&inodedep->id_inoupdt) != NULL || 2205 TAILQ_FIRST(&inodedep->id_newinoupdt) != NULL || 2206 inodedep->id_nlinkdelta != 0 || inodedep->id_savedino != NULL) 2207 return (0); 2208 LIST_REMOVE(inodedep, id_hash); 2209 WORKITEM_FREE(inodedep, D_INODEDEP); 2210 num_inodedep -= 1; 2211 return (1); 2212 } 2213 2214 /* 2215 * This workitem routine performs the block de-allocation. 2216 * The workitem is added to the pending list after the updated 2217 * inode block has been written to disk. As mentioned above, 2218 * checks regarding the number of blocks de-allocated (compared 2219 * to the number of blocks allocated for the file) are also 2220 * performed in this function. 
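 *
 * For orientation, a worked example (assuming the common UFS1 layout
 * of NDADDR == 12 and an 8K-block filesystem, where NINDIR == 2048):
 * the baselbns[] array computed below holds the first logical block
 * number mapped by each level of indirection,
 *
 *	baselbns[0] = 12                          (single indirect)
 *	baselbns[1] = 12 + 2048 = 2060            (double indirect)
 *	baselbns[2] = 2060 + 2048*2048 = 4196364  (triple indirect)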
2221 */ 2222 static void 2223 handle_workitem_freeblocks(freeblks) 2224 struct freeblks *freeblks; 2225 { 2226 struct inode tip; 2227 ufs_daddr_t bn; 2228 struct fs *fs; 2229 int i, level, bsize; 2230 long nblocks, blocksreleased = 0; 2231 int error, allerror = 0; 2232 ufs_lbn_t baselbns[NIADDR], tmpval; 2233 2234 tip.i_number = freeblks->fb_previousinum; 2235 tip.i_devvp = freeblks->fb_devvp; 2236 tip.i_dev = freeblks->fb_devvp->v_rdev; 2237 tip.i_fs = freeblks->fb_fs; 2238 tip.i_size = freeblks->fb_oldsize; 2239 tip.i_uid = freeblks->fb_uid; 2240 fs = freeblks->fb_fs; 2241 tmpval = 1; 2242 baselbns[0] = NDADDR; 2243 for (i = 1; i < NIADDR; i++) { 2244 tmpval *= NINDIR(fs); 2245 baselbns[i] = baselbns[i - 1] + tmpval; 2246 } 2247 nblocks = btodb(fs->fs_bsize); 2248 blocksreleased = 0; 2249 /* 2250 * Indirect blocks first. 2251 */ 2252 for (level = (NIADDR - 1); level >= 0; level--) { 2253 if ((bn = freeblks->fb_iblks[level]) == 0) 2254 continue; 2255 if ((error = indir_trunc(&tip, fsbtodoff(fs, bn), level, 2256 baselbns[level], &blocksreleased)) != 0) 2257 allerror = error; 2258 ffs_blkfree(&tip, bn, fs->fs_bsize); 2259 blocksreleased += nblocks; 2260 } 2261 /* 2262 * All direct blocks or frags. 2263 */ 2264 for (i = (NDADDR - 1); i >= 0; i--) { 2265 if ((bn = freeblks->fb_dblks[i]) == 0) 2266 continue; 2267 bsize = blksize(fs, &tip, i); 2268 ffs_blkfree(&tip, bn, bsize); 2269 blocksreleased += btodb(bsize); 2270 } 2271 2272 #ifdef DIAGNOSTIC 2273 if (freeblks->fb_chkcnt != blocksreleased) 2274 kprintf("handle_workitem_freeblocks: block count\n"); 2275 if (allerror) 2276 softdep_error("handle_workitem_freeblocks", allerror); 2277 #endif /* DIAGNOSTIC */ 2278 WORKITEM_FREE(freeblks, D_FREEBLKS); 2279 } 2280 2281 /* 2282 * Release blocks associated with the inode ip and stored in the indirect 2283 * block at doffset. If level is greater than SINGLE, the block is an 2284 * indirect block and recursive calls to indir_trunc must be used to 2285 * cleanse other indirect blocks. 2286 */ 2287 static int 2288 indir_trunc(ip, doffset, level, lbn, countp) 2289 struct inode *ip; 2290 off_t doffset; 2291 int level; 2292 ufs_lbn_t lbn; 2293 long *countp; 2294 { 2295 struct buf *bp; 2296 ufs_daddr_t *bap; 2297 ufs_daddr_t nb; 2298 struct fs *fs; 2299 struct worklist *wk; 2300 struct indirdep *indirdep; 2301 int i, lbnadd, nblocks; 2302 int error, allerror = 0; 2303 2304 fs = ip->i_fs; 2305 lbnadd = 1; 2306 for (i = level; i > 0; i--) 2307 lbnadd *= NINDIR(fs); 2308 /* 2309 * Get buffer of block pointers to be freed. This routine is not 2310 * called until the zero'ed inode has been written, so it is safe 2311 * to free blocks as they are encountered. Because the inode has 2312 * been zero'ed, calls to bmap on these blocks will fail. So, we 2313 * have to use the on-disk address and the block device for the 2314 * filesystem to look them up. If the file was deleted before its 2315 * indirect blocks were all written to disk, the routine that set 2316 * us up (deallocate_dependencies) will have arranged to leave 2317 * a complete copy of the indirect block in memory for our use. 2318 * Otherwise we have to read the blocks in from the disk. 2319 */ 2320 ACQUIRE_LOCK(&lk); 2321 if ((bp = findblk(ip->i_devvp, doffset)) != NULL && 2322 (wk = LIST_FIRST(&bp->b_dep)) != NULL) { 2323 /* 2324 * bp must be ir_savebp, which is held locked for our use.
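 *
 * (Note: ir_savebp was obtained with getblk() on the device vnode in
 * setup_allocindir_phase2() and deliberately left locked, which is
 * why findblk() on the device offset can recover it here with its
 * dependency still attached.)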
2325 */ 2326 if (wk->wk_type != D_INDIRDEP || 2327 (indirdep = WK_INDIRDEP(wk))->ir_savebp != bp || 2328 (indirdep->ir_state & GOINGAWAY) == 0) { 2329 FREE_LOCK(&lk); 2330 panic("indir_trunc: lost indirdep"); 2331 } 2332 WORKLIST_REMOVE(wk); 2333 WORKITEM_FREE(indirdep, D_INDIRDEP); 2334 if (LIST_FIRST(&bp->b_dep) != NULL) { 2335 FREE_LOCK(&lk); 2336 panic("indir_trunc: dangling dep"); 2337 } 2338 FREE_LOCK(&lk); 2339 } else { 2340 FREE_LOCK(&lk); 2341 error = bread(ip->i_devvp, doffset, (int)fs->fs_bsize, &bp); 2342 if (error) 2343 return (error); 2344 } 2345 /* 2346 * Recursively free indirect blocks. 2347 */ 2348 bap = (ufs_daddr_t *)bp->b_data; 2349 nblocks = btodb(fs->fs_bsize); 2350 for (i = NINDIR(fs) - 1; i >= 0; i--) { 2351 if ((nb = bap[i]) == 0) 2352 continue; 2353 if (level != 0) { 2354 if ((error = indir_trunc(ip, fsbtodoff(fs, nb), 2355 level - 1, lbn + (i * lbnadd), countp)) != 0) 2356 allerror = error; 2357 } 2358 ffs_blkfree(ip, nb, fs->fs_bsize); 2359 *countp += nblocks; 2360 } 2361 bp->b_flags |= B_INVAL | B_NOCACHE; 2362 brelse(bp); 2363 return (allerror); 2364 } 2365 2366 /* 2367 * Free an allocindir. 2368 * This routine must be called with splbio interrupts blocked. 2369 */ 2370 static void 2371 free_allocindir(aip, inodedep) 2372 struct allocindir *aip; 2373 struct inodedep *inodedep; 2374 { 2375 struct freefrag *freefrag; 2376 2377 #ifdef DEBUG 2378 if (lk.lkt_held == NOHOLDER) 2379 panic("free_allocindir: lock not held"); 2380 #endif 2381 if ((aip->ai_state & DEPCOMPLETE) == 0) 2382 LIST_REMOVE(aip, ai_deps); 2383 if (aip->ai_state & ONWORKLIST) 2384 WORKLIST_REMOVE(&aip->ai_list); 2385 LIST_REMOVE(aip, ai_next); 2386 if ((freefrag = aip->ai_freefrag) != NULL) { 2387 if (inodedep == NULL) 2388 add_to_worklist(&freefrag->ff_list); 2389 else 2390 WORKLIST_INSERT(&inodedep->id_bufwait, 2391 &freefrag->ff_list); 2392 } 2393 WORKITEM_FREE(aip, D_ALLOCINDIR); 2394 } 2395 2396 /* 2397 * Directory entry addition dependencies. 2398 * 2399 * When adding a new directory entry, the inode (with its incremented link 2400 * count) must be written to disk before the directory entry's pointer to it. 2401 * Also, if the inode is newly allocated, the corresponding freemap must be 2402 * updated (on disk) before the directory entry's pointer. These requirements 2403 * are met via undo/redo on the directory entry's pointer, which consists 2404 * simply of the inode number. 2405 * 2406 * As directory entries are added and deleted, the free space within a 2407 * directory block can become fragmented. The ufs filesystem will compact 2408 * a fragmented directory block to make space for a new entry. When this 2409 * occurs, the offsets of previously added entries change. Any "diradd" 2410 * dependency structures corresponding to these entries must be updated with 2411 * the new offsets. 2412 */ 2413 2414 /* 2415 * This routine is called after the in-memory inode's link 2416 * count has been incremented, but before the directory entry's 2417 * pointer to the inode has been set. 
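 *
 * A sketch of the expected calling sequence (inferred from the
 * comments here and in softdep_change_linkcnt(); the real caller is
 * the ufs directory code):
 *
 *	ip->i_effnlink++;
 *	ip->i_nlink++;
 *	softdep_change_linkcnt(ip);
 *	...prepare the new entry in the directory block bp...
 *	softdep_setup_directory_add(bp, dp, diroffset, newinum, newdirbp);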
2418 */ 2419 void 2420 softdep_setup_directory_add(bp, dp, diroffset, newinum, newdirbp) 2421 struct buf *bp; /* buffer containing directory block */ 2422 struct inode *dp; /* inode for directory */ 2423 off_t diroffset; /* offset of new entry in directory */ 2424 ino_t newinum; /* inode referenced by new directory entry */ 2425 struct buf *newdirbp; /* non-NULL => contents of new mkdir */ 2426 { 2427 int offset; /* offset of new entry within directory block */ 2428 ufs_lbn_t lbn; /* block in directory containing new entry */ 2429 struct fs *fs; 2430 struct diradd *dap; 2431 struct pagedep *pagedep; 2432 struct inodedep *inodedep; 2433 struct mkdir *mkdir1, *mkdir2; 2434 2435 /* 2436 * Whiteouts have no dependencies. 2437 */ 2438 if (newinum == WINO) { 2439 if (newdirbp != NULL) 2440 bdwrite(newdirbp); 2441 return; 2442 } 2443 2444 fs = dp->i_fs; 2445 lbn = lblkno(fs, diroffset); 2446 offset = blkoff(fs, diroffset); 2447 MALLOC(dap, struct diradd *, sizeof(struct diradd), M_DIRADD, 2448 M_SOFTDEP_FLAGS); 2449 bzero(dap, sizeof(struct diradd)); 2450 dap->da_list.wk_type = D_DIRADD; 2451 dap->da_offset = offset; 2452 dap->da_newinum = newinum; 2453 dap->da_state = ATTACHED; 2454 if (newdirbp == NULL) { 2455 dap->da_state |= DEPCOMPLETE; 2456 ACQUIRE_LOCK(&lk); 2457 } else { 2458 dap->da_state |= MKDIR_BODY | MKDIR_PARENT; 2459 MALLOC(mkdir1, struct mkdir *, sizeof(struct mkdir), M_MKDIR, 2460 M_SOFTDEP_FLAGS); 2461 mkdir1->md_list.wk_type = D_MKDIR; 2462 mkdir1->md_state = MKDIR_BODY; 2463 mkdir1->md_diradd = dap; 2464 MALLOC(mkdir2, struct mkdir *, sizeof(struct mkdir), M_MKDIR, 2465 M_SOFTDEP_FLAGS); 2466 mkdir2->md_list.wk_type = D_MKDIR; 2467 mkdir2->md_state = MKDIR_PARENT; 2468 mkdir2->md_diradd = dap; 2469 /* 2470 * Dependency on "." and ".." being written to disk. 2471 */ 2472 mkdir1->md_buf = newdirbp; 2473 ACQUIRE_LOCK(&lk); 2474 LIST_INSERT_HEAD(&mkdirlisthd, mkdir1, md_mkdirs); 2475 WORKLIST_INSERT(&newdirbp->b_dep, &mkdir1->md_list); 2476 FREE_LOCK(&lk); 2477 bdwrite(newdirbp); 2478 /* 2479 * Dependency on link count increase for parent directory 2480 */ 2481 ACQUIRE_LOCK(&lk); 2482 if (inodedep_lookup(dp->i_fs, dp->i_number, 0, &inodedep) == 0 2483 || (inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) { 2484 dap->da_state &= ~MKDIR_PARENT; 2485 WORKITEM_FREE(mkdir2, D_MKDIR); 2486 } else { 2487 LIST_INSERT_HEAD(&mkdirlisthd, mkdir2, md_mkdirs); 2488 WORKLIST_INSERT(&inodedep->id_bufwait,&mkdir2->md_list); 2489 } 2490 } 2491 /* 2492 * Link into parent directory pagedep to await its being written. 2493 */ 2494 if (pagedep_lookup(dp, lbn, DEPALLOC, &pagedep) == 0) 2495 WORKLIST_INSERT(&bp->b_dep, &pagedep->pd_list); 2496 dap->da_pagedep = pagedep; 2497 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)], dap, 2498 da_pdlist); 2499 /* 2500 * Link into its inodedep. Put it on the id_bufwait list if the inode 2501 * is not yet written. If it is written, do the post-inode write 2502 * processing to put it on the id_pendinghd list. 2503 */ 2504 (void) inodedep_lookup(fs, newinum, DEPALLOC, &inodedep); 2505 if ((inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) 2506 diradd_inode_written(dap, inodedep); 2507 else 2508 WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list); 2509 FREE_LOCK(&lk); 2510 } 2511 2512 /* 2513 * This procedure is called to change the offset of a directory 2514 * entry when compacting a directory block which must be owned 2515 * exclusively by the caller. 
Note that the actual entry movement 2516 * must be done in this procedure to ensure that no I/O completions 2517 * occur while the move is in progress. 2518 */ 2519 void 2520 softdep_change_directoryentry_offset(dp, base, oldloc, newloc, entrysize) 2521 struct inode *dp; /* inode for directory */ 2522 caddr_t base; /* address of dp->i_offset */ 2523 caddr_t oldloc; /* address of old directory location */ 2524 caddr_t newloc; /* address of new directory location */ 2525 int entrysize; /* size of directory entry */ 2526 { 2527 int offset, oldoffset, newoffset; 2528 struct pagedep *pagedep; 2529 struct diradd *dap; 2530 ufs_lbn_t lbn; 2531 2532 ACQUIRE_LOCK(&lk); 2533 lbn = lblkno(dp->i_fs, dp->i_offset); 2534 offset = blkoff(dp->i_fs, dp->i_offset); 2535 if (pagedep_lookup(dp, lbn, 0, &pagedep) == 0) 2536 goto done; 2537 oldoffset = offset + (oldloc - base); 2538 newoffset = offset + (newloc - base); 2539 2540 LIST_FOREACH(dap, &pagedep->pd_diraddhd[DIRADDHASH(oldoffset)], da_pdlist) { 2541 if (dap->da_offset != oldoffset) 2542 continue; 2543 dap->da_offset = newoffset; 2544 if (DIRADDHASH(newoffset) == DIRADDHASH(oldoffset)) 2545 break; 2546 LIST_REMOVE(dap, da_pdlist); 2547 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(newoffset)], 2548 dap, da_pdlist); 2549 break; 2550 } 2551 if (dap == NULL) { 2552 2553 LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist) { 2554 if (dap->da_offset == oldoffset) { 2555 dap->da_offset = newoffset; 2556 break; 2557 } 2558 } 2559 } 2560 done: 2561 bcopy(oldloc, newloc, entrysize); 2562 FREE_LOCK(&lk); 2563 } 2564 2565 /* 2566 * Free a diradd dependency structure. This routine must be called 2567 * with splbio interrupts blocked. 2568 */ 2569 static void 2570 free_diradd(dap) 2571 struct diradd *dap; 2572 { 2573 struct dirrem *dirrem; 2574 struct pagedep *pagedep; 2575 struct inodedep *inodedep; 2576 struct mkdir *mkdir, *nextmd; 2577 2578 #ifdef DEBUG 2579 if (lk.lkt_held == NOHOLDER) 2580 panic("free_diradd: lock not held"); 2581 #endif 2582 WORKLIST_REMOVE(&dap->da_list); 2583 LIST_REMOVE(dap, da_pdlist); 2584 if ((dap->da_state & DIRCHG) == 0) { 2585 pagedep = dap->da_pagedep; 2586 } else { 2587 dirrem = dap->da_previous; 2588 pagedep = dirrem->dm_pagedep; 2589 dirrem->dm_dirinum = pagedep->pd_ino; 2590 add_to_worklist(&dirrem->dm_list); 2591 } 2592 if (inodedep_lookup(VFSTOUFS(pagedep->pd_mnt)->um_fs, dap->da_newinum, 2593 0, &inodedep) != 0) 2594 (void) free_inodedep(inodedep); 2595 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) { 2596 for (mkdir = LIST_FIRST(&mkdirlisthd); mkdir; mkdir = nextmd) { 2597 nextmd = LIST_NEXT(mkdir, md_mkdirs); 2598 if (mkdir->md_diradd != dap) 2599 continue; 2600 dap->da_state &= ~mkdir->md_state; 2601 WORKLIST_REMOVE(&mkdir->md_list); 2602 LIST_REMOVE(mkdir, md_mkdirs); 2603 WORKITEM_FREE(mkdir, D_MKDIR); 2604 } 2605 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) { 2606 FREE_LOCK(&lk); 2607 panic("free_diradd: unfound ref"); 2608 } 2609 } 2610 WORKITEM_FREE(dap, D_DIRADD); 2611 } 2612 2613 /* 2614 * Directory entry removal dependencies. 2615 * 2616 * When removing a directory entry, the entry's inode pointer must be 2617 * zero'ed on disk before the corresponding inode's link count is decremented 2618 * (possibly freeing the inode for re-use). This dependency is handled by 2619 * updating the directory entry but delaying the inode count reduction until 2620 * after the directory block has been written to disk. After this point, the 2621 * inode count can be decremented whenever it is convenient. 
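 *
 * In outline, the removal path implemented below is:
 *
 *	softdep_setup_remove()
 *		dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem);
 *		if the entry is already on disk (COMPLETE clear)
 *			queue dirrem on the pagedep's pd_dirremhd list
 *			until the cleared directory block is written
 *		else
 *			the entry never reached the disk, so
 *			handle_workitem_remove(dirrem) runs at once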
2622 */ 2623 2624 /* 2625 * This routine should be called immediately after removing 2626 * a directory entry. The inode's link count should not be 2627 * decremented by the calling procedure -- the soft updates 2628 * code will do this task when it is safe. 2629 */ 2630 void 2631 softdep_setup_remove(bp, dp, ip, isrmdir) 2632 struct buf *bp; /* buffer containing directory block */ 2633 struct inode *dp; /* inode for the directory being modified */ 2634 struct inode *ip; /* inode for directory entry being removed */ 2635 int isrmdir; /* indicates if doing RMDIR */ 2636 { 2637 struct dirrem *dirrem, *prevdirrem; 2638 2639 /* 2640 * Allocate a new dirrem if appropriate and ACQUIRE_LOCK. 2641 */ 2642 dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem); 2643 2644 /* 2645 * If the COMPLETE flag is clear, then there were no active 2646 * entries and we want to roll back to a zeroed entry until 2647 * the new inode is committed to disk. If the COMPLETE flag is 2648 * set then we have deleted an entry that never made it to 2649 * disk. If the entry we deleted resulted from a name change, 2650 * then the old name still resides on disk. We cannot delete 2651 * its inode (returned to us in prevdirrem) until the zeroed 2652 * directory entry gets to disk. The new inode has never been 2653 * referenced on the disk, so can be deleted immediately. 2654 */ 2655 if ((dirrem->dm_state & COMPLETE) == 0) { 2656 LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd, dirrem, 2657 dm_next); 2658 FREE_LOCK(&lk); 2659 } else { 2660 if (prevdirrem != NULL) 2661 LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd, 2662 prevdirrem, dm_next); 2663 dirrem->dm_dirinum = dirrem->dm_pagedep->pd_ino; 2664 FREE_LOCK(&lk); 2665 handle_workitem_remove(dirrem); 2666 } 2667 } 2668 2669 /* 2670 * Allocate a new dirrem if appropriate and return it along with 2671 * its associated pagedep. Called without a lock, returns with lock. 2672 */ 2673 static long num_dirrem; /* number of dirrem allocated */ 2674 static struct dirrem * 2675 newdirrem(bp, dp, ip, isrmdir, prevdirremp) 2676 struct buf *bp; /* buffer containing directory block */ 2677 struct inode *dp; /* inode for the directory being modified */ 2678 struct inode *ip; /* inode for directory entry being removed */ 2679 int isrmdir; /* indicates if doing RMDIR */ 2680 struct dirrem **prevdirremp; /* previously referenced inode, if any */ 2681 { 2682 int offset; 2683 ufs_lbn_t lbn; 2684 struct diradd *dap; 2685 struct dirrem *dirrem; 2686 struct pagedep *pagedep; 2687 2688 /* 2689 * Whiteouts have no deletion dependencies. 2690 */ 2691 if (ip == NULL) 2692 panic("newdirrem: whiteout"); 2693 /* 2694 * If we are over our limit, try to improve the situation. 2695 * Limiting the number of dirrem structures will also limit 2696 * the number of freefile and freeblks structures. 2697 */ 2698 if (num_dirrem > max_softdeps / 2 && speedup_syncer() == 0) 2699 (void) request_cleanup(FLUSH_REMOVE, 0); 2700 num_dirrem += 1; 2701 MALLOC(dirrem, struct dirrem *, sizeof(struct dirrem), 2702 M_DIRREM, M_SOFTDEP_FLAGS); 2703 bzero(dirrem, sizeof(struct dirrem)); 2704 dirrem->dm_list.wk_type = D_DIRREM; 2705 dirrem->dm_state = isrmdir ? 
RMDIR : 0; 2706 dirrem->dm_mnt = ITOV(ip)->v_mount; 2707 dirrem->dm_oldinum = ip->i_number; 2708 *prevdirremp = NULL; 2709 2710 ACQUIRE_LOCK(&lk); 2711 lbn = lblkno(dp->i_fs, dp->i_offset); 2712 offset = blkoff(dp->i_fs, dp->i_offset); 2713 if (pagedep_lookup(dp, lbn, DEPALLOC, &pagedep) == 0) 2714 WORKLIST_INSERT(&bp->b_dep, &pagedep->pd_list); 2715 dirrem->dm_pagedep = pagedep; 2716 /* 2717 * Check for a diradd dependency for the same directory entry. 2718 * If present, then both dependencies become obsolete and can 2719 * be de-allocated. Check for an entry on both the pd_diraddhd 2720 * list and the pd_pendinghd list. 2721 */ 2722 2723 LIST_FOREACH(dap, &pagedep->pd_diraddhd[DIRADDHASH(offset)], da_pdlist) 2724 if (dap->da_offset == offset) 2725 break; 2726 if (dap == NULL) { 2727 2728 LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist) 2729 if (dap->da_offset == offset) 2730 break; 2731 if (dap == NULL) 2732 return (dirrem); 2733 } 2734 /* 2735 * Must be ATTACHED at this point. 2736 */ 2737 if ((dap->da_state & ATTACHED) == 0) { 2738 FREE_LOCK(&lk); 2739 panic("newdirrem: not ATTACHED"); 2740 } 2741 if (dap->da_newinum != ip->i_number) { 2742 FREE_LOCK(&lk); 2743 panic("newdirrem: inum %"PRId64" should be %"PRId64, 2744 ip->i_number, dap->da_newinum); 2745 } 2746 /* 2747 * If we are deleting a changed name that never made it to disk, 2748 * then return the dirrem describing the previous inode (which 2749 * represents the inode currently referenced from this entry on disk). 2750 */ 2751 if ((dap->da_state & DIRCHG) != 0) { 2752 *prevdirremp = dap->da_previous; 2753 dap->da_state &= ~DIRCHG; 2754 dap->da_pagedep = pagedep; 2755 } 2756 /* 2757 * We are deleting an entry that never made it to disk. 2758 * Mark it COMPLETE so we can delete its inode immediately. 2759 */ 2760 dirrem->dm_state |= COMPLETE; 2761 free_diradd(dap); 2762 return (dirrem); 2763 } 2764 2765 /* 2766 * Directory entry change dependencies. 2767 * 2768 * Changing an existing directory entry requires that an add operation 2769 * be completed first followed by a deletion. The semantics for the addition 2770 * are identical to the description of adding a new entry above except 2771 * that the rollback is to the old inode number rather than zero. Once 2772 * the addition dependency is completed, the removal is done as described 2773 * in the removal routine above. 2774 */ 2775 2776 /* 2777 * This routine should be called immediately after changing 2778 * a directory entry. The inode's link count should not be 2779 * decremented by the calling procedure -- the soft updates 2780 * code will perform this task when it is safe. 2781 */ 2782 void 2783 softdep_setup_directory_change(bp, dp, ip, newinum, isrmdir) 2784 struct buf *bp; /* buffer containing directory block */ 2785 struct inode *dp; /* inode for the directory being modified */ 2786 struct inode *ip; /* inode for directory entry being removed */ 2787 ino_t newinum; /* new inode number for changed entry */ 2788 int isrmdir; /* indicates if doing RMDIR */ 2789 { 2790 int offset; 2791 struct diradd *dap = NULL; 2792 struct dirrem *dirrem, *prevdirrem; 2793 struct pagedep *pagedep; 2794 struct inodedep *inodedep; 2795 2796 offset = blkoff(dp->i_fs, dp->i_offset); 2797 2798 /* 2799 * Whiteouts do not need diradd dependencies.
2800 */ 2801 if (newinum != WINO) { 2802 MALLOC(dap, struct diradd *, sizeof(struct diradd), 2803 M_DIRADD, M_SOFTDEP_FLAGS); 2804 bzero(dap, sizeof(struct diradd)); 2805 dap->da_list.wk_type = D_DIRADD; 2806 dap->da_state = DIRCHG | ATTACHED | DEPCOMPLETE; 2807 dap->da_offset = offset; 2808 dap->da_newinum = newinum; 2809 } 2810 2811 /* 2812 * Allocate a new dirrem and ACQUIRE_LOCK. 2813 */ 2814 dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem); 2815 pagedep = dirrem->dm_pagedep; 2816 /* 2817 * The possible values for isrmdir: 2818 * 0 - non-directory file rename 2819 * 1 - directory rename within same directory 2820 * inum - directory rename to new directory of given inode number 2821 * When renaming to a new directory, we are both deleting and 2822 * creating a new directory entry, so the link count on the new 2823 * directory should not change. Thus we do not need the followup 2824 * dirrem which is usually done in handle_workitem_remove. We set 2825 * the DIRCHG flag to tell handle_workitem_remove to skip the 2826 * followup dirrem. 2827 */ 2828 if (isrmdir > 1) 2829 dirrem->dm_state |= DIRCHG; 2830 2831 /* 2832 * Whiteouts have no additional dependencies, 2833 * so just put the dirrem on the correct list. 2834 */ 2835 if (newinum == WINO) { 2836 if ((dirrem->dm_state & COMPLETE) == 0) { 2837 LIST_INSERT_HEAD(&pagedep->pd_dirremhd, dirrem, 2838 dm_next); 2839 } else { 2840 dirrem->dm_dirinum = pagedep->pd_ino; 2841 add_to_worklist(&dirrem->dm_list); 2842 } 2843 FREE_LOCK(&lk); 2844 return; 2845 } 2846 2847 /* 2848 * If the COMPLETE flag is clear, then there were no active 2849 * entries and we want to roll back to the previous inode until 2850 * the new inode is committed to disk. If the COMPLETE flag is 2851 * set, then we have deleted an entry that never made it to disk. 2852 * If the entry we deleted resulted from a name change, then the old 2853 * inode reference still resides on disk. Any rollback that we do 2854 * needs to be to that old inode (returned to us in prevdirrem). If 2855 * the entry we deleted resulted from a create, then there is 2856 * no entry on the disk, so we want to roll back to zero rather 2857 * than the uncommitted inode. In either of the COMPLETE cases we 2858 * want to immediately free the unwritten and unreferenced inode. 2859 */ 2860 if ((dirrem->dm_state & COMPLETE) == 0) { 2861 dap->da_previous = dirrem; 2862 } else { 2863 if (prevdirrem != NULL) { 2864 dap->da_previous = prevdirrem; 2865 } else { 2866 dap->da_state &= ~DIRCHG; 2867 dap->da_pagedep = pagedep; 2868 } 2869 dirrem->dm_dirinum = pagedep->pd_ino; 2870 add_to_worklist(&dirrem->dm_list); 2871 } 2872 /* 2873 * Link into its inodedep. Put it on the id_bufwait list if the inode 2874 * is not yet written. If it is written, do the post-inode write 2875 * processing to put it on the id_pendinghd list. 2876 */ 2877 if (inodedep_lookup(dp->i_fs, newinum, DEPALLOC, &inodedep) == 0 || 2878 (inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) { 2879 dap->da_state |= COMPLETE; 2880 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist); 2881 WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list); 2882 } else { 2883 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)], 2884 dap, da_pdlist); 2885 WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list); 2886 } 2887 FREE_LOCK(&lk); 2888 } 2889 2890 /* 2891 * Called whenever the link count on an inode is changed. 
2892 * It creates an inode dependency so that the new reference(s) 2893 * to the inode cannot be committed to disk until the updated 2894 * inode has been written. 2895 */ 2896 void 2897 softdep_change_linkcnt(ip) 2898 struct inode *ip; /* the inode with the increased link count */ 2899 { 2900 struct inodedep *inodedep; 2901 2902 ACQUIRE_LOCK(&lk); 2903 (void) inodedep_lookup(ip->i_fs, ip->i_number, DEPALLOC, &inodedep); 2904 if (ip->i_nlink < ip->i_effnlink) { 2905 FREE_LOCK(&lk); 2906 panic("softdep_change_linkcnt: bad delta"); 2907 } 2908 inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink; 2909 FREE_LOCK(&lk); 2910 } 2911 2912 /* 2913 * This workitem decrements the inode's link count. 2914 * If the link count reaches zero, the file is removed. 2915 */ 2916 static void 2917 handle_workitem_remove(dirrem) 2918 struct dirrem *dirrem; 2919 { 2920 struct inodedep *inodedep; 2921 struct vnode *vp; 2922 struct inode *ip; 2923 ino_t oldinum; 2924 int error; 2925 2926 if ((error = VFS_VGET(dirrem->dm_mnt, dirrem->dm_oldinum, &vp)) != 0) { 2927 softdep_error("handle_workitem_remove: vget", error); 2928 return; 2929 } 2930 ip = VTOI(vp); 2931 ACQUIRE_LOCK(&lk); 2932 if ((inodedep_lookup(ip->i_fs, dirrem->dm_oldinum, 0, &inodedep)) == 0){ 2933 FREE_LOCK(&lk); 2934 panic("handle_workitem_remove: lost inodedep"); 2935 } 2936 /* 2937 * Normal file deletion. 2938 */ 2939 if ((dirrem->dm_state & RMDIR) == 0) { 2940 ip->i_nlink--; 2941 ip->i_flag |= IN_CHANGE; 2942 if (ip->i_nlink < ip->i_effnlink) { 2943 FREE_LOCK(&lk); 2944 panic("handle_workitem_remove: bad file delta"); 2945 } 2946 inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink; 2947 FREE_LOCK(&lk); 2948 vput(vp); 2949 num_dirrem -= 1; 2950 WORKITEM_FREE(dirrem, D_DIRREM); 2951 return; 2952 } 2953 /* 2954 * Directory deletion. Decrement reference count for both the 2955 * just deleted parent directory entry and the reference for ".". 2956 * Next truncate the directory to length zero. When the 2957 * truncation completes, arrange to have the reference count on 2958 * the parent decremented to account for the loss of "..". 2959 */ 2960 ip->i_nlink -= 2; 2961 ip->i_flag |= IN_CHANGE; 2962 if (ip->i_nlink < ip->i_effnlink) { 2963 FREE_LOCK(&lk); 2964 panic("handle_workitem_remove: bad dir delta"); 2965 } 2966 inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink; 2967 FREE_LOCK(&lk); 2968 if ((error = ffs_truncate(vp, (off_t)0, 0, proc0.p_ucred)) != 0) 2969 softdep_error("handle_workitem_remove: truncate", error); 2970 /* 2971 * Rename a directory to a new parent. Since we are both deleting 2972 * and creating a new directory entry, the link count on the new 2973 * directory should not change. Thus we skip the followup dirrem. 2974 */ 2975 if (dirrem->dm_state & DIRCHG) { 2976 vput(vp); 2977 num_dirrem -= 1; 2978 WORKITEM_FREE(dirrem, D_DIRREM); 2979 return; 2980 } 2981 /* 2982 * If the inodedep does not exist, then the zero'ed inode has 2983 * been written to disk. If the allocated inode has never been 2984 * written to disk, then the on-disk inode is zero'ed. In either 2985 * case we can remove the file immediately.
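 *
 * (Note: the code below re-uses the dirrem to drop the parent's ".."
 * reference: dm_oldinum is rewritten to the parent directory, taken
 * from dm_dirinum, and handle_workitem_remove() is re-run on the same
 * dirrem, either right away or, via the id_inowait list, once the
 * zero'ed inode reaches the disk.)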
2986 */ 2987 ACQUIRE_LOCK(&lk); 2988 dirrem->dm_state = 0; 2989 oldinum = dirrem->dm_oldinum; 2990 dirrem->dm_oldinum = dirrem->dm_dirinum; 2991 if (inodedep_lookup(ip->i_fs, oldinum, 0, &inodedep) == 0 || 2992 check_inode_unwritten(inodedep)) { 2993 FREE_LOCK(&lk); 2994 vput(vp); 2995 handle_workitem_remove(dirrem); 2996 return; 2997 } 2998 WORKLIST_INSERT(&inodedep->id_inowait, &dirrem->dm_list); 2999 FREE_LOCK(&lk); 3000 ip->i_flag |= IN_CHANGE; 3001 ffs_update(vp, 0); 3002 vput(vp); 3003 } 3004 3005 /* 3006 * Inode de-allocation dependencies. 3007 * 3008 * When an inode's link count is reduced to zero, it can be de-allocated. We 3009 * found it convenient to postpone de-allocation until after the inode is 3010 * written to disk with its new link count (zero). At this point, all of the 3011 * on-disk inode's block pointers are nullified and, with careful dependency 3012 * list ordering, all dependencies related to the inode will be satisfied and 3013 * the corresponding dependency structures de-allocated. So, if/when the 3014 * inode is reused, there will be no mixing of old dependencies with new 3015 * ones. This artificial dependency is set up by the block de-allocation 3016 * procedure above (softdep_setup_freeblocks) and completed by the 3017 * following procedure. 3018 */ 3019 static void 3020 handle_workitem_freefile(freefile) 3021 struct freefile *freefile; 3022 { 3023 struct vnode vp; 3024 struct inode tip; 3025 struct inodedep *idp; 3026 int error; 3027 3028 #ifdef DEBUG 3029 ACQUIRE_LOCK(&lk); 3030 error = inodedep_lookup(freefile->fx_fs, freefile->fx_oldinum, 0, &idp); 3031 FREE_LOCK(&lk); 3032 if (error) 3033 panic("handle_workitem_freefile: inodedep survived"); 3034 #endif 3035 tip.i_devvp = freefile->fx_devvp; 3036 tip.i_dev = freefile->fx_devvp->v_rdev; 3037 tip.i_fs = freefile->fx_fs; 3038 vp.v_data = &tip; 3039 if ((error = ffs_freefile(&vp, freefile->fx_oldinum, freefile->fx_mode)) != 0) 3040 softdep_error("handle_workitem_freefile", error); 3041 WORKITEM_FREE(freefile, D_FREEFILE); 3042 } 3043 3044 /* 3045 * Helper function which unlinks marker element from work list and returns 3046 * the next element on the list. 3047 */ 3048 static __inline struct worklist * 3049 markernext(struct worklist *marker) 3050 { 3051 struct worklist *next; 3052 3053 next = LIST_NEXT(marker, wk_list); 3054 LIST_REMOVE(marker, wk_list); 3055 return next; 3056 } 3057 3058 /* 3059 * Disk writes. 3060 * 3061 * The dependency structures constructed above are most actively used when file 3062 * system blocks are written to disk. No constraints are placed on when a 3063 * block can be written, but unsatisfied update dependencies are made safe by 3064 * modifying (or replacing) the source memory for the duration of the disk 3065 * write. When the disk write completes, the memory block is again brought 3066 * up-to-date. 3067 * 3068 * In-core inode structure reclamation. 3069 * 3070 * Because there are a finite number of "in-core" inode structures, they are 3071 * reused regularly. By transferring all inode-related dependencies to the 3072 * in-memory inode block and indexing them separately (via "inodedep"s), we 3073 * can allow "in-core" inode structures to be reused at any time and avoid 3074 * any increase in contention. 3075 * 3076 * Called just before entering the device driver to initiate a new disk I/O. 3077 * The buffer must be locked, thus, no I/O completion operations can occur 3078 * while we are manipulating its associated dependencies. 
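 *
 * The write path in miniature (a sketch of the two halves implemented
 * by the routines below):
 *
 *	softdep_disk_io_initiation(bp)
 *		roll unsafe fields in bp->b_data back to their safe
 *		(old) values before the driver sees the buffer
 *	...device writes the safe version...
 *	softdep_disk_write_complete(bp)
 *		restore the up-to-date values, process dependencies
 *		satisfied by the write, and re-dirty bp if anything
 *		had to be rolled back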
3079 */ 3080 static void 3081 softdep_disk_io_initiation(bp) 3082 struct buf *bp; /* structure describing disk write to occur */ 3083 { 3084 struct worklist *wk; 3085 struct worklist marker; 3086 struct indirdep *indirdep; 3087 3088 /* 3089 * We only care about write operations. There should never 3090 * be dependencies for reads. 3091 */ 3092 if (bp->b_cmd == BUF_CMD_READ) 3093 panic("softdep_disk_io_initiation: read"); 3094 3095 marker.wk_type = D_LAST + 1; /* Not a normal workitem */ 3096 3097 /* 3098 * Do any necessary pre-I/O processing. 3099 */ 3100 for (wk = LIST_FIRST(&bp->b_dep); wk; wk = markernext(&marker)) { 3101 LIST_INSERT_AFTER(wk, &marker, wk_list); 3102 3103 switch (wk->wk_type) { 3104 3105 case D_PAGEDEP: 3106 initiate_write_filepage(WK_PAGEDEP(wk), bp); 3107 continue; 3108 3109 case D_INODEDEP: 3110 initiate_write_inodeblock(WK_INODEDEP(wk), bp); 3111 continue; 3112 3113 case D_INDIRDEP: 3114 indirdep = WK_INDIRDEP(wk); 3115 if (indirdep->ir_state & GOINGAWAY) 3116 panic("disk_io_initiation: indirdep gone"); 3117 /* 3118 * If there are no remaining dependencies, this 3119 * will be writing the real pointers, so the 3120 * dependency can be freed. 3121 */ 3122 if (LIST_FIRST(&indirdep->ir_deplisthd) == NULL) { 3123 indirdep->ir_savebp->b_flags |= B_INVAL | B_NOCACHE; 3124 brelse(indirdep->ir_savebp); 3125 /* inline expand WORKLIST_REMOVE(wk); */ 3126 wk->wk_state &= ~ONWORKLIST; 3127 LIST_REMOVE(wk, wk_list); 3128 WORKITEM_FREE(indirdep, D_INDIRDEP); 3129 continue; 3130 } 3131 /* 3132 * Replace up-to-date version with safe version. 3133 */ 3134 MALLOC(indirdep->ir_saveddata, caddr_t, bp->b_bcount, 3135 M_INDIRDEP, M_SOFTDEP_FLAGS); 3136 ACQUIRE_LOCK(&lk); 3137 indirdep->ir_state &= ~ATTACHED; 3138 indirdep->ir_state |= UNDONE; 3139 bcopy(bp->b_data, indirdep->ir_saveddata, bp->b_bcount); 3140 bcopy(indirdep->ir_savebp->b_data, bp->b_data, 3141 bp->b_bcount); 3142 FREE_LOCK(&lk); 3143 continue; 3144 3145 case D_MKDIR: 3146 case D_BMSAFEMAP: 3147 case D_ALLOCDIRECT: 3148 case D_ALLOCINDIR: 3149 continue; 3150 3151 default: 3152 panic("handle_disk_io_initiation: Unexpected type %s", 3153 TYPENAME(wk->wk_type)); 3154 /* NOTREACHED */ 3155 } 3156 } 3157 } 3158 3159 /* 3160 * Called from within the procedure above to deal with unsatisfied 3161 * allocation dependencies in a directory. The buffer must be locked, 3162 * thus, no I/O completion operations can occur while we are 3163 * manipulating its associated dependencies. 3164 */ 3165 static void 3166 initiate_write_filepage(pagedep, bp) 3167 struct pagedep *pagedep; 3168 struct buf *bp; 3169 { 3170 struct diradd *dap; 3171 struct direct *ep; 3172 int i; 3173 3174 if (pagedep->pd_state & IOSTARTED) { 3175 /* 3176 * This can only happen if there is a driver that does not 3177 * understand chaining. Here biodone will reissue the call 3178 * to strategy for the incomplete buffers. 
3179 */ 3180 kprintf("initiate_write_filepage: already started\n"); 3181 return; 3182 } 3183 pagedep->pd_state |= IOSTARTED; 3184 ACQUIRE_LOCK(&lk); 3185 for (i = 0; i < DAHASHSZ; i++) { 3186 LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) { 3187 ep = (struct direct *) 3188 ((char *)bp->b_data + dap->da_offset); 3189 if (ep->d_ino != dap->da_newinum) { 3190 FREE_LOCK(&lk); 3191 panic("%s: dir inum %d != new %"PRId64, 3192 "initiate_write_filepage", 3193 ep->d_ino, dap->da_newinum); 3194 } 3195 if (dap->da_state & DIRCHG) 3196 ep->d_ino = dap->da_previous->dm_oldinum; 3197 else 3198 ep->d_ino = 0; 3199 dap->da_state &= ~ATTACHED; 3200 dap->da_state |= UNDONE; 3201 } 3202 } 3203 FREE_LOCK(&lk); 3204 } 3205 3206 /* 3207 * Called from within the procedure above to deal with unsatisfied 3208 * allocation dependencies in an inodeblock. The buffer must be 3209 * locked, thus, no I/O completion operations can occur while we 3210 * are manipulating its associated dependencies. 3211 */ 3212 static void 3213 initiate_write_inodeblock(inodedep, bp) 3214 struct inodedep *inodedep; 3215 struct buf *bp; /* The inode block */ 3216 { 3217 struct allocdirect *adp, *lastadp; 3218 struct ufs1_dinode *dp; 3219 struct ufs1_dinode *sip; 3220 struct fs *fs; 3221 ufs_lbn_t prevlbn = 0; 3222 int i, deplist; 3223 3224 if (inodedep->id_state & IOSTARTED) 3225 panic("initiate_write_inodeblock: already started"); 3226 inodedep->id_state |= IOSTARTED; 3227 fs = inodedep->id_fs; 3228 dp = (struct ufs1_dinode *)bp->b_data + 3229 ino_to_fsbo(fs, inodedep->id_ino); 3230 /* 3231 * If the bitmap is not yet written, then the allocated 3232 * inode cannot be written to disk. 3233 */ 3234 if ((inodedep->id_state & DEPCOMPLETE) == 0) { 3235 if (inodedep->id_savedino != NULL) 3236 panic("initiate_write_inodeblock: already doing I/O"); 3237 MALLOC(sip, struct ufs1_dinode *, 3238 sizeof(struct ufs1_dinode), M_INODEDEP, M_SOFTDEP_FLAGS); 3239 inodedep->id_savedino = sip; 3240 *inodedep->id_savedino = *dp; 3241 bzero((caddr_t)dp, sizeof(struct ufs1_dinode)); 3242 dp->di_gen = inodedep->id_savedino->di_gen; 3243 return; 3244 } 3245 /* 3246 * If no dependencies, then there is nothing to roll back. 3247 */ 3248 inodedep->id_savedsize = dp->di_size; 3249 if (TAILQ_FIRST(&inodedep->id_inoupdt) == NULL) 3250 return; 3251 /* 3252 * Set the dependencies to busy. 
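 *
 * (Note: under DIAGNOSTIC the loop also accumulates "deplist", a
 * bitmask with bit ad_lbn set for each direct block dependency and
 * bit (NDADDR + i) set for the i'th indirect block.  The rollback
 * code further down uses it to verify that every pointer it zeroes
 * beyond the rollback point was covered by a dependency.)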
3253 */ 3254 ACQUIRE_LOCK(&lk); 3255 for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; 3256 adp = TAILQ_NEXT(adp, ad_next)) { 3257 #ifdef DIAGNOSTIC 3258 if (deplist != 0 && prevlbn >= adp->ad_lbn) { 3259 FREE_LOCK(&lk); 3260 panic("softdep_write_inodeblock: lbn order"); 3261 } 3262 prevlbn = adp->ad_lbn; 3263 if (adp->ad_lbn < NDADDR && 3264 dp->di_db[adp->ad_lbn] != adp->ad_newblkno) { 3265 FREE_LOCK(&lk); 3266 panic("%s: direct pointer #%ld mismatch %d != %d", 3267 "softdep_write_inodeblock", adp->ad_lbn, 3268 dp->di_db[adp->ad_lbn], adp->ad_newblkno); 3269 } 3270 if (adp->ad_lbn >= NDADDR && 3271 dp->di_ib[adp->ad_lbn - NDADDR] != adp->ad_newblkno) { 3272 FREE_LOCK(&lk); 3273 panic("%s: indirect pointer #%ld mismatch %d != %d", 3274 "softdep_write_inodeblock", adp->ad_lbn - NDADDR, 3275 dp->di_ib[adp->ad_lbn - NDADDR], adp->ad_newblkno); 3276 } 3277 deplist |= 1 << adp->ad_lbn; 3278 if ((adp->ad_state & ATTACHED) == 0) { 3279 FREE_LOCK(&lk); 3280 panic("softdep_write_inodeblock: Unknown state 0x%x", 3281 adp->ad_state); 3282 } 3283 #endif /* DIAGNOSTIC */ 3284 adp->ad_state &= ~ATTACHED; 3285 adp->ad_state |= UNDONE; 3286 } 3287 /* 3288 * The on-disk inode cannot claim to be any larger than the last 3289 * fragment that has been written. Otherwise, the on-disk inode 3290 * might have fragments that were not the last block in the file 3291 * which would corrupt the filesystem. 3292 */ 3293 for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; 3294 lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) { 3295 if (adp->ad_lbn >= NDADDR) 3296 break; 3297 dp->di_db[adp->ad_lbn] = adp->ad_oldblkno; 3298 /* keep going until hitting a rollback to a frag */ 3299 if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize) 3300 continue; 3301 dp->di_size = fs->fs_bsize * adp->ad_lbn + adp->ad_oldsize; 3302 for (i = adp->ad_lbn + 1; i < NDADDR; i++) { 3303 #ifdef DIAGNOSTIC 3304 if (dp->di_db[i] != 0 && (deplist & (1 << i)) == 0) { 3305 FREE_LOCK(&lk); 3306 panic("softdep_write_inodeblock: lost dep1"); 3307 } 3308 #endif /* DIAGNOSTIC */ 3309 dp->di_db[i] = 0; 3310 } 3311 for (i = 0; i < NIADDR; i++) { 3312 #ifdef DIAGNOSTIC 3313 if (dp->di_ib[i] != 0 && 3314 (deplist & ((1 << NDADDR) << i)) == 0) { 3315 FREE_LOCK(&lk); 3316 panic("softdep_write_inodeblock: lost dep2"); 3317 } 3318 #endif /* DIAGNOSTIC */ 3319 dp->di_ib[i] = 0; 3320 } 3321 FREE_LOCK(&lk); 3322 return; 3323 } 3324 /* 3325 * If we have zero'ed out the last allocated block of the file, 3326 * roll back the size to the last currently allocated block. 3327 * We know that this last allocated block is full-sized, as 3328 * we already checked for fragments in the loop above. 3329 */ 3330 if (lastadp != NULL && 3331 dp->di_size <= (lastadp->ad_lbn + 1) * fs->fs_bsize) { 3332 for (i = lastadp->ad_lbn; i >= 0; i--) 3333 if (dp->di_db[i] != 0) 3334 break; 3335 dp->di_size = (i + 1) * fs->fs_bsize; 3336 } 3337 /* 3338 * The only dependencies are for indirect blocks. 3339 * 3340 * The file size for indirect block additions is not guaranteed. 3341 * Such a guarantee would be non-trivial to achieve. The conventional 3342 * synchronous write implementation also does not make this guarantee. 3343 * Fsck should catch and fix discrepancies. Arguably, the file size 3344 * can be over-estimated without destroying integrity when the file 3345 * moves into the indirect blocks (i.e., is large). If we want to 3346 * postpone fsck, we are stuck with this argument.
3347 */ 3348 for (; adp; adp = TAILQ_NEXT(adp, ad_next)) 3349 dp->di_ib[adp->ad_lbn - NDADDR] = 0; 3350 FREE_LOCK(&lk); 3351 } 3352 3353 /* 3354 * This routine is called during the completion interrupt 3355 * service routine for a disk write (from the procedure called 3356 * by the device driver to inform the filesystem caches of 3357 * a request completion). It should be called early in this 3358 * procedure, before the block is made available to other 3359 * processes or other routines are called. 3360 */ 3361 static void 3362 softdep_disk_write_complete(bp) 3363 struct buf *bp; /* describes the completed disk write */ 3364 { 3365 struct worklist *wk; 3366 struct workhead reattach; 3367 struct newblk *newblk; 3368 struct allocindir *aip; 3369 struct allocdirect *adp; 3370 struct indirdep *indirdep; 3371 struct inodedep *inodedep; 3372 struct bmsafemap *bmsafemap; 3373 3374 #ifdef DEBUG 3375 if (lk.lkt_held != NOHOLDER) 3376 panic("softdep_disk_write_complete: lock is held"); 3377 lk.lkt_held = SPECIAL_FLAG; 3378 #endif 3379 LIST_INIT(&reattach); 3380 while ((wk = LIST_FIRST(&bp->b_dep)) != NULL) { 3381 WORKLIST_REMOVE(wk); 3382 switch (wk->wk_type) { 3383 3384 case D_PAGEDEP: 3385 if (handle_written_filepage(WK_PAGEDEP(wk), bp)) 3386 WORKLIST_INSERT(&reattach, wk); 3387 continue; 3388 3389 case D_INODEDEP: 3390 if (handle_written_inodeblock(WK_INODEDEP(wk), bp)) 3391 WORKLIST_INSERT(&reattach, wk); 3392 continue; 3393 3394 case D_BMSAFEMAP: 3395 bmsafemap = WK_BMSAFEMAP(wk); 3396 while ((newblk = LIST_FIRST(&bmsafemap->sm_newblkhd))) { 3397 newblk->nb_state |= DEPCOMPLETE; 3398 newblk->nb_bmsafemap = NULL; 3399 LIST_REMOVE(newblk, nb_deps); 3400 } 3401 while ((adp = 3402 LIST_FIRST(&bmsafemap->sm_allocdirecthd))) { 3403 adp->ad_state |= DEPCOMPLETE; 3404 adp->ad_buf = NULL; 3405 LIST_REMOVE(adp, ad_deps); 3406 handle_allocdirect_partdone(adp); 3407 } 3408 while ((aip = 3409 LIST_FIRST(&bmsafemap->sm_allocindirhd))) { 3410 aip->ai_state |= DEPCOMPLETE; 3411 aip->ai_buf = NULL; 3412 LIST_REMOVE(aip, ai_deps); 3413 handle_allocindir_partdone(aip); 3414 } 3415 while ((inodedep = 3416 LIST_FIRST(&bmsafemap->sm_inodedephd)) != NULL) { 3417 inodedep->id_state |= DEPCOMPLETE; 3418 LIST_REMOVE(inodedep, id_deps); 3419 inodedep->id_buf = NULL; 3420 } 3421 WORKITEM_FREE(bmsafemap, D_BMSAFEMAP); 3422 continue; 3423 3424 case D_MKDIR: 3425 handle_written_mkdir(WK_MKDIR(wk), MKDIR_BODY); 3426 continue; 3427 3428 case D_ALLOCDIRECT: 3429 adp = WK_ALLOCDIRECT(wk); 3430 adp->ad_state |= COMPLETE; 3431 handle_allocdirect_partdone(adp); 3432 continue; 3433 3434 case D_ALLOCINDIR: 3435 aip = WK_ALLOCINDIR(wk); 3436 aip->ai_state |= COMPLETE; 3437 handle_allocindir_partdone(aip); 3438 continue; 3439 3440 case D_INDIRDEP: 3441 indirdep = WK_INDIRDEP(wk); 3442 if (indirdep->ir_state & GOINGAWAY) { 3443 lk.lkt_held = NOHOLDER; 3444 panic("disk_write_complete: indirdep gone"); 3445 } 3446 bcopy(indirdep->ir_saveddata, bp->b_data, bp->b_bcount); 3447 FREE(indirdep->ir_saveddata, M_INDIRDEP); 3448 indirdep->ir_saveddata = 0; 3449 indirdep->ir_state &= ~UNDONE; 3450 indirdep->ir_state |= ATTACHED; 3451 while ((aip = LIST_FIRST(&indirdep->ir_donehd)) != 0) { 3452 handle_allocindir_partdone(aip); 3453 if (aip == LIST_FIRST(&indirdep->ir_donehd)) { 3454 lk.lkt_held = NOHOLDER; 3455 panic("disk_write_complete: not gone"); 3456 } 3457 } 3458 WORKLIST_INSERT(&reattach, wk); 3459 if ((bp->b_flags & B_DELWRI) == 0) 3460 stat_indir_blk_ptrs++; 3461 bdirty(bp); 3462 continue; 3463 3464 default: 3465 lk.lkt_held = NOHOLDER; 
3466 		panic("handle_disk_write_complete: Unknown type %s",
3467 		    TYPENAME(wk->wk_type));
3468 		/* NOTREACHED */
3469 		}
3470 	}
3471 	/*
3472 	 * Reattach any requests that must be redone.
3473 	 */
3474 	while ((wk = LIST_FIRST(&reattach)) != NULL) {
3475 		WORKLIST_REMOVE(wk);
3476 		WORKLIST_INSERT(&bp->b_dep, wk);
3477 	}
3478 #ifdef DEBUG
3479 	if (lk.lkt_held != SPECIAL_FLAG)
3480 		panic("softdep_disk_write_complete: lock lost");
3481 	lk.lkt_held = NOHOLDER;
3482 #endif
3483 }
3484 
3485 /*
3486  * Called from within softdep_disk_write_complete above. Note that
3487  * this routine is always called from interrupt level with further
3488  * splbio interrupts blocked.
3489  */
3490 static void
3491 handle_allocdirect_partdone(adp)
3492 	struct allocdirect *adp;	/* the completed allocdirect */
3493 {
3494 	struct allocdirect *listadp;
3495 	struct inodedep *inodedep;
3496 	long bsize;
3497 
3498 	if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE)
3499 		return;
3500 	if (adp->ad_buf != NULL) {
3501 		lk.lkt_held = NOHOLDER;
3502 		panic("handle_allocdirect_partdone: dangling dep");
3503 	}
3504 	/*
3505 	 * The on-disk inode cannot claim to be any larger than the last
3506 	 * fragment that has been written. Otherwise, the on-disk inode
3507 	 * might have fragments that were not the last block in the file
3508 	 * which would corrupt the filesystem. Thus, we cannot free any
3509 	 * allocdirects after one whose ad_oldblkno claims a fragment as
3510 	 * these blocks must be rolled back to zero before writing the inode.
3511 	 * We check the currently active set of allocdirects in id_inoupdt.
3512 	 */
3513 	inodedep = adp->ad_inodedep;
3514 	bsize = inodedep->id_fs->fs_bsize;
3515 	TAILQ_FOREACH(listadp, &inodedep->id_inoupdt, ad_next) {
3516 		/* found our block */
3517 		if (listadp == adp)
3518 			break;
3519 		/* continue if ad_oldblkno does not claim a fragment */
3520 		if (listadp->ad_oldsize == 0 ||
3521 		    listadp->ad_oldsize == bsize)
3522 			continue;
3523 		/* hit a fragment */
3524 		return;
3525 	}
3526 	/*
3527 	 * If we have reached the end of the current list without
3528 	 * finding the just finished dependency, then it must be
3529 	 * on the future dependency list. Future dependencies cannot
3530 	 * be freed until they are moved to the current list.
3531 	 */
3532 	if (listadp == NULL) {
3533 #ifdef DEBUG
3534 		TAILQ_FOREACH(listadp, &inodedep->id_newinoupdt, ad_next)
3535 			/* found our block */
3536 			if (listadp == adp)
3537 				break;
3538 		if (listadp == NULL) {
3539 			lk.lkt_held = NOHOLDER;
3540 			panic("handle_allocdirect_partdone: lost dep");
3541 		}
3542 #endif /* DEBUG */
3543 		return;
3544 	}
3545 	/*
3546 	 * If we have found the just finished dependency, then free
3547 	 * it along with anything that follows it that is complete.
3548 	 */
3549 	for (; adp; adp = listadp) {
3550 		listadp = TAILQ_NEXT(adp, ad_next);
3551 		if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE)
3552 			return;
3553 		free_allocdirect(&inodedep->id_inoupdt, adp, 1);
3554 	}
3555 }
3556 
3557 /*
3558  * Called from within softdep_disk_write_complete above. Note that
3559  * this routine is always called from interrupt level with further
3560  * splbio interrupts blocked.
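 *
 * If the indirect block itself is still rolled back (UNDONE), the new
 * block pointer cannot be committed yet; the allocindir is parked on
 * ir_donehd instead and is committed by the D_INDIRDEP case of
 * softdep_disk_write_complete() once the rollback has been undone.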
3561 */ 3562 static void 3563 handle_allocindir_partdone(aip) 3564 struct allocindir *aip; /* the completed allocindir */ 3565 { 3566 struct indirdep *indirdep; 3567 3568 if ((aip->ai_state & ALLCOMPLETE) != ALLCOMPLETE) 3569 return; 3570 if (aip->ai_buf != NULL) { 3571 lk.lkt_held = NOHOLDER; 3572 panic("handle_allocindir_partdone: dangling dependency"); 3573 } 3574 indirdep = aip->ai_indirdep; 3575 if (indirdep->ir_state & UNDONE) { 3576 LIST_REMOVE(aip, ai_next); 3577 LIST_INSERT_HEAD(&indirdep->ir_donehd, aip, ai_next); 3578 return; 3579 } 3580 ((ufs_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] = 3581 aip->ai_newblkno; 3582 LIST_REMOVE(aip, ai_next); 3583 if (aip->ai_freefrag != NULL) 3584 add_to_worklist(&aip->ai_freefrag->ff_list); 3585 WORKITEM_FREE(aip, D_ALLOCINDIR); 3586 } 3587 3588 /* 3589 * Called from within softdep_disk_write_complete above to restore 3590 * in-memory inode block contents to their most up-to-date state. Note 3591 * that this routine is always called from interrupt level with further 3592 * splbio interrupts blocked. 3593 */ 3594 static int 3595 handle_written_inodeblock(inodedep, bp) 3596 struct inodedep *inodedep; 3597 struct buf *bp; /* buffer containing the inode block */ 3598 { 3599 struct worklist *wk, *filefree; 3600 struct allocdirect *adp, *nextadp; 3601 struct ufs1_dinode *dp; 3602 int hadchanges; 3603 3604 if ((inodedep->id_state & IOSTARTED) == 0) { 3605 lk.lkt_held = NOHOLDER; 3606 panic("handle_written_inodeblock: not started"); 3607 } 3608 inodedep->id_state &= ~IOSTARTED; 3609 dp = (struct ufs1_dinode *)bp->b_data + 3610 ino_to_fsbo(inodedep->id_fs, inodedep->id_ino); 3611 /* 3612 * If we had to rollback the inode allocation because of 3613 * bitmaps being incomplete, then simply restore it. 3614 * Keep the block dirty so that it will not be reclaimed until 3615 * all associated dependencies have been cleared and the 3616 * corresponding updates written to disk. 3617 */ 3618 if (inodedep->id_savedino != NULL) { 3619 *dp = *inodedep->id_savedino; 3620 FREE(inodedep->id_savedino, M_INODEDEP); 3621 inodedep->id_savedino = NULL; 3622 if ((bp->b_flags & B_DELWRI) == 0) 3623 stat_inode_bitmap++; 3624 bdirty(bp); 3625 return (1); 3626 } 3627 inodedep->id_state |= COMPLETE; 3628 /* 3629 * Roll forward anything that had to be rolled back before 3630 * the inode could be updated. 3631 */ 3632 hadchanges = 0; 3633 for (adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; adp = nextadp) { 3634 nextadp = TAILQ_NEXT(adp, ad_next); 3635 if (adp->ad_state & ATTACHED) { 3636 lk.lkt_held = NOHOLDER; 3637 panic("handle_written_inodeblock: new entry"); 3638 } 3639 if (adp->ad_lbn < NDADDR) { 3640 if (dp->di_db[adp->ad_lbn] != adp->ad_oldblkno) { 3641 lk.lkt_held = NOHOLDER; 3642 panic("%s: %s #%ld mismatch %d != %d", 3643 "handle_written_inodeblock", 3644 "direct pointer", adp->ad_lbn, 3645 dp->di_db[adp->ad_lbn], adp->ad_oldblkno); 3646 } 3647 dp->di_db[adp->ad_lbn] = adp->ad_newblkno; 3648 } else { 3649 if (dp->di_ib[adp->ad_lbn - NDADDR] != 0) { 3650 lk.lkt_held = NOHOLDER; 3651 panic("%s: %s #%ld allocated as %d", 3652 "handle_written_inodeblock", 3653 "indirect pointer", adp->ad_lbn - NDADDR, 3654 dp->di_ib[adp->ad_lbn - NDADDR]); 3655 } 3656 dp->di_ib[adp->ad_lbn - NDADDR] = adp->ad_newblkno; 3657 } 3658 adp->ad_state &= ~UNDONE; 3659 adp->ad_state |= ATTACHED; 3660 hadchanges = 1; 3661 } 3662 if (hadchanges && (bp->b_flags & B_DELWRI) == 0) 3663 stat_direct_blk_ptrs++; 3664 /* 3665 * Reset the file size to its most up-to-date value. 
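	 * id_savedsize was captured by initiate_write_inodeblock() before
	 * any rollback was applied; a value of -1 means that no saved
	 * size is outstanding.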
3666 	 */
3667 	if (inodedep->id_savedsize == -1) {
3668 		lk.lkt_held = NOHOLDER;
3669 		panic("handle_written_inodeblock: bad size");
3670 	}
3671 	if (dp->di_size != inodedep->id_savedsize) {
3672 		dp->di_size = inodedep->id_savedsize;
3673 		hadchanges = 1;
3674 	}
3675 	inodedep->id_savedsize = -1;
3676 	/*
3677 	 * If there were any rollbacks in the inode block, then it must be
3678 	 * marked dirty so that it will eventually get written back in
3679 	 * its correct form.
3680 	 */
3681 	if (hadchanges)
3682 		bdirty(bp);
3683 	/*
3684 	 * Process any allocdirects that completed during the update.
3685 	 */
3686 	if ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != NULL)
3687 		handle_allocdirect_partdone(adp);
3688 	/*
3689 	 * Process deallocations that were held pending until the
3690 	 * inode had been written to disk. Freeing of the inode
3691 	 * is delayed until after all blocks have been freed to
3692 	 * avoid creation of new <vfsid, inum, lbn> triples
3693 	 * before the old ones have been deleted.
3694 	 */
3695 	filefree = NULL;
3696 	while ((wk = LIST_FIRST(&inodedep->id_bufwait)) != NULL) {
3697 		WORKLIST_REMOVE(wk);
3698 		switch (wk->wk_type) {
3699 
3700 		case D_FREEFILE:
3701 			/*
3702 			 * We defer adding filefree to the worklist until
3703 			 * all other additions have been made to ensure
3704 			 * that it will be done after all the old blocks
3705 			 * have been freed.
3706 			 */
3707 			if (filefree != NULL) {
3708 				lk.lkt_held = NOHOLDER;
3709 				panic("handle_written_inodeblock: filefree");
3710 			}
3711 			filefree = wk;
3712 			continue;
3713 
3714 		case D_MKDIR:
3715 			handle_written_mkdir(WK_MKDIR(wk), MKDIR_PARENT);
3716 			continue;
3717 
3718 		case D_DIRADD:
3719 			diradd_inode_written(WK_DIRADD(wk), inodedep);
3720 			continue;
3721 
3722 		case D_FREEBLKS:
3723 			wk->wk_state |= COMPLETE;
3724 			if ((wk->wk_state & ALLCOMPLETE) != ALLCOMPLETE)
3725 				continue;
3726 			/* -- fall through -- */
3727 		case D_FREEFRAG:
3728 		case D_DIRREM:
3729 			add_to_worklist(wk);
3730 			continue;
3731 
3732 		default:
3733 			lk.lkt_held = NOHOLDER;
3734 			panic("handle_written_inodeblock: Unknown type %s",
3735 			    TYPENAME(wk->wk_type));
3736 			/* NOTREACHED */
3737 		}
3738 	}
3739 	if (filefree != NULL) {
3740 		if (free_inodedep(inodedep) == 0) {
3741 			lk.lkt_held = NOHOLDER;
3742 			panic("handle_written_inodeblock: live inodedep");
3743 		}
3744 		add_to_worklist(filefree);
3745 		return (0);
3746 	}
3747 
3748 	/*
3749 	 * If no outstanding dependencies, free it.
3750 	 */
3751 	if (free_inodedep(inodedep) || TAILQ_FIRST(&inodedep->id_inoupdt) == 0)
3752 		return (0);
3753 	return (hadchanges);
3754 }
3755 
3756 /*
3757  * Process a diradd entry after its dependent inode has been written.
3758  * This routine must be called with splbio interrupts blocked.
3759  */
3760 static void
3761 diradd_inode_written(dap, inodedep)
3762 	struct diradd *dap;
3763 	struct inodedep *inodedep;
3764 {
3765 	struct pagedep *pagedep;
3766 
3767 	dap->da_state |= COMPLETE;
3768 	if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) {
3769 		if (dap->da_state & DIRCHG)
3770 			pagedep = dap->da_previous->dm_pagedep;
3771 		else
3772 			pagedep = dap->da_pagedep;
3773 		LIST_REMOVE(dap, da_pdlist);
3774 		LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist);
3775 	}
3776 	WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list);
3777 }
3778 
3779 /*
3780  * Handle the completion of a mkdir dependency.
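 * A new directory carries two such dependencies: MKDIR_BODY (the first
 * directory block, holding "." and "..") and MKDIR_PARENT (the parent
 * directory's inode with its increased link count). This routine runs
 * once as each of the two writes completes; when neither bit remains,
 * the associated diradd becomes DEPCOMPLETE and may be moved to the
 * pending list.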
3781  */
3782 static void
3783 handle_written_mkdir(mkdir, type)
3784 	struct mkdir *mkdir;
3785 	int type;
3786 {
3787 	struct diradd *dap;
3788 	struct pagedep *pagedep;
3789 
3790 	if (mkdir->md_state != type) {
3791 		lk.lkt_held = NOHOLDER;
3792 		panic("handle_written_mkdir: bad type");
3793 	}
3794 	dap = mkdir->md_diradd;
3795 	dap->da_state &= ~type;
3796 	if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) == 0)
3797 		dap->da_state |= DEPCOMPLETE;
3798 	if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) {
3799 		if (dap->da_state & DIRCHG)
3800 			pagedep = dap->da_previous->dm_pagedep;
3801 		else
3802 			pagedep = dap->da_pagedep;
3803 		LIST_REMOVE(dap, da_pdlist);
3804 		LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist);
3805 	}
3806 	LIST_REMOVE(mkdir, md_mkdirs);
3807 	WORKITEM_FREE(mkdir, D_MKDIR);
3808 }
3809 
3810 /*
3811  * Called from within softdep_disk_write_complete above.
3812  * A write operation was just completed. Removed inodes can
3813  * now be freed and associated block pointers may be committed.
3814  * Note that this routine is always called from interrupt level
3815  * with further splbio interrupts blocked.
3816  */
3817 static int
3818 handle_written_filepage(pagedep, bp)
3819 	struct pagedep *pagedep;
3820 	struct buf *bp;		/* buffer containing the written page */
3821 {
3822 	struct dirrem *dirrem;
3823 	struct diradd *dap, *nextdap;
3824 	struct direct *ep;
3825 	int i, chgs;
3826 
3827 	if ((pagedep->pd_state & IOSTARTED) == 0) {
3828 		lk.lkt_held = NOHOLDER;
3829 		panic("handle_written_filepage: not started");
3830 	}
3831 	pagedep->pd_state &= ~IOSTARTED;
3832 	/*
3833 	 * Process any directory removals that have been committed.
3834 	 */
3835 	while ((dirrem = LIST_FIRST(&pagedep->pd_dirremhd)) != NULL) {
3836 		LIST_REMOVE(dirrem, dm_next);
3837 		dirrem->dm_dirinum = pagedep->pd_ino;
3838 		add_to_worklist(&dirrem->dm_list);
3839 	}
3840 	/*
3841 	 * Free any directory additions that have been committed.
3842 	 */
3843 	while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL)
3844 		free_diradd(dap);
3845 	/*
3846 	 * Uncommitted directory entries must be restored.
3847 	 */
3848 	for (chgs = 0, i = 0; i < DAHASHSZ; i++) {
3849 		for (dap = LIST_FIRST(&pagedep->pd_diraddhd[i]); dap;
3850 		     dap = nextdap) {
3851 			nextdap = LIST_NEXT(dap, da_pdlist);
3852 			if (dap->da_state & ATTACHED) {
3853 				lk.lkt_held = NOHOLDER;
3854 				panic("handle_written_filepage: attached");
3855 			}
3856 			ep = (struct direct *)
3857 			    ((char *)bp->b_data + dap->da_offset);
3858 			ep->d_ino = dap->da_newinum;
3859 			dap->da_state &= ~UNDONE;
3860 			dap->da_state |= ATTACHED;
3861 			chgs = 1;
3862 			/*
3863 			 * If the inode referenced by the directory has
3864 			 * been written out, then the dependency can be
3865 			 * moved to the pending list.
3866 			 */
3867 			if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) {
3868 				LIST_REMOVE(dap, da_pdlist);
3869 				LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap,
3870 				    da_pdlist);
3871 			}
3872 		}
3873 	}
3874 	/*
3875 	 * If there were any rollbacks in the directory, then it must be
3876 	 * marked dirty so that it will eventually get written back in
3877 	 * its correct form.
3878 	 */
3879 	if (chgs) {
3880 		if ((bp->b_flags & B_DELWRI) == 0)
3881 			stat_dir_entry++;
3882 		bdirty(bp);
3883 	}
3884 	/*
3885 	 * If no dependencies remain, the pagedep will be freed.
3886 	 * Otherwise it will remain to update the page before it
3887 	 * is written back to disk.
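	 * The return value tells softdep_disk_write_complete() whether
	 * to reattach the pagedep to bp->b_dep: nonzero means that work
	 * remains.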
3888 */ 3889 if (LIST_FIRST(&pagedep->pd_pendinghd) == 0) { 3890 for (i = 0; i < DAHASHSZ; i++) 3891 if (LIST_FIRST(&pagedep->pd_diraddhd[i]) != NULL) 3892 break; 3893 if (i == DAHASHSZ) { 3894 LIST_REMOVE(pagedep, pd_hash); 3895 WORKITEM_FREE(pagedep, D_PAGEDEP); 3896 return (0); 3897 } 3898 } 3899 return (1); 3900 } 3901 3902 /* 3903 * Writing back in-core inode structures. 3904 * 3905 * The filesystem only accesses an inode's contents when it occupies an 3906 * "in-core" inode structure. These "in-core" structures are separate from 3907 * the page frames used to cache inode blocks. Only the latter are 3908 * transferred to/from the disk. So, when the updated contents of the 3909 * "in-core" inode structure are copied to the corresponding in-memory inode 3910 * block, the dependencies are also transferred. The following procedure is 3911 * called when copying a dirty "in-core" inode to a cached inode block. 3912 */ 3913 3914 /* 3915 * Called when an inode is loaded from disk. If the effective link count 3916 * differed from the actual link count when it was last flushed, then we 3917 * need to ensure that the correct effective link count is put back. 3918 */ 3919 void 3920 softdep_load_inodeblock(ip) 3921 struct inode *ip; /* the "in_core" copy of the inode */ 3922 { 3923 struct inodedep *inodedep; 3924 3925 /* 3926 * Check for alternate nlink count. 3927 */ 3928 ip->i_effnlink = ip->i_nlink; 3929 ACQUIRE_LOCK(&lk); 3930 if (inodedep_lookup(ip->i_fs, ip->i_number, 0, &inodedep) == 0) { 3931 FREE_LOCK(&lk); 3932 return; 3933 } 3934 ip->i_effnlink -= inodedep->id_nlinkdelta; 3935 FREE_LOCK(&lk); 3936 } 3937 3938 /* 3939 * This routine is called just before the "in-core" inode 3940 * information is to be copied to the in-memory inode block. 3941 * Recall that an inode block contains several inodes. If 3942 * the force flag is set, then the dependencies will be 3943 * cleared so that the update can always be made. Note that 3944 * the buffer is locked when this routine is called, so we 3945 * will never be in the middle of writing the inode block 3946 * to disk. 3947 */ 3948 void 3949 softdep_update_inodeblock(ip, bp, waitfor) 3950 struct inode *ip; /* the "in_core" copy of the inode */ 3951 struct buf *bp; /* the buffer containing the inode block */ 3952 int waitfor; /* nonzero => update must be allowed */ 3953 { 3954 struct inodedep *inodedep; 3955 struct worklist *wk; 3956 int error, gotit; 3957 3958 /* 3959 * If the effective link count is not equal to the actual link 3960 * count, then we must track the difference in an inodedep while 3961 * the inode is (potentially) tossed out of the cache. Otherwise, 3962 * if there is no existing inodedep, then there are no dependencies 3963 * to track. 3964 */ 3965 ACQUIRE_LOCK(&lk); 3966 if (inodedep_lookup(ip->i_fs, ip->i_number, 0, &inodedep) == 0) { 3967 FREE_LOCK(&lk); 3968 if (ip->i_effnlink != ip->i_nlink) 3969 panic("softdep_update_inodeblock: bad link count"); 3970 return; 3971 } 3972 if (inodedep->id_nlinkdelta != ip->i_nlink - ip->i_effnlink) { 3973 FREE_LOCK(&lk); 3974 panic("softdep_update_inodeblock: bad delta"); 3975 } 3976 /* 3977 * Changes have been initiated. Anything depending on these 3978 * changes cannot occur until this inode has been written. 
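	 * Clearing COMPLETE and placing the inodedep on bp->b_dep
	 * (below) guarantees that softdep_disk_write_complete() will
	 * process it when the write of this inode block finishes.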
3979 	 */
3980 	inodedep->id_state &= ~COMPLETE;
3981 	if ((inodedep->id_state & ONWORKLIST) == 0)
3982 		WORKLIST_INSERT(&bp->b_dep, &inodedep->id_list);
3983 	/*
3984 	 * Any new dependencies associated with the incore inode must
3985 	 * now be moved to the list associated with the buffer holding
3986 	 * the in-memory copy of the inode. Once merged, process any
3987 	 * allocdirects that are completed by the merger.
3988 	 */
3989 	merge_inode_lists(inodedep);
3990 	if (TAILQ_FIRST(&inodedep->id_inoupdt) != NULL)
3991 		handle_allocdirect_partdone(TAILQ_FIRST(&inodedep->id_inoupdt));
3992 	/*
3993 	 * Now that the inode has been pushed into the buffer, the
3994 	 * operations dependent on the inode being written to disk
3995 	 * can be moved to the id_bufwait so that they will be
3996 	 * processed when the buffer I/O completes.
3997 	 */
3998 	while ((wk = LIST_FIRST(&inodedep->id_inowait)) != NULL) {
3999 		WORKLIST_REMOVE(wk);
4000 		WORKLIST_INSERT(&inodedep->id_bufwait, wk);
4001 	}
4002 	/*
4003 	 * Newly allocated inodes cannot be written until the bitmap
4004 	 * that allocates them has been written (indicated by
4005 	 * DEPCOMPLETE being set in id_state). If we are doing a
4006 	 * forced sync (e.g., an fsync on a file), we force the bitmap
4007 	 * to be written so that the update can be done.
4008 	 */
4009 	if ((inodedep->id_state & DEPCOMPLETE) != 0 || waitfor == 0) {
4010 		FREE_LOCK(&lk);
4011 		return;
4012 	}
4013 	gotit = getdirtybuf(&inodedep->id_buf, MNT_WAIT);
4014 	FREE_LOCK(&lk);
4015 	if (gotit &&
4016 	    (error = bwrite(inodedep->id_buf)) != 0)
4017 		softdep_error("softdep_update_inodeblock: bwrite", error);
4018 }
4019 
4020 /*
4021  * Merge the new inode dependency list (id_newinoupdt) into the old
4022  * inode dependency list (id_inoupdt). This routine must be called
4023  * with splbio interrupts blocked.
4024  */
4025 static void
4026 merge_inode_lists(inodedep)
4027 	struct inodedep *inodedep;
4028 {
4029 	struct allocdirect *listadp, *newadp;
4030 
4031 	newadp = TAILQ_FIRST(&inodedep->id_newinoupdt);
4032 	for (listadp = TAILQ_FIRST(&inodedep->id_inoupdt); listadp && newadp;) {
4033 		if (listadp->ad_lbn < newadp->ad_lbn) {
4034 			listadp = TAILQ_NEXT(listadp, ad_next);
4035 			continue;
4036 		}
4037 		TAILQ_REMOVE(&inodedep->id_newinoupdt, newadp, ad_next);
4038 		TAILQ_INSERT_BEFORE(listadp, newadp, ad_next);
4039 		if (listadp->ad_lbn == newadp->ad_lbn) {
4040 			allocdirect_merge(&inodedep->id_inoupdt, newadp,
4041 			    listadp);
4042 			listadp = newadp;
4043 		}
4044 		newadp = TAILQ_FIRST(&inodedep->id_newinoupdt);
4045 	}
4046 	while ((newadp = TAILQ_FIRST(&inodedep->id_newinoupdt)) != NULL) {
4047 		TAILQ_REMOVE(&inodedep->id_newinoupdt, newadp, ad_next);
4048 		TAILQ_INSERT_TAIL(&inodedep->id_inoupdt, newadp, ad_next);
4049 	}
4050 }
4051 
4052 /*
4053  * If we are doing an fsync, then we must ensure that any directory
4054  * entries for the inode have been written after the inode gets to disk.
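 * Each entry on id_pendinghd is a diradd naming a directory page (and
 * possibly a parent directory) that must be flushed before the fsync
 * can be considered durable.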
4055 */ 4056 static int 4057 softdep_fsync(vp) 4058 struct vnode *vp; /* the "in_core" copy of the inode */ 4059 { 4060 struct inodedep *inodedep; 4061 struct pagedep *pagedep; 4062 struct worklist *wk; 4063 struct diradd *dap; 4064 struct mount *mnt; 4065 struct vnode *pvp; 4066 struct inode *ip; 4067 struct buf *bp; 4068 struct fs *fs; 4069 int error, flushparent; 4070 ino_t parentino; 4071 ufs_lbn_t lbn; 4072 4073 ip = VTOI(vp); 4074 fs = ip->i_fs; 4075 ACQUIRE_LOCK(&lk); 4076 if (inodedep_lookup(fs, ip->i_number, 0, &inodedep) == 0) { 4077 FREE_LOCK(&lk); 4078 return (0); 4079 } 4080 if (LIST_FIRST(&inodedep->id_inowait) != NULL || 4081 LIST_FIRST(&inodedep->id_bufwait) != NULL || 4082 TAILQ_FIRST(&inodedep->id_inoupdt) != NULL || 4083 TAILQ_FIRST(&inodedep->id_newinoupdt) != NULL) { 4084 FREE_LOCK(&lk); 4085 panic("softdep_fsync: pending ops"); 4086 } 4087 for (error = 0, flushparent = 0; ; ) { 4088 if ((wk = LIST_FIRST(&inodedep->id_pendinghd)) == NULL) 4089 break; 4090 if (wk->wk_type != D_DIRADD) { 4091 FREE_LOCK(&lk); 4092 panic("softdep_fsync: Unexpected type %s", 4093 TYPENAME(wk->wk_type)); 4094 } 4095 dap = WK_DIRADD(wk); 4096 /* 4097 * Flush our parent if this directory entry 4098 * has a MKDIR_PARENT dependency. 4099 */ 4100 if (dap->da_state & DIRCHG) 4101 pagedep = dap->da_previous->dm_pagedep; 4102 else 4103 pagedep = dap->da_pagedep; 4104 mnt = pagedep->pd_mnt; 4105 parentino = pagedep->pd_ino; 4106 lbn = pagedep->pd_lbn; 4107 if ((dap->da_state & (MKDIR_BODY | COMPLETE)) != COMPLETE) { 4108 FREE_LOCK(&lk); 4109 panic("softdep_fsync: dirty"); 4110 } 4111 flushparent = dap->da_state & MKDIR_PARENT; 4112 /* 4113 * If we are being fsync'ed as part of vgone'ing this vnode, 4114 * then we will not be able to release and recover the 4115 * vnode below, so we just have to give up on writing its 4116 * directory entry out. It will eventually be written, just 4117 * not now, but then the user was not asking to have it 4118 * written, so we are not breaking any promises. 4119 */ 4120 if (vp->v_flag & VRECLAIMED) 4121 break; 4122 /* 4123 * We prevent deadlock by always fetching inodes from the 4124 * root, moving down the directory tree. Thus, when fetching 4125 * our parent directory, we must unlock ourselves before 4126 * requesting the lock on our parent. See the comment in 4127 * ufs_lookup for details on possible races. 4128 */ 4129 FREE_LOCK(&lk); 4130 vn_unlock(vp); 4131 error = VFS_VGET(mnt, parentino, &pvp); 4132 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 4133 if (error != 0) 4134 return (error); 4135 if (flushparent) { 4136 if ((error = ffs_update(pvp, 1)) != 0) { 4137 vput(pvp); 4138 return (error); 4139 } 4140 } 4141 /* 4142 * Flush directory page containing the inode's name. 4143 */ 4144 error = bread(pvp, lblktodoff(fs, lbn), blksize(fs, VTOI(pvp), lbn), &bp); 4145 if (error == 0) 4146 error = bwrite(bp); 4147 vput(pvp); 4148 if (error != 0) 4149 return (error); 4150 ACQUIRE_LOCK(&lk); 4151 if (inodedep_lookup(fs, ip->i_number, 0, &inodedep) == 0) 4152 break; 4153 } 4154 FREE_LOCK(&lk); 4155 return (0); 4156 } 4157 4158 /* 4159 * Flush all the dirty bitmaps associated with the block device 4160 * before flushing the rest of the dirty blocks so as to reduce 4161 * the number of dependencies that will have to be rolled back. 
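 *
 * The device vnode's dirty buffers are visited with RB_SCAN; the
 * callback below always returns 0 so that every buffer is examined,
 * but it only writes those whose first dependency is a bmsafemap, a
 * bitmap block with outstanding allocation dependencies.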
4162 */ 4163 static int softdep_fsync_mountdev_bp(struct buf *bp, void *data); 4164 4165 void 4166 softdep_fsync_mountdev(vp) 4167 struct vnode *vp; 4168 { 4169 if (!vn_isdisk(vp, NULL)) 4170 panic("softdep_fsync_mountdev: vnode not a disk"); 4171 ACQUIRE_LOCK(&lk); 4172 RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL, 4173 softdep_fsync_mountdev_bp, vp); 4174 drain_output(vp, 1); 4175 FREE_LOCK(&lk); 4176 } 4177 4178 static int 4179 softdep_fsync_mountdev_bp(struct buf *bp, void *data) 4180 { 4181 struct worklist *wk; 4182 struct vnode *vp = data; 4183 4184 /* 4185 * If it is already scheduled, skip to the next buffer. 4186 */ 4187 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) 4188 return(0); 4189 if (bp->b_vp != vp || (bp->b_flags & B_DELWRI) == 0) { 4190 BUF_UNLOCK(bp); 4191 kprintf("softdep_fsync_mountdev_bp: warning, buffer %p ripped out from under vnode %p\n", bp, vp); 4192 return(0); 4193 } 4194 /* 4195 * We are only interested in bitmaps with outstanding 4196 * dependencies. 4197 */ 4198 if ((wk = LIST_FIRST(&bp->b_dep)) == NULL || 4199 wk->wk_type != D_BMSAFEMAP) { 4200 BUF_UNLOCK(bp); 4201 return(0); 4202 } 4203 bremfree(bp); 4204 FREE_LOCK(&lk); 4205 (void) bawrite(bp); 4206 ACQUIRE_LOCK(&lk); 4207 return(0); 4208 } 4209 4210 /* 4211 * This routine is called when we are trying to synchronously flush a 4212 * file. This routine must eliminate any filesystem metadata dependencies 4213 * so that the syncing routine can succeed by pushing the dirty blocks 4214 * associated with the file. If any I/O errors occur, they are returned. 4215 */ 4216 struct softdep_sync_metadata_info { 4217 struct vnode *vp; 4218 int waitfor; 4219 }; 4220 4221 static int softdep_sync_metadata_bp(struct buf *bp, void *data); 4222 4223 int 4224 softdep_sync_metadata(struct vnode *vp, struct thread *td) 4225 { 4226 struct softdep_sync_metadata_info info; 4227 int error, waitfor; 4228 4229 /* 4230 * Check whether this vnode is involved in a filesystem 4231 * that is doing soft dependency processing. 4232 */ 4233 if (!vn_isdisk(vp, NULL)) { 4234 if (!DOINGSOFTDEP(vp)) 4235 return (0); 4236 } else 4237 if (vp->v_rdev->si_mountpoint == NULL || 4238 (vp->v_rdev->si_mountpoint->mnt_flag & MNT_SOFTDEP) == 0) 4239 return (0); 4240 /* 4241 * Ensure that any direct block dependencies have been cleared. 4242 */ 4243 ACQUIRE_LOCK(&lk); 4244 if ((error = flush_inodedep_deps(VTOI(vp)->i_fs, VTOI(vp)->i_number))) { 4245 FREE_LOCK(&lk); 4246 return (error); 4247 } 4248 /* 4249 * For most files, the only metadata dependencies are the 4250 * cylinder group maps that allocate their inode or blocks. 4251 * The block allocation dependencies can be found by traversing 4252 * the dependency lists for any buffers that remain on their 4253 * dirty buffer list. The inode allocation dependency will 4254 * be resolved when the inode is updated with MNT_WAIT. 4255 * This work is done in two passes. The first pass grabs most 4256 * of the buffers and begins asynchronously writing them. The 4257 * only way to wait for these asynchronous writes is to sleep 4258 * on the filesystem vnode which may stay busy for a long time 4259 * if the filesystem is active. So, instead, we make a second 4260 * pass over the dependencies blocking on each write. In the 4261 * usual case we will be blocking against a write that we 4262 * initiated, so when it is done the dependency will have been 4263 * resolved. Thus the second pass is expected to end quickly. 
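	 *
	 * Roughly:
	 *
	 *	waitfor = MNT_NOWAIT;	(pass 1: start asynchronous writes)
	 *	... scan the dirty buffer tree ...
	 *	waitfor = MNT_WAIT;	(pass 2: block on each remaining write)
	 *	... scan it again ...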
4264 */ 4265 waitfor = MNT_NOWAIT; 4266 top: 4267 /* 4268 * We must wait for any I/O in progress to finish so that 4269 * all potential buffers on the dirty list will be visible. 4270 */ 4271 drain_output(vp, 1); 4272 info.vp = vp; 4273 info.waitfor = waitfor; 4274 error = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL, 4275 softdep_sync_metadata_bp, &info); 4276 if (error < 0) { 4277 FREE_LOCK(&lk); 4278 return(-error); /* error code */ 4279 } 4280 4281 /* 4282 * The brief unlock is to allow any pent up dependency 4283 * processing to be done. Then proceed with the second pass. 4284 */ 4285 if (waitfor == MNT_NOWAIT) { 4286 waitfor = MNT_WAIT; 4287 FREE_LOCK(&lk); 4288 ACQUIRE_LOCK(&lk); 4289 goto top; 4290 } 4291 4292 /* 4293 * If we have managed to get rid of all the dirty buffers, 4294 * then we are done. For certain directories and block 4295 * devices, we may need to do further work. 4296 * 4297 * We must wait for any I/O in progress to finish so that 4298 * all potential buffers on the dirty list will be visible. 4299 */ 4300 drain_output(vp, 1); 4301 if (RB_EMPTY(&vp->v_rbdirty_tree)) { 4302 FREE_LOCK(&lk); 4303 return (0); 4304 } 4305 4306 FREE_LOCK(&lk); 4307 /* 4308 * If we are trying to sync a block device, some of its buffers may 4309 * contain metadata that cannot be written until the contents of some 4310 * partially written files have been written to disk. The only easy 4311 * way to accomplish this is to sync the entire filesystem (luckily 4312 * this happens rarely). 4313 */ 4314 if (vn_isdisk(vp, NULL) && 4315 vp->v_rdev && 4316 vp->v_rdev->si_mountpoint && !vn_islocked(vp) && 4317 (error = VFS_SYNC(vp->v_rdev->si_mountpoint, MNT_WAIT)) != 0) 4318 return (error); 4319 return (0); 4320 } 4321 4322 static int 4323 softdep_sync_metadata_bp(struct buf *bp, void *data) 4324 { 4325 struct softdep_sync_metadata_info *info = data; 4326 struct pagedep *pagedep; 4327 struct allocdirect *adp; 4328 struct allocindir *aip; 4329 struct worklist *wk; 4330 struct buf *nbp; 4331 int error; 4332 int i; 4333 4334 if (getdirtybuf(&bp, MNT_WAIT) == 0) { 4335 kprintf("softdep_sync_metadata_bp(1): caught buf %p going away\n", bp); 4336 return (1); 4337 } 4338 if (bp->b_vp != info->vp || (bp->b_flags & B_DELWRI) == 0) { 4339 kprintf("softdep_sync_metadata_bp(2): caught buf %p going away vp %p\n", bp, info->vp); 4340 BUF_UNLOCK(bp); 4341 return(1); 4342 } 4343 4344 /* 4345 * As we hold the buffer locked, none of its dependencies 4346 * will disappear. 
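	 *
	 * Return conventions for this RB_SCAN callback: 0 continues the
	 * scan, 1 skips a buffer that went away, and a negative value
	 * aborts the scan; the caller negates it back into a positive
	 * errno.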
4347 */ 4348 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 4349 switch (wk->wk_type) { 4350 4351 case D_ALLOCDIRECT: 4352 adp = WK_ALLOCDIRECT(wk); 4353 if (adp->ad_state & DEPCOMPLETE) 4354 break; 4355 nbp = adp->ad_buf; 4356 if (getdirtybuf(&nbp, info->waitfor) == 0) 4357 break; 4358 FREE_LOCK(&lk); 4359 if (info->waitfor == MNT_NOWAIT) { 4360 bawrite(nbp); 4361 } else if ((error = bwrite(nbp)) != 0) { 4362 bawrite(bp); 4363 ACQUIRE_LOCK(&lk); 4364 return (-error); 4365 } 4366 ACQUIRE_LOCK(&lk); 4367 break; 4368 4369 case D_ALLOCINDIR: 4370 aip = WK_ALLOCINDIR(wk); 4371 if (aip->ai_state & DEPCOMPLETE) 4372 break; 4373 nbp = aip->ai_buf; 4374 if (getdirtybuf(&nbp, info->waitfor) == 0) 4375 break; 4376 FREE_LOCK(&lk); 4377 if (info->waitfor == MNT_NOWAIT) { 4378 bawrite(nbp); 4379 } else if ((error = bwrite(nbp)) != 0) { 4380 bawrite(bp); 4381 ACQUIRE_LOCK(&lk); 4382 return (-error); 4383 } 4384 ACQUIRE_LOCK(&lk); 4385 break; 4386 4387 case D_INDIRDEP: 4388 restart: 4389 4390 LIST_FOREACH(aip, &WK_INDIRDEP(wk)->ir_deplisthd, ai_next) { 4391 if (aip->ai_state & DEPCOMPLETE) 4392 continue; 4393 nbp = aip->ai_buf; 4394 if (getdirtybuf(&nbp, MNT_WAIT) == 0) 4395 goto restart; 4396 FREE_LOCK(&lk); 4397 if ((error = bwrite(nbp)) != 0) { 4398 bawrite(bp); 4399 ACQUIRE_LOCK(&lk); 4400 return (-error); 4401 } 4402 ACQUIRE_LOCK(&lk); 4403 goto restart; 4404 } 4405 break; 4406 4407 case D_INODEDEP: 4408 if ((error = flush_inodedep_deps(WK_INODEDEP(wk)->id_fs, 4409 WK_INODEDEP(wk)->id_ino)) != 0) { 4410 FREE_LOCK(&lk); 4411 bawrite(bp); 4412 ACQUIRE_LOCK(&lk); 4413 return (-error); 4414 } 4415 break; 4416 4417 case D_PAGEDEP: 4418 /* 4419 * We are trying to sync a directory that may 4420 * have dependencies on both its own metadata 4421 * and/or dependencies on the inodes of any 4422 * recently allocated files. We walk its diradd 4423 * lists pushing out the associated inode. 4424 */ 4425 pagedep = WK_PAGEDEP(wk); 4426 for (i = 0; i < DAHASHSZ; i++) { 4427 if (LIST_FIRST(&pagedep->pd_diraddhd[i]) == 0) 4428 continue; 4429 if ((error = 4430 flush_pagedep_deps(info->vp, 4431 pagedep->pd_mnt, 4432 &pagedep->pd_diraddhd[i]))) { 4433 FREE_LOCK(&lk); 4434 bawrite(bp); 4435 ACQUIRE_LOCK(&lk); 4436 return (-error); 4437 } 4438 } 4439 break; 4440 4441 case D_MKDIR: 4442 /* 4443 * This case should never happen if the vnode has 4444 * been properly sync'ed. However, if this function 4445 * is used at a place where the vnode has not yet 4446 * been sync'ed, this dependency can show up. So, 4447 * rather than panic, just flush it. 4448 */ 4449 nbp = WK_MKDIR(wk)->md_buf; 4450 if (getdirtybuf(&nbp, info->waitfor) == 0) 4451 break; 4452 FREE_LOCK(&lk); 4453 if (info->waitfor == MNT_NOWAIT) { 4454 bawrite(nbp); 4455 } else if ((error = bwrite(nbp)) != 0) { 4456 bawrite(bp); 4457 ACQUIRE_LOCK(&lk); 4458 return (-error); 4459 } 4460 ACQUIRE_LOCK(&lk); 4461 break; 4462 4463 case D_BMSAFEMAP: 4464 /* 4465 * This case should never happen if the vnode has 4466 * been properly sync'ed. However, if this function 4467 * is used at a place where the vnode has not yet 4468 * been sync'ed, this dependency can show up. So, 4469 * rather than panic, just flush it. 4470 * 4471 * nbp can wind up == bp if a device node for the 4472 * same filesystem is being fsynced at the same time, 4473 * leading to a panic if we don't catch the case. 
4474 		 */
4475 		nbp = WK_BMSAFEMAP(wk)->sm_buf;
4476 		if (nbp == bp)
4477 			break;
4478 		if (getdirtybuf(&nbp, info->waitfor) == 0)
4479 			break;
4480 		FREE_LOCK(&lk);
4481 		if (info->waitfor == MNT_NOWAIT) {
4482 			bawrite(nbp);
4483 		} else if ((error = bwrite(nbp)) != 0) {
4484 			bawrite(bp);
4485 			ACQUIRE_LOCK(&lk);
4486 			return (-error);
4487 		}
4488 		ACQUIRE_LOCK(&lk);
4489 		break;
4490 
4491 	default:
4492 		FREE_LOCK(&lk);
4493 		panic("softdep_sync_metadata: Unknown type %s",
4494 		    TYPENAME(wk->wk_type));
4495 		/* NOTREACHED */
4496 	}
4497 	}
4498 	FREE_LOCK(&lk);
4499 	bawrite(bp);
4500 	ACQUIRE_LOCK(&lk);
4501 	return(0);
4502 }
4503 
4504 /*
4505  * Flush the dependencies associated with an inodedep.
4506  * Called with splbio blocked.
4507  */
4508 static int
4509 flush_inodedep_deps(fs, ino)
4510 	struct fs *fs;
4511 	ino_t ino;
4512 {
4513 	struct inodedep *inodedep;
4514 	struct allocdirect *adp;
4515 	int error, waitfor;
4516 	struct buf *bp;
4517 
4518 	/*
4519 	 * This work is done in two passes. The first pass grabs most
4520 	 * of the buffers and begins asynchronously writing them. The
4521 	 * only way to wait for these asynchronous writes is to sleep
4522 	 * on the filesystem vnode which may stay busy for a long time
4523 	 * if the filesystem is active. So, instead, we make a second
4524 	 * pass over the dependencies blocking on each write. In the
4525 	 * usual case we will be blocking against a write that we
4526 	 * initiated, so when it is done the dependency will have been
4527 	 * resolved. Thus the second pass is expected to end quickly.
4528 	 * We give a brief window at the top of the loop to allow
4529 	 * any pending I/O to complete.
4530 	 */
4531 	for (waitfor = MNT_NOWAIT; ; ) {
4532 		FREE_LOCK(&lk);
4533 		ACQUIRE_LOCK(&lk);
4534 		if (inodedep_lookup(fs, ino, 0, &inodedep) == 0)
4535 			return (0);
4536 		TAILQ_FOREACH(adp, &inodedep->id_inoupdt, ad_next) {
4537 			if (adp->ad_state & DEPCOMPLETE)
4538 				continue;
4539 			bp = adp->ad_buf;
4540 			if (getdirtybuf(&bp, waitfor) == 0) {
4541 				if (waitfor == MNT_NOWAIT)
4542 					continue;
4543 				break;
4544 			}
4545 			FREE_LOCK(&lk);
4546 			if (waitfor == MNT_NOWAIT) {
4547 				bawrite(bp);
4548 			} else if ((error = bwrite(bp)) != 0) {
4549 				ACQUIRE_LOCK(&lk);
4550 				return (error);
4551 			}
4552 			ACQUIRE_LOCK(&lk);
4553 			break;
4554 		}
4555 		if (adp != NULL)
4556 			continue;
4557 		TAILQ_FOREACH(adp, &inodedep->id_newinoupdt, ad_next) {
4558 			if (adp->ad_state & DEPCOMPLETE)
4559 				continue;
4560 			bp = adp->ad_buf;
4561 			if (getdirtybuf(&bp, waitfor) == 0) {
4562 				if (waitfor == MNT_NOWAIT)
4563 					continue;
4564 				break;
4565 			}
4566 			FREE_LOCK(&lk);
4567 			if (waitfor == MNT_NOWAIT) {
4568 				bawrite(bp);
4569 			} else if ((error = bwrite(bp)) != 0) {
4570 				ACQUIRE_LOCK(&lk);
4571 				return (error);
4572 			}
4573 			ACQUIRE_LOCK(&lk);
4574 			break;
4575 		}
4576 		if (adp != NULL)
4577 			continue;
4578 		/*
4579 		 * If this was pass 2, we are done; otherwise, begin pass 2.
4580 		 */
4581 		if (waitfor == MNT_WAIT)
4582 			break;
4583 		waitfor = MNT_WAIT;
4584 	}
4585 	/*
4586 	 * Try freeing inodedep in case all dependencies have been removed.
4587 	 */
4588 	if (inodedep_lookup(fs, ino, 0, &inodedep) != 0)
4589 		(void) free_inodedep(inodedep);
4590 	return (0);
4591 }
4592 
4593 /*
4594  * Eliminate a pagedep dependency by flushing out all its diradd dependencies.
4595  * Called with splbio blocked.
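 *
 * For each diradd this means, in order: updating the parent directory
 * itself if MKDIR_PARENT is set, fsync'ing the newly created directory
 * if MKDIR_BODY is set, and finally pushing the dependent inode (its
 * bitmap buffer and then its inode buffer) to disk.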
4596 */ 4597 static int 4598 flush_pagedep_deps(pvp, mp, diraddhdp) 4599 struct vnode *pvp; 4600 struct mount *mp; 4601 struct diraddhd *diraddhdp; 4602 { 4603 struct inodedep *inodedep; 4604 struct ufsmount *ump; 4605 struct diradd *dap; 4606 struct vnode *vp; 4607 int gotit, error = 0; 4608 struct buf *bp; 4609 ino_t inum; 4610 4611 ump = VFSTOUFS(mp); 4612 while ((dap = LIST_FIRST(diraddhdp)) != NULL) { 4613 /* 4614 * Flush ourselves if this directory entry 4615 * has a MKDIR_PARENT dependency. 4616 */ 4617 if (dap->da_state & MKDIR_PARENT) { 4618 FREE_LOCK(&lk); 4619 if ((error = ffs_update(pvp, 1)) != 0) 4620 break; 4621 ACQUIRE_LOCK(&lk); 4622 /* 4623 * If that cleared dependencies, go on to next. 4624 */ 4625 if (dap != LIST_FIRST(diraddhdp)) 4626 continue; 4627 if (dap->da_state & MKDIR_PARENT) { 4628 FREE_LOCK(&lk); 4629 panic("flush_pagedep_deps: MKDIR_PARENT"); 4630 } 4631 } 4632 /* 4633 * A newly allocated directory must have its "." and 4634 * ".." entries written out before its name can be 4635 * committed in its parent. We do not want or need 4636 * the full semantics of a synchronous VOP_FSYNC as 4637 * that may end up here again, once for each directory 4638 * level in the filesystem. Instead, we push the blocks 4639 * and wait for them to clear. We have to fsync twice 4640 * because the first call may choose to defer blocks 4641 * that still have dependencies, but deferral will 4642 * happen at most once. 4643 */ 4644 inum = dap->da_newinum; 4645 if (dap->da_state & MKDIR_BODY) { 4646 FREE_LOCK(&lk); 4647 if ((error = VFS_VGET(mp, inum, &vp)) != 0) 4648 break; 4649 if ((error=VOP_FSYNC(vp, MNT_NOWAIT)) || 4650 (error=VOP_FSYNC(vp, MNT_NOWAIT))) { 4651 vput(vp); 4652 break; 4653 } 4654 drain_output(vp, 0); 4655 vput(vp); 4656 ACQUIRE_LOCK(&lk); 4657 /* 4658 * If that cleared dependencies, go on to next. 4659 */ 4660 if (dap != LIST_FIRST(diraddhdp)) 4661 continue; 4662 if (dap->da_state & MKDIR_BODY) { 4663 FREE_LOCK(&lk); 4664 panic("flush_pagedep_deps: MKDIR_BODY"); 4665 } 4666 } 4667 /* 4668 * Flush the inode on which the directory entry depends. 4669 * Having accounted for MKDIR_PARENT and MKDIR_BODY above, 4670 * the only remaining dependency is that the updated inode 4671 * count must get pushed to disk. The inode has already 4672 * been pushed into its inode buffer (via VOP_UPDATE) at 4673 * the time of the reference count change. So we need only 4674 * locate that buffer, ensure that there will be no rollback 4675 * caused by a bitmap dependency, then write the inode buffer. 4676 */ 4677 if (inodedep_lookup(ump->um_fs, inum, 0, &inodedep) == 0) { 4678 FREE_LOCK(&lk); 4679 panic("flush_pagedep_deps: lost inode"); 4680 } 4681 /* 4682 * If the inode still has bitmap dependencies, 4683 * push them to disk. 4684 */ 4685 if ((inodedep->id_state & DEPCOMPLETE) == 0) { 4686 gotit = getdirtybuf(&inodedep->id_buf, MNT_WAIT); 4687 FREE_LOCK(&lk); 4688 if (gotit && (error = bwrite(inodedep->id_buf)) != 0) 4689 break; 4690 ACQUIRE_LOCK(&lk); 4691 if (dap != LIST_FIRST(diraddhdp)) 4692 continue; 4693 } 4694 /* 4695 * If the inode is still sitting in a buffer waiting 4696 * to be written, push it to disk. 4697 */ 4698 FREE_LOCK(&lk); 4699 if ((error = bread(ump->um_devvp, 4700 fsbtodoff(ump->um_fs, ino_to_fsba(ump->um_fs, inum)), 4701 (int)ump->um_fs->fs_bsize, &bp)) != 0) 4702 break; 4703 if ((error = bwrite(bp)) != 0) 4704 break; 4705 ACQUIRE_LOCK(&lk); 4706 /* 4707 * If we have failed to get rid of all the dependencies 4708 * then something is seriously wrong. 
4709 		 */
4710 		if (dap == LIST_FIRST(diraddhdp)) {
4711 			FREE_LOCK(&lk);
4712 			panic("flush_pagedep_deps: flush failed");
4713 		}
4714 	}
4715 	if (error)
4716 		ACQUIRE_LOCK(&lk);
4717 	return (error);
4718 }
4719 
4720 /*
4721  * A large burst of file addition or deletion activity can drive the
4722  * memory load excessively high. First attempt to slow things down
4723  * using the techniques below. If that fails, this routine requests
4724  * the offending operations to fall back to running synchronously
4725  * until the memory load returns to a reasonable level.
4726  */
4727 int
4728 softdep_slowdown(vp)
4729 	struct vnode *vp;
4730 {
4731 	int max_softdeps_hard;
4732 
4733 	max_softdeps_hard = max_softdeps * 11 / 10;
4734 	if (num_dirrem < max_softdeps_hard / 2 &&
4735 	    num_inodedep < max_softdeps_hard)
4736 		return (0);
4737 	stat_sync_limit_hit += 1;
4738 	return (1);
4739 }
4740 
4741 /*
4742  * If memory utilization has gotten too high, deliberately slow things
4743  * down and speed up the I/O processing.
4744  */
4745 static int
4746 request_cleanup(resource, islocked)
4747 	int resource;
4748 	int islocked;
4749 {
4750 	struct thread *td = curthread;		/* XXX */
4751 
4752 	/*
4753 	 * We never hold up the filesystem syncer process.
4754 	 */
4755 	if (td == filesys_syncer)
4756 		return (0);
4757 	/*
4758 	 * First check to see if the work list has gotten backlogged.
4759 	 * If it has, co-opt this process to help clean up two entries.
4760 	 * Because this process may hold inodes locked, we cannot
4761 	 * handle any remove requests that might block on a locked
4762 	 * inode as that could lead to deadlock.
4763 	 */
4764 	if (num_on_worklist > max_softdeps / 10) {
4765 		if (islocked)
4766 			FREE_LOCK(&lk);
4767 		process_worklist_item(NULL, LK_NOWAIT);
4768 		process_worklist_item(NULL, LK_NOWAIT);
4769 		stat_worklist_push += 2;
4770 		if (islocked)
4771 			ACQUIRE_LOCK(&lk);
4772 		return(1);
4773 	}
4774 
4775 	/*
4776 	 * If we are resource constrained on inode dependencies, try
4777 	 * flushing some dirty inodes. Otherwise, we are constrained
4778 	 * by file deletions, so try accelerating flushes of directories
4779 	 * with removal dependencies. We would like to do the cleanup
4780 	 * here, but we probably hold an inode locked at this point and
4781 	 * that might deadlock against one that we try to clean. So,
4782 	 * the best that we can do is request the syncer daemon to do
4783 	 * the cleanup for us.
4784 	 */
4785 	switch (resource) {
4786 
4787 	case FLUSH_INODES:
4788 		stat_ino_limit_push += 1;
4789 		req_clear_inodedeps += 1;
4790 		stat_countp = &stat_ino_limit_hit;
4791 		break;
4792 
4793 	case FLUSH_REMOVE:
4794 		stat_blk_limit_push += 1;
4795 		req_clear_remove += 1;
4796 		stat_countp = &stat_blk_limit_hit;
4797 		break;
4798 
4799 	default:
4800 		if (islocked)
4801 			FREE_LOCK(&lk);
4802 		panic("request_cleanup: unknown type");
4803 	}
4804 	/*
4805 	 * Hopefully the syncer daemon will catch up and awaken us.
4806 	 * We wait at most tickdelay before proceeding in any case.
4807 	 */
4808 	if (islocked == 0)
4809 		ACQUIRE_LOCK(&lk);
4810 	proc_waiting += 1;
4811 	if (!callout_active(&handle))
4812 		callout_reset(&handle, tickdelay > 2 ? tickdelay : 2,
4813 		    pause_timer, NULL);
4814 	interlocked_sleep(&lk, SLEEP, (caddr_t)&proc_waiting, 0,
4815 	    "softupdate", 0);
4816 	proc_waiting -= 1;
4817 	if (islocked == 0)
4818 		FREE_LOCK(&lk);
4819 	return (1);
4820 }
4821 
4822 /*
4823  * Awaken processes pausing in request_cleanup and deactivate the
4824  * timer when no process remains waiting.
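 * Only one waiter is woken per invocation; *stat_countp records how
 * often the timer expired for the resource that scheduled it.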
4825 */ 4826 void 4827 pause_timer(arg) 4828 void *arg; 4829 { 4830 *stat_countp += 1; 4831 wakeup_one(&proc_waiting); 4832 if (proc_waiting > 0) 4833 callout_reset(&handle, tickdelay > 2 ? tickdelay : 2, 4834 pause_timer, NULL); 4835 else 4836 callout_deactivate(&handle); 4837 } 4838 4839 /* 4840 * Flush out a directory with at least one removal dependency in an effort to 4841 * reduce the number of dirrem, freefile, and freeblks dependency structures. 4842 */ 4843 static void 4844 clear_remove(struct thread *td) 4845 { 4846 struct pagedep_hashhead *pagedephd; 4847 struct pagedep *pagedep; 4848 static int next = 0; 4849 struct mount *mp; 4850 struct vnode *vp; 4851 int error, cnt; 4852 ino_t ino; 4853 4854 ACQUIRE_LOCK(&lk); 4855 for (cnt = 0; cnt < pagedep_hash; cnt++) { 4856 pagedephd = &pagedep_hashtbl[next++]; 4857 if (next >= pagedep_hash) 4858 next = 0; 4859 LIST_FOREACH(pagedep, pagedephd, pd_hash) { 4860 if (LIST_FIRST(&pagedep->pd_dirremhd) == NULL) 4861 continue; 4862 mp = pagedep->pd_mnt; 4863 ino = pagedep->pd_ino; 4864 FREE_LOCK(&lk); 4865 if ((error = VFS_VGET(mp, ino, &vp)) != 0) { 4866 softdep_error("clear_remove: vget", error); 4867 return; 4868 } 4869 if ((error = VOP_FSYNC(vp, MNT_NOWAIT))) 4870 softdep_error("clear_remove: fsync", error); 4871 drain_output(vp, 0); 4872 vput(vp); 4873 return; 4874 } 4875 } 4876 FREE_LOCK(&lk); 4877 } 4878 4879 /* 4880 * Clear out a block of dirty inodes in an effort to reduce 4881 * the number of inodedep dependency structures. 4882 */ 4883 struct clear_inodedeps_info { 4884 struct fs *fs; 4885 struct mount *mp; 4886 }; 4887 4888 static int 4889 clear_inodedeps_mountlist_callback(struct mount *mp, void *data) 4890 { 4891 struct clear_inodedeps_info *info = data; 4892 4893 if ((mp->mnt_flag & MNT_SOFTDEP) && info->fs == VFSTOUFS(mp)->um_fs) { 4894 info->mp = mp; 4895 return(-1); 4896 } 4897 return(0); 4898 } 4899 4900 static void 4901 clear_inodedeps(struct thread *td) 4902 { 4903 struct clear_inodedeps_info info; 4904 struct inodedep_hashhead *inodedephd; 4905 struct inodedep *inodedep; 4906 static int next = 0; 4907 struct vnode *vp; 4908 struct fs *fs; 4909 int error, cnt; 4910 ino_t firstino, lastino, ino; 4911 4912 ACQUIRE_LOCK(&lk); 4913 /* 4914 * Pick a random inode dependency to be cleared. 4915 * We will then gather up all the inodes in its block 4916 * that have dependencies and flush them out. 4917 */ 4918 for (cnt = 0; cnt < inodedep_hash; cnt++) { 4919 inodedephd = &inodedep_hashtbl[next++]; 4920 if (next >= inodedep_hash) 4921 next = 0; 4922 if ((inodedep = LIST_FIRST(inodedephd)) != NULL) 4923 break; 4924 } 4925 if (inodedep == NULL) { 4926 FREE_LOCK(&lk); 4927 return; 4928 } 4929 /* 4930 * Ugly code to find mount point given pointer to superblock. 4931 */ 4932 fs = inodedep->id_fs; 4933 info.mp = NULL; 4934 info.fs = fs; 4935 mountlist_scan(clear_inodedeps_mountlist_callback, 4936 &info, MNTSCAN_FORWARD|MNTSCAN_NOBUSY); 4937 /* 4938 * Find the last inode in the block with dependencies. 4939 */ 4940 firstino = inodedep->id_ino & ~(INOPB(fs) - 1); 4941 for (lastino = firstino + INOPB(fs) - 1; lastino > firstino; lastino--) 4942 if (inodedep_lookup(fs, lastino, 0, &inodedep) != 0) 4943 break; 4944 /* 4945 * Asynchronously push all but the last inode with dependencies. 4946 * Synchronously push the last inode with dependencies to ensure 4947 * that the inode block gets written to free up the inodedeps. 
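	 * All of these inodes live in the same on-disk inode block, so
	 * the final MNT_WAIT fsync forces that block out once for the
	 * whole group.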
4948 */ 4949 for (ino = firstino; ino <= lastino; ino++) { 4950 if (inodedep_lookup(fs, ino, 0, &inodedep) == 0) 4951 continue; 4952 FREE_LOCK(&lk); 4953 if ((error = VFS_VGET(info.mp, ino, &vp)) != 0) { 4954 softdep_error("clear_inodedeps: vget", error); 4955 return; 4956 } 4957 if (ino == lastino) { 4958 if ((error = VOP_FSYNC(vp, MNT_WAIT))) 4959 softdep_error("clear_inodedeps: fsync1", error); 4960 } else { 4961 if ((error = VOP_FSYNC(vp, MNT_NOWAIT))) 4962 softdep_error("clear_inodedeps: fsync2", error); 4963 drain_output(vp, 0); 4964 } 4965 vput(vp); 4966 ACQUIRE_LOCK(&lk); 4967 } 4968 FREE_LOCK(&lk); 4969 } 4970 4971 /* 4972 * Function to determine if the buffer has outstanding dependencies 4973 * that will cause a roll-back if the buffer is written. If wantcount 4974 * is set, return number of dependencies, otherwise just yes or no. 4975 */ 4976 static int 4977 softdep_count_dependencies(bp, wantcount) 4978 struct buf *bp; 4979 int wantcount; 4980 { 4981 struct worklist *wk; 4982 struct inodedep *inodedep; 4983 struct indirdep *indirdep; 4984 struct allocindir *aip; 4985 struct pagedep *pagedep; 4986 struct diradd *dap; 4987 int i, retval; 4988 4989 retval = 0; 4990 ACQUIRE_LOCK(&lk); 4991 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 4992 switch (wk->wk_type) { 4993 4994 case D_INODEDEP: 4995 inodedep = WK_INODEDEP(wk); 4996 if ((inodedep->id_state & DEPCOMPLETE) == 0) { 4997 /* bitmap allocation dependency */ 4998 retval += 1; 4999 if (!wantcount) 5000 goto out; 5001 } 5002 if (TAILQ_FIRST(&inodedep->id_inoupdt)) { 5003 /* direct block pointer dependency */ 5004 retval += 1; 5005 if (!wantcount) 5006 goto out; 5007 } 5008 continue; 5009 5010 case D_INDIRDEP: 5011 indirdep = WK_INDIRDEP(wk); 5012 5013 LIST_FOREACH(aip, &indirdep->ir_deplisthd, ai_next) { 5014 /* indirect block pointer dependency */ 5015 retval += 1; 5016 if (!wantcount) 5017 goto out; 5018 } 5019 continue; 5020 5021 case D_PAGEDEP: 5022 pagedep = WK_PAGEDEP(wk); 5023 for (i = 0; i < DAHASHSZ; i++) { 5024 5025 LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) { 5026 /* directory entry dependency */ 5027 retval += 1; 5028 if (!wantcount) 5029 goto out; 5030 } 5031 } 5032 continue; 5033 5034 case D_BMSAFEMAP: 5035 case D_ALLOCDIRECT: 5036 case D_ALLOCINDIR: 5037 case D_MKDIR: 5038 /* never a dependency on these blocks */ 5039 continue; 5040 5041 default: 5042 FREE_LOCK(&lk); 5043 panic("softdep_check_for_rollback: Unexpected type %s", 5044 TYPENAME(wk->wk_type)); 5045 /* NOTREACHED */ 5046 } 5047 } 5048 out: 5049 FREE_LOCK(&lk); 5050 return retval; 5051 } 5052 5053 /* 5054 * Acquire exclusive access to a buffer. 5055 * Must be called with splbio blocked. 5056 * Return 1 if buffer was acquired. 5057 */ 5058 static int 5059 getdirtybuf(bpp, waitfor) 5060 struct buf **bpp; 5061 int waitfor; 5062 { 5063 struct buf *bp; 5064 int error; 5065 5066 for (;;) { 5067 if ((bp = *bpp) == NULL) 5068 return (0); 5069 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) == 0) 5070 break; 5071 if (waitfor != MNT_WAIT) 5072 return (0); 5073 error = interlocked_sleep(&lk, LOCKBUF, bp, 5074 LK_EXCLUSIVE | LK_SLEEPFAIL, 0, 0); 5075 if (error != ENOLCK) { 5076 FREE_LOCK(&lk); 5077 panic("getdirtybuf: inconsistent lock"); 5078 } 5079 } 5080 if ((bp->b_flags & B_DELWRI) == 0) { 5081 BUF_UNLOCK(bp); 5082 return (0); 5083 } 5084 bremfree(bp); 5085 return (1); 5086 } 5087 5088 /* 5089 * Wait for pending output on a vnode to complete. 5090 * Must be called with vnode locked. 
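 * The islocked flag indicates whether the caller already holds lk; the
 * wait itself sleeps interlocked against lk so that write completions
 * can update v_track_write while we sleep.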
5091 */ 5092 static void 5093 drain_output(vp, islocked) 5094 struct vnode *vp; 5095 int islocked; 5096 { 5097 5098 if (!islocked) 5099 ACQUIRE_LOCK(&lk); 5100 while (vp->v_track_write.bk_active) { 5101 vp->v_track_write.bk_waitflag = 1; 5102 interlocked_sleep(&lk, SLEEP, &vp->v_track_write, 5103 0, "drainvp", 0); 5104 } 5105 if (!islocked) 5106 FREE_LOCK(&lk); 5107 } 5108 5109 /* 5110 * Called whenever a buffer that is being invalidated or reallocated 5111 * contains dependencies. This should only happen if an I/O error has 5112 * occurred. The routine is called with the buffer locked. 5113 */ 5114 static void 5115 softdep_deallocate_dependencies(bp) 5116 struct buf *bp; 5117 { 5118 5119 if ((bp->b_flags & B_ERROR) == 0) 5120 panic("softdep_deallocate_dependencies: dangling deps"); 5121 softdep_error(bp->b_vp->v_mount->mnt_stat.f_mntfromname, bp->b_error); 5122 panic("softdep_deallocate_dependencies: unrecovered I/O error"); 5123 } 5124 5125 /* 5126 * Function to handle asynchronous write errors in the filesystem. 5127 */ 5128 void 5129 softdep_error(func, error) 5130 char *func; 5131 int error; 5132 { 5133 5134 /* XXX should do something better! */ 5135 kprintf("%s: got error %d while accessing filesystem\n", func, error); 5136 } 5137