/*
 * Copyright 1998, 2000 Marshall Kirk McKusick.  All Rights Reserved.
 *
 * The soft updates code is derived from the appendix of a University
 * of Michigan technical report (Gregory R. Ganger and Yale N. Patt,
 * "Soft Updates: A Solution to the Metadata Update Problem in File
 * Systems", CSE-TR-254-95, August 1995).
 *
 * Further information about soft updates can be obtained from:
 *
 *	Marshall Kirk McKusick		http://www.mckusick.com/softdep/
 *	1614 Oxford Street		mckusick@mckusick.com
 *	Berkeley, CA 94709-1608		+1-510-843-9542
 *	USA
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY MARSHALL KIRK MCKUSICK ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL MARSHALL KIRK MCKUSICK BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)ffs_softdep.c	9.59 (McKusick) 6/21/00
 * $FreeBSD: src/sys/ufs/ffs/ffs_softdep.c,v 1.57.2.11 2002/02/05 18:46:53 dillon Exp $
 */

/*
 * For now we want the safety net that the DIAGNOSTIC and DEBUG flags provide.
 */
#ifndef DIAGNOSTIC
#define DIAGNOSTIC
#endif
#ifndef DEBUG
#define DEBUG
#endif

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/syslog.h>
#include <sys/vnode.h>
#include <sys/conf.h>
#include <machine/inttypes.h>
#include "dir.h"
#include "quota.h"
#include "inode.h"
#include "ufsmount.h"
#include "fs.h"
#include "softdep.h"
#include "ffs_extern.h"
#include "ufs_extern.h"

#include <sys/buf2.h>
#include <sys/mplock2.h>
#include <sys/thread2.h>

/*
 * These definitions need to be adapted to the system to which
 * this file is being ported.
 */
/*
 * malloc types defined for the softdep system.
 */
MALLOC_DEFINE(M_PAGEDEP, "pagedep", "File page dependencies");
MALLOC_DEFINE(M_INODEDEP, "inodedep", "Inode dependencies");
MALLOC_DEFINE(M_NEWBLK, "newblk", "New block allocation");
MALLOC_DEFINE(M_BMSAFEMAP, "bmsafemap", "Block or frag allocated from cyl group map");
MALLOC_DEFINE(M_ALLOCDIRECT, "allocdirect", "Block or frag dependency for an inode");
MALLOC_DEFINE(M_INDIRDEP, "indirdep", "Indirect block dependencies");
MALLOC_DEFINE(M_ALLOCINDIR, "allocindir", "Block dependency for an indirect block");
MALLOC_DEFINE(M_FREEFRAG, "freefrag", "Previously used frag for an inode");
MALLOC_DEFINE(M_FREEBLKS, "freeblks", "Blocks freed from an inode");
MALLOC_DEFINE(M_FREEFILE, "freefile", "Inode deallocated");
MALLOC_DEFINE(M_DIRADD, "diradd", "New directory entry");
MALLOC_DEFINE(M_MKDIR, "mkdir", "New directory");
MALLOC_DEFINE(M_DIRREM, "dirrem", "Directory entry deleted");

#define M_SOFTDEP_FLAGS		(M_WAITOK | M_USE_RESERVE)

#define	D_PAGEDEP	0
#define	D_INODEDEP	1
#define	D_NEWBLK	2
#define	D_BMSAFEMAP	3
#define	D_ALLOCDIRECT	4
#define	D_INDIRDEP	5
#define	D_ALLOCINDIR	6
#define	D_FREEFRAG	7
#define	D_FREEBLKS	8
#define	D_FREEFILE	9
#define	D_DIRADD	10
#define	D_MKDIR		11
#define	D_DIRREM	12
#define	D_LAST		D_DIRREM

/*
 * translate from workitem type to memory type
 * MUST match the defines above, such that memtype[D_XXX] == M_XXX
 */
static struct malloc_type *memtype[] = {
	M_PAGEDEP,
	M_INODEDEP,
	M_NEWBLK,
	M_BMSAFEMAP,
	M_ALLOCDIRECT,
	M_INDIRDEP,
	M_ALLOCINDIR,
	M_FREEFRAG,
	M_FREEBLKS,
	M_FREEFILE,
	M_DIRADD,
	M_MKDIR,
	M_DIRREM
};

#define DtoM(type) (memtype[type])

/*
 * Names of malloc types.  Note that D_LAST is itself a valid type,
 * so the bounds check must be inclusive.
 */
#define TYPENAME(type)	\
	((unsigned)(type) <= D_LAST ? memtype[type]->ks_shortdesc : "???")
/*
 * End system adaptation definitions.
 */
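
/*
 * Illustrative sketch: memtype[] must stay in sync with the D_* defines
 * above.  Assuming the generic CTASSERT()/NELEM() macros are available,
 * a compile-time guard could look like:
 *
 *	CTASSERT(NELEM(memtype) == D_LAST + 1);
 */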
/*
 * Internal function prototypes.
 */
static	void softdep_error(char *, int);
static	void drain_output(struct vnode *, int);
static	int getdirtybuf(struct buf **, int);
static	void clear_remove(struct thread *);
static	void clear_inodedeps(struct thread *);
static	int flush_pagedep_deps(struct vnode *, struct mount *,
	    struct diraddhd *);
static	int flush_inodedep_deps(struct fs *, ino_t);
static	int handle_written_filepage(struct pagedep *, struct buf *);
static	void diradd_inode_written(struct diradd *, struct inodedep *);
static	int handle_written_inodeblock(struct inodedep *, struct buf *);
static	void handle_allocdirect_partdone(struct allocdirect *);
static	void handle_allocindir_partdone(struct allocindir *);
static	void initiate_write_filepage(struct pagedep *, struct buf *);
static	void handle_written_mkdir(struct mkdir *, int);
static	void initiate_write_inodeblock(struct inodedep *, struct buf *);
static	void handle_workitem_freefile(struct freefile *);
static	void handle_workitem_remove(struct dirrem *);
static	struct dirrem *newdirrem(struct buf *, struct inode *,
	    struct inode *, int, struct dirrem **);
static	void free_diradd(struct diradd *);
static	void free_allocindir(struct allocindir *, struct inodedep *);
static	int indir_trunc(struct inode *, off_t, int, ufs_lbn_t, long *);
static	void deallocate_dependencies(struct buf *, struct inodedep *);
static	void free_allocdirect(struct allocdirectlst *,
	    struct allocdirect *, int);
static	int check_inode_unwritten(struct inodedep *);
static	int free_inodedep(struct inodedep *);
static	void handle_workitem_freeblocks(struct freeblks *);
static	void merge_inode_lists(struct inodedep *);
static	void setup_allocindir_phase2(struct buf *, struct inode *,
	    struct allocindir *);
static	struct allocindir *newallocindir(struct inode *, int, ufs_daddr_t,
	    ufs_daddr_t);
static	void handle_workitem_freefrag(struct freefrag *);
static	struct freefrag *newfreefrag(struct inode *, ufs_daddr_t, long);
static	void allocdirect_merge(struct allocdirectlst *,
	    struct allocdirect *, struct allocdirect *);
static	struct bmsafemap *bmsafemap_lookup(struct buf *);
static	int newblk_lookup(struct fs *, ufs_daddr_t, int,
	    struct newblk **);
static	int inodedep_lookup(struct fs *, ino_t, int, struct inodedep **);
static	int pagedep_lookup(struct inode *, ufs_lbn_t, int,
	    struct pagedep **);
static	void pause_timer(void *);
static	int request_cleanup(int, int);
static	int process_worklist_item(struct mount *, int);
static	void add_to_worklist(struct worklist *);
/*
 * Exported softdep operations.
 */
static	void softdep_disk_io_initiation(struct buf *);
static	void softdep_disk_write_complete(struct buf *);
static	void softdep_deallocate_dependencies(struct buf *);
static	int softdep_fsync(struct vnode *);
static	int softdep_process_worklist(struct mount *);
static	void softdep_move_dependencies(struct buf *, struct buf *);
static	int softdep_count_dependencies(struct buf *bp, int);
static	int softdep_checkread(struct buf *bp);
static	int softdep_checkwrite(struct buf *bp);

static struct bio_ops softdep_bioops = {
	.io_start = softdep_disk_io_initiation,
	.io_complete = softdep_disk_write_complete,
	.io_deallocate = softdep_deallocate_dependencies,
	.io_fsync = softdep_fsync,
	.io_sync = softdep_process_worklist,
	.io_movedeps = softdep_move_dependencies,
	.io_countdeps = softdep_count_dependencies,
	.io_checkread = softdep_checkread,
	.io_checkwrite = softdep_checkwrite
};

/*
 * Locking primitives.
 *
 * For a uniprocessor, all we need to do is protect against disk
 * interrupts.  For a multiprocessor, this lock would have to be
 * a mutex.  A single mutex is used throughout this file, though
 * finer grain locking could be used if contention warranted it.
 *
 * For a multiprocessor, the sleep call would accept a lock and
 * release it after the sleep processing was complete.  In a uniprocessor
 * implementation there is no such interlock, so we simply mark
 * the places where it needs to be done with the `interlocked' form
 * of the lock calls.  Since the uniprocessor sleep already interlocks
 * the spl, there is nothing that really needs to be done.
 */
#ifndef /* NOT */ DEBUG
static struct lockit {
} lk = { 0 };
#define ACQUIRE_LOCK(lk)	crit_enter_id("softupdates");
#define FREE_LOCK(lk)		crit_exit_id("softupdates");

#else /* DEBUG */
#define NOHOLDER	((struct thread *)-1)
#define SPECIAL_FLAG	((struct thread *)-2)
static struct lockit {
	int	lkt_spl;
	struct thread *lkt_held;
} lk = { 0, NOHOLDER };
static int lockcnt;

static	void acquire_lock(struct lockit *);
static	void free_lock(struct lockit *);
void	softdep_panic(char *);

#define ACQUIRE_LOCK(lk)	acquire_lock(lk)
#define FREE_LOCK(lk)		free_lock(lk)

static void
acquire_lock(struct lockit *lk)
{
	thread_t holder;

	if (lk->lkt_held != NOHOLDER) {
		holder = lk->lkt_held;
		FREE_LOCK(lk);
		if (holder == curthread)
			panic("softdep_lock: locking against myself");
		else
			panic("softdep_lock: lock held by %p", holder);
	}
	crit_enter_id("softupdates");
	lk->lkt_held = curthread;
	lockcnt++;
}

static void
free_lock(struct lockit *lk)
{

	if (lk->lkt_held == NOHOLDER)
		panic("softdep_unlock: lock not held");
	lk->lkt_held = NOHOLDER;
	crit_exit_id("softupdates");
}

/*
 * Function to release soft updates lock and panic.
 */
void
softdep_panic(char *msg)
{

	if (lk.lkt_held != NOHOLDER)
		FREE_LOCK(&lk);
	panic(msg);
}
#endif /* DEBUG */
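
/*
 * Illustrative usage sketch: all dependency-structure manipulation in
 * this file is bracketed by the single global lock, and the lock is
 * dropped before any panic so the DEBUG checks can report cleanly:
 *
 *	ACQUIRE_LOCK(&lk);
 *	... look up, link, or unlink dependency structures ...
 *	FREE_LOCK(&lk);
 */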
static	int interlocked_sleep(struct lockit *, int, void *, int,
	    const char *, int);

/*
 * When going to sleep, we must save our SPL so that it does
 * not get lost if some other process uses the lock while we
 * are sleeping.  We restore it after we have slept.  This routine
 * wraps the interlocking with functions that sleep.  The list
 * below enumerates the available set of operations.
 */
#define UNKNOWN		0
#define SLEEP		1
#define LOCKBUF		2

static int
interlocked_sleep(struct lockit *lk, int op, void *ident, int flags,
		  const char *wmesg, int timo)
{
	thread_t holder;
	int s, retval;

	s = lk->lkt_spl;
#	ifdef DEBUG
	if (lk->lkt_held == NOHOLDER)
		panic("interlocked_sleep: lock not held");
	lk->lkt_held = NOHOLDER;
#	endif /* DEBUG */
	switch (op) {
	case SLEEP:
		retval = tsleep(ident, flags, wmesg, timo);
		break;
	case LOCKBUF:
		retval = BUF_LOCK((struct buf *)ident, flags);
		break;
	default:
		panic("interlocked_sleep: unknown operation");
	}
#	ifdef DEBUG
	if (lk->lkt_held != NOHOLDER) {
		holder = lk->lkt_held;
		FREE_LOCK(lk);
		if (holder == curthread)
			panic("interlocked_sleep: locking against self");
		else
			panic("interlocked_sleep: lock held by %p", holder);
	}
	lk->lkt_held = curthread;
	lockcnt++;
#	endif /* DEBUG */
	lk->lkt_spl = s;
	return (retval);
}

/*
 * Place holder for real semaphores.
 */
struct sema {
	int	value;
	thread_t holder;
	char	*name;
	int	prio;
	int	timo;
};
static	void sema_init(struct sema *, char *, int, int);
static	int sema_get(struct sema *, struct lockit *);
static	void sema_release(struct sema *);

static void
sema_init(struct sema *semap, char *name, int prio, int timo)
{

	semap->holder = NOHOLDER;
	semap->value = 0;
	semap->name = name;
	semap->prio = prio;
	semap->timo = timo;
}

static int
sema_get(struct sema *semap, struct lockit *interlock)
{

	if (semap->value++ > 0) {
		if (interlock != NULL) {
			interlocked_sleep(interlock, SLEEP, (caddr_t)semap,
			    semap->prio, semap->name, semap->timo);
			FREE_LOCK(interlock);
		} else {
			tsleep((caddr_t)semap, semap->prio, semap->name,
			    semap->timo);
		}
		return (0);
	}
	semap->holder = curthread;
	if (interlock != NULL)
		FREE_LOCK(interlock);
	return (1);
}

static void
sema_release(struct sema *semap)
{

	if (semap->value <= 0 || semap->holder != curthread) {
		if (lk.lkt_held != NOHOLDER)
			FREE_LOCK(&lk);
		panic("sema_release: not held");
	}
	if (--semap->value > 0) {
		semap->value = 0;
		wakeup(semap);
	}
	semap->holder = NOHOLDER;
}
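
/*
 * Illustrative semantics sketch: sema_get() returns 0 when the
 * semaphore was already held (the caller slept and must retry its
 * lookup) and 1 once the caller owns it; the owner later calls
 * sema_release().  The lookup routines below all follow this shape:
 *
 *	if (sema_get(&pagedep_in_progress, &lk) == 0) {
 *		ACQUIRE_LOCK(&lk);
 *		goto top;
 *	}
 *	... allocate and hash the new structure ...
 *	sema_release(&pagedep_in_progress);
 */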
/*
 * Worklist queue management.
 * These routines require that the lock be held.
 */
#ifndef /* NOT */ DEBUG
#define WORKLIST_INSERT(head, item) do {	\
	(item)->wk_state |= ONWORKLIST;		\
	LIST_INSERT_HEAD(head, item, wk_list);	\
} while (0)

#define WORKLIST_INSERT_BP(bp, item) do {		\
	(item)->wk_state |= ONWORKLIST;			\
	(bp)->b_ops = &softdep_bioops;			\
	LIST_INSERT_HEAD(&(bp)->b_dep, item, wk_list);	\
} while (0)

#define WORKLIST_REMOVE(item) do {		\
	(item)->wk_state &= ~ONWORKLIST;	\
	LIST_REMOVE(item, wk_list);		\
} while (0)

#define WORKITEM_FREE(item, type) kfree(item, DtoM(type))

#else /* DEBUG */
static	void worklist_insert(struct workhead *, struct worklist *);
static	void worklist_remove(struct worklist *);
static	void workitem_free(struct worklist *, int);

#define WORKLIST_INSERT_BP(bp, item) do {	\
	(bp)->b_ops = &softdep_bioops;		\
	worklist_insert(&(bp)->b_dep, item);	\
} while (0)

#define WORKLIST_INSERT(head, item) worklist_insert(head, item)
#define WORKLIST_REMOVE(item) worklist_remove(item)
#define WORKITEM_FREE(item, type) workitem_free((struct worklist *)item, type)

static void
worklist_insert(struct workhead *head, struct worklist *item)
{

	if (lk.lkt_held == NOHOLDER)
		panic("worklist_insert: lock not held");
	if (item->wk_state & ONWORKLIST) {
		FREE_LOCK(&lk);
		panic("worklist_insert: already on list");
	}
	item->wk_state |= ONWORKLIST;
	LIST_INSERT_HEAD(head, item, wk_list);
}

static void
worklist_remove(struct worklist *item)
{

	if (lk.lkt_held == NOHOLDER)
		panic("worklist_remove: lock not held");
	if ((item->wk_state & ONWORKLIST) == 0) {
		FREE_LOCK(&lk);
		panic("worklist_remove: not on list");
	}
	item->wk_state &= ~ONWORKLIST;
	LIST_REMOVE(item, wk_list);
}

static void
workitem_free(struct worklist *item, int type)
{

	if (item->wk_state & ONWORKLIST) {
		if (lk.lkt_held != NOHOLDER)
			FREE_LOCK(&lk);
		panic("workitem_free: still on list");
	}
	if (item->wk_type != type) {
		if (lk.lkt_held != NOHOLDER)
			FREE_LOCK(&lk);
		panic("workitem_free: type mismatch");
	}
	kfree(item, DtoM(type));
}
#endif /* DEBUG */

/*
 * Workitem queue management
 */
static struct workhead softdep_workitem_pending;
static int num_on_worklist;	/* number of worklist items to be processed */
static int softdep_worklist_busy; /* 1 => trying to do unmount */
static int softdep_worklist_req; /* serialized waiters */
static int max_softdeps;	/* maximum number of structs before slowdown */
static int tickdelay = 2;	/* number of ticks to pause during slowdown */
static int *stat_countp;	/* statistic to count in proc_waiting timeout */
static int proc_waiting;	/* tracks whether we have a timeout posted */
static struct callout handle;	/* handle on posted proc_waiting timeout */
static struct thread *filesys_syncer; /* proc of filesystem syncer process */
static int req_clear_inodedeps;	/* syncer process flush some inodedeps */
#define FLUSH_INODES	1
static int req_clear_remove;	/* syncer process flush some freeblks */
#define FLUSH_REMOVE	2
/*
 * runtime statistics
 */
static int stat_worklist_push;	/* number of worklist cleanups */
static int stat_blk_limit_push;	/* number of times block limit neared */
static int stat_ino_limit_push;	/* number of times inode limit neared */
static int stat_blk_limit_hit;	/* number of times block slowdown imposed */
static int stat_ino_limit_hit;	/* number of times inode slowdown imposed */
static int stat_sync_limit_hit;	/* number of synchronous slowdowns imposed */
static int stat_indir_blk_ptrs;	/* bufs redirtied as indir ptrs not written */
static int stat_inode_bitmap;	/* bufs redirtied as inode bitmap not written */
static int stat_direct_blk_ptrs;/* bufs redirtied as direct ptrs not written */
static int stat_dir_entry;	/* bufs redirtied as dir entry cannot write */
#ifdef DEBUG
#include <vm/vm.h>
#include <sys/sysctl.h>
SYSCTL_INT(_debug, OID_AUTO, max_softdeps, CTLFLAG_RW, &max_softdeps, 0,
    "Maximum soft dependencies before slowdown occurs");
SYSCTL_INT(_debug, OID_AUTO, tickdelay, CTLFLAG_RW, &tickdelay, 0,
    "Ticks to delay before allocating during slowdown");
SYSCTL_INT(_debug, OID_AUTO, worklist_push, CTLFLAG_RW, &stat_worklist_push, 0,
    "Number of worklist cleanups");
SYSCTL_INT(_debug, OID_AUTO, blk_limit_push, CTLFLAG_RW, &stat_blk_limit_push, 0,
    "Number of times block limit neared");
SYSCTL_INT(_debug, OID_AUTO, ino_limit_push, CTLFLAG_RW, &stat_ino_limit_push, 0,
    "Number of times inode limit neared");
SYSCTL_INT(_debug, OID_AUTO, blk_limit_hit, CTLFLAG_RW, &stat_blk_limit_hit, 0,
    "Number of times block slowdown imposed");
SYSCTL_INT(_debug, OID_AUTO, ino_limit_hit, CTLFLAG_RW, &stat_ino_limit_hit, 0,
    "Number of times inode slowdown imposed");
SYSCTL_INT(_debug, OID_AUTO, sync_limit_hit, CTLFLAG_RW, &stat_sync_limit_hit, 0,
    "Number of synchronous slowdowns imposed");
SYSCTL_INT(_debug, OID_AUTO, indir_blk_ptrs, CTLFLAG_RW, &stat_indir_blk_ptrs, 0,
    "Bufs redirtied as indir ptrs not written");
SYSCTL_INT(_debug, OID_AUTO, inode_bitmap, CTLFLAG_RW, &stat_inode_bitmap, 0,
    "Bufs redirtied as inode bitmap not written");
SYSCTL_INT(_debug, OID_AUTO, direct_blk_ptrs, CTLFLAG_RW, &stat_direct_blk_ptrs, 0,
    "Bufs redirtied as direct ptrs not written");
SYSCTL_INT(_debug, OID_AUTO, dir_entry, CTLFLAG_RW, &stat_dir_entry, 0,
    "Bufs redirtied as dir entry cannot write");
#endif /* DEBUG */

/*
 * Add an item to the end of the work queue.
 * This routine requires that the lock be held.
 * This is the only routine that adds items to the list.
 * The following routine is the only one that removes items
 * and does so in order from first to last.
 */
static void
add_to_worklist(struct worklist *wk)
{
	static struct worklist *worklist_tail;

	if (wk->wk_state & ONWORKLIST) {
		if (lk.lkt_held != NOHOLDER)
			FREE_LOCK(&lk);
		panic("add_to_worklist: already on list");
	}
	wk->wk_state |= ONWORKLIST;
	if (LIST_FIRST(&softdep_workitem_pending) == NULL)
		LIST_INSERT_HEAD(&softdep_workitem_pending, wk, wk_list);
	else
		LIST_INSERT_AFTER(worklist_tail, wk, wk_list);
	worklist_tail = wk;
	num_on_worklist += 1;
}
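
/*
 * Illustrative usage sketch: completion handlers queue fully satisfied
 * dependencies here for the background syncer, with the lock held:
 *
 *	ACQUIRE_LOCK(&lk);
 *	add_to_worklist(&freeblks->fb_list);
 *	FREE_LOCK(&lk);
 *
 * The FIFO ordering matters: a file's blocks must be freed before its
 * inode, as the comment below explains.
 */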
/*
 * Process that runs once per second to handle items in the background queue.
 *
 * Note that we ensure that everything is done in the order in which they
 * appear in the queue.  The code below depends on this property to ensure
 * that blocks of a file are freed before the inode itself is freed.  This
 * ordering ensures that no new <vfsid, inum, lbn> triples will be generated
 * until all the old ones have been purged from the dependency lists.
 *
 * bioops callback - hold io_token
 */
static int
softdep_process_worklist(struct mount *matchmnt)
{
	thread_t td = curthread;
	int matchcnt, loopcount;
	long starttime;

	get_mplock();

	/*
	 * Record the process identifier of our caller so that we can give
	 * this process preferential treatment in request_cleanup below.
	 */
	filesys_syncer = td;
	matchcnt = 0;

	/*
	 * There is no danger of having multiple processes run this
	 * code, but we have to single-thread it when softdep_flushfiles()
	 * is in operation to get an accurate count of the number of items
	 * related to its mount point that are in the list.
	 */
	if (matchmnt == NULL) {
		if (softdep_worklist_busy < 0) {
			matchcnt = -1;
			goto done;
		}
		softdep_worklist_busy += 1;
	}

	/*
	 * If requested, try removing inode or removal dependencies.
	 */
	if (req_clear_inodedeps) {
		clear_inodedeps(td);
		req_clear_inodedeps -= 1;
		wakeup_one(&proc_waiting);
	}
	if (req_clear_remove) {
		clear_remove(td);
		req_clear_remove -= 1;
		wakeup_one(&proc_waiting);
	}
	loopcount = 1;
	starttime = time_second;
	while (num_on_worklist > 0) {
		matchcnt += process_worklist_item(matchmnt, 0);

		/*
		 * If a umount operation wants to run the worklist
		 * accurately, abort.
		 */
		if (softdep_worklist_req && matchmnt == NULL) {
			matchcnt = -1;
			break;
		}

		/*
		 * If requested, try removing inode or removal dependencies.
		 */
		if (req_clear_inodedeps) {
			clear_inodedeps(td);
			req_clear_inodedeps -= 1;
			wakeup_one(&proc_waiting);
		}
		if (req_clear_remove) {
			clear_remove(td);
			req_clear_remove -= 1;
			wakeup_one(&proc_waiting);
		}
		/*
		 * We do not generally want to stop for buffer space, but if
		 * we are really being a buffer hog, we will stop and wait.
		 */
		if (loopcount++ % 128 == 0)
			bwillinode(1);
		/*
		 * Never allow processing to run for more than one
		 * second.  Otherwise the other syncer tasks may get
		 * excessively backlogged.
		 */
		if (starttime != time_second && matchmnt == NULL) {
			matchcnt = -1;
			break;
		}
	}
	if (matchmnt == NULL) {
		--softdep_worklist_busy;
		if (softdep_worklist_req && softdep_worklist_busy == 0)
			wakeup(&softdep_worklist_req);
	}
done:
	rel_mplock();
	return (matchcnt);
}

/*
 * Process one item on the worklist.
 */
static int
process_worklist_item(struct mount *matchmnt, int flags)
{
	struct worklist *wk;
	struct dirrem *dirrem;
	struct fs *matchfs;
	struct vnode *vp;
	int matchcnt = 0;

	matchfs = NULL;
	if (matchmnt != NULL)
		matchfs = VFSTOUFS(matchmnt)->um_fs;
	ACQUIRE_LOCK(&lk);
	/*
	 * Normally we just process each item on the worklist in order.
	 * However, if we are in a situation where we cannot lock any
	 * inodes, we have to skip over any dirrem requests whose
	 * vnodes are resident and locked.
	 */
	LIST_FOREACH(wk, &softdep_workitem_pending, wk_list) {
		if ((flags & LK_NOWAIT) == 0 || wk->wk_type != D_DIRREM)
			break;
		dirrem = WK_DIRREM(wk);
		vp = ufs_ihashlookup(VFSTOUFS(dirrem->dm_mnt)->um_dev,
		    dirrem->dm_oldinum);
		if (vp == NULL || !vn_islocked(vp))
			break;
	}
	if (wk == NULL) {
		FREE_LOCK(&lk);
		return (0);
	}
	WORKLIST_REMOVE(wk);
	num_on_worklist -= 1;
	FREE_LOCK(&lk);
	switch (wk->wk_type) {

	case D_DIRREM:
		/* removal of a directory entry */
		if (WK_DIRREM(wk)->dm_mnt == matchmnt)
			matchcnt += 1;
		handle_workitem_remove(WK_DIRREM(wk));
		break;

	case D_FREEBLKS:
		/* releasing blocks and/or fragments from a file */
		if (WK_FREEBLKS(wk)->fb_fs == matchfs)
			matchcnt += 1;
		handle_workitem_freeblocks(WK_FREEBLKS(wk));
		break;

	case D_FREEFRAG:
		/* releasing a fragment when replaced as a file grows */
		if (WK_FREEFRAG(wk)->ff_fs == matchfs)
			matchcnt += 1;
		handle_workitem_freefrag(WK_FREEFRAG(wk));
		break;

	case D_FREEFILE:
		/* releasing an inode when its link count drops to 0 */
		if (WK_FREEFILE(wk)->fx_fs == matchfs)
			matchcnt += 1;
		handle_workitem_freefile(WK_FREEFILE(wk));
		break;

	default:
		panic("%s_process_worklist: Unknown type %s",
		    "softdep", TYPENAME(wk->wk_type));
		/* NOTREACHED */
	}
	return (matchcnt);
}

/*
 * Move dependencies from one buffer to another.
 *
 * bioops callback - hold io_token
 */
static void
softdep_move_dependencies(struct buf *oldbp, struct buf *newbp)
{
	struct worklist *wk, *wktail;

	get_mplock();
	if (LIST_FIRST(&newbp->b_dep) != NULL)
		panic("softdep_move_dependencies: need merge code");
	wktail = NULL;
	ACQUIRE_LOCK(&lk);
	while ((wk = LIST_FIRST(&oldbp->b_dep)) != NULL) {
		LIST_REMOVE(wk, wk_list);
		if (wktail == NULL)
			LIST_INSERT_HEAD(&newbp->b_dep, wk, wk_list);
		else
			LIST_INSERT_AFTER(wktail, wk, wk_list);
		wktail = wk;
		newbp->b_ops = &softdep_bioops;
	}
	FREE_LOCK(&lk);
	rel_mplock();
}

/*
 * Purge the work list of all items associated with a particular mount point.
 */
int
softdep_flushfiles(struct mount *oldmnt, int flags)
{
	struct vnode *devvp;
	int error, loopcnt;

	/*
	 * Await our turn to clear out the queue, then serialize access.
	 */
	while (softdep_worklist_busy != 0) {
		softdep_worklist_req += 1;
		tsleep(&softdep_worklist_req, 0, "softflush", 0);
		softdep_worklist_req -= 1;
	}
	softdep_worklist_busy = -1;

	if ((error = ffs_flushfiles(oldmnt, flags)) != 0) {
		softdep_worklist_busy = 0;
		if (softdep_worklist_req)
			wakeup(&softdep_worklist_req);
		return (error);
	}
	/*
	 * Alternately flush the block device associated with the mount
	 * point and process any dependencies that the flushing
	 * creates.  In theory, this loop can happen at most twice,
	 * but we give it a few extra just to be sure.
	 */
	devvp = VFSTOUFS(oldmnt)->um_devvp;
	for (loopcnt = 10; loopcnt > 0; ) {
		if (softdep_process_worklist(oldmnt) == 0) {
			loopcnt--;
			/*
			 * Do another flush in case any vnodes were brought in
			 * as part of the cleanup operations.
			 */
			if ((error = ffs_flushfiles(oldmnt, flags)) != 0)
				break;
			/*
			 * If we still found nothing to do, we are really done.
			 */
			if (softdep_process_worklist(oldmnt) == 0)
				break;
		}
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(devvp, MNT_WAIT, 0);
		vn_unlock(devvp);
		if (error)
			break;
	}
	softdep_worklist_busy = 0;
	if (softdep_worklist_req)
		wakeup(&softdep_worklist_req);

	/*
	 * If we are unmounting then it is an error to fail.  If we
	 * are simply trying to downgrade to read-only, then filesystem
	 * activity can keep us busy forever, so we just fail with EBUSY.
	 */
	if (loopcnt == 0) {
		if (oldmnt->mnt_kern_flag & MNTK_UNMOUNT)
			panic("softdep_flushfiles: looping");
		error = EBUSY;
	}
	return (error);
}

/*
 * Structure hashing.
 *
 * There are three types of structures that can be looked up:
 *	1) pagedep structures identified by mount point, inode number,
 *	   and logical block.
 *	2) inodedep structures identified by mount point and inode number.
 *	3) newblk structures identified by mount point and
 *	   physical block number.
 *
 * The "pagedep" and "inodedep" dependency structures are hashed
 * separately from the file blocks and inodes to which they correspond.
 * This separation helps when the in-memory copy of an inode or
 * file block must be replaced.  It also obviates the need to access
 * an inode or file page when simply updating (or de-allocating)
 * dependency structures.  Lookup of newblk structures is needed to
 * find newly allocated blocks when trying to associate them with
 * their allocdirect or allocindir structure.
 *
 * The lookup routines optionally create and hash a new instance when
 * an existing entry is not found.
 */
#define DEPALLOC	0x0001	/* allocate structure if lookup fails */
#define NODELAY		0x0002	/* cannot do background work */

/*
 * Structures and routines associated with pagedep caching.
 */
LIST_HEAD(pagedep_hashhead, pagedep) *pagedep_hashtbl;
u_long pagedep_hash;		/* size of hash table - 1 */
#define PAGEDEP_HASH(mp, inum, lbn) \
	(&pagedep_hashtbl[((((register_t)(mp)) >> 13) + (inum) + (lbn)) & \
	pagedep_hash])
static struct sema pagedep_in_progress;

/*
 * Helper routine for pagedep_lookup()
 */
static __inline
struct pagedep *
pagedep_find(struct pagedep_hashhead *pagedephd, ino_t ino, ufs_lbn_t lbn,
	     struct mount *mp)
{
	struct pagedep *pagedep;

	LIST_FOREACH(pagedep, pagedephd, pd_hash) {
		if (ino == pagedep->pd_ino &&
		    lbn == pagedep->pd_lbn &&
		    mp == pagedep->pd_mnt) {
			return (pagedep);
		}
	}
	return (NULL);
}
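
/*
 * Illustrative usage sketch: callers invoke the lookup routines with
 * the softdep lock held and pass DEPALLOC when a missing entry should
 * be created:
 *
 *	ACQUIRE_LOCK(&lk);
 *	if (pagedep_lookup(ip, lbn, DEPALLOC, &pagedep) == 0)
 *		... a new pagedep was allocated and hashed ...
 *	FREE_LOCK(&lk);
 */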
/*
 * Look up a pagedep.  Return 1 if found, 0 if not found.
 * If not found, allocate if DEPALLOC flag is passed.
 * Found or allocated entry is returned in pagedeppp.
 * This routine must be called with splbio interrupts blocked.
 */
static int
pagedep_lookup(struct inode *ip, ufs_lbn_t lbn, int flags,
	       struct pagedep **pagedeppp)
{
	struct pagedep *pagedep;
	struct pagedep_hashhead *pagedephd;
	struct mount *mp;
	int i;

#ifdef DEBUG
	if (lk.lkt_held == NOHOLDER)
		panic("pagedep_lookup: lock not held");
#endif
	mp = ITOV(ip)->v_mount;
	pagedephd = PAGEDEP_HASH(mp, ip->i_number, lbn);
top:
	*pagedeppp = pagedep_find(pagedephd, ip->i_number, lbn, mp);
	if (*pagedeppp)
		return (1);
	if ((flags & DEPALLOC) == 0)
		return (0);
	if (sema_get(&pagedep_in_progress, &lk) == 0) {
		ACQUIRE_LOCK(&lk);
		goto top;
	}
	pagedep = kmalloc(sizeof(struct pagedep), M_PAGEDEP,
			  M_SOFTDEP_FLAGS | M_ZERO);

	if (pagedep_find(pagedephd, ip->i_number, lbn, mp)) {
		kprintf("pagedep_lookup: blocking race avoided\n");
		ACQUIRE_LOCK(&lk);
		sema_release(&pagedep_in_progress);
		kfree(pagedep, M_PAGEDEP);
		goto top;
	}

	pagedep->pd_list.wk_type = D_PAGEDEP;
	pagedep->pd_mnt = mp;
	pagedep->pd_ino = ip->i_number;
	pagedep->pd_lbn = lbn;
	LIST_INIT(&pagedep->pd_dirremhd);
	LIST_INIT(&pagedep->pd_pendinghd);
	for (i = 0; i < DAHASHSZ; i++)
		LIST_INIT(&pagedep->pd_diraddhd[i]);
	ACQUIRE_LOCK(&lk);
	LIST_INSERT_HEAD(pagedephd, pagedep, pd_hash);
	sema_release(&pagedep_in_progress);
	*pagedeppp = pagedep;
	return (0);
}

/*
 * Structures and routines associated with inodedep caching.
 */
LIST_HEAD(inodedep_hashhead, inodedep) *inodedep_hashtbl;
static u_long inodedep_hash;	/* size of hash table - 1 */
static long num_inodedep;	/* number of inodedep allocated */
#define INODEDEP_HASH(fs, inum) \
	(&inodedep_hashtbl[((((register_t)(fs)) >> 13) + (inum)) & inodedep_hash])
static struct sema inodedep_in_progress;

/*
 * Helper routine for inodedep_lookup()
 */
static __inline
struct inodedep *
inodedep_find(struct inodedep_hashhead *inodedephd, struct fs *fs, ino_t inum)
{
	struct inodedep *inodedep;

	LIST_FOREACH(inodedep, inodedephd, id_hash) {
		if (inum == inodedep->id_ino && fs == inodedep->id_fs)
			return (inodedep);
	}
	return (NULL);
}

/*
 * Look up an inodedep.  Return 1 if found, 0 if not found.
 * If not found, allocate if DEPALLOC flag is passed.
 * Found or allocated entry is returned in inodedeppp.
 * This routine must be called with splbio interrupts blocked.
 */
static int
inodedep_lookup(struct fs *fs, ino_t inum, int flags,
		struct inodedep **inodedeppp)
{
	struct inodedep *inodedep;
	struct inodedep_hashhead *inodedephd;
	int firsttry;

#ifdef DEBUG
	if (lk.lkt_held == NOHOLDER)
		panic("inodedep_lookup: lock not held");
#endif
	firsttry = 1;
	inodedephd = INODEDEP_HASH(fs, inum);
top:
	*inodedeppp = inodedep_find(inodedephd, fs, inum);
	if (*inodedeppp)
		return (1);
	if ((flags & DEPALLOC) == 0)
		return (0);
	/*
	 * If we are over our limit, try to improve the situation.
	 */
	if (num_inodedep > max_softdeps && firsttry &&
	    speedup_syncer() == 0 && (flags & NODELAY) == 0 &&
	    request_cleanup(FLUSH_INODES, 1)) {
		firsttry = 0;
		goto top;
	}
	if (sema_get(&inodedep_in_progress, &lk) == 0) {
		ACQUIRE_LOCK(&lk);
		goto top;
	}
	inodedep = kmalloc(sizeof(struct inodedep), M_INODEDEP,
			   M_SOFTDEP_FLAGS | M_ZERO);
	if (inodedep_find(inodedephd, fs, inum)) {
		kprintf("inodedep_lookup: blocking race avoided\n");
		ACQUIRE_LOCK(&lk);
		sema_release(&inodedep_in_progress);
		kfree(inodedep, M_INODEDEP);
		goto top;
	}
	inodedep->id_list.wk_type = D_INODEDEP;
	inodedep->id_fs = fs;
	inodedep->id_ino = inum;
	inodedep->id_state = ALLCOMPLETE;
	inodedep->id_nlinkdelta = 0;
	inodedep->id_savedino = NULL;
	inodedep->id_savedsize = -1;
	inodedep->id_buf = NULL;
	LIST_INIT(&inodedep->id_pendinghd);
	LIST_INIT(&inodedep->id_inowait);
	LIST_INIT(&inodedep->id_bufwait);
	TAILQ_INIT(&inodedep->id_inoupdt);
	TAILQ_INIT(&inodedep->id_newinoupdt);
	ACQUIRE_LOCK(&lk);
	num_inodedep += 1;
	LIST_INSERT_HEAD(inodedephd, inodedep, id_hash);
	sema_release(&inodedep_in_progress);
	*inodedeppp = inodedep;
	return (0);
}

/*
 * Structures and routines associated with newblk caching.
 */
LIST_HEAD(newblk_hashhead, newblk) *newblk_hashtbl;
u_long newblk_hash;		/* size of hash table - 1 */
#define NEWBLK_HASH(fs, inum) \
	(&newblk_hashtbl[((((register_t)(fs)) >> 13) + (inum)) & newblk_hash])
static struct sema newblk_in_progress;

/*
 * Helper routine for newblk_lookup()
 */
static __inline
struct newblk *
newblk_find(struct newblk_hashhead *newblkhd, struct fs *fs,
	    ufs_daddr_t newblkno)
{
	struct newblk *newblk;

	LIST_FOREACH(newblk, newblkhd, nb_hash) {
		if (newblkno == newblk->nb_newblkno && fs == newblk->nb_fs)
			return (newblk);
	}
	return (NULL);
}

/*
 * Look up a newblk.  Return 1 if found, 0 if not found.
 * If not found, allocate if DEPALLOC flag is passed.
 * Found or allocated entry is returned in newblkpp.
 */
static int
newblk_lookup(struct fs *fs, ufs_daddr_t newblkno, int flags,
	      struct newblk **newblkpp)
{
	struct newblk *newblk;
	struct newblk_hashhead *newblkhd;

	newblkhd = NEWBLK_HASH(fs, newblkno);
top:
	*newblkpp = newblk_find(newblkhd, fs, newblkno);
	if (*newblkpp)
		return (1);
	if ((flags & DEPALLOC) == 0)
		return (0);
	if (sema_get(&newblk_in_progress, NULL) == 0)
		goto top;
	newblk = kmalloc(sizeof(struct newblk), M_NEWBLK,
			 M_SOFTDEP_FLAGS | M_ZERO);

	if (newblk_find(newblkhd, fs, newblkno)) {
		kprintf("newblk_lookup: blocking race avoided\n");
		sema_release(&newblk_in_progress);
		kfree(newblk, M_NEWBLK);
		goto top;
	}
	newblk->nb_state = 0;
	newblk->nb_fs = fs;
	newblk->nb_newblkno = newblkno;
	LIST_INSERT_HEAD(newblkhd, newblk, nb_hash);
	sema_release(&newblk_in_progress);
	*newblkpp = newblk;
	return (0);
}
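
/*
 * Illustrative pattern note: the three lookup routines above share an
 * allocate-then-recheck shape.  The semaphore serializes allocators,
 * but because the lock is dropped around kmalloc(), the hash chain
 * must be searched again before inserting, e.g. for newblk:
 *
 *	if (sema_get(&newblk_in_progress, NULL) == 0)
 *		goto top;		(slept, so retry the lookup)
 *	newblk = kmalloc(...);
 *	if (newblk_find(newblkhd, fs, newblkno)) {
 *		sema_release(&newblk_in_progress);
 *		kfree(newblk, M_NEWBLK);
 *		goto top;		(lost a blocking race)
 *	}
 */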
/*
 * Executed during filesystem initialization before
 * mounting any filesystems.
 */
void
softdep_initialize(void)
{
	callout_init(&handle);

	LIST_INIT(&mkdirlisthd);
	LIST_INIT(&softdep_workitem_pending);
	max_softdeps = min(desiredvnodes * 8,
	    M_INODEDEP->ks_limit / (2 * sizeof(struct inodedep)));
	pagedep_hashtbl = hashinit(desiredvnodes / 5, M_PAGEDEP,
	    &pagedep_hash);
	sema_init(&pagedep_in_progress, "pagedep", 0, 0);
	inodedep_hashtbl = hashinit(desiredvnodes, M_INODEDEP, &inodedep_hash);
	sema_init(&inodedep_in_progress, "inodedep", 0, 0);
	newblk_hashtbl = hashinit(64, M_NEWBLK, &newblk_hash);
	sema_init(&newblk_in_progress, "newblk", 0, 0);
	add_bio_ops(&softdep_bioops);
}

/*
 * Called at mount time to notify the dependency code that a
 * filesystem wishes to use it.
 */
int
softdep_mount(struct vnode *devvp, struct mount *mp, struct fs *fs)
{
	struct csum cstotal;
	struct cg *cgp;
	struct buf *bp;
	int error, cyl;

	mp->mnt_flag &= ~MNT_ASYNC;
	mp->mnt_flag |= MNT_SOFTDEP;
	mp->mnt_bioops = &softdep_bioops;
	/*
	 * When doing soft updates, the counters in the
	 * superblock may have gotten out of sync, so we have
	 * to scan the cylinder groups and recalculate them.
	 */
	if (fs->fs_clean != 0)
		return (0);
	bzero(&cstotal, sizeof cstotal);
	for (cyl = 0; cyl < fs->fs_ncg; cyl++) {
		if ((error = bread(devvp, fsbtodoff(fs, cgtod(fs, cyl)),
				   fs->fs_cgsize, &bp)) != 0) {
			brelse(bp);
			return (error);
		}
		cgp = (struct cg *)bp->b_data;
		cstotal.cs_nffree += cgp->cg_cs.cs_nffree;
		cstotal.cs_nbfree += cgp->cg_cs.cs_nbfree;
		cstotal.cs_nifree += cgp->cg_cs.cs_nifree;
		cstotal.cs_ndir += cgp->cg_cs.cs_ndir;
		fs->fs_cs(fs, cyl) = cgp->cg_cs;
		brelse(bp);
	}
#ifdef DEBUG
	if (bcmp(&cstotal, &fs->fs_cstotal, sizeof cstotal))
		kprintf("ffs_mountfs: superblock updated for soft updates\n");
#endif
	bcopy(&cstotal, &fs->fs_cstotal, sizeof cstotal);
	return (0);
}

/*
 * Protecting the freemaps (or bitmaps).
 *
 * To eliminate the need to execute fsck before mounting a filesystem
 * after a power failure, one must (conservatively) guarantee that the
 * on-disk copy of the bitmaps never indicate that a live inode or block is
 * free.  So, when a block or inode is allocated, the bitmap should be
 * updated (on disk) before any new pointers.  When a block or inode is
 * freed, the bitmap should not be updated until all pointers have been
 * reset.  The latter dependency is handled by the delayed de-allocation
 * approach described below for block and inode de-allocation.  The former
 * dependency is handled by calling the following procedure when a block or
 * inode is allocated.  When an inode is allocated an "inodedep" is created
 * with its DEPCOMPLETE flag cleared until its bitmap is written to disk.
 * Each "inodedep" is also inserted into the hash indexing structure so
 * that any additional link additions can be made dependent on the inode
 * allocation.
 *
 * The ufs filesystem maintains a number of free block counts (e.g., per
 * cylinder group, per cylinder and per <cylinder, rotational position> pair)
 * in addition to the bitmaps.  These counts are used to improve efficiency
 * during allocation and therefore must be consistent with the bitmaps.
 * There is no convenient way to guarantee post-crash consistency of these
 * counts with simple update ordering, for two main reasons: (1) The counts
 * and bitmaps for a single cylinder group block are not in the same disk
 * sector.  If a disk write is interrupted (e.g., by power failure), one may
 * be written and the other not.  (2) Some of the counts are located in the
 * superblock rather than the cylinder group block.  So, we focus our soft
 * updates implementation on protecting the bitmaps.  When mounting a
 * filesystem, we recompute the auxiliary counts from the bitmaps.
 */

/*
 * Called just after updating the cylinder group block to allocate an inode.
 *
 * Parameters:
 *	bp:		buffer for cylgroup block with inode map
 *	ip:		inode related to allocation
 *	newinum:	new inode number being allocated
 */
void
softdep_setup_inomapdep(struct buf *bp, struct inode *ip, ino_t newinum)
{
	struct inodedep *inodedep;
	struct bmsafemap *bmsafemap;

	/*
	 * Create a dependency for the newly allocated inode.
	 * Panic if it already exists as something is seriously wrong.
	 * Otherwise add it to the dependency list for the buffer holding
	 * the cylinder group map from which it was allocated.
	 */
	ACQUIRE_LOCK(&lk);
	if (inodedep_lookup(ip->i_fs, newinum, DEPALLOC | NODELAY, &inodedep)) {
		FREE_LOCK(&lk);
		panic("softdep_setup_inomapdep: found inode");
	}
	inodedep->id_buf = bp;
	inodedep->id_state &= ~DEPCOMPLETE;
	bmsafemap = bmsafemap_lookup(bp);
	LIST_INSERT_HEAD(&bmsafemap->sm_inodedephd, inodedep, id_deps);
	FREE_LOCK(&lk);
}

/*
 * Called just after updating the cylinder group block to
 * allocate block or fragment.
 *
 * Parameters:
 *	bp:		buffer for cylgroup block with block map
 *	fs:		filesystem doing allocation
 *	newblkno:	number of newly allocated block
 */
void
softdep_setup_blkmapdep(struct buf *bp, struct fs *fs,
			ufs_daddr_t newblkno)
{
	struct newblk *newblk;
	struct bmsafemap *bmsafemap;

	/*
	 * Create a dependency for the newly allocated block.
	 * Add it to the dependency list for the buffer holding
	 * the cylinder group map from which it was allocated.
	 */
	if (newblk_lookup(fs, newblkno, DEPALLOC, &newblk) != 0)
		panic("softdep_setup_blkmapdep: found block");
	ACQUIRE_LOCK(&lk);
	newblk->nb_bmsafemap = bmsafemap = bmsafemap_lookup(bp);
	LIST_INSERT_HEAD(&bmsafemap->sm_newblkhd, newblk, nb_deps);
	FREE_LOCK(&lk);
}
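
/*
 * Illustrative caller sketch (hypothetical code, based on how the
 * ffs_alloc.c allocators are expected to use this hook): the map bit
 * is set and the dependency recorded before the cylinder group buffer
 * can be written:
 *
 *	setbit(cg_inosused(cgp), ino);
 *	softdep_setup_inomapdep(bp, ip, ino);
 *
 * The inodedep starts with DEPCOMPLETE clear and only gains it once
 * the cylinder group buffer reaches the disk.
 */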
/*
 * Find the bmsafemap associated with a cylinder group buffer.
 * If none exists, create one.  The buffer must be locked when
 * this routine is called and this routine must be called with
 * splbio interrupts blocked.
 */
static struct bmsafemap *
bmsafemap_lookup(struct buf *bp)
{
	struct bmsafemap *bmsafemap;
	struct worklist *wk;

#ifdef DEBUG
	if (lk.lkt_held == NOHOLDER)
		panic("bmsafemap_lookup: lock not held");
#endif
	LIST_FOREACH(wk, &bp->b_dep, wk_list) {
		if (wk->wk_type == D_BMSAFEMAP)
			return (WK_BMSAFEMAP(wk));
	}
	FREE_LOCK(&lk);
	bmsafemap = kmalloc(sizeof(struct bmsafemap), M_BMSAFEMAP,
			    M_SOFTDEP_FLAGS);
	bmsafemap->sm_list.wk_type = D_BMSAFEMAP;
	bmsafemap->sm_list.wk_state = 0;
	bmsafemap->sm_buf = bp;
	LIST_INIT(&bmsafemap->sm_allocdirecthd);
	LIST_INIT(&bmsafemap->sm_allocindirhd);
	LIST_INIT(&bmsafemap->sm_inodedephd);
	LIST_INIT(&bmsafemap->sm_newblkhd);
	ACQUIRE_LOCK(&lk);
	WORKLIST_INSERT_BP(bp, &bmsafemap->sm_list);
	return (bmsafemap);
}

/*
 * Direct block allocation dependencies.
 *
 * When a new block is allocated, the corresponding disk locations must be
 * initialized (with zeros or new data) before the on-disk inode points to
 * them.  Also, the freemap from which the block was allocated must be
 * updated (on disk) before the inode's pointer.  These two dependencies are
 * independent of each other and are needed for all file blocks and indirect
 * blocks that are pointed to directly by the inode.  Just before the
 * "in-core" version of the inode is updated with a newly allocated block
 * number, a procedure (below) is called to setup allocation dependency
 * structures.  These structures are removed when the corresponding
 * dependencies are satisfied or when the block allocation becomes obsolete
 * (i.e., the file is deleted, the block is de-allocated, or the block is a
 * fragment that gets upgraded).  All of these cases are handled in
 * procedures described later.
 *
 * When a file extension causes a fragment to be upgraded, either to a larger
 * fragment or to a full block, the on-disk location may change (if the
 * previous fragment could not simply be extended).  In this case, the old
 * fragment must be de-allocated, but not until after the inode's pointer has
 * been updated.  In most cases, this is handled by later procedures, which
 * will construct a "freefrag" structure to be added to the workitem queue
 * when the inode update is complete (or obsolete).  The main exception to
 * this is when an allocation occurs while a pending allocation dependency
 * (for the same block pointer) remains.  This case is handled in the main
 * allocation dependency setup procedure by immediately freeing the
 * unreferenced fragments.
 *
 * Parameters:
 *	ip:		inode to which block is being added
 *	lbn:		block pointer within inode
 *	newblkno:	disk block number being added
 *	oldblkno:	previous block number, 0 unless frag
 *	newsize:	size of new block
 *	oldsize:	size of old block
 *	bp:		bp for allocated block
 */
void
softdep_setup_allocdirect(struct inode *ip, ufs_lbn_t lbn, ufs_daddr_t newblkno,
			  ufs_daddr_t oldblkno, long newsize, long oldsize,
			  struct buf *bp)
{
	struct allocdirect *adp, *oldadp;
	struct allocdirectlst *adphead;
	struct bmsafemap *bmsafemap;
	struct inodedep *inodedep;
	struct pagedep *pagedep;
	struct newblk *newblk;

	adp = kmalloc(sizeof(struct allocdirect), M_ALLOCDIRECT,
		      M_SOFTDEP_FLAGS | M_ZERO);
	adp->ad_list.wk_type = D_ALLOCDIRECT;
	adp->ad_lbn = lbn;
	adp->ad_newblkno = newblkno;
	adp->ad_oldblkno = oldblkno;
	adp->ad_newsize = newsize;
	adp->ad_oldsize = oldsize;
	adp->ad_state = ATTACHED;
	if (newblkno == oldblkno)
		adp->ad_freefrag = NULL;
	else
		adp->ad_freefrag = newfreefrag(ip, oldblkno, oldsize);

	if (newblk_lookup(ip->i_fs, newblkno, 0, &newblk) == 0)
		panic("softdep_setup_allocdirect: lost block");

	ACQUIRE_LOCK(&lk);
	inodedep_lookup(ip->i_fs, ip->i_number, DEPALLOC | NODELAY, &inodedep);
	adp->ad_inodedep = inodedep;

	if (newblk->nb_state == DEPCOMPLETE) {
		adp->ad_state |= DEPCOMPLETE;
		adp->ad_buf = NULL;
	} else {
		bmsafemap = newblk->nb_bmsafemap;
		adp->ad_buf = bmsafemap->sm_buf;
		LIST_REMOVE(newblk, nb_deps);
		LIST_INSERT_HEAD(&bmsafemap->sm_allocdirecthd, adp, ad_deps);
	}
	LIST_REMOVE(newblk, nb_hash);
	kfree(newblk, M_NEWBLK);

	WORKLIST_INSERT_BP(bp, &adp->ad_list);
	if (lbn >= NDADDR) {
		/* allocating an indirect block */
		if (oldblkno != 0) {
			FREE_LOCK(&lk);
			panic("softdep_setup_allocdirect: non-zero indir");
		}
	} else {
		/*
		 * Allocating a direct block.
		 *
		 * If we are allocating a directory block, then we must
		 * allocate an associated pagedep to track additions and
		 * deletions.
		 */
		if ((ip->i_mode & IFMT) == IFDIR &&
		    pagedep_lookup(ip, lbn, DEPALLOC, &pagedep) == 0) {
			WORKLIST_INSERT_BP(bp, &pagedep->pd_list);
		}
	}
	/*
	 * The list of allocdirects must be kept in sorted and ascending
	 * order so that the rollback routines can quickly determine the
	 * first uncommitted block (the size of the file stored on disk
	 * ends at the end of the lowest committed fragment, or if there
	 * are no fragments, at the end of the highest committed block).
	 * Since files generally grow, the typical case is that the new
	 * block is to be added at the end of the list.  We speed this
	 * special case by checking against the last allocdirect in the
	 * list before laboriously traversing the list looking for the
	 * insertion point.
	 */
	adphead = &inodedep->id_newinoupdt;
	oldadp = TAILQ_LAST(adphead, allocdirectlst);
	if (oldadp == NULL || oldadp->ad_lbn <= lbn) {
		/* insert at end of list */
		TAILQ_INSERT_TAIL(adphead, adp, ad_next);
		if (oldadp != NULL && oldadp->ad_lbn == lbn)
			allocdirect_merge(adphead, adp, oldadp);
		FREE_LOCK(&lk);
		return;
	}
	TAILQ_FOREACH(oldadp, adphead, ad_next) {
		if (oldadp->ad_lbn >= lbn)
			break;
	}
	if (oldadp == NULL) {
		FREE_LOCK(&lk);
		panic("softdep_setup_allocdirect: lost entry");
	}
	/* insert in middle of list */
	TAILQ_INSERT_BEFORE(oldadp, adp, ad_next);
	if (oldadp->ad_lbn == lbn)
		allocdirect_merge(adphead, adp, oldadp);
	FREE_LOCK(&lk);
}

/*
 * Replace an old allocdirect dependency with a newer one.
 * This routine must be called with splbio interrupts blocked.
 *
 * Parameters:
 *	adphead:	head of list holding allocdirects
 *	newadp:		allocdirect being added
 *	oldadp:		existing allocdirect being checked
 */
static void
allocdirect_merge(struct allocdirectlst *adphead,
		  struct allocdirect *newadp,
		  struct allocdirect *oldadp)
{
	struct freefrag *freefrag;

#ifdef DEBUG
	if (lk.lkt_held == NOHOLDER)
		panic("allocdirect_merge: lock not held");
#endif
	if (newadp->ad_oldblkno != oldadp->ad_newblkno ||
	    newadp->ad_oldsize != oldadp->ad_newsize ||
	    newadp->ad_lbn >= NDADDR) {
		FREE_LOCK(&lk);
		panic("allocdirect_merge: old %d != new %d || lbn %ld >= %d",
		    newadp->ad_oldblkno, oldadp->ad_newblkno, newadp->ad_lbn,
		    NDADDR);
	}
	newadp->ad_oldblkno = oldadp->ad_oldblkno;
	newadp->ad_oldsize = oldadp->ad_oldsize;
	/*
	 * If the old dependency had a fragment to free or had never
	 * previously had a block allocated, then the new dependency
	 * can immediately post its freefrag and adopt the old freefrag.
	 * This action is done by swapping the freefrag dependencies.
	 * The new dependency gains the old one's freefrag, and the
	 * old one gets the new one and then immediately puts it on
	 * the worklist when it is freed by free_allocdirect.  It is
	 * not possible to do this swap when the old dependency had a
	 * non-zero size but no previous fragment to free.  This condition
	 * arises when the new block is an extension of the old block.
	 * Here, the first part of the fragment allocated to the new
	 * dependency is part of the block currently claimed on disk by
	 * the old dependency, so cannot legitimately be freed until the
	 * conditions for the new dependency are fulfilled.
	 */
	if (oldadp->ad_freefrag != NULL || oldadp->ad_oldblkno == 0) {
		freefrag = newadp->ad_freefrag;
		newadp->ad_freefrag = oldadp->ad_freefrag;
		oldadp->ad_freefrag = freefrag;
	}
	free_allocdirect(adphead, oldadp, 0);
}
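
/*
 * Illustrative worked example: suppose lbn 0 is rewritten twice before
 * the inode block is flushed.  The first write leaves an allocdirect A
 * (with ad_oldblkno 0).  The second write creates allocdirect B whose
 * ad_oldblkno equals A's ad_newblkno, so allocdirect_merge() collapses
 * them: B inherits A's original ad_oldblkno (0) and, per the swap
 * above, its freefrag; A is released, leaving a single rollback record
 * for that block pointer.
 */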
/*
 * Allocate a new freefrag structure if needed.
 */
static struct freefrag *
newfreefrag(struct inode *ip, ufs_daddr_t blkno, long size)
{
	struct freefrag *freefrag;
	struct fs *fs;

	if (blkno == 0)
		return (NULL);
	fs = ip->i_fs;
	if (fragnum(fs, blkno) + numfrags(fs, size) > fs->fs_frag)
		panic("newfreefrag: frag size");
	freefrag = kmalloc(sizeof(struct freefrag), M_FREEFRAG,
			   M_SOFTDEP_FLAGS);
	freefrag->ff_list.wk_type = D_FREEFRAG;
	freefrag->ff_state = ip->i_uid & ~ONWORKLIST;	/* XXX - used below */
	freefrag->ff_inum = ip->i_number;
	freefrag->ff_fs = fs;
	freefrag->ff_devvp = ip->i_devvp;
	freefrag->ff_blkno = blkno;
	freefrag->ff_fragsize = size;
	return (freefrag);
}

/*
 * This workitem de-allocates fragments that were replaced during
 * file block allocation.
 */
static void
handle_workitem_freefrag(struct freefrag *freefrag)
{
	struct inode tip;

	tip.i_fs = freefrag->ff_fs;
	tip.i_devvp = freefrag->ff_devvp;
	tip.i_dev = freefrag->ff_devvp->v_rdev;
	tip.i_number = freefrag->ff_inum;
	tip.i_uid = freefrag->ff_state & ~ONWORKLIST;	/* XXX - set above */
	ffs_blkfree(&tip, freefrag->ff_blkno, freefrag->ff_fragsize);
	kfree(freefrag, M_FREEFRAG);
}

/*
 * Indirect block allocation dependencies.
 *
 * The same dependencies that exist for a direct block also exist when
 * a new block is allocated and pointed to by an entry in a block of
 * indirect pointers.  The undo/redo states described above are also
 * used here.  Because an indirect block contains many pointers that
 * may have dependencies, a second copy of the entire in-memory indirect
 * block is kept.  The buffer cache copy is always completely up-to-date.
 * The second copy, which is used only as a source for disk writes,
 * contains only the safe pointers (i.e., those that have no remaining
 * update dependencies).  The second copy is freed when all pointers
 * are safe.  The cache is not allowed to replace indirect blocks with
 * pending update dependencies.  If a buffer containing an indirect
 * block with dependencies is written, these routines will mark it
 * dirty again.  It can only be successfully written once all the
 * dependencies are removed.  The ffs_fsync routine in conjunction with
 * softdep_sync_metadata work together to get all the dependencies
 * removed so that a file can be successfully written to disk.  Three
 * procedures are used when setting up indirect block pointer
 * dependencies.  The division is necessary because of the organization
 * of the "balloc" routine and because of the distinction between file
 * pages and file metadata blocks.
 */
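
/*
 * Illustrative write-time sketch (field names assumed from the softdep
 * design this file derives from): when an indirect block with pending
 * dependencies is written, the I/O initiation hook substitutes the
 * safe copy kept in ir_savebp, conceptually:
 *
 *	bcopy(indirdep->ir_savebp->b_data, bp->b_data, bp->b_bcount);
 *
 * so only pointers whose new blocks are known to be on disk go out;
 * the up-to-date copy is restored once the write completes.
 */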
/*
 * Allocate a new allocindir structure.
 *
 * Parameters:
 *	ip:		inode for file being extended
 *	ptrno:		offset of pointer in indirect block
 *	newblkno:	disk block number being added
 *	oldblkno:	previous block number, 0 if none
 */
static struct allocindir *
newallocindir(struct inode *ip, int ptrno, ufs_daddr_t newblkno,
	      ufs_daddr_t oldblkno)
{
	struct allocindir *aip;

	aip = kmalloc(sizeof(struct allocindir), M_ALLOCINDIR,
		      M_SOFTDEP_FLAGS | M_ZERO);
	aip->ai_list.wk_type = D_ALLOCINDIR;
	aip->ai_state = ATTACHED;
	aip->ai_offset = ptrno;
	aip->ai_newblkno = newblkno;
	aip->ai_oldblkno = oldblkno;
	aip->ai_freefrag = newfreefrag(ip, oldblkno, ip->i_fs->fs_bsize);
	return (aip);
}

/*
 * Called just before setting an indirect block pointer
 * to a newly allocated file page.
 *
 * Parameters:
 *	ip:		inode for file being extended
 *	lbn:		allocated block number within file
 *	bp:		buffer with indirect blk referencing page
 *	ptrno:		offset of pointer in indirect block
 *	newblkno:	disk block number being added
 *	oldblkno:	previous block number, 0 if none
 *	nbp:		buffer holding allocated page
 */
void
softdep_setup_allocindir_page(struct inode *ip, ufs_lbn_t lbn,
			      struct buf *bp, int ptrno,
			      ufs_daddr_t newblkno, ufs_daddr_t oldblkno,
			      struct buf *nbp)
{
	struct allocindir *aip;
	struct pagedep *pagedep;

	aip = newallocindir(ip, ptrno, newblkno, oldblkno);
	ACQUIRE_LOCK(&lk);
	/*
	 * If we are allocating a directory page, then we must
	 * allocate an associated pagedep to track additions and
	 * deletions.
	 */
	if ((ip->i_mode & IFMT) == IFDIR &&
	    pagedep_lookup(ip, lbn, DEPALLOC, &pagedep) == 0)
		WORKLIST_INSERT_BP(nbp, &pagedep->pd_list);
	WORKLIST_INSERT_BP(nbp, &aip->ai_list);
	FREE_LOCK(&lk);
	setup_allocindir_phase2(bp, ip, aip);
}

/*
 * Called just before setting an indirect block pointer to a
 * newly allocated indirect block.
 *
 * Parameters:
 *	nbp:		newly allocated indirect block
 *	ip:		inode for file being extended
 *	bp:		indirect block referencing allocated block
 *	ptrno:		offset of pointer in indirect block
 *	newblkno:	disk block number being added
 */
void
softdep_setup_allocindir_meta(struct buf *nbp, struct inode *ip,
			      struct buf *bp, int ptrno,
			      ufs_daddr_t newblkno)
{
	struct allocindir *aip;

	aip = newallocindir(ip, ptrno, newblkno, 0);
	ACQUIRE_LOCK(&lk);
	WORKLIST_INSERT_BP(nbp, &aip->ai_list);
	FREE_LOCK(&lk);
	setup_allocindir_phase2(bp, ip, aip);
}
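
/*
 * Illustrative caller sketch (hypothetical code, based on how
 * ffs_balloc() is expected to use these hooks): the hook runs just
 * before the new block number is stored into the in-core indirect
 * block:
 *
 *	softdep_setup_allocindir_meta(nbp, ip, bp, indirs[i].in_off, nb);
 *	bap[indirs[i].in_off] = nb;
 */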
1685 * 1686 * Parameters: 1687 * bp: in-memory copy of the indirect block 1688 * ip: inode for file being extended 1689 * aip: allocindir allocated by the above routines 1690 */ 1691 static void 1692 setup_allocindir_phase2(struct buf *bp, struct inode *ip, 1693 struct allocindir *aip) 1694 { 1695 struct worklist *wk; 1696 struct indirdep *indirdep, *newindirdep; 1697 struct bmsafemap *bmsafemap; 1698 struct allocindir *oldaip; 1699 struct freefrag *freefrag; 1700 struct newblk *newblk; 1701 1702 if (bp->b_loffset >= 0) 1703 panic("setup_allocindir_phase2: not indir blk"); 1704 for (indirdep = NULL, newindirdep = NULL; ; ) { 1705 ACQUIRE_LOCK(&lk); 1706 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 1707 if (wk->wk_type != D_INDIRDEP) 1708 continue; 1709 indirdep = WK_INDIRDEP(wk); 1710 break; 1711 } 1712 if (indirdep == NULL && newindirdep) { 1713 indirdep = newindirdep; 1714 WORKLIST_INSERT_BP(bp, &indirdep->ir_list); 1715 newindirdep = NULL; 1716 } 1717 FREE_LOCK(&lk); 1718 if (indirdep) { 1719 if (newblk_lookup(ip->i_fs, aip->ai_newblkno, 0, 1720 &newblk) == 0) 1721 panic("setup_allocindir: lost block"); 1722 ACQUIRE_LOCK(&lk); 1723 if (newblk->nb_state == DEPCOMPLETE) { 1724 aip->ai_state |= DEPCOMPLETE; 1725 aip->ai_buf = NULL; 1726 } else { 1727 bmsafemap = newblk->nb_bmsafemap; 1728 aip->ai_buf = bmsafemap->sm_buf; 1729 LIST_REMOVE(newblk, nb_deps); 1730 LIST_INSERT_HEAD(&bmsafemap->sm_allocindirhd, 1731 aip, ai_deps); 1732 } 1733 LIST_REMOVE(newblk, nb_hash); 1734 kfree(newblk, M_NEWBLK); 1735 aip->ai_indirdep = indirdep; 1736 /* 1737 * Check to see if there is an existing dependency 1738 * for this block. If there is, merge the old 1739 * dependency into the new one. 1740 */ 1741 if (aip->ai_oldblkno == 0) 1742 oldaip = NULL; 1743 else 1744 1745 LIST_FOREACH(oldaip, &indirdep->ir_deplisthd, ai_next) 1746 if (oldaip->ai_offset == aip->ai_offset) 1747 break; 1748 if (oldaip != NULL) { 1749 if (oldaip->ai_newblkno != aip->ai_oldblkno) { 1750 FREE_LOCK(&lk); 1751 panic("setup_allocindir_phase2: blkno"); 1752 } 1753 aip->ai_oldblkno = oldaip->ai_oldblkno; 1754 freefrag = oldaip->ai_freefrag; 1755 oldaip->ai_freefrag = aip->ai_freefrag; 1756 aip->ai_freefrag = freefrag; 1757 free_allocindir(oldaip, NULL); 1758 } 1759 LIST_INSERT_HEAD(&indirdep->ir_deplisthd, aip, ai_next); 1760 ((ufs_daddr_t *)indirdep->ir_savebp->b_data) 1761 [aip->ai_offset] = aip->ai_oldblkno; 1762 FREE_LOCK(&lk); 1763 } 1764 if (newindirdep) { 1765 /* 1766 * Avoid any possibility of data corruption by 1767 * ensuring that our old version is thrown away. 1768 */ 1769 newindirdep->ir_savebp->b_flags |= B_INVAL | B_NOCACHE; 1770 brelse(newindirdep->ir_savebp); 1771 WORKITEM_FREE((caddr_t)newindirdep, D_INDIRDEP); 1772 } 1773 if (indirdep) 1774 break; 1775 newindirdep = kmalloc(sizeof(struct indirdep), M_INDIRDEP, 1776 M_SOFTDEP_FLAGS); 1777 newindirdep->ir_list.wk_type = D_INDIRDEP; 1778 newindirdep->ir_state = ATTACHED; 1779 LIST_INIT(&newindirdep->ir_deplisthd); 1780 LIST_INIT(&newindirdep->ir_donehd); 1781 if (bp->b_bio2.bio_offset == NOOFFSET) { 1782 VOP_BMAP(bp->b_vp, bp->b_bio1.bio_offset, 1783 &bp->b_bio2.bio_offset, NULL, NULL, 1784 BUF_CMD_WRITE); 1785 } 1786 KKASSERT(bp->b_bio2.bio_offset != NOOFFSET); 1787 newindirdep->ir_savebp = getblk(ip->i_devvp, 1788 bp->b_bio2.bio_offset, 1789 bp->b_bcount, 0, 0); 1790 BUF_KERNPROC(newindirdep->ir_savebp); 1791 bcopy(bp->b_data, newindirdep->ir_savebp->b_data, bp->b_bcount); 1792 } 1793 } 1794 1795 /* 1796 * Block de-allocation dependencies. 
 *
 * When blocks are de-allocated, the on-disk pointers must be nullified before
 * the blocks are made available for use by other files. (The true
 * requirement is that old pointers must be nullified before new on-disk
 * pointers are set. We chose this slightly more stringent requirement to
 * reduce complexity.) Our implementation handles this dependency by updating
 * the inode (or indirect block) appropriately but delaying the actual block
 * de-allocation (i.e., freemap and free space count manipulation) until
 * after the updated versions reach stable storage. After the disk is
 * updated, the blocks can be safely de-allocated whenever it is convenient.
 * This implementation handles only the common case of reducing a file's
 * length to zero. Other cases are handled by the conventional synchronous
 * write approach.
 *
 * The ffs implementation with which we worked double-checks
 * the state of the block pointers and file size as it reduces
 * a file's length. Some of this code is replicated here in our
 * soft updates implementation. The freeblks->fb_chkcnt field is
 * used to transfer a part of this information to the procedure
 * that eventually de-allocates the blocks.
 *
 * This routine should be called from the routine that shortens
 * a file's length, before the inode's size or block pointers
 * are modified. It will save the block pointer information for
 * later release and zero the inode so that the calling routine
 * can release it.
 */
struct softdep_setup_freeblocks_info {
	struct fs *fs;
	struct inode *ip;
};

static int softdep_setup_freeblocks_bp(struct buf *bp, void *data);

/*
 * Parameters:
 *	ip:	The inode whose length is to be reduced
 *	length:	The new length for the file
 */
void
softdep_setup_freeblocks(struct inode *ip, off_t length)
{
	struct softdep_setup_freeblocks_info info;
	struct freeblks *freeblks;
	struct inodedep *inodedep;
	struct allocdirect *adp;
	struct vnode *vp;
	struct buf *bp;
	struct fs *fs;
	int i, error, delay;
	int count;

	fs = ip->i_fs;
	if (length != 0)
		panic("softdep_setup_freeblocks: non-zero length");
	freeblks = kmalloc(sizeof(struct freeblks), M_FREEBLKS,
	    M_SOFTDEP_FLAGS | M_ZERO);
	freeblks->fb_list.wk_type = D_FREEBLKS;
	freeblks->fb_state = ATTACHED;
	freeblks->fb_uid = ip->i_uid;
	freeblks->fb_previousinum = ip->i_number;
	freeblks->fb_devvp = ip->i_devvp;
	freeblks->fb_fs = fs;
	freeblks->fb_oldsize = ip->i_size;
	freeblks->fb_newsize = length;
	freeblks->fb_chkcnt = ip->i_blocks;
	for (i = 0; i < NDADDR; i++) {
		freeblks->fb_dblks[i] = ip->i_db[i];
		ip->i_db[i] = 0;
	}
	for (i = 0; i < NIADDR; i++) {
		freeblks->fb_iblks[i] = ip->i_ib[i];
		ip->i_ib[i] = 0;
	}
	ip->i_blocks = 0;
	ip->i_size = 0;
	/*
	 * Push the zero'ed inode to its disk buffer so that we are free
	 * to delete its dependencies below. Once the dependencies are gone
	 * the buffer can be safely released.
	 */
	if ((error = bread(ip->i_devvp,
	    fsbtodoff(fs, ino_to_fsba(fs, ip->i_number)),
	    (int)fs->fs_bsize, &bp)) != 0)
		softdep_error("softdep_setup_freeblocks", error);
	*((struct ufs1_dinode *)bp->b_data + ino_to_fsbo(fs, ip->i_number)) =
	    ip->i_din;
	/*
	 * Find and eliminate any inode dependencies.
1886 */ 1887 ACQUIRE_LOCK(&lk); 1888 (void) inodedep_lookup(fs, ip->i_number, DEPALLOC, &inodedep); 1889 if ((inodedep->id_state & IOSTARTED) != 0) { 1890 FREE_LOCK(&lk); 1891 panic("softdep_setup_freeblocks: inode busy"); 1892 } 1893 /* 1894 * Add the freeblks structure to the list of operations that 1895 * must await the zero'ed inode being written to disk. If we 1896 * still have a bitmap dependency (delay == 0), then the inode 1897 * has never been written to disk, so we can process the 1898 * freeblks below once we have deleted the dependencies. 1899 */ 1900 delay = (inodedep->id_state & DEPCOMPLETE); 1901 if (delay) 1902 WORKLIST_INSERT(&inodedep->id_bufwait, &freeblks->fb_list); 1903 /* 1904 * Because the file length has been truncated to zero, any 1905 * pending block allocation dependency structures associated 1906 * with this inode are obsolete and can simply be de-allocated. 1907 * We must first merge the two dependency lists to get rid of 1908 * any duplicate freefrag structures, then purge the merged list. 1909 */ 1910 merge_inode_lists(inodedep); 1911 while ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != NULL) 1912 free_allocdirect(&inodedep->id_inoupdt, adp, 1); 1913 FREE_LOCK(&lk); 1914 bdwrite(bp); 1915 /* 1916 * We must wait for any I/O in progress to finish so that 1917 * all potential buffers on the dirty list will be visible. 1918 * Once they are all there, walk the list and get rid of 1919 * any dependencies. 1920 */ 1921 vp = ITOV(ip); 1922 ACQUIRE_LOCK(&lk); 1923 drain_output(vp, 1); 1924 1925 info.fs = fs; 1926 info.ip = ip; 1927 lwkt_gettoken(&vp->v_token); 1928 do { 1929 count = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL, 1930 softdep_setup_freeblocks_bp, &info); 1931 } while (count != 0); 1932 lwkt_reltoken(&vp->v_token); 1933 1934 if (inodedep_lookup(fs, ip->i_number, 0, &inodedep) != 0) 1935 (void)free_inodedep(inodedep); 1936 1937 if (delay) { 1938 freeblks->fb_state |= DEPCOMPLETE; 1939 /* 1940 * If the inode with zeroed block pointers is now on disk 1941 * we can start freeing blocks. Add freeblks to the worklist 1942 * instead of calling handle_workitem_freeblocks directly as 1943 * it is more likely that additional IO is needed to complete 1944 * the request here than in the !delay case. 1945 */ 1946 if ((freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE) 1947 add_to_worklist(&freeblks->fb_list); 1948 } 1949 1950 FREE_LOCK(&lk); 1951 /* 1952 * If the inode has never been written to disk (delay == 0), 1953 * then we can process the freeblks now that we have deleted 1954 * the dependencies. 1955 */ 1956 if (!delay) 1957 handle_workitem_freeblocks(freeblks); 1958 } 1959 1960 static int 1961 softdep_setup_freeblocks_bp(struct buf *bp, void *data) 1962 { 1963 struct softdep_setup_freeblocks_info *info = data; 1964 struct inodedep *inodedep; 1965 1966 if (getdirtybuf(&bp, MNT_WAIT) == 0) { 1967 kprintf("softdep_setup_freeblocks_bp(1): caught bp %p going away\n", bp); 1968 return(-1); 1969 } 1970 if (bp->b_vp != ITOV(info->ip) || (bp->b_flags & B_DELWRI) == 0) { 1971 kprintf("softdep_setup_freeblocks_bp(2): caught bp %p going away\n", bp); 1972 BUF_UNLOCK(bp); 1973 return(-1); 1974 } 1975 (void) inodedep_lookup(info->fs, info->ip->i_number, 0, &inodedep); 1976 deallocate_dependencies(bp, inodedep); 1977 bp->b_flags |= B_INVAL | B_NOCACHE; 1978 FREE_LOCK(&lk); 1979 brelse(bp); 1980 ACQUIRE_LOCK(&lk); 1981 return(1); 1982 } 1983 1984 /* 1985 * Reclaim any dependency structures from a buffer that is about to 1986 * be reallocated to a new vnode. 
The buffer must be locked, thus, 1987 * no I/O completion operations can occur while we are manipulating 1988 * its associated dependencies. The mutex is held so that other I/O's 1989 * associated with related dependencies do not occur. 1990 */ 1991 static void 1992 deallocate_dependencies(struct buf *bp, struct inodedep *inodedep) 1993 { 1994 struct worklist *wk; 1995 struct indirdep *indirdep; 1996 struct allocindir *aip; 1997 struct pagedep *pagedep; 1998 struct dirrem *dirrem; 1999 struct diradd *dap; 2000 int i; 2001 2002 while ((wk = LIST_FIRST(&bp->b_dep)) != NULL) { 2003 switch (wk->wk_type) { 2004 2005 case D_INDIRDEP: 2006 indirdep = WK_INDIRDEP(wk); 2007 /* 2008 * None of the indirect pointers will ever be visible, 2009 * so they can simply be tossed. GOINGAWAY ensures 2010 * that allocated pointers will be saved in the buffer 2011 * cache until they are freed. Note that they will 2012 * only be able to be found by their physical address 2013 * since the inode mapping the logical address will 2014 * be gone. The save buffer used for the safe copy 2015 * was allocated in setup_allocindir_phase2 using 2016 * the physical address so it could be used for this 2017 * purpose. Hence we swap the safe copy with the real 2018 * copy, allowing the safe copy to be freed and holding 2019 * on to the real copy for later use in indir_trunc. 2020 * 2021 * NOTE: ir_savebp is relative to the block device 2022 * so b_bio1 contains the device block number. 2023 */ 2024 if (indirdep->ir_state & GOINGAWAY) { 2025 FREE_LOCK(&lk); 2026 panic("deallocate_dependencies: already gone"); 2027 } 2028 indirdep->ir_state |= GOINGAWAY; 2029 while ((aip = LIST_FIRST(&indirdep->ir_deplisthd)) != NULL) 2030 free_allocindir(aip, inodedep); 2031 if (bp->b_bio1.bio_offset >= 0 || 2032 bp->b_bio2.bio_offset != indirdep->ir_savebp->b_bio1.bio_offset) { 2033 FREE_LOCK(&lk); 2034 panic("deallocate_dependencies: not indir"); 2035 } 2036 bcopy(bp->b_data, indirdep->ir_savebp->b_data, 2037 bp->b_bcount); 2038 WORKLIST_REMOVE(wk); 2039 WORKLIST_INSERT_BP(indirdep->ir_savebp, wk); 2040 continue; 2041 2042 case D_PAGEDEP: 2043 pagedep = WK_PAGEDEP(wk); 2044 /* 2045 * None of the directory additions will ever be 2046 * visible, so they can simply be tossed. 2047 */ 2048 for (i = 0; i < DAHASHSZ; i++) 2049 while ((dap = 2050 LIST_FIRST(&pagedep->pd_diraddhd[i]))) 2051 free_diradd(dap); 2052 while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL) 2053 free_diradd(dap); 2054 /* 2055 * Copy any directory remove dependencies to the list 2056 * to be processed after the zero'ed inode is written. 2057 * If the inode has already been written, then they 2058 * can be dumped directly onto the work list. 
2059 */ 2060 LIST_FOREACH(dirrem, &pagedep->pd_dirremhd, dm_next) { 2061 LIST_REMOVE(dirrem, dm_next); 2062 dirrem->dm_dirinum = pagedep->pd_ino; 2063 if (inodedep == NULL || 2064 (inodedep->id_state & ALLCOMPLETE) == 2065 ALLCOMPLETE) 2066 add_to_worklist(&dirrem->dm_list); 2067 else 2068 WORKLIST_INSERT(&inodedep->id_bufwait, 2069 &dirrem->dm_list); 2070 } 2071 WORKLIST_REMOVE(&pagedep->pd_list); 2072 LIST_REMOVE(pagedep, pd_hash); 2073 WORKITEM_FREE(pagedep, D_PAGEDEP); 2074 continue; 2075 2076 case D_ALLOCINDIR: 2077 free_allocindir(WK_ALLOCINDIR(wk), inodedep); 2078 continue; 2079 2080 case D_ALLOCDIRECT: 2081 case D_INODEDEP: 2082 FREE_LOCK(&lk); 2083 panic("deallocate_dependencies: Unexpected type %s", 2084 TYPENAME(wk->wk_type)); 2085 /* NOTREACHED */ 2086 2087 default: 2088 FREE_LOCK(&lk); 2089 panic("deallocate_dependencies: Unknown type %s", 2090 TYPENAME(wk->wk_type)); 2091 /* NOTREACHED */ 2092 } 2093 } 2094 } 2095 2096 /* 2097 * Free an allocdirect. Generate a new freefrag work request if appropriate. 2098 * This routine must be called with splbio interrupts blocked. 2099 */ 2100 static void 2101 free_allocdirect(struct allocdirectlst *adphead, 2102 struct allocdirect *adp, int delay) 2103 { 2104 2105 #ifdef DEBUG 2106 if (lk.lkt_held == NOHOLDER) 2107 panic("free_allocdirect: lock not held"); 2108 #endif 2109 if ((adp->ad_state & DEPCOMPLETE) == 0) 2110 LIST_REMOVE(adp, ad_deps); 2111 TAILQ_REMOVE(adphead, adp, ad_next); 2112 if ((adp->ad_state & COMPLETE) == 0) 2113 WORKLIST_REMOVE(&adp->ad_list); 2114 if (adp->ad_freefrag != NULL) { 2115 if (delay) 2116 WORKLIST_INSERT(&adp->ad_inodedep->id_bufwait, 2117 &adp->ad_freefrag->ff_list); 2118 else 2119 add_to_worklist(&adp->ad_freefrag->ff_list); 2120 } 2121 WORKITEM_FREE(adp, D_ALLOCDIRECT); 2122 } 2123 2124 /* 2125 * Prepare an inode to be freed. The actual free operation is not 2126 * done until the zero'ed inode has been written to disk. 2127 */ 2128 void 2129 softdep_freefile(struct vnode *pvp, ino_t ino, int mode) 2130 { 2131 struct inode *ip = VTOI(pvp); 2132 struct inodedep *inodedep; 2133 struct freefile *freefile; 2134 2135 /* 2136 * This sets up the inode de-allocation dependency. 2137 */ 2138 freefile = kmalloc(sizeof(struct freefile), M_FREEFILE, 2139 M_SOFTDEP_FLAGS); 2140 freefile->fx_list.wk_type = D_FREEFILE; 2141 freefile->fx_list.wk_state = 0; 2142 freefile->fx_mode = mode; 2143 freefile->fx_oldinum = ino; 2144 freefile->fx_devvp = ip->i_devvp; 2145 freefile->fx_fs = ip->i_fs; 2146 2147 /* 2148 * If the inodedep does not exist, then the zero'ed inode has 2149 * been written to disk. If the allocated inode has never been 2150 * written to disk, then the on-disk inode is zero'ed. In either 2151 * case we can free the file immediately. 2152 */ 2153 ACQUIRE_LOCK(&lk); 2154 if (inodedep_lookup(ip->i_fs, ino, 0, &inodedep) == 0 || 2155 check_inode_unwritten(inodedep)) { 2156 FREE_LOCK(&lk); 2157 handle_workitem_freefile(freefile); 2158 return; 2159 } 2160 WORKLIST_INSERT(&inodedep->id_inowait, &freefile->fx_list); 2161 FREE_LOCK(&lk); 2162 } 2163 2164 /* 2165 * Check to see if an inode has never been written to disk. If 2166 * so free the inodedep and return success, otherwise return failure. 2167 * This routine must be called with splbio interrupts blocked. 2168 * 2169 * If we still have a bitmap dependency, then the inode has never 2170 * been written to disk. Drop the dependency as it is no longer 2171 * necessary since the inode is being deallocated. 
We set the 2172 * ALLCOMPLETE flags since the bitmap now properly shows that the 2173 * inode is not allocated. Even if the inode is actively being 2174 * written, it has been rolled back to its zero'ed state, so we 2175 * are ensured that a zero inode is what is on the disk. For short 2176 * lived files, this change will usually result in removing all the 2177 * dependencies from the inode so that it can be freed immediately. 2178 */ 2179 static int 2180 check_inode_unwritten(struct inodedep *inodedep) 2181 { 2182 2183 if ((inodedep->id_state & DEPCOMPLETE) != 0 || 2184 LIST_FIRST(&inodedep->id_pendinghd) != NULL || 2185 LIST_FIRST(&inodedep->id_bufwait) != NULL || 2186 LIST_FIRST(&inodedep->id_inowait) != NULL || 2187 TAILQ_FIRST(&inodedep->id_inoupdt) != NULL || 2188 TAILQ_FIRST(&inodedep->id_newinoupdt) != NULL || 2189 inodedep->id_nlinkdelta != 0) 2190 return (0); 2191 2192 /* 2193 * Another process might be in initiate_write_inodeblock 2194 * trying to allocate memory without holding "Softdep Lock". 2195 */ 2196 if ((inodedep->id_state & IOSTARTED) != 0 && 2197 inodedep->id_savedino == NULL) 2198 return(0); 2199 2200 inodedep->id_state |= ALLCOMPLETE; 2201 LIST_REMOVE(inodedep, id_deps); 2202 inodedep->id_buf = NULL; 2203 if (inodedep->id_state & ONWORKLIST) 2204 WORKLIST_REMOVE(&inodedep->id_list); 2205 if (inodedep->id_savedino != NULL) { 2206 kfree(inodedep->id_savedino, M_INODEDEP); 2207 inodedep->id_savedino = NULL; 2208 } 2209 if (free_inodedep(inodedep) == 0) { 2210 FREE_LOCK(&lk); 2211 panic("check_inode_unwritten: busy inode"); 2212 } 2213 return (1); 2214 } 2215 2216 /* 2217 * Try to free an inodedep structure. Return 1 if it could be freed. 2218 */ 2219 static int 2220 free_inodedep(struct inodedep *inodedep) 2221 { 2222 2223 if ((inodedep->id_state & ONWORKLIST) != 0 || 2224 (inodedep->id_state & ALLCOMPLETE) != ALLCOMPLETE || 2225 LIST_FIRST(&inodedep->id_pendinghd) != NULL || 2226 LIST_FIRST(&inodedep->id_bufwait) != NULL || 2227 LIST_FIRST(&inodedep->id_inowait) != NULL || 2228 TAILQ_FIRST(&inodedep->id_inoupdt) != NULL || 2229 TAILQ_FIRST(&inodedep->id_newinoupdt) != NULL || 2230 inodedep->id_nlinkdelta != 0 || inodedep->id_savedino != NULL) 2231 return (0); 2232 LIST_REMOVE(inodedep, id_hash); 2233 WORKITEM_FREE(inodedep, D_INODEDEP); 2234 num_inodedep -= 1; 2235 return (1); 2236 } 2237 2238 /* 2239 * This workitem routine performs the block de-allocation. 2240 * The workitem is added to the pending list after the updated 2241 * inode block has been written to disk. As mentioned above, 2242 * checks regarding the number of blocks de-allocated (compared 2243 * to the number of blocks allocated for the file) are also 2244 * performed in this function. 2245 */ 2246 static void 2247 handle_workitem_freeblocks(struct freeblks *freeblks) 2248 { 2249 struct inode tip; 2250 ufs_daddr_t bn; 2251 struct fs *fs; 2252 int i, level, bsize; 2253 long nblocks, blocksreleased = 0; 2254 int error, allerror = 0; 2255 ufs_lbn_t baselbns[NIADDR], tmpval; 2256 2257 tip.i_number = freeblks->fb_previousinum; 2258 tip.i_devvp = freeblks->fb_devvp; 2259 tip.i_dev = freeblks->fb_devvp->v_rdev; 2260 tip.i_fs = freeblks->fb_fs; 2261 tip.i_size = freeblks->fb_oldsize; 2262 tip.i_uid = freeblks->fb_uid; 2263 fs = freeblks->fb_fs; 2264 tmpval = 1; 2265 baselbns[0] = NDADDR; 2266 for (i = 1; i < NIADDR; i++) { 2267 tmpval *= NINDIR(fs); 2268 baselbns[i] = baselbns[i - 1] + tmpval; 2269 } 2270 nblocks = btodb(fs->fs_bsize); 2271 blocksreleased = 0; 2272 /* 2273 * Indirect blocks first. 
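	 *
	 * (As a worked example of the baselbns[] setup above, assume
	 * NDADDR == 12 and NINDIR(fs) == 2048, i.e. an 8K-block
	 * filesystem: the single indirect block covers lbns 12 through
	 * 12 + 2048 - 1, so baselbns[0] == 12, baselbns[1] == 12 + 2048,
	 * and baselbns[2] == 12 + 2048 + 2048*2048.)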
	 */
	for (level = (NIADDR - 1); level >= 0; level--) {
		if ((bn = freeblks->fb_iblks[level]) == 0)
			continue;
		/* record errors from the traversal but keep freeing */
		if ((error = indir_trunc(&tip, fsbtodoff(fs, bn), level,
		    baselbns[level], &blocksreleased)) != 0)
			allerror = error;
		ffs_blkfree(&tip, bn, fs->fs_bsize);
		blocksreleased += nblocks;
	}
	/*
	 * All direct blocks or frags.
	 */
	for (i = (NDADDR - 1); i >= 0; i--) {
		if ((bn = freeblks->fb_dblks[i]) == 0)
			continue;
		bsize = blksize(fs, &tip, i);
		ffs_blkfree(&tip, bn, bsize);
		blocksreleased += btodb(bsize);
	}

#ifdef DIAGNOSTIC
	if (freeblks->fb_chkcnt != blocksreleased)
		kprintf("handle_workitem_freeblocks: block count\n");
	if (allerror)
		softdep_error("handle_workitem_freeblocks", allerror);
#endif /* DIAGNOSTIC */
	WORKITEM_FREE(freeblks, D_FREEBLKS);
}

/*
 * Release blocks associated with the inode ip and stored in the indirect
 * block at doffset. If level is greater than SINGLE, the block is an
 * indirect block and recursive calls to indir_trunc must be used to
 * cleanse other indirect blocks.
 */
static int
indir_trunc(struct inode *ip, off_t doffset, int level, ufs_lbn_t lbn,
	    long *countp)
{
	struct buf *bp;
	ufs_daddr_t *bap;
	ufs_daddr_t nb;
	struct fs *fs;
	struct worklist *wk;
	struct indirdep *indirdep;
	int i, lbnadd, nblocks;
	int error, allerror = 0;

	fs = ip->i_fs;
	lbnadd = 1;
	for (i = level; i > 0; i--)
		lbnadd *= NINDIR(fs);
	/*
	 * Get buffer of block pointers to be freed. This routine is not
	 * called until the zero'ed inode has been written, so it is safe
	 * to free blocks as they are encountered. Because the inode has
	 * been zero'ed, calls to bmap on these blocks will fail. So, we
	 * have to use the on-disk address and the block device for the
	 * filesystem to look them up. If the file was deleted before its
	 * indirect blocks were all written to disk, the routine that set
	 * us up (deallocate_dependencies) will have arranged to leave
	 * a complete copy of the indirect block in memory for our use.
	 * Otherwise we have to read the blocks in from the disk.
	 */
	ACQUIRE_LOCK(&lk);
	if ((bp = findblk(ip->i_devvp, doffset, FINDBLK_TEST)) != NULL &&
	    (wk = LIST_FIRST(&bp->b_dep)) != NULL) {
		/*
		 * bp must be ir_savebp, which is held locked for our use.
		 */
		if (wk->wk_type != D_INDIRDEP ||
		    (indirdep = WK_INDIRDEP(wk))->ir_savebp != bp ||
		    (indirdep->ir_state & GOINGAWAY) == 0) {
			FREE_LOCK(&lk);
			panic("indir_trunc: lost indirdep");
		}
		WORKLIST_REMOVE(wk);
		WORKITEM_FREE(indirdep, D_INDIRDEP);
		if (LIST_FIRST(&bp->b_dep) != NULL) {
			FREE_LOCK(&lk);
			panic("indir_trunc: dangling dep");
		}
		FREE_LOCK(&lk);
	} else {
		FREE_LOCK(&lk);
		error = bread(ip->i_devvp, doffset, (int)fs->fs_bsize, &bp);
		if (error)
			return (error);
	}
	/*
	 * Recursively free indirect blocks.
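	 *
	 * (Here lbnadd == NINDIR(fs)^level is the span of logical blocks
	 * beneath one pointer at this level, so entry i of this block
	 * covers lbns starting at lbn + i * lbnadd. Continuing the
	 * NINDIR(fs) == 2048 example: entry 3 of a level-1 block handed
	 * in with lbn == 12 + 2048 covers lbns from 12 + 2048 + 3 * 2048.)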
2366 */ 2367 bap = (ufs_daddr_t *)bp->b_data; 2368 nblocks = btodb(fs->fs_bsize); 2369 for (i = NINDIR(fs) - 1; i >= 0; i--) { 2370 if ((nb = bap[i]) == 0) 2371 continue; 2372 if (level != 0) { 2373 if ((error = indir_trunc(ip, fsbtodoff(fs, nb), 2374 level - 1, lbn + (i * lbnadd), countp)) != 0) 2375 allerror = error; 2376 } 2377 ffs_blkfree(ip, nb, fs->fs_bsize); 2378 *countp += nblocks; 2379 } 2380 bp->b_flags |= B_INVAL | B_NOCACHE; 2381 brelse(bp); 2382 return (allerror); 2383 } 2384 2385 /* 2386 * Free an allocindir. 2387 * This routine must be called with splbio interrupts blocked. 2388 */ 2389 static void 2390 free_allocindir(struct allocindir *aip, struct inodedep *inodedep) 2391 { 2392 struct freefrag *freefrag; 2393 2394 #ifdef DEBUG 2395 if (lk.lkt_held == NOHOLDER) 2396 panic("free_allocindir: lock not held"); 2397 #endif 2398 if ((aip->ai_state & DEPCOMPLETE) == 0) 2399 LIST_REMOVE(aip, ai_deps); 2400 if (aip->ai_state & ONWORKLIST) 2401 WORKLIST_REMOVE(&aip->ai_list); 2402 LIST_REMOVE(aip, ai_next); 2403 if ((freefrag = aip->ai_freefrag) != NULL) { 2404 if (inodedep == NULL) 2405 add_to_worklist(&freefrag->ff_list); 2406 else 2407 WORKLIST_INSERT(&inodedep->id_bufwait, 2408 &freefrag->ff_list); 2409 } 2410 WORKITEM_FREE(aip, D_ALLOCINDIR); 2411 } 2412 2413 /* 2414 * Directory entry addition dependencies. 2415 * 2416 * When adding a new directory entry, the inode (with its incremented link 2417 * count) must be written to disk before the directory entry's pointer to it. 2418 * Also, if the inode is newly allocated, the corresponding freemap must be 2419 * updated (on disk) before the directory entry's pointer. These requirements 2420 * are met via undo/redo on the directory entry's pointer, which consists 2421 * simply of the inode number. 2422 * 2423 * As directory entries are added and deleted, the free space within a 2424 * directory block can become fragmented. The ufs filesystem will compact 2425 * a fragmented directory block to make space for a new entry. When this 2426 * occurs, the offsets of previously added entries change. Any "diradd" 2427 * dependency structures corresponding to these entries must be updated with 2428 * the new offsets. 2429 */ 2430 2431 /* 2432 * This routine is called after the in-memory inode's link 2433 * count has been incremented, but before the directory entry's 2434 * pointer to the inode has been set. 2435 * 2436 * Parameters: 2437 * bp: buffer containing directory block 2438 * dp: inode for directory 2439 * diroffset: offset of new entry in directory 2440 * newinum: inode referenced by new directory entry 2441 * newdirbp: non-NULL => contents of new mkdir 2442 */ 2443 void 2444 softdep_setup_directory_add(struct buf *bp, struct inode *dp, off_t diroffset, 2445 ino_t newinum, struct buf *newdirbp) 2446 { 2447 int offset; /* offset of new entry within directory block */ 2448 ufs_lbn_t lbn; /* block in directory containing new entry */ 2449 struct fs *fs; 2450 struct diradd *dap; 2451 struct pagedep *pagedep; 2452 struct inodedep *inodedep; 2453 struct mkdir *mkdir1, *mkdir2; 2454 2455 /* 2456 * Whiteouts have no dependencies. 
2457 */ 2458 if (newinum == WINO) { 2459 if (newdirbp != NULL) 2460 bdwrite(newdirbp); 2461 return; 2462 } 2463 2464 fs = dp->i_fs; 2465 lbn = lblkno(fs, diroffset); 2466 offset = blkoff(fs, diroffset); 2467 dap = kmalloc(sizeof(struct diradd), M_DIRADD, 2468 M_SOFTDEP_FLAGS | M_ZERO); 2469 dap->da_list.wk_type = D_DIRADD; 2470 dap->da_offset = offset; 2471 dap->da_newinum = newinum; 2472 dap->da_state = ATTACHED; 2473 if (newdirbp == NULL) { 2474 dap->da_state |= DEPCOMPLETE; 2475 ACQUIRE_LOCK(&lk); 2476 } else { 2477 dap->da_state |= MKDIR_BODY | MKDIR_PARENT; 2478 mkdir1 = kmalloc(sizeof(struct mkdir), M_MKDIR, 2479 M_SOFTDEP_FLAGS); 2480 mkdir1->md_list.wk_type = D_MKDIR; 2481 mkdir1->md_state = MKDIR_BODY; 2482 mkdir1->md_diradd = dap; 2483 mkdir2 = kmalloc(sizeof(struct mkdir), M_MKDIR, 2484 M_SOFTDEP_FLAGS); 2485 mkdir2->md_list.wk_type = D_MKDIR; 2486 mkdir2->md_state = MKDIR_PARENT; 2487 mkdir2->md_diradd = dap; 2488 /* 2489 * Dependency on "." and ".." being written to disk. 2490 */ 2491 mkdir1->md_buf = newdirbp; 2492 ACQUIRE_LOCK(&lk); 2493 LIST_INSERT_HEAD(&mkdirlisthd, mkdir1, md_mkdirs); 2494 WORKLIST_INSERT_BP(newdirbp, &mkdir1->md_list); 2495 FREE_LOCK(&lk); 2496 bdwrite(newdirbp); 2497 /* 2498 * Dependency on link count increase for parent directory 2499 */ 2500 ACQUIRE_LOCK(&lk); 2501 if (inodedep_lookup(dp->i_fs, dp->i_number, 0, &inodedep) == 0 2502 || (inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) { 2503 dap->da_state &= ~MKDIR_PARENT; 2504 WORKITEM_FREE(mkdir2, D_MKDIR); 2505 } else { 2506 LIST_INSERT_HEAD(&mkdirlisthd, mkdir2, md_mkdirs); 2507 WORKLIST_INSERT(&inodedep->id_bufwait,&mkdir2->md_list); 2508 } 2509 } 2510 /* 2511 * Link into parent directory pagedep to await its being written. 2512 */ 2513 if (pagedep_lookup(dp, lbn, DEPALLOC, &pagedep) == 0) 2514 WORKLIST_INSERT_BP(bp, &pagedep->pd_list); 2515 dap->da_pagedep = pagedep; 2516 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)], dap, 2517 da_pdlist); 2518 /* 2519 * Link into its inodedep. Put it on the id_bufwait list if the inode 2520 * is not yet written. If it is written, do the post-inode write 2521 * processing to put it on the id_pendinghd list. 2522 */ 2523 (void) inodedep_lookup(fs, newinum, DEPALLOC, &inodedep); 2524 if ((inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) 2525 diradd_inode_written(dap, inodedep); 2526 else 2527 WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list); 2528 FREE_LOCK(&lk); 2529 } 2530 2531 /* 2532 * This procedure is called to change the offset of a directory 2533 * entry when compacting a directory block which must be owned 2534 * exclusively by the caller. Note that the actual entry movement 2535 * must be done in this procedure to ensure that no I/O completions 2536 * occur while the move is in progress. 
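 *
 * For context, the directory compaction code is expected to substitute
 * a call to this routine for its plain entry copy, along these lines
 * (a sketch of the caller in ufs_direnter, slightly simplified):
 *
 *	if (DOINGSOFTDEP(dvp))
 *		softdep_change_directoryentry_offset(dp, dirbuf,
 *		    (caddr_t)nep, (caddr_t)ep, dsize);
 *	else
 *		bcopy(nep, ep, dsize);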
2537 * 2538 * Parameters: 2539 * dp: inode for directory 2540 * base: address of dp->i_offset 2541 * oldloc: address of old directory location 2542 * newloc: address of new directory location 2543 * entrysize: size of directory entry 2544 */ 2545 void 2546 softdep_change_directoryentry_offset(struct inode *dp, caddr_t base, 2547 caddr_t oldloc, caddr_t newloc, 2548 int entrysize) 2549 { 2550 int offset, oldoffset, newoffset; 2551 struct pagedep *pagedep; 2552 struct diradd *dap; 2553 ufs_lbn_t lbn; 2554 2555 ACQUIRE_LOCK(&lk); 2556 lbn = lblkno(dp->i_fs, dp->i_offset); 2557 offset = blkoff(dp->i_fs, dp->i_offset); 2558 if (pagedep_lookup(dp, lbn, 0, &pagedep) == 0) 2559 goto done; 2560 oldoffset = offset + (oldloc - base); 2561 newoffset = offset + (newloc - base); 2562 2563 LIST_FOREACH(dap, &pagedep->pd_diraddhd[DIRADDHASH(oldoffset)], da_pdlist) { 2564 if (dap->da_offset != oldoffset) 2565 continue; 2566 dap->da_offset = newoffset; 2567 if (DIRADDHASH(newoffset) == DIRADDHASH(oldoffset)) 2568 break; 2569 LIST_REMOVE(dap, da_pdlist); 2570 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(newoffset)], 2571 dap, da_pdlist); 2572 break; 2573 } 2574 if (dap == NULL) { 2575 2576 LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist) { 2577 if (dap->da_offset == oldoffset) { 2578 dap->da_offset = newoffset; 2579 break; 2580 } 2581 } 2582 } 2583 done: 2584 bcopy(oldloc, newloc, entrysize); 2585 FREE_LOCK(&lk); 2586 } 2587 2588 /* 2589 * Free a diradd dependency structure. This routine must be called 2590 * with splbio interrupts blocked. 2591 */ 2592 static void 2593 free_diradd(struct diradd *dap) 2594 { 2595 struct dirrem *dirrem; 2596 struct pagedep *pagedep; 2597 struct inodedep *inodedep; 2598 struct mkdir *mkdir, *nextmd; 2599 2600 #ifdef DEBUG 2601 if (lk.lkt_held == NOHOLDER) 2602 panic("free_diradd: lock not held"); 2603 #endif 2604 WORKLIST_REMOVE(&dap->da_list); 2605 LIST_REMOVE(dap, da_pdlist); 2606 if ((dap->da_state & DIRCHG) == 0) { 2607 pagedep = dap->da_pagedep; 2608 } else { 2609 dirrem = dap->da_previous; 2610 pagedep = dirrem->dm_pagedep; 2611 dirrem->dm_dirinum = pagedep->pd_ino; 2612 add_to_worklist(&dirrem->dm_list); 2613 } 2614 if (inodedep_lookup(VFSTOUFS(pagedep->pd_mnt)->um_fs, dap->da_newinum, 2615 0, &inodedep) != 0) 2616 (void) free_inodedep(inodedep); 2617 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) { 2618 for (mkdir = LIST_FIRST(&mkdirlisthd); mkdir; mkdir = nextmd) { 2619 nextmd = LIST_NEXT(mkdir, md_mkdirs); 2620 if (mkdir->md_diradd != dap) 2621 continue; 2622 dap->da_state &= ~mkdir->md_state; 2623 WORKLIST_REMOVE(&mkdir->md_list); 2624 LIST_REMOVE(mkdir, md_mkdirs); 2625 WORKITEM_FREE(mkdir, D_MKDIR); 2626 } 2627 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) { 2628 FREE_LOCK(&lk); 2629 panic("free_diradd: unfound ref"); 2630 } 2631 } 2632 WORKITEM_FREE(dap, D_DIRADD); 2633 } 2634 2635 /* 2636 * Directory entry removal dependencies. 2637 * 2638 * When removing a directory entry, the entry's inode pointer must be 2639 * zero'ed on disk before the corresponding inode's link count is decremented 2640 * (possibly freeing the inode for re-use). This dependency is handled by 2641 * updating the directory entry but delaying the inode count reduction until 2642 * after the directory block has been written to disk. After this point, the 2643 * inode count can be decremented whenever it is convenient. 2644 */ 2645 2646 /* 2647 * This routine should be called immediately after removing 2648 * a directory entry. 
The inode's link count should not be 2649 * decremented by the calling procedure -- the soft updates 2650 * code will do this task when it is safe. 2651 * 2652 * Parameters: 2653 * bp: buffer containing directory block 2654 * dp: inode for the directory being modified 2655 * ip: inode for directory entry being removed 2656 * isrmdir: indicates if doing RMDIR 2657 */ 2658 void 2659 softdep_setup_remove(struct buf *bp, struct inode *dp, struct inode *ip, 2660 int isrmdir) 2661 { 2662 struct dirrem *dirrem, *prevdirrem; 2663 2664 /* 2665 * Allocate a new dirrem if appropriate and ACQUIRE_LOCK. 2666 */ 2667 dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem); 2668 2669 /* 2670 * If the COMPLETE flag is clear, then there were no active 2671 * entries and we want to roll back to a zeroed entry until 2672 * the new inode is committed to disk. If the COMPLETE flag is 2673 * set then we have deleted an entry that never made it to 2674 * disk. If the entry we deleted resulted from a name change, 2675 * then the old name still resides on disk. We cannot delete 2676 * its inode (returned to us in prevdirrem) until the zeroed 2677 * directory entry gets to disk. The new inode has never been 2678 * referenced on the disk, so can be deleted immediately. 2679 */ 2680 if ((dirrem->dm_state & COMPLETE) == 0) { 2681 LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd, dirrem, 2682 dm_next); 2683 FREE_LOCK(&lk); 2684 } else { 2685 if (prevdirrem != NULL) 2686 LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd, 2687 prevdirrem, dm_next); 2688 dirrem->dm_dirinum = dirrem->dm_pagedep->pd_ino; 2689 FREE_LOCK(&lk); 2690 handle_workitem_remove(dirrem); 2691 } 2692 } 2693 2694 /* 2695 * Allocate a new dirrem if appropriate and return it along with 2696 * its associated pagedep. Called without a lock, returns with lock. 2697 */ 2698 static long num_dirrem; /* number of dirrem allocated */ 2699 2700 /* 2701 * Parameters: 2702 * bp: buffer containing directory block 2703 * dp: inode for the directory being modified 2704 * ip: inode for directory entry being removed 2705 * isrmdir: indicates if doing RMDIR 2706 * prevdirremp: previously referenced inode, if any 2707 */ 2708 static struct dirrem * 2709 newdirrem(struct buf *bp, struct inode *dp, struct inode *ip, 2710 int isrmdir, struct dirrem **prevdirremp) 2711 { 2712 int offset; 2713 ufs_lbn_t lbn; 2714 struct diradd *dap; 2715 struct dirrem *dirrem; 2716 struct pagedep *pagedep; 2717 2718 /* 2719 * Whiteouts have no deletion dependencies. 2720 */ 2721 if (ip == NULL) 2722 panic("newdirrem: whiteout"); 2723 /* 2724 * If we are over our limit, try to improve the situation. 2725 * Limiting the number of dirrem structures will also limit 2726 * the number of freefile and freeblks structures. 2727 */ 2728 if (num_dirrem > max_softdeps / 2 && speedup_syncer() == 0) 2729 (void) request_cleanup(FLUSH_REMOVE, 0); 2730 num_dirrem += 1; 2731 dirrem = kmalloc(sizeof(struct dirrem), M_DIRREM, 2732 M_SOFTDEP_FLAGS | M_ZERO); 2733 dirrem->dm_list.wk_type = D_DIRREM; 2734 dirrem->dm_state = isrmdir ? RMDIR : 0; 2735 dirrem->dm_mnt = ITOV(ip)->v_mount; 2736 dirrem->dm_oldinum = ip->i_number; 2737 *prevdirremp = NULL; 2738 2739 ACQUIRE_LOCK(&lk); 2740 lbn = lblkno(dp->i_fs, dp->i_offset); 2741 offset = blkoff(dp->i_fs, dp->i_offset); 2742 if (pagedep_lookup(dp, lbn, DEPALLOC, &pagedep) == 0) 2743 WORKLIST_INSERT_BP(bp, &pagedep->pd_list); 2744 dirrem->dm_pagedep = pagedep; 2745 /* 2746 * Check for a diradd dependency for the same directory entry. 
	 * If present, then both dependencies become obsolete and can
	 * be de-allocated. Check for an entry on both the pd_diraddhd
	 * list and the pd_pendinghd list.
	 */
	LIST_FOREACH(dap, &pagedep->pd_diraddhd[DIRADDHASH(offset)], da_pdlist)
		if (dap->da_offset == offset)
			break;
	if (dap == NULL) {
		LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist)
			if (dap->da_offset == offset)
				break;
		if (dap == NULL)
			return (dirrem);
	}
	/*
	 * Must be ATTACHED at this point.
	 */
	if ((dap->da_state & ATTACHED) == 0) {
		FREE_LOCK(&lk);
		panic("newdirrem: not ATTACHED");
	}
	if (dap->da_newinum != ip->i_number) {
		FREE_LOCK(&lk);
		panic("newdirrem: inum %"PRId64" should be %"PRId64,
		    ip->i_number, dap->da_newinum);
	}
	/*
	 * If we are deleting a changed name that never made it to disk,
	 * then return the dirrem describing the previous inode (which
	 * represents the inode currently referenced from this entry on disk).
	 */
	if ((dap->da_state & DIRCHG) != 0) {
		*prevdirremp = dap->da_previous;
		dap->da_state &= ~DIRCHG;
		dap->da_pagedep = pagedep;
	}
	/*
	 * We are deleting an entry that never made it to disk.
	 * Mark it COMPLETE so we can delete its inode immediately.
	 */
	dirrem->dm_state |= COMPLETE;
	free_diradd(dap);
	return (dirrem);
}

/*
 * Directory entry change dependencies.
 *
 * Changing an existing directory entry requires that an add operation
 * be completed first, followed by a deletion. The semantics for the addition
 * are identical to the description of adding a new entry above except
 * that the rollback is to the old inode number rather than zero. Once
 * the addition dependency is completed, the removal is done as described
 * in the removal routine above.
 */

/*
 * This routine should be called immediately after changing
 * a directory entry. The inode's link count should not be
 * decremented by the calling procedure -- the soft updates
 * code will perform this task when it is safe.
 *
 * Parameters:
 *	bp:	buffer containing directory block
 *	dp:	inode for the directory being modified
 *	ip:	inode for directory entry being removed
 *	newinum: new inode number for changed entry
 *	isrmdir: indicates if doing RMDIR
 */
void
softdep_setup_directory_change(struct buf *bp, struct inode *dp,
			       struct inode *ip, ino_t newinum,
			       int isrmdir)
{
	int offset;
	struct diradd *dap = NULL;
	struct dirrem *dirrem, *prevdirrem;
	struct pagedep *pagedep;
	struct inodedep *inodedep;

	offset = blkoff(dp->i_fs, dp->i_offset);

	/*
	 * Whiteouts do not need diradd dependencies.
	 */
	if (newinum != WINO) {
		dap = kmalloc(sizeof(struct diradd), M_DIRADD,
		    M_SOFTDEP_FLAGS | M_ZERO);
		dap->da_list.wk_type = D_DIRADD;
		dap->da_state = DIRCHG | ATTACHED | DEPCOMPLETE;
		dap->da_offset = offset;
		dap->da_newinum = newinum;
	}

	/*
	 * Allocate a new dirrem and ACQUIRE_LOCK.
2845 */ 2846 dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem); 2847 pagedep = dirrem->dm_pagedep; 2848 /* 2849 * The possible values for isrmdir: 2850 * 0 - non-directory file rename 2851 * 1 - directory rename within same directory 2852 * inum - directory rename to new directory of given inode number 2853 * When renaming to a new directory, we are both deleting and 2854 * creating a new directory entry, so the link count on the new 2855 * directory should not change. Thus we do not need the followup 2856 * dirrem which is usually done in handle_workitem_remove. We set 2857 * the DIRCHG flag to tell handle_workitem_remove to skip the 2858 * followup dirrem. 2859 */ 2860 if (isrmdir > 1) 2861 dirrem->dm_state |= DIRCHG; 2862 2863 /* 2864 * Whiteouts have no additional dependencies, 2865 * so just put the dirrem on the correct list. 2866 */ 2867 if (newinum == WINO) { 2868 if ((dirrem->dm_state & COMPLETE) == 0) { 2869 LIST_INSERT_HEAD(&pagedep->pd_dirremhd, dirrem, 2870 dm_next); 2871 } else { 2872 dirrem->dm_dirinum = pagedep->pd_ino; 2873 add_to_worklist(&dirrem->dm_list); 2874 } 2875 FREE_LOCK(&lk); 2876 return; 2877 } 2878 2879 /* 2880 * If the COMPLETE flag is clear, then there were no active 2881 * entries and we want to roll back to the previous inode until 2882 * the new inode is committed to disk. If the COMPLETE flag is 2883 * set, then we have deleted an entry that never made it to disk. 2884 * If the entry we deleted resulted from a name change, then the old 2885 * inode reference still resides on disk. Any rollback that we do 2886 * needs to be to that old inode (returned to us in prevdirrem). If 2887 * the entry we deleted resulted from a create, then there is 2888 * no entry on the disk, so we want to roll back to zero rather 2889 * than the uncommitted inode. In either of the COMPLETE cases we 2890 * want to immediately free the unwritten and unreferenced inode. 2891 */ 2892 if ((dirrem->dm_state & COMPLETE) == 0) { 2893 dap->da_previous = dirrem; 2894 } else { 2895 if (prevdirrem != NULL) { 2896 dap->da_previous = prevdirrem; 2897 } else { 2898 dap->da_state &= ~DIRCHG; 2899 dap->da_pagedep = pagedep; 2900 } 2901 dirrem->dm_dirinum = pagedep->pd_ino; 2902 add_to_worklist(&dirrem->dm_list); 2903 } 2904 /* 2905 * Link into its inodedep. Put it on the id_bufwait list if the inode 2906 * is not yet written. If it is written, do the post-inode write 2907 * processing to put it on the id_pendinghd list. 2908 */ 2909 if (inodedep_lookup(dp->i_fs, newinum, DEPALLOC, &inodedep) == 0 || 2910 (inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) { 2911 dap->da_state |= COMPLETE; 2912 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist); 2913 WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list); 2914 } else { 2915 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)], 2916 dap, da_pdlist); 2917 WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list); 2918 } 2919 FREE_LOCK(&lk); 2920 } 2921 2922 /* 2923 * Called whenever the link count on an inode is changed. 2924 * It creates an inode dependency so that the new reference(s) 2925 * to the inode cannot be committed to disk until the updated 2926 * inode has been written. 
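 *
 * As an illustration of the bookkeeping on the unlink path (a
 * simplified sketch): the effective link count drops immediately,
 * while the on-disk count must wait for the zeroed directory entry,
 * so id_nlinkdelta records the pending decrement.
 *
 *	ip->i_effnlink--;		/* at unlink time, in the caller */
 *	softdep_change_linkcnt(ip);	/* id_nlinkdelta becomes 1 */
 *	...
 *	ip->i_nlink--;			/* later, handle_workitem_remove */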
 *
 * Parameters:
 *	ip:	the inode with the increased link count
 */
void
softdep_change_linkcnt(struct inode *ip)
{
	struct inodedep *inodedep;

	ACQUIRE_LOCK(&lk);
	(void) inodedep_lookup(ip->i_fs, ip->i_number, DEPALLOC, &inodedep);
	if (ip->i_nlink < ip->i_effnlink) {
		FREE_LOCK(&lk);
		panic("softdep_change_linkcnt: bad delta");
	}
	inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
	FREE_LOCK(&lk);
}

/*
 * This workitem decrements the inode's link count.
 * If the link count reaches zero, the file is removed.
 */
static void
handle_workitem_remove(struct dirrem *dirrem)
{
	struct inodedep *inodedep;
	struct vnode *vp;
	struct inode *ip;
	ino_t oldinum;
	int error;

	error = VFS_VGET(dirrem->dm_mnt, NULL, dirrem->dm_oldinum, &vp);
	if (error) {
		softdep_error("handle_workitem_remove: vget", error);
		return;
	}
	ip = VTOI(vp);
	ACQUIRE_LOCK(&lk);
	if ((inodedep_lookup(ip->i_fs, dirrem->dm_oldinum, 0, &inodedep)) == 0) {
		FREE_LOCK(&lk);
		panic("handle_workitem_remove: lost inodedep");
	}
	/*
	 * Normal file deletion.
	 */
	if ((dirrem->dm_state & RMDIR) == 0) {
		ip->i_nlink--;
		ip->i_flag |= IN_CHANGE;
		if (ip->i_nlink < ip->i_effnlink) {
			FREE_LOCK(&lk);
			panic("handle_workitem_remove: bad file delta");
		}
		inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
		FREE_LOCK(&lk);
		vput(vp);
		num_dirrem -= 1;
		WORKITEM_FREE(dirrem, D_DIRREM);
		return;
	}
	/*
	 * Directory deletion. Decrement reference count for both the
	 * just deleted parent directory entry and the reference for ".".
	 * Next truncate the directory to length zero. When the
	 * truncation completes, arrange to have the reference count on
	 * the parent decremented to account for the loss of "..".
	 */
	ip->i_nlink -= 2;
	ip->i_flag |= IN_CHANGE;
	if (ip->i_nlink < ip->i_effnlink) {
		FREE_LOCK(&lk);
		panic("handle_workitem_remove: bad dir delta");
	}
	inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
	FREE_LOCK(&lk);
	if ((error = ffs_truncate(vp, (off_t)0, 0, proc0.p_ucred)) != 0)
		softdep_error("handle_workitem_remove: truncate", error);
	/*
	 * Rename of a directory to a new parent. Since we are both
	 * deleting and creating a new directory entry, the link count
	 * on the new directory should not change. Thus we skip the
	 * followup dirrem.
	 */
	if (dirrem->dm_state & DIRCHG) {
		vput(vp);
		num_dirrem -= 1;
		WORKITEM_FREE(dirrem, D_DIRREM);
		return;
	}
	/*
	 * If the inodedep does not exist, then the zero'ed inode has
	 * been written to disk. If the allocated inode has never been
	 * written to disk, then the on-disk inode is zero'ed. In either
	 * case we can remove the file immediately.
	 */
	ACQUIRE_LOCK(&lk);
	dirrem->dm_state = 0;
	oldinum = dirrem->dm_oldinum;
	dirrem->dm_oldinum = dirrem->dm_dirinum;
	if (inodedep_lookup(ip->i_fs, oldinum, 0, &inodedep) == 0 ||
	    check_inode_unwritten(inodedep)) {
		FREE_LOCK(&lk);
		vput(vp);
		handle_workitem_remove(dirrem);
		return;
	}
	WORKLIST_INSERT(&inodedep->id_inowait, &dirrem->dm_list);
	FREE_LOCK(&lk);
	ip->i_flag |= IN_CHANGE;
	ffs_update(vp, 0);
	vput(vp);
}

/*
 * Inode de-allocation dependencies.
3041 * 3042 * When an inode's link count is reduced to zero, it can be de-allocated. We 3043 * found it convenient to postpone de-allocation until after the inode is 3044 * written to disk with its new link count (zero). At this point, all of the 3045 * on-disk inode's block pointers are nullified and, with careful dependency 3046 * list ordering, all dependencies related to the inode will be satisfied and 3047 * the corresponding dependency structures de-allocated. So, if/when the 3048 * inode is reused, there will be no mixing of old dependencies with new 3049 * ones. This artificial dependency is set up by the block de-allocation 3050 * procedure above (softdep_setup_freeblocks) and completed by the 3051 * following procedure. 3052 */ 3053 static void 3054 handle_workitem_freefile(struct freefile *freefile) 3055 { 3056 struct vnode vp; 3057 struct inode tip; 3058 struct inodedep *idp; 3059 int error; 3060 3061 #ifdef DEBUG 3062 ACQUIRE_LOCK(&lk); 3063 error = inodedep_lookup(freefile->fx_fs, freefile->fx_oldinum, 0, &idp); 3064 FREE_LOCK(&lk); 3065 if (error) 3066 panic("handle_workitem_freefile: inodedep survived"); 3067 #endif 3068 tip.i_devvp = freefile->fx_devvp; 3069 tip.i_dev = freefile->fx_devvp->v_rdev; 3070 tip.i_fs = freefile->fx_fs; 3071 vp.v_data = &tip; 3072 if ((error = ffs_freefile(&vp, freefile->fx_oldinum, freefile->fx_mode)) != 0) 3073 softdep_error("handle_workitem_freefile", error); 3074 WORKITEM_FREE(freefile, D_FREEFILE); 3075 } 3076 3077 /* 3078 * Helper function which unlinks marker element from work list and returns 3079 * the next element on the list. 3080 */ 3081 static __inline struct worklist * 3082 markernext(struct worklist *marker) 3083 { 3084 struct worklist *next; 3085 3086 next = LIST_NEXT(marker, wk_list); 3087 LIST_REMOVE(marker, wk_list); 3088 return next; 3089 } 3090 3091 /* 3092 * checkread, checkwrite 3093 * 3094 * bioops callback - hold io_token 3095 */ 3096 static int 3097 softdep_checkread(struct buf *bp) 3098 { 3099 /* nothing to do, mp lock not needed */ 3100 return(0); 3101 } 3102 3103 /* 3104 * bioops callback - hold io_token 3105 */ 3106 static int 3107 softdep_checkwrite(struct buf *bp) 3108 { 3109 /* nothing to do, mp lock not needed */ 3110 return(0); 3111 } 3112 3113 /* 3114 * Disk writes. 3115 * 3116 * The dependency structures constructed above are most actively used when file 3117 * system blocks are written to disk. No constraints are placed on when a 3118 * block can be written, but unsatisfied update dependencies are made safe by 3119 * modifying (or replacing) the source memory for the duration of the disk 3120 * write. When the disk write completes, the memory block is again brought 3121 * up-to-date. 3122 * 3123 * In-core inode structure reclamation. 3124 * 3125 * Because there are a finite number of "in-core" inode structures, they are 3126 * reused regularly. By transferring all inode-related dependencies to the 3127 * in-memory inode block and indexing them separately (via "inodedep"s), we 3128 * can allow "in-core" inode structures to be reused at any time and avoid 3129 * any increase in contention. 3130 * 3131 * Called just before entering the device driver to initiate a new disk I/O. 3132 * The buffer must be locked, thus, no I/O completion operations can occur 3133 * while we are manipulating its associated dependencies. 
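 *
 * In outline, a buffer holding unsafe dependencies is rolled back here
 * and rolled forward again in softdep_disk_write_complete(). For an
 * indirdep the flow condenses to (a sketch of the code below):
 *
 *	initiation:	save b_data, copy in the safe version,
 *			ATTACHED -> UNDONE
 *	completion:	restore the saved data, UNDONE -> ATTACHED,
 *			bdirty(bp) so the real data is written next time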
3134 * 3135 * bioops callback - hold io_token 3136 * 3137 * Parameters: 3138 * bp: structure describing disk write to occur 3139 */ 3140 static void 3141 softdep_disk_io_initiation(struct buf *bp) 3142 { 3143 struct worklist *wk; 3144 struct worklist marker; 3145 struct indirdep *indirdep; 3146 3147 /* 3148 * We only care about write operations. There should never 3149 * be dependencies for reads. 3150 */ 3151 if (bp->b_cmd == BUF_CMD_READ) 3152 panic("softdep_disk_io_initiation: read"); 3153 3154 get_mplock(); 3155 marker.wk_type = D_LAST + 1; /* Not a normal workitem */ 3156 3157 /* 3158 * Do any necessary pre-I/O processing. 3159 */ 3160 for (wk = LIST_FIRST(&bp->b_dep); wk; wk = markernext(&marker)) { 3161 LIST_INSERT_AFTER(wk, &marker, wk_list); 3162 3163 switch (wk->wk_type) { 3164 case D_PAGEDEP: 3165 initiate_write_filepage(WK_PAGEDEP(wk), bp); 3166 continue; 3167 3168 case D_INODEDEP: 3169 initiate_write_inodeblock(WK_INODEDEP(wk), bp); 3170 continue; 3171 3172 case D_INDIRDEP: 3173 indirdep = WK_INDIRDEP(wk); 3174 if (indirdep->ir_state & GOINGAWAY) 3175 panic("disk_io_initiation: indirdep gone"); 3176 /* 3177 * If there are no remaining dependencies, this 3178 * will be writing the real pointers, so the 3179 * dependency can be freed. 3180 */ 3181 if (LIST_FIRST(&indirdep->ir_deplisthd) == NULL) { 3182 indirdep->ir_savebp->b_flags |= B_INVAL | B_NOCACHE; 3183 brelse(indirdep->ir_savebp); 3184 /* inline expand WORKLIST_REMOVE(wk); */ 3185 wk->wk_state &= ~ONWORKLIST; 3186 LIST_REMOVE(wk, wk_list); 3187 WORKITEM_FREE(indirdep, D_INDIRDEP); 3188 continue; 3189 } 3190 /* 3191 * Replace up-to-date version with safe version. 3192 */ 3193 indirdep->ir_saveddata = kmalloc(bp->b_bcount, 3194 M_INDIRDEP, 3195 M_SOFTDEP_FLAGS); 3196 ACQUIRE_LOCK(&lk); 3197 indirdep->ir_state &= ~ATTACHED; 3198 indirdep->ir_state |= UNDONE; 3199 bcopy(bp->b_data, indirdep->ir_saveddata, bp->b_bcount); 3200 bcopy(indirdep->ir_savebp->b_data, bp->b_data, 3201 bp->b_bcount); 3202 FREE_LOCK(&lk); 3203 continue; 3204 3205 case D_MKDIR: 3206 case D_BMSAFEMAP: 3207 case D_ALLOCDIRECT: 3208 case D_ALLOCINDIR: 3209 continue; 3210 3211 default: 3212 panic("handle_disk_io_initiation: Unexpected type %s", 3213 TYPENAME(wk->wk_type)); 3214 /* NOTREACHED */ 3215 } 3216 } 3217 rel_mplock(); 3218 } 3219 3220 /* 3221 * Called from within the procedure above to deal with unsatisfied 3222 * allocation dependencies in a directory. The buffer must be locked, 3223 * thus, no I/O completion operations can occur while we are 3224 * manipulating its associated dependencies. 3225 */ 3226 static void 3227 initiate_write_filepage(struct pagedep *pagedep, struct buf *bp) 3228 { 3229 struct diradd *dap; 3230 struct direct *ep; 3231 int i; 3232 3233 if (pagedep->pd_state & IOSTARTED) { 3234 /* 3235 * This can only happen if there is a driver that does not 3236 * understand chaining. Here biodone will reissue the call 3237 * to strategy for the incomplete buffers. 
3238 */ 3239 kprintf("initiate_write_filepage: already started\n"); 3240 return; 3241 } 3242 pagedep->pd_state |= IOSTARTED; 3243 ACQUIRE_LOCK(&lk); 3244 for (i = 0; i < DAHASHSZ; i++) { 3245 LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) { 3246 ep = (struct direct *) 3247 ((char *)bp->b_data + dap->da_offset); 3248 if (ep->d_ino != dap->da_newinum) { 3249 FREE_LOCK(&lk); 3250 panic("%s: dir inum %d != new %"PRId64, 3251 "initiate_write_filepage", 3252 ep->d_ino, dap->da_newinum); 3253 } 3254 if (dap->da_state & DIRCHG) 3255 ep->d_ino = dap->da_previous->dm_oldinum; 3256 else 3257 ep->d_ino = 0; 3258 dap->da_state &= ~ATTACHED; 3259 dap->da_state |= UNDONE; 3260 } 3261 } 3262 FREE_LOCK(&lk); 3263 } 3264 3265 /* 3266 * Called from within the procedure above to deal with unsatisfied 3267 * allocation dependencies in an inodeblock. The buffer must be 3268 * locked, thus, no I/O completion operations can occur while we 3269 * are manipulating its associated dependencies. 3270 * 3271 * Parameters: 3272 * bp: The inode block 3273 */ 3274 static void 3275 initiate_write_inodeblock(struct inodedep *inodedep, struct buf *bp) 3276 { 3277 struct allocdirect *adp, *lastadp; 3278 struct ufs1_dinode *dp; 3279 struct ufs1_dinode *sip; 3280 struct fs *fs; 3281 ufs_lbn_t prevlbn = 0; 3282 int i, deplist; 3283 3284 if (inodedep->id_state & IOSTARTED) 3285 panic("initiate_write_inodeblock: already started"); 3286 inodedep->id_state |= IOSTARTED; 3287 fs = inodedep->id_fs; 3288 dp = (struct ufs1_dinode *)bp->b_data + 3289 ino_to_fsbo(fs, inodedep->id_ino); 3290 /* 3291 * If the bitmap is not yet written, then the allocated 3292 * inode cannot be written to disk. 3293 */ 3294 if ((inodedep->id_state & DEPCOMPLETE) == 0) { 3295 if (inodedep->id_savedino != NULL) 3296 panic("initiate_write_inodeblock: already doing I/O"); 3297 sip = kmalloc(sizeof(struct ufs1_dinode), M_INODEDEP, 3298 M_SOFTDEP_FLAGS); 3299 inodedep->id_savedino = sip; 3300 *inodedep->id_savedino = *dp; 3301 bzero((caddr_t)dp, sizeof(struct ufs1_dinode)); 3302 dp->di_gen = inodedep->id_savedino->di_gen; 3303 return; 3304 } 3305 /* 3306 * If no dependencies, then there is nothing to roll back. 3307 */ 3308 inodedep->id_savedsize = dp->di_size; 3309 if (TAILQ_FIRST(&inodedep->id_inoupdt) == NULL) 3310 return; 3311 /* 3312 * Set the dependencies to busy. 
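	 *
	 * (The DIAGNOSTIC deplist below is a bitmask: bit ad_lbn for a
	 * direct block dependency and bit NDADDR + i for a dependency on
	 * indirect pointer i, which is why the later consistency checks
	 * test (1 << i) and ((1 << NDADDR) << i) respectively.)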
	 */
	ACQUIRE_LOCK(&lk);
	for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
	     adp = TAILQ_NEXT(adp, ad_next)) {
#ifdef DIAGNOSTIC
		if (deplist != 0 && prevlbn >= adp->ad_lbn) {
			FREE_LOCK(&lk);
			panic("softdep_write_inodeblock: lbn order");
		}
		prevlbn = adp->ad_lbn;
		if (adp->ad_lbn < NDADDR &&
		    dp->di_db[adp->ad_lbn] != adp->ad_newblkno) {
			FREE_LOCK(&lk);
			panic("%s: direct pointer #%ld mismatch %d != %d",
			    "softdep_write_inodeblock", adp->ad_lbn,
			    dp->di_db[adp->ad_lbn], adp->ad_newblkno);
		}
		if (adp->ad_lbn >= NDADDR &&
		    dp->di_ib[adp->ad_lbn - NDADDR] != adp->ad_newblkno) {
			FREE_LOCK(&lk);
			panic("%s: indirect pointer #%ld mismatch %d != %d",
			    "softdep_write_inodeblock", adp->ad_lbn - NDADDR,
			    dp->di_ib[adp->ad_lbn - NDADDR], adp->ad_newblkno);
		}
		deplist |= 1 << adp->ad_lbn;
		if ((adp->ad_state & ATTACHED) == 0) {
			FREE_LOCK(&lk);
			panic("softdep_write_inodeblock: Unknown state 0x%x",
			    adp->ad_state);
		}
#endif /* DIAGNOSTIC */
		adp->ad_state &= ~ATTACHED;
		adp->ad_state |= UNDONE;
	}
	/*
	 * The on-disk inode cannot claim to be any larger than the last
	 * fragment that has been written. Otherwise, the on-disk inode
	 * might have fragments that were not the last block in the file,
	 * which would corrupt the filesystem.
	 */
	for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
	     lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) {
		if (adp->ad_lbn >= NDADDR)
			break;
		dp->di_db[adp->ad_lbn] = adp->ad_oldblkno;
		/* keep going until hitting a rollback to a frag */
		if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize)
			continue;
		dp->di_size = fs->fs_bsize * adp->ad_lbn + adp->ad_oldsize;
		for (i = adp->ad_lbn + 1; i < NDADDR; i++) {
#ifdef DIAGNOSTIC
			if (dp->di_db[i] != 0 && (deplist & (1 << i)) == 0) {
				FREE_LOCK(&lk);
				panic("softdep_write_inodeblock: lost dep1");
			}
#endif /* DIAGNOSTIC */
			dp->di_db[i] = 0;
		}
		for (i = 0; i < NIADDR; i++) {
#ifdef DIAGNOSTIC
			if (dp->di_ib[i] != 0 &&
			    (deplist & ((1 << NDADDR) << i)) == 0) {
				FREE_LOCK(&lk);
				panic("softdep_write_inodeblock: lost dep2");
			}
#endif /* DIAGNOSTIC */
			dp->di_ib[i] = 0;
		}
		FREE_LOCK(&lk);
		return;
	}
	/*
	 * If we have zero'ed out the last allocated block of the file,
	 * roll back the size to the last currently allocated block.
	 * We know that this last allocated block is full-sized, as
	 * we already checked for fragments in the loop above.
	 */
	if (lastadp != NULL &&
	    dp->di_size <= (lastadp->ad_lbn + 1) * fs->fs_bsize) {
		for (i = lastadp->ad_lbn; i >= 0; i--)
			if (dp->di_db[i] != 0)
				break;
		dp->di_size = (i + 1) * fs->fs_bsize;
	}
	/*
	 * The only dependencies are for indirect blocks.
	 *
	 * The file size for indirect block additions is not guaranteed.
	 * Such a guarantee would be non-trivial to achieve. The conventional
	 * synchronous write implementation also does not make this guarantee.
	 * Fsck should catch and fix discrepancies. Arguably, the file size
	 * can be over-estimated without destroying integrity when the file
	 * moves into the indirect blocks (i.e., is large). If we want to
	 * postpone fsck, we are stuck with this argument.
3407 */ 3408 for (; adp; adp = TAILQ_NEXT(adp, ad_next)) 3409 dp->di_ib[adp->ad_lbn - NDADDR] = 0; 3410 FREE_LOCK(&lk); 3411 } 3412 3413 /* 3414 * This routine is called during the completion interrupt 3415 * service routine for a disk write (from the procedure called 3416 * by the device driver to inform the filesystem caches of 3417 * a request completion). It should be called early in this 3418 * procedure, before the block is made available to other 3419 * processes or other routines are called. 3420 * 3421 * bioops callback - hold io_token 3422 * 3423 * Parameters: 3424 * bp: describes the completed disk write 3425 */ 3426 static void 3427 softdep_disk_write_complete(struct buf *bp) 3428 { 3429 struct worklist *wk; 3430 struct workhead reattach; 3431 struct newblk *newblk; 3432 struct allocindir *aip; 3433 struct allocdirect *adp; 3434 struct indirdep *indirdep; 3435 struct inodedep *inodedep; 3436 struct bmsafemap *bmsafemap; 3437 3438 get_mplock(); 3439 #ifdef DEBUG 3440 if (lk.lkt_held != NOHOLDER) 3441 panic("softdep_disk_write_complete: lock is held"); 3442 lk.lkt_held = SPECIAL_FLAG; 3443 #endif 3444 LIST_INIT(&reattach); 3445 while ((wk = LIST_FIRST(&bp->b_dep)) != NULL) { 3446 WORKLIST_REMOVE(wk); 3447 switch (wk->wk_type) { 3448 3449 case D_PAGEDEP: 3450 if (handle_written_filepage(WK_PAGEDEP(wk), bp)) 3451 WORKLIST_INSERT(&reattach, wk); 3452 continue; 3453 3454 case D_INODEDEP: 3455 if (handle_written_inodeblock(WK_INODEDEP(wk), bp)) 3456 WORKLIST_INSERT(&reattach, wk); 3457 continue; 3458 3459 case D_BMSAFEMAP: 3460 bmsafemap = WK_BMSAFEMAP(wk); 3461 while ((newblk = LIST_FIRST(&bmsafemap->sm_newblkhd))) { 3462 newblk->nb_state |= DEPCOMPLETE; 3463 newblk->nb_bmsafemap = NULL; 3464 LIST_REMOVE(newblk, nb_deps); 3465 } 3466 while ((adp = 3467 LIST_FIRST(&bmsafemap->sm_allocdirecthd))) { 3468 adp->ad_state |= DEPCOMPLETE; 3469 adp->ad_buf = NULL; 3470 LIST_REMOVE(adp, ad_deps); 3471 handle_allocdirect_partdone(adp); 3472 } 3473 while ((aip = 3474 LIST_FIRST(&bmsafemap->sm_allocindirhd))) { 3475 aip->ai_state |= DEPCOMPLETE; 3476 aip->ai_buf = NULL; 3477 LIST_REMOVE(aip, ai_deps); 3478 handle_allocindir_partdone(aip); 3479 } 3480 while ((inodedep = 3481 LIST_FIRST(&bmsafemap->sm_inodedephd)) != NULL) { 3482 inodedep->id_state |= DEPCOMPLETE; 3483 LIST_REMOVE(inodedep, id_deps); 3484 inodedep->id_buf = NULL; 3485 } 3486 WORKITEM_FREE(bmsafemap, D_BMSAFEMAP); 3487 continue; 3488 3489 case D_MKDIR: 3490 handle_written_mkdir(WK_MKDIR(wk), MKDIR_BODY); 3491 continue; 3492 3493 case D_ALLOCDIRECT: 3494 adp = WK_ALLOCDIRECT(wk); 3495 adp->ad_state |= COMPLETE; 3496 handle_allocdirect_partdone(adp); 3497 continue; 3498 3499 case D_ALLOCINDIR: 3500 aip = WK_ALLOCINDIR(wk); 3501 aip->ai_state |= COMPLETE; 3502 handle_allocindir_partdone(aip); 3503 continue; 3504 3505 case D_INDIRDEP: 3506 indirdep = WK_INDIRDEP(wk); 3507 if (indirdep->ir_state & GOINGAWAY) { 3508 lk.lkt_held = NOHOLDER; 3509 panic("disk_write_complete: indirdep gone"); 3510 } 3511 bcopy(indirdep->ir_saveddata, bp->b_data, bp->b_bcount); 3512 kfree(indirdep->ir_saveddata, M_INDIRDEP); 3513 indirdep->ir_saveddata = 0; 3514 indirdep->ir_state &= ~UNDONE; 3515 indirdep->ir_state |= ATTACHED; 3516 while ((aip = LIST_FIRST(&indirdep->ir_donehd)) != NULL) { 3517 handle_allocindir_partdone(aip); 3518 if (aip == LIST_FIRST(&indirdep->ir_donehd)) { 3519 lk.lkt_held = NOHOLDER; 3520 panic("disk_write_complete: not gone"); 3521 } 3522 } 3523 WORKLIST_INSERT(&reattach, wk); 3524 if ((bp->b_flags & B_DELWRI) == 0) 3525 
stat_indir_blk_ptrs++; 3526 bdirty(bp); 3527 continue; 3528 3529 default: 3530 lk.lkt_held = NOHOLDER; 3531 panic("handle_disk_write_complete: Unknown type %s", 3532 TYPENAME(wk->wk_type)); 3533 /* NOTREACHED */ 3534 } 3535 } 3536 /* 3537 * Reattach any requests that must be redone. 3538 */ 3539 while ((wk = LIST_FIRST(&reattach)) != NULL) { 3540 WORKLIST_REMOVE(wk); 3541 WORKLIST_INSERT_BP(bp, wk); 3542 } 3543 #ifdef DEBUG 3544 if (lk.lkt_held != SPECIAL_FLAG) 3545 panic("softdep_disk_write_complete: lock lost"); 3546 lk.lkt_held = NOHOLDER; 3547 #endif 3548 rel_mplock(); 3549 } 3550 3551 /* 3552 * Called from within softdep_disk_write_complete above. Note that 3553 * this routine is always called from interrupt level with further 3554 * splbio interrupts blocked. 3555 * 3556 * Parameters: 3557 * adp: the completed allocdirect 3558 */ 3559 static void 3560 handle_allocdirect_partdone(struct allocdirect *adp) 3561 { 3562 struct allocdirect *listadp; 3563 struct inodedep *inodedep; 3564 long bsize; 3565 3566 if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE) 3567 return; 3568 if (adp->ad_buf != NULL) { 3569 lk.lkt_held = NOHOLDER; 3570 panic("handle_allocdirect_partdone: dangling dep"); 3571 } 3572 /* 3573 * The on-disk inode cannot claim to be any larger than the last 3574 * fragment that has been written. Otherwise, the on-disk inode 3575 * might have fragments that were not the last block in the file 3576 * which would corrupt the filesystem. Thus, we cannot free any 3577 * allocdirects after one whose ad_oldblkno claims a fragment as 3578 * these blocks must be rolled back to zero before writing the inode. 3579 * We check the currently active set of allocdirects in id_inoupdt. 3580 */ 3581 inodedep = adp->ad_inodedep; 3582 bsize = inodedep->id_fs->fs_bsize; 3583 TAILQ_FOREACH(listadp, &inodedep->id_inoupdt, ad_next) { 3584 /* found our block */ 3585 if (listadp == adp) 3586 break; 3587 /* continue if ad_oldsize is not a fragment */ 3588 if (listadp->ad_oldsize == 0 || 3589 listadp->ad_oldsize == bsize) 3590 continue; 3591 /* hit a fragment */ 3592 return; 3593 } 3594 /* 3595 * If we have reached the end of the current list without 3596 * finding the just finished dependency, then it must be 3597 * on the future dependency list. Future dependencies cannot 3598 * be freed until they are moved to the current list. 3599 */ 3600 if (listadp == NULL) { 3601 #ifdef DEBUG 3602 TAILQ_FOREACH(listadp, &inodedep->id_newinoupdt, ad_next) 3603 /* found our block */ 3604 if (listadp == adp) 3605 break; 3606 if (listadp == NULL) { 3607 lk.lkt_held = NOHOLDER; 3608 panic("handle_allocdirect_partdone: lost dep"); 3609 } 3610 #endif /* DEBUG */ 3611 return; 3612 } 3613 /* 3614 * If we have found the just finished dependency, then free 3615 * it along with anything that follows it that is complete. 3616 */ 3617 for (; adp; adp = listadp) { 3618 listadp = TAILQ_NEXT(adp, ad_next); 3619 if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE) 3620 return; 3621 free_allocdirect(&inodedep->id_inoupdt, adp, 1); 3622 } 3623 } 3624 3625 /* 3626 * Called from within softdep_disk_write_complete above. Note that 3627 * this routine is always called from interrupt level with further 3628 * splbio interrupts blocked. 
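 *
 * (If the indirect block itself is being written when we get here,
 * its indirdep is marked UNDONE and the allocindir is parked on
 * ir_donehd, to be committed by softdep_disk_write_complete when
 * that write finishes; only otherwise is the new block number
 * stored straight into the saved copy of the indirect block, as
 * the code below shows.)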
3629 * 3630 * Parameters: 3631 * aip: the completed allocindir 3632 */ 3633 static void 3634 handle_allocindir_partdone(struct allocindir *aip) 3635 { 3636 struct indirdep *indirdep; 3637 3638 if ((aip->ai_state & ALLCOMPLETE) != ALLCOMPLETE) 3639 return; 3640 if (aip->ai_buf != NULL) { 3641 lk.lkt_held = NOHOLDER; 3642 panic("handle_allocindir_partdone: dangling dependency"); 3643 } 3644 indirdep = aip->ai_indirdep; 3645 if (indirdep->ir_state & UNDONE) { 3646 LIST_REMOVE(aip, ai_next); 3647 LIST_INSERT_HEAD(&indirdep->ir_donehd, aip, ai_next); 3648 return; 3649 } 3650 ((ufs_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] = 3651 aip->ai_newblkno; 3652 LIST_REMOVE(aip, ai_next); 3653 if (aip->ai_freefrag != NULL) 3654 add_to_worklist(&aip->ai_freefrag->ff_list); 3655 WORKITEM_FREE(aip, D_ALLOCINDIR); 3656 } 3657 3658 /* 3659 * Called from within softdep_disk_write_complete above to restore 3660 * in-memory inode block contents to their most up-to-date state. Note 3661 * that this routine is always called from interrupt level with further 3662 * splbio interrupts blocked. 3663 * 3664 * Parameters: 3665 * bp: buffer containing the inode block 3666 */ 3667 static int 3668 handle_written_inodeblock(struct inodedep *inodedep, struct buf *bp) 3669 { 3670 struct worklist *wk, *filefree; 3671 struct allocdirect *adp, *nextadp; 3672 struct ufs1_dinode *dp; 3673 int hadchanges; 3674 3675 if ((inodedep->id_state & IOSTARTED) == 0) { 3676 lk.lkt_held = NOHOLDER; 3677 panic("handle_written_inodeblock: not started"); 3678 } 3679 inodedep->id_state &= ~IOSTARTED; 3680 dp = (struct ufs1_dinode *)bp->b_data + 3681 ino_to_fsbo(inodedep->id_fs, inodedep->id_ino); 3682 /* 3683 * If we had to rollback the inode allocation because of 3684 * bitmaps being incomplete, then simply restore it. 3685 * Keep the block dirty so that it will not be reclaimed until 3686 * all associated dependencies have been cleared and the 3687 * corresponding updates written to disk. 3688 */ 3689 if (inodedep->id_savedino != NULL) { 3690 *dp = *inodedep->id_savedino; 3691 kfree(inodedep->id_savedino, M_INODEDEP); 3692 inodedep->id_savedino = NULL; 3693 if ((bp->b_flags & B_DELWRI) == 0) 3694 stat_inode_bitmap++; 3695 bdirty(bp); 3696 return (1); 3697 } 3698 inodedep->id_state |= COMPLETE; 3699 /* 3700 * Roll forward anything that had to be rolled back before 3701 * the inode could be updated. 
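 *
 * (This is the inverse of the rollback performed in
 * initiate_write_inodeblock: each pointer that was reverted to
 * ad_oldblkno before the write is restored to ad_newblkno, and
 * the allocdirect's state moves from UNDONE back to ATTACHED.)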
3702 */ 3703 hadchanges = 0; 3704 for (adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; adp = nextadp) { 3705 nextadp = TAILQ_NEXT(adp, ad_next); 3706 if (adp->ad_state & ATTACHED) { 3707 lk.lkt_held = NOHOLDER; 3708 panic("handle_written_inodeblock: new entry"); 3709 } 3710 if (adp->ad_lbn < NDADDR) { 3711 if (dp->di_db[adp->ad_lbn] != adp->ad_oldblkno) { 3712 lk.lkt_held = NOHOLDER; 3713 panic("%s: %s #%ld mismatch %d != %d", 3714 "handle_written_inodeblock", 3715 "direct pointer", adp->ad_lbn, 3716 dp->di_db[adp->ad_lbn], adp->ad_oldblkno); 3717 } 3718 dp->di_db[adp->ad_lbn] = adp->ad_newblkno; 3719 } else { 3720 if (dp->di_ib[adp->ad_lbn - NDADDR] != 0) { 3721 lk.lkt_held = NOHOLDER; 3722 panic("%s: %s #%ld allocated as %d", 3723 "handle_written_inodeblock", 3724 "indirect pointer", adp->ad_lbn - NDADDR, 3725 dp->di_ib[adp->ad_lbn - NDADDR]); 3726 } 3727 dp->di_ib[adp->ad_lbn - NDADDR] = adp->ad_newblkno; 3728 } 3729 adp->ad_state &= ~UNDONE; 3730 adp->ad_state |= ATTACHED; 3731 hadchanges = 1; 3732 } 3733 if (hadchanges && (bp->b_flags & B_DELWRI) == 0) 3734 stat_direct_blk_ptrs++; 3735 /* 3736 * Reset the file size to its most up-to-date value. 3737 */ 3738 if (inodedep->id_savedsize == -1) { 3739 lk.lkt_held = NOHOLDER; 3740 panic("handle_written_inodeblock: bad size"); 3741 } 3742 if (dp->di_size != inodedep->id_savedsize) { 3743 dp->di_size = inodedep->id_savedsize; 3744 hadchanges = 1; 3745 } 3746 inodedep->id_savedsize = -1; 3747 /* 3748 * If there were any rollbacks in the inode block, then it must be 3749 * marked dirty so that it will eventually get written back in 3750 * its correct form. 3751 */ 3752 if (hadchanges) 3753 bdirty(bp); 3754 /* 3755 * Process any allocdirects that completed during the update. 3756 */ 3757 if ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != NULL) 3758 handle_allocdirect_partdone(adp); 3759 /* 3760 * Process deallocations that were held pending until the 3761 * inode had been written to disk. Freeing of the inode 3762 * is delayed until after all blocks have been freed to 3763 * avoid creation of new <vfsid, inum, lbn> triples 3764 * before the old ones have been deleted. 3765 */ 3766 filefree = NULL; 3767 while ((wk = LIST_FIRST(&inodedep->id_bufwait)) != NULL) { 3768 WORKLIST_REMOVE(wk); 3769 switch (wk->wk_type) { 3770 3771 case D_FREEFILE: 3772 /* 3773 * We defer adding filefree to the worklist until 3774 * all other additions have been made to ensure 3775 * that it will be done after all the old blocks 3776 * have been freed. 3777 */ 3778 if (filefree != NULL) { 3779 lk.lkt_held = NOHOLDER; 3780 panic("handle_written_inodeblock: filefree"); 3781 } 3782 filefree = wk; 3783 continue; 3784 3785 case D_MKDIR: 3786 handle_written_mkdir(WK_MKDIR(wk), MKDIR_PARENT); 3787 continue; 3788 3789 case D_DIRADD: 3790 diradd_inode_written(WK_DIRADD(wk), inodedep); 3791 continue; 3792 3793 case D_FREEBLKS: 3794 wk->wk_state |= COMPLETE; 3795 if ((wk->wk_state & ALLCOMPLETE) != ALLCOMPLETE) 3796 continue; 3797 /* -- fall through -- */ 3798 case D_FREEFRAG: 3799 case D_DIRREM: 3800 add_to_worklist(wk); 3801 continue; 3802 3803 default: 3804 lk.lkt_held = NOHOLDER; 3805 panic("handle_written_inodeblock: Unknown type %s", 3806 TYPENAME(wk->wk_type)); 3807 /* NOTREACHED */ 3808 } 3809 } 3810 if (filefree != NULL) { 3811 if (free_inodedep(inodedep) == 0) { 3812 lk.lkt_held = NOHOLDER; 3813 panic("handle_written_inodeblock: live inodedep"); 3814 } 3815 add_to_worklist(filefree); 3816 return (0); 3817 } 3818 3819 /* 3820 * If no outstanding dependencies, free it. 
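 *
 * (The nonzero return below tells softdep_disk_write_complete to
 * reattach this inodedep to the buffer's dependency list; combined
 * with the bdirty() above, this ensures the rolled-back contents
 * are eventually rewritten in their correct form.)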
3821 */ 3822 if (free_inodedep(inodedep) || TAILQ_FIRST(&inodedep->id_inoupdt) == 0) 3823 return (0); 3824 return (hadchanges); 3825 } 3826 3827 /* 3828 * Process a diradd entry after its dependent inode has been written. 3829 * This routine must be called with splbio interrupts blocked. 3830 */ 3831 static void 3832 diradd_inode_written(struct diradd *dap, struct inodedep *inodedep) 3833 { 3834 struct pagedep *pagedep; 3835 3836 dap->da_state |= COMPLETE; 3837 if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) { 3838 if (dap->da_state & DIRCHG) 3839 pagedep = dap->da_previous->dm_pagedep; 3840 else 3841 pagedep = dap->da_pagedep; 3842 LIST_REMOVE(dap, da_pdlist); 3843 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist); 3844 } 3845 WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list); 3846 } 3847 3848 /* 3849 * Handle the completion of a mkdir dependency. 3850 */ 3851 static void 3852 handle_written_mkdir(struct mkdir *mkdir, int type) 3853 { 3854 struct diradd *dap; 3855 struct pagedep *pagedep; 3856 3857 if (mkdir->md_state != type) { 3858 lk.lkt_held = NOHOLDER; 3859 panic("handle_written_mkdir: bad type"); 3860 } 3861 dap = mkdir->md_diradd; 3862 dap->da_state &= ~type; 3863 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) == 0) 3864 dap->da_state |= DEPCOMPLETE; 3865 if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) { 3866 if (dap->da_state & DIRCHG) 3867 pagedep = dap->da_previous->dm_pagedep; 3868 else 3869 pagedep = dap->da_pagedep; 3870 LIST_REMOVE(dap, da_pdlist); 3871 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist); 3872 } 3873 LIST_REMOVE(mkdir, md_mkdirs); 3874 WORKITEM_FREE(mkdir, D_MKDIR); 3875 } 3876 3877 /* 3878 * Called from within softdep_disk_write_complete above. 3879 * A write operation was just completed. Removed inodes can 3880 * now be freed and associated block pointers may be committed. 3881 * Note that this routine is always called from interrupt level 3882 * with further splbio interrupts blocked. 3883 * 3884 * Parameters: 3885 * bp: buffer containing the written page 3886 */ 3887 static int 3888 handle_written_filepage(struct pagedep *pagedep, struct buf *bp) 3889 { 3890 struct dirrem *dirrem; 3891 struct diradd *dap, *nextdap; 3892 struct direct *ep; 3893 int i, chgs; 3894 3895 if ((pagedep->pd_state & IOSTARTED) == 0) { 3896 lk.lkt_held = NOHOLDER; 3897 panic("handle_written_filepage: not started"); 3898 } 3899 pagedep->pd_state &= ~IOSTARTED; 3900 /* 3901 * Process any directory removals that have been committed. 3902 */ 3903 while ((dirrem = LIST_FIRST(&pagedep->pd_dirremhd)) != NULL) { 3904 LIST_REMOVE(dirrem, dm_next); 3905 dirrem->dm_dirinum = pagedep->pd_ino; 3906 add_to_worklist(&dirrem->dm_list); 3907 } 3908 /* 3909 * Free any directory additions that have been committed. 3910 */ 3911 while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL) 3912 free_diradd(dap); 3913 /* 3914 * Uncommitted directory entries must be restored. 
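 *
 * (These are the entries that initiate_write_filepage rolled back
 * before the page went to disk: d_ino was cleared, or reset to the
 * old inode number for a DIRCHG entry. Each one is now restored to
 * da_newinum and flipped from UNDONE back to ATTACHED.)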
3915 */ 3916 for (chgs = 0, i = 0; i < DAHASHSZ; i++) { 3917 for (dap = LIST_FIRST(&pagedep->pd_diraddhd[i]); dap; 3918 dap = nextdap) { 3919 nextdap = LIST_NEXT(dap, da_pdlist); 3920 if (dap->da_state & ATTACHED) { 3921 lk.lkt_held = NOHOLDER; 3922 panic("handle_written_filepage: attached"); 3923 } 3924 ep = (struct direct *) 3925 ((char *)bp->b_data + dap->da_offset); 3926 ep->d_ino = dap->da_newinum; 3927 dap->da_state &= ~UNDONE; 3928 dap->da_state |= ATTACHED; 3929 chgs = 1; 3930 /* 3931 * If the inode referenced by the directory has 3932 * been written out, then the dependency can be 3933 * moved to the pending list. 3934 */ 3935 if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) { 3936 LIST_REMOVE(dap, da_pdlist); 3937 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, 3938 da_pdlist); 3939 } 3940 } 3941 } 3942 /* 3943 * If there were any rollbacks in the directory, then it must be 3944 * marked dirty so that it will eventually get written back in 3945 * its correct form. 3946 */ 3947 if (chgs) { 3948 if ((bp->b_flags & B_DELWRI) == 0) 3949 stat_dir_entry++; 3950 bdirty(bp); 3951 } 3952 /* 3953 * If no dependencies remain, the pagedep will be freed. 3954 * Otherwise it will remain to update the page before it 3955 * is written back to disk. 3956 */ 3957 if (LIST_FIRST(&pagedep->pd_pendinghd) == 0) { 3958 for (i = 0; i < DAHASHSZ; i++) 3959 if (LIST_FIRST(&pagedep->pd_diraddhd[i]) != NULL) 3960 break; 3961 if (i == DAHASHSZ) { 3962 LIST_REMOVE(pagedep, pd_hash); 3963 WORKITEM_FREE(pagedep, D_PAGEDEP); 3964 return (0); 3965 } 3966 } 3967 return (1); 3968 } 3969 3970 /* 3971 * Writing back in-core inode structures. 3972 * 3973 * The filesystem only accesses an inode's contents when it occupies an 3974 * "in-core" inode structure. These "in-core" structures are separate from 3975 * the page frames used to cache inode blocks. Only the latter are 3976 * transferred to/from the disk. So, when the updated contents of the 3977 * "in-core" inode structure are copied to the corresponding in-memory inode 3978 * block, the dependencies are also transferred. The following procedure is 3979 * called when copying a dirty "in-core" inode to a cached inode block. 3980 */ 3981 3982 /* 3983 * Called when an inode is loaded from disk. If the effective link count 3984 * differed from the actual link count when it was last flushed, then we 3985 * need to ensure that the correct effective link count is put back. 3986 * 3987 * Parameters: 3988 * ip: the "in_core" copy of the inode 3989 */ 3990 void 3991 softdep_load_inodeblock(struct inode *ip) 3992 { 3993 struct inodedep *inodedep; 3994 3995 /* 3996 * Check for alternate nlink count. 3997 */ 3998 ip->i_effnlink = ip->i_nlink; 3999 ACQUIRE_LOCK(&lk); 4000 if (inodedep_lookup(ip->i_fs, ip->i_number, 0, &inodedep) == 0) { 4001 FREE_LOCK(&lk); 4002 return; 4003 } 4004 ip->i_effnlink -= inodedep->id_nlinkdelta; 4005 FREE_LOCK(&lk); 4006 } 4007 4008 /* 4009 * This routine is called just before the "in-core" inode 4010 * information is to be copied to the in-memory inode block. 4011 * Recall that an inode block contains several inodes. If 4012 * the force flag is set, then the dependencies will be 4013 * cleared so that the update can always be made. Note that 4014 * the buffer is locked when this routine is called, so we 4015 * will never be in the middle of writing the inode block 4016 * to disk. 
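 *
 * (The id_nlinkdelta consistency check below relies on the softdep
 * link-count convention: i_nlink holds the link count as recorded
 * on disk, while i_effnlink subtracts removals whose directory
 * updates have not yet been committed; softdep_load_inodeblock
 * above reconstructs i_effnlink from the saved delta when the
 * inode is read back in.)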
4017 * 4018 * Parameters: 4019 * ip: the "in_core" copy of the inode 4020 * bp: the buffer containing the inode block 4021 * waitfor: nonzero => update must be allowed 4022 */ 4023 void 4024 softdep_update_inodeblock(struct inode *ip, struct buf *bp, 4025 int waitfor) 4026 { 4027 struct inodedep *inodedep; 4028 struct worklist *wk; 4029 int error, gotit; 4030 4031 /* 4032 * If the effective link count is not equal to the actual link 4033 * count, then we must track the difference in an inodedep while 4034 * the inode is (potentially) tossed out of the cache. Otherwise, 4035 * if there is no existing inodedep, then there are no dependencies 4036 * to track. 4037 */ 4038 ACQUIRE_LOCK(&lk); 4039 if (inodedep_lookup(ip->i_fs, ip->i_number, 0, &inodedep) == 0) { 4040 FREE_LOCK(&lk); 4041 if (ip->i_effnlink != ip->i_nlink) 4042 panic("softdep_update_inodeblock: bad link count"); 4043 return; 4044 } 4045 if (inodedep->id_nlinkdelta != ip->i_nlink - ip->i_effnlink) { 4046 FREE_LOCK(&lk); 4047 panic("softdep_update_inodeblock: bad delta"); 4048 } 4049 /* 4050 * Changes have been initiated. Anything depending on these 4051 * changes cannot occur until this inode has been written. 4052 */ 4053 inodedep->id_state &= ~COMPLETE; 4054 if ((inodedep->id_state & ONWORKLIST) == 0) 4055 WORKLIST_INSERT_BP(bp, &inodedep->id_list); 4056 /* 4057 * Any new dependencies associated with the incore inode must 4058 * now be moved to the list associated with the buffer holding 4059 * the in-memory copy of the inode. Once merged, process any 4060 * allocdirects that are completed by the merger. 4061 */ 4062 merge_inode_lists(inodedep); 4063 if (TAILQ_FIRST(&inodedep->id_inoupdt) != NULL) 4064 handle_allocdirect_partdone(TAILQ_FIRST(&inodedep->id_inoupdt)); 4065 /* 4066 * Now that the inode has been pushed into the buffer, the 4067 * operations dependent on the inode being written to disk 4068 * can be moved to the id_bufwait so that they will be 4069 * processed when the buffer I/O completes. 4070 */ 4071 while ((wk = LIST_FIRST(&inodedep->id_inowait)) != NULL) { 4072 WORKLIST_REMOVE(wk); 4073 WORKLIST_INSERT(&inodedep->id_bufwait, wk); 4074 } 4075 /* 4076 * Newly allocated inodes cannot be written until the bitmap 4077 * that allocates them has been written (indicated by 4078 * DEPCOMPLETE being set in id_state). If we are doing a 4079 * forced sync (e.g., an fsync on a file), we force the bitmap 4080 * to be written so that the update can be done. 4081 */ 4082 if ((inodedep->id_state & DEPCOMPLETE) != 0 || waitfor == 0) { 4083 FREE_LOCK(&lk); 4084 return; 4085 } 4086 gotit = getdirtybuf(&inodedep->id_buf, MNT_WAIT); 4087 FREE_LOCK(&lk); 4088 if (gotit && 4089 (error = bwrite(inodedep->id_buf)) != 0) 4090 softdep_error("softdep_update_inodeblock: bwrite", error); 4091 } 4092 4093 /* 4094 * Merge the new inode dependency list (id_newinoupdt) into the old 4095 * inode dependency list (id_inoupdt). This routine must be called 4096 * with splbio interrupts blocked. 
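 *
 * (Both lists are kept sorted by ad_lbn. The merge below walks them
 * in tandem, inserting each new entry before the first old entry
 * with an equal or greater logical block number; when the numbers
 * are equal the two allocdirects describe the same block and are
 * collapsed via allocdirect_merge, with leftovers appended at the
 * tail. In outline, using example block numbers for illustration:
 *
 *	old: lbn 1, 4, 7	new: lbn 4, 9
 *	  -> old: lbn 1, merge(4), 7, 9)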
4097 */ 4098 static void 4099 merge_inode_lists(struct inodedep *inodedep) 4100 { 4101 struct allocdirect *listadp, *newadp; 4102 4103 newadp = TAILQ_FIRST(&inodedep->id_newinoupdt); 4104 for (listadp = TAILQ_FIRST(&inodedep->id_inoupdt); listadp && newadp;) { 4105 if (listadp->ad_lbn < newadp->ad_lbn) { 4106 listadp = TAILQ_NEXT(listadp, ad_next); 4107 continue; 4108 } 4109 TAILQ_REMOVE(&inodedep->id_newinoupdt, newadp, ad_next); 4110 TAILQ_INSERT_BEFORE(listadp, newadp, ad_next); 4111 if (listadp->ad_lbn == newadp->ad_lbn) { 4112 allocdirect_merge(&inodedep->id_inoupdt, newadp, 4113 listadp); 4114 listadp = newadp; 4115 } 4116 newadp = TAILQ_FIRST(&inodedep->id_newinoupdt); 4117 } 4118 while ((newadp = TAILQ_FIRST(&inodedep->id_newinoupdt)) != NULL) { 4119 TAILQ_REMOVE(&inodedep->id_newinoupdt, newadp, ad_next); 4120 TAILQ_INSERT_TAIL(&inodedep->id_inoupdt, newadp, ad_next); 4121 } 4122 } 4123 4124 /* 4125 * If we are doing an fsync, then we must ensure that any directory 4126 * entries for the inode have been written after the inode gets to disk. 4127 * 4128 * bioops callback - hold io_token 4129 * 4130 * Parameters: 4131 * vp: the "in_core" copy of the inode 4132 */ 4133 static int 4134 softdep_fsync(struct vnode *vp) 4135 { 4136 struct inodedep *inodedep; 4137 struct pagedep *pagedep; 4138 struct worklist *wk; 4139 struct diradd *dap; 4140 struct mount *mnt; 4141 struct vnode *pvp; 4142 struct inode *ip; 4143 struct buf *bp; 4144 struct fs *fs; 4145 int error, flushparent; 4146 ino_t parentino; 4147 ufs_lbn_t lbn; 4148 4149 /* 4150 * Move check from original kernel code, possibly not needed any 4151 * more with the per-mount bioops. 4152 */ 4153 if ((vp->v_mount->mnt_flag & MNT_SOFTDEP) == 0) 4154 return (0); 4155 4156 get_mplock(); 4157 ip = VTOI(vp); 4158 fs = ip->i_fs; 4159 ACQUIRE_LOCK(&lk); 4160 if (inodedep_lookup(fs, ip->i_number, 0, &inodedep) == 0) { 4161 FREE_LOCK(&lk); 4162 rel_mplock(); 4163 return (0); 4164 } 4165 if (LIST_FIRST(&inodedep->id_inowait) != NULL || 4166 LIST_FIRST(&inodedep->id_bufwait) != NULL || 4167 TAILQ_FIRST(&inodedep->id_inoupdt) != NULL || 4168 TAILQ_FIRST(&inodedep->id_newinoupdt) != NULL) { 4169 FREE_LOCK(&lk); 4170 panic("softdep_fsync: pending ops"); 4171 } 4172 for (error = 0, flushparent = 0; ; ) { 4173 if ((wk = LIST_FIRST(&inodedep->id_pendinghd)) == NULL) 4174 break; 4175 if (wk->wk_type != D_DIRADD) { 4176 FREE_LOCK(&lk); 4177 panic("softdep_fsync: Unexpected type %s", 4178 TYPENAME(wk->wk_type)); 4179 } 4180 dap = WK_DIRADD(wk); 4181 /* 4182 * Flush our parent if this directory entry 4183 * has a MKDIR_PARENT dependency. 4184 */ 4185 if (dap->da_state & DIRCHG) 4186 pagedep = dap->da_previous->dm_pagedep; 4187 else 4188 pagedep = dap->da_pagedep; 4189 mnt = pagedep->pd_mnt; 4190 parentino = pagedep->pd_ino; 4191 lbn = pagedep->pd_lbn; 4192 if ((dap->da_state & (MKDIR_BODY | COMPLETE)) != COMPLETE) { 4193 FREE_LOCK(&lk); 4194 panic("softdep_fsync: dirty"); 4195 } 4196 flushparent = dap->da_state & MKDIR_PARENT; 4197 /* 4198 * If we are being fsync'ed as part of vgone'ing this vnode, 4199 * then we will not be able to release and recover the 4200 * vnode below, so we just have to give up on writing its 4201 * directory entry out. It will eventually be written, just 4202 * not now, but then the user was not asking to have it 4203 * written, so we are not breaking any promises. 4204 */ 4205 if (vp->v_flag & VRECLAIMED) 4206 break; 4207 /* 4208 * We prevent deadlock by always fetching inodes from the 4209 * root, moving down the directory tree. 
Thus, when fetching 4210 * our parent directory, we must unlock ourselves before 4211 * requesting the lock on our parent. See the comment in 4212 * ufs_lookup for details on possible races. 4213 */ 4214 FREE_LOCK(&lk); 4215 vn_unlock(vp); 4216 error = VFS_VGET(mnt, NULL, parentino, &pvp); 4217 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 4218 if (error != 0) { 4219 rel_mplock(); 4220 return (error); 4221 } 4222 if (flushparent) { 4223 if ((error = ffs_update(pvp, 1)) != 0) { 4224 vput(pvp); 4225 rel_mplock(); 4226 return (error); 4227 } 4228 } 4229 /* 4230 * Flush directory page containing the inode's name. 4231 */ 4232 error = bread(pvp, lblktodoff(fs, lbn), blksize(fs, VTOI(pvp), lbn), &bp); 4233 if (error == 0) 4234 error = bwrite(bp); 4235 vput(pvp); 4236 if (error != 0) { 4237 rel_mplock(); 4238 return (error); 4239 } 4240 ACQUIRE_LOCK(&lk); 4241 if (inodedep_lookup(fs, ip->i_number, 0, &inodedep) == 0) 4242 break; 4243 } 4244 FREE_LOCK(&lk); 4245 rel_mplock(); 4246 return (0); 4247 } 4248 4249 /* 4250 * Flush all the dirty bitmaps associated with the block device 4251 * before flushing the rest of the dirty blocks so as to reduce 4252 * the number of dependencies that will have to be rolled back. 4253 */ 4254 static int softdep_fsync_mountdev_bp(struct buf *bp, void *data); 4255 4256 void 4257 softdep_fsync_mountdev(struct vnode *vp) 4258 { 4259 if (!vn_isdisk(vp, NULL)) 4260 panic("softdep_fsync_mountdev: vnode not a disk"); 4261 ACQUIRE_LOCK(&lk); 4262 lwkt_gettoken(&vp->v_token); 4263 RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL, 4264 softdep_fsync_mountdev_bp, vp); 4265 lwkt_reltoken(&vp->v_token); 4266 drain_output(vp, 1); 4267 FREE_LOCK(&lk); 4268 } 4269 4270 static int 4271 softdep_fsync_mountdev_bp(struct buf *bp, void *data) 4272 { 4273 struct worklist *wk; 4274 struct vnode *vp = data; 4275 4276 /* 4277 * If it is already scheduled, skip to the next buffer. 4278 */ 4279 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) 4280 return(0); 4281 if (bp->b_vp != vp || (bp->b_flags & B_DELWRI) == 0) { 4282 BUF_UNLOCK(bp); 4283 kprintf("softdep_fsync_mountdev_bp: warning, buffer %p ripped out from under vnode %p\n", bp, vp); 4284 return(0); 4285 } 4286 /* 4287 * We are only interested in bitmaps with outstanding 4288 * dependencies. 4289 */ 4290 if ((wk = LIST_FIRST(&bp->b_dep)) == NULL || 4291 wk->wk_type != D_BMSAFEMAP) { 4292 BUF_UNLOCK(bp); 4293 return(0); 4294 } 4295 bremfree(bp); 4296 FREE_LOCK(&lk); 4297 (void) bawrite(bp); 4298 ACQUIRE_LOCK(&lk); 4299 return(0); 4300 } 4301 4302 /* 4303 * This routine is called when we are trying to synchronously flush a 4304 * file. This routine must eliminate any filesystem metadata dependencies 4305 * so that the syncing routine can succeed by pushing the dirty blocks 4306 * associated with the file. If any I/O errors occur, they are returned. 4307 */ 4308 struct softdep_sync_metadata_info { 4309 struct vnode *vp; 4310 int waitfor; 4311 }; 4312 4313 static int softdep_sync_metadata_bp(struct buf *bp, void *data); 4314 4315 int 4316 softdep_sync_metadata(struct vnode *vp, struct thread *td) 4317 { 4318 struct softdep_sync_metadata_info info; 4319 int error, waitfor; 4320 4321 /* 4322 * Check whether this vnode is involved in a filesystem 4323 * that is doing soft dependency processing. 
4324 */ 4325 if (!vn_isdisk(vp, NULL)) { 4326 if (!DOINGSOFTDEP(vp)) 4327 return (0); 4328 } else 4329 if (vp->v_rdev->si_mountpoint == NULL || 4330 (vp->v_rdev->si_mountpoint->mnt_flag & MNT_SOFTDEP) == 0) 4331 return (0); 4332 /* 4333 * Ensure that any direct block dependencies have been cleared. 4334 */ 4335 ACQUIRE_LOCK(&lk); 4336 if ((error = flush_inodedep_deps(VTOI(vp)->i_fs, VTOI(vp)->i_number))) { 4337 FREE_LOCK(&lk); 4338 return (error); 4339 } 4340 /* 4341 * For most files, the only metadata dependencies are the 4342 * cylinder group maps that allocate their inode or blocks. 4343 * The block allocation dependencies can be found by traversing 4344 * the dependency lists for any buffers that remain on their 4345 * dirty buffer list. The inode allocation dependency will 4346 * be resolved when the inode is updated with MNT_WAIT. 4347 * This work is done in two passes. The first pass grabs most 4348 * of the buffers and begins asynchronously writing them. The 4349 * only way to wait for these asynchronous writes is to sleep 4350 * on the filesystem vnode which may stay busy for a long time 4351 * if the filesystem is active. So, instead, we make a second 4352 * pass over the dependencies blocking on each write. In the 4353 * usual case we will be blocking against a write that we 4354 * initiated, so when it is done the dependency will have been 4355 * resolved. Thus the second pass is expected to end quickly. 4356 */ 4357 waitfor = MNT_NOWAIT; 4358 top: 4359 /* 4360 * We must wait for any I/O in progress to finish so that 4361 * all potential buffers on the dirty list will be visible. 4362 */ 4363 drain_output(vp, 1); 4364 4365 info.vp = vp; 4366 info.waitfor = waitfor; 4367 lwkt_gettoken(&vp->v_token); 4368 error = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL, 4369 softdep_sync_metadata_bp, &info); 4370 lwkt_reltoken(&vp->v_token); 4371 if (error < 0) { 4372 FREE_LOCK(&lk); 4373 return(-error); /* error code */ 4374 } 4375 4376 /* 4377 * The brief unlock is to allow any pent up dependency 4378 * processing to be done. Then proceed with the second pass. 4379 */ 4380 if (waitfor & MNT_NOWAIT) { 4381 waitfor = MNT_WAIT; 4382 FREE_LOCK(&lk); 4383 ACQUIRE_LOCK(&lk); 4384 goto top; 4385 } 4386 4387 /* 4388 * If we have managed to get rid of all the dirty buffers, 4389 * then we are done. For certain directories and block 4390 * devices, we may need to do further work. 4391 * 4392 * We must wait for any I/O in progress to finish so that 4393 * all potential buffers on the dirty list will be visible. 4394 */ 4395 drain_output(vp, 1); 4396 if (RB_EMPTY(&vp->v_rbdirty_tree)) { 4397 FREE_LOCK(&lk); 4398 return (0); 4399 } 4400 4401 FREE_LOCK(&lk); 4402 /* 4403 * If we are trying to sync a block device, some of its buffers may 4404 * contain metadata that cannot be written until the contents of some 4405 * partially written files have been written to disk. The only easy 4406 * way to accomplish this is to sync the entire filesystem (luckily 4407 * this happens rarely). 
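 *
 * (A typical case: a cylinder-group bitmap buffer on the device
 * vnode cannot be written without a rollback while inodes or
 * blocks it allocates are still unwritten; VFS_SYNC with MNT_WAIT
 * flushes those files first, clearing the bitmap's dependencies.)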
4408 */ 4409 if (vn_isdisk(vp, NULL) && 4410 vp->v_rdev && 4411 vp->v_rdev->si_mountpoint && !vn_islocked(vp) && 4412 (error = VFS_SYNC(vp->v_rdev->si_mountpoint, MNT_WAIT)) != 0) 4413 return (error); 4414 return (0); 4415 } 4416 4417 static int 4418 softdep_sync_metadata_bp(struct buf *bp, void *data) 4419 { 4420 struct softdep_sync_metadata_info *info = data; 4421 struct pagedep *pagedep; 4422 struct allocdirect *adp; 4423 struct allocindir *aip; 4424 struct worklist *wk; 4425 struct buf *nbp; 4426 int error; 4427 int i; 4428 4429 if (getdirtybuf(&bp, MNT_WAIT) == 0) { 4430 kprintf("softdep_sync_metadata_bp(1): caught buf %p going away\n", bp); 4431 return (1); 4432 } 4433 if (bp->b_vp != info->vp || (bp->b_flags & B_DELWRI) == 0) { 4434 kprintf("softdep_sync_metadata_bp(2): caught buf %p going away vp %p\n", bp, info->vp); 4435 BUF_UNLOCK(bp); 4436 return(1); 4437 } 4438 4439 /* 4440 * As we hold the buffer locked, none of its dependencies 4441 * will disappear. 4442 */ 4443 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 4444 switch (wk->wk_type) { 4445 4446 case D_ALLOCDIRECT: 4447 adp = WK_ALLOCDIRECT(wk); 4448 if (adp->ad_state & DEPCOMPLETE) 4449 break; 4450 nbp = adp->ad_buf; 4451 if (getdirtybuf(&nbp, info->waitfor) == 0) 4452 break; 4453 FREE_LOCK(&lk); 4454 if (info->waitfor & MNT_NOWAIT) { 4455 bawrite(nbp); 4456 } else if ((error = bwrite(nbp)) != 0) { 4457 bawrite(bp); 4458 ACQUIRE_LOCK(&lk); 4459 return (-error); 4460 } 4461 ACQUIRE_LOCK(&lk); 4462 break; 4463 4464 case D_ALLOCINDIR: 4465 aip = WK_ALLOCINDIR(wk); 4466 if (aip->ai_state & DEPCOMPLETE) 4467 break; 4468 nbp = aip->ai_buf; 4469 if (getdirtybuf(&nbp, info->waitfor) == 0) 4470 break; 4471 FREE_LOCK(&lk); 4472 if (info->waitfor & MNT_NOWAIT) { 4473 bawrite(nbp); 4474 } else if ((error = bwrite(nbp)) != 0) { 4475 bawrite(bp); 4476 ACQUIRE_LOCK(&lk); 4477 return (-error); 4478 } 4479 ACQUIRE_LOCK(&lk); 4480 break; 4481 4482 case D_INDIRDEP: 4483 restart: 4484 4485 LIST_FOREACH(aip, &WK_INDIRDEP(wk)->ir_deplisthd, ai_next) { 4486 if (aip->ai_state & DEPCOMPLETE) 4487 continue; 4488 nbp = aip->ai_buf; 4489 if (getdirtybuf(&nbp, MNT_WAIT) == 0) 4490 goto restart; 4491 FREE_LOCK(&lk); 4492 if ((error = bwrite(nbp)) != 0) { 4493 bawrite(bp); 4494 ACQUIRE_LOCK(&lk); 4495 return (-error); 4496 } 4497 ACQUIRE_LOCK(&lk); 4498 goto restart; 4499 } 4500 break; 4501 4502 case D_INODEDEP: 4503 if ((error = flush_inodedep_deps(WK_INODEDEP(wk)->id_fs, 4504 WK_INODEDEP(wk)->id_ino)) != 0) { 4505 FREE_LOCK(&lk); 4506 bawrite(bp); 4507 ACQUIRE_LOCK(&lk); 4508 return (-error); 4509 } 4510 break; 4511 4512 case D_PAGEDEP: 4513 /* 4514 * We are trying to sync a directory that may 4515 * have dependencies on both its own metadata 4516 * and/or dependencies on the inodes of any 4517 * recently allocated files. We walk its diradd 4518 * lists pushing out the associated inode. 4519 */ 4520 pagedep = WK_PAGEDEP(wk); 4521 for (i = 0; i < DAHASHSZ; i++) { 4522 if (LIST_FIRST(&pagedep->pd_diraddhd[i]) == 0) 4523 continue; 4524 if ((error = 4525 flush_pagedep_deps(info->vp, 4526 pagedep->pd_mnt, 4527 &pagedep->pd_diraddhd[i]))) { 4528 FREE_LOCK(&lk); 4529 bawrite(bp); 4530 ACQUIRE_LOCK(&lk); 4531 return (-error); 4532 } 4533 } 4534 break; 4535 4536 case D_MKDIR: 4537 /* 4538 * This case should never happen if the vnode has 4539 * been properly sync'ed. However, if this function 4540 * is used at a place where the vnode has not yet 4541 * been sync'ed, this dependency can show up. So, 4542 * rather than panic, just flush it. 
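 *
 * (The flush follows the same pattern as the D_ALLOCDIRECT and
 * D_ALLOCINDIR cases above: lock the dependency's buffer with
 * getdirtybuf(), drop the softdep lock, then push the buffer with
 * bawrite() on the MNT_NOWAIT pass or a blocking bwrite() on the
 * MNT_WAIT pass.)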
4543 */ 4544 nbp = WK_MKDIR(wk)->md_buf; 4545 if (getdirtybuf(&nbp, info->waitfor) == 0) 4546 break; 4547 FREE_LOCK(&lk); 4548 if (info->waitfor & MNT_NOWAIT) { 4549 bawrite(nbp); 4550 } else if ((error = bwrite(nbp)) != 0) { 4551 bawrite(bp); 4552 ACQUIRE_LOCK(&lk); 4553 return (-error); 4554 } 4555 ACQUIRE_LOCK(&lk); 4556 break; 4557 4558 case D_BMSAFEMAP: 4559 /* 4560 * This case should never happen if the vnode has 4561 * been properly sync'ed. However, if this function 4562 * is used at a place where the vnode has not yet 4563 * been sync'ed, this dependency can show up. So, 4564 * rather than panic, just flush it. 4565 * 4566 * nbp can wind up == bp if a device node for the 4567 * same filesystem is being fsynced at the same time, 4568 * leading to a panic if we don't catch the case. 4569 */ 4570 nbp = WK_BMSAFEMAP(wk)->sm_buf; 4571 if (nbp == bp) 4572 break; 4573 if (getdirtybuf(&nbp, info->waitfor) == 0) 4574 break; 4575 FREE_LOCK(&lk); 4576 if (info->waitfor & MNT_NOWAIT) { 4577 bawrite(nbp); 4578 } else if ((error = bwrite(nbp)) != 0) { 4579 bawrite(bp); 4580 ACQUIRE_LOCK(&lk); 4581 return (-error); 4582 } 4583 ACQUIRE_LOCK(&lk); 4584 break; 4585 4586 default: 4587 FREE_LOCK(&lk); 4588 panic("softdep_sync_metadata: Unknown type %s", 4589 TYPENAME(wk->wk_type)); 4590 /* NOTREACHED */ 4591 } 4592 } 4593 FREE_LOCK(&lk); 4594 bawrite(bp); 4595 ACQUIRE_LOCK(&lk); 4596 return(0); 4597 } 4598 4599 /* 4600 * Flush the dependencies associated with an inodedep. 4601 * Called with splbio blocked. 4602 */ 4603 static int 4604 flush_inodedep_deps(struct fs *fs, ino_t ino) 4605 { 4606 struct inodedep *inodedep; 4607 struct allocdirect *adp; 4608 int error, waitfor; 4609 struct buf *bp; 4610 4611 /* 4612 * This work is done in two passes. The first pass grabs most 4613 * of the buffers and begins asynchronously writing them. The 4614 * only way to wait for these asynchronous writes is to sleep 4615 * on the filesystem vnode which may stay busy for a long time 4616 * if the filesystem is active. So, instead, we make a second 4617 * pass over the dependencies blocking on each write. In the 4618 * usual case we will be blocking against a write that we 4619 * initiated, so when it is done the dependency will have been 4620 * resolved. Thus the second pass is expected to end quickly. 4621 * We give a brief window at the top of the loop to allow 4622 * any pending I/O to complete. 
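 *
 * In outline:
 *
 *	pass 1 (waitfor == MNT_NOWAIT): start asynchronous writes
 *	    with bawrite() for every incomplete dependency;
 *	pass 2 (waitfor == MNT_WAIT):  re-scan and issue blocking
 *	    bwrite() calls for whatever remains.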
4623 */ 4624 for (waitfor = MNT_NOWAIT; ; ) { 4625 FREE_LOCK(&lk); 4626 ACQUIRE_LOCK(&lk); 4627 if (inodedep_lookup(fs, ino, 0, &inodedep) == 0) 4628 return (0); 4629 TAILQ_FOREACH(adp, &inodedep->id_inoupdt, ad_next) { 4630 if (adp->ad_state & DEPCOMPLETE) 4631 continue; 4632 bp = adp->ad_buf; 4633 if (getdirtybuf(&bp, waitfor) == 0) { 4634 if (waitfor & MNT_NOWAIT) 4635 continue; 4636 break; 4637 } 4638 FREE_LOCK(&lk); 4639 if (waitfor & MNT_NOWAIT) { 4640 bawrite(bp); 4641 } else if ((error = bwrite(bp)) != 0) { 4642 ACQUIRE_LOCK(&lk); 4643 return (error); 4644 } 4645 ACQUIRE_LOCK(&lk); 4646 break; 4647 } 4648 if (adp != NULL) 4649 continue; 4650 TAILQ_FOREACH(adp, &inodedep->id_newinoupdt, ad_next) { 4651 if (adp->ad_state & DEPCOMPLETE) 4652 continue; 4653 bp = adp->ad_buf; 4654 if (getdirtybuf(&bp, waitfor) == 0) { 4655 if (waitfor & MNT_NOWAIT) 4656 continue; 4657 break; 4658 } 4659 FREE_LOCK(&lk); 4660 if (waitfor & MNT_NOWAIT) { 4661 bawrite(bp); 4662 } else if ((error = bwrite(bp)) != 0) { 4663 ACQUIRE_LOCK(&lk); 4664 return (error); 4665 } 4666 ACQUIRE_LOCK(&lk); 4667 break; 4668 } 4669 if (adp != NULL) 4670 continue; 4671 /* 4672 * If we just completed pass 2, we are done; otherwise, do pass 2. 4673 */ 4674 if (waitfor == MNT_WAIT) 4675 break; 4676 waitfor = MNT_WAIT; 4677 } 4678 /* 4679 * Try freeing inodedep in case all dependencies have been removed. 4680 */ 4681 if (inodedep_lookup(fs, ino, 0, &inodedep) != 0) 4682 (void) free_inodedep(inodedep); 4683 return (0); 4684 } 4685 4686 /* 4687 * Eliminate a pagedep dependency by flushing out all its diradd dependencies. 4688 * Called with splbio blocked. 4689 */ 4690 static int 4691 flush_pagedep_deps(struct vnode *pvp, struct mount *mp, 4692 struct diraddhd *diraddhdp) 4693 { 4694 struct inodedep *inodedep; 4695 struct ufsmount *ump; 4696 struct diradd *dap; 4697 struct vnode *vp; 4698 int gotit, error = 0; 4699 struct buf *bp; 4700 ino_t inum; 4701 4702 ump = VFSTOUFS(mp); 4703 while ((dap = LIST_FIRST(diraddhdp)) != NULL) { 4704 /* 4705 * Flush ourselves if this directory entry 4706 * has a MKDIR_PARENT dependency. 4707 */ 4708 if (dap->da_state & MKDIR_PARENT) { 4709 FREE_LOCK(&lk); 4710 if ((error = ffs_update(pvp, 1)) != 0) 4711 break; 4712 ACQUIRE_LOCK(&lk); 4713 /* 4714 * If that cleared dependencies, go on to next. 4715 */ 4716 if (dap != LIST_FIRST(diraddhdp)) 4717 continue; 4718 if (dap->da_state & MKDIR_PARENT) { 4719 FREE_LOCK(&lk); 4720 panic("flush_pagedep_deps: MKDIR_PARENT"); 4721 } 4722 } 4723 /* 4724 * A newly allocated directory must have its "." and 4725 * ".." entries written out before its name can be 4726 * committed in its parent. We do not want or need 4727 * the full semantics of a synchronous VOP_FSYNC as 4728 * that may end up here again, once for each directory 4729 * level in the filesystem. Instead, we push the blocks 4730 * and wait for them to clear. We have to fsync twice 4731 * because the first call may choose to defer blocks 4732 * that still have dependencies, but deferral will 4733 * happen at most once. 4734 */ 4735 inum = dap->da_newinum; 4736 if (dap->da_state & MKDIR_BODY) { 4737 FREE_LOCK(&lk); 4738 if ((error = VFS_VGET(mp, NULL, inum, &vp)) != 0) 4739 break; 4740 if ((error=VOP_FSYNC(vp, MNT_NOWAIT, 0)) || 4741 (error=VOP_FSYNC(vp, MNT_NOWAIT, 0))) { 4742 vput(vp); 4743 break; 4744 } 4745 drain_output(vp, 0); 4746 vput(vp); 4747 ACQUIRE_LOCK(&lk); 4748 /* 4749 * If that cleared dependencies, go on to next. 
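 * (Progress is detected by re-checking the head of diraddhdp:
 * the fsyncs above may have completed this diradd and unlinked
 * it from the list; if it is still at the head with MKDIR_BODY
 * set, nothing was flushed and we panic below.)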
4750 */ 4751 if (dap != LIST_FIRST(diraddhdp)) 4752 continue; 4753 if (dap->da_state & MKDIR_BODY) { 4754 FREE_LOCK(&lk); 4755 panic("flush_pagedep_deps: MKDIR_BODY"); 4756 } 4757 } 4758 /* 4759 * Flush the inode on which the directory entry depends. 4760 * Having accounted for MKDIR_PARENT and MKDIR_BODY above, 4761 * the only remaining dependency is that the updated inode 4762 * count must get pushed to disk. The inode has already 4763 * been pushed into its inode buffer (via VOP_UPDATE) at 4764 * the time of the reference count change. So we need only 4765 * locate that buffer, ensure that there will be no rollback 4766 * caused by a bitmap dependency, then write the inode buffer. 4767 */ 4768 if (inodedep_lookup(ump->um_fs, inum, 0, &inodedep) == 0) { 4769 FREE_LOCK(&lk); 4770 panic("flush_pagedep_deps: lost inode"); 4771 } 4772 /* 4773 * If the inode still has bitmap dependencies, 4774 * push them to disk. 4775 */ 4776 if ((inodedep->id_state & DEPCOMPLETE) == 0) { 4777 gotit = getdirtybuf(&inodedep->id_buf, MNT_WAIT); 4778 FREE_LOCK(&lk); 4779 if (gotit && (error = bwrite(inodedep->id_buf)) != 0) 4780 break; 4781 ACQUIRE_LOCK(&lk); 4782 if (dap != LIST_FIRST(diraddhdp)) 4783 continue; 4784 } 4785 /* 4786 * If the inode is still sitting in a buffer waiting 4787 * to be written, push it to disk. 4788 */ 4789 FREE_LOCK(&lk); 4790 if ((error = bread(ump->um_devvp, 4791 fsbtodoff(ump->um_fs, ino_to_fsba(ump->um_fs, inum)), 4792 (int)ump->um_fs->fs_bsize, &bp)) != 0) 4793 break; 4794 if ((error = bwrite(bp)) != 0) 4795 break; 4796 ACQUIRE_LOCK(&lk); 4797 /* 4798 * If we have failed to get rid of all the dependencies 4799 * then something is seriously wrong. 4800 */ 4801 if (dap == LIST_FIRST(diraddhdp)) { 4802 FREE_LOCK(&lk); 4803 panic("flush_pagedep_deps: flush failed"); 4804 } 4805 } 4806 if (error) 4807 ACQUIRE_LOCK(&lk); 4808 return (error); 4809 } 4810 4811 /* 4812 * A large burst of file addition or deletion activity can drive the 4813 * memory load excessively high. First attempt to slow things down 4814 * using the techniques below. If that fails, this routine requests 4815 * the offending operations to fall back to running synchronously 4816 * until the memory load returns to a reasonable level. 4817 */ 4818 int 4819 softdep_slowdown(struct vnode *vp) 4820 { 4821 int max_softdeps_hard; 4822 4823 max_softdeps_hard = max_softdeps * 11 / 10; 4824 if (num_dirrem < max_softdeps_hard / 2 && 4825 num_inodedep < max_softdeps_hard) 4826 return (0); 4827 stat_sync_limit_hit += 1; 4828 return (1); 4829 } 4830 4831 /* 4832 * If memory utilization has gotten too high, deliberately slow things 4833 * down and speed up the I/O processing. 4834 */ 4835 static int 4836 request_cleanup(int resource, int islocked) 4837 { 4838 struct thread *td = curthread; /* XXX */ 4839 4840 /* 4841 * We never hold up the filesystem syncer process. 4842 */ 4843 if (td == filesys_syncer) 4844 return (0); 4845 /* 4846 * First check to see if the work list has gotten backlogged. 4847 * If it has, co-opt this process to help clean up two entries. 4848 * Because this process may hold inodes locked, we cannot 4849 * handle any remove requests that might block on a locked 4850 * inode as that could lead to deadlock. 
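 *
 * (That is why the two process_worklist_item() calls below pass
 * LK_NOWAIT: the co-opted process skips any work item whose vnode
 * it cannot lock without sleeping, rather than risking a deadlock
 * against a lock it already holds.)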
4851 */ 4852 if (num_on_worklist > max_softdeps / 10) { 4853 if (islocked) 4854 FREE_LOCK(&lk); 4855 process_worklist_item(NULL, LK_NOWAIT); 4856 process_worklist_item(NULL, LK_NOWAIT); 4857 stat_worklist_push += 2; 4858 if (islocked) 4859 ACQUIRE_LOCK(&lk); 4860 return(1); 4861 } 4862 4863 /* 4864 * If we are resource constrained on inode dependencies, try 4865 * flushing some dirty inodes. Otherwise, we are constrained 4866 * by file deletions, so try accelerating flushes of directories 4867 * with removal dependencies. We would like to do the cleanup 4868 * here, but we probably hold an inode locked at this point and 4869 * that might deadlock against one that we try to clean. So, 4870 * the best that we can do is request the syncer daemon to do 4871 * the cleanup for us. 4872 */ 4873 switch (resource) { 4874 4875 case FLUSH_INODES: 4876 stat_ino_limit_push += 1; 4877 req_clear_inodedeps += 1; 4878 stat_countp = &stat_ino_limit_hit; 4879 break; 4880 4881 case FLUSH_REMOVE: 4882 stat_blk_limit_push += 1; 4883 req_clear_remove += 1; 4884 stat_countp = &stat_blk_limit_hit; 4885 break; 4886 4887 default: 4888 if (islocked) 4889 FREE_LOCK(&lk); 4890 panic("request_cleanup: unknown type"); 4891 } 4892 /* 4893 * Hopefully the syncer daemon will catch up and awaken us. 4894 * We wait at most tickdelay before proceeding in any case. 4895 */ 4896 if (islocked == 0) 4897 ACQUIRE_LOCK(&lk); 4898 proc_waiting += 1; 4899 if (!callout_active(&handle)) 4900 callout_reset(&handle, tickdelay > 2 ? tickdelay : 2, 4901 pause_timer, NULL); 4902 interlocked_sleep(&lk, SLEEP, (caddr_t)&proc_waiting, 0, 4903 "softupdate", 0); 4904 proc_waiting -= 1; 4905 if (islocked == 0) 4906 FREE_LOCK(&lk); 4907 return (1); 4908 } 4909 4910 /* 4911 * Awaken processes pausing in request_cleanup and clear proc_waiting 4912 * to indicate that there is no longer a timer running. 4913 */ 4914 void 4915 pause_timer(void *arg) 4916 { 4917 *stat_countp += 1; 4918 wakeup_one(&proc_waiting); 4919 if (proc_waiting > 0) 4920 callout_reset(&handle, tickdelay > 2 ? tickdelay : 2, 4921 pause_timer, NULL); 4922 else 4923 callout_deactivate(&handle); 4924 } 4925 4926 /* 4927 * Flush out a directory with at least one removal dependency in an effort to 4928 * reduce the number of dirrem, freefile, and freeblks dependency structures. 4929 */ 4930 static void 4931 clear_remove(struct thread *td) 4932 { 4933 struct pagedep_hashhead *pagedephd; 4934 struct pagedep *pagedep; 4935 static int next = 0; 4936 struct mount *mp; 4937 struct vnode *vp; 4938 int error, cnt; 4939 ino_t ino; 4940 4941 ACQUIRE_LOCK(&lk); 4942 for (cnt = 0; cnt < pagedep_hash; cnt++) { 4943 pagedephd = &pagedep_hashtbl[next++]; 4944 if (next >= pagedep_hash) 4945 next = 0; 4946 LIST_FOREACH(pagedep, pagedephd, pd_hash) { 4947 if (LIST_FIRST(&pagedep->pd_dirremhd) == NULL) 4948 continue; 4949 mp = pagedep->pd_mnt; 4950 ino = pagedep->pd_ino; 4951 FREE_LOCK(&lk); 4952 if ((error = VFS_VGET(mp, NULL, ino, &vp)) != 0) { 4953 softdep_error("clear_remove: vget", error); 4954 return; 4955 } 4956 if ((error = VOP_FSYNC(vp, MNT_NOWAIT, 0))) 4957 softdep_error("clear_remove: fsync", error); 4958 drain_output(vp, 0); 4959 vput(vp); 4960 return; 4961 } 4962 } 4963 FREE_LOCK(&lk); 4964 } 4965 4966 /* 4967 * Clear out a block of dirty inodes in an effort to reduce 4968 * the number of inodedep dependency structures. 
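 *
 * (The scan below is round-robin: a static hash-bucket index is
 * carried across calls, the first inodedep found is used to pick
 * an inode block, and every inode in that block with dependencies
 * is flushed. Since an inode block holds INOPB(fs) inodes, the
 * first inode of the chosen block is id_ino rounded down:
 *
 *	firstino = inodedep->id_ino & ~(INOPB(fs) - 1);
 *
 * e.g. with 64 inodes per block, inode 200 selects inodes 192-255.)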
4969 */ 4970 struct clear_inodedeps_info { 4971 struct fs *fs; 4972 struct mount *mp; 4973 }; 4974 4975 static int 4976 clear_inodedeps_mountlist_callback(struct mount *mp, void *data) 4977 { 4978 struct clear_inodedeps_info *info = data; 4979 4980 if ((mp->mnt_flag & MNT_SOFTDEP) && info->fs == VFSTOUFS(mp)->um_fs) { 4981 info->mp = mp; 4982 return(-1); 4983 } 4984 return(0); 4985 } 4986 4987 static void 4988 clear_inodedeps(struct thread *td) 4989 { 4990 struct clear_inodedeps_info info; 4991 struct inodedep_hashhead *inodedephd; 4992 struct inodedep *inodedep; 4993 static int next = 0; 4994 struct vnode *vp; 4995 struct fs *fs; 4996 int error, cnt; 4997 ino_t firstino, lastino, ino; 4998 4999 ACQUIRE_LOCK(&lk); 5000 /* 5001 * Pick a random inode dependency to be cleared. 5002 * We will then gather up all the inodes in its block 5003 * that have dependencies and flush them out. 5004 */ 5005 for (cnt = 0; cnt < inodedep_hash; cnt++) { 5006 inodedephd = &inodedep_hashtbl[next++]; 5007 if (next >= inodedep_hash) 5008 next = 0; 5009 if ((inodedep = LIST_FIRST(inodedephd)) != NULL) 5010 break; 5011 } 5012 if (inodedep == NULL) { 5013 FREE_LOCK(&lk); 5014 return; 5015 } 5016 /* 5017 * Ugly code to find mount point given pointer to superblock. 5018 */ 5019 fs = inodedep->id_fs; 5020 info.mp = NULL; 5021 info.fs = fs; 5022 mountlist_scan(clear_inodedeps_mountlist_callback, 5023 &info, MNTSCAN_FORWARD|MNTSCAN_NOBUSY); 5024 /* 5025 * Find the last inode in the block with dependencies. 5026 */ 5027 firstino = inodedep->id_ino & ~(INOPB(fs) - 1); 5028 for (lastino = firstino + INOPB(fs) - 1; lastino > firstino; lastino--) 5029 if (inodedep_lookup(fs, lastino, 0, &inodedep) != 0) 5030 break; 5031 /* 5032 * Asynchronously push all but the last inode with dependencies. 5033 * Synchronously push the last inode with dependencies to ensure 5034 * that the inode block gets written to free up the inodedeps. 5035 */ 5036 for (ino = firstino; ino <= lastino; ino++) { 5037 if (inodedep_lookup(fs, ino, 0, &inodedep) == 0) 5038 continue; 5039 FREE_LOCK(&lk); 5040 if ((error = VFS_VGET(info.mp, NULL, ino, &vp)) != 0) { 5041 softdep_error("clear_inodedeps: vget", error); 5042 return; 5043 } 5044 if (ino == lastino) { 5045 if ((error = VOP_FSYNC(vp, MNT_WAIT, 0))) 5046 softdep_error("clear_inodedeps: fsync1", error); 5047 } else { 5048 if ((error = VOP_FSYNC(vp, MNT_NOWAIT, 0))) 5049 softdep_error("clear_inodedeps: fsync2", error); 5050 drain_output(vp, 0); 5051 } 5052 vput(vp); 5053 ACQUIRE_LOCK(&lk); 5054 } 5055 FREE_LOCK(&lk); 5056 } 5057 5058 /* 5059 * Function to determine if the buffer has outstanding dependencies 5060 * that will cause a roll-back if the buffer is written. If wantcount 5061 * is set, return number of dependencies, otherwise just yes or no. 
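 *
 * (Only dependencies that would force a rollback are counted:
 * an unwritten inode bitmap, pending direct block pointer updates,
 * unresolved indirect block pointers, and uncommitted directory
 * additions. D_BMSAFEMAP, D_ALLOCDIRECT, D_ALLOCINDIR, and D_MKDIR
 * items never cause a rollback of the buffer they are attached to.)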
5062 * 5063 * bioops callback - hold io_token 5064 */ 5065 static int 5066 softdep_count_dependencies(struct buf *bp, int wantcount) 5067 { 5068 struct worklist *wk; 5069 struct inodedep *inodedep; 5070 struct indirdep *indirdep; 5071 struct allocindir *aip; 5072 struct pagedep *pagedep; 5073 struct diradd *dap; 5074 int i, retval; 5075 5076 get_mplock(); 5077 5078 retval = 0; 5079 ACQUIRE_LOCK(&lk); 5080 5081 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 5082 switch (wk->wk_type) { 5083 5084 case D_INODEDEP: 5085 inodedep = WK_INODEDEP(wk); 5086 if ((inodedep->id_state & DEPCOMPLETE) == 0) { 5087 /* bitmap allocation dependency */ 5088 retval += 1; 5089 if (!wantcount) 5090 goto out; 5091 } 5092 if (TAILQ_FIRST(&inodedep->id_inoupdt)) { 5093 /* direct block pointer dependency */ 5094 retval += 1; 5095 if (!wantcount) 5096 goto out; 5097 } 5098 continue; 5099 5100 case D_INDIRDEP: 5101 indirdep = WK_INDIRDEP(wk); 5102 5103 LIST_FOREACH(aip, &indirdep->ir_deplisthd, ai_next) { 5104 /* indirect block pointer dependency */ 5105 retval += 1; 5106 if (!wantcount) 5107 goto out; 5108 } 5109 continue; 5110 5111 case D_PAGEDEP: 5112 pagedep = WK_PAGEDEP(wk); 5113 for (i = 0; i < DAHASHSZ; i++) { 5114 5115 LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) { 5116 /* directory entry dependency */ 5117 retval += 1; 5118 if (!wantcount) 5119 goto out; 5120 } 5121 } 5122 continue; 5123 5124 case D_BMSAFEMAP: 5125 case D_ALLOCDIRECT: 5126 case D_ALLOCINDIR: 5127 case D_MKDIR: 5128 /* never a dependency on these blocks */ 5129 continue; 5130 5131 default: 5132 FREE_LOCK(&lk); 5133 panic("softdep_check_for_rollback: Unexpected type %s", 5134 TYPENAME(wk->wk_type)); 5135 /* NOTREACHED */ 5136 } 5137 } 5138 out: 5139 FREE_LOCK(&lk); 5140 rel_mplock(); 5141 5142 return retval; 5143 } 5144 5145 /* 5146 * Acquire exclusive access to a buffer. 5147 * Must be called with splbio blocked. 5148 * Return 1 if buffer was acquired. 5149 */ 5150 static int 5151 getdirtybuf(struct buf **bpp, int waitfor) 5152 { 5153 struct buf *bp; 5154 int error; 5155 5156 for (;;) { 5157 if ((bp = *bpp) == NULL) 5158 return (0); 5159 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) == 0) 5160 break; 5161 if (waitfor != MNT_WAIT) 5162 return (0); 5163 error = interlocked_sleep(&lk, LOCKBUF, bp, 5164 LK_EXCLUSIVE | LK_SLEEPFAIL, 0, 0); 5165 if (error != ENOLCK) { 5166 FREE_LOCK(&lk); 5167 panic("getdirtybuf: inconsistent lock"); 5168 } 5169 } 5170 if ((bp->b_flags & B_DELWRI) == 0) { 5171 BUF_UNLOCK(bp); 5172 return (0); 5173 } 5174 bremfree(bp); 5175 return (1); 5176 } 5177 5178 /* 5179 * Wait for pending output on a vnode to complete. 5180 * Must be called with vnode locked. 5181 */ 5182 static void 5183 drain_output(struct vnode *vp, int islocked) 5184 { 5185 5186 if (!islocked) 5187 ACQUIRE_LOCK(&lk); 5188 while (bio_track_active(&vp->v_track_write)) { 5189 FREE_LOCK(&lk); 5190 bio_track_wait(&vp->v_track_write, 0, 0); 5191 ACQUIRE_LOCK(&lk); 5192 } 5193 if (!islocked) 5194 FREE_LOCK(&lk); 5195 } 5196 5197 /* 5198 * Called whenever a buffer that is being invalidated or reallocated 5199 * contains dependencies. This should only happen if an I/O error has 5200 * occurred. The routine is called with the buffer locked. 
5201 * 5202 * bioops callback - hold io_token 5203 */ 5204 static void 5205 softdep_deallocate_dependencies(struct buf *bp) 5206 { 5207 /* nothing to do, mp lock not needed */ 5208 if ((bp->b_flags & B_ERROR) == 0) 5209 panic("softdep_deallocate_dependencies: dangling deps"); 5210 softdep_error(bp->b_vp->v_mount->mnt_stat.f_mntfromname, bp->b_error); 5211 panic("softdep_deallocate_dependencies: unrecovered I/O error"); 5212 } 5213 5214 /* 5215 * Function to handle asynchronous write errors in the filesystem. 5216 */ 5217 void 5218 softdep_error(char *func, int error) 5219 { 5220 5221 /* XXX should do something better! */ 5222 kprintf("%s: got error %d while accessing filesystem\n", func, error); 5223 } 5224