1 /* 2 * Copyright 1998, 2000 Marshall Kirk McKusick. All Rights Reserved. 3 * 4 * The soft updates code is derived from the appendix of a University 5 * of Michigan technical report (Gregory R. Ganger and Yale N. Patt, 6 * "Soft Updates: A Solution to the Metadata Update Problem in File 7 * Systems", CSE-TR-254-95, August 1995). 8 * 9 * Further information about soft updates can be obtained from: 10 * 11 * Marshall Kirk McKusick http://www.mckusick.com/softdep/ 12 * 1614 Oxford Street mckusick@mckusick.com 13 * Berkeley, CA 94709-1608 +1-510-843-9542 14 * USA 15 * 16 * Redistribution and use in source and binary forms, with or without 17 * modification, are permitted provided that the following conditions 18 * are met: 19 * 20 * 1. Redistributions of source code must retain the above copyright 21 * notice, this list of conditions and the following disclaimer. 22 * 2. Redistributions in binary form must reproduce the above copyright 23 * notice, this list of conditions and the following disclaimer in the 24 * documentation and/or other materials provided with the distribution. 25 * 26 * THIS SOFTWARE IS PROVIDED BY MARSHALL KIRK MCKUSICK ``AS IS'' AND ANY 27 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 28 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 29 * DISCLAIMED. IN NO EVENT SHALL MARSHALL KIRK MCKUSICK BE LIABLE FOR 30 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 36 * SUCH DAMAGE. 37 * 38 * from: @(#)ffs_softdep.c 9.59 (McKusick) 6/21/00 39 * $FreeBSD: src/sys/ufs/ffs/ffs_softdep.c,v 1.57.2.11 2002/02/05 18:46:53 dillon Exp $ 40 * $DragonFly: src/sys/vfs/ufs/ffs_softdep.c,v 1.57 2008/06/28 17:59:51 dillon Exp $ 41 */ 42 43 /* 44 * For now we want the safety net that the DIAGNOSTIC and DEBUG flags provide. 45 */ 46 #ifndef DIAGNOSTIC 47 #define DIAGNOSTIC 48 #endif 49 #ifndef DEBUG 50 #define DEBUG 51 #endif 52 53 #include <sys/param.h> 54 #include <sys/kernel.h> 55 #include <sys/systm.h> 56 #include <sys/buf.h> 57 #include <sys/malloc.h> 58 #include <sys/mount.h> 59 #include <sys/proc.h> 60 #include <sys/syslog.h> 61 #include <sys/vnode.h> 62 #include <sys/conf.h> 63 #include <machine/inttypes.h> 64 #include "dir.h" 65 #include "quota.h" 66 #include "inode.h" 67 #include "ufsmount.h" 68 #include "fs.h" 69 #include "softdep.h" 70 #include "ffs_extern.h" 71 #include "ufs_extern.h" 72 73 #include <sys/buf2.h> 74 #include <sys/mplock2.h> 75 #include <sys/thread2.h> 76 77 /* 78 * These definitions need to be adapted to the system to which 79 * this file is being ported. 80 */ 81 /* 82 * malloc types defined for the softdep system. 
83 */ 84 MALLOC_DEFINE(M_PAGEDEP, "pagedep","File page dependencies"); 85 MALLOC_DEFINE(M_INODEDEP, "inodedep","Inode dependencies"); 86 MALLOC_DEFINE(M_NEWBLK, "newblk","New block allocation"); 87 MALLOC_DEFINE(M_BMSAFEMAP, "bmsafemap","Block or frag allocated from cyl group map"); 88 MALLOC_DEFINE(M_ALLOCDIRECT, "allocdirect","Block or frag dependency for an inode"); 89 MALLOC_DEFINE(M_INDIRDEP, "indirdep","Indirect block dependencies"); 90 MALLOC_DEFINE(M_ALLOCINDIR, "allocindir","Block dependency for an indirect block"); 91 MALLOC_DEFINE(M_FREEFRAG, "freefrag","Previously used frag for an inode"); 92 MALLOC_DEFINE(M_FREEBLKS, "freeblks","Blocks freed from an inode"); 93 MALLOC_DEFINE(M_FREEFILE, "freefile","Inode deallocated"); 94 MALLOC_DEFINE(M_DIRADD, "diradd","New directory entry"); 95 MALLOC_DEFINE(M_MKDIR, "mkdir","New directory"); 96 MALLOC_DEFINE(M_DIRREM, "dirrem","Directory entry deleted"); 97 98 #define M_SOFTDEP_FLAGS (M_WAITOK | M_USE_RESERVE) 99 100 #define D_PAGEDEP 0 101 #define D_INODEDEP 1 102 #define D_NEWBLK 2 103 #define D_BMSAFEMAP 3 104 #define D_ALLOCDIRECT 4 105 #define D_INDIRDEP 5 106 #define D_ALLOCINDIR 6 107 #define D_FREEFRAG 7 108 #define D_FREEBLKS 8 109 #define D_FREEFILE 9 110 #define D_DIRADD 10 111 #define D_MKDIR 11 112 #define D_DIRREM 12 113 #define D_LAST D_DIRREM 114 115 /* 116 * translate from workitem type to memory type 117 * MUST match the defines above, such that memtype[D_XXX] == M_XXX 118 */ 119 static struct malloc_type *memtype[] = { 120 M_PAGEDEP, 121 M_INODEDEP, 122 M_NEWBLK, 123 M_BMSAFEMAP, 124 M_ALLOCDIRECT, 125 M_INDIRDEP, 126 M_ALLOCINDIR, 127 M_FREEFRAG, 128 M_FREEBLKS, 129 M_FREEFILE, 130 M_DIRADD, 131 M_MKDIR, 132 M_DIRREM 133 }; 134 135 #define DtoM(type) (memtype[type]) 136 137 /* 138 * Names of malloc types. 139 */ 140 #define TYPENAME(type) \ 141 ((unsigned)(type) < D_LAST ? memtype[type]->ks_shortdesc : "???") 142 /* 143 * End system adaptaion definitions. 144 */ 145 146 /* 147 * Internal function prototypes. 
148 */ 149 static void softdep_error(char *, int); 150 static void drain_output(struct vnode *, int); 151 static int getdirtybuf(struct buf **, int); 152 static void clear_remove(struct thread *); 153 static void clear_inodedeps(struct thread *); 154 static int flush_pagedep_deps(struct vnode *, struct mount *, 155 struct diraddhd *); 156 static int flush_inodedep_deps(struct fs *, ino_t); 157 static int handle_written_filepage(struct pagedep *, struct buf *); 158 static void diradd_inode_written(struct diradd *, struct inodedep *); 159 static int handle_written_inodeblock(struct inodedep *, struct buf *); 160 static void handle_allocdirect_partdone(struct allocdirect *); 161 static void handle_allocindir_partdone(struct allocindir *); 162 static void initiate_write_filepage(struct pagedep *, struct buf *); 163 static void handle_written_mkdir(struct mkdir *, int); 164 static void initiate_write_inodeblock(struct inodedep *, struct buf *); 165 static void handle_workitem_freefile(struct freefile *); 166 static void handle_workitem_remove(struct dirrem *); 167 static struct dirrem *newdirrem(struct buf *, struct inode *, 168 struct inode *, int, struct dirrem **); 169 static void free_diradd(struct diradd *); 170 static void free_allocindir(struct allocindir *, struct inodedep *); 171 static int indir_trunc (struct inode *, off_t, int, ufs_lbn_t, long *); 172 static void deallocate_dependencies(struct buf *, struct inodedep *); 173 static void free_allocdirect(struct allocdirectlst *, 174 struct allocdirect *, int); 175 static int check_inode_unwritten(struct inodedep *); 176 static int free_inodedep(struct inodedep *); 177 static void handle_workitem_freeblocks(struct freeblks *); 178 static void merge_inode_lists(struct inodedep *); 179 static void setup_allocindir_phase2(struct buf *, struct inode *, 180 struct allocindir *); 181 static struct allocindir *newallocindir(struct inode *, int, ufs_daddr_t, 182 ufs_daddr_t); 183 static void handle_workitem_freefrag(struct freefrag *); 184 static struct freefrag *newfreefrag(struct inode *, ufs_daddr_t, long); 185 static void allocdirect_merge(struct allocdirectlst *, 186 struct allocdirect *, struct allocdirect *); 187 static struct bmsafemap *bmsafemap_lookup(struct buf *); 188 static int newblk_lookup(struct fs *, ufs_daddr_t, int, 189 struct newblk **); 190 static int inodedep_lookup(struct fs *, ino_t, int, struct inodedep **); 191 static int pagedep_lookup(struct inode *, ufs_lbn_t, int, 192 struct pagedep **); 193 static void pause_timer(void *); 194 static int request_cleanup(int, int); 195 static int process_worklist_item(struct mount *, int); 196 static void add_to_worklist(struct worklist *); 197 198 /* 199 * Exported softdep operations. 
200 */ 201 static void softdep_disk_io_initiation(struct buf *); 202 static void softdep_disk_write_complete(struct buf *); 203 static void softdep_deallocate_dependencies(struct buf *); 204 static int softdep_fsync(struct vnode *); 205 static int softdep_process_worklist(struct mount *); 206 static void softdep_move_dependencies(struct buf *, struct buf *); 207 static int softdep_count_dependencies(struct buf *bp, int); 208 static int softdep_checkread(struct buf *bp); 209 static int softdep_checkwrite(struct buf *bp); 210 211 static struct bio_ops softdep_bioops = { 212 .io_start = softdep_disk_io_initiation, 213 .io_complete = softdep_disk_write_complete, 214 .io_deallocate = softdep_deallocate_dependencies, 215 .io_fsync = softdep_fsync, 216 .io_sync = softdep_process_worklist, 217 .io_movedeps = softdep_move_dependencies, 218 .io_countdeps = softdep_count_dependencies, 219 .io_checkread = softdep_checkread, 220 .io_checkwrite = softdep_checkwrite 221 }; 222 223 /* 224 * Locking primitives. 225 * 226 * For a uniprocessor, all we need to do is protect against disk 227 * interrupts. For a multiprocessor, this lock would have to be 228 * a mutex. A single mutex is used throughout this file, though 229 * finer grain locking could be used if contention warranted it. 230 * 231 * For a multiprocessor, the sleep call would accept a lock and 232 * release it after the sleep processing was complete. In a uniprocessor 233 * implementation there is no such interlock, so we simple mark 234 * the places where it needs to be done with the `interlocked' form 235 * of the lock calls. Since the uniprocessor sleep already interlocks 236 * the spl, there is nothing that really needs to be done. 237 */ 238 #ifndef /* NOT */ DEBUG 239 static struct lockit { 240 } lk = { 0 }; 241 #define ACQUIRE_LOCK(lk) crit_enter_id("softupdates"); 242 #define FREE_LOCK(lk) crit_exit_id("softupdates"); 243 244 #else /* DEBUG */ 245 #define NOHOLDER ((struct thread *)-1) 246 #define SPECIAL_FLAG ((struct thread *)-2) 247 static struct lockit { 248 int lkt_spl; 249 struct thread *lkt_held; 250 } lk = { 0, NOHOLDER }; 251 static int lockcnt; 252 253 static void acquire_lock(struct lockit *); 254 static void free_lock(struct lockit *); 255 void softdep_panic(char *); 256 257 #define ACQUIRE_LOCK(lk) acquire_lock(lk) 258 #define FREE_LOCK(lk) free_lock(lk) 259 260 static void 261 acquire_lock(struct lockit *lk) 262 { 263 thread_t holder; 264 265 if (lk->lkt_held != NOHOLDER) { 266 holder = lk->lkt_held; 267 FREE_LOCK(lk); 268 if (holder == curthread) 269 panic("softdep_lock: locking against myself"); 270 else 271 panic("softdep_lock: lock held by %p", holder); 272 } 273 crit_enter_id("softupdates"); 274 lk->lkt_held = curthread; 275 lockcnt++; 276 } 277 278 static void 279 free_lock(struct lockit *lk) 280 { 281 282 if (lk->lkt_held == NOHOLDER) 283 panic("softdep_unlock: lock not held"); 284 lk->lkt_held = NOHOLDER; 285 crit_exit_id("softupdates"); 286 } 287 288 /* 289 * Function to release soft updates lock and panic. 290 */ 291 void 292 softdep_panic(char *msg) 293 { 294 295 if (lk.lkt_held != NOHOLDER) 296 FREE_LOCK(&lk); 297 panic(msg); 298 } 299 #endif /* DEBUG */ 300 301 static int interlocked_sleep(struct lockit *, int, void *, int, 302 const char *, int); 303 304 /* 305 * When going to sleep, we must save our SPL so that it does 306 * not get lost if some other process uses the lock while we 307 * are sleeping. We restore it after we have slept. This routine 308 * wraps the interlocking with functions that sleep. 
The list 309 * below enumerates the available set of operations. 310 */ 311 #define UNKNOWN 0 312 #define SLEEP 1 313 #define LOCKBUF 2 314 315 static int 316 interlocked_sleep(struct lockit *lk, int op, void *ident, int flags, 317 const char *wmesg, int timo) 318 { 319 thread_t holder; 320 int s, retval; 321 322 s = lk->lkt_spl; 323 # ifdef DEBUG 324 if (lk->lkt_held == NOHOLDER) 325 panic("interlocked_sleep: lock not held"); 326 lk->lkt_held = NOHOLDER; 327 # endif /* DEBUG */ 328 switch (op) { 329 case SLEEP: 330 retval = tsleep(ident, flags, wmesg, timo); 331 break; 332 case LOCKBUF: 333 retval = BUF_LOCK((struct buf *)ident, flags); 334 break; 335 default: 336 panic("interlocked_sleep: unknown operation"); 337 } 338 # ifdef DEBUG 339 if (lk->lkt_held != NOHOLDER) { 340 holder = lk->lkt_held; 341 FREE_LOCK(lk); 342 if (holder == curthread) 343 panic("interlocked_sleep: locking against self"); 344 else 345 panic("interlocked_sleep: lock held by %p", holder); 346 } 347 lk->lkt_held = curthread; 348 lockcnt++; 349 # endif /* DEBUG */ 350 lk->lkt_spl = s; 351 return (retval); 352 } 353 354 /* 355 * Place holder for real semaphores. 356 */ 357 struct sema { 358 int value; 359 thread_t holder; 360 char *name; 361 int prio; 362 int timo; 363 }; 364 static void sema_init(struct sema *, char *, int, int); 365 static int sema_get(struct sema *, struct lockit *); 366 static void sema_release(struct sema *); 367 368 static void 369 sema_init(struct sema *semap, char *name, int prio, int timo) 370 { 371 372 semap->holder = NOHOLDER; 373 semap->value = 0; 374 semap->name = name; 375 semap->prio = prio; 376 semap->timo = timo; 377 } 378 379 static int 380 sema_get(struct sema *semap, struct lockit *interlock) 381 { 382 383 if (semap->value++ > 0) { 384 if (interlock != NULL) { 385 interlocked_sleep(interlock, SLEEP, (caddr_t)semap, 386 semap->prio, semap->name, semap->timo); 387 FREE_LOCK(interlock); 388 } else { 389 tsleep((caddr_t)semap, semap->prio, semap->name, 390 semap->timo); 391 } 392 return (0); 393 } 394 semap->holder = curthread; 395 if (interlock != NULL) 396 FREE_LOCK(interlock); 397 return (1); 398 } 399 400 static void 401 sema_release(struct sema *semap) 402 { 403 404 if (semap->value <= 0 || semap->holder != curthread) { 405 if (lk.lkt_held != NOHOLDER) 406 FREE_LOCK(&lk); 407 panic("sema_release: not held"); 408 } 409 if (--semap->value > 0) { 410 semap->value = 0; 411 wakeup(semap); 412 } 413 semap->holder = NOHOLDER; 414 } 415 416 /* 417 * Worklist queue management. 418 * These routines require that the lock be held. 
419 */ 420 #ifndef /* NOT */ DEBUG 421 #define WORKLIST_INSERT(head, item) do { \ 422 (item)->wk_state |= ONWORKLIST; \ 423 LIST_INSERT_HEAD(head, item, wk_list); \ 424 } while (0) 425 426 #define WORKLIST_INSERT_BP(bp, item) do { \ 427 (item)->wk_state |= ONWORKLIST; \ 428 (bp)->b_ops = &softdep_bioops; \ 429 LIST_INSERT_HEAD(&(bp)->b_dep, item, wk_list); \ 430 } while (0) 431 432 #define WORKLIST_REMOVE(item) do { \ 433 (item)->wk_state &= ~ONWORKLIST; \ 434 LIST_REMOVE(item, wk_list); \ 435 } while (0) 436 437 #define WORKITEM_FREE(item, type) FREE(item, DtoM(type)) 438 439 #else /* DEBUG */ 440 static void worklist_insert(struct workhead *, struct worklist *); 441 static void worklist_remove(struct worklist *); 442 static void workitem_free(struct worklist *, int); 443 444 #define WORKLIST_INSERT_BP(bp, item) do { \ 445 (bp)->b_ops = &softdep_bioops; \ 446 worklist_insert(&(bp)->b_dep, item); \ 447 } while (0) 448 449 #define WORKLIST_INSERT(head, item) worklist_insert(head, item) 450 #define WORKLIST_REMOVE(item) worklist_remove(item) 451 #define WORKITEM_FREE(item, type) workitem_free((struct worklist *)item, type) 452 453 static void 454 worklist_insert(struct workhead *head, struct worklist *item) 455 { 456 457 if (lk.lkt_held == NOHOLDER) 458 panic("worklist_insert: lock not held"); 459 if (item->wk_state & ONWORKLIST) { 460 FREE_LOCK(&lk); 461 panic("worklist_insert: already on list"); 462 } 463 item->wk_state |= ONWORKLIST; 464 LIST_INSERT_HEAD(head, item, wk_list); 465 } 466 467 static void 468 worklist_remove(struct worklist *item) 469 { 470 471 if (lk.lkt_held == NOHOLDER) 472 panic("worklist_remove: lock not held"); 473 if ((item->wk_state & ONWORKLIST) == 0) { 474 FREE_LOCK(&lk); 475 panic("worklist_remove: not on list"); 476 } 477 item->wk_state &= ~ONWORKLIST; 478 LIST_REMOVE(item, wk_list); 479 } 480 481 static void 482 workitem_free(struct worklist *item, int type) 483 { 484 485 if (item->wk_state & ONWORKLIST) { 486 if (lk.lkt_held != NOHOLDER) 487 FREE_LOCK(&lk); 488 panic("workitem_free: still on list"); 489 } 490 if (item->wk_type != type) { 491 if (lk.lkt_held != NOHOLDER) 492 FREE_LOCK(&lk); 493 panic("workitem_free: type mismatch"); 494 } 495 FREE(item, DtoM(type)); 496 } 497 #endif /* DEBUG */ 498 499 /* 500 * Workitem queue management 501 */ 502 static struct workhead softdep_workitem_pending; 503 static int num_on_worklist; /* number of worklist items to be processed */ 504 static int softdep_worklist_busy; /* 1 => trying to do unmount */ 505 static int softdep_worklist_req; /* serialized waiters */ 506 static int max_softdeps; /* maximum number of structs before slowdown */ 507 static int tickdelay = 2; /* number of ticks to pause during slowdown */ 508 static int *stat_countp; /* statistic to count in proc_waiting timeout */ 509 static int proc_waiting; /* tracks whether we have a timeout posted */ 510 static struct callout handle; /* handle on posted proc_waiting timeout */ 511 static struct thread *filesys_syncer; /* proc of filesystem syncer process */ 512 static int req_clear_inodedeps; /* syncer process flush some inodedeps */ 513 #define FLUSH_INODES 1 514 static int req_clear_remove; /* syncer process flush some freeblks */ 515 #define FLUSH_REMOVE 2 516 /* 517 * runtime statistics 518 */ 519 static int stat_worklist_push; /* number of worklist cleanups */ 520 static int stat_blk_limit_push; /* number of times block limit neared */ 521 static int stat_ino_limit_push; /* number of times inode limit neared */ 522 static int stat_blk_limit_hit; /* number 
of times block slowdown imposed */ 523 static int stat_ino_limit_hit; /* number of times inode slowdown imposed */ 524 static int stat_sync_limit_hit; /* number of synchronous slowdowns imposed */ 525 static int stat_indir_blk_ptrs; /* bufs redirtied as indir ptrs not written */ 526 static int stat_inode_bitmap; /* bufs redirtied as inode bitmap not written */ 527 static int stat_direct_blk_ptrs;/* bufs redirtied as direct ptrs not written */ 528 static int stat_dir_entry; /* bufs redirtied as dir entry cannot write */ 529 #ifdef DEBUG 530 #include <vm/vm.h> 531 #include <sys/sysctl.h> 532 SYSCTL_INT(_debug, OID_AUTO, max_softdeps, CTLFLAG_RW, &max_softdeps, 0, ""); 533 SYSCTL_INT(_debug, OID_AUTO, tickdelay, CTLFLAG_RW, &tickdelay, 0, ""); 534 SYSCTL_INT(_debug, OID_AUTO, worklist_push, CTLFLAG_RW, &stat_worklist_push, 0,""); 535 SYSCTL_INT(_debug, OID_AUTO, blk_limit_push, CTLFLAG_RW, &stat_blk_limit_push, 0,""); 536 SYSCTL_INT(_debug, OID_AUTO, ino_limit_push, CTLFLAG_RW, &stat_ino_limit_push, 0,""); 537 SYSCTL_INT(_debug, OID_AUTO, blk_limit_hit, CTLFLAG_RW, &stat_blk_limit_hit, 0, ""); 538 SYSCTL_INT(_debug, OID_AUTO, ino_limit_hit, CTLFLAG_RW, &stat_ino_limit_hit, 0, ""); 539 SYSCTL_INT(_debug, OID_AUTO, sync_limit_hit, CTLFLAG_RW, &stat_sync_limit_hit, 0, ""); 540 SYSCTL_INT(_debug, OID_AUTO, indir_blk_ptrs, CTLFLAG_RW, &stat_indir_blk_ptrs, 0, ""); 541 SYSCTL_INT(_debug, OID_AUTO, inode_bitmap, CTLFLAG_RW, &stat_inode_bitmap, 0, ""); 542 SYSCTL_INT(_debug, OID_AUTO, direct_blk_ptrs, CTLFLAG_RW, &stat_direct_blk_ptrs, 0, ""); 543 SYSCTL_INT(_debug, OID_AUTO, dir_entry, CTLFLAG_RW, &stat_dir_entry, 0, ""); 544 #endif /* DEBUG */ 545 546 /* 547 * Add an item to the end of the work queue. 548 * This routine requires that the lock be held. 549 * This is the only routine that adds items to the list. 550 * The following routine is the only one that removes items 551 * and does so in order from first to last. 552 */ 553 static void 554 add_to_worklist(struct worklist *wk) 555 { 556 static struct worklist *worklist_tail; 557 558 if (wk->wk_state & ONWORKLIST) { 559 if (lk.lkt_held != NOHOLDER) 560 FREE_LOCK(&lk); 561 panic("add_to_worklist: already on list"); 562 } 563 wk->wk_state |= ONWORKLIST; 564 if (LIST_FIRST(&softdep_workitem_pending) == NULL) 565 LIST_INSERT_HEAD(&softdep_workitem_pending, wk, wk_list); 566 else 567 LIST_INSERT_AFTER(worklist_tail, wk, wk_list); 568 worklist_tail = wk; 569 num_on_worklist += 1; 570 } 571 572 /* 573 * Process that runs once per second to handle items in the background queue. 574 * 575 * Note that we ensure that everything is done in the order in which they 576 * appear in the queue. The code below depends on this property to ensure 577 * that blocks of a file are freed before the inode itself is freed. This 578 * ordering ensures that no new <vfsid, inum, lbn> triples will be generated 579 * until all the old ones have been purged from the dependency lists. 580 * 581 * bioops callback - hold io_token 582 */ 583 static int 584 softdep_process_worklist(struct mount *matchmnt) 585 { 586 thread_t td = curthread; 587 int matchcnt, loopcount; 588 long starttime; 589 590 get_mplock(); 591 592 /* 593 * Record the process identifier of our caller so that we can give 594 * this process preferential treatment in request_cleanup below. 
595 */ 596 filesys_syncer = td; 597 matchcnt = 0; 598 599 /* 600 * There is no danger of having multiple processes run this 601 * code, but we have to single-thread it when softdep_flushfiles() 602 * is in operation to get an accurate count of the number of items 603 * related to its mount point that are in the list. 604 */ 605 if (matchmnt == NULL) { 606 if (softdep_worklist_busy < 0) { 607 matchcnt = -1; 608 goto done; 609 } 610 softdep_worklist_busy += 1; 611 } 612 613 /* 614 * If requested, try removing inode or removal dependencies. 615 */ 616 if (req_clear_inodedeps) { 617 clear_inodedeps(td); 618 req_clear_inodedeps -= 1; 619 wakeup_one(&proc_waiting); 620 } 621 if (req_clear_remove) { 622 clear_remove(td); 623 req_clear_remove -= 1; 624 wakeup_one(&proc_waiting); 625 } 626 loopcount = 1; 627 starttime = time_second; 628 while (num_on_worklist > 0) { 629 matchcnt += process_worklist_item(matchmnt, 0); 630 631 /* 632 * If a umount operation wants to run the worklist 633 * accurately, abort. 634 */ 635 if (softdep_worklist_req && matchmnt == NULL) { 636 matchcnt = -1; 637 break; 638 } 639 640 /* 641 * If requested, try removing inode or removal dependencies. 642 */ 643 if (req_clear_inodedeps) { 644 clear_inodedeps(td); 645 req_clear_inodedeps -= 1; 646 wakeup_one(&proc_waiting); 647 } 648 if (req_clear_remove) { 649 clear_remove(td); 650 req_clear_remove -= 1; 651 wakeup_one(&proc_waiting); 652 } 653 /* 654 * We do not generally want to stop for buffer space, but if 655 * we are really being a buffer hog, we will stop and wait. 656 */ 657 if (loopcount++ % 128 == 0) 658 bwillinode(1); 659 /* 660 * Never allow processing to run for more than one 661 * second. Otherwise the other syncer tasks may get 662 * excessively backlogged. 663 */ 664 if (starttime != time_second && matchmnt == NULL) { 665 matchcnt = -1; 666 break; 667 } 668 } 669 if (matchmnt == NULL) { 670 --softdep_worklist_busy; 671 if (softdep_worklist_req && softdep_worklist_busy == 0) 672 wakeup(&softdep_worklist_req); 673 } 674 done: 675 rel_mplock(); 676 return (matchcnt); 677 } 678 679 /* 680 * Process one item on the worklist. 681 */ 682 static int 683 process_worklist_item(struct mount *matchmnt, int flags) 684 { 685 struct worklist *wk; 686 struct dirrem *dirrem; 687 struct fs *matchfs; 688 struct vnode *vp; 689 int matchcnt = 0; 690 691 matchfs = NULL; 692 if (matchmnt != NULL) 693 matchfs = VFSTOUFS(matchmnt)->um_fs; 694 ACQUIRE_LOCK(&lk); 695 /* 696 * Normally we just process each item on the worklist in order. 697 * However, if we are in a situation where we cannot lock any 698 * inodes, we have to skip over any dirrem requests whose 699 * vnodes are resident and locked. 
700 */ 701 LIST_FOREACH(wk, &softdep_workitem_pending, wk_list) { 702 if ((flags & LK_NOWAIT) == 0 || wk->wk_type != D_DIRREM) 703 break; 704 dirrem = WK_DIRREM(wk); 705 vp = ufs_ihashlookup(VFSTOUFS(dirrem->dm_mnt)->um_dev, 706 dirrem->dm_oldinum); 707 if (vp == NULL || !vn_islocked(vp)) 708 break; 709 } 710 if (wk == 0) { 711 FREE_LOCK(&lk); 712 return (0); 713 } 714 WORKLIST_REMOVE(wk); 715 num_on_worklist -= 1; 716 FREE_LOCK(&lk); 717 switch (wk->wk_type) { 718 719 case D_DIRREM: 720 /* removal of a directory entry */ 721 if (WK_DIRREM(wk)->dm_mnt == matchmnt) 722 matchcnt += 1; 723 handle_workitem_remove(WK_DIRREM(wk)); 724 break; 725 726 case D_FREEBLKS: 727 /* releasing blocks and/or fragments from a file */ 728 if (WK_FREEBLKS(wk)->fb_fs == matchfs) 729 matchcnt += 1; 730 handle_workitem_freeblocks(WK_FREEBLKS(wk)); 731 break; 732 733 case D_FREEFRAG: 734 /* releasing a fragment when replaced as a file grows */ 735 if (WK_FREEFRAG(wk)->ff_fs == matchfs) 736 matchcnt += 1; 737 handle_workitem_freefrag(WK_FREEFRAG(wk)); 738 break; 739 740 case D_FREEFILE: 741 /* releasing an inode when its link count drops to 0 */ 742 if (WK_FREEFILE(wk)->fx_fs == matchfs) 743 matchcnt += 1; 744 handle_workitem_freefile(WK_FREEFILE(wk)); 745 break; 746 747 default: 748 panic("%s_process_worklist: Unknown type %s", 749 "softdep", TYPENAME(wk->wk_type)); 750 /* NOTREACHED */ 751 } 752 return (matchcnt); 753 } 754 755 /* 756 * Move dependencies from one buffer to another. 757 * 758 * bioops callback - hold io_token 759 */ 760 static void 761 softdep_move_dependencies(struct buf *oldbp, struct buf *newbp) 762 { 763 struct worklist *wk, *wktail; 764 765 get_mplock(); 766 if (LIST_FIRST(&newbp->b_dep) != NULL) 767 panic("softdep_move_dependencies: need merge code"); 768 wktail = NULL; 769 ACQUIRE_LOCK(&lk); 770 while ((wk = LIST_FIRST(&oldbp->b_dep)) != NULL) { 771 LIST_REMOVE(wk, wk_list); 772 if (wktail == NULL) 773 LIST_INSERT_HEAD(&newbp->b_dep, wk, wk_list); 774 else 775 LIST_INSERT_AFTER(wktail, wk, wk_list); 776 wktail = wk; 777 newbp->b_ops = &softdep_bioops; 778 } 779 FREE_LOCK(&lk); 780 rel_mplock(); 781 } 782 783 /* 784 * Purge the work list of all items associated with a particular mount point. 785 */ 786 int 787 softdep_flushfiles(struct mount *oldmnt, int flags) 788 { 789 struct vnode *devvp; 790 int error, loopcnt; 791 792 /* 793 * Await our turn to clear out the queue, then serialize access. 794 */ 795 while (softdep_worklist_busy != 0) { 796 softdep_worklist_req += 1; 797 tsleep(&softdep_worklist_req, 0, "softflush", 0); 798 softdep_worklist_req -= 1; 799 } 800 softdep_worklist_busy = -1; 801 802 if ((error = ffs_flushfiles(oldmnt, flags)) != 0) { 803 softdep_worklist_busy = 0; 804 if (softdep_worklist_req) 805 wakeup(&softdep_worklist_req); 806 return (error); 807 } 808 /* 809 * Alternately flush the block device associated with the mount 810 * point and process any dependencies that the flushing 811 * creates. In theory, this loop can happen at most twice, 812 * but we give it a few extra just to be sure. 813 */ 814 devvp = VFSTOUFS(oldmnt)->um_devvp; 815 for (loopcnt = 10; loopcnt > 0; ) { 816 if (softdep_process_worklist(oldmnt) == 0) { 817 loopcnt--; 818 /* 819 * Do another flush in case any vnodes were brought in 820 * as part of the cleanup operations. 821 */ 822 if ((error = ffs_flushfiles(oldmnt, flags)) != 0) 823 break; 824 /* 825 * If we still found nothing to do, we are really done. 
826 */ 827 if (softdep_process_worklist(oldmnt) == 0) 828 break; 829 } 830 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); 831 error = VOP_FSYNC(devvp, MNT_WAIT, 0); 832 vn_unlock(devvp); 833 if (error) 834 break; 835 } 836 softdep_worklist_busy = 0; 837 if (softdep_worklist_req) 838 wakeup(&softdep_worklist_req); 839 840 /* 841 * If we are unmounting then it is an error to fail. If we 842 * are simply trying to downgrade to read-only, then filesystem 843 * activity can keep us busy forever, so we just fail with EBUSY. 844 */ 845 if (loopcnt == 0) { 846 if (oldmnt->mnt_kern_flag & MNTK_UNMOUNT) 847 panic("softdep_flushfiles: looping"); 848 error = EBUSY; 849 } 850 return (error); 851 } 852 853 /* 854 * Structure hashing. 855 * 856 * There are three types of structures that can be looked up: 857 * 1) pagedep structures identified by mount point, inode number, 858 * and logical block. 859 * 2) inodedep structures identified by mount point and inode number. 860 * 3) newblk structures identified by mount point and 861 * physical block number. 862 * 863 * The "pagedep" and "inodedep" dependency structures are hashed 864 * separately from the file blocks and inodes to which they correspond. 865 * This separation helps when the in-memory copy of an inode or 866 * file block must be replaced. It also obviates the need to access 867 * an inode or file page when simply updating (or de-allocating) 868 * dependency structures. Lookup of newblk structures is needed to 869 * find newly allocated blocks when trying to associate them with 870 * their allocdirect or allocindir structure. 871 * 872 * The lookup routines optionally create and hash a new instance when 873 * an existing entry is not found. 874 */ 875 #define DEPALLOC 0x0001 /* allocate structure if lookup fails */ 876 #define NODELAY 0x0002 /* cannot do background work */ 877 878 /* 879 * Structures and routines associated with pagedep caching. 880 */ 881 LIST_HEAD(pagedep_hashhead, pagedep) *pagedep_hashtbl; 882 u_long pagedep_hash; /* size of hash table - 1 */ 883 #define PAGEDEP_HASH(mp, inum, lbn) \ 884 (&pagedep_hashtbl[((((register_t)(mp)) >> 13) + (inum) + (lbn)) & \ 885 pagedep_hash]) 886 static struct sema pagedep_in_progress; 887 888 /* 889 * Helper routine for pagedep_lookup() 890 */ 891 static __inline 892 struct pagedep * 893 pagedep_find(struct pagedep_hashhead *pagedephd, ino_t ino, ufs_lbn_t lbn, 894 struct mount *mp) 895 { 896 struct pagedep *pagedep; 897 898 LIST_FOREACH(pagedep, pagedephd, pd_hash) { 899 if (ino == pagedep->pd_ino && 900 lbn == pagedep->pd_lbn && 901 mp == pagedep->pd_mnt) { 902 return (pagedep); 903 } 904 } 905 return(NULL); 906 } 907 908 /* 909 * Look up a pagedep. Return 1 if found, 0 if not found. 910 * If not found, allocate if DEPALLOC flag is passed. 911 * Found or allocated entry is returned in pagedeppp. 912 * This routine must be called with splbio interrupts blocked. 
913 */ 914 static int 915 pagedep_lookup(struct inode *ip, ufs_lbn_t lbn, int flags, 916 struct pagedep **pagedeppp) 917 { 918 struct pagedep *pagedep; 919 struct pagedep_hashhead *pagedephd; 920 struct mount *mp; 921 int i; 922 923 #ifdef DEBUG 924 if (lk.lkt_held == NOHOLDER) 925 panic("pagedep_lookup: lock not held"); 926 #endif 927 mp = ITOV(ip)->v_mount; 928 pagedephd = PAGEDEP_HASH(mp, ip->i_number, lbn); 929 top: 930 *pagedeppp = pagedep_find(pagedephd, ip->i_number, lbn, mp); 931 if (*pagedeppp) 932 return(1); 933 if ((flags & DEPALLOC) == 0) 934 return (0); 935 if (sema_get(&pagedep_in_progress, &lk) == 0) { 936 ACQUIRE_LOCK(&lk); 937 goto top; 938 } 939 MALLOC(pagedep, struct pagedep *, sizeof(struct pagedep), M_PAGEDEP, 940 M_SOFTDEP_FLAGS | M_ZERO); 941 942 if (pagedep_find(pagedephd, ip->i_number, lbn, mp)) { 943 kprintf("pagedep_lookup: blocking race avoided\n"); 944 ACQUIRE_LOCK(&lk); 945 sema_release(&pagedep_in_progress); 946 kfree(pagedep, M_PAGEDEP); 947 goto top; 948 } 949 950 pagedep->pd_list.wk_type = D_PAGEDEP; 951 pagedep->pd_mnt = mp; 952 pagedep->pd_ino = ip->i_number; 953 pagedep->pd_lbn = lbn; 954 LIST_INIT(&pagedep->pd_dirremhd); 955 LIST_INIT(&pagedep->pd_pendinghd); 956 for (i = 0; i < DAHASHSZ; i++) 957 LIST_INIT(&pagedep->pd_diraddhd[i]); 958 ACQUIRE_LOCK(&lk); 959 LIST_INSERT_HEAD(pagedephd, pagedep, pd_hash); 960 sema_release(&pagedep_in_progress); 961 *pagedeppp = pagedep; 962 return (0); 963 } 964 965 /* 966 * Structures and routines associated with inodedep caching. 967 */ 968 LIST_HEAD(inodedep_hashhead, inodedep) *inodedep_hashtbl; 969 static u_long inodedep_hash; /* size of hash table - 1 */ 970 static long num_inodedep; /* number of inodedep allocated */ 971 #define INODEDEP_HASH(fs, inum) \ 972 (&inodedep_hashtbl[((((register_t)(fs)) >> 13) + (inum)) & inodedep_hash]) 973 static struct sema inodedep_in_progress; 974 975 /* 976 * Helper routine for inodedep_lookup() 977 */ 978 static __inline 979 struct inodedep * 980 inodedep_find(struct inodedep_hashhead *inodedephd, struct fs *fs, ino_t inum) 981 { 982 struct inodedep *inodedep; 983 984 LIST_FOREACH(inodedep, inodedephd, id_hash) { 985 if (inum == inodedep->id_ino && fs == inodedep->id_fs) 986 return(inodedep); 987 } 988 return (NULL); 989 } 990 991 /* 992 * Look up a inodedep. Return 1 if found, 0 if not found. 993 * If not found, allocate if DEPALLOC flag is passed. 994 * Found or allocated entry is returned in inodedeppp. 995 * This routine must be called with splbio interrupts blocked. 996 */ 997 static int 998 inodedep_lookup(struct fs *fs, ino_t inum, int flags, 999 struct inodedep **inodedeppp) 1000 { 1001 struct inodedep *inodedep; 1002 struct inodedep_hashhead *inodedephd; 1003 int firsttry; 1004 1005 #ifdef DEBUG 1006 if (lk.lkt_held == NOHOLDER) 1007 panic("inodedep_lookup: lock not held"); 1008 #endif 1009 firsttry = 1; 1010 inodedephd = INODEDEP_HASH(fs, inum); 1011 top: 1012 *inodedeppp = inodedep_find(inodedephd, fs, inum); 1013 if (*inodedeppp) 1014 return (1); 1015 if ((flags & DEPALLOC) == 0) 1016 return (0); 1017 /* 1018 * If we are over our limit, try to improve the situation. 
1019 */ 1020 if (num_inodedep > max_softdeps && firsttry && 1021 speedup_syncer() == 0 && (flags & NODELAY) == 0 && 1022 request_cleanup(FLUSH_INODES, 1)) { 1023 firsttry = 0; 1024 goto top; 1025 } 1026 if (sema_get(&inodedep_in_progress, &lk) == 0) { 1027 ACQUIRE_LOCK(&lk); 1028 goto top; 1029 } 1030 MALLOC(inodedep, struct inodedep *, sizeof(struct inodedep), 1031 M_INODEDEP, M_SOFTDEP_FLAGS | M_ZERO); 1032 if (inodedep_find(inodedephd, fs, inum)) { 1033 kprintf("inodedep_lookup: blocking race avoided\n"); 1034 ACQUIRE_LOCK(&lk); 1035 sema_release(&inodedep_in_progress); 1036 kfree(inodedep, M_INODEDEP); 1037 goto top; 1038 } 1039 inodedep->id_list.wk_type = D_INODEDEP; 1040 inodedep->id_fs = fs; 1041 inodedep->id_ino = inum; 1042 inodedep->id_state = ALLCOMPLETE; 1043 inodedep->id_nlinkdelta = 0; 1044 inodedep->id_savedino = NULL; 1045 inodedep->id_savedsize = -1; 1046 inodedep->id_buf = NULL; 1047 LIST_INIT(&inodedep->id_pendinghd); 1048 LIST_INIT(&inodedep->id_inowait); 1049 LIST_INIT(&inodedep->id_bufwait); 1050 TAILQ_INIT(&inodedep->id_inoupdt); 1051 TAILQ_INIT(&inodedep->id_newinoupdt); 1052 ACQUIRE_LOCK(&lk); 1053 num_inodedep += 1; 1054 LIST_INSERT_HEAD(inodedephd, inodedep, id_hash); 1055 sema_release(&inodedep_in_progress); 1056 *inodedeppp = inodedep; 1057 return (0); 1058 } 1059 1060 /* 1061 * Structures and routines associated with newblk caching. 1062 */ 1063 LIST_HEAD(newblk_hashhead, newblk) *newblk_hashtbl; 1064 u_long newblk_hash; /* size of hash table - 1 */ 1065 #define NEWBLK_HASH(fs, inum) \ 1066 (&newblk_hashtbl[((((register_t)(fs)) >> 13) + (inum)) & newblk_hash]) 1067 static struct sema newblk_in_progress; 1068 1069 /* 1070 * Helper routine for newblk_lookup() 1071 */ 1072 static __inline 1073 struct newblk * 1074 newblk_find(struct newblk_hashhead *newblkhd, struct fs *fs, 1075 ufs_daddr_t newblkno) 1076 { 1077 struct newblk *newblk; 1078 1079 LIST_FOREACH(newblk, newblkhd, nb_hash) { 1080 if (newblkno == newblk->nb_newblkno && fs == newblk->nb_fs) 1081 return (newblk); 1082 } 1083 return(NULL); 1084 } 1085 1086 /* 1087 * Look up a newblk. Return 1 if found, 0 if not found. 1088 * If not found, allocate if DEPALLOC flag is passed. 1089 * Found or allocated entry is returned in newblkpp. 1090 */ 1091 static int 1092 newblk_lookup(struct fs *fs, ufs_daddr_t newblkno, int flags, 1093 struct newblk **newblkpp) 1094 { 1095 struct newblk *newblk; 1096 struct newblk_hashhead *newblkhd; 1097 1098 newblkhd = NEWBLK_HASH(fs, newblkno); 1099 top: 1100 *newblkpp = newblk_find(newblkhd, fs, newblkno); 1101 if (*newblkpp) 1102 return(1); 1103 if ((flags & DEPALLOC) == 0) 1104 return (0); 1105 if (sema_get(&newblk_in_progress, 0) == 0) 1106 goto top; 1107 MALLOC(newblk, struct newblk *, sizeof(struct newblk), 1108 M_NEWBLK, M_SOFTDEP_FLAGS | M_ZERO); 1109 1110 if (newblk_find(newblkhd, fs, newblkno)) { 1111 kprintf("newblk_lookup: blocking race avoided\n"); 1112 sema_release(&pagedep_in_progress); 1113 kfree(newblk, M_NEWBLK); 1114 goto top; 1115 } 1116 newblk->nb_state = 0; 1117 newblk->nb_fs = fs; 1118 newblk->nb_newblkno = newblkno; 1119 LIST_INSERT_HEAD(newblkhd, newblk, nb_hash); 1120 sema_release(&newblk_in_progress); 1121 *newblkpp = newblk; 1122 return (0); 1123 } 1124 1125 /* 1126 * Executed during filesystem system initialization before 1127 * mounting any filesystems. 
1128 */ 1129 void 1130 softdep_initialize(void) 1131 { 1132 callout_init(&handle); 1133 1134 LIST_INIT(&mkdirlisthd); 1135 LIST_INIT(&softdep_workitem_pending); 1136 max_softdeps = min(desiredvnodes * 8, 1137 M_INODEDEP->ks_limit / (2 * sizeof(struct inodedep))); 1138 pagedep_hashtbl = hashinit(desiredvnodes / 5, M_PAGEDEP, 1139 &pagedep_hash); 1140 sema_init(&pagedep_in_progress, "pagedep", 0, 0); 1141 inodedep_hashtbl = hashinit(desiredvnodes, M_INODEDEP, &inodedep_hash); 1142 sema_init(&inodedep_in_progress, "inodedep", 0, 0); 1143 newblk_hashtbl = hashinit(64, M_NEWBLK, &newblk_hash); 1144 sema_init(&newblk_in_progress, "newblk", 0, 0); 1145 add_bio_ops(&softdep_bioops); 1146 } 1147 1148 /* 1149 * Called at mount time to notify the dependency code that a 1150 * filesystem wishes to use it. 1151 */ 1152 int 1153 softdep_mount(struct vnode *devvp, struct mount *mp, struct fs *fs) 1154 { 1155 struct csum cstotal; 1156 struct cg *cgp; 1157 struct buf *bp; 1158 int error, cyl; 1159 1160 mp->mnt_flag &= ~MNT_ASYNC; 1161 mp->mnt_flag |= MNT_SOFTDEP; 1162 mp->mnt_bioops = &softdep_bioops; 1163 /* 1164 * When doing soft updates, the counters in the 1165 * superblock may have gotten out of sync, so we have 1166 * to scan the cylinder groups and recalculate them. 1167 */ 1168 if (fs->fs_clean != 0) 1169 return (0); 1170 bzero(&cstotal, sizeof cstotal); 1171 for (cyl = 0; cyl < fs->fs_ncg; cyl++) { 1172 if ((error = bread(devvp, fsbtodoff(fs, cgtod(fs, cyl)), 1173 fs->fs_cgsize, &bp)) != 0) { 1174 brelse(bp); 1175 return (error); 1176 } 1177 cgp = (struct cg *)bp->b_data; 1178 cstotal.cs_nffree += cgp->cg_cs.cs_nffree; 1179 cstotal.cs_nbfree += cgp->cg_cs.cs_nbfree; 1180 cstotal.cs_nifree += cgp->cg_cs.cs_nifree; 1181 cstotal.cs_ndir += cgp->cg_cs.cs_ndir; 1182 fs->fs_cs(fs, cyl) = cgp->cg_cs; 1183 brelse(bp); 1184 } 1185 #ifdef DEBUG 1186 if (bcmp(&cstotal, &fs->fs_cstotal, sizeof cstotal)) 1187 kprintf("ffs_mountfs: superblock updated for soft updates\n"); 1188 #endif 1189 bcopy(&cstotal, &fs->fs_cstotal, sizeof cstotal); 1190 return (0); 1191 } 1192 1193 /* 1194 * Protecting the freemaps (or bitmaps). 1195 * 1196 * To eliminate the need to execute fsck before mounting a filesystem 1197 * after a power failure, one must (conservatively) guarantee that the 1198 * on-disk copy of the bitmaps never indicate that a live inode or block is 1199 * free. So, when a block or inode is allocated, the bitmap should be 1200 * updated (on disk) before any new pointers. When a block or inode is 1201 * freed, the bitmap should not be updated until all pointers have been 1202 * reset. The latter dependency is handled by the delayed de-allocation 1203 * approach described below for block and inode de-allocation. The former 1204 * dependency is handled by calling the following procedure when a block or 1205 * inode is allocated. When an inode is allocated an "inodedep" is created 1206 * with its DEPCOMPLETE flag cleared until its bitmap is written to disk. 1207 * Each "inodedep" is also inserted into the hash indexing structure so 1208 * that any additional link additions can be made dependent on the inode 1209 * allocation. 1210 * 1211 * The ufs filesystem maintains a number of free block counts (e.g., per 1212 * cylinder group, per cylinder and per <cylinder, rotational position> pair) 1213 * in addition to the bitmaps. These counts are used to improve efficiency 1214 * during allocation and therefore must be consistent with the bitmaps. 
1215 * There is no convenient way to guarantee post-crash consistency of these 1216 * counts with simple update ordering, for two main reasons: (1) The counts 1217 * and bitmaps for a single cylinder group block are not in the same disk 1218 * sector. If a disk write is interrupted (e.g., by power failure), one may 1219 * be written and the other not. (2) Some of the counts are located in the 1220 * superblock rather than the cylinder group block. So, we focus our soft 1221 * updates implementation on protecting the bitmaps. When mounting a 1222 * filesystem, we recompute the auxiliary counts from the bitmaps. 1223 */ 1224 1225 /* 1226 * Called just after updating the cylinder group block to allocate an inode. 1227 * 1228 * Parameters: 1229 * bp: buffer for cylgroup block with inode map 1230 * ip: inode related to allocation 1231 * newinum: new inode number being allocated 1232 */ 1233 void 1234 softdep_setup_inomapdep(struct buf *bp, struct inode *ip, ino_t newinum) 1235 { 1236 struct inodedep *inodedep; 1237 struct bmsafemap *bmsafemap; 1238 1239 /* 1240 * Create a dependency for the newly allocated inode. 1241 * Panic if it already exists as something is seriously wrong. 1242 * Otherwise add it to the dependency list for the buffer holding 1243 * the cylinder group map from which it was allocated. 1244 */ 1245 ACQUIRE_LOCK(&lk); 1246 if ((inodedep_lookup(ip->i_fs, newinum, DEPALLOC|NODELAY, &inodedep))) { 1247 FREE_LOCK(&lk); 1248 panic("softdep_setup_inomapdep: found inode"); 1249 } 1250 inodedep->id_buf = bp; 1251 inodedep->id_state &= ~DEPCOMPLETE; 1252 bmsafemap = bmsafemap_lookup(bp); 1253 LIST_INSERT_HEAD(&bmsafemap->sm_inodedephd, inodedep, id_deps); 1254 FREE_LOCK(&lk); 1255 } 1256 1257 /* 1258 * Called just after updating the cylinder group block to 1259 * allocate block or fragment. 1260 * 1261 * Parameters: 1262 * bp: buffer for cylgroup block with block map 1263 * fs: filesystem doing allocation 1264 * newblkno: number of newly allocated block 1265 */ 1266 void 1267 softdep_setup_blkmapdep(struct buf *bp, struct fs *fs, 1268 ufs_daddr_t newblkno) 1269 { 1270 struct newblk *newblk; 1271 struct bmsafemap *bmsafemap; 1272 1273 /* 1274 * Create a dependency for the newly allocated block. 1275 * Add it to the dependency list for the buffer holding 1276 * the cylinder group map from which it was allocated. 1277 */ 1278 if (newblk_lookup(fs, newblkno, DEPALLOC, &newblk) != 0) 1279 panic("softdep_setup_blkmapdep: found block"); 1280 ACQUIRE_LOCK(&lk); 1281 newblk->nb_bmsafemap = bmsafemap = bmsafemap_lookup(bp); 1282 LIST_INSERT_HEAD(&bmsafemap->sm_newblkhd, newblk, nb_deps); 1283 FREE_LOCK(&lk); 1284 } 1285 1286 /* 1287 * Find the bmsafemap associated with a cylinder group buffer. 1288 * If none exists, create one. The buffer must be locked when 1289 * this routine is called and this routine must be called with 1290 * splbio interrupts blocked. 
1291 */ 1292 static struct bmsafemap * 1293 bmsafemap_lookup(struct buf *bp) 1294 { 1295 struct bmsafemap *bmsafemap; 1296 struct worklist *wk; 1297 1298 #ifdef DEBUG 1299 if (lk.lkt_held == NOHOLDER) 1300 panic("bmsafemap_lookup: lock not held"); 1301 #endif 1302 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 1303 if (wk->wk_type == D_BMSAFEMAP) 1304 return (WK_BMSAFEMAP(wk)); 1305 } 1306 FREE_LOCK(&lk); 1307 MALLOC(bmsafemap, struct bmsafemap *, sizeof(struct bmsafemap), 1308 M_BMSAFEMAP, M_SOFTDEP_FLAGS); 1309 bmsafemap->sm_list.wk_type = D_BMSAFEMAP; 1310 bmsafemap->sm_list.wk_state = 0; 1311 bmsafemap->sm_buf = bp; 1312 LIST_INIT(&bmsafemap->sm_allocdirecthd); 1313 LIST_INIT(&bmsafemap->sm_allocindirhd); 1314 LIST_INIT(&bmsafemap->sm_inodedephd); 1315 LIST_INIT(&bmsafemap->sm_newblkhd); 1316 ACQUIRE_LOCK(&lk); 1317 WORKLIST_INSERT_BP(bp, &bmsafemap->sm_list); 1318 return (bmsafemap); 1319 } 1320 1321 /* 1322 * Direct block allocation dependencies. 1323 * 1324 * When a new block is allocated, the corresponding disk locations must be 1325 * initialized (with zeros or new data) before the on-disk inode points to 1326 * them. Also, the freemap from which the block was allocated must be 1327 * updated (on disk) before the inode's pointer. These two dependencies are 1328 * independent of each other and are needed for all file blocks and indirect 1329 * blocks that are pointed to directly by the inode. Just before the 1330 * "in-core" version of the inode is updated with a newly allocated block 1331 * number, a procedure (below) is called to setup allocation dependency 1332 * structures. These structures are removed when the corresponding 1333 * dependencies are satisfied or when the block allocation becomes obsolete 1334 * (i.e., the file is deleted, the block is de-allocated, or the block is a 1335 * fragment that gets upgraded). All of these cases are handled in 1336 * procedures described later. 1337 * 1338 * When a file extension causes a fragment to be upgraded, either to a larger 1339 * fragment or to a full block, the on-disk location may change (if the 1340 * previous fragment could not simply be extended). In this case, the old 1341 * fragment must be de-allocated, but not until after the inode's pointer has 1342 * been updated. In most cases, this is handled by later procedures, which 1343 * will construct a "freefrag" structure to be added to the workitem queue 1344 * when the inode update is complete (or obsolete). The main exception to 1345 * this is when an allocation occurs while a pending allocation dependency 1346 * (for the same block pointer) remains. This case is handled in the main 1347 * allocation dependency setup procedure by immediately freeing the 1348 * unreferenced fragments. 
1349 * 1350 * Parameters: 1351 * ip: inode to which block is being added 1352 * lbn: block pointer within inode 1353 * newblkno: disk block number being added 1354 * oldblkno: previous block number, 0 unless frag 1355 * newsize: size of new block 1356 * oldsize: size of new block 1357 * bp: bp for allocated block 1358 */ 1359 void 1360 softdep_setup_allocdirect(struct inode *ip, ufs_lbn_t lbn, ufs_daddr_t newblkno, 1361 ufs_daddr_t oldblkno, long newsize, long oldsize, 1362 struct buf *bp) 1363 { 1364 struct allocdirect *adp, *oldadp; 1365 struct allocdirectlst *adphead; 1366 struct bmsafemap *bmsafemap; 1367 struct inodedep *inodedep; 1368 struct pagedep *pagedep; 1369 struct newblk *newblk; 1370 1371 MALLOC(adp, struct allocdirect *, sizeof(struct allocdirect), 1372 M_ALLOCDIRECT, M_SOFTDEP_FLAGS | M_ZERO); 1373 adp->ad_list.wk_type = D_ALLOCDIRECT; 1374 adp->ad_lbn = lbn; 1375 adp->ad_newblkno = newblkno; 1376 adp->ad_oldblkno = oldblkno; 1377 adp->ad_newsize = newsize; 1378 adp->ad_oldsize = oldsize; 1379 adp->ad_state = ATTACHED; 1380 if (newblkno == oldblkno) 1381 adp->ad_freefrag = NULL; 1382 else 1383 adp->ad_freefrag = newfreefrag(ip, oldblkno, oldsize); 1384 1385 if (newblk_lookup(ip->i_fs, newblkno, 0, &newblk) == 0) 1386 panic("softdep_setup_allocdirect: lost block"); 1387 1388 ACQUIRE_LOCK(&lk); 1389 inodedep_lookup(ip->i_fs, ip->i_number, DEPALLOC | NODELAY, &inodedep); 1390 adp->ad_inodedep = inodedep; 1391 1392 if (newblk->nb_state == DEPCOMPLETE) { 1393 adp->ad_state |= DEPCOMPLETE; 1394 adp->ad_buf = NULL; 1395 } else { 1396 bmsafemap = newblk->nb_bmsafemap; 1397 adp->ad_buf = bmsafemap->sm_buf; 1398 LIST_REMOVE(newblk, nb_deps); 1399 LIST_INSERT_HEAD(&bmsafemap->sm_allocdirecthd, adp, ad_deps); 1400 } 1401 LIST_REMOVE(newblk, nb_hash); 1402 FREE(newblk, M_NEWBLK); 1403 1404 WORKLIST_INSERT_BP(bp, &adp->ad_list); 1405 if (lbn >= NDADDR) { 1406 /* allocating an indirect block */ 1407 if (oldblkno != 0) { 1408 FREE_LOCK(&lk); 1409 panic("softdep_setup_allocdirect: non-zero indir"); 1410 } 1411 } else { 1412 /* 1413 * Allocating a direct block. 1414 * 1415 * If we are allocating a directory block, then we must 1416 * allocate an associated pagedep to track additions and 1417 * deletions. 1418 */ 1419 if ((ip->i_mode & IFMT) == IFDIR && 1420 pagedep_lookup(ip, lbn, DEPALLOC, &pagedep) == 0) { 1421 WORKLIST_INSERT_BP(bp, &pagedep->pd_list); 1422 } 1423 } 1424 /* 1425 * The list of allocdirects must be kept in sorted and ascending 1426 * order so that the rollback routines can quickly determine the 1427 * first uncommitted block (the size of the file stored on disk 1428 * ends at the end of the lowest committed fragment, or if there 1429 * are no fragments, at the end of the highest committed block). 1430 * Since files generally grow, the typical case is that the new 1431 * block is to be added at the end of the list. We speed this 1432 * special case by checking against the last allocdirect in the 1433 * list before laboriously traversing the list looking for the 1434 * insertion point. 
1435 */ 1436 adphead = &inodedep->id_newinoupdt; 1437 oldadp = TAILQ_LAST(adphead, allocdirectlst); 1438 if (oldadp == NULL || oldadp->ad_lbn <= lbn) { 1439 /* insert at end of list */ 1440 TAILQ_INSERT_TAIL(adphead, adp, ad_next); 1441 if (oldadp != NULL && oldadp->ad_lbn == lbn) 1442 allocdirect_merge(adphead, adp, oldadp); 1443 FREE_LOCK(&lk); 1444 return; 1445 } 1446 TAILQ_FOREACH(oldadp, adphead, ad_next) { 1447 if (oldadp->ad_lbn >= lbn) 1448 break; 1449 } 1450 if (oldadp == NULL) { 1451 FREE_LOCK(&lk); 1452 panic("softdep_setup_allocdirect: lost entry"); 1453 } 1454 /* insert in middle of list */ 1455 TAILQ_INSERT_BEFORE(oldadp, adp, ad_next); 1456 if (oldadp->ad_lbn == lbn) 1457 allocdirect_merge(adphead, adp, oldadp); 1458 FREE_LOCK(&lk); 1459 } 1460 1461 /* 1462 * Replace an old allocdirect dependency with a newer one. 1463 * This routine must be called with splbio interrupts blocked. 1464 * 1465 * Parameters: 1466 * adphead: head of list holding allocdirects 1467 * newadp: allocdirect being added 1468 * oldadp: existing allocdirect being checked 1469 */ 1470 static void 1471 allocdirect_merge(struct allocdirectlst *adphead, 1472 struct allocdirect *newadp, 1473 struct allocdirect *oldadp) 1474 { 1475 struct freefrag *freefrag; 1476 1477 #ifdef DEBUG 1478 if (lk.lkt_held == NOHOLDER) 1479 panic("allocdirect_merge: lock not held"); 1480 #endif 1481 if (newadp->ad_oldblkno != oldadp->ad_newblkno || 1482 newadp->ad_oldsize != oldadp->ad_newsize || 1483 newadp->ad_lbn >= NDADDR) { 1484 FREE_LOCK(&lk); 1485 panic("allocdirect_check: old %d != new %d || lbn %ld >= %d", 1486 newadp->ad_oldblkno, oldadp->ad_newblkno, newadp->ad_lbn, 1487 NDADDR); 1488 } 1489 newadp->ad_oldblkno = oldadp->ad_oldblkno; 1490 newadp->ad_oldsize = oldadp->ad_oldsize; 1491 /* 1492 * If the old dependency had a fragment to free or had never 1493 * previously had a block allocated, then the new dependency 1494 * can immediately post its freefrag and adopt the old freefrag. 1495 * This action is done by swapping the freefrag dependencies. 1496 * The new dependency gains the old one's freefrag, and the 1497 * old one gets the new one and then immediately puts it on 1498 * the worklist when it is freed by free_allocdirect. It is 1499 * not possible to do this swap when the old dependency had a 1500 * non-zero size but no previous fragment to free. This condition 1501 * arises when the new block is an extension of the old block. 1502 * Here, the first part of the fragment allocated to the new 1503 * dependency is part of the block currently claimed on disk by 1504 * the old dependency, so cannot legitimately be freed until the 1505 * conditions for the new dependency are fulfilled. 1506 */ 1507 if (oldadp->ad_freefrag != NULL || oldadp->ad_oldblkno == 0) { 1508 freefrag = newadp->ad_freefrag; 1509 newadp->ad_freefrag = oldadp->ad_freefrag; 1510 oldadp->ad_freefrag = freefrag; 1511 } 1512 free_allocdirect(adphead, oldadp, 0); 1513 } 1514 1515 /* 1516 * Allocate a new freefrag structure if needed. 
1517 */ 1518 static struct freefrag * 1519 newfreefrag(struct inode *ip, ufs_daddr_t blkno, long size) 1520 { 1521 struct freefrag *freefrag; 1522 struct fs *fs; 1523 1524 if (blkno == 0) 1525 return (NULL); 1526 fs = ip->i_fs; 1527 if (fragnum(fs, blkno) + numfrags(fs, size) > fs->fs_frag) 1528 panic("newfreefrag: frag size"); 1529 MALLOC(freefrag, struct freefrag *, sizeof(struct freefrag), 1530 M_FREEFRAG, M_SOFTDEP_FLAGS); 1531 freefrag->ff_list.wk_type = D_FREEFRAG; 1532 freefrag->ff_state = ip->i_uid & ~ONWORKLIST; /* XXX - used below */ 1533 freefrag->ff_inum = ip->i_number; 1534 freefrag->ff_fs = fs; 1535 freefrag->ff_devvp = ip->i_devvp; 1536 freefrag->ff_blkno = blkno; 1537 freefrag->ff_fragsize = size; 1538 return (freefrag); 1539 } 1540 1541 /* 1542 * This workitem de-allocates fragments that were replaced during 1543 * file block allocation. 1544 */ 1545 static void 1546 handle_workitem_freefrag(struct freefrag *freefrag) 1547 { 1548 struct inode tip; 1549 1550 tip.i_fs = freefrag->ff_fs; 1551 tip.i_devvp = freefrag->ff_devvp; 1552 tip.i_dev = freefrag->ff_devvp->v_rdev; 1553 tip.i_number = freefrag->ff_inum; 1554 tip.i_uid = freefrag->ff_state & ~ONWORKLIST; /* XXX - set above */ 1555 ffs_blkfree(&tip, freefrag->ff_blkno, freefrag->ff_fragsize); 1556 FREE(freefrag, M_FREEFRAG); 1557 } 1558 1559 /* 1560 * Indirect block allocation dependencies. 1561 * 1562 * The same dependencies that exist for a direct block also exist when 1563 * a new block is allocated and pointed to by an entry in a block of 1564 * indirect pointers. The undo/redo states described above are also 1565 * used here. Because an indirect block contains many pointers that 1566 * may have dependencies, a second copy of the entire in-memory indirect 1567 * block is kept. The buffer cache copy is always completely up-to-date. 1568 * The second copy, which is used only as a source for disk writes, 1569 * contains only the safe pointers (i.e., those that have no remaining 1570 * update dependencies). The second copy is freed when all pointers 1571 * are safe. The cache is not allowed to replace indirect blocks with 1572 * pending update dependencies. If a buffer containing an indirect 1573 * block with dependencies is written, these routines will mark it 1574 * dirty again. It can only be successfully written once all the 1575 * dependencies are removed. The ffs_fsync routine in conjunction with 1576 * softdep_sync_metadata work together to get all the dependencies 1577 * removed so that a file can be successfully written to disk. Three 1578 * procedures are used when setting up indirect block pointer 1579 * dependencies. The division is necessary because of the organization 1580 * of the "balloc" routine and because of the distinction between file 1581 * pages and file metadata blocks. 1582 */ 1583 1584 /* 1585 * Allocate a new allocindir structure. 
1586 * 1587 * Parameters: 1588 * ip: inode for file being extended 1589 * ptrno: offset of pointer in indirect block 1590 * newblkno: disk block number being added 1591 * oldblkno: previous block number, 0 if none 1592 */ 1593 static struct allocindir * 1594 newallocindir(struct inode *ip, int ptrno, ufs_daddr_t newblkno, 1595 ufs_daddr_t oldblkno) 1596 { 1597 struct allocindir *aip; 1598 1599 MALLOC(aip, struct allocindir *, sizeof(struct allocindir), 1600 M_ALLOCINDIR, M_SOFTDEP_FLAGS | M_ZERO); 1601 aip->ai_list.wk_type = D_ALLOCINDIR; 1602 aip->ai_state = ATTACHED; 1603 aip->ai_offset = ptrno; 1604 aip->ai_newblkno = newblkno; 1605 aip->ai_oldblkno = oldblkno; 1606 aip->ai_freefrag = newfreefrag(ip, oldblkno, ip->i_fs->fs_bsize); 1607 return (aip); 1608 } 1609 1610 /* 1611 * Called just before setting an indirect block pointer 1612 * to a newly allocated file page. 1613 * 1614 * Parameters: 1615 * ip: inode for file being extended 1616 * lbn: allocated block number within file 1617 * bp: buffer with indirect blk referencing page 1618 * ptrno: offset of pointer in indirect block 1619 * newblkno: disk block number being added 1620 * oldblkno: previous block number, 0 if none 1621 * nbp: buffer holding allocated page 1622 */ 1623 void 1624 softdep_setup_allocindir_page(struct inode *ip, ufs_lbn_t lbn, 1625 struct buf *bp, int ptrno, 1626 ufs_daddr_t newblkno, ufs_daddr_t oldblkno, 1627 struct buf *nbp) 1628 { 1629 struct allocindir *aip; 1630 struct pagedep *pagedep; 1631 1632 aip = newallocindir(ip, ptrno, newblkno, oldblkno); 1633 ACQUIRE_LOCK(&lk); 1634 /* 1635 * If we are allocating a directory page, then we must 1636 * allocate an associated pagedep to track additions and 1637 * deletions. 1638 */ 1639 if ((ip->i_mode & IFMT) == IFDIR && 1640 pagedep_lookup(ip, lbn, DEPALLOC, &pagedep) == 0) 1641 WORKLIST_INSERT_BP(nbp, &pagedep->pd_list); 1642 WORKLIST_INSERT_BP(nbp, &aip->ai_list); 1643 FREE_LOCK(&lk); 1644 setup_allocindir_phase2(bp, ip, aip); 1645 } 1646 1647 /* 1648 * Called just before setting an indirect block pointer to a 1649 * newly allocated indirect block. 1650 * Parameters: 1651 * nbp: newly allocated indirect block 1652 * ip: inode for file being extended 1653 * bp: indirect block referencing allocated block 1654 * ptrno: offset of pointer in indirect block 1655 * newblkno: disk block number being added 1656 */ 1657 void 1658 softdep_setup_allocindir_meta(struct buf *nbp, struct inode *ip, 1659 struct buf *bp, int ptrno, 1660 ufs_daddr_t newblkno) 1661 { 1662 struct allocindir *aip; 1663 1664 aip = newallocindir(ip, ptrno, newblkno, 0); 1665 ACQUIRE_LOCK(&lk); 1666 WORKLIST_INSERT_BP(nbp, &aip->ai_list); 1667 FREE_LOCK(&lk); 1668 setup_allocindir_phase2(bp, ip, aip); 1669 } 1670 1671 /* 1672 * Called to finish the allocation of the "aip" allocated 1673 * by one of the two routines above. 
1674 * 1675 * Parameters: 1676 * bp: in-memory copy of the indirect block 1677 * ip: inode for file being extended 1678 * aip: allocindir allocated by the above routines 1679 */ 1680 static void 1681 setup_allocindir_phase2(struct buf *bp, struct inode *ip, 1682 struct allocindir *aip) 1683 { 1684 struct worklist *wk; 1685 struct indirdep *indirdep, *newindirdep; 1686 struct bmsafemap *bmsafemap; 1687 struct allocindir *oldaip; 1688 struct freefrag *freefrag; 1689 struct newblk *newblk; 1690 1691 if (bp->b_loffset >= 0) 1692 panic("setup_allocindir_phase2: not indir blk"); 1693 for (indirdep = NULL, newindirdep = NULL; ; ) { 1694 ACQUIRE_LOCK(&lk); 1695 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 1696 if (wk->wk_type != D_INDIRDEP) 1697 continue; 1698 indirdep = WK_INDIRDEP(wk); 1699 break; 1700 } 1701 if (indirdep == NULL && newindirdep) { 1702 indirdep = newindirdep; 1703 WORKLIST_INSERT_BP(bp, &indirdep->ir_list); 1704 newindirdep = NULL; 1705 } 1706 FREE_LOCK(&lk); 1707 if (indirdep) { 1708 if (newblk_lookup(ip->i_fs, aip->ai_newblkno, 0, 1709 &newblk) == 0) 1710 panic("setup_allocindir: lost block"); 1711 ACQUIRE_LOCK(&lk); 1712 if (newblk->nb_state == DEPCOMPLETE) { 1713 aip->ai_state |= DEPCOMPLETE; 1714 aip->ai_buf = NULL; 1715 } else { 1716 bmsafemap = newblk->nb_bmsafemap; 1717 aip->ai_buf = bmsafemap->sm_buf; 1718 LIST_REMOVE(newblk, nb_deps); 1719 LIST_INSERT_HEAD(&bmsafemap->sm_allocindirhd, 1720 aip, ai_deps); 1721 } 1722 LIST_REMOVE(newblk, nb_hash); 1723 FREE(newblk, M_NEWBLK); 1724 aip->ai_indirdep = indirdep; 1725 /* 1726 * Check to see if there is an existing dependency 1727 * for this block. If there is, merge the old 1728 * dependency into the new one. 1729 */ 1730 if (aip->ai_oldblkno == 0) 1731 oldaip = NULL; 1732 else 1733 1734 LIST_FOREACH(oldaip, &indirdep->ir_deplisthd, ai_next) 1735 if (oldaip->ai_offset == aip->ai_offset) 1736 break; 1737 if (oldaip != NULL) { 1738 if (oldaip->ai_newblkno != aip->ai_oldblkno) { 1739 FREE_LOCK(&lk); 1740 panic("setup_allocindir_phase2: blkno"); 1741 } 1742 aip->ai_oldblkno = oldaip->ai_oldblkno; 1743 freefrag = oldaip->ai_freefrag; 1744 oldaip->ai_freefrag = aip->ai_freefrag; 1745 aip->ai_freefrag = freefrag; 1746 free_allocindir(oldaip, NULL); 1747 } 1748 LIST_INSERT_HEAD(&indirdep->ir_deplisthd, aip, ai_next); 1749 ((ufs_daddr_t *)indirdep->ir_savebp->b_data) 1750 [aip->ai_offset] = aip->ai_oldblkno; 1751 FREE_LOCK(&lk); 1752 } 1753 if (newindirdep) { 1754 /* 1755 * Avoid any possibility of data corruption by 1756 * ensuring that our old version is thrown away. 1757 */ 1758 newindirdep->ir_savebp->b_flags |= B_INVAL | B_NOCACHE; 1759 brelse(newindirdep->ir_savebp); 1760 WORKITEM_FREE((caddr_t)newindirdep, D_INDIRDEP); 1761 } 1762 if (indirdep) 1763 break; 1764 MALLOC(newindirdep, struct indirdep *, sizeof(struct indirdep), 1765 M_INDIRDEP, M_SOFTDEP_FLAGS); 1766 newindirdep->ir_list.wk_type = D_INDIRDEP; 1767 newindirdep->ir_state = ATTACHED; 1768 LIST_INIT(&newindirdep->ir_deplisthd); 1769 LIST_INIT(&newindirdep->ir_donehd); 1770 if (bp->b_bio2.bio_offset == NOOFFSET) { 1771 VOP_BMAP(bp->b_vp, bp->b_bio1.bio_offset, 1772 &bp->b_bio2.bio_offset, NULL, NULL, 1773 BUF_CMD_WRITE); 1774 } 1775 KKASSERT(bp->b_bio2.bio_offset != NOOFFSET); 1776 newindirdep->ir_savebp = getblk(ip->i_devvp, 1777 bp->b_bio2.bio_offset, 1778 bp->b_bcount, 0, 0); 1779 BUF_KERNPROC(newindirdep->ir_savebp); 1780 bcopy(bp->b_data, newindirdep->ir_savebp->b_data, bp->b_bcount); 1781 } 1782 } 1783 1784 /* 1785 * Block de-allocation dependencies. 
1786 *
1787 * When blocks are de-allocated, the on-disk pointers must be nullified before
1788 * the blocks are made available for use by other files. (The true
1789 * requirement is that old pointers must be nullified before new on-disk
1790 * pointers are set. We chose this slightly more stringent requirement to
1791 * reduce complexity.) Our implementation handles this dependency by updating
1792 * the inode (or indirect block) appropriately but delaying the actual block
1793 * de-allocation (i.e., freemap and free space count manipulation) until
1794 * after the updated versions reach stable storage. After the disk is
1795 * updated, the blocks can be safely de-allocated whenever it is convenient.
1796 * This implementation handles only the common case of reducing a file's
1797 * length to zero. Other cases are handled by the conventional synchronous
1798 * write approach.
1799 *
1800 * The ffs implementation with which we worked double-checks
1801 * the state of the block pointers and file size as it reduces
1802 * a file's length. Some of this code is replicated here in our
1803 * soft updates implementation. The freeblks->fb_chkcnt field is
1804 * used to transfer a part of this information to the procedure
1805 * that eventually de-allocates the blocks.
1806 *
1807 * This routine should be called from the routine that shortens
1808 * a file's length, before the inode's size or block pointers
1809 * are modified. It will save the block pointer information for
1810 * later release and zero the inode so that the calling routine
1811 * can release it.
1812 */
1813 struct softdep_setup_freeblocks_info {
1814 struct fs *fs;
1815 struct inode *ip;
1816 };
1817 
1818 static int softdep_setup_freeblocks_bp(struct buf *bp, void *data);
1819 
1820 /*
1821 * Parameters:
1822 * ip: The inode whose length is to be reduced
1823 * length: The new length for the file
1824 */
1825 void
1826 softdep_setup_freeblocks(struct inode *ip, off_t length)
1827 {
1828 struct softdep_setup_freeblocks_info info;
1829 struct freeblks *freeblks;
1830 struct inodedep *inodedep;
1831 struct allocdirect *adp;
1832 struct vnode *vp;
1833 struct buf *bp;
1834 struct fs *fs;
1835 int i, error, delay;
1836 int count;
1837 
1838 fs = ip->i_fs;
1839 if (length != 0)
1840 panic("softdep_setup_freeblocks: non-zero length");
1841 MALLOC(freeblks, struct freeblks *, sizeof(struct freeblks),
1842 M_FREEBLKS, M_SOFTDEP_FLAGS | M_ZERO);
1843 freeblks->fb_list.wk_type = D_FREEBLKS;
1844 freeblks->fb_state = ATTACHED;
1845 freeblks->fb_uid = ip->i_uid;
1846 freeblks->fb_previousinum = ip->i_number;
1847 freeblks->fb_devvp = ip->i_devvp;
1848 freeblks->fb_fs = fs;
1849 freeblks->fb_oldsize = ip->i_size;
1850 freeblks->fb_newsize = length;
1851 freeblks->fb_chkcnt = ip->i_blocks;
1852 for (i = 0; i < NDADDR; i++) {
1853 freeblks->fb_dblks[i] = ip->i_db[i];
1854 ip->i_db[i] = 0;
1855 }
1856 for (i = 0; i < NIADDR; i++) {
1857 freeblks->fb_iblks[i] = ip->i_ib[i];
1858 ip->i_ib[i] = 0;
1859 }
1860 ip->i_blocks = 0;
1861 ip->i_size = 0;
1862 /*
1863 * Push the zero'ed inode to its disk buffer so that we are free
1864 * to delete its dependencies below. Once the dependencies are gone
1865 * the buffer can be safely released.
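 *
 * A hedged sketch of the overall ordering this function arranges
 * (schematic only, not real code):
 */
#if 0	/* illustrative sketch only -- not compiled */
	ip->i_db[0] = 0;	/* 1. in-core pointers cleared (above)      */
	bdwrite(ibp);		/* 2. zero'ed dinode pushed to its buffer   */
	/* 3. only once that buffer is on disk does the worklist run      */
	/*    handle_workitem_freeblocks() to hand the blocks back.       */
#endif
/*
 * Now copy the zero'ed inode into the inode block's buffer: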
1866 */ 1867 if ((error = bread(ip->i_devvp, 1868 fsbtodoff(fs, ino_to_fsba(fs, ip->i_number)), 1869 (int)fs->fs_bsize, &bp)) != 0) 1870 softdep_error("softdep_setup_freeblocks", error); 1871 *((struct ufs1_dinode *)bp->b_data + ino_to_fsbo(fs, ip->i_number)) = 1872 ip->i_din; 1873 /* 1874 * Find and eliminate any inode dependencies. 1875 */ 1876 ACQUIRE_LOCK(&lk); 1877 (void) inodedep_lookup(fs, ip->i_number, DEPALLOC, &inodedep); 1878 if ((inodedep->id_state & IOSTARTED) != 0) { 1879 FREE_LOCK(&lk); 1880 panic("softdep_setup_freeblocks: inode busy"); 1881 } 1882 /* 1883 * Add the freeblks structure to the list of operations that 1884 * must await the zero'ed inode being written to disk. If we 1885 * still have a bitmap dependency (delay == 0), then the inode 1886 * has never been written to disk, so we can process the 1887 * freeblks below once we have deleted the dependencies. 1888 */ 1889 delay = (inodedep->id_state & DEPCOMPLETE); 1890 if (delay) 1891 WORKLIST_INSERT(&inodedep->id_bufwait, &freeblks->fb_list); 1892 /* 1893 * Because the file length has been truncated to zero, any 1894 * pending block allocation dependency structures associated 1895 * with this inode are obsolete and can simply be de-allocated. 1896 * We must first merge the two dependency lists to get rid of 1897 * any duplicate freefrag structures, then purge the merged list. 1898 */ 1899 merge_inode_lists(inodedep); 1900 while ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != 0) 1901 free_allocdirect(&inodedep->id_inoupdt, adp, 1); 1902 FREE_LOCK(&lk); 1903 bdwrite(bp); 1904 /* 1905 * We must wait for any I/O in progress to finish so that 1906 * all potential buffers on the dirty list will be visible. 1907 * Once they are all there, walk the list and get rid of 1908 * any dependencies. 1909 */ 1910 vp = ITOV(ip); 1911 ACQUIRE_LOCK(&lk); 1912 drain_output(vp, 1); 1913 1914 info.fs = fs; 1915 info.ip = ip; 1916 lwkt_gettoken(&vp->v_token); 1917 do { 1918 count = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL, 1919 softdep_setup_freeblocks_bp, &info); 1920 } while (count != 0); 1921 lwkt_reltoken(&vp->v_token); 1922 1923 if (inodedep_lookup(fs, ip->i_number, 0, &inodedep) != 0) 1924 (void)free_inodedep(inodedep); 1925 1926 if (delay) { 1927 freeblks->fb_state |= DEPCOMPLETE; 1928 /* 1929 * If the inode with zeroed block pointers is now on disk 1930 * we can start freeing blocks. Add freeblks to the worklist 1931 * instead of calling handle_workitem_freeblocks directly as 1932 * it is more likely that additional IO is needed to complete 1933 * the request here than in the !delay case. 1934 */ 1935 if ((freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE) 1936 add_to_worklist(&freeblks->fb_list); 1937 } 1938 1939 FREE_LOCK(&lk); 1940 /* 1941 * If the inode has never been written to disk (delay == 0), 1942 * then we can process the freeblks now that we have deleted 1943 * the dependencies. 
1944 */ 1945 if (!delay) 1946 handle_workitem_freeblocks(freeblks); 1947 } 1948 1949 static int 1950 softdep_setup_freeblocks_bp(struct buf *bp, void *data) 1951 { 1952 struct softdep_setup_freeblocks_info *info = data; 1953 struct inodedep *inodedep; 1954 1955 if (getdirtybuf(&bp, MNT_WAIT) == 0) { 1956 kprintf("softdep_setup_freeblocks_bp(1): caught bp %p going away\n", bp); 1957 return(-1); 1958 } 1959 if (bp->b_vp != ITOV(info->ip) || (bp->b_flags & B_DELWRI) == 0) { 1960 kprintf("softdep_setup_freeblocks_bp(2): caught bp %p going away\n", bp); 1961 BUF_UNLOCK(bp); 1962 return(-1); 1963 } 1964 (void) inodedep_lookup(info->fs, info->ip->i_number, 0, &inodedep); 1965 deallocate_dependencies(bp, inodedep); 1966 bp->b_flags |= B_INVAL | B_NOCACHE; 1967 FREE_LOCK(&lk); 1968 brelse(bp); 1969 ACQUIRE_LOCK(&lk); 1970 return(1); 1971 } 1972 1973 /* 1974 * Reclaim any dependency structures from a buffer that is about to 1975 * be reallocated to a new vnode. The buffer must be locked, thus, 1976 * no I/O completion operations can occur while we are manipulating 1977 * its associated dependencies. The mutex is held so that other I/O's 1978 * associated with related dependencies do not occur. 1979 */ 1980 static void 1981 deallocate_dependencies(struct buf *bp, struct inodedep *inodedep) 1982 { 1983 struct worklist *wk; 1984 struct indirdep *indirdep; 1985 struct allocindir *aip; 1986 struct pagedep *pagedep; 1987 struct dirrem *dirrem; 1988 struct diradd *dap; 1989 int i; 1990 1991 while ((wk = LIST_FIRST(&bp->b_dep)) != NULL) { 1992 switch (wk->wk_type) { 1993 1994 case D_INDIRDEP: 1995 indirdep = WK_INDIRDEP(wk); 1996 /* 1997 * None of the indirect pointers will ever be visible, 1998 * so they can simply be tossed. GOINGAWAY ensures 1999 * that allocated pointers will be saved in the buffer 2000 * cache until they are freed. Note that they will 2001 * only be able to be found by their physical address 2002 * since the inode mapping the logical address will 2003 * be gone. The save buffer used for the safe copy 2004 * was allocated in setup_allocindir_phase2 using 2005 * the physical address so it could be used for this 2006 * purpose. Hence we swap the safe copy with the real 2007 * copy, allowing the safe copy to be freed and holding 2008 * on to the real copy for later use in indir_trunc. 2009 * 2010 * NOTE: ir_savebp is relative to the block device 2011 * so b_bio1 contains the device block number. 2012 */ 2013 if (indirdep->ir_state & GOINGAWAY) { 2014 FREE_LOCK(&lk); 2015 panic("deallocate_dependencies: already gone"); 2016 } 2017 indirdep->ir_state |= GOINGAWAY; 2018 while ((aip = LIST_FIRST(&indirdep->ir_deplisthd)) != 0) 2019 free_allocindir(aip, inodedep); 2020 if (bp->b_bio1.bio_offset >= 0 || 2021 bp->b_bio2.bio_offset != indirdep->ir_savebp->b_bio1.bio_offset) { 2022 FREE_LOCK(&lk); 2023 panic("deallocate_dependencies: not indir"); 2024 } 2025 bcopy(bp->b_data, indirdep->ir_savebp->b_data, 2026 bp->b_bcount); 2027 WORKLIST_REMOVE(wk); 2028 WORKLIST_INSERT_BP(indirdep->ir_savebp, wk); 2029 continue; 2030 2031 case D_PAGEDEP: 2032 pagedep = WK_PAGEDEP(wk); 2033 /* 2034 * None of the directory additions will ever be 2035 * visible, so they can simply be tossed. 
2036 */ 2037 for (i = 0; i < DAHASHSZ; i++) 2038 while ((dap = 2039 LIST_FIRST(&pagedep->pd_diraddhd[i]))) 2040 free_diradd(dap); 2041 while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != 0) 2042 free_diradd(dap); 2043 /* 2044 * Copy any directory remove dependencies to the list 2045 * to be processed after the zero'ed inode is written. 2046 * If the inode has already been written, then they 2047 * can be dumped directly onto the work list. 2048 */ 2049 LIST_FOREACH(dirrem, &pagedep->pd_dirremhd, dm_next) { 2050 LIST_REMOVE(dirrem, dm_next); 2051 dirrem->dm_dirinum = pagedep->pd_ino; 2052 if (inodedep == NULL || 2053 (inodedep->id_state & ALLCOMPLETE) == 2054 ALLCOMPLETE) 2055 add_to_worklist(&dirrem->dm_list); 2056 else 2057 WORKLIST_INSERT(&inodedep->id_bufwait, 2058 &dirrem->dm_list); 2059 } 2060 WORKLIST_REMOVE(&pagedep->pd_list); 2061 LIST_REMOVE(pagedep, pd_hash); 2062 WORKITEM_FREE(pagedep, D_PAGEDEP); 2063 continue; 2064 2065 case D_ALLOCINDIR: 2066 free_allocindir(WK_ALLOCINDIR(wk), inodedep); 2067 continue; 2068 2069 case D_ALLOCDIRECT: 2070 case D_INODEDEP: 2071 FREE_LOCK(&lk); 2072 panic("deallocate_dependencies: Unexpected type %s", 2073 TYPENAME(wk->wk_type)); 2074 /* NOTREACHED */ 2075 2076 default: 2077 FREE_LOCK(&lk); 2078 panic("deallocate_dependencies: Unknown type %s", 2079 TYPENAME(wk->wk_type)); 2080 /* NOTREACHED */ 2081 } 2082 } 2083 } 2084 2085 /* 2086 * Free an allocdirect. Generate a new freefrag work request if appropriate. 2087 * This routine must be called with splbio interrupts blocked. 2088 */ 2089 static void 2090 free_allocdirect(struct allocdirectlst *adphead, 2091 struct allocdirect *adp, int delay) 2092 { 2093 2094 #ifdef DEBUG 2095 if (lk.lkt_held == NOHOLDER) 2096 panic("free_allocdirect: lock not held"); 2097 #endif 2098 if ((adp->ad_state & DEPCOMPLETE) == 0) 2099 LIST_REMOVE(adp, ad_deps); 2100 TAILQ_REMOVE(adphead, adp, ad_next); 2101 if ((adp->ad_state & COMPLETE) == 0) 2102 WORKLIST_REMOVE(&adp->ad_list); 2103 if (adp->ad_freefrag != NULL) { 2104 if (delay) 2105 WORKLIST_INSERT(&adp->ad_inodedep->id_bufwait, 2106 &adp->ad_freefrag->ff_list); 2107 else 2108 add_to_worklist(&adp->ad_freefrag->ff_list); 2109 } 2110 WORKITEM_FREE(adp, D_ALLOCDIRECT); 2111 } 2112 2113 /* 2114 * Prepare an inode to be freed. The actual free operation is not 2115 * done until the zero'ed inode has been written to disk. 2116 */ 2117 void 2118 softdep_freefile(struct vnode *pvp, ino_t ino, int mode) 2119 { 2120 struct inode *ip = VTOI(pvp); 2121 struct inodedep *inodedep; 2122 struct freefile *freefile; 2123 2124 /* 2125 * This sets up the inode de-allocation dependency. 2126 */ 2127 MALLOC(freefile, struct freefile *, sizeof(struct freefile), 2128 M_FREEFILE, M_SOFTDEP_FLAGS); 2129 freefile->fx_list.wk_type = D_FREEFILE; 2130 freefile->fx_list.wk_state = 0; 2131 freefile->fx_mode = mode; 2132 freefile->fx_oldinum = ino; 2133 freefile->fx_devvp = ip->i_devvp; 2134 freefile->fx_fs = ip->i_fs; 2135 2136 /* 2137 * If the inodedep does not exist, then the zero'ed inode has 2138 * been written to disk. If the allocated inode has never been 2139 * written to disk, then the on-disk inode is zero'ed. In either 2140 * case we can free the file immediately. 
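 *
 * Concretely, the test below treats "safe to free now" as: either
 * inodedep_lookup() finds no dependency at all, or
 * check_inode_unwritten() can prove the inode never reached the disk
 * and strips its dependencies. Otherwise the freefile work item is
 * parked on id_inowait until the zero'ed inode block is written.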
2141 */ 2142 ACQUIRE_LOCK(&lk); 2143 if (inodedep_lookup(ip->i_fs, ino, 0, &inodedep) == 0 || 2144 check_inode_unwritten(inodedep)) { 2145 FREE_LOCK(&lk); 2146 handle_workitem_freefile(freefile); 2147 return; 2148 } 2149 WORKLIST_INSERT(&inodedep->id_inowait, &freefile->fx_list); 2150 FREE_LOCK(&lk); 2151 } 2152 2153 /* 2154 * Check to see if an inode has never been written to disk. If 2155 * so free the inodedep and return success, otherwise return failure. 2156 * This routine must be called with splbio interrupts blocked. 2157 * 2158 * If we still have a bitmap dependency, then the inode has never 2159 * been written to disk. Drop the dependency as it is no longer 2160 * necessary since the inode is being deallocated. We set the 2161 * ALLCOMPLETE flags since the bitmap now properly shows that the 2162 * inode is not allocated. Even if the inode is actively being 2163 * written, it has been rolled back to its zero'ed state, so we 2164 * are ensured that a zero inode is what is on the disk. For short 2165 * lived files, this change will usually result in removing all the 2166 * dependencies from the inode so that it can be freed immediately. 2167 */ 2168 static int 2169 check_inode_unwritten(struct inodedep *inodedep) 2170 { 2171 2172 if ((inodedep->id_state & DEPCOMPLETE) != 0 || 2173 LIST_FIRST(&inodedep->id_pendinghd) != NULL || 2174 LIST_FIRST(&inodedep->id_bufwait) != NULL || 2175 LIST_FIRST(&inodedep->id_inowait) != NULL || 2176 TAILQ_FIRST(&inodedep->id_inoupdt) != NULL || 2177 TAILQ_FIRST(&inodedep->id_newinoupdt) != NULL || 2178 inodedep->id_nlinkdelta != 0) 2179 return (0); 2180 2181 /* 2182 * Another process might be in initiate_write_inodeblock 2183 * trying to allocate memory without holding "Softdep Lock". 2184 */ 2185 if ((inodedep->id_state & IOSTARTED) != 0 && 2186 inodedep->id_savedino == NULL) 2187 return(0); 2188 2189 inodedep->id_state |= ALLCOMPLETE; 2190 LIST_REMOVE(inodedep, id_deps); 2191 inodedep->id_buf = NULL; 2192 if (inodedep->id_state & ONWORKLIST) 2193 WORKLIST_REMOVE(&inodedep->id_list); 2194 if (inodedep->id_savedino != NULL) { 2195 FREE(inodedep->id_savedino, M_INODEDEP); 2196 inodedep->id_savedino = NULL; 2197 } 2198 if (free_inodedep(inodedep) == 0) { 2199 FREE_LOCK(&lk); 2200 panic("check_inode_unwritten: busy inode"); 2201 } 2202 return (1); 2203 } 2204 2205 /* 2206 * Try to free an inodedep structure. Return 1 if it could be freed. 2207 */ 2208 static int 2209 free_inodedep(struct inodedep *inodedep) 2210 { 2211 2212 if ((inodedep->id_state & ONWORKLIST) != 0 || 2213 (inodedep->id_state & ALLCOMPLETE) != ALLCOMPLETE || 2214 LIST_FIRST(&inodedep->id_pendinghd) != NULL || 2215 LIST_FIRST(&inodedep->id_bufwait) != NULL || 2216 LIST_FIRST(&inodedep->id_inowait) != NULL || 2217 TAILQ_FIRST(&inodedep->id_inoupdt) != NULL || 2218 TAILQ_FIRST(&inodedep->id_newinoupdt) != NULL || 2219 inodedep->id_nlinkdelta != 0 || inodedep->id_savedino != NULL) 2220 return (0); 2221 LIST_REMOVE(inodedep, id_hash); 2222 WORKITEM_FREE(inodedep, D_INODEDEP); 2223 num_inodedep -= 1; 2224 return (1); 2225 } 2226 2227 /* 2228 * This workitem routine performs the block de-allocation. 2229 * The workitem is added to the pending list after the updated 2230 * inode block has been written to disk. As mentioned above, 2231 * checks regarding the number of blocks de-allocated (compared 2232 * to the number of blocks allocated for the file) are also 2233 * performed in this function. 
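 *
 * The baselbns[] values computed below give the first logical block
 * that each level of indirection covers. As a worked example, with
 * NDADDR == 12 and NINDIR(fs) == 2048 (an assumed 8K-block ufs1
 * filesystem with 4-byte block pointers):
 *
 *	baselbns[0] = 12			(single indirect)
 *	baselbns[1] = 12 + 2048			(double indirect)
 *	baselbns[2] = 12 + 2048 + 2048*2048	(triple indirect)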
*/
2235 static void
2236 handle_workitem_freeblocks(struct freeblks *freeblks)
2237 {
2238 struct inode tip;
2239 ufs_daddr_t bn;
2240 struct fs *fs;
2241 int i, level, bsize;
2242 long nblocks, blocksreleased = 0;
2243 int error, allerror = 0;
2244 ufs_lbn_t baselbns[NIADDR], tmpval;
2245 
2246 tip.i_number = freeblks->fb_previousinum;
2247 tip.i_devvp = freeblks->fb_devvp;
2248 tip.i_dev = freeblks->fb_devvp->v_rdev;
2249 tip.i_fs = freeblks->fb_fs;
2250 tip.i_size = freeblks->fb_oldsize;
2251 tip.i_uid = freeblks->fb_uid;
2252 fs = freeblks->fb_fs;
2253 tmpval = 1;
2254 baselbns[0] = NDADDR;
2255 for (i = 1; i < NIADDR; i++) {
2256 tmpval *= NINDIR(fs);
2257 baselbns[i] = baselbns[i - 1] + tmpval;
2258 }
2259 nblocks = btodb(fs->fs_bsize);
2260 blocksreleased = 0;
2261 /*
2262 * Indirect blocks first.
2263 */
2264 for (level = (NIADDR - 1); level >= 0; level--) {
2265 if ((bn = freeblks->fb_iblks[level]) == 0)
2266 continue;
2267 if ((error = indir_trunc(&tip, fsbtodoff(fs, bn), level,
2268 baselbns[level], &blocksreleased)) != 0)
2269 allerror = error;
2270 ffs_blkfree(&tip, bn, fs->fs_bsize);
2271 blocksreleased += nblocks;
2272 }
2273 /*
2274 * All direct blocks or frags.
2275 */
2276 for (i = (NDADDR - 1); i >= 0; i--) {
2277 if ((bn = freeblks->fb_dblks[i]) == 0)
2278 continue;
2279 bsize = blksize(fs, &tip, i);
2280 ffs_blkfree(&tip, bn, bsize);
2281 blocksreleased += btodb(bsize);
2282 }
2283 
2284 #ifdef DIAGNOSTIC
2285 if (freeblks->fb_chkcnt != blocksreleased)
2286 kprintf("handle_workitem_freeblocks: block count\n");
2287 if (allerror)
2288 softdep_error("handle_workitem_freeblks", allerror);
2289 #endif /* DIAGNOSTIC */
2290 WORKITEM_FREE(freeblks, D_FREEBLKS);
2291 }
2292 
2293 /*
2294 * Release blocks associated with the inode ip and stored in the indirect
2295 * block at doffset. If level is greater than SINGLE, the block is an
2296 * indirect block and recursive calls to indir_trunc must be used to
2297 * cleanse other indirect blocks.
2298 */
2299 static int
2300 indir_trunc(struct inode *ip, off_t doffset, int level, ufs_lbn_t lbn,
2301 long *countp)
2302 {
2303 struct buf *bp;
2304 ufs_daddr_t *bap;
2305 ufs_daddr_t nb;
2306 struct fs *fs;
2307 struct worklist *wk;
2308 struct indirdep *indirdep;
2309 int i, lbnadd, nblocks;
2310 int error, allerror = 0;
2311 
2312 fs = ip->i_fs;
2313 lbnadd = 1;
2314 for (i = level; i > 0; i--)
2315 lbnadd *= NINDIR(fs);
2316 /*
2317 * Get buffer of block pointers to be freed. This routine is not
2318 * called until the zero'ed inode has been written, so it is safe
2319 * to free blocks as they are encountered. Because the inode has
2320 * been zero'ed, calls to bmap on these blocks will fail. So, we
2321 * have to use the on-disk address and the block device for the
2322 * filesystem to look them up. If the file was deleted before its
2323 * indirect blocks were all written to disk, the routine that set
2324 * us up (deallocate_dependencies) will have arranged to leave
2325 * a complete copy of the indirect block in memory for our use.
2326 * Otherwise we have to read the blocks in from the disk.
2327 */
2328 ACQUIRE_LOCK(&lk);
2329 if ((bp = findblk(ip->i_devvp, doffset, FINDBLK_TEST)) != NULL &&
2330 (wk = LIST_FIRST(&bp->b_dep)) != NULL) {
2331 /*
2332 * bp must be ir_savebp, which is held locked for our use.
2333 */ 2334 if (wk->wk_type != D_INDIRDEP || 2335 (indirdep = WK_INDIRDEP(wk))->ir_savebp != bp || 2336 (indirdep->ir_state & GOINGAWAY) == 0) { 2337 FREE_LOCK(&lk); 2338 panic("indir_trunc: lost indirdep"); 2339 } 2340 WORKLIST_REMOVE(wk); 2341 WORKITEM_FREE(indirdep, D_INDIRDEP); 2342 if (LIST_FIRST(&bp->b_dep) != NULL) { 2343 FREE_LOCK(&lk); 2344 panic("indir_trunc: dangling dep"); 2345 } 2346 FREE_LOCK(&lk); 2347 } else { 2348 FREE_LOCK(&lk); 2349 error = bread(ip->i_devvp, doffset, (int)fs->fs_bsize, &bp); 2350 if (error) 2351 return (error); 2352 } 2353 /* 2354 * Recursively free indirect blocks. 2355 */ 2356 bap = (ufs_daddr_t *)bp->b_data; 2357 nblocks = btodb(fs->fs_bsize); 2358 for (i = NINDIR(fs) - 1; i >= 0; i--) { 2359 if ((nb = bap[i]) == 0) 2360 continue; 2361 if (level != 0) { 2362 if ((error = indir_trunc(ip, fsbtodoff(fs, nb), 2363 level - 1, lbn + (i * lbnadd), countp)) != 0) 2364 allerror = error; 2365 } 2366 ffs_blkfree(ip, nb, fs->fs_bsize); 2367 *countp += nblocks; 2368 } 2369 bp->b_flags |= B_INVAL | B_NOCACHE; 2370 brelse(bp); 2371 return (allerror); 2372 } 2373 2374 /* 2375 * Free an allocindir. 2376 * This routine must be called with splbio interrupts blocked. 2377 */ 2378 static void 2379 free_allocindir(struct allocindir *aip, struct inodedep *inodedep) 2380 { 2381 struct freefrag *freefrag; 2382 2383 #ifdef DEBUG 2384 if (lk.lkt_held == NOHOLDER) 2385 panic("free_allocindir: lock not held"); 2386 #endif 2387 if ((aip->ai_state & DEPCOMPLETE) == 0) 2388 LIST_REMOVE(aip, ai_deps); 2389 if (aip->ai_state & ONWORKLIST) 2390 WORKLIST_REMOVE(&aip->ai_list); 2391 LIST_REMOVE(aip, ai_next); 2392 if ((freefrag = aip->ai_freefrag) != NULL) { 2393 if (inodedep == NULL) 2394 add_to_worklist(&freefrag->ff_list); 2395 else 2396 WORKLIST_INSERT(&inodedep->id_bufwait, 2397 &freefrag->ff_list); 2398 } 2399 WORKITEM_FREE(aip, D_ALLOCINDIR); 2400 } 2401 2402 /* 2403 * Directory entry addition dependencies. 2404 * 2405 * When adding a new directory entry, the inode (with its incremented link 2406 * count) must be written to disk before the directory entry's pointer to it. 2407 * Also, if the inode is newly allocated, the corresponding freemap must be 2408 * updated (on disk) before the directory entry's pointer. These requirements 2409 * are met via undo/redo on the directory entry's pointer, which consists 2410 * simply of the inode number. 2411 * 2412 * As directory entries are added and deleted, the free space within a 2413 * directory block can become fragmented. The ufs filesystem will compact 2414 * a fragmented directory block to make space for a new entry. When this 2415 * occurs, the offsets of previously added entries change. Any "diradd" 2416 * dependency structures corresponding to these entries must be updated with 2417 * the new offsets. 2418 */ 2419 2420 /* 2421 * This routine is called after the in-memory inode's link 2422 * count has been incremented, but before the directory entry's 2423 * pointer to the inode has been set. 
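 *
 * A hedged sketch of the caller's ordering (schematic; the real call
 * site is the directory-entry code, ufs_direnter-style, and may differ
 * in detail):
 */
#if 0	/* illustrative sketch only -- not compiled */
	ip->i_nlink++;
	ip->i_effnlink++;		/* new reference taken in-core      */
	softdep_change_linkcnt(ip);	/* inode must reach the disk first  */
	softdep_setup_directory_add(bp, dp, dp->i_offset, ip->i_number,
	    newdirbp);			/* then stage the entry itself      */
	bdwrite(bp);
#endif
/*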
2424 * 2425 * Parameters: 2426 * bp: buffer containing directory block 2427 * dp: inode for directory 2428 * diroffset: offset of new entry in directory 2429 * newinum: inode referenced by new directory entry 2430 * newdirbp: non-NULL => contents of new mkdir 2431 */ 2432 void 2433 softdep_setup_directory_add(struct buf *bp, struct inode *dp, off_t diroffset, 2434 ino_t newinum, struct buf *newdirbp) 2435 { 2436 int offset; /* offset of new entry within directory block */ 2437 ufs_lbn_t lbn; /* block in directory containing new entry */ 2438 struct fs *fs; 2439 struct diradd *dap; 2440 struct pagedep *pagedep; 2441 struct inodedep *inodedep; 2442 struct mkdir *mkdir1, *mkdir2; 2443 2444 /* 2445 * Whiteouts have no dependencies. 2446 */ 2447 if (newinum == WINO) { 2448 if (newdirbp != NULL) 2449 bdwrite(newdirbp); 2450 return; 2451 } 2452 2453 fs = dp->i_fs; 2454 lbn = lblkno(fs, diroffset); 2455 offset = blkoff(fs, diroffset); 2456 MALLOC(dap, struct diradd *, sizeof(struct diradd), M_DIRADD, 2457 M_SOFTDEP_FLAGS | M_ZERO); 2458 dap->da_list.wk_type = D_DIRADD; 2459 dap->da_offset = offset; 2460 dap->da_newinum = newinum; 2461 dap->da_state = ATTACHED; 2462 if (newdirbp == NULL) { 2463 dap->da_state |= DEPCOMPLETE; 2464 ACQUIRE_LOCK(&lk); 2465 } else { 2466 dap->da_state |= MKDIR_BODY | MKDIR_PARENT; 2467 MALLOC(mkdir1, struct mkdir *, sizeof(struct mkdir), M_MKDIR, 2468 M_SOFTDEP_FLAGS); 2469 mkdir1->md_list.wk_type = D_MKDIR; 2470 mkdir1->md_state = MKDIR_BODY; 2471 mkdir1->md_diradd = dap; 2472 MALLOC(mkdir2, struct mkdir *, sizeof(struct mkdir), M_MKDIR, 2473 M_SOFTDEP_FLAGS); 2474 mkdir2->md_list.wk_type = D_MKDIR; 2475 mkdir2->md_state = MKDIR_PARENT; 2476 mkdir2->md_diradd = dap; 2477 /* 2478 * Dependency on "." and ".." being written to disk. 2479 */ 2480 mkdir1->md_buf = newdirbp; 2481 ACQUIRE_LOCK(&lk); 2482 LIST_INSERT_HEAD(&mkdirlisthd, mkdir1, md_mkdirs); 2483 WORKLIST_INSERT_BP(newdirbp, &mkdir1->md_list); 2484 FREE_LOCK(&lk); 2485 bdwrite(newdirbp); 2486 /* 2487 * Dependency on link count increase for parent directory 2488 */ 2489 ACQUIRE_LOCK(&lk); 2490 if (inodedep_lookup(dp->i_fs, dp->i_number, 0, &inodedep) == 0 2491 || (inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) { 2492 dap->da_state &= ~MKDIR_PARENT; 2493 WORKITEM_FREE(mkdir2, D_MKDIR); 2494 } else { 2495 LIST_INSERT_HEAD(&mkdirlisthd, mkdir2, md_mkdirs); 2496 WORKLIST_INSERT(&inodedep->id_bufwait,&mkdir2->md_list); 2497 } 2498 } 2499 /* 2500 * Link into parent directory pagedep to await its being written. 2501 */ 2502 if (pagedep_lookup(dp, lbn, DEPALLOC, &pagedep) == 0) 2503 WORKLIST_INSERT_BP(bp, &pagedep->pd_list); 2504 dap->da_pagedep = pagedep; 2505 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)], dap, 2506 da_pdlist); 2507 /* 2508 * Link into its inodedep. Put it on the id_bufwait list if the inode 2509 * is not yet written. If it is written, do the post-inode write 2510 * processing to put it on the id_pendinghd list. 2511 */ 2512 (void) inodedep_lookup(fs, newinum, DEPALLOC, &inodedep); 2513 if ((inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) 2514 diradd_inode_written(dap, inodedep); 2515 else 2516 WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list); 2517 FREE_LOCK(&lk); 2518 } 2519 2520 /* 2521 * This procedure is called to change the offset of a directory 2522 * entry when compacting a directory block which must be owned 2523 * exclusively by the caller. 
Note that the actual entry movement 2524 * must be done in this procedure to ensure that no I/O completions 2525 * occur while the move is in progress. 2526 * 2527 * Parameters: 2528 * dp: inode for directory 2529 * base: address of dp->i_offset 2530 * oldloc: address of old directory location 2531 * newloc: address of new directory location 2532 * entrysize: size of directory entry 2533 */ 2534 void 2535 softdep_change_directoryentry_offset(struct inode *dp, caddr_t base, 2536 caddr_t oldloc, caddr_t newloc, 2537 int entrysize) 2538 { 2539 int offset, oldoffset, newoffset; 2540 struct pagedep *pagedep; 2541 struct diradd *dap; 2542 ufs_lbn_t lbn; 2543 2544 ACQUIRE_LOCK(&lk); 2545 lbn = lblkno(dp->i_fs, dp->i_offset); 2546 offset = blkoff(dp->i_fs, dp->i_offset); 2547 if (pagedep_lookup(dp, lbn, 0, &pagedep) == 0) 2548 goto done; 2549 oldoffset = offset + (oldloc - base); 2550 newoffset = offset + (newloc - base); 2551 2552 LIST_FOREACH(dap, &pagedep->pd_diraddhd[DIRADDHASH(oldoffset)], da_pdlist) { 2553 if (dap->da_offset != oldoffset) 2554 continue; 2555 dap->da_offset = newoffset; 2556 if (DIRADDHASH(newoffset) == DIRADDHASH(oldoffset)) 2557 break; 2558 LIST_REMOVE(dap, da_pdlist); 2559 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(newoffset)], 2560 dap, da_pdlist); 2561 break; 2562 } 2563 if (dap == NULL) { 2564 2565 LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist) { 2566 if (dap->da_offset == oldoffset) { 2567 dap->da_offset = newoffset; 2568 break; 2569 } 2570 } 2571 } 2572 done: 2573 bcopy(oldloc, newloc, entrysize); 2574 FREE_LOCK(&lk); 2575 } 2576 2577 /* 2578 * Free a diradd dependency structure. This routine must be called 2579 * with splbio interrupts blocked. 2580 */ 2581 static void 2582 free_diradd(struct diradd *dap) 2583 { 2584 struct dirrem *dirrem; 2585 struct pagedep *pagedep; 2586 struct inodedep *inodedep; 2587 struct mkdir *mkdir, *nextmd; 2588 2589 #ifdef DEBUG 2590 if (lk.lkt_held == NOHOLDER) 2591 panic("free_diradd: lock not held"); 2592 #endif 2593 WORKLIST_REMOVE(&dap->da_list); 2594 LIST_REMOVE(dap, da_pdlist); 2595 if ((dap->da_state & DIRCHG) == 0) { 2596 pagedep = dap->da_pagedep; 2597 } else { 2598 dirrem = dap->da_previous; 2599 pagedep = dirrem->dm_pagedep; 2600 dirrem->dm_dirinum = pagedep->pd_ino; 2601 add_to_worklist(&dirrem->dm_list); 2602 } 2603 if (inodedep_lookup(VFSTOUFS(pagedep->pd_mnt)->um_fs, dap->da_newinum, 2604 0, &inodedep) != 0) 2605 (void) free_inodedep(inodedep); 2606 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) { 2607 for (mkdir = LIST_FIRST(&mkdirlisthd); mkdir; mkdir = nextmd) { 2608 nextmd = LIST_NEXT(mkdir, md_mkdirs); 2609 if (mkdir->md_diradd != dap) 2610 continue; 2611 dap->da_state &= ~mkdir->md_state; 2612 WORKLIST_REMOVE(&mkdir->md_list); 2613 LIST_REMOVE(mkdir, md_mkdirs); 2614 WORKITEM_FREE(mkdir, D_MKDIR); 2615 } 2616 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) { 2617 FREE_LOCK(&lk); 2618 panic("free_diradd: unfound ref"); 2619 } 2620 } 2621 WORKITEM_FREE(dap, D_DIRADD); 2622 } 2623 2624 /* 2625 * Directory entry removal dependencies. 2626 * 2627 * When removing a directory entry, the entry's inode pointer must be 2628 * zero'ed on disk before the corresponding inode's link count is decremented 2629 * (possibly freeing the inode for re-use). This dependency is handled by 2630 * updating the directory entry but delaying the inode count reduction until 2631 * after the directory block has been written to disk. 
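 *
 * Schematically (hypothetical names; the deferral is what
 * handle_workitem_remove below implements):
 */
#if 0	/* illustrative sketch only -- not compiled */
	ep->d_ino = 0;		/* 1. entry cleared in the directory block   */
	bdwrite(dirbp);		/* 2. block queued; must reach the disk      */
	ip->i_nlink--;		/* 3. done later, in the post-write workitem */
#endif
/*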
After this point, the 2632 * inode count can be decremented whenever it is convenient. 2633 */ 2634 2635 /* 2636 * This routine should be called immediately after removing 2637 * a directory entry. The inode's link count should not be 2638 * decremented by the calling procedure -- the soft updates 2639 * code will do this task when it is safe. 2640 * 2641 * Parameters: 2642 * bp: buffer containing directory block 2643 * dp: inode for the directory being modified 2644 * ip: inode for directory entry being removed 2645 * isrmdir: indicates if doing RMDIR 2646 */ 2647 void 2648 softdep_setup_remove(struct buf *bp, struct inode *dp, struct inode *ip, 2649 int isrmdir) 2650 { 2651 struct dirrem *dirrem, *prevdirrem; 2652 2653 /* 2654 * Allocate a new dirrem if appropriate and ACQUIRE_LOCK. 2655 */ 2656 dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem); 2657 2658 /* 2659 * If the COMPLETE flag is clear, then there were no active 2660 * entries and we want to roll back to a zeroed entry until 2661 * the new inode is committed to disk. If the COMPLETE flag is 2662 * set then we have deleted an entry that never made it to 2663 * disk. If the entry we deleted resulted from a name change, 2664 * then the old name still resides on disk. We cannot delete 2665 * its inode (returned to us in prevdirrem) until the zeroed 2666 * directory entry gets to disk. The new inode has never been 2667 * referenced on the disk, so can be deleted immediately. 2668 */ 2669 if ((dirrem->dm_state & COMPLETE) == 0) { 2670 LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd, dirrem, 2671 dm_next); 2672 FREE_LOCK(&lk); 2673 } else { 2674 if (prevdirrem != NULL) 2675 LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd, 2676 prevdirrem, dm_next); 2677 dirrem->dm_dirinum = dirrem->dm_pagedep->pd_ino; 2678 FREE_LOCK(&lk); 2679 handle_workitem_remove(dirrem); 2680 } 2681 } 2682 2683 /* 2684 * Allocate a new dirrem if appropriate and return it along with 2685 * its associated pagedep. Called without a lock, returns with lock. 2686 */ 2687 static long num_dirrem; /* number of dirrem allocated */ 2688 2689 /* 2690 * Parameters: 2691 * bp: buffer containing directory block 2692 * dp: inode for the directory being modified 2693 * ip: inode for directory entry being removed 2694 * isrmdir: indicates if doing RMDIR 2695 * prevdirremp: previously referenced inode, if any 2696 */ 2697 static struct dirrem * 2698 newdirrem(struct buf *bp, struct inode *dp, struct inode *ip, 2699 int isrmdir, struct dirrem **prevdirremp) 2700 { 2701 int offset; 2702 ufs_lbn_t lbn; 2703 struct diradd *dap; 2704 struct dirrem *dirrem; 2705 struct pagedep *pagedep; 2706 2707 /* 2708 * Whiteouts have no deletion dependencies. 2709 */ 2710 if (ip == NULL) 2711 panic("newdirrem: whiteout"); 2712 /* 2713 * If we are over our limit, try to improve the situation. 2714 * Limiting the number of dirrem structures will also limit 2715 * the number of freefile and freeblks structures. 2716 */ 2717 if (num_dirrem > max_softdeps / 2 && speedup_syncer() == 0) 2718 (void) request_cleanup(FLUSH_REMOVE, 0); 2719 num_dirrem += 1; 2720 MALLOC(dirrem, struct dirrem *, sizeof(struct dirrem), 2721 M_DIRREM, M_SOFTDEP_FLAGS | M_ZERO); 2722 dirrem->dm_list.wk_type = D_DIRREM; 2723 dirrem->dm_state = isrmdir ? 
RMDIR : 0;
2724 dirrem->dm_mnt = ITOV(ip)->v_mount;
2725 dirrem->dm_oldinum = ip->i_number;
2726 *prevdirremp = NULL;
2727 
2728 ACQUIRE_LOCK(&lk);
2729 lbn = lblkno(dp->i_fs, dp->i_offset);
2730 offset = blkoff(dp->i_fs, dp->i_offset);
2731 if (pagedep_lookup(dp, lbn, DEPALLOC, &pagedep) == 0)
2732 WORKLIST_INSERT_BP(bp, &pagedep->pd_list);
2733 dirrem->dm_pagedep = pagedep;
2734 /*
2735 * Check for a diradd dependency for the same directory entry.
2736 * If present, then both dependencies become obsolete and can
2737 * be de-allocated. Check for an entry on both the pd_diraddhd
2738 * list and the pd_pendinghd list.
2739 */
2740 
2741 LIST_FOREACH(dap, &pagedep->pd_diraddhd[DIRADDHASH(offset)], da_pdlist)
2742 if (dap->da_offset == offset)
2743 break;
2744 if (dap == NULL) {
2745 
2746 LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist)
2747 if (dap->da_offset == offset)
2748 break;
2749 if (dap == NULL)
2750 return (dirrem);
2751 }
2752 /*
2753 * Must be ATTACHED at this point.
2754 */
2755 if ((dap->da_state & ATTACHED) == 0) {
2756 FREE_LOCK(&lk);
2757 panic("newdirrem: not ATTACHED");
2758 }
2759 if (dap->da_newinum != ip->i_number) {
2760 FREE_LOCK(&lk);
2761 panic("newdirrem: inum %"PRId64" should be %"PRId64,
2762 ip->i_number, dap->da_newinum);
2763 }
2764 /*
2765 * If we are deleting a changed name that never made it to disk,
2766 * then return the dirrem describing the previous inode (which
2767 * represents the inode currently referenced from this entry on disk).
2768 */
2769 if ((dap->da_state & DIRCHG) != 0) {
2770 *prevdirremp = dap->da_previous;
2771 dap->da_state &= ~DIRCHG;
2772 dap->da_pagedep = pagedep;
2773 }
2774 /*
2775 * We are deleting an entry that never made it to disk.
2776 * Mark it COMPLETE so we can delete its inode immediately.
2777 */
2778 dirrem->dm_state |= COMPLETE;
2779 free_diradd(dap);
2780 return (dirrem);
2781 }
2782 
2783 /*
2784 * Directory entry change dependencies.
2785 *
2786 * Changing an existing directory entry requires that an add operation
2787 * be completed first followed by a deletion. The semantics for the addition
2788 * are identical to the description of adding a new entry above except
2789 * that the rollback is to the old inode number rather than zero. Once
2790 * the addition dependency is completed, the removal is done as described
2791 * in the removal routine above.
2792 */
2793 
2794 /*
2795 * This routine should be called immediately after changing
2796 * a directory entry. The inode's link count should not be
2797 * decremented by the calling procedure -- the soft updates
2798 * code will perform this task when it is safe.
2799 *
2800 * Parameters:
2801 * bp: buffer containing directory block
2802 * dp: inode for the directory being modified
2803 * ip: inode for directory entry being removed
2804 * newinum: new inode number for changed entry
2805 * isrmdir: indicates if doing RMDIR
2806 */
2807 void
2808 softdep_setup_directory_change(struct buf *bp, struct inode *dp,
2809 struct inode *ip, ino_t newinum,
2810 int isrmdir)
2811 {
2812 int offset;
2813 struct diradd *dap = NULL;
2814 struct dirrem *dirrem, *prevdirrem;
2815 struct pagedep *pagedep;
2816 struct inodedep *inodedep;
2817 
2818 offset = blkoff(dp->i_fs, dp->i_offset);
2819 
2820 /*
2821 * Whiteouts do not need diradd dependencies.
2822 */ 2823 if (newinum != WINO) { 2824 MALLOC(dap, struct diradd *, sizeof(struct diradd), 2825 M_DIRADD, M_SOFTDEP_FLAGS | M_ZERO); 2826 dap->da_list.wk_type = D_DIRADD; 2827 dap->da_state = DIRCHG | ATTACHED | DEPCOMPLETE; 2828 dap->da_offset = offset; 2829 dap->da_newinum = newinum; 2830 } 2831 2832 /* 2833 * Allocate a new dirrem and ACQUIRE_LOCK. 2834 */ 2835 dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem); 2836 pagedep = dirrem->dm_pagedep; 2837 /* 2838 * The possible values for isrmdir: 2839 * 0 - non-directory file rename 2840 * 1 - directory rename within same directory 2841 * inum - directory rename to new directory of given inode number 2842 * When renaming to a new directory, we are both deleting and 2843 * creating a new directory entry, so the link count on the new 2844 * directory should not change. Thus we do not need the followup 2845 * dirrem which is usually done in handle_workitem_remove. We set 2846 * the DIRCHG flag to tell handle_workitem_remove to skip the 2847 * followup dirrem. 2848 */ 2849 if (isrmdir > 1) 2850 dirrem->dm_state |= DIRCHG; 2851 2852 /* 2853 * Whiteouts have no additional dependencies, 2854 * so just put the dirrem on the correct list. 2855 */ 2856 if (newinum == WINO) { 2857 if ((dirrem->dm_state & COMPLETE) == 0) { 2858 LIST_INSERT_HEAD(&pagedep->pd_dirremhd, dirrem, 2859 dm_next); 2860 } else { 2861 dirrem->dm_dirinum = pagedep->pd_ino; 2862 add_to_worklist(&dirrem->dm_list); 2863 } 2864 FREE_LOCK(&lk); 2865 return; 2866 } 2867 2868 /* 2869 * If the COMPLETE flag is clear, then there were no active 2870 * entries and we want to roll back to the previous inode until 2871 * the new inode is committed to disk. If the COMPLETE flag is 2872 * set, then we have deleted an entry that never made it to disk. 2873 * If the entry we deleted resulted from a name change, then the old 2874 * inode reference still resides on disk. Any rollback that we do 2875 * needs to be to that old inode (returned to us in prevdirrem). If 2876 * the entry we deleted resulted from a create, then there is 2877 * no entry on the disk, so we want to roll back to zero rather 2878 * than the uncommitted inode. In either of the COMPLETE cases we 2879 * want to immediately free the unwritten and unreferenced inode. 2880 */ 2881 if ((dirrem->dm_state & COMPLETE) == 0) { 2882 dap->da_previous = dirrem; 2883 } else { 2884 if (prevdirrem != NULL) { 2885 dap->da_previous = prevdirrem; 2886 } else { 2887 dap->da_state &= ~DIRCHG; 2888 dap->da_pagedep = pagedep; 2889 } 2890 dirrem->dm_dirinum = pagedep->pd_ino; 2891 add_to_worklist(&dirrem->dm_list); 2892 } 2893 /* 2894 * Link into its inodedep. Put it on the id_bufwait list if the inode 2895 * is not yet written. If it is written, do the post-inode write 2896 * processing to put it on the id_pendinghd list. 2897 */ 2898 if (inodedep_lookup(dp->i_fs, newinum, DEPALLOC, &inodedep) == 0 || 2899 (inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) { 2900 dap->da_state |= COMPLETE; 2901 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist); 2902 WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list); 2903 } else { 2904 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)], 2905 dap, da_pdlist); 2906 WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list); 2907 } 2908 FREE_LOCK(&lk); 2909 } 2910 2911 /* 2912 * Called whenever the link count on an inode is changed. 2913 * It creates an inode dependency so that the new reference(s) 2914 * to the inode cannot be committed to disk until the updated 2915 * inode has been written. 
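 *
 * Example (an assumed unlink of one of two names for a file): the
 * caller drops i_effnlink to 1 immediately, while i_nlink stays 2
 * until the cleared directory block is safely on disk, so the
 * id_nlinkdelta recorded below is the pending decrement (2 - 1 == 1).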
*
2917 * Parameters:
2918 * ip: the inode with the increased link count
2919 */
2920 void
2921 softdep_change_linkcnt(struct inode *ip)
2922 {
2923 struct inodedep *inodedep;
2924 
2925 ACQUIRE_LOCK(&lk);
2926 (void) inodedep_lookup(ip->i_fs, ip->i_number, DEPALLOC, &inodedep);
2927 if (ip->i_nlink < ip->i_effnlink) {
2928 FREE_LOCK(&lk);
2929 panic("softdep_change_linkcnt: bad delta");
2930 }
2931 inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
2932 FREE_LOCK(&lk);
2933 }
2934 
2935 /*
2936 * This workitem decrements the inode's link count.
2937 * If the link count reaches zero, the file is removed.
2938 */
2939 static void
2940 handle_workitem_remove(struct dirrem *dirrem)
2941 {
2942 struct inodedep *inodedep;
2943 struct vnode *vp;
2944 struct inode *ip;
2945 ino_t oldinum;
2946 int error;
2947 
2948 error = VFS_VGET(dirrem->dm_mnt, NULL, dirrem->dm_oldinum, &vp);
2949 if (error) {
2950 softdep_error("handle_workitem_remove: vget", error);
2951 return;
2952 }
2953 ip = VTOI(vp);
2954 ACQUIRE_LOCK(&lk);
2955 if ((inodedep_lookup(ip->i_fs, dirrem->dm_oldinum, 0, &inodedep)) == 0){
2956 FREE_LOCK(&lk);
2957 panic("handle_workitem_remove: lost inodedep");
2958 }
2959 /*
2960 * Normal file deletion.
2961 */
2962 if ((dirrem->dm_state & RMDIR) == 0) {
2963 ip->i_nlink--;
2964 ip->i_flag |= IN_CHANGE;
2965 if (ip->i_nlink < ip->i_effnlink) {
2966 FREE_LOCK(&lk);
2967 panic("handle_workitem_remove: bad file delta");
2968 }
2969 inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
2970 FREE_LOCK(&lk);
2971 vput(vp);
2972 num_dirrem -= 1;
2973 WORKITEM_FREE(dirrem, D_DIRREM);
2974 return;
2975 }
2976 /*
2977 * Directory deletion. Decrement reference count for both the
2978 * just deleted parent directory entry and the reference for ".".
2979 * Next truncate the directory to length zero. When the
2980 * truncation completes, arrange to have the reference count on
2981 * the parent decremented to account for the loss of "..".
2982 */
2983 ip->i_nlink -= 2;
2984 ip->i_flag |= IN_CHANGE;
2985 if (ip->i_nlink < ip->i_effnlink) {
2986 FREE_LOCK(&lk);
2987 panic("handle_workitem_remove: bad dir delta");
2988 }
2989 inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
2990 FREE_LOCK(&lk);
2991 if ((error = ffs_truncate(vp, (off_t)0, 0, proc0.p_ucred)) != 0)
2992 softdep_error("handle_workitem_remove: truncate", error);
2993 /*
2994 * Rename a directory to a new parent. Since we are both deleting
2995 * and creating a new directory entry, the link count on the new
2996 * directory should not change. Thus we skip the followup dirrem.
2997 */
2998 if (dirrem->dm_state & DIRCHG) {
2999 vput(vp);
3000 num_dirrem -= 1;
3001 WORKITEM_FREE(dirrem, D_DIRREM);
3002 return;
3003 }
3004 /*
3005 * If the inodedep does not exist, then the zero'ed inode has
3006 * been written to disk. If the allocated inode has never been
3007 * written to disk, then the on-disk inode is zero'ed. In either
3008 * case we can remove the file immediately.
3009 */
3010 ACQUIRE_LOCK(&lk);
3011 dirrem->dm_state = 0;
3012 oldinum = dirrem->dm_oldinum;
3013 dirrem->dm_oldinum = dirrem->dm_dirinum;
3014 if (inodedep_lookup(ip->i_fs, oldinum, 0, &inodedep) == 0 ||
3015 check_inode_unwritten(inodedep)) {
3016 FREE_LOCK(&lk);
3017 vput(vp);
3018 handle_workitem_remove(dirrem);
3019 return;
3020 }
3021 WORKLIST_INSERT(&inodedep->id_inowait, &dirrem->dm_list);
3022 FREE_LOCK(&lk);
3023 ip->i_flag |= IN_CHANGE;
3024 ffs_update(vp, 0);
3025 vput(vp);
3026 }
3027 
3028 /*
3029 * Inode de-allocation dependencies.
3030 * 3031 * When an inode's link count is reduced to zero, it can be de-allocated. We 3032 * found it convenient to postpone de-allocation until after the inode is 3033 * written to disk with its new link count (zero). At this point, all of the 3034 * on-disk inode's block pointers are nullified and, with careful dependency 3035 * list ordering, all dependencies related to the inode will be satisfied and 3036 * the corresponding dependency structures de-allocated. So, if/when the 3037 * inode is reused, there will be no mixing of old dependencies with new 3038 * ones. This artificial dependency is set up by the block de-allocation 3039 * procedure above (softdep_setup_freeblocks) and completed by the 3040 * following procedure. 3041 */ 3042 static void 3043 handle_workitem_freefile(struct freefile *freefile) 3044 { 3045 struct vnode vp; 3046 struct inode tip; 3047 struct inodedep *idp; 3048 int error; 3049 3050 #ifdef DEBUG 3051 ACQUIRE_LOCK(&lk); 3052 error = inodedep_lookup(freefile->fx_fs, freefile->fx_oldinum, 0, &idp); 3053 FREE_LOCK(&lk); 3054 if (error) 3055 panic("handle_workitem_freefile: inodedep survived"); 3056 #endif 3057 tip.i_devvp = freefile->fx_devvp; 3058 tip.i_dev = freefile->fx_devvp->v_rdev; 3059 tip.i_fs = freefile->fx_fs; 3060 vp.v_data = &tip; 3061 if ((error = ffs_freefile(&vp, freefile->fx_oldinum, freefile->fx_mode)) != 0) 3062 softdep_error("handle_workitem_freefile", error); 3063 WORKITEM_FREE(freefile, D_FREEFILE); 3064 } 3065 3066 /* 3067 * Helper function which unlinks marker element from work list and returns 3068 * the next element on the list. 3069 */ 3070 static __inline struct worklist * 3071 markernext(struct worklist *marker) 3072 { 3073 struct worklist *next; 3074 3075 next = LIST_NEXT(marker, wk_list); 3076 LIST_REMOVE(marker, wk_list); 3077 return next; 3078 } 3079 3080 /* 3081 * checkread, checkwrite 3082 * 3083 * bioops callback - hold io_token 3084 */ 3085 static int 3086 softdep_checkread(struct buf *bp) 3087 { 3088 /* nothing to do, mp lock not needed */ 3089 return(0); 3090 } 3091 3092 /* 3093 * bioops callback - hold io_token 3094 */ 3095 static int 3096 softdep_checkwrite(struct buf *bp) 3097 { 3098 /* nothing to do, mp lock not needed */ 3099 return(0); 3100 } 3101 3102 /* 3103 * Disk writes. 3104 * 3105 * The dependency structures constructed above are most actively used when file 3106 * system blocks are written to disk. No constraints are placed on when a 3107 * block can be written, but unsatisfied update dependencies are made safe by 3108 * modifying (or replacing) the source memory for the duration of the disk 3109 * write. When the disk write completes, the memory block is again brought 3110 * up-to-date. 3111 * 3112 * In-core inode structure reclamation. 3113 * 3114 * Because there are a finite number of "in-core" inode structures, they are 3115 * reused regularly. By transferring all inode-related dependencies to the 3116 * in-memory inode block and indexing them separately (via "inodedep"s), we 3117 * can allow "in-core" inode structures to be reused at any time and avoid 3118 * any increase in contention. 3119 * 3120 * Called just before entering the device driver to initiate a new disk I/O. 3121 * The buffer must be locked, thus, no I/O completion operations can occur 3122 * while we are manipulating its associated dependencies. 
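 *
 * The full write cycle for a buffer carrying unsatisfied dependencies
 * is, schematically:
 */
#if 0	/* illustrative sketch only -- not compiled */
	softdep_disk_io_initiation(bp);	 /* roll back: safe data in b_data  */
	/* ... the driver performs the write ... */
	softdep_disk_write_complete(bp); /* roll forward: restore the up-   */
					 /* to-date data, re-dirty if needed */
#endif
/*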
3123 * 3124 * bioops callback - hold io_token 3125 * 3126 * Parameters: 3127 * bp: structure describing disk write to occur 3128 */ 3129 static void 3130 softdep_disk_io_initiation(struct buf *bp) 3131 { 3132 struct worklist *wk; 3133 struct worklist marker; 3134 struct indirdep *indirdep; 3135 3136 /* 3137 * We only care about write operations. There should never 3138 * be dependencies for reads. 3139 */ 3140 if (bp->b_cmd == BUF_CMD_READ) 3141 panic("softdep_disk_io_initiation: read"); 3142 3143 get_mplock(); 3144 marker.wk_type = D_LAST + 1; /* Not a normal workitem */ 3145 3146 /* 3147 * Do any necessary pre-I/O processing. 3148 */ 3149 for (wk = LIST_FIRST(&bp->b_dep); wk; wk = markernext(&marker)) { 3150 LIST_INSERT_AFTER(wk, &marker, wk_list); 3151 3152 switch (wk->wk_type) { 3153 case D_PAGEDEP: 3154 initiate_write_filepage(WK_PAGEDEP(wk), bp); 3155 continue; 3156 3157 case D_INODEDEP: 3158 initiate_write_inodeblock(WK_INODEDEP(wk), bp); 3159 continue; 3160 3161 case D_INDIRDEP: 3162 indirdep = WK_INDIRDEP(wk); 3163 if (indirdep->ir_state & GOINGAWAY) 3164 panic("disk_io_initiation: indirdep gone"); 3165 /* 3166 * If there are no remaining dependencies, this 3167 * will be writing the real pointers, so the 3168 * dependency can be freed. 3169 */ 3170 if (LIST_FIRST(&indirdep->ir_deplisthd) == NULL) { 3171 indirdep->ir_savebp->b_flags |= B_INVAL | B_NOCACHE; 3172 brelse(indirdep->ir_savebp); 3173 /* inline expand WORKLIST_REMOVE(wk); */ 3174 wk->wk_state &= ~ONWORKLIST; 3175 LIST_REMOVE(wk, wk_list); 3176 WORKITEM_FREE(indirdep, D_INDIRDEP); 3177 continue; 3178 } 3179 /* 3180 * Replace up-to-date version with safe version. 3181 */ 3182 MALLOC(indirdep->ir_saveddata, caddr_t, bp->b_bcount, 3183 M_INDIRDEP, M_SOFTDEP_FLAGS); 3184 ACQUIRE_LOCK(&lk); 3185 indirdep->ir_state &= ~ATTACHED; 3186 indirdep->ir_state |= UNDONE; 3187 bcopy(bp->b_data, indirdep->ir_saveddata, bp->b_bcount); 3188 bcopy(indirdep->ir_savebp->b_data, bp->b_data, 3189 bp->b_bcount); 3190 FREE_LOCK(&lk); 3191 continue; 3192 3193 case D_MKDIR: 3194 case D_BMSAFEMAP: 3195 case D_ALLOCDIRECT: 3196 case D_ALLOCINDIR: 3197 continue; 3198 3199 default: 3200 panic("handle_disk_io_initiation: Unexpected type %s", 3201 TYPENAME(wk->wk_type)); 3202 /* NOTREACHED */ 3203 } 3204 } 3205 rel_mplock(); 3206 } 3207 3208 /* 3209 * Called from within the procedure above to deal with unsatisfied 3210 * allocation dependencies in a directory. The buffer must be locked, 3211 * thus, no I/O completion operations can occur while we are 3212 * manipulating its associated dependencies. 3213 */ 3214 static void 3215 initiate_write_filepage(struct pagedep *pagedep, struct buf *bp) 3216 { 3217 struct diradd *dap; 3218 struct direct *ep; 3219 int i; 3220 3221 if (pagedep->pd_state & IOSTARTED) { 3222 /* 3223 * This can only happen if there is a driver that does not 3224 * understand chaining. Here biodone will reissue the call 3225 * to strategy for the incomplete buffers. 
3226 */ 3227 kprintf("initiate_write_filepage: already started\n"); 3228 return; 3229 } 3230 pagedep->pd_state |= IOSTARTED; 3231 ACQUIRE_LOCK(&lk); 3232 for (i = 0; i < DAHASHSZ; i++) { 3233 LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) { 3234 ep = (struct direct *) 3235 ((char *)bp->b_data + dap->da_offset); 3236 if (ep->d_ino != dap->da_newinum) { 3237 FREE_LOCK(&lk); 3238 panic("%s: dir inum %d != new %"PRId64, 3239 "initiate_write_filepage", 3240 ep->d_ino, dap->da_newinum); 3241 } 3242 if (dap->da_state & DIRCHG) 3243 ep->d_ino = dap->da_previous->dm_oldinum; 3244 else 3245 ep->d_ino = 0; 3246 dap->da_state &= ~ATTACHED; 3247 dap->da_state |= UNDONE; 3248 } 3249 } 3250 FREE_LOCK(&lk); 3251 } 3252 3253 /* 3254 * Called from within the procedure above to deal with unsatisfied 3255 * allocation dependencies in an inodeblock. The buffer must be 3256 * locked, thus, no I/O completion operations can occur while we 3257 * are manipulating its associated dependencies. 3258 * 3259 * Parameters: 3260 * bp: The inode block 3261 */ 3262 static void 3263 initiate_write_inodeblock(struct inodedep *inodedep, struct buf *bp) 3264 { 3265 struct allocdirect *adp, *lastadp; 3266 struct ufs1_dinode *dp; 3267 struct ufs1_dinode *sip; 3268 struct fs *fs; 3269 ufs_lbn_t prevlbn = 0; 3270 int i, deplist; 3271 3272 if (inodedep->id_state & IOSTARTED) 3273 panic("initiate_write_inodeblock: already started"); 3274 inodedep->id_state |= IOSTARTED; 3275 fs = inodedep->id_fs; 3276 dp = (struct ufs1_dinode *)bp->b_data + 3277 ino_to_fsbo(fs, inodedep->id_ino); 3278 /* 3279 * If the bitmap is not yet written, then the allocated 3280 * inode cannot be written to disk. 3281 */ 3282 if ((inodedep->id_state & DEPCOMPLETE) == 0) { 3283 if (inodedep->id_savedino != NULL) 3284 panic("initiate_write_inodeblock: already doing I/O"); 3285 MALLOC(sip, struct ufs1_dinode *, 3286 sizeof(struct ufs1_dinode), M_INODEDEP, M_SOFTDEP_FLAGS); 3287 inodedep->id_savedino = sip; 3288 *inodedep->id_savedino = *dp; 3289 bzero((caddr_t)dp, sizeof(struct ufs1_dinode)); 3290 dp->di_gen = inodedep->id_savedino->di_gen; 3291 return; 3292 } 3293 /* 3294 * If no dependencies, then there is nothing to roll back. 3295 */ 3296 inodedep->id_savedsize = dp->di_size; 3297 if (TAILQ_FIRST(&inodedep->id_inoupdt) == NULL) 3298 return; 3299 /* 3300 * Set the dependencies to busy. 
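 * (Each allocdirect scanned below moves from ATTACHED to UNDONE; the
 * write-completion path later re-attaches it and marks it COMPLETE.)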
*/
3302 ACQUIRE_LOCK(&lk);
3303 for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
3304 adp = TAILQ_NEXT(adp, ad_next)) {
3305 #ifdef DIAGNOSTIC
3306 if (deplist != 0 && prevlbn >= adp->ad_lbn) {
3307 FREE_LOCK(&lk);
3308 panic("softdep_write_inodeblock: lbn order");
3309 }
3310 prevlbn = adp->ad_lbn;
3311 if (adp->ad_lbn < NDADDR &&
3312 dp->di_db[adp->ad_lbn] != adp->ad_newblkno) {
3313 FREE_LOCK(&lk);
3314 panic("%s: direct pointer #%ld mismatch %d != %d",
3315 "softdep_write_inodeblock", adp->ad_lbn,
3316 dp->di_db[adp->ad_lbn], adp->ad_newblkno);
3317 }
3318 if (adp->ad_lbn >= NDADDR &&
3319 dp->di_ib[adp->ad_lbn - NDADDR] != adp->ad_newblkno) {
3320 FREE_LOCK(&lk);
3321 panic("%s: indirect pointer #%ld mismatch %d != %d",
3322 "softdep_write_inodeblock", adp->ad_lbn - NDADDR,
3323 dp->di_ib[adp->ad_lbn - NDADDR], adp->ad_newblkno);
3324 }
3325 deplist |= 1 << adp->ad_lbn;
3326 if ((adp->ad_state & ATTACHED) == 0) {
3327 FREE_LOCK(&lk);
3328 panic("softdep_write_inodeblock: Unknown state 0x%x",
3329 adp->ad_state);
3330 }
3331 #endif /* DIAGNOSTIC */
3332 adp->ad_state &= ~ATTACHED;
3333 adp->ad_state |= UNDONE;
3334 }
3335 /*
3336 * The on-disk inode cannot claim to be any larger than the last
3337 * fragment that has been written. Otherwise, the on-disk inode
3338 * might have fragments that were not the last block in the file
3339 * which would corrupt the filesystem.
3340 */
3341 for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
3342 lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) {
3343 if (adp->ad_lbn >= NDADDR)
3344 break;
3345 dp->di_db[adp->ad_lbn] = adp->ad_oldblkno;
3346 /* keep going until hitting a rollback to a frag */
3347 if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize)
3348 continue;
3349 dp->di_size = fs->fs_bsize * adp->ad_lbn + adp->ad_oldsize;
3350 for (i = adp->ad_lbn + 1; i < NDADDR; i++) {
3351 #ifdef DIAGNOSTIC
3352 if (dp->di_db[i] != 0 && (deplist & (1 << i)) == 0) {
3353 FREE_LOCK(&lk);
3354 panic("softdep_write_inodeblock: lost dep1");
3355 }
3356 #endif /* DIAGNOSTIC */
3357 dp->di_db[i] = 0;
3358 }
3359 for (i = 0; i < NIADDR; i++) {
3360 #ifdef DIAGNOSTIC
3361 if (dp->di_ib[i] != 0 &&
3362 (deplist & ((1 << NDADDR) << i)) == 0) {
3363 FREE_LOCK(&lk);
3364 panic("softdep_write_inodeblock: lost dep2");
3365 }
3366 #endif /* DIAGNOSTIC */
3367 dp->di_ib[i] = 0;
3368 }
3369 FREE_LOCK(&lk);
3370 return;
3371 }
3372 /*
3373 * If we have zero'ed out the last allocated block of the file,
3374 * roll back the size to the last currently allocated block.
3375 * We know that this last allocated block is full-sized, as
3376 * we already checked for fragments in the loop above.
3377 */
3378 if (lastadp != NULL &&
3379 dp->di_size <= (lastadp->ad_lbn + 1) * fs->fs_bsize) {
3380 for (i = lastadp->ad_lbn; i >= 0; i--)
3381 if (dp->di_db[i] != 0)
3382 break;
3383 dp->di_size = (i + 1) * fs->fs_bsize;
3384 }
3385 /*
3386 * The only dependencies are for indirect blocks.
3387 *
3388 * The file size for indirect block additions is not guaranteed.
3389 * Such a guarantee would be non-trivial to achieve. The conventional
3390 * synchronous write implementation also does not make this guarantee.
3391 * Fsck should catch and fix discrepancies. Arguably, the file size
3392 * can be over-estimated without destroying integrity when the file
3393 * moves into the indirect blocks (i.e., is large). If we want to
3394 * postpone fsck, we are stuck with this argument.
3395 */ 3396 for (; adp; adp = TAILQ_NEXT(adp, ad_next)) 3397 dp->di_ib[adp->ad_lbn - NDADDR] = 0; 3398 FREE_LOCK(&lk); 3399 } 3400 3401 /* 3402 * This routine is called during the completion interrupt 3403 * service routine for a disk write (from the procedure called 3404 * by the device driver to inform the filesystem caches of 3405 * a request completion). It should be called early in this 3406 * procedure, before the block is made available to other 3407 * processes or other routines are called. 3408 * 3409 * bioops callback - hold io_token 3410 * 3411 * Parameters: 3412 * bp: describes the completed disk write 3413 */ 3414 static void 3415 softdep_disk_write_complete(struct buf *bp) 3416 { 3417 struct worklist *wk; 3418 struct workhead reattach; 3419 struct newblk *newblk; 3420 struct allocindir *aip; 3421 struct allocdirect *adp; 3422 struct indirdep *indirdep; 3423 struct inodedep *inodedep; 3424 struct bmsafemap *bmsafemap; 3425 3426 get_mplock(); 3427 #ifdef DEBUG 3428 if (lk.lkt_held != NOHOLDER) 3429 panic("softdep_disk_write_complete: lock is held"); 3430 lk.lkt_held = SPECIAL_FLAG; 3431 #endif 3432 LIST_INIT(&reattach); 3433 while ((wk = LIST_FIRST(&bp->b_dep)) != NULL) { 3434 WORKLIST_REMOVE(wk); 3435 switch (wk->wk_type) { 3436 3437 case D_PAGEDEP: 3438 if (handle_written_filepage(WK_PAGEDEP(wk), bp)) 3439 WORKLIST_INSERT(&reattach, wk); 3440 continue; 3441 3442 case D_INODEDEP: 3443 if (handle_written_inodeblock(WK_INODEDEP(wk), bp)) 3444 WORKLIST_INSERT(&reattach, wk); 3445 continue; 3446 3447 case D_BMSAFEMAP: 3448 bmsafemap = WK_BMSAFEMAP(wk); 3449 while ((newblk = LIST_FIRST(&bmsafemap->sm_newblkhd))) { 3450 newblk->nb_state |= DEPCOMPLETE; 3451 newblk->nb_bmsafemap = NULL; 3452 LIST_REMOVE(newblk, nb_deps); 3453 } 3454 while ((adp = 3455 LIST_FIRST(&bmsafemap->sm_allocdirecthd))) { 3456 adp->ad_state |= DEPCOMPLETE; 3457 adp->ad_buf = NULL; 3458 LIST_REMOVE(adp, ad_deps); 3459 handle_allocdirect_partdone(adp); 3460 } 3461 while ((aip = 3462 LIST_FIRST(&bmsafemap->sm_allocindirhd))) { 3463 aip->ai_state |= DEPCOMPLETE; 3464 aip->ai_buf = NULL; 3465 LIST_REMOVE(aip, ai_deps); 3466 handle_allocindir_partdone(aip); 3467 } 3468 while ((inodedep = 3469 LIST_FIRST(&bmsafemap->sm_inodedephd)) != NULL) { 3470 inodedep->id_state |= DEPCOMPLETE; 3471 LIST_REMOVE(inodedep, id_deps); 3472 inodedep->id_buf = NULL; 3473 } 3474 WORKITEM_FREE(bmsafemap, D_BMSAFEMAP); 3475 continue; 3476 3477 case D_MKDIR: 3478 handle_written_mkdir(WK_MKDIR(wk), MKDIR_BODY); 3479 continue; 3480 3481 case D_ALLOCDIRECT: 3482 adp = WK_ALLOCDIRECT(wk); 3483 adp->ad_state |= COMPLETE; 3484 handle_allocdirect_partdone(adp); 3485 continue; 3486 3487 case D_ALLOCINDIR: 3488 aip = WK_ALLOCINDIR(wk); 3489 aip->ai_state |= COMPLETE; 3490 handle_allocindir_partdone(aip); 3491 continue; 3492 3493 case D_INDIRDEP: 3494 indirdep = WK_INDIRDEP(wk); 3495 if (indirdep->ir_state & GOINGAWAY) { 3496 lk.lkt_held = NOHOLDER; 3497 panic("disk_write_complete: indirdep gone"); 3498 } 3499 bcopy(indirdep->ir_saveddata, bp->b_data, bp->b_bcount); 3500 FREE(indirdep->ir_saveddata, M_INDIRDEP); 3501 indirdep->ir_saveddata = 0; 3502 indirdep->ir_state &= ~UNDONE; 3503 indirdep->ir_state |= ATTACHED; 3504 while ((aip = LIST_FIRST(&indirdep->ir_donehd)) != 0) { 3505 handle_allocindir_partdone(aip); 3506 if (aip == LIST_FIRST(&indirdep->ir_donehd)) { 3507 lk.lkt_held = NOHOLDER; 3508 panic("disk_write_complete: not gone"); 3509 } 3510 } 3511 WORKLIST_INSERT(&reattach, wk); 3512 if ((bp->b_flags & B_DELWRI) == 0) 3513 
stat_indir_blk_ptrs++;
3514 bdirty(bp);
3515 continue;
3516
3517 default:
3518 lk.lkt_held = NOHOLDER;
3519 panic("handle_disk_write_complete: Unknown type %s",
3520 TYPENAME(wk->wk_type));
3521 /* NOTREACHED */
3522 }
3523 }
3524 /*
3525 * Reattach any requests that must be redone.
3526 */
3527 while ((wk = LIST_FIRST(&reattach)) != NULL) {
3528 WORKLIST_REMOVE(wk);
3529 WORKLIST_INSERT_BP(bp, wk);
3530 }
3531 #ifdef DEBUG
3532 if (lk.lkt_held != SPECIAL_FLAG)
3533 panic("softdep_disk_write_complete: lock lost");
3534 lk.lkt_held = NOHOLDER;
3535 #endif
3536 rel_mplock();
3537 }
3538
3539 /*
3540 * Called from within softdep_disk_write_complete above. Note that
3541 * this routine is always called from interrupt level with further
3542 * splbio interrupts blocked.
3543 *
3544 * Parameters:
3545 * adp: the completed allocdirect
3546 */
3547 static void
3548 handle_allocdirect_partdone(struct allocdirect *adp)
3549 {
3550 struct allocdirect *listadp;
3551 struct inodedep *inodedep;
3552 long bsize;
3553
3554 if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE)
3555 return;
3556 if (adp->ad_buf != NULL) {
3557 lk.lkt_held = NOHOLDER;
3558 panic("handle_allocdirect_partdone: dangling dep");
3559 }
3560 /*
3561 * The on-disk inode cannot claim to be any larger than the last
3562 * fragment that has been written. Otherwise, the on-disk inode
3563 * might have fragments that were not the last block in the file
3564 * which would corrupt the filesystem. Thus, we cannot free any
3565 * allocdirects after one whose ad_oldblkno claims a fragment as
3566 * these blocks must be rolled back to zero before writing the inode.
3567 * We check the currently active set of allocdirects in id_inoupdt.
3568 */
3569 inodedep = adp->ad_inodedep;
3570 bsize = inodedep->id_fs->fs_bsize;
3571 TAILQ_FOREACH(listadp, &inodedep->id_inoupdt, ad_next) {
3572 /* found our block */
3573 if (listadp == adp)
3574 break;
3575 /* continue if the old block is not a fragment */
3576 if (listadp->ad_oldsize == 0 ||
3577 listadp->ad_oldsize == bsize)
3578 continue;
3579 /* hit a fragment */
3580 return;
3581 }
3582 /*
3583 * If we have reached the end of the current list without
3584 * finding the just finished dependency, then it must be
3585 * on the future dependency list. Future dependencies cannot
3586 * be freed until they are moved to the current list.
3587 */
3588 if (listadp == NULL) {
3589 #ifdef DEBUG
3590 TAILQ_FOREACH(listadp, &inodedep->id_newinoupdt, ad_next)
3591 /* found our block */
3592 if (listadp == adp)
3593 break;
3594 if (listadp == NULL) {
3595 lk.lkt_held = NOHOLDER;
3596 panic("handle_allocdirect_partdone: lost dep");
3597 }
3598 #endif /* DEBUG */
3599 return;
3600 }
3601 /*
3602 * If we have found the just finished dependency, then free
3603 * it along with anything that follows it that is complete.
3604 */
3605 for (; adp; adp = listadp) {
3606 listadp = TAILQ_NEXT(adp, ad_next);
3607 if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE)
3608 return;
3609 free_allocdirect(&inodedep->id_inoupdt, adp, 1);
3610 }
3611 }
3612
3613 /*
3614 * Called from within softdep_disk_write_complete above. Note that
3615 * this routine is always called from interrupt level with further
3616 * splbio interrupts blocked.
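 * If the containing indirect block is still rolled back (UNDONE), the
 * completed allocindir is parked on ir_donehd; otherwise its new block
 * number is written into the saved copy of the indirect block.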
3617 * 3618 * Parameters: 3619 * aip: the completed allocindir 3620 */ 3621 static void 3622 handle_allocindir_partdone(struct allocindir *aip) 3623 { 3624 struct indirdep *indirdep; 3625 3626 if ((aip->ai_state & ALLCOMPLETE) != ALLCOMPLETE) 3627 return; 3628 if (aip->ai_buf != NULL) { 3629 lk.lkt_held = NOHOLDER; 3630 panic("handle_allocindir_partdone: dangling dependency"); 3631 } 3632 indirdep = aip->ai_indirdep; 3633 if (indirdep->ir_state & UNDONE) { 3634 LIST_REMOVE(aip, ai_next); 3635 LIST_INSERT_HEAD(&indirdep->ir_donehd, aip, ai_next); 3636 return; 3637 } 3638 ((ufs_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] = 3639 aip->ai_newblkno; 3640 LIST_REMOVE(aip, ai_next); 3641 if (aip->ai_freefrag != NULL) 3642 add_to_worklist(&aip->ai_freefrag->ff_list); 3643 WORKITEM_FREE(aip, D_ALLOCINDIR); 3644 } 3645 3646 /* 3647 * Called from within softdep_disk_write_complete above to restore 3648 * in-memory inode block contents to their most up-to-date state. Note 3649 * that this routine is always called from interrupt level with further 3650 * splbio interrupts blocked. 3651 * 3652 * Parameters: 3653 * bp: buffer containing the inode block 3654 */ 3655 static int 3656 handle_written_inodeblock(struct inodedep *inodedep, struct buf *bp) 3657 { 3658 struct worklist *wk, *filefree; 3659 struct allocdirect *adp, *nextadp; 3660 struct ufs1_dinode *dp; 3661 int hadchanges; 3662 3663 if ((inodedep->id_state & IOSTARTED) == 0) { 3664 lk.lkt_held = NOHOLDER; 3665 panic("handle_written_inodeblock: not started"); 3666 } 3667 inodedep->id_state &= ~IOSTARTED; 3668 dp = (struct ufs1_dinode *)bp->b_data + 3669 ino_to_fsbo(inodedep->id_fs, inodedep->id_ino); 3670 /* 3671 * If we had to rollback the inode allocation because of 3672 * bitmaps being incomplete, then simply restore it. 3673 * Keep the block dirty so that it will not be reclaimed until 3674 * all associated dependencies have been cleared and the 3675 * corresponding updates written to disk. 3676 */ 3677 if (inodedep->id_savedino != NULL) { 3678 *dp = *inodedep->id_savedino; 3679 FREE(inodedep->id_savedino, M_INODEDEP); 3680 inodedep->id_savedino = NULL; 3681 if ((bp->b_flags & B_DELWRI) == 0) 3682 stat_inode_bitmap++; 3683 bdirty(bp); 3684 return (1); 3685 } 3686 inodedep->id_state |= COMPLETE; 3687 /* 3688 * Roll forward anything that had to be rolled back before 3689 * the inode could be updated. 
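 * Each rolled-back allocdirect gets its new block number reinstated in
 * di_db[]/di_ib[] and flips from UNDONE back to ATTACHED; hadchanges
 * records whether the buffer must be redirtied.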
3690 */
3691 hadchanges = 0;
3692 for (adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; adp = nextadp) {
3693 nextadp = TAILQ_NEXT(adp, ad_next);
3694 if (adp->ad_state & ATTACHED) {
3695 lk.lkt_held = NOHOLDER;
3696 panic("handle_written_inodeblock: new entry");
3697 }
3698 if (adp->ad_lbn < NDADDR) {
3699 if (dp->di_db[adp->ad_lbn] != adp->ad_oldblkno) {
3700 lk.lkt_held = NOHOLDER;
3701 panic("%s: %s #%ld mismatch %d != %d",
3702 "handle_written_inodeblock",
3703 "direct pointer", adp->ad_lbn,
3704 dp->di_db[adp->ad_lbn], adp->ad_oldblkno);
3705 }
3706 dp->di_db[adp->ad_lbn] = adp->ad_newblkno;
3707 } else {
3708 if (dp->di_ib[adp->ad_lbn - NDADDR] != 0) {
3709 lk.lkt_held = NOHOLDER;
3710 panic("%s: %s #%ld allocated as %d",
3711 "handle_written_inodeblock",
3712 "indirect pointer", adp->ad_lbn - NDADDR,
3713 dp->di_ib[adp->ad_lbn - NDADDR]);
3714 }
3715 dp->di_ib[adp->ad_lbn - NDADDR] = adp->ad_newblkno;
3716 }
3717 adp->ad_state &= ~UNDONE;
3718 adp->ad_state |= ATTACHED;
3719 hadchanges = 1;
3720 }
3721 if (hadchanges && (bp->b_flags & B_DELWRI) == 0)
3722 stat_direct_blk_ptrs++;
3723 /*
3724 * Reset the file size to its most up-to-date value.
3725 */
3726 if (inodedep->id_savedsize == -1) {
3727 lk.lkt_held = NOHOLDER;
3728 panic("handle_written_inodeblock: bad size");
3729 }
3730 if (dp->di_size != inodedep->id_savedsize) {
3731 dp->di_size = inodedep->id_savedsize;
3732 hadchanges = 1;
3733 }
3734 inodedep->id_savedsize = -1;
3735 /*
3736 * If there were any rollbacks in the inode block, then it must be
3737 * marked dirty so that it will eventually get written back in
3738 * its correct form.
3739 */
3740 if (hadchanges)
3741 bdirty(bp);
3742 /*
3743 * Process any allocdirects that completed during the update.
3744 */
3745 if ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != NULL)
3746 handle_allocdirect_partdone(adp);
3747 /*
3748 * Process deallocations that were held pending until the
3749 * inode had been written to disk. Freeing of the inode
3750 * is delayed until after all blocks have been freed to
3751 * avoid creation of new <vfsid, inum, lbn> triples
3752 * before the old ones have been deleted.
3753 */
3754 filefree = NULL;
3755 while ((wk = LIST_FIRST(&inodedep->id_bufwait)) != NULL) {
3756 WORKLIST_REMOVE(wk);
3757 switch (wk->wk_type) {
3758
3759 case D_FREEFILE:
3760 /*
3761 * We defer adding filefree to the worklist until
3762 * all other additions have been made to ensure
3763 * that it will be done after all the old blocks
3764 * have been freed.
3765 */
3766 if (filefree != NULL) {
3767 lk.lkt_held = NOHOLDER;
3768 panic("handle_written_inodeblock: filefree");
3769 }
3770 filefree = wk;
3771 continue;
3772
3773 case D_MKDIR:
3774 handle_written_mkdir(WK_MKDIR(wk), MKDIR_PARENT);
3775 continue;
3776
3777 case D_DIRADD:
3778 diradd_inode_written(WK_DIRADD(wk), inodedep);
3779 continue;
3780
3781 case D_FREEBLKS:
3782 wk->wk_state |= COMPLETE;
3783 if ((wk->wk_state & ALLCOMPLETE) != ALLCOMPLETE)
3784 continue;
3785 /* -- fall through -- */
3786 case D_FREEFRAG:
3787 case D_DIRREM:
3788 add_to_worklist(wk);
3789 continue;
3790
3791 default:
3792 lk.lkt_held = NOHOLDER;
3793 panic("handle_written_inodeblock: Unknown type %s",
3794 TYPENAME(wk->wk_type));
3795 /* NOTREACHED */
3796 }
3797 }
3798 if (filefree != NULL) {
3799 if (free_inodedep(inodedep) == 0) {
3800 lk.lkt_held = NOHOLDER;
3801 panic("handle_written_inodeblock: live inodedep");
3802 }
3803 add_to_worklist(filefree);
3804 return (0);
3805 }
3806
3807 /*
3808 * If no outstanding dependencies, free it.
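 * Otherwise a non-zero return tells softdep_disk_write_complete() to
 * reattach the inodedep to the buffer so the restored contents get
 * written again.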
3809 */ 3810 if (free_inodedep(inodedep) || TAILQ_FIRST(&inodedep->id_inoupdt) == 0) 3811 return (0); 3812 return (hadchanges); 3813 } 3814 3815 /* 3816 * Process a diradd entry after its dependent inode has been written. 3817 * This routine must be called with splbio interrupts blocked. 3818 */ 3819 static void 3820 diradd_inode_written(struct diradd *dap, struct inodedep *inodedep) 3821 { 3822 struct pagedep *pagedep; 3823 3824 dap->da_state |= COMPLETE; 3825 if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) { 3826 if (dap->da_state & DIRCHG) 3827 pagedep = dap->da_previous->dm_pagedep; 3828 else 3829 pagedep = dap->da_pagedep; 3830 LIST_REMOVE(dap, da_pdlist); 3831 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist); 3832 } 3833 WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list); 3834 } 3835 3836 /* 3837 * Handle the completion of a mkdir dependency. 3838 */ 3839 static void 3840 handle_written_mkdir(struct mkdir *mkdir, int type) 3841 { 3842 struct diradd *dap; 3843 struct pagedep *pagedep; 3844 3845 if (mkdir->md_state != type) { 3846 lk.lkt_held = NOHOLDER; 3847 panic("handle_written_mkdir: bad type"); 3848 } 3849 dap = mkdir->md_diradd; 3850 dap->da_state &= ~type; 3851 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) == 0) 3852 dap->da_state |= DEPCOMPLETE; 3853 if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) { 3854 if (dap->da_state & DIRCHG) 3855 pagedep = dap->da_previous->dm_pagedep; 3856 else 3857 pagedep = dap->da_pagedep; 3858 LIST_REMOVE(dap, da_pdlist); 3859 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist); 3860 } 3861 LIST_REMOVE(mkdir, md_mkdirs); 3862 WORKITEM_FREE(mkdir, D_MKDIR); 3863 } 3864 3865 /* 3866 * Called from within softdep_disk_write_complete above. 3867 * A write operation was just completed. Removed inodes can 3868 * now be freed and associated block pointers may be committed. 3869 * Note that this routine is always called from interrupt level 3870 * with further splbio interrupts blocked. 3871 * 3872 * Parameters: 3873 * bp: buffer containing the written page 3874 */ 3875 static int 3876 handle_written_filepage(struct pagedep *pagedep, struct buf *bp) 3877 { 3878 struct dirrem *dirrem; 3879 struct diradd *dap, *nextdap; 3880 struct direct *ep; 3881 int i, chgs; 3882 3883 if ((pagedep->pd_state & IOSTARTED) == 0) { 3884 lk.lkt_held = NOHOLDER; 3885 panic("handle_written_filepage: not started"); 3886 } 3887 pagedep->pd_state &= ~IOSTARTED; 3888 /* 3889 * Process any directory removals that have been committed. 3890 */ 3891 while ((dirrem = LIST_FIRST(&pagedep->pd_dirremhd)) != NULL) { 3892 LIST_REMOVE(dirrem, dm_next); 3893 dirrem->dm_dirinum = pagedep->pd_ino; 3894 add_to_worklist(&dirrem->dm_list); 3895 } 3896 /* 3897 * Free any directory additions that have been committed. 3898 */ 3899 while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL) 3900 free_diradd(dap); 3901 /* 3902 * Uncommitted directory entries must be restored. 
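 * The loop below re-installs da_newinum in each undone entry, flips
 * the diradd back to ATTACHED, and moves fully complete entries to the
 * pending list; chgs records whether the page must be redirtied.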
3903 */
3904 for (chgs = 0, i = 0; i < DAHASHSZ; i++) {
3905 for (dap = LIST_FIRST(&pagedep->pd_diraddhd[i]); dap;
3906 dap = nextdap) {
3907 nextdap = LIST_NEXT(dap, da_pdlist);
3908 if (dap->da_state & ATTACHED) {
3909 lk.lkt_held = NOHOLDER;
3910 panic("handle_written_filepage: attached");
3911 }
3912 ep = (struct direct *)
3913 ((char *)bp->b_data + dap->da_offset);
3914 ep->d_ino = dap->da_newinum;
3915 dap->da_state &= ~UNDONE;
3916 dap->da_state |= ATTACHED;
3917 chgs = 1;
3918 /*
3919 * If the inode referenced by the directory has
3920 * been written out, then the dependency can be
3921 * moved to the pending list.
3922 */
3923 if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) {
3924 LIST_REMOVE(dap, da_pdlist);
3925 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap,
3926 da_pdlist);
3927 }
3928 }
3929 }
3930 /*
3931 * If there were any rollbacks in the directory, then it must be
3932 * marked dirty so that it will eventually get written back in
3933 * its correct form.
3934 */
3935 if (chgs) {
3936 if ((bp->b_flags & B_DELWRI) == 0)
3937 stat_dir_entry++;
3938 bdirty(bp);
3939 }
3940 /*
3941 * If no dependencies remain, the pagedep will be freed.
3942 * Otherwise it will remain to update the page before it
3943 * is written back to disk.
3944 */
3945 if (LIST_FIRST(&pagedep->pd_pendinghd) == 0) {
3946 for (i = 0; i < DAHASHSZ; i++)
3947 if (LIST_FIRST(&pagedep->pd_diraddhd[i]) != NULL)
3948 break;
3949 if (i == DAHASHSZ) {
3950 LIST_REMOVE(pagedep, pd_hash);
3951 WORKITEM_FREE(pagedep, D_PAGEDEP);
3952 return (0);
3953 }
3954 }
3955 return (1);
3956 }
3957
3958 /*
3959 * Writing back in-core inode structures.
3960 *
3961 * The filesystem only accesses an inode's contents when it occupies an
3962 * "in-core" inode structure. These "in-core" structures are separate from
3963 * the page frames used to cache inode blocks. Only the latter are
3964 * transferred to/from the disk. So, when the updated contents of the
3965 * "in-core" inode structure are copied to the corresponding in-memory inode
3966 * block, the dependencies are also transferred. The following procedure is
3967 * called when copying a dirty "in-core" inode to a cached inode block.
3968 */
3969
3970 /*
3971 * Called when an inode is loaded from disk. If the effective link count
3972 * differed from the actual link count when it was last flushed, then we
3973 * need to ensure that the correct effective link count is put back.
3974 *
3975 * Parameters:
3976 * ip: the "in_core" copy of the inode
3977 */
3978 void
3979 softdep_load_inodeblock(struct inode *ip)
3980 {
3981 struct inodedep *inodedep;
3982
3983 /*
3984 * Check for alternate nlink count.
3985 */
3986 ip->i_effnlink = ip->i_nlink;
3987 ACQUIRE_LOCK(&lk);
3988 if (inodedep_lookup(ip->i_fs, ip->i_number, 0, &inodedep) == 0) {
3989 FREE_LOCK(&lk);
3990 return;
3991 }
3992 ip->i_effnlink -= inodedep->id_nlinkdelta;
3993 FREE_LOCK(&lk);
3994 }
3995
3996 /*
3997 * This routine is called just before the "in-core" inode
3998 * information is to be copied to the in-memory inode block.
3999 * Recall that an inode block contains several inodes. If
4000 * the force flag is set, then the dependencies will be
4001 * cleared so that the update can always be made. Note that
4002 * the buffer is locked when this routine is called, so we
4003 * will never be in the middle of writing the inode block
4004 * to disk.
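 * The inodedep's id_nlinkdelta must always equal i_nlink - i_effnlink;
 * the checks below panic if the two ever disagree.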
4005 *
4006 * Parameters:
4007 * ip: the "in_core" copy of the inode
4008 * bp: the buffer containing the inode block
4009 * waitfor: nonzero => update must be allowed
4010 */
4011 void
4012 softdep_update_inodeblock(struct inode *ip, struct buf *bp,
4013 int waitfor)
4014 {
4015 struct inodedep *inodedep;
4016 struct worklist *wk;
4017 int error, gotit;
4018
4019 /*
4020 * If the effective link count is not equal to the actual link
4021 * count, then we must track the difference in an inodedep while
4022 * the inode is (potentially) tossed out of the cache. Otherwise,
4023 * if there is no existing inodedep, then there are no dependencies
4024 * to track.
4025 */
4026 ACQUIRE_LOCK(&lk);
4027 if (inodedep_lookup(ip->i_fs, ip->i_number, 0, &inodedep) == 0) {
4028 FREE_LOCK(&lk);
4029 if (ip->i_effnlink != ip->i_nlink)
4030 panic("softdep_update_inodeblock: bad link count");
4031 return;
4032 }
4033 if (inodedep->id_nlinkdelta != ip->i_nlink - ip->i_effnlink) {
4034 FREE_LOCK(&lk);
4035 panic("softdep_update_inodeblock: bad delta");
4036 }
4037 /*
4038 * Changes have been initiated. Anything depending on these
4039 * changes cannot occur until this inode has been written.
4040 */
4041 inodedep->id_state &= ~COMPLETE;
4042 if ((inodedep->id_state & ONWORKLIST) == 0)
4043 WORKLIST_INSERT_BP(bp, &inodedep->id_list);
4044 /*
4045 * Any new dependencies associated with the incore inode must
4046 * now be moved to the list associated with the buffer holding
4047 * the in-memory copy of the inode. Once merged, process any
4048 * allocdirects that are completed by the merger.
4049 */
4050 merge_inode_lists(inodedep);
4051 if (TAILQ_FIRST(&inodedep->id_inoupdt) != NULL)
4052 handle_allocdirect_partdone(TAILQ_FIRST(&inodedep->id_inoupdt));
4053 /*
4054 * Now that the inode has been pushed into the buffer, the
4055 * operations dependent on the inode being written to disk
4056 * can be moved to the id_bufwait so that they will be
4057 * processed when the buffer I/O completes.
4058 */
4059 while ((wk = LIST_FIRST(&inodedep->id_inowait)) != NULL) {
4060 WORKLIST_REMOVE(wk);
4061 WORKLIST_INSERT(&inodedep->id_bufwait, wk);
4062 }
4063 /*
4064 * Newly allocated inodes cannot be written until the bitmap
4065 * that allocates them has been written (indicated by
4066 * DEPCOMPLETE being set in id_state). If we are doing a
4067 * forced sync (e.g., an fsync on a file), we force the bitmap
4068 * to be written so that the update can be done.
4069 */
4070 if ((inodedep->id_state & DEPCOMPLETE) != 0 || waitfor == 0) {
4071 FREE_LOCK(&lk);
4072 return;
4073 }
4074 gotit = getdirtybuf(&inodedep->id_buf, MNT_WAIT);
4075 FREE_LOCK(&lk);
4076 if (gotit &&
4077 (error = bwrite(inodedep->id_buf)) != 0)
4078 softdep_error("softdep_update_inodeblock: bwrite", error);
4079 }
4080
4081 /*
4082 * Merge the new inode dependency list (id_newinoupdt) into the old
4083 * inode dependency list (id_inoupdt). This routine must be called
4084 * with splbio interrupts blocked.
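 *
 * For example, if id_inoupdt holds entries for lbns {0, 4} and
 * id_newinoupdt holds {2, 4}, the merge inserts lbn 2 before lbn 4,
 * folds the duplicate lbn 4 entries together via allocdirect_merge(),
 * and appends any leftover new entries at the tail, leaving one list
 * sorted by lbn.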
4085 */ 4086 static void 4087 merge_inode_lists(struct inodedep *inodedep) 4088 { 4089 struct allocdirect *listadp, *newadp; 4090 4091 newadp = TAILQ_FIRST(&inodedep->id_newinoupdt); 4092 for (listadp = TAILQ_FIRST(&inodedep->id_inoupdt); listadp && newadp;) { 4093 if (listadp->ad_lbn < newadp->ad_lbn) { 4094 listadp = TAILQ_NEXT(listadp, ad_next); 4095 continue; 4096 } 4097 TAILQ_REMOVE(&inodedep->id_newinoupdt, newadp, ad_next); 4098 TAILQ_INSERT_BEFORE(listadp, newadp, ad_next); 4099 if (listadp->ad_lbn == newadp->ad_lbn) { 4100 allocdirect_merge(&inodedep->id_inoupdt, newadp, 4101 listadp); 4102 listadp = newadp; 4103 } 4104 newadp = TAILQ_FIRST(&inodedep->id_newinoupdt); 4105 } 4106 while ((newadp = TAILQ_FIRST(&inodedep->id_newinoupdt)) != NULL) { 4107 TAILQ_REMOVE(&inodedep->id_newinoupdt, newadp, ad_next); 4108 TAILQ_INSERT_TAIL(&inodedep->id_inoupdt, newadp, ad_next); 4109 } 4110 } 4111 4112 /* 4113 * If we are doing an fsync, then we must ensure that any directory 4114 * entries for the inode have been written after the inode gets to disk. 4115 * 4116 * bioops callback - hold io_token 4117 * 4118 * Parameters: 4119 * vp: the "in_core" copy of the inode 4120 */ 4121 static int 4122 softdep_fsync(struct vnode *vp) 4123 { 4124 struct inodedep *inodedep; 4125 struct pagedep *pagedep; 4126 struct worklist *wk; 4127 struct diradd *dap; 4128 struct mount *mnt; 4129 struct vnode *pvp; 4130 struct inode *ip; 4131 struct buf *bp; 4132 struct fs *fs; 4133 int error, flushparent; 4134 ino_t parentino; 4135 ufs_lbn_t lbn; 4136 4137 /* 4138 * Move check from original kernel code, possibly not needed any 4139 * more with the per-mount bioops. 4140 */ 4141 if ((vp->v_mount->mnt_flag & MNT_SOFTDEP) == 0) 4142 return (0); 4143 4144 get_mplock(); 4145 ip = VTOI(vp); 4146 fs = ip->i_fs; 4147 ACQUIRE_LOCK(&lk); 4148 if (inodedep_lookup(fs, ip->i_number, 0, &inodedep) == 0) { 4149 FREE_LOCK(&lk); 4150 rel_mplock(); 4151 return (0); 4152 } 4153 if (LIST_FIRST(&inodedep->id_inowait) != NULL || 4154 LIST_FIRST(&inodedep->id_bufwait) != NULL || 4155 TAILQ_FIRST(&inodedep->id_inoupdt) != NULL || 4156 TAILQ_FIRST(&inodedep->id_newinoupdt) != NULL) { 4157 FREE_LOCK(&lk); 4158 panic("softdep_fsync: pending ops"); 4159 } 4160 for (error = 0, flushparent = 0; ; ) { 4161 if ((wk = LIST_FIRST(&inodedep->id_pendinghd)) == NULL) 4162 break; 4163 if (wk->wk_type != D_DIRADD) { 4164 FREE_LOCK(&lk); 4165 panic("softdep_fsync: Unexpected type %s", 4166 TYPENAME(wk->wk_type)); 4167 } 4168 dap = WK_DIRADD(wk); 4169 /* 4170 * Flush our parent if this directory entry 4171 * has a MKDIR_PARENT dependency. 4172 */ 4173 if (dap->da_state & DIRCHG) 4174 pagedep = dap->da_previous->dm_pagedep; 4175 else 4176 pagedep = dap->da_pagedep; 4177 mnt = pagedep->pd_mnt; 4178 parentino = pagedep->pd_ino; 4179 lbn = pagedep->pd_lbn; 4180 if ((dap->da_state & (MKDIR_BODY | COMPLETE)) != COMPLETE) { 4181 FREE_LOCK(&lk); 4182 panic("softdep_fsync: dirty"); 4183 } 4184 flushparent = dap->da_state & MKDIR_PARENT; 4185 /* 4186 * If we are being fsync'ed as part of vgone'ing this vnode, 4187 * then we will not be able to release and recover the 4188 * vnode below, so we just have to give up on writing its 4189 * directory entry out. It will eventually be written, just 4190 * not now, but then the user was not asking to have it 4191 * written, so we are not breaking any promises. 4192 */ 4193 if (vp->v_flag & VRECLAIMED) 4194 break; 4195 /* 4196 * We prevent deadlock by always fetching inodes from the 4197 * root, moving down the directory tree. 
Thus, when fetching 4198 * our parent directory, we must unlock ourselves before 4199 * requesting the lock on our parent. See the comment in 4200 * ufs_lookup for details on possible races. 4201 */ 4202 FREE_LOCK(&lk); 4203 vn_unlock(vp); 4204 error = VFS_VGET(mnt, NULL, parentino, &pvp); 4205 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 4206 if (error != 0) { 4207 rel_mplock(); 4208 return (error); 4209 } 4210 if (flushparent) { 4211 if ((error = ffs_update(pvp, 1)) != 0) { 4212 vput(pvp); 4213 rel_mplock(); 4214 return (error); 4215 } 4216 } 4217 /* 4218 * Flush directory page containing the inode's name. 4219 */ 4220 error = bread(pvp, lblktodoff(fs, lbn), blksize(fs, VTOI(pvp), lbn), &bp); 4221 if (error == 0) 4222 error = bwrite(bp); 4223 vput(pvp); 4224 if (error != 0) { 4225 rel_mplock(); 4226 return (error); 4227 } 4228 ACQUIRE_LOCK(&lk); 4229 if (inodedep_lookup(fs, ip->i_number, 0, &inodedep) == 0) 4230 break; 4231 } 4232 FREE_LOCK(&lk); 4233 rel_mplock(); 4234 return (0); 4235 } 4236 4237 /* 4238 * Flush all the dirty bitmaps associated with the block device 4239 * before flushing the rest of the dirty blocks so as to reduce 4240 * the number of dependencies that will have to be rolled back. 4241 */ 4242 static int softdep_fsync_mountdev_bp(struct buf *bp, void *data); 4243 4244 void 4245 softdep_fsync_mountdev(struct vnode *vp) 4246 { 4247 if (!vn_isdisk(vp, NULL)) 4248 panic("softdep_fsync_mountdev: vnode not a disk"); 4249 ACQUIRE_LOCK(&lk); 4250 lwkt_gettoken(&vp->v_token); 4251 RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL, 4252 softdep_fsync_mountdev_bp, vp); 4253 lwkt_reltoken(&vp->v_token); 4254 drain_output(vp, 1); 4255 FREE_LOCK(&lk); 4256 } 4257 4258 static int 4259 softdep_fsync_mountdev_bp(struct buf *bp, void *data) 4260 { 4261 struct worklist *wk; 4262 struct vnode *vp = data; 4263 4264 /* 4265 * If it is already scheduled, skip to the next buffer. 4266 */ 4267 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) 4268 return(0); 4269 if (bp->b_vp != vp || (bp->b_flags & B_DELWRI) == 0) { 4270 BUF_UNLOCK(bp); 4271 kprintf("softdep_fsync_mountdev_bp: warning, buffer %p ripped out from under vnode %p\n", bp, vp); 4272 return(0); 4273 } 4274 /* 4275 * We are only interested in bitmaps with outstanding 4276 * dependencies. 4277 */ 4278 if ((wk = LIST_FIRST(&bp->b_dep)) == NULL || 4279 wk->wk_type != D_BMSAFEMAP) { 4280 BUF_UNLOCK(bp); 4281 return(0); 4282 } 4283 bremfree(bp); 4284 FREE_LOCK(&lk); 4285 (void) bawrite(bp); 4286 ACQUIRE_LOCK(&lk); 4287 return(0); 4288 } 4289 4290 /* 4291 * This routine is called when we are trying to synchronously flush a 4292 * file. This routine must eliminate any filesystem metadata dependencies 4293 * so that the syncing routine can succeed by pushing the dirty blocks 4294 * associated with the file. If any I/O errors occur, they are returned. 4295 */ 4296 struct softdep_sync_metadata_info { 4297 struct vnode *vp; 4298 int waitfor; 4299 }; 4300 4301 static int softdep_sync_metadata_bp(struct buf *bp, void *data); 4302 4303 int 4304 softdep_sync_metadata(struct vnode *vp, struct thread *td) 4305 { 4306 struct softdep_sync_metadata_info info; 4307 int error, waitfor; 4308 4309 /* 4310 * Check whether this vnode is involved in a filesystem 4311 * that is doing soft dependency processing. 
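 * For a regular file this is the DOINGSOFTDEP() check on the vnode;
 * for the disk device itself we look at MNT_SOFTDEP on the filesystem
 * mounted from it, if any.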
4312 */ 4313 if (!vn_isdisk(vp, NULL)) { 4314 if (!DOINGSOFTDEP(vp)) 4315 return (0); 4316 } else 4317 if (vp->v_rdev->si_mountpoint == NULL || 4318 (vp->v_rdev->si_mountpoint->mnt_flag & MNT_SOFTDEP) == 0) 4319 return (0); 4320 /* 4321 * Ensure that any direct block dependencies have been cleared. 4322 */ 4323 ACQUIRE_LOCK(&lk); 4324 if ((error = flush_inodedep_deps(VTOI(vp)->i_fs, VTOI(vp)->i_number))) { 4325 FREE_LOCK(&lk); 4326 return (error); 4327 } 4328 /* 4329 * For most files, the only metadata dependencies are the 4330 * cylinder group maps that allocate their inode or blocks. 4331 * The block allocation dependencies can be found by traversing 4332 * the dependency lists for any buffers that remain on their 4333 * dirty buffer list. The inode allocation dependency will 4334 * be resolved when the inode is updated with MNT_WAIT. 4335 * This work is done in two passes. The first pass grabs most 4336 * of the buffers and begins asynchronously writing them. The 4337 * only way to wait for these asynchronous writes is to sleep 4338 * on the filesystem vnode which may stay busy for a long time 4339 * if the filesystem is active. So, instead, we make a second 4340 * pass over the dependencies blocking on each write. In the 4341 * usual case we will be blocking against a write that we 4342 * initiated, so when it is done the dependency will have been 4343 * resolved. Thus the second pass is expected to end quickly. 4344 */ 4345 waitfor = MNT_NOWAIT; 4346 top: 4347 /* 4348 * We must wait for any I/O in progress to finish so that 4349 * all potential buffers on the dirty list will be visible. 4350 */ 4351 drain_output(vp, 1); 4352 4353 info.vp = vp; 4354 info.waitfor = waitfor; 4355 lwkt_gettoken(&vp->v_token); 4356 error = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL, 4357 softdep_sync_metadata_bp, &info); 4358 lwkt_reltoken(&vp->v_token); 4359 if (error < 0) { 4360 FREE_LOCK(&lk); 4361 return(-error); /* error code */ 4362 } 4363 4364 /* 4365 * The brief unlock is to allow any pent up dependency 4366 * processing to be done. Then proceed with the second pass. 4367 */ 4368 if (waitfor == MNT_NOWAIT) { 4369 waitfor = MNT_WAIT; 4370 FREE_LOCK(&lk); 4371 ACQUIRE_LOCK(&lk); 4372 goto top; 4373 } 4374 4375 /* 4376 * If we have managed to get rid of all the dirty buffers, 4377 * then we are done. For certain directories and block 4378 * devices, we may need to do further work. 4379 * 4380 * We must wait for any I/O in progress to finish so that 4381 * all potential buffers on the dirty list will be visible. 4382 */ 4383 drain_output(vp, 1); 4384 if (RB_EMPTY(&vp->v_rbdirty_tree)) { 4385 FREE_LOCK(&lk); 4386 return (0); 4387 } 4388 4389 FREE_LOCK(&lk); 4390 /* 4391 * If we are trying to sync a block device, some of its buffers may 4392 * contain metadata that cannot be written until the contents of some 4393 * partially written files have been written to disk. The only easy 4394 * way to accomplish this is to sync the entire filesystem (luckily 4395 * this happens rarely). 
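 * The check below also guards against a NULL si_mountpoint and against
 * deadlocking on a locked vnode before calling VFS_SYNC(..., MNT_WAIT).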
4396 */ 4397 if (vn_isdisk(vp, NULL) && 4398 vp->v_rdev && 4399 vp->v_rdev->si_mountpoint && !vn_islocked(vp) && 4400 (error = VFS_SYNC(vp->v_rdev->si_mountpoint, MNT_WAIT)) != 0) 4401 return (error); 4402 return (0); 4403 } 4404 4405 static int 4406 softdep_sync_metadata_bp(struct buf *bp, void *data) 4407 { 4408 struct softdep_sync_metadata_info *info = data; 4409 struct pagedep *pagedep; 4410 struct allocdirect *adp; 4411 struct allocindir *aip; 4412 struct worklist *wk; 4413 struct buf *nbp; 4414 int error; 4415 int i; 4416 4417 if (getdirtybuf(&bp, MNT_WAIT) == 0) { 4418 kprintf("softdep_sync_metadata_bp(1): caught buf %p going away\n", bp); 4419 return (1); 4420 } 4421 if (bp->b_vp != info->vp || (bp->b_flags & B_DELWRI) == 0) { 4422 kprintf("softdep_sync_metadata_bp(2): caught buf %p going away vp %p\n", bp, info->vp); 4423 BUF_UNLOCK(bp); 4424 return(1); 4425 } 4426 4427 /* 4428 * As we hold the buffer locked, none of its dependencies 4429 * will disappear. 4430 */ 4431 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 4432 switch (wk->wk_type) { 4433 4434 case D_ALLOCDIRECT: 4435 adp = WK_ALLOCDIRECT(wk); 4436 if (adp->ad_state & DEPCOMPLETE) 4437 break; 4438 nbp = adp->ad_buf; 4439 if (getdirtybuf(&nbp, info->waitfor) == 0) 4440 break; 4441 FREE_LOCK(&lk); 4442 if (info->waitfor == MNT_NOWAIT) { 4443 bawrite(nbp); 4444 } else if ((error = bwrite(nbp)) != 0) { 4445 bawrite(bp); 4446 ACQUIRE_LOCK(&lk); 4447 return (-error); 4448 } 4449 ACQUIRE_LOCK(&lk); 4450 break; 4451 4452 case D_ALLOCINDIR: 4453 aip = WK_ALLOCINDIR(wk); 4454 if (aip->ai_state & DEPCOMPLETE) 4455 break; 4456 nbp = aip->ai_buf; 4457 if (getdirtybuf(&nbp, info->waitfor) == 0) 4458 break; 4459 FREE_LOCK(&lk); 4460 if (info->waitfor == MNT_NOWAIT) { 4461 bawrite(nbp); 4462 } else if ((error = bwrite(nbp)) != 0) { 4463 bawrite(bp); 4464 ACQUIRE_LOCK(&lk); 4465 return (-error); 4466 } 4467 ACQUIRE_LOCK(&lk); 4468 break; 4469 4470 case D_INDIRDEP: 4471 restart: 4472 4473 LIST_FOREACH(aip, &WK_INDIRDEP(wk)->ir_deplisthd, ai_next) { 4474 if (aip->ai_state & DEPCOMPLETE) 4475 continue; 4476 nbp = aip->ai_buf; 4477 if (getdirtybuf(&nbp, MNT_WAIT) == 0) 4478 goto restart; 4479 FREE_LOCK(&lk); 4480 if ((error = bwrite(nbp)) != 0) { 4481 bawrite(bp); 4482 ACQUIRE_LOCK(&lk); 4483 return (-error); 4484 } 4485 ACQUIRE_LOCK(&lk); 4486 goto restart; 4487 } 4488 break; 4489 4490 case D_INODEDEP: 4491 if ((error = flush_inodedep_deps(WK_INODEDEP(wk)->id_fs, 4492 WK_INODEDEP(wk)->id_ino)) != 0) { 4493 FREE_LOCK(&lk); 4494 bawrite(bp); 4495 ACQUIRE_LOCK(&lk); 4496 return (-error); 4497 } 4498 break; 4499 4500 case D_PAGEDEP: 4501 /* 4502 * We are trying to sync a directory that may 4503 * have dependencies on both its own metadata 4504 * and/or dependencies on the inodes of any 4505 * recently allocated files. We walk its diradd 4506 * lists pushing out the associated inode. 4507 */ 4508 pagedep = WK_PAGEDEP(wk); 4509 for (i = 0; i < DAHASHSZ; i++) { 4510 if (LIST_FIRST(&pagedep->pd_diraddhd[i]) == 0) 4511 continue; 4512 if ((error = 4513 flush_pagedep_deps(info->vp, 4514 pagedep->pd_mnt, 4515 &pagedep->pd_diraddhd[i]))) { 4516 FREE_LOCK(&lk); 4517 bawrite(bp); 4518 ACQUIRE_LOCK(&lk); 4519 return (-error); 4520 } 4521 } 4522 break; 4523 4524 case D_MKDIR: 4525 /* 4526 * This case should never happen if the vnode has 4527 * been properly sync'ed. However, if this function 4528 * is used at a place where the vnode has not yet 4529 * been sync'ed, this dependency can show up. So, 4530 * rather than panic, just flush it. 
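 * The flush mirrors the other cases: bawrite() for MNT_NOWAIT,
 * bwrite() otherwise.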
4531 */ 4532 nbp = WK_MKDIR(wk)->md_buf; 4533 if (getdirtybuf(&nbp, info->waitfor) == 0) 4534 break; 4535 FREE_LOCK(&lk); 4536 if (info->waitfor == MNT_NOWAIT) { 4537 bawrite(nbp); 4538 } else if ((error = bwrite(nbp)) != 0) { 4539 bawrite(bp); 4540 ACQUIRE_LOCK(&lk); 4541 return (-error); 4542 } 4543 ACQUIRE_LOCK(&lk); 4544 break; 4545 4546 case D_BMSAFEMAP: 4547 /* 4548 * This case should never happen if the vnode has 4549 * been properly sync'ed. However, if this function 4550 * is used at a place where the vnode has not yet 4551 * been sync'ed, this dependency can show up. So, 4552 * rather than panic, just flush it. 4553 * 4554 * nbp can wind up == bp if a device node for the 4555 * same filesystem is being fsynced at the same time, 4556 * leading to a panic if we don't catch the case. 4557 */ 4558 nbp = WK_BMSAFEMAP(wk)->sm_buf; 4559 if (nbp == bp) 4560 break; 4561 if (getdirtybuf(&nbp, info->waitfor) == 0) 4562 break; 4563 FREE_LOCK(&lk); 4564 if (info->waitfor == MNT_NOWAIT) { 4565 bawrite(nbp); 4566 } else if ((error = bwrite(nbp)) != 0) { 4567 bawrite(bp); 4568 ACQUIRE_LOCK(&lk); 4569 return (-error); 4570 } 4571 ACQUIRE_LOCK(&lk); 4572 break; 4573 4574 default: 4575 FREE_LOCK(&lk); 4576 panic("softdep_sync_metadata: Unknown type %s", 4577 TYPENAME(wk->wk_type)); 4578 /* NOTREACHED */ 4579 } 4580 } 4581 FREE_LOCK(&lk); 4582 bawrite(bp); 4583 ACQUIRE_LOCK(&lk); 4584 return(0); 4585 } 4586 4587 /* 4588 * Flush the dependencies associated with an inodedep. 4589 * Called with splbio blocked. 4590 */ 4591 static int 4592 flush_inodedep_deps(struct fs *fs, ino_t ino) 4593 { 4594 struct inodedep *inodedep; 4595 struct allocdirect *adp; 4596 int error, waitfor; 4597 struct buf *bp; 4598 4599 /* 4600 * This work is done in two passes. The first pass grabs most 4601 * of the buffers and begins asynchronously writing them. The 4602 * only way to wait for these asynchronous writes is to sleep 4603 * on the filesystem vnode which may stay busy for a long time 4604 * if the filesystem is active. So, instead, we make a second 4605 * pass over the dependencies blocking on each write. In the 4606 * usual case we will be blocking against a write that we 4607 * initiated, so when it is done the dependency will have been 4608 * resolved. Thus the second pass is expected to end quickly. 4609 * We give a brief window at the top of the loop to allow 4610 * any pending I/O to complete. 
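 *
 * Roughly: pass one runs with waitfor == MNT_NOWAIT and bawrite()s
 * every dependency buffer it can lock without sleeping; pass two
 * rescans with MNT_WAIT, using bwrite() to block until each remaining
 * buffer is on disk.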
4611 */
4612 for (waitfor = MNT_NOWAIT; ; ) {
4613 FREE_LOCK(&lk);
4614 ACQUIRE_LOCK(&lk);
4615 if (inodedep_lookup(fs, ino, 0, &inodedep) == 0)
4616 return (0);
4617 TAILQ_FOREACH(adp, &inodedep->id_inoupdt, ad_next) {
4618 if (adp->ad_state & DEPCOMPLETE)
4619 continue;
4620 bp = adp->ad_buf;
4621 if (getdirtybuf(&bp, waitfor) == 0) {
4622 if (waitfor == MNT_NOWAIT)
4623 continue;
4624 break;
4625 }
4626 FREE_LOCK(&lk);
4627 if (waitfor == MNT_NOWAIT) {
4628 bawrite(bp);
4629 } else if ((error = bwrite(bp)) != 0) {
4630 ACQUIRE_LOCK(&lk);
4631 return (error);
4632 }
4633 ACQUIRE_LOCK(&lk);
4634 break;
4635 }
4636 if (adp != NULL)
4637 continue;
4638 TAILQ_FOREACH(adp, &inodedep->id_newinoupdt, ad_next) {
4639 if (adp->ad_state & DEPCOMPLETE)
4640 continue;
4641 bp = adp->ad_buf;
4642 if (getdirtybuf(&bp, waitfor) == 0) {
4643 if (waitfor == MNT_NOWAIT)
4644 continue;
4645 break;
4646 }
4647 FREE_LOCK(&lk);
4648 if (waitfor == MNT_NOWAIT) {
4649 bawrite(bp);
4650 } else if ((error = bwrite(bp)) != 0) {
4651 ACQUIRE_LOCK(&lk);
4652 return (error);
4653 }
4654 ACQUIRE_LOCK(&lk);
4655 break;
4656 }
4657 if (adp != NULL)
4658 continue;
4659 /*
4660 * If this was pass 2, we are done; otherwise switch to pass 2.
4661 */
4662 if (waitfor == MNT_WAIT)
4663 break;
4664 waitfor = MNT_WAIT;
4665 }
4666 /*
4667 * Try freeing inodedep in case all dependencies have been removed.
4668 */
4669 if (inodedep_lookup(fs, ino, 0, &inodedep) != 0)
4670 (void) free_inodedep(inodedep);
4671 return (0);
4672 }
4673
4674 /*
4675 * Eliminate a pagedep dependency by flushing out all its diradd dependencies.
4676 * Called with splbio blocked.
4677 */
4678 static int
4679 flush_pagedep_deps(struct vnode *pvp, struct mount *mp,
4680 struct diraddhd *diraddhdp)
4681 {
4682 struct inodedep *inodedep;
4683 struct ufsmount *ump;
4684 struct diradd *dap;
4685 struct vnode *vp;
4686 int gotit, error = 0;
4687 struct buf *bp;
4688 ino_t inum;
4689
4690 ump = VFSTOUFS(mp);
4691 while ((dap = LIST_FIRST(diraddhdp)) != NULL) {
4692 /*
4693 * Flush ourselves if this directory entry
4694 * has a MKDIR_PARENT dependency.
4695 */
4696 if (dap->da_state & MKDIR_PARENT) {
4697 FREE_LOCK(&lk);
4698 if ((error = ffs_update(pvp, 1)) != 0)
4699 break;
4700 ACQUIRE_LOCK(&lk);
4701 /*
4702 * If that cleared dependencies, go on to next.
4703 */
4704 if (dap != LIST_FIRST(diraddhdp))
4705 continue;
4706 if (dap->da_state & MKDIR_PARENT) {
4707 FREE_LOCK(&lk);
4708 panic("flush_pagedep_deps: MKDIR_PARENT");
4709 }
4710 }
4711 /*
4712 * A newly allocated directory must have its "." and
4713 * ".." entries written out before its name can be
4714 * committed in its parent. We do not want or need
4715 * the full semantics of a synchronous VOP_FSYNC as
4716 * that may end up here again, once for each directory
4717 * level in the filesystem. Instead, we push the blocks
4718 * and wait for them to clear. We have to fsync twice
4719 * because the first call may choose to defer blocks
4720 * that still have dependencies, but deferral will
4721 * happen at most once.
4722 */
4723 inum = dap->da_newinum;
4724 if (dap->da_state & MKDIR_BODY) {
4725 FREE_LOCK(&lk);
4726 if ((error = VFS_VGET(mp, NULL, inum, &vp)) != 0)
4727 break;
4728 if ((error=VOP_FSYNC(vp, MNT_NOWAIT, 0)) ||
4729 (error=VOP_FSYNC(vp, MNT_NOWAIT, 0))) {
4730 vput(vp);
4731 break;
4732 }
4733 drain_output(vp, 0);
4734 vput(vp);
4735 ACQUIRE_LOCK(&lk);
4736 /*
4737 * If that cleared dependencies, go on to next.
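 * (The diradd may have been retired while the lock was dropped, in
 * which case the head of the list has changed.)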
4738 */ 4739 if (dap != LIST_FIRST(diraddhdp)) 4740 continue; 4741 if (dap->da_state & MKDIR_BODY) { 4742 FREE_LOCK(&lk); 4743 panic("flush_pagedep_deps: MKDIR_BODY"); 4744 } 4745 } 4746 /* 4747 * Flush the inode on which the directory entry depends. 4748 * Having accounted for MKDIR_PARENT and MKDIR_BODY above, 4749 * the only remaining dependency is that the updated inode 4750 * count must get pushed to disk. The inode has already 4751 * been pushed into its inode buffer (via VOP_UPDATE) at 4752 * the time of the reference count change. So we need only 4753 * locate that buffer, ensure that there will be no rollback 4754 * caused by a bitmap dependency, then write the inode buffer. 4755 */ 4756 if (inodedep_lookup(ump->um_fs, inum, 0, &inodedep) == 0) { 4757 FREE_LOCK(&lk); 4758 panic("flush_pagedep_deps: lost inode"); 4759 } 4760 /* 4761 * If the inode still has bitmap dependencies, 4762 * push them to disk. 4763 */ 4764 if ((inodedep->id_state & DEPCOMPLETE) == 0) { 4765 gotit = getdirtybuf(&inodedep->id_buf, MNT_WAIT); 4766 FREE_LOCK(&lk); 4767 if (gotit && (error = bwrite(inodedep->id_buf)) != 0) 4768 break; 4769 ACQUIRE_LOCK(&lk); 4770 if (dap != LIST_FIRST(diraddhdp)) 4771 continue; 4772 } 4773 /* 4774 * If the inode is still sitting in a buffer waiting 4775 * to be written, push it to disk. 4776 */ 4777 FREE_LOCK(&lk); 4778 if ((error = bread(ump->um_devvp, 4779 fsbtodoff(ump->um_fs, ino_to_fsba(ump->um_fs, inum)), 4780 (int)ump->um_fs->fs_bsize, &bp)) != 0) 4781 break; 4782 if ((error = bwrite(bp)) != 0) 4783 break; 4784 ACQUIRE_LOCK(&lk); 4785 /* 4786 * If we have failed to get rid of all the dependencies 4787 * then something is seriously wrong. 4788 */ 4789 if (dap == LIST_FIRST(diraddhdp)) { 4790 FREE_LOCK(&lk); 4791 panic("flush_pagedep_deps: flush failed"); 4792 } 4793 } 4794 if (error) 4795 ACQUIRE_LOCK(&lk); 4796 return (error); 4797 } 4798 4799 /* 4800 * A large burst of file addition or deletion activity can drive the 4801 * memory load excessively high. First attempt to slow things down 4802 * using the techniques below. If that fails, this routine requests 4803 * the offending operations to fall back to running synchronously 4804 * until the memory load returns to a reasonable level. 4805 */ 4806 int 4807 softdep_slowdown(struct vnode *vp) 4808 { 4809 int max_softdeps_hard; 4810 4811 max_softdeps_hard = max_softdeps * 11 / 10; 4812 if (num_dirrem < max_softdeps_hard / 2 && 4813 num_inodedep < max_softdeps_hard) 4814 return (0); 4815 stat_sync_limit_hit += 1; 4816 return (1); 4817 } 4818 4819 /* 4820 * If memory utilization has gotten too high, deliberately slow things 4821 * down and speed up the I/O processing. 4822 */ 4823 static int 4824 request_cleanup(int resource, int islocked) 4825 { 4826 struct thread *td = curthread; /* XXX */ 4827 4828 /* 4829 * We never hold up the filesystem syncer process. 4830 */ 4831 if (td == filesys_syncer) 4832 return (0); 4833 /* 4834 * First check to see if the work list has gotten backlogged. 4835 * If it has, co-opt this process to help clean up two entries. 4836 * Because this process may hold inodes locked, we cannot 4837 * handle any remove requests that might block on a locked 4838 * inode as that could lead to deadlock. 
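 * Hence the two process_worklist_item() calls below pass LK_NOWAIT so
 * they never block on a vnode lock.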
4839 */ 4840 if (num_on_worklist > max_softdeps / 10) { 4841 if (islocked) 4842 FREE_LOCK(&lk); 4843 process_worklist_item(NULL, LK_NOWAIT); 4844 process_worklist_item(NULL, LK_NOWAIT); 4845 stat_worklist_push += 2; 4846 if (islocked) 4847 ACQUIRE_LOCK(&lk); 4848 return(1); 4849 } 4850 4851 /* 4852 * If we are resource constrained on inode dependencies, try 4853 * flushing some dirty inodes. Otherwise, we are constrained 4854 * by file deletions, so try accelerating flushes of directories 4855 * with removal dependencies. We would like to do the cleanup 4856 * here, but we probably hold an inode locked at this point and 4857 * that might deadlock against one that we try to clean. So, 4858 * the best that we can do is request the syncer daemon to do 4859 * the cleanup for us. 4860 */ 4861 switch (resource) { 4862 4863 case FLUSH_INODES: 4864 stat_ino_limit_push += 1; 4865 req_clear_inodedeps += 1; 4866 stat_countp = &stat_ino_limit_hit; 4867 break; 4868 4869 case FLUSH_REMOVE: 4870 stat_blk_limit_push += 1; 4871 req_clear_remove += 1; 4872 stat_countp = &stat_blk_limit_hit; 4873 break; 4874 4875 default: 4876 if (islocked) 4877 FREE_LOCK(&lk); 4878 panic("request_cleanup: unknown type"); 4879 } 4880 /* 4881 * Hopefully the syncer daemon will catch up and awaken us. 4882 * We wait at most tickdelay before proceeding in any case. 4883 */ 4884 if (islocked == 0) 4885 ACQUIRE_LOCK(&lk); 4886 proc_waiting += 1; 4887 if (!callout_active(&handle)) 4888 callout_reset(&handle, tickdelay > 2 ? tickdelay : 2, 4889 pause_timer, NULL); 4890 interlocked_sleep(&lk, SLEEP, (caddr_t)&proc_waiting, 0, 4891 "softupdate", 0); 4892 proc_waiting -= 1; 4893 if (islocked == 0) 4894 FREE_LOCK(&lk); 4895 return (1); 4896 } 4897 4898 /* 4899 * Awaken processes pausing in request_cleanup and clear proc_waiting 4900 * to indicate that there is no longer a timer running. 4901 */ 4902 void 4903 pause_timer(void *arg) 4904 { 4905 *stat_countp += 1; 4906 wakeup_one(&proc_waiting); 4907 if (proc_waiting > 0) 4908 callout_reset(&handle, tickdelay > 2 ? tickdelay : 2, 4909 pause_timer, NULL); 4910 else 4911 callout_deactivate(&handle); 4912 } 4913 4914 /* 4915 * Flush out a directory with at least one removal dependency in an effort to 4916 * reduce the number of dirrem, freefile, and freeblks dependency structures. 4917 */ 4918 static void 4919 clear_remove(struct thread *td) 4920 { 4921 struct pagedep_hashhead *pagedephd; 4922 struct pagedep *pagedep; 4923 static int next = 0; 4924 struct mount *mp; 4925 struct vnode *vp; 4926 int error, cnt; 4927 ino_t ino; 4928 4929 ACQUIRE_LOCK(&lk); 4930 for (cnt = 0; cnt < pagedep_hash; cnt++) { 4931 pagedephd = &pagedep_hashtbl[next++]; 4932 if (next >= pagedep_hash) 4933 next = 0; 4934 LIST_FOREACH(pagedep, pagedephd, pd_hash) { 4935 if (LIST_FIRST(&pagedep->pd_dirremhd) == NULL) 4936 continue; 4937 mp = pagedep->pd_mnt; 4938 ino = pagedep->pd_ino; 4939 FREE_LOCK(&lk); 4940 if ((error = VFS_VGET(mp, NULL, ino, &vp)) != 0) { 4941 softdep_error("clear_remove: vget", error); 4942 return; 4943 } 4944 if ((error = VOP_FSYNC(vp, MNT_NOWAIT, 0))) 4945 softdep_error("clear_remove: fsync", error); 4946 drain_output(vp, 0); 4947 vput(vp); 4948 return; 4949 } 4950 } 4951 FREE_LOCK(&lk); 4952 } 4953 4954 /* 4955 * Clear out a block of dirty inodes in an effort to reduce 4956 * the number of inodedep dependency structures. 
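 * We pick a hash bucket round-robin, take the first inodedep found
 * there, and flush every inode with dependencies in the same inode
 * block, pushing the last one synchronously so the inode block itself
 * gets written.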
4957 */ 4958 struct clear_inodedeps_info { 4959 struct fs *fs; 4960 struct mount *mp; 4961 }; 4962 4963 static int 4964 clear_inodedeps_mountlist_callback(struct mount *mp, void *data) 4965 { 4966 struct clear_inodedeps_info *info = data; 4967 4968 if ((mp->mnt_flag & MNT_SOFTDEP) && info->fs == VFSTOUFS(mp)->um_fs) { 4969 info->mp = mp; 4970 return(-1); 4971 } 4972 return(0); 4973 } 4974 4975 static void 4976 clear_inodedeps(struct thread *td) 4977 { 4978 struct clear_inodedeps_info info; 4979 struct inodedep_hashhead *inodedephd; 4980 struct inodedep *inodedep; 4981 static int next = 0; 4982 struct vnode *vp; 4983 struct fs *fs; 4984 int error, cnt; 4985 ino_t firstino, lastino, ino; 4986 4987 ACQUIRE_LOCK(&lk); 4988 /* 4989 * Pick a random inode dependency to be cleared. 4990 * We will then gather up all the inodes in its block 4991 * that have dependencies and flush them out. 4992 */ 4993 for (cnt = 0; cnt < inodedep_hash; cnt++) { 4994 inodedephd = &inodedep_hashtbl[next++]; 4995 if (next >= inodedep_hash) 4996 next = 0; 4997 if ((inodedep = LIST_FIRST(inodedephd)) != NULL) 4998 break; 4999 } 5000 if (inodedep == NULL) { 5001 FREE_LOCK(&lk); 5002 return; 5003 } 5004 /* 5005 * Ugly code to find mount point given pointer to superblock. 5006 */ 5007 fs = inodedep->id_fs; 5008 info.mp = NULL; 5009 info.fs = fs; 5010 mountlist_scan(clear_inodedeps_mountlist_callback, 5011 &info, MNTSCAN_FORWARD|MNTSCAN_NOBUSY); 5012 /* 5013 * Find the last inode in the block with dependencies. 5014 */ 5015 firstino = inodedep->id_ino & ~(INOPB(fs) - 1); 5016 for (lastino = firstino + INOPB(fs) - 1; lastino > firstino; lastino--) 5017 if (inodedep_lookup(fs, lastino, 0, &inodedep) != 0) 5018 break; 5019 /* 5020 * Asynchronously push all but the last inode with dependencies. 5021 * Synchronously push the last inode with dependencies to ensure 5022 * that the inode block gets written to free up the inodedeps. 5023 */ 5024 for (ino = firstino; ino <= lastino; ino++) { 5025 if (inodedep_lookup(fs, ino, 0, &inodedep) == 0) 5026 continue; 5027 FREE_LOCK(&lk); 5028 if ((error = VFS_VGET(info.mp, NULL, ino, &vp)) != 0) { 5029 softdep_error("clear_inodedeps: vget", error); 5030 return; 5031 } 5032 if (ino == lastino) { 5033 if ((error = VOP_FSYNC(vp, MNT_WAIT, 0))) 5034 softdep_error("clear_inodedeps: fsync1", error); 5035 } else { 5036 if ((error = VOP_FSYNC(vp, MNT_NOWAIT, 0))) 5037 softdep_error("clear_inodedeps: fsync2", error); 5038 drain_output(vp, 0); 5039 } 5040 vput(vp); 5041 ACQUIRE_LOCK(&lk); 5042 } 5043 FREE_LOCK(&lk); 5044 } 5045 5046 /* 5047 * Function to determine if the buffer has outstanding dependencies 5048 * that will cause a roll-back if the buffer is written. If wantcount 5049 * is set, return number of dependencies, otherwise just yes or no. 
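 * Only bitmap (inodedep DEPCOMPLETE), direct pointer, indirect
 * pointer, and directory entry dependencies are counted; bmsafemap,
 * allocdirect, allocindir, and mkdir work items never force a
 * roll-back.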
5050 * 5051 * bioops callback - hold io_token 5052 */ 5053 static int 5054 softdep_count_dependencies(struct buf *bp, int wantcount) 5055 { 5056 struct worklist *wk; 5057 struct inodedep *inodedep; 5058 struct indirdep *indirdep; 5059 struct allocindir *aip; 5060 struct pagedep *pagedep; 5061 struct diradd *dap; 5062 int i, retval; 5063 5064 get_mplock(); 5065 5066 retval = 0; 5067 ACQUIRE_LOCK(&lk); 5068 5069 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 5070 switch (wk->wk_type) { 5071 5072 case D_INODEDEP: 5073 inodedep = WK_INODEDEP(wk); 5074 if ((inodedep->id_state & DEPCOMPLETE) == 0) { 5075 /* bitmap allocation dependency */ 5076 retval += 1; 5077 if (!wantcount) 5078 goto out; 5079 } 5080 if (TAILQ_FIRST(&inodedep->id_inoupdt)) { 5081 /* direct block pointer dependency */ 5082 retval += 1; 5083 if (!wantcount) 5084 goto out; 5085 } 5086 continue; 5087 5088 case D_INDIRDEP: 5089 indirdep = WK_INDIRDEP(wk); 5090 5091 LIST_FOREACH(aip, &indirdep->ir_deplisthd, ai_next) { 5092 /* indirect block pointer dependency */ 5093 retval += 1; 5094 if (!wantcount) 5095 goto out; 5096 } 5097 continue; 5098 5099 case D_PAGEDEP: 5100 pagedep = WK_PAGEDEP(wk); 5101 for (i = 0; i < DAHASHSZ; i++) { 5102 5103 LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) { 5104 /* directory entry dependency */ 5105 retval += 1; 5106 if (!wantcount) 5107 goto out; 5108 } 5109 } 5110 continue; 5111 5112 case D_BMSAFEMAP: 5113 case D_ALLOCDIRECT: 5114 case D_ALLOCINDIR: 5115 case D_MKDIR: 5116 /* never a dependency on these blocks */ 5117 continue; 5118 5119 default: 5120 FREE_LOCK(&lk); 5121 panic("softdep_check_for_rollback: Unexpected type %s", 5122 TYPENAME(wk->wk_type)); 5123 /* NOTREACHED */ 5124 } 5125 } 5126 out: 5127 FREE_LOCK(&lk); 5128 rel_mplock(); 5129 5130 return retval; 5131 } 5132 5133 /* 5134 * Acquire exclusive access to a buffer. 5135 * Must be called with splbio blocked. 5136 * Return 1 if buffer was acquired. 5137 */ 5138 static int 5139 getdirtybuf(struct buf **bpp, int waitfor) 5140 { 5141 struct buf *bp; 5142 int error; 5143 5144 for (;;) { 5145 if ((bp = *bpp) == NULL) 5146 return (0); 5147 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) == 0) 5148 break; 5149 if (waitfor != MNT_WAIT) 5150 return (0); 5151 error = interlocked_sleep(&lk, LOCKBUF, bp, 5152 LK_EXCLUSIVE | LK_SLEEPFAIL, 0, 0); 5153 if (error != ENOLCK) { 5154 FREE_LOCK(&lk); 5155 panic("getdirtybuf: inconsistent lock"); 5156 } 5157 } 5158 if ((bp->b_flags & B_DELWRI) == 0) { 5159 BUF_UNLOCK(bp); 5160 return (0); 5161 } 5162 bremfree(bp); 5163 return (1); 5164 } 5165 5166 /* 5167 * Wait for pending output on a vnode to complete. 5168 * Must be called with vnode locked. 5169 */ 5170 static void 5171 drain_output(struct vnode *vp, int islocked) 5172 { 5173 5174 if (!islocked) 5175 ACQUIRE_LOCK(&lk); 5176 while (bio_track_active(&vp->v_track_write)) { 5177 FREE_LOCK(&lk); 5178 bio_track_wait(&vp->v_track_write, 0, 0); 5179 ACQUIRE_LOCK(&lk); 5180 } 5181 if (!islocked) 5182 FREE_LOCK(&lk); 5183 } 5184 5185 /* 5186 * Called whenever a buffer that is being invalidated or reallocated 5187 * contains dependencies. This should only happen if an I/O error has 5188 * occurred. The routine is called with the buffer locked. 
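 * There is no way to recover the lost dependencies, so both paths
 * below end in a panic; the softdep_error() call at least logs which
 * filesystem failed first.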
5189 * 5190 * bioops callback - hold io_token 5191 */ 5192 static void 5193 softdep_deallocate_dependencies(struct buf *bp) 5194 { 5195 /* nothing to do, mp lock not needed */ 5196 if ((bp->b_flags & B_ERROR) == 0) 5197 panic("softdep_deallocate_dependencies: dangling deps"); 5198 softdep_error(bp->b_vp->v_mount->mnt_stat.f_mntfromname, bp->b_error); 5199 panic("softdep_deallocate_dependencies: unrecovered I/O error"); 5200 } 5201 5202 /* 5203 * Function to handle asynchronous write errors in the filesystem. 5204 */ 5205 void 5206 softdep_error(char *func, int error) 5207 { 5208 5209 /* XXX should do something better! */ 5210 kprintf("%s: got error %d while accessing filesystem\n", func, error); 5211 } 5212