1 /* 2 * Copyright (c) 2004-2006 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@backplane.com> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 * 34 * $DragonFly: src/sys/kern/vfs_jops.c,v 1.27 2006/05/08 18:45:51 dillon Exp $ 35 */ 36 /* 37 * Each mount point may have zero or more independantly configured journals 38 * attached to it. 
Each journal is represented by a memory FIFO and worker 39 * thread. Journal events are streamed through the FIFO to the thread, 40 * batched up (typically on one-second intervals), and written out by the 41 * thread. 42 * 43 * Journal vnode ops are executed instead of mnt_vn_norm_ops when one or 44 * more journals have been installed on a mount point. It becomes the 45 * responsibility of the journal op to call the underlying normal op as 46 * appropriate. 47 */ 48 #include <sys/param.h> 49 #include <sys/systm.h> 50 #include <sys/buf.h> 51 #include <sys/conf.h> 52 #include <sys/kernel.h> 53 #include <sys/queue.h> 54 #include <sys/lock.h> 55 #include <sys/malloc.h> 56 #include <sys/mount.h> 57 #include <sys/unistd.h> 58 #include <sys/vnode.h> 59 #include <sys/poll.h> 60 #include <sys/mountctl.h> 61 #include <sys/journal.h> 62 #include <sys/file.h> 63 #include <sys/proc.h> 64 #include <sys/msfbuf.h> 65 #include <sys/socket.h> 66 #include <sys/socketvar.h> 67 68 #include <machine/limits.h> 69 70 #include <vm/vm.h> 71 #include <vm/vm_object.h> 72 #include <vm/vm_page.h> 73 #include <vm/vm_pager.h> 74 #include <vm/vnode_pager.h> 75 76 #include <sys/file2.h> 77 #include <sys/thread2.h> 78 79 static int journal_attach(struct mount *mp); 80 static void journal_detach(struct mount *mp); 81 static int journal_install_vfs_journal(struct mount *mp, struct file *fp, 82 const struct mountctl_install_journal *info); 83 static int journal_restart_vfs_journal(struct mount *mp, struct file *fp, 84 const struct mountctl_restart_journal *info); 85 static int journal_remove_vfs_journal(struct mount *mp, 86 const struct mountctl_remove_journal *info); 87 static int journal_restart(struct mount *mp, struct file *fp, 88 struct journal *jo, int flags); 89 static int journal_destroy(struct mount *mp, struct journal *jo, int flags); 90 static int journal_resync_vfs_journal(struct mount *mp, const void *ctl); 91 static int journal_status_vfs_journal(struct mount *mp, 92 const struct 
mountctl_status_journal *info, 93 struct mountctl_journal_ret_status *rstat, 94 int buflen, int *res); 95 96 static void jrecord_undo_file(struct jrecord *jrec, struct vnode *vp, 97 int jrflags, off_t off, off_t bytes); 98 99 static int journal_setattr(struct vop_setattr_args *ap); 100 static int journal_write(struct vop_write_args *ap); 101 static int journal_fsync(struct vop_fsync_args *ap); 102 static int journal_putpages(struct vop_putpages_args *ap); 103 static int journal_setacl(struct vop_setacl_args *ap); 104 static int journal_setextattr(struct vop_setextattr_args *ap); 105 static int journal_ncreate(struct vop_ncreate_args *ap); 106 static int journal_nmknod(struct vop_nmknod_args *ap); 107 static int journal_nlink(struct vop_nlink_args *ap); 108 static int journal_nsymlink(struct vop_nsymlink_args *ap); 109 static int journal_nwhiteout(struct vop_nwhiteout_args *ap); 110 static int journal_nremove(struct vop_nremove_args *ap); 111 static int journal_nmkdir(struct vop_nmkdir_args *ap); 112 static int journal_nrmdir(struct vop_nrmdir_args *ap); 113 static int journal_nrename(struct vop_nrename_args *ap); 114 115 #define JRUNDO_SIZE 0x00000001 116 #define JRUNDO_UID 0x00000002 117 #define JRUNDO_GID 0x00000004 118 #define JRUNDO_FSID 0x00000008 119 #define JRUNDO_MODES 0x00000010 120 #define JRUNDO_INUM 0x00000020 121 #define JRUNDO_ATIME 0x00000040 122 #define JRUNDO_MTIME 0x00000080 123 #define JRUNDO_CTIME 0x00000100 124 #define JRUNDO_GEN 0x00000200 125 #define JRUNDO_FLAGS 0x00000400 126 #define JRUNDO_UDEV 0x00000800 127 #define JRUNDO_NLINK 0x00001000 128 #define JRUNDO_FILEDATA 0x00010000 129 #define JRUNDO_GETVP 0x00020000 130 #define JRUNDO_CONDLINK 0x00040000 /* write file data if link count 1 */ 131 #define JRUNDO_VATTR (JRUNDO_SIZE|JRUNDO_UID|JRUNDO_GID|JRUNDO_FSID|\ 132 JRUNDO_MODES|JRUNDO_INUM|JRUNDO_ATIME|JRUNDO_MTIME|\ 133 JRUNDO_CTIME|JRUNDO_GEN|JRUNDO_FLAGS|JRUNDO_UDEV|\ 134 JRUNDO_NLINK) 135 #define JRUNDO_ALL 
(JRUNDO_VATTR|JRUNDO_FILEDATA) 136 137 static struct vnodeopv_entry_desc journal_vnodeop_entries[] = { 138 { &vop_default_desc, vop_journal_operate_ap }, 139 { &vop_mountctl_desc, (void *)journal_mountctl }, 140 { &vop_setattr_desc, (void *)journal_setattr }, 141 { &vop_write_desc, (void *)journal_write }, 142 { &vop_fsync_desc, (void *)journal_fsync }, 143 { &vop_putpages_desc, (void *)journal_putpages }, 144 { &vop_setacl_desc, (void *)journal_setacl }, 145 { &vop_setextattr_desc, (void *)journal_setextattr }, 146 { &vop_ncreate_desc, (void *)journal_ncreate }, 147 { &vop_nmknod_desc, (void *)journal_nmknod }, 148 { &vop_nlink_desc, (void *)journal_nlink }, 149 { &vop_nsymlink_desc, (void *)journal_nsymlink }, 150 { &vop_nwhiteout_desc, (void *)journal_nwhiteout }, 151 { &vop_nremove_desc, (void *)journal_nremove }, 152 { &vop_nmkdir_desc, (void *)journal_nmkdir }, 153 { &vop_nrmdir_desc, (void *)journal_nrmdir }, 154 { &vop_nrename_desc, (void *)journal_nrename }, 155 { NULL, NULL } 156 }; 157 158 static MALLOC_DEFINE(M_JOURNAL, "journal", "Journaling structures"); 159 static MALLOC_DEFINE(M_JFIFO, "journal-fifo", "Journal FIFO"); 160 161 int 162 journal_mountctl(struct vop_mountctl_args *ap) 163 { 164 struct mount *mp; 165 int error = 0; 166 167 mp = ap->a_head.a_ops->vv_mount; 168 KKASSERT(mp); 169 170 if (mp->mnt_vn_journal_ops == NULL) { 171 switch(ap->a_op) { 172 case MOUNTCTL_INSTALL_VFS_JOURNAL: 173 error = journal_attach(mp); 174 if (error == 0 && ap->a_ctllen != sizeof(struct mountctl_install_journal)) 175 error = EINVAL; 176 if (error == 0 && ap->a_fp == NULL) 177 error = EBADF; 178 if (error == 0) 179 error = journal_install_vfs_journal(mp, ap->a_fp, ap->a_ctl); 180 if (TAILQ_EMPTY(&mp->mnt_jlist)) 181 journal_detach(mp); 182 break; 183 case MOUNTCTL_RESTART_VFS_JOURNAL: 184 case MOUNTCTL_REMOVE_VFS_JOURNAL: 185 case MOUNTCTL_RESYNC_VFS_JOURNAL: 186 case MOUNTCTL_STATUS_VFS_JOURNAL: 187 error = ENOENT; 188 break; 189 default: 190 error = EOPNOTSUPP; 
191 break; 192 } 193 } else { 194 switch(ap->a_op) { 195 case MOUNTCTL_INSTALL_VFS_JOURNAL: 196 if (ap->a_ctllen != sizeof(struct mountctl_install_journal)) 197 error = EINVAL; 198 if (error == 0 && ap->a_fp == NULL) 199 error = EBADF; 200 if (error == 0) 201 error = journal_install_vfs_journal(mp, ap->a_fp, ap->a_ctl); 202 break; 203 case MOUNTCTL_RESTART_VFS_JOURNAL: 204 if (ap->a_ctllen != sizeof(struct mountctl_restart_journal)) 205 error = EINVAL; 206 if (error == 0 && ap->a_fp == NULL) 207 error = EBADF; 208 if (error == 0) 209 error = journal_restart_vfs_journal(mp, ap->a_fp, ap->a_ctl); 210 break; 211 case MOUNTCTL_REMOVE_VFS_JOURNAL: 212 if (ap->a_ctllen != sizeof(struct mountctl_remove_journal)) 213 error = EINVAL; 214 if (error == 0) 215 error = journal_remove_vfs_journal(mp, ap->a_ctl); 216 if (TAILQ_EMPTY(&mp->mnt_jlist)) 217 journal_detach(mp); 218 break; 219 case MOUNTCTL_RESYNC_VFS_JOURNAL: 220 if (ap->a_ctllen != 0) 221 error = EINVAL; 222 error = journal_resync_vfs_journal(mp, ap->a_ctl); 223 break; 224 case MOUNTCTL_STATUS_VFS_JOURNAL: 225 if (ap->a_ctllen != sizeof(struct mountctl_status_journal)) 226 error = EINVAL; 227 if (error == 0) { 228 error = journal_status_vfs_journal(mp, ap->a_ctl, 229 ap->a_buf, ap->a_buflen, ap->a_res); 230 } 231 break; 232 default: 233 error = EOPNOTSUPP; 234 break; 235 } 236 } 237 return (error); 238 } 239 240 /* 241 * High level mount point setup. 
When a
 * journal is installed the journal vnodeop vector (mnt_vn_journal_ops)
 * shims the mount's normal vnode ops and a per-mount stream-id bitmap
 * is allocated for parallel transaction ids.
 */
static int
journal_attach(struct mount *mp)
{
    KKASSERT(mp->mnt_jbitmap == NULL);
    vfs_add_vnodeops(mp, &mp->mnt_vn_journal_ops, 
		     journal_vnodeop_entries, 0);
    /* one bit per stream id in [JMIN, JMAX) */
    mp->mnt_jbitmap = malloc(JREC_STREAMID_JMAX/8, M_JOURNAL, M_WAITOK|M_ZERO);
    mp->mnt_streamid = JREC_STREAMID_JMIN;
    return(0);
}

/*
 * Undo journal_attach(): remove the shim vnodeops and release the
 * stream-id bitmap.  Called once the last journal has been removed.
 */
static void
journal_detach(struct mount *mp)
{
    KKASSERT(mp->mnt_jbitmap != NULL);
    if (mp->mnt_vn_journal_ops)
	vfs_rm_vnodeops(&mp->mnt_vn_journal_ops);
    free(mp->mnt_jbitmap, M_JOURNAL);
    mp->mnt_jbitmap = NULL;
}

/*
 * Install a journal on a mount point.  Each journal has an associated worker
 * thread which is responsible for buffering and spooling the data to the
 * target.  A mount point may have multiple journals attached to it.  An
 * initial start record is generated when the journal is associated.
 */
static int
journal_install_vfs_journal(struct mount *mp, struct file *fp, 
			    const struct mountctl_install_journal *info)
{
    struct journal *jo;
    struct jrecord jrec;
    int error = 0;
    int size;

    jo = malloc(sizeof(struct journal), M_JOURNAL, M_WAITOK|M_ZERO);
    bcopy(info->id, jo->id, sizeof(jo->id));
    /* the worker-state flags are owned by the kernel, not the caller */
    jo->flags = info->flags & ~(MC_JOURNAL_WACTIVE | MC_JOURNAL_RACTIVE |
				MC_JOURNAL_STOP_REQ);

    /*
     * Memory FIFO size, round to nearest power of 2.  Clamped to
     * 64KB...128MB, default 1MB when the caller did not specify.
     */
    if (info->membufsize) {
	if (info->membufsize < 65536)
	    size = 65536;
	else if (info->membufsize > 128 * 1024 * 1024)
	    size = 128 * 1024 * 1024;
	else
	    size = (int)info->membufsize;
    } else {
	size = 1024 * 1024;
    }
    jo->fifo.size = 1;
    while (jo->fifo.size < size)
	jo->fifo.size <<= 1;

    /*
     * Other parameters.  If not specified the starting transaction id
     * will be the current date.
     */
    if (info->transid) {
	jo->transid = info->transid;
    } else {
	struct timespec ts;
	getnanotime(&ts);
	/* seconds in the high bits, nanoseconds below */
	jo->transid = ((int64_t)ts.tv_sec << 30) | ts.tv_nsec;
    }

    jo->fp = fp;

    /*
     * Allocate the memory FIFO.  M_NULLOK: a huge request may fail
     * rather than panic, turning into ENOMEM below.
     */
    jo->fifo.mask = jo->fifo.size - 1;
    jo->fifo.membase = malloc(jo->fifo.size, M_JFIFO, M_WAITOK|M_ZERO|M_NULLOK);
    if (jo->fifo.membase == NULL)
	error = ENOMEM;

    /*
     * Create the worker threads and generate the association record.
     * The descriptor reference (fhold) is only taken on success.
     */
    if (error) {
	free(jo, M_JOURNAL);
    } else {
	fhold(fp);
	journal_create_threads(jo);
	jrecord_init(jo, &jrec, JREC_STREAMID_DISCONT);
	jrecord_write(&jrec, JTYPE_ASSOCIATE, 0);
	jrecord_done(&jrec, 0);
	TAILQ_INSERT_TAIL(&mp->mnt_jlist, jo, jentry);
    }
    return(error);
}

/*
 * Restart a journal with a new descriptor.   The existing reader and writer
 * threads are terminated and a new descriptor is associated with the
 * journal.  The FIFO rindex is reset to xindex and the threads are then
 * restarted.
 */
static int
journal_restart_vfs_journal(struct mount *mp, struct file *fp,
			   const struct mountctl_restart_journal *info)
{
    struct journal *jo;
    int error;

    /* locate the journal by id; jo is NULL when the loop completes */
    TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
	if (bcmp(jo->id, info->id, sizeof(jo->id)) == 0)
	    break;
    }
    if (jo)
	error = journal_restart(mp, fp, jo, info->flags);
    else
	error = EINVAL;
    return (error);
}

/*
 * Worker for journal_restart_vfs_journal(): swap in the new descriptor.
 * Always returns 0.
 */
static int
journal_restart(struct mount *mp, struct file *fp, 
		struct journal *jo, int flags)
{
    /*
     * XXX lock the jo
     */

#if 0
    /*
     * Record the fact that we are doing a restart in the journal.
     * XXX it isn't safe to do this if the journal is being restarted
     * because it was locked up and the writer thread has already exited.
     */
    jrecord_init(jo, &jrec, JREC_STREAMID_RESTART);
    jrecord_write(&jrec, JTYPE_DISASSOCIATE, 0);
    jrecord_done(&jrec, 0);
#endif

    /*
     * Stop the reader and writer threads and clean up the current
     * descriptor.
     */
    printf("RESTART WITH FP %p KILLING %p\n", fp, jo->fp);
    journal_destroy_threads(jo, flags);

    if (jo->fp)
	fdrop(jo->fp);

    /*
     * Associate the new descriptor, reset the FIFO index (unacked data
     * will be retransmitted from xindex), and recreate the threads.
     */
    fhold(fp);
    jo->fp = fp;
    jo->fifo.rindex = jo->fifo.xindex;
    journal_create_threads(jo);

    return(0);
}

/*
 * Disassociate a journal from a mount point and terminate its worker thread.
 * A final termination record is written out before the file pointer is 
 * dropped.
 */
static int
journal_remove_vfs_journal(struct mount *mp, 
			   const struct mountctl_remove_journal *info)
{
    struct journal *jo;
    int error;

    /* locate the journal by id; jo is NULL when the loop completes */
    TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
	if (bcmp(jo->id, info->id, sizeof(jo->id)) == 0)
	    break;
    }
    if (jo)
	error = journal_destroy(mp, jo, info->flags);
    else
	error = EINVAL;
    return (error);
}

/*
 * Remove all journals associated with a mount point.  Usually called
 * by the umount code.
430 */ 431 void 432 journal_remove_all_journals(struct mount *mp, int flags) 433 { 434 struct journal *jo; 435 436 while ((jo = TAILQ_FIRST(&mp->mnt_jlist)) != NULL) { 437 journal_destroy(mp, jo, flags); 438 } 439 } 440 441 static int 442 journal_destroy(struct mount *mp, struct journal *jo, int flags) 443 { 444 struct jrecord jrec; 445 446 TAILQ_REMOVE(&mp->mnt_jlist, jo, jentry); 447 448 jrecord_init(jo, &jrec, JREC_STREAMID_DISCONT); 449 jrecord_write(&jrec, JTYPE_DISASSOCIATE, 0); 450 jrecord_done(&jrec, 0); 451 452 journal_destroy_threads(jo, flags); 453 454 if (jo->fp) 455 fdrop(jo->fp); 456 if (jo->fifo.membase) 457 free(jo->fifo.membase, M_JFIFO); 458 free(jo, M_JOURNAL); 459 460 return(0); 461 } 462 463 static int 464 journal_resync_vfs_journal(struct mount *mp, const void *ctl) 465 { 466 return(EINVAL); 467 } 468 469 static int 470 journal_status_vfs_journal(struct mount *mp, 471 const struct mountctl_status_journal *info, 472 struct mountctl_journal_ret_status *rstat, 473 int buflen, int *res) 474 { 475 struct journal *jo; 476 int error = 0; 477 int index; 478 479 index = 0; 480 *res = 0; 481 TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) { 482 if (info->index == MC_JOURNAL_INDEX_ID) { 483 if (bcmp(jo->id, info->id, sizeof(jo->id)) != 0) 484 continue; 485 } else if (info->index >= 0) { 486 if (info->index < index) 487 continue; 488 } else if (info->index != MC_JOURNAL_INDEX_ALL) { 489 continue; 490 } 491 if (buflen < sizeof(*rstat)) { 492 if (*res) 493 rstat[-1].flags |= MC_JOURNAL_STATUS_MORETOCOME; 494 else 495 error = EINVAL; 496 break; 497 } 498 bzero(rstat, sizeof(*rstat)); 499 rstat->recsize = sizeof(*rstat); 500 bcopy(jo->id, rstat->id, sizeof(jo->id)); 501 rstat->index = index; 502 rstat->membufsize = jo->fifo.size; 503 rstat->membufused = jo->fifo.windex - jo->fifo.xindex; 504 rstat->membufunacked = jo->fifo.rindex - jo->fifo.xindex; 505 rstat->bytessent = jo->total_acked; 506 rstat->fifostalls = jo->fifostalls; 507 ++rstat; 508 ++index; 509 *res += 
sizeof(*rstat); 510 buflen -= sizeof(*rstat); 511 } 512 return(error); 513 } 514 515 /************************************************************************ 516 * PARALLEL TRANSACTION SUPPORT ROUTINES * 517 ************************************************************************ 518 * 519 * JRECLIST_*() - routines which create and iterate over jrecord structures, 520 * because a mount point may have multiple attached journals. 521 */ 522 523 /* 524 * Initialize the passed jrecord_list and create a jrecord for each 525 * journal we need to write to. Unnecessary mallocs are avoided by 526 * using the passed jrecord structure as the first jrecord in the list. 527 * A starting transaction is pushed for each jrecord. 528 * 529 * Returns non-zero if any of the journals require undo records. 530 */ 531 static 532 int 533 jreclist_init(struct mount *mp, struct jrecord_list *jreclist, 534 struct jrecord *jreccache, int16_t rectype) 535 { 536 struct journal *jo; 537 struct jrecord *jrec; 538 int wantrev; 539 int count; 540 int16_t streamid; 541 542 TAILQ_INIT(&jreclist->list); 543 544 /* 545 * Select the stream ID to use for the transaction. We must select 546 * a stream ID that is not currently in use by some other parallel 547 * transaction. 548 * 549 * Don't bother calculating the next streamid when reassigning 550 * mnt_streamid, since parallel transactions are fairly rare. This 551 * also allows someone observing the raw records to clearly see 552 * when parallel transactions occur. 553 */ 554 streamid = mp->mnt_streamid; 555 count = 0; 556 while (mp->mnt_jbitmap[streamid >> 3] & (1 << (streamid & 7))) { 557 if (++streamid == JREC_STREAMID_JMAX) 558 streamid = JREC_STREAMID_JMIN; 559 if (++count == JREC_STREAMID_JMAX - JREC_STREAMID_JMIN) { 560 printf("jreclist_init: all streamid's in use! 
sleeping\n"); 561 tsleep(jreclist, 0, "jsidfl", hz * 10); 562 count = 0; 563 } 564 } 565 mp->mnt_jbitmap[streamid >> 3] |= 1 << (streamid & 7); 566 mp->mnt_streamid = streamid; 567 jreclist->streamid = streamid; 568 569 /* 570 * Now initialize a stream on each journal. 571 */ 572 count = 0; 573 wantrev = 0; 574 TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) { 575 if (count == 0) 576 jrec = jreccache; 577 else 578 jrec = malloc(sizeof(*jrec), M_JOURNAL, M_WAITOK); 579 jrecord_init(jo, jrec, streamid); 580 jrec->user_save = jrecord_push(jrec, rectype); 581 TAILQ_INSERT_TAIL(&jreclist->list, jrec, user_entry); 582 if (jo->flags & MC_JOURNAL_WANT_REVERSABLE) 583 wantrev = 1; 584 ++count; 585 } 586 return(wantrev); 587 } 588 589 /* 590 * Terminate the journaled transactions started by jreclist_init(). If 591 * an error occured, the transaction records will be aborted. 592 */ 593 static 594 void 595 jreclist_done(struct mount *mp, struct jrecord_list *jreclist, int error) 596 { 597 struct jrecord *jrec; 598 int count; 599 600 /* 601 * Cleanup the jrecord state on each journal. 602 */ 603 TAILQ_FOREACH(jrec, &jreclist->list, user_entry) { 604 jrecord_pop(jrec, jrec->user_save); 605 jrecord_done(jrec, error); 606 } 607 608 /* 609 * Free allocated jrec's (the first is always supplied) 610 */ 611 count = 0; 612 while ((jrec = TAILQ_FIRST(&jreclist->list)) != NULL) { 613 TAILQ_REMOVE(&jreclist->list, jrec, user_entry); 614 if (count) 615 free(jrec, M_JOURNAL); 616 ++count; 617 } 618 619 /* 620 * Clear the streamid so it can be reused. 621 */ 622 mp->mnt_jbitmap[jreclist->streamid >> 3] &= ~(1 << (jreclist->streamid & 7)); 623 } 624 625 /* 626 * This procedure writes out UNDO records for available reversable 627 * journals. 628 * 629 * XXX could use improvement. There is no need to re-read the file 630 * for each journal. 
631 */ 632 static 633 void 634 jreclist_undo_file(struct jrecord_list *jreclist, struct vnode *vp, 635 int jrflags, off_t off, off_t bytes) 636 { 637 struct jrecord *jrec; 638 int error; 639 640 error = 0; 641 if (jrflags & JRUNDO_GETVP) 642 error = vget(vp, LK_SHARED); 643 if (error == 0) { 644 TAILQ_FOREACH(jrec, &jreclist->list, user_entry) { 645 if (jrec->jo->flags & MC_JOURNAL_WANT_REVERSABLE) { 646 jrecord_undo_file(jrec, vp, jrflags, off, bytes); 647 } 648 } 649 } 650 if (error == 0 && jrflags & JRUNDO_GETVP) 651 vput(vp); 652 } 653 654 /************************************************************************ 655 * LOW LEVEL UNDO SUPPORT ROUTINE * 656 ************************************************************************ 657 * 658 * This function is used to support UNDO records. It will generate an 659 * appropriate record with the requested portion of the file data. Note 660 * that file data is only recorded if JRUNDO_FILEDATA is passed. If bytes 661 * is -1, it will be set to the size of the file. 662 */ 663 static void 664 jrecord_undo_file(struct jrecord *jrec, struct vnode *vp, int jrflags, 665 off_t off, off_t bytes) 666 { 667 struct vattr attr; 668 void *save1; /* warning, save pointers do not always remain valid */ 669 void *save2; 670 int error; 671 672 /* 673 * Setup. Start the UNDO record, obtain a shared lock on the vnode, 674 * and retrieve attribute info. 675 */ 676 save1 = jrecord_push(jrec, JTYPE_UNDO); 677 error = VOP_GETATTR(vp, &attr); 678 if (error) 679 goto done; 680 681 /* 682 * Generate UNDO records as requested. 
683 */ 684 if (jrflags & JRUNDO_VATTR) { 685 save2 = jrecord_push(jrec, JTYPE_VATTR); 686 jrecord_leaf(jrec, JLEAF_VTYPE, &attr.va_type, sizeof(attr.va_type)); 687 if ((jrflags & JRUNDO_NLINK) && attr.va_nlink != VNOVAL) 688 jrecord_leaf(jrec, JLEAF_NLINK, &attr.va_nlink, sizeof(attr.va_nlink)); 689 if ((jrflags & JRUNDO_SIZE) && attr.va_size != VNOVAL) 690 jrecord_leaf(jrec, JLEAF_SIZE, &attr.va_size, sizeof(attr.va_size)); 691 if ((jrflags & JRUNDO_UID) && attr.va_uid != VNOVAL) 692 jrecord_leaf(jrec, JLEAF_UID, &attr.va_uid, sizeof(attr.va_uid)); 693 if ((jrflags & JRUNDO_GID) && attr.va_gid != VNOVAL) 694 jrecord_leaf(jrec, JLEAF_GID, &attr.va_gid, sizeof(attr.va_gid)); 695 if ((jrflags & JRUNDO_FSID) && attr.va_fsid != VNOVAL) 696 jrecord_leaf(jrec, JLEAF_FSID, &attr.va_fsid, sizeof(attr.va_fsid)); 697 if ((jrflags & JRUNDO_MODES) && attr.va_mode != (mode_t)VNOVAL) 698 jrecord_leaf(jrec, JLEAF_MODES, &attr.va_mode, sizeof(attr.va_mode)); 699 if ((jrflags & JRUNDO_INUM) && attr.va_fileid != VNOVAL) 700 jrecord_leaf(jrec, JLEAF_INUM, &attr.va_fileid, sizeof(attr.va_fileid)); 701 if ((jrflags & JRUNDO_ATIME) && attr.va_atime.tv_sec != VNOVAL) 702 jrecord_leaf(jrec, JLEAF_ATIME, &attr.va_atime, sizeof(attr.va_atime)); 703 if ((jrflags & JRUNDO_MTIME) && attr.va_mtime.tv_sec != VNOVAL) 704 jrecord_leaf(jrec, JLEAF_MTIME, &attr.va_mtime, sizeof(attr.va_mtime)); 705 if ((jrflags & JRUNDO_CTIME) && attr.va_ctime.tv_sec != VNOVAL) 706 jrecord_leaf(jrec, JLEAF_CTIME, &attr.va_ctime, sizeof(attr.va_ctime)); 707 if ((jrflags & JRUNDO_GEN) && attr.va_gen != VNOVAL) 708 jrecord_leaf(jrec, JLEAF_GEN, &attr.va_gen, sizeof(attr.va_gen)); 709 if ((jrflags & JRUNDO_FLAGS) && attr.va_flags != VNOVAL) 710 jrecord_leaf(jrec, JLEAF_FLAGS, &attr.va_flags, sizeof(attr.va_flags)); 711 if ((jrflags & JRUNDO_UDEV) && attr.va_rdev != VNOVAL) 712 jrecord_leaf(jrec, JLEAF_UDEV, &attr.va_rdev, sizeof(attr.va_rdev)); 713 jrecord_pop(jrec, save2); 714 } 715 716 /* 717 * Output the file data 
being overwritten by reading the file and 718 * writing it out to the journal prior to the write operation. We 719 * do not need to write out data past the current file EOF. 720 * 721 * XXX support JRUNDO_CONDLINK - do not write out file data for files 722 * with a link count > 1. The undo code needs to locate the inode and 723 * regenerate the hardlink. 724 */ 725 if ((jrflags & JRUNDO_FILEDATA) && attr.va_type == VREG) { 726 if (attr.va_size != VNOVAL) { 727 if (bytes == -1) 728 bytes = attr.va_size - off; 729 if (off + bytes > attr.va_size) 730 bytes = attr.va_size - off; 731 if (bytes > 0) 732 jrecord_file_data(jrec, vp, off, bytes); 733 } else { 734 error = EINVAL; 735 } 736 } 737 if ((jrflags & JRUNDO_FILEDATA) && attr.va_type == VLNK) { 738 struct iovec aiov; 739 struct uio auio; 740 char *buf; 741 742 buf = malloc(PATH_MAX, M_JOURNAL, M_WAITOK); 743 aiov.iov_base = buf; 744 aiov.iov_len = PATH_MAX; 745 auio.uio_iov = &aiov; 746 auio.uio_iovcnt = 1; 747 auio.uio_offset = 0; 748 auio.uio_rw = UIO_READ; 749 auio.uio_segflg = UIO_SYSSPACE; 750 auio.uio_td = curthread; 751 auio.uio_resid = PATH_MAX; 752 error = VOP_READLINK(vp, &auio, proc0.p_ucred); 753 if (error == 0) { 754 jrecord_leaf(jrec, JLEAF_SYMLINKDATA, buf, 755 PATH_MAX - auio.uio_resid); 756 } 757 free(buf, M_JOURNAL); 758 } 759 done: 760 if (error) 761 jrecord_leaf(jrec, JLEAF_ERROR, &error, sizeof(error)); 762 jrecord_pop(jrec, save1); 763 } 764 765 /************************************************************************ 766 * JOURNAL VNOPS * 767 ************************************************************************ 768 * 769 * These are function shims replacing the normal filesystem ops. We become 770 * responsible for calling the underlying filesystem ops. We have the choice 771 * of executing the underlying op first and then generating the journal entry, 772 * or starting the journal entry, executing the underlying op, and then 773 * either completing or aborting it. 
774 * 775 * The journal is supposed to be a high-level entity, which generally means 776 * identifying files by name rather then by inode. Supplying both allows 777 * the journal to be used both for inode-number-compatible 'mirrors' and 778 * for simple filesystem replication. 779 * 780 * Writes are particularly difficult to deal with because a single write may 781 * represent a hundred megabyte buffer or more, and both writes and truncations 782 * require the 'old' data to be written out as well as the new data if the 783 * log is reversable. Other issues: 784 * 785 * - How to deal with operations on unlinked files (no path available), 786 * but which may still be filesystem visible due to hard links. 787 * 788 * - How to deal with modifications made via a memory map. 789 * 790 * - Future cache coherency support will require cache coherency API calls 791 * both prior to and after the call to the underlying VFS. 792 * 793 * ALSO NOTE: We do not have to shim compatibility VOPs like MKDIR which have 794 * new VFS equivalents (NMKDIR). 
795 */ 796 797 /* 798 * Journal vop_settattr { a_vp, a_vap, a_cred, a_td } 799 */ 800 static 801 int 802 journal_setattr(struct vop_setattr_args *ap) 803 { 804 struct jrecord_list jreclist; 805 struct jrecord jreccache; 806 struct jrecord *jrec; 807 struct mount *mp; 808 void *save; 809 int error; 810 811 mp = ap->a_head.a_ops->vv_mount; 812 if (jreclist_init(mp, &jreclist, &jreccache, JTYPE_SETATTR)) { 813 jreclist_undo_file(&jreclist, ap->a_vp, JRUNDO_VATTR, 0, 0); 814 } 815 error = vop_journal_operate_ap(&ap->a_head); 816 if (error == 0) { 817 TAILQ_FOREACH(jrec, &jreclist.list, user_entry) { 818 jrecord_write_cred(jrec, curthread, ap->a_cred); 819 jrecord_write_vnode_ref(jrec, ap->a_vp); 820 save = jrecord_push(jrec, JTYPE_REDO); 821 jrecord_write_vattr(jrec, ap->a_vap); 822 jrecord_pop(jrec, save); 823 } 824 } 825 jreclist_done(mp, &jreclist, error); 826 return (error); 827 } 828 829 /* 830 * Journal vop_write { a_vp, a_uio, a_ioflag, a_cred } 831 */ 832 static 833 int 834 journal_write(struct vop_write_args *ap) 835 { 836 struct jrecord_list jreclist; 837 struct jrecord jreccache; 838 struct jrecord *jrec; 839 struct mount *mp; 840 struct uio uio_copy; 841 struct iovec uio_one_iovec; 842 void *save; 843 int error; 844 845 /* 846 * This is really nasty. UIO's don't retain sufficient information to 847 * be reusable once they've gone through the VOP chain. The iovecs get 848 * cleared, so we have to copy the UIO. 849 * 850 * XXX fix the UIO code to not destroy iov's during a scan so we can 851 * reuse the uio over and over again. 852 * 853 * XXX UNDO code needs to journal the old data prior to the write. 
854 */ 855 uio_copy = *ap->a_uio; 856 if (uio_copy.uio_iovcnt == 1) { 857 uio_one_iovec = ap->a_uio->uio_iov[0]; 858 uio_copy.uio_iov = &uio_one_iovec; 859 } else { 860 uio_copy.uio_iov = malloc(uio_copy.uio_iovcnt * sizeof(struct iovec), 861 M_JOURNAL, M_WAITOK); 862 bcopy(ap->a_uio->uio_iov, uio_copy.uio_iov, 863 uio_copy.uio_iovcnt * sizeof(struct iovec)); 864 } 865 866 /* 867 * Write out undo data. Note that uio_offset is incorrect if 868 * IO_APPEND is set, but fortunately we have no undo file data to 869 * write out in that case. 870 */ 871 mp = ap->a_head.a_ops->vv_mount; 872 if (jreclist_init(mp, &jreclist, &jreccache, JTYPE_WRITE)) { 873 if (ap->a_ioflag & IO_APPEND) { 874 jreclist_undo_file(&jreclist, ap->a_vp, JRUNDO_SIZE|JRUNDO_MTIME, 0, 0); 875 } else { 876 jreclist_undo_file(&jreclist, ap->a_vp, 877 JRUNDO_FILEDATA|JRUNDO_SIZE|JRUNDO_MTIME, 878 uio_copy.uio_offset, uio_copy.uio_resid); 879 } 880 } 881 error = vop_journal_operate_ap(&ap->a_head); 882 883 /* 884 * XXX bad hack to figure out the offset for O_APPEND writes (note: 885 * uio field state after the VFS operation). 886 */ 887 uio_copy.uio_offset = ap->a_uio->uio_offset - 888 (uio_copy.uio_resid - ap->a_uio->uio_resid); 889 890 /* 891 * Output the write data to the journal. 
892 */ 893 if (error == 0) { 894 TAILQ_FOREACH(jrec, &jreclist.list, user_entry) { 895 jrecord_write_cred(jrec, NULL, ap->a_cred); 896 jrecord_write_vnode_ref(jrec, ap->a_vp); 897 save = jrecord_push(jrec, JTYPE_REDO); 898 jrecord_write_uio(jrec, JLEAF_FILEDATA, &uio_copy); 899 jrecord_pop(jrec, save); 900 } 901 } 902 jreclist_done(mp, &jreclist, error); 903 904 if (uio_copy.uio_iov != &uio_one_iovec) 905 free(uio_copy.uio_iov, M_JOURNAL); 906 return (error); 907 } 908 909 /* 910 * Journal vop_fsync { a_vp, a_waitfor, a_td } 911 */ 912 static 913 int 914 journal_fsync(struct vop_fsync_args *ap) 915 { 916 #if 0 917 struct mount *mp; 918 struct journal *jo; 919 #endif 920 int error; 921 922 error = vop_journal_operate_ap(&ap->a_head); 923 #if 0 924 mp = ap->a_head.a_ops->vv_mount; 925 if (error == 0) { 926 TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) { 927 /* XXX synchronize pending journal records */ 928 } 929 } 930 #endif 931 return (error); 932 } 933 934 /* 935 * Journal vop_putpages { a_vp, a_m, a_count, a_sync, a_rtvals, a_offset } 936 * 937 * note: a_count is in bytes. 
938 */ 939 static 940 int 941 journal_putpages(struct vop_putpages_args *ap) 942 { 943 struct jrecord_list jreclist; 944 struct jrecord jreccache; 945 struct jrecord *jrec; 946 struct mount *mp; 947 void *save; 948 int error; 949 950 mp = ap->a_head.a_ops->vv_mount; 951 if (jreclist_init(mp, &jreclist, &jreccache, JTYPE_PUTPAGES) && 952 ap->a_count > 0 953 ) { 954 jreclist_undo_file(&jreclist, ap->a_vp, 955 JRUNDO_FILEDATA|JRUNDO_SIZE|JRUNDO_MTIME, 956 ap->a_offset, btoc(ap->a_count)); 957 } 958 error = vop_journal_operate_ap(&ap->a_head); 959 if (error == 0 && ap->a_count > 0) { 960 TAILQ_FOREACH(jrec, &jreclist.list, user_entry) { 961 jrecord_write_vnode_ref(jrec, ap->a_vp); 962 save = jrecord_push(jrec, JTYPE_REDO); 963 jrecord_write_pagelist(jrec, JLEAF_FILEDATA, ap->a_m, ap->a_rtvals, 964 btoc(ap->a_count), ap->a_offset); 965 jrecord_pop(jrec, save); 966 } 967 } 968 jreclist_done(mp, &jreclist, error); 969 return (error); 970 } 971 972 /* 973 * Journal vop_setacl { a_vp, a_type, a_aclp, a_cred, a_td } 974 */ 975 static 976 int 977 journal_setacl(struct vop_setacl_args *ap) 978 { 979 struct jrecord_list jreclist; 980 struct jrecord jreccache; 981 struct jrecord *jrec; 982 struct mount *mp; 983 int error; 984 985 mp = ap->a_head.a_ops->vv_mount; 986 jreclist_init(mp, &jreclist, &jreccache, JTYPE_SETACL); 987 error = vop_journal_operate_ap(&ap->a_head); 988 if (error == 0) { 989 TAILQ_FOREACH(jrec, &jreclist.list, user_entry) { 990 #if 0 991 if ((jo->flags & MC_JOURNAL_WANT_REVERSABLE)) 992 jrecord_undo_file(jrec, ap->a_vp, JRUNDO_XXX, 0, 0); 993 #endif 994 jrecord_write_cred(jrec, curthread, ap->a_cred); 995 jrecord_write_vnode_ref(jrec, ap->a_vp); 996 #if 0 997 save = jrecord_push(jrec, JTYPE_REDO); 998 /* XXX type, aclp */ 999 jrecord_pop(jrec, save); 1000 #endif 1001 } 1002 } 1003 jreclist_done(mp, &jreclist, error); 1004 return (error); 1005 } 1006 1007 /* 1008 * Journal vop_setextattr { a_vp, a_name, a_uio, a_cred, a_td } 1009 */ 1010 static 1011 int 1012 
journal_setextattr(struct vop_setextattr_args *ap)
{
	struct jrecord_list jreclist;
	struct jrecord jreccache;
	struct jrecord *jrec;
	struct mount *mp;
	void *save;
	int error;

	mp = ap->a_head.a_ops->vv_mount;
	jreclist_init(mp, &jreclist, &jreccache, JTYPE_SETEXTATTR);
	error = vop_journal_operate_ap(&ap->a_head);
	if (error == 0) {
		TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
#if 0
			if ((jo->flags & MC_JOURNAL_WANT_REVERSABLE))
				jrecord_undo_file(jrec, ap->a_vp, JRUNDO_XXX, 0, 0);
#endif
			jrecord_write_cred(jrec, curthread, ap->a_cred);
			jrecord_write_vnode_ref(jrec, ap->a_vp);
			jrecord_leaf(jrec, JLEAF_ATTRNAME, ap->a_name, strlen(ap->a_name));
			save = jrecord_push(jrec, JTYPE_REDO);
			/*
			 * NOTE(review): a_uio has already been consumed by
			 * the underlying op at this point; unlike
			 * journal_write no copy was saved beforehand.
			 * Verify the recorded FILEDATA is not empty.
			 */
			jrecord_write_uio(jrec, JLEAF_FILEDATA, ap->a_uio);
			jrecord_pop(jrec, save);
		}
	}
	jreclist_done(mp, &jreclist, error);
	return (error);
}

/*
 * Journal vop_ncreate { a_ncp, a_vpp, a_cred, a_vap }
 */
static
int
journal_ncreate(struct vop_ncreate_args *ap)
{
	struct jrecord_list jreclist;
	struct jrecord jreccache;
	struct jrecord *jrec;
	struct mount *mp;
	void *save;
	int error;

	mp = ap->a_head.a_ops->vv_mount;
	jreclist_init(mp, &jreclist, &jreccache, JTYPE_CREATE);
	error = vop_journal_operate_ap(&ap->a_head);
	if (error == 0) {
		/* record creds, path, new vnode (if returned), and vattr REDO */
		TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
			jrecord_write_cred(jrec, NULL, ap->a_cred);
			jrecord_write_path(jrec, JLEAF_PATH1, ap->a_ncp);
			if (*ap->a_vpp)
				jrecord_write_vnode_ref(jrec, *ap->a_vpp);
			save = jrecord_push(jrec, JTYPE_REDO);
			jrecord_write_vattr(jrec, ap->a_vap);
			jrecord_pop(jrec, save);
		}
	}
	jreclist_done(mp, &jreclist, error);
	return (error);
}

/*
 * Journal vop_nmknod { a_ncp, a_vpp, a_cred, a_vap }
 */
static
int
journal_nmknod(struct vop_nmknod_args *ap)
{
	struct jrecord_list jreclist;
	struct jrecord jreccache;
	struct jrecord *jrec;
	struct mount *mp;
	void *save;
	int error;

	mp = ap->a_head.a_ops->vv_mount;
	jreclist_init(mp, &jreclist, &jreccache, JTYPE_MKNOD);
	error = vop_journal_operate_ap(&ap->a_head);
	if (error == 0) {
		TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
			jrecord_write_cred(jrec, NULL, ap->a_cred);
			jrecord_write_path(jrec, JLEAF_PATH1, ap->a_ncp);
			save = jrecord_push(jrec, JTYPE_REDO);
			jrecord_write_vattr(jrec, ap->a_vap);
			jrecord_pop(jrec, save);
			if (*ap->a_vpp)
				jrecord_write_vnode_ref(jrec, *ap->a_vpp);
		}
	}
	jreclist_done(mp, &jreclist, error);
	return (error);
}

/*
 * Journal vop_nlink { a_ncp, a_vp, a_cred }
 */
static
int
journal_nlink(struct vop_nlink_args *ap)
{
	struct jrecord_list jreclist;
	struct jrecord jreccache;
	struct jrecord *jrec;
	struct mount *mp;
	void *save;
	int error;

	mp = ap->a_head.a_ops->vv_mount;
	jreclist_init(mp, &jreclist, &jreccache, JTYPE_LINK);
	error = vop_journal_operate_ap(&ap->a_head);
	if (error == 0) {
		TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
			jrecord_write_cred(jrec, NULL, ap->a_cred);
			jrecord_write_path(jrec, JLEAF_PATH1, ap->a_ncp);
			/* XXX PATH to VP and inode number */
			/* XXX this call may not record the correct path when
			 * multiple paths are available */
			save = jrecord_push(jrec, JTYPE_REDO);
			jrecord_write_vnode_link(jrec, ap->a_vp, ap->a_ncp);
			jrecord_pop(jrec, save);
		}
	}
	jreclist_done(mp, &jreclist, error);
	return (error);
}

/*
 * Journal vop_symlink { a_ncp, a_vpp, a_cred, a_vap, a_target }
 */
static
int
journal_nsymlink(struct vop_nsymlink_args *ap)
{
	struct jrecord_list jreclist;
struct jrecord jreccache; 1148 struct jrecord *jrec; 1149 struct mount *mp; 1150 void *save; 1151 int error; 1152 1153 mp = ap->a_head.a_ops->vv_mount; 1154 jreclist_init(mp, &jreclist, &jreccache, JTYPE_SYMLINK); 1155 error = vop_journal_operate_ap(&ap->a_head); 1156 if (error == 0) { 1157 TAILQ_FOREACH(jrec, &jreclist.list, user_entry) { 1158 jrecord_write_cred(jrec, NULL, ap->a_cred); 1159 jrecord_write_path(jrec, JLEAF_PATH1, ap->a_ncp); 1160 save = jrecord_push(jrec, JTYPE_REDO); 1161 jrecord_leaf(jrec, JLEAF_SYMLINKDATA, 1162 ap->a_target, strlen(ap->a_target)); 1163 jrecord_pop(jrec, save); 1164 if (*ap->a_vpp) 1165 jrecord_write_vnode_ref(jrec, *ap->a_vpp); 1166 } 1167 } 1168 jreclist_done(mp, &jreclist, error); 1169 return (error); 1170 } 1171 1172 /* 1173 * Journal vop_nwhiteout { a_ncp, a_cred, a_flags } 1174 */ 1175 static 1176 int 1177 journal_nwhiteout(struct vop_nwhiteout_args *ap) 1178 { 1179 struct jrecord_list jreclist; 1180 struct jrecord jreccache; 1181 struct jrecord *jrec; 1182 struct mount *mp; 1183 int error; 1184 1185 mp = ap->a_head.a_ops->vv_mount; 1186 jreclist_init(mp, &jreclist, &jreccache, JTYPE_WHITEOUT); 1187 error = vop_journal_operate_ap(&ap->a_head); 1188 if (error == 0) { 1189 TAILQ_FOREACH(jrec, &jreclist.list, user_entry) { 1190 jrecord_write_cred(jrec, NULL, ap->a_cred); 1191 jrecord_write_path(jrec, JLEAF_PATH1, ap->a_ncp); 1192 } 1193 } 1194 jreclist_done(mp, &jreclist, error); 1195 return (error); 1196 } 1197 1198 /* 1199 * Journal vop_nremove { a_ncp, a_cred } 1200 */ 1201 static 1202 int 1203 journal_nremove(struct vop_nremove_args *ap) 1204 { 1205 struct jrecord_list jreclist; 1206 struct jrecord jreccache; 1207 struct jrecord *jrec; 1208 struct mount *mp; 1209 int error; 1210 1211 mp = ap->a_head.a_ops->vv_mount; 1212 if (jreclist_init(mp, &jreclist, &jreccache, JTYPE_REMOVE) && 1213 ap->a_ncp->nc_vp 1214 ) { 1215 jreclist_undo_file(&jreclist, ap->a_ncp->nc_vp, 1216 JRUNDO_ALL|JRUNDO_GETVP|JRUNDO_CONDLINK, 0, -1); 1217 
} 1218 error = vop_journal_operate_ap(&ap->a_head); 1219 if (error == 0) { 1220 TAILQ_FOREACH(jrec, &jreclist.list, user_entry) { 1221 jrecord_write_cred(jrec, NULL, ap->a_cred); 1222 jrecord_write_path(jrec, JLEAF_PATH1, ap->a_ncp); 1223 } 1224 } 1225 jreclist_done(mp, &jreclist, error); 1226 return (error); 1227 } 1228 1229 /* 1230 * Journal vop_nmkdir { a_ncp, a_vpp, a_cred, a_vap } 1231 */ 1232 static 1233 int 1234 journal_nmkdir(struct vop_nmkdir_args *ap) 1235 { 1236 struct jrecord_list jreclist; 1237 struct jrecord jreccache; 1238 struct jrecord *jrec; 1239 struct mount *mp; 1240 int error; 1241 1242 mp = ap->a_head.a_ops->vv_mount; 1243 jreclist_init(mp, &jreclist, &jreccache, JTYPE_MKDIR); 1244 error = vop_journal_operate_ap(&ap->a_head); 1245 if (error == 0) { 1246 TAILQ_FOREACH(jrec, &jreclist.list, user_entry) { 1247 #if 0 1248 if (jo->flags & MC_JOURNAL_WANT_AUDIT) { 1249 jrecord_write_audit(jrec); 1250 } 1251 #endif 1252 jrecord_write_path(jrec, JLEAF_PATH1, ap->a_ncp); 1253 jrecord_write_cred(jrec, NULL, ap->a_cred); 1254 jrecord_write_vattr(jrec, ap->a_vap); 1255 jrecord_write_path(jrec, JLEAF_PATH1, ap->a_ncp); 1256 if (*ap->a_vpp) 1257 jrecord_write_vnode_ref(jrec, *ap->a_vpp); 1258 } 1259 } 1260 jreclist_done(mp, &jreclist, error); 1261 return (error); 1262 } 1263 1264 /* 1265 * Journal vop_nrmdir { a_ncp, a_cred } 1266 */ 1267 static 1268 int 1269 journal_nrmdir(struct vop_nrmdir_args *ap) 1270 { 1271 struct jrecord_list jreclist; 1272 struct jrecord jreccache; 1273 struct jrecord *jrec; 1274 struct mount *mp; 1275 int error; 1276 1277 mp = ap->a_head.a_ops->vv_mount; 1278 if (jreclist_init(mp, &jreclist, &jreccache, JTYPE_RMDIR)) { 1279 jreclist_undo_file(&jreclist, ap->a_ncp->nc_vp, 1280 JRUNDO_VATTR|JRUNDO_GETVP, 0, 0); 1281 } 1282 error = vop_journal_operate_ap(&ap->a_head); 1283 if (error == 0) { 1284 TAILQ_FOREACH(jrec, &jreclist.list, user_entry) { 1285 jrecord_write_cred(jrec, NULL, ap->a_cred); 1286 jrecord_write_path(jrec, JLEAF_PATH1, 
ap->a_ncp); 1287 } 1288 } 1289 jreclist_done(mp, &jreclist, error); 1290 return (error); 1291 } 1292 1293 /* 1294 * Journal vop_nrename { a_fncp, a_tncp, a_cred } 1295 */ 1296 static 1297 int 1298 journal_nrename(struct vop_nrename_args *ap) 1299 { 1300 struct jrecord_list jreclist; 1301 struct jrecord jreccache; 1302 struct jrecord *jrec; 1303 struct mount *mp; 1304 int error; 1305 1306 mp = ap->a_head.a_ops->vv_mount; 1307 if (jreclist_init(mp, &jreclist, &jreccache, JTYPE_RENAME) && 1308 ap->a_tncp->nc_vp 1309 ) { 1310 jreclist_undo_file(&jreclist, ap->a_tncp->nc_vp, 1311 JRUNDO_ALL|JRUNDO_GETVP|JRUNDO_CONDLINK, 0, -1); 1312 } 1313 error = vop_journal_operate_ap(&ap->a_head); 1314 if (error == 0) { 1315 TAILQ_FOREACH(jrec, &jreclist.list, user_entry) { 1316 jrecord_write_cred(jrec, NULL, ap->a_cred); 1317 jrecord_write_path(jrec, JLEAF_PATH1, ap->a_fncp); 1318 jrecord_write_path(jrec, JLEAF_PATH2, ap->a_tncp); 1319 } 1320 } 1321 jreclist_done(mp, &jreclist, error); 1322 return (error); 1323 } 1324 1325