xref: /dragonfly/sys/kern/vfs_jops.c (revision 685c703c)
1 /*
2  * Copyright (c) 2004-2006 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  * $DragonFly: src/sys/kern/vfs_jops.c,v 1.28 2006/07/18 22:22:12 dillon Exp $
35  */
36 /*
37  * Each mount point may have zero or more independently configured journals
38  * attached to it.  Each journal is represented by a memory FIFO and worker
39  * thread.  Journal events are streamed through the FIFO to the thread,
40  * batched up (typically on one-second intervals), and written out by the
41  * thread.
42  *
43  * Journal vnode ops are executed instead of mnt_vn_norm_ops when one or
44  * more journals have been installed on a mount point.  It becomes the
45  * responsibility of the journal op to call the underlying normal op as
46  * appropriate.
47  */
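/*
 * Illustrative userland sketch (not compiled as part of this file): how a
 * journal might be installed on a mount point through mountctl(2).  The
 * syscall signature and the mountctl_install_journal field names shown here
 * are assumptions based on the structures referenced below; consult
 * <sys/mountctl.h> for the authoritative layout.
 */
#if 0
#include <sys/types.h>
#include <sys/mountctl.h>
#include <fcntl.h>
#include <string.h>

static int
install_example_journal(const char *mtpt, const char *logpath)
{
    struct mountctl_install_journal info;
    int fd;

    /* descriptor the journal's worker thread will stream records to */
    if ((fd = open(logpath, O_WRONLY | O_CREAT | O_APPEND, 0600)) < 0)
	return (-1);
    memset(&info, 0, sizeof(info));
    strlcpy(info.id, "example", sizeof(info.id));	/* journal id */
    info.membufsize = 1024 * 1024;			/* memory FIFO hint */
    return (mountctl(mtpt, MOUNTCTL_INSTALL_VFS_JOURNAL, fd,
		     &info, sizeof(info), NULL, 0));
}
#endif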
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/buf.h>
51 #include <sys/conf.h>
52 #include <sys/kernel.h>
53 #include <sys/queue.h>
54 #include <sys/lock.h>
55 #include <sys/malloc.h>
56 #include <sys/mount.h>
57 #include <sys/unistd.h>
58 #include <sys/vnode.h>
59 #include <sys/poll.h>
60 #include <sys/mountctl.h>
61 #include <sys/journal.h>
62 #include <sys/file.h>
63 #include <sys/proc.h>
64 #include <sys/msfbuf.h>
65 #include <sys/socket.h>
66 #include <sys/socketvar.h>
67 
68 #include <machine/limits.h>
69 
70 #include <vm/vm.h>
71 #include <vm/vm_object.h>
72 #include <vm/vm_page.h>
73 #include <vm/vm_pager.h>
74 #include <vm/vnode_pager.h>
75 
76 #include <sys/file2.h>
77 #include <sys/thread2.h>
78 
79 static int journal_attach(struct mount *mp);
80 static void journal_detach(struct mount *mp);
81 static int journal_install_vfs_journal(struct mount *mp, struct file *fp,
82 			    const struct mountctl_install_journal *info);
83 static int journal_restart_vfs_journal(struct mount *mp, struct file *fp,
84 			    const struct mountctl_restart_journal *info);
85 static int journal_remove_vfs_journal(struct mount *mp,
86 			    const struct mountctl_remove_journal *info);
87 static int journal_restart(struct mount *mp, struct file *fp,
88 			    struct journal *jo, int flags);
89 static int journal_destroy(struct mount *mp, struct journal *jo, int flags);
90 static int journal_resync_vfs_journal(struct mount *mp, const void *ctl);
91 static int journal_status_vfs_journal(struct mount *mp,
92 		       const struct mountctl_status_journal *info,
93 		       struct mountctl_journal_ret_status *rstat,
94 		       int buflen, int *res);
95 
96 static void jrecord_undo_file(struct jrecord *jrec, struct vnode *vp,
97 			     int jrflags, off_t off, off_t bytes);
98 
99 static int journal_setattr(struct vop_setattr_args *ap);
100 static int journal_write(struct vop_write_args *ap);
101 static int journal_fsync(struct vop_fsync_args *ap);
102 static int journal_putpages(struct vop_putpages_args *ap);
103 static int journal_setacl(struct vop_setacl_args *ap);
104 static int journal_setextattr(struct vop_setextattr_args *ap);
105 static int journal_ncreate(struct vop_ncreate_args *ap);
106 static int journal_nmknod(struct vop_nmknod_args *ap);
107 static int journal_nlink(struct vop_nlink_args *ap);
108 static int journal_nsymlink(struct vop_nsymlink_args *ap);
109 static int journal_nwhiteout(struct vop_nwhiteout_args *ap);
110 static int journal_nremove(struct vop_nremove_args *ap);
111 static int journal_nmkdir(struct vop_nmkdir_args *ap);
112 static int journal_nrmdir(struct vop_nrmdir_args *ap);
113 static int journal_nrename(struct vop_nrename_args *ap);
114 
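/*
 * JRUNDO_* flags select which attributes and how much file data
 * jrecord_undo_file() captures when generating UNDO records.
 */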
115 #define JRUNDO_SIZE	0x00000001
116 #define JRUNDO_UID	0x00000002
117 #define JRUNDO_GID	0x00000004
118 #define JRUNDO_FSID	0x00000008
119 #define JRUNDO_MODES	0x00000010
120 #define JRUNDO_INUM	0x00000020
121 #define JRUNDO_ATIME	0x00000040
122 #define JRUNDO_MTIME	0x00000080
123 #define JRUNDO_CTIME	0x00000100
124 #define JRUNDO_GEN	0x00000200
125 #define JRUNDO_FLAGS	0x00000400
126 #define JRUNDO_UDEV	0x00000800
127 #define JRUNDO_NLINK	0x00001000
128 #define JRUNDO_FILEDATA	0x00010000
129 #define JRUNDO_GETVP	0x00020000
130 #define JRUNDO_CONDLINK	0x00040000	/* write file data if link count 1 */
131 #define JRUNDO_VATTR	(JRUNDO_SIZE|JRUNDO_UID|JRUNDO_GID|JRUNDO_FSID|\
132 			 JRUNDO_MODES|JRUNDO_INUM|JRUNDO_ATIME|JRUNDO_MTIME|\
133 			 JRUNDO_CTIME|JRUNDO_GEN|JRUNDO_FLAGS|JRUNDO_UDEV|\
134 			 JRUNDO_NLINK)
135 #define JRUNDO_ALL	(JRUNDO_VATTR|JRUNDO_FILEDATA)
136 
137 static struct vop_ops journal_vnode_vops = {
138     .vop_default =	vop_journal_operate_ap,
139     .vop_mountctl =	journal_mountctl,
140     .vop_setattr =	journal_setattr,
141     .vop_write =	journal_write,
142     .vop_fsync =	journal_fsync,
143     .vop_putpages =	journal_putpages,
144     .vop_setacl =	journal_setacl,
145     .vop_setextattr =	journal_setextattr,
146     .vop_ncreate =	journal_ncreate,
147     .vop_nmknod =	journal_nmknod,
148     .vop_nlink =	journal_nlink,
149     .vop_nsymlink =	journal_nsymlink,
150     .vop_nwhiteout =	journal_nwhiteout,
151     .vop_nremove =	journal_nremove,
152     .vop_nmkdir =	journal_nmkdir,
153     .vop_nrmdir =	journal_nrmdir,
154     .vop_nrename =	journal_nrename
155 };
156 
157 static MALLOC_DEFINE(M_JOURNAL, "journal", "Journaling structures");
158 static MALLOC_DEFINE(M_JFIFO, "journal-fifo", "Journal FIFO");
159 
160 int
161 journal_mountctl(struct vop_mountctl_args *ap)
162 {
163     struct mount *mp;
164     int error = 0;
165 
166     mp = ap->a_head.a_ops->head.vv_mount;
167     KKASSERT(mp);
168 
169     if (mp->mnt_vn_journal_ops == NULL) {
170 	switch(ap->a_op) {
171 	case MOUNTCTL_INSTALL_VFS_JOURNAL:
172 	    error = journal_attach(mp);
173 	    if (error == 0 && ap->a_ctllen != sizeof(struct mountctl_install_journal))
174 		error = EINVAL;
175 	    if (error == 0 && ap->a_fp == NULL)
176 		error = EBADF;
177 	    if (error == 0)
178 		error = journal_install_vfs_journal(mp, ap->a_fp, ap->a_ctl);
179 	    if (TAILQ_EMPTY(&mp->mnt_jlist))
180 		journal_detach(mp);
181 	    break;
182 	case MOUNTCTL_RESTART_VFS_JOURNAL:
183 	case MOUNTCTL_REMOVE_VFS_JOURNAL:
184 	case MOUNTCTL_RESYNC_VFS_JOURNAL:
185 	case MOUNTCTL_STATUS_VFS_JOURNAL:
186 	    error = ENOENT;
187 	    break;
188 	default:
189 	    error = EOPNOTSUPP;
190 	    break;
191 	}
192     } else {
193 	switch(ap->a_op) {
194 	case MOUNTCTL_INSTALL_VFS_JOURNAL:
195 	    if (ap->a_ctllen != sizeof(struct mountctl_install_journal))
196 		error = EINVAL;
197 	    if (error == 0 && ap->a_fp == NULL)
198 		error = EBADF;
199 	    if (error == 0)
200 		error = journal_install_vfs_journal(mp, ap->a_fp, ap->a_ctl);
201 	    break;
202 	case MOUNTCTL_RESTART_VFS_JOURNAL:
203 	    if (ap->a_ctllen != sizeof(struct mountctl_restart_journal))
204 		error = EINVAL;
205 	    if (error == 0 && ap->a_fp == NULL)
206 		error = EBADF;
207 	    if (error == 0)
208 		error = journal_restart_vfs_journal(mp, ap->a_fp, ap->a_ctl);
209 	    break;
210 	case MOUNTCTL_REMOVE_VFS_JOURNAL:
211 	    if (ap->a_ctllen != sizeof(struct mountctl_remove_journal))
212 		error = EINVAL;
213 	    if (error == 0)
214 		error = journal_remove_vfs_journal(mp, ap->a_ctl);
215 	    if (TAILQ_EMPTY(&mp->mnt_jlist))
216 		journal_detach(mp);
217 	    break;
218 	case MOUNTCTL_RESYNC_VFS_JOURNAL:
219 	    if (ap->a_ctllen != 0)
220 		error = EINVAL;
221 	    error = journal_resync_vfs_journal(mp, ap->a_ctl);
222 	    break;
223 	case MOUNTCTL_STATUS_VFS_JOURNAL:
224 	    if (ap->a_ctllen != sizeof(struct mountctl_status_journal))
225 		error = EINVAL;
226 	    if (error == 0) {
227 		error = journal_status_vfs_journal(mp, ap->a_ctl,
228 					ap->a_buf, ap->a_buflen, ap->a_res);
229 	    }
230 	    break;
231 	default:
232 	    error = EOPNOTSUPP;
233 	    break;
234 	}
235     }
236     return (error);
237 }
238 
239 /*
240  * High level mount point setup.  Called when the first journal is installed.
241  */
242 static int
243 journal_attach(struct mount *mp)
244 {
245     KKASSERT(mp->mnt_jbitmap == NULL);
246     vfs_add_vnodeops(mp, &journal_vnode_vops, &mp->mnt_vn_journal_ops);
247     mp->mnt_jbitmap = malloc(JREC_STREAMID_JMAX/8, M_JOURNAL, M_WAITOK|M_ZERO);
248     mp->mnt_streamid = JREC_STREAMID_JMIN;
249     return(0);
250 }
251 
252 static void
253 journal_detach(struct mount *mp)
254 {
255     KKASSERT(mp->mnt_jbitmap != NULL);
256     if (mp->mnt_vn_journal_ops)
257 	vfs_rm_vnodeops(mp, &journal_vnode_vops, &mp->mnt_vn_journal_ops);
258     free(mp->mnt_jbitmap, M_JOURNAL);
259     mp->mnt_jbitmap = NULL;
260 }
261 
262 /*
263  * Install a journal on a mount point.  Each journal has an associated worker
264  * thread which is responsible for buffering and spooling the data to the
265  * target.  A mount point may have multiple journals attached to it.  An
266  * initial start record is generated when the journal is associated.
267  */
268 static int
269 journal_install_vfs_journal(struct mount *mp, struct file *fp,
270 			    const struct mountctl_install_journal *info)
271 {
272     struct journal *jo;
273     struct jrecord jrec;
274     int error = 0;
275     int size;
276 
277     jo = malloc(sizeof(struct journal), M_JOURNAL, M_WAITOK|M_ZERO);
278     bcopy(info->id, jo->id, sizeof(jo->id));
279     jo->flags = info->flags & ~(MC_JOURNAL_WACTIVE | MC_JOURNAL_RACTIVE |
280 				MC_JOURNAL_STOP_REQ);
281 
282     /*
283      * Memory FIFO size, rounded up to a power of 2
284      */
285     if (info->membufsize) {
286 	if (info->membufsize < 65536)
287 	    size = 65536;
288 	else if (info->membufsize > 128 * 1024 * 1024)
289 	    size = 128 * 1024 * 1024;
290 	else
291 	    size = (int)info->membufsize;
292     } else {
293 	size = 1024 * 1024;
294     }
295     jo->fifo.size = 1;
296     while (jo->fifo.size < size)
297 	jo->fifo.size <<= 1;
298 
299     /*
300      * Other parameters.  If not specified the starting transaction id
301      * will be derived from the current time (seconds << 30 | nanoseconds).
302      */
303     if (info->transid) {
304 	jo->transid = info->transid;
305     } else {
306 	struct timespec ts;
307 	getnanotime(&ts);
308 	jo->transid = ((int64_t)ts.tv_sec << 30) | ts.tv_nsec;
309     }
310 
311     jo->fp = fp;
312 
313     /*
314      * Allocate the memory FIFO
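     * (fifo.size was rounded up to a power of 2 above, so fifo.mask can be
     * used to wrap FIFO offsets with a simple bitwise AND)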
315      */
316     jo->fifo.mask = jo->fifo.size - 1;
317     jo->fifo.membase = malloc(jo->fifo.size, M_JFIFO, M_WAITOK|M_ZERO|M_NULLOK);
318     if (jo->fifo.membase == NULL)
319 	error = ENOMEM;
320 
321     /*
322      * Create the worker threads and generate the association record.
323      */
324     if (error) {
325 	free(jo, M_JOURNAL);
326     } else {
327 	fhold(fp);
328 	journal_create_threads(jo);
329 	jrecord_init(jo, &jrec, JREC_STREAMID_DISCONT);
330 	jrecord_write(&jrec, JTYPE_ASSOCIATE, 0);
331 	jrecord_done(&jrec, 0);
332 	TAILQ_INSERT_TAIL(&mp->mnt_jlist, jo, jentry);
333     }
334     return(error);
335 }
336 
337 /*
338  * Restart a journal with a new descriptor.   The existing reader and writer
339  * threads are terminated and a new descriptor is associated with the
340  * journal.  The FIFO rindex is reset to xindex and the threads are then
341  * restarted.
342  */
343 static int
344 journal_restart_vfs_journal(struct mount *mp, struct file *fp,
345 			   const struct mountctl_restart_journal *info)
346 {
347     struct journal *jo;
348     int error;
349 
350     TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
351 	if (bcmp(jo->id, info->id, sizeof(jo->id)) == 0)
352 	    break;
353     }
354     if (jo)
355 	error = journal_restart(mp, fp, jo, info->flags);
356     else
357 	error = EINVAL;
358     return (error);
359 }
360 
361 static int
362 journal_restart(struct mount *mp, struct file *fp,
363 		struct journal *jo, int flags)
364 {
365     /*
366      * XXX lock the jo
367      */
368 
369 #if 0
370     /*
371      * Record the fact that we are doing a restart in the journal.
372      * XXX it isn't safe to do this if the journal is being restarted
373      * because it was locked up and the writer thread has already exited.
374      */
375     jrecord_init(jo, &jrec, JREC_STREAMID_RESTART);
376     jrecord_write(&jrec, JTYPE_DISASSOCIATE, 0);
377     jrecord_done(&jrec, 0);
378 #endif
379 
380     /*
381      * Stop the reader and writer threads and clean up the current
382      * descriptor.
383      */
384     printf("RESTART WITH FP %p KILLING %p\n", fp, jo->fp);
385     journal_destroy_threads(jo, flags);
386 
387     if (jo->fp)
388 	fdrop(jo->fp);
389 
390     /*
391      * Associate the new descriptor, reset the FIFO index, and recreate
392      * the threads.
393      */
394     fhold(fp);
395     jo->fp = fp;
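    /* rewind the read index to the acked point so unacknowledged data is resent */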
396     jo->fifo.rindex = jo->fifo.xindex;
397     journal_create_threads(jo);
398 
399     return(0);
400 }
401 
402 /*
403  * Disassociate a journal from a mount point and terminate its worker thread.
404  * A final termination record is written out before the file pointer is
405  * dropped.
406  */
407 static int
408 journal_remove_vfs_journal(struct mount *mp,
409 			   const struct mountctl_remove_journal *info)
410 {
411     struct journal *jo;
412     int error;
413 
414     TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
415 	if (bcmp(jo->id, info->id, sizeof(jo->id)) == 0)
416 	    break;
417     }
418     if (jo)
419 	error = journal_destroy(mp, jo, info->flags);
420     else
421 	error = EINVAL;
422     return (error);
423 }
424 
425 /*
426  * Remove all journals associated with a mount point.  Usually called
427  * by the umount code.
428  */
429 void
430 journal_remove_all_journals(struct mount *mp, int flags)
431 {
432     struct journal *jo;
433 
434     while ((jo = TAILQ_FIRST(&mp->mnt_jlist)) != NULL) {
435 	journal_destroy(mp, jo, flags);
436     }
437 }
438 
439 static int
440 journal_destroy(struct mount *mp, struct journal *jo, int flags)
441 {
442     struct jrecord jrec;
443 
444     TAILQ_REMOVE(&mp->mnt_jlist, jo, jentry);
445 
446     jrecord_init(jo, &jrec, JREC_STREAMID_DISCONT);
447     jrecord_write(&jrec, JTYPE_DISASSOCIATE, 0);
448     jrecord_done(&jrec, 0);
449 
450     journal_destroy_threads(jo, flags);
451 
452     if (jo->fp)
453 	fdrop(jo->fp);
454     if (jo->fifo.membase)
455 	free(jo->fifo.membase, M_JFIFO);
456     free(jo, M_JOURNAL);
457 
458     return(0);
459 }
460 
461 static int
462 journal_resync_vfs_journal(struct mount *mp, const void *ctl)
463 {
464     return(EINVAL);
465 }
466 
467 static int
468 journal_status_vfs_journal(struct mount *mp,
469 		       const struct mountctl_status_journal *info,
470 		       struct mountctl_journal_ret_status *rstat,
471 		       int buflen, int *res)
472 {
473     struct journal *jo;
474     int error = 0;
475     int index;
476 
477     index = 0;
478     *res = 0;
479     TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
480 	if (info->index == MC_JOURNAL_INDEX_ID) {
481 	    if (bcmp(jo->id, info->id, sizeof(jo->id)) != 0)
482 		continue;
483 	} else if (info->index >= 0) {
484 	    if (info->index < index)
485 		continue;
486 	} else if (info->index != MC_JOURNAL_INDEX_ALL) {
487 	    continue;
488 	}
489 	if (buflen < sizeof(*rstat)) {
490 	    if (*res)
491 		rstat[-1].flags |= MC_JOURNAL_STATUS_MORETOCOME;
492 	    else
493 		error = EINVAL;
494 	    break;
495 	}
496 	bzero(rstat, sizeof(*rstat));
497 	rstat->recsize = sizeof(*rstat);
498 	bcopy(jo->id, rstat->id, sizeof(jo->id));
499 	rstat->index = index;
500 	rstat->membufsize = jo->fifo.size;
501 	rstat->membufused = jo->fifo.windex - jo->fifo.xindex;
502 	rstat->membufunacked = jo->fifo.rindex - jo->fifo.xindex;
503 	rstat->bytessent = jo->total_acked;
504 	rstat->fifostalls = jo->fifostalls;
505 	++rstat;
506 	++index;
507 	*res += sizeof(*rstat);
508 	buflen -= sizeof(*rstat);
509     }
510     return(error);
511 }
512 
513 /************************************************************************
514  *			PARALLEL TRANSACTION SUPPORT ROUTINES		*
515  ************************************************************************
516  *
517  * JRECLIST_*() - routines which create and iterate over jrecord structures,
518  *		  because a mount point may have multiple attached journals.
519  */
520 
521 /*
522  * Initialize the passed jrecord_list and create a jrecord for each
523  * journal we need to write to.  Unnecessary mallocs are avoided by
524  * using the passed jrecord structure as the first jrecord in the list.
525  * A starting transaction is pushed for each jrecord.
526  *
527  * Returns non-zero if any of the journals require undo records.
528  */
529 static
530 int
531 jreclist_init(struct mount *mp, struct jrecord_list *jreclist,
532 	      struct jrecord *jreccache, int16_t rectype)
533 {
534     struct journal *jo;
535     struct jrecord *jrec;
536     int wantrev;
537     int count;
538     int16_t streamid;
539 
540     TAILQ_INIT(&jreclist->list);
541 
542     /*
543      * Select the stream ID to use for the transaction.  We must select
544      * a stream ID that is not currently in use by some other parallel
545      * transaction.
546      *
547      * Don't bother calculating the next streamid when reassigning
548      * mnt_streamid, since parallel transactions are fairly rare.  This
549      * also allows someone observing the raw records to clearly see
550      * when parallel transactions occur.
551      */
552     streamid = mp->mnt_streamid;
553     count = 0;
554     while (mp->mnt_jbitmap[streamid >> 3] & (1 << (streamid & 7))) {
555 	if (++streamid == JREC_STREAMID_JMAX)
556 		streamid = JREC_STREAMID_JMIN;
557 	if (++count == JREC_STREAMID_JMAX - JREC_STREAMID_JMIN) {
558 		printf("jreclist_init: all streamid's in use! sleeping\n");
559 		tsleep(jreclist, 0, "jsidfl", hz * 10);
560 		count = 0;
561 	}
562     }
563     mp->mnt_jbitmap[streamid >> 3] |= 1 << (streamid & 7);
564     mp->mnt_streamid = streamid;
565     jreclist->streamid = streamid;
566 
567     /*
568      * Now initialize a stream on each journal.
569      */
570     count = 0;
571     wantrev = 0;
572     TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
573 	if (count == 0)
574 	    jrec = jreccache;
575 	else
576 	    jrec = malloc(sizeof(*jrec), M_JOURNAL, M_WAITOK);
577 	jrecord_init(jo, jrec, streamid);
578 	jrec->user_save = jrecord_push(jrec, rectype);
579 	TAILQ_INSERT_TAIL(&jreclist->list, jrec, user_entry);
580 	if (jo->flags & MC_JOURNAL_WANT_REVERSABLE)
581 	    wantrev = 1;
582 	++count;
583     }
584     return(wantrev);
585 }
586 
587 /*
588  * Terminate the journaled transactions started by jreclist_init().  If
589  * an error occurred, the transaction records will be aborted.
590  */
591 static
592 void
593 jreclist_done(struct mount *mp, struct jrecord_list *jreclist, int error)
594 {
595     struct jrecord *jrec;
596     int count;
597 
598     /*
599      * Cleanup the jrecord state on each journal.
600      */
601     TAILQ_FOREACH(jrec, &jreclist->list, user_entry) {
602 	jrecord_pop(jrec, jrec->user_save);
603 	jrecord_done(jrec, error);
604     }
605 
606     /*
607      * Free allocated jrec's (the first is always supplied)
608      */
609     count = 0;
610     while ((jrec = TAILQ_FIRST(&jreclist->list)) != NULL) {
611 	TAILQ_REMOVE(&jreclist->list, jrec, user_entry);
612 	if (count)
613 	    free(jrec, M_JOURNAL);
614 	++count;
615     }
616 
617     /*
618      * Clear the streamid so it can be reused.
619      */
620     mp->mnt_jbitmap[jreclist->streamid >> 3] &= ~(1 << (jreclist->streamid & 7));
621 }
622 
623 /*
624  * This procedure writes out UNDO records for available reversible
625  * journals.
626  *
627  * XXX could use improvement.  There is no need to re-read the file
628  * for each journal.
629  */
630 static
631 void
632 jreclist_undo_file(struct jrecord_list *jreclist, struct vnode *vp,
633 		   int jrflags, off_t off, off_t bytes)
634 {
635     struct jrecord *jrec;
636     int error;
637 
638     error = 0;
639     if (jrflags & JRUNDO_GETVP)
640 	error = vget(vp, LK_SHARED);
641     if (error == 0) {
642 	TAILQ_FOREACH(jrec, &jreclist->list, user_entry) {
643 	    if (jrec->jo->flags & MC_JOURNAL_WANT_REVERSABLE) {
644 		jrecord_undo_file(jrec, vp, jrflags, off, bytes);
645 	    }
646 	}
647     }
648     if (error == 0 && jrflags & JRUNDO_GETVP)
649 	vput(vp);
650 }
651 
652 /************************************************************************
653  *			LOW LEVEL UNDO SUPPORT ROUTINE			*
654  ************************************************************************
655  *
656  * This function is used to support UNDO records.  It will generate an
657  * appropriate record with the requested portion of the file data.  Note
658  * that file data is only recorded if JRUNDO_FILEDATA is passed.  If bytes
659  * is -1, it will be set to the size of the file.
660  */
661 static void
662 jrecord_undo_file(struct jrecord *jrec, struct vnode *vp, int jrflags,
663 		  off_t off, off_t bytes)
664 {
665     struct vattr attr;
666     void *save1; /* warning, save pointers do not always remain valid */
667     void *save2;
668     int error;
669 
670     /*
671      * Setup.  Start the UNDO record, obtain a shared lock on the vnode,
672      * and retrieve attribute info.
673      */
674     save1 = jrecord_push(jrec, JTYPE_UNDO);
675     error = VOP_GETATTR(vp, &attr);
676     if (error)
677 	goto done;
678 
679     /*
680      * Generate UNDO records as requested.
681      */
682     if (jrflags & JRUNDO_VATTR) {
683 	save2 = jrecord_push(jrec, JTYPE_VATTR);
684 	jrecord_leaf(jrec, JLEAF_VTYPE, &attr.va_type, sizeof(attr.va_type));
685 	if ((jrflags & JRUNDO_NLINK) && attr.va_nlink != VNOVAL)
686 	    jrecord_leaf(jrec, JLEAF_NLINK, &attr.va_nlink, sizeof(attr.va_nlink));
687 	if ((jrflags & JRUNDO_SIZE) && attr.va_size != VNOVAL)
688 	    jrecord_leaf(jrec, JLEAF_SIZE, &attr.va_size, sizeof(attr.va_size));
689 	if ((jrflags & JRUNDO_UID) && attr.va_uid != VNOVAL)
690 	    jrecord_leaf(jrec, JLEAF_UID, &attr.va_uid, sizeof(attr.va_uid));
691 	if ((jrflags & JRUNDO_GID) && attr.va_gid != VNOVAL)
692 	    jrecord_leaf(jrec, JLEAF_GID, &attr.va_gid, sizeof(attr.va_gid));
693 	if ((jrflags & JRUNDO_FSID) && attr.va_fsid != VNOVAL)
694 	    jrecord_leaf(jrec, JLEAF_FSID, &attr.va_fsid, sizeof(attr.va_fsid));
695 	if ((jrflags & JRUNDO_MODES) && attr.va_mode != (mode_t)VNOVAL)
696 	    jrecord_leaf(jrec, JLEAF_MODES, &attr.va_mode, sizeof(attr.va_mode));
697 	if ((jrflags & JRUNDO_INUM) && attr.va_fileid != VNOVAL)
698 	    jrecord_leaf(jrec, JLEAF_INUM, &attr.va_fileid, sizeof(attr.va_fileid));
699 	if ((jrflags & JRUNDO_ATIME) && attr.va_atime.tv_sec != VNOVAL)
700 	    jrecord_leaf(jrec, JLEAF_ATIME, &attr.va_atime, sizeof(attr.va_atime));
701 	if ((jrflags & JRUNDO_MTIME) && attr.va_mtime.tv_sec != VNOVAL)
702 	    jrecord_leaf(jrec, JLEAF_MTIME, &attr.va_mtime, sizeof(attr.va_mtime));
703 	if ((jrflags & JRUNDO_CTIME) && attr.va_ctime.tv_sec != VNOVAL)
704 	    jrecord_leaf(jrec, JLEAF_CTIME, &attr.va_ctime, sizeof(attr.va_ctime));
705 	if ((jrflags & JRUNDO_GEN) && attr.va_gen != VNOVAL)
706 	    jrecord_leaf(jrec, JLEAF_GEN, &attr.va_gen, sizeof(attr.va_gen));
707 	if ((jrflags & JRUNDO_FLAGS) && attr.va_flags != VNOVAL)
708 	    jrecord_leaf(jrec, JLEAF_FLAGS, &attr.va_flags, sizeof(attr.va_flags));
709 	if ((jrflags & JRUNDO_UDEV) && attr.va_rdev != VNOVAL)
710 	    jrecord_leaf(jrec, JLEAF_UDEV, &attr.va_rdev, sizeof(attr.va_rdev));
711 	jrecord_pop(jrec, save2);
712     }
713 
714     /*
715      * Output the file data being overwritten by reading the file and
716      * writing it out to the journal prior to the write operation.  We
717      * do not need to write out data past the current file EOF.
718      *
719      * XXX support JRUNDO_CONDLINK - do not write out file data for files
720      * with a link count > 1.  The undo code needs to locate the inode and
721      * regenerate the hardlink.
722      */
723     if ((jrflags & JRUNDO_FILEDATA) && attr.va_type == VREG) {
724 	if (attr.va_size != VNOVAL) {
725 	    if (bytes == -1)
726 		bytes = attr.va_size - off;
727 	    if (off + bytes > attr.va_size)
728 		bytes = attr.va_size - off;
729 	    if (bytes > 0)
730 		jrecord_file_data(jrec, vp, off, bytes);
731 	} else {
732 	    error = EINVAL;
733 	}
734     }
735     if ((jrflags & JRUNDO_FILEDATA) && attr.va_type == VLNK) {
736 	struct iovec aiov;
737 	struct uio auio;
738 	char *buf;
739 
740 	buf = malloc(PATH_MAX, M_JOURNAL, M_WAITOK);
741 	aiov.iov_base = buf;
742 	aiov.iov_len = PATH_MAX;
743 	auio.uio_iov = &aiov;
744 	auio.uio_iovcnt = 1;
745 	auio.uio_offset = 0;
746 	auio.uio_rw = UIO_READ;
747 	auio.uio_segflg = UIO_SYSSPACE;
748 	auio.uio_td = curthread;
749 	auio.uio_resid = PATH_MAX;
750 	error = VOP_READLINK(vp, &auio, proc0.p_ucred);
751 	if (error == 0) {
752 		jrecord_leaf(jrec, JLEAF_SYMLINKDATA, buf,
753 				PATH_MAX - auio.uio_resid);
754 	}
755 	free(buf, M_JOURNAL);
756     }
757 done:
758     if (error)
759 	jrecord_leaf(jrec, JLEAF_ERROR, &error, sizeof(error));
760     jrecord_pop(jrec, save1);
761 }
762 
763 /************************************************************************
764  *			JOURNAL VNOPS					*
765  ************************************************************************
766  *
767  * These are function shims replacing the normal filesystem ops.  We become
768  * responsible for calling the underlying filesystem ops.  We have the choice
769  * of executing the underlying op first and then generating the journal entry,
770  * or starting the journal entry, executing the underlying op, and then
771  * either completing or aborting it.
772  *
773  * The journal is supposed to be a high-level entity, which generally means
774  * identifying files by name rather than by inode.  Supplying both allows
775  * the journal to be used both for inode-number-compatible 'mirrors' and
776  * for simple filesystem replication.
777  *
778  * Writes are particularly difficult to deal with because a single write may
779  * represent a hundred megabyte buffer or more, and both writes and truncations
780  * require the 'old' data to be written out as well as the new data if the
781  * log is reversible.  Other issues:
782  *
783  * - How to deal with operations on unlinked files (no path available),
784  *   but which may still be filesystem visible due to hard links.
785  *
786  * - How to deal with modifications made via a memory map.
787  *
788  * - Future cache coherency support will require cache coherency API calls
789  *   both prior to and after the call to the underlying VFS.
790  *
791  * ALSO NOTE: We do not have to shim compatibility VOPs like MKDIR which have
792  * new VFS equivalents (NMKDIR).
793  */
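/*
 * Skeleton of the shim pattern shared by the journaling VOPs below.  This is
 * an illustrative sketch only (journal_nfoo and JTYPE_FOO are hypothetical);
 * see journal_setattr() and friends for the real instances.
 */
#if 0
static int
journal_nfoo(struct vop_nfoo_args *ap)
{
    struct jrecord_list jreclist;
    struct jrecord jreccache;
    struct jrecord *jrec;
    struct mount *mp;
    int error;

    mp = ap->a_head.a_ops->head.vv_mount;

    /*
     * Open a transaction on every attached journal; write UNDO data if
     * any of them want reversible records.
     */
    if (jreclist_init(mp, &jreclist, &jreccache, JTYPE_FOO))
	jreclist_undo_file(&jreclist, ap->a_vp, JRUNDO_VATTR, 0, 0);

    /* run the underlying (normal) VFS operation */
    error = vop_journal_operate_ap(&ap->a_head);

    /* on success, describe the operation in each journal's record */
    if (error == 0) {
	TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
	    jrecord_write_cred(jrec, NULL, ap->a_cred);
	    jrecord_write_vnode_ref(jrec, ap->a_vp);
	}
    }

    /* commit the transactions, or abort them if error != 0 */
    jreclist_done(mp, &jreclist, error);
    return (error);
}
#endif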
794 
795 /*
796  * Journal vop_setattr { a_vp, a_vap, a_cred, a_td }
797  */
798 static
799 int
800 journal_setattr(struct vop_setattr_args *ap)
801 {
802     struct jrecord_list jreclist;
803     struct jrecord jreccache;
804     struct jrecord *jrec;
805     struct mount *mp;
806     void *save;
807     int error;
808 
809     mp = ap->a_head.a_ops->head.vv_mount;
810     if (jreclist_init(mp, &jreclist, &jreccache, JTYPE_SETATTR)) {
811 	jreclist_undo_file(&jreclist, ap->a_vp, JRUNDO_VATTR, 0, 0);
812     }
813     error = vop_journal_operate_ap(&ap->a_head);
814     if (error == 0) {
815 	TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
816 	    jrecord_write_cred(jrec, curthread, ap->a_cred);
817 	    jrecord_write_vnode_ref(jrec, ap->a_vp);
818 	    save = jrecord_push(jrec, JTYPE_REDO);
819 	    jrecord_write_vattr(jrec, ap->a_vap);
820 	    jrecord_pop(jrec, save);
821 	}
822     }
823     jreclist_done(mp, &jreclist, error);
824     return (error);
825 }
826 
827 /*
828  * Journal vop_write { a_vp, a_uio, a_ioflag, a_cred }
829  */
830 static
831 int
832 journal_write(struct vop_write_args *ap)
833 {
834     struct jrecord_list jreclist;
835     struct jrecord jreccache;
836     struct jrecord *jrec;
837     struct mount *mp;
838     struct uio uio_copy;
839     struct iovec uio_one_iovec;
840     void *save;
841     int error;
842 
843     /*
844      * This is really nasty.  UIO's don't retain sufficient information to
845      * be reusable once they've gone through the VOP chain.  The iovecs get
846      * cleared, so we have to copy the UIO.
847      *
848      * XXX fix the UIO code to not destroy iov's during a scan so we can
849      *     reuse the uio over and over again.
850      *
851      * XXX UNDO code needs to journal the old data prior to the write.
852      */
853     uio_copy = *ap->a_uio;
854     if (uio_copy.uio_iovcnt == 1) {
855 	uio_one_iovec = ap->a_uio->uio_iov[0];
856 	uio_copy.uio_iov = &uio_one_iovec;
857     } else {
858 	uio_copy.uio_iov = malloc(uio_copy.uio_iovcnt * sizeof(struct iovec),
859 				    M_JOURNAL, M_WAITOK);
860 	bcopy(ap->a_uio->uio_iov, uio_copy.uio_iov,
861 		uio_copy.uio_iovcnt * sizeof(struct iovec));
862     }
863 
864     /*
865      * Write out undo data.  Note that uio_offset is incorrect if
866      * IO_APPEND is set, but fortunately we have no undo file data to
867      * write out in that case.
868      */
869     mp = ap->a_head.a_ops->head.vv_mount;
870     if (jreclist_init(mp, &jreclist, &jreccache, JTYPE_WRITE)) {
871 	if (ap->a_ioflag & IO_APPEND) {
872 	    jreclist_undo_file(&jreclist, ap->a_vp, JRUNDO_SIZE|JRUNDO_MTIME, 0, 0);
873 	} else {
874 	    jreclist_undo_file(&jreclist, ap->a_vp,
875 			       JRUNDO_FILEDATA|JRUNDO_SIZE|JRUNDO_MTIME,
876 			       uio_copy.uio_offset, uio_copy.uio_resid);
877 	}
878     }
879     error = vop_journal_operate_ap(&ap->a_head);
880 
881     /*
882      * XXX bad hack to figure out the offset for O_APPEND writes (note:
883      * uio field state after the VFS operation).
884      */
885     uio_copy.uio_offset = ap->a_uio->uio_offset -
886 			  (uio_copy.uio_resid - ap->a_uio->uio_resid);
887 
888     /*
889      * Output the write data to the journal.
890      */
891     if (error == 0) {
892 	TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
893 	    jrecord_write_cred(jrec, NULL, ap->a_cred);
894 	    jrecord_write_vnode_ref(jrec, ap->a_vp);
895 	    save = jrecord_push(jrec, JTYPE_REDO);
896 	    jrecord_write_uio(jrec, JLEAF_FILEDATA, &uio_copy);
897 	    jrecord_pop(jrec, save);
898 	}
899     }
900     jreclist_done(mp, &jreclist, error);
901 
902     if (uio_copy.uio_iov != &uio_one_iovec)
903 	free(uio_copy.uio_iov, M_JOURNAL);
904     return (error);
905 }
906 
907 /*
908  * Journal vop_fsync { a_vp, a_waitfor, a_td }
909  */
910 static
911 int
912 journal_fsync(struct vop_fsync_args *ap)
913 {
914 #if 0
915     struct mount *mp;
916     struct journal *jo;
917 #endif
918     int error;
919 
920     error = vop_journal_operate_ap(&ap->a_head);
921 #if 0
922     mp = ap->a_head.a_ops->head.vv_mount;
923     if (error == 0) {
924 	TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
925 	    /* XXX synchronize pending journal records */
926 	}
927     }
928 #endif
929     return (error);
930 }
931 
932 /*
933  * Journal vop_putpages { a_vp, a_m, a_count, a_sync, a_rtvals, a_offset }
934  *
935  * note: a_count is in bytes.
936  */
937 static
938 int
939 journal_putpages(struct vop_putpages_args *ap)
940 {
941     struct jrecord_list jreclist;
942     struct jrecord jreccache;
943     struct jrecord *jrec;
944     struct mount *mp;
945     void *save;
946     int error;
947 
948     mp = ap->a_head.a_ops->head.vv_mount;
949     if (jreclist_init(mp, &jreclist, &jreccache, JTYPE_PUTPAGES) &&
950 	ap->a_count > 0
951     ) {
952 	jreclist_undo_file(&jreclist, ap->a_vp,
953 			   JRUNDO_FILEDATA|JRUNDO_SIZE|JRUNDO_MTIME,
954 			   ap->a_offset, btoc(ap->a_count));
955     }
956     error = vop_journal_operate_ap(&ap->a_head);
957     if (error == 0 && ap->a_count > 0) {
958 	TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
959 	    jrecord_write_vnode_ref(jrec, ap->a_vp);
960 	    save = jrecord_push(jrec, JTYPE_REDO);
961 	    jrecord_write_pagelist(jrec, JLEAF_FILEDATA, ap->a_m, ap->a_rtvals,
962 				   btoc(ap->a_count), ap->a_offset);
963 	    jrecord_pop(jrec, save);
964 	}
965     }
966     jreclist_done(mp, &jreclist, error);
967     return (error);
968 }
969 
970 /*
971  * Journal vop_setacl { a_vp, a_type, a_aclp, a_cred, a_td }
972  */
973 static
974 int
975 journal_setacl(struct vop_setacl_args *ap)
976 {
977     struct jrecord_list jreclist;
978     struct jrecord jreccache;
979     struct jrecord *jrec;
980     struct mount *mp;
981     int error;
982 
983     mp = ap->a_head.a_ops->head.vv_mount;
984     jreclist_init(mp, &jreclist, &jreccache, JTYPE_SETACL);
985     error = vop_journal_operate_ap(&ap->a_head);
986     if (error == 0) {
987 	TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
988 #if 0
989 	    if ((jo->flags & MC_JOURNAL_WANT_REVERSABLE))
990 		jrecord_undo_file(jrec, ap->a_vp, JRUNDO_XXX, 0, 0);
991 #endif
992 	    jrecord_write_cred(jrec, curthread, ap->a_cred);
993 	    jrecord_write_vnode_ref(jrec, ap->a_vp);
994 #if 0
995 	    save = jrecord_push(jrec, JTYPE_REDO);
996 	    /* XXX type, aclp */
997 	    jrecord_pop(jrec, save);
998 #endif
999 	}
1000     }
1001     jreclist_done(mp, &jreclist, error);
1002     return (error);
1003 }
1004 
1005 /*
1006  * Journal vop_setextattr { a_vp, a_name, a_uio, a_cred, a_td }
1007  */
1008 static
1009 int
1010 journal_setextattr(struct vop_setextattr_args *ap)
1011 {
1012     struct jrecord_list jreclist;
1013     struct jrecord jreccache;
1014     struct jrecord *jrec;
1015     struct mount *mp;
1016     void *save;
1017     int error;
1018 
1019     mp = ap->a_head.a_ops->head.vv_mount;
1020     jreclist_init(mp, &jreclist, &jreccache, JTYPE_SETEXTATTR);
1021     error = vop_journal_operate_ap(&ap->a_head);
1022     if (error == 0) {
1023 	TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1024 #if 0
1025 	    if ((jo->flags & MC_JOURNAL_WANT_REVERSABLE))
1026 		jrecord_undo_file(jrec, ap->a_vp, JRUNDO_XXX, 0, 0);
1027 #endif
1028 	    jrecord_write_cred(jrec, curthread, ap->a_cred);
1029 	    jrecord_write_vnode_ref(jrec, ap->a_vp);
1030 	    jrecord_leaf(jrec, JLEAF_ATTRNAME, ap->a_name, strlen(ap->a_name));
1031 	    save = jrecord_push(jrec, JTYPE_REDO);
1032 	    jrecord_write_uio(jrec, JLEAF_FILEDATA, ap->a_uio);
1033 	    jrecord_pop(jrec, save);
1034 	}
1035     }
1036     jreclist_done(mp, &jreclist, error);
1037     return (error);
1038 }
1039 
1040 /*
1041  * Journal vop_ncreate { a_ncp, a_vpp, a_cred, a_vap }
1042  */
1043 static
1044 int
1045 journal_ncreate(struct vop_ncreate_args *ap)
1046 {
1047     struct jrecord_list jreclist;
1048     struct jrecord jreccache;
1049     struct jrecord *jrec;
1050     struct mount *mp;
1051     void *save;
1052     int error;
1053 
1054     mp = ap->a_head.a_ops->head.vv_mount;
1055     jreclist_init(mp, &jreclist, &jreccache, JTYPE_CREATE);
1056     error = vop_journal_operate_ap(&ap->a_head);
1057     if (error == 0) {
1058 	TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1059 	    jrecord_write_cred(jrec, NULL, ap->a_cred);
1060 	    jrecord_write_path(jrec, JLEAF_PATH1, ap->a_ncp);
1061 	    if (*ap->a_vpp)
1062 		jrecord_write_vnode_ref(jrec, *ap->a_vpp);
1063 	    save = jrecord_push(jrec, JTYPE_REDO);
1064 	    jrecord_write_vattr(jrec, ap->a_vap);
1065 	    jrecord_pop(jrec, save);
1066 	}
1067     }
1068     jreclist_done(mp, &jreclist, error);
1069     return (error);
1070 }
1071 
1072 /*
1073  * Journal vop_nmknod { a_ncp, a_vpp, a_cred, a_vap }
1074  */
1075 static
1076 int
1077 journal_nmknod(struct vop_nmknod_args *ap)
1078 {
1079     struct jrecord_list jreclist;
1080     struct jrecord jreccache;
1081     struct jrecord *jrec;
1082     struct mount *mp;
1083     void *save;
1084     int error;
1085 
1086     mp = ap->a_head.a_ops->head.vv_mount;
1087     jreclist_init(mp, &jreclist, &jreccache, JTYPE_MKNOD);
1088     error = vop_journal_operate_ap(&ap->a_head);
1089     if (error == 0) {
1090 	TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1091 	    jrecord_write_cred(jrec, NULL, ap->a_cred);
1092 	    jrecord_write_path(jrec, JLEAF_PATH1, ap->a_ncp);
1093 	    save = jrecord_push(jrec, JTYPE_REDO);
1094 	    jrecord_write_vattr(jrec, ap->a_vap);
1095 	    jrecord_pop(jrec, save);
1096 	    if (*ap->a_vpp)
1097 		jrecord_write_vnode_ref(jrec, *ap->a_vpp);
1098 	}
1099     }
1100     jreclist_done(mp, &jreclist, error);
1101     return (error);
1102 }
1103 
1104 /*
1105  * Journal vop_nlink { a_ncp, a_vp, a_cred }
1106  */
1107 static
1108 int
1109 journal_nlink(struct vop_nlink_args *ap)
1110 {
1111     struct jrecord_list jreclist;
1112     struct jrecord jreccache;
1113     struct jrecord *jrec;
1114     struct mount *mp;
1115     void *save;
1116     int error;
1117 
1118     mp = ap->a_head.a_ops->head.vv_mount;
1119     jreclist_init(mp, &jreclist, &jreccache, JTYPE_LINK);
1120     error = vop_journal_operate_ap(&ap->a_head);
1121     if (error == 0) {
1122 	TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1123 	    jrecord_write_cred(jrec, NULL, ap->a_cred);
1124 	    jrecord_write_path(jrec, JLEAF_PATH1, ap->a_ncp);
1125 	    /* XXX PATH to VP and inode number */
1126 	    /* XXX this call may not record the correct path when
1127 	     * multiple paths are available */
1128 	    save = jrecord_push(jrec, JTYPE_REDO);
1129 	    jrecord_write_vnode_link(jrec, ap->a_vp, ap->a_ncp);
1130 	    jrecord_pop(jrec, save);
1131 	}
1132     }
1133     jreclist_done(mp, &jreclist, error);
1134     return (error);
1135 }
1136 
1137 /*
1138  * Journal vop_nsymlink { a_ncp, a_vpp, a_cred, a_vap, a_target }
1139  */
1140 static
1141 int
1142 journal_nsymlink(struct vop_nsymlink_args *ap)
1143 {
1144     struct jrecord_list jreclist;
1145     struct jrecord jreccache;
1146     struct jrecord *jrec;
1147     struct mount *mp;
1148     void *save;
1149     int error;
1150 
1151     mp = ap->a_head.a_ops->head.vv_mount;
1152     jreclist_init(mp, &jreclist, &jreccache, JTYPE_SYMLINK);
1153     error = vop_journal_operate_ap(&ap->a_head);
1154     if (error == 0) {
1155 	TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1156 	    jrecord_write_cred(jrec, NULL, ap->a_cred);
1157 	    jrecord_write_path(jrec, JLEAF_PATH1, ap->a_ncp);
1158 	    save = jrecord_push(jrec, JTYPE_REDO);
1159 	    jrecord_leaf(jrec, JLEAF_SYMLINKDATA,
1160 			ap->a_target, strlen(ap->a_target));
1161 	    jrecord_pop(jrec, save);
1162 	    if (*ap->a_vpp)
1163 		jrecord_write_vnode_ref(jrec, *ap->a_vpp);
1164 	}
1165     }
1166     jreclist_done(mp, &jreclist, error);
1167     return (error);
1168 }
1169 
1170 /*
1171  * Journal vop_nwhiteout { a_ncp, a_cred, a_flags }
1172  */
1173 static
1174 int
1175 journal_nwhiteout(struct vop_nwhiteout_args *ap)
1176 {
1177     struct jrecord_list jreclist;
1178     struct jrecord jreccache;
1179     struct jrecord *jrec;
1180     struct mount *mp;
1181     int error;
1182 
1183     mp = ap->a_head.a_ops->head.vv_mount;
1184     jreclist_init(mp, &jreclist, &jreccache, JTYPE_WHITEOUT);
1185     error = vop_journal_operate_ap(&ap->a_head);
1186     if (error == 0) {
1187 	TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1188 	    jrecord_write_cred(jrec, NULL, ap->a_cred);
1189 	    jrecord_write_path(jrec, JLEAF_PATH1, ap->a_ncp);
1190 	}
1191     }
1192     jreclist_done(mp, &jreclist, error);
1193     return (error);
1194 }
1195 
1196 /*
1197  * Journal vop_nremove { a_ncp, a_cred }
1198  */
1199 static
1200 int
1201 journal_nremove(struct vop_nremove_args *ap)
1202 {
1203     struct jrecord_list jreclist;
1204     struct jrecord jreccache;
1205     struct jrecord *jrec;
1206     struct mount *mp;
1207     int error;
1208 
1209     mp = ap->a_head.a_ops->head.vv_mount;
1210     if (jreclist_init(mp, &jreclist, &jreccache, JTYPE_REMOVE) &&
1211 	ap->a_ncp->nc_vp
1212     ) {
1213 	jreclist_undo_file(&jreclist, ap->a_ncp->nc_vp,
1214 			   JRUNDO_ALL|JRUNDO_GETVP|JRUNDO_CONDLINK, 0, -1);
1215     }
1216     error = vop_journal_operate_ap(&ap->a_head);
1217     if (error == 0) {
1218 	TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1219 	    jrecord_write_cred(jrec, NULL, ap->a_cred);
1220 	    jrecord_write_path(jrec, JLEAF_PATH1, ap->a_ncp);
1221 	}
1222     }
1223     jreclist_done(mp, &jreclist, error);
1224     return (error);
1225 }
1226 
1227 /*
1228  * Journal vop_nmkdir { a_ncp, a_vpp, a_cred, a_vap }
1229  */
1230 static
1231 int
1232 journal_nmkdir(struct vop_nmkdir_args *ap)
1233 {
1234     struct jrecord_list jreclist;
1235     struct jrecord jreccache;
1236     struct jrecord *jrec;
1237     struct mount *mp;
1238     int error;
1239 
1240     mp = ap->a_head.a_ops->head.vv_mount;
1241     jreclist_init(mp, &jreclist, &jreccache, JTYPE_MKDIR);
1242     error = vop_journal_operate_ap(&ap->a_head);
1243     if (error == 0) {
1244 	TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1245 #if 0
1246 	    if (jo->flags & MC_JOURNAL_WANT_AUDIT) {
1247 		jrecord_write_audit(jrec);
1248 	    }
1249 #endif
1250 	    jrecord_write_path(jrec, JLEAF_PATH1, ap->a_ncp);
1251 	    jrecord_write_cred(jrec, NULL, ap->a_cred);
1252 	    jrecord_write_vattr(jrec, ap->a_vap);
1254 	    if (*ap->a_vpp)
1255 		jrecord_write_vnode_ref(jrec, *ap->a_vpp);
1256 	}
1257     }
1258     jreclist_done(mp, &jreclist, error);
1259     return (error);
1260 }
1261 
1262 /*
1263  * Journal vop_nrmdir { a_ncp, a_cred }
1264  */
1265 static
1266 int
1267 journal_nrmdir(struct vop_nrmdir_args *ap)
1268 {
1269     struct jrecord_list jreclist;
1270     struct jrecord jreccache;
1271     struct jrecord *jrec;
1272     struct mount *mp;
1273     int error;
1274 
1275     mp = ap->a_head.a_ops->head.vv_mount;
1276     if (jreclist_init(mp, &jreclist, &jreccache, JTYPE_RMDIR)) {
1277 	jreclist_undo_file(&jreclist, ap->a_ncp->nc_vp,
1278 			   JRUNDO_VATTR|JRUNDO_GETVP, 0, 0);
1279     }
1280     error = vop_journal_operate_ap(&ap->a_head);
1281     if (error == 0) {
1282 	TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1283 	    jrecord_write_cred(jrec, NULL, ap->a_cred);
1284 	    jrecord_write_path(jrec, JLEAF_PATH1, ap->a_ncp);
1285 	}
1286     }
1287     jreclist_done(mp, &jreclist, error);
1288     return (error);
1289 }
1290 
1291 /*
1292  * Journal vop_nrename { a_fncp, a_tncp, a_cred }
1293  */
1294 static
1295 int
1296 journal_nrename(struct vop_nrename_args *ap)
1297 {
1298     struct jrecord_list jreclist;
1299     struct jrecord jreccache;
1300     struct jrecord *jrec;
1301     struct mount *mp;
1302     int error;
1303 
1304     mp = ap->a_head.a_ops->head.vv_mount;
1305     if (jreclist_init(mp, &jreclist, &jreccache, JTYPE_RENAME) &&
1306 	ap->a_tncp->nc_vp
1307     ) {
1308 	jreclist_undo_file(&jreclist, ap->a_tncp->nc_vp,
1309 			   JRUNDO_ALL|JRUNDO_GETVP|JRUNDO_CONDLINK, 0, -1);
1310     }
1311     error = vop_journal_operate_ap(&ap->a_head);
1312     if (error == 0) {
1313 	TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1314 	    jrecord_write_cred(jrec, NULL, ap->a_cred);
1315 	    jrecord_write_path(jrec, JLEAF_PATH1, ap->a_fncp);
1316 	    jrecord_write_path(jrec, JLEAF_PATH2, ap->a_tncp);
1317 	}
1318     }
1319     jreclist_done(mp, &jreclist, error);
1320     return (error);
1321 }
1322 
1323