xref: /dragonfly/sys/kern/vfs_jops.c (revision 3f625015)
1 /*
2  * Copyright (c) 2004-2006 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  * $DragonFly: src/sys/kern/vfs_jops.c,v 1.34 2007/05/09 00:53:34 dillon Exp $
35  */
36 /*
 * Each mount point may have zero or more independently configured journals
38  * attached to it.  Each journal is represented by a memory FIFO and worker
39  * thread.  Journal events are streamed through the FIFO to the thread,
40  * batched up (typically on one-second intervals), and written out by the
41  * thread.
42  *
43  * Journal vnode ops are executed instead of mnt_vn_norm_ops when one or
44  * more journals have been installed on a mount point.  It becomes the
45  * responsibility of the journal op to call the underlying normal op as
46  * appropriate.
47  */
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/buf.h>
51 #include <sys/conf.h>
52 #include <sys/kernel.h>
53 #include <sys/queue.h>
54 #include <sys/lock.h>
55 #include <sys/malloc.h>
56 #include <sys/mount.h>
57 #include <sys/unistd.h>
58 #include <sys/vnode.h>
59 #include <sys/poll.h>
60 #include <sys/mountctl.h>
61 #include <sys/journal.h>
62 #include <sys/file.h>
63 #include <sys/proc.h>
64 #include <sys/msfbuf.h>
65 #include <sys/socket.h>
66 #include <sys/socketvar.h>
67 
68 #include <machine/limits.h>
69 
70 #include <vm/vm.h>
71 #include <vm/vm_object.h>
72 #include <vm/vm_page.h>
73 #include <vm/vm_pager.h>
74 #include <vm/vnode_pager.h>
75 
76 #include <sys/file2.h>
77 #include <sys/thread2.h>
78 
79 static int journal_attach(struct mount *mp);
80 static void journal_detach(struct mount *mp);
81 static int journal_install_vfs_journal(struct mount *mp, struct file *fp,
82 			    const struct mountctl_install_journal *info);
83 static int journal_restart_vfs_journal(struct mount *mp, struct file *fp,
84 			    const struct mountctl_restart_journal *info);
85 static int journal_remove_vfs_journal(struct mount *mp,
86 			    const struct mountctl_remove_journal *info);
87 static int journal_restart(struct mount *mp, struct file *fp,
88 			    struct journal *jo, int flags);
89 static int journal_destroy(struct mount *mp, struct journal *jo, int flags);
90 static int journal_resync_vfs_journal(struct mount *mp, const void *ctl);
91 static int journal_status_vfs_journal(struct mount *mp,
92 		       const struct mountctl_status_journal *info,
93 		       struct mountctl_journal_ret_status *rstat,
94 		       int buflen, int *res);
95 
96 static void jrecord_undo_file(struct jrecord *jrec, struct vnode *vp,
97 			     int jrflags, off_t off, off_t bytes);
98 
99 static int journal_setattr(struct vop_setattr_args *ap);
100 static int journal_write(struct vop_write_args *ap);
101 static int journal_fsync(struct vop_fsync_args *ap);
102 static int journal_putpages(struct vop_putpages_args *ap);
103 static int journal_setacl(struct vop_setacl_args *ap);
104 static int journal_setextattr(struct vop_setextattr_args *ap);
105 static int journal_ncreate(struct vop_ncreate_args *ap);
106 static int journal_nmknod(struct vop_nmknod_args *ap);
107 static int journal_nlink(struct vop_nlink_args *ap);
108 static int journal_nsymlink(struct vop_nsymlink_args *ap);
109 static int journal_nwhiteout(struct vop_nwhiteout_args *ap);
110 static int journal_nremove(struct vop_nremove_args *ap);
111 static int journal_nmkdir(struct vop_nmkdir_args *ap);
112 static int journal_nrmdir(struct vop_nrmdir_args *ap);
113 static int journal_nrename(struct vop_nrename_args *ap);
114 
115 #define JRUNDO_SIZE	0x00000001
116 #define JRUNDO_UID	0x00000002
117 #define JRUNDO_GID	0x00000004
118 #define JRUNDO_FSID	0x00000008
119 #define JRUNDO_MODES	0x00000010
120 #define JRUNDO_INUM	0x00000020
121 #define JRUNDO_ATIME	0x00000040
122 #define JRUNDO_MTIME	0x00000080
123 #define JRUNDO_CTIME	0x00000100
124 #define JRUNDO_GEN	0x00000200
125 #define JRUNDO_FLAGS	0x00000400
126 #define JRUNDO_UDEV	0x00000800
127 #define JRUNDO_NLINK	0x00001000
128 #define JRUNDO_FILEDATA	0x00010000
129 #define JRUNDO_GETVP	0x00020000
130 #define JRUNDO_CONDLINK	0x00040000	/* write file data if link count 1 */
131 #define JRUNDO_VATTR	(JRUNDO_SIZE|JRUNDO_UID|JRUNDO_GID|JRUNDO_FSID|\
132 			 JRUNDO_MODES|JRUNDO_INUM|JRUNDO_ATIME|JRUNDO_MTIME|\
133 			 JRUNDO_CTIME|JRUNDO_GEN|JRUNDO_FLAGS|JRUNDO_UDEV|\
134 			 JRUNDO_NLINK)
135 #define JRUNDO_ALL	(JRUNDO_VATTR|JRUNDO_FILEDATA)
136 
/*
 * Vnode ops vector substituted for the normal ops while one or more
 * journals are attached to a mount point.  Any op not shimmed here
 * defaults to vop_journal_operate_ap, which forwards the call to the
 * underlying filesystem; the shims below journal the operation in
 * addition to forwarding it.
 */
static struct vop_ops journal_vnode_vops = {
    .vop_default =	vop_journal_operate_ap,
    .vop_mountctl =	journal_mountctl,
    .vop_setattr =	journal_setattr,
    .vop_write =	journal_write,
    .vop_fsync =	journal_fsync,
    .vop_putpages =	journal_putpages,
    .vop_setacl =	journal_setacl,
    .vop_setextattr =	journal_setextattr,
    .vop_ncreate =	journal_ncreate,
    .vop_nmknod =	journal_nmknod,
    .vop_nlink =	journal_nlink,
    .vop_nsymlink =	journal_nsymlink,
    .vop_nwhiteout =	journal_nwhiteout,
    .vop_nremove =	journal_nremove,
    .vop_nmkdir =	journal_nmkdir,
    .vop_nrmdir =	journal_nrmdir,
    .vop_nrename =	journal_nrename
};
156 
157 int
158 journal_mountctl(struct vop_mountctl_args *ap)
159 {
160     struct mount *mp;
161     int error = 0;
162 
163     mp = ap->a_head.a_ops->head.vv_mount;
164     KKASSERT(mp);
165 
166     if (mp->mnt_vn_journal_ops == NULL) {
167 	switch(ap->a_op) {
168 	case MOUNTCTL_INSTALL_VFS_JOURNAL:
169 	    error = journal_attach(mp);
170 	    if (error == 0 && ap->a_ctllen != sizeof(struct mountctl_install_journal))
171 		error = EINVAL;
172 	    if (error == 0 && ap->a_fp == NULL)
173 		error = EBADF;
174 	    if (error == 0)
175 		error = journal_install_vfs_journal(mp, ap->a_fp, ap->a_ctl);
176 	    if (TAILQ_EMPTY(&mp->mnt_jlist))
177 		journal_detach(mp);
178 	    break;
179 	case MOUNTCTL_RESTART_VFS_JOURNAL:
180 	case MOUNTCTL_REMOVE_VFS_JOURNAL:
181 	case MOUNTCTL_RESYNC_VFS_JOURNAL:
182 	case MOUNTCTL_STATUS_VFS_JOURNAL:
183 	    error = ENOENT;
184 	    break;
185 	default:
186 	    error = EOPNOTSUPP;
187 	    break;
188 	}
189     } else {
190 	switch(ap->a_op) {
191 	case MOUNTCTL_INSTALL_VFS_JOURNAL:
192 	    if (ap->a_ctllen != sizeof(struct mountctl_install_journal))
193 		error = EINVAL;
194 	    if (error == 0 && ap->a_fp == NULL)
195 		error = EBADF;
196 	    if (error == 0)
197 		error = journal_install_vfs_journal(mp, ap->a_fp, ap->a_ctl);
198 	    break;
199 	case MOUNTCTL_RESTART_VFS_JOURNAL:
200 	    if (ap->a_ctllen != sizeof(struct mountctl_restart_journal))
201 		error = EINVAL;
202 	    if (error == 0 && ap->a_fp == NULL)
203 		error = EBADF;
204 	    if (error == 0)
205 		error = journal_restart_vfs_journal(mp, ap->a_fp, ap->a_ctl);
206 	    break;
207 	case MOUNTCTL_REMOVE_VFS_JOURNAL:
208 	    if (ap->a_ctllen != sizeof(struct mountctl_remove_journal))
209 		error = EINVAL;
210 	    if (error == 0)
211 		error = journal_remove_vfs_journal(mp, ap->a_ctl);
212 	    if (TAILQ_EMPTY(&mp->mnt_jlist))
213 		journal_detach(mp);
214 	    break;
215 	case MOUNTCTL_RESYNC_VFS_JOURNAL:
216 	    if (ap->a_ctllen != 0)
217 		error = EINVAL;
218 	    error = journal_resync_vfs_journal(mp, ap->a_ctl);
219 	    break;
220 	case MOUNTCTL_STATUS_VFS_JOURNAL:
221 	    if (ap->a_ctllen != sizeof(struct mountctl_status_journal))
222 		error = EINVAL;
223 	    if (error == 0) {
224 		error = journal_status_vfs_journal(mp, ap->a_ctl,
225 					ap->a_buf, ap->a_buflen, ap->a_res);
226 	    }
227 	    break;
228 	default:
229 	    error = EOPNOTSUPP;
230 	    break;
231 	}
232     }
233     return (error);
234 }
235 
/*
 * High level mount point setup.  Called when the first journal is being
 * installed on a mount point: attach the journaling vnode ops vector and
 * allocate the stream-id bitmap used to multiplex parallel transactions.
 */
static int
journal_attach(struct mount *mp)
{
    KKASSERT(mp->mnt_jbitmap == NULL);
    vfs_add_vnodeops(mp, &journal_vnode_vops, &mp->mnt_vn_journal_ops);
    /* one bit per stream id in [0, JREC_STREAMID_JMAX) */
    mp->mnt_jbitmap = kmalloc(JREC_STREAMID_JMAX/8, M_JOURNAL, M_WAITOK|M_ZERO);
    mp->mnt_streamid = JREC_STREAMID_JMIN;
    return(0);
}
248 
/*
 * Tear down the journaling hooks on a mount point after the last journal
 * has been removed: detach the vnode ops and free the stream-id bitmap.
 */
static void
journal_detach(struct mount *mp)
{
    KKASSERT(mp->mnt_jbitmap != NULL);
    if (mp->mnt_vn_journal_ops)
	vfs_rm_vnodeops(mp, &journal_vnode_vops, &mp->mnt_vn_journal_ops);
    kfree(mp->mnt_jbitmap, M_JOURNAL);
    mp->mnt_jbitmap = NULL;
}
258 
/*
 * Install a journal on a mount point.  Each journal has an associated worker
 * thread which is responsible for buffering and spooling the data to the
 * target.  A mount point may have multiple journals attached to it.  An
 * initial start record is generated when the journal is associated.
 *
 * On success a reference is taken on fp and the journal is linked onto
 * mp->mnt_jlist.  If the FIFO allocation fails the journal is freed and
 * ENOMEM is returned.
 */
static int
journal_install_vfs_journal(struct mount *mp, struct file *fp,
			    const struct mountctl_install_journal *info)
{
    struct journal *jo;
    struct jrecord jrec;
    int error = 0;
    int size;

    jo = kmalloc(sizeof(struct journal), M_JOURNAL, M_WAITOK|M_ZERO);
    bcopy(info->id, jo->id, sizeof(jo->id));
    /* thread-activity and stop-request bits are kernel-owned, filter them */
    jo->flags = info->flags & ~(MC_JOURNAL_WACTIVE | MC_JOURNAL_RACTIVE |
				MC_JOURNAL_STOP_REQ);

    /*
     * Memory FIFO size, round to nearest power of 2.  The requested size
     * is clamped to [64KB, 128MB]; 1MB is used when none was given.
     */
    if (info->membufsize) {
	if (info->membufsize < 65536)
	    size = 65536;
	else if (info->membufsize > 128 * 1024 * 1024)
	    size = 128 * 1024 * 1024;
	else
	    size = (int)info->membufsize;
    } else {
	size = 1024 * 1024;
    }
    jo->fifo.size = 1;
    while (jo->fifo.size < size)
	jo->fifo.size <<= 1;

    /*
     * Other parameters.  If not specified the starting transaction id
     * will be the current date (seconds in the high bits, nanoseconds
     * in the low 30 bits).
     */
    if (info->transid) {
	jo->transid = info->transid;
    } else {
	struct timespec ts;
	getnanotime(&ts);
	jo->transid = ((int64_t)ts.tv_sec << 30) | ts.tv_nsec;
    }

    jo->fp = fp;

    /*
     * Allocate the memory FIFO.  M_NULLOK allows the allocation to fail,
     * so the result must be checked.
     */
    jo->fifo.mask = jo->fifo.size - 1;
    jo->fifo.membase = kmalloc(jo->fifo.size, M_JFIFO, M_WAITOK|M_ZERO|M_NULLOK);
    if (jo->fifo.membase == NULL)
	error = ENOMEM;

    /*
     * Create the worker threads and generate the association record.
     * On error nothing else was allocated or referenced, so a plain
     * kfree of the journal suffices.
     */
    if (error) {
	kfree(jo, M_JOURNAL);
    } else {
	fhold(fp);
	journal_create_threads(jo);
	jrecord_init(jo, &jrec, JREC_STREAMID_DISCONT);
	jrecord_write(&jrec, JTYPE_ASSOCIATE, 0);
	jrecord_done(&jrec, 0);
	TAILQ_INSERT_TAIL(&mp->mnt_jlist, jo, jentry);
    }
    return(error);
}
333 
334 /*
335  * Restart a journal with a new descriptor.   The existing reader and writer
336  * threads are terminated and a new descriptor is associated with the
337  * journal.  The FIFO rindex is reset to xindex and the threads are then
338  * restarted.
339  */
340 static int
341 journal_restart_vfs_journal(struct mount *mp, struct file *fp,
342 			   const struct mountctl_restart_journal *info)
343 {
344     struct journal *jo;
345     int error;
346 
347     TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
348 	if (bcmp(jo->id, info->id, sizeof(jo->id)) == 0)
349 	    break;
350     }
351     if (jo)
352 	error = journal_restart(mp, fp, jo, info->flags);
353     else
354 	error = EINVAL;
355     return (error);
356 }
357 
/*
 * Swap a journal over to a new descriptor: stop the existing reader and
 * writer threads, drop the old file pointer, attach the new one, rewind
 * the FIFO read index to the last unacknowledged position (xindex), and
 * restart the threads.  The journal remains on the mount list throughout.
 * Always returns 0.
 */
static int
journal_restart(struct mount *mp, struct file *fp,
		struct journal *jo, int flags)
{
    /*
     * XXX lock the jo
     */

#if 0
    /*
     * Record the fact that we are doing a restart in the journal.
     * XXX it isn't safe to do this if the journal is being restarted
     * because it was locked up and the writer thread has already exited.
     */
    jrecord_init(jo, &jrec, JREC_STREAMID_RESTART);
    jrecord_write(&jrec, JTYPE_DISASSOCIATE, 0);
    jrecord_done(&jrec, 0);
#endif

    /*
     * Stop the reader and writer threads and clean up the current
     * descriptor.
     */
    kprintf("RESTART WITH FP %p KILLING %p\n", fp, jo->fp);
    journal_destroy_threads(jo, flags);

    if (jo->fp)
	fdrop(jo->fp);

    /*
     * Associate the new descriptor, reset the FIFO index, and recreate
     * the threads.
     */
    fhold(fp);
    jo->fp = fp;
    jo->fifo.rindex = jo->fifo.xindex;
    journal_create_threads(jo);

    return(0);
}
398 
399 /*
400  * Disassociate a journal from a mount point and terminate its worker thread.
401  * A final termination record is written out before the file pointer is
402  * dropped.
403  */
404 static int
405 journal_remove_vfs_journal(struct mount *mp,
406 			   const struct mountctl_remove_journal *info)
407 {
408     struct journal *jo;
409     int error;
410 
411     TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
412 	if (bcmp(jo->id, info->id, sizeof(jo->id)) == 0)
413 	    break;
414     }
415     if (jo)
416 	error = journal_destroy(mp, jo, info->flags);
417     else
418 	error = EINVAL;
419     return (error);
420 }
421 
/*
 * Remove all journals associated with a mount point.  Usually called
 * by the umount code.
 */
void
journal_remove_all_journals(struct mount *mp, int flags)
{
    struct journal *jo;

    /* journal_destroy() unlinks jo from mnt_jlist, so keep taking the head */
    while ((jo = TAILQ_FIRST(&mp->mnt_jlist)) != NULL) {
	journal_destroy(mp, jo, flags);
    }
}
435 
/*
 * Tear down a single journal: unlink it from the mount point, write a
 * final DISASSOCIATE record, stop the worker threads, then release the
 * file pointer and FIFO memory.  Always returns 0.
 */
static int
journal_destroy(struct mount *mp, struct journal *jo, int flags)
{
    struct jrecord jrec;

    TAILQ_REMOVE(&mp->mnt_jlist, jo, jentry);

    /* final termination record, queued before the threads are stopped */
    jrecord_init(jo, &jrec, JREC_STREAMID_DISCONT);
    jrecord_write(&jrec, JTYPE_DISASSOCIATE, 0);
    jrecord_done(&jrec, 0);

    journal_destroy_threads(jo, flags);

    if (jo->fp)
	fdrop(jo->fp);
    if (jo->fifo.membase)
	kfree(jo->fifo.membase, M_JFIFO);
    kfree(jo, M_JOURNAL);

    return(0);
}
457 
/*
 * Resynchronize a journal.  Not implemented - always fails with EINVAL.
 */
static int
journal_resync_vfs_journal(struct mount *mp, const void *ctl)
{
    return(EINVAL);
}
463 
/*
 * Report status for the journals attached to a mount point.
 *
 * info->index selects which journals are reported: MC_JOURNAL_INDEX_ID
 * matches a single journal by id, MC_JOURNAL_INDEX_ALL matches every
 * journal, and a non-negative value matches by position.
 * NOTE(review): the non-negative case only skips entries when
 * info->index < index, i.e. it reports entries 0..info->index rather
 * than just the requested one - confirm this is intended.
 *
 * One mountctl_journal_ret_status record per matching journal is written
 * into rstat/buflen and *res accumulates the bytes filled in.  If the
 * buffer fills mid-scan the previously emitted record is flagged
 * MC_JOURNAL_STATUS_MORETOCOME; a buffer too small for even one record
 * yields EINVAL.
 */
static int
journal_status_vfs_journal(struct mount *mp,
		       const struct mountctl_status_journal *info,
		       struct mountctl_journal_ret_status *rstat,
		       int buflen, int *res)
{
    struct journal *jo;
    int error = 0;
    int index;

    index = 0;
    *res = 0;
    TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
	if (info->index == MC_JOURNAL_INDEX_ID) {
	    if (bcmp(jo->id, info->id, sizeof(jo->id)) != 0)
		continue;
	} else if (info->index >= 0) {
	    if (info->index < index)
		continue;
	} else if (info->index != MC_JOURNAL_INDEX_ALL) {
	    continue;
	}
	if (buflen < sizeof(*rstat)) {
	    /* rstat[-1] is safe here: *res != 0 implies at least one
	     * record was already emitted */
	    if (*res)
		rstat[-1].flags |= MC_JOURNAL_STATUS_MORETOCOME;
	    else
		error = EINVAL;
	    break;
	}
	bzero(rstat, sizeof(*rstat));
	rstat->recsize = sizeof(*rstat);
	bcopy(jo->id, rstat->id, sizeof(jo->id));
	rstat->index = index;
	rstat->membufsize = jo->fifo.size;
	/* occupancy derived from the FIFO's windex/rindex/xindex offsets */
	rstat->membufused = jo->fifo.windex - jo->fifo.xindex;
	rstat->membufunacked = jo->fifo.rindex - jo->fifo.xindex;
	rstat->bytessent = jo->total_acked;
	rstat->fifostalls = jo->fifostalls;
	++rstat;
	++index;
	*res += sizeof(*rstat);
	buflen -= sizeof(*rstat);
    }
    return(error);
}
509 
510 /************************************************************************
511  *			PARALLEL TRANSACTION SUPPORT ROUTINES		*
512  ************************************************************************
513  *
514  * JRECLIST_*() - routines which create and iterate over jrecord structures,
515  *		  because a mount point may have multiple attached journals.
516  */
517 
/*
 * Initialize the passed jrecord_list and create a jrecord for each
 * journal we need to write to.  Unnecessary mallocs are avoided by
 * using the passed jrecord structure as the first jrecord in the list.
 * A starting transaction is pushed for each jrecord.
 *
 * Returns non-zero if any of the journals require undo records.
 */
static
int
jreclist_init(struct mount *mp, struct jrecord_list *jreclist,
	      struct jrecord *jreccache, int16_t rectype)
{
    struct journal *jo;
    struct jrecord *jrec;
    int wantrev;
    int count;
    int16_t streamid;

    TAILQ_INIT(&jreclist->list);

    /*
     * Select the stream ID to use for the transaction.  We must select
     * a stream ID that is not currently in use by some other parallel
     * transaction.
     *
     * Don't bother calculating the next streamid when reassigning
     * mnt_streamid, since parallel transactions are fairly rare.  This
     * also allows someone observing the raw records to clearly see
     * when parallel transactions occur.
     */
    streamid = mp->mnt_streamid;
    count = 0;
    /* scan the bitmap for a free id, wrapping within [JMIN, JMAX) */
    while (mp->mnt_jbitmap[streamid >> 3] & (1 << (streamid & 7))) {
	if (++streamid == JREC_STREAMID_JMAX)
		streamid = JREC_STREAMID_JMIN;
	/*
	 * Every id busy after a full pass: sleep and rescan.
	 * NOTE(review): no wakeup() on this channel is visible in this
	 * file, so the tsleep appears to rely on its 10s timeout.
	 */
	if (++count == JREC_STREAMID_JMAX - JREC_STREAMID_JMIN) {
		kprintf("jreclist_init: all streamid's in use! sleeping\n");
		tsleep(jreclist, 0, "jsidfl", hz * 10);
		count = 0;
	}
    }
    mp->mnt_jbitmap[streamid >> 3] |= 1 << (streamid & 7);
    mp->mnt_streamid = streamid;
    jreclist->streamid = streamid;

    /*
     * Now initialize a stream on each journal.  The first jrecord is the
     * caller-supplied cache, avoiding a malloc in the common
     * single-journal case.
     */
    count = 0;
    wantrev = 0;
    TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
	if (count == 0)
	    jrec = jreccache;
	else
	    jrec = kmalloc(sizeof(*jrec), M_JOURNAL, M_WAITOK);
	jrecord_init(jo, jrec, streamid);
	jrec->user_save = jrecord_push(jrec, rectype);
	TAILQ_INSERT_TAIL(&jreclist->list, jrec, user_entry);
	if (jo->flags & MC_JOURNAL_WANT_REVERSABLE)
	    wantrev = 1;
	++count;
    }
    return(wantrev);
}
583 
/*
 * Terminate the journaled transactions started by jreclist_init().  If
 * an error occurred, the transaction records will be aborted.
 */
static
void
jreclist_done(struct mount *mp, struct jrecord_list *jreclist, int error)
{
    struct jrecord *jrec;
    int count;

    /*
     * Cleanup the jrecord state on each journal.
     */
    TAILQ_FOREACH(jrec, &jreclist->list, user_entry) {
	jrecord_pop(jrec, jrec->user_save);
	jrecord_done(jrec, error);
    }

    /*
     * Free allocated jrec's.  The first entry is always the
     * caller-supplied cache (see jreclist_init) and must not be freed.
     */
    count = 0;
    while ((jrec = TAILQ_FIRST(&jreclist->list)) != NULL) {
	TAILQ_REMOVE(&jreclist->list, jrec, user_entry);
	if (count)
	    kfree(jrec, M_JOURNAL);
	++count;
    }

    /*
     * Clear the streamid so it can be reused.
     */
    mp->mnt_jbitmap[jreclist->streamid >> 3] &= ~(1 << (jreclist->streamid & 7));
}
619 
/*
 * This procedure writes out UNDO records for available reversable
 * journals.  Only journals flagged MC_JOURNAL_WANT_REVERSABLE receive
 * the record.  JRUNDO_GETVP causes a shared vget/vput around the scan
 * for callers that do not already hold the vnode.
 *
 * XXX could use improvement.  There is no need to re-read the file
 * for each journal.
 */
static
void
jreclist_undo_file(struct jrecord_list *jreclist, struct vnode *vp,
		   int jrflags, off_t off, off_t bytes)
{
    struct jrecord *jrec;
    int error;

    error = 0;
    if (jrflags & JRUNDO_GETVP)
	error = vget(vp, LK_SHARED);
    if (error == 0) {
	TAILQ_FOREACH(jrec, &jreclist->list, user_entry) {
	    if (jrec->jo->flags & MC_JOURNAL_WANT_REVERSABLE) {
		jrecord_undo_file(jrec, vp, jrflags, off, bytes);
	    }
	}
    }
    if (error == 0 && jrflags & JRUNDO_GETVP)
	vput(vp);
}
648 
/************************************************************************
 *			LOW LEVEL UNDO SUPPORT ROUTINE			*
 ************************************************************************
 *
 * This function is used to support UNDO records.  It will generate an
 * appropriate record with the requested portion of the file data.  Note
 * that file data is only recorded if JRUNDO_FILEDATA is passed.  If bytes
 * is -1, it will be set to the size of the file.
 */
static void
jrecord_undo_file(struct jrecord *jrec, struct vnode *vp, int jrflags,
		  off_t off, off_t bytes)
{
    struct vattr attr;
    void *save1; /* warning, save pointers do not always remain valid */
    void *save2;
    int error;

    /*
     * Setup.  Start the UNDO record, obtain a shared lock on the vnode,
     * and retrieve attribute info.
     */
    save1 = jrecord_push(jrec, JTYPE_UNDO);
    error = VOP_GETATTR(vp, &attr);
    if (error)
	goto done;

    /*
     * Generate UNDO records as requested.  Each attribute leaf is only
     * emitted when its JRUNDO_* flag is set and the filesystem actually
     * supplied a value (fields left at VNOVAL are skipped).
     */
    if (jrflags & JRUNDO_VATTR) {
	save2 = jrecord_push(jrec, JTYPE_VATTR);
	jrecord_leaf(jrec, JLEAF_VTYPE, &attr.va_type, sizeof(attr.va_type));
	if ((jrflags & JRUNDO_NLINK) && attr.va_nlink != VNOVAL)
	    jrecord_leaf(jrec, JLEAF_NLINK, &attr.va_nlink, sizeof(attr.va_nlink));
	if ((jrflags & JRUNDO_SIZE) && attr.va_size != VNOVAL)
	    jrecord_leaf(jrec, JLEAF_SIZE, &attr.va_size, sizeof(attr.va_size));
	if ((jrflags & JRUNDO_UID) && attr.va_uid != VNOVAL)
	    jrecord_leaf(jrec, JLEAF_UID, &attr.va_uid, sizeof(attr.va_uid));
	if ((jrflags & JRUNDO_GID) && attr.va_gid != VNOVAL)
	    jrecord_leaf(jrec, JLEAF_GID, &attr.va_gid, sizeof(attr.va_gid));
	if ((jrflags & JRUNDO_FSID) && attr.va_fsid != VNOVAL)
	    jrecord_leaf(jrec, JLEAF_FSID, &attr.va_fsid, sizeof(attr.va_fsid));
	if ((jrflags & JRUNDO_MODES) && attr.va_mode != (mode_t)VNOVAL)
	    jrecord_leaf(jrec, JLEAF_MODES, &attr.va_mode, sizeof(attr.va_mode));
	if ((jrflags & JRUNDO_INUM) && attr.va_fileid != VNOVAL)
	    jrecord_leaf(jrec, JLEAF_INUM, &attr.va_fileid, sizeof(attr.va_fileid));
	if ((jrflags & JRUNDO_ATIME) && attr.va_atime.tv_sec != VNOVAL)
	    jrecord_leaf(jrec, JLEAF_ATIME, &attr.va_atime, sizeof(attr.va_atime));
	if ((jrflags & JRUNDO_MTIME) && attr.va_mtime.tv_sec != VNOVAL)
	    jrecord_leaf(jrec, JLEAF_MTIME, &attr.va_mtime, sizeof(attr.va_mtime));
	if ((jrflags & JRUNDO_CTIME) && attr.va_ctime.tv_sec != VNOVAL)
	    jrecord_leaf(jrec, JLEAF_CTIME, &attr.va_ctime, sizeof(attr.va_ctime));
	if ((jrflags & JRUNDO_GEN) && attr.va_gen != VNOVAL)
	    jrecord_leaf(jrec, JLEAF_GEN, &attr.va_gen, sizeof(attr.va_gen));
	if ((jrflags & JRUNDO_FLAGS) && attr.va_flags != VNOVAL)
	    jrecord_leaf(jrec, JLEAF_FLAGS, &attr.va_flags, sizeof(attr.va_flags));
	if ((jrflags & JRUNDO_UDEV) && attr.va_rmajor != VNOVAL) {
	    udev_t rdev = makeudev(attr.va_rmajor, attr.va_rminor);
	    jrecord_leaf(jrec, JLEAF_UDEV, &rdev, sizeof(rdev));
	    jrecord_leaf(jrec, JLEAF_UMAJOR, &attr.va_rmajor, sizeof(attr.va_rmajor));
	    jrecord_leaf(jrec, JLEAF_UMINOR, &attr.va_rminor, sizeof(attr.va_rminor));
	}
	jrecord_pop(jrec, save2);
    }

    /*
     * Output the file data being overwritten by reading the file and
     * writing it out to the journal prior to the write operation.  We
     * do not need to write out data past the current file EOF.
     *
     * XXX support JRUNDO_CONDLINK - do not write out file data for files
     * with a link count > 1.  The undo code needs to locate the inode and
     * regenerate the hardlink.
     */
    if ((jrflags & JRUNDO_FILEDATA) && attr.va_type == VREG) {
	if (attr.va_size != VNOVAL) {
	    /* clip the undo range to the current EOF */
	    if (bytes == -1)
		bytes = attr.va_size - off;
	    if (off + bytes > attr.va_size)
		bytes = attr.va_size - off;
	    if (bytes > 0)
		jrecord_file_data(jrec, vp, off, bytes);
	} else {
	    error = EINVAL;
	}
    }
    if ((jrflags & JRUNDO_FILEDATA) && attr.va_type == VLNK) {
	struct iovec aiov;
	struct uio auio;
	char *buf;

	/* record the symlink target by reading the link into a temp buffer */
	buf = kmalloc(PATH_MAX, M_JOURNAL, M_WAITOK);
	aiov.iov_base = buf;
	aiov.iov_len = PATH_MAX;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = curthread;
	auio.uio_resid = PATH_MAX;
	error = VOP_READLINK(vp, &auio, proc0.p_ucred);
	if (error == 0) {
		jrecord_leaf(jrec, JLEAF_SYMLINKDATA, buf,
				PATH_MAX - auio.uio_resid);
	}
	kfree(buf, M_JOURNAL);
    }
done:
    if (error)
	jrecord_leaf(jrec, JLEAF_ERROR, &error, sizeof(error));
    jrecord_pop(jrec, save1);
}
763 
764 /************************************************************************
765  *			JOURNAL VNOPS					*
766  ************************************************************************
767  *
768  * These are function shims replacing the normal filesystem ops.  We become
769  * responsible for calling the underlying filesystem ops.  We have the choice
770  * of executing the underlying op first and then generating the journal entry,
771  * or starting the journal entry, executing the underlying op, and then
772  * either completing or aborting it.
773  *
774  * The journal is supposed to be a high-level entity, which generally means
 * identifying files by name rather than by inode.  Supplying both allows
776  * the journal to be used both for inode-number-compatible 'mirrors' and
777  * for simple filesystem replication.
778  *
779  * Writes are particularly difficult to deal with because a single write may
780  * represent a hundred megabyte buffer or more, and both writes and truncations
781  * require the 'old' data to be written out as well as the new data if the
 * log is reversible.  Other issues:
783  *
784  * - How to deal with operations on unlinked files (no path available),
785  *   but which may still be filesystem visible due to hard links.
786  *
787  * - How to deal with modifications made via a memory map.
788  *
789  * - Future cache coherency support will require cache coherency API calls
790  *   both prior to and after the call to the underlying VFS.
791  *
792  * ALSO NOTE: We do not have to shim compatibility VOPs like MKDIR which have
793  * new VFS equivalents (NMKDIR).
794  */
/*
 * Journal vop_setattr { a_vp, a_vap, a_cred, a_td }
 *
 * Records an undo of the current attributes for reversable journals, runs
 * the underlying setattr, and on success writes the new attributes to
 * every attached journal as a REDO record.
 */
static
int
journal_setattr(struct vop_setattr_args *ap)
{
    struct jrecord_list jreclist;
    struct jrecord jreccache;
    struct jrecord *jrec;
    struct mount *mp;
    void *save;
    int error;

    mp = ap->a_head.a_ops->head.vv_mount;
    /* non-zero return means at least one journal wants undo records */
    if (jreclist_init(mp, &jreclist, &jreccache, JTYPE_SETATTR)) {
	jreclist_undo_file(&jreclist, ap->a_vp, JRUNDO_VATTR, 0, 0);
    }
    error = vop_journal_operate_ap(&ap->a_head);
    if (error == 0) {
	TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
	    jrecord_write_cred(jrec, curthread, ap->a_cred);
	    jrecord_write_vnode_ref(jrec, ap->a_vp);
	    save = jrecord_push(jrec, JTYPE_REDO);
	    jrecord_write_vattr(jrec, ap->a_vap);
	    jrecord_pop(jrec, save);
	}
    }
    jreclist_done(mp, &jreclist, error);
    return (error);
}
827 
/*
 * Journal vop_write { a_vp, a_uio, a_ioflag, a_cred }
 *
 * Snapshots the caller's uio (the iovecs are consumed by the underlying
 * VOP), optionally records undo data for reversable journals, performs
 * the real write, then replays the written range into each journal as a
 * REDO record.
 */
static
int
journal_write(struct vop_write_args *ap)
{
    struct jrecord_list jreclist;
    struct jrecord jreccache;
    struct jrecord *jrec;
    struct mount *mp;
    struct uio uio_copy;
    struct iovec uio_one_iovec;
    void *save;
    int error;

    /*
     * This is really nasty.  UIO's don't retain sufficient information to
     * be reusable once they've gone through the VOP chain.  The iovecs get
     * cleared, so we have to copy the UIO.
     *
     * XXX fix the UIO code to not destroy iov's during a scan so we can
     *     reuse the uio over and over again.
     *
     * XXX UNDO code needs to journal the old data prior to the write.
     */
    uio_copy = *ap->a_uio;
    if (uio_copy.uio_iovcnt == 1) {
	/* common case: a single iovec fits in the stack copy */
	uio_one_iovec = ap->a_uio->uio_iov[0];
	uio_copy.uio_iov = &uio_one_iovec;
    } else {
	uio_copy.uio_iov = kmalloc(uio_copy.uio_iovcnt * sizeof(struct iovec),
				    M_JOURNAL, M_WAITOK);
	bcopy(ap->a_uio->uio_iov, uio_copy.uio_iov,
		uio_copy.uio_iovcnt * sizeof(struct iovec));
    }

    /*
     * Write out undo data.  Note that uio_offset is incorrect if
     * IO_APPEND is set, but fortunately we have no undo file data to
     * write out in that case.
     */
    mp = ap->a_head.a_ops->head.vv_mount;
    if (jreclist_init(mp, &jreclist, &jreccache, JTYPE_WRITE)) {
	if (ap->a_ioflag & IO_APPEND) {
	    jreclist_undo_file(&jreclist, ap->a_vp, JRUNDO_SIZE|JRUNDO_MTIME, 0, 0);
	} else {
	    jreclist_undo_file(&jreclist, ap->a_vp,
			       JRUNDO_FILEDATA|JRUNDO_SIZE|JRUNDO_MTIME,
			       uio_copy.uio_offset, uio_copy.uio_resid);
	}
    }
    error = vop_journal_operate_ap(&ap->a_head);

    /*
     * XXX bad hack to figure out the offset for O_APPEND writes (note:
     * uio field state after the VFS operation).
     */
    uio_copy.uio_offset = ap->a_uio->uio_offset -
			  (uio_copy.uio_resid - ap->a_uio->uio_resid);

    /*
     * Output the write data to the journal.
     */
    if (error == 0) {
	TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
	    /* NOTE(review): passes NULL td here, unlike journal_setattr()
	     * which passes curthread - confirm intended */
	    jrecord_write_cred(jrec, NULL, ap->a_cred);
	    jrecord_write_vnode_ref(jrec, ap->a_vp);
	    save = jrecord_push(jrec, JTYPE_REDO);
	    jrecord_write_uio(jrec, JLEAF_FILEDATA, &uio_copy);
	    jrecord_pop(jrec, save);
	}
    }
    jreclist_done(mp, &jreclist, error);

    /* free the iovec array only if it was heap-allocated above */
    if (uio_copy.uio_iov != &uio_one_iovec)
	kfree(uio_copy.uio_iov, M_JOURNAL);
    return (error);
}
907 
908 /*
909  * Journal vop_fsync { a_vp, a_waitfor, a_td }
910  */
911 static
912 int
913 journal_fsync(struct vop_fsync_args *ap)
914 {
915 #if 0
916     struct mount *mp;
917     struct journal *jo;
918 #endif
919     int error;
920 
921     error = vop_journal_operate_ap(&ap->a_head);
922 #if 0
923     mp = ap->a_head.a_ops->head.vv_mount;
924     if (error == 0) {
925 	TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
926 	    /* XXX synchronize pending journal records */
927 	}
928     }
929 #endif
930     return (error);
931 }
932 
933 /*
934  * Journal vop_putpages { a_vp, a_m, a_count, a_sync, a_rtvals, a_offset }
935  *
936  * note: a_count is in bytes.
937  */
938 static
939 int
940 journal_putpages(struct vop_putpages_args *ap)
941 {
942     struct jrecord_list jreclist;
943     struct jrecord jreccache;
944     struct jrecord *jrec;
945     struct mount *mp;
946     void *save;
947     int error;
948 
949     mp = ap->a_head.a_ops->head.vv_mount;
950     if (jreclist_init(mp, &jreclist, &jreccache, JTYPE_PUTPAGES) &&
951 	ap->a_count > 0
952     ) {
953 	jreclist_undo_file(&jreclist, ap->a_vp,
954 			   JRUNDO_FILEDATA|JRUNDO_SIZE|JRUNDO_MTIME,
955 			   ap->a_offset, btoc(ap->a_count));
956     }
957     error = vop_journal_operate_ap(&ap->a_head);
958     if (error == 0 && ap->a_count > 0) {
959 	TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
960 	    jrecord_write_vnode_ref(jrec, ap->a_vp);
961 	    save = jrecord_push(jrec, JTYPE_REDO);
962 	    jrecord_write_pagelist(jrec, JLEAF_FILEDATA, ap->a_m, ap->a_rtvals,
963 				   btoc(ap->a_count), ap->a_offset);
964 	    jrecord_pop(jrec, save);
965 	}
966     }
967     jreclist_done(mp, &jreclist, error);
968     return (error);
969 }
970 
971 /*
972  * Journal vop_setacl { a_vp, a_type, a_aclp, a_cred, a_td }
973  */
974 static
975 int
976 journal_setacl(struct vop_setacl_args *ap)
977 {
978     struct jrecord_list jreclist;
979     struct jrecord jreccache;
980     struct jrecord *jrec;
981     struct mount *mp;
982     int error;
983 
984     mp = ap->a_head.a_ops->head.vv_mount;
985     jreclist_init(mp, &jreclist, &jreccache, JTYPE_SETACL);
986     error = vop_journal_operate_ap(&ap->a_head);
987     if (error == 0) {
988 	TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
989 #if 0
990 	    if ((jo->flags & MC_JOURNAL_WANT_REVERSABLE))
991 		jrecord_undo_file(jrec, ap->a_vp, JRUNDO_XXX, 0, 0);
992 #endif
993 	    jrecord_write_cred(jrec, curthread, ap->a_cred);
994 	    jrecord_write_vnode_ref(jrec, ap->a_vp);
995 #if 0
996 	    save = jrecord_push(jrec, JTYPE_REDO);
997 	    /* XXX type, aclp */
998 	    jrecord_pop(jrec, save);
999 #endif
1000 	}
1001     }
1002     jreclist_done(mp, &jreclist, error);
1003     return (error);
1004 }
1005 
1006 /*
1007  * Journal vop_setextattr { a_vp, a_name, a_uio, a_cred, a_td }
1008  */
1009 static
1010 int
1011 journal_setextattr(struct vop_setextattr_args *ap)
1012 {
1013     struct jrecord_list jreclist;
1014     struct jrecord jreccache;
1015     struct jrecord *jrec;
1016     struct mount *mp;
1017     void *save;
1018     int error;
1019 
1020     mp = ap->a_head.a_ops->head.vv_mount;
1021     jreclist_init(mp, &jreclist, &jreccache, JTYPE_SETEXTATTR);
1022     error = vop_journal_operate_ap(&ap->a_head);
1023     if (error == 0) {
1024 	TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1025 #if 0
1026 	    if ((jo->flags & MC_JOURNAL_WANT_REVERSABLE))
1027 		jrecord_undo_file(jrec, ap->a_vp, JRUNDO_XXX, 0, 0);
1028 #endif
1029 	    jrecord_write_cred(jrec, curthread, ap->a_cred);
1030 	    jrecord_write_vnode_ref(jrec, ap->a_vp);
1031 	    jrecord_leaf(jrec, JLEAF_ATTRNAME, ap->a_name, strlen(ap->a_name));
1032 	    save = jrecord_push(jrec, JTYPE_REDO);
1033 	    jrecord_write_uio(jrec, JLEAF_FILEDATA, ap->a_uio);
1034 	    jrecord_pop(jrec, save);
1035 	}
1036     }
1037     jreclist_done(mp, &jreclist, error);
1038     return (error);
1039 }
1040 
1041 /*
1042  * Journal vop_ncreate { a_nch, a_vpp, a_cred, a_vap }
1043  */
1044 static
1045 int
1046 journal_ncreate(struct vop_ncreate_args *ap)
1047 {
1048     struct jrecord_list jreclist;
1049     struct jrecord jreccache;
1050     struct jrecord *jrec;
1051     struct mount *mp;
1052     void *save;
1053     int error;
1054 
1055     mp = ap->a_head.a_ops->head.vv_mount;
1056     jreclist_init(mp, &jreclist, &jreccache, JTYPE_CREATE);
1057     error = vop_journal_operate_ap(&ap->a_head);
1058     if (error == 0) {
1059 	TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1060 	    jrecord_write_cred(jrec, NULL, ap->a_cred);
1061 	    jrecord_write_path(jrec, JLEAF_PATH1, ap->a_nch->ncp);
1062 	    if (*ap->a_vpp)
1063 		jrecord_write_vnode_ref(jrec, *ap->a_vpp);
1064 	    save = jrecord_push(jrec, JTYPE_REDO);
1065 	    jrecord_write_vattr(jrec, ap->a_vap);
1066 	    jrecord_pop(jrec, save);
1067 	}
1068     }
1069     jreclist_done(mp, &jreclist, error);
1070     return (error);
1071 }
1072 
1073 /*
1074  * Journal vop_nmknod { a_nch, a_vpp, a_cred, a_vap }
1075  */
1076 static
1077 int
1078 journal_nmknod(struct vop_nmknod_args *ap)
1079 {
1080     struct jrecord_list jreclist;
1081     struct jrecord jreccache;
1082     struct jrecord *jrec;
1083     struct mount *mp;
1084     void *save;
1085     int error;
1086 
1087     mp = ap->a_head.a_ops->head.vv_mount;
1088     jreclist_init(mp, &jreclist, &jreccache, JTYPE_MKNOD);
1089     error = vop_journal_operate_ap(&ap->a_head);
1090     if (error == 0) {
1091 	TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1092 	    jrecord_write_cred(jrec, NULL, ap->a_cred);
1093 	    jrecord_write_path(jrec, JLEAF_PATH1, ap->a_nch->ncp);
1094 	    save = jrecord_push(jrec, JTYPE_REDO);
1095 	    jrecord_write_vattr(jrec, ap->a_vap);
1096 	    jrecord_pop(jrec, save);
1097 	    if (*ap->a_vpp)
1098 		jrecord_write_vnode_ref(jrec, *ap->a_vpp);
1099 	}
1100     }
1101     jreclist_done(mp, &jreclist, error);
1102     return (error);
1103 }
1104 
1105 /*
1106  * Journal vop_nlink { a_nch, a_vp, a_cred }
1107  */
1108 static
1109 int
1110 journal_nlink(struct vop_nlink_args *ap)
1111 {
1112     struct jrecord_list jreclist;
1113     struct jrecord jreccache;
1114     struct jrecord *jrec;
1115     struct mount *mp;
1116     void *save;
1117     int error;
1118 
1119     mp = ap->a_head.a_ops->head.vv_mount;
1120     jreclist_init(mp, &jreclist, &jreccache, JTYPE_LINK);
1121     error = vop_journal_operate_ap(&ap->a_head);
1122     if (error == 0) {
1123 	TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1124 	    jrecord_write_cred(jrec, NULL, ap->a_cred);
1125 	    jrecord_write_path(jrec, JLEAF_PATH1, ap->a_nch->ncp);
1126 	    /* XXX PATH to VP and inode number */
1127 	    /* XXX this call may not record the correct path when
1128 	     * multiple paths are available */
1129 	    save = jrecord_push(jrec, JTYPE_REDO);
1130 	    jrecord_write_vnode_link(jrec, ap->a_vp, ap->a_nch->ncp);
1131 	    jrecord_pop(jrec, save);
1132 	}
1133     }
1134     jreclist_done(mp, &jreclist, error);
1135     return (error);
1136 }
1137 
1138 /*
1139  * Journal vop_symlink { a_nch, a_vpp, a_cred, a_vap, a_target }
1140  */
1141 static
1142 int
1143 journal_nsymlink(struct vop_nsymlink_args *ap)
1144 {
1145     struct jrecord_list jreclist;
1146     struct jrecord jreccache;
1147     struct jrecord *jrec;
1148     struct mount *mp;
1149     void *save;
1150     int error;
1151 
1152     mp = ap->a_head.a_ops->head.vv_mount;
1153     jreclist_init(mp, &jreclist, &jreccache, JTYPE_SYMLINK);
1154     error = vop_journal_operate_ap(&ap->a_head);
1155     if (error == 0) {
1156 	TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1157 	    jrecord_write_cred(jrec, NULL, ap->a_cred);
1158 	    jrecord_write_path(jrec, JLEAF_PATH1, ap->a_nch->ncp);
1159 	    save = jrecord_push(jrec, JTYPE_REDO);
1160 	    jrecord_leaf(jrec, JLEAF_SYMLINKDATA,
1161 			ap->a_target, strlen(ap->a_target));
1162 	    jrecord_pop(jrec, save);
1163 	    if (*ap->a_vpp)
1164 		jrecord_write_vnode_ref(jrec, *ap->a_vpp);
1165 	}
1166     }
1167     jreclist_done(mp, &jreclist, error);
1168     return (error);
1169 }
1170 
1171 /*
1172  * Journal vop_nwhiteout { a_nch, a_cred, a_flags }
1173  */
1174 static
1175 int
1176 journal_nwhiteout(struct vop_nwhiteout_args *ap)
1177 {
1178     struct jrecord_list jreclist;
1179     struct jrecord jreccache;
1180     struct jrecord *jrec;
1181     struct mount *mp;
1182     int error;
1183 
1184     mp = ap->a_head.a_ops->head.vv_mount;
1185     jreclist_init(mp, &jreclist, &jreccache, JTYPE_WHITEOUT);
1186     error = vop_journal_operate_ap(&ap->a_head);
1187     if (error == 0) {
1188 	TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1189 	    jrecord_write_cred(jrec, NULL, ap->a_cred);
1190 	    jrecord_write_path(jrec, JLEAF_PATH1, ap->a_nch->ncp);
1191 	}
1192     }
1193     jreclist_done(mp, &jreclist, error);
1194     return (error);
1195 }
1196 
1197 /*
1198  * Journal vop_nremove { a_nch, a_cred }
1199  */
1200 static
1201 int
1202 journal_nremove(struct vop_nremove_args *ap)
1203 {
1204     struct jrecord_list jreclist;
1205     struct jrecord jreccache;
1206     struct jrecord *jrec;
1207     struct mount *mp;
1208     int error;
1209 
1210     mp = ap->a_head.a_ops->head.vv_mount;
1211     if (jreclist_init(mp, &jreclist, &jreccache, JTYPE_REMOVE) &&
1212 	ap->a_nch->ncp->nc_vp
1213     ) {
1214 	jreclist_undo_file(&jreclist, ap->a_nch->ncp->nc_vp,
1215 			   JRUNDO_ALL|JRUNDO_GETVP|JRUNDO_CONDLINK, 0, -1);
1216     }
1217     error = vop_journal_operate_ap(&ap->a_head);
1218     if (error == 0) {
1219 	TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1220 	    jrecord_write_cred(jrec, NULL, ap->a_cred);
1221 	    jrecord_write_path(jrec, JLEAF_PATH1, ap->a_nch->ncp);
1222 	}
1223     }
1224     jreclist_done(mp, &jreclist, error);
1225     return (error);
1226 }
1227 
1228 /*
1229  * Journal vop_nmkdir { a_nch, a_vpp, a_cred, a_vap }
1230  */
1231 static
1232 int
1233 journal_nmkdir(struct vop_nmkdir_args *ap)
1234 {
1235     struct jrecord_list jreclist;
1236     struct jrecord jreccache;
1237     struct jrecord *jrec;
1238     struct mount *mp;
1239     int error;
1240 
1241     mp = ap->a_head.a_ops->head.vv_mount;
1242     jreclist_init(mp, &jreclist, &jreccache, JTYPE_MKDIR);
1243     error = vop_journal_operate_ap(&ap->a_head);
1244     if (error == 0) {
1245 	TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1246 #if 0
1247 	    if (jo->flags & MC_JOURNAL_WANT_AUDIT) {
1248 		jrecord_write_audit(jrec);
1249 	    }
1250 #endif
1251 	    jrecord_write_path(jrec, JLEAF_PATH1, ap->a_nch->ncp);
1252 	    jrecord_write_cred(jrec, NULL, ap->a_cred);
1253 	    jrecord_write_vattr(jrec, ap->a_vap);
1254 	    jrecord_write_path(jrec, JLEAF_PATH1, ap->a_nch->ncp);
1255 	    if (*ap->a_vpp)
1256 		jrecord_write_vnode_ref(jrec, *ap->a_vpp);
1257 	}
1258     }
1259     jreclist_done(mp, &jreclist, error);
1260     return (error);
1261 }
1262 
1263 /*
1264  * Journal vop_nrmdir { a_nch, a_cred }
1265  */
1266 static
1267 int
1268 journal_nrmdir(struct vop_nrmdir_args *ap)
1269 {
1270     struct jrecord_list jreclist;
1271     struct jrecord jreccache;
1272     struct jrecord *jrec;
1273     struct mount *mp;
1274     int error;
1275 
1276     mp = ap->a_head.a_ops->head.vv_mount;
1277     if (jreclist_init(mp, &jreclist, &jreccache, JTYPE_RMDIR)) {
1278 	jreclist_undo_file(&jreclist, ap->a_nch->ncp->nc_vp,
1279 			   JRUNDO_VATTR|JRUNDO_GETVP, 0, 0);
1280     }
1281     error = vop_journal_operate_ap(&ap->a_head);
1282     if (error == 0) {
1283 	TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1284 	    jrecord_write_cred(jrec, NULL, ap->a_cred);
1285 	    jrecord_write_path(jrec, JLEAF_PATH1, ap->a_nch->ncp);
1286 	}
1287     }
1288     jreclist_done(mp, &jreclist, error);
1289     return (error);
1290 }
1291 
1292 /*
1293  * Journal vop_nrename { a_fnch, a_tnch, a_cred }
1294  */
1295 static
1296 int
1297 journal_nrename(struct vop_nrename_args *ap)
1298 {
1299     struct jrecord_list jreclist;
1300     struct jrecord jreccache;
1301     struct jrecord *jrec;
1302     struct mount *mp;
1303     int error;
1304 
1305     mp = ap->a_head.a_ops->head.vv_mount;
1306     if (jreclist_init(mp, &jreclist, &jreccache, JTYPE_RENAME) &&
1307 	ap->a_tnch->ncp->nc_vp
1308     ) {
1309 	jreclist_undo_file(&jreclist, ap->a_tnch->ncp->nc_vp,
1310 			   JRUNDO_ALL|JRUNDO_GETVP|JRUNDO_CONDLINK, 0, -1);
1311     }
1312     error = vop_journal_operate_ap(&ap->a_head);
1313     if (error == 0) {
1314 	TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1315 	    jrecord_write_cred(jrec, NULL, ap->a_cred);
1316 	    jrecord_write_path(jrec, JLEAF_PATH1, ap->a_fnch->ncp);
1317 	    jrecord_write_path(jrec, JLEAF_PATH2, ap->a_tnch->ncp);
1318 	}
1319     }
1320     jreclist_done(mp, &jreclist, error);
1321     return (error);
1322 }
1323 
1324