xref: /dragonfly/sys/kern/vfs_jops.c (revision 092c2dd1)
1 /*
2  * Copyright (c) 2004-2006 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 /*
35  * Each mount point may have zero or more independently configured journals
36  * attached to it.  Each journal is represented by a memory FIFO and worker
37  * thread.  Journal events are streamed through the FIFO to the thread,
38  * batched up (typically on one-second intervals), and written out by the
39  * thread.
40  *
41  * Journal vnode ops are executed instead of mnt_vn_norm_ops when one or
42  * more journals have been installed on a mount point.  It becomes the
43  * responsibility of the journal op to call the underlying normal op as
44  * appropriate.
45  */
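
/*
 * Rough data flow (summary of the above): a journaled VOP shim builds
 * jrecords for the operation, the records are appended to each attached
 * journal's memory FIFO, and that journal's worker thread drains the FIFO
 * to the descriptor supplied at install time, typically in one-second
 * batches.
 */
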
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/buf.h>
49 #include <sys/conf.h>
50 #include <sys/kernel.h>
51 #include <sys/queue.h>
52 #include <sys/lock.h>
53 #include <sys/malloc.h>
54 #include <sys/mount.h>
55 #include <sys/unistd.h>
56 #include <sys/vnode.h>
57 #include <sys/poll.h>
58 #include <sys/mountctl.h>
59 #include <sys/journal.h>
60 #include <sys/file.h>
61 #include <sys/proc.h>
62 #include <sys/socket.h>
63 #include <sys/socketvar.h>
64 
65 #include <machine/limits.h>
66 
67 #include <vm/vm.h>
68 #include <vm/vm_object.h>
69 #include <vm/vm_page.h>
70 #include <vm/vm_pager.h>
71 #include <vm/vnode_pager.h>
72 
73 #include <sys/file2.h>
74 
75 static int journal_attach(struct mount *mp);
76 static void journal_detach(struct mount *mp);
77 static int journal_install_vfs_journal(struct mount *mp, struct file *fp,
78 			    const struct mountctl_install_journal *info);
79 static int journal_restart_vfs_journal(struct mount *mp, struct file *fp,
80 			    const struct mountctl_restart_journal *info);
81 static int journal_remove_vfs_journal(struct mount *mp,
82 			    const struct mountctl_remove_journal *info);
83 static int journal_restart(struct mount *mp, struct file *fp,
84 			    struct journal *jo, int flags);
85 static int journal_destroy(struct mount *mp, struct journal *jo, int flags);
86 static int journal_resync_vfs_journal(struct mount *mp, const void *ctl);
87 static int journal_status_vfs_journal(struct mount *mp,
88 		       const struct mountctl_status_journal *info,
89 		       struct mountctl_journal_ret_status *rstat,
90 		       int buflen, int *res);
91 
92 static void jrecord_undo_file(struct jrecord *jrec, struct vnode *vp,
93 			     int jrflags, off_t off, off_t bytes);
94 
95 static int journal_setattr(struct vop_setattr_args *ap);
96 static int journal_write(struct vop_write_args *ap);
97 static int journal_fsync(struct vop_fsync_args *ap);
98 static int journal_putpages(struct vop_putpages_args *ap);
99 static int journal_setacl(struct vop_setacl_args *ap);
100 static int journal_setextattr(struct vop_setextattr_args *ap);
101 static int journal_ncreate(struct vop_ncreate_args *ap);
102 static int journal_nmknod(struct vop_nmknod_args *ap);
103 static int journal_nlink(struct vop_nlink_args *ap);
104 static int journal_nsymlink(struct vop_nsymlink_args *ap);
105 static int journal_nwhiteout(struct vop_nwhiteout_args *ap);
106 static int journal_nremove(struct vop_nremove_args *ap);
107 static int journal_nmkdir(struct vop_nmkdir_args *ap);
108 static int journal_nrmdir(struct vop_nrmdir_args *ap);
109 static int journal_nrename(struct vop_nrename_args *ap);
110 
111 #define JRUNDO_SIZE	0x00000001
112 #define JRUNDO_UID	0x00000002
113 #define JRUNDO_GID	0x00000004
114 #define JRUNDO_FSID	0x00000008
115 #define JRUNDO_MODES	0x00000010
116 #define JRUNDO_INUM	0x00000020
117 #define JRUNDO_ATIME	0x00000040
118 #define JRUNDO_MTIME	0x00000080
119 #define JRUNDO_CTIME	0x00000100
120 #define JRUNDO_GEN	0x00000200
121 #define JRUNDO_FLAGS	0x00000400
122 #define JRUNDO_UDEV	0x00000800
123 #define JRUNDO_NLINK	0x00001000
124 #define JRUNDO_FILEDATA	0x00010000
125 #define JRUNDO_GETVP	0x00020000
126 #define JRUNDO_CONDLINK	0x00040000	/* write file data only if link count is 1 */
127 #define JRUNDO_VATTR	(JRUNDO_SIZE|JRUNDO_UID|JRUNDO_GID|JRUNDO_FSID|\
128 			 JRUNDO_MODES|JRUNDO_INUM|JRUNDO_ATIME|JRUNDO_MTIME|\
129 			 JRUNDO_CTIME|JRUNDO_GEN|JRUNDO_FLAGS|JRUNDO_UDEV|\
130 			 JRUNDO_NLINK)
131 #define JRUNDO_ALL	(JRUNDO_VATTR|JRUNDO_FILEDATA)
132 
133 static struct vop_ops journal_vnode_vops = {
134     .vop_default =	vop_journal_operate_ap,
135     .vop_mountctl =	journal_mountctl,
136     .vop_setattr =	journal_setattr,
137     .vop_write =	journal_write,
138     .vop_fsync =	journal_fsync,
139     .vop_putpages =	journal_putpages,
140     .vop_setacl =	journal_setacl,
141     .vop_setextattr =	journal_setextattr,
142     .vop_ncreate =	journal_ncreate,
143     .vop_nmknod =	journal_nmknod,
144     .vop_nlink =	journal_nlink,
145     .vop_nsymlink =	journal_nsymlink,
146     .vop_nwhiteout =	journal_nwhiteout,
147     .vop_nremove =	journal_nremove,
148     .vop_nmkdir =	journal_nmkdir,
149     .vop_nrmdir =	journal_nrmdir,
150     .vop_nrename =	journal_nrename
151 };
152 
153 int
154 journal_mountctl(struct vop_mountctl_args *ap)
155 {
156     struct mount *mp;
157     int error = 0;
158 
159     mp = ap->a_head.a_ops->head.vv_mount;
160     KKASSERT(mp);
161 
162     if (mp->mnt_vn_journal_ops == NULL) {
163 	switch(ap->a_op) {
164 	case MOUNTCTL_INSTALL_VFS_JOURNAL:
165 	    error = journal_attach(mp);
166 	    if (error == 0 && ap->a_ctllen != sizeof(struct mountctl_install_journal))
167 		error = EINVAL;
168 	    if (error == 0 && ap->a_fp == NULL)
169 		error = EBADF;
170 	    if (error == 0)
171 		error = journal_install_vfs_journal(mp, ap->a_fp, ap->a_ctl);
172 	    if (TAILQ_EMPTY(&mp->mnt_jlist))
173 		journal_detach(mp);
174 	    break;
175 	case MOUNTCTL_RESTART_VFS_JOURNAL:
176 	case MOUNTCTL_REMOVE_VFS_JOURNAL:
177 	case MOUNTCTL_RESYNC_VFS_JOURNAL:
178 	case MOUNTCTL_STATUS_VFS_JOURNAL:
179 	    error = ENOENT;
180 	    break;
181 	default:
182 	    error = EOPNOTSUPP;
183 	    break;
184 	}
185     } else {
186 	switch(ap->a_op) {
187 	case MOUNTCTL_INSTALL_VFS_JOURNAL:
188 	    if (ap->a_ctllen != sizeof(struct mountctl_install_journal))
189 		error = EINVAL;
190 	    if (error == 0 && ap->a_fp == NULL)
191 		error = EBADF;
192 	    if (error == 0)
193 		error = journal_install_vfs_journal(mp, ap->a_fp, ap->a_ctl);
194 	    break;
195 	case MOUNTCTL_RESTART_VFS_JOURNAL:
196 	    if (ap->a_ctllen != sizeof(struct mountctl_restart_journal))
197 		error = EINVAL;
198 	    if (error == 0 && ap->a_fp == NULL)
199 		error = EBADF;
200 	    if (error == 0)
201 		error = journal_restart_vfs_journal(mp, ap->a_fp, ap->a_ctl);
202 	    break;
203 	case MOUNTCTL_REMOVE_VFS_JOURNAL:
204 	    if (ap->a_ctllen != sizeof(struct mountctl_remove_journal))
205 		error = EINVAL;
206 	    if (error == 0)
207 		error = journal_remove_vfs_journal(mp, ap->a_ctl);
208 	    if (TAILQ_EMPTY(&mp->mnt_jlist))
209 		journal_detach(mp);
210 	    break;
211 	case MOUNTCTL_RESYNC_VFS_JOURNAL:
212 	    if (ap->a_ctllen != 0)
213 		error = EINVAL;
214 	    if (error == 0)
		error = journal_resync_vfs_journal(mp, ap->a_ctl);
215 	    break;
216 	case MOUNTCTL_STATUS_VFS_JOURNAL:
217 	    if (ap->a_ctllen != sizeof(struct mountctl_status_journal))
218 		error = EINVAL;
219 	    if (error == 0) {
220 		error = journal_status_vfs_journal(mp, ap->a_ctl,
221 					ap->a_buf, ap->a_buflen, ap->a_res);
222 	    }
223 	    break;
224 	default:
225 	    error = EOPNOTSUPP;
226 	    break;
227 	}
228     }
229     return (error);
230 }
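
/*
 * Illustrative userland sketch (not part of this file): journal_mountctl()
 * is normally reached through the mountctl(2) system call.  Only the
 * structure fields actually referenced in this file (id, flags, membufsize)
 * are shown; see <sys/mountctl.h> for the full layout and the usual
 * userland headers for open()/mountctl().
 */
#if 0
	struct mountctl_install_journal info;
	int fd, error;

	fd = open("/var/log/home.journal", O_WRONLY | O_CREAT | O_APPEND, 0600);
	bzero(&info, sizeof(info));
	snprintf(info.id, sizeof(info.id), "%s", "backup0");
	info.flags = MC_JOURNAL_WANT_REVERSABLE;	/* ask for UNDO data */
	info.membufsize = 4 * 1024 * 1024;		/* 4MB memory FIFO */
	error = mountctl("/home", MOUNTCTL_INSTALL_VFS_JOURNAL, fd,
			 &info, sizeof(info), NULL, 0);
#endif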
231 
232 /*
233  * High level mount point setup.  Called when the first journal is installed on a mount point.
234  */
235 static int
236 journal_attach(struct mount *mp)
237 {
238     KKASSERT(mp->mnt_jbitmap == NULL);
239     vfs_add_vnodeops(mp, &journal_vnode_vops, &mp->mnt_vn_journal_ops);
240     mp->mnt_jbitmap = kmalloc(JREC_STREAMID_JMAX/8, M_JOURNAL, M_WAITOK|M_ZERO);
241     mp->mnt_streamid = JREC_STREAMID_JMIN;
242     return(0);
243 }
244 
245 static void
246 journal_detach(struct mount *mp)
247 {
248     KKASSERT(mp->mnt_jbitmap != NULL);
249     if (mp->mnt_vn_journal_ops)
250 	vfs_rm_vnodeops(mp, &journal_vnode_vops, &mp->mnt_vn_journal_ops);
251     kfree(mp->mnt_jbitmap, M_JOURNAL);
252     mp->mnt_jbitmap = NULL;
253 }
254 
255 /*
256  * Install a journal on a mount point.  Each journal has an associated worker
257  * thread which is responsible for buffering and spooling the data to the
258  * target.  A mount point may have multiple journals attached to it.  An
259  * initial start record is generated when the journal is associated.
260  */
261 static int
262 journal_install_vfs_journal(struct mount *mp, struct file *fp,
263 			    const struct mountctl_install_journal *info)
264 {
265     struct journal *jo;
266     struct jrecord jrec;
267     int error = 0;
268     int size;
269 
270     jo = kmalloc(sizeof(struct journal), M_JOURNAL, M_WAITOK|M_ZERO);
271     bcopy(info->id, jo->id, sizeof(jo->id));
272     jo->flags = info->flags & ~(MC_JOURNAL_WACTIVE | MC_JOURNAL_RACTIVE |
273 				MC_JOURNAL_STOP_REQ);
274 
275     /*
276      * Memory FIFO size, round to nearest power of 2
277      */
278     if (info->membufsize) {
279 	if (info->membufsize < 65536)
280 	    size = 65536;
281 	else if (info->membufsize > 128 * 1024 * 1024)
282 	    size = 128 * 1024 * 1024;
283 	else
284 	    size = (int)info->membufsize;
285     } else {
286 	size = 1024 * 1024;
287     }
288     jo->fifo.size = 1;
289     while (jo->fifo.size < size)
290 	jo->fifo.size <<= 1;
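    /* e.g. a requested membufsize of 100000 bytes rounds up to 131072 (2^17) */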
291 
292     /*
293      * Other parameters.  If not specified the starting transaction id
294      * will be derived from the current time.
295      */
296     if (info->transid) {
297 	jo->transid = info->transid;
298     } else {
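	/*
	 * tv_nsec < 10^9 < 2^30, so packing the seconds above bit 30 and
	 * the nanoseconds into the low 30 bits yields a 64-bit id with no
	 * overlapping bits.
	 */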
299 	struct timespec ts;
300 	getnanotime(&ts);
301 	jo->transid = ((int64_t)ts.tv_sec << 30) | ts.tv_nsec;
302     }
303 
304     jo->fp = fp;
305 
306     /*
307      * Allocate the memory FIFO
308      */
309     jo->fifo.mask = jo->fifo.size - 1;
310     jo->fifo.membase = kmalloc(jo->fifo.size, M_JFIFO, M_WAITOK|M_ZERO|M_NULLOK);
311     if (jo->fifo.membase == NULL)
312 	error = ENOMEM;
313 
314     /*
315      * Create the worker threads and generate the association record.
316      */
317     if (error) {
318 	kfree(jo, M_JOURNAL);
319     } else {
320 	fhold(fp);
321 	journal_create_threads(jo);
322 	jrecord_init(jo, &jrec, JREC_STREAMID_DISCONT);
323 	jrecord_write(&jrec, JTYPE_ASSOCIATE, 0);
324 	jrecord_done(&jrec, 0);
325 	TAILQ_INSERT_TAIL(&mp->mnt_jlist, jo, jentry);
326     }
327     return(error);
328 }
329 
330 /*
331  * Restart a journal with a new descriptor.   The existing reader and writer
332  * threads are terminated and a new descriptor is associated with the
333  * journal.  The FIFO rindex is reset to xindex and the threads are then
334  * restarted.
335  */
336 static int
337 journal_restart_vfs_journal(struct mount *mp, struct file *fp,
338 			   const struct mountctl_restart_journal *info)
339 {
340     struct journal *jo;
341     int error;
342 
343     TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
344 	if (bcmp(jo->id, info->id, sizeof(jo->id)) == 0)
345 	    break;
346     }
347     if (jo)
348 	error = journal_restart(mp, fp, jo, info->flags);
349     else
350 	error = EINVAL;
351     return (error);
352 }
353 
354 static int
355 journal_restart(struct mount *mp, struct file *fp,
356 		struct journal *jo, int flags)
357 {
358     /*
359      * XXX lock the jo
360      */
361 
362 #if 0
363     /*
364      * Record the fact that we are doing a restart in the journal.
365      * XXX it isn't safe to do this if the journal is being restarted
366      * because it was locked up and the writer thread has already exited.
367      */
368     jrecord_init(jo, &jrec, JREC_STREAMID_RESTART);
369     jrecord_write(&jrec, JTYPE_DISASSOCIATE, 0);
370     jrecord_done(&jrec, 0);
371 #endif
372 
373     /*
374      * Stop the reader and writer threads and clean up the current
375      * descriptor.
376      */
377     kprintf("RESTART WITH FP %p KILLING %p\n", fp, jo->fp);
378     journal_destroy_threads(jo, flags);
379 
380     if (jo->fp)
381 	fdrop(jo->fp);
382 
383     /*
384      * Associate the new descriptor, reset the FIFO index, and recreate
385      * the threads.
386      */
387     fhold(fp);
388     jo->fp = fp;
389     jo->fifo.rindex = jo->fifo.xindex;
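    /*
     * Resetting rindex back to xindex (the last acknowledged position)
     * causes any data that was sent but never acked to be re-transmitted
     * over the new descriptor.
     */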
390     journal_create_threads(jo);
391 
392     return(0);
393 }
394 
395 /*
396  * Disassociate a journal from a mount point and terminate its worker thread.
397  * A final termination record is written out before the file pointer is
398  * dropped.
399  */
400 static int
401 journal_remove_vfs_journal(struct mount *mp,
402 			   const struct mountctl_remove_journal *info)
403 {
404     struct journal *jo;
405     int error;
406 
407     TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
408 	if (bcmp(jo->id, info->id, sizeof(jo->id)) == 0)
409 	    break;
410     }
411     if (jo)
412 	error = journal_destroy(mp, jo, info->flags);
413     else
414 	error = EINVAL;
415     return (error);
416 }
417 
418 /*
419  * Remove all journals associated with a mount point.  Usually called
420  * by the umount code.
421  */
422 void
423 journal_remove_all_journals(struct mount *mp, int flags)
424 {
425     struct journal *jo;
426 
427     while ((jo = TAILQ_FIRST(&mp->mnt_jlist)) != NULL) {
428 	journal_destroy(mp, jo, flags);
429     }
430 }
431 
432 static int
433 journal_destroy(struct mount *mp, struct journal *jo, int flags)
434 {
435     struct jrecord jrec;
436 
437     TAILQ_REMOVE(&mp->mnt_jlist, jo, jentry);
438 
439     jrecord_init(jo, &jrec, JREC_STREAMID_DISCONT);
440     jrecord_write(&jrec, JTYPE_DISASSOCIATE, 0);
441     jrecord_done(&jrec, 0);
442 
443     journal_destroy_threads(jo, flags);
444 
445     if (jo->fp)
446 	fdrop(jo->fp);
447     if (jo->fifo.membase)
448 	kfree(jo->fifo.membase, M_JFIFO);
449     kfree(jo, M_JOURNAL);
450 
451     return(0);
452 }
453 
454 static int
455 journal_resync_vfs_journal(struct mount *mp, const void *ctl)
456 {
457     return(EINVAL);
458 }
459 
460 static int
461 journal_status_vfs_journal(struct mount *mp,
462 		       const struct mountctl_status_journal *info,
463 		       struct mountctl_journal_ret_status *rstat,
464 		       int buflen, int *res)
465 {
466     struct journal *jo;
467     int error = 0;
468     int index;
469 
470     index = 0;
471     *res = 0;
472     TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
473 	if (info->index == MC_JOURNAL_INDEX_ID) {
474 	    if (bcmp(jo->id, info->id, sizeof(jo->id)) != 0)
475 		continue;
476 	} else if (info->index >= 0) {
477 	    if (info->index < index)
478 		continue;
479 	} else if (info->index != MC_JOURNAL_INDEX_ALL) {
480 	    continue;
481 	}
482 	if (buflen < sizeof(*rstat)) {
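	    /*
	     * Out of user buffer space.  If at least one entry was already
	     * returned, flag it (rstat[-1], the previously filled entry) so
	     * userland knows there is more to come.
	     */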
483 	    if (*res)
484 		rstat[-1].flags |= MC_JOURNAL_STATUS_MORETOCOME;
485 	    else
486 		error = EINVAL;
487 	    break;
488 	}
489 	bzero(rstat, sizeof(*rstat));
490 	rstat->recsize = sizeof(*rstat);
491 	bcopy(jo->id, rstat->id, sizeof(jo->id));
492 	rstat->index = index;
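	/*
	 * FIFO indices: windex is the producer index, rindex is how far the
	 * worker thread has read (sent), and xindex is how far the target
	 * has acknowledged.
	 */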
493 	rstat->membufsize = jo->fifo.size;
494 	rstat->membufused = jo->fifo.windex - jo->fifo.xindex;
495 	rstat->membufunacked = jo->fifo.rindex - jo->fifo.xindex;
496 	rstat->bytessent = jo->total_acked;
497 	rstat->fifostalls = jo->fifostalls;
498 	++rstat;
499 	++index;
500 	*res += sizeof(*rstat);
501 	buflen -= sizeof(*rstat);
502     }
503     return(error);
504 }
505 
506 /************************************************************************
507  *			PARALLEL TRANSACTION SUPPORT ROUTINES		*
508  ************************************************************************
509  *
510  * JRECLIST_*() - routines which create and iterate over jrecord structures,
511  *		  because a mount point may have multiple attached journals.
512  */
513 
514 /*
515  * Initialize the passed jrecord_list and create a jrecord for each
516  * journal we need to write to.  Unnecessary mallocs are avoided by
517  * using the passed jrecord structure as the first jrecord in the list.
518  * A starting transaction is pushed for each jrecord.
519  *
520  * Returns non-zero if any of the journals require undo records.
521  */
522 static
523 int
524 jreclist_init(struct mount *mp, struct jrecord_list *jreclist,
525 	      struct jrecord *jreccache, int16_t rectype)
526 {
527     struct journal *jo;
528     struct jrecord *jrec;
529     int wantrev;
530     int count;
531     int16_t streamid;
532 
533     TAILQ_INIT(&jreclist->list);
534 
535     /*
536      * Select the stream ID to use for the transaction.  We must select
537      * a stream ID that is not currently in use by some other parallel
538      * transaction.
539      *
540      * Don't bother calculating the next streamid when reassigning
541      * mnt_streamid, since parallel transactions are fairly rare.  This
542      * also allows someone observing the raw records to clearly see
543      * when parallel transactions occur.
544      */
545     streamid = mp->mnt_streamid;
546     count = 0;
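    /*
     * mnt_jbitmap packs 8 stream ids per byte: byte index is
     * (streamid >> 3), bit index is (streamid & 7).
     */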
547     while (mp->mnt_jbitmap[streamid >> 3] & (1 << (streamid & 7))) {
548 	if (++streamid == JREC_STREAMID_JMAX)
549 		streamid = JREC_STREAMID_JMIN;
550 	if (++count == JREC_STREAMID_JMAX - JREC_STREAMID_JMIN) {
551 		kprintf("jreclist_init: all streamid's in use! sleeping\n");
552 		tsleep(jreclist, 0, "jsidfl", hz * 10);
553 		count = 0;
554 	}
555     }
556     mp->mnt_jbitmap[streamid >> 3] |= 1 << (streamid & 7);
557     mp->mnt_streamid = streamid;
558     jreclist->streamid = streamid;
559 
560     /*
561      * Now initialize a stream on each journal.
562      */
563     count = 0;
564     wantrev = 0;
565     TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
566 	if (count == 0)
567 	    jrec = jreccache;
568 	else
569 	    jrec = kmalloc(sizeof(*jrec), M_JOURNAL, M_WAITOK);
570 	jrecord_init(jo, jrec, streamid);
571 	jrec->user_save = jrecord_push(jrec, rectype);
572 	TAILQ_INSERT_TAIL(&jreclist->list, jrec, user_entry);
573 	if (jo->flags & MC_JOURNAL_WANT_REVERSABLE)
574 	    wantrev = 1;
575 	++count;
576     }
577     return(wantrev);
578 }
579 
580 /*
581  * Terminate the journaled transactions started by jreclist_init().  If
582  * an error occurred, the transaction records will be aborted.
583  */
584 static
585 void
586 jreclist_done(struct mount *mp, struct jrecord_list *jreclist, int error)
587 {
588     struct jrecord *jrec;
589     int count;
590 
591     /*
592      * Cleanup the jrecord state on each journal.
593      */
594     TAILQ_FOREACH(jrec, &jreclist->list, user_entry) {
595 	jrecord_pop(jrec, jrec->user_save);
596 	jrecord_done(jrec, error);
597     }
598 
599     /*
600      * Free allocated jrec's (the first is always supplied)
601      */
602     count = 0;
603     while ((jrec = TAILQ_FIRST(&jreclist->list)) != NULL) {
604 	TAILQ_REMOVE(&jreclist->list, jrec, user_entry);
605 	if (count)
606 	    kfree(jrec, M_JOURNAL);
607 	++count;
608     }
609 
610     /*
611      * Clear the streamid so it can be reused.
612      */
613     mp->mnt_jbitmap[jreclist->streamid >> 3] &= ~(1 << (jreclist->streamid & 7));
614 }
615 
616 /*
617  * This procedure writes out UNDO records for available reversible
618  * journals.
619  *
620  * XXX could use improvement.  There is no need to re-read the file
621  * for each journal.
622  */
623 static
624 void
625 jreclist_undo_file(struct jrecord_list *jreclist, struct vnode *vp,
626 		   int jrflags, off_t off, off_t bytes)
627 {
628     struct jrecord *jrec;
629     int error;
630 
631     error = 0;
632     if (jrflags & JRUNDO_GETVP)
633 	error = vget(vp, LK_SHARED);
634     if (error == 0) {
635 	TAILQ_FOREACH(jrec, &jreclist->list, user_entry) {
636 	    if (jrec->jo->flags & MC_JOURNAL_WANT_REVERSABLE) {
637 		jrecord_undo_file(jrec, vp, jrflags, off, bytes);
638 	    }
639 	}
640     }
641     if (error == 0 && jrflags & JRUNDO_GETVP)
642 	vput(vp);
643 }
644 
645 /************************************************************************
646  *			LOW LEVEL UNDO SUPPORT ROUTINE			*
647  ************************************************************************
648  *
649  * This function is used to support UNDO records.  It will generate an
650  * appropriate record with the requested portion of the file data.  Note
651  * that file data is only recorded if JRUNDO_FILEDATA is passed.  If bytes
652  * is -1, it will be set to the size of the file.
653  */
654 static void
655 jrecord_undo_file(struct jrecord *jrec, struct vnode *vp, int jrflags,
656 		  off_t off, off_t bytes)
657 {
658     struct vattr attr;
659     void *save1; /* warning, save pointers do not always remain valid */
660     void *save2;
661     int error;
662 
663     /*
664      * Setup.  Start the UNDO record, obtain a shared lock on the vnode,
665      * and retrieve attribute info.
666      */
667     save1 = jrecord_push(jrec, JTYPE_UNDO);
668     error = VOP_GETATTR(vp, &attr);
669     if (error)
670 	goto done;
671 
672     /*
673      * Generate UNDO records as requested.
674      */
675     if (jrflags & JRUNDO_VATTR) {
676 	save2 = jrecord_push(jrec, JTYPE_VATTR);
677 	jrecord_leaf(jrec, JLEAF_VTYPE, &attr.va_type, sizeof(attr.va_type));
678 	if ((jrflags & JRUNDO_NLINK) && attr.va_nlink != VNOVAL)
679 	    jrecord_leaf(jrec, JLEAF_NLINK, &attr.va_nlink, sizeof(attr.va_nlink));
680 	if ((jrflags & JRUNDO_SIZE) && attr.va_size != VNOVAL)
681 	    jrecord_leaf(jrec, JLEAF_SIZE, &attr.va_size, sizeof(attr.va_size));
682 	if ((jrflags & JRUNDO_UID) && attr.va_uid != VNOVAL)
683 	    jrecord_leaf(jrec, JLEAF_UID, &attr.va_uid, sizeof(attr.va_uid));
684 	if ((jrflags & JRUNDO_GID) && attr.va_gid != VNOVAL)
685 	    jrecord_leaf(jrec, JLEAF_GID, &attr.va_gid, sizeof(attr.va_gid));
686 	if ((jrflags & JRUNDO_FSID) && attr.va_fsid != VNOVAL)
687 	    jrecord_leaf(jrec, JLEAF_FSID, &attr.va_fsid, sizeof(attr.va_fsid));
688 	if ((jrflags & JRUNDO_MODES) && attr.va_mode != (mode_t)VNOVAL)
689 	    jrecord_leaf(jrec, JLEAF_MODES, &attr.va_mode, sizeof(attr.va_mode));
690 	if ((jrflags & JRUNDO_INUM) && attr.va_fileid != VNOVAL)
691 	    jrecord_leaf(jrec, JLEAF_INUM, &attr.va_fileid, sizeof(attr.va_fileid));
692 	if ((jrflags & JRUNDO_ATIME) && attr.va_atime.tv_sec != VNOVAL)
693 	    jrecord_leaf(jrec, JLEAF_ATIME, &attr.va_atime, sizeof(attr.va_atime));
694 	if ((jrflags & JRUNDO_MTIME) && attr.va_mtime.tv_sec != VNOVAL)
695 	    jrecord_leaf(jrec, JLEAF_MTIME, &attr.va_mtime, sizeof(attr.va_mtime));
696 	if ((jrflags & JRUNDO_CTIME) && attr.va_ctime.tv_sec != VNOVAL)
697 	    jrecord_leaf(jrec, JLEAF_CTIME, &attr.va_ctime, sizeof(attr.va_ctime));
698 	if ((jrflags & JRUNDO_GEN) && attr.va_gen != VNOVAL)
699 	    jrecord_leaf(jrec, JLEAF_GEN, &attr.va_gen, sizeof(attr.va_gen));
700 	if ((jrflags & JRUNDO_FLAGS) && attr.va_flags != VNOVAL)
701 	    jrecord_leaf(jrec, JLEAF_FLAGS, &attr.va_flags, sizeof(attr.va_flags));
702 	if ((jrflags & JRUNDO_UDEV) && attr.va_rmajor != VNOVAL) {
703 	    udev_t rdev = makeudev(attr.va_rmajor, attr.va_rminor);
704 	    jrecord_leaf(jrec, JLEAF_UDEV, &rdev, sizeof(rdev));
705 	    jrecord_leaf(jrec, JLEAF_UMAJOR, &attr.va_rmajor, sizeof(attr.va_rmajor));
706 	    jrecord_leaf(jrec, JLEAF_UMINOR, &attr.va_rminor, sizeof(attr.va_rminor));
707 	}
708 	jrecord_pop(jrec, save2);
709     }
710 
711     /*
712      * Output the file data being overwritten by reading the file and
713      * writing it out to the journal prior to the write operation.  We
714      * do not need to write out data past the current file EOF.
715      *
716      * XXX support JRUNDO_CONDLINK - do not write out file data for files
717      * with a link count > 1.  The undo code needs to locate the inode and
718      * regenerate the hardlink.
719      */
720     if ((jrflags & JRUNDO_FILEDATA) && attr.va_type == VREG) {
721 	if (attr.va_size != VNOVAL) {
722 	    if (bytes == -1)
723 		bytes = attr.va_size - off;
724 	    if (off + bytes > attr.va_size)
725 		bytes = attr.va_size - off;
726 	    if (bytes > 0)
727 		jrecord_file_data(jrec, vp, off, bytes);
728 	} else {
729 	    error = EINVAL;
730 	}
731     }
732     if ((jrflags & JRUNDO_FILEDATA) && attr.va_type == VLNK) {
733 	struct iovec aiov;
734 	struct uio auio;
735 	char *buf;
736 
737 	buf = kmalloc(PATH_MAX, M_JOURNAL, M_WAITOK);
738 	aiov.iov_base = buf;
739 	aiov.iov_len = PATH_MAX;
740 	auio.uio_iov = &aiov;
741 	auio.uio_iovcnt = 1;
742 	auio.uio_offset = 0;
743 	auio.uio_rw = UIO_READ;
744 	auio.uio_segflg = UIO_SYSSPACE;
745 	auio.uio_td = curthread;
746 	auio.uio_resid = PATH_MAX;
747 	error = VOP_READLINK(vp, &auio, proc0.p_ucred);
748 	if (error == 0) {
749 		jrecord_leaf(jrec, JLEAF_SYMLINKDATA, buf,
750 				PATH_MAX - auio.uio_resid);
751 	}
752 	kfree(buf, M_JOURNAL);
753     }
754 done:
755     if (error)
756 	jrecord_leaf(jrec, JLEAF_ERROR, &error, sizeof(error));
757     jrecord_pop(jrec, save1);
758 }
759 
760 /************************************************************************
761  *			JOURNAL VNOPS					*
762  ************************************************************************
763  *
764  * These are function shims replacing the normal filesystem ops.  We become
765  * responsible for calling the underlying filesystem ops.  We have the choice
766  * of executing the underlying op first and then generating the journal entry,
767  * or starting the journal entry, executing the underlying op, and then
768  * either completing or aborting it.
769  *
770  * The journal is supposed to be a high-level entity, which generally means
771  * identifying files by name rather than by inode.  Supplying both allows
772  * the journal to be used both for inode-number-compatible 'mirrors' and
773  * for simple filesystem replication.
774  *
775  * Writes are particularly difficult to deal with because a single write may
776  * represent a hundred megabyte buffer or more, and both writes and truncations
777  * require the 'old' data to be written out as well as the new data if the
778  * log is reversible.  Other issues:
779  *
780  * - How to deal with operations on unlinked files (no path available),
781  *   but which may still be filesystem visible due to hard links.
782  *
783  * - How to deal with modifications made via a memory map.
784  *
785  * - Future cache coherency support will require cache coherency API calls
786  *   both prior to and after the call to the underlying VFS.
787  *
788  * ALSO NOTE: We do not have to shim compatibility VOPs like MKDIR which have
789  * new VFS equivalents (NMKDIR).
790  */
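
/*
 * All of the shims below follow the same basic shape (sketch; mount
 * pointer extraction and per-op details elided):
 *
 *	if (jreclist_init(mp, &jreclist, &jreccache, JTYPE_XXX))
 *		jreclist_undo_file(...);	(reversible journals only)
 *	error = vop_journal_operate_ap(&ap->a_head);
 *	if (error == 0) {
 *		TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
 *			... jrecord_write_cred/path/vattr/data ...
 *		}
 *	}
 *	jreclist_done(mp, &jreclist, error);
 */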
791 
792 /*
793  * Journal vop_setattr { a_vp, a_vap, a_cred }
794  */
795 static
796 int
797 journal_setattr(struct vop_setattr_args *ap)
798 {
799     struct jrecord_list jreclist;
800     struct jrecord jreccache;
801     struct jrecord *jrec;
802     struct mount *mp;
803     void *save;
804     int error;
805 
806     mp = ap->a_head.a_ops->head.vv_mount;
807     if (jreclist_init(mp, &jreclist, &jreccache, JTYPE_SETATTR)) {
808 	jreclist_undo_file(&jreclist, ap->a_vp, JRUNDO_VATTR, 0, 0);
809     }
810     error = vop_journal_operate_ap(&ap->a_head);
811     if (error == 0) {
812 	TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
813 	    jrecord_write_cred(jrec, curthread, ap->a_cred);
814 	    jrecord_write_vnode_ref(jrec, ap->a_vp);
815 	    save = jrecord_push(jrec, JTYPE_REDO);
816 	    jrecord_write_vattr(jrec, ap->a_vap);
817 	    jrecord_pop(jrec, save);
818 	}
819     }
820     jreclist_done(mp, &jreclist, error);
821     return (error);
822 }
823 
824 /*
825  * Journal vop_write { a_vp, a_uio, a_ioflag, a_cred }
826  */
827 static
828 int
829 journal_write(struct vop_write_args *ap)
830 {
831     struct jrecord_list jreclist;
832     struct jrecord jreccache;
833     struct jrecord *jrec;
834     struct mount *mp;
835     struct uio uio_copy;
836     struct iovec uio_one_iovec;
837     void *save;
838     int error;
839 
840     /*
841      * Special synchronizing writes for VM backing store do not supply any
842      * real data
843      */
844     if (ap->a_uio->uio_segflg == UIO_NOCOPY) {
845 	    error = vop_journal_operate_ap(&ap->a_head);
846 	    return (error);
847     }
848 
849     /*
850      * This is really nasty.  UIO's don't retain sufficient information to
851      * be reusable once they've gone through the VOP chain.  The iovecs get
852      * cleared, so we have to copy the UIO.
853      *
854      * XXX fix the UIO code to not destroy iov's during a scan so we can
855      *     reuse the uio over and over again.
856      *
857      * XXX UNDO code needs to journal the old data prior to the write.
858      */
859     uio_copy = *ap->a_uio;
860     if (uio_copy.uio_iovcnt == 1) {
861 	uio_one_iovec = ap->a_uio->uio_iov[0];
862 	uio_copy.uio_iov = &uio_one_iovec;
863     } else {
864 	uio_copy.uio_iov = kmalloc(uio_copy.uio_iovcnt * sizeof(struct iovec),
865 				    M_JOURNAL, M_WAITOK);
866 	bcopy(ap->a_uio->uio_iov, uio_copy.uio_iov,
867 		uio_copy.uio_iovcnt * sizeof(struct iovec));
868     }
869 
870     /*
871      * Write out undo data.  Note that uio_offset is incorrect if
872      * IO_APPEND is set, but fortunately we have no undo file data to
873      * write out in that case.
874      */
875     mp = ap->a_head.a_ops->head.vv_mount;
876     if (jreclist_init(mp, &jreclist, &jreccache, JTYPE_WRITE)) {
877 	if (ap->a_ioflag & IO_APPEND) {
878 	    jreclist_undo_file(&jreclist, ap->a_vp, JRUNDO_SIZE|JRUNDO_MTIME, 0, 0);
879 	} else {
880 	    jreclist_undo_file(&jreclist, ap->a_vp,
881 			       JRUNDO_FILEDATA|JRUNDO_SIZE|JRUNDO_MTIME,
882 			       uio_copy.uio_offset, uio_copy.uio_resid);
883 	}
884     }
885     error = vop_journal_operate_ap(&ap->a_head);
886 
887     /*
888      * XXX bad hack to figure out the offset for O_APPEND writes (note:
889      * uio field state after the VFS operation).
890      */
891     uio_copy.uio_offset = ap->a_uio->uio_offset -
892 			  (uio_copy.uio_resid - ap->a_uio->uio_resid);
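    /*
     * (uio_copy.uio_resid - ap->a_uio->uio_resid) is the number of bytes
     * actually written, so subtracting it from the post-write uio_offset
     * recovers the starting offset of the data, which is what the journal
     * record needs (particularly for IO_APPEND writes).
     */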
893 
894     /*
895      * Output the write data to the journal.
896      */
897     if (error == 0) {
898 	TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
899 	    jrecord_write_cred(jrec, NULL, ap->a_cred);
900 	    jrecord_write_vnode_ref(jrec, ap->a_vp);
901 	    save = jrecord_push(jrec, JTYPE_REDO);
902 	    jrecord_write_uio(jrec, JLEAF_FILEDATA, &uio_copy);
903 	    jrecord_pop(jrec, save);
904 	}
905     }
906     jreclist_done(mp, &jreclist, error);
907 
908     if (uio_copy.uio_iov != &uio_one_iovec)
909 	kfree(uio_copy.uio_iov, M_JOURNAL);
910     return (error);
911 }
912 
913 /*
914  * Journal vop_fsync { a_vp, a_waitfor }
915  */
916 static
917 int
918 journal_fsync(struct vop_fsync_args *ap)
919 {
920 #if 0
921     struct mount *mp;
922     struct journal *jo;
923 #endif
924     int error;
925 
926     error = vop_journal_operate_ap(&ap->a_head);
927 #if 0
928     mp = ap->a_head.a_ops->head.vv_mount;
929     if (error == 0) {
930 	TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
931 	    /* XXX synchronize pending journal records */
932 	}
933     }
934 #endif
935     return (error);
936 }
937 
938 /*
939  * Journal vop_putpages { a_vp, a_m, a_count, a_sync, a_rtvals, a_offset }
940  *
941  * note: a_count is in bytes.
942  */
943 static
944 int
945 journal_putpages(struct vop_putpages_args *ap)
946 {
947     struct jrecord_list jreclist;
948     struct jrecord jreccache;
949     struct jrecord *jrec;
950     struct mount *mp;
951     void *save;
952     int error;
953 
954     mp = ap->a_head.a_ops->head.vv_mount;
955     if (jreclist_init(mp, &jreclist, &jreccache, JTYPE_PUTPAGES) &&
956 	ap->a_count > 0
957     ) {
958 	jreclist_undo_file(&jreclist, ap->a_vp,
959 			   JRUNDO_FILEDATA|JRUNDO_SIZE|JRUNDO_MTIME,
960 			   ap->a_offset, ap->a_count);
961     }
962     error = vop_journal_operate_ap(&ap->a_head);
963     if (error == 0 && ap->a_count > 0) {
964 	TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
965 	    jrecord_write_vnode_ref(jrec, ap->a_vp);
966 	    save = jrecord_push(jrec, JTYPE_REDO);
967 	    jrecord_write_pagelist(jrec, JLEAF_FILEDATA, ap->a_m, ap->a_rtvals,
968 				   btoc(ap->a_count), ap->a_offset);
969 	    jrecord_pop(jrec, save);
970 	}
971     }
972     jreclist_done(mp, &jreclist, error);
973     return (error);
974 }
975 
976 /*
977  * Journal vop_setacl { a_vp, a_type, a_aclp, a_cred }
978  */
979 static
980 int
981 journal_setacl(struct vop_setacl_args *ap)
982 {
983     struct jrecord_list jreclist;
984     struct jrecord jreccache;
985     struct jrecord *jrec;
986     struct mount *mp;
987     int error;
988 
989     mp = ap->a_head.a_ops->head.vv_mount;
990     jreclist_init(mp, &jreclist, &jreccache, JTYPE_SETACL);
991     error = vop_journal_operate_ap(&ap->a_head);
992     if (error == 0) {
993 	TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
994 #if 0
995 	    if ((jo->flags & MC_JOURNAL_WANT_REVERSABLE))
996 		jrecord_undo_file(jrec, ap->a_vp, JRUNDO_XXX, 0, 0);
997 #endif
998 	    jrecord_write_cred(jrec, curthread, ap->a_cred);
999 	    jrecord_write_vnode_ref(jrec, ap->a_vp);
1000 #if 0
1001 	    save = jrecord_push(jrec, JTYPE_REDO);
1002 	    /* XXX type, aclp */
1003 	    jrecord_pop(jrec, save);
1004 #endif
1005 	}
1006     }
1007     jreclist_done(mp, &jreclist, error);
1008     return (error);
1009 }
1010 
1011 /*
1012  * Journal vop_setextattr { a_vp, a_name, a_uio, a_cred }
1013  */
1014 static
1015 int
1016 journal_setextattr(struct vop_setextattr_args *ap)
1017 {
1018     struct jrecord_list jreclist;
1019     struct jrecord jreccache;
1020     struct jrecord *jrec;
1021     struct mount *mp;
1022     void *save;
1023     int error;
1024 
1025     mp = ap->a_head.a_ops->head.vv_mount;
1026     jreclist_init(mp, &jreclist, &jreccache, JTYPE_SETEXTATTR);
1027     error = vop_journal_operate_ap(&ap->a_head);
1028     if (error == 0) {
1029 	TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1030 #if 0
1031 	    if ((jo->flags & MC_JOURNAL_WANT_REVERSABLE))
1032 		jrecord_undo_file(jrec, ap->a_vp, JRUNDO_XXX, 0, 0);
1033 #endif
1034 	    jrecord_write_cred(jrec, curthread, ap->a_cred);
1035 	    jrecord_write_vnode_ref(jrec, ap->a_vp);
1036 	    jrecord_leaf(jrec, JLEAF_ATTRNAME, ap->a_attrname,
1037 			strlen(ap->a_attrname));
1038 	    save = jrecord_push(jrec, JTYPE_REDO);
1039 	    jrecord_write_uio(jrec, JLEAF_FILEDATA, ap->a_uio);
1040 	    jrecord_pop(jrec, save);
1041 	}
1042     }
1043     jreclist_done(mp, &jreclist, error);
1044     return (error);
1045 }
1046 
1047 /*
1048  * Journal vop_ncreate { a_nch, a_vpp, a_cred, a_vap }
1049  */
1050 static
1051 int
1052 journal_ncreate(struct vop_ncreate_args *ap)
1053 {
1054     struct jrecord_list jreclist;
1055     struct jrecord jreccache;
1056     struct jrecord *jrec;
1057     struct mount *mp;
1058     void *save;
1059     int error;
1060 
1061     mp = ap->a_head.a_ops->head.vv_mount;
1062     jreclist_init(mp, &jreclist, &jreccache, JTYPE_CREATE);
1063     error = vop_journal_operate_ap(&ap->a_head);
1064     if (error == 0) {
1065 	TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1066 	    jrecord_write_cred(jrec, NULL, ap->a_cred);
1067 	    jrecord_write_path(jrec, JLEAF_PATH1, ap->a_nch->ncp);
1068 	    if (*ap->a_vpp)
1069 		jrecord_write_vnode_ref(jrec, *ap->a_vpp);
1070 	    save = jrecord_push(jrec, JTYPE_REDO);
1071 	    jrecord_write_vattr(jrec, ap->a_vap);
1072 	    jrecord_pop(jrec, save);
1073 	}
1074     }
1075     jreclist_done(mp, &jreclist, error);
1076     return (error);
1077 }
1078 
1079 /*
1080  * Journal vop_nmknod { a_nch, a_vpp, a_cred, a_vap }
1081  */
1082 static
1083 int
1084 journal_nmknod(struct vop_nmknod_args *ap)
1085 {
1086     struct jrecord_list jreclist;
1087     struct jrecord jreccache;
1088     struct jrecord *jrec;
1089     struct mount *mp;
1090     void *save;
1091     int error;
1092 
1093     mp = ap->a_head.a_ops->head.vv_mount;
1094     jreclist_init(mp, &jreclist, &jreccache, JTYPE_MKNOD);
1095     error = vop_journal_operate_ap(&ap->a_head);
1096     if (error == 0) {
1097 	TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1098 	    jrecord_write_cred(jrec, NULL, ap->a_cred);
1099 	    jrecord_write_path(jrec, JLEAF_PATH1, ap->a_nch->ncp);
1100 	    save = jrecord_push(jrec, JTYPE_REDO);
1101 	    jrecord_write_vattr(jrec, ap->a_vap);
1102 	    jrecord_pop(jrec, save);
1103 	    if (*ap->a_vpp)
1104 		jrecord_write_vnode_ref(jrec, *ap->a_vpp);
1105 	}
1106     }
1107     jreclist_done(mp, &jreclist, error);
1108     return (error);
1109 }
1110 
1111 /*
1112  * Journal vop_nlink { a_nch, a_vp, a_cred }
1113  */
1114 static
1115 int
1116 journal_nlink(struct vop_nlink_args *ap)
1117 {
1118     struct jrecord_list jreclist;
1119     struct jrecord jreccache;
1120     struct jrecord *jrec;
1121     struct mount *mp;
1122     void *save;
1123     int error;
1124 
1125     mp = ap->a_head.a_ops->head.vv_mount;
1126     jreclist_init(mp, &jreclist, &jreccache, JTYPE_LINK);
1127     error = vop_journal_operate_ap(&ap->a_head);
1128     if (error == 0) {
1129 	TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1130 	    jrecord_write_cred(jrec, NULL, ap->a_cred);
1131 	    jrecord_write_path(jrec, JLEAF_PATH1, ap->a_nch->ncp);
1132 	    /* XXX PATH to VP and inode number */
1133 	    /* XXX this call may not record the correct path when
1134 	     * multiple paths are available */
1135 	    save = jrecord_push(jrec, JTYPE_REDO);
1136 	    jrecord_write_vnode_link(jrec, ap->a_vp, ap->a_nch->ncp);
1137 	    jrecord_pop(jrec, save);
1138 	}
1139     }
1140     jreclist_done(mp, &jreclist, error);
1141     return (error);
1142 }
1143 
1144 /*
1145  * Journal vop_nsymlink { a_nch, a_vpp, a_cred, a_vap, a_target }
1146  */
1147 static
1148 int
1149 journal_nsymlink(struct vop_nsymlink_args *ap)
1150 {
1151     struct jrecord_list jreclist;
1152     struct jrecord jreccache;
1153     struct jrecord *jrec;
1154     struct mount *mp;
1155     void *save;
1156     int error;
1157 
1158     mp = ap->a_head.a_ops->head.vv_mount;
1159     jreclist_init(mp, &jreclist, &jreccache, JTYPE_SYMLINK);
1160     error = vop_journal_operate_ap(&ap->a_head);
1161     if (error == 0) {
1162 	TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1163 	    jrecord_write_cred(jrec, NULL, ap->a_cred);
1164 	    jrecord_write_path(jrec, JLEAF_PATH1, ap->a_nch->ncp);
1165 	    save = jrecord_push(jrec, JTYPE_REDO);
1166 	    jrecord_leaf(jrec, JLEAF_SYMLINKDATA,
1167 			ap->a_target, strlen(ap->a_target));
1168 	    jrecord_pop(jrec, save);
1169 	    if (*ap->a_vpp)
1170 		jrecord_write_vnode_ref(jrec, *ap->a_vpp);
1171 	}
1172     }
1173     jreclist_done(mp, &jreclist, error);
1174     return (error);
1175 }
1176 
1177 /*
1178  * Journal vop_nwhiteout { a_nch, a_cred, a_flags }
1179  */
1180 static
1181 int
1182 journal_nwhiteout(struct vop_nwhiteout_args *ap)
1183 {
1184     struct jrecord_list jreclist;
1185     struct jrecord jreccache;
1186     struct jrecord *jrec;
1187     struct mount *mp;
1188     int error;
1189 
1190     mp = ap->a_head.a_ops->head.vv_mount;
1191     jreclist_init(mp, &jreclist, &jreccache, JTYPE_WHITEOUT);
1192     error = vop_journal_operate_ap(&ap->a_head);
1193     if (error == 0) {
1194 	TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1195 	    jrecord_write_cred(jrec, NULL, ap->a_cred);
1196 	    jrecord_write_path(jrec, JLEAF_PATH1, ap->a_nch->ncp);
1197 	}
1198     }
1199     jreclist_done(mp, &jreclist, error);
1200     return (error);
1201 }
1202 
1203 /*
1204  * Journal vop_nremove { a_nch, a_cred }
1205  */
1206 static
1207 int
1208 journal_nremove(struct vop_nremove_args *ap)
1209 {
1210     struct jrecord_list jreclist;
1211     struct jrecord jreccache;
1212     struct jrecord *jrec;
1213     struct mount *mp;
1214     int error;
1215 
1216     mp = ap->a_head.a_ops->head.vv_mount;
1217     if (jreclist_init(mp, &jreclist, &jreccache, JTYPE_REMOVE) &&
1218 	ap->a_nch->ncp->nc_vp
1219     ) {
1220 	jreclist_undo_file(&jreclist, ap->a_nch->ncp->nc_vp,
1221 			   JRUNDO_ALL|JRUNDO_GETVP|JRUNDO_CONDLINK, 0, -1);
1222     }
1223     error = vop_journal_operate_ap(&ap->a_head);
1224     if (error == 0) {
1225 	TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1226 	    jrecord_write_cred(jrec, NULL, ap->a_cred);
1227 	    jrecord_write_path(jrec, JLEAF_PATH1, ap->a_nch->ncp);
1228 	}
1229     }
1230     jreclist_done(mp, &jreclist, error);
1231     return (error);
1232 }
1233 
1234 /*
1235  * Journal vop_nmkdir { a_nch, a_vpp, a_cred, a_vap }
1236  */
1237 static
1238 int
1239 journal_nmkdir(struct vop_nmkdir_args *ap)
1240 {
1241     struct jrecord_list jreclist;
1242     struct jrecord jreccache;
1243     struct jrecord *jrec;
1244     struct mount *mp;
1245     int error;
1246 
1247     mp = ap->a_head.a_ops->head.vv_mount;
1248     jreclist_init(mp, &jreclist, &jreccache, JTYPE_MKDIR);
1249     error = vop_journal_operate_ap(&ap->a_head);
1250     if (error == 0) {
1251 	TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1252 #if 0
1253 	    if (jo->flags & MC_JOURNAL_WANT_AUDIT) {
1254 		jrecord_write_audit(jrec);
1255 	    }
1256 #endif
1257 	    jrecord_write_path(jrec, JLEAF_PATH1, ap->a_nch->ncp);
1258 	    jrecord_write_cred(jrec, NULL, ap->a_cred);
1259 	    jrecord_write_vattr(jrec, ap->a_vap);
1261 	    if (*ap->a_vpp)
1262 		jrecord_write_vnode_ref(jrec, *ap->a_vpp);
1263 	}
1264     }
1265     jreclist_done(mp, &jreclist, error);
1266     return (error);
1267 }
1268 
1269 /*
1270  * Journal vop_nrmdir { a_nch, a_cred }
1271  */
1272 static
1273 int
1274 journal_nrmdir(struct vop_nrmdir_args *ap)
1275 {
1276     struct jrecord_list jreclist;
1277     struct jrecord jreccache;
1278     struct jrecord *jrec;
1279     struct mount *mp;
1280     int error;
1281 
1282     mp = ap->a_head.a_ops->head.vv_mount;
1283     if (jreclist_init(mp, &jreclist, &jreccache, JTYPE_RMDIR)) {
1284 	jreclist_undo_file(&jreclist, ap->a_nch->ncp->nc_vp,
1285 			   JRUNDO_VATTR|JRUNDO_GETVP, 0, 0);
1286     }
1287     error = vop_journal_operate_ap(&ap->a_head);
1288     if (error == 0) {
1289 	TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1290 	    jrecord_write_cred(jrec, NULL, ap->a_cred);
1291 	    jrecord_write_path(jrec, JLEAF_PATH1, ap->a_nch->ncp);
1292 	}
1293     }
1294     jreclist_done(mp, &jreclist, error);
1295     return (error);
1296 }
1297 
1298 /*
1299  * Journal vop_nrename { a_fnch, a_tnch, a_cred }
1300  */
1301 static
1302 int
1303 journal_nrename(struct vop_nrename_args *ap)
1304 {
1305     struct jrecord_list jreclist;
1306     struct jrecord jreccache;
1307     struct jrecord *jrec;
1308     struct mount *mp;
1309     int error;
1310 
1311     mp = ap->a_head.a_ops->head.vv_mount;
1312     if (jreclist_init(mp, &jreclist, &jreccache, JTYPE_RENAME) &&
1313 	ap->a_tnch->ncp->nc_vp
1314     ) {
1315 	jreclist_undo_file(&jreclist, ap->a_tnch->ncp->nc_vp,
1316 			   JRUNDO_ALL|JRUNDO_GETVP|JRUNDO_CONDLINK, 0, -1);
1317     }
1318     error = vop_journal_operate_ap(&ap->a_head);
1319     if (error == 0) {
1320 	TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1321 	    jrecord_write_cred(jrec, NULL, ap->a_cred);
1322 	    jrecord_write_path(jrec, JLEAF_PATH1, ap->a_fnch->ncp);
1323 	    jrecord_write_path(jrec, JLEAF_PATH2, ap->a_tnch->ncp);
1324 	}
1325     }
1326     jreclist_done(mp, &jreclist, error);
1327     return (error);
1328 }
1329 
1330