xref: /freebsd/sys/ufs/ffs/ffs_vnops.c (revision 38069501)
1 /*-
2  * Copyright (c) 2002, 2003 Networks Associates Technology, Inc.
3  * All rights reserved.
4  *
5  * This software was developed for the FreeBSD Project by Marshall
6  * Kirk McKusick and Network Associates Laboratories, the Security
7  * Research Division of Network Associates, Inc. under DARPA/SPAWAR
8  * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
9  * research program
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  * Copyright (c) 1982, 1986, 1989, 1993
33  *	The Regents of the University of California.  All rights reserved.
34  *
35  * Redistribution and use in source and binary forms, with or without
36  * modification, are permitted provided that the following conditions
37  * are met:
38  * 1. Redistributions of source code must retain the above copyright
39  *    notice, this list of conditions and the following disclaimer.
40  * 2. Redistributions in binary form must reproduce the above copyright
41  *    notice, this list of conditions and the following disclaimer in the
42  *    documentation and/or other materials provided with the distribution.
43  * 3. Neither the name of the University nor the names of its contributors
44  *    may be used to endorse or promote products derived from this software
45  *    without specific prior written permission.
46  *
47  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
48  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
49  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
50  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
51  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
52  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
53  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
54  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
55  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
56  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
57  * SUCH DAMAGE.
58  *
59  *	from: @(#)ufs_readwrite.c	8.11 (Berkeley) 5/8/95
60  * from: $FreeBSD: .../ufs/ufs_readwrite.c,v 1.96 2002/08/12 09:22:11 phk ...
61  *	@(#)ffs_vnops.c	8.15 (Berkeley) 5/14/95
62  */
63 
64 #include <sys/cdefs.h>
65 __FBSDID("$FreeBSD$");
66 
67 #include <sys/param.h>
68 #include <sys/bio.h>
69 #include <sys/systm.h>
70 #include <sys/buf.h>
71 #include <sys/conf.h>
72 #include <sys/extattr.h>
73 #include <sys/kernel.h>
74 #include <sys/limits.h>
75 #include <sys/malloc.h>
76 #include <sys/mount.h>
77 #include <sys/priv.h>
78 #include <sys/rwlock.h>
79 #include <sys/stat.h>
80 #include <sys/sysctl.h>
81 #include <sys/vmmeter.h>
82 #include <sys/vnode.h>
83 
84 #include <vm/vm.h>
85 #include <vm/vm_param.h>
86 #include <vm/vm_extern.h>
87 #include <vm/vm_object.h>
88 #include <vm/vm_page.h>
89 #include <vm/vm_pager.h>
90 #include <vm/vnode_pager.h>
91 
92 #include <ufs/ufs/extattr.h>
93 #include <ufs/ufs/quota.h>
94 #include <ufs/ufs/inode.h>
95 #include <ufs/ufs/ufs_extern.h>
96 #include <ufs/ufs/ufsmount.h>
97 
98 #include <ufs/ffs/fs.h>
99 #include <ufs/ffs/ffs_extern.h>
100 #include "opt_directio.h"
101 #include "opt_ffs.h"
102 
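/*
 * Check that a pointer is suitably aligned to be accessed as the given type.
 */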
103 #define	ALIGNED_TO(ptr, s)	\
104 	(((uintptr_t)(ptr) & (_Alignof(s) - 1)) == 0)
105 
106 #ifdef DIRECTIO
107 extern int	ffs_rawread(struct vnode *vp, struct uio *uio, int *workdone);
108 #endif
109 static vop_fdatasync_t	ffs_fdatasync;
110 static vop_fsync_t	ffs_fsync;
111 static vop_getpages_t	ffs_getpages;
112 static vop_lock1_t	ffs_lock;
113 static vop_read_t	ffs_read;
114 static vop_write_t	ffs_write;
115 static int	ffs_extread(struct vnode *vp, struct uio *uio, int ioflag);
116 static int	ffs_extwrite(struct vnode *vp, struct uio *uio, int ioflag,
117 		    struct ucred *cred);
118 static vop_strategy_t	ffsext_strategy;
119 static vop_closeextattr_t	ffs_closeextattr;
120 static vop_deleteextattr_t	ffs_deleteextattr;
121 static vop_getextattr_t	ffs_getextattr;
122 static vop_listextattr_t	ffs_listextattr;
123 static vop_openextattr_t	ffs_openextattr;
124 static vop_setextattr_t	ffs_setextattr;
125 static vop_vptofh_t	ffs_vptofh;
126 
127 /* Global vfs data structures for UFS1 vnodes (no extended attribute area). */
128 struct vop_vector ffs_vnodeops1 = {
129 	.vop_default =		&ufs_vnodeops,
130 	.vop_fsync =		ffs_fsync,
131 	.vop_fdatasync =	ffs_fdatasync,
132 	.vop_getpages =		ffs_getpages,
133 	.vop_getpages_async =	vnode_pager_local_getpages_async,
134 	.vop_lock1 =		ffs_lock,
135 	.vop_read =		ffs_read,
136 	.vop_reallocblks =	ffs_reallocblks,
137 	.vop_write =		ffs_write,
138 	.vop_vptofh =		ffs_vptofh,
139 };
140 
141 struct vop_vector ffs_fifoops1 = {
142 	.vop_default =		&ufs_fifoops,
143 	.vop_fsync =		ffs_fsync,
144 	.vop_fdatasync =	ffs_fdatasync,
145 	.vop_reallocblks =	ffs_reallocblks, /* XXX: really ??? */
146 	.vop_vptofh =		ffs_vptofh,
147 };
148 
149 /* Global vfs data structures for UFS2 vnodes (with extended attribute support). */
150 struct vop_vector ffs_vnodeops2 = {
151 	.vop_default =		&ufs_vnodeops,
152 	.vop_fsync =		ffs_fsync,
153 	.vop_fdatasync =	ffs_fdatasync,
154 	.vop_getpages =		ffs_getpages,
155 	.vop_getpages_async =	vnode_pager_local_getpages_async,
156 	.vop_lock1 =		ffs_lock,
157 	.vop_read =		ffs_read,
158 	.vop_reallocblks =	ffs_reallocblks,
159 	.vop_write =		ffs_write,
160 	.vop_closeextattr =	ffs_closeextattr,
161 	.vop_deleteextattr =	ffs_deleteextattr,
162 	.vop_getextattr =	ffs_getextattr,
163 	.vop_listextattr =	ffs_listextattr,
164 	.vop_openextattr =	ffs_openextattr,
165 	.vop_setextattr =	ffs_setextattr,
166 	.vop_vptofh =		ffs_vptofh,
167 };
168 
169 struct vop_vector ffs_fifoops2 = {
170 	.vop_default =		&ufs_fifoops,
171 	.vop_fsync =		ffs_fsync,
172 	.vop_fdatasync =	ffs_fdatasync,
173 	.vop_lock1 =		ffs_lock,
174 	.vop_reallocblks =	ffs_reallocblks,
175 	.vop_strategy =		ffsext_strategy,
176 	.vop_closeextattr =	ffs_closeextattr,
177 	.vop_deleteextattr =	ffs_deleteextattr,
178 	.vop_getextattr =	ffs_getextattr,
179 	.vop_listextattr =	ffs_listextattr,
180 	.vop_openextattr =	ffs_openextattr,
181 	.vop_setextattr =	ffs_setextattr,
182 	.vop_vptofh =		ffs_vptofh,
183 };
184 
185 /*
186  * Synch an open file.
187  */
188 /* ARGSUSED */
189 static int
190 ffs_fsync(struct vop_fsync_args *ap)
191 {
192 	struct vnode *vp;
193 	struct bufobj *bo;
194 	int error;
195 
196 	vp = ap->a_vp;
197 	bo = &vp->v_bufobj;
198 retry:
199 	error = ffs_syncvnode(vp, ap->a_waitfor, 0);
200 	if (error)
201 		return (error);
202 	if (ap->a_waitfor == MNT_WAIT && DOINGSOFTDEP(vp)) {
203 		error = softdep_fsync(vp);
204 		if (error)
205 			return (error);
206 
207 		/*
208 		 * The softdep_fsync() function may drop vp lock,
209 		 * allowing for dirty buffers to reappear on the
210 		 * bo_dirty list. Recheck and resync as needed.
211 		 */
212 		BO_LOCK(bo);
213 		if ((vp->v_type == VREG || vp->v_type == VDIR) &&
214 		    (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0)) {
215 			BO_UNLOCK(bo);
216 			goto retry;
217 		}
218 		BO_UNLOCK(bo);
219 	}
220 	return (0);
221 }
222 
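/*
 * Flush all dirty buffers of a vnode and, unless NO_INO_UPDT or DATA_ONLY
 * is requested, its inode.  Several passes are made, alternating between
 * async and sync, so that soft updates dependencies can be resolved.
 */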
223 int
224 ffs_syncvnode(struct vnode *vp, int waitfor, int flags)
225 {
226 	struct inode *ip;
227 	struct bufobj *bo;
228 	struct buf *bp, *nbp;
229 	ufs_lbn_t lbn;
230 	int error, passes;
231 	bool still_dirty, wait;
232 
233 	ip = VTOI(vp);
234 	ip->i_flag &= ~IN_NEEDSYNC;
235 	bo = &vp->v_bufobj;
236 
237 	/*
238 	 * When doing MNT_WAIT we must first flush all dependencies
239 	 * on the inode.
240 	 */
241 	if (DOINGSOFTDEP(vp) && waitfor == MNT_WAIT &&
242 	    (error = softdep_sync_metadata(vp)) != 0)
243 		return (error);
244 
245 	/*
246 	 * Flush all dirty buffers associated with a vnode.
247 	 */
248 	error = 0;
249 	passes = 0;
250 	wait = false;	/* Always do an async pass first. */
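	/*
	 * A bound on the highest logical block that can hold file data;
	 * a dirty buffer beyond it means we are syncing truncated data
	 * (see the panic below).
	 */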
251 	lbn = lblkno(ITOFS(ip), (ip->i_size + ITOFS(ip)->fs_bsize - 1));
252 	BO_LOCK(bo);
253 loop:
254 	TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
255 		bp->b_vflags &= ~BV_SCANNED;
256 	TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
257 		/*
258 		 * Reasons to skip this buffer: it has already been considered
259 		 * on this pass, the buffer has dependencies that will cause
260 		 * it to be redirtied and it has not already been deferred,
261 		 * or it is already being written.
262 		 */
263 		if ((bp->b_vflags & BV_SCANNED) != 0)
264 			continue;
265 		bp->b_vflags |= BV_SCANNED;
266 		/*
267 		 * Flush indirects in order, if requested.
268 		 *
269 		 * Note that if only datasync is requested, we can
270 		 * skip indirect blocks when softupdates are not
271 		 * active.  Otherwise we must flush them with data,
272 		 * since dependencies prevent data block writes.
273 		 */
274 		if (waitfor == MNT_WAIT && bp->b_lblkno <= -UFS_NDADDR &&
275 		    (lbn_level(bp->b_lblkno) >= passes ||
276 		    ((flags & DATA_ONLY) != 0 && !DOINGSOFTDEP(vp))))
277 			continue;
278 		if (bp->b_lblkno > lbn)
279 			panic("ffs_syncvnode: syncing truncated data.");
280 		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) == 0) {
281 			BO_UNLOCK(bo);
282 		} else if (wait) {
283 			if (BUF_LOCK(bp,
284 			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
285 			    BO_LOCKPTR(bo)) != 0) {
286 				bp->b_vflags &= ~BV_SCANNED;
287 				goto next;
288 			}
289 		} else
290 			continue;
291 		if ((bp->b_flags & B_DELWRI) == 0)
292 			panic("ffs_fsync: not dirty");
293 		/*
294 		 * Check for dependencies and potentially complete them.
295 		 */
296 		if (!LIST_EMPTY(&bp->b_dep) &&
297 		    (error = softdep_sync_buf(vp, bp,
298 		    wait ? MNT_WAIT : MNT_NOWAIT)) != 0) {
299 			/* I/O error. */
300 			if (error != EBUSY) {
301 				BUF_UNLOCK(bp);
302 				return (error);
303 			}
304 			/* If we deferred once, don't defer again. */
305 			if ((bp->b_flags & B_DEFERRED) == 0) {
306 				bp->b_flags |= B_DEFERRED;
307 				BUF_UNLOCK(bp);
308 				goto next;
309 			}
310 		}
311 		if (wait) {
312 			bremfree(bp);
313 			if ((error = bwrite(bp)) != 0)
314 				return (error);
315 		} else if ((bp->b_flags & B_CLUSTEROK)) {
316 			(void) vfs_bio_awrite(bp);
317 		} else {
318 			bremfree(bp);
319 			(void) bawrite(bp);
320 		}
321 next:
322 		/*
323 		 * Since we may have slept during the I/O, we need
324 		 * to start from a known point.
325 		 */
326 		BO_LOCK(bo);
327 		nbp = TAILQ_FIRST(&bo->bo_dirty.bv_hd);
328 	}
329 	if (waitfor != MNT_WAIT) {
330 		BO_UNLOCK(bo);
331 		if ((flags & NO_INO_UPDT) != 0)
332 			return (0);
333 		else
334 			return (ffs_update(vp, 0));
335 	}
336 	/* Drain IO to see if we're done. */
337 	bufobj_wwait(bo, 0, 0);
338 	/*
339 	 * Block devices associated with filesystems may have new I/O
340 	 * requests posted for them even if the vnode is locked, so no
341 	 * amount of trying will get them clean.  We make several passes
342 	 * as a best effort.
343 	 *
344 	 * Regular files may need multiple passes to flush all dependency
345 	 * work as it is possible that we must write once per indirect
346 	 * level, once for the leaf, and once for the inode and each of
347 	 * these will be done with one sync and one async pass.
348 	 */
349 	if (bo->bo_dirty.bv_cnt > 0) {
350 		if ((flags & DATA_ONLY) == 0) {
351 			still_dirty = true;
352 		} else {
353 			/*
354 			 * For data-only sync, dirty indirect buffers
355 			 * are ignored.
356 			 */
357 			still_dirty = false;
358 			TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) {
359 				if (bp->b_lblkno > -UFS_NDADDR) {
360 					still_dirty = true;
361 					break;
362 				}
363 			}
364 		}
365 
366 		if (still_dirty) {
367 			/* Write the inode after sync passes to flush deps. */
368 			if (wait && DOINGSOFTDEP(vp) &&
369 			    (flags & NO_INO_UPDT) == 0) {
370 				BO_UNLOCK(bo);
371 				ffs_update(vp, 1);
372 				BO_LOCK(bo);
373 			}
374 			/* switch between sync/async. */
375 			wait = !wait;
376 			if (wait || ++passes < UFS_NIADDR + 2)
377 				goto loop;
378 #ifdef INVARIANTS
379 			if (!vn_isdisk(vp, NULL))
380 				vn_printf(vp, "ffs_fsync: dirty ");
381 #endif
382 		}
383 	}
384 	BO_UNLOCK(bo);
385 	error = 0;
386 	if ((flags & DATA_ONLY) == 0) {
387 		if ((flags & NO_INO_UPDT) == 0)
388 			error = ffs_update(vp, 1);
389 		if (DOINGSUJ(vp))
390 			softdep_journal_fsync(VTOI(vp));
391 	}
392 	return (error);
393 }
394 
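/*
 * Synch a file's data only (VOP_FDATASYNC), implemented as a data-only
 * pass of ffs_syncvnode().
 */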
395 static int
396 ffs_fdatasync(struct vop_fdatasync_args *ap)
397 {
398 
399 	return (ffs_syncvnode(ap->a_vp, MNT_WAIT, DATA_ONLY));
400 }
401 
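/*
 * Acquire a vnode lock.  A vnode may switch between being a snapshot
 * vnode and a regular vnode while we sleep, replacing v_vnlock, so the
 * lock just obtained may not be the right one; in that case drop it and
 * retry (see the loop below).
 */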
402 static int
403 ffs_lock(
404 	struct vop_lock1_args /* {
405 		struct vnode *a_vp;
406 		int a_flags;
407 		struct thread *a_td;
408 		char *file;
409 		int line;
410 	} */ *ap)
411 {
412 #ifndef NO_FFS_SNAPSHOT
413 	struct vnode *vp;
414 	int flags;
415 	struct lock *lkp;
416 	int result;
417 
418 	switch (ap->a_flags & LK_TYPE_MASK) {
419 	case LK_SHARED:
420 	case LK_UPGRADE:
421 	case LK_EXCLUSIVE:
422 		vp = ap->a_vp;
423 		flags = ap->a_flags;
424 		for (;;) {
425 #ifdef DEBUG_VFS_LOCKS
426 			KASSERT(vp->v_holdcnt != 0,
427 			    ("ffs_lock %p: zero hold count", vp));
428 #endif
429 			lkp = vp->v_vnlock;
430 			result = _lockmgr_args(lkp, flags, VI_MTX(vp),
431 			    LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT,
432 			    ap->a_file, ap->a_line);
433 			if (lkp == vp->v_vnlock || result != 0)
434 				break;
435 			/*
436 			 * Apparent success, except that the vnode
437 			 * mutated between snapshot file vnode and
438 			 * regular file vnode while this process
439 			 * slept.  The lock currently held is not the
440 			 * right lock.  Release it, and try to get the
441 			 * new lock.
442 			 */
443 			(void) _lockmgr_args(lkp, LK_RELEASE, NULL,
444 			    LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT,
445 			    ap->a_file, ap->a_line);
446 			if ((flags & (LK_INTERLOCK | LK_NOWAIT)) ==
447 			    (LK_INTERLOCK | LK_NOWAIT))
448 				return (EBUSY);
449 			if ((flags & LK_TYPE_MASK) == LK_UPGRADE)
450 				flags = (flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE;
451 			flags &= ~LK_INTERLOCK;
452 		}
453 		break;
454 	default:
455 		result = VOP_LOCK1_APV(&ufs_vnodeops, ap);
456 	}
457 	return (result);
458 #else
459 	return (VOP_LOCK1_APV(&ufs_vnodeops, ap));
460 #endif
461 }
462 
463 /*
464  * Vnode op for reading.
465  */
466 static int
467 ffs_read(
468 	struct vop_read_args /* {
469 		struct vnode *a_vp;
470 		struct uio *a_uio;
471 		int a_ioflag;
472 		struct ucred *a_cred;
473 	} */ *ap)
474 {
475 	struct vnode *vp;
476 	struct inode *ip;
477 	struct uio *uio;
478 	struct fs *fs;
479 	struct buf *bp;
480 	ufs_lbn_t lbn, nextlbn;
481 	off_t bytesinfile;
482 	long size, xfersize, blkoffset;
483 	ssize_t orig_resid;
484 	int error;
485 	int seqcount;
486 	int ioflag;
487 
488 	vp = ap->a_vp;
489 	uio = ap->a_uio;
490 	ioflag = ap->a_ioflag;
491 	if (ap->a_ioflag & IO_EXT)
492 #ifdef notyet
493 		return (ffs_extread(vp, uio, ioflag));
494 #else
495 		panic("ffs_read+IO_EXT");
496 #endif
497 #ifdef DIRECTIO
498 	if ((ioflag & IO_DIRECT) != 0) {
499 		int workdone;
500 
501 		error = ffs_rawread(vp, uio, &workdone);
502 		if (error != 0 || workdone != 0)
503 			return error;
504 	}
505 #endif
506 
507 	seqcount = ap->a_ioflag >> IO_SEQSHIFT;
508 	ip = VTOI(vp);
509 
510 #ifdef INVARIANTS
511 	if (uio->uio_rw != UIO_READ)
512 		panic("ffs_read: mode");
513 
514 	if (vp->v_type == VLNK) {
515 		if ((int)ip->i_size < vp->v_mount->mnt_maxsymlinklen)
516 			panic("ffs_read: short symlink");
517 	} else if (vp->v_type != VREG && vp->v_type != VDIR)
518 		panic("ffs_read: type %d",  vp->v_type);
519 #endif
520 	orig_resid = uio->uio_resid;
521 	KASSERT(orig_resid >= 0, ("ffs_read: uio->uio_resid < 0"));
522 	if (orig_resid == 0)
523 		return (0);
524 	KASSERT(uio->uio_offset >= 0, ("ffs_read: uio->uio_offset < 0"));
525 	fs = ITOFS(ip);
526 	if (uio->uio_offset < ip->i_size &&
527 	    uio->uio_offset >= fs->fs_maxfilesize)
528 		return (EOVERFLOW);
529 
530 	for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
531 		if ((bytesinfile = ip->i_size - uio->uio_offset) <= 0)
532 			break;
533 		lbn = lblkno(fs, uio->uio_offset);
534 		nextlbn = lbn + 1;
535 
536 		/*
537 		 * Size of buffer.  The buffer representing the
538 		 * end of the file is rounded up to the size of
539 		 * the block type (fragment or full block,
540 		 * depending).
541 		 */
542 		size = blksize(fs, ip, lbn);
543 		blkoffset = blkoff(fs, uio->uio_offset);
544 
545 		/*
546 		 * The amount we want to transfer in this iteration is
547 		 * one FS block less the amount of the data before
548 		 * our startpoint (duh!)
549 		 */
550 		xfersize = fs->fs_bsize - blkoffset;
551 
552 		/*
553 		 * But if we actually want less than the block,
554 		 * or the file doesn't have a whole block more of data,
555 		 * then use the lesser number.
556 		 */
557 		if (uio->uio_resid < xfersize)
558 			xfersize = uio->uio_resid;
559 		if (bytesinfile < xfersize)
560 			xfersize = bytesinfile;
561 
562 		if (lblktosize(fs, nextlbn) >= ip->i_size) {
563 			/*
564 			 * Don't do readahead if this is the end of the file.
565 			 */
566 			error = bread_gb(vp, lbn, size, NOCRED,
567 			    GB_UNMAPPED, &bp);
568 		} else if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) {
569 			/*
570 			 * Otherwise if we are allowed to cluster,
571 			 * grab as much as we can.
572 			 *
573 			 * XXX  This may not be a win if we are not
574 			 * doing sequential access.
575 			 */
576 			error = cluster_read(vp, ip->i_size, lbn,
577 			    size, NOCRED, blkoffset + uio->uio_resid,
578 			    seqcount, GB_UNMAPPED, &bp);
579 		} else if (seqcount > 1) {
580 			/*
581 			 * If we are NOT allowed to cluster, then
582 			 * if we appear to be acting sequentially,
583 			 * fire off a request for a readahead
584 			 * as well as a read. Note that the 4th and 5th
585 			 * arguments point to arrays of the size specified in
586 			 * the 6th argument.
587 			 */
588 			u_int nextsize = blksize(fs, ip, nextlbn);
589 			error = breadn_flags(vp, lbn, size, &nextlbn,
590 			    &nextsize, 1, NOCRED, GB_UNMAPPED, NULL, &bp);
591 		} else {
592 			/*
593 			 * Failing all of the above, just read what the
594 			 * user asked for. Interestingly, the same as
595 			 * the first option above.
596 			 */
597 			error = bread_gb(vp, lbn, size, NOCRED,
598 			    GB_UNMAPPED, &bp);
599 		}
600 		if (error) {
601 			brelse(bp);
602 			bp = NULL;
603 			break;
604 		}
605 
606 		/*
607 		 * We should only get non-zero b_resid when an I/O error
608 		 * has occurred, which should cause us to break above.
609 		 * However, if the short read did not cause an error,
610 		 * then we want to ensure that we do not uiomove bad
611 		 * or uninitialized data.
612 		 */
613 		size -= bp->b_resid;
614 		if (size < xfersize) {
615 			if (size == 0)
616 				break;
617 			xfersize = size;
618 		}
619 
620 		if (buf_mapped(bp)) {
621 			error = vn_io_fault_uiomove((char *)bp->b_data +
622 			    blkoffset, (int)xfersize, uio);
623 		} else {
624 			error = vn_io_fault_pgmove(bp->b_pages, blkoffset,
625 			    (int)xfersize, uio);
626 		}
627 		if (error)
628 			break;
629 
630 		vfs_bio_brelse(bp, ioflag);
631 	}
632 
633 	/*
634 	 * bp can be non-NULL here only if an error occurred: the loop
635 	 * above resets bp to NULL on each iteration and does not set a
636 	 * new value on normal completion, so we must have arrived here
637 	 * via a 'break' statement.
638 	 */
639 	if (bp != NULL)
640 		vfs_bio_brelse(bp, ioflag);
641 
642 	if ((error == 0 || uio->uio_resid != orig_resid) &&
643 	    (vp->v_mount->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0 &&
644 	    (ip->i_flag & IN_ACCESS) == 0) {
645 		VI_LOCK(vp);
646 		ip->i_flag |= IN_ACCESS;
647 		VI_UNLOCK(vp);
648 	}
649 	return (error);
650 }
651 
652 /*
653  * Vnode op for writing.
654  */
655 static int
656 ffs_write(
657 	struct vop_write_args /* {
658 		struct vnode *a_vp;
659 		struct uio *a_uio;
660 		int a_ioflag;
661 		struct ucred *a_cred;
662 	} */ *ap)
663 {
664 	struct vnode *vp;
665 	struct uio *uio;
666 	struct inode *ip;
667 	struct fs *fs;
668 	struct buf *bp;
669 	ufs_lbn_t lbn;
670 	off_t osize;
671 	ssize_t resid;
672 	int seqcount;
673 	int blkoffset, error, flags, ioflag, size, xfersize;
674 
675 	vp = ap->a_vp;
676 	uio = ap->a_uio;
677 	ioflag = ap->a_ioflag;
678 	if (ap->a_ioflag & IO_EXT)
679 #ifdef notyet
680 		return (ffs_extwrite(vp, uio, ioflag, ap->a_cred));
681 #else
682 		panic("ffs_write+IO_EXT");
683 #endif
684 
685 	seqcount = ap->a_ioflag >> IO_SEQSHIFT;
686 	ip = VTOI(vp);
687 
688 #ifdef INVARIANTS
689 	if (uio->uio_rw != UIO_WRITE)
690 		panic("ffs_write: mode");
691 #endif
692 
693 	switch (vp->v_type) {
694 	case VREG:
695 		if (ioflag & IO_APPEND)
696 			uio->uio_offset = ip->i_size;
697 		if ((ip->i_flags & APPEND) && uio->uio_offset != ip->i_size)
698 			return (EPERM);
699 		/* FALLTHROUGH */
700 	case VLNK:
701 		break;
702 	case VDIR:
703 		panic("ffs_write: dir write");
704 		break;
705 	default:
706 		panic("ffs_write: type %p %d (%d,%d)", vp, (int)vp->v_type,
707 			(int)uio->uio_offset,
708 			(int)uio->uio_resid
709 		);
710 	}
711 
712 	KASSERT(uio->uio_resid >= 0, ("ffs_write: uio->uio_resid < 0"));
713 	KASSERT(uio->uio_offset >= 0, ("ffs_write: uio->uio_offset < 0"));
714 	fs = ITOFS(ip);
715 	if ((uoff_t)uio->uio_offset + uio->uio_resid > fs->fs_maxfilesize)
716 		return (EFBIG);
717 	/*
718 	 * Maybe this should be above the vnode op call, but so long as
719 	 * file servers have no limits, I don't think it matters.
720 	 */
721 	if (vn_rlimit_fsize(vp, uio, uio->uio_td))
722 		return (EFBIG);
723 
724 	resid = uio->uio_resid;
725 	osize = ip->i_size;
726 	if (seqcount > BA_SEQMAX)
727 		flags = BA_SEQMAX << BA_SEQSHIFT;
728 	else
729 		flags = seqcount << BA_SEQSHIFT;
730 	if (ioflag & IO_SYNC)
731 		flags |= IO_SYNC;
732 	flags |= BA_UNMAPPED;
733 
734 	for (error = 0; uio->uio_resid > 0;) {
735 		lbn = lblkno(fs, uio->uio_offset);
736 		blkoffset = blkoff(fs, uio->uio_offset);
737 		xfersize = fs->fs_bsize - blkoffset;
738 		if (uio->uio_resid < xfersize)
739 			xfersize = uio->uio_resid;
740 		if (uio->uio_offset + xfersize > ip->i_size)
741 			vnode_pager_setsize(vp, uio->uio_offset + xfersize);
742 
743 		/*
744 		 * We must perform a read-before-write if the transfer size
745 		 * does not cover the entire buffer.
746 		 */
747 		if (fs->fs_bsize > xfersize)
748 			flags |= BA_CLRBUF;
749 		else
750 			flags &= ~BA_CLRBUF;
751 /* XXX is uio->uio_offset the right thing here? */
752 		error = UFS_BALLOC(vp, uio->uio_offset, xfersize,
753 		    ap->a_cred, flags, &bp);
754 		if (error != 0) {
755 			vnode_pager_setsize(vp, ip->i_size);
756 			break;
757 		}
758 		if ((ioflag & (IO_SYNC|IO_INVAL)) == (IO_SYNC|IO_INVAL))
759 			bp->b_flags |= B_NOCACHE;
760 
761 		if (uio->uio_offset + xfersize > ip->i_size) {
762 			ip->i_size = uio->uio_offset + xfersize;
763 			DIP_SET(ip, i_size, ip->i_size);
764 		}
765 
766 		size = blksize(fs, ip, lbn) - bp->b_resid;
767 		if (size < xfersize)
768 			xfersize = size;
769 
770 		if (buf_mapped(bp)) {
771 			error = vn_io_fault_uiomove((char *)bp->b_data +
772 			    blkoffset, (int)xfersize, uio);
773 		} else {
774 			error = vn_io_fault_pgmove(bp->b_pages, blkoffset,
775 			    (int)xfersize, uio);
776 		}
777 		/*
778 		 * If the buffer is not already filled and we encounter an
779 		 * error while trying to fill it, we have to clear out any
780 		 * garbage data from the pages instantiated for the buffer.
781 		 * If we do not, a failed uiomove() during a write can leave
782 		 * the prior contents of the pages exposed to a userland mmap.
783 		 *
784 		 * Note that we need only clear buffers with a transfer size
785 		 * equal to the block size because buffers with a shorter
786 		 * transfer size were cleared above by the call to UFS_BALLOC()
787 		 * with the BA_CLRBUF flag set.
788 		 *
789 		 * If the source region for uiomove identically mmaps the
790 		 * buffer, uiomove() performed the NOP copy, and the buffer
791 		 * content remains valid because the page fault handler
792 		 * validated the pages.
793 		 */
794 		if (error != 0 && (bp->b_flags & B_CACHE) == 0 &&
795 		    fs->fs_bsize == xfersize)
796 			vfs_bio_clrbuf(bp);
797 
798 		vfs_bio_set_flags(bp, ioflag);
799 
800 		/*
801 		 * If IO_SYNC each buffer is written synchronously.  Otherwise
802 		 * if we have a severe page deficiency write the buffer
803 		 * asynchronously.  Otherwise try to cluster, and if that
804 		 * doesn't do it then either do an async write (if O_DIRECT),
805 		 * or a delayed write (if not).
806 		 */
807 		if (ioflag & IO_SYNC) {
808 			(void)bwrite(bp);
809 		} else if (vm_page_count_severe() ||
810 			    buf_dirty_count_severe() ||
811 			    (ioflag & IO_ASYNC)) {
812 			bp->b_flags |= B_CLUSTEROK;
813 			bawrite(bp);
814 		} else if (xfersize + blkoffset == fs->fs_bsize) {
815 			if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERW) == 0) {
816 				bp->b_flags |= B_CLUSTEROK;
817 				cluster_write(vp, bp, ip->i_size, seqcount,
818 				    GB_UNMAPPED);
819 			} else {
820 				bawrite(bp);
821 			}
822 		} else if (ioflag & IO_DIRECT) {
823 			bp->b_flags |= B_CLUSTEROK;
824 			bawrite(bp);
825 		} else {
826 			bp->b_flags |= B_CLUSTEROK;
827 			bdwrite(bp);
828 		}
829 		if (error || xfersize == 0)
830 			break;
831 		ip->i_flag |= IN_CHANGE | IN_UPDATE;
832 	}
833 	/*
834 	 * If we successfully wrote any data and we are not the superuser,
835 	 * we clear the setuid and setgid bits as a precaution against
836 	 * tampering.
837 	 */
838 	if ((ip->i_mode & (ISUID | ISGID)) && resid > uio->uio_resid &&
839 	    ap->a_cred) {
840 		if (priv_check_cred(ap->a_cred, PRIV_VFS_RETAINSUGID, 0)) {
841 			ip->i_mode &= ~(ISUID | ISGID);
842 			DIP_SET(ip, i_mode, ip->i_mode);
843 		}
844 	}
845 	if (error) {
846 		if (ioflag & IO_UNIT) {
847 			(void)ffs_truncate(vp, osize,
848 			    IO_NORMAL | (ioflag & IO_SYNC), ap->a_cred);
849 			uio->uio_offset -= resid - uio->uio_resid;
850 			uio->uio_resid = resid;
851 		}
852 	} else if (resid > uio->uio_resid && (ioflag & IO_SYNC))
853 		error = ffs_update(vp, 1);
854 	return (error);
855 }
856 
857 /*
858  * Extended attribute area reading.
859  */
860 static int
861 ffs_extread(struct vnode *vp, struct uio *uio, int ioflag)
862 {
863 	struct inode *ip;
864 	struct ufs2_dinode *dp;
865 	struct fs *fs;
866 	struct buf *bp;
867 	ufs_lbn_t lbn, nextlbn;
868 	off_t bytesinfile;
869 	long size, xfersize, blkoffset;
870 	ssize_t orig_resid;
871 	int error;
872 
873 	ip = VTOI(vp);
874 	fs = ITOFS(ip);
875 	dp = ip->i_din2;
876 
877 #ifdef INVARIANTS
878 	if (uio->uio_rw != UIO_READ || fs->fs_magic != FS_UFS2_MAGIC)
879 		panic("ffs_extread: mode");
880 
881 #endif
882 	orig_resid = uio->uio_resid;
883 	KASSERT(orig_resid >= 0, ("ffs_extread: uio->uio_resid < 0"));
884 	if (orig_resid == 0)
885 		return (0);
886 	KASSERT(uio->uio_offset >= 0, ("ffs_extread: uio->uio_offset < 0"));
887 
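	/*
	 * The extended attribute area is addressed with negative logical
	 * block numbers: block N of the area is read as block (-1 - N).
	 */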
888 	for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
889 		if ((bytesinfile = dp->di_extsize - uio->uio_offset) <= 0)
890 			break;
891 		lbn = lblkno(fs, uio->uio_offset);
892 		nextlbn = lbn + 1;
893 
894 		/*
895 		 * Size of buffer.  The buffer representing the
896 		 * end of the file is rounded up to the size of
897 		 * the block type (fragment or full block,
898 		 * depending).
899 		 */
900 		size = sblksize(fs, dp->di_extsize, lbn);
901 		blkoffset = blkoff(fs, uio->uio_offset);
902 
903 		/*
904 		 * The amount we want to transfer in this iteration is
905 		 * one FS block less the amount of the data before
906 		 * our startpoint (duh!)
907 		 */
908 		xfersize = fs->fs_bsize - blkoffset;
909 
910 		/*
911 		 * But if we actually want less than the block,
912 		 * or the file doesn't have a whole block more of data,
913 		 * then use the lesser number.
914 		 */
915 		if (uio->uio_resid < xfersize)
916 			xfersize = uio->uio_resid;
917 		if (bytesinfile < xfersize)
918 			xfersize = bytesinfile;
919 
920 		if (lblktosize(fs, nextlbn) >= dp->di_extsize) {
921 			/*
922 			 * Don't do readahead if this is the end of the EA area.
923 			 */
924 			error = bread(vp, -1 - lbn, size, NOCRED, &bp);
925 		} else {
926 			/*
927 			 * If we have a second block, then
928 			 * fire off a request for a readahead
929 			 * as well as a read. Note that the 4th and 5th
930 			 * arguments point to arrays of the size specified in
931 			 * the 6th argument.
932 			 */
933 			u_int nextsize = sblksize(fs, dp->di_extsize, nextlbn);
934 
935 			nextlbn = -1 - nextlbn;
936 			error = breadn(vp, -1 - lbn,
937 			    size, &nextlbn, &nextsize, 1, NOCRED, &bp);
938 		}
939 		if (error) {
940 			brelse(bp);
941 			bp = NULL;
942 			break;
943 		}
944 
945 		/*
946 		 * We should only get non-zero b_resid when an I/O error
947 		 * has occurred, which should cause us to break above.
948 		 * However, if the short read did not cause an error,
949 		 * then we want to ensure that we do not uiomove bad
950 		 * or uninitialized data.
951 		 */
952 		size -= bp->b_resid;
953 		if (size < xfersize) {
954 			if (size == 0)
955 				break;
956 			xfersize = size;
957 		}
958 
959 		error = uiomove((char *)bp->b_data + blkoffset,
960 					(int)xfersize, uio);
961 		if (error)
962 			break;
963 		vfs_bio_brelse(bp, ioflag);
964 	}
965 
966 	/*
967 	 * bp can be non-NULL here only if an error occurred: the loop
968 	 * above resets bp to NULL on each iteration and does not set a
969 	 * new value on normal completion, so we must have arrived here
970 	 * via a 'break' statement.
971 	 */
972 	if (bp != NULL)
973 		vfs_bio_brelse(bp, ioflag);
974 	return (error);
975 }
976 
977 /*
978  * Extended attribute area writing.
979  */
980 static int
981 ffs_extwrite(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *ucred)
982 {
983 	struct inode *ip;
984 	struct ufs2_dinode *dp;
985 	struct fs *fs;
986 	struct buf *bp;
987 	ufs_lbn_t lbn;
988 	off_t osize;
989 	ssize_t resid;
990 	int blkoffset, error, flags, size, xfersize;
991 
992 	ip = VTOI(vp);
993 	fs = ITOFS(ip);
994 	dp = ip->i_din2;
995 
996 #ifdef INVARIANTS
997 	if (uio->uio_rw != UIO_WRITE || fs->fs_magic != FS_UFS2_MAGIC)
998 		panic("ffs_extwrite: mode");
999 #endif
1000 
1001 	if (ioflag & IO_APPEND)
1002 		uio->uio_offset = dp->di_extsize;
1003 	KASSERT(uio->uio_offset >= 0, ("ffs_extwrite: uio->uio_offset < 0"));
1004 	KASSERT(uio->uio_resid >= 0, ("ffs_extwrite: uio->uio_resid < 0"));
1005 	if ((uoff_t)uio->uio_offset + uio->uio_resid >
1006 	    UFS_NXADDR * fs->fs_bsize)
1007 		return (EFBIG);
1008 
1009 	resid = uio->uio_resid;
1010 	osize = dp->di_extsize;
1011 	flags = IO_EXT;
1012 	if (ioflag & IO_SYNC)
1013 		flags |= IO_SYNC;
1014 
1015 	for (error = 0; uio->uio_resid > 0;) {
1016 		lbn = lblkno(fs, uio->uio_offset);
1017 		blkoffset = blkoff(fs, uio->uio_offset);
1018 		xfersize = fs->fs_bsize - blkoffset;
1019 		if (uio->uio_resid < xfersize)
1020 			xfersize = uio->uio_resid;
1021 
1022 		/*
1023 		 * We must perform a read-before-write if the transfer size
1024 		 * does not cover the entire buffer.
1025 		 */
1026 		if (fs->fs_bsize > xfersize)
1027 			flags |= BA_CLRBUF;
1028 		else
1029 			flags &= ~BA_CLRBUF;
1030 		error = UFS_BALLOC(vp, uio->uio_offset, xfersize,
1031 		    ucred, flags, &bp);
1032 		if (error != 0)
1033 			break;
1034 		/*
1035 		 * If the buffer is not valid we have to clear out any
1036 		 * garbage data from the pages instantiated for the buffer.
1037 		 * If we do not, a failed uiomove() during a write can leave
1038 		 * the prior contents of the pages exposed to a userland
1039 		 * mmap().  XXX deal with uiomove() errors a better way.
1040 		 */
1041 		if ((bp->b_flags & B_CACHE) == 0 && fs->fs_bsize <= xfersize)
1042 			vfs_bio_clrbuf(bp);
1043 
1044 		if (uio->uio_offset + xfersize > dp->di_extsize)
1045 			dp->di_extsize = uio->uio_offset + xfersize;
1046 
1047 		size = sblksize(fs, dp->di_extsize, lbn) - bp->b_resid;
1048 		if (size < xfersize)
1049 			xfersize = size;
1050 
1051 		error =
1052 		    uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio);
1053 
1054 		vfs_bio_set_flags(bp, ioflag);
1055 
1056 		/*
1057 		 * If IO_SYNC each buffer is written synchronously.  Otherwise
1058 		 * if we have a severe page deficiency write the buffer
1059 		 * asynchronously.  Otherwise try to cluster, and if that
1060 		 * doesn't do it then either do an async write (if O_DIRECT),
1061 		 * or a delayed write (if not).
1062 		 */
1063 		if (ioflag & IO_SYNC) {
1064 			(void)bwrite(bp);
1065 		} else if (vm_page_count_severe() ||
1066 			    buf_dirty_count_severe() ||
1067 			    xfersize + blkoffset == fs->fs_bsize ||
1068 			    (ioflag & (IO_ASYNC | IO_DIRECT)))
1069 			bawrite(bp);
1070 		else
1071 			bdwrite(bp);
1072 		if (error || xfersize == 0)
1073 			break;
1074 		ip->i_flag |= IN_CHANGE;
1075 	}
1076 	/*
1077 	 * If we successfully wrote any data and we are not the superuser,
1078 	 * we clear the setuid and setgid bits as a precaution against
1079 	 * tampering.
1080 	 */
1081 	if ((ip->i_mode & (ISUID | ISGID)) && resid > uio->uio_resid && ucred) {
1082 		if (priv_check_cred(ucred, PRIV_VFS_RETAINSUGID, 0)) {
1083 			ip->i_mode &= ~(ISUID | ISGID);
1084 			dp->di_mode = ip->i_mode;
1085 		}
1086 	}
1087 	if (error) {
1088 		if (ioflag & IO_UNIT) {
1089 			(void)ffs_truncate(vp, osize,
1090 			    IO_EXT | (ioflag&IO_SYNC), ucred);
1091 			uio->uio_offset -= resid - uio->uio_resid;
1092 			uio->uio_resid = resid;
1093 		}
1094 	} else if (resid > uio->uio_resid && (ioflag & IO_SYNC))
1095 		error = ffs_update(vp, 1);
1096 	return (error);
1097 }
1098 
1099 
1100 /*
1101  * Locate a named extended attribute (nspace:name) in the EA area
1102  * (ptr:length).  Return the length of the attribute's content and,
1103  * optionally, pointers to the entry itself and to its content.
1104  * Return -1 if the attribute is not found.
1105  */
1106 static int
1107 ffs_findextattr(u_char *ptr, u_int length, int nspace, const char *name,
1108     struct extattr **eapp, u_char **eac)
1109 {
1110 	struct extattr *eap, *eaend;
1111 	size_t nlen;
1112 
1113 	nlen = strlen(name);
1114 	KASSERT(ALIGNED_TO(ptr, struct extattr), ("unaligned"));
1115 	eap = (struct extattr *)ptr;
1116 	eaend = (struct extattr *)(ptr + length);
1117 	for (; eap < eaend; eap = EXTATTR_NEXT(eap)) {
1118 		/* make sure this entry is complete */
1119 		if (EXTATTR_NEXT(eap) > eaend)
1120 			break;
1121 		if (eap->ea_namespace != nspace || eap->ea_namelength != nlen
1122 		    || memcmp(eap->ea_name, name, nlen) != 0)
1123 			continue;
1124 		if (eapp != NULL)
1125 			*eapp = eap;
1126 		if (eac != NULL)
1127 			*eac = EXTATTR_CONTENT(eap);
1128 		return (EXTATTR_CONTENT_SIZE(eap));
1129 	}
1130 	return (-1);
1131 }
1132 
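/*
 * Read a vnode's entire extended attribute area into a newly allocated
 * buffer, with 'extra' bytes of slack at the end, and return it in *p.
 * The caller frees the buffer (M_TEMP) when done with it.
 */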
1133 static int
1134 ffs_rdextattr(u_char **p, struct vnode *vp, struct thread *td, int extra)
1135 {
1136 	struct inode *ip;
1137 	struct ufs2_dinode *dp;
1138 	struct fs *fs;
1139 	struct uio luio;
1140 	struct iovec liovec;
1141 	u_int easize;
1142 	int error;
1143 	u_char *eae;
1144 
1145 	ip = VTOI(vp);
1146 	fs = ITOFS(ip);
1147 	dp = ip->i_din2;
1148 	easize = dp->di_extsize;
1149 	if ((uoff_t)easize + extra > UFS_NXADDR * fs->fs_bsize)
1150 		return (EFBIG);
1151 
1152 	eae = malloc(easize + extra, M_TEMP, M_WAITOK);
1153 
1154 	liovec.iov_base = eae;
1155 	liovec.iov_len = easize;
1156 	luio.uio_iov = &liovec;
1157 	luio.uio_iovcnt = 1;
1158 	luio.uio_offset = 0;
1159 	luio.uio_resid = easize;
1160 	luio.uio_segflg = UIO_SYSSPACE;
1161 	luio.uio_rw = UIO_READ;
1162 	luio.uio_td = td;
1163 
1164 	error = ffs_extread(vp, &luio, IO_EXT | IO_SYNC);
1165 	if (error) {
1166 		free(eae, M_TEMP);
1167 		return (error);
1168 	}
1169 	*p = eae;
1170 	return (0);
1171 }
1172 
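/*
 * Serialize access to the inode's in-memory extended attribute area.
 * The IN_EA_LOCKED flag is protected by the vnode interlock; waiters
 * sleep on i_ea_refs until the area is unlocked.
 */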
1173 static void
1174 ffs_lock_ea(struct vnode *vp)
1175 {
1176 	struct inode *ip;
1177 
1178 	ip = VTOI(vp);
1179 	VI_LOCK(vp);
1180 	while (ip->i_flag & IN_EA_LOCKED) {
1181 		ip->i_flag |= IN_EA_LOCKWAIT;
1182 		msleep(&ip->i_ea_refs, &vp->v_interlock, PINOD + 2, "ufs_ea",
1183 		    0);
1184 	}
1185 	ip->i_flag |= IN_EA_LOCKED;
1186 	VI_UNLOCK(vp);
1187 }
1188 
1189 static void
1190 ffs_unlock_ea(struct vnode *vp)
1191 {
1192 	struct inode *ip;
1193 
1194 	ip = VTOI(vp);
1195 	VI_LOCK(vp);
1196 	if (ip->i_flag & IN_EA_LOCKWAIT)
1197 		wakeup(&ip->i_ea_refs);
1198 	ip->i_flag &= ~(IN_EA_LOCKED | IN_EA_LOCKWAIT);
1199 	VI_UNLOCK(vp);
1200 }
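/*
 * Start an extended attribute transaction: read the EA area into memory
 * if it is not already cached and take a reference on it.
 */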
1201 
1202 static int
1203 ffs_open_ea(struct vnode *vp, struct ucred *cred, struct thread *td)
1204 {
1205 	struct inode *ip;
1206 	struct ufs2_dinode *dp;
1207 	int error;
1208 
1209 	ip = VTOI(vp);
1210 
1211 	ffs_lock_ea(vp);
1212 	if (ip->i_ea_area != NULL) {
1213 		ip->i_ea_refs++;
1214 		ffs_unlock_ea(vp);
1215 		return (0);
1216 	}
1217 	dp = ip->i_din2;
1218 	error = ffs_rdextattr(&ip->i_ea_area, vp, td, 0);
1219 	if (error) {
1220 		ffs_unlock_ea(vp);
1221 		return (error);
1222 	}
1223 	ip->i_ea_len = dp->di_extsize;
1224 	ip->i_ea_error = 0;
1225 	ip->i_ea_refs++;
1226 	ffs_unlock_ea(vp);
1227 	return (0);
1228 }
1229 
1230 /*
1231  * Vnode extattr transaction commit/abort
1232  */
1233 static int
1234 ffs_close_ea(struct vnode *vp, int commit, struct ucred *cred, struct thread *td)
1235 {
1236 	struct inode *ip;
1237 	struct uio luio;
1238 	struct iovec liovec;
1239 	int error;
1240 	struct ufs2_dinode *dp;
1241 
1242 	ip = VTOI(vp);
1243 
1244 	ffs_lock_ea(vp);
1245 	if (ip->i_ea_area == NULL) {
1246 		ffs_unlock_ea(vp);
1247 		return (EINVAL);
1248 	}
1249 	dp = ip->i_din2;
1250 	error = ip->i_ea_error;
1251 	if (commit && error == 0) {
1252 		ASSERT_VOP_ELOCKED(vp, "ffs_close_ea commit");
1253 		if (cred == NOCRED)
1254 			cred =  vp->v_mount->mnt_cred;
1255 		liovec.iov_base = ip->i_ea_area;
1256 		liovec.iov_len = ip->i_ea_len;
1257 		luio.uio_iov = &liovec;
1258 		luio.uio_iovcnt = 1;
1259 		luio.uio_offset = 0;
1260 		luio.uio_resid = ip->i_ea_len;
1261 		luio.uio_segflg = UIO_SYSSPACE;
1262 		luio.uio_rw = UIO_WRITE;
1263 		luio.uio_td = td;
1264 		/* XXX: I'm not happy about truncating to zero size */
1265 		if (ip->i_ea_len < dp->di_extsize)
1266 			error = ffs_truncate(vp, 0, IO_EXT, cred);
1267 		error = ffs_extwrite(vp, &luio, IO_EXT | IO_SYNC, cred);
1268 	}
1269 	if (--ip->i_ea_refs == 0) {
1270 		free(ip->i_ea_area, M_TEMP);
1271 		ip->i_ea_area = NULL;
1272 		ip->i_ea_len = 0;
1273 		ip->i_ea_error = 0;
1274 	}
1275 	ffs_unlock_ea(vp);
1276 	return (error);
1277 }
1278 
1279 /*
1280  * Vnode extattr strategy routine for fifos.
1281  *
1282  * We need to check for a read or write of the extended attributes.
1283  * Otherwise we just fall through and do the usual thing.
1284  */
1285 static int
1286 ffsext_strategy(struct vop_strategy_args *ap)
1287 /*
1288 struct vop_strategy_args {
1289 	struct vnodeop_desc *a_desc;
1290 	struct vnode *a_vp;
1291 	struct buf *a_bp;
1292 };
1293 */
1294 {
1295 	struct vnode *vp;
1296 	daddr_t lbn;
1297 
1298 	vp = ap->a_vp;
1299 	lbn = ap->a_bp->b_lblkno;
1300 	if (I_IS_UFS2(VTOI(vp)) && lbn < 0 && lbn >= -UFS_NXADDR)
1301 		return (VOP_STRATEGY_APV(&ufs_vnodeops, ap));
1302 	if (vp->v_type == VFIFO)
1303 		return (VOP_STRATEGY_APV(&ufs_fifoops, ap));
1304 	panic("spec nodes went here");
1305 }
1306 
1307 /*
1308  * Vnode extattr transaction start.
1309  */
1310 static int
1311 ffs_openextattr(struct vop_openextattr_args *ap)
1312 /*
1313 struct vop_openextattr_args {
1314 	struct vnodeop_desc *a_desc;
1315 	struct vnode *a_vp;
1316 	IN struct ucred *a_cred;
1317 	IN struct thread *a_td;
1318 };
1319 */
1320 {
1321 
1322 	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
1323 		return (EOPNOTSUPP);
1324 
1325 	return (ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td));
1326 }
1327 
1328 
1329 /*
1330  * Vnode extattr transaction commit/abort
1331  */
1332 static int
1333 ffs_closeextattr(struct vop_closeextattr_args *ap)
1334 /*
1335 struct vop_closeextattr_args {
1336 	struct vnodeop_desc *a_desc;
1337 	struct vnode *a_vp;
1338 	int a_commit;
1339 	IN struct ucred *a_cred;
1340 	IN struct thread *a_td;
1341 };
1342 */
1343 {
1344 
1345 	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
1346 		return (EOPNOTSUPP);
1347 
1348 	if (ap->a_commit && (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY))
1349 		return (EROFS);
1350 
1351 	return (ffs_close_ea(ap->a_vp, ap->a_commit, ap->a_cred, ap->a_td));
1352 }
1353 
1354 /*
1355  * Vnode operation to remove a named attribute.
1356  */
1357 static int
1358 ffs_deleteextattr(struct vop_deleteextattr_args *ap)
1359 /*
1360 vop_deleteextattr {
1361 	IN struct vnode *a_vp;
1362 	IN int a_attrnamespace;
1363 	IN const char *a_name;
1364 	IN struct ucred *a_cred;
1365 	IN struct thread *a_td;
1366 };
1367 */
1368 {
1369 	struct inode *ip;
1370 	struct fs *fs;
1371 	struct extattr *eap;
1372 	uint32_t ul;
1373 	int olen, error, i, easize;
1374 	u_char *eae;
1375 	void *tmp;
1376 
1377 	ip = VTOI(ap->a_vp);
1378 	fs = ITOFS(ip);
1379 
1380 	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
1381 		return (EOPNOTSUPP);
1382 
1383 	if (strlen(ap->a_name) == 0)
1384 		return (EINVAL);
1385 
1386 	if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
1387 		return (EROFS);
1388 
1389 	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
1390 	    ap->a_cred, ap->a_td, VWRITE);
1391 	if (error) {
1392 
1393 		/*
1394 		 * ffs_lock_ea is not needed there, because the vnode
1395 		 * must be exclusively locked.
1396 		 */
1397 		if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
1398 			ip->i_ea_error = error;
1399 		return (error);
1400 	}
1401 
1402 	error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
1403 	if (error)
1404 		return (error);
1405 
1406 	/* CEM: delete could be done in-place instead */
1407 	eae = malloc(ip->i_ea_len, M_TEMP, M_WAITOK);
1408 	bcopy(ip->i_ea_area, eae, ip->i_ea_len);
1409 	easize = ip->i_ea_len;
1410 
1411 	olen = ffs_findextattr(eae, easize, ap->a_attrnamespace, ap->a_name,
1412 	    &eap, NULL);
1413 	if (olen == -1) {
1414 		/* delete but nonexistent */
1415 		free(eae, M_TEMP);
1416 		ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
1417 		return (ENOATTR);
1418 	}
1419 	ul = eap->ea_length;
1420 	i = (u_char *)EXTATTR_NEXT(eap) - eae;
1421 	bcopy(EXTATTR_NEXT(eap), eap, easize - i);
1422 	easize -= ul;
1423 
1424 	tmp = ip->i_ea_area;
1425 	ip->i_ea_area = eae;
1426 	ip->i_ea_len = easize;
1427 	free(tmp, M_TEMP);
1428 	error = ffs_close_ea(ap->a_vp, 1, ap->a_cred, ap->a_td);
1429 	return (error);
1430 }
1431 
1432 /*
1433  * Vnode operation to retrieve a named extended attribute.
1434  */
1435 static int
1436 ffs_getextattr(struct vop_getextattr_args *ap)
1437 /*
1438 vop_getextattr {
1439 	IN struct vnode *a_vp;
1440 	IN int a_attrnamespace;
1441 	IN const char *a_name;
1442 	INOUT struct uio *a_uio;
1443 	OUT size_t *a_size;
1444 	IN struct ucred *a_cred;
1445 	IN struct thread *a_td;
1446 };
1447 */
1448 {
1449 	struct inode *ip;
1450 	u_char *eae, *p;
1451 	unsigned easize;
1452 	int error, ealen;
1453 
1454 	ip = VTOI(ap->a_vp);
1455 
1456 	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
1457 		return (EOPNOTSUPP);
1458 
1459 	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
1460 	    ap->a_cred, ap->a_td, VREAD);
1461 	if (error)
1462 		return (error);
1463 
1464 	error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
1465 	if (error)
1466 		return (error);
1467 
1468 	eae = ip->i_ea_area;
1469 	easize = ip->i_ea_len;
1470 
1471 	ealen = ffs_findextattr(eae, easize, ap->a_attrnamespace, ap->a_name,
1472 	    NULL, &p);
1473 	if (ealen >= 0) {
1474 		error = 0;
1475 		if (ap->a_size != NULL)
1476 			*ap->a_size = ealen;
1477 		else if (ap->a_uio != NULL)
1478 			error = uiomove(p, ealen, ap->a_uio);
1479 	} else
1480 		error = ENOATTR;
1481 
1482 	ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
1483 	return (error);
1484 }
1485 
1486 /*
1487  * Vnode operation to retrieve extended attributes on a vnode.
1488  */
1489 static int
1490 ffs_listextattr(struct vop_listextattr_args *ap)
1491 /*
1492 vop_listextattr {
1493 	IN struct vnode *a_vp;
1494 	IN int a_attrnamespace;
1495 	INOUT struct uio *a_uio;
1496 	OUT size_t *a_size;
1497 	IN struct ucred *a_cred;
1498 	IN struct thread *a_td;
1499 };
1500 */
1501 {
1502 	struct inode *ip;
1503 	struct extattr *eap, *eaend;
1504 	int error, ealen;
1505 
1506 	ip = VTOI(ap->a_vp);
1507 
1508 	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
1509 		return (EOPNOTSUPP);
1510 
1511 	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
1512 	    ap->a_cred, ap->a_td, VREAD);
1513 	if (error)
1514 		return (error);
1515 
1516 	error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
1517 	if (error)
1518 		return (error);
1519 
1520 	error = 0;
1521 	if (ap->a_size != NULL)
1522 		*ap->a_size = 0;
1523 
1524 	KASSERT(ALIGNED_TO(ip->i_ea_area, struct extattr), ("unaligned"));
1525 	eap = (struct extattr *)ip->i_ea_area;
1526 	eaend = (struct extattr *)(ip->i_ea_area + ip->i_ea_len);
1527 	for (; error == 0 && eap < eaend; eap = EXTATTR_NEXT(eap)) {
1528 		/* make sure this entry is complete */
1529 		if (EXTATTR_NEXT(eap) > eaend)
1530 			break;
1531 		if (eap->ea_namespace != ap->a_attrnamespace)
1532 			continue;
1533 
1534 		ealen = eap->ea_namelength;
1535 		if (ap->a_size != NULL)
1536 			*ap->a_size += ealen + 1;
1537 		else if (ap->a_uio != NULL)
1538 			error = uiomove(&eap->ea_namelength, ealen + 1,
1539 			    ap->a_uio);
1540 	}
1541 
1542 	ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
1543 	return (error);
1544 }
1545 
1546 /*
1547  * Vnode operation to set a named attribute.
1548  */
1549 static int
1550 ffs_setextattr(struct vop_setextattr_args *ap)
1551 /*
1552 vop_setextattr {
1553 	IN struct vnode *a_vp;
1554 	IN int a_attrnamespace;
1555 	IN const char *a_name;
1556 	INOUT struct uio *a_uio;
1557 	IN struct ucred *a_cred;
1558 	IN struct thread *a_td;
1559 };
1560 */
1561 {
1562 	struct inode *ip;
1563 	struct fs *fs;
1564 	struct extattr *eap;
1565 	uint32_t ealength, ul;
1566 	ssize_t ealen;
1567 	int olen, eapad1, eapad2, error, i, easize;
1568 	u_char *eae;
1569 	void *tmp;
1570 
1571 	ip = VTOI(ap->a_vp);
1572 	fs = ITOFS(ip);
1573 
1574 	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
1575 		return (EOPNOTSUPP);
1576 
1577 	if (strlen(ap->a_name) == 0)
1578 		return (EINVAL);
1579 
1580 	/* XXX Deleting an EA by passing a NULL uio is no longer supported. */
1581 	if (ap->a_uio == NULL)
1582 		return (EOPNOTSUPP);
1583 
1584 	if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
1585 		return (EROFS);
1586 
1587 	ealen = ap->a_uio->uio_resid;
1588 	if (ealen < 0 || ealen > lblktosize(fs, UFS_NXADDR))
1589 		return (EINVAL);
1590 
1591 	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
1592 	    ap->a_cred, ap->a_td, VWRITE);
1593 	if (error) {
1594 
1595 		/*
1596 		 * ffs_lock_ea is not needed there, because the vnode
1597 		 * must be exclusively locked.
1598 		 */
1599 		if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
1600 			ip->i_ea_error = error;
1601 		return (error);
1602 	}
1603 
1604 	error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
1605 	if (error)
1606 		return (error);
1607 
1608 	ealength = sizeof(uint32_t) + 3 + strlen(ap->a_name);
1609 	eapad1 = roundup2(ealength, 8) - ealength;
1610 	eapad2 = roundup2(ealen, 8) - ealen;
1611 	ealength += eapad1 + ealen + eapad2;
1612 
1613 	/*
1614 	 * CEM: rewrites of the same size or smaller could be done in-place
1615 	 * instead.  (We don't acquire any fine-grained locks in here either,
1616 	 * so we could also do bigger writes in-place.)
1617 	 */
1618 	eae = malloc(ip->i_ea_len + ealength, M_TEMP, M_WAITOK);
1619 	bcopy(ip->i_ea_area, eae, ip->i_ea_len);
1620 	easize = ip->i_ea_len;
1621 
1622 	olen = ffs_findextattr(eae, easize, ap->a_attrnamespace, ap->a_name,
1623 	    &eap, NULL);
1624 	if (olen == -1) {
1625 		/* new, append at end */
1626 		KASSERT(ALIGNED_TO(eae + easize, struct extattr),
1627 		    ("unaligned"));
1628 		eap = (struct extattr *)(eae + easize);
1629 		easize += ealength;
1630 	} else {
1631 		ul = eap->ea_length;
1632 		i = (u_char *)EXTATTR_NEXT(eap) - eae;
1633 		if (ul != ealength) {
1634 			bcopy(EXTATTR_NEXT(eap), (u_char *)eap + ealength,
1635 			    easize - i);
1636 			easize += (ealength - ul);
1637 		}
1638 	}
1639 	if (easize > lblktosize(fs, UFS_NXADDR)) {
1640 		free(eae, M_TEMP);
1641 		ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
1642 		if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
1643 			ip->i_ea_error = ENOSPC;
1644 		return (ENOSPC);
1645 	}
1646 	eap->ea_length = ealength;
1647 	eap->ea_namespace = ap->a_attrnamespace;
1648 	eap->ea_contentpadlen = eapad2;
1649 	eap->ea_namelength = strlen(ap->a_name);
1650 	memcpy(eap->ea_name, ap->a_name, strlen(ap->a_name));
1651 	bzero(&eap->ea_name[strlen(ap->a_name)], eapad1);
1652 	error = uiomove(EXTATTR_CONTENT(eap), ealen, ap->a_uio);
1653 	if (error) {
1654 		free(eae, M_TEMP);
1655 		ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
1656 		if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
1657 			ip->i_ea_error = error;
1658 		return (error);
1659 	}
1660 	bzero((u_char *)EXTATTR_CONTENT(eap) + ealen, eapad2);
1661 
1662 	tmp = ip->i_ea_area;
1663 	ip->i_ea_area = eae;
1664 	ip->i_ea_len = easize;
1665 	free(tmp, M_TEMP);
1666 	error = ffs_close_ea(ap->a_vp, 1, ap->a_cred, ap->a_td);
1667 	return (error);
1668 }
1669 
1670 /*
1671  * Vnode pointer to File handle
1672  */
1673 static int
1674 ffs_vptofh(struct vop_vptofh_args *ap)
1675 /*
1676 vop_vptofh {
1677 	IN struct vnode *a_vp;
1678 	IN struct fid *a_fhp;
1679 };
1680 */
1681 {
1682 	struct inode *ip;
1683 	struct ufid *ufhp;
1684 
1685 	ip = VTOI(ap->a_vp);
1686 	ufhp = (struct ufid *)ap->a_fhp;
1687 	ufhp->ufid_len = sizeof(struct ufid);
1688 	ufhp->ufid_ino = ip->i_number;
1689 	ufhp->ufid_gen = ip->i_gen;
1690 	return (0);
1691 }
1692 
1693 SYSCTL_DECL(_vfs_ffs);
1694 static int use_buf_pager = 1;
1695 SYSCTL_INT(_vfs_ffs, OID_AUTO, use_buf_pager, CTLFLAG_RWTUN, &use_buf_pager, 0,
1696     "Always use buffer pager instead of bmap");
1697 
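/*
 * Buffer pager callbacks: map a file offset to its logical block number
 * and report the size of a given logical block.
 */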
1698 static daddr_t
1699 ffs_gbp_getblkno(struct vnode *vp, vm_ooffset_t off)
1700 {
1701 
1702 	return (lblkno(VFSTOUFS(vp->v_mount)->um_fs, off));
1703 }
1704 
1705 static int
1706 ffs_gbp_getblksz(struct vnode *vp, daddr_t lbn)
1707 {
1708 
1709 	return (blksize(VFSTOUFS(vp->v_mount)->um_fs, VTOI(vp), lbn));
1710 }
1711 
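/*
 * VOP_GETPAGES: use the buffer pager unless it has been disabled via the
 * vfs.ffs.use_buf_pager sysctl and the underlying device's block size
 * allows the generic vnode pager to be used.
 */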
1712 static int
1713 ffs_getpages(struct vop_getpages_args *ap)
1714 {
1715 	struct vnode *vp;
1716 	struct ufsmount *um;
1717 
1718 	vp = ap->a_vp;
1719 	um = VFSTOUFS(vp->v_mount);
1720 
1721 	if (!use_buf_pager && um->um_devvp->v_bufobj.bo_bsize <= PAGE_SIZE)
1722 		return (vnode_pager_generic_getpages(vp, ap->a_m, ap->a_count,
1723 		    ap->a_rbehind, ap->a_rahead, NULL, NULL));
1724 	return (vfs_bio_getpages(vp, ap->a_m, ap->a_count, ap->a_rbehind,
1725 	    ap->a_rahead, ffs_gbp_getblkno, ffs_gbp_getblksz));
1726 }
1727