1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright (c) 2013 by Delphix. All rights reserved.
24  */
25 
26 /* Portions Copyright 2007 Jeremy Teo */
27 /* Portions Copyright 2010 Robert Milkowski */
28 
29 #include <sys/types.h>
30 #include <sys/param.h>
31 #include <sys/time.h>
32 #include <sys/systm.h>
33 #include <sys/sysmacros.h>
34 #include <sys/resource.h>
35 #include <sys/vfs.h>
36 #include <sys/vfs_opreg.h>
37 #include <sys/vnode.h>
38 #include <sys/file.h>
39 #include <sys/stat.h>
40 #include <sys/kmem.h>
41 #include <sys/taskq.h>
42 #include <sys/uio.h>
43 #include <sys/vmsystm.h>
44 #include <sys/atomic.h>
45 #include <sys/vm.h>
46 #include <vm/seg_vn.h>
47 #include <vm/pvn.h>
48 #include <vm/as.h>
49 #include <vm/kpm.h>
50 #include <vm/seg_kpm.h>
51 #include <sys/mman.h>
52 #include <sys/pathname.h>
53 #include <sys/cmn_err.h>
54 #include <sys/errno.h>
55 #include <sys/unistd.h>
56 #include <sys/zfs_dir.h>
57 #include <sys/zfs_acl.h>
58 #include <sys/zfs_ioctl.h>
59 #include <sys/fs/zfs.h>
60 #include <sys/dmu.h>
61 #include <sys/dmu_objset.h>
62 #include <sys/spa.h>
63 #include <sys/txg.h>
64 #include <sys/dbuf.h>
65 #include <sys/zap.h>
66 #include <sys/sa.h>
67 #include <sys/dirent.h>
68 #include <sys/policy.h>
69 #include <sys/sunddi.h>
70 #include <sys/filio.h>
71 #include <sys/sid.h>
72 #include "fs/fs_subr.h"
73 #include <sys/zfs_ctldir.h>
74 #include <sys/zfs_fuid.h>
75 #include <sys/zfs_sa.h>
76 #include <sys/dnlc.h>
77 #include <sys/zfs_rlock.h>
78 #include <sys/extdirent.h>
79 #include <sys/kidmap.h>
80 #include <sys/cred.h>
81 #include <sys/attr.h>
82 
83 /*
84  * Programming rules.
85  *
86  * Each vnode op performs some logical unit of work.  To do this, the ZPL must
87  * properly lock its in-core state, create a DMU transaction, do the work,
88  * record this work in the intent log (ZIL), commit the DMU transaction,
89  * and wait for the intent log to commit if it is a synchronous operation.
90  * Moreover, the vnode ops must work in both normal and log replay context.
91  * The ordering of events is important to avoid deadlocks and references
92  * to freed memory.  The example below illustrates the following Big Rules:
93  *
94  *  (1)	A check must be made in each zfs thread for a mounted file system.
95  *	This is done, while avoiding races, using ZFS_ENTER(zfsvfs).
96  *	A ZFS_EXIT(zfsvfs) is needed before all returns.  Any znodes
97  *	must be checked with ZFS_VERIFY_ZP(zp).  Both of these macros
98  *	can make the calling function return EIO.
99  *
100  *  (2)	VN_RELE() should always be the last thing except for zil_commit()
101  *	(if necessary) and ZFS_EXIT(). This is for 3 reasons:
102  *	First, if it's the last reference, the vnode/znode
103  *	can be freed, so the zp may point to freed memory.  Second, the last
104  *	reference will call zfs_zinactive(), which may induce a lot of work --
105  *	pushing cached pages (which acquires range locks) and syncing out
106  *	cached atime changes.  Third, zfs_zinactive() may require a new tx,
107  *	which could deadlock the system if you were already holding one.
108  *	If you must call VN_RELE() within a tx then use VN_RELE_ASYNC().
109  *
110  *  (3)	All range locks must be grabbed before calling dmu_tx_assign(),
111  *	as they can span dmu_tx_assign() calls.
112  *
113  *  (4)	Always pass TXG_NOWAIT as the second argument to dmu_tx_assign().
114  *	This is critical because we don't want to block while holding locks.
115  *	Note, in particular, that if a lock is sometimes acquired before
116  *	the tx is assigned, and sometimes after (e.g. z_lock), then failing to
117  *	use a non-blocking assign can deadlock the system.  The scenario:
118  *
119  *	Thread A has grabbed a lock before calling dmu_tx_assign().
120  *	Thread B is in an already-assigned tx, and blocks for this lock.
121  *	Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open()
122  *	forever, because the previous txg can't quiesce until B's tx commits.
123  *
124  *	If dmu_tx_assign() returns ERESTART, then drop all locks,
125  *	call dmu_tx_wait(), and try again.
126  *
127  *  (5)	If the operation succeeded, generate the intent log entry for it
128  *	before dropping locks.  This ensures that the ordering of events
129  *	in the intent log matches the order in which they actually occurred.
130  *	During ZIL replay the zfs_log_* functions will update the sequence
131  *	number to indicate that the zil transaction has been replayed.
132  *
133  *  (6)	At the end of each vnode op, the DMU tx must always commit,
134  *	regardless of whether there were any errors.
135  *
136  *  (7)	After dropping all locks, invoke zil_commit(zilog, foid)
137  *	to ensure that synchronous semantics are provided when necessary.
138  *
139  * In general, this is how things should be ordered in each vnode op:
140  *
141  *	ZFS_ENTER(zfsvfs);		// exit if unmounted
142  * top:
143  *	zfs_dirent_lock(&dl, ...)	// lock directory entry (may VN_HOLD())
144  *	rw_enter(...);			// grab any other locks you need
145  *	tx = dmu_tx_create(...);	// get DMU tx
146  *	dmu_tx_hold_*();		// hold each object you might modify
147  *	error = dmu_tx_assign(tx, TXG_NOWAIT);	// try to assign
148  *	if (error) {
149  *		rw_exit(...);		// drop locks
150  *		zfs_dirent_unlock(dl);	// unlock directory entry
151  *		VN_RELE(...);		// release held vnodes
152  *		if (error == ERESTART) {
153  *			dmu_tx_wait(tx);
154  *			dmu_tx_abort(tx);
155  *			goto top;
156  *		}
157  *		dmu_tx_abort(tx);	// abort DMU tx
158  *		ZFS_EXIT(zfsvfs);	// finished in zfs
159  *		return (error);		// really out of space
160  *	}
161  *	error = do_real_work();		// do whatever this VOP does
162  *	if (error == 0)
163  *		zfs_log_*(...);		// on success, make ZIL entry
164  *	dmu_tx_commit(tx);		// commit DMU tx -- error or not
165  *	rw_exit(...);			// drop locks
166  *	zfs_dirent_unlock(dl);		// unlock directory entry
167  *	VN_RELE(...);			// release held vnodes
168  *	zil_commit(zilog, foid);	// synchronous when necessary
169  *	ZFS_EXIT(zfsvfs);		// finished in zfs
170  *	return (error);			// done, report error
171  */
172 
173 /* ARGSUSED */
174 static int
175 zfs_open(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *ct)
176 {
177 	znode_t	*zp = VTOZ(*vpp);
178 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
179 
180 	ZFS_ENTER(zfsvfs);
181 	ZFS_VERIFY_ZP(zp);
182 
183 	if ((flag & FWRITE) && (zp->z_pflags & ZFS_APPENDONLY) &&
184 	    ((flag & FAPPEND) == 0)) {
185 		ZFS_EXIT(zfsvfs);
186 		return (SET_ERROR(EPERM));
187 	}
188 
189 	if (!zfs_has_ctldir(zp) && zp->z_zfsvfs->z_vscan &&
190 	    ZTOV(zp)->v_type == VREG &&
191 	    !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0) {
192 		if (fs_vscan(*vpp, cr, 0) != 0) {
193 			ZFS_EXIT(zfsvfs);
194 			return (SET_ERROR(EACCES));
195 		}
196 	}
197 
198 	/* Keep a count of the synchronous opens in the znode */
199 	if (flag & (FSYNC | FDSYNC))
200 		atomic_inc_32(&zp->z_sync_cnt);
201 
202 	ZFS_EXIT(zfsvfs);
203 	return (0);
204 }
205 
206 /* ARGSUSED */
207 static int
208 zfs_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *cr,
209     caller_context_t *ct)
210 {
211 	znode_t	*zp = VTOZ(vp);
212 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
213 
214 	/*
215 	 * Clean up any locks held by this process on the vp.
216 	 */
217 	cleanlocks(vp, ddi_get_pid(), 0);
218 	cleanshares(vp, ddi_get_pid());
219 
220 	ZFS_ENTER(zfsvfs);
221 	ZFS_VERIFY_ZP(zp);
222 
223 	/* Decrement the synchronous opens in the znode */
224 	if ((flag & (FSYNC | FDSYNC)) && (count == 1))
225 		atomic_dec_32(&zp->z_sync_cnt);
226 
227 	if (!zfs_has_ctldir(zp) && zp->z_zfsvfs->z_vscan &&
228 	    ZTOV(zp)->v_type == VREG &&
229 	    !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0)
230 		VERIFY(fs_vscan(vp, cr, 1) == 0);
231 
232 	ZFS_EXIT(zfsvfs);
233 	return (0);
234 }
235 
236 /*
237  * Lseek support for finding holes (cmd == _FIO_SEEK_HOLE) and
238  * data (cmd == _FIO_SEEK_DATA). "off" is an in/out parameter.
239  */
240 static int
241 zfs_holey(vnode_t *vp, int cmd, offset_t *off)
242 {
243 	znode_t	*zp = VTOZ(vp);
244 	uint64_t noff = (uint64_t)*off; /* new offset */
245 	uint64_t file_sz;
246 	int error;
247 	boolean_t hole;
248 
249 	file_sz = zp->z_size;
250 	if (noff >= file_sz)  {
251 		return (SET_ERROR(ENXIO));
252 	}
253 
254 	if (cmd == _FIO_SEEK_HOLE)
255 		hole = B_TRUE;
256 	else
257 		hole = B_FALSE;
258 
259 	error = dmu_offset_next(zp->z_zfsvfs->z_os, zp->z_id, hole, &noff);
260 
261 	/* end of file? */
262 	if ((error == ESRCH) || (noff > file_sz)) {
263 		/*
264 		 * Handle the virtual hole at the end of file.
265 		 */
266 		if (hole) {
267 			*off = file_sz;
268 			return (0);
269 		}
270 		return (SET_ERROR(ENXIO));
271 	}
272 
273 	if (noff < *off)
274 		return (error);
275 	*off = noff;
276 	return (error);
277 }
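/*
 * A sketch of how a userland consumer might drive the two seek ioctls
 * handled by zfs_holey(); "fd" and process_range() are hypothetical,
 * not part of this file:
 *
 *	offset_t off = 0, hole;
 *
 *	while (ioctl(fd, _FIO_SEEK_DATA, &off) == 0) {
 *		hole = off;
 *		if (ioctl(fd, _FIO_SEEK_HOLE, &hole) != 0)
 *			break;
 *		process_range(off, hole);	// [off, hole) holds data
 *		off = hole;
 *	}
 *	// the loop ends with ENXIO once off is at or beyond end-of-file
 */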
278 
279 /* ARGSUSED */
280 static int
281 zfs_ioctl(vnode_t *vp, int com, intptr_t data, int flag, cred_t *cred,
282     int *rvalp, caller_context_t *ct)
283 {
284 	offset_t off;
285 	int error;
286 	zfsvfs_t *zfsvfs;
287 	znode_t *zp;
288 
289 	switch (com) {
290 	case _FIOFFS:
291 		return (zfs_sync(vp->v_vfsp, 0, cred));
292 
293 		/*
294 		 * The following two ioctls are used by bfu.  We fake them
295 		 * out; this is necessary to avoid bfu errors.
296 		 */
297 	case _FIOGDIO:
298 	case _FIOSDIO:
299 		return (0);
300 
301 	case _FIO_SEEK_DATA:
302 	case _FIO_SEEK_HOLE:
303 		if (ddi_copyin((void *)data, &off, sizeof (off), flag))
304 			return (SET_ERROR(EFAULT));
305 
306 		zp = VTOZ(vp);
307 		zfsvfs = zp->z_zfsvfs;
308 		ZFS_ENTER(zfsvfs);
309 		ZFS_VERIFY_ZP(zp);
310 
311 		/* offset parameter is in/out */
312 		error = zfs_holey(vp, com, &off);
313 		ZFS_EXIT(zfsvfs);
314 		if (error)
315 			return (error);
316 		if (ddi_copyout(&off, (void *)data, sizeof (off), flag))
317 			return (SET_ERROR(EFAULT));
318 		return (0);
319 	}
320 	return (SET_ERROR(ENOTTY));
321 }
322 
323 /*
324  * Utility functions to map and unmap a single physical page.  These
325  * are used to manage the mappable copies of ZFS file data, and therefore
326  * do not update ref/mod bits.
327  */
328 caddr_t
329 zfs_map_page(page_t *pp, enum seg_rw rw)
330 {
331 	if (kpm_enable)
332 		return (hat_kpm_mapin(pp, 0));
333 	ASSERT(rw == S_READ || rw == S_WRITE);
334 	return (ppmapin(pp, PROT_READ | ((rw == S_WRITE) ? PROT_WRITE : 0),
335 	    (caddr_t)-1));
336 }
337 
338 void
339 zfs_unmap_page(page_t *pp, caddr_t addr)
340 {
341 	if (kpm_enable) {
342 		hat_kpm_mapout(pp, 0, addr);
343 	} else {
344 		ppmapout(addr);
345 	}
346 }
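/*
 * Typical usage, as in update_pages() and mappedread() below (sketch):
 *
 *	pp = page_lookup(vp, start, SE_SHARED);	// hold the page
 *	va = zfs_map_page(pp, S_WRITE);
 *	// ... copy up to PAGESIZE bytes through va ...
 *	zfs_unmap_page(pp, va);
 *	page_unlock(pp);
 */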
347 
348 /*
349  * When a file is memory mapped, we must keep the IO data synchronized
350  * between the DMU cache and the memory mapped pages.  What this means:
351  *
352  * On Write:	If we find a memory mapped page, we write to *both*
353  *		the page and the dmu buffer.
354  */
355 static void
356 update_pages(vnode_t *vp, int64_t start, int len, objset_t *os, uint64_t oid)
357 {
358 	int64_t	off;
359 
360 	off = start & PAGEOFFSET;
361 	for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
362 		page_t *pp;
363 		uint64_t nbytes = MIN(PAGESIZE - off, len);
364 
365 		if (pp = page_lookup(vp, start, SE_SHARED)) {
366 			caddr_t va;
367 
368 			va = zfs_map_page(pp, S_WRITE);
369 			(void) dmu_read(os, oid, start+off, nbytes, va+off,
370 			    DMU_READ_PREFETCH);
371 			zfs_unmap_page(pp, va);
372 			page_unlock(pp);
373 		}
374 		len -= nbytes;
375 		off = 0;
376 	}
377 }
378 
379 /*
380  * When a file is memory mapped, we must keep the IO data synchronized
381  * between the DMU cache and the memory mapped pages.  What this means:
382  *
383  * On Read:	We "read" preferentially from memory mapped pages,
384  *		otherwise we read from the dmu buffer.
385  *
386  * NOTE: We will always "break up" the IO into PAGESIZE uiomoves when
387  *	 the file is memory mapped.
388  */
389 static int
390 mappedread(vnode_t *vp, int nbytes, uio_t *uio)
391 {
392 	znode_t *zp = VTOZ(vp);
393 	objset_t *os = zp->z_zfsvfs->z_os;
394 	int64_t	start, off;
395 	int len = nbytes;
396 	int error = 0;
397 
398 	start = uio->uio_loffset;
399 	off = start & PAGEOFFSET;
400 	for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
401 		page_t *pp;
402 		uint64_t bytes = MIN(PAGESIZE - off, len);
403 
404 		if (pp = page_lookup(vp, start, SE_SHARED)) {
405 			caddr_t va;
406 
407 			va = zfs_map_page(pp, S_READ);
408 			error = uiomove(va + off, bytes, UIO_READ, uio);
409 			zfs_unmap_page(pp, va);
410 			page_unlock(pp);
411 		} else {
412 			error = dmu_read_uio(os, zp->z_id, uio, bytes);
413 		}
414 		len -= bytes;
415 		off = 0;
416 		if (error)
417 			break;
418 	}
419 	return (error);
420 }
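/*
 * Worked example of the splitting above, assuming PAGESIZE is 4096:
 * a 6000-byte read starting at offset 5000 begins at off = 904 within
 * the page at 4096, so it is performed as two segments of
 * 3192 (= 4096 - 904) and 2808 bytes, each independently satisfied
 * from either the page cache or the dmu.
 */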
421 
422 offset_t zfs_read_chunk_size = 1024 * 1024; /* Tunable */
423 
424 /*
425  * Read bytes from specified file into supplied buffer.
426  *
427  *	IN:	vp	- vnode of file to be read from.
428  *		uio	- structure supplying read location, range info,
429  *			  and return buffer.
430  *		ioflag	- SYNC flags; used to provide FRSYNC semantics.
431  *		cr	- credentials of caller.
432  *		ct	- caller context
433  *
434  *	OUT:	uio	- updated offset and range, buffer filled.
435  *
436  *	RETURN:	0 on success, error code on failure.
437  *
438  * Side Effects:
439  *	vp - atime updated if byte count > 0
440  */
441 /* ARGSUSED */
442 static int
443 zfs_read(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
444 {
445 	znode_t		*zp = VTOZ(vp);
446 	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
447 	objset_t	*os;
448 	ssize_t		n, nbytes;
449 	int		error = 0;
450 	rl_t		*rl;
451 	xuio_t		*xuio = NULL;
452 
453 	ZFS_ENTER(zfsvfs);
454 	ZFS_VERIFY_ZP(zp);
455 	os = zfsvfs->z_os;
456 
457 	if (zp->z_pflags & ZFS_AV_QUARANTINED) {
458 		ZFS_EXIT(zfsvfs);
459 		return (SET_ERROR(EACCES));
460 	}
461 
462 	/*
463 	 * Validate file offset
464 	 */
465 	if (uio->uio_loffset < (offset_t)0) {
466 		ZFS_EXIT(zfsvfs);
467 		return (SET_ERROR(EINVAL));
468 	}
469 
470 	/*
471 	 * Fasttrack empty reads
472 	 */
473 	if (uio->uio_resid == 0) {
474 		ZFS_EXIT(zfsvfs);
475 		return (0);
476 	}
477 
478 	/*
479 	 * Check for mandatory locks
480 	 */
481 	if (MANDMODE(zp->z_mode)) {
482 		if (error = chklock(vp, FREAD,
483 		    uio->uio_loffset, uio->uio_resid, uio->uio_fmode, ct)) {
484 			ZFS_EXIT(zfsvfs);
485 			return (error);
486 		}
487 	}
488 
489 	/*
490 	 * If we're in FRSYNC mode, sync out this znode before reading it.
491 	 */
492 	if (ioflag & FRSYNC || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
493 		zil_commit(zfsvfs->z_log, zp->z_id);
494 
495 	/*
496 	 * Lock the range against changes.
497 	 */
498 	rl = zfs_range_lock(zp, uio->uio_loffset, uio->uio_resid, RL_READER);
499 
500 	/*
501 	 * If we are reading past end-of-file we can skip
502 	 * to the end; but we might still need to set atime.
503 	 */
504 	if (uio->uio_loffset >= zp->z_size) {
505 		error = 0;
506 		goto out;
507 	}
508 
509 	ASSERT(uio->uio_loffset < zp->z_size);
510 	n = MIN(uio->uio_resid, zp->z_size - uio->uio_loffset);
511 
512 	if ((uio->uio_extflg == UIO_XUIO) &&
513 	    (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY)) {
514 		int nblk;
515 		int blksz = zp->z_blksz;
516 		uint64_t offset = uio->uio_loffset;
517 
518 		xuio = (xuio_t *)uio;
519 		if ((ISP2(blksz))) {
520 			nblk = (P2ROUNDUP(offset + n, blksz) - P2ALIGN(offset,
521 			    blksz)) / blksz;
522 		} else {
523 			ASSERT(offset + n <= blksz);
524 			nblk = 1;
525 		}
526 		(void) dmu_xuio_init(xuio, nblk);
527 
528 		if (vn_has_cached_data(vp)) {
529 			/*
530 			 * For simplicity, we always allocate a full buffer
531 			 * even if we only expect to read a portion of a block.
532 			 */
533 			while (--nblk >= 0) {
534 				(void) dmu_xuio_add(xuio,
535 				    dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
536 				    blksz), 0, blksz);
537 			}
538 		}
539 	}
540 
541 	while (n > 0) {
542 		nbytes = MIN(n, zfs_read_chunk_size -
543 		    P2PHASE(uio->uio_loffset, zfs_read_chunk_size));
544 
545 		if (vn_has_cached_data(vp))
546 			error = mappedread(vp, nbytes, uio);
547 		else
548 			error = dmu_read_uio(os, zp->z_id, uio, nbytes);
549 		if (error) {
550 			/* convert checksum errors into IO errors */
551 			if (error == ECKSUM)
552 				error = SET_ERROR(EIO);
553 			break;
554 		}
555 
556 		n -= nbytes;
557 	}
558 out:
559 	zfs_range_unlock(rl);
560 
561 	ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
562 	ZFS_EXIT(zfsvfs);
563 	return (error);
564 }
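/*
 * Worked example of the chunking above: with zfs_read_chunk_size at
 * its default of 1MB, a 3MB read starting at offset 1.5MB is issued
 * as chunks of 512KB, 1MB, 1MB, and 512KB, so that every chunk after
 * the first starts on a zfs_read_chunk_size boundary.
 */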
565 
566 /*
567  * Write the bytes to a file.
568  *
569  *	IN:	vp	- vnode of file to be written to.
570  *		uio	- structure supplying write location, range info,
571  *			  and data buffer.
572  *		ioflag	- FAPPEND, FSYNC, and/or FDSYNC.  FAPPEND is
573  *			  set if in append mode.
574  *		cr	- credentials of caller.
575  *		ct	- caller context (NFS/CIFS fem monitor only)
576  *
577  *	OUT:	uio	- updated offset and range.
578  *
579  *	RETURN:	0 on success, error code on failure.
580  *
581  * Timestamps:
582  *	vp - ctime|mtime updated if byte count > 0
583  */
584 
585 /* ARGSUSED */
586 static int
587 zfs_write(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
588 {
589 	znode_t		*zp = VTOZ(vp);
590 	rlim64_t	limit = uio->uio_llimit;
591 	ssize_t		start_resid = uio->uio_resid;
592 	ssize_t		tx_bytes;
593 	uint64_t	end_size;
594 	dmu_tx_t	*tx;
595 	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
596 	zilog_t		*zilog;
597 	offset_t	woff;
598 	ssize_t		n, nbytes;
599 	rl_t		*rl;
600 	int		max_blksz = zfsvfs->z_max_blksz;
601 	int		error = 0;
602 	arc_buf_t	*abuf;
603 	iovec_t		*aiov = NULL;
604 	xuio_t		*xuio = NULL;
605 	int		i_iov = 0;
606 	int		iovcnt = uio->uio_iovcnt;
607 	iovec_t		*iovp = uio->uio_iov;
608 	int		write_eof;
609 	int		count = 0;
610 	sa_bulk_attr_t	bulk[4];
611 	uint64_t	mtime[2], ctime[2];
612 
613 	/*
614 	 * Fasttrack empty write
615 	 */
616 	n = start_resid;
617 	if (n == 0)
618 		return (0);
619 
620 	if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)
621 		limit = MAXOFFSET_T;
622 
623 	ZFS_ENTER(zfsvfs);
624 	ZFS_VERIFY_ZP(zp);
625 
626 	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
627 	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
628 	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
629 	    &zp->z_size, 8);
630 	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
631 	    &zp->z_pflags, 8);
632 
633 	/*
634 	 * If immutable/read-only, or append-only and not appending, return EPERM
635 	 */
636 	if ((zp->z_pflags & (ZFS_IMMUTABLE | ZFS_READONLY)) ||
637 	    ((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & FAPPEND) &&
638 	    (uio->uio_loffset < zp->z_size))) {
639 		ZFS_EXIT(zfsvfs);
640 		return (SET_ERROR(EPERM));
641 	}
642 
643 	zilog = zfsvfs->z_log;
644 
645 	/*
646 	 * Validate file offset
647 	 */
648 	woff = ioflag & FAPPEND ? zp->z_size : uio->uio_loffset;
649 	if (woff < 0) {
650 		ZFS_EXIT(zfsvfs);
651 		return (SET_ERROR(EINVAL));
652 	}
653 
654 	/*
655 	 * Check for mandatory locks before calling zfs_range_lock()
656 	 * in order to prevent a deadlock with locks set via fcntl().
657 	 */
658 	if (MANDMODE((mode_t)zp->z_mode) &&
659 	    (error = chklock(vp, FWRITE, woff, n, uio->uio_fmode, ct)) != 0) {
660 		ZFS_EXIT(zfsvfs);
661 		return (error);
662 	}
663 
664 	/*
665 	 * Pre-fault the pages to ensure slow (e.g. NFS) pages
666 	 * don't hold up txg.
667 	 * Skip this if uio contains loaned arc_buf.
668 	 */
669 	if ((uio->uio_extflg == UIO_XUIO) &&
670 	    (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY))
671 		xuio = (xuio_t *)uio;
672 	else
673 		uio_prefaultpages(MIN(n, max_blksz), uio);
674 
675 	/*
676 	 * If in append mode, set the io offset pointer to eof.
677 	 */
678 	if (ioflag & FAPPEND) {
679 		/*
680 		 * Obtain an appending range lock to guarantee file append
681 		 * semantics.  We reset the write offset once we have the lock.
682 		 */
683 		rl = zfs_range_lock(zp, 0, n, RL_APPEND);
684 		woff = rl->r_off;
685 		if (rl->r_len == UINT64_MAX) {
686 			/*
687 			 * We overlocked the file because this write will cause
688 			 * the file block size to increase.
689 			 * Note that z_size cannot change with this lock held.
690 			 */
691 			woff = zp->z_size;
692 		}
693 		uio->uio_loffset = woff;
694 	} else {
695 		/*
696 		 * Note that if the file block size will change as a result of
697 		 * this write, then this range lock will lock the entire file
698 		 * so that we can re-write the block safely.
699 		 */
700 		rl = zfs_range_lock(zp, woff, n, RL_WRITER);
701 	}
702 
703 	if (woff >= limit) {
704 		zfs_range_unlock(rl);
705 		ZFS_EXIT(zfsvfs);
706 		return (SET_ERROR(EFBIG));
707 	}
708 
709 	if ((woff + n) > limit || woff > (limit - n))
710 		n = limit - woff;
711 
712 	/* Will this write extend the file length? */
713 	write_eof = (woff + n > zp->z_size);
714 
715 	end_size = MAX(zp->z_size, woff + n);
716 
717 	/*
718 	 * Write the file in reasonable size chunks.  Each chunk is written
719 	 * in a separate transaction; this keeps the intent log records small
720 	 * and allows us to do more fine-grained space accounting.
721 	 */
722 	while (n > 0) {
723 		abuf = NULL;
724 		woff = uio->uio_loffset;
725 again:
726 		if (zfs_owner_overquota(zfsvfs, zp, B_FALSE) ||
727 		    zfs_owner_overquota(zfsvfs, zp, B_TRUE)) {
728 			if (abuf != NULL)
729 				dmu_return_arcbuf(abuf);
730 			error = SET_ERROR(EDQUOT);
731 			break;
732 		}
733 
734 		if (xuio && abuf == NULL) {
735 			ASSERT(i_iov < iovcnt);
736 			aiov = &iovp[i_iov];
737 			abuf = dmu_xuio_arcbuf(xuio, i_iov);
738 			dmu_xuio_clear(xuio, i_iov);
739 			DTRACE_PROBE3(zfs_cp_write, int, i_iov,
740 			    iovec_t *, aiov, arc_buf_t *, abuf);
741 			ASSERT((aiov->iov_base == abuf->b_data) ||
742 			    ((char *)aiov->iov_base - (char *)abuf->b_data +
743 			    aiov->iov_len == arc_buf_size(abuf)));
744 			i_iov++;
745 		} else if (abuf == NULL && n >= max_blksz &&
746 		    woff >= zp->z_size &&
747 		    P2PHASE(woff, max_blksz) == 0 &&
748 		    zp->z_blksz == max_blksz) {
749 			/*
750 			 * This write covers a full block.  "Borrow" a buffer
751 			 * from the dmu so that we can fill it before we enter
752 			 * a transaction.  This avoids the possibility of
753 			 * holding up the transaction if the data copy hangs
754 			 * up on a pagefault (e.g., from an NFS server mapping).
755 			 */
756 			size_t cbytes;
757 
758 			abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
759 			    max_blksz);
760 			ASSERT(abuf != NULL);
761 			ASSERT(arc_buf_size(abuf) == max_blksz);
762 			if (error = uiocopy(abuf->b_data, max_blksz,
763 			    UIO_WRITE, uio, &cbytes)) {
764 				dmu_return_arcbuf(abuf);
765 				break;
766 			}
767 			ASSERT(cbytes == max_blksz);
768 		}
769 
770 		/*
771 		 * Start a transaction.
772 		 */
773 		tx = dmu_tx_create(zfsvfs->z_os);
774 		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
775 		dmu_tx_hold_write(tx, zp->z_id, woff, MIN(n, max_blksz));
776 		zfs_sa_upgrade_txholds(tx, zp);
777 		error = dmu_tx_assign(tx, TXG_NOWAIT);
778 		if (error) {
779 			if (error == ERESTART) {
780 				dmu_tx_wait(tx);
781 				dmu_tx_abort(tx);
782 				goto again;
783 			}
784 			dmu_tx_abort(tx);
785 			if (abuf != NULL)
786 				dmu_return_arcbuf(abuf);
787 			break;
788 		}
789 
790 		/*
791 		 * If zfs_range_lock() over-locked we grow the blocksize
792 		 * and then reduce the lock range.  This will only happen
793 		 * on the first iteration since zfs_range_reduce() will
794 		 * shrink down r_len to the appropriate size.
795 		 */
796 		if (rl->r_len == UINT64_MAX) {
797 			uint64_t new_blksz;
798 
799 			if (zp->z_blksz > max_blksz) {
800 				ASSERT(!ISP2(zp->z_blksz));
801 				new_blksz = MIN(end_size, SPA_MAXBLOCKSIZE);
802 			} else {
803 				new_blksz = MIN(end_size, max_blksz);
804 			}
805 			zfs_grow_blocksize(zp, new_blksz, tx);
806 			zfs_range_reduce(rl, woff, n);
807 		}
808 
809 		/*
810 		 * XXX - should we really limit each write to z_max_blksz?
811 		 * Perhaps we should use SPA_MAXBLOCKSIZE chunks?
812 		 */
813 		nbytes = MIN(n, max_blksz - P2PHASE(woff, max_blksz));
814 
815 		if (abuf == NULL) {
816 			tx_bytes = uio->uio_resid;
817 			error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),
818 			    uio, nbytes, tx);
819 			tx_bytes -= uio->uio_resid;
820 		} else {
821 			tx_bytes = nbytes;
822 			ASSERT(xuio == NULL || tx_bytes == aiov->iov_len);
823 			/*
824 			 * If this is not a full block write, but we are
825 			 * extending the file past EOF and this data starts
826 			 * block-aligned, use assign_arcbuf().  Otherwise,
827 			 * write via dmu_write().
828 			 */
829 			if (tx_bytes < max_blksz && (!write_eof ||
830 			    aiov->iov_base != abuf->b_data)) {
831 				ASSERT(xuio);
832 				dmu_write(zfsvfs->z_os, zp->z_id, woff,
833 				    aiov->iov_len, aiov->iov_base, tx);
834 				dmu_return_arcbuf(abuf);
835 				xuio_stat_wbuf_copied();
836 			} else {
837 				ASSERT(xuio || tx_bytes == max_blksz);
838 				dmu_assign_arcbuf(sa_get_db(zp->z_sa_hdl),
839 				    woff, abuf, tx);
840 			}
841 			ASSERT(tx_bytes <= uio->uio_resid);
842 			uioskip(uio, tx_bytes);
843 		}
844 		if (tx_bytes && vn_has_cached_data(vp)) {
845 			update_pages(vp, woff,
846 			    tx_bytes, zfsvfs->z_os, zp->z_id);
847 		}
848 
849 		/*
850 		 * If we made no progress, we're done.  If we made even
851 		 * partial progress, update the znode and ZIL accordingly.
852 		 */
853 		if (tx_bytes == 0) {
854 			(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
855 			    (void *)&zp->z_size, sizeof (uint64_t), tx);
856 			dmu_tx_commit(tx);
857 			ASSERT(error != 0);
858 			break;
859 		}
860 
861 		/*
862 		 * Clear Set-UID/Set-GID bits on successful write if not
863 		 * privileged and at least one of the execute bits is set.
864 		 *
865 		 * It would be nice to do this after all writes have
866 		 * been done, but that would still expose the ISUID/ISGID
867 		 * bits to another app after the partial write is committed.
868 		 *
869 		 * Note: we don't call zfs_fuid_map_id() here because
870 		 * user 0 is not an ephemeral uid.
871 		 */
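		/*
		 * In the test below, (S_IXUSR >> 3) is S_IXGRP and
		 * (S_IXUSR >> 6) is S_IXOTH, so the first condition
		 * checks for any execute bit.
		 */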
872 		mutex_enter(&zp->z_acl_lock);
873 		if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) |
874 		    (S_IXUSR >> 6))) != 0 &&
875 		    (zp->z_mode & (S_ISUID | S_ISGID)) != 0 &&
876 		    secpolicy_vnode_setid_retain(cr,
877 		    (zp->z_mode & S_ISUID) != 0 && zp->z_uid == 0) != 0) {
878 			uint64_t newmode;
879 			zp->z_mode &= ~(S_ISUID | S_ISGID);
880 			newmode = zp->z_mode;
881 			(void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs),
882 			    (void *)&newmode, sizeof (uint64_t), tx);
883 		}
884 		mutex_exit(&zp->z_acl_lock);
885 
886 		zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
887 		    B_TRUE);
888 
889 		/*
890 		 * Update the file size (z_size) if it has changed;
891 		 * account for possible concurrent updates.
892 		 */
893 		while ((end_size = zp->z_size) < uio->uio_loffset) {
894 			(void) atomic_cas_64(&zp->z_size, end_size,
895 			    uio->uio_loffset);
896 			ASSERT(error == 0);
897 		}
898 		/*
899 		 * If we are replaying and eof is non-zero then force
900 		 * the file size to the specified eof. Note, there's no
901 		 * concurrency during replay.
902 		 */
903 		if (zfsvfs->z_replay && zfsvfs->z_replay_eof != 0)
904 			zp->z_size = zfsvfs->z_replay_eof;
905 
906 		error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
907 
908 		zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, ioflag);
909 		dmu_tx_commit(tx);
910 
911 		if (error != 0)
912 			break;
913 		ASSERT(tx_bytes == nbytes);
914 		n -= nbytes;
915 
916 		if (!xuio && n > 0)
917 			uio_prefaultpages(MIN(n, max_blksz), uio);
918 	}
919 
920 	zfs_range_unlock(rl);
921 
922 	/*
923 	 * If we're in replay mode, or we made no progress, return error.
924 	 * Otherwise, it's at least a partial write, so it's successful.
925 	 */
926 	if (zfsvfs->z_replay || uio->uio_resid == start_resid) {
927 		ZFS_EXIT(zfsvfs);
928 		return (error);
929 	}
930 
931 	if (ioflag & (FSYNC | FDSYNC) ||
932 	    zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
933 		zil_commit(zilog, zp->z_id);
934 
935 	ZFS_EXIT(zfsvfs);
936 	return (0);
937 }
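/*
 * Note that partial progress is reported as success: if, say, a 10MB
 * write hits EDQUOT after the first 4MB chunk has been committed,
 * zfs_write() returns 0 and the caller observes a short write through
 * the residual count in the uio.  Only a write that makes no progress
 * at all returns the error.
 */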
938 
939 void
940 zfs_get_done(zgd_t *zgd, int error)
941 {
942 	znode_t *zp = zgd->zgd_private;
943 	objset_t *os = zp->z_zfsvfs->z_os;
944 
945 	if (zgd->zgd_db)
946 		dmu_buf_rele(zgd->zgd_db, zgd);
947 
948 	zfs_range_unlock(zgd->zgd_rl);
949 
950 	/*
951 	 * Release the vnode asynchronously as we currently have the
952 	 * txg stopped from syncing.
953 	 */
954 	VN_RELE_ASYNC(ZTOV(zp), dsl_pool_vnrele_taskq(dmu_objset_pool(os)));
955 
956 	if (error == 0 && zgd->zgd_bp)
957 		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);
958 
959 	kmem_free(zgd, sizeof (zgd_t));
960 }
961 
962 #ifdef DEBUG
963 static int zil_fault_io = 0;
964 #endif
965 
966 /*
967  * Get data to generate a TX_WRITE intent log record.
968  */
969 int
970 zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
971 {
972 	zfsvfs_t *zfsvfs = arg;
973 	objset_t *os = zfsvfs->z_os;
974 	znode_t *zp;
975 	uint64_t object = lr->lr_foid;
976 	uint64_t offset = lr->lr_offset;
977 	uint64_t size = lr->lr_length;
978 	blkptr_t *bp = &lr->lr_blkptr;
979 	dmu_buf_t *db;
980 	zgd_t *zgd;
981 	int error = 0;
982 
983 	ASSERT(zio != NULL);
984 	ASSERT(size != 0);
985 
986 	/*
987 	 * Nothing to do if the file has been removed
988 	 */
989 	if (zfs_zget(zfsvfs, object, &zp) != 0)
990 		return (SET_ERROR(ENOENT));
991 	if (zp->z_unlinked) {
992 		/*
993 		 * Release the vnode asynchronously as we currently have the
994 		 * txg stopped from syncing.
995 		 */
996 		VN_RELE_ASYNC(ZTOV(zp),
997 		    dsl_pool_vnrele_taskq(dmu_objset_pool(os)));
998 		return (SET_ERROR(ENOENT));
999 	}
1000 
1001 	zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
1002 	zgd->zgd_zilog = zfsvfs->z_log;
1003 	zgd->zgd_private = zp;
1004 
1005 	/*
1006 	 * Write records come in two flavors: immediate and indirect.
1007 	 * For small writes it's cheaper to store the data with the
1008 	 * log record (immediate); for large writes it's cheaper to
1009 	 * sync the data and get a pointer to it (indirect) so that
1010 	 * we don't have to write the data twice.
1011 	 */
1012 	if (buf != NULL) { /* immediate write */
1013 		zgd->zgd_rl = zfs_range_lock(zp, offset, size, RL_READER);
1014 		/* test for truncation needs to be done while range locked */
1015 		if (offset >= zp->z_size) {
1016 			error = SET_ERROR(ENOENT);
1017 		} else {
1018 			error = dmu_read(os, object, offset, size, buf,
1019 			    DMU_READ_NO_PREFETCH);
1020 		}
1021 		ASSERT(error == 0 || error == ENOENT);
1022 	} else { /* indirect write */
1023 		/*
1024 		 * Have to lock the whole block to ensure that no one can
1025 		 * change the data while it is written out and its checksum
1026 		 * is calculated.  We need to re-check the blocksize after
1027 		 * we get the lock, in case it has changed!
1028 		 */
1029 		for (;;) {
1030 			uint64_t blkoff;
1031 			size = zp->z_blksz;
1032 			blkoff = ISP2(size) ? P2PHASE(offset, size) : offset;
1033 			offset -= blkoff;
1034 			zgd->zgd_rl = zfs_range_lock(zp, offset, size,
1035 			    RL_READER);
1036 			if (zp->z_blksz == size)
1037 				break;
1038 			offset += blkoff;
1039 			zfs_range_unlock(zgd->zgd_rl);
1040 		}
1041 		/* test for truncation needs to be done while range locked */
1042 		if (lr->lr_offset >= zp->z_size)
1043 			error = SET_ERROR(ENOENT);
1044 #ifdef DEBUG
1045 		if (zil_fault_io) {
1046 			error = SET_ERROR(EIO);
1047 			zil_fault_io = 0;
1048 		}
1049 #endif
1050 		if (error == 0)
1051 			error = dmu_buf_hold(os, object, offset, zgd, &db,
1052 			    DMU_READ_NO_PREFETCH);
1053 
1054 		if (error == 0) {
1055 			blkptr_t *obp = dmu_buf_get_blkptr(db);
1056 			if (obp) {
1057 				ASSERT(BP_IS_HOLE(bp));
1058 				*bp = *obp;
1059 			}
1060 
1061 			zgd->zgd_db = db;
1062 			zgd->zgd_bp = bp;
1063 
1064 			ASSERT(db->db_offset == offset);
1065 			ASSERT(db->db_size == size);
1066 
1067 			error = dmu_sync(zio, lr->lr_common.lrc_txg,
1068 			    zfs_get_done, zgd);
1069 			ASSERT(error || lr->lr_length <= zp->z_blksz);
1070 
1071 			/*
1072 			 * On success, we need to wait for the write I/O
1073 			 * initiated by dmu_sync() to complete before we can
1074 			 * release this dbuf.  We will finish everything up
1075 			 * in the zfs_get_done() callback.
1076 			 */
1077 			if (error == 0)
1078 				return (0);
1079 
1080 			if (error == EALREADY) {
1081 				lr->lr_common.lrc_txtype = TX_WRITE2;
1082 				error = 0;
1083 			}
1084 		}
1085 	}
1086 
1087 	zfs_get_done(zgd, error);
1088 
1089 	return (error);
1090 }
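/*
 * The two cases above reduce to this (sketch):
 *
 *	if (buf != NULL)	// immediate: copy the data into the
 *		dmu_read(...);	// log record buffer itself
 *	else			// indirect: sync the block out and
 *		dmu_sync(...);	// point the log record at it
 *
 * When dmu_sync() returns EALREADY, the data was already written out
 * by the syncing txg, so the record is downgraded to TX_WRITE2.
 */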
1091 
1092 /*ARGSUSED*/
1093 static int
1094 zfs_access(vnode_t *vp, int mode, int flag, cred_t *cr,
1095     caller_context_t *ct)
1096 {
1097 	znode_t *zp = VTOZ(vp);
1098 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
1099 	int error;
1100 
1101 	ZFS_ENTER(zfsvfs);
1102 	ZFS_VERIFY_ZP(zp);
1103 
1104 	if (flag & V_ACE_MASK)
1105 		error = zfs_zaccess(zp, mode, flag, B_FALSE, cr);
1106 	else
1107 		error = zfs_zaccess_rwx(zp, mode, flag, cr);
1108 
1109 	ZFS_EXIT(zfsvfs);
1110 	return (error);
1111 }
1112 
1113 /*
1114  * If the vnode is for a device, return a specfs vnode instead.
1115  */
1116 static int
1117 specvp_check(vnode_t **vpp, cred_t *cr)
1118 {
1119 	int error = 0;
1120 
1121 	if (IS_DEVVP(*vpp)) {
1122 		struct vnode *svp;
1123 
1124 		svp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type, cr);
1125 		VN_RELE(*vpp);
1126 		if (svp == NULL)
1127 			error = SET_ERROR(ENOSYS);
1128 		*vpp = svp;
1129 	}
1130 	return (error);
1131 }
1132 
1133 
1134 /*
1135  * Lookup an entry in a directory, or an extended attribute directory.
1136  * If it exists, return a held vnode reference for it.
1137  *
1138  *	IN:	dvp	- vnode of directory to search.
1139  *		nm	- name of entry to lookup.
1140  *		pnp	- full pathname to lookup [UNUSED].
1141  *		flags	- LOOKUP_XATTR set if looking for an attribute.
1142  *		rdir	- root directory vnode [UNUSED].
1143  *		cr	- credentials of caller.
1144  *		ct	- caller context
1145  *		direntflags - directory lookup flags
1146  *		realpnp - returned pathname.
1147  *
1148  *	OUT:	vpp	- vnode of located entry, NULL if not found.
1149  *
1150  *	RETURN:	0 on success, error code on failure.
1151  *
1152  * Timestamps:
1153  *	NA
1154  */
1155 /* ARGSUSED */
1156 static int
1157 zfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct pathname *pnp,
1158     int flags, vnode_t *rdir, cred_t *cr,  caller_context_t *ct,
1159     int *direntflags, pathname_t *realpnp)
1160 {
1161 	znode_t *zdp = VTOZ(dvp);
1162 	zfsvfs_t *zfsvfs = zdp->z_zfsvfs;
1163 	int	error = 0;
1164 
1165 	/* fast path */
1166 	if (!(flags & (LOOKUP_XATTR | FIGNORECASE))) {
1167 
1168 		if (dvp->v_type != VDIR) {
1169 			return (SET_ERROR(ENOTDIR));
1170 		} else if (zdp->z_sa_hdl == NULL) {
1171 			return (SET_ERROR(EIO));
1172 		}
1173 
1174 		if (nm[0] == 0 || (nm[0] == '.' && nm[1] == '\0')) {
1175 			error = zfs_fastaccesschk_execute(zdp, cr);
1176 			if (!error) {
1177 				*vpp = dvp;
1178 				VN_HOLD(*vpp);
1179 				return (0);
1180 			}
1181 			return (error);
1182 		} else {
1183 			vnode_t *tvp = dnlc_lookup(dvp, nm);
1184 
1185 			if (tvp) {
1186 				error = zfs_fastaccesschk_execute(zdp, cr);
1187 				if (error) {
1188 					VN_RELE(tvp);
1189 					return (error);
1190 				}
1191 				if (tvp == DNLC_NO_VNODE) {
1192 					VN_RELE(tvp);
1193 					return (SET_ERROR(ENOENT));
1194 				} else {
1195 					*vpp = tvp;
1196 					return (specvp_check(vpp, cr));
1197 				}
1198 			}
1199 		}
1200 	}
1201 
1202 	DTRACE_PROBE2(zfs__fastpath__lookup__miss, vnode_t *, dvp, char *, nm);
1203 
1204 	ZFS_ENTER(zfsvfs);
1205 	ZFS_VERIFY_ZP(zdp);
1206 
1207 	*vpp = NULL;
1208 
1209 	if (flags & LOOKUP_XATTR) {
1210 		/*
1211 		 * If the xattr property is off, refuse the lookup request.
1212 		 */
1213 		if (!(zfsvfs->z_vfs->vfs_flag & VFS_XATTR)) {
1214 			ZFS_EXIT(zfsvfs);
1215 			return (SET_ERROR(EINVAL));
1216 		}
1217 
1218 		/*
1219 		 * We don't allow recursive attributes.
1220 		 * Maybe someday we will.
1221 		 */
1222 		if (zdp->z_pflags & ZFS_XATTR) {
1223 			ZFS_EXIT(zfsvfs);
1224 			return (SET_ERROR(EINVAL));
1225 		}
1226 
1227 		if (error = zfs_get_xattrdir(VTOZ(dvp), vpp, cr, flags)) {
1228 			ZFS_EXIT(zfsvfs);
1229 			return (error);
1230 		}
1231 
1232 		/*
1233 		 * Do we have permission to get into the attribute directory?
1234 		 */
1235 
1236 		if (error = zfs_zaccess(VTOZ(*vpp), ACE_EXECUTE, 0,
1237 		    B_FALSE, cr)) {
1238 			VN_RELE(*vpp);
1239 			*vpp = NULL;
1240 		}
1241 
1242 		ZFS_EXIT(zfsvfs);
1243 		return (error);
1244 	}
1245 
1246 	if (dvp->v_type != VDIR) {
1247 		ZFS_EXIT(zfsvfs);
1248 		return (SET_ERROR(ENOTDIR));
1249 	}
1250 
1251 	/*
1252 	 * Check accessibility of directory.
1253 	 */
1254 
1255 	if (error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr)) {
1256 		ZFS_EXIT(zfsvfs);
1257 		return (error);
1258 	}
1259 
1260 	if (zfsvfs->z_utf8 && u8_validate(nm, strlen(nm),
1261 	    NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
1262 		ZFS_EXIT(zfsvfs);
1263 		return (SET_ERROR(EILSEQ));
1264 	}
1265 
1266 	error = zfs_dirlook(zdp, nm, vpp, flags, direntflags, realpnp);
1267 	if (error == 0)
1268 		error = specvp_check(vpp, cr);
1269 
1270 	ZFS_EXIT(zfsvfs);
1271 	return (error);
1272 }
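/*
 * Fast-path summary: when LOOKUP_XATTR and FIGNORECASE are clear, a
 * lookup of "" or "." returns the directory itself with a new hold,
 * and a DNLC hit returns the cached vnode (or ENOENT for a negative
 * DNLC_NO_VNODE entry) without ever taking ZFS_ENTER(); only misses
 * fall through to zfs_dirlook().
 */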
1273 
1274 /*
1275  * Attempt to create a new entry in a directory.  If the entry
1276  * already exists, truncate the file if permissible, else return
1277  * an error.  Return the vp of the created or trunc'd file.
1278  *
1279  *	IN:	dvp	- vnode of directory to put new file entry in.
1280  *		name	- name of new file entry.
1281  *		vap	- attributes of new file.
1282  *		excl	- flag indicating exclusive or non-exclusive mode.
1283  *		mode	- mode to open file with.
1284  *		cr	- credentials of caller.
1285  *		flag	- large file flag [UNUSED].
1286  *		ct	- caller context
1287  *		vsecp 	- ACL to be set
1288  *
1289  *	OUT:	vpp	- vnode of created or trunc'd entry.
1290  *
1291  *	RETURN:	0 on success, error code on failure.
1292  *
1293  * Timestamps:
1294  *	dvp - ctime|mtime updated if new entry created
1295  *	 vp - ctime|mtime always, atime if new
1296  */
1297 
1298 /* ARGSUSED */
1299 static int
1300 zfs_create(vnode_t *dvp, char *name, vattr_t *vap, vcexcl_t excl,
1301     int mode, vnode_t **vpp, cred_t *cr, int flag, caller_context_t *ct,
1302     vsecattr_t *vsecp)
1303 {
1304 	znode_t		*zp, *dzp = VTOZ(dvp);
1305 	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
1306 	zilog_t		*zilog;
1307 	objset_t	*os;
1308 	zfs_dirlock_t	*dl;
1309 	dmu_tx_t	*tx;
1310 	int		error;
1311 	ksid_t		*ksid;
1312 	uid_t		uid;
1313 	gid_t		gid = crgetgid(cr);
1314 	zfs_acl_ids_t   acl_ids;
1315 	boolean_t	fuid_dirtied;
1316 	boolean_t	have_acl = B_FALSE;
1317 
1318 	/*
1319 	 * If we have an ephemeral id, ACL, or XVATTR then
1320 	 * make sure the file system is at the proper version.
1321 	 */
1322 
1323 	ksid = crgetsid(cr, KSID_OWNER);
1324 	if (ksid)
1325 		uid = ksid_getid(ksid);
1326 	else
1327 		uid = crgetuid(cr);
1328 
1329 	if (zfsvfs->z_use_fuids == B_FALSE &&
1330 	    (vsecp || (vap->va_mask & AT_XVATTR) ||
1331 	    IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
1332 		return (SET_ERROR(EINVAL));
1333 
1334 	ZFS_ENTER(zfsvfs);
1335 	ZFS_VERIFY_ZP(dzp);
1336 	os = zfsvfs->z_os;
1337 	zilog = zfsvfs->z_log;
1338 
1339 	if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
1340 	    NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
1341 		ZFS_EXIT(zfsvfs);
1342 		return (SET_ERROR(EILSEQ));
1343 	}
1344 
1345 	if (vap->va_mask & AT_XVATTR) {
1346 		if ((error = secpolicy_xvattr((xvattr_t *)vap,
1347 		    crgetuid(cr), cr, vap->va_type)) != 0) {
1348 			ZFS_EXIT(zfsvfs);
1349 			return (error);
1350 		}
1351 	}
1352 top:
1353 	*vpp = NULL;
1354 
1355 	if ((vap->va_mode & VSVTX) && secpolicy_vnode_stky_modify(cr))
1356 		vap->va_mode &= ~VSVTX;
1357 
1358 	if (*name == '\0') {
1359 		/*
1360 		 * Null component name refers to the directory itself.
1361 		 */
1362 		VN_HOLD(dvp);
1363 		zp = dzp;
1364 		dl = NULL;
1365 		error = 0;
1366 	} else {
1367 		/* possible VN_HOLD(zp) */
1368 		int zflg = 0;
1369 
1370 		if (flag & FIGNORECASE)
1371 			zflg |= ZCILOOK;
1372 
1373 		error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
1374 		    NULL, NULL);
1375 		if (error) {
1376 			if (have_acl)
1377 				zfs_acl_ids_free(&acl_ids);
1378 			if (strcmp(name, "..") == 0)
1379 				error = SET_ERROR(EISDIR);
1380 			ZFS_EXIT(zfsvfs);
1381 			return (error);
1382 		}
1383 	}
1384 
1385 	if (zp == NULL) {
1386 		uint64_t txtype;
1387 
1388 		/*
1389 		 * Create a new file object and update the directory
1390 		 * to reference it.
1391 		 */
1392 		if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) {
1393 			if (have_acl)
1394 				zfs_acl_ids_free(&acl_ids);
1395 			goto out;
1396 		}
1397 
1398 		/*
1399 		 * We only support the creation of regular files in
1400 		 * extended attribute directories.
1401 		 */
1402 
1403 		if ((dzp->z_pflags & ZFS_XATTR) &&
1404 		    (vap->va_type != VREG)) {
1405 			if (have_acl)
1406 				zfs_acl_ids_free(&acl_ids);
1407 			error = SET_ERROR(EINVAL);
1408 			goto out;
1409 		}
1410 
1411 		if (!have_acl && (error = zfs_acl_ids_create(dzp, 0, vap,
1412 		    cr, vsecp, &acl_ids)) != 0)
1413 			goto out;
1414 		have_acl = B_TRUE;
1415 
1416 		if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
1417 			zfs_acl_ids_free(&acl_ids);
1418 			error = SET_ERROR(EDQUOT);
1419 			goto out;
1420 		}
1421 
1422 		tx = dmu_tx_create(os);
1423 
1424 		dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
1425 		    ZFS_SA_BASE_ATTR_SIZE);
1426 
1427 		fuid_dirtied = zfsvfs->z_fuid_dirty;
1428 		if (fuid_dirtied)
1429 			zfs_fuid_txhold(zfsvfs, tx);
1430 		dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
1431 		dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
1432 		if (!zfsvfs->z_use_sa &&
1433 		    acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
1434 			dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
1435 			    0, acl_ids.z_aclp->z_acl_bytes);
1436 		}
1437 		error = dmu_tx_assign(tx, TXG_NOWAIT);
1438 		if (error) {
1439 			zfs_dirent_unlock(dl);
1440 			if (error == ERESTART) {
1441 				dmu_tx_wait(tx);
1442 				dmu_tx_abort(tx);
1443 				goto top;
1444 			}
1445 			zfs_acl_ids_free(&acl_ids);
1446 			dmu_tx_abort(tx);
1447 			ZFS_EXIT(zfsvfs);
1448 			return (error);
1449 		}
1450 		zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
1451 
1452 		if (fuid_dirtied)
1453 			zfs_fuid_sync(zfsvfs, tx);
1454 
1455 		(void) zfs_link_create(dl, zp, tx, ZNEW);
1456 		txtype = zfs_log_create_txtype(Z_FILE, vsecp, vap);
1457 		if (flag & FIGNORECASE)
1458 			txtype |= TX_CI;
1459 		zfs_log_create(zilog, tx, txtype, dzp, zp, name,
1460 		    vsecp, acl_ids.z_fuidp, vap);
1461 		zfs_acl_ids_free(&acl_ids);
1462 		dmu_tx_commit(tx);
1463 	} else {
1464 		int aflags = (flag & FAPPEND) ? V_APPEND : 0;
1465 
1466 		if (have_acl)
1467 			zfs_acl_ids_free(&acl_ids);
1468 		have_acl = B_FALSE;
1469 
1470 		/*
1471 		 * A directory entry already exists for this name.
1472 		 */
1473 		/*
1474 		 * Can't truncate an existing file if in exclusive mode.
1475 		 */
1476 		if (excl == EXCL) {
1477 			error = SET_ERROR(EEXIST);
1478 			goto out;
1479 		}
1480 		/*
1481 		 * Can't open a directory for writing.
1482 		 */
1483 		if ((ZTOV(zp)->v_type == VDIR) && (mode & S_IWRITE)) {
1484 			error = SET_ERROR(EISDIR);
1485 			goto out;
1486 		}
1487 		/*
1488 		 * Verify requested access to file.
1489 		 */
1490 		if (mode && (error = zfs_zaccess_rwx(zp, mode, aflags, cr))) {
1491 			goto out;
1492 		}
1493 
1494 		mutex_enter(&dzp->z_lock);
1495 		dzp->z_seq++;
1496 		mutex_exit(&dzp->z_lock);
1497 
1498 		/*
1499 		 * Truncate regular files if requested.
1500 		 */
1501 		if ((ZTOV(zp)->v_type == VREG) &&
1502 		    (vap->va_mask & AT_SIZE) && (vap->va_size == 0)) {
1503 			/* we can't hold any locks when calling zfs_freesp() */
1504 			zfs_dirent_unlock(dl);
1505 			dl = NULL;
1506 			error = zfs_freesp(zp, 0, 0, mode, TRUE);
1507 			if (error == 0) {
1508 				vnevent_create(ZTOV(zp), ct);
1509 			}
1510 		}
1511 	}
1512 out:
1513 
1514 	if (dl)
1515 		zfs_dirent_unlock(dl);
1516 
1517 	if (error) {
1518 		if (zp)
1519 			VN_RELE(ZTOV(zp));
1520 	} else {
1521 		*vpp = ZTOV(zp);
1522 		error = specvp_check(vpp, cr);
1523 	}
1524 
1525 	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
1526 		zil_commit(zilog, 0);
1527 
1528 	ZFS_EXIT(zfsvfs);
1529 	return (error);
1530 }
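/*
 * To recap the existing-entry case above: with excl == EXCL the create
 * fails with EEXIST; opening an existing directory for writing fails
 * with EISDIR; otherwise an existing regular file is truncated via
 * zfs_freesp() when the caller requested AT_SIZE 0.
 */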
1531 
1532 /*
1533  * Remove an entry from a directory.
1534  *
1535  *	IN:	dvp	- vnode of directory to remove entry from.
1536  *		name	- name of entry to remove.
1537  *		cr	- credentials of caller.
1538  *		ct	- caller context
1539  *		flags	- case flags
1540  *
1541  *	RETURN:	0 on success, error code on failure.
1542  *
1543  * Timestamps:
1544  *	dvp - ctime|mtime
1545  *	 vp - ctime (if nlink > 0)
1546  */
1547 
1548 uint64_t null_xattr = 0;
1549 
1550 /*ARGSUSED*/
1551 static int
1552 zfs_remove(vnode_t *dvp, char *name, cred_t *cr, caller_context_t *ct,
1553     int flags)
1554 {
1555 	znode_t		*zp, *dzp = VTOZ(dvp);
1556 	znode_t		*xzp;
1557 	vnode_t		*vp;
1558 	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
1559 	zilog_t		*zilog;
1560 	uint64_t	acl_obj, xattr_obj;
1561 	uint64_t 	xattr_obj_unlinked = 0;
1562 	uint64_t	obj = 0;
1563 	zfs_dirlock_t	*dl;
1564 	dmu_tx_t	*tx;
1565 	boolean_t	may_delete_now, delete_now = FALSE;
1566 	boolean_t	unlinked, toobig = FALSE;
1567 	uint64_t	txtype;
1568 	pathname_t	*realnmp = NULL;
1569 	pathname_t	realnm;
1570 	int		error;
1571 	int		zflg = ZEXISTS;
1572 
1573 	ZFS_ENTER(zfsvfs);
1574 	ZFS_VERIFY_ZP(dzp);
1575 	zilog = zfsvfs->z_log;
1576 
1577 	if (flags & FIGNORECASE) {
1578 		zflg |= ZCILOOK;
1579 		pn_alloc(&realnm);
1580 		realnmp = &realnm;
1581 	}
1582 
1583 top:
1584 	xattr_obj = 0;
1585 	xzp = NULL;
1586 	/*
1587 	 * Attempt to lock directory; fail if entry doesn't exist.
1588 	 */
1589 	if (error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
1590 	    NULL, realnmp)) {
1591 		if (realnmp)
1592 			pn_free(realnmp);
1593 		ZFS_EXIT(zfsvfs);
1594 		return (error);
1595 	}
1596 
1597 	vp = ZTOV(zp);
1598 
1599 	if (error = zfs_zaccess_delete(dzp, zp, cr)) {
1600 		goto out;
1601 	}
1602 
1603 	/*
1604 	 * Need to use rmdir for removing directories.
1605 	 */
1606 	if (vp->v_type == VDIR) {
1607 		error = SET_ERROR(EPERM);
1608 		goto out;
1609 	}
1610 
1611 	vnevent_remove(vp, dvp, name, ct);
1612 
1613 	if (realnmp)
1614 		dnlc_remove(dvp, realnmp->pn_buf);
1615 	else
1616 		dnlc_remove(dvp, name);
1617 
1618 	mutex_enter(&vp->v_lock);
1619 	may_delete_now = vp->v_count == 1 && !vn_has_cached_data(vp);
1620 	mutex_exit(&vp->v_lock);
1621 
1622 	/*
1623 	 * We may delete the znode now, or we may put it in the unlinked set;
1624 	 * it depends on whether we're the last link, and on whether there are
1625 	 * other holds on the vnode.  So we dmu_tx_hold() the right things to
1626 	 * allow for either case.
1627 	 */
1628 	obj = zp->z_id;
1629 	tx = dmu_tx_create(zfsvfs->z_os);
1630 	dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
1631 	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
1632 	zfs_sa_upgrade_txholds(tx, zp);
1633 	zfs_sa_upgrade_txholds(tx, dzp);
1634 	if (may_delete_now) {
1635 		toobig =
1636 		    zp->z_size > zp->z_blksz * DMU_MAX_DELETEBLKCNT;
1637 		/* if the file is too big, only hold_free a token amount */
1638 		dmu_tx_hold_free(tx, zp->z_id, 0,
1639 		    (toobig ? DMU_MAX_ACCESS : DMU_OBJECT_END));
1640 	}
1641 
1642 	/* are there any extended attributes? */
1643 	error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
1644 	    &xattr_obj, sizeof (xattr_obj));
1645 	if (error == 0 && xattr_obj) {
1646 		error = zfs_zget(zfsvfs, xattr_obj, &xzp);
1647 		ASSERT0(error);
1648 		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
1649 		dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
1650 	}
1651 
1652 	mutex_enter(&zp->z_lock);
1653 	if ((acl_obj = zfs_external_acl(zp)) != 0 && may_delete_now)
1654 		dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END);
1655 	mutex_exit(&zp->z_lock);
1656 
1657 	/* charge as an update -- would be nice not to charge at all */
1658 	dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
1659 
1660 	error = dmu_tx_assign(tx, TXG_NOWAIT);
1661 	if (error) {
1662 		zfs_dirent_unlock(dl);
1663 		VN_RELE(vp);
1664 		if (xzp)
1665 			VN_RELE(ZTOV(xzp));
1666 		if (error == ERESTART) {
1667 			dmu_tx_wait(tx);
1668 			dmu_tx_abort(tx);
1669 			goto top;
1670 		}
1671 		if (realnmp)
1672 			pn_free(realnmp);
1673 		dmu_tx_abort(tx);
1674 		ZFS_EXIT(zfsvfs);
1675 		return (error);
1676 	}
1677 
1678 	/*
1679 	 * Remove the directory entry.
1680 	 */
1681 	error = zfs_link_destroy(dl, zp, tx, zflg, &unlinked);
1682 
1683 	if (error) {
1684 		dmu_tx_commit(tx);
1685 		goto out;
1686 	}
1687 
1688 	if (unlinked) {
1689 
1690 		/*
1691 		 * Hold z_lock so that we can make sure that the ACL obj
1692 		 * hasn't changed.  It could have been deleted due to
1693 		 * zfs_sa_upgrade().
1694 		 */
1695 		mutex_enter(&zp->z_lock);
1696 		mutex_enter(&vp->v_lock);
1697 		(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
1698 		    &xattr_obj_unlinked, sizeof (xattr_obj_unlinked));
1699 		delete_now = may_delete_now && !toobig &&
1700 		    vp->v_count == 1 && !vn_has_cached_data(vp) &&
1701 		    xattr_obj == xattr_obj_unlinked && zfs_external_acl(zp) ==
1702 		    acl_obj;
1703 		mutex_exit(&vp->v_lock);
1704 	}
1705 
1706 	if (delete_now) {
1707 		if (xattr_obj_unlinked) {
1708 			ASSERT3U(xzp->z_links, ==, 2);
1709 			mutex_enter(&xzp->z_lock);
1710 			xzp->z_unlinked = 1;
1711 			xzp->z_links = 0;
1712 			error = sa_update(xzp->z_sa_hdl, SA_ZPL_LINKS(zfsvfs),
1713 			    &xzp->z_links, sizeof (xzp->z_links), tx);
1714 			ASSERT3U(error, ==, 0);
1715 			mutex_exit(&xzp->z_lock);
1716 			zfs_unlinked_add(xzp, tx);
1717 
1718 			if (zp->z_is_sa)
1719 				error = sa_remove(zp->z_sa_hdl,
1720 				    SA_ZPL_XATTR(zfsvfs), tx);
1721 			else
1722 				error = sa_update(zp->z_sa_hdl,
1723 				    SA_ZPL_XATTR(zfsvfs), &null_xattr,
1724 				    sizeof (uint64_t), tx);
1725 			ASSERT0(error);
1726 		}
1727 		mutex_enter(&vp->v_lock);
1728 		vp->v_count--;
1729 		ASSERT0(vp->v_count);
1730 		mutex_exit(&vp->v_lock);
1731 		mutex_exit(&zp->z_lock);
1732 		zfs_znode_delete(zp, tx);
1733 	} else if (unlinked) {
1734 		mutex_exit(&zp->z_lock);
1735 		zfs_unlinked_add(zp, tx);
1736 	}
1737 
1738 	txtype = TX_REMOVE;
1739 	if (flags & FIGNORECASE)
1740 		txtype |= TX_CI;
1741 	zfs_log_remove(zilog, tx, txtype, dzp, name, obj);
1742 
1743 	dmu_tx_commit(tx);
1744 out:
1745 	if (realnmp)
1746 		pn_free(realnmp);
1747 
1748 	zfs_dirent_unlock(dl);
1749 
1750 	if (!delete_now)
1751 		VN_RELE(vp);
1752 	if (xzp)
1753 		VN_RELE(ZTOV(xzp));
1754 
1755 	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
1756 		zil_commit(zilog, 0);
1757 
1758 	ZFS_EXIT(zfsvfs);
1759 	return (error);
1760 }
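/*
 * To recap the two removal paths above: if this was the last link and
 * nothing else holds the vnode (delete_now), the znode is destroyed in
 * the same transaction; otherwise an unlinked znode is placed in the
 * unlinked set and reclaimed later, once its last hold is released.
 */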
1761 
1762 /*
1763  * Create a new directory and insert it into dvp using the name
1764  * provided.  Return a pointer to the inserted directory.
1765  *
1766  *	IN:	dvp	- vnode of directory to add subdir to.
1767  *		dirname	- name of new directory.
1768  *		vap	- attributes of new directory.
1769  *		cr	- credentials of caller.
1770  *		ct	- caller context
1771  *		flags	- case flags
1772  *		vsecp	- ACL to be set
1773  *
1774  *	OUT:	vpp	- vnode of created directory.
1775  *
1776  *	RETURN:	0 on success, error code on failure.
1777  *
1778  * Timestamps:
1779  *	dvp - ctime|mtime updated
1780  *	 vp - ctime|mtime|atime updated
1781  */
1782 /*ARGSUSED*/
1783 static int
1784 zfs_mkdir(vnode_t *dvp, char *dirname, vattr_t *vap, vnode_t **vpp, cred_t *cr,
1785     caller_context_t *ct, int flags, vsecattr_t *vsecp)
1786 {
1787 	znode_t		*zp, *dzp = VTOZ(dvp);
1788 	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
1789 	zilog_t		*zilog;
1790 	zfs_dirlock_t	*dl;
1791 	uint64_t	txtype;
1792 	dmu_tx_t	*tx;
1793 	int		error;
1794 	int		zf = ZNEW;
1795 	ksid_t		*ksid;
1796 	uid_t		uid;
1797 	gid_t		gid = crgetgid(cr);
1798 	zfs_acl_ids_t   acl_ids;
1799 	boolean_t	fuid_dirtied;
1800 
1801 	ASSERT(vap->va_type == VDIR);
1802 
1803 	/*
1804 	 * If we have an ephemeral id, ACL, or XVATTR then
1805 	 * make sure the file system is at the proper version.
1806 	 */
1807 
1808 	ksid = crgetsid(cr, KSID_OWNER);
1809 	if (ksid)
1810 		uid = ksid_getid(ksid);
1811 	else
1812 		uid = crgetuid(cr);
1813 	if (zfsvfs->z_use_fuids == B_FALSE &&
1814 	    (vsecp || (vap->va_mask & AT_XVATTR) ||
1815 	    IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
1816 		return (SET_ERROR(EINVAL));
1817 
1818 	ZFS_ENTER(zfsvfs);
1819 	ZFS_VERIFY_ZP(dzp);
1820 	zilog = zfsvfs->z_log;
1821 
1822 	if (dzp->z_pflags & ZFS_XATTR) {
1823 		ZFS_EXIT(zfsvfs);
1824 		return (SET_ERROR(EINVAL));
1825 	}
1826 
1827 	if (zfsvfs->z_utf8 && u8_validate(dirname,
1828 	    strlen(dirname), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
1829 		ZFS_EXIT(zfsvfs);
1830 		return (SET_ERROR(EILSEQ));
1831 	}
1832 	if (flags & FIGNORECASE)
1833 		zf |= ZCILOOK;
1834 
1835 	if (vap->va_mask & AT_XVATTR) {
1836 		if ((error = secpolicy_xvattr((xvattr_t *)vap,
1837 		    crgetuid(cr), cr, vap->va_type)) != 0) {
1838 			ZFS_EXIT(zfsvfs);
1839 			return (error);
1840 		}
1841 	}
1842 
1843 	if ((error = zfs_acl_ids_create(dzp, 0, vap, cr,
1844 	    vsecp, &acl_ids)) != 0) {
1845 		ZFS_EXIT(zfsvfs);
1846 		return (error);
1847 	}
1848 	/*
1849 	 * First make sure the new directory doesn't exist.
1850 	 *
1851 	 * Existence is checked first to make sure we don't return
1852 	 * EACCES instead of EEXIST which can cause some applications
1853 	 * EACCES instead of EEXIST, which can cause some applications
1854 	 */
1855 top:
1856 	*vpp = NULL;
1857 
1858 	if (error = zfs_dirent_lock(&dl, dzp, dirname, &zp, zf,
1859 	    NULL, NULL)) {
1860 		zfs_acl_ids_free(&acl_ids);
1861 		ZFS_EXIT(zfsvfs);
1862 		return (error);
1863 	}
1864 
1865 	if (error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, 0, B_FALSE, cr)) {
1866 		zfs_acl_ids_free(&acl_ids);
1867 		zfs_dirent_unlock(dl);
1868 		ZFS_EXIT(zfsvfs);
1869 		return (error);
1870 	}
1871 
1872 	if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
1873 		zfs_acl_ids_free(&acl_ids);
1874 		zfs_dirent_unlock(dl);
1875 		ZFS_EXIT(zfsvfs);
1876 		return (SET_ERROR(EDQUOT));
1877 	}
1878 
1879 	/*
1880 	 * Add a new entry to the directory.
1881 	 */
1882 	tx = dmu_tx_create(zfsvfs->z_os);
1883 	dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname);
1884 	dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
1885 	fuid_dirtied = zfsvfs->z_fuid_dirty;
1886 	if (fuid_dirtied)
1887 		zfs_fuid_txhold(zfsvfs, tx);
1888 	if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
1889 		dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
1890 		    acl_ids.z_aclp->z_acl_bytes);
1891 	}
1892 
1893 	dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
1894 	    ZFS_SA_BASE_ATTR_SIZE);
1895 
1896 	error = dmu_tx_assign(tx, TXG_NOWAIT);
1897 	if (error) {
1898 		zfs_dirent_unlock(dl);
1899 		if (error == ERESTART) {
1900 			dmu_tx_wait(tx);
1901 			dmu_tx_abort(tx);
1902 			goto top;
1903 		}
1904 		zfs_acl_ids_free(&acl_ids);
1905 		dmu_tx_abort(tx);
1906 		ZFS_EXIT(zfsvfs);
1907 		return (error);
1908 	}
1909 
1910 	/*
1911 	 * Create new node.
1912 	 */
1913 	zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
1914 
1915 	if (fuid_dirtied)
1916 		zfs_fuid_sync(zfsvfs, tx);
1917 
1918 	/*
1919 	 * Now put new name in parent dir.
1920 	 */
1921 	(void) zfs_link_create(dl, zp, tx, ZNEW);
1922 
1923 	*vpp = ZTOV(zp);
1924 
1925 	txtype = zfs_log_create_txtype(Z_DIR, vsecp, vap);
1926 	if (flags & FIGNORECASE)
1927 		txtype |= TX_CI;
1928 	zfs_log_create(zilog, tx, txtype, dzp, zp, dirname, vsecp,
1929 	    acl_ids.z_fuidp, vap);
1930 
1931 	zfs_acl_ids_free(&acl_ids);
1932 
1933 	dmu_tx_commit(tx);
1934 
1935 	zfs_dirent_unlock(dl);
1936 
1937 	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
1938 		zil_commit(zilog, 0);
1939 
1940 	ZFS_EXIT(zfsvfs);
1941 	return (0);
1942 }
1943 
1944 /*
1945  * Remove a subdirectory entry.  If the current working
1946  * directory is the same as the subdir to be removed, the
1947  * remove will fail.
1948  *
1949  *	IN:	dvp	- vnode of directory to remove from.
1950  *		name	- name of directory to be removed.
1951  *		cwd	- vnode of current working directory.
1952  *		cr	- credentials of caller.
1953  *		ct	- caller context
1954  *		flags	- case flags
1955  *
1956  *	RETURN:	0 on success, error code on failure.
1957  *
1958  * Timestamps:
1959  *	dvp - ctime|mtime updated
1960  */
1961 /*ARGSUSED*/
1962 static int
1963 zfs_rmdir(vnode_t *dvp, char *name, vnode_t *cwd, cred_t *cr,
1964     caller_context_t *ct, int flags)
1965 {
1966 	znode_t		*dzp = VTOZ(dvp);
1967 	znode_t		*zp;
1968 	vnode_t		*vp;
1969 	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
1970 	zilog_t		*zilog;
1971 	zfs_dirlock_t	*dl;
1972 	dmu_tx_t	*tx;
1973 	int		error;
1974 	int		zflg = ZEXISTS;
1975 
1976 	ZFS_ENTER(zfsvfs);
1977 	ZFS_VERIFY_ZP(dzp);
1978 	zilog = zfsvfs->z_log;
1979 
1980 	if (flags & FIGNORECASE)
1981 		zflg |= ZCILOOK;
1982 top:
1983 	zp = NULL;
1984 
1985 	/*
1986 	 * Attempt to lock directory; fail if entry doesn't exist.
1987 	 */
1988 	if (error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
1989 	    NULL, NULL)) {
1990 		ZFS_EXIT(zfsvfs);
1991 		return (error);
1992 	}
1993 
1994 	vp = ZTOV(zp);
1995 
1996 	if (error = zfs_zaccess_delete(dzp, zp, cr)) {
1997 		goto out;
1998 	}
1999 
2000 	if (vp->v_type != VDIR) {
2001 		error = SET_ERROR(ENOTDIR);
2002 		goto out;
2003 	}
2004 
2005 	if (vp == cwd) {
2006 		error = SET_ERROR(EINVAL);
2007 		goto out;
2008 	}
2009 
2010 	vnevent_rmdir(vp, dvp, name, ct);
2011 
2012 	/*
2013 	 * Grab a lock on the directory to make sure that no one is
2014 	 * trying to add (or look up) entries while we are removing it.
2015 	 */
2016 	rw_enter(&zp->z_name_lock, RW_WRITER);
2017 
2018 	/*
2019 	 * Grab a lock on the parent pointer to make sure we play well
2020 	 * with the treewalk and directory rename code.
2021 	 */
2022 	rw_enter(&zp->z_parent_lock, RW_WRITER);
2023 
2024 	tx = dmu_tx_create(zfsvfs->z_os);
2025 	dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
2026 	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
2027 	dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
2028 	zfs_sa_upgrade_txholds(tx, zp);
2029 	zfs_sa_upgrade_txholds(tx, dzp);
2030 	error = dmu_tx_assign(tx, TXG_NOWAIT);
2031 	if (error) {
2032 		rw_exit(&zp->z_parent_lock);
2033 		rw_exit(&zp->z_name_lock);
2034 		zfs_dirent_unlock(dl);
2035 		VN_RELE(vp);
2036 		if (error == ERESTART) {
2037 			dmu_tx_wait(tx);
2038 			dmu_tx_abort(tx);
2039 			goto top;
2040 		}
2041 		dmu_tx_abort(tx);
2042 		ZFS_EXIT(zfsvfs);
2043 		return (error);
2044 	}
2045 
2046 	error = zfs_link_destroy(dl, zp, tx, zflg, NULL);
2047 
2048 	if (error == 0) {
2049 		uint64_t txtype = TX_RMDIR;
2050 		if (flags & FIGNORECASE)
2051 			txtype |= TX_CI;
2052 		zfs_log_remove(zilog, tx, txtype, dzp, name, ZFS_NO_OBJECT);
2053 	}
2054 
2055 	dmu_tx_commit(tx);
2056 
2057 	rw_exit(&zp->z_parent_lock);
2058 	rw_exit(&zp->z_name_lock);
2059 out:
2060 	zfs_dirent_unlock(dl);
2061 
2062 	VN_RELE(vp);
2063 
2064 	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
2065 		zil_commit(zilog, 0);
2066 
2067 	ZFS_EXIT(zfsvfs);
2068 	return (error);
2069 }
2070 
2071 /*
2072  * Read as many directory entries as will fit into the provided
2073  * buffer from the given directory cursor position (specified in
2074  * the uio structure).
2075  *
2076  *	IN:	vp	- vnode of directory to read.
2077  *		uio	- structure supplying read location, range info,
2078  *			  and return buffer.
2079  *		cr	- credentials of caller.
2080  *		ct	- caller context
2081  *		flags	- case flags
2082  *
2083  *	OUT:	uio	- updated offset and range, buffer filled.
2084  *		eofp	- set to true if end-of-file detected.
2085  *
2086  *	RETURN:	0 on success, error code on failure.
2087  *
2088  * Timestamps:
2089  *	vp - atime updated
2090  *
2091  * Note that the low 4 bits of the cookie returned by the ZAP are always zero.
2092  * This allows us to use the low range for "special" directory entries:
2093  * We use 0 for '.', and 1 for '..'.  If this is the root of the filesystem,
2094  * we use the offset 2 for the '.zfs' directory.
2095  */
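/*
 * Illustrative sketch of the cookie space described above (derived from
 * the code below; not normative):
 *
 *	offset 0 -> "."    (objnum = zp->z_id)
 *	offset 1 -> ".."   (objnum = parent)
 *	offset 2 -> ".zfs" (objnum = ZFSCTL_INO_ROOT; root directory only)
 *	larger offsets are opaque serialized ZAP cursors, restored with
 *	zap_cursor_init_serialized() and advanced with
 *	zap_cursor_advance()/zap_cursor_serialize().
 */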
2096 /* ARGSUSED */
2097 static int
2098 zfs_readdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp,
2099     caller_context_t *ct, int flags)
2100 {
2101 	znode_t		*zp = VTOZ(vp);
2102 	iovec_t		*iovp;
2103 	edirent_t	*eodp;
2104 	dirent64_t	*odp;
2105 	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
2106 	objset_t	*os;
2107 	caddr_t		outbuf;
2108 	size_t		bufsize;
2109 	zap_cursor_t	zc;
2110 	zap_attribute_t	zap;
2111 	uint_t		bytes_wanted;
2112 	uint64_t	offset; /* must be unsigned; checks for < 1 */
2113 	uint64_t	parent;
2114 	int		local_eof;
2115 	int		outcount;
2116 	int		error;
2117 	uint8_t		prefetch;
2118 	boolean_t	check_sysattrs;
2119 
2120 	ZFS_ENTER(zfsvfs);
2121 	ZFS_VERIFY_ZP(zp);
2122 
2123 	if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
2124 	    &parent, sizeof (parent))) != 0) {
2125 		ZFS_EXIT(zfsvfs);
2126 		return (error);
2127 	}
2128 
2129 	/*
2130 	 * If we are not given an eof variable,
2131 	 * use a local one.
2132 	 */
2133 	if (eofp == NULL)
2134 		eofp = &local_eof;
2135 
2136 	/*
2137 	 * Check for valid iov_len.
2138 	 */
2139 	if (uio->uio_iov->iov_len <= 0) {
2140 		ZFS_EXIT(zfsvfs);
2141 		return (SET_ERROR(EINVAL));
2142 	}
2143 
2144 	/*
2145 	 * Quit if the directory has been removed (POSIX).
2146 	 */
2147 	if ((*eofp = zp->z_unlinked) != 0) {
2148 		ZFS_EXIT(zfsvfs);
2149 		return (0);
2150 	}
2151 
2152 	error = 0;
2153 	os = zfsvfs->z_os;
2154 	offset = uio->uio_loffset;
2155 	prefetch = zp->z_zn_prefetch;
2156 
2157 	/*
2158 	 * Initialize the iterator cursor.
2159 	 */
2160 	if (offset <= 3) {
2161 		/*
2162 		 * Start iteration from the beginning of the directory.
2163 		 */
2164 		zap_cursor_init(&zc, os, zp->z_id);
2165 	} else {
2166 		/*
2167 		 * The offset is a serialized cursor.
2168 		 */
2169 		zap_cursor_init_serialized(&zc, os, zp->z_id, offset);
2170 	}
2171 
2172 	/*
2173 	 * Get space to convert directory entries into fs-independent format.
2174 	 */
2175 	iovp = uio->uio_iov;
2176 	bytes_wanted = iovp->iov_len;
2177 	if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1) {
2178 		bufsize = bytes_wanted;
2179 		outbuf = kmem_alloc(bufsize, KM_SLEEP);
2180 		odp = (struct dirent64 *)outbuf;
2181 	} else {
2182 		bufsize = bytes_wanted;
2183 		outbuf = NULL;
2184 		odp = (struct dirent64 *)iovp->iov_base;
2185 	}
2186 	eodp = (struct edirent *)odp;
2187 
2188 	/*
2189 	 * If this VFS supports the system attribute view interface, and
2190 	 * we're looking at an extended attribute directory, and we care
2191 	 * about normalization conflicts on this vfs, then we must check
2192 	 * for normalization conflicts with the sysattr name space.
2193 	 */
2194 	check_sysattrs = vfs_has_feature(vp->v_vfsp, VFSFT_SYSATTR_VIEWS) &&
2195 	    (vp->v_flag & V_XATTRDIR) && zfsvfs->z_norm &&
2196 	    (flags & V_RDDIR_ENTFLAGS);
2197 
2198 	/*
2199 	 * Transform to file-system independent format
2200 	 */
2201 	outcount = 0;
2202 	while (outcount < bytes_wanted) {
2203 		ino64_t objnum;
2204 		ushort_t reclen;
2205 		off64_t *next = NULL;
2206 
2207 		/*
2208 		 * Special case `.', `..', and `.zfs'.
2209 		 */
2210 		if (offset == 0) {
2211 			(void) strcpy(zap.za_name, ".");
2212 			zap.za_normalization_conflict = 0;
2213 			objnum = zp->z_id;
2214 		} else if (offset == 1) {
2215 			(void) strcpy(zap.za_name, "..");
2216 			zap.za_normalization_conflict = 0;
2217 			objnum = parent;
2218 		} else if (offset == 2 && zfs_show_ctldir(zp)) {
2219 			(void) strcpy(zap.za_name, ZFS_CTLDIR_NAME);
2220 			zap.za_normalization_conflict = 0;
2221 			objnum = ZFSCTL_INO_ROOT;
2222 		} else {
2223 			/*
2224 			 * Grab next entry.
2225 			 */
2226 			if (error = zap_cursor_retrieve(&zc, &zap)) {
2227 				if ((*eofp = (error == ENOENT)) != 0)
2228 					break;
2229 				else
2230 					goto update;
2231 			}
2232 
2233 			if (zap.za_integer_length != 8 ||
2234 			    zap.za_num_integers != 1) {
2235 				cmn_err(CE_WARN, "zap_readdir: bad directory "
2236 				    "entry, obj = %lld, offset = %lld\n",
2237 				    (u_longlong_t)zp->z_id,
2238 				    (u_longlong_t)offset);
2239 				error = SET_ERROR(ENXIO);
2240 				goto update;
2241 			}
2242 
2243 			objnum = ZFS_DIRENT_OBJ(zap.za_first_integer);
2244 			/*
2245 			 * Mac OS X can extract the object type here, e.g.:
2246 			 * uint8_t type = ZFS_DIRENT_TYPE(zap.za_first_integer);
2247 			 */
2248 
2249 			if (check_sysattrs && !zap.za_normalization_conflict) {
2250 				zap.za_normalization_conflict =
2251 				    xattr_sysattr_casechk(zap.za_name);
2252 			}
2253 		}
2254 
2255 		if (flags & V_RDDIR_ACCFILTER) {
2256 			/*
2257 			 * If we have no access at all, don't include
2258 			 * this entry in the returned information
2259 			 */
2260 			znode_t	*ezp;
2261 			if (zfs_zget(zp->z_zfsvfs, objnum, &ezp) != 0)
2262 				goto skip_entry;
2263 			if (!zfs_has_access(ezp, cr)) {
2264 				VN_RELE(ZTOV(ezp));
2265 				goto skip_entry;
2266 			}
2267 			VN_RELE(ZTOV(ezp));
2268 		}
2269 
2270 		if (flags & V_RDDIR_ENTFLAGS)
2271 			reclen = EDIRENT_RECLEN(strlen(zap.za_name));
2272 		else
2273 			reclen = DIRENT64_RECLEN(strlen(zap.za_name));
2274 
2275 		/*
2276 		 * Will this entry fit in the buffer?
2277 		 */
2278 		if (outcount + reclen > bufsize) {
2279 			/*
2280 			 * Did we manage to fit anything in the buffer?
2281 			 */
2282 			if (!outcount) {
2283 				error = SET_ERROR(EINVAL);
2284 				goto update;
2285 			}
2286 			break;
2287 		}
2288 		if (flags & V_RDDIR_ENTFLAGS) {
2289 			/*
2290 			 * Add extended flag entry:
2291 			 */
2292 			eodp->ed_ino = objnum;
2293 			eodp->ed_reclen = reclen;
2294 			/* NOTE: ed_off is the offset for the *next* entry */
2295 			next = &(eodp->ed_off);
2296 			eodp->ed_eflags = zap.za_normalization_conflict ?
2297 			    ED_CASE_CONFLICT : 0;
2298 			(void) strncpy(eodp->ed_name, zap.za_name,
2299 			    EDIRENT_NAMELEN(reclen));
2300 			eodp = (edirent_t *)((intptr_t)eodp + reclen);
2301 		} else {
2302 			/*
2303 			 * Add normal entry:
2304 			 */
2305 			odp->d_ino = objnum;
2306 			odp->d_reclen = reclen;
2307 			/* NOTE: d_off is the offset for the *next* entry */
2308 			next = &(odp->d_off);
2309 			(void) strncpy(odp->d_name, zap.za_name,
2310 			    DIRENT64_NAMELEN(reclen));
2311 			odp = (dirent64_t *)((intptr_t)odp + reclen);
2312 		}
2313 		outcount += reclen;
2314 
2315 		ASSERT(outcount <= bufsize);
2316 
2317 		/* Prefetch znode */
2318 		if (prefetch)
2319 			dmu_prefetch(os, objnum, 0, 0);
2320 
2321 	skip_entry:
2322 		/*
2323 		 * Move to the next entry, fill in the previous offset.
2324 		 */
2325 		if (offset > 2 || (offset == 2 && !zfs_show_ctldir(zp))) {
2326 			zap_cursor_advance(&zc);
2327 			offset = zap_cursor_serialize(&zc);
2328 		} else {
2329 			offset += 1;
2330 		}
2331 		if (next)
2332 			*next = offset;
2333 	}
2334 	zp->z_zn_prefetch = B_FALSE; /* a lookup will re-enable pre-fetching */
2335 
2336 	if (uio->uio_segflg == UIO_SYSSPACE && uio->uio_iovcnt == 1) {
2337 		iovp->iov_base += outcount;
2338 		iovp->iov_len -= outcount;
2339 		uio->uio_resid -= outcount;
2340 	} else if (error = uiomove(outbuf, (long)outcount, UIO_READ, uio)) {
2341 		/*
2342 		 * Reset the pointer.
2343 		 */
2344 		offset = uio->uio_loffset;
2345 	}
2346 
2347 update:
2348 	zap_cursor_fini(&zc);
2349 	if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1)
2350 		kmem_free(outbuf, bufsize);
2351 
2352 	if (error == ENOENT)
2353 		error = 0;
2354 
2355 	ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
2356 
2357 	uio->uio_loffset = offset;
2358 	ZFS_EXIT(zfsvfs);
2359 	return (error);
2360 }
2361 
2362 ulong_t zfs_fsync_sync_cnt = 4;
2363 
2364 static int
2365 zfs_fsync(vnode_t *vp, int syncflag, cred_t *cr, caller_context_t *ct)
2366 {
2367 	znode_t	*zp = VTOZ(vp);
2368 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
2369 
2370 	/*
2371 	 * Regardless of whether this is required for standards conformance,
2372 	 * this is the logical behavior when fsync() is called on a file with
2373 	 * dirty pages.  We use B_ASYNC since the ZIL transactions are already
2374 	 * going to be pushed out as part of the zil_commit().
2375 	 */
2376 	if (vn_has_cached_data(vp) && !(syncflag & FNODSYNC) &&
2377 	    (vp->v_type == VREG) && !(IS_SWAPVP(vp)))
2378 		(void) VOP_PUTPAGE(vp, (offset_t)0, (size_t)0, B_ASYNC, cr, ct);
2379 
2380 	(void) tsd_set(zfs_fsyncer_key, (void *)zfs_fsync_sync_cnt);
2381 
2382 	if (zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) {
2383 		ZFS_ENTER(zfsvfs);
2384 		ZFS_VERIFY_ZP(zp);
2385 		zil_commit(zfsvfs->z_log, zp->z_id);
2386 		ZFS_EXIT(zfsvfs);
2387 	}
2388 	return (0);
2389 }
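
/*
 * For context (a hedged note; the flag mapping below is the generic VFS
 * convention, not something stated in this file): fsync(3C) typically
 * arrives here as VOP_FSYNC(vp, FSYNC, cr, ct) and fdatasync(3C) as
 * syncflag FDSYNC; only FNODSYNC suppresses the page push above.
 */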
2390 
2391 
2392 /*
2393  * Get the requested file attributes and place them in the provided
2394  * vattr structure.
2395  *
2396  *	IN:	vp	- vnode of file.
2397  *		vap	- va_mask identifies requested attributes.
2398  *			  If AT_XVATTR set, then optional attrs are requested
2399  *		flags	- ATTR_NOACLCHECK (CIFS server context)
2400  *		cr	- credentials of caller.
2401  *		ct	- caller context
2402  *
2403  *	OUT:	vap	- attribute values.
2404  *
2405  *	RETURN:	0 (always succeeds).
2406  */
2407 /* ARGSUSED */
2408 static int
2409 zfs_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
2410     caller_context_t *ct)
2411 {
2412 	znode_t *zp = VTOZ(vp);
2413 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
2414 	int	error = 0;
2415 	uint64_t links;
2416 	uint64_t mtime[2], ctime[2];
2417 	xvattr_t *xvap = (xvattr_t *)vap;	/* vap may be an xvattr_t * */
2418 	xoptattr_t *xoap = NULL;
2419 	boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
2420 	sa_bulk_attr_t bulk[2];
2421 	int count = 0;
2422 
2423 	ZFS_ENTER(zfsvfs);
2424 	ZFS_VERIFY_ZP(zp);
2425 
2426 	zfs_fuid_map_ids(zp, cr, &vap->va_uid, &vap->va_gid);
2427 
2428 	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
2429 	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
2430 
2431 	if ((error = sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) != 0) {
2432 		ZFS_EXIT(zfsvfs);
2433 		return (error);
2434 	}
2435 
2436 	/*
2437 	 * If the ACL is trivial, don't bother looking for ACE_READ_ATTRIBUTES.
2438 	 * Also, if we are the owner, don't bother, since the owner should
2439 	 * always be allowed to read the basic attributes of the file.
2440 	 */
2441 	if (!(zp->z_pflags & ZFS_ACL_TRIVIAL) &&
2442 	    (vap->va_uid != crgetuid(cr))) {
2443 		if (error = zfs_zaccess(zp, ACE_READ_ATTRIBUTES, 0,
2444 		    skipaclchk, cr)) {
2445 			ZFS_EXIT(zfsvfs);
2446 			return (error);
2447 		}
2448 	}
2449 
2450 	/*
2451 	 * Return all attributes.  It's cheaper to provide the answer
2452 	 * than to determine whether we were asked the question.
2453 	 */
2454 
2455 	mutex_enter(&zp->z_lock);
2456 	vap->va_type = vp->v_type;
2457 	vap->va_mode = zp->z_mode & MODEMASK;
2458 	vap->va_fsid = zp->z_zfsvfs->z_vfs->vfs_dev;
2459 	vap->va_nodeid = zp->z_id;
2460 	if ((vp->v_flag & VROOT) && zfs_show_ctldir(zp))
2461 		links = zp->z_links + 1;
2462 	else
2463 		links = zp->z_links;
2464 	vap->va_nlink = MIN(links, UINT32_MAX);	/* nlink_t limit! */
2465 	vap->va_size = zp->z_size;
2466 	vap->va_rdev = vp->v_rdev;
2467 	vap->va_seq = zp->z_seq;
2468 
2469 	/*
2470 	 * Add in any requested optional attributes and the create time.
2471 	 * Also set the corresponding bits in the returned attribute bitmap.
2472 	 */
2473 	if ((xoap = xva_getxoptattr(xvap)) != NULL && zfsvfs->z_use_fuids) {
2474 		if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
2475 			xoap->xoa_archive =
2476 			    ((zp->z_pflags & ZFS_ARCHIVE) != 0);
2477 			XVA_SET_RTN(xvap, XAT_ARCHIVE);
2478 		}
2479 
2480 		if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
2481 			xoap->xoa_readonly =
2482 			    ((zp->z_pflags & ZFS_READONLY) != 0);
2483 			XVA_SET_RTN(xvap, XAT_READONLY);
2484 		}
2485 
2486 		if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
2487 			xoap->xoa_system =
2488 			    ((zp->z_pflags & ZFS_SYSTEM) != 0);
2489 			XVA_SET_RTN(xvap, XAT_SYSTEM);
2490 		}
2491 
2492 		if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
2493 			xoap->xoa_hidden =
2494 			    ((zp->z_pflags & ZFS_HIDDEN) != 0);
2495 			XVA_SET_RTN(xvap, XAT_HIDDEN);
2496 		}
2497 
2498 		if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
2499 			xoap->xoa_nounlink =
2500 			    ((zp->z_pflags & ZFS_NOUNLINK) != 0);
2501 			XVA_SET_RTN(xvap, XAT_NOUNLINK);
2502 		}
2503 
2504 		if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
2505 			xoap->xoa_immutable =
2506 			    ((zp->z_pflags & ZFS_IMMUTABLE) != 0);
2507 			XVA_SET_RTN(xvap, XAT_IMMUTABLE);
2508 		}
2509 
2510 		if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
2511 			xoap->xoa_appendonly =
2512 			    ((zp->z_pflags & ZFS_APPENDONLY) != 0);
2513 			XVA_SET_RTN(xvap, XAT_APPENDONLY);
2514 		}
2515 
2516 		if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
2517 			xoap->xoa_nodump =
2518 			    ((zp->z_pflags & ZFS_NODUMP) != 0);
2519 			XVA_SET_RTN(xvap, XAT_NODUMP);
2520 		}
2521 
2522 		if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
2523 			xoap->xoa_opaque =
2524 			    ((zp->z_pflags & ZFS_OPAQUE) != 0);
2525 			XVA_SET_RTN(xvap, XAT_OPAQUE);
2526 		}
2527 
2528 		if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
2529 			xoap->xoa_av_quarantined =
2530 			    ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0);
2531 			XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);
2532 		}
2533 
2534 		if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
2535 			xoap->xoa_av_modified =
2536 			    ((zp->z_pflags & ZFS_AV_MODIFIED) != 0);
2537 			XVA_SET_RTN(xvap, XAT_AV_MODIFIED);
2538 		}
2539 
2540 		if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) &&
2541 		    vp->v_type == VREG) {
2542 			zfs_sa_get_scanstamp(zp, xvap);
2543 		}
2544 
2545 		if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) {
2546 			uint64_t times[2];
2547 
2548 			(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CRTIME(zfsvfs),
2549 			    times, sizeof (times));
2550 			ZFS_TIME_DECODE(&xoap->xoa_createtime, times);
2551 			XVA_SET_RTN(xvap, XAT_CREATETIME);
2552 		}
2553 
2554 		if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
2555 			xoap->xoa_reparse = ((zp->z_pflags & ZFS_REPARSE) != 0);
2556 			XVA_SET_RTN(xvap, XAT_REPARSE);
2557 		}
2558 		if (XVA_ISSET_REQ(xvap, XAT_GEN)) {
2559 			xoap->xoa_generation = zp->z_gen;
2560 			XVA_SET_RTN(xvap, XAT_GEN);
2561 		}
2562 
2563 		if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) {
2564 			xoap->xoa_offline =
2565 			    ((zp->z_pflags & ZFS_OFFLINE) != 0);
2566 			XVA_SET_RTN(xvap, XAT_OFFLINE);
2567 		}
2568 
2569 		if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) {
2570 			xoap->xoa_sparse =
2571 			    ((zp->z_pflags & ZFS_SPARSE) != 0);
2572 			XVA_SET_RTN(xvap, XAT_SPARSE);
2573 		}
2574 	}
2575 
2576 	ZFS_TIME_DECODE(&vap->va_atime, zp->z_atime);
2577 	ZFS_TIME_DECODE(&vap->va_mtime, mtime);
2578 	ZFS_TIME_DECODE(&vap->va_ctime, ctime);
2579 
2580 	mutex_exit(&zp->z_lock);
2581 
2582 	sa_object_size(zp->z_sa_hdl, &vap->va_blksize, &vap->va_nblocks);
2583 
2584 	if (zp->z_blksz == 0) {
2585 		/*
2586 		 * Block size hasn't been set; suggest maximal I/O transfers.
2587 		 */
2588 		vap->va_blksize = zfsvfs->z_max_blksz;
2589 	}
2590 
2591 	ZFS_EXIT(zfsvfs);
2592 	return (0);
2593 }
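
/*
 * Illustrative caller sketch (hypothetical code, not part of this file):
 * requesting one of the optional attributes handled above through the
 * xvattr interface:
 *
 *	xvattr_t xva;
 *	boolean_t ro = B_FALSE;
 *
 *	xva_init(&xva);
 *	XVA_SET_REQ(&xva, XAT_READONLY);
 *	if (VOP_GETATTR(vp, &xva.xva_vattr, 0, cr, ct) == 0 &&
 *	    XVA_ISSET_RTN(&xva, XAT_READONLY))
 *		ro = xva.xva_xoptattrs.xoa_readonly;
 */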
2594 
2595 /*
2596  * Set the file attributes to the values contained in the
2597  * vattr structure.
2598  *
2599  *	IN:	vp	- vnode of file to be modified.
2600  *		vap	- new attribute values.
2601  *			  If AT_XVATTR set, then optional attrs are being set
2602  *		flags	- ATTR_UTIME set if non-default time values provided.
2603  *			- ATTR_NOACLCHECK (CIFS context only).
2604  *		cr	- credentials of caller.
2605  *		ct	- caller context
2606  *
2607  *	RETURN:	0 on success, error code on failure.
2608  *
2609  * Timestamps:
2610  *	vp - ctime updated, mtime updated if size changed.
2611  */
2612 /* ARGSUSED */
2613 static int
2614 zfs_setattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
2615     caller_context_t *ct)
2616 {
2617 	znode_t		*zp = VTOZ(vp);
2618 	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
2619 	zilog_t		*zilog;
2620 	dmu_tx_t	*tx;
2621 	vattr_t		oldva;
2622 	xvattr_t	tmpxvattr;
2623 	uint_t		mask = vap->va_mask;
2624 	uint_t		saved_mask = 0;
2625 	int		trim_mask = 0;
2626 	uint64_t	new_mode;
2627 	uint64_t	new_uid, new_gid;
2628 	uint64_t	xattr_obj;
2629 	uint64_t	mtime[2], ctime[2];
2630 	znode_t		*attrzp;
2631 	int		need_policy = FALSE;
2632 	int		err, err2;
2633 	zfs_fuid_info_t *fuidp = NULL;
2634 	xvattr_t *xvap = (xvattr_t *)vap;	/* vap may be an xvattr_t * */
2635 	xoptattr_t	*xoap;
2636 	zfs_acl_t	*aclp;
2637 	boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
2638 	boolean_t	fuid_dirtied = B_FALSE;
2639 	sa_bulk_attr_t	bulk[7], xattr_bulk[7];
2640 	int		count = 0, xattr_count = 0;
2641 
2642 	if (mask == 0)
2643 		return (0);
2644 
2645 	if (mask & AT_NOSET)
2646 		return (SET_ERROR(EINVAL));
2647 
2648 	ZFS_ENTER(zfsvfs);
2649 	ZFS_VERIFY_ZP(zp);
2650 
2651 	zilog = zfsvfs->z_log;
2652 
2653 	/*
2654 	 * Make sure that if an ephemeral uid/gid or an xvattr is specified,
2655 	 * the file system is at the proper version level.
2656 	 */
2657 
2658 	if (zfsvfs->z_use_fuids == B_FALSE &&
2659 	    (((mask & AT_UID) && IS_EPHEMERAL(vap->va_uid)) ||
2660 	    ((mask & AT_GID) && IS_EPHEMERAL(vap->va_gid)) ||
2661 	    (mask & AT_XVATTR))) {
2662 		ZFS_EXIT(zfsvfs);
2663 		return (SET_ERROR(EINVAL));
2664 	}
2665 
2666 	if (mask & AT_SIZE && vp->v_type == VDIR) {
2667 		ZFS_EXIT(zfsvfs);
2668 		return (SET_ERROR(EISDIR));
2669 	}
2670 
2671 	if (mask & AT_SIZE && vp->v_type != VREG && vp->v_type != VFIFO) {
2672 		ZFS_EXIT(zfsvfs);
2673 		return (SET_ERROR(EINVAL));
2674 	}
2675 
2676 	/*
2677 	 * If this is an xvattr_t, then get a pointer to the structure of
2678 	 * optional attributes.  If this is NULL, then we have a vattr_t.
2679 	 */
2680 	xoap = xva_getxoptattr(xvap);
2681 
2682 	xva_init(&tmpxvattr);
2683 
2684 	/*
2685 	 * For immutable files, only the immutable bit and atime may be altered.
2686 	 */
2687 	if ((zp->z_pflags & ZFS_IMMUTABLE) &&
2688 	    ((mask & (AT_SIZE|AT_UID|AT_GID|AT_MTIME|AT_MODE)) ||
2689 	    ((mask & AT_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME)))) {
2690 		ZFS_EXIT(zfsvfs);
2691 		return (SET_ERROR(EPERM));
2692 	}
2693 
2694 	if ((mask & AT_SIZE) && (zp->z_pflags & ZFS_READONLY)) {
2695 		ZFS_EXIT(zfsvfs);
2696 		return (SET_ERROR(EPERM));
2697 	}
2698 
2699 	/*
2700 	 * Verify that the timestamps don't overflow 32 bits.
2701 	 * ZFS can handle large timestamps, but 32-bit syscalls can't
2702 	 * handle times past January 2038.  This check should be removed
2703 	 * once large timestamps are fully supported.
2704 	 */
2705 	if (mask & (AT_ATIME | AT_MTIME)) {
2706 		if (((mask & AT_ATIME) && TIMESPEC_OVERFLOW(&vap->va_atime)) ||
2707 		    ((mask & AT_MTIME) && TIMESPEC_OVERFLOW(&vap->va_mtime))) {
2708 			ZFS_EXIT(zfsvfs);
2709 			return (SET_ERROR(EOVERFLOW));
2710 		}
2711 	}
2712 
2713 top:
2714 	attrzp = NULL;
2715 	aclp = NULL;
2716 
2717 	/* Can this be moved to before the top label? */
2718 	if (zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) {
2719 		ZFS_EXIT(zfsvfs);
2720 		return (SET_ERROR(EROFS));
2721 	}
2722 
2723 	/*
2724 	 * First validate permissions
2725 	 */
2726 
2727 	if (mask & AT_SIZE) {
2728 		err = zfs_zaccess(zp, ACE_WRITE_DATA, 0, skipaclchk, cr);
2729 		if (err) {
2730 			ZFS_EXIT(zfsvfs);
2731 			return (err);
2732 		}
2733 		/*
2734 		 * XXX - Note, we are not providing any open
2735 		 * mode flags here (like FNDELAY), so we may
2736 		 * block if there are locks present... this
2737 		 * should be addressed in openat().
2738 		 */
2739 		/* XXX - would it be OK to generate a log record here? */
2740 		err = zfs_freesp(zp, vap->va_size, 0, 0, FALSE);
2741 		if (err) {
2742 			ZFS_EXIT(zfsvfs);
2743 			return (err);
2744 		}
2745 	}
2746 
2747 	if (mask & (AT_ATIME|AT_MTIME) ||
2748 	    ((mask & AT_XVATTR) && (XVA_ISSET_REQ(xvap, XAT_HIDDEN) ||
2749 	    XVA_ISSET_REQ(xvap, XAT_READONLY) ||
2750 	    XVA_ISSET_REQ(xvap, XAT_ARCHIVE) ||
2751 	    XVA_ISSET_REQ(xvap, XAT_OFFLINE) ||
2752 	    XVA_ISSET_REQ(xvap, XAT_SPARSE) ||
2753 	    XVA_ISSET_REQ(xvap, XAT_CREATETIME) ||
2754 	    XVA_ISSET_REQ(xvap, XAT_SYSTEM)))) {
2755 		need_policy = zfs_zaccess(zp, ACE_WRITE_ATTRIBUTES, 0,
2756 		    skipaclchk, cr);
2757 	}
2758 
2759 	if (mask & (AT_UID|AT_GID)) {
2760 		int	idmask = (mask & (AT_UID|AT_GID));
2761 		int	take_owner;
2762 		int	take_group;
2763 
2764 		/*
2765 		 * NOTE: even if a new mode is being set,
2766 		 * we may clear S_ISUID/S_ISGID bits.
2767 		 */
2768 
2769 		if (!(mask & AT_MODE))
2770 			vap->va_mode = zp->z_mode;
2771 
2772 		/*
2773 		 * Take ownership, or chgrp to a group we are a member of.
2774 		 */
2775 
2776 		take_owner = (mask & AT_UID) && (vap->va_uid == crgetuid(cr));
2777 		take_group = (mask & AT_GID) &&
2778 		    zfs_groupmember(zfsvfs, vap->va_gid, cr);
2779 
2780 		/*
2781 		 * If both AT_UID and AT_GID are set then take_owner and
2782 		 * take_group must both be set in order to allow taking
2783 		 * ownership.
2784 		 *
2785 		 * Otherwise, send the check through secpolicy_vnode_setattr()
2786 		 *
2787 		 */
2788 
2789 		if (((idmask == (AT_UID|AT_GID)) && take_owner && take_group) ||
2790 		    ((idmask == AT_UID) && take_owner) ||
2791 		    ((idmask == AT_GID) && take_group)) {
2792 			if (zfs_zaccess(zp, ACE_WRITE_OWNER, 0,
2793 			    skipaclchk, cr) == 0) {
2794 				/*
2795 				 * Remove setuid/setgid for non-privileged users
2796 				 */
2797 				secpolicy_setid_clear(vap, cr);
2798 				trim_mask = (mask & (AT_UID|AT_GID));
2799 			} else {
2800 				need_policy = TRUE;
2801 			}
2802 		} else {
2803 			need_policy = TRUE;
2804 		}
2805 	}
2806 
2807 	mutex_enter(&zp->z_lock);
2808 	oldva.va_mode = zp->z_mode;
2809 	zfs_fuid_map_ids(zp, cr, &oldva.va_uid, &oldva.va_gid);
2810 	if (mask & AT_XVATTR) {
2811 		/*
2812 		 * Update xvattr mask to include only those attributes
2813 		 * that are actually changing.
2814 		 *
2815 		 * The bits will be restored prior to actually setting
2816 		 * the attributes, so the caller thinks they were set.
2817 		 */
2818 		if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
2819 			if (xoap->xoa_appendonly !=
2820 			    ((zp->z_pflags & ZFS_APPENDONLY) != 0)) {
2821 				need_policy = TRUE;
2822 			} else {
2823 				XVA_CLR_REQ(xvap, XAT_APPENDONLY);
2824 				XVA_SET_REQ(&tmpxvattr, XAT_APPENDONLY);
2825 			}
2826 		}
2827 
2828 		if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
2829 			if (xoap->xoa_nounlink !=
2830 			    ((zp->z_pflags & ZFS_NOUNLINK) != 0)) {
2831 				need_policy = TRUE;
2832 			} else {
2833 				XVA_CLR_REQ(xvap, XAT_NOUNLINK);
2834 				XVA_SET_REQ(&tmpxvattr, XAT_NOUNLINK);
2835 			}
2836 		}
2837 
2838 		if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
2839 			if (xoap->xoa_immutable !=
2840 			    ((zp->z_pflags & ZFS_IMMUTABLE) != 0)) {
2841 				need_policy = TRUE;
2842 			} else {
2843 				XVA_CLR_REQ(xvap, XAT_IMMUTABLE);
2844 				XVA_SET_REQ(&tmpxvattr, XAT_IMMUTABLE);
2845 			}
2846 		}
2847 
2848 		if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
2849 			if (xoap->xoa_nodump !=
2850 			    ((zp->z_pflags & ZFS_NODUMP) != 0)) {
2851 				need_policy = TRUE;
2852 			} else {
2853 				XVA_CLR_REQ(xvap, XAT_NODUMP);
2854 				XVA_SET_REQ(&tmpxvattr, XAT_NODUMP);
2855 			}
2856 		}
2857 
2858 		if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
2859 			if (xoap->xoa_av_modified !=
2860 			    ((zp->z_pflags & ZFS_AV_MODIFIED) != 0)) {
2861 				need_policy = TRUE;
2862 			} else {
2863 				XVA_CLR_REQ(xvap, XAT_AV_MODIFIED);
2864 				XVA_SET_REQ(&tmpxvattr, XAT_AV_MODIFIED);
2865 			}
2866 		}
2867 
2868 		if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
2869 			if ((vp->v_type != VREG &&
2870 			    xoap->xoa_av_quarantined) ||
2871 			    xoap->xoa_av_quarantined !=
2872 			    ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0)) {
2873 				need_policy = TRUE;
2874 			} else {
2875 				XVA_CLR_REQ(xvap, XAT_AV_QUARANTINED);
2876 				XVA_SET_REQ(&tmpxvattr, XAT_AV_QUARANTINED);
2877 			}
2878 		}
2879 
2880 		if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
2881 			mutex_exit(&zp->z_lock);
2882 			ZFS_EXIT(zfsvfs);
2883 			return (SET_ERROR(EPERM));
2884 		}
2885 
2886 		if (need_policy == FALSE &&
2887 		    (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) ||
2888 		    XVA_ISSET_REQ(xvap, XAT_OPAQUE))) {
2889 			need_policy = TRUE;
2890 		}
2891 	}
2892 
2893 	mutex_exit(&zp->z_lock);
2894 
2895 	if (mask & AT_MODE) {
2896 		if (zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr) == 0) {
2897 			err = secpolicy_setid_setsticky_clear(vp, vap,
2898 			    &oldva, cr);
2899 			if (err) {
2900 				ZFS_EXIT(zfsvfs);
2901 				return (err);
2902 			}
2903 			trim_mask |= AT_MODE;
2904 		} else {
2905 			need_policy = TRUE;
2906 		}
2907 	}
2908 
2909 	if (need_policy) {
2910 		/*
2911 		 * If trim_mask is set, then taking ownership
2912 		 * has been granted, or write_acl is present and the user
2913 		 * has the ability to modify the mode.  In that case remove
2914 		 * UID|GID and/or MODE from the mask so that
2915 		 * secpolicy_vnode_setattr() doesn't revoke it.
2916 		 */
2917 
2918 		if (trim_mask) {
2919 			saved_mask = vap->va_mask;
2920 			vap->va_mask &= ~trim_mask;
2921 		}
2922 		err = secpolicy_vnode_setattr(cr, vp, vap, &oldva, flags,
2923 		    (int (*)(void *, int, cred_t *))zfs_zaccess_unix, zp);
2924 		if (err) {
2925 			ZFS_EXIT(zfsvfs);
2926 			return (err);
2927 		}
2928 
2929 		if (trim_mask)
2930 			vap->va_mask |= saved_mask;
2931 	}
2932 
2933 	/*
2934 	 * secpolicy_vnode_setattr() or the take-ownership path may have
2935 	 * changed va_mask.
2936 	 */
2937 	mask = vap->va_mask;
2938 
2939 	if ((mask & (AT_UID | AT_GID))) {
2940 		err = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
2941 		    &xattr_obj, sizeof (xattr_obj));
2942 
2943 		if (err == 0 && xattr_obj) {
2944 			err = zfs_zget(zp->z_zfsvfs, xattr_obj, &attrzp);
2945 			if (err)
2946 				goto out2;
2947 		}
2948 		if (mask & AT_UID) {
2949 			new_uid = zfs_fuid_create(zfsvfs,
2950 			    (uint64_t)vap->va_uid, cr, ZFS_OWNER, &fuidp);
2951 			if (new_uid != zp->z_uid &&
2952 			    zfs_fuid_overquota(zfsvfs, B_FALSE, new_uid)) {
2953 				if (attrzp)
2954 					VN_RELE(ZTOV(attrzp));
2955 				err = SET_ERROR(EDQUOT);
2956 				goto out2;
2957 			}
2958 		}
2959 
2960 		if (mask & AT_GID) {
2961 			new_gid = zfs_fuid_create(zfsvfs, (uint64_t)vap->va_gid,
2962 			    cr, ZFS_GROUP, &fuidp);
2963 			if (new_gid != zp->z_gid &&
2964 			    zfs_fuid_overquota(zfsvfs, B_TRUE, new_gid)) {
2965 				if (attrzp)
2966 					VN_RELE(ZTOV(attrzp));
2967 				err = SET_ERROR(EDQUOT);
2968 				goto out2;
2969 			}
2970 		}
2971 	}
2972 	tx = dmu_tx_create(zfsvfs->z_os);
2973 
2974 	if (mask & AT_MODE) {
2975 		uint64_t pmode = zp->z_mode;
2976 		uint64_t acl_obj;
2977 		new_mode = (pmode & S_IFMT) | (vap->va_mode & ~S_IFMT);
2978 
2979 		if (zp->z_zfsvfs->z_acl_mode == ZFS_ACL_RESTRICTED &&
2980 		    !(zp->z_pflags & ZFS_ACL_TRIVIAL)) {
2981 			err = SET_ERROR(EPERM);
2982 			goto out;
2983 		}
2984 
2985 		if (err = zfs_acl_chmod_setattr(zp, &aclp, new_mode))
2986 			goto out;
2987 
2988 		mutex_enter(&zp->z_lock);
2989 		if (!zp->z_is_sa && ((acl_obj = zfs_external_acl(zp)) != 0)) {
2990 			/*
2991 			 * Are we upgrading ACL from old V0 format
2992 			 * to V1 format?
2993 			 */
2994 			if (zfsvfs->z_version >= ZPL_VERSION_FUID &&
2995 			    zfs_znode_acl_version(zp) ==
2996 			    ZFS_ACL_VERSION_INITIAL) {
2997 				dmu_tx_hold_free(tx, acl_obj, 0,
2998 				    DMU_OBJECT_END);
2999 				dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
3000 				    0, aclp->z_acl_bytes);
3001 			} else {
3002 				dmu_tx_hold_write(tx, acl_obj, 0,
3003 				    aclp->z_acl_bytes);
3004 			}
3005 		} else if (!zp->z_is_sa && aclp->z_acl_bytes > ZFS_ACE_SPACE) {
3006 			dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
3007 			    0, aclp->z_acl_bytes);
3008 		}
3009 		mutex_exit(&zp->z_lock);
3010 		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
3011 	} else {
3012 		if ((mask & AT_XVATTR) &&
3013 		    XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
3014 			dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
3015 		else
3016 			dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
3017 	}
3018 
3019 	if (attrzp) {
3020 		dmu_tx_hold_sa(tx, attrzp->z_sa_hdl, B_FALSE);
3021 	}
3022 
3023 	fuid_dirtied = zfsvfs->z_fuid_dirty;
3024 	if (fuid_dirtied)
3025 		zfs_fuid_txhold(zfsvfs, tx);
3026 
3027 	zfs_sa_upgrade_txholds(tx, zp);
3028 
3029 	err = dmu_tx_assign(tx, TXG_NOWAIT);
3030 	if (err) {
3031 		if (err == ERESTART)
3032 			dmu_tx_wait(tx);
3033 		goto out;
3034 	}
3035 
3036 	count = 0;
3037 	/*
3038 	 * Set each attribute requested.
3039 	 * We group settings according to the locks they need to acquire.
3040 	 *
3041 	 * Note: you cannot set ctime directly, although it will be
3042 	 * updated as a side-effect of calling this function.
3043 	 */
3044 
3045 
3046 	if (mask & (AT_UID|AT_GID|AT_MODE))
3047 		mutex_enter(&zp->z_acl_lock);
3048 	mutex_enter(&zp->z_lock);
3049 
3050 	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
3051 	    &zp->z_pflags, sizeof (zp->z_pflags));
3052 
3053 	if (attrzp) {
3054 		if (mask & (AT_UID|AT_GID|AT_MODE))
3055 			mutex_enter(&attrzp->z_acl_lock);
3056 		mutex_enter(&attrzp->z_lock);
3057 		SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3058 		    SA_ZPL_FLAGS(zfsvfs), NULL, &attrzp->z_pflags,
3059 		    sizeof (attrzp->z_pflags));
3060 	}
3061 
3062 	if (mask & (AT_UID|AT_GID)) {
3063 
3064 		if (mask & AT_UID) {
3065 			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
3066 			    &new_uid, sizeof (new_uid));
3067 			zp->z_uid = new_uid;
3068 			if (attrzp) {
3069 				SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3070 				    SA_ZPL_UID(zfsvfs), NULL, &new_uid,
3071 				    sizeof (new_uid));
3072 				attrzp->z_uid = new_uid;
3073 			}
3074 		}
3075 
3076 		if (mask & AT_GID) {
3077 			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs),
3078 			    NULL, &new_gid, sizeof (new_gid));
3079 			zp->z_gid = new_gid;
3080 			if (attrzp) {
3081 				SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3082 				    SA_ZPL_GID(zfsvfs), NULL, &new_gid,
3083 				    sizeof (new_gid));
3084 				attrzp->z_gid = new_gid;
3085 			}
3086 		}
3087 		if (!(mask & AT_MODE)) {
3088 			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs),
3089 			    NULL, &new_mode, sizeof (new_mode));
3090 			new_mode = zp->z_mode;
3091 		}
3092 		err = zfs_acl_chown_setattr(zp);
3093 		ASSERT(err == 0);
3094 		if (attrzp) {
3095 			err = zfs_acl_chown_setattr(attrzp);
3096 			ASSERT(err == 0);
3097 		}
3098 	}
3099 
3100 	if (mask & AT_MODE) {
3101 		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
3102 		    &new_mode, sizeof (new_mode));
3103 		zp->z_mode = new_mode;
3104 		ASSERT3U((uintptr_t)aclp, !=, NULL);
3105 		err = zfs_aclset_common(zp, aclp, cr, tx);
3106 		ASSERT0(err);
3107 		if (zp->z_acl_cached)
3108 			zfs_acl_free(zp->z_acl_cached);
3109 		zp->z_acl_cached = aclp;
3110 		aclp = NULL;
3111 	}
3112 
3113 
3114 	if (mask & AT_ATIME) {
3115 		ZFS_TIME_ENCODE(&vap->va_atime, zp->z_atime);
3116 		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
3117 		    &zp->z_atime, sizeof (zp->z_atime));
3118 	}
3119 
3120 	if (mask & AT_MTIME) {
3121 		ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
3122 		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
3123 		    mtime, sizeof (mtime));
3124 	}
3125 
3126 	/* XXX - shouldn't this be done *before* the ATIME/MTIME checks? */
3127 	if (mask & AT_SIZE && !(mask & AT_MTIME)) {
3128 		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs),
3129 		    NULL, mtime, sizeof (mtime));
3130 		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
3131 		    &ctime, sizeof (ctime));
3132 		zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
3133 		    B_TRUE);
3134 	} else if (mask != 0) {
3135 		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
3136 		    &ctime, sizeof (ctime));
3137 		zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime, ctime,
3138 		    B_TRUE);
3139 		if (attrzp) {
3140 			SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3141 			    SA_ZPL_CTIME(zfsvfs), NULL,
3142 			    &ctime, sizeof (ctime));
3143 			zfs_tstamp_update_setup(attrzp, STATE_CHANGED,
3144 			    mtime, ctime, B_TRUE);
3145 		}
3146 	}
3147 	/*
3148 	 * Do this after setting the timestamps, to prevent a timestamp
3149 	 * update from toggling the bit.
3150 	 */
3151 
3152 	if (xoap && (mask & AT_XVATTR)) {
3153 
3154 		/*
3155 		 * Restore the masks trimmed off above
3156 		 * so that the return masks can be set for the caller.
3157 		 */
3158 
3159 		if (XVA_ISSET_REQ(&tmpxvattr, XAT_APPENDONLY)) {
3160 			XVA_SET_REQ(xvap, XAT_APPENDONLY);
3161 		}
3162 		if (XVA_ISSET_REQ(&tmpxvattr, XAT_NOUNLINK)) {
3163 			XVA_SET_REQ(xvap, XAT_NOUNLINK);
3164 		}
3165 		if (XVA_ISSET_REQ(&tmpxvattr, XAT_IMMUTABLE)) {
3166 			XVA_SET_REQ(xvap, XAT_IMMUTABLE);
3167 		}
3168 		if (XVA_ISSET_REQ(&tmpxvattr, XAT_NODUMP)) {
3169 			XVA_SET_REQ(xvap, XAT_NODUMP);
3170 		}
3171 		if (XVA_ISSET_REQ(&tmpxvattr, XAT_AV_MODIFIED)) {
3172 			XVA_SET_REQ(xvap, XAT_AV_MODIFIED);
3173 		}
3174 		if (XVA_ISSET_REQ(&tmpxvattr, XAT_AV_QUARANTINED)) {
3175 			XVA_SET_REQ(xvap, XAT_AV_QUARANTINED);
3176 		}
3177 
3178 		if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
3179 			ASSERT(vp->v_type == VREG);
3180 
3181 		zfs_xvattr_set(zp, xvap, tx);
3182 	}
3183 
3184 	if (fuid_dirtied)
3185 		zfs_fuid_sync(zfsvfs, tx);
3186 
3187 	if (mask != 0)
3188 		zfs_log_setattr(zilog, tx, TX_SETATTR, zp, vap, mask, fuidp);
3189 
3190 	mutex_exit(&zp->z_lock);
3191 	if (mask & (AT_UID|AT_GID|AT_MODE))
3192 		mutex_exit(&zp->z_acl_lock);
3193 
3194 	if (attrzp) {
3195 		if (mask & (AT_UID|AT_GID|AT_MODE))
3196 			mutex_exit(&attrzp->z_acl_lock);
3197 		mutex_exit(&attrzp->z_lock);
3198 	}
3199 out:
3200 	if (err == 0 && attrzp) {
3201 		err2 = sa_bulk_update(attrzp->z_sa_hdl, xattr_bulk,
3202 		    xattr_count, tx);
3203 		ASSERT(err2 == 0);
3204 	}
3205 
3206 	if (attrzp)
3207 		VN_RELE(ZTOV(attrzp));
3208 
3209 	if (aclp)
3210 		zfs_acl_free(aclp);
3211 
3212 	if (fuidp) {
3213 		zfs_fuid_info_free(fuidp);
3214 		fuidp = NULL;
3215 	}
3216 
3217 	if (err) {
3218 		dmu_tx_abort(tx);
3219 		if (err == ERESTART)
3220 			goto top;
3221 	} else {
3222 		err2 = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
3223 		dmu_tx_commit(tx);
3224 	}
3225 
3226 out2:
3227 	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
3228 		zil_commit(zilog, 0);
3229 
3230 	ZFS_EXIT(zfsvfs);
3231 	return (err);
3232 }
3233 
3234 typedef struct zfs_zlock {
3235 	krwlock_t	*zl_rwlock;	/* lock we acquired */
3236 	znode_t		*zl_znode;	/* znode we held */
3237 	struct zfs_zlock *zl_next;	/* next in list */
3238 } zfs_zlock_t;
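
/*
 * Illustrative note: zfs_rename_lock() pushes one zfs_zlock_t per lock
 * taken onto the head of this singly-linked list, so zfs_rename_unlock()
 * naturally releases the locks in LIFO order.
 */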
3239 
3240 /*
3241  * Drop locks and release vnodes that were held by zfs_rename_lock().
3242  */
3243 static void
3244 zfs_rename_unlock(zfs_zlock_t **zlpp)
3245 {
3246 	zfs_zlock_t *zl;
3247 
3248 	while ((zl = *zlpp) != NULL) {
3249 		if (zl->zl_znode != NULL)
3250 			VN_RELE(ZTOV(zl->zl_znode));
3251 		rw_exit(zl->zl_rwlock);
3252 		*zlpp = zl->zl_next;
3253 		kmem_free(zl, sizeof (*zl));
3254 	}
3255 }
3256 
3257 /*
3258  * Search back through the directory tree, using the ".." entries.
3259  * Lock each directory in the chain to prevent concurrent renames.
3260  * Fail any attempt to move a directory into one of its own descendants.
3261  * XXX - z_parent_lock can overlap with map or grow locks
3262  */
3263 static int
3264 zfs_rename_lock(znode_t *szp, znode_t *tdzp, znode_t *sdzp, zfs_zlock_t **zlpp)
3265 {
3266 	zfs_zlock_t	*zl;
3267 	znode_t		*zp = tdzp;
3268 	uint64_t	rootid = zp->z_zfsvfs->z_root;
3269 	uint64_t	oidp = zp->z_id;
3270 	krwlock_t	*rwlp = &szp->z_parent_lock;
3271 	krw_t		rw = RW_WRITER;
3272 
3273 	/*
3274 	 * First pass write-locks szp and compares to zp->z_id.
3275 	 * Later passes read-lock zp and compare to zp->z_parent.
3276 	 */
3277 	do {
3278 		if (!rw_tryenter(rwlp, rw)) {
3279 			/*
3280 			 * Another thread is renaming in this path.
3281 			 * Note that if we are a WRITER, we don't have any
3282 			 * parent_locks held yet.
3283 			 */
3284 			if (rw == RW_READER && zp->z_id > szp->z_id) {
3285 				/*
3286 				 * Drop our locks and restart
3287 				 */
3288 				zfs_rename_unlock(&zl);
3289 				*zlpp = NULL;
3290 				zp = tdzp;
3291 				oidp = zp->z_id;
3292 				rwlp = &szp->z_parent_lock;
3293 				rw = RW_WRITER;
3294 				continue;
3295 			} else {
3296 				/*
3297 				 * Wait for the other thread to drop its locks.
3298 				 */
3299 				rw_enter(rwlp, rw);
3300 			}
3301 		}
3302 
3303 		zl = kmem_alloc(sizeof (*zl), KM_SLEEP);
3304 		zl->zl_rwlock = rwlp;
3305 		zl->zl_znode = NULL;
3306 		zl->zl_next = *zlpp;
3307 		*zlpp = zl;
3308 
3309 		if (oidp == szp->z_id)		/* We're a descendant of szp */
3310 			return (SET_ERROR(EINVAL));
3311 
3312 		if (oidp == rootid)		/* We've hit the top */
3313 			return (0);
3314 
3315 		if (rw == RW_READER) {		/* i.e. not the first pass */
3316 			int error = zfs_zget(zp->z_zfsvfs, oidp, &zp);
3317 			if (error)
3318 				return (error);
3319 			zl->zl_znode = zp;
3320 		}
3321 		(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zp->z_zfsvfs),
3322 		    &oidp, sizeof (oidp));
3323 		rwlp = &zp->z_parent_lock;
3324 		rw = RW_READER;
3325 
3326 	} while (zp->z_id != sdzp->z_id);
3327 
3328 	return (0);
3329 }
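
/*
 * Worked example (illustrative): for the invalid rename mentioned in
 * zfs_rename() below, /usr/a/b -> /usr/a/b/c/d, we have szp = b and
 * tdzp = c.  The first pass write-locks b's z_parent_lock and sees that
 * c is not b itself; the walk then follows c's ".." entry up to b, and
 * the second pass finds oidp == szp->z_id, so the EINVAL above prevents
 * a directory from being moved into one of its own descendants.
 */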
3330 
3331 /*
3332  * Move an entry from the provided source directory to the target
3333  * directory.  Change the entry name as indicated.
3334  *
3335  *	IN:	sdvp	- Source directory containing the "old entry".
3336  *		snm	- Old entry name.
3337  *		tdvp	- Target directory to contain the "new entry".
3338  *		tnm	- New entry name.
3339  *		cr	- credentials of caller.
3340  *		ct	- caller context
3341  *		flags	- case flags
3342  *
3343  *	RETURN:	0 on success, error code on failure.
3344  *
3345  * Timestamps:
3346  *	sdvp,tdvp - ctime|mtime updated
3347  */
3348 /*ARGSUSED*/
3349 static int
3350 zfs_rename(vnode_t *sdvp, char *snm, vnode_t *tdvp, char *tnm, cred_t *cr,
3351     caller_context_t *ct, int flags)
3352 {
3353 	znode_t		*tdzp, *szp, *tzp;
3354 	znode_t		*sdzp = VTOZ(sdvp);
3355 	zfsvfs_t	*zfsvfs = sdzp->z_zfsvfs;
3356 	zilog_t		*zilog;
3357 	vnode_t		*realvp;
3358 	zfs_dirlock_t	*sdl, *tdl;
3359 	dmu_tx_t	*tx;
3360 	zfs_zlock_t	*zl;
3361 	int		cmp, serr, terr;
3362 	int		error = 0;
3363 	int		zflg = 0;
3364 
3365 	ZFS_ENTER(zfsvfs);
3366 	ZFS_VERIFY_ZP(sdzp);
3367 	zilog = zfsvfs->z_log;
3368 
3369 	/*
3370 	 * Make sure we have the real vp for the target directory.
3371 	 */
3372 	if (VOP_REALVP(tdvp, &realvp, ct) == 0)
3373 		tdvp = realvp;
3374 
3375 	if (tdvp->v_vfsp != sdvp->v_vfsp || zfsctl_is_node(tdvp)) {
3376 		ZFS_EXIT(zfsvfs);
3377 		return (SET_ERROR(EXDEV));
3378 	}
3379 
3380 	tdzp = VTOZ(tdvp);
3381 	ZFS_VERIFY_ZP(tdzp);
3382 	if (zfsvfs->z_utf8 && u8_validate(tnm,
3383 	    strlen(tnm), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
3384 		ZFS_EXIT(zfsvfs);
3385 		return (SET_ERROR(EILSEQ));
3386 	}
3387 
3388 	if (flags & FIGNORECASE)
3389 		zflg |= ZCILOOK;
3390 
3391 top:
3392 	szp = NULL;
3393 	tzp = NULL;
3394 	zl = NULL;
3395 
3396 	/*
3397 	 * This is to prevent the creation of links into attribute space
3398 	 * by renaming a linked file into/out of an attribute directory.
3399 	 * See the comment in zfs_link() for why this is considered bad.
3400 	 */
3401 	if ((tdzp->z_pflags & ZFS_XATTR) != (sdzp->z_pflags & ZFS_XATTR)) {
3402 		ZFS_EXIT(zfsvfs);
3403 		return (SET_ERROR(EINVAL));
3404 	}
3405 
3406 	/*
3407 	 * Lock source and target directory entries.  To prevent deadlock,
3408 	 * a lock ordering must be defined.  We lock the directory with
3409 	 * the smallest object id first, or if it's a tie, the one with
3410 	 * the lexically first name.
3411 	 */
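	/*
	 * For example (illustrative numbers): if sdzp has object id 5 and
	 * tdzp has object id 9, this rename and a concurrent rename in the
	 * opposite direction both lock object 5's entry first, so the two
	 * threads cannot deadlock against each other's dirent locks.
	 */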
3412 	if (sdzp->z_id < tdzp->z_id) {
3413 		cmp = -1;
3414 	} else if (sdzp->z_id > tdzp->z_id) {
3415 		cmp = 1;
3416 	} else {
3417 		/*
3418 		 * First compare the two name arguments without
3419 		 * considering any case folding.
3420 		 */
3421 		int nofold = (zfsvfs->z_norm & ~U8_TEXTPREP_TOUPPER);
3422 
3423 		cmp = u8_strcmp(snm, tnm, 0, nofold, U8_UNICODE_LATEST, &error);
3424 		ASSERT(error == 0 || !zfsvfs->z_utf8);
3425 		if (cmp == 0) {
3426 			/*
3427 			 * POSIX: "If the old argument and the new argument
3428 			 * both refer to links to the same existing file,
3429 			 * the rename() function shall return successfully
3430 			 * and perform no other action."
3431 			 */
3432 			ZFS_EXIT(zfsvfs);
3433 			return (0);
3434 		}
3435 		/*
3436 		 * If the file system is case-folding, then we may
3437 		 * have some more checking to do.  A case-folding file
3438 		 * system either supports mixed case sensitivity
3439 		 * access or is completely case-insensitive.  Note
3440 		 * that the file system is always case-preserving.
3441 		 *
3442 		 * In mixed sensitivity mode case sensitive behavior
3443 		 * is the default.  FIGNORECASE must be used to
3444 		 * explicitly request case insensitive behavior.
3445 		 *
3446 		 * If the source and target names provided differ only
3447 		 * by case (e.g., a request to rename 'tim' to 'Tim'),
3448 		 * we will treat this as a special case in the
3449 		 * case-insensitive mode: as long as the source name
3450 		 * is an exact match, we will allow this to proceed as
3451 		 * a name-change request.
3452 		 */
3453 		if ((zfsvfs->z_case == ZFS_CASE_INSENSITIVE ||
3454 		    (zfsvfs->z_case == ZFS_CASE_MIXED &&
3455 		    flags & FIGNORECASE)) &&
3456 		    u8_strcmp(snm, tnm, 0, zfsvfs->z_norm, U8_UNICODE_LATEST,
3457 		    &error) == 0) {
3458 			/*
3459 			 * Case-preserving rename request; require exact
3460 			 * name matches.
3461 			 */
3462 			zflg |= ZCIEXACT;
3463 			zflg &= ~ZCILOOK;
3464 		}
3465 	}
3466 
3467 	/*
3468 	 * If the source and destination directories are the same, we should
3469 	 * grab the z_name_lock of that directory only once.
3470 	 */
3471 	if (sdzp == tdzp) {
3472 		zflg |= ZHAVELOCK;
3473 		rw_enter(&sdzp->z_name_lock, RW_READER);
3474 	}
3475 
3476 	if (cmp < 0) {
3477 		serr = zfs_dirent_lock(&sdl, sdzp, snm, &szp,
3478 		    ZEXISTS | zflg, NULL, NULL);
3479 		terr = zfs_dirent_lock(&tdl,
3480 		    tdzp, tnm, &tzp, ZRENAMING | zflg, NULL, NULL);
3481 	} else {
3482 		terr = zfs_dirent_lock(&tdl,
3483 		    tdzp, tnm, &tzp, zflg, NULL, NULL);
3484 		serr = zfs_dirent_lock(&sdl,
3485 		    sdzp, snm, &szp, ZEXISTS | ZRENAMING | zflg,
3486 		    NULL, NULL);
3487 	}
3488 
3489 	if (serr) {
3490 		/*
3491 		 * Source entry invalid or not there.
3492 		 */
3493 		if (!terr) {
3494 			zfs_dirent_unlock(tdl);
3495 			if (tzp)
3496 				VN_RELE(ZTOV(tzp));
3497 		}
3498 
3499 		if (sdzp == tdzp)
3500 			rw_exit(&sdzp->z_name_lock);
3501 
3502 		if (strcmp(snm, "..") == 0)
3503 			serr = SET_ERROR(EINVAL);
3504 		ZFS_EXIT(zfsvfs);
3505 		return (serr);
3506 	}
3507 	if (terr) {
3508 		zfs_dirent_unlock(sdl);
3509 		VN_RELE(ZTOV(szp));
3510 
3511 		if (sdzp == tdzp)
3512 			rw_exit(&sdzp->z_name_lock);
3513 
3514 		if (strcmp(tnm, "..") == 0)
3515 			terr = SET_ERROR(EINVAL);
3516 		ZFS_EXIT(zfsvfs);
3517 		return (terr);
3518 	}
3519 
3520 	/*
3521 	 * Must have write access at the source to remove the old entry
3522 	 * and write access at the target to create the new entry.
3523 	 * Note that if target and source are the same, this can be
3524 	 * done in a single check.
3525 	 */
3526 
3527 	if (error = zfs_zaccess_rename(sdzp, szp, tdzp, tzp, cr))
3528 		goto out;
3529 
3530 	if (ZTOV(szp)->v_type == VDIR) {
3531 		/*
3532 		 * Check to make sure rename is valid.
3533 		 * Can't do a move like this: /usr/a/b to /usr/a/b/c/d
3534 		 */
3535 		if (error = zfs_rename_lock(szp, tdzp, sdzp, &zl))
3536 			goto out;
3537 	}
3538 
3539 	/*
3540 	 * Does target exist?
3541 	 */
3542 	if (tzp) {
3543 		/*
3544 		 * Source and target must be the same type.
3545 		 */
3546 		if (ZTOV(szp)->v_type == VDIR) {
3547 			if (ZTOV(tzp)->v_type != VDIR) {
3548 				error = SET_ERROR(ENOTDIR);
3549 				goto out;
3550 			}
3551 		} else {
3552 			if (ZTOV(tzp)->v_type == VDIR) {
3553 				error = SET_ERROR(EISDIR);
3554 				goto out;
3555 			}
3556 		}
3557 		/*
3558 		 * POSIX dictates that when the source and target
3559 		 * entries refer to the same file object, rename
3560 		 * must do nothing and exit without error.
3561 		 */
3562 		if (szp->z_id == tzp->z_id) {
3563 			error = 0;
3564 			goto out;
3565 		}
3566 	}
3567 
3568 	vnevent_rename_src(ZTOV(szp), sdvp, snm, ct);
3569 	if (tzp)
3570 		vnevent_rename_dest(ZTOV(tzp), tdvp, tnm, ct);
3571 
3572 	/*
3573 	 * Notify the target directory if it is not the same
3574 	 * as the source directory.
3575 	 */
3576 	if (tdvp != sdvp) {
3577 		vnevent_rename_dest_dir(tdvp, ct);
3578 	}
3579 
3580 	tx = dmu_tx_create(zfsvfs->z_os);
3581 	dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
3582 	dmu_tx_hold_sa(tx, sdzp->z_sa_hdl, B_FALSE);
3583 	dmu_tx_hold_zap(tx, sdzp->z_id, FALSE, snm);
3584 	dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, tnm);
3585 	if (sdzp != tdzp) {
3586 		dmu_tx_hold_sa(tx, tdzp->z_sa_hdl, B_FALSE);
3587 		zfs_sa_upgrade_txholds(tx, tdzp);
3588 	}
3589 	if (tzp) {
3590 		dmu_tx_hold_sa(tx, tzp->z_sa_hdl, B_FALSE);
3591 		zfs_sa_upgrade_txholds(tx, tzp);
3592 	}
3593 
3594 	zfs_sa_upgrade_txholds(tx, szp);
3595 	dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
3596 	error = dmu_tx_assign(tx, TXG_NOWAIT);
3597 	if (error) {
3598 		if (zl != NULL)
3599 			zfs_rename_unlock(&zl);
3600 		zfs_dirent_unlock(sdl);
3601 		zfs_dirent_unlock(tdl);
3602 
3603 		if (sdzp == tdzp)
3604 			rw_exit(&sdzp->z_name_lock);
3605 
3606 		VN_RELE(ZTOV(szp));
3607 		if (tzp)
3608 			VN_RELE(ZTOV(tzp));
3609 		if (error == ERESTART) {
3610 			dmu_tx_wait(tx);
3611 			dmu_tx_abort(tx);
3612 			goto top;
3613 		}
3614 		dmu_tx_abort(tx);
3615 		ZFS_EXIT(zfsvfs);
3616 		return (error);
3617 	}
3618 
3619 	if (tzp)	/* Attempt to remove the existing target */
3620 		error = zfs_link_destroy(tdl, tzp, tx, zflg, NULL);
3621 
3622 	if (error == 0) {
3623 		error = zfs_link_create(tdl, szp, tx, ZRENAMING);
3624 		if (error == 0) {
3625 			szp->z_pflags |= ZFS_AV_MODIFIED;
3626 
3627 			error = sa_update(szp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs),
3628 			    (void *)&szp->z_pflags, sizeof (uint64_t), tx);
3629 			ASSERT0(error);
3630 
3631 			error = zfs_link_destroy(sdl, szp, tx, ZRENAMING, NULL);
3632 			if (error == 0) {
3633 				zfs_log_rename(zilog, tx, TX_RENAME |
3634 				    (flags & FIGNORECASE ? TX_CI : 0), sdzp,
3635 				    sdl->dl_name, tdzp, tdl->dl_name, szp);
3636 
3637 				/*
3638 				 * Update path information for the target vnode
3639 				 */
3640 				vn_renamepath(tdvp, ZTOV(szp), tnm,
3641 				    strlen(tnm));
3642 			} else {
3643 				/*
3644 				 * At this point, we have successfully created
3645 				 * the target name, but have failed to remove
3646 				 * the source name.  Since the create was done
3647 				 * with the ZRENAMING flag, there are
3648 				 * complications; for one, the link count is
3649 				 * wrong.  The easiest way to deal with this
3650 				 * is to remove the newly created target, and
3651 				 * return the original error.  This must
3652 				 * succeed; fortunately, it is very unlikely to
3653 				 * fail, since we just created it.
3654 				 */
3655 				VERIFY3U(zfs_link_destroy(tdl, szp, tx,
3656 				    ZRENAMING, NULL), ==, 0);
3657 			}
3658 		}
3659 	}
3660 
3661 	dmu_tx_commit(tx);
3662 out:
3663 	if (zl != NULL)
3664 		zfs_rename_unlock(&zl);
3665 
3666 	zfs_dirent_unlock(sdl);
3667 	zfs_dirent_unlock(tdl);
3668 
3669 	if (sdzp == tdzp)
3670 		rw_exit(&sdzp->z_name_lock);
3671 
3672 
3673 	VN_RELE(ZTOV(szp));
3674 	if (tzp)
3675 		VN_RELE(ZTOV(tzp));
3676 
3677 	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
3678 		zil_commit(zilog, 0);
3679 
3680 	ZFS_EXIT(zfsvfs);
3681 	return (error);
3682 }
3683 
3684 /*
3685  * Insert the indicated symbolic reference entry into the directory.
3686  *
3687  *	IN:	dvp	- Directory to contain new symbolic link.
3688  *		name	- Name for the new symlink entry.
 *		link	- Target path the new symlink will contain.
3689  *		vap	- Attributes of new entry.
3690  *		cr	- credentials of caller.
3691  *		ct	- caller context
3692  *		flags	- case flags
3693  *
3694  *	RETURN:	0 on success, error code on failure.
3695  *
3696  * Timestamps:
3697  *	dvp - ctime|mtime updated
3698  */
3699 /*ARGSUSED*/
3700 static int
3701 zfs_symlink(vnode_t *dvp, char *name, vattr_t *vap, char *link, cred_t *cr,
3702     caller_context_t *ct, int flags)
3703 {
3704 	znode_t		*zp, *dzp = VTOZ(dvp);
3705 	zfs_dirlock_t	*dl;
3706 	dmu_tx_t	*tx;
3707 	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
3708 	zilog_t		*zilog;
3709 	uint64_t	len = strlen(link);
3710 	int		error;
3711 	int		zflg = ZNEW;
3712 	zfs_acl_ids_t	acl_ids;
3713 	boolean_t	fuid_dirtied;
3714 	uint64_t	txtype = TX_SYMLINK;
3715 
3716 	ASSERT(vap->va_type == VLNK);
3717 
3718 	ZFS_ENTER(zfsvfs);
3719 	ZFS_VERIFY_ZP(dzp);
3720 	zilog = zfsvfs->z_log;
3721 
3722 	if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
3723 	    NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
3724 		ZFS_EXIT(zfsvfs);
3725 		return (SET_ERROR(EILSEQ));
3726 	}
3727 	if (flags & FIGNORECASE)
3728 		zflg |= ZCILOOK;
3729 
3730 	if (len > MAXPATHLEN) {
3731 		ZFS_EXIT(zfsvfs);
3732 		return (SET_ERROR(ENAMETOOLONG));
3733 	}
3734 
3735 	if ((error = zfs_acl_ids_create(dzp, 0,
3736 	    vap, cr, NULL, &acl_ids)) != 0) {
3737 		ZFS_EXIT(zfsvfs);
3738 		return (error);
3739 	}
3740 top:
3741 	/*
3742 	 * Attempt to lock directory; fail if entry already exists.
3743 	 */
3744 	error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, NULL, NULL);
3745 	if (error) {
3746 		zfs_acl_ids_free(&acl_ids);
3747 		ZFS_EXIT(zfsvfs);
3748 		return (error);
3749 	}
3750 
3751 	if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) {
3752 		zfs_acl_ids_free(&acl_ids);
3753 		zfs_dirent_unlock(dl);
3754 		ZFS_EXIT(zfsvfs);
3755 		return (error);
3756 	}
3757 
3758 	if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
3759 		zfs_acl_ids_free(&acl_ids);
3760 		zfs_dirent_unlock(dl);
3761 		ZFS_EXIT(zfsvfs);
3762 		return (SET_ERROR(EDQUOT));
3763 	}
3764 	tx = dmu_tx_create(zfsvfs->z_os);
3765 	fuid_dirtied = zfsvfs->z_fuid_dirty;
3766 	dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len));
3767 	dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
3768 	dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
3769 	    ZFS_SA_BASE_ATTR_SIZE + len);
3770 	dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
3771 	if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
3772 		dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
3773 		    acl_ids.z_aclp->z_acl_bytes);
3774 	}
3775 	if (fuid_dirtied)
3776 		zfs_fuid_txhold(zfsvfs, tx);
3777 	error = dmu_tx_assign(tx, TXG_NOWAIT);
3778 	if (error) {
3779 		zfs_dirent_unlock(dl);
3780 		if (error == ERESTART) {
3781 			dmu_tx_wait(tx);
3782 			dmu_tx_abort(tx);
3783 			goto top;
3784 		}
3785 		zfs_acl_ids_free(&acl_ids);
3786 		dmu_tx_abort(tx);
3787 		ZFS_EXIT(zfsvfs);
3788 		return (error);
3789 	}
3790 
3791 	/*
3792 	 * Create a new object for the symlink.
3793 	 * For version 4 ZPL datasets the symlink will be an SA attribute.
3794 	 */
3795 	zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
3796 
3797 	if (fuid_dirtied)
3798 		zfs_fuid_sync(zfsvfs, tx);
3799 
3800 	mutex_enter(&zp->z_lock);
3801 	if (zp->z_is_sa)
3802 		error = sa_update(zp->z_sa_hdl, SA_ZPL_SYMLINK(zfsvfs),
3803 		    link, len, tx);
3804 	else
3805 		zfs_sa_symlink(zp, link, len, tx);
3806 	mutex_exit(&zp->z_lock);
3807 
3808 	zp->z_size = len;
3809 	(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
3810 	    &zp->z_size, sizeof (zp->z_size), tx);
3811 	/*
3812 	 * Insert the new object into the directory.
3813 	 */
3814 	(void) zfs_link_create(dl, zp, tx, ZNEW);
3815 
3816 	if (flags & FIGNORECASE)
3817 		txtype |= TX_CI;
3818 	zfs_log_symlink(zilog, tx, txtype, dzp, zp, name, link);
3819 
3820 	zfs_acl_ids_free(&acl_ids);
3821 
3822 	dmu_tx_commit(tx);
3823 
3824 	zfs_dirent_unlock(dl);
3825 
3826 	VN_RELE(ZTOV(zp));
3827 
3828 	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
3829 		zil_commit(zilog, 0);
3830 
3831 	ZFS_EXIT(zfsvfs);
3832 	return (error);
3833 }
3834 
3835 /*
3836  * Return, in the buffer contained in the provided uio structure,
3837  * the symbolic path referred to by vp.
3838  *
3839  *	IN:	vp	- vnode of symbolic link.
3840  *		uio	- structure to contain the link path.
3841  *		cr	- credentials of caller.
3842  *		ct	- caller context
3843  *
3844  *	OUT:	uio	- structure containing the link path.
3845  *
3846  *	RETURN:	0 on success, error code on failure.
3847  *
3848  * Timestamps:
3849  *	vp - atime updated
3850  */
3851 /* ARGSUSED */
3852 static int
3853 zfs_readlink(vnode_t *vp, uio_t *uio, cred_t *cr, caller_context_t *ct)
3854 {
3855 	znode_t		*zp = VTOZ(vp);
3856 	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
3857 	int		error;
3858 
3859 	ZFS_ENTER(zfsvfs);
3860 	ZFS_VERIFY_ZP(zp);
3861 
3862 	mutex_enter(&zp->z_lock);
3863 	if (zp->z_is_sa)
3864 		error = sa_lookup_uio(zp->z_sa_hdl,
3865 		    SA_ZPL_SYMLINK(zfsvfs), uio);
3866 	else
3867 		error = zfs_sa_readlink(zp, uio);
3868 	mutex_exit(&zp->z_lock);
3869 
3870 	ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
3871 
3872 	ZFS_EXIT(zfsvfs);
3873 	return (error);
3874 }
3875 
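/*
 * zfs_readlink() is reached from userland via readlink(2); a minimal
 * illustrative sketch of a hypothetical caller:
 *
 * 	char buf[MAXPATHLEN];
 * 	ssize_t n = readlink("/tank/link", buf, sizeof (buf));
 *
 * The uio passed in describes `buf'.  For SA znodes the target comes
 * straight out of the SA_ZPL_SYMLINK attribute; older znodes fall back
 * to zfs_sa_readlink().
 */
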
3876 /*
3877  * Insert a new entry into directory tdvp referencing svp.
3878  *
3879  *	IN:	tdvp	- Directory to contain new entry.
3880  *		svp	- vnode of new entry.
3881  *		name	- name of new entry.
3882  *		cr	- credentials of caller.
3883  *		ct	- caller context
3884  *
3885  *	RETURN:	0 on success, error code on failure.
3886  *
3887  * Timestamps:
3888  *	tdvp - ctime|mtime updated
3889  *	 svp - ctime updated
3890  */
3891 /* ARGSUSED */
3892 static int
3893 zfs_link(vnode_t *tdvp, vnode_t *svp, char *name, cred_t *cr,
3894     caller_context_t *ct, int flags)
3895 {
3896 	znode_t		*dzp = VTOZ(tdvp);
3897 	znode_t		*tzp, *szp;
3898 	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
3899 	zilog_t		*zilog;
3900 	zfs_dirlock_t	*dl;
3901 	dmu_tx_t	*tx;
3902 	vnode_t		*realvp;
3903 	int		error;
3904 	int		zf = ZNEW;
3905 	uint64_t	parent;
3906 	uid_t		owner;
3907 
3908 	ASSERT(tdvp->v_type == VDIR);
3909 
3910 	ZFS_ENTER(zfsvfs);
3911 	ZFS_VERIFY_ZP(dzp);
3912 	zilog = zfsvfs->z_log;
3913 
3914 	if (VOP_REALVP(svp, &realvp, ct) == 0)
3915 		svp = realvp;
3916 
3917 	/*
3918 	 * POSIX dictates that we return EPERM here.
3919 	 * Better choices include ENOTSUP or EISDIR.
3920 	 */
3921 	if (svp->v_type == VDIR) {
3922 		ZFS_EXIT(zfsvfs);
3923 		return (SET_ERROR(EPERM));
3924 	}
3925 
3926 	if (svp->v_vfsp != tdvp->v_vfsp || zfsctl_is_node(svp)) {
3927 		ZFS_EXIT(zfsvfs);
3928 		return (SET_ERROR(EXDEV));
3929 	}
3930 
3931 	szp = VTOZ(svp);
3932 	ZFS_VERIFY_ZP(szp);
3933 
3934 	/* Prevent links to .zfs/shares files */
3935 
3936 	if ((error = sa_lookup(szp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
3937 	    &parent, sizeof (uint64_t))) != 0) {
3938 		ZFS_EXIT(zfsvfs);
3939 		return (error);
3940 	}
3941 	if (parent == zfsvfs->z_shares_dir) {
3942 		ZFS_EXIT(zfsvfs);
3943 		return (SET_ERROR(EPERM));
3944 	}
3945 
3946 	if (zfsvfs->z_utf8 && u8_validate(name,
3947 	    strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
3948 		ZFS_EXIT(zfsvfs);
3949 		return (SET_ERROR(EILSEQ));
3950 	}
3951 	if (flags & FIGNORECASE)
3952 		zf |= ZCILOOK;
3953 
3954 	/*
3955 	 * We do not support links between attributes and non-attributes
3956 	 * because of the potential security risk of creating links
3957 	 * into "normal" file space in order to circumvent restrictions
3958 	 * imposed in attribute space.
3959 	 */
3960 	if ((szp->z_pflags & ZFS_XATTR) != (dzp->z_pflags & ZFS_XATTR)) {
3961 		ZFS_EXIT(zfsvfs);
3962 		return (SET_ERROR(EINVAL));
3963 	}
3964 
3965 
3966 	owner = zfs_fuid_map_id(zfsvfs, szp->z_uid, cr, ZFS_OWNER);
3967 	if (owner != crgetuid(cr) && secpolicy_basic_link(cr) != 0) {
3968 		ZFS_EXIT(zfsvfs);
3969 		return (SET_ERROR(EPERM));
3970 	}
3971 
3972 	if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) {
3973 		ZFS_EXIT(zfsvfs);
3974 		return (error);
3975 	}
3976 
3977 top:
3978 	/*
3979 	 * Attempt to lock directory; fail if entry already exists.
3980 	 */
3981 	error = zfs_dirent_lock(&dl, dzp, name, &tzp, zf, NULL, NULL);
3982 	if (error) {
3983 		ZFS_EXIT(zfsvfs);
3984 		return (error);
3985 	}
3986 
3987 	tx = dmu_tx_create(zfsvfs->z_os);
3988 	dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
3989 	dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
3990 	zfs_sa_upgrade_txholds(tx, szp);
3991 	zfs_sa_upgrade_txholds(tx, dzp);
3992 	error = dmu_tx_assign(tx, TXG_NOWAIT);
3993 	if (error) {
3994 		zfs_dirent_unlock(dl);
3995 		if (error == ERESTART) {
3996 			dmu_tx_wait(tx);
3997 			dmu_tx_abort(tx);
3998 			goto top;
3999 		}
4000 		dmu_tx_abort(tx);
4001 		ZFS_EXIT(zfsvfs);
4002 		return (error);
4003 	}
4004 
4005 	error = zfs_link_create(dl, szp, tx, 0);
4006 
4007 	if (error == 0) {
4008 		uint64_t txtype = TX_LINK;
4009 		if (flags & FIGNORECASE)
4010 			txtype |= TX_CI;
4011 		zfs_log_link(zilog, tx, txtype, dzp, szp, name);
4012 	}
4013 
4014 	dmu_tx_commit(tx);
4015 
4016 	zfs_dirent_unlock(dl);
4017 
4018 	if (error == 0) {
4019 		vnevent_link(svp, ct);
4020 	}
4021 
4022 	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
4023 		zil_commit(zilog, 0);
4024 
4025 	ZFS_EXIT(zfsvfs);
4026 	return (error);
4027 }
4028 
4029 /*
4030  * zfs_null_putapage() is used when the file system has been force
4031  * unmounted. It just drops the pages.
4032  */
4033 /* ARGSUSED */
4034 static int
4035 zfs_null_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp,
4036 		size_t *lenp, int flags, cred_t *cr)
4037 {
4038 	pvn_write_done(pp, B_INVAL|B_FORCE|B_ERROR);
4039 	return (0);
4040 }
4041 
4042 /*
4043  * Push a page out to disk, klustering if possible.
4044  *
4045  *	IN:	vp	- file to push page to.
4046  *		pp	- page to push.
4047  *		flags	- additional flags.
4048  *		cr	- credentials of caller.
4049  *
4050  *	OUT:	offp	- start of range pushed.
4051  *		lenp	- len of range pushed.
4052  *
4053  *	RETURN:	0 on success, error code on failure.
4054  *
4055  * NOTE: callers must have locked the page to be pushed.  On
4056  * exit, the page (and all other pages in the kluster) must be
4057  * unlocked.
4058  */
4059 /* ARGSUSED */
4060 static int
4061 zfs_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp,
4062 		size_t *lenp, int flags, cred_t *cr)
4063 {
4064 	znode_t		*zp = VTOZ(vp);
4065 	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
4066 	dmu_tx_t	*tx;
4067 	u_offset_t	off, koff;
4068 	size_t		len, klen;
4069 	int		err;
4070 
4071 	off = pp->p_offset;
4072 	len = PAGESIZE;
4073 	/*
4074 	 * If our blocksize is bigger than the page size, try to kluster
4075 	 * multiple pages so that we write a full block (thus avoiding
4076 	 * a read-modify-write).
4077 	 */
4078 	if (off < zp->z_size && zp->z_blksz > PAGESIZE) {
4079 		klen = P2ROUNDUP((ulong_t)zp->z_blksz, PAGESIZE);
4080 		koff = ISP2(klen) ? P2ALIGN(off, (u_offset_t)klen) : 0;
4081 		ASSERT(koff <= zp->z_size);
4082 		if (koff + klen > zp->z_size)
4083 			klen = P2ROUNDUP(zp->z_size - koff, (uint64_t)PAGESIZE);
4084 		pp = pvn_write_kluster(vp, pp, &off, &len, koff, klen, flags);
4085 	}
4086 	ASSERT3U(btop(len), ==, btopr(len));
4087 
4088 	/*
4089 	 * Can't push pages past end-of-file.
4090 	 */
4091 	if (off >= zp->z_size) {
4092 		/* ignore all pages */
4093 		err = 0;
4094 		goto out;
4095 	} else if (off + len > zp->z_size) {
4096 		int npages = btopr(zp->z_size - off);
4097 		page_t *trunc;
4098 
4099 		page_list_break(&pp, &trunc, npages);
4100 		/* ignore pages past end of file */
4101 		if (trunc)
4102 			pvn_write_done(trunc, flags);
4103 		len = zp->z_size - off;
4104 	}
4105 
4106 	if (zfs_owner_overquota(zfsvfs, zp, B_FALSE) ||
4107 	    zfs_owner_overquota(zfsvfs, zp, B_TRUE)) {
4108 		err = SET_ERROR(EDQUOT);
4109 		goto out;
4110 	}
4111 top:
4112 	tx = dmu_tx_create(zfsvfs->z_os);
4113 	dmu_tx_hold_write(tx, zp->z_id, off, len);
4114 
4115 	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
4116 	zfs_sa_upgrade_txholds(tx, zp);
4117 	err = dmu_tx_assign(tx, TXG_NOWAIT);
4118 	if (err != 0) {
4119 		if (err == ERESTART) {
4120 			dmu_tx_wait(tx);
4121 			dmu_tx_abort(tx);
4122 			goto top;
4123 		}
4124 		dmu_tx_abort(tx);
4125 		goto out;
4126 	}
4127 
4128 	if (zp->z_blksz <= PAGESIZE) {
4129 		caddr_t va = zfs_map_page(pp, S_READ);
4130 		ASSERT3U(len, <=, PAGESIZE);
4131 		dmu_write(zfsvfs->z_os, zp->z_id, off, len, va, tx);
4132 		zfs_unmap_page(pp, va);
4133 	} else {
4134 		err = dmu_write_pages(zfsvfs->z_os, zp->z_id, off, len, pp, tx);
4135 	}
4136 
4137 	if (err == 0) {
4138 		uint64_t mtime[2], ctime[2];
4139 		sa_bulk_attr_t bulk[3];
4140 		int count = 0;
4141 
4142 		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
4143 		    &mtime, 16);
4144 		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
4145 		    &ctime, 16);
4146 		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
4147 		    &zp->z_pflags, 8);
4148 		zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
4149 		    B_TRUE);
4150 		zfs_log_write(zfsvfs->z_log, tx, TX_WRITE, zp, off, len, 0);
4151 	}
4152 	dmu_tx_commit(tx);
4153 
4154 out:
4155 	pvn_write_done(pp, (err ? B_ERROR : 0) | flags);
4156 	if (offp)
4157 		*offp = off;
4158 	if (lenp)
4159 		*lenp = len;
4160 
4161 	return (err);
4162 }
4163 
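/*
 * A worked example of the klustering arithmetic above (illustrative,
 * assuming a 4K PAGESIZE and a 128K file block):
 *
 * 	off  = 0x23000			(dirty page at 140K)
 * 	klen = P2ROUNDUP(128K, 4K)	= 128K
 * 	koff = P2ALIGN(0x23000, 128K)	= 0x20000 (128K)
 *
 * so the kluster covers [128K, 256K) and a whole block is written in
 * one dmu_write_pages() call, avoiding a read-modify-write.  If EOF
 * falls inside the kluster, klen is trimmed to a page-rounded length
 * and pages past EOF are dropped via page_list_break()/pvn_write_done().
 */
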
4164 /*
4165  * Copy the indicated portion of the file from the cached pages into the file.
4166  * The pages are stored in a page list attached to the file's vnode.
4167  *
4168  *	IN:	vp	- vnode of file to push page data to.
4169  *		off	- position in file to put data.
4170  *		len	- amount of data to write.
4171  *		flags	- flags to control the operation.
4172  *		cr	- credentials of caller.
4173  *		ct	- caller context.
4174  *
4175  *	RETURN:	0 on success, error code on failure.
4176  *
4177  * Timestamps:
4178  *	vp - ctime|mtime updated
4179  */
4180 /*ARGSUSED*/
4181 static int
4182 zfs_putpage(vnode_t *vp, offset_t off, size_t len, int flags, cred_t *cr,
4183     caller_context_t *ct)
4184 {
4185 	znode_t		*zp = VTOZ(vp);
4186 	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
4187 	page_t		*pp;
4188 	size_t		io_len;
4189 	u_offset_t	io_off;
4190 	uint_t		blksz;
4191 	rl_t		*rl;
4192 	int		error = 0;
4193 
4194 	ZFS_ENTER(zfsvfs);
4195 	ZFS_VERIFY_ZP(zp);
4196 
4197 	/*
4198 	 * There's nothing to do if no data is cached.
4199 	 */
4200 	if (!vn_has_cached_data(vp)) {
4201 		ZFS_EXIT(zfsvfs);
4202 		return (0);
4203 	}
4204 
4205 	/*
4206 	 * Align this request to the file block size in case we kluster.
4207 	 * XXX - this can result in pretty aggressive locking, which can
4208 	 * impact simultaneous read/write access.  One option might be
4209 	 * to break up long requests (len == 0) into block-by-block
4210 	 * operations to get narrower locking.
4211 	 */
4212 	blksz = zp->z_blksz;
4213 	if (ISP2(blksz))
4214 		io_off = P2ALIGN_TYPED(off, blksz, u_offset_t);
4215 	else
4216 		io_off = 0;
4217 	if (len > 0 && ISP2(blksz))
4218 		io_len = P2ROUNDUP_TYPED(len + (off - io_off), blksz, size_t);
4219 	else
4220 		io_len = 0;
4221 
4222 	if (io_len == 0) {
4223 		/*
4224 		 * Search the entire vp list for pages >= io_off.
4225 		 */
4226 		rl = zfs_range_lock(zp, io_off, UINT64_MAX, RL_WRITER);
4227 		error = pvn_vplist_dirty(vp, io_off, zfs_putapage, flags, cr);
4228 		goto out;
4229 	}
4230 	rl = zfs_range_lock(zp, io_off, io_len, RL_WRITER);
4231 
4232 	if (off > zp->z_size) {
4233 		/* past end of file */
4234 		zfs_range_unlock(rl);
4235 		ZFS_EXIT(zfsvfs);
4236 		return (0);
4237 	}
4238 
4239 	len = MIN(io_len, P2ROUNDUP(zp->z_size, PAGESIZE) - io_off);
4240 
4241 	for (off = io_off; io_off < off + len; io_off += io_len) {
4242 		if ((flags & B_INVAL) || ((flags & B_ASYNC) == 0)) {
4243 			pp = page_lookup(vp, io_off,
4244 			    (flags & (B_INVAL | B_FREE)) ? SE_EXCL : SE_SHARED);
4245 		} else {
4246 			pp = page_lookup_nowait(vp, io_off,
4247 			    (flags & B_FREE) ? SE_EXCL : SE_SHARED);
4248 		}
4249 
4250 		if (pp != NULL && pvn_getdirty(pp, flags)) {
4251 			int err;
4252 
4253 			/*
4254 			 * Found a dirty page to push
4255 			 */
4256 			err = zfs_putapage(vp, pp, &io_off, &io_len, flags, cr);
4257 			if (err)
4258 				error = err;
4259 		} else {
4260 			io_len = PAGESIZE;
4261 		}
4262 	}
4263 out:
4264 	zfs_range_unlock(rl);
4265 	if ((flags & B_ASYNC) == 0 || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
4266 		zil_commit(zfsvfs->z_log, zp->z_id);
4267 	ZFS_EXIT(zfsvfs);
4268 	return (error);
4269 }
4270 
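/*
 * Illustrative numbers for the alignment above: with an 8K block size,
 * a request for [off = 10K, len = 4K) becomes
 *
 * 	io_off = P2ALIGN(10K, 8K)	= 8K
 * 	io_len = P2ROUNDUP(4K + 2K, 8K)	= 8K
 *
 * so the range lock covers exactly the file block that zfs_putapage()
 * may push, at the cost of the wider locking noted in the XXX comment.
 */
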
4271 /*ARGSUSED*/
4272 void
4273 zfs_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct)
4274 {
4275 	znode_t	*zp = VTOZ(vp);
4276 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4277 	int error;
4278 
4279 	rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_READER);
4280 	if (zp->z_sa_hdl == NULL) {
4281 		/*
4282 		 * The fs has been unmounted, or we did a
4283 		 * suspend/resume and this file no longer exists.
4284 		 */
4285 		if (vn_has_cached_data(vp)) {
4286 			(void) pvn_vplist_dirty(vp, 0, zfs_null_putapage,
4287 			    B_INVAL, cr);
4288 		}
4289 
4290 		mutex_enter(&zp->z_lock);
4291 		mutex_enter(&vp->v_lock);
4292 		ASSERT(vp->v_count == 1);
4293 		vp->v_count = 0;
4294 		mutex_exit(&vp->v_lock);
4295 		mutex_exit(&zp->z_lock);
4296 		rw_exit(&zfsvfs->z_teardown_inactive_lock);
4297 		zfs_znode_free(zp);
4298 		return;
4299 	}
4300 
4301 	/*
4302 	 * Attempt to push any data in the page cache.  If this fails
4303 	 * we will get kicked out later in zfs_zinactive().
4304 	 */
4305 	if (vn_has_cached_data(vp)) {
4306 		(void) pvn_vplist_dirty(vp, 0, zfs_putapage, B_INVAL|B_ASYNC,
4307 		    cr);
4308 	}
4309 
4310 	if (zp->z_atime_dirty && zp->z_unlinked == 0) {
4311 		dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
4312 
4313 		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
4314 		zfs_sa_upgrade_txholds(tx, zp);
4315 		error = dmu_tx_assign(tx, TXG_WAIT);
4316 		if (error) {
4317 			dmu_tx_abort(tx);
4318 		} else {
4319 			mutex_enter(&zp->z_lock);
4320 			(void) sa_update(zp->z_sa_hdl, SA_ZPL_ATIME(zfsvfs),
4321 			    (void *)&zp->z_atime, sizeof (zp->z_atime), tx);
4322 			zp->z_atime_dirty = 0;
4323 			mutex_exit(&zp->z_lock);
4324 			dmu_tx_commit(tx);
4325 		}
4326 	}
4327 
4328 	zfs_zinactive(zp);
4329 	rw_exit(&zfsvfs->z_teardown_inactive_lock);
4330 }
4331 
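/*
 * Note that the atime flush above uses dmu_tx_assign(TXG_WAIT), which
 * sleeps until the tx can be assigned rather than failing with
 * ERESTART, so no retry loop is needed here as in the other vnode ops
 * in this file.  The assignment can still fail for other reasons
 * (e.g. ENOSPC), which is why the error is checked and the tx aborted.
 */
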
4332 /*
4333  * Bounds-check the seek operation.
4334  *
4335  *	IN:	vp	- vnode seeking within
4336  *		ooff	- old file offset
4337  *		noffp	- pointer to new file offset
4338  *		ct	- caller context
4339  *
4340  *	RETURN:	0 on success, EINVAL if new offset invalid.
4341  */
4342 /* ARGSUSED */
4343 static int
4344 zfs_seek(vnode_t *vp, offset_t ooff, offset_t *noffp,
4345     caller_context_t *ct)
4346 {
4347 	if (vp->v_type == VDIR)
4348 		return (0);
4349 	return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
4350 }
4351 
4352 /*
4353  * Pre-filter the generic locking function to trap attempts to place
4354  * a mandatory lock on a memory mapped file.
4355  */
4356 static int
4357 zfs_frlock(vnode_t *vp, int cmd, flock64_t *bfp, int flag, offset_t offset,
4358     flk_callback_t *flk_cbp, cred_t *cr, caller_context_t *ct)
4359 {
4360 	znode_t *zp = VTOZ(vp);
4361 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4362 
4363 	ZFS_ENTER(zfsvfs);
4364 	ZFS_VERIFY_ZP(zp);
4365 
4366 	/*
4367 	 * We are following the UFS semantics with respect to mapcnt
4368 	 * here: If we see that the file is mapped already, then we will
4369 	 * return an error, but we don't worry about races between this
4370 	 * function and zfs_map().
4371 	 */
4372 	if (zp->z_mapcnt > 0 && MANDMODE(zp->z_mode)) {
4373 		ZFS_EXIT(zfsvfs);
4374 		return (SET_ERROR(EAGAIN));
4375 	}
4376 	ZFS_EXIT(zfsvfs);
4377 	return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct));
4378 }
4379 
4380 /*
4381  * If we can't find a page in the cache, we will create a new page
4382  * and fill it with file data.  For efficiency, we may try to fill
4383  * multiple pages at once (klustering) to fill up the supplied page
4384  * list.  Note that the pages to be filled are held with an exclusive
4385  * lock to prevent access by other threads while they are being filled.
4386  */
4387 static int
4388 zfs_fillpage(vnode_t *vp, u_offset_t off, struct seg *seg,
4389     caddr_t addr, page_t *pl[], size_t plsz, enum seg_rw rw)
4390 {
4391 	znode_t *zp = VTOZ(vp);
4392 	page_t *pp, *cur_pp;
4393 	objset_t *os = zp->z_zfsvfs->z_os;
4394 	u_offset_t io_off, total;
4395 	size_t io_len;
4396 	int err;
4397 
4398 	if (plsz == PAGESIZE || zp->z_blksz <= PAGESIZE) {
4399 		/*
4400 		 * We only have a single page; don't bother klustering.
4401 		 */
4402 		io_off = off;
4403 		io_len = PAGESIZE;
4404 		pp = page_create_va(vp, io_off, io_len,
4405 		    PG_EXCL | PG_WAIT, seg, addr);
4406 	} else {
4407 		/*
4408 		 * Try to find enough pages to fill the page list
4409 		 */
4410 		pp = pvn_read_kluster(vp, off, seg, addr, &io_off,
4411 		    &io_len, off, plsz, 0);
4412 	}
4413 	if (pp == NULL) {
4414 		/*
4415 		 * The page already exists, nothing to do here.
4416 		 */
4417 		*pl = NULL;
4418 		return (0);
4419 	}
4420 
4421 	/*
4422 	 * Fill the pages in the kluster.
4423 	 */
4424 	cur_pp = pp;
4425 	for (total = io_off + io_len; io_off < total; io_off += PAGESIZE) {
4426 		caddr_t va;
4427 
4428 		ASSERT3U(io_off, ==, cur_pp->p_offset);
4429 		va = zfs_map_page(cur_pp, S_WRITE);
4430 		err = dmu_read(os, zp->z_id, io_off, PAGESIZE, va,
4431 		    DMU_READ_PREFETCH);
4432 		zfs_unmap_page(cur_pp, va);
4433 		if (err) {
4434 			/* On error, toss the entire kluster */
4435 			pvn_read_done(pp, B_ERROR);
4436 			/* convert checksum errors into IO errors */
4437 			if (err == ECKSUM)
4438 				err = SET_ERROR(EIO);
4439 			return (err);
4440 		}
4441 		cur_pp = cur_pp->p_next;
4442 	}
4443 
4444 	/*
4445 	 * Fill in the page list array from the kluster, starting
4446 	 * at the desired offset `off'.
4447 	 * NOTE: the page list will always be null terminated.
4448 	 */
4449 	pvn_plist_init(pp, pl, plsz, off, io_len, rw);
4450 	ASSERT(pl == NULL || (*pl)->p_offset == off);
4451 
4452 	return (0);
4453 }
4454 
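/*
 * Illustrative flow for the kluster path above: with a 128K file block
 * and a 32K (8-page) page list, pvn_read_kluster() returns exclusively
 * locked pages covering [io_off, io_off + io_len); each page is filled
 * with one PAGESIZE dmu_read(), and pvn_plist_init() then populates
 * pl[] starting at the faulting offset `off', null-terminating the
 * list as callers of zfs_getpage() expect.
 */
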
4455 /*
4456  * Return pointers to the pages for the file region [off, off + len]
4457  * in the pl array.  If plsz is greater than len, this function may
4458  * also return page pointers from after the specified region
4459  * (i.e. the region [off, off + plsz]).  These additional pages are
4460  * only returned if they are already in the cache, or were created as
4461  * part of a klustered read.
4462  *
4463  *	IN:	vp	- vnode of file to get data from.
4464  *		off	- position in file to get data from.
4465  *		len	- amount of data to retrieve.
4466  *		plsz	- length of provided page list.
4467  *		seg	- segment to obtain pages for.
4468  *		addr	- virtual address of fault.
4469  *		rw	- mode of created pages.
4470  *		cr	- credentials of caller.
4471  *		ct	- caller context.
4472  *
4473  *	OUT:	protp	- protection mode of created pages.
4474  *		pl	- list of pages created.
4475  *
4476  *	RETURN:	0 on success, error code on failure.
4477  *
4478  * Timestamps:
4479  *	vp - atime updated
4480  */
4481 /* ARGSUSED */
4482 static int
4483 zfs_getpage(vnode_t *vp, offset_t off, size_t len, uint_t *protp,
4484     page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr,
4485     enum seg_rw rw, cred_t *cr, caller_context_t *ct)
4486 {
4487 	znode_t		*zp = VTOZ(vp);
4488 	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
4489 	page_t		**pl0 = pl;
4490 	int		err = 0;
4491 
4492 	/* we do our own caching, so faultahead is unnecessary */
4493 	if (pl == NULL)
4494 		return (0);
4495 	else if (len > plsz)
4496 		len = plsz;
4497 	else
4498 		len = P2ROUNDUP(len, PAGESIZE);
4499 	ASSERT(plsz >= len);
4500 
4501 	ZFS_ENTER(zfsvfs);
4502 	ZFS_VERIFY_ZP(zp);
4503 
4504 	if (protp)
4505 		*protp = PROT_ALL;
4506 
4507 	/*
4508 	 * Loop through the requested range [off, off + len) looking
4509 	 * for pages.  If we don't find a page, we will need to create
4510 	 * a new page and fill it with data from the file.
4511 	 */
4512 	while (len > 0) {
4513 		if (*pl = page_lookup(vp, off, SE_SHARED))
4514 			*(pl+1) = NULL;
4515 		else if (err = zfs_fillpage(vp, off, seg, addr, pl, plsz, rw))
4516 			goto out;
4517 		while (*pl) {
4518 			ASSERT3U((*pl)->p_offset, ==, off);
4519 			off += PAGESIZE;
4520 			addr += PAGESIZE;
4521 			if (len > 0) {
4522 				ASSERT3U(len, >=, PAGESIZE);
4523 				len -= PAGESIZE;
4524 			}
4525 			ASSERT3U(plsz, >=, PAGESIZE);
4526 			plsz -= PAGESIZE;
4527 			pl++;
4528 		}
4529 	}
4530 
4531 	/*
4532 	 * Fill out the page array with any pages already in the cache.
4533 	 */
4534 	while (plsz > 0 &&
4535 	    (*pl++ = page_lookup_nowait(vp, off, SE_SHARED))) {
4536 			off += PAGESIZE;
4537 			plsz -= PAGESIZE;
4538 	}
4539 out:
4540 	if (err) {
4541 		/*
4542 		 * Release any pages we have previously locked.
4543 		 */
4544 		while (pl > pl0)
4545 			page_unlock(*--pl);
4546 	} else {
4547 		ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
4548 	}
4549 
4550 	*pl = NULL;
4551 
4552 	ZFS_EXIT(zfsvfs);
4553 	return (err);
4554 }
4555 
4556 /*
4557  * Request a memory map for a section of a file.  This code interacts
4558  * with common code and the VM system as follows:
4559  *
4560  * - common code calls mmap(), which ends up in smmap_common()
4561  * - this calls VOP_MAP(), which takes you into (say) zfs
4562  * - zfs_map() calls as_map(), passing segvn_create() as the callback
4563  * - segvn_create() creates the new segment and calls VOP_ADDMAP()
4564  * - zfs_addmap() updates z_mapcnt
4565  */
4566 /*ARGSUSED*/
4567 static int
4568 zfs_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp,
4569     size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr,
4570     caller_context_t *ct)
4571 {
4572 	znode_t *zp = VTOZ(vp);
4573 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4574 	segvn_crargs_t	vn_a;
4575 	int		error;
4576 
4577 	ZFS_ENTER(zfsvfs);
4578 	ZFS_VERIFY_ZP(zp);
4579 
4580 	if ((prot & PROT_WRITE) && (zp->z_pflags &
4581 	    (ZFS_IMMUTABLE | ZFS_READONLY | ZFS_APPENDONLY))) {
4582 		ZFS_EXIT(zfsvfs);
4583 		return (SET_ERROR(EPERM));
4584 	}
4585 
4586 	if ((prot & (PROT_READ | PROT_EXEC)) &&
4587 	    (zp->z_pflags & ZFS_AV_QUARANTINED)) {
4588 		ZFS_EXIT(zfsvfs);
4589 		return (SET_ERROR(EACCES));
4590 	}
4591 
4592 	if (vp->v_flag & VNOMAP) {
4593 		ZFS_EXIT(zfsvfs);
4594 		return (SET_ERROR(ENOSYS));
4595 	}
4596 
4597 	if (off < 0 || len > MAXOFFSET_T - off) {
4598 		ZFS_EXIT(zfsvfs);
4599 		return (SET_ERROR(ENXIO));
4600 	}
4601 
4602 	if (vp->v_type != VREG) {
4603 		ZFS_EXIT(zfsvfs);
4604 		return (SET_ERROR(ENODEV));
4605 	}
4606 
4607 	/*
4608 	 * If file is locked, disallow mapping.
4609 	 */
4610 	if (MANDMODE(zp->z_mode) && vn_has_flocks(vp)) {
4611 		ZFS_EXIT(zfsvfs);
4612 		return (SET_ERROR(EAGAIN));
4613 	}
4614 
4615 	as_rangelock(as);
4616 	error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
4617 	if (error != 0) {
4618 		as_rangeunlock(as);
4619 		ZFS_EXIT(zfsvfs);
4620 		return (error);
4621 	}
4622 
4623 	vn_a.vp = vp;
4624 	vn_a.offset = (u_offset_t)off;
4625 	vn_a.type = flags & MAP_TYPE;
4626 	vn_a.prot = prot;
4627 	vn_a.maxprot = maxprot;
4628 	vn_a.cred = cr;
4629 	vn_a.amp = NULL;
4630 	vn_a.flags = flags & ~MAP_TYPE;
4631 	vn_a.szc = 0;
4632 	vn_a.lgrp_mem_policy_flags = 0;
4633 
4634 	error = as_map(as, *addrp, len, segvn_create, &vn_a);
4635 
4636 	as_rangeunlock(as);
4637 	ZFS_EXIT(zfsvfs);
4638 	return (error);
4639 }
4640 
4641 /* ARGSUSED */
4642 static int
4643 zfs_addmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
4644     size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr,
4645     caller_context_t *ct)
4646 {
4647 	uint64_t pages = btopr(len);
4648 
4649 	atomic_add_64(&VTOZ(vp)->z_mapcnt, pages);
4650 	return (0);
4651 }
4652 
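/*
 * z_mapcnt accounting, illustratively (assuming a 4K PAGESIZE): an 8K
 * mapping adds btopr(8K) = 2 pages here, and zfs_delmap() below
 * subtracts the same 2, so zfs_frlock() can refuse mandatory locks
 * whenever z_mapcnt > 0.
 */
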
4653 /*
4654  * The reason we push dirty pages as part of zfs_delmap() is so that we get a
4655  * more accurate mtime for the associated file.  Since we don't have a way of
4656  * detecting when the data was actually modified, we have to resort to
4657  * heuristics.  If an explicit msync() is done, then we mark the mtime when the
4658  * last page is pushed.  The problem occurs when the msync() call is omitted,
4659  * which is by far the most common case:
4660  *
4661  * 	open()
4662  * 	mmap()
4663  * 	<modify memory>
4664  * 	munmap()
4665  * 	close()
4666  * 	<time lapse>
4667  * 	putpage() via fsflush
4668  *
4669  * If we wait until fsflush to come along, we can have a modification time that
4670  * is some arbitrary point in the future.  In order to prevent this in the
4671  * common case, we flush pages whenever a (MAP_SHARED, PROT_WRITE) mapping is
4672  * torn down.
4673  */
4674 /* ARGSUSED */
4675 static int
4676 zfs_delmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
4677     size_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cr,
4678     caller_context_t *ct)
4679 {
4680 	uint64_t pages = btopr(len);
4681 
4682 	ASSERT3U(VTOZ(vp)->z_mapcnt, >=, pages);
4683 	atomic_add_64(&VTOZ(vp)->z_mapcnt, -pages);
4684 
4685 	if ((flags & MAP_SHARED) && (prot & PROT_WRITE) &&
4686 	    vn_has_cached_data(vp))
4687 		(void) VOP_PUTPAGE(vp, off, len, B_ASYNC, cr, ct);
4688 
4689 	return (0);
4690 }
4691 
4692 /*
4693  * Free or allocate space in a file.  Currently, this function only
4694  * supports the `F_FREESP' command.  However, this command is somewhat
4695  * misnamed, as its functionality includes the ability to allocate as
4696  * well as free space.
4697  *
4698  *	IN:	vp	- vnode of file to free data in.
4699  *		cmd	- action to take (only F_FREESP supported).
4700  *		bfp	- section of file to free/alloc.
4701  *		flag	- current file open mode flags.
4702  *		offset	- current file offset.
4703  *		cr	- credentials of caller [UNUSED].
4704  *		ct	- caller context.
4705  *
4706  *	RETURN:	0 on success, error code on failure.
4707  *
4708  * Timestamps:
4709  *	vp - ctime|mtime updated
4710  */
4711 /* ARGSUSED */
4712 static int
4713 zfs_space(vnode_t *vp, int cmd, flock64_t *bfp, int flag,
4714     offset_t offset, cred_t *cr, caller_context_t *ct)
4715 {
4716 	znode_t		*zp = VTOZ(vp);
4717 	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
4718 	uint64_t	off, len;
4719 	int		error;
4720 
4721 	ZFS_ENTER(zfsvfs);
4722 	ZFS_VERIFY_ZP(zp);
4723 
4724 	if (cmd != F_FREESP) {
4725 		ZFS_EXIT(zfsvfs);
4726 		return (SET_ERROR(EINVAL));
4727 	}
4728 
4729 	if (error = convoff(vp, bfp, 0, offset)) {
4730 		ZFS_EXIT(zfsvfs);
4731 		return (error);
4732 	}
4733 
4734 	if (bfp->l_len < 0) {
4735 		ZFS_EXIT(zfsvfs);
4736 		return (SET_ERROR(EINVAL));
4737 	}
4738 
4739 	off = bfp->l_start;
4740 	len = bfp->l_len; /* 0 means from off to end of file */
4741 
4742 	error = zfs_freesp(zp, off, len, flag, TRUE);
4743 
4744 	ZFS_EXIT(zfsvfs);
4745 	return (error);
4746 }
4747 
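/*
 * A minimal sketch of how this entry point is typically driven from
 * userland (illustrative; the fd and offsets are hypothetical):
 *
 * 	struct flock64 fl;
 *
 * 	fl.l_whence = SEEK_SET;		interpreted by convoff()
 * 	fl.l_start = 4096;		start of the region
 * 	fl.l_len = 0;			0 means free to end of file
 * 	(void) fcntl(fd, F_FREESP64, &fl);
 */
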
4748 /*ARGSUSED*/
4749 static int
4750 zfs_fid(vnode_t *vp, fid_t *fidp, caller_context_t *ct)
4751 {
4752 	znode_t		*zp = VTOZ(vp);
4753 	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
4754 	uint32_t	gen;
4755 	uint64_t	gen64;
4756 	uint64_t	object = zp->z_id;
4757 	zfid_short_t	*zfid;
4758 	int		size, i, error;
4759 
4760 	ZFS_ENTER(zfsvfs);
4761 	ZFS_VERIFY_ZP(zp);
4762 
4763 	if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs),
4764 	    &gen64, sizeof (uint64_t))) != 0) {
4765 		ZFS_EXIT(zfsvfs);
4766 		return (error);
4767 	}
4768 
4769 	gen = (uint32_t)gen64;
4770 
4771 	size = (zfsvfs->z_parent != zfsvfs) ? LONG_FID_LEN : SHORT_FID_LEN;
4772 	if (fidp->fid_len < size) {
4773 		fidp->fid_len = size;
4774 		ZFS_EXIT(zfsvfs);
4775 		return (SET_ERROR(ENOSPC));
4776 	}
4777 
4778 	zfid = (zfid_short_t *)fidp;
4779 
4780 	zfid->zf_len = size;
4781 
4782 	for (i = 0; i < sizeof (zfid->zf_object); i++)
4783 		zfid->zf_object[i] = (uint8_t)(object >> (8 * i));
4784 
4785 	/* Must have a non-zero generation number to distinguish from .zfs */
4786 	if (gen == 0)
4787 		gen = 1;
4788 	for (i = 0; i < sizeof (zfid->zf_gen); i++)
4789 		zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i));
4790 
4791 	if (size == LONG_FID_LEN) {
4792 		uint64_t	objsetid = dmu_objset_id(zfsvfs->z_os);
4793 		zfid_long_t	*zlfid;
4794 
4795 		zlfid = (zfid_long_t *)fidp;
4796 
4797 		for (i = 0; i < sizeof (zlfid->zf_setid); i++)
4798 			zlfid->zf_setid[i] = (uint8_t)(objsetid >> (8 * i));
4799 
4800 		/* XXX - this should be the generation number for the objset */
4801 		for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
4802 			zlfid->zf_setgen[i] = 0;
4803 	}
4804 
4805 	ZFS_EXIT(zfsvfs);
4806 	return (0);
4807 }
4808 
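/*
 * The loops above pack the object and generation numbers one byte at a
 * time, least significant byte first, e.g. (illustrative) for object
 * number 0x1234:
 *
 * 	zf_object[0] = 0x34, zf_object[1] = 0x12, remaining bytes = 0
 *
 * which keeps the encoding of file IDs independent of host endianness.
 */
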
4809 static int
4810 zfs_pathconf(vnode_t *vp, int cmd, ulong_t *valp, cred_t *cr,
4811     caller_context_t *ct)
4812 {
4813 	znode_t		*zp, *xzp;
4814 	zfsvfs_t	*zfsvfs;
4815 	zfs_dirlock_t	*dl;
4816 	int		error;
4817 
4818 	switch (cmd) {
4819 	case _PC_LINK_MAX:
4820 		*valp = ULONG_MAX;
4821 		return (0);
4822 
4823 	case _PC_FILESIZEBITS:
4824 		*valp = 64;
4825 		return (0);
4826 
4827 	case _PC_XATTR_EXISTS:
4828 		zp = VTOZ(vp);
4829 		zfsvfs = zp->z_zfsvfs;
4830 		ZFS_ENTER(zfsvfs);
4831 		ZFS_VERIFY_ZP(zp);
4832 		*valp = 0;
4833 		error = zfs_dirent_lock(&dl, zp, "", &xzp,
4834 		    ZXATTR | ZEXISTS | ZSHARED, NULL, NULL);
4835 		if (error == 0) {
4836 			zfs_dirent_unlock(dl);
4837 			if (!zfs_dirempty(xzp))
4838 				*valp = 1;
4839 			VN_RELE(ZTOV(xzp));
4840 		} else if (error == ENOENT) {
4841 			/*
4842 			 * If the attribute directory doesn't exist, that's
4843 			 * equivalent to having zero extended attributes.
4844 			 */
4845 			error = 0;
4846 		}
4847 		ZFS_EXIT(zfsvfs);
4848 		return (error);
4849 
4850 	case _PC_SATTR_ENABLED:
4851 	case _PC_SATTR_EXISTS:
4852 		*valp = vfs_has_feature(vp->v_vfsp, VFSFT_SYSATTR_VIEWS) &&
4853 		    (vp->v_type == VREG || vp->v_type == VDIR);
4854 		return (0);
4855 
4856 	case _PC_ACCESS_FILTERING:
4857 		*valp = vfs_has_feature(vp->v_vfsp, VFSFT_ACCESS_FILTER) &&
4858 		    vp->v_type == VDIR;
4859 		return (0);
4860 
4861 	case _PC_ACL_ENABLED:
4862 		*valp = _ACL_ACE_ENABLED;
4863 		return (0);
4864 
4865 	case _PC_MIN_HOLE_SIZE:
4866 		*valp = (ulong_t)SPA_MINBLOCKSIZE;
4867 		return (0);
4868 
4869 	case _PC_TIMESTAMP_RESOLUTION:
4870 		/* nanosecond timestamp resolution */
4871 		*valp = 1L;
4872 		return (0);
4873 
4874 	default:
4875 		return (fs_pathconf(vp, cmd, valp, cr, ct));
4876 	}
4877 }
4878 
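/*
 * Userland reaches these cases through pathconf(2)/fpathconf(2); an
 * illustrative call:
 *
 * 	long bits = pathconf("/tank/file", _PC_FILESIZEBITS);	(yields 64)
 *
 * Anything not handled in the switch falls through to fs_pathconf()
 * for the generic defaults.
 */
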
4879 /*ARGSUSED*/
4880 static int
4881 zfs_getsecattr(vnode_t *vp, vsecattr_t *vsecp, int flag, cred_t *cr,
4882     caller_context_t *ct)
4883 {
4884 	znode_t *zp = VTOZ(vp);
4885 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4886 	int error;
4887 	boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
4888 
4889 	ZFS_ENTER(zfsvfs);
4890 	ZFS_VERIFY_ZP(zp);
4891 	error = zfs_getacl(zp, vsecp, skipaclchk, cr);
4892 	ZFS_EXIT(zfsvfs);
4893 
4894 	return (error);
4895 }
4896 
4897 /*ARGSUSED*/
4898 static int
4899 zfs_setsecattr(vnode_t *vp, vsecattr_t *vsecp, int flag, cred_t *cr,
4900     caller_context_t *ct)
4901 {
4902 	znode_t *zp = VTOZ(vp);
4903 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4904 	int error;
4905 	boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
4906 	zilog_t	*zilog = zfsvfs->z_log;
4907 
4908 	ZFS_ENTER(zfsvfs);
4909 	ZFS_VERIFY_ZP(zp);
4910 
4911 	error = zfs_setacl(zp, vsecp, skipaclchk, cr);
4912 
4913 	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
4914 		zil_commit(zilog, 0);
4915 
4916 	ZFS_EXIT(zfsvfs);
4917 	return (error);
4918 }
4919 
4920 /*
4921  * The smallest read for which we may consider loaning out an arcbuf.
4922  * This must be a power of 2.
4923  */
4924 int zcr_blksz_min = (1 << 10);	/* 1K */
4925 /*
4926  * If set to less than the file block size, allow loaning out of an
4927  * arcbuf for a partial block read.  This must be a power of 2.
4928  */
4929 int zcr_blksz_max = (1 << 17);	/* 128K */
4930 
4931 /*ARGSUSED*/
4932 static int
4933 zfs_reqzcbuf(vnode_t *vp, enum uio_rw ioflag, xuio_t *xuio, cred_t *cr,
4934     caller_context_t *ct)
4935 {
4936 	znode_t	*zp = VTOZ(vp);
4937 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4938 	int max_blksz = zfsvfs->z_max_blksz;
4939 	uio_t *uio = &xuio->xu_uio;
4940 	ssize_t size = uio->uio_resid;
4941 	offset_t offset = uio->uio_loffset;
4942 	int blksz;
4943 	int fullblk, i;
4944 	arc_buf_t *abuf;
4945 	ssize_t maxsize;
4946 	int preamble, postamble;
4947 
4948 	if (xuio->xu_type != UIOTYPE_ZEROCOPY)
4949 		return (SET_ERROR(EINVAL));
4950 
4951 	ZFS_ENTER(zfsvfs);
4952 	ZFS_VERIFY_ZP(zp);
4953 	switch (ioflag) {
4954 	case UIO_WRITE:
4955 		/*
4956 		 * Loan out an arc_buf for write if the write size is at least
4957 		 * max_blksz, and the file's block size is also max_blksz.
4958 		 */
4959 		blksz = max_blksz;
4960 		if (size < blksz || zp->z_blksz != blksz) {
4961 			ZFS_EXIT(zfsvfs);
4962 			return (SET_ERROR(EINVAL));
4963 		}
4964 		/*
4965 		 * Caller requests buffers for write before knowing where the
4966 		 * write offset might be (e.g. NFS TCP write).
4967 		 */
4968 		if (offset == -1) {
4969 			preamble = 0;
4970 		} else {
4971 			preamble = P2PHASE(offset, blksz);
4972 			if (preamble) {
4973 				preamble = blksz - preamble;
4974 				size -= preamble;
4975 			}
4976 		}
4977 
4978 		postamble = P2PHASE(size, blksz);
4979 		size -= postamble;
4980 
4981 		fullblk = size / blksz;
4982 		(void) dmu_xuio_init(xuio,
4983 		    (preamble != 0) + fullblk + (postamble != 0));
4984 		DTRACE_PROBE3(zfs_reqzcbuf_align, int, preamble,
4985 		    int, postamble, int,
4986 		    (preamble != 0) + fullblk + (postamble != 0));
4987 
4988 		/*
4989 		 * Have to fix iov base/len for partial buffers.  They
4990 		 * currently represent full arc_buf's.
4991 		 */
4992 		if (preamble) {
4993 			/* data begins in the middle of the arc_buf */
4994 			abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
4995 			    blksz);
4996 			ASSERT(abuf);
4997 			(void) dmu_xuio_add(xuio, abuf,
4998 			    blksz - preamble, preamble);
4999 		}
5000 
5001 		for (i = 0; i < fullblk; i++) {
5002 			abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
5003 			    blksz);
5004 			ASSERT(abuf);
5005 			(void) dmu_xuio_add(xuio, abuf, 0, blksz);
5006 		}
5007 
5008 		if (postamble) {
5009 			/* data ends in the middle of the arc_buf */
5010 			abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
5011 			    blksz);
5012 			ASSERT(abuf);
5013 			(void) dmu_xuio_add(xuio, abuf, 0, postamble);
5014 		}
5015 		break;
5016 	case UIO_READ:
5017 		/*
5018 		 * Loan out an arc_buf for read if the read size is larger than
5019 		 * the current file block size.  Block alignment is not
5020 		 * considered.  Partial arc_buf will be loaned out for read.
5021 		 */
5022 		blksz = zp->z_blksz;
5023 		if (blksz < zcr_blksz_min)
5024 			blksz = zcr_blksz_min;
5025 		if (blksz > zcr_blksz_max)
5026 			blksz = zcr_blksz_max;
5027 		/* avoid the complexity of a block size larger than max_blksz */
5028 		if (blksz > max_blksz) {
5029 			ZFS_EXIT(zfsvfs);
5030 			return (SET_ERROR(EINVAL));
5031 		}
5032 
5033 		maxsize = zp->z_size - uio->uio_loffset;
5034 		if (size > maxsize)
5035 			size = maxsize;
5036 
5037 		if (size < blksz || vn_has_cached_data(vp)) {
5038 			ZFS_EXIT(zfsvfs);
5039 			return (SET_ERROR(EINVAL));
5040 		}
5041 		break;
5042 	default:
5043 		ZFS_EXIT(zfsvfs);
5044 		return (SET_ERROR(EINVAL));
5045 	}
5046 
5047 	uio->uio_extflg = UIO_XUIO;
5048 	XUIO_XUZC_RW(xuio) = ioflag;
5049 	ZFS_EXIT(zfsvfs);
5050 	return (0);
5051 }
5052 
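/*
 * A worked example of the write-alignment math above (illustrative,
 * assuming blksz = 128K, offset = 96K, size = 512K):
 *
 * 	preamble  = 128K - P2PHASE(96K, 128K)	= 32K
 * 	size     -= preamble			-> 480K
 * 	postamble = P2PHASE(480K, 128K)		= 96K
 * 	fullblk   = (480K - 96K) / 128K		= 3
 *
 * so dmu_xuio_init() is sized for 1 + 3 + 1 = 5 arc_bufs: a partial
 * leading buffer, three full 128K blocks, and a partial trailing
 * buffer.
 */
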
5053 /*ARGSUSED*/
5054 static int
5055 zfs_retzcbuf(vnode_t *vp, xuio_t *xuio, cred_t *cr, caller_context_t *ct)
5056 {
5057 	int i;
5058 	arc_buf_t *abuf;
5059 	int ioflag = XUIO_XUZC_RW(xuio);
5060 
5061 	ASSERT(xuio->xu_type == UIOTYPE_ZEROCOPY);
5062 
5063 	i = dmu_xuio_cnt(xuio);
5064 	while (i-- > 0) {
5065 		abuf = dmu_xuio_arcbuf(xuio, i);
5066 		/*
5067 		 * if abuf == NULL, it must be a write buffer
5068 		 * that has been returned in zfs_write().
5069 		 */
5070 		if (abuf)
5071 			dmu_return_arcbuf(abuf);
5072 		ASSERT(abuf || ioflag == UIO_WRITE);
5073 	}
5074 
5075 	dmu_xuio_fini(xuio);
5076 	return (0);
5077 }
5078 
5079 /*
5080  * Predeclare these here so that the compiler assumes that
5081  * these are "old style" function declarations that do
5082  * not include arguments, so we won't get type mismatch errors
5083  * in the initializations that follow.
5084  */
5085 static int zfs_inval();
5086 static int zfs_isdir();
5087 
5088 static int
5089 zfs_inval()
5090 {
5091 	return (SET_ERROR(EINVAL));
5092 }
5093 
5094 static int
5095 zfs_isdir()
5096 {
5097 	return (SET_ERROR(EISDIR));
5098 }
5099 /*
5100  * Directory vnode operations template
5101  */
5102 vnodeops_t *zfs_dvnodeops;
5103 const fs_operation_def_t zfs_dvnodeops_template[] = {
5104 	VOPNAME_OPEN,		{ .vop_open = zfs_open },
5105 	VOPNAME_CLOSE,		{ .vop_close = zfs_close },
5106 	VOPNAME_READ,		{ .error = zfs_isdir },
5107 	VOPNAME_WRITE,		{ .error = zfs_isdir },
5108 	VOPNAME_IOCTL,		{ .vop_ioctl = zfs_ioctl },
5109 	VOPNAME_GETATTR,	{ .vop_getattr = zfs_getattr },
5110 	VOPNAME_SETATTR,	{ .vop_setattr = zfs_setattr },
5111 	VOPNAME_ACCESS,		{ .vop_access = zfs_access },
5112 	VOPNAME_LOOKUP,		{ .vop_lookup = zfs_lookup },
5113 	VOPNAME_CREATE,		{ .vop_create = zfs_create },
5114 	VOPNAME_REMOVE,		{ .vop_remove = zfs_remove },
5115 	VOPNAME_LINK,		{ .vop_link = zfs_link },
5116 	VOPNAME_RENAME,		{ .vop_rename = zfs_rename },
5117 	VOPNAME_MKDIR,		{ .vop_mkdir = zfs_mkdir },
5118 	VOPNAME_RMDIR,		{ .vop_rmdir = zfs_rmdir },
5119 	VOPNAME_READDIR,	{ .vop_readdir = zfs_readdir },
5120 	VOPNAME_SYMLINK,	{ .vop_symlink = zfs_symlink },
5121 	VOPNAME_FSYNC,		{ .vop_fsync = zfs_fsync },
5122 	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
5123 	VOPNAME_FID,		{ .vop_fid = zfs_fid },
5124 	VOPNAME_SEEK,		{ .vop_seek = zfs_seek },
5125 	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
5126 	VOPNAME_GETSECATTR,	{ .vop_getsecattr = zfs_getsecattr },
5127 	VOPNAME_SETSECATTR,	{ .vop_setsecattr = zfs_setsecattr },
5128 	VOPNAME_VNEVENT, 	{ .vop_vnevent = fs_vnevent_support },
5129 	NULL,			NULL
5130 };
5131 
5132 /*
5133  * Regular file vnode operations template
5134  */
5135 vnodeops_t *zfs_fvnodeops;
5136 const fs_operation_def_t zfs_fvnodeops_template[] = {
5137 	VOPNAME_OPEN,		{ .vop_open = zfs_open },
5138 	VOPNAME_CLOSE,		{ .vop_close = zfs_close },
5139 	VOPNAME_READ,		{ .vop_read = zfs_read },
5140 	VOPNAME_WRITE,		{ .vop_write = zfs_write },
5141 	VOPNAME_IOCTL,		{ .vop_ioctl = zfs_ioctl },
5142 	VOPNAME_GETATTR,	{ .vop_getattr = zfs_getattr },
5143 	VOPNAME_SETATTR,	{ .vop_setattr = zfs_setattr },
5144 	VOPNAME_ACCESS,		{ .vop_access = zfs_access },
5145 	VOPNAME_LOOKUP,		{ .vop_lookup = zfs_lookup },
5146 	VOPNAME_RENAME,		{ .vop_rename = zfs_rename },
5147 	VOPNAME_FSYNC,		{ .vop_fsync = zfs_fsync },
5148 	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
5149 	VOPNAME_FID,		{ .vop_fid = zfs_fid },
5150 	VOPNAME_SEEK,		{ .vop_seek = zfs_seek },
5151 	VOPNAME_FRLOCK,		{ .vop_frlock = zfs_frlock },
5152 	VOPNAME_SPACE,		{ .vop_space = zfs_space },
5153 	VOPNAME_GETPAGE,	{ .vop_getpage = zfs_getpage },
5154 	VOPNAME_PUTPAGE,	{ .vop_putpage = zfs_putpage },
5155 	VOPNAME_MAP,		{ .vop_map = zfs_map },
5156 	VOPNAME_ADDMAP,		{ .vop_addmap = zfs_addmap },
5157 	VOPNAME_DELMAP,		{ .vop_delmap = zfs_delmap },
5158 	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
5159 	VOPNAME_GETSECATTR,	{ .vop_getsecattr = zfs_getsecattr },
5160 	VOPNAME_SETSECATTR,	{ .vop_setsecattr = zfs_setsecattr },
5161 	VOPNAME_VNEVENT,	{ .vop_vnevent = fs_vnevent_support },
5162 	VOPNAME_REQZCBUF, 	{ .vop_reqzcbuf = zfs_reqzcbuf },
5163 	VOPNAME_RETZCBUF, 	{ .vop_retzcbuf = zfs_retzcbuf },
5164 	NULL,			NULL
5165 };
5166 
5167 /*
5168  * Symbolic link vnode operations template
5169  */
5170 vnodeops_t *zfs_symvnodeops;
5171 const fs_operation_def_t zfs_symvnodeops_template[] = {
5172 	VOPNAME_GETATTR,	{ .vop_getattr = zfs_getattr },
5173 	VOPNAME_SETATTR,	{ .vop_setattr = zfs_setattr },
5174 	VOPNAME_ACCESS,		{ .vop_access = zfs_access },
5175 	VOPNAME_RENAME,		{ .vop_rename = zfs_rename },
5176 	VOPNAME_READLINK,	{ .vop_readlink = zfs_readlink },
5177 	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
5178 	VOPNAME_FID,		{ .vop_fid = zfs_fid },
5179 	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
5180 	VOPNAME_VNEVENT,	{ .vop_vnevent = fs_vnevent_support },
5181 	NULL,			NULL
5182 };
5183 
5184 /*
5185  * Special share hidden file vnode operations template
5186  */
5187 vnodeops_t *zfs_sharevnodeops;
5188 const fs_operation_def_t zfs_sharevnodeops_template[] = {
5189 	VOPNAME_GETATTR,	{ .vop_getattr = zfs_getattr },
5190 	VOPNAME_ACCESS,		{ .vop_access = zfs_access },
5191 	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
5192 	VOPNAME_FID,		{ .vop_fid = zfs_fid },
5193 	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
5194 	VOPNAME_GETSECATTR,	{ .vop_getsecattr = zfs_getsecattr },
5195 	VOPNAME_SETSECATTR,	{ .vop_setsecattr = zfs_setsecattr },
5196 	VOPNAME_VNEVENT,	{ .vop_vnevent = fs_vnevent_support },
5197 	NULL,			NULL
5198 };
5199 
5200 /*
5201  * Extended attribute directory vnode operations template
5202  *
5203  * This template is identical to the directory vnode
5204  * operations template except for the restricted operations:
5205  *	VOP_MKDIR()
5206  *	VOP_SYMLINK()
5207  *
5208  * Note that there are other restrictions embedded in:
5209  *	zfs_create()	- restrict type to VREG
5210  *	zfs_link()	- no links into/out of attribute space
5211  *	zfs_rename()	- no moves into/out of attribute space
5212  */
5213 vnodeops_t *zfs_xdvnodeops;
5214 const fs_operation_def_t zfs_xdvnodeops_template[] = {
5215 	VOPNAME_OPEN,		{ .vop_open = zfs_open },
5216 	VOPNAME_CLOSE,		{ .vop_close = zfs_close },
5217 	VOPNAME_IOCTL,		{ .vop_ioctl = zfs_ioctl },
5218 	VOPNAME_GETATTR,	{ .vop_getattr = zfs_getattr },
5219 	VOPNAME_SETATTR,	{ .vop_setattr = zfs_setattr },
5220 	VOPNAME_ACCESS,		{ .vop_access = zfs_access },
5221 	VOPNAME_LOOKUP,		{ .vop_lookup = zfs_lookup },
5222 	VOPNAME_CREATE,		{ .vop_create = zfs_create },
5223 	VOPNAME_REMOVE,		{ .vop_remove = zfs_remove },
5224 	VOPNAME_LINK,		{ .vop_link = zfs_link },
5225 	VOPNAME_RENAME,		{ .vop_rename = zfs_rename },
5226 	VOPNAME_MKDIR,		{ .error = zfs_inval },
5227 	VOPNAME_RMDIR,		{ .vop_rmdir = zfs_rmdir },
5228 	VOPNAME_READDIR,	{ .vop_readdir = zfs_readdir },
5229 	VOPNAME_SYMLINK,	{ .error = zfs_inval },
5230 	VOPNAME_FSYNC,		{ .vop_fsync = zfs_fsync },
5231 	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
5232 	VOPNAME_FID,		{ .vop_fid = zfs_fid },
5233 	VOPNAME_SEEK,		{ .vop_seek = zfs_seek },
5234 	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
5235 	VOPNAME_GETSECATTR,	{ .vop_getsecattr = zfs_getsecattr },
5236 	VOPNAME_SETSECATTR,	{ .vop_setsecattr = zfs_setsecattr },
5237 	VOPNAME_VNEVENT,	{ .vop_vnevent = fs_vnevent_support },
5238 	NULL,			NULL
5239 };
5240 
5241 /*
5242  * Error vnode operations template
5243  */
5244 vnodeops_t *zfs_evnodeops;
5245 const fs_operation_def_t zfs_evnodeops_template[] = {
5246 	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
5247 	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
5248 	NULL,			NULL
5249 };
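
/*
 * These templates are converted into live vnodeops_t vectors during
 * zfs initialization with vn_make_ops(); a sketch of the conventional
 * registration (see zfs_vfsops.c):
 *
 * 	error = vn_make_ops(MNTTYPE_ZFS, zfs_dvnodeops_template,
 * 	    &zfs_dvnodeops);
 */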
5250