/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

/* Portions Copyright 2007 Jeremy Teo */
/* Portions Copyright 2011 Martin Matuska <mm@FreeBSD.org> */

#ifdef _KERNEL
#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/mntent.h>
#include <sys/u8_textprep.h>
#include <sys/dsl_dataset.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/kmem.h>
#include <sys/errno.h>
#include <sys/unistd.h>
#include <sys/atomic.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_rlock.h>
#include <sys/zfs_fuid.h>
#include <sys/dnode.h>
#include <sys/fs/zfs.h>
#endif /* _KERNEL */

#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_tx.h>
#include <sys/zfs_refcount.h>
#include <sys/stat.h>
#include <sys/zap.h>
#include <sys/zfs_znode.h>
#include <sys/sa.h>
#include <sys/zfs_sa.h>
#include <sys/zfs_stat.h>

#include "zfs_prop.h"
#include "zfs_comutil.h"

/* Used by fstat(1). */
SYSCTL_INT(_debug_sizeof, OID_AUTO, znode, CTLFLAG_RD,
	SYSCTL_NULL_INT_PTR, sizeof (znode_t), "sizeof(znode_t)");

/*
 * Define ZNODE_STATS to turn on statistic gathering. By default, it is only
 * turned on when ZFS_DEBUG is also defined.
 */
#ifdef	ZFS_DEBUG
#define	ZNODE_STATS
#endif	/* ZFS_DEBUG */

#ifdef	ZNODE_STATS
#define	ZNODE_STAT_ADD(stat)			((stat)++)
#else
#define	ZNODE_STAT_ADD(stat)			/* nothing */
#endif	/* ZNODE_STATS */

/*
 * Functions needed for userland (i.e., libzpool) are not put under
 * #ifdef _KERNEL; the rest of the functions have dependencies
 * (such as VFS logic) that will not compile easily in userland.
 */
#ifdef _KERNEL
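/*
 * Note: when SMR is available, znodes are allocated from an SMR-protected
 * UMA zone so that the VFS lockless lookup path (MNTK_FPLOOKUP) may safely
 * inspect a znode that is concurrently being freed; see the KASSERT in
 * zfs_znode_alloc() below.
 */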
#if !defined(KMEM_DEBUG) && __FreeBSD_version >= 1300102
#define	_ZFS_USE_SMR
static uma_zone_t znode_uma_zone;
#else
static kmem_cache_t *znode_cache = NULL;
#endif

extern struct vop_vector zfs_vnodeops;
extern struct vop_vector zfs_fifoops;
extern struct vop_vector zfs_shareops;

/*
 * This callback is invoked when acquiring a RL_WRITER or RL_APPEND lock on
 * z_rangelock. It will modify the offset and length of the lock to reflect
 * znode-specific information, and convert RL_APPEND to RL_WRITER.  This is
 * called with the rangelock_t's rl_lock held, which avoids races.
 */
static void
zfs_rangelock_cb(zfs_locked_range_t *new, void *arg)
{
	znode_t *zp = arg;

	/*
	 * If in append mode, convert to writer and lock starting at the
	 * current end of file.
	 */
	if (new->lr_type == RL_APPEND) {
		new->lr_offset = zp->z_size;
		new->lr_type = RL_WRITER;
	}

	/*
	 * If we need to grow the block size then lock the whole file range.
	 */
	uint64_t end_size = MAX(zp->z_size, new->lr_offset + new->lr_length);
	if (end_size > zp->z_blksz && (!ISP2(zp->z_blksz) ||
	    zp->z_blksz < ZTOZSB(zp)->z_max_blksz)) {
		new->lr_offset = 0;
		new->lr_length = UINT64_MAX;
	}
}

static int
zfs_znode_cache_constructor(void *buf, void *arg, int kmflags)
{
	znode_t *zp = buf;

	POINTER_INVALIDATE(&zp->z_zfsvfs);

	list_link_init(&zp->z_link_node);

	mutex_init(&zp->z_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&zp->z_acl_lock, NULL, MUTEX_DEFAULT, NULL);
	rw_init(&zp->z_xattr_lock, NULL, RW_DEFAULT, NULL);

	zfs_rangelock_init(&zp->z_rangelock, zfs_rangelock_cb, zp);

	zp->z_acl_cached = NULL;
	zp->z_xattr_cached = NULL;
	zp->z_xattr_parent = 0;
	zp->z_vnode = NULL;
	zp->z_sync_writes_cnt = 0;
	zp->z_async_writes_cnt = 0;

	return (0);
}

static void
zfs_znode_cache_destructor(void *buf, void *arg)
{
	(void) arg;
	znode_t *zp = buf;

	ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs));
	ASSERT3P(zp->z_vnode, ==, NULL);
	ASSERT(!list_link_active(&zp->z_link_node));
	mutex_destroy(&zp->z_lock);
	mutex_destroy(&zp->z_acl_lock);
	rw_destroy(&zp->z_xattr_lock);
	zfs_rangelock_fini(&zp->z_rangelock);

	ASSERT3P(zp->z_acl_cached, ==, NULL);
	ASSERT3P(zp->z_xattr_cached, ==, NULL);

	ASSERT0(atomic_load_32(&zp->z_sync_writes_cnt));
	ASSERT0(atomic_load_32(&zp->z_async_writes_cnt));
}

#ifdef _ZFS_USE_SMR
VFS_SMR_DECLARE;

static int
zfs_znode_cache_constructor_smr(void *mem, int size __unused, void *private,
    int flags)
{
	return (zfs_znode_cache_constructor(mem, private, flags));
}

static void
zfs_znode_cache_destructor_smr(void *mem, int size __unused, void *private)
{
	zfs_znode_cache_destructor(mem, private);
}

void
zfs_znode_init(void)
{
	/*
	 * Initialize the znode cache.
	 */
	ASSERT3P(znode_uma_zone, ==, NULL);
	znode_uma_zone = uma_zcreate("zfs_znode_cache",
	    sizeof (znode_t), zfs_znode_cache_constructor_smr,
	    zfs_znode_cache_destructor_smr, NULL, NULL, 0, 0);
	VFS_SMR_ZONE_SET(znode_uma_zone);
}

static znode_t *
zfs_znode_alloc_kmem(int flags)
{
	return (uma_zalloc_smr(znode_uma_zone, flags));
}

static void
zfs_znode_free_kmem(znode_t *zp)
{
	if (zp->z_xattr_cached) {
		nvlist_free(zp->z_xattr_cached);
		zp->z_xattr_cached = NULL;
	}
	uma_zfree_smr(znode_uma_zone, zp);
}
#else
void
zfs_znode_init(void)
{
	/*
	 * Initialize the znode cache.
	 */
	ASSERT3P(znode_cache, ==, NULL);
	znode_cache = kmem_cache_create("zfs_znode_cache",
	    sizeof (znode_t), 0, zfs_znode_cache_constructor,
	    zfs_znode_cache_destructor, NULL, NULL, NULL, 0);
}

static znode_t *
zfs_znode_alloc_kmem(int flags)
{
	return (kmem_cache_alloc(znode_cache, flags));
}

static void
zfs_znode_free_kmem(znode_t *zp)
{
	if (zp->z_xattr_cached) {
		nvlist_free(zp->z_xattr_cached);
		zp->z_xattr_cached = NULL;
	}
	kmem_cache_free(znode_cache, zp);
}
#endif

void
zfs_znode_fini(void)
{
	/*
	 * Clean up the znode cache.
	 */
#ifdef _ZFS_USE_SMR
	if (znode_uma_zone) {
		uma_zdestroy(znode_uma_zone);
		znode_uma_zone = NULL;
	}
#else
	if (znode_cache) {
		kmem_cache_destroy(znode_cache);
		znode_cache = NULL;
	}
#endif
}

static int
zfs_create_share_dir(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
{
	zfs_acl_ids_t acl_ids;
	vattr_t vattr;
	znode_t *sharezp;
	znode_t *zp;
	int error;

	vattr.va_mask = AT_MODE|AT_UID|AT_GID;
	vattr.va_type = VDIR;
	vattr.va_mode = S_IFDIR|0555;
	vattr.va_uid = crgetuid(kcred);
	vattr.va_gid = crgetgid(kcred);

	sharezp = zfs_znode_alloc_kmem(KM_SLEEP);
	ASSERT(!POINTER_IS_VALID(sharezp->z_zfsvfs));
	sharezp->z_unlinked = 0;
	sharezp->z_atime_dirty = 0;
	sharezp->z_zfsvfs = zfsvfs;
	sharezp->z_is_sa = zfsvfs->z_use_sa;

	VERIFY0(zfs_acl_ids_create(sharezp, IS_ROOT_NODE, &vattr,
	    kcred, NULL, &acl_ids, NULL));
	zfs_mknode(sharezp, &vattr, tx, kcred, IS_ROOT_NODE, &zp, &acl_ids);
	ASSERT3P(zp, ==, sharezp);
	POINTER_INVALIDATE(&sharezp->z_zfsvfs);
	error = zap_add(zfsvfs->z_os, MASTER_NODE_OBJ,
	    ZFS_SHARES_DIR, 8, 1, &sharezp->z_id, tx);
	zfsvfs->z_shares_dir = sharezp->z_id;

	zfs_acl_ids_free(&acl_ids);
	sa_handle_destroy(sharezp->z_sa_hdl);
	zfs_znode_free_kmem(sharezp);

	return (error);
}

/*
 * Define a couple of values we need available
 * for both 64 and 32 bit environments.
 */
#ifndef NBITSMINOR64
#define	NBITSMINOR64	32
#endif
#ifndef MAXMAJ64
#define	MAXMAJ64	0xffffffffUL
#endif
#ifndef	MAXMIN64
#define	MAXMIN64	0xffffffffUL
#endif

/*
 * Create special expldev for ZFS private use.
 * Can't use standard expldev since it doesn't do
 * what we want.  The standard expldev() takes a
 * dev32_t in LP64 and expands it to a long dev_t.
 * We need an interface that takes a dev32_t in ILP32
 * and expands it to a long dev_t.
 */
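/*
 * For example, a dev_t with major 8 and minor 1 expands to
 * 0x0000000800000001: the major number occupies the upper 32 bits and the
 * minor number the low NBITSMINOR64 bits.  zfs_cmpldev() below reverses
 * this encoding.
 */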
static uint64_t
zfs_expldev(dev_t dev)
{
	return (((uint64_t)major(dev) << NBITSMINOR64) | minor(dev));
}
/*
 * Special cmpldev for ZFS private use.
 * Can't use standard cmpldev since it takes
 * a long dev_t and compresses it to dev32_t in
 * LP64.  We need to do a compaction of a long dev_t
 * to a dev32_t in ILP32.
 */
dev_t
zfs_cmpldev(uint64_t dev)
{
	return (makedev((dev >> NBITSMINOR64), (dev & MAXMIN64)));
}

static void
zfs_znode_sa_init(zfsvfs_t *zfsvfs, znode_t *zp,
    dmu_buf_t *db, dmu_object_type_t obj_type, sa_handle_t *sa_hdl)
{
	ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs) || (zfsvfs == zp->z_zfsvfs));
	ASSERT(MUTEX_HELD(ZFS_OBJ_MUTEX(zfsvfs, zp->z_id)));

	ASSERT3P(zp->z_sa_hdl, ==, NULL);
	ASSERT3P(zp->z_acl_cached, ==, NULL);
	if (sa_hdl == NULL) {
		VERIFY0(sa_handle_get_from_db(zfsvfs->z_os, db, zp,
		    SA_HDL_SHARED, &zp->z_sa_hdl));
	} else {
		zp->z_sa_hdl = sa_hdl;
		sa_set_userp(sa_hdl, zp);
	}

	zp->z_is_sa = (obj_type == DMU_OT_SA) ? B_TRUE : B_FALSE;

	/*
	 * Slap on VROOT if we are the root znode unless we are the root
	 * node of a snapshot mounted under .zfs.
	 */
	if (zp->z_id == zfsvfs->z_root && zfsvfs->z_parent == zfsvfs)
		ZTOV(zp)->v_flag |= VROOT;

	vn_exists(ZTOV(zp));
}

void
zfs_znode_dmu_fini(znode_t *zp)
{
	ASSERT(MUTEX_HELD(ZFS_OBJ_MUTEX(zp->z_zfsvfs, zp->z_id)) ||
	    ZFS_TEARDOWN_INACTIVE_WRITE_HELD(zp->z_zfsvfs));

	sa_handle_destroy(zp->z_sa_hdl);
	zp->z_sa_hdl = NULL;
}

static void
zfs_vnode_forget(vnode_t *vp)
{

	/* copied from insmntque_stddtr */
	vp->v_data = NULL;
	vp->v_op = &dead_vnodeops;
	vgone(vp);
	vput(vp);
}

/*
 * Construct a new znode/vnode and initialize.
 *
 * This does not call dmu_set_user(); that is up to the
 * caller to do, in case you don't want to return the
 * znode.
 */
static znode_t *
zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz,
    dmu_object_type_t obj_type, sa_handle_t *hdl)
{
	znode_t	*zp;
	vnode_t *vp;
	uint64_t mode;
	uint64_t parent;
#ifdef notyet
	uint64_t mtime[2], ctime[2];
#endif
	uint64_t projid = ZFS_DEFAULT_PROJID;
	sa_bulk_attr_t bulk[9];
	int count = 0;
	int error;

	zp = zfs_znode_alloc_kmem(KM_SLEEP);

#ifndef _ZFS_USE_SMR
	KASSERT((zfsvfs->z_parent->z_vfs->mnt_kern_flag & MNTK_FPLOOKUP) == 0,
	    ("%s: fast path lookup enabled without smr", __func__));
#endif

#if __FreeBSD_version >= 1300076
	KASSERT(curthread->td_vp_reserved != NULL,
	    ("zfs_znode_alloc: getnewvnode without any vnodes reserved"));
#else
	KASSERT(curthread->td_vp_reserv > 0,
	    ("zfs_znode_alloc: getnewvnode without any vnodes reserved"));
#endif
	error = getnewvnode("zfs", zfsvfs->z_parent->z_vfs, &zfs_vnodeops, &vp);
	if (error != 0) {
		zfs_znode_free_kmem(zp);
		return (NULL);
	}
	zp->z_vnode = vp;
	vp->v_data = zp;

	/*
	 * Acquire the vnode lock before any possible interaction with the
	 * outside world.  Specifically, there is an error path that calls
	 * zfs_vnode_forget() and the vnode should be exclusively locked.
	 */
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs));

	zp->z_sa_hdl = NULL;
	zp->z_unlinked = 0;
	zp->z_atime_dirty = 0;
	zp->z_mapcnt = 0;
	zp->z_id = db->db_object;
	zp->z_blksz = blksz;
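	/* Initial sequence number; 0x7A4653 spells "zFS" in ASCII. */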
	zp->z_seq = 0x7A4653;
	zp->z_sync_cnt = 0;
	zp->z_sync_writes_cnt = 0;
	zp->z_async_writes_cnt = 0;
#if __FreeBSD_version >= 1300139
	atomic_store_ptr(&zp->z_cached_symlink, NULL);
#endif

	zfs_znode_sa_init(zfsvfs, zp, db, obj_type, hdl);

	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL, &zp->z_gen, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
	    &zp->z_size, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
	    &zp->z_links, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
	    &zp->z_pflags, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL, &parent, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
	    &zp->z_atime, 16);
#ifdef notyet
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
	    &mtime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
	    &ctime, 16);
#endif
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
	    &zp->z_uid, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
	    &zp->z_gid, 8);

	if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count) != 0 || zp->z_gen == 0 ||
	    (dmu_objset_projectquota_enabled(zfsvfs->z_os) &&
	    (zp->z_pflags & ZFS_PROJID) &&
	    sa_lookup(zp->z_sa_hdl, SA_ZPL_PROJID(zfsvfs), &projid, 8) != 0)) {
		if (hdl == NULL)
			sa_handle_destroy(zp->z_sa_hdl);
		zfs_vnode_forget(vp);
		zp->z_vnode = NULL;
		zfs_znode_free_kmem(zp);
		return (NULL);
	}

	zp->z_projid = projid;
	zp->z_mode = mode;

	/* Cache the xattr parent id */
	if (zp->z_pflags & ZFS_XATTR)
		zp->z_xattr_parent = parent;

	vp->v_type = IFTOVT((mode_t)mode);

	switch (vp->v_type) {
	case VDIR:
		zp->z_zn_prefetch = B_TRUE; /* z_prefetch default is enabled */
		break;
	case VFIFO:
		vp->v_op = &zfs_fifoops;
		break;
	case VREG:
		if (parent == zfsvfs->z_shares_dir) {
			ASSERT0(zp->z_uid);
			ASSERT0(zp->z_gid);
			vp->v_op = &zfs_shareops;
		}
		break;
	default:
		break;
	}

	mutex_enter(&zfsvfs->z_znodes_lock);
	list_insert_tail(&zfsvfs->z_all_znodes, zp);
	zfsvfs->z_nr_znodes++;
	zp->z_zfsvfs = zfsvfs;
	mutex_exit(&zfsvfs->z_znodes_lock);

#if __FreeBSD_version >= 1400077
	vn_set_state(vp, VSTATE_CONSTRUCTED);
#endif
	VN_LOCK_AREC(vp);
	if (vp->v_type != VFIFO)
		VN_LOCK_ASHARE(vp);

	return (zp);
}

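/*
 * Zero-filled templates used to populate the fixed znode_phys_t layout of
 * newly created legacy (DMU_OT_ZNODE) objects in zfs_mknode().
 */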
static uint64_t empty_xattr;
static uint64_t pad[4];
static zfs_acl_phys_t acl_phys;
/*
 * Create a new DMU object to hold a zfs znode.
 *
 *	IN:	dzp	- parent directory for new znode
 *		vap	- file attributes for new znode
 *		tx	- dmu transaction id for zap operations
 *		cr	- credentials of caller
 *		flag	- flags:
 *			  IS_ROOT_NODE	- new object will be root
 *			  IS_XATTR	- new object is an attribute
 *		bonuslen - length of bonus buffer
 *		setaclp  - File/Dir initial ACL
 *		fuidp	 - Tracks fuid allocation.
 *
 *	OUT:	zpp	- allocated znode
 *
 */
void
zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
    uint_t flag, znode_t **zpp, zfs_acl_ids_t *acl_ids)
{
	uint64_t	crtime[2], atime[2], mtime[2], ctime[2];
	uint64_t	mode, size, links, parent, pflags;
	uint64_t	dzp_pflags = 0;
	uint64_t	rdev = 0;
	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
	dmu_buf_t	*db;
	timestruc_t	now;
	uint64_t	gen, obj;
	int		bonuslen;
	int		dnodesize;
	sa_handle_t	*sa_hdl;
	dmu_object_type_t obj_type;
	sa_bulk_attr_t	*sa_attrs;
	int		cnt = 0;
	zfs_acl_locator_cb_t locate = { 0 };

	ASSERT3P(vap, !=, NULL);
	ASSERT3U((vap->va_mask & AT_MODE), ==, AT_MODE);

	if (zfsvfs->z_replay) {
		obj = vap->va_nodeid;
		now = vap->va_ctime;		/* see zfs_replay_create() */
		gen = vap->va_nblocks;		/* ditto */
		dnodesize = vap->va_fsid;	/* ditto */
	} else {
		obj = 0;
		vfs_timestamp(&now);
		gen = dmu_tx_get_txg(tx);
		dnodesize = dmu_objset_dnodesize(zfsvfs->z_os);
	}

	if (dnodesize == 0)
		dnodesize = DNODE_MIN_SIZE;

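	/*
	 * SA-style znodes may use the entire bonus buffer of the (possibly
	 * enlarged) dnode, while legacy znodes are limited to the fixed
	 * znode_phys_t layout.
	 */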
	obj_type = zfsvfs->z_use_sa ? DMU_OT_SA : DMU_OT_ZNODE;
	bonuslen = (obj_type == DMU_OT_SA) ?
	    DN_BONUS_SIZE(dnodesize) : ZFS_OLD_ZNODE_PHYS_SIZE;

	/*
	 * Create a new DMU object.
	 *
	 * There's currently no mechanism for pre-reading the blocks that will
	 * be needed to allocate a new object, so we accept the small chance
	 * that there will be an i/o error and we will fail one of the
	 * assertions below.
	 */
	if (vap->va_type == VDIR) {
		if (zfsvfs->z_replay) {
			VERIFY0(zap_create_claim_norm_dnsize(zfsvfs->z_os, obj,
			    zfsvfs->z_norm, DMU_OT_DIRECTORY_CONTENTS,
			    obj_type, bonuslen, dnodesize, tx));
		} else {
			obj = zap_create_norm_dnsize(zfsvfs->z_os,
			    zfsvfs->z_norm, DMU_OT_DIRECTORY_CONTENTS,
			    obj_type, bonuslen, dnodesize, tx);
		}
	} else {
		if (zfsvfs->z_replay) {
			VERIFY0(dmu_object_claim_dnsize(zfsvfs->z_os, obj,
			    DMU_OT_PLAIN_FILE_CONTENTS, 0,
			    obj_type, bonuslen, dnodesize, tx));
		} else {
			obj = dmu_object_alloc_dnsize(zfsvfs->z_os,
			    DMU_OT_PLAIN_FILE_CONTENTS, 0,
			    obj_type, bonuslen, dnodesize, tx);
		}
	}

	ZFS_OBJ_HOLD_ENTER(zfsvfs, obj);
	VERIFY0(sa_buf_hold(zfsvfs->z_os, obj, NULL, &db));

	/*
	 * If this is the root, fix up the half-initialized parent pointer
	 * to reference the just-allocated physical data area.
	 */
	if (flag & IS_ROOT_NODE) {
		dzp->z_id = obj;
	} else {
		dzp_pflags = dzp->z_pflags;
	}

	/*
	 * If parent is an xattr, so am I.
	 */
	if (dzp_pflags & ZFS_XATTR) {
		flag |= IS_XATTR;
	}

	if (zfsvfs->z_use_fuids)
		pflags = ZFS_ARCHIVE | ZFS_AV_MODIFIED;
	else
		pflags = 0;

	if (vap->va_type == VDIR) {
		size = 2;		/* contents ("." and "..") */
		links = (flag & (IS_ROOT_NODE | IS_XATTR)) ? 2 : 1;
	} else {
		size = links = 0;
	}

	if (vap->va_type == VBLK || vap->va_type == VCHR) {
		rdev = zfs_expldev(vap->va_rdev);
	}

	parent = dzp->z_id;
	mode = acl_ids->z_mode;
	if (flag & IS_XATTR)
		pflags |= ZFS_XATTR;

	/*
	 * Whether ZFS_NO_EXECS_DENIED applies will be determined when
	 * zfs_mode_compute() is called.
	 */
	pflags |= acl_ids->z_aclp->z_hints &
	    (ZFS_ACL_TRIVIAL|ZFS_INHERIT_ACE|ZFS_ACL_AUTO_INHERIT|
	    ZFS_ACL_DEFAULTED|ZFS_ACL_PROTECTED);

	ZFS_TIME_ENCODE(&now, crtime);
	ZFS_TIME_ENCODE(&now, ctime);

	if (vap->va_mask & AT_ATIME) {
		ZFS_TIME_ENCODE(&vap->va_atime, atime);
	} else {
		ZFS_TIME_ENCODE(&now, atime);
	}

	if (vap->va_mask & AT_MTIME) {
		ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
	} else {
		ZFS_TIME_ENCODE(&now, mtime);
	}

	/* Now add in all of the "SA" attributes */
	VERIFY0(sa_handle_get_from_db(zfsvfs->z_os, db, NULL, SA_HDL_SHARED,
	    &sa_hdl));

	/*
	 * Set up the array of attributes to be replaced/set on the new file.
	 *
	 * The order for DMU_OT_ZNODE is critical since it needs to be
	 * constructed in the old znode_phys_t format.  Don't change this
	 * ordering.
	 */
	sa_attrs = kmem_alloc(sizeof (sa_bulk_attr_t) * ZPL_END, KM_SLEEP);

	if (obj_type == DMU_OT_ZNODE) {
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zfsvfs),
		    NULL, &atime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zfsvfs),
		    NULL, &mtime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zfsvfs),
		    NULL, &ctime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zfsvfs),
		    NULL, &crtime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zfsvfs),
		    NULL, &gen, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zfsvfs),
		    NULL, &mode, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zfsvfs),
		    NULL, &size, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zfsvfs),
		    NULL, &parent, 8);
	} else {
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zfsvfs),
		    NULL, &mode, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zfsvfs),
		    NULL, &size, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zfsvfs),
		    NULL, &gen, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zfsvfs),
		    NULL, &acl_ids->z_fuid, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zfsvfs),
		    NULL, &acl_ids->z_fgid, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zfsvfs),
		    NULL, &parent, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zfsvfs),
		    NULL, &pflags, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zfsvfs),
		    NULL, &atime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zfsvfs),
		    NULL, &mtime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zfsvfs),
		    NULL, &ctime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zfsvfs),
		    NULL, &crtime, 16);
	}

	SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_LINKS(zfsvfs), NULL, &links, 8);

	if (obj_type == DMU_OT_ZNODE) {
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_XATTR(zfsvfs), NULL,
		    &empty_xattr, 8);
	}
	if (obj_type == DMU_OT_ZNODE ||
	    (vap->va_type == VBLK || vap->va_type == VCHR)) {
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_RDEV(zfsvfs),
		    NULL, &rdev, 8);
	}
	if (obj_type == DMU_OT_ZNODE) {
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zfsvfs),
		    NULL, &pflags, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zfsvfs), NULL,
		    &acl_ids->z_fuid, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zfsvfs), NULL,
		    &acl_ids->z_fgid, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PAD(zfsvfs), NULL, pad,
		    sizeof (uint64_t) * 4);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ZNODE_ACL(zfsvfs), NULL,
		    &acl_phys, sizeof (zfs_acl_phys_t));
	} else if (acl_ids->z_aclp->z_version >= ZFS_ACL_VERSION_FUID) {
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_COUNT(zfsvfs), NULL,
		    &acl_ids->z_aclp->z_acl_count, 8);
		locate.cb_aclp = acl_ids->z_aclp;
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_ACES(zfsvfs),
		    zfs_acl_data_locator, &locate,
		    acl_ids->z_aclp->z_acl_bytes);
		mode = zfs_mode_compute(mode, acl_ids->z_aclp, &pflags,
		    acl_ids->z_fuid, acl_ids->z_fgid);
	}

	VERIFY0(sa_replace_all_by_template(sa_hdl, sa_attrs, cnt, tx));

	if (!(flag & IS_ROOT_NODE)) {
		*zpp = zfs_znode_alloc(zfsvfs, db, 0, obj_type, sa_hdl);
		ASSERT3P(*zpp, !=, NULL);
	} else {
		/*
		 * If we are creating the root node, the "parent" we
		 * passed in is the znode for the root.
		 */
		*zpp = dzp;

		(*zpp)->z_sa_hdl = sa_hdl;
	}

	(*zpp)->z_pflags = pflags;
	(*zpp)->z_mode = mode;
	(*zpp)->z_dnodesize = dnodesize;

	if (vap->va_mask & AT_XVATTR)
		zfs_xvattr_set(*zpp, (xvattr_t *)vap, tx);

	if (obj_type == DMU_OT_ZNODE ||
	    acl_ids->z_aclp->z_version < ZFS_ACL_VERSION_FUID) {
		VERIFY0(zfs_aclset_common(*zpp, acl_ids->z_aclp, cr, tx));
	}
	if (!(flag & IS_ROOT_NODE)) {
		vnode_t *vp = ZTOV(*zpp);
		vp->v_vflag |= VV_FORCEINSMQ;
		int err = insmntque(vp, zfsvfs->z_vfs);
		vp->v_vflag &= ~VV_FORCEINSMQ;
		(void) err;
		KASSERT(err == 0, ("insmntque() failed: error %d", err));
	}
	kmem_free(sa_attrs, sizeof (sa_bulk_attr_t) * ZPL_END);
	ZFS_OBJ_HOLD_EXIT(zfsvfs, obj);
}

/*
 * Update in-core attributes.  It is assumed the caller will be doing an
 * sa_bulk_update to push the changes out.
 */
void
zfs_xvattr_set(znode_t *zp, xvattr_t *xvap, dmu_tx_t *tx)
{
	xoptattr_t *xoap;

	xoap = xva_getxoptattr(xvap);
	ASSERT3P(xoap, !=, NULL);

	if (zp->z_zfsvfs->z_replay == B_FALSE) {
		ASSERT_VOP_IN_SEQC(ZTOV(zp));
	}

	if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) {
		uint64_t times[2];
		ZFS_TIME_ENCODE(&xoap->xoa_createtime, times);
		(void) sa_update(zp->z_sa_hdl, SA_ZPL_CRTIME(zp->z_zfsvfs),
		    &times, sizeof (times), tx);
		XVA_SET_RTN(xvap, XAT_CREATETIME);
	}
	if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
		ZFS_ATTR_SET(zp, ZFS_READONLY, xoap->xoa_readonly,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_READONLY);
	}
	if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
		ZFS_ATTR_SET(zp, ZFS_HIDDEN, xoap->xoa_hidden,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_HIDDEN);
	}
	if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
		ZFS_ATTR_SET(zp, ZFS_SYSTEM, xoap->xoa_system,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_SYSTEM);
	}
	if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
		ZFS_ATTR_SET(zp, ZFS_ARCHIVE, xoap->xoa_archive,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_ARCHIVE);
	}
	if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
		ZFS_ATTR_SET(zp, ZFS_IMMUTABLE, xoap->xoa_immutable,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_IMMUTABLE);
	}
	if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
		ZFS_ATTR_SET(zp, ZFS_NOUNLINK, xoap->xoa_nounlink,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_NOUNLINK);
	}
	if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
		ZFS_ATTR_SET(zp, ZFS_APPENDONLY, xoap->xoa_appendonly,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_APPENDONLY);
	}
	if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
		ZFS_ATTR_SET(zp, ZFS_NODUMP, xoap->xoa_nodump,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_NODUMP);
	}
	if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
		ZFS_ATTR_SET(zp, ZFS_OPAQUE, xoap->xoa_opaque,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_OPAQUE);
	}
	if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
		ZFS_ATTR_SET(zp, ZFS_AV_QUARANTINED,
		    xoap->xoa_av_quarantined, zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);
	}
	if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
		ZFS_ATTR_SET(zp, ZFS_AV_MODIFIED, xoap->xoa_av_modified,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_AV_MODIFIED);
	}
	if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) {
		zfs_sa_set_scanstamp(zp, xvap, tx);
		XVA_SET_RTN(xvap, XAT_AV_SCANSTAMP);
	}
	if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
		ZFS_ATTR_SET(zp, ZFS_REPARSE, xoap->xoa_reparse,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_REPARSE);
	}
	if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) {
		ZFS_ATTR_SET(zp, ZFS_OFFLINE, xoap->xoa_offline,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_OFFLINE);
	}
	if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) {
		ZFS_ATTR_SET(zp, ZFS_SPARSE, xoap->xoa_sparse,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_SPARSE);
	}
}

int
zfs_zget(zfsvfs_t *zfsvfs, uint64_t obj_num, znode_t **zpp)
{
	dmu_object_info_t doi;
	dmu_buf_t	*db;
	znode_t		*zp;
	vnode_t		*vp;
	sa_handle_t	*hdl;
	int locked;
	int err;

	getnewvnode_reserve_();
again:
	*zpp = NULL;
	ZFS_OBJ_HOLD_ENTER(zfsvfs, obj_num);

	err = sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db);
	if (err) {
		ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
		getnewvnode_drop_reserve();
		return (err);
	}

	dmu_object_info_from_db(db, &doi);
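	/*
	 * A valid ZPL object keeps its attributes either in a DMU_OT_SA
	 * bonus buffer or in a legacy DMU_OT_ZNODE bonus buffer large
	 * enough to hold a znode_phys_t.
	 */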
	if (doi.doi_bonus_type != DMU_OT_SA &&
	    (doi.doi_bonus_type != DMU_OT_ZNODE ||
	    (doi.doi_bonus_type == DMU_OT_ZNODE &&
	    doi.doi_bonus_size < sizeof (znode_phys_t)))) {
		sa_buf_rele(db, NULL);
		ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
		getnewvnode_drop_reserve();
		return (SET_ERROR(EINVAL));
	}

	hdl = dmu_buf_get_user(db);
	if (hdl != NULL) {
		zp = sa_get_userdata(hdl);

		/*
		 * Since "SA" does immediate eviction we
		 * should never find an SA handle that doesn't
		 * know about the znode.
		 */
		ASSERT3P(zp, !=, NULL);
		ASSERT3U(zp->z_id, ==, obj_num);
		if (zp->z_unlinked) {
			err = SET_ERROR(ENOENT);
		} else {
			vp = ZTOV(zp);
			/*
			 * Don't let the vnode disappear after
			 * ZFS_OBJ_HOLD_EXIT.
			 */
			VN_HOLD(vp);
			*zpp = zp;
			err = 0;
		}

		sa_buf_rele(db, NULL);
		ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);

		if (err) {
			getnewvnode_drop_reserve();
			return (err);
		}

		locked = VOP_ISLOCKED(vp);
		VI_LOCK(vp);
		if (VN_IS_DOOMED(vp) && locked != LK_EXCLUSIVE) {
			/*
			 * The vnode is doomed and this thread doesn't
			 * hold the exclusive lock on it, so the vnode
			 * must be being reclaimed by another thread.
			 * Otherwise the doomed vnode is being reclaimed
			 * by this thread and zfs_zget is called from
			 * ZIL internals.
			 */
			VI_UNLOCK(vp);

			/*
			 * XXX vrele() locks the vnode when the last reference
			 * is dropped.  Although in this case the vnode is
			 * doomed / dead and so no inactivation is required,
			 * the vnode lock is still acquired.  That could result
			 * in a LOR with z_teardown_lock if another thread holds
			 * the vnode's lock and tries to take z_teardown_lock.
			 * But that is only possible if the other thread performs
			 * a ZFS vnode operation on the vnode.  That either
			 * should not happen if the vnode is dead or the thread
			 * should also have a reference to the vnode and thus
			 * our reference is not last.
			 */
			VN_RELE(vp);
			goto again;
		}
		VI_UNLOCK(vp);
		getnewvnode_drop_reserve();
		return (err);
	}

	/*
	 * Not found; create a new znode/vnode, but only if the file exists.
	 *
	 * There is a small window where zfs_vget() could
	 * find this object while a file create is still in
	 * progress.  This is checked for in zfs_znode_alloc().
	 *
	 * If zfs_znode_alloc() fails it will drop the hold on the
	 * bonus buffer.
	 */
	zp = zfs_znode_alloc(zfsvfs, db, doi.doi_data_block_size,
	    doi.doi_bonus_type, NULL);
	if (zp == NULL) {
		err = SET_ERROR(ENOENT);
	} else {
		*zpp = zp;
	}
	if (err == 0) {
		vnode_t *vp = ZTOV(zp);

		err = insmntque(vp, zfsvfs->z_vfs);
		if (err == 0) {
			vp->v_hash = obj_num;
			VOP_UNLOCK1(vp);
		} else {
			zp->z_vnode = NULL;
			zfs_znode_dmu_fini(zp);
			zfs_znode_free(zp);
			*zpp = NULL;
		}
	}
	ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
	getnewvnode_drop_reserve();
	return (err);
}

int
zfs_rezget(znode_t *zp)
{
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	dmu_object_info_t doi;
	dmu_buf_t *db;
	vnode_t *vp;
	uint64_t obj_num = zp->z_id;
	uint64_t mode, size;
	sa_bulk_attr_t bulk[8];
	int err;
	int count = 0;
	uint64_t gen;

	/*
	 * Remove cached pages before reloading the znode, so that they are not
	 * lingering after we run into any error.  Ideally, we should vgone()
	 * the vnode in case of error, but currently we cannot do that
	 * because of the LOR between the vnode lock and z_teardown_lock.
	 * So, instead, we have to "doom" the znode in the illumos style.
	 *
	 * Ignore invalid pages during the scan.  This is to avoid deadlocks
	 * between page busying and the teardown lock, as pages are busied prior
	 * to a VOP_GETPAGES operation, which acquires the teardown read lock.
	 * Such pages will be invalid and can safely be skipped here.
	 */
	vp = ZTOV(zp);
#if __FreeBSD_version >= 1400042
	vn_pages_remove_valid(vp, 0, 0);
#else
	vn_pages_remove(vp, 0, 0);
#endif

	ZFS_OBJ_HOLD_ENTER(zfsvfs, obj_num);

	mutex_enter(&zp->z_acl_lock);
	if (zp->z_acl_cached) {
		zfs_acl_free(zp->z_acl_cached);
		zp->z_acl_cached = NULL;
	}
	mutex_exit(&zp->z_acl_lock);

	rw_enter(&zp->z_xattr_lock, RW_WRITER);
	if (zp->z_xattr_cached) {
		nvlist_free(zp->z_xattr_cached);
		zp->z_xattr_cached = NULL;
	}
	rw_exit(&zp->z_xattr_lock);

	ASSERT3P(zp->z_sa_hdl, ==, NULL);
	err = sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db);
	if (err) {
		ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
		return (err);
	}

	dmu_object_info_from_db(db, &doi);
	if (doi.doi_bonus_type != DMU_OT_SA &&
	    (doi.doi_bonus_type != DMU_OT_ZNODE ||
	    (doi.doi_bonus_type == DMU_OT_ZNODE &&
	    doi.doi_bonus_size < sizeof (znode_phys_t)))) {
		sa_buf_rele(db, NULL);
		ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
		return (SET_ERROR(EINVAL));
	}

	zfs_znode_sa_init(zfsvfs, zp, db, doi.doi_bonus_type, NULL);
	size = zp->z_size;

	/* reload cached values */
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL,
	    &gen, sizeof (gen));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
	    &zp->z_size, sizeof (zp->z_size));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
	    &zp->z_links, sizeof (zp->z_links));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
	    &zp->z_pflags, sizeof (zp->z_pflags));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
	    &zp->z_atime, sizeof (zp->z_atime));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
	    &zp->z_uid, sizeof (zp->z_uid));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
	    &zp->z_gid, sizeof (zp->z_gid));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
	    &mode, sizeof (mode));

	if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) {
		zfs_znode_dmu_fini(zp);
		ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
		return (SET_ERROR(EIO));
	}

	zp->z_mode = mode;

	if (gen != zp->z_gen) {
		zfs_znode_dmu_fini(zp);
		ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
		return (SET_ERROR(EIO));
	}

	/*
	 * It is highly improbable but still quite possible that two
	 * objects in different datasets are created with the same
	 * object numbers and in transaction groups with the same
	 * numbers.  znodes corresponding to those objects would
	 * have the same z_id and z_gen, but their other attributes
	 * may be different.
	 * zfs recv -F may replace one of such objects with the other.
	 * As a result file properties recorded in the replaced
	 * object's vnode may no longer match the received object's
	 * properties.  At present the only cached property is the
	 * file's type recorded in v_type.
	 * So, handle this case by leaving the old vnode and znode
	 * disassociated from the actual object.  A new vnode and a
	 * znode will be created if the object is accessed
	 * (e.g. via a look-up).  The old vnode and znode will be
	 * recycled when the last vnode reference is dropped.
	 */
	if (vp->v_type != IFTOVT((mode_t)zp->z_mode)) {
		zfs_znode_dmu_fini(zp);
		ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
		return (SET_ERROR(EIO));
	}

	/*
	 * If the file has zero links, then it has been unlinked on the send
	 * side and it must be in the received unlinked set.
	 * We call zfs_znode_dmu_fini() now to prevent any accesses to the
	 * stale data and to prevent automatic removal of the file in
	 * zfs_zinactive().  The file will be removed either when it is removed
	 * on the send side and the next incremental stream is received or
	 * when the unlinked set gets processed.
	 */
	zp->z_unlinked = (zp->z_links == 0);
	if (zp->z_unlinked) {
		zfs_znode_dmu_fini(zp);
		ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
		return (0);
	}

	zp->z_blksz = doi.doi_data_block_size;
	if (zp->z_size != size)
		vnode_pager_setsize(vp, zp->z_size);

	ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);

	return (0);
}

void
zfs_znode_delete(znode_t *zp, dmu_tx_t *tx)
{
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	objset_t *os = zfsvfs->z_os;
	uint64_t obj = zp->z_id;
	uint64_t acl_obj = zfs_external_acl(zp);

	ZFS_OBJ_HOLD_ENTER(zfsvfs, obj);
	if (acl_obj) {
		VERIFY(!zp->z_is_sa);
		VERIFY0(dmu_object_free(os, acl_obj, tx));
	}
	VERIFY0(dmu_object_free(os, obj, tx));
	zfs_znode_dmu_fini(zp);
	ZFS_OBJ_HOLD_EXIT(zfsvfs, obj);
	zfs_znode_free(zp);
}

void
zfs_zinactive(znode_t *zp)
{
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	uint64_t z_id = zp->z_id;

	ASSERT3P(zp->z_sa_hdl, !=, NULL);

	/*
	 * Don't allow a zfs_zget() while we're trying to release this znode.
	 */
	ZFS_OBJ_HOLD_ENTER(zfsvfs, z_id);

	/*
	 * If this was the last reference to a file with no links, remove
	 * the file from the file system unless the file system is mounted
	 * read-only.  That can happen, for example, if the file system was
	 * originally read-write, the file was opened, then unlinked and
	 * the file system was made read-only before the file was finally
	 * closed.  The file will remain in the unlinked set.
	 */
	if (zp->z_unlinked) {
		ASSERT(!zfsvfs->z_issnap);
		if ((zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) == 0) {
			ZFS_OBJ_HOLD_EXIT(zfsvfs, z_id);
			zfs_rmnode(zp);
			return;
		}
	}

	zfs_znode_dmu_fini(zp);
	ZFS_OBJ_HOLD_EXIT(zfsvfs, z_id);
	zfs_znode_free(zp);
}

void
zfs_znode_free(znode_t *zp)
{
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
#if __FreeBSD_version >= 1300139
	char *symlink;
#endif

	ASSERT3P(zp->z_sa_hdl, ==, NULL);
	zp->z_vnode = NULL;
	mutex_enter(&zfsvfs->z_znodes_lock);
	POINTER_INVALIDATE(&zp->z_zfsvfs);
	list_remove(&zfsvfs->z_all_znodes, zp);
	zfsvfs->z_nr_znodes--;
	mutex_exit(&zfsvfs->z_znodes_lock);

#if __FreeBSD_version >= 1300139
	symlink = atomic_load_ptr(&zp->z_cached_symlink);
	if (symlink != NULL) {
		atomic_store_rel_ptr((uintptr_t *)&zp->z_cached_symlink,
		    (uintptr_t)NULL);
		cache_symlink_free(symlink, strlen(symlink) + 1);
	}
#endif

	if (zp->z_acl_cached) {
		zfs_acl_free(zp->z_acl_cached);
		zp->z_acl_cached = NULL;
	}

	zfs_znode_free_kmem(zp);
}

void
zfs_tstamp_update_setup_ext(znode_t *zp, uint_t flag, uint64_t mtime[2],
    uint64_t ctime[2], boolean_t have_tx)
{
	timestruc_t	now;

	vfs_timestamp(&now);

	if (have_tx) {	/* will sa_bulk_update happen really soon? */
		zp->z_atime_dirty = 0;
		zp->z_seq++;
	} else {
		zp->z_atime_dirty = 1;
	}

	if (flag & AT_ATIME) {
		ZFS_TIME_ENCODE(&now, zp->z_atime);
	}

	if (flag & AT_MTIME) {
		ZFS_TIME_ENCODE(&now, mtime);
		if (zp->z_zfsvfs->z_use_fuids) {
			zp->z_pflags |= (ZFS_ARCHIVE |
			    ZFS_AV_MODIFIED);
		}
	}

	if (flag & AT_CTIME) {
		ZFS_TIME_ENCODE(&now, ctime);
		if (zp->z_zfsvfs->z_use_fuids)
			zp->z_pflags |= ZFS_ARCHIVE;
	}
}

void
zfs_tstamp_update_setup(znode_t *zp, uint_t flag, uint64_t mtime[2],
    uint64_t ctime[2])
{
	zfs_tstamp_update_setup_ext(zp, flag, mtime, ctime, B_TRUE);
}

/*
 * Grow the block size for a file.
 *
 *	IN:	zp	- znode of file whose block size is to be grown.
 *		size	- requested block size
 *		tx	- open transaction.
 *
 * NOTE: this function assumes that the znode is write locked.
 */
void
zfs_grow_blocksize(znode_t *zp, uint64_t size, dmu_tx_t *tx)
{
	int		error;
	u_longlong_t	dummy;

	if (size <= zp->z_blksz)
		return;
	/*
	 * If the file size is already greater than the current blocksize,
	 * we will not grow.  If there is more than one block in a file,
	 * the blocksize cannot change.
	 */
	if (zp->z_blksz && zp->z_size > zp->z_blksz)
		return;

	error = dmu_object_set_blocksize(zp->z_zfsvfs->z_os, zp->z_id,
	    size, 0, tx);

	if (error == ENOTSUP)
		return;
	ASSERT0(error);

	/* What blocksize did we actually get? */
	dmu_object_size_from_db(sa_get_db(zp->z_sa_hdl), &zp->z_blksz, &dummy);
}

/*
 * Increase the file length.
 *
 *	IN:	zp	- znode of file to extend.
 *		end	- new end-of-file
 *
 *	RETURN:	0 on success, error code on failure
 */
static int
zfs_extend(znode_t *zp, uint64_t end)
{
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	dmu_tx_t *tx;
	zfs_locked_range_t *lr;
	uint64_t newblksz;
	int error;

	/*
	 * We will change z_size, so lock the whole file.
	 */
	lr = zfs_rangelock_enter(&zp->z_rangelock, 0, UINT64_MAX, RL_WRITER);

	/*
	 * Nothing to do if file already at desired length.
	 */
	if (end <= zp->z_size) {
		zfs_rangelock_exit(lr);
		return (0);
	}
	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	zfs_sa_upgrade_txholds(tx, zp);
	if (end > zp->z_blksz &&
	    (!ISP2(zp->z_blksz) || zp->z_blksz < zfsvfs->z_max_blksz)) {
		/*
		 * We are growing the file past the current block size.
		 */
		if (zp->z_blksz > zp->z_zfsvfs->z_max_blksz) {
			/*
			 * File's blocksize is already larger than the
			 * "recordsize" property.  Only let it grow to
			 * the next power of 2.
			 */
			ASSERT(!ISP2(zp->z_blksz));
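			/*
			 * highbit64() returns the 1-based index of the
			 * highest set bit, so 1 << highbit64(x) rounds a
			 * non-power-of-2 x up to the next power of 2.
			 */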
			newblksz = MIN(end, 1 << highbit64(zp->z_blksz));
		} else {
			newblksz = MIN(end, zp->z_zfsvfs->z_max_blksz);
		}
		dmu_tx_hold_write(tx, zp->z_id, 0, newblksz);
	} else {
		newblksz = 0;
	}

	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		zfs_rangelock_exit(lr);
		return (error);
	}

	if (newblksz)
		zfs_grow_blocksize(zp, newblksz, tx);

	zp->z_size = end;

	VERIFY0(sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zp->z_zfsvfs),
	    &zp->z_size, sizeof (zp->z_size), tx));

	vnode_pager_setsize(ZTOV(zp), end);

	zfs_rangelock_exit(lr);

	dmu_tx_commit(tx);

	return (0);
}

/*
 * Free space in a file.
 *
 *	IN:	zp	- znode of file to free data in.
 *		off	- start of section to free.
 *		len	- length of section to free.
 *
 *	RETURN:	0 on success, error code on failure
 */
static int
zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
{
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	zfs_locked_range_t *lr;
	int error;

	/*
	 * Lock the range being freed.
	 */
	lr = zfs_rangelock_enter(&zp->z_rangelock, off, len, RL_WRITER);

	/*
	 * Nothing to do if file already at desired length.
	 */
	if (off >= zp->z_size) {
		zfs_rangelock_exit(lr);
		return (0);
	}

	if (off + len > zp->z_size)
		len = zp->z_size - off;

	error = dmu_free_long_range(zfsvfs->z_os, zp->z_id, off, len);

	if (error == 0) {
#if __FreeBSD_version >= 1400032
		vnode_pager_purge_range(ZTOV(zp), off, off + len);
#else
		/*
		 * Before __FreeBSD_version 1400032 we cannot free blocks in
		 * the middle of a file, but only at the end of a file, so
		 * this code path should never happen.
		 */
		vnode_pager_setsize(ZTOV(zp), off);
#endif
	}

	zfs_rangelock_exit(lr);

	return (error);
}

/*
 * Truncate a file.
 *
 *	IN:	zp	- znode of file to truncate.
 *		end	- new end-of-file.
 *
 *	RETURN:	0 on success, error code on failure
 */
static int
zfs_trunc(znode_t *zp, uint64_t end)
{
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	vnode_t *vp = ZTOV(zp);
	dmu_tx_t *tx;
	zfs_locked_range_t *lr;
	int error;
	sa_bulk_attr_t bulk[2];
	int count = 0;

	/*
	 * We will change z_size, so lock the whole file.
	 */
	lr = zfs_rangelock_enter(&zp->z_rangelock, 0, UINT64_MAX, RL_WRITER);

	/*
	 * Nothing to do if file already at desired length.
	 */
	if (end >= zp->z_size) {
		zfs_rangelock_exit(lr);
		return (0);
	}

	error = dmu_free_long_range(zfsvfs->z_os, zp->z_id, end,
	    DMU_OBJECT_END);
	if (error) {
		zfs_rangelock_exit(lr);
		return (error);
	}
	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	zfs_sa_upgrade_txholds(tx, zp);
	dmu_tx_mark_netfree(tx);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		zfs_rangelock_exit(lr);
		return (error);
	}

	zp->z_size = end;
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs),
	    NULL, &zp->z_size, sizeof (zp->z_size));

	if (end == 0) {
		zp->z_pflags &= ~ZFS_SPARSE;
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
		    NULL, &zp->z_pflags, 8);
	}
	VERIFY0(sa_bulk_update(zp->z_sa_hdl, bulk, count, tx));

	dmu_tx_commit(tx);

	/*
	 * Clear any mapped pages in the truncated region.  This has to
	 * happen outside of the transaction to avoid the possibility of
	 * a deadlock with someone trying to push a page that we are
	 * about to invalidate.
	 */
	vnode_pager_setsize(vp, end);

	zfs_rangelock_exit(lr);

	return (0);
}

/*
 * Free space in a file.
 *
 *	IN:	zp	- znode of file to free data in.
 *		off	- start of range
 *		len	- length of range (0 => from off to end of file)
 *		flag	- current file open mode flags.
 *		log	- TRUE if this action should be logged
 *
 *	RETURN:	0 on success, error code on failure
 */
int
zfs_freesp(znode_t *zp, uint64_t off, uint64_t len, int flag, boolean_t log)
{
	dmu_tx_t *tx;
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	zilog_t *zilog = zfsvfs->z_log;
	uint64_t mode;
	uint64_t mtime[2], ctime[2];
	sa_bulk_attr_t bulk[3];
	int count = 0;
	int error;

	if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs), &mode,
	    sizeof (mode))) != 0)
		return (error);

	if (off > zp->z_size) {
		error = zfs_extend(zp, off + len);
		if (error == 0 && log)
			goto log;
		else
			return (error);
	}

	if (len == 0) {
		error = zfs_trunc(zp, off);
	} else {
		if ((error = zfs_free_range(zp, off, len)) == 0 &&
		    off + len > zp->z_size)
			error = zfs_extend(zp, off + len);
	}
	if (error || !log)
		return (error);
log:
	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	zfs_sa_upgrade_txholds(tx, zp);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, mtime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, ctime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
	    NULL, &zp->z_pflags, 8);
	zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
	error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
	ASSERT0(error);

	zfs_log_truncate(zilog, tx, TX_TRUNCATE, zp, off, len);

	dmu_tx_commit(tx);
	return (0);
}

void
zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
{
	uint64_t	moid, obj, sa_obj, version;
	uint64_t	sense = ZFS_CASE_SENSITIVE;
	uint64_t	norm = 0;
	nvpair_t	*elem;
	int		error;
	int		i;
	znode_t		*rootzp = NULL;
	zfsvfs_t	*zfsvfs;
	vattr_t		vattr;
	znode_t		*zp;
	zfs_acl_ids_t	acl_ids;

	/*
	 * First attempt to create master node.
	 *
	 * In an empty objset, there are no blocks to read and thus
	 * there can be no i/o errors (which we assert below).
	 */
	moid = MASTER_NODE_OBJ;
	error = zap_create_claim(os, moid, DMU_OT_MASTER_NODE,
	    DMU_OT_NONE, 0, tx);
	ASSERT0(error);

	/*
	 * Set starting attributes.
	 */
	version = zfs_zpl_version_map(spa_version(dmu_objset_spa(os)));
	elem = NULL;
	while ((elem = nvlist_next_nvpair(zplprops, elem)) != NULL) {
		/* For the moment we expect all zpl props to be uint64_ts */
		uint64_t val;
		const char *name;

		ASSERT3S(nvpair_type(elem), ==, DATA_TYPE_UINT64);
		val = fnvpair_value_uint64(elem);
		name = nvpair_name(elem);
		if (strcmp(name, zfs_prop_to_name(ZFS_PROP_VERSION)) == 0) {
			if (val < version)
				version = val;
		} else {
			error = zap_update(os, moid, name, 8, 1, &val, tx);
		}
		ASSERT0(error);
		if (strcmp(name, zfs_prop_to_name(ZFS_PROP_NORMALIZE)) == 0)
			norm = val;
		else if (strcmp(name, zfs_prop_to_name(ZFS_PROP_CASE)) == 0)
			sense = val;
	}
	ASSERT3U(version, !=, 0);
	error = zap_update(os, moid, ZPL_VERSION_STR, 8, 1, &version, tx);
	ASSERT0(error);

	/*
	 * Create zap object used for SA attribute registration
	 */

	if (version >= ZPL_VERSION_SA) {
		sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE,
		    DMU_OT_NONE, 0, tx);
		error = zap_add(os, moid, ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
		ASSERT0(error);
	} else {
		sa_obj = 0;
	}
	/*
	 * Create a delete queue.
	 */
	obj = zap_create(os, DMU_OT_UNLINKED_SET, DMU_OT_NONE, 0, tx);

	error = zap_add(os, moid, ZFS_UNLINKED_SET, 8, 1, &obj, tx);
	ASSERT0(error);

	/*
	 * Create root znode.  Create minimal znode/vnode/zfsvfs
	 * to allow zfs_mknode to work.
	 */
	VATTR_NULL(&vattr);
	vattr.va_mask = AT_MODE|AT_UID|AT_GID;
	vattr.va_type = VDIR;
	vattr.va_mode = S_IFDIR|0755;
	vattr.va_uid = crgetuid(cr);
	vattr.va_gid = crgetgid(cr);

	zfsvfs = kmem_zalloc(sizeof (zfsvfs_t), KM_SLEEP);

	rootzp = zfs_znode_alloc_kmem(KM_SLEEP);
	ASSERT(!POINTER_IS_VALID(rootzp->z_zfsvfs));
	rootzp->z_unlinked = 0;
	rootzp->z_atime_dirty = 0;
	rootzp->z_is_sa = USE_SA(version, os);

	zfsvfs->z_os = os;
	zfsvfs->z_parent = zfsvfs;
	zfsvfs->z_version = version;
	zfsvfs->z_use_fuids = USE_FUIDS(version, os);
	zfsvfs->z_use_sa = USE_SA(version, os);
	zfsvfs->z_norm = norm;

	error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
	    &zfsvfs->z_attr_table);

	ASSERT0(error);

	/*
	 * Fold case on file systems that are always or sometimes case
	 * insensitive.
	 */
	if (sense == ZFS_CASE_INSENSITIVE || sense == ZFS_CASE_MIXED)
		zfsvfs->z_norm |= U8_TEXTPREP_TOUPPER;

	mutex_init(&zfsvfs->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&zfsvfs->z_all_znodes, sizeof (znode_t),
	    offsetof(znode_t, z_link_node));

	for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
		mutex_init(&zfsvfs->z_hold_mtx[i], NULL, MUTEX_DEFAULT, NULL);

	rootzp->z_zfsvfs = zfsvfs;
	VERIFY0(zfs_acl_ids_create(rootzp, IS_ROOT_NODE, &vattr,
	    cr, NULL, &acl_ids, NULL));
	zfs_mknode(rootzp, &vattr, tx, cr, IS_ROOT_NODE, &zp, &acl_ids);
	ASSERT3P(zp, ==, rootzp);
	error = zap_add(os, moid, ZFS_ROOT_OBJ, 8, 1, &rootzp->z_id, tx);
	ASSERT0(error);
	zfs_acl_ids_free(&acl_ids);
	POINTER_INVALIDATE(&rootzp->z_zfsvfs);

	sa_handle_destroy(rootzp->z_sa_hdl);
	zfs_znode_free_kmem(rootzp);

	/*
	 * Create shares directory
	 */

	error = zfs_create_share_dir(zfsvfs, tx);

	ASSERT0(error);

	for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
		mutex_destroy(&zfsvfs->z_hold_mtx[i]);
	kmem_free(zfsvfs, sizeof (zfsvfs_t));
}
#endif /* _KERNEL */

static int
zfs_sa_setup(objset_t *osp, sa_attr_type_t **sa_table)
{
	uint64_t sa_obj = 0;
	int error;

	error = zap_lookup(osp, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1, &sa_obj);
	if (error != 0 && error != ENOENT)
		return (error);

	error = sa_setup(osp, sa_obj, zfs_attr_table, ZPL_END, sa_table);
	return (error);
}

static int
zfs_grab_sa_handle(objset_t *osp, uint64_t obj, sa_handle_t **hdlp,
    dmu_buf_t **db, const void *tag)
{
	dmu_object_info_t doi;
	int error;

	if ((error = sa_buf_hold(osp, obj, tag, db)) != 0)
		return (error);

	dmu_object_info_from_db(*db, &doi);
	if ((doi.doi_bonus_type != DMU_OT_SA &&
	    doi.doi_bonus_type != DMU_OT_ZNODE) ||
	    (doi.doi_bonus_type == DMU_OT_ZNODE &&
	    doi.doi_bonus_size < sizeof (znode_phys_t))) {
		sa_buf_rele(*db, tag);
		return (SET_ERROR(ENOTSUP));
	}

	error = sa_handle_get(osp, obj, NULL, SA_HDL_PRIVATE, hdlp);
	if (error != 0) {
		sa_buf_rele(*db, tag);
		return (error);
	}

	return (0);
}

static void
zfs_release_sa_handle(sa_handle_t *hdl, dmu_buf_t *db, const void *tag)
{
	sa_handle_destroy(hdl);
	sa_buf_rele(db, tag);
}

/*
 * Given an object number, return its parent object number and whether
 * or not the object is an extended attribute directory.
 */
static int
zfs_obj_to_pobj(objset_t *osp, sa_handle_t *hdl, sa_attr_type_t *sa_table,
    uint64_t *pobjp, int *is_xattrdir)
{
	uint64_t parent;
	uint64_t pflags;
	uint64_t mode;
	uint64_t parent_mode;
	sa_bulk_attr_t bulk[3];
	sa_handle_t *sa_hdl;
	dmu_buf_t *sa_db;
	int count = 0;
	int error;

	SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_PARENT], NULL,
	    &parent, sizeof (parent));
	SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_FLAGS], NULL,
	    &pflags, sizeof (pflags));
	SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_MODE], NULL,
	    &mode, sizeof (mode));

	if ((error = sa_bulk_lookup(hdl, bulk, count)) != 0)
		return (error);

	/*
	 * When a link is removed its parent pointer is not changed and will
	 * be invalid.  There are two cases where a link is removed but the
	 * file stays around: when it goes to the delete queue and when there
	 * are additional links.
	 */
	error = zfs_grab_sa_handle(osp, parent, &sa_hdl, &sa_db, FTAG);
	if (error != 0)
		return (error);

	error = sa_lookup(sa_hdl, ZPL_MODE, &parent_mode,
	    sizeof (parent_mode));
	zfs_release_sa_handle(sa_hdl, sa_db, FTAG);
	if (error != 0)
		return (error);

	*is_xattrdir = ((pflags & ZFS_XATTR) != 0) && S_ISDIR(mode);

	/*
	 * Extended attributes can be applied to files, directories, etc.
	 * Otherwise the parent must be a directory.
	 */
	if (!*is_xattrdir && !S_ISDIR(parent_mode))
		return (SET_ERROR(EINVAL));

	*pobjp = parent;

	return (0);
}

/*
 * Given an object number, return some zpl level statistics
 */
static int
zfs_obj_to_stats_impl(sa_handle_t *hdl, sa_attr_type_t *sa_table,
    zfs_stat_t *sb)
{
	sa_bulk_attr_t bulk[4];
	int count = 0;

	SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_MODE], NULL,
	    &sb->zs_mode, sizeof (sb->zs_mode));
	SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_GEN], NULL,
	    &sb->zs_gen, sizeof (sb->zs_gen));
	SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_LINKS], NULL,
	    &sb->zs_links, sizeof (sb->zs_links));
	SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_CTIME], NULL,
	    &sb->zs_ctime, sizeof (sb->zs_ctime));

	return (sa_bulk_lookup(hdl, bulk, count));
}

static int
zfs_obj_to_path_impl(objset_t *osp, uint64_t obj, sa_handle_t *hdl,
    sa_attr_type_t *sa_table, char *buf, int len)
{
	sa_handle_t *sa_hdl;
	sa_handle_t *prevhdl = NULL;
	dmu_buf_t *prevdb = NULL;
	dmu_buf_t *sa_db = NULL;
	char *path = buf + len - 1;
	int error;

	*path = '\0';
	sa_hdl = hdl;

	uint64_t deleteq_obj;
	VERIFY0(zap_lookup(osp, MASTER_NODE_OBJ,
	    ZFS_UNLINKED_SET, sizeof (uint64_t), 1, &deleteq_obj));
	error = zap_lookup_int(osp, deleteq_obj, obj);
	if (error == 0) {
		return (ESTALE);
	} else if (error != ENOENT) {
		return (error);
	}

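	/*
	 * The path is assembled backwards: walk parent pointers from the
	 * object toward the root, prepending "/<component>" at the tail of
	 * the buffer as we go.
	 */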
	for (;;) {
		uint64_t pobj;
		char component[MAXNAMELEN + 2];
		size_t complen;
		int is_xattrdir;

		if (prevdb) {
			ASSERT3P(prevhdl, !=, NULL);
			zfs_release_sa_handle(prevhdl, prevdb, FTAG);
		}

		if ((error = zfs_obj_to_pobj(osp, sa_hdl, sa_table, &pobj,
		    &is_xattrdir)) != 0)
			break;

		if (pobj == obj) {
			if (path[0] != '/')
				*--path = '/';
			break;
		}

		component[0] = '/';
		if (is_xattrdir) {
			(void) sprintf(component + 1, "<xattrdir>");
		} else {
			error = zap_value_search(osp, pobj, obj,
			    ZFS_DIRENT_OBJ(-1ULL), component + 1);
			if (error != 0)
				break;
		}

		complen = strlen(component);
		path -= complen;
		ASSERT3P(path, >=, buf);
		memcpy(path, component, complen);
		obj = pobj;

		if (sa_hdl != hdl) {
			prevhdl = sa_hdl;
			prevdb = sa_db;
		}
		error = zfs_grab_sa_handle(osp, obj, &sa_hdl, &sa_db, FTAG);
		if (error != 0) {
			sa_hdl = prevhdl;
			sa_db = prevdb;
			break;
		}
	}

	if (sa_hdl != NULL && sa_hdl != hdl) {
		ASSERT3P(sa_db, !=, NULL);
		zfs_release_sa_handle(sa_hdl, sa_db, FTAG);
	}

	if (error == 0)
		(void) memmove(buf, path, buf + len - path);

	return (error);
}

int
zfs_obj_to_path(objset_t *osp, uint64_t obj, char *buf, int len)
{
	sa_attr_type_t *sa_table;
	sa_handle_t *hdl;
	dmu_buf_t *db;
	int error;

	error = zfs_sa_setup(osp, &sa_table);
	if (error != 0)
		return (error);

	error = zfs_grab_sa_handle(osp, obj, &hdl, &db, FTAG);
	if (error != 0)
		return (error);

	error = zfs_obj_to_path_impl(osp, obj, hdl, sa_table, buf, len);

	zfs_release_sa_handle(hdl, db, FTAG);
	return (error);
}

int
zfs_obj_to_stats(objset_t *osp, uint64_t obj, zfs_stat_t *sb,
    char *buf, int len)
{
	char *path = buf + len - 1;
	sa_attr_type_t *sa_table;
	sa_handle_t *hdl;
	dmu_buf_t *db;
	int error;

	*path = '\0';

	error = zfs_sa_setup(osp, &sa_table);
	if (error != 0)
		return (error);

	error = zfs_grab_sa_handle(osp, obj, &hdl, &db, FTAG);
	if (error != 0)
		return (error);

	error = zfs_obj_to_stats_impl(hdl, sa_table, sb);
	if (error != 0) {
		zfs_release_sa_handle(hdl, db, FTAG);
		return (error);
	}

	error = zfs_obj_to_path_impl(osp, obj, hdl, sa_table, buf, len);

	zfs_release_sa_handle(hdl, db, FTAG);
	return (error);
}

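/*
 * Propagate a changed file size to the VM object backing the vnode, so
 * that mmap()ed mappings observe the new size.
 */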
void
zfs_znode_update_vfs(znode_t *zp)
{
	vm_object_t object;

	if ((object = ZTOV(zp)->v_object) == NULL ||
	    zp->z_size == object->un_pager.vnp.vnp_size)
		return;

	vnode_pager_setsize(ZTOV(zp), zp->z_size);
}

#ifdef _KERNEL
int
zfs_znode_parent_and_name(znode_t *zp, znode_t **dzpp, char *buf)
{
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	uint64_t parent;
	int is_xattrdir;
	int err;

	/* Extended attributes should not be visible as regular files. */
	if ((zp->z_pflags & ZFS_XATTR) != 0)
		return (SET_ERROR(EINVAL));

	err = zfs_obj_to_pobj(zfsvfs->z_os, zp->z_sa_hdl, zfsvfs->z_attr_table,
	    &parent, &is_xattrdir);
	if (err != 0)
		return (err);
	ASSERT0(is_xattrdir);

	/* No name as this is a root object. */
	if (parent == zp->z_id)
		return (SET_ERROR(EINVAL));

	err = zap_value_search(zfsvfs->z_os, parent, zp->z_id,
	    ZFS_DIRENT_OBJ(-1ULL), buf);
	if (err != 0)
		return (err);
	err = zfs_zget(zfsvfs, parent, dzpp);
	return (err);
}
#endif /* _KERNEL */

#ifdef _KERNEL
int
zfs_rlimit_fsize(off_t fsize)
{
	struct thread *td = curthread;
	off_t lim;

	if (td == NULL)
		return (0);

	lim = lim_cur(td, RLIMIT_FSIZE);
	if (__predict_true((uoff_t)fsize <= lim))
		return (0);

	/*
	 * The limit has been exceeded; deliver SIGXFSZ before failing
	 * with EFBIG.
	 */
	PROC_LOCK(td->td_proc);
	kern_psignal(td->td_proc, SIGXFSZ);
	PROC_UNLOCK(td->td_proc);

	return (EFBIG);
}
#endif /* _KERNEL */