/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

/* Portions Copyright 2007 Jeremy Teo */
/* Portions Copyright 2011 Martin Matuska <mm@FreeBSD.org> */

#ifdef _KERNEL
#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/mntent.h>
#include <sys/u8_textprep.h>
#include <sys/dsl_dataset.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/kmem.h>
#include <sys/errno.h>
#include <sys/unistd.h>
#include <sys/atomic.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_rlock.h>
#include <sys/zfs_fuid.h>
#include <sys/dnode.h>
#include <sys/fs/zfs.h>
#endif /* _KERNEL */

#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_tx.h>
#include <sys/zfs_refcount.h>
#include <sys/stat.h>
#include <sys/zap.h>
#include <sys/zfs_znode.h>
#include <sys/sa.h>
#include <sys/zfs_sa.h>
#include <sys/zfs_stat.h>

#include "zfs_prop.h"
#include "zfs_comutil.h"

/* Used by fstat(1). */
SYSCTL_INT(_debug_sizeof, OID_AUTO, znode, CTLFLAG_RD,
    SYSCTL_NULL_INT_PTR, sizeof (znode_t), "sizeof(znode_t)");

/*
 * Define ZNODE_STATS to turn on statistic gathering.  By default, it is only
 * turned on when ZFS_DEBUG is also defined.
 */
#ifdef ZFS_DEBUG
#define	ZNODE_STATS
#endif /* ZFS_DEBUG */

#ifdef ZNODE_STATS
#define	ZNODE_STAT_ADD(stat)	((stat)++)
#else
#define	ZNODE_STAT_ADD(stat)	/* nothing */
#endif /* ZNODE_STATS */

/*
 * Functions needed for userland (i.e., libzpool) are not put under
 * #ifdef _KERNEL; the rest of the functions have dependencies
 * (such as VFS logic) that will not compile easily in userland.
 */
#ifdef _KERNEL
#if !defined(KMEM_DEBUG) && __FreeBSD_version >= 1300102
#define	_ZFS_USE_SMR
static uma_zone_t znode_uma_zone;
#else
static kmem_cache_t *znode_cache = NULL;
#endif

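/*
 * Vnode operation vectors for regular znodes, FIFOs, and entries in the
 * special .zfs/shares directory; they are defined elsewhere in the
 * FreeBSD-specific ZFS vnops code.
 */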
extern struct vop_vector zfs_vnodeops;
extern struct vop_vector zfs_fifoops;
extern struct vop_vector zfs_shareops;


/*
 * This callback is invoked when acquiring a RL_WRITER or RL_APPEND lock on
 * z_rangelock.  It will modify the offset and length of the lock to reflect
 * znode-specific information, and convert RL_APPEND to RL_WRITER.  This is
 * called with the rangelock_t's rl_lock held, which avoids races.
 */
static void
zfs_rangelock_cb(zfs_locked_range_t *new, void *arg)
{
	znode_t *zp = arg;

	/*
	 * If in append mode, convert to writer and lock starting at the
	 * current end of file.
	 */
	if (new->lr_type == RL_APPEND) {
		new->lr_offset = zp->z_size;
		new->lr_type = RL_WRITER;
	}

	/*
	 * If we need to grow the block size then lock the whole file range.
	 */
	uint64_t end_size = MAX(zp->z_size, new->lr_offset + new->lr_length);
	if (end_size > zp->z_blksz && (!ISP2(zp->z_blksz) ||
	    zp->z_blksz < ZTOZSB(zp)->z_max_blksz)) {
		new->lr_offset = 0;
		new->lr_length = UINT64_MAX;
	}
}

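/*
 * Cache constructor/destructor for znode_t: the constructor initializes
 * the locks and the rangelock embedded in every znode, and the destructor
 * asserts that the znode was fully quiesced before being freed.
 */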
static int
zfs_znode_cache_constructor(void *buf, void *arg, int kmflags)
{
	znode_t *zp = buf;

	POINTER_INVALIDATE(&zp->z_zfsvfs);

	list_link_init(&zp->z_link_node);

	mutex_init(&zp->z_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&zp->z_acl_lock, NULL, MUTEX_DEFAULT, NULL);
	rw_init(&zp->z_xattr_lock, NULL, RW_DEFAULT, NULL);

	zfs_rangelock_init(&zp->z_rangelock, zfs_rangelock_cb, zp);

	zp->z_acl_cached = NULL;
	zp->z_xattr_cached = NULL;
	zp->z_xattr_parent = 0;
	zp->z_vnode = NULL;
	zp->z_sync_writes_cnt = 0;
	zp->z_async_writes_cnt = 0;

	return (0);
}

static void
zfs_znode_cache_destructor(void *buf, void *arg)
{
	(void) arg;
	znode_t *zp = buf;

	ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs));
	ASSERT3P(zp->z_vnode, ==, NULL);
	ASSERT(!list_link_active(&zp->z_link_node));
	mutex_destroy(&zp->z_lock);
	mutex_destroy(&zp->z_acl_lock);
	rw_destroy(&zp->z_xattr_lock);
	zfs_rangelock_fini(&zp->z_rangelock);

	ASSERT3P(zp->z_acl_cached, ==, NULL);
	ASSERT3P(zp->z_xattr_cached, ==, NULL);

	ASSERT0(atomic_load_32(&zp->z_sync_writes_cnt));
	ASSERT0(atomic_load_32(&zp->z_async_writes_cnt));
}


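/*
 * When SMR is available, znodes are allocated from a UMA zone hooked into
 * the VFS safe-memory-reclamation domain, so that the lockless forward
 * lookup path (MNTK_FPLOOKUP) can inspect znodes without taking references.
 */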
#ifdef _ZFS_USE_SMR
VFS_SMR_DECLARE;

static int
zfs_znode_cache_constructor_smr(void *mem, int size __unused, void *private,
    int flags)
{
	return (zfs_znode_cache_constructor(mem, private, flags));
}

static void
zfs_znode_cache_destructor_smr(void *mem, int size __unused, void *private)
{
	zfs_znode_cache_destructor(mem, private);
}

void
zfs_znode_init(void)
{
	/*
	 * Initialize zcache
	 */
	ASSERT3P(znode_uma_zone, ==, NULL);
	znode_uma_zone = uma_zcreate("zfs_znode_cache",
	    sizeof (znode_t), zfs_znode_cache_constructor_smr,
	    zfs_znode_cache_destructor_smr, NULL, NULL, 0, 0);
	VFS_SMR_ZONE_SET(znode_uma_zone);
}

static znode_t *
zfs_znode_alloc_kmem(int flags)
{
	return (uma_zalloc_smr(znode_uma_zone, flags));
}

static void
zfs_znode_free_kmem(znode_t *zp)
{
	if (zp->z_xattr_cached) {
		nvlist_free(zp->z_xattr_cached);
		zp->z_xattr_cached = NULL;
	}
	uma_zfree_smr(znode_uma_zone, zp);
}
#else
void
zfs_znode_init(void)
{
	/*
	 * Initialize zcache
	 */
	ASSERT3P(znode_cache, ==, NULL);
	znode_cache = kmem_cache_create("zfs_znode_cache",
	    sizeof (znode_t), 0, zfs_znode_cache_constructor,
	    zfs_znode_cache_destructor, NULL, NULL, NULL, 0);
}

static znode_t *
zfs_znode_alloc_kmem(int flags)
{
	return (kmem_cache_alloc(znode_cache, flags));
}

static void
zfs_znode_free_kmem(znode_t *zp)
{
	if (zp->z_xattr_cached) {
		nvlist_free(zp->z_xattr_cached);
		zp->z_xattr_cached = NULL;
	}
	kmem_cache_free(znode_cache, zp);
}
#endif

void
zfs_znode_fini(void)
{
	/*
	 * Cleanup zcache
	 */
#ifdef _ZFS_USE_SMR
	if (znode_uma_zone) {
		uma_zdestroy(znode_uma_zone);
		znode_uma_zone = NULL;
	}
#else
	if (znode_cache) {
		kmem_cache_destroy(znode_cache);
		znode_cache = NULL;
	}
#endif
}


static int
zfs_create_share_dir(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
{
	zfs_acl_ids_t acl_ids;
	vattr_t vattr;
	znode_t *sharezp;
	znode_t *zp;
	int error;

	vattr.va_mask = AT_MODE|AT_UID|AT_GID;
	vattr.va_type = VDIR;
	vattr.va_mode = S_IFDIR|0555;
	vattr.va_uid = crgetuid(kcred);
	vattr.va_gid = crgetgid(kcred);

	sharezp = zfs_znode_alloc_kmem(KM_SLEEP);
	ASSERT(!POINTER_IS_VALID(sharezp->z_zfsvfs));
	sharezp->z_unlinked = 0;
	sharezp->z_atime_dirty = 0;
	sharezp->z_zfsvfs = zfsvfs;
	sharezp->z_is_sa = zfsvfs->z_use_sa;

	VERIFY0(zfs_acl_ids_create(sharezp, IS_ROOT_NODE, &vattr,
	    kcred, NULL, &acl_ids, NULL));
	zfs_mknode(sharezp, &vattr, tx, kcred, IS_ROOT_NODE, &zp, &acl_ids);
	ASSERT3P(zp, ==, sharezp);
	POINTER_INVALIDATE(&sharezp->z_zfsvfs);
	error = zap_add(zfsvfs->z_os, MASTER_NODE_OBJ,
	    ZFS_SHARES_DIR, 8, 1, &sharezp->z_id, tx);
	zfsvfs->z_shares_dir = sharezp->z_id;

	zfs_acl_ids_free(&acl_ids);
	sa_handle_destroy(sharezp->z_sa_hdl);
	zfs_znode_free_kmem(sharezp);

	return (error);
}

/*
 * define a couple of values we need available
 * for both 64 and 32 bit environments.
 */
#ifndef NBITSMINOR64
#define	NBITSMINOR64	32
#endif
#ifndef MAXMAJ64
#define	MAXMAJ64	0xffffffffUL
#endif
#ifndef MAXMIN64
#define	MAXMIN64	0xffffffffUL
#endif

/*
 * Create special expldev for ZFS private use.
 * Can't use standard expldev since it doesn't do
 * what we want.  The standard expldev() takes a
 * dev32_t in LP64 and expands it to a long dev_t.
 * We need an interface that takes a dev32_t in ILP32
 * and expands it to a long dev_t.
 */
static uint64_t
zfs_expldev(dev_t dev)
{
	return (((uint64_t)major(dev) << NBITSMINOR64) | minor(dev));
}
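
/*
 * For example, a device with major 5 and minor 7 is encoded by
 * zfs_expldev() as (5ULL << 32) | 7 == 0x0000000500000007; zfs_cmpldev()
 * below reverses the transformation.
 */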
/*
 * Special cmpldev for ZFS private use.
 * Can't use standard cmpldev since it takes
 * a long dev_t and compresses it to dev32_t in
 * LP64.  We need to do a compaction of a long dev_t
 * to a dev32_t in ILP32.
 */
dev_t
zfs_cmpldev(uint64_t dev)
{
	return (makedev((dev >> NBITSMINOR64), (dev & MAXMIN64)));
}

static void
zfs_znode_sa_init(zfsvfs_t *zfsvfs, znode_t *zp,
    dmu_buf_t *db, dmu_object_type_t obj_type, sa_handle_t *sa_hdl)
{
	ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs) || (zfsvfs == zp->z_zfsvfs));
	ASSERT(MUTEX_HELD(ZFS_OBJ_MUTEX(zfsvfs, zp->z_id)));

	ASSERT3P(zp->z_sa_hdl, ==, NULL);
	ASSERT3P(zp->z_acl_cached, ==, NULL);
	if (sa_hdl == NULL) {
		VERIFY0(sa_handle_get_from_db(zfsvfs->z_os, db, zp,
		    SA_HDL_SHARED, &zp->z_sa_hdl));
	} else {
		zp->z_sa_hdl = sa_hdl;
		sa_set_userp(sa_hdl, zp);
	}

	zp->z_is_sa = (obj_type == DMU_OT_SA) ? B_TRUE : B_FALSE;

	/*
	 * Slap on VROOT if we are the root znode unless we are the root
	 * node of a snapshot mounted under .zfs.
	 */
	if (zp->z_id == zfsvfs->z_root && zfsvfs->z_parent == zfsvfs)
		ZTOV(zp)->v_flag |= VROOT;

	vn_exists(ZTOV(zp));
}

void
zfs_znode_dmu_fini(znode_t *zp)
{
	ASSERT(MUTEX_HELD(ZFS_OBJ_MUTEX(zp->z_zfsvfs, zp->z_id)) ||
	    ZFS_TEARDOWN_INACTIVE_WRITE_HELD(zp->z_zfsvfs));

	sa_handle_destroy(zp->z_sa_hdl);
	zp->z_sa_hdl = NULL;
}

static void
zfs_vnode_forget(vnode_t *vp)
{

	/* copied from insmntque_stddtr */
	vp->v_data = NULL;
	vp->v_op = &dead_vnodeops;
	vgone(vp);
	vput(vp);
}

/*
 * Construct a new znode/vnode and initialize.
 *
 * This does not do a call to dmu_set_user(); that is
 * up to the caller to do, in case you don't want to
 * return the znode.
 */
static znode_t *
zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz,
    dmu_object_type_t obj_type, sa_handle_t *hdl)
{
	znode_t *zp;
	vnode_t *vp;
	uint64_t mode;
	uint64_t parent;
#ifdef notyet
	uint64_t mtime[2], ctime[2];
#endif
	uint64_t projid = ZFS_DEFAULT_PROJID;
	sa_bulk_attr_t bulk[9];
	int count = 0;
	int error;

	zp = zfs_znode_alloc_kmem(KM_SLEEP);

#ifndef _ZFS_USE_SMR
	KASSERT((zfsvfs->z_parent->z_vfs->mnt_kern_flag & MNTK_FPLOOKUP) == 0,
	    ("%s: fast path lookup enabled without smr", __func__));
#endif

#if __FreeBSD_version >= 1300076
	KASSERT(curthread->td_vp_reserved != NULL,
	    ("zfs_znode_alloc: getnewvnode without any vnodes reserved"));
#else
	KASSERT(curthread->td_vp_reserv > 0,
	    ("zfs_znode_alloc: getnewvnode without any vnodes reserved"));
#endif
	error = getnewvnode("zfs", zfsvfs->z_parent->z_vfs, &zfs_vnodeops, &vp);
	if (error != 0) {
		zfs_znode_free_kmem(zp);
		return (NULL);
	}
	zp->z_vnode = vp;
	vp->v_data = zp;

	/*
	 * Acquire the vnode lock before any possible interaction with the
	 * outside world.  Specifically, there is an error path that calls
	 * zfs_vnode_forget() and the vnode should be exclusively locked.
	 */
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs));

	zp->z_sa_hdl = NULL;
	zp->z_unlinked = 0;
	zp->z_atime_dirty = 0;
	zp->z_mapcnt = 0;
	zp->z_id = db->db_object;
	zp->z_blksz = blksz;
	zp->z_seq = 0x7A4653;
	zp->z_sync_cnt = 0;
	zp->z_sync_writes_cnt = 0;
	zp->z_async_writes_cnt = 0;
#if __FreeBSD_version >= 1300139
	atomic_store_ptr(&zp->z_cached_symlink, NULL);
#endif

	zfs_znode_sa_init(zfsvfs, zp, db, obj_type, hdl);

	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL, &zp->z_gen, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
	    &zp->z_size, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
	    &zp->z_links, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
	    &zp->z_pflags, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL, &parent, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
	    &zp->z_atime, 16);
#ifdef notyet
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
	    &mtime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
	    &ctime, 16);
#endif
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
	    &zp->z_uid, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
	    &zp->z_gid, 8);

	if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count) != 0 || zp->z_gen == 0 ||
	    (dmu_objset_projectquota_enabled(zfsvfs->z_os) &&
	    (zp->z_pflags & ZFS_PROJID) &&
	    sa_lookup(zp->z_sa_hdl, SA_ZPL_PROJID(zfsvfs), &projid, 8) != 0)) {
		if (hdl == NULL)
			sa_handle_destroy(zp->z_sa_hdl);
		zfs_vnode_forget(vp);
		zp->z_vnode = NULL;
		zfs_znode_free_kmem(zp);
		return (NULL);
	}

	zp->z_projid = projid;
	zp->z_mode = mode;

	/* Cache the xattr parent id */
	if (zp->z_pflags & ZFS_XATTR)
		zp->z_xattr_parent = parent;

	vp->v_type = IFTOVT((mode_t)mode);

	switch (vp->v_type) {
	case VDIR:
		zp->z_zn_prefetch = B_TRUE; /* z_prefetch default is enabled */
		break;
	case VFIFO:
		vp->v_op = &zfs_fifoops;
		break;
	case VREG:
		if (parent == zfsvfs->z_shares_dir) {
			ASSERT0(zp->z_uid);
			ASSERT0(zp->z_gid);
			vp->v_op = &zfs_shareops;
		}
		break;
	default:
		break;
	}

	mutex_enter(&zfsvfs->z_znodes_lock);
	list_insert_tail(&zfsvfs->z_all_znodes, zp);
	zp->z_zfsvfs = zfsvfs;
	mutex_exit(&zfsvfs->z_znodes_lock);

#if __FreeBSD_version >= 1400077
	vn_set_state(vp, VSTATE_CONSTRUCTED);
#endif
	VN_LOCK_AREC(vp);
	if (vp->v_type != VFIFO)
		VN_LOCK_ASHARE(vp);

	return (zp);
}

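/*
 * Zero-filled scratch data used when populating the fixed-layout
 * (DMU_OT_ZNODE) bonus buffer in zfs_mknode() below.
 */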
static uint64_t empty_xattr;
static uint64_t pad[4];
static zfs_acl_phys_t acl_phys;
/*
 * Create a new DMU object to hold a zfs znode.
 *
 *	IN:	dzp	- parent directory for new znode
 *		vap	- file attributes for new znode
 *		tx	- dmu transaction id for zap operations
 *		cr	- credentials of caller
 *		flag	- flags:
 *			  IS_ROOT_NODE	- new object will be root
 *			  IS_XATTR	- new object is an attribute
 *		bonuslen - length of bonus buffer
 *		setaclp	 - File/Dir initial ACL
 *		fuidp	 - Tracks fuid allocation.
 *
 *	OUT:	zpp	- allocated znode
 *
 */
void
zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
    uint_t flag, znode_t **zpp, zfs_acl_ids_t *acl_ids)
{
	uint64_t crtime[2], atime[2], mtime[2], ctime[2];
	uint64_t mode, size, links, parent, pflags;
	uint64_t dzp_pflags = 0;
	uint64_t rdev = 0;
	zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
	dmu_buf_t *db;
	timestruc_t now;
	uint64_t gen, obj;
	int bonuslen;
	int dnodesize;
	sa_handle_t *sa_hdl;
	dmu_object_type_t obj_type;
	sa_bulk_attr_t *sa_attrs;
	int cnt = 0;
	zfs_acl_locator_cb_t locate = { 0 };

	ASSERT3P(vap, !=, NULL);
	ASSERT3U((vap->va_mask & AT_MODE), ==, AT_MODE);

	if (zfsvfs->z_replay) {
		obj = vap->va_nodeid;
		now = vap->va_ctime;		/* see zfs_replay_create() */
		gen = vap->va_nblocks;		/* ditto */
		dnodesize = vap->va_fsid;	/* ditto */
	} else {
		obj = 0;
		vfs_timestamp(&now);
		gen = dmu_tx_get_txg(tx);
		dnodesize = dmu_objset_dnodesize(zfsvfs->z_os);
	}

	if (dnodesize == 0)
		dnodesize = DNODE_MIN_SIZE;

	obj_type = zfsvfs->z_use_sa ? DMU_OT_SA : DMU_OT_ZNODE;
	bonuslen = (obj_type == DMU_OT_SA) ?
	    DN_BONUS_SIZE(dnodesize) : ZFS_OLD_ZNODE_PHYS_SIZE;

	/*
	 * Create a new DMU object.
	 */
	/*
	 * There's currently no mechanism for pre-reading the blocks that will
	 * be needed to allocate a new object, so we accept the small chance
	 * that there will be an i/o error and we will fail one of the
	 * assertions below.
	 */
	if (vap->va_type == VDIR) {
		if (zfsvfs->z_replay) {
			VERIFY0(zap_create_claim_norm_dnsize(zfsvfs->z_os, obj,
			    zfsvfs->z_norm, DMU_OT_DIRECTORY_CONTENTS,
			    obj_type, bonuslen, dnodesize, tx));
		} else {
			obj = zap_create_norm_dnsize(zfsvfs->z_os,
			    zfsvfs->z_norm, DMU_OT_DIRECTORY_CONTENTS,
			    obj_type, bonuslen, dnodesize, tx);
		}
	} else {
		if (zfsvfs->z_replay) {
			VERIFY0(dmu_object_claim_dnsize(zfsvfs->z_os, obj,
			    DMU_OT_PLAIN_FILE_CONTENTS, 0,
			    obj_type, bonuslen, dnodesize, tx));
		} else {
			obj = dmu_object_alloc_dnsize(zfsvfs->z_os,
			    DMU_OT_PLAIN_FILE_CONTENTS, 0,
			    obj_type, bonuslen, dnodesize, tx);
		}
	}

	ZFS_OBJ_HOLD_ENTER(zfsvfs, obj);
	VERIFY0(sa_buf_hold(zfsvfs->z_os, obj, NULL, &db));

	/*
	 * If this is the root, fix up the half-initialized parent pointer
	 * to reference the just-allocated physical data area.
	 */
	if (flag & IS_ROOT_NODE) {
		dzp->z_id = obj;
	} else {
		dzp_pflags = dzp->z_pflags;
	}

	/*
	 * If parent is an xattr, so am I.
	 */
	if (dzp_pflags & ZFS_XATTR) {
		flag |= IS_XATTR;
	}

	if (zfsvfs->z_use_fuids)
		pflags = ZFS_ARCHIVE | ZFS_AV_MODIFIED;
	else
		pflags = 0;

	if (vap->va_type == VDIR) {
		size = 2;		/* contents ("." and "..") */
		links = (flag & (IS_ROOT_NODE | IS_XATTR)) ? 2 : 1;
	} else {
		size = links = 0;
	}

	if (vap->va_type == VBLK || vap->va_type == VCHR) {
		rdev = zfs_expldev(vap->va_rdev);
	}

	parent = dzp->z_id;
	mode = acl_ids->z_mode;
	if (flag & IS_XATTR)
		pflags |= ZFS_XATTR;

	/*
	 * No execs denied will be determined when zfs_mode_compute() is called.
	 */
	pflags |= acl_ids->z_aclp->z_hints &
	    (ZFS_ACL_TRIVIAL|ZFS_INHERIT_ACE|ZFS_ACL_AUTO_INHERIT|
	    ZFS_ACL_DEFAULTED|ZFS_ACL_PROTECTED);

	ZFS_TIME_ENCODE(&now, crtime);
	ZFS_TIME_ENCODE(&now, ctime);

	if (vap->va_mask & AT_ATIME) {
		ZFS_TIME_ENCODE(&vap->va_atime, atime);
	} else {
		ZFS_TIME_ENCODE(&now, atime);
	}

	if (vap->va_mask & AT_MTIME) {
		ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
	} else {
		ZFS_TIME_ENCODE(&now, mtime);
	}

	/* Now add in all of the "SA" attributes */
	VERIFY0(sa_handle_get_from_db(zfsvfs->z_os, db, NULL, SA_HDL_SHARED,
	    &sa_hdl));

	/*
	 * Set up the array of attributes to be replaced/set on the new file.
	 *
	 * Order for DMU_OT_ZNODE is critical since it needs to be constructed
	 * in the old znode_phys_t format.  Don't change this ordering.
	 */
	sa_attrs = kmem_alloc(sizeof (sa_bulk_attr_t) * ZPL_END, KM_SLEEP);

	if (obj_type == DMU_OT_ZNODE) {
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zfsvfs),
		    NULL, &atime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zfsvfs),
		    NULL, &mtime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zfsvfs),
		    NULL, &ctime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zfsvfs),
		    NULL, &crtime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zfsvfs),
		    NULL, &gen, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zfsvfs),
		    NULL, &mode, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zfsvfs),
		    NULL, &size, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zfsvfs),
		    NULL, &parent, 8);
	} else {
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zfsvfs),
		    NULL, &mode, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zfsvfs),
		    NULL, &size, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zfsvfs),
		    NULL, &gen, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zfsvfs),
		    NULL, &acl_ids->z_fuid, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zfsvfs),
		    NULL, &acl_ids->z_fgid, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zfsvfs),
		    NULL, &parent, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zfsvfs),
		    NULL, &pflags, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zfsvfs),
		    NULL, &atime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zfsvfs),
		    NULL, &mtime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zfsvfs),
		    NULL, &ctime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zfsvfs),
		    NULL, &crtime, 16);
	}

	SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_LINKS(zfsvfs), NULL, &links, 8);

	if (obj_type == DMU_OT_ZNODE) {
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_XATTR(zfsvfs), NULL,
		    &empty_xattr, 8);
	}
	if (obj_type == DMU_OT_ZNODE ||
	    (vap->va_type == VBLK || vap->va_type == VCHR)) {
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_RDEV(zfsvfs),
		    NULL, &rdev, 8);
	}
	if (obj_type == DMU_OT_ZNODE) {
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zfsvfs),
		    NULL, &pflags, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zfsvfs), NULL,
		    &acl_ids->z_fuid, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zfsvfs), NULL,
		    &acl_ids->z_fgid, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PAD(zfsvfs), NULL, pad,
		    sizeof (uint64_t) * 4);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ZNODE_ACL(zfsvfs), NULL,
		    &acl_phys, sizeof (zfs_acl_phys_t));
	} else if (acl_ids->z_aclp->z_version >= ZFS_ACL_VERSION_FUID) {
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_COUNT(zfsvfs), NULL,
		    &acl_ids->z_aclp->z_acl_count, 8);
		locate.cb_aclp = acl_ids->z_aclp;
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_ACES(zfsvfs),
		    zfs_acl_data_locator, &locate,
		    acl_ids->z_aclp->z_acl_bytes);
		mode = zfs_mode_compute(mode, acl_ids->z_aclp, &pflags,
		    acl_ids->z_fuid, acl_ids->z_fgid);
	}

	VERIFY0(sa_replace_all_by_template(sa_hdl, sa_attrs, cnt, tx));

	if (!(flag & IS_ROOT_NODE)) {
		*zpp = zfs_znode_alloc(zfsvfs, db, 0, obj_type, sa_hdl);
		ASSERT3P(*zpp, !=, NULL);
	} else {
		/*
		 * If we are creating the root node, the "parent" we
		 * passed in is the znode for the root.
		 */
		*zpp = dzp;

		(*zpp)->z_sa_hdl = sa_hdl;
	}

	(*zpp)->z_pflags = pflags;
	(*zpp)->z_mode = mode;
	(*zpp)->z_dnodesize = dnodesize;

	if (vap->va_mask & AT_XVATTR)
		zfs_xvattr_set(*zpp, (xvattr_t *)vap, tx);

	if (obj_type == DMU_OT_ZNODE ||
	    acl_ids->z_aclp->z_version < ZFS_ACL_VERSION_FUID) {
		VERIFY0(zfs_aclset_common(*zpp, acl_ids->z_aclp, cr, tx));
	}
	if (!(flag & IS_ROOT_NODE)) {
		vnode_t *vp = ZTOV(*zpp);
		vp->v_vflag |= VV_FORCEINSMQ;
		int err = insmntque(vp, zfsvfs->z_vfs);
		vp->v_vflag &= ~VV_FORCEINSMQ;
		(void) err;
		KASSERT(err == 0, ("insmntque() failed: error %d", err));
	}
	kmem_free(sa_attrs, sizeof (sa_bulk_attr_t) * ZPL_END);
	ZFS_OBJ_HOLD_EXIT(zfsvfs, obj);
}

/*
 * Update in-core attributes.  It is assumed the caller will be doing an
 * sa_bulk_update to push the changes out.
 */
void
zfs_xvattr_set(znode_t *zp, xvattr_t *xvap, dmu_tx_t *tx)
{
	xoptattr_t *xoap;

	xoap = xva_getxoptattr(xvap);
	ASSERT3P(xoap, !=, NULL);

	if (zp->z_zfsvfs->z_replay == B_FALSE) {
		ASSERT_VOP_IN_SEQC(ZTOV(zp));
	}

	if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) {
		uint64_t times[2];
		ZFS_TIME_ENCODE(&xoap->xoa_createtime, times);
		(void) sa_update(zp->z_sa_hdl, SA_ZPL_CRTIME(zp->z_zfsvfs),
		    &times, sizeof (times), tx);
		XVA_SET_RTN(xvap, XAT_CREATETIME);
	}
	if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
		ZFS_ATTR_SET(zp, ZFS_READONLY, xoap->xoa_readonly,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_READONLY);
	}
	if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
		ZFS_ATTR_SET(zp, ZFS_HIDDEN, xoap->xoa_hidden,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_HIDDEN);
	}
	if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
		ZFS_ATTR_SET(zp, ZFS_SYSTEM, xoap->xoa_system,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_SYSTEM);
	}
	if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
		ZFS_ATTR_SET(zp, ZFS_ARCHIVE, xoap->xoa_archive,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_ARCHIVE);
	}
	if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
		ZFS_ATTR_SET(zp, ZFS_IMMUTABLE, xoap->xoa_immutable,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_IMMUTABLE);
	}
	if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
		ZFS_ATTR_SET(zp, ZFS_NOUNLINK, xoap->xoa_nounlink,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_NOUNLINK);
	}
	if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
		ZFS_ATTR_SET(zp, ZFS_APPENDONLY, xoap->xoa_appendonly,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_APPENDONLY);
	}
	if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
		ZFS_ATTR_SET(zp, ZFS_NODUMP, xoap->xoa_nodump,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_NODUMP);
	}
	if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
		ZFS_ATTR_SET(zp, ZFS_OPAQUE, xoap->xoa_opaque,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_OPAQUE);
	}
	if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
		ZFS_ATTR_SET(zp, ZFS_AV_QUARANTINED,
		    xoap->xoa_av_quarantined, zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);
	}
	if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
		ZFS_ATTR_SET(zp, ZFS_AV_MODIFIED, xoap->xoa_av_modified,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_AV_MODIFIED);
	}
	if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) {
		zfs_sa_set_scanstamp(zp, xvap, tx);
		XVA_SET_RTN(xvap, XAT_AV_SCANSTAMP);
	}
	if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
		ZFS_ATTR_SET(zp, ZFS_REPARSE, xoap->xoa_reparse,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_REPARSE);
	}
	if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) {
		ZFS_ATTR_SET(zp, ZFS_OFFLINE, xoap->xoa_offline,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_OFFLINE);
	}
	if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) {
		ZFS_ATTR_SET(zp, ZFS_SPARSE, xoap->xoa_sparse,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_SPARSE);
	}
}

int
zfs_zget(zfsvfs_t *zfsvfs, uint64_t obj_num, znode_t **zpp)
{
	dmu_object_info_t doi;
	dmu_buf_t *db;
	znode_t *zp;
	vnode_t *vp;
	sa_handle_t *hdl;
	int locked;
	int err;

	getnewvnode_reserve_();
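	/*
	 * We can race with vnode reclamation: if we find an existing but
	 * doomed vnode below, we drop our reference and retry from here
	 * until the reclaim completes.
	 */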
again:
	*zpp = NULL;
	ZFS_OBJ_HOLD_ENTER(zfsvfs, obj_num);

	err = sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db);
	if (err) {
		ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
		getnewvnode_drop_reserve();
		return (err);
	}

	dmu_object_info_from_db(db, &doi);
	if (doi.doi_bonus_type != DMU_OT_SA &&
	    (doi.doi_bonus_type != DMU_OT_ZNODE ||
	    (doi.doi_bonus_type == DMU_OT_ZNODE &&
	    doi.doi_bonus_size < sizeof (znode_phys_t)))) {
		sa_buf_rele(db, NULL);
		ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
		getnewvnode_drop_reserve();
		return (SET_ERROR(EINVAL));
	}

	hdl = dmu_buf_get_user(db);
	if (hdl != NULL) {
		zp = sa_get_userdata(hdl);

		/*
		 * Since "SA" does immediate eviction we
		 * should never find a sa handle that doesn't
		 * know about the znode.
		 */
		ASSERT3P(zp, !=, NULL);
		ASSERT3U(zp->z_id, ==, obj_num);
		if (zp->z_unlinked) {
			err = SET_ERROR(ENOENT);
		} else {
			vp = ZTOV(zp);
			/*
			 * Don't let the vnode disappear after
			 * ZFS_OBJ_HOLD_EXIT.
			 */
			VN_HOLD(vp);
			*zpp = zp;
			err = 0;
		}

		sa_buf_rele(db, NULL);
		ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);

		if (err) {
			getnewvnode_drop_reserve();
			return (err);
		}

		locked = VOP_ISLOCKED(vp);
		VI_LOCK(vp);
		if (VN_IS_DOOMED(vp) && locked != LK_EXCLUSIVE) {
			/*
			 * The vnode is doomed and this thread doesn't
			 * hold the exclusive lock on it, so the vnode
			 * must be being reclaimed by another thread.
			 * Otherwise the doomed vnode is being reclaimed
			 * by this thread and zfs_zget is called from
			 * ZIL internals.
			 */
			VI_UNLOCK(vp);

			/*
			 * XXX vrele() locks the vnode when the last reference
			 * is dropped.  Although in this case the vnode is
			 * doomed / dead and so no inactivation is required,
			 * the vnode lock is still acquired.  That could result
			 * in a LOR with z_teardown_lock if another thread holds
			 * the vnode's lock and tries to take z_teardown_lock.
			 * But that is only possible if the other thread
			 * performs a ZFS vnode operation on the vnode.  That
			 * either should not happen if the vnode is dead or
			 * the thread should also have a reference to the
			 * vnode and thus our reference is not last.
			 */
			VN_RELE(vp);
			goto again;
		}
		VI_UNLOCK(vp);
		getnewvnode_drop_reserve();
		return (err);
	}

	/*
	 * Not found; create a new znode/vnode,
	 * but only if the file exists.
	 *
	 * There is a small window where zfs_vget() could
	 * find this object while a file create is still in
	 * progress.  This is checked for in zfs_znode_alloc().
	 *
	 * If zfs_znode_alloc() fails it will drop the hold on the
	 * bonus buffer.
	 */
	zp = zfs_znode_alloc(zfsvfs, db, doi.doi_data_block_size,
	    doi.doi_bonus_type, NULL);
	if (zp == NULL) {
		err = SET_ERROR(ENOENT);
	} else {
		*zpp = zp;
	}
	if (err == 0) {
		vnode_t *vp = ZTOV(zp);

		err = insmntque(vp, zfsvfs->z_vfs);
		if (err == 0) {
			vp->v_hash = obj_num;
			VOP_UNLOCK1(vp);
		} else {
			zp->z_vnode = NULL;
			zfs_znode_dmu_fini(zp);
			zfs_znode_free(zp);
			*zpp = NULL;
		}
	}
	ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
	getnewvnode_drop_reserve();
	return (err);
}

int
zfs_rezget(znode_t *zp)
{
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	dmu_object_info_t doi;
	dmu_buf_t *db;
	vnode_t *vp;
	uint64_t obj_num = zp->z_id;
	uint64_t mode, size;
	sa_bulk_attr_t bulk[8];
	int err;
	int count = 0;
	uint64_t gen;

	/*
	 * Remove cached pages before reloading the znode, so that they are not
	 * lingering after we run into any error.  Ideally, we should vgone()
	 * the vnode in case of error, but currently we cannot do that
	 * because of the LOR between the vnode lock and z_teardown_lock.
	 * So, instead, we have to "doom" the znode in the illumos style.
	 *
	 * Ignore invalid pages during the scan.  This is to avoid deadlocks
	 * between page busying and the teardown lock, as pages are busied
	 * prior to a VOP_GETPAGES operation, which acquires the teardown
	 * read lock.  Such pages will be invalid and can safely be skipped
	 * here.
	 */
	vp = ZTOV(zp);
#if __FreeBSD_version >= 1400042
	vn_pages_remove_valid(vp, 0, 0);
#else
	vn_pages_remove(vp, 0, 0);
#endif

	ZFS_OBJ_HOLD_ENTER(zfsvfs, obj_num);

	mutex_enter(&zp->z_acl_lock);
	if (zp->z_acl_cached) {
		zfs_acl_free(zp->z_acl_cached);
		zp->z_acl_cached = NULL;
	}
	mutex_exit(&zp->z_acl_lock);

	rw_enter(&zp->z_xattr_lock, RW_WRITER);
	if (zp->z_xattr_cached) {
		nvlist_free(zp->z_xattr_cached);
		zp->z_xattr_cached = NULL;
	}
	rw_exit(&zp->z_xattr_lock);

	ASSERT3P(zp->z_sa_hdl, ==, NULL);
	err = sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db);
	if (err) {
		ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
		return (err);
	}

	dmu_object_info_from_db(db, &doi);
	if (doi.doi_bonus_type != DMU_OT_SA &&
	    (doi.doi_bonus_type != DMU_OT_ZNODE ||
	    (doi.doi_bonus_type == DMU_OT_ZNODE &&
	    doi.doi_bonus_size < sizeof (znode_phys_t)))) {
		sa_buf_rele(db, NULL);
		ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
		return (SET_ERROR(EINVAL));
	}

	zfs_znode_sa_init(zfsvfs, zp, db, doi.doi_bonus_type, NULL);
	size = zp->z_size;

	/* reload cached values */
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL,
	    &gen, sizeof (gen));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
	    &zp->z_size, sizeof (zp->z_size));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
	    &zp->z_links, sizeof (zp->z_links));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
	    &zp->z_pflags, sizeof (zp->z_pflags));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
	    &zp->z_atime, sizeof (zp->z_atime));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
	    &zp->z_uid, sizeof (zp->z_uid));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
	    &zp->z_gid, sizeof (zp->z_gid));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
	    &mode, sizeof (mode));

	if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) {
		zfs_znode_dmu_fini(zp);
		ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
		return (SET_ERROR(EIO));
	}

	zp->z_mode = mode;

	if (gen != zp->z_gen) {
		zfs_znode_dmu_fini(zp);
		ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
		return (SET_ERROR(EIO));
	}

	/*
	 * It is highly improbable but still quite possible that two
	 * objects in different datasets are created with the same
	 * object numbers and in transaction groups with the same
	 * numbers.  znodes corresponding to those objects would
	 * have the same z_id and z_gen, but their other attributes
	 * may be different.
	 * zfs recv -F may replace one of such objects with the other.
	 * As a result file properties recorded in the replaced
	 * object's vnode may no longer match the received object's
	 * properties.  At present the only cached property is the
	 * file's type recorded in v_type.
	 * So, handle this case by leaving the old vnode and znode
	 * disassociated from the actual object.  A new vnode and a
	 * znode will be created if the object is accessed
	 * (e.g. via a look-up).  The old vnode and znode will be
	 * recycled when the last vnode reference is dropped.
	 */
	if (vp->v_type != IFTOVT((mode_t)zp->z_mode)) {
		zfs_znode_dmu_fini(zp);
		ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
		return (SET_ERROR(EIO));
	}

	/*
	 * If the file has zero links, then it has been unlinked on the send
	 * side and it must be in the received unlinked set.
	 * We call zfs_znode_dmu_fini() now to prevent any accesses to the
	 * stale data and to prevent automatic removal of the file in
	 * zfs_zinactive().  The file will be removed either when it is removed
	 * on the send side and the next incremental stream is received or
	 * when the unlinked set gets processed.
	 */
	zp->z_unlinked = (zp->z_links == 0);
	if (zp->z_unlinked) {
		zfs_znode_dmu_fini(zp);
		ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
		return (0);
	}

	zp->z_blksz = doi.doi_data_block_size;
	if (zp->z_size != size)
		vnode_pager_setsize(vp, zp->z_size);

	ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);

	return (0);
}

void
zfs_znode_delete(znode_t *zp, dmu_tx_t *tx)
{
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	objset_t *os = zfsvfs->z_os;
	uint64_t obj = zp->z_id;
	uint64_t acl_obj = zfs_external_acl(zp);

	ZFS_OBJ_HOLD_ENTER(zfsvfs, obj);
	if (acl_obj) {
		VERIFY(!zp->z_is_sa);
		VERIFY0(dmu_object_free(os, acl_obj, tx));
	}
	VERIFY0(dmu_object_free(os, obj, tx));
	zfs_znode_dmu_fini(zp);
	ZFS_OBJ_HOLD_EXIT(zfsvfs, obj);
}

void
zfs_zinactive(znode_t *zp)
{
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	uint64_t z_id = zp->z_id;

	ASSERT3P(zp->z_sa_hdl, !=, NULL);

	/*
	 * Don't allow a zfs_zget() while we're trying to release this znode.
	 */
	ZFS_OBJ_HOLD_ENTER(zfsvfs, z_id);

	/*
	 * If this was the last reference to a file with no links, remove
	 * the file from the file system unless the file system is mounted
	 * read-only.  That can happen, for example, if the file system was
	 * originally read-write, the file was opened, then unlinked and
	 * the file system was made read-only before the file was finally
	 * closed.  The file will remain in the unlinked set.
	 */
	if (zp->z_unlinked) {
		ASSERT(!zfsvfs->z_issnap);
		if ((zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) == 0) {
			ZFS_OBJ_HOLD_EXIT(zfsvfs, z_id);
			zfs_rmnode(zp);
			return;
		}
	}

	zfs_znode_dmu_fini(zp);
	ZFS_OBJ_HOLD_EXIT(zfsvfs, z_id);
	zfs_znode_free(zp);
}

void
zfs_znode_free(znode_t *zp)
{
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
#if __FreeBSD_version >= 1300139
	char *symlink;
#endif

	ASSERT3P(zp->z_sa_hdl, ==, NULL);
	zp->z_vnode = NULL;
	mutex_enter(&zfsvfs->z_znodes_lock);
	POINTER_INVALIDATE(&zp->z_zfsvfs);
	list_remove(&zfsvfs->z_all_znodes, zp);
	mutex_exit(&zfsvfs->z_znodes_lock);

#if __FreeBSD_version >= 1300139
	symlink = atomic_load_ptr(&zp->z_cached_symlink);
	if (symlink != NULL) {
		atomic_store_rel_ptr((uintptr_t *)&zp->z_cached_symlink,
		    (uintptr_t)NULL);
		cache_symlink_free(symlink, strlen(symlink) + 1);
	}
#endif

	if (zp->z_acl_cached) {
		zfs_acl_free(zp->z_acl_cached);
		zp->z_acl_cached = NULL;
	}

	zfs_znode_free_kmem(zp);
}

void
zfs_tstamp_update_setup_ext(znode_t *zp, uint_t flag, uint64_t mtime[2],
    uint64_t ctime[2], boolean_t have_tx)
{
	timestruc_t now;

	vfs_timestamp(&now);

	if (have_tx) {	/* will sa_bulk_update happen really soon? */
		zp->z_atime_dirty = 0;
		zp->z_seq++;
	} else {
		zp->z_atime_dirty = 1;
	}

	if (flag & AT_ATIME) {
		ZFS_TIME_ENCODE(&now, zp->z_atime);
	}

	if (flag & AT_MTIME) {
		ZFS_TIME_ENCODE(&now, mtime);
		if (zp->z_zfsvfs->z_use_fuids) {
			zp->z_pflags |= (ZFS_ARCHIVE |
			    ZFS_AV_MODIFIED);
		}
	}

	if (flag & AT_CTIME) {
		ZFS_TIME_ENCODE(&now, ctime);
		if (zp->z_zfsvfs->z_use_fuids)
			zp->z_pflags |= ZFS_ARCHIVE;
	}
}


void
zfs_tstamp_update_setup(znode_t *zp, uint_t flag, uint64_t mtime[2],
    uint64_t ctime[2])
{
	zfs_tstamp_update_setup_ext(zp, flag, mtime, ctime, B_TRUE);
}
/*
 * Grow the block size for a file.
 *
 *	IN:	zp	- znode of file.
 *		size	- requested block size
 *		tx	- open transaction.
 *
 * NOTE: this function assumes that the znode is write locked.
 */
void
zfs_grow_blocksize(znode_t *zp, uint64_t size, dmu_tx_t *tx)
{
	int error;
	u_longlong_t dummy;

	if (size <= zp->z_blksz)
		return;
	/*
	 * If the file size is already greater than the current blocksize,
	 * we will not grow.  If there is more than one block in a file,
	 * the blocksize cannot change.
	 */
	if (zp->z_blksz && zp->z_size > zp->z_blksz)
		return;

	error = dmu_object_set_blocksize(zp->z_zfsvfs->z_os, zp->z_id,
	    size, 0, tx);

	if (error == ENOTSUP)
		return;
	ASSERT0(error);

	/* What blocksize did we actually get? */
	dmu_object_size_from_db(sa_get_db(zp->z_sa_hdl), &zp->z_blksz, &dummy);
}

/*
 * Increase the file length
 *
 *	IN:	zp	- znode of file to extend.
 *		end	- new end-of-file
 *
 *	RETURN:	0 on success, error code on failure
 */
static int
zfs_extend(znode_t *zp, uint64_t end)
{
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	dmu_tx_t *tx;
	zfs_locked_range_t *lr;
	uint64_t newblksz;
	int error;

	/*
	 * We will change zp_size, lock the whole file.
	 */
	lr = zfs_rangelock_enter(&zp->z_rangelock, 0, UINT64_MAX, RL_WRITER);

	/*
	 * Nothing to do if file already at desired length.
	 */
	if (end <= zp->z_size) {
		zfs_rangelock_exit(lr);
		return (0);
	}
	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	zfs_sa_upgrade_txholds(tx, zp);
	if (end > zp->z_blksz &&
	    (!ISP2(zp->z_blksz) || zp->z_blksz < zfsvfs->z_max_blksz)) {
		/*
		 * We are growing the file past the current block size.
		 */
		if (zp->z_blksz > zp->z_zfsvfs->z_max_blksz) {
			/*
			 * File's blocksize is already larger than the
			 * "recordsize" property.  Only let it grow to
			 * the next power of 2.
			 */
			ASSERT(!ISP2(zp->z_blksz));
			newblksz = MIN(end, 1 << highbit64(zp->z_blksz));
		} else {
			newblksz = MIN(end, zp->z_zfsvfs->z_max_blksz);
		}
		dmu_tx_hold_write(tx, zp->z_id, 0, newblksz);
	} else {
		newblksz = 0;
	}

	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		zfs_rangelock_exit(lr);
		return (error);
	}

	if (newblksz)
		zfs_grow_blocksize(zp, newblksz, tx);

	zp->z_size = end;

	VERIFY0(sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zp->z_zfsvfs),
	    &zp->z_size, sizeof (zp->z_size), tx));

	vnode_pager_setsize(ZTOV(zp), end);

	zfs_rangelock_exit(lr);

	dmu_tx_commit(tx);

	return (0);
}

/*
 * Free space in a file.
 *
 *	IN:	zp	- znode of file to free data in.
 *		off	- start of section to free.
 *		len	- length of section to free.
 *
 *	RETURN:	0 on success, error code on failure
 */
static int
zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
{
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	zfs_locked_range_t *lr;
	int error;

	/*
	 * Lock the range being freed.
	 */
	lr = zfs_rangelock_enter(&zp->z_rangelock, off, len, RL_WRITER);

	/*
	 * Nothing to do if file already at desired length.
	 */
	if (off >= zp->z_size) {
		zfs_rangelock_exit(lr);
		return (0);
	}

	if (off + len > zp->z_size)
		len = zp->z_size - off;

	error = dmu_free_long_range(zfsvfs->z_os, zp->z_id, off, len);

	if (error == 0) {
#if __FreeBSD_version >= 1400032
		vnode_pager_purge_range(ZTOV(zp), off, off + len);
#else
		/*
		 * Before __FreeBSD_version 1400032 we cannot free blocks in
		 * the middle of a file, but only at the end of a file, so
		 * this code path should never happen.
		 */
		vnode_pager_setsize(ZTOV(zp), off);
#endif
	}

	zfs_rangelock_exit(lr);

	return (error);
}

/*
 * Truncate a file
 *
 *	IN:	zp	- znode of file to free data in.
 *		end	- new end-of-file.
 *
 *	RETURN:	0 on success, error code on failure
 */
static int
zfs_trunc(znode_t *zp, uint64_t end)
{
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	vnode_t *vp = ZTOV(zp);
	dmu_tx_t *tx;
	zfs_locked_range_t *lr;
	int error;
	sa_bulk_attr_t bulk[2];
	int count = 0;

	/*
	 * We will change zp_size, lock the whole file.
	 */
	lr = zfs_rangelock_enter(&zp->z_rangelock, 0, UINT64_MAX, RL_WRITER);

	/*
	 * Nothing to do if file already at desired length.
	 */
	if (end >= zp->z_size) {
		zfs_rangelock_exit(lr);
		return (0);
	}

	error = dmu_free_long_range(zfsvfs->z_os, zp->z_id, end,
	    DMU_OBJECT_END);
	if (error) {
		zfs_rangelock_exit(lr);
		return (error);
	}
	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	zfs_sa_upgrade_txholds(tx, zp);
	dmu_tx_mark_netfree(tx);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		zfs_rangelock_exit(lr);
		return (error);
	}

	zp->z_size = end;
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs),
	    NULL, &zp->z_size, sizeof (zp->z_size));

	if (end == 0) {
		zp->z_pflags &= ~ZFS_SPARSE;
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
		    NULL, &zp->z_pflags, 8);
	}
	VERIFY0(sa_bulk_update(zp->z_sa_hdl, bulk, count, tx));

	dmu_tx_commit(tx);

	/*
	 * Clear any mapped pages in the truncated region.  This has to
	 * happen outside of the transaction to avoid the possibility of
	 * a deadlock with someone trying to push a page that we are
	 * about to invalidate.
	 */
	vnode_pager_setsize(vp, end);

	zfs_rangelock_exit(lr);

	return (0);
}

/*
 * Free space in a file
 *
 *	IN:	zp	- znode of file to free data in.
 *		off	- start of range
 *		len	- end of range (0 => EOF)
 *		flag	- current file open mode flags.
 *		log	- TRUE if this action should be logged
 *
 *	RETURN:	0 on success, error code on failure
 */
int
zfs_freesp(znode_t *zp, uint64_t off, uint64_t len, int flag, boolean_t log)
{
	dmu_tx_t *tx;
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	zilog_t *zilog = zfsvfs->z_log;
	uint64_t mode;
	uint64_t mtime[2], ctime[2];
	sa_bulk_attr_t bulk[3];
	int count = 0;
	int error;

	if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs), &mode,
	    sizeof (mode))) != 0)
		return (error);

	if (off > zp->z_size) {
		error = zfs_extend(zp, off+len);
		if (error == 0 && log)
			goto log;
		else
			return (error);
	}

	if (len == 0) {
		error = zfs_trunc(zp, off);
	} else {
		if ((error = zfs_free_range(zp, off, len)) == 0 &&
		    off + len > zp->z_size)
			error = zfs_extend(zp, off+len);
	}
	if (error || !log)
		return (error);
log:
	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	zfs_sa_upgrade_txholds(tx, zp);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, mtime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, ctime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
	    NULL, &zp->z_pflags, 8);
	zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
	error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
	ASSERT0(error);

	zfs_log_truncate(zilog, tx, TX_TRUNCATE, zp, off, len);

	dmu_tx_commit(tx);
	return (0);
}

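/*
 * Create the file system "skeleton" for a new dataset: the master node,
 * the version and SA registration objects, the unlinked ("delete") set,
 * the root directory, and the shares directory.
 */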
void
zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
{
	uint64_t moid, obj, sa_obj, version;
	uint64_t sense = ZFS_CASE_SENSITIVE;
	uint64_t norm = 0;
	nvpair_t *elem;
	int error;
	int i;
	znode_t *rootzp = NULL;
	zfsvfs_t *zfsvfs;
	vattr_t vattr;
	znode_t *zp;
	zfs_acl_ids_t acl_ids;

	/*
	 * First attempt to create master node.
	 */
	/*
	 * In an empty objset, there are no blocks to read and thus
	 * there can be no i/o errors (which we assert below).
	 */
	moid = MASTER_NODE_OBJ;
	error = zap_create_claim(os, moid, DMU_OT_MASTER_NODE,
	    DMU_OT_NONE, 0, tx);
	ASSERT0(error);

	/*
	 * Set starting attributes.
	 */
	version = zfs_zpl_version_map(spa_version(dmu_objset_spa(os)));
	elem = NULL;
	while ((elem = nvlist_next_nvpair(zplprops, elem)) != NULL) {
		/* For the moment we expect all zpl props to be uint64_ts */
		uint64_t val;
		const char *name;

		ASSERT3S(nvpair_type(elem), ==, DATA_TYPE_UINT64);
		val = fnvpair_value_uint64(elem);
		name = nvpair_name(elem);
		if (strcmp(name, zfs_prop_to_name(ZFS_PROP_VERSION)) == 0) {
			if (val < version)
				version = val;
		} else {
			error = zap_update(os, moid, name, 8, 1, &val, tx);
		}
		ASSERT0(error);
		if (strcmp(name, zfs_prop_to_name(ZFS_PROP_NORMALIZE)) == 0)
			norm = val;
		else if (strcmp(name, zfs_prop_to_name(ZFS_PROP_CASE)) == 0)
			sense = val;
	}
	ASSERT3U(version, !=, 0);
	error = zap_update(os, moid, ZPL_VERSION_STR, 8, 1, &version, tx);
	ASSERT0(error);

	/*
	 * Create zap object used for SA attribute registration
	 */

	if (version >= ZPL_VERSION_SA) {
		sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE,
		    DMU_OT_NONE, 0, tx);
		error = zap_add(os, moid, ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
		ASSERT0(error);
	} else {
		sa_obj = 0;
	}
	/*
	 * Create a delete queue.
	 */
	obj = zap_create(os, DMU_OT_UNLINKED_SET, DMU_OT_NONE, 0, tx);

	error = zap_add(os, moid, ZFS_UNLINKED_SET, 8, 1, &obj, tx);
	ASSERT0(error);

	/*
	 * Create root znode.  Create minimal znode/vnode/zfsvfs
	 * to allow zfs_mknode to work.
	 */
	VATTR_NULL(&vattr);
	vattr.va_mask = AT_MODE|AT_UID|AT_GID;
	vattr.va_type = VDIR;
	vattr.va_mode = S_IFDIR|0755;
	vattr.va_uid = crgetuid(cr);
	vattr.va_gid = crgetgid(cr);

	zfsvfs = kmem_zalloc(sizeof (zfsvfs_t), KM_SLEEP);

	rootzp = zfs_znode_alloc_kmem(KM_SLEEP);
	ASSERT(!POINTER_IS_VALID(rootzp->z_zfsvfs));
	rootzp->z_unlinked = 0;
	rootzp->z_atime_dirty = 0;
	rootzp->z_is_sa = USE_SA(version, os);

	zfsvfs->z_os = os;
	zfsvfs->z_parent = zfsvfs;
	zfsvfs->z_version = version;
	zfsvfs->z_use_fuids = USE_FUIDS(version, os);
	zfsvfs->z_use_sa = USE_SA(version, os);
	zfsvfs->z_norm = norm;

	error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
	    &zfsvfs->z_attr_table);

	ASSERT0(error);

	/*
	 * Fold case on file systems that are always or sometimes case
	 * insensitive.
	 */
	if (sense == ZFS_CASE_INSENSITIVE || sense == ZFS_CASE_MIXED)
		zfsvfs->z_norm |= U8_TEXTPREP_TOUPPER;

	mutex_init(&zfsvfs->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&zfsvfs->z_all_znodes, sizeof (znode_t),
	    offsetof(znode_t, z_link_node));

	for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
		mutex_init(&zfsvfs->z_hold_mtx[i], NULL, MUTEX_DEFAULT, NULL);

	rootzp->z_zfsvfs = zfsvfs;
	VERIFY0(zfs_acl_ids_create(rootzp, IS_ROOT_NODE, &vattr,
	    cr, NULL, &acl_ids, NULL));
	zfs_mknode(rootzp, &vattr, tx, cr, IS_ROOT_NODE, &zp, &acl_ids);
	ASSERT3P(zp, ==, rootzp);
	error = zap_add(os, moid, ZFS_ROOT_OBJ, 8, 1, &rootzp->z_id, tx);
	ASSERT0(error);
	zfs_acl_ids_free(&acl_ids);
	POINTER_INVALIDATE(&rootzp->z_zfsvfs);

	sa_handle_destroy(rootzp->z_sa_hdl);
	zfs_znode_free_kmem(rootzp);

	/*
	 * Create shares directory
	 */

	error = zfs_create_share_dir(zfsvfs, tx);

	ASSERT0(error);

	for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
		mutex_destroy(&zfsvfs->z_hold_mtx[i]);
	kmem_free(zfsvfs, sizeof (zfsvfs_t));
}
#endif /* _KERNEL */
1802
1803 static int
zfs_sa_setup(objset_t * osp,sa_attr_type_t ** sa_table)1804 zfs_sa_setup(objset_t *osp, sa_attr_type_t **sa_table)
1805 {
1806 uint64_t sa_obj = 0;
1807 int error;
1808
1809 error = zap_lookup(osp, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1, &sa_obj);
1810 if (error != 0 && error != ENOENT)
1811 return (error);
1812
1813 error = sa_setup(osp, sa_obj, zfs_attr_table, ZPL_END, sa_table);
1814 return (error);
1815 }
1816
1817 static int
zfs_grab_sa_handle(objset_t * osp,uint64_t obj,sa_handle_t ** hdlp,dmu_buf_t ** db,const void * tag)1818 zfs_grab_sa_handle(objset_t *osp, uint64_t obj, sa_handle_t **hdlp,
1819 dmu_buf_t **db, const void *tag)
1820 {
1821 dmu_object_info_t doi;
1822 int error;
1823
1824 if ((error = sa_buf_hold(osp, obj, tag, db)) != 0)
1825 return (error);
1826
1827 dmu_object_info_from_db(*db, &doi);
1828 if ((doi.doi_bonus_type != DMU_OT_SA &&
1829 doi.doi_bonus_type != DMU_OT_ZNODE) ||
1830 (doi.doi_bonus_type == DMU_OT_ZNODE &&
1831 doi.doi_bonus_size < sizeof (znode_phys_t))) {
1832 sa_buf_rele(*db, tag);
1833 return (SET_ERROR(ENOTSUP));
1834 }
1835
1836 error = sa_handle_get(osp, obj, NULL, SA_HDL_PRIVATE, hdlp);
1837 if (error != 0) {
1838 sa_buf_rele(*db, tag);
1839 return (error);
1840 }
1841
1842 return (0);
1843 }
1844
1845 static void
zfs_release_sa_handle(sa_handle_t * hdl,dmu_buf_t * db,const void * tag)1846 zfs_release_sa_handle(sa_handle_t *hdl, dmu_buf_t *db, const void *tag)
1847 {
1848 sa_handle_destroy(hdl);
1849 sa_buf_rele(db, tag);
1850 }
1851
1852 /*
1853 * Given an object number, return its parent object number and whether
1854 * or not the object is an extended attribute directory.
1855 */
1856 static int
zfs_obj_to_pobj(objset_t * osp,sa_handle_t * hdl,sa_attr_type_t * sa_table,uint64_t * pobjp,int * is_xattrdir)1857 zfs_obj_to_pobj(objset_t *osp, sa_handle_t *hdl, sa_attr_type_t *sa_table,
1858 uint64_t *pobjp, int *is_xattrdir)
1859 {
1860 uint64_t parent;
1861 uint64_t pflags;
1862 uint64_t mode;
1863 uint64_t parent_mode;
1864 sa_bulk_attr_t bulk[3];
1865 sa_handle_t *sa_hdl;
1866 dmu_buf_t *sa_db;
1867 int count = 0;
1868 int error;
1869
1870 SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_PARENT], NULL,
1871 &parent, sizeof (parent));
1872 SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_FLAGS], NULL,
1873 &pflags, sizeof (pflags));
1874 SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_MODE], NULL,
1875 &mode, sizeof (mode));
1876
1877 if ((error = sa_bulk_lookup(hdl, bulk, count)) != 0)
1878 return (error);
1879
1880 /*
1881 * When a link is removed its parent pointer is not changed and will
1882 * be invalid. There are two cases where a link is removed but the
1883 * file stays around, when it goes to the delete queue and when there
1884 * are additional links.
1885 */
1886 error = zfs_grab_sa_handle(osp, parent, &sa_hdl, &sa_db, FTAG);
1887 if (error != 0)
1888 return (error);
1889
1890 error = sa_lookup(sa_hdl, ZPL_MODE, &parent_mode, sizeof (parent_mode));
1891 zfs_release_sa_handle(sa_hdl, sa_db, FTAG);
1892 if (error != 0)
1893 return (error);
1894
1895 *is_xattrdir = ((pflags & ZFS_XATTR) != 0) && S_ISDIR(mode);
1896
	/*
	 * Extended attributes can be applied to files, directories, etc.,
	 * so an extended attribute directory's parent may be any object
	 * type.  Otherwise the parent must be a directory.
	 */
	if (!*is_xattrdir && !S_ISDIR(parent_mode))
		return (SET_ERROR(EINVAL));

	*pobjp = parent;

	return (0);
}

/*
 * Given an object number, return some ZPL-level statistics.
 */
static int
zfs_obj_to_stats_impl(sa_handle_t *hdl, sa_attr_type_t *sa_table,
    zfs_stat_t *sb)
{
	sa_bulk_attr_t bulk[4];
	int count = 0;

	SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_MODE], NULL,
	    &sb->zs_mode, sizeof (sb->zs_mode));
	SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_GEN], NULL,
	    &sb->zs_gen, sizeof (sb->zs_gen));
	SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_LINKS], NULL,
	    &sb->zs_links, sizeof (sb->zs_links));
	SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_CTIME], NULL,
	    &sb->zs_ctime, sizeof (sb->zs_ctime));

	return (sa_bulk_lookup(hdl, bulk, count));
}

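/*
 * Given an object number, construct its path name by walking the chain
 * of parent pointers up to the root.  The path is assembled from the
 * tail of the buffer toward the front.  Objects on the unlinked
 * (delete) queue no longer have a valid path and return ESTALE.
 */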
static int
zfs_obj_to_path_impl(objset_t *osp, uint64_t obj, sa_handle_t *hdl,
    sa_attr_type_t *sa_table, char *buf, int len)
{
	sa_handle_t *sa_hdl;
	sa_handle_t *prevhdl = NULL;
	dmu_buf_t *prevdb = NULL;
	dmu_buf_t *sa_db = NULL;
	char *path = buf + len - 1;
	int error;

	*path = '\0';
	sa_hdl = hdl;

	uint64_t deleteq_obj;
	VERIFY0(zap_lookup(osp, MASTER_NODE_OBJ,
	    ZFS_UNLINKED_SET, sizeof (uint64_t), 1, &deleteq_obj));
	error = zap_lookup_int(osp, deleteq_obj, obj);
	if (error == 0) {
		return (ESTALE);
	} else if (error != ENOENT) {
		return (error);
	}

	for (;;) {
		uint64_t pobj;
		char component[MAXNAMELEN + 2];
		size_t complen;
		int is_xattrdir;

		if (prevdb) {
			ASSERT3P(prevhdl, !=, NULL);
			zfs_release_sa_handle(prevhdl, prevdb, FTAG);
		}

		if ((error = zfs_obj_to_pobj(osp, sa_hdl, sa_table, &pobj,
		    &is_xattrdir)) != 0)
			break;

		if (pobj == obj) {
			if (path[0] != '/')
				*--path = '/';
			break;
		}

		component[0] = '/';
		if (is_xattrdir) {
			(void) sprintf(component + 1, "<xattrdir>");
		} else {
			error = zap_value_search(osp, pobj, obj,
			    ZFS_DIRENT_OBJ(-1ULL), component + 1);
			if (error != 0)
				break;
		}

		complen = strlen(component);
		path -= complen;
		ASSERT3P(path, >=, buf);
		memcpy(path, component, complen);
		obj = pobj;

		if (sa_hdl != hdl) {
			prevhdl = sa_hdl;
			prevdb = sa_db;
		}
		error = zfs_grab_sa_handle(osp, obj, &sa_hdl, &sa_db, FTAG);
		if (error != 0) {
			sa_hdl = prevhdl;
			sa_db = prevdb;
			break;
		}
	}

	if (sa_hdl != NULL && sa_hdl != hdl) {
		ASSERT3P(sa_db, !=, NULL);
		zfs_release_sa_handle(sa_hdl, sa_db, FTAG);
	}

	if (error == 0)
		(void) memmove(buf, path, buf + len - path);

	return (error);
}

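/*
 * Given an object number, return its path name relative to the root of
 * the dataset.
 */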
int
zfs_obj_to_path(objset_t *osp, uint64_t obj, char *buf, int len)
{
	sa_attr_type_t *sa_table;
	sa_handle_t *hdl;
	dmu_buf_t *db;
	int error;

	error = zfs_sa_setup(osp, &sa_table);
	if (error != 0)
		return (error);

	error = zfs_grab_sa_handle(osp, obj, &hdl, &db, FTAG);
	if (error != 0)
		return (error);

	error = zfs_obj_to_path_impl(osp, obj, hdl, sa_table, buf, len);

	zfs_release_sa_handle(hdl, db, FTAG);
	return (error);
}

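/*
 * Given an object number, return both its ZPL-level statistics and its
 * path name.
 */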
int
zfs_obj_to_stats(objset_t *osp, uint64_t obj, zfs_stat_t *sb,
    char *buf, int len)
{
	char *path = buf + len - 1;
	sa_attr_type_t *sa_table;
	sa_handle_t *hdl;
	dmu_buf_t *db;
	int error;

	*path = '\0';

	error = zfs_sa_setup(osp, &sa_table);
	if (error != 0)
		return (error);

	error = zfs_grab_sa_handle(osp, obj, &hdl, &db, FTAG);
	if (error != 0)
		return (error);

	error = zfs_obj_to_stats_impl(hdl, sa_table, sb);
	if (error != 0) {
		zfs_release_sa_handle(hdl, db, FTAG);
		return (error);
	}

	error = zfs_obj_to_path_impl(osp, obj, hdl, sa_table, buf, len);

	zfs_release_sa_handle(hdl, db, FTAG);
	return (error);
}

/*
 * Read a property stored within the master node.
 */
int
zfs_get_zplprop(objset_t *os, zfs_prop_t prop, uint64_t *value)
{
	uint64_t *cached_copy = NULL;

	/*
	 * Figure out where in the objset_t the cached copy would live, if it
	 * is available for the requested property.
	 */
	if (os != NULL) {
		switch (prop) {
		case ZFS_PROP_VERSION:
			cached_copy = &os->os_version;
			break;
		case ZFS_PROP_NORMALIZE:
			cached_copy = &os->os_normalization;
			break;
		case ZFS_PROP_UTF8ONLY:
			cached_copy = &os->os_utf8only;
			break;
		case ZFS_PROP_CASE:
			cached_copy = &os->os_casesensitivity;
			break;
		default:
			break;
		}
	}
	if (cached_copy != NULL && *cached_copy != OBJSET_PROP_UNINITIALIZED) {
		*value = *cached_copy;
		return (0);
	}

	/*
	 * If the property wasn't cached, look up the file system's value for
	 * the property. For the version property, we look up a slightly
	 * different string.
	 */
	const char *pname;
	int error = ENOENT;
	if (prop == ZFS_PROP_VERSION) {
		pname = ZPL_VERSION_STR;
	} else {
		pname = zfs_prop_to_name(prop);
	}

	if (os != NULL) {
		ASSERT3U(os->os_phys->os_type, ==, DMU_OST_ZFS);
		error = zap_lookup(os, MASTER_NODE_OBJ, pname, 8, 1, value);
	}

	if (error == ENOENT) {
		/* No value set, use the default value */
		switch (prop) {
		case ZFS_PROP_VERSION:
			*value = ZPL_VERSION;
			break;
		case ZFS_PROP_NORMALIZE:
		case ZFS_PROP_UTF8ONLY:
			*value = 0;
			break;
		case ZFS_PROP_CASE:
			*value = ZFS_CASE_SENSITIVE;
			break;
		case ZFS_PROP_ACLTYPE:
			*value = ZFS_ACLTYPE_NFSV4;
			break;
		default:
			return (error);
		}
		error = 0;
	}

	/*
	 * If one of the methods for getting the property value above worked,
	 * copy it into the objset_t's cache.
	 */
	if (error == 0 && cached_copy != NULL) {
		*cached_copy = *value;
	}

	return (error);
}

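/*
 * Propagate the znode's current size to the vnode's VM object so the
 * pager's notion of the file size stays in sync with z_size.
 */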
void
zfs_znode_update_vfs(znode_t *zp)
{
	vm_object_t object;

	if ((object = ZTOV(zp)->v_object) == NULL ||
	    zp->z_size == object->un_pager.vnp.vnp_size)
		return;

	vnode_pager_setsize(ZTOV(zp), zp->z_size);
}

#ifdef _KERNEL
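/*
 * Given a znode, look up its parent znode and the name under which it
 * appears in that parent.  Extended attributes and the root object
 * (which has no parent entry) fail with EINVAL.
 */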
int
zfs_znode_parent_and_name(znode_t *zp, znode_t **dzpp, char *buf)
{
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	uint64_t parent;
	int is_xattrdir;
	int err;

	/* Extended attributes should not be visible as regular files. */
	if ((zp->z_pflags & ZFS_XATTR) != 0)
		return (SET_ERROR(EINVAL));

	err = zfs_obj_to_pobj(zfsvfs->z_os, zp->z_sa_hdl, zfsvfs->z_attr_table,
	    &parent, &is_xattrdir);
	if (err != 0)
		return (err);
	ASSERT0(is_xattrdir);

	/* No name as this is a root object. */
	if (parent == zp->z_id)
		return (SET_ERROR(EINVAL));

	err = zap_value_search(zfsvfs->z_os, parent, zp->z_id,
	    ZFS_DIRENT_OBJ(-1ULL), buf);
	if (err != 0)
		return (err);
	err = zfs_zget(zfsvfs, parent, dzpp);
	return (err);
}
#endif /* _KERNEL */

#ifdef _KERNEL
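/*
 * Enforce the calling thread's RLIMIT_FSIZE limit: if fsize exceeds
 * the limit, post SIGXFSZ to the process and return EFBIG.
 */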
int
zfs_rlimit_fsize(off_t fsize)
{
	struct thread *td = curthread;
	off_t lim;

	if (td == NULL)
		return (0);

	lim = lim_cur(td, RLIMIT_FSIZE);
	if (__predict_true((uoff_t)fsize <= lim))
		return (0);

	/*
	 * The limit is reached.
	 */
	PROC_LOCK(td->td_proc);
	kern_psignal(td->td_proc, SIGXFSZ);
	PROC_UNLOCK(td->td_proc);

	return (EFBIG);
}
#endif /* _KERNEL */
