xref: /illumos-gate/usr/src/uts/common/fs/zfs/dmu_objset.c (revision ceeba6f9)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  */
24 
25 #include <sys/cred.h>
26 #include <sys/zfs_context.h>
27 #include <sys/dmu_objset.h>
28 #include <sys/dsl_dir.h>
29 #include <sys/dsl_dataset.h>
30 #include <sys/dsl_prop.h>
31 #include <sys/dsl_pool.h>
32 #include <sys/dsl_synctask.h>
33 #include <sys/dsl_deleg.h>
34 #include <sys/dnode.h>
35 #include <sys/dbuf.h>
36 #include <sys/zvol.h>
37 #include <sys/dmu_tx.h>
38 #include <sys/zap.h>
39 #include <sys/zil.h>
40 #include <sys/dmu_impl.h>
41 #include <sys/zfs_ioctl.h>
42 #include <sys/sunddi.h>
43 #include <sys/sa.h>
44 
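/*
 * Simple accessors for commonly used fields of the in-core objset_t.
 */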
45 spa_t *
46 dmu_objset_spa(objset_t *os)
47 {
48 	return (os->os_spa);
49 }
50 
51 zilog_t *
52 dmu_objset_zil(objset_t *os)
53 {
54 	return (os->os_zil);
55 }
56 
57 dsl_pool_t *
58 dmu_objset_pool(objset_t *os)
59 {
60 	dsl_dataset_t *ds;
61 
62 	if ((ds = os->os_dsl_dataset) != NULL && ds->ds_dir)
63 		return (ds->ds_dir->dd_pool);
64 	else
65 		return (spa_get_dsl(os->os_spa));
66 }
67 
68 dsl_dataset_t *
69 dmu_objset_ds(objset_t *os)
70 {
71 	return (os->os_dsl_dataset);
72 }
73 
74 dmu_objset_type_t
75 dmu_objset_type(objset_t *os)
76 {
77 	return (os->os_phys->os_type);
78 }
79 
80 void
81 dmu_objset_name(objset_t *os, char *buf)
82 {
83 	dsl_dataset_name(os->os_dsl_dataset, buf);
84 }
85 
86 uint64_t
87 dmu_objset_id(objset_t *os)
88 {
89 	dsl_dataset_t *ds = os->os_dsl_dataset;
90 
91 	return (ds ? ds->ds_object : 0);
92 }
93 
94 uint64_t
95 dmu_objset_logbias(objset_t *os)
96 {
97 	return (os->os_logbias);
98 }
99 
100 static void
101 checksum_changed_cb(void *arg, uint64_t newval)
102 {
103 	objset_t *os = arg;
104 
105 	/*
106 	 * Inheritance should have been done by now.
107 	 */
108 	ASSERT(newval != ZIO_CHECKSUM_INHERIT);
109 
110 	os->os_checksum = zio_checksum_select(newval, ZIO_CHECKSUM_ON_VALUE);
111 }
112 
113 static void
114 compression_changed_cb(void *arg, uint64_t newval)
115 {
116 	objset_t *os = arg;
117 
118 	/*
119 	 * Inheritance and range checking should have been done by now.
120 	 */
121 	ASSERT(newval != ZIO_COMPRESS_INHERIT);
122 
123 	os->os_compress = zio_compress_select(newval, ZIO_COMPRESS_ON_VALUE);
124 }
125 
126 static void
127 copies_changed_cb(void *arg, uint64_t newval)
128 {
129 	objset_t *os = arg;
130 
131 	/*
132 	 * Inheritance and range checking should have been done by now.
133 	 */
134 	ASSERT(newval > 0);
135 	ASSERT(newval <= spa_max_replication(os->os_spa));
136 
137 	os->os_copies = newval;
138 }
139 
140 static void
141 dedup_changed_cb(void *arg, uint64_t newval)
142 {
143 	objset_t *os = arg;
144 	spa_t *spa = os->os_spa;
145 	enum zio_checksum checksum;
146 
147 	/*
148 	 * Inheritance should have been done by now.
149 	 */
150 	ASSERT(newval != ZIO_CHECKSUM_INHERIT);
151 
152 	checksum = zio_checksum_dedup_select(spa, newval, ZIO_CHECKSUM_OFF);
153 
154 	os->os_dedup_checksum = checksum & ZIO_CHECKSUM_MASK;
155 	os->os_dedup_verify = !!(checksum & ZIO_CHECKSUM_VERIFY);
156 }
157 
158 static void
159 primary_cache_changed_cb(void *arg, uint64_t newval)
160 {
161 	objset_t *os = arg;
162 
163 	/*
164 	 * Inheritance and range checking should have been done by now.
165 	 */
166 	ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
167 	    newval == ZFS_CACHE_METADATA);
168 
169 	os->os_primary_cache = newval;
170 }
171 
172 static void
173 secondary_cache_changed_cb(void *arg, uint64_t newval)
174 {
175 	objset_t *os = arg;
176 
177 	/*
178 	 * Inheritance and range checking should have been done by now.
179 	 */
180 	ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
181 	    newval == ZFS_CACHE_METADATA);
182 
183 	os->os_secondary_cache = newval;
184 }
185 
186 static void
187 logbias_changed_cb(void *arg, uint64_t newval)
188 {
189 	objset_t *os = arg;
190 
191 	ASSERT(newval == ZFS_LOGBIAS_LATENCY ||
192 	    newval == ZFS_LOGBIAS_THROUGHPUT);
193 	os->os_logbias = newval;
194 	if (os->os_zil)
195 		zil_set_logbias(os->os_zil, newval);
196 }
197 
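/*
 * Byteswap an on-disk objset_phys_t.  Pools from before
 * SPA_VERSION_USERSPACE use the smaller OBJSET_OLD_PHYS_SIZE layout, which
 * lacks the user/group used dnodes, so those are only swapped when the
 * full-size phys is present.
 */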
198 void
199 dmu_objset_byteswap(void *buf, size_t size)
200 {
201 	objset_phys_t *osp = buf;
202 
203 	ASSERT(size == OBJSET_OLD_PHYS_SIZE || size == sizeof (objset_phys_t));
204 	dnode_byteswap(&osp->os_meta_dnode);
205 	byteswap_uint64_array(&osp->os_zil_header, sizeof (zil_header_t));
206 	osp->os_type = BSWAP_64(osp->os_type);
207 	osp->os_flags = BSWAP_64(osp->os_flags);
208 	if (size == sizeof (objset_phys_t)) {
209 		dnode_byteswap(&osp->os_userused_dnode);
210 		dnode_byteswap(&osp->os_groupused_dnode);
211 	}
212 }
213 
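/*
 * Open the in-core state for an objset: read (or, for a new objset, allocate
 * and zero) the objset_phys_t from the root block pointer, register property
 * callbacks on the owning dataset, set up the dirty/free dnode lists, and
 * open the special (meta, userused, groupused) dnodes.  The caller must hold
 * ds_opening_lock if ds is non-NULL.
 */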
214 int
215 dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
216     objset_t **osp)
217 {
218 	objset_t *os;
219 	int i, err;
220 
221 	ASSERT(ds == NULL || MUTEX_HELD(&ds->ds_opening_lock));
222 
223 	os = kmem_zalloc(sizeof (objset_t), KM_SLEEP);
224 	os->os_dsl_dataset = ds;
225 	os->os_spa = spa;
226 	os->os_rootbp = bp;
227 	if (!BP_IS_HOLE(os->os_rootbp)) {
228 		uint32_t aflags = ARC_WAIT;
229 		zbookmark_t zb;
230 		SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
231 		    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
232 
233 		if (DMU_OS_IS_L2CACHEABLE(os))
234 			aflags |= ARC_L2CACHE;
235 
236 		dprintf_bp(os->os_rootbp, "reading %s", "");
237 		/*
238 		 * NB: once bprewrite scrub is able to change the bp and
239 		 * this is called from dmu_objset_open_ds_os, the bp could
240 		 * change underneath us, and we'll need a lock.
241 		 */
242 		err = arc_read_nolock(NULL, spa, os->os_rootbp,
243 		    arc_getbuf_func, &os->os_phys_buf,
244 		    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, &zb);
245 		if (err) {
246 			kmem_free(os, sizeof (objset_t));
247 			/* convert checksum errors into IO errors */
248 			if (err == ECKSUM)
249 				err = EIO;
250 			return (err);
251 		}
252 
253 		/* Increase the blocksize if we are permitted. */
254 		if (spa_version(spa) >= SPA_VERSION_USERSPACE &&
255 		    arc_buf_size(os->os_phys_buf) < sizeof (objset_phys_t)) {
256 			arc_buf_t *buf = arc_buf_alloc(spa,
257 			    sizeof (objset_phys_t), &os->os_phys_buf,
258 			    ARC_BUFC_METADATA);
259 			bzero(buf->b_data, sizeof (objset_phys_t));
260 			bcopy(os->os_phys_buf->b_data, buf->b_data,
261 			    arc_buf_size(os->os_phys_buf));
262 			(void) arc_buf_remove_ref(os->os_phys_buf,
263 			    &os->os_phys_buf);
264 			os->os_phys_buf = buf;
265 		}
266 
267 		os->os_phys = os->os_phys_buf->b_data;
268 		os->os_flags = os->os_phys->os_flags;
269 	} else {
270 		int size = spa_version(spa) >= SPA_VERSION_USERSPACE ?
271 		    sizeof (objset_phys_t) : OBJSET_OLD_PHYS_SIZE;
272 		os->os_phys_buf = arc_buf_alloc(spa, size,
273 		    &os->os_phys_buf, ARC_BUFC_METADATA);
274 		os->os_phys = os->os_phys_buf->b_data;
275 		bzero(os->os_phys, size);
276 	}
277 
278 	/*
279 	 * Note: the changed_cb will be called once before the register
280 	 * func returns, thus changing the checksum/compression from the
281 	 * default (fletcher2/off).  Snapshots don't need to know about
282 	 * checksum/compression/copies.
283 	 */
284 	if (ds) {
285 		err = dsl_prop_register(ds, "primarycache",
286 		    primary_cache_changed_cb, os);
287 		if (err == 0)
288 			err = dsl_prop_register(ds, "secondarycache",
289 			    secondary_cache_changed_cb, os);
290 		if (!dsl_dataset_is_snapshot(ds)) {
291 			if (err == 0)
292 				err = dsl_prop_register(ds, "checksum",
293 				    checksum_changed_cb, os);
294 			if (err == 0)
295 				err = dsl_prop_register(ds, "compression",
296 				    compression_changed_cb, os);
297 			if (err == 0)
298 				err = dsl_prop_register(ds, "copies",
299 				    copies_changed_cb, os);
300 			if (err == 0)
301 				err = dsl_prop_register(ds, "dedup",
302 				    dedup_changed_cb, os);
303 			if (err == 0)
304 				err = dsl_prop_register(ds, "logbias",
305 				    logbias_changed_cb, os);
306 		}
307 		if (err) {
308 			VERIFY(arc_buf_remove_ref(os->os_phys_buf,
309 			    &os->os_phys_buf) == 1);
310 			kmem_free(os, sizeof (objset_t));
311 			return (err);
312 		}
313 	} else if (ds == NULL) {
314 		/* It's the meta-objset. */
315 		os->os_checksum = ZIO_CHECKSUM_FLETCHER_4;
316 		os->os_compress = ZIO_COMPRESS_LZJB;
317 		os->os_copies = spa_max_replication(spa);
318 		os->os_dedup_checksum = ZIO_CHECKSUM_OFF;
319 		os->os_dedup_verify = 0;
320 		os->os_logbias = 0;
321 		os->os_primary_cache = ZFS_CACHE_ALL;
322 		os->os_secondary_cache = ZFS_CACHE_ALL;
323 	}
324 
325 	os->os_zil_header = os->os_phys->os_zil_header;
326 	os->os_zil = zil_alloc(os, &os->os_zil_header);
327 
328 	for (i = 0; i < TXG_SIZE; i++) {
329 		list_create(&os->os_dirty_dnodes[i], sizeof (dnode_t),
330 		    offsetof(dnode_t, dn_dirty_link[i]));
331 		list_create(&os->os_free_dnodes[i], sizeof (dnode_t),
332 		    offsetof(dnode_t, dn_dirty_link[i]));
333 	}
334 	list_create(&os->os_dnodes, sizeof (dnode_t),
335 	    offsetof(dnode_t, dn_link));
336 	list_create(&os->os_downgraded_dbufs, sizeof (dmu_buf_impl_t),
337 	    offsetof(dmu_buf_impl_t, db_link));
338 
339 	mutex_init(&os->os_lock, NULL, MUTEX_DEFAULT, NULL);
340 	mutex_init(&os->os_obj_lock, NULL, MUTEX_DEFAULT, NULL);
341 	mutex_init(&os->os_user_ptr_lock, NULL, MUTEX_DEFAULT, NULL);
342 
343 	os->os_meta_dnode = dnode_special_open(os,
344 	    &os->os_phys->os_meta_dnode, DMU_META_DNODE_OBJECT);
345 	if (arc_buf_size(os->os_phys_buf) >= sizeof (objset_phys_t)) {
346 		os->os_userused_dnode = dnode_special_open(os,
347 		    &os->os_phys->os_userused_dnode, DMU_USERUSED_OBJECT);
348 		os->os_groupused_dnode = dnode_special_open(os,
349 		    &os->os_phys->os_groupused_dnode, DMU_GROUPUSED_OBJECT);
350 	}
351 
352 	/*
353 	 * We should be the only thread trying to do this because we
354 	 * have ds_opening_lock.
355 	 */
356 	if (ds) {
357 		mutex_enter(&ds->ds_lock);
358 		ASSERT(ds->ds_objset == NULL);
359 		ds->ds_objset = os;
360 		mutex_exit(&ds->ds_lock);
361 	}
362 
363 	*osp = os;
364 	return (0);
365 }
366 
367 int
368 dmu_objset_from_ds(dsl_dataset_t *ds, objset_t **osp)
369 {
370 	int err = 0;
371 
372 	mutex_enter(&ds->ds_opening_lock);
373 	*osp = ds->ds_objset;
374 	if (*osp == NULL) {
375 		err = dmu_objset_open_impl(dsl_dataset_get_spa(ds),
376 		    ds, &ds->ds_phys->ds_bp, osp);
377 	}
378 	mutex_exit(&ds->ds_opening_lock);
379 	return (err);
380 }
381 
382 /* called from zpl */
383 int
384 dmu_objset_hold(const char *name, void *tag, objset_t **osp)
385 {
386 	dsl_dataset_t *ds;
387 	int err;
388 
389 	err = dsl_dataset_hold(name, tag, &ds);
390 	if (err)
391 		return (err);
392 
393 	err = dmu_objset_from_ds(ds, osp);
394 	if (err)
395 		dsl_dataset_rele(ds, tag);
396 
397 	return (err);
398 }
399 
400 /* called from zpl */
401 int
402 dmu_objset_own(const char *name, dmu_objset_type_t type,
403     boolean_t readonly, void *tag, objset_t **osp)
404 {
405 	dsl_dataset_t *ds;
406 	int err;
407 
408 	err = dsl_dataset_own(name, B_FALSE, tag, &ds);
409 	if (err)
410 		return (err);
411 
412 	err = dmu_objset_from_ds(ds, osp);
413 	if (err) {
414 		dsl_dataset_disown(ds, tag);
415 	} else if (type != DMU_OST_ANY && type != (*osp)->os_phys->os_type) {
416 		dmu_objset_disown(*osp, tag);
417 		return (EINVAL);
418 	} else if (!readonly && dsl_dataset_is_snapshot(ds)) {
419 		dmu_objset_disown(*osp, tag);
420 		return (EROFS);
421 	}
422 	return (err);
423 }
424 
425 void
426 dmu_objset_rele(objset_t *os, void *tag)
427 {
428 	dsl_dataset_rele(os->os_dsl_dataset, tag);
429 }
430 
431 void
432 dmu_objset_disown(objset_t *os, void *tag)
433 {
434 	dsl_dataset_disown(os->os_dsl_dataset, tag);
435 }
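/*
 * Illustrative usage of the hold/rele and own/disown pairs above (a sketch
 * only; the dataset name and error handling here are hypothetical, not taken
 * from an actual caller):
 *
 *	objset_t *os;
 *
 *	if (dmu_objset_hold("tank/fs", FTAG, &os) == 0) {
 *		... short-lived, read-only access to os ...
 *		dmu_objset_rele(os, FTAG);
 *	}
 *
 *	if (dmu_objset_own("tank/fs", DMU_OST_ZFS, B_FALSE, FTAG, &os) == 0) {
 *		... long-lived, exclusive use of os (e.g. mounting) ...
 *		dmu_objset_disown(os, FTAG);
 *	}
 */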
436 
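/*
 * Evict cached dbufs from every dnode in this objset, processing the
 * meta-dnode last.  Returns nonzero if dnodes other than the meta-dnode
 * are still present afterwards, zero otherwise.
 */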
437 int
438 dmu_objset_evict_dbufs(objset_t *os)
439 {
440 	dnode_t *dn;
441 
442 	mutex_enter(&os->os_lock);
443 
444 	/* process the mdn last, since the other dnodes have holds on it */
445 	list_remove(&os->os_dnodes, os->os_meta_dnode);
446 	list_insert_tail(&os->os_dnodes, os->os_meta_dnode);
447 
448 	/*
449 	 * Find the first dnode with holds.  We have to do this dance
450 	 * because dnode_add_ref() only works if you already have a
451 	 * hold.  If there are no holds then it has no dbufs, so it's
452 	 * OK to skip it.
453 	 */
454 	for (dn = list_head(&os->os_dnodes);
455 	    dn && !dnode_add_ref(dn, FTAG);
456 	    dn = list_next(&os->os_dnodes, dn))
457 		continue;
458 
459 	while (dn) {
460 		dnode_t *next_dn = dn;
461 
462 		do {
463 			next_dn = list_next(&os->os_dnodes, next_dn);
464 		} while (next_dn && !dnode_add_ref(next_dn, FTAG));
465 
466 		mutex_exit(&os->os_lock);
467 		dnode_evict_dbufs(dn);
468 		dnode_rele(dn, FTAG);
469 		mutex_enter(&os->os_lock);
470 		dn = next_dn;
471 	}
472 	mutex_exit(&os->os_lock);
473 	return (list_head(&os->os_dnodes) != os->os_meta_dnode);
474 }
475 
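/*
 * Tear down an objset that is being evicted: unregister the property
 * callbacks, tear down SA, evict all dbufs, close the special dnodes,
 * free the ZIL state, and free the objset itself.
 */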
476 void
477 dmu_objset_evict(objset_t *os)
478 {
479 	dsl_dataset_t *ds = os->os_dsl_dataset;
480 
481 	for (int t = 0; t < TXG_SIZE; t++)
482 		ASSERT(!dmu_objset_is_dirty(os, t));
483 
484 	if (ds) {
485 		if (!dsl_dataset_is_snapshot(ds)) {
486 			VERIFY(0 == dsl_prop_unregister(ds, "checksum",
487 			    checksum_changed_cb, os));
488 			VERIFY(0 == dsl_prop_unregister(ds, "compression",
489 			    compression_changed_cb, os));
490 			VERIFY(0 == dsl_prop_unregister(ds, "copies",
491 			    copies_changed_cb, os));
492 			VERIFY(0 == dsl_prop_unregister(ds, "dedup",
493 			    dedup_changed_cb, os));
494 			VERIFY(0 == dsl_prop_unregister(ds, "logbias",
495 			    logbias_changed_cb, os));
496 		}
497 		VERIFY(0 == dsl_prop_unregister(ds, "primarycache",
498 		    primary_cache_changed_cb, os));
499 		VERIFY(0 == dsl_prop_unregister(ds, "secondarycache",
500 		    secondary_cache_changed_cb, os));
501 	}
502 
503 	if (os->os_sa)
504 		sa_tear_down(os);
505 
506 	/*
507 	 * We should need only a single pass over the dnode list, since
508 	 * nothing can be added to the list at this point.
509 	 */
510 	(void) dmu_objset_evict_dbufs(os);
511 
512 	dnode_special_close(os->os_meta_dnode);
513 	if (os->os_userused_dnode) {
514 		dnode_special_close(os->os_userused_dnode);
515 		dnode_special_close(os->os_groupused_dnode);
516 	}
517 	zil_free(os->os_zil);
518 
519 	ASSERT3P(list_head(&os->os_dnodes), ==, NULL);
520 
521 	VERIFY(arc_buf_remove_ref(os->os_phys_buf, &os->os_phys_buf) == 1);
522 	mutex_destroy(&os->os_lock);
523 	mutex_destroy(&os->os_obj_lock);
524 	mutex_destroy(&os->os_user_ptr_lock);
525 	kmem_free(os, sizeof (objset_t));
526 }
527 
528 timestruc_t
529 dmu_objset_snap_cmtime(objset_t *os)
530 {
531 	return (dsl_dir_snap_cmtime(os->os_dsl_dataset->ds_dir));
532 }
533 
534 /* called from dsl for meta-objset */
535 objset_t *
536 dmu_objset_create_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
537     dmu_objset_type_t type, dmu_tx_t *tx)
538 {
539 	objset_t *os;
540 	dnode_t *mdn;
541 
542 	ASSERT(dmu_tx_is_syncing(tx));
543 	if (ds)
544 		mutex_enter(&ds->ds_opening_lock);
545 	VERIFY(0 == dmu_objset_open_impl(spa, ds, bp, &os));
546 	if (ds)
547 		mutex_exit(&ds->ds_opening_lock);
548 	mdn = os->os_meta_dnode;
549 
550 	dnode_allocate(mdn, DMU_OT_DNODE, 1 << DNODE_BLOCK_SHIFT,
551 	    DN_MAX_INDBLKSHIFT, DMU_OT_NONE, 0, tx);
552 
553 	/*
554 	 * We don't want to have to increase the meta-dnode's nlevels
555 	 * later, because then we could do it in quiescing context while
556 	 * we are also accessing it in open context.
557 	 *
558 	 * This precaution is not necessary for the MOS (ds == NULL),
559 	 * because the MOS is only updated in syncing context.
560 	 * This is most fortunate: the MOS is the only objset that
561 	 * needs to be synced multiple times as spa_sync() iterates
562 	 * to convergence, so minimizing its dn_nlevels matters.
563 	 */
564 	if (ds != NULL) {
565 		int levels = 1;
566 
567 		/*
568 		 * Determine the number of levels necessary for the meta-dnode
569 		 * to contain DN_MAX_OBJECT dnodes.
570 		 */
571 		while ((uint64_t)mdn->dn_nblkptr << (mdn->dn_datablkshift +
572 		    (levels - 1) * (mdn->dn_indblkshift - SPA_BLKPTRSHIFT)) <
573 		    DN_MAX_OBJECT * sizeof (dnode_phys_t))
574 			levels++;
575 
576 		mdn->dn_next_nlevels[tx->tx_txg & TXG_MASK] =
577 		    mdn->dn_nlevels = levels;
578 	}
579 
580 	ASSERT(type != DMU_OST_NONE);
581 	ASSERT(type != DMU_OST_ANY);
582 	ASSERT(type < DMU_OST_NUMTYPES);
583 	os->os_phys->os_type = type;
584 	if (dmu_objset_userused_enabled(os)) {
585 		os->os_phys->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
586 		os->os_flags = os->os_phys->os_flags;
587 	}
588 
589 	dsl_dataset_dirty(ds, tx);
590 
591 	return (os);
592 }
593 
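/*
 * Arguments passed through the dsl_sync_task for dataset creation; used by
 * both dmu_objset_create() and dmu_objset_clone() below.
 */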
594 struct oscarg {
595 	void (*userfunc)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx);
596 	void *userarg;
597 	dsl_dataset_t *clone_origin;
598 	const char *lastname;
599 	dmu_objset_type_t type;
600 	uint64_t flags;
601 };
602 
603 /*ARGSUSED*/
604 static int
605 dmu_objset_create_check(void *arg1, void *arg2, dmu_tx_t *tx)
606 {
607 	dsl_dir_t *dd = arg1;
608 	struct oscarg *oa = arg2;
609 	objset_t *mos = dd->dd_pool->dp_meta_objset;
610 	int err;
611 	uint64_t ddobj;
612 
613 	err = zap_lookup(mos, dd->dd_phys->dd_child_dir_zapobj,
614 	    oa->lastname, sizeof (uint64_t), 1, &ddobj);
615 	if (err != ENOENT)
616 		return (err ? err : EEXIST);
617 
618 	if (oa->clone_origin != NULL) {
619 		/* You can't clone across pools. */
620 		if (oa->clone_origin->ds_dir->dd_pool != dd->dd_pool)
621 			return (EXDEV);
622 
623 		/* You can only clone snapshots, not the head datasets. */
624 		if (!dsl_dataset_is_snapshot(oa->clone_origin))
625 			return (EINVAL);
626 	}
627 
628 	return (0);
629 }
630 
631 static void
632 dmu_objset_create_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
633 {
634 	dsl_dir_t *dd = arg1;
635 	struct oscarg *oa = arg2;
636 	uint64_t dsobj;
637 
638 	ASSERT(dmu_tx_is_syncing(tx));
639 
640 	dsobj = dsl_dataset_create_sync(dd, oa->lastname,
641 	    oa->clone_origin, oa->flags, cr, tx);
642 
643 	if (oa->clone_origin == NULL) {
644 		dsl_dataset_t *ds;
645 		blkptr_t *bp;
646 		objset_t *os;
647 
648 		VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool, dsobj,
649 		    FTAG, &ds));
650 		bp = dsl_dataset_get_blkptr(ds);
651 		ASSERT(BP_IS_HOLE(bp));
652 
653 		os = dmu_objset_create_impl(dsl_dataset_get_spa(ds),
654 		    ds, bp, oa->type, tx);
655 
656 		if (oa->userfunc)
657 			oa->userfunc(os, oa->userarg, cr, tx);
658 		dsl_dataset_rele(ds, FTAG);
659 	}
660 
661 	spa_history_internal_log(LOG_DS_CREATE, dd->dd_pool->dp_spa,
662 	    tx, cr, "dataset = %llu", dsobj);
663 }
664 
665 int
666 dmu_objset_create(const char *name, dmu_objset_type_t type, uint64_t flags,
667     void (*func)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx), void *arg)
668 {
669 	dsl_dir_t *pdd;
670 	const char *tail;
671 	int err = 0;
672 	struct oscarg oa = { 0 };
673 
674 	ASSERT(strchr(name, '@') == NULL);
675 	err = dsl_dir_open(name, FTAG, &pdd, &tail);
676 	if (err)
677 		return (err);
678 	if (tail == NULL) {
679 		dsl_dir_close(pdd, FTAG);
680 		return (EEXIST);
681 	}
682 
683 	oa.userfunc = func;
684 	oa.userarg = arg;
685 	oa.lastname = tail;
686 	oa.type = type;
687 	oa.flags = flags;
688 
689 	err = dsl_sync_task_do(pdd->dd_pool, dmu_objset_create_check,
690 	    dmu_objset_create_sync, pdd, &oa, 5);
691 	dsl_dir_close(pdd, FTAG);
692 	return (err);
693 }
694 
695 int
696 dmu_objset_clone(const char *name, dsl_dataset_t *clone_origin, uint64_t flags)
697 {
698 	dsl_dir_t *pdd;
699 	const char *tail;
700 	int err = 0;
701 	struct oscarg oa = { 0 };
702 
703 	ASSERT(strchr(name, '@') == NULL);
704 	err = dsl_dir_open(name, FTAG, &pdd, &tail);
705 	if (err)
706 		return (err);
707 	if (tail == NULL) {
708 		dsl_dir_close(pdd, FTAG);
709 		return (EEXIST);
710 	}
711 
712 	oa.lastname = tail;
713 	oa.clone_origin = clone_origin;
714 	oa.flags = flags;
715 
716 	err = dsl_sync_task_do(pdd->dd_pool, dmu_objset_create_check,
717 	    dmu_objset_create_sync, pdd, &oa, 5);
718 	dsl_dir_close(pdd, FTAG);
719 	return (err);
720 }
721 
722 int
723 dmu_objset_destroy(const char *name, boolean_t defer)
724 {
725 	dsl_dataset_t *ds;
726 	int error;
727 
728 	/*
729 	 * dsl_dataset_destroy() can free any claimed-but-unplayed
730 	 * intent log, but if there is an active log, it has blocks that
731 	 * are allocated, but may not yet be reflected in the on-disk
732 	 * structure.  Only the ZIL knows how to free them, so we have
733 	 * to call into it here.
734 	 */
735 	error = dsl_dataset_own(name, B_TRUE, FTAG, &ds);
736 	if (error == 0) {
737 		objset_t *os;
738 		if (dmu_objset_from_ds(ds, &os) == 0)
739 			zil_destroy(dmu_objset_zil(os), B_FALSE);
740 		error = dsl_dataset_destroy(ds, FTAG, defer);
741 		/* dsl_dataset_destroy() closes the ds. */
742 	}
743 
744 	return (error);
745 }
746 
747 struct snaparg {
748 	dsl_sync_task_group_t *dstg;
749 	char *snapname;
750 	char failed[MAXPATHLEN];
751 	boolean_t recursive;
752 	nvlist_t *props;
753 };
754 
755 static int
756 snapshot_check(void *arg1, void *arg2, dmu_tx_t *tx)
757 {
758 	objset_t *os = arg1;
759 	struct snaparg *sn = arg2;
760 
761 	/* The props have already been checked by zfs_check_userprops(). */
762 
763 	return (dsl_dataset_snapshot_check(os->os_dsl_dataset,
764 	    sn->snapname, tx));
765 }
766 
767 static void
768 snapshot_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
769 {
770 	objset_t *os = arg1;
771 	dsl_dataset_t *ds = os->os_dsl_dataset;
772 	struct snaparg *sn = arg2;
773 
774 	dsl_dataset_snapshot_sync(ds, sn->snapname, cr, tx);
775 
776 	if (sn->props) {
777 		dsl_props_arg_t pa;
778 		pa.pa_props = sn->props;
779 		pa.pa_source = ZPROP_SRC_LOCAL;
780 		dsl_props_set_sync(ds->ds_prev, &pa, cr, tx);
781 	}
782 }
783 
784 static int
785 dmu_objset_snapshot_one(const char *name, void *arg)
786 {
787 	struct snaparg *sn = arg;
788 	objset_t *os;
789 	int err;
790 	char *cp;
791 
792 	/*
793 	 * If the objset starts with a '%', then ignore it unless it was
794 	 * explicitly named (i.e., not recursive).  These hidden datasets
795 	 * are always inconsistent, and by not opening them here, we can
796 	 * avoid a race with dsl_dir_destroy_check().
797 	 */
798 	cp = strrchr(name, '/');
799 	if (cp && cp[1] == '%' && sn->recursive)
800 		return (0);
801 
802 	(void) strcpy(sn->failed, name);
803 
804 	/*
805 	 * Check permissions if we are doing a recursive snapshot.  The
806 	 * permission checks for the starting dataset have already been
807 	 * performed in zfs_secpolicy_snapshot().
808 	 */
809 	if (sn->recursive && (err = zfs_secpolicy_snapshot_perms(name, CRED())))
810 		return (err);
811 
812 	err = dmu_objset_hold(name, sn, &os);
813 	if (err != 0)
814 		return (err);
815 
816 	/*
817 	 * If the objset is in an inconsistent state (eg, in the process
818 	 * If the objset is in an inconsistent state (e.g., in the process
819 	 * of being destroyed), don't snapshot it.  As with %hidden
820 	 * datasets, we return EBUSY if this name was explicitly
821 	 * requested (i.e., not recursive), and otherwise ignore it.
822 	if (os->os_dsl_dataset->ds_phys->ds_flags & DS_FLAG_INCONSISTENT) {
823 		dmu_objset_rele(os, sn);
824 		return (sn->recursive ? 0 : EBUSY);
825 	}
826 
827 	/*
828 	 * NB: we need to wait for all in-flight changes to get to disk,
829 	 * so that we snapshot those changes.  zil_suspend does this as
830 	 * a side effect.
831 	 */
832 	err = zil_suspend(dmu_objset_zil(os));
833 	if (err == 0) {
834 		dsl_sync_task_create(sn->dstg, snapshot_check,
835 		    snapshot_sync, os, sn, 3);
836 	} else {
837 		dmu_objset_rele(os, sn);
838 	}
839 
840 	return (err);
841 }
842 
843 int
844 dmu_objset_snapshot(char *fsname, char *snapname,
845     nvlist_t *props, boolean_t recursive)
846 {
847 	dsl_sync_task_t *dst;
848 	struct snaparg sn;
849 	spa_t *spa;
850 	int err;
851 
852 	(void) strcpy(sn.failed, fsname);
853 
854 	err = spa_open(fsname, &spa, FTAG);
855 	if (err)
856 		return (err);
857 
858 	sn.dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
859 	sn.snapname = snapname;
860 	sn.props = props;
861 	sn.recursive = recursive;
862 
863 	if (recursive) {
864 		err = dmu_objset_find(fsname,
865 		    dmu_objset_snapshot_one, &sn, DS_FIND_CHILDREN);
866 	} else {
867 		err = dmu_objset_snapshot_one(fsname, &sn);
868 	}
869 
870 	if (err == 0)
871 		err = dsl_sync_task_group_wait(sn.dstg);
872 
873 	for (dst = list_head(&sn.dstg->dstg_tasks); dst;
874 	    dst = list_next(&sn.dstg->dstg_tasks, dst)) {
875 		objset_t *os = dst->dst_arg1;
876 		dsl_dataset_t *ds = os->os_dsl_dataset;
877 		if (dst->dst_err)
878 			dsl_dataset_name(ds, sn.failed);
879 		zil_resume(dmu_objset_zil(os));
880 		dmu_objset_rele(os, &sn);
881 	}
882 
883 	if (err)
884 		(void) strcpy(fsname, sn.failed);
885 	dsl_sync_task_group_destroy(sn.dstg);
886 	spa_close(spa, FTAG);
887 	return (err);
888 }
889 
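/*
 * Sync every dirty dnode on 'list', moving each one onto 'newlist' (the
 * os_synced_dnodes list) when user/group accounting will need to revisit
 * it after the sync.
 */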
890 static void
891 dmu_objset_sync_dnodes(list_t *list, list_t *newlist, dmu_tx_t *tx)
892 {
893 	dnode_t *dn;
894 
895 	while (dn = list_head(list)) {
896 		ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
897 		ASSERT(dn->dn_dbuf->db_data_pending);
898 		/*
899 		 * Initialize dn_zio outside dnode_sync() because the
900 		 * meta-dnode needs to set it outside dnode_sync().
901 		 */
902 		dn->dn_zio = dn->dn_dbuf->db_data_pending->dr_zio;
903 		ASSERT(dn->dn_zio);
904 
905 		ASSERT3U(dn->dn_nlevels, <=, DN_MAX_LEVELS);
906 		list_remove(list, dn);
907 
908 		if (newlist) {
909 			(void) dnode_add_ref(dn, newlist);
910 			list_insert_tail(newlist, dn);
911 		}
912 
913 		dnode_sync(dn, tx);
914 	}
915 }
916 
917 /* ARGSUSED */
918 static void
919 dmu_objset_write_ready(zio_t *zio, arc_buf_t *abuf, void *arg)
920 {
921 	blkptr_t *bp = zio->io_bp;
922 	objset_t *os = arg;
923 	dnode_phys_t *dnp = &os->os_phys->os_meta_dnode;
924 
925 	ASSERT(bp == os->os_rootbp);
926 	ASSERT(BP_GET_TYPE(bp) == DMU_OT_OBJSET);
927 	ASSERT(BP_GET_LEVEL(bp) == 0);
928 
929 	/*
930 	 * Update rootbp fill count: it should be the number of objects
931 	 * allocated in the object set (not counting the "special"
932 	 * objects that are stored in the objset_phys_t -- the meta
933 	 * dnode and user/group accounting objects).
934 	 */
935 	bp->blk_fill = 0;
936 	for (int i = 0; i < dnp->dn_nblkptr; i++)
937 		bp->blk_fill += dnp->dn_blkptr[i].blk_fill;
938 }
939 
940 /* ARGSUSED */
941 static void
942 dmu_objset_write_done(zio_t *zio, arc_buf_t *abuf, void *arg)
943 {
944 	blkptr_t *bp = zio->io_bp;
945 	blkptr_t *bp_orig = &zio->io_bp_orig;
946 	objset_t *os = arg;
947 
948 	if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
949 		ASSERT(BP_EQUAL(bp, bp_orig));
950 	} else {
951 		dsl_dataset_t *ds = os->os_dsl_dataset;
952 		dmu_tx_t *tx = os->os_synctx;
953 
954 		(void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
955 		dsl_dataset_block_born(ds, bp, tx);
956 	}
957 }
958 
959 /* called from dsl */
960 void
961 dmu_objset_sync(objset_t *os, zio_t *pio, dmu_tx_t *tx)
962 {
963 	int txgoff;
964 	zbookmark_t zb;
965 	zio_prop_t zp;
966 	zio_t *zio;
967 	list_t *list;
968 	list_t *newlist = NULL;
969 	dbuf_dirty_record_t *dr;
970 
971 	dprintf_ds(os->os_dsl_dataset, "txg=%llu\n", tx->tx_txg);
972 
973 	ASSERT(dmu_tx_is_syncing(tx));
974 	/* XXX the write_done callback should really give us the tx... */
975 	os->os_synctx = tx;
976 
977 	if (os->os_dsl_dataset == NULL) {
978 		/*
979 		 * This is the MOS.  If we have upgraded,
980 		 * spa_max_replication() could change, so reset
981 		 * os_copies here.
982 		 */
983 		os->os_copies = spa_max_replication(os->os_spa);
984 	}
985 
986 	/*
987 	 * Create the root block IO
988 	 */
989 	arc_release(os->os_phys_buf, &os->os_phys_buf);
990 
991 	SET_BOOKMARK(&zb, os->os_dsl_dataset ?
992 	    os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
993 	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
994 
995 	dmu_write_policy(os, NULL, 0, 0, &zp);
996 
997 	zio = arc_write(pio, os->os_spa, tx->tx_txg,
998 	    os->os_rootbp, os->os_phys_buf, DMU_OS_IS_L2CACHEABLE(os), &zp,
999 	    dmu_objset_write_ready, dmu_objset_write_done, os,
1000 	    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
1001 
1002 	/*
1003 	 * Sync special dnodes - the parent IO for the sync is the root block
1004 	 */
1005 	os->os_meta_dnode->dn_zio = zio;
1006 	dnode_sync(os->os_meta_dnode, tx);
1007 
1008 	os->os_phys->os_flags = os->os_flags;
1009 
1010 	if (os->os_userused_dnode &&
1011 	    os->os_userused_dnode->dn_type != DMU_OT_NONE) {
1012 		os->os_userused_dnode->dn_zio = zio;
1013 		dnode_sync(os->os_userused_dnode, tx);
1014 		os->os_groupused_dnode->dn_zio = zio;
1015 		dnode_sync(os->os_groupused_dnode, tx);
1016 	}
1017 
1018 	txgoff = tx->tx_txg & TXG_MASK;
1019 
1020 	if (dmu_objset_userused_enabled(os)) {
1021 		newlist = &os->os_synced_dnodes;
1022 		/*
1023 		 * We must create the list here because it uses the
1024 		 * dn_dirty_link[] of this txg.
1025 		 */
1026 		list_create(newlist, sizeof (dnode_t),
1027 		    offsetof(dnode_t, dn_dirty_link[txgoff]));
1028 	}
1029 
1030 	dmu_objset_sync_dnodes(&os->os_free_dnodes[txgoff], newlist, tx);
1031 	dmu_objset_sync_dnodes(&os->os_dirty_dnodes[txgoff], newlist, tx);
1032 
1033 	list = &os->os_meta_dnode->dn_dirty_records[txgoff];
1034 	while (dr = list_head(list)) {
1035 		ASSERT(dr->dr_dbuf->db_level == 0);
1036 		list_remove(list, dr);
1037 		if (dr->dr_zio)
1038 			zio_nowait(dr->dr_zio);
1039 	}
1040 	/*
1041 	 * Free intent log blocks up to this tx.
1042 	 */
1043 	zil_sync(os->os_zil, tx);
1044 	os->os_phys->os_zil_header = os->os_zil_header;
1045 	zio_nowait(zio);
1046 }
1047 
1048 boolean_t
1049 dmu_objset_is_dirty(objset_t *os, uint64_t txg)
1050 {
1051 	return (!list_is_empty(&os->os_dirty_dnodes[txg & TXG_MASK]) ||
1052 	    !list_is_empty(&os->os_free_dnodes[txg & TXG_MASK]));
1053 }
1054 
1055 static objset_used_cb_t *used_cbs[DMU_OST_NUMTYPES];
1056 
1057 void
1058 dmu_objset_register_type(dmu_objset_type_t ost, objset_used_cb_t *cb)
1059 {
1060 	used_cbs[ost] = cb;
1061 }
1062 
1063 boolean_t
1064 dmu_objset_userused_enabled(objset_t *os)
1065 {
1066 	return (spa_version(os->os_spa) >= SPA_VERSION_USERSPACE &&
1067 	    used_cbs[os->os_phys->os_type] &&
1068 	    os->os_userused_dnode);
1069 }
1070 
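/*
 * Apply one dnode's space delta (DNODE_SIZE plus its used bytes) to the
 * per-user and per-group used ZAPs, negating the delta when backing out
 * the old accounting.
 */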
1071 static void
1072 do_userquota_update(objset_t *os, uint64_t used, uint64_t flags,
1073     uint64_t user, uint64_t group, boolean_t subtract, dmu_tx_t *tx)
1074 {
1075 	if ((flags & DNODE_FLAG_USERUSED_ACCOUNTED)) {
1076 		int64_t delta = DNODE_SIZE + used;
1077 		if (subtract)
1078 			delta = -delta;
1079 		VERIFY3U(0, ==, zap_increment_int(os, DMU_USERUSED_OBJECT,
1080 		    user, delta, tx));
1081 		VERIFY3U(0, ==, zap_increment_int(os, DMU_GROUPUSED_OBJECT,
1082 		    group, delta, tx));
1083 	}
1084 }
1085 
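/*
 * Called in syncing context after the dnodes have been synced; walks
 * os_synced_dnodes, backs out each dnode's old uid/gid accounting and
 * charges the new one, then releases the hold taken in
 * dmu_objset_sync_dnodes().
 */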
1086 void
1087 dmu_objset_do_userquota_updates(objset_t *os, dmu_tx_t *tx)
1088 {
1089 	dnode_t *dn;
1090 	list_t *list = &os->os_synced_dnodes;
1091 
1092 	ASSERT(list_head(list) == NULL || dmu_objset_userused_enabled(os));
1093 
1094 	while (dn = list_head(list)) {
1095 		ASSERT(!DMU_OBJECT_IS_SPECIAL(dn->dn_object));
1096 		ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE ||
1097 		    dn->dn_phys->dn_flags &
1098 		    DNODE_FLAG_USERUSED_ACCOUNTED);
1099 
1100 		/* Allocate the user/groupused objects if necessary. */
1101 		if (os->os_userused_dnode->dn_type == DMU_OT_NONE) {
1102 			VERIFY(0 == zap_create_claim(os,
1103 			    DMU_USERUSED_OBJECT,
1104 			    DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
1105 			VERIFY(0 == zap_create_claim(os,
1106 			    DMU_GROUPUSED_OBJECT,
1107 			    DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
1108 		}
1109 
1110 		/*
1111 		 * We intentionally modify the zap object even if the
1112 		 * net delta is zero.  Otherwise
1113 		 * the block of the zap obj could be shared between
1114 		 * the block of the zap obj could remain shared between
1115 		 * datasets when it needs to be different between them after
1116 		 */
1117 
1118 		/*
1119 		 * The mutex is needed here for interlock with dnode_allocate.
1120 		 */
1121 		mutex_enter(&dn->dn_mtx);
1122 		ASSERT(dn->dn_id_flags);
1123 		if (dn->dn_id_flags & DN_ID_OLD_EXIST)  {
1124 			do_userquota_update(os, dn->dn_oldused, dn->dn_oldflags,
1125 			    dn->dn_olduid, dn->dn_oldgid, B_TRUE, tx);
1126 		}
1127 		if (dn->dn_id_flags & DN_ID_NEW_EXIST) {
1128 			do_userquota_update(os, DN_USED_BYTES(dn->dn_phys),
1129 			    dn->dn_phys->dn_flags,  dn->dn_newuid,
1130 			    dn->dn_newgid, B_FALSE, tx);
1131 		}
1132 
1133 		dn->dn_oldused = 0;
1134 		dn->dn_oldflags = 0;
1135 		if (dn->dn_id_flags & DN_ID_NEW_EXIST) {
1136 			dn->dn_olduid = dn->dn_newuid;
1137 			dn->dn_oldgid = dn->dn_newgid;
1138 			dn->dn_id_flags |= DN_ID_OLD_EXIST;
1139 			if (dn->dn_bonuslen == 0)
1140 				dn->dn_id_flags |= DN_ID_CHKED_SPILL;
1141 			else
1142 				dn->dn_id_flags |= DN_ID_CHKED_BONUS;
1143 		}
1144 		dn->dn_id_flags &= ~(DN_ID_NEW_EXIST|DN_ID_SYNC);
1145 		mutex_exit(&dn->dn_mtx);
1146 
1147 		list_remove(list, dn);
1148 		dnode_rele(dn, list);
1149 	}
1150 }
1151 
1152 /*
1153  * Returns a pointer to the data from which to find the uid/gid.
1154  *
1155  * If a dirty record for the transaction group that is syncing can't
1156  * be found then NULL is returned.  In the NULL case it is assumed
1157  * the uid/gid aren't changing.
1158  */
1159 static void *
1160 dmu_objset_userquota_find_data(dmu_buf_impl_t *db, dmu_tx_t *tx)
1161 {
1162 	dbuf_dirty_record_t *dr, **drp;
1163 	void *data;
1164 
1165 	if (db->db_dirtycnt == 0)
1166 		return (db->db.db_data);  /* Nothing is changing */
1167 
1168 	for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next)
1169 		if (dr->dr_txg == tx->tx_txg)
1170 			break;
1171 
1172 	if (dr == NULL)
1173 		data = NULL;
1174 	else if (dr->dr_dbuf->db_dnode->dn_bonuslen == 0 &&
1175 	    dr->dr_dbuf->db_blkid == DMU_SPILL_BLKID)
1176 		data = dr->dt.dl.dr_data->b_data;
1177 	else
1178 		data = dr->dt.dl.dr_data;
1179 	return (data);
1180 }
1181 
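/*
 * Determine the uid/gid for a dnode either before ('before' is true) or
 * after a change, looking in the bonus buffer or the spill block as
 * appropriate, and record which ids are now known in dn_id_flags.
 */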
1182 void
1183 dmu_objset_userquota_get_ids(dnode_t *dn, boolean_t before, dmu_tx_t *tx)
1184 {
1185 	objset_t *os = dn->dn_objset;
1186 	void *data = NULL;
1187 	dmu_buf_impl_t *db = NULL;
1188 	uint64_t *user, *group;
1189 	int flags = dn->dn_id_flags;
1190 	int error;
1191 	boolean_t have_spill = B_FALSE;
1192 
1193 	if (!dmu_objset_userused_enabled(dn->dn_objset))
1194 		return;
1195 
1196 	if (before && (flags & (DN_ID_CHKED_BONUS|DN_ID_OLD_EXIST|
1197 	    DN_ID_CHKED_SPILL)))
1198 		return;
1199 
1200 	if (before && dn->dn_bonuslen != 0)
1201 		data = DN_BONUS(dn->dn_phys);
1202 	else if (!before && dn->dn_bonuslen != 0) {
1203 		if (dn->dn_bonus) {
1204 			db = dn->dn_bonus;
1205 			mutex_enter(&db->db_mtx);
1206 			data = dmu_objset_userquota_find_data(db, tx);
1207 		} else {
1208 			data = DN_BONUS(dn->dn_phys);
1209 		}
1210 	} else if (dn->dn_bonuslen == 0 && dn->dn_bonustype == DMU_OT_SA) {
1211 			int rf = 0;
1212 
1213 			if (RW_WRITE_HELD(&dn->dn_struct_rwlock))
1214 				rf |= DB_RF_HAVESTRUCT;
1215 			error = dmu_spill_hold_by_dnode(dn, rf,
1216 			    FTAG, (dmu_buf_t **)&db);
1217 			ASSERT(error == 0);
1218 			mutex_enter(&db->db_mtx);
1219 			data = (before) ? db->db.db_data :
1220 			    dmu_objset_userquota_find_data(db, tx);
1221 			have_spill = B_TRUE;
1222 	} else {
1223 		mutex_enter(&dn->dn_mtx);
1224 		dn->dn_id_flags |= DN_ID_CHKED_BONUS;
1225 		mutex_exit(&dn->dn_mtx);
1226 		return;
1227 	}
1228 
1229 	if (before) {
1230 		ASSERT(data);
1231 		user = &dn->dn_olduid;
1232 		group = &dn->dn_oldgid;
1233 	} else if (data) {
1234 		user = &dn->dn_newuid;
1235 		group = &dn->dn_newgid;
1236 	}
1237 
1238 	/*
1239 	 * We must always call the callback in case the object
1240 	 * type has changed and the new type isn't an object type we track.
1241 	 */
1242 	error = used_cbs[os->os_phys->os_type](dn->dn_bonustype, data,
1243 	    user, group);
1244 
1245 	/*
1246 	 * Preserve existing uid/gid when the callback can't determine
1247 	 * what the new uid/gid are and the callback returned EEXIST.
1248 	 * The EEXIST error tells us to just use the existing uid/gid.
1249 	 * If we don't know what the old values are then just assign
1250 	 * them to 0, since that is a new file being created.
1251 	 */
1252 	if (!before && data == NULL && error == EEXIST) {
1253 		if (flags & DN_ID_OLD_EXIST) {
1254 			dn->dn_newuid = dn->dn_olduid;
1255 			dn->dn_newgid = dn->dn_oldgid;
1256 		} else {
1257 			dn->dn_newuid = 0;
1258 			dn->dn_newgid = 0;
1259 		}
1260 		error = 0;
1261 	}
1262 
1263 	if (db)
1264 		mutex_exit(&db->db_mtx);
1265 
1266 	mutex_enter(&dn->dn_mtx);
1267 	if (error == 0 && before)
1268 		dn->dn_id_flags |= DN_ID_OLD_EXIST;
1269 	if (error == 0 && !before)
1270 		dn->dn_id_flags |= DN_ID_NEW_EXIST;
1271 
1272 	if (have_spill) {
1273 		dn->dn_id_flags |= DN_ID_CHKED_SPILL;
1274 	} else {
1275 		dn->dn_id_flags |= DN_ID_CHKED_BONUS;
1276 	}
1277 	mutex_exit(&dn->dn_mtx);
1278 	if (have_spill)
1279 		dmu_buf_rele((dmu_buf_t *)db, FTAG);
1280 }
1281 
1282 boolean_t
1283 dmu_objset_userspace_present(objset_t *os)
1284 {
1285 	return (os->os_phys->os_flags &
1286 	    OBJSET_FLAG_USERACCOUNTING_COMPLETE);
1287 }
1288 
1289 int
1290 dmu_objset_userspace_upgrade(objset_t *os)
1291 {
1292 	uint64_t obj;
1293 	int err = 0;
1294 
1295 	if (dmu_objset_userspace_present(os))
1296 		return (0);
1297 	if (!dmu_objset_userused_enabled(os))
1298 		return (ENOTSUP);
1299 	if (dmu_objset_is_snapshot(os))
1300 		return (EINVAL);
1301 
1302 	/*
1303 	 * We simply need to mark every object dirty, so that it will be
1304 	 * synced out and thus accounted.  If this is called
1305 	 * concurrently, or if we already did some work before crashing,
1306 	 * that's fine, since we track each object's accounted state
1307 	 * independently.
1308 	 */
1309 
1310 	for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE, 0)) {
1311 		dmu_tx_t *tx;
1312 		dmu_buf_t *db;
1313 		int objerr;
1314 
1315 		if (issig(JUSTLOOKING) && issig(FORREAL))
1316 			return (EINTR);
1317 
1318 		objerr = dmu_bonus_hold(os, obj, FTAG, &db);
1319 		if (objerr)
1320 			continue;
1321 		tx = dmu_tx_create(os);
1322 		dmu_tx_hold_bonus(tx, obj);
1323 		objerr = dmu_tx_assign(tx, TXG_WAIT);
1324 		if (objerr) {
1325 			dmu_tx_abort(tx);
1326 			continue;
1327 		}
1328 		dmu_buf_will_dirty(db, tx);
1329 		dmu_buf_rele(db, FTAG);
1330 		dmu_tx_commit(tx);
1331 	}
1332 
1333 	os->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
1334 	txg_wait_synced(dmu_objset_pool(os), 0);
1335 	return (0);
1336 }
1337 
1338 void
1339 dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp,
1340     uint64_t *usedobjsp, uint64_t *availobjsp)
1341 {
1342 	dsl_dataset_space(os->os_dsl_dataset, refdbytesp, availbytesp,
1343 	    usedobjsp, availobjsp);
1344 }
1345 
1346 uint64_t
1347 dmu_objset_fsid_guid(objset_t *os)
1348 {
1349 	return (dsl_dataset_fsid_guid(os->os_dsl_dataset));
1350 }
1351 
1352 void
1353 dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat)
1354 {
1355 	stat->dds_type = os->os_phys->os_type;
1356 	if (os->os_dsl_dataset)
1357 		dsl_dataset_fast_stat(os->os_dsl_dataset, stat);
1358 }
1359 
1360 void
1361 dmu_objset_stats(objset_t *os, nvlist_t *nv)
1362 {
1363 	ASSERT(os->os_dsl_dataset ||
1364 	    os->os_phys->os_type == DMU_OST_META);
1365 
1366 	if (os->os_dsl_dataset != NULL)
1367 		dsl_dataset_stats(os->os_dsl_dataset, nv);
1368 
1369 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_TYPE,
1370 	    os->os_phys->os_type);
1371 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERACCOUNTING,
1372 	    dmu_objset_userspace_present(os));
1373 }
1374 
1375 int
1376 dmu_objset_is_snapshot(objset_t *os)
1377 {
1378 	if (os->os_dsl_dataset != NULL)
1379 		return (dsl_dataset_is_snapshot(os->os_dsl_dataset));
1380 	else
1381 		return (B_FALSE);
1382 }
1383 
1384 int
1385 dmu_snapshot_realname(objset_t *os, char *name, char *real, int maxlen,
1386     boolean_t *conflict)
1387 {
1388 	dsl_dataset_t *ds = os->os_dsl_dataset;
1389 	uint64_t ignored;
1390 
1391 	if (ds->ds_phys->ds_snapnames_zapobj == 0)
1392 		return (ENOENT);
1393 
1394 	return (zap_lookup_norm(ds->ds_dir->dd_pool->dp_meta_objset,
1395 	    ds->ds_phys->ds_snapnames_zapobj, name, 8, 1, &ignored, MT_FIRST,
1396 	    real, maxlen, conflict));
1397 }
1398 
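/*
 * Return the next snapshot name (and optionally its object id and any case
 * conflict) for this objset, using *offp as a serialized ZAP cursor that is
 * advanced on each call.  A typical iteration (sketch only; 'buf' is a
 * hypothetical caller-supplied buffer):
 *
 *	uint64_t off = 0;
 *	char buf[MAXNAMELEN];
 *
 *	while (dmu_snapshot_list_next(os, sizeof (buf), buf,
 *	    NULL, &off, NULL) == 0)
 *		... process buf ...
 */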
1399 int
1400 dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
1401     uint64_t *idp, uint64_t *offp, boolean_t *case_conflict)
1402 {
1403 	dsl_dataset_t *ds = os->os_dsl_dataset;
1404 	zap_cursor_t cursor;
1405 	zap_attribute_t attr;
1406 
1407 	if (ds->ds_phys->ds_snapnames_zapobj == 0)
1408 		return (ENOENT);
1409 
1410 	zap_cursor_init_serialized(&cursor,
1411 	    ds->ds_dir->dd_pool->dp_meta_objset,
1412 	    ds->ds_phys->ds_snapnames_zapobj, *offp);
1413 
1414 	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
1415 		zap_cursor_fini(&cursor);
1416 		return (ENOENT);
1417 	}
1418 
1419 	if (strlen(attr.za_name) + 1 > namelen) {
1420 		zap_cursor_fini(&cursor);
1421 		return (ENAMETOOLONG);
1422 	}
1423 
1424 	(void) strcpy(name, attr.za_name);
1425 	if (idp)
1426 		*idp = attr.za_first_integer;
1427 	if (case_conflict)
1428 		*case_conflict = attr.za_normalization_conflict;
1429 	zap_cursor_advance(&cursor);
1430 	*offp = zap_cursor_serialize(&cursor);
1431 	zap_cursor_fini(&cursor);
1432 
1433 	return (0);
1434 }
1435 
1436 int
1437 dmu_dir_list_next(objset_t *os, int namelen, char *name,
1438     uint64_t *idp, uint64_t *offp)
1439 {
1440 	dsl_dir_t *dd = os->os_dsl_dataset->ds_dir;
1441 	zap_cursor_t cursor;
1442 	zap_attribute_t attr;
1443 
1444 	/* there is no next dir on a snapshot! */
1445 	if (os->os_dsl_dataset->ds_object !=
1446 	    dd->dd_phys->dd_head_dataset_obj)
1447 		return (ENOENT);
1448 
1449 	zap_cursor_init_serialized(&cursor,
1450 	    dd->dd_pool->dp_meta_objset,
1451 	    dd->dd_phys->dd_child_dir_zapobj, *offp);
1452 
1453 	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
1454 		zap_cursor_fini(&cursor);
1455 		return (ENOENT);
1456 	}
1457 
1458 	if (strlen(attr.za_name) + 1 > namelen) {
1459 		zap_cursor_fini(&cursor);
1460 		return (ENAMETOOLONG);
1461 	}
1462 
1463 	(void) strcpy(name, attr.za_name);
1464 	if (idp)
1465 		*idp = attr.za_first_integer;
1466 	zap_cursor_advance(&cursor);
1467 	*offp = zap_cursor_serialize(&cursor);
1468 	zap_cursor_fini(&cursor);
1469 
1470 	return (0);
1471 }
1472 
1473 struct findarg {
1474 	int (*func)(const char *, void *);
1475 	void *arg;
1476 };
1477 
1478 /* ARGSUSED */
1479 static int
1480 findfunc(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
1481 {
1482 	struct findarg *fa = arg;
1483 	return (fa->func(dsname, fa->arg));
1484 }
1485 
1486 /*
1487  * Find all objsets under name, and for each, call 'func(child_name, arg)'.
1488  * Perhaps change all callers to use dmu_objset_find_spa()?
1489  */
1490 int
1491 dmu_objset_find(char *name, int func(const char *, void *), void *arg,
1492     int flags)
1493 {
1494 	struct findarg fa;
1495 	fa.func = func;
1496 	fa.arg = arg;
1497 	return (dmu_objset_find_spa(NULL, name, findfunc, &fa, flags));
1498 }
1499 
1500 /*
1501  * Find all objsets under name, and call func on each.
1502  */
1503 int
1504 dmu_objset_find_spa(spa_t *spa, const char *name,
1505     int func(spa_t *, uint64_t, const char *, void *), void *arg, int flags)
1506 {
1507 	dsl_dir_t *dd;
1508 	dsl_pool_t *dp;
1509 	dsl_dataset_t *ds;
1510 	zap_cursor_t zc;
1511 	zap_attribute_t *attr;
1512 	char *child;
1513 	uint64_t thisobj;
1514 	int err;
1515 
1516 	if (name == NULL)
1517 		name = spa_name(spa);
1518 	err = dsl_dir_open_spa(spa, name, FTAG, &dd, NULL);
1519 	if (err)
1520 		return (err);
1521 
1522 	/* Don't visit hidden ($MOS & $ORIGIN) objsets. */
1523 	if (dd->dd_myname[0] == '$') {
1524 		dsl_dir_close(dd, FTAG);
1525 		return (0);
1526 	}
1527 
1528 	thisobj = dd->dd_phys->dd_head_dataset_obj;
1529 	attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
1530 	dp = dd->dd_pool;
1531 
1532 	/*
1533 	 * Iterate over all children.
1534 	 */
1535 	if (flags & DS_FIND_CHILDREN) {
1536 		for (zap_cursor_init(&zc, dp->dp_meta_objset,
1537 		    dd->dd_phys->dd_child_dir_zapobj);
1538 		    zap_cursor_retrieve(&zc, attr) == 0;
1539 		    (void) zap_cursor_advance(&zc)) {
1540 			ASSERT(attr->za_integer_length == sizeof (uint64_t));
1541 			ASSERT(attr->za_num_integers == 1);
1542 
1543 			child = kmem_asprintf("%s/%s", name, attr->za_name);
1544 			err = dmu_objset_find_spa(spa, child, func, arg, flags);
1545 			strfree(child);
1546 			if (err)
1547 				break;
1548 		}
1549 		zap_cursor_fini(&zc);
1550 
1551 		if (err) {
1552 			dsl_dir_close(dd, FTAG);
1553 			kmem_free(attr, sizeof (zap_attribute_t));
1554 			return (err);
1555 		}
1556 	}
1557 
1558 	/*
1559 	 * Iterate over all snapshots.
1560 	 */
1561 	if (flags & DS_FIND_SNAPSHOTS) {
1562 		if (!dsl_pool_sync_context(dp))
1563 			rw_enter(&dp->dp_config_rwlock, RW_READER);
1564 		err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);
1565 		if (!dsl_pool_sync_context(dp))
1566 			rw_exit(&dp->dp_config_rwlock);
1567 
1568 		if (err == 0) {
1569 			uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
1570 			dsl_dataset_rele(ds, FTAG);
1571 
1572 			for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj);
1573 			    zap_cursor_retrieve(&zc, attr) == 0;
1574 			    (void) zap_cursor_advance(&zc)) {
1575 				ASSERT(attr->za_integer_length ==
1576 				    sizeof (uint64_t));
1577 				ASSERT(attr->za_num_integers == 1);
1578 
1579 				child = kmem_asprintf("%s@%s",
1580 				    name, attr->za_name);
1581 				err = func(spa, attr->za_first_integer,
1582 				    child, arg);
1583 				strfree(child);
1584 				if (err)
1585 					break;
1586 			}
1587 			zap_cursor_fini(&zc);
1588 		}
1589 	}
1590 
1591 	dsl_dir_close(dd, FTAG);
1592 	kmem_free(attr, sizeof (zap_attribute_t));
1593 
1594 	if (err)
1595 		return (err);
1596 
1597 	/*
1598 	 * Apply to self if appropriate.
1599 	 */
1600 	err = func(spa, thisobj, name, arg);
1601 	return (err);
1602 }
1603 
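/*
 * dmu_objset_find() callback: issue a speculative, non-blocking ARC read of
 * the named dataset's root block so that a subsequent open finds it cached.
 * Always returns 0 so that iteration continues.
 */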
1604 /* ARGSUSED */
1605 int
1606 dmu_objset_prefetch(const char *name, void *arg)
1607 {
1608 	dsl_dataset_t *ds;
1609 
1610 	if (dsl_dataset_hold(name, FTAG, &ds))
1611 		return (0);
1612 
1613 	if (!BP_IS_HOLE(&ds->ds_phys->ds_bp)) {
1614 		mutex_enter(&ds->ds_opening_lock);
1615 		if (ds->ds_objset == NULL) {
1616 			uint32_t aflags = ARC_NOWAIT | ARC_PREFETCH;
1617 			zbookmark_t zb;
1618 
1619 			SET_BOOKMARK(&zb, ds->ds_object, ZB_ROOT_OBJECT,
1620 			    ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
1621 
1622 			(void) arc_read_nolock(NULL, dsl_dataset_get_spa(ds),
1623 			    &ds->ds_phys->ds_bp, NULL, NULL,
1624 			    ZIO_PRIORITY_ASYNC_READ,
1625 			    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
1626 			    &aflags, &zb);
1627 		}
1628 		mutex_exit(&ds->ds_opening_lock);
1629 	}
1630 
1631 	dsl_dataset_rele(ds, FTAG);
1632 	return (0);
1633 }
1634 
1635 void
1636 dmu_objset_set_user(objset_t *os, void *user_ptr)
1637 {
1638 	ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
1639 	os->os_user_ptr = user_ptr;
1640 }
1641 
1642 void *
1643 dmu_objset_get_user(objset_t *os)
1644 {
1645 	ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
1646 	return (os->os_user_ptr);
1647 }
1648