xref: /freebsd/sys/contrib/openzfs/module/zfs/zfs_log.c (revision 1323ec57)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2018 by Delphix. All rights reserved.
 */


#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/cmn_err.h>
#include <sys/kmem.h>
#include <sys/thread.h>
#include <sys/file.h>
#include <sys/vfs.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_dir.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/byteorder.h>
#include <sys/policy.h>
#include <sys/stat.h>
#include <sys/acl.h>
#include <sys/dmu.h>
#include <sys/dbuf.h>
#include <sys/spa.h>
#include <sys/zfs_fuid.h>
#include <sys/dsl_dataset.h>

/*
 * These zfs_log_* functions must be called within a dmu tx, in one
 * of 2 contexts depending on zilog->z_replay:
 *
 * Non replay mode
 * ---------------
 * We need to record the transaction so that if it is committed to
 * the Intent Log then it can be replayed.  An intent log transaction
 * structure (itx_t) is allocated and all the information necessary to
 * possibly replay the transaction is saved in it. The itx is then assigned
 * a sequence number and inserted in the in-memory list anchored in the zilog.
 *
 * Replay mode
 * -----------
 * We need to mark the intent log record as replayed in the log header.
 * This is done in the same transaction as the replay so that they
 * commit atomically.
 */
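/*
 * A minimal caller-side sketch (illustration only, not code from this
 * file; names such as zfs_mknode() stand in for the actual operation and
 * error handling is omitted).  Every zfs_log_* call happens between
 * dmu_tx_assign() and dmu_tx_commit() on the same tx:
 *
 *	tx = dmu_tx_create(zfsvfs->z_os);
 *	...dmu_tx_hold_*() the objects being modified...
 *	error = dmu_tx_assign(tx, TXG_WAIT);
 *	...apply the change, e.g. zfs_mknode()...
 *	txtype = zfs_log_create_txtype(Z_FILE, vsecp, vap);
 *	zfs_log_create(zilog, tx, txtype, dzp, zp, name, vsecp, fuidp, vap);
 *	dmu_tx_commit(tx);
 *	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
 *		zil_commit(zilog, zp->z_id);
 */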

int
zfs_log_create_txtype(zil_create_t type, vsecattr_t *vsecp, vattr_t *vap)
{
	int isxvattr = (vap->va_mask & ATTR_XVATTR);
	switch (type) {
	case Z_FILE:
		if (vsecp == NULL && !isxvattr)
			return (TX_CREATE);
		if (vsecp && isxvattr)
			return (TX_CREATE_ACL_ATTR);
		if (vsecp)
			return (TX_CREATE_ACL);
		else
			return (TX_CREATE_ATTR);
	case Z_DIR:
		if (vsecp == NULL && !isxvattr)
			return (TX_MKDIR);
		if (vsecp && isxvattr)
			return (TX_MKDIR_ACL_ATTR);
		if (vsecp)
			return (TX_MKDIR_ACL);
		else
			return (TX_MKDIR_ATTR);
	case Z_XATTRDIR:
		return (TX_MKXATTR);
	}
	ASSERT(0);
	return (TX_MAX_TYPE);
}

/*
 * Build up the log data necessary for logging an xvattr_t.
 * The lr_attr_t is initialized first with the mapsize and the attribute
 * bitmap copied from the xvattr_t.  Following the bitmap, a single
 * 64 bit integer records the attribute bits that must be set on replay,
 * and two 64 bit words are then reserved for the create time, which
 * may be set.
 */
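/*
 * Resulting layout (a sketch derived from the code below; the record
 * occupies ZIL_XVAT_SIZE(xva_mapsize) bytes in total):
 *
 *	lr_attr_masksize		xva_mapsize
 *	uint32_t bitmap[mapsize]	xva_reqattrmap[] (the first word
 *					lives inside the lr_attr_t itself)
 *	uint64_t			packed XAT0_* attribute values
 *	uint64_t crtime[2]		create time (ZFS_TIME_ENCODE)
 *	char scanstamp[AV_SCANSTAMP_SZ]	AV scanstamp, or the project ID;
 *					the two are never valid together
 */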
static void
zfs_log_xvattr(lr_attr_t *lrattr, xvattr_t *xvap)
{
	uint32_t	*bitmap;
	uint64_t	*attrs;
	uint64_t	*crtime;
	xoptattr_t	*xoap;
	void		*scanstamp;
	int		i;

	xoap = xva_getxoptattr(xvap);
	ASSERT(xoap);

	lrattr->lr_attr_masksize = xvap->xva_mapsize;
	bitmap = &lrattr->lr_attr_bitmap;
	for (i = 0; i != xvap->xva_mapsize; i++, bitmap++) {
		*bitmap = xvap->xva_reqattrmap[i];
	}

	/* Now pack the attributes up in a single uint64_t */
	attrs = (uint64_t *)bitmap;
	*attrs = 0;
	crtime = attrs + 1;
	memset(crtime, 0, 2 * sizeof (uint64_t));
	scanstamp = (caddr_t)(crtime + 2);
	memset(scanstamp, 0, AV_SCANSTAMP_SZ);
	if (XVA_ISSET_REQ(xvap, XAT_READONLY))
		*attrs |= (xoap->xoa_readonly == 0) ? 0 :
		    XAT0_READONLY;
	if (XVA_ISSET_REQ(xvap, XAT_HIDDEN))
		*attrs |= (xoap->xoa_hidden == 0) ? 0 :
		    XAT0_HIDDEN;
	if (XVA_ISSET_REQ(xvap, XAT_SYSTEM))
		*attrs |= (xoap->xoa_system == 0) ? 0 :
		    XAT0_SYSTEM;
	if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE))
		*attrs |= (xoap->xoa_archive == 0) ? 0 :
		    XAT0_ARCHIVE;
	if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE))
		*attrs |= (xoap->xoa_immutable == 0) ? 0 :
		    XAT0_IMMUTABLE;
	if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK))
		*attrs |= (xoap->xoa_nounlink == 0) ? 0 :
		    XAT0_NOUNLINK;
	if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY))
		*attrs |= (xoap->xoa_appendonly == 0) ? 0 :
		    XAT0_APPENDONLY;
	if (XVA_ISSET_REQ(xvap, XAT_OPAQUE))
		*attrs |= (xoap->xoa_opaque == 0) ? 0 :
		    XAT0_OPAQUE;
	if (XVA_ISSET_REQ(xvap, XAT_NODUMP))
		*attrs |= (xoap->xoa_nodump == 0) ? 0 :
		    XAT0_NODUMP;
	if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED))
		*attrs |= (xoap->xoa_av_quarantined == 0) ? 0 :
		    XAT0_AV_QUARANTINED;
	if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED))
		*attrs |= (xoap->xoa_av_modified == 0) ? 0 :
		    XAT0_AV_MODIFIED;
	if (XVA_ISSET_REQ(xvap, XAT_CREATETIME))
		ZFS_TIME_ENCODE(&xoap->xoa_createtime, crtime);
	if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) {
		ASSERT(!XVA_ISSET_REQ(xvap, XAT_PROJID));

		memcpy(scanstamp, xoap->xoa_av_scanstamp, AV_SCANSTAMP_SZ);
	} else if (XVA_ISSET_REQ(xvap, XAT_PROJID)) {
		/*
		 * XAT_PROJID and XAT_AV_SCANSTAMP will never be valid
		 * at the same time, so we can share the same space.
		 */
		memcpy(scanstamp, &xoap->xoa_projid, sizeof (uint64_t));
	}
	if (XVA_ISSET_REQ(xvap, XAT_REPARSE))
		*attrs |= (xoap->xoa_reparse == 0) ? 0 :
		    XAT0_REPARSE;
	if (XVA_ISSET_REQ(xvap, XAT_OFFLINE))
		*attrs |= (xoap->xoa_offline == 0) ? 0 :
		    XAT0_OFFLINE;
	if (XVA_ISSET_REQ(xvap, XAT_SPARSE))
		*attrs |= (xoap->xoa_sparse == 0) ? 0 :
		    XAT0_SPARSE;
	if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT))
		*attrs |= (xoap->xoa_projinherit == 0) ? 0 :
		    XAT0_PROJINHERIT;
}

static void *
zfs_log_fuid_ids(zfs_fuid_info_t *fuidp, void *start)
{
	zfs_fuid_t *zfuid;
	uint64_t *fuidloc = start;

	/* First copy in the ACE FUIDs */
	for (zfuid = list_head(&fuidp->z_fuids); zfuid;
	    zfuid = list_next(&fuidp->z_fuids, zfuid)) {
		*fuidloc++ = zfuid->z_logfuid;
	}
	return (fuidloc);
}


static void *
zfs_log_fuid_domains(zfs_fuid_info_t *fuidp, void *start)
{
	zfs_fuid_domain_t *zdomain;

	/* now copy in the domain info, if any */
	if (fuidp->z_domain_str_sz != 0) {
		for (zdomain = list_head(&fuidp->z_domains); zdomain;
		    zdomain = list_next(&fuidp->z_domains, zdomain)) {
			memcpy(start, zdomain->z_domain,
			    strlen(zdomain->z_domain) + 1);
			start = (caddr_t)start +
			    strlen(zdomain->z_domain) + 1;
		}
	}
	return (start);
}

/*
 * If zp is an xattr node, check whether the xattr owner is unlinked.
 * We don't want to log anything if the owner is unlinked.
 */
static int
zfs_xattr_owner_unlinked(znode_t *zp)
{
	int unlinked = 0;
	znode_t *dzp;
#ifdef __FreeBSD__
	znode_t *tzp = zp;

	/*
	 * zrele drops the vnode lock which violates the VOP locking contract
	 * on FreeBSD. See comment at the top of zfs_replay.c for more detail.
	 */
	/*
	 * if zp is XATTR node, keep walking up via z_xattr_parent until we
	 * get the owner
	 */
	while (tzp->z_pflags & ZFS_XATTR) {
		ASSERT3U(zp->z_xattr_parent, !=, 0);
		if (zfs_zget(ZTOZSB(tzp), tzp->z_xattr_parent, &dzp) != 0) {
			unlinked = 1;
			break;
		}

		if (tzp != zp)
			zrele(tzp);
		tzp = dzp;
		unlinked = tzp->z_unlinked;
	}
	if (tzp != zp)
		zrele(tzp);
#else
	zhold(zp);
	/*
	 * if zp is XATTR node, keep walking up via z_xattr_parent until we
	 * get the owner
	 */
	while (zp->z_pflags & ZFS_XATTR) {
		ASSERT3U(zp->z_xattr_parent, !=, 0);
		if (zfs_zget(ZTOZSB(zp), zp->z_xattr_parent, &dzp) != 0) {
			unlinked = 1;
			break;
		}

		zrele(zp);
		zp = dzp;
		unlinked = zp->z_unlinked;
	}
	zrele(zp);
#endif
	return (unlinked);
}

/*
 * Handles TX_CREATE, TX_CREATE_ATTR, TX_MKDIR, TX_MKDIR_ATTR and
 * TX_MKXATTR transactions.
 *
 * TX_CREATE and TX_MKDIR are standard creates, but they may have FUID
 * domain information appended prior to the name.  In this case the
 * uid/gid in the log record will be a log centric FUID.
 *
 * TX_CREATE_ACL_ATTR and TX_MKDIR_ACL_ATTR handle special creates that
 * may contain attributes, an ACL and optional fuid information.
 *
 * TX_CREATE_ACL and TX_MKDIR_ACL handle special creates that specify
 * an ACL and normal users/groups in the ACEs.
 *
 * Optional xvattr attribute information may also be present, similar
 * to zfs_log_setattr.
 *
 * The FUID "domain" strings, if any, are appended before the file name,
 * which is stored last in the record (the full layout is sketched after
 * this function).
 */
void
zfs_log_create(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
    znode_t *dzp, znode_t *zp, const char *name, vsecattr_t *vsecp,
    zfs_fuid_info_t *fuidp, vattr_t *vap)
{
	itx_t *itx;
	lr_create_t *lr;
	lr_acl_create_t *lracl;
	size_t aclsize = (vsecp != NULL) ? vsecp->vsa_aclentsz : 0;
	size_t xvatsize = 0;
	size_t txsize;
	xvattr_t *xvap = (xvattr_t *)vap;
	void *end;
	size_t lrsize;
	size_t namesize = strlen(name) + 1;
	size_t fuidsz = 0;

	if (zil_replaying(zilog, tx) || zfs_xattr_owner_unlinked(dzp))
		return;

	/*
	 * If we have FUIDs present then add in space for
	 * domains and ACE fuid's if any.
	 */
	if (fuidp) {
		fuidsz += fuidp->z_domain_str_sz;
		fuidsz += fuidp->z_fuid_cnt * sizeof (uint64_t);
	}

	if (vap->va_mask & ATTR_XVATTR)
		xvatsize = ZIL_XVAT_SIZE(xvap->xva_mapsize);

	if ((int)txtype == TX_CREATE_ATTR || (int)txtype == TX_MKDIR_ATTR ||
	    (int)txtype == TX_CREATE || (int)txtype == TX_MKDIR ||
	    (int)txtype == TX_MKXATTR) {
		txsize = sizeof (*lr) + namesize + fuidsz + xvatsize;
		lrsize = sizeof (*lr);
	} else {
		txsize =
		    sizeof (lr_acl_create_t) + namesize + fuidsz +
		    ZIL_ACE_LENGTH(aclsize) + xvatsize;
		lrsize = sizeof (lr_acl_create_t);
	}

	itx = zil_itx_create(txtype, txsize);

	lr = (lr_create_t *)&itx->itx_lr;
	lr->lr_doid = dzp->z_id;
	lr->lr_foid = zp->z_id;
	/* Store dnode slot count in 8 bits above object id. */
	LR_FOID_SET_SLOTS(lr->lr_foid, zp->z_dnodesize >> DNODE_SHIFT);
	lr->lr_mode = zp->z_mode;
	if (!IS_EPHEMERAL(KUID_TO_SUID(ZTOUID(zp)))) {
		lr->lr_uid = (uint64_t)KUID_TO_SUID(ZTOUID(zp));
	} else {
		lr->lr_uid = fuidp->z_fuid_owner;
	}
	if (!IS_EPHEMERAL(KGID_TO_SGID(ZTOGID(zp)))) {
		lr->lr_gid = (uint64_t)KGID_TO_SGID(ZTOGID(zp));
	} else {
		lr->lr_gid = fuidp->z_fuid_group;
	}
	(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(ZTOZSB(zp)), &lr->lr_gen,
	    sizeof (uint64_t));
	(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CRTIME(ZTOZSB(zp)),
	    lr->lr_crtime, sizeof (uint64_t) * 2);

	if (sa_lookup(zp->z_sa_hdl, SA_ZPL_RDEV(ZTOZSB(zp)), &lr->lr_rdev,
	    sizeof (lr->lr_rdev)) != 0)
		lr->lr_rdev = 0;

	/*
	 * Fill in xvattr info if any
	 */
	if (vap->va_mask & ATTR_XVATTR) {
		zfs_log_xvattr((lr_attr_t *)((caddr_t)lr + lrsize), xvap);
		end = (caddr_t)lr + lrsize + xvatsize;
	} else {
		end = (caddr_t)lr + lrsize;
	}

	/* Now fill in any ACL info */

	if (vsecp) {
		lracl = (lr_acl_create_t *)&itx->itx_lr;
		lracl->lr_aclcnt = vsecp->vsa_aclcnt;
		lracl->lr_acl_bytes = aclsize;
		lracl->lr_domcnt = fuidp ? fuidp->z_domain_cnt : 0;
		lracl->lr_fuidcnt  = fuidp ? fuidp->z_fuid_cnt : 0;
		if (vsecp->vsa_aclflags & VSA_ACE_ACLFLAGS)
			lracl->lr_acl_flags = (uint64_t)vsecp->vsa_aclflags;
		else
			lracl->lr_acl_flags = 0;

		memcpy(end, vsecp->vsa_aclentp, aclsize);
		end = (caddr_t)end + ZIL_ACE_LENGTH(aclsize);
	}

	/* drop in FUID info */
	if (fuidp) {
		end = zfs_log_fuid_ids(fuidp, end);
		end = zfs_log_fuid_domains(fuidp, end);
	}
	/*
	 * Now place file name in log record
	 */
	memcpy(end, name, namesize);

	zil_itx_assign(zilog, itx, tx);
}
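
/*
 * A sketch of the variable-length record just built (fields appear only
 * when the corresponding txtype/arguments call for them):
 *
 *	lr_create_t / lr_acl_create_t	fixed portion of the record
 *	[xvattr data]			if ATTR_XVATTR (see zfs_log_xvattr())
 *	[ACEs]				if vsecp; padded to ZIL_ACE_LENGTH()
 *	[ACE FUIDs]			one uint64_t per FUID
 *	[FUID domain strings]		NUL-terminated, back to back
 *	name				NUL-terminated file name, stored last
 */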

/*
 * Handles both TX_REMOVE and TX_RMDIR transactions.
 */
void
zfs_log_remove(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
    znode_t *dzp, const char *name, uint64_t foid, boolean_t unlinked)
{
	itx_t *itx;
	lr_remove_t *lr;
	size_t namesize = strlen(name) + 1;

	if (zil_replaying(zilog, tx) || zfs_xattr_owner_unlinked(dzp))
		return;

	itx = zil_itx_create(txtype, sizeof (*lr) + namesize);
	lr = (lr_remove_t *)&itx->itx_lr;
	lr->lr_doid = dzp->z_id;
	memcpy(lr + 1, name, namesize);

	itx->itx_oid = foid;

	/*
	 * Object ids can be re-instantiated in the next txg so
	 * remove any async transactions to avoid future leaks.
	 * This can happen if a fsync occurs on the re-instantiated
	 * object for a WR_INDIRECT or WR_NEED_COPY write, which gets
	 * the new file data and flushes a write record for the old object.
	 */
	if (unlinked) {
		ASSERT((txtype & ~TX_CI) == TX_REMOVE);
		zil_remove_async(zilog, foid);
	}
	zil_itx_assign(zilog, itx, tx);
}

/*
 * Handles TX_LINK transactions.
 */
void
zfs_log_link(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
    znode_t *dzp, znode_t *zp, const char *name)
{
	itx_t *itx;
	lr_link_t *lr;
	size_t namesize = strlen(name) + 1;

	if (zil_replaying(zilog, tx))
		return;

	itx = zil_itx_create(txtype, sizeof (*lr) + namesize);
	lr = (lr_link_t *)&itx->itx_lr;
	lr->lr_doid = dzp->z_id;
	lr->lr_link_obj = zp->z_id;
	memcpy(lr + 1, name, namesize);

	zil_itx_assign(zilog, itx, tx);
}

/*
 * Handles TX_SYMLINK transactions.
 */
void
zfs_log_symlink(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
    znode_t *dzp, znode_t *zp, const char *name, const char *link)
{
	itx_t *itx;
	lr_create_t *lr;
	size_t namesize = strlen(name) + 1;
	size_t linksize = strlen(link) + 1;

	if (zil_replaying(zilog, tx))
		return;

	itx = zil_itx_create(txtype, sizeof (*lr) + namesize + linksize);
	lr = (lr_create_t *)&itx->itx_lr;
	lr->lr_doid = dzp->z_id;
	lr->lr_foid = zp->z_id;
	lr->lr_uid = KUID_TO_SUID(ZTOUID(zp));
	lr->lr_gid = KGID_TO_SGID(ZTOGID(zp));
	lr->lr_mode = zp->z_mode;
	(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(ZTOZSB(zp)), &lr->lr_gen,
	    sizeof (uint64_t));
	(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CRTIME(ZTOZSB(zp)),
	    lr->lr_crtime, sizeof (uint64_t) * 2);
	memcpy((char *)(lr + 1), name, namesize);
	memcpy((char *)(lr + 1) + namesize, link, linksize);

	zil_itx_assign(zilog, itx, tx);
}

/*
 * Handles TX_RENAME transactions.
 */
void
zfs_log_rename(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype, znode_t *sdzp,
    const char *sname, znode_t *tdzp, const char *dname, znode_t *szp)
{
	itx_t *itx;
	lr_rename_t *lr;
	size_t snamesize = strlen(sname) + 1;
	size_t dnamesize = strlen(dname) + 1;

	if (zil_replaying(zilog, tx))
		return;

	itx = zil_itx_create(txtype, sizeof (*lr) + snamesize + dnamesize);
	lr = (lr_rename_t *)&itx->itx_lr;
	lr->lr_sdoid = sdzp->z_id;
	lr->lr_tdoid = tdzp->z_id;
	memcpy((char *)(lr + 1), sname, snamesize);
	memcpy((char *)(lr + 1) + snamesize, dname, dnamesize);
	itx->itx_oid = szp->z_id;

	zil_itx_assign(zilog, itx, tx);
}

/*
 * zfs_log_write() handles TX_WRITE transactions. The specified callback is
 * called as soon as the write is on stable storage (be it via a DMU sync or a
 * ZIL commit).
 */
static long zfs_immediate_write_sz = 32768;
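
/*
 * How the per-itx write state is chosen below (a worked example, assuming
 * the default zfs_immediate_write_sz of 32K):
 *
 *	logbias=throughput		WR_INDIRECT: only a block pointer is
 *					logged; the data block itself is
 *					written out by dmu_sync()
 *	no slog, resid >= 32K		WR_INDIRECT
 *	O_SYNC/O_DSYNC write		WR_COPIED: the data is copied into
 *					the log record now, provided it fits
 *					in a single log block
 *	anything else			WR_NEED_COPY: the data is read only
 *					if and when the itx is committed
 *
 * For instance, an asynchronous 128K write on a pool without a separate
 * log device would be logged WR_INDIRECT, while a 4K O_DSYNC write would
 * be logged WR_COPIED.
 */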

void
zfs_log_write(zilog_t *zilog, dmu_tx_t *tx, int txtype,
    znode_t *zp, offset_t off, ssize_t resid, int ioflag,
    zil_callback_t callback, void *callback_data)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)sa_get_db(zp->z_sa_hdl);
	uint32_t blocksize = zp->z_blksz;
	itx_wr_state_t write_state;
	uintptr_t fsync_cnt;
	uint64_t gen = 0;
	ssize_t size = resid;

	if (zil_replaying(zilog, tx) || zp->z_unlinked ||
	    zfs_xattr_owner_unlinked(zp)) {
		if (callback != NULL)
			callback(callback_data);
		return;
	}

	if (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
		write_state = WR_INDIRECT;
	else if (!spa_has_slogs(zilog->zl_spa) &&
	    resid >= zfs_immediate_write_sz)
		write_state = WR_INDIRECT;
	else if (ioflag & (O_SYNC | O_DSYNC))
		write_state = WR_COPIED;
	else
		write_state = WR_NEED_COPY;

	if ((fsync_cnt = (uintptr_t)tsd_get(zfs_fsyncer_key)) != 0) {
		(void) tsd_set(zfs_fsyncer_key, (void *)(fsync_cnt - 1));
	}

	(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(ZTOZSB(zp)), &gen,
	    sizeof (gen));

	while (resid) {
		itx_t *itx;
		lr_write_t *lr;
		itx_wr_state_t wr_state = write_state;
		ssize_t len = resid;

		/*
		 * A WR_COPIED record must fit entirely in one log block.
		 * Large writes can use WR_NEED_COPY, which the ZIL will
		 * split into multiple records across several log blocks
		 * if necessary.
		 */
		if (wr_state == WR_COPIED &&
		    resid > zil_max_copied_data(zilog))
			wr_state = WR_NEED_COPY;
		else if (wr_state == WR_INDIRECT)
			len = MIN(blocksize - P2PHASE(off, blocksize), resid);

		itx = zil_itx_create(txtype, sizeof (*lr) +
		    (wr_state == WR_COPIED ? len : 0));
		lr = (lr_write_t *)&itx->itx_lr;

		/*
		 * For WR_COPIED records, copy the data into the lr_write_t.
		 */
		if (wr_state == WR_COPIED) {
			int err;
			DB_DNODE_ENTER(db);
			err = dmu_read_by_dnode(DB_DNODE(db), off, len, lr + 1,
			    DMU_READ_NO_PREFETCH);
			if (err != 0) {
				zil_itx_destroy(itx);
				itx = zil_itx_create(txtype, sizeof (*lr));
				lr = (lr_write_t *)&itx->itx_lr;
				wr_state = WR_NEED_COPY;
			}
			DB_DNODE_EXIT(db);
		}

		itx->itx_wr_state = wr_state;
		lr->lr_foid = zp->z_id;
		lr->lr_offset = off;
		lr->lr_length = len;
		lr->lr_blkoff = 0;
		BP_ZERO(&lr->lr_blkptr);

		itx->itx_private = ZTOZSB(zp);
		itx->itx_gen = gen;

		if (!(ioflag & (O_SYNC | O_DSYNC)) && (zp->z_sync_cnt == 0) &&
		    (fsync_cnt == 0))
			itx->itx_sync = B_FALSE;

		itx->itx_callback = callback;
		itx->itx_callback_data = callback_data;
		zil_itx_assign(zilog, itx, tx);

		off += len;
		resid -= len;
	}

	if (write_state == WR_COPIED || write_state == WR_NEED_COPY) {
		dsl_pool_wrlog_count(zilog->zl_dmu_pool, size, tx->tx_txg);
	}
}
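
/*
 * A usage sketch for the callback arguments (illustration only;
 * my_write_ctx_t and my_write_done() are hypothetical).  Per the comment
 * above, the callback fires once the data is on stable storage, or
 * immediately if nothing is logged, so callback_data must remain valid
 * until then:
 *
 *	static void
 *	my_write_done(void *arg)
 *	{
 *		my_write_ctx_t *ctx = arg;
 *		...release ctx resources...
 *		kmem_free(ctx, sizeof (*ctx));
 *	}
 *	...
 *	zfs_log_write(zilog, tx, TX_WRITE, zp, off, nbytes, ioflag,
 *	    my_write_done, ctx);
 */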

/*
 * Handles TX_TRUNCATE transactions.
 */
void
zfs_log_truncate(zilog_t *zilog, dmu_tx_t *tx, int txtype,
    znode_t *zp, uint64_t off, uint64_t len)
{
	itx_t *itx;
	lr_truncate_t *lr;

	if (zil_replaying(zilog, tx) || zp->z_unlinked ||
	    zfs_xattr_owner_unlinked(zp))
		return;

	itx = zil_itx_create(txtype, sizeof (*lr));
	lr = (lr_truncate_t *)&itx->itx_lr;
	lr->lr_foid = zp->z_id;
	lr->lr_offset = off;
	lr->lr_length = len;

	itx->itx_sync = (zp->z_sync_cnt != 0);
	zil_itx_assign(zilog, itx, tx);
}

/*
 * Handles TX_SETATTR transactions.
 */
void
zfs_log_setattr(zilog_t *zilog, dmu_tx_t *tx, int txtype,
    znode_t *zp, vattr_t *vap, uint_t mask_applied, zfs_fuid_info_t *fuidp)
{
	itx_t		*itx;
	lr_setattr_t	*lr;
	xvattr_t	*xvap = (xvattr_t *)vap;
	size_t		recsize = sizeof (lr_setattr_t);
	void		*start;

	if (zil_replaying(zilog, tx) || zp->z_unlinked)
		return;

	/*
	 * If XVATTR is set, then the log record size needs to allow
	 * for the lr_attr_t + xvattr mask, mapsize and create time,
	 * plus the actual attribute values.
	 */
	if (vap->va_mask & ATTR_XVATTR)
		recsize = sizeof (*lr) + ZIL_XVAT_SIZE(xvap->xva_mapsize);

	if (fuidp)
		recsize += fuidp->z_domain_str_sz;

	itx = zil_itx_create(txtype, recsize);
	lr = (lr_setattr_t *)&itx->itx_lr;
	lr->lr_foid = zp->z_id;
	lr->lr_mask = (uint64_t)mask_applied;
	lr->lr_mode = (uint64_t)vap->va_mode;
	if ((mask_applied & ATTR_UID) && IS_EPHEMERAL(vap->va_uid))
		lr->lr_uid = fuidp->z_fuid_owner;
	else
		lr->lr_uid = (uint64_t)vap->va_uid;

	if ((mask_applied & ATTR_GID) && IS_EPHEMERAL(vap->va_gid))
		lr->lr_gid = fuidp->z_fuid_group;
	else
		lr->lr_gid = (uint64_t)vap->va_gid;

	lr->lr_size = (uint64_t)vap->va_size;
	ZFS_TIME_ENCODE(&vap->va_atime, lr->lr_atime);
	ZFS_TIME_ENCODE(&vap->va_mtime, lr->lr_mtime);
	start = (lr_setattr_t *)(lr + 1);
	if (vap->va_mask & ATTR_XVATTR) {
		zfs_log_xvattr((lr_attr_t *)start, xvap);
		start = (caddr_t)start + ZIL_XVAT_SIZE(xvap->xva_mapsize);
	}

	/*
	 * Now stick on domain information if any on end
	 */

	if (fuidp)
		(void) zfs_log_fuid_domains(fuidp, start);

	itx->itx_sync = (zp->z_sync_cnt != 0);
	zil_itx_assign(zilog, itx, tx);
}
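
/*
 * A sketch of the TX_SETATTR record assembled above: the lr_setattr_t is
 * followed by the xvattr data (ZIL_XVAT_SIZE(xva_mapsize) bytes) when
 * ATTR_XVATTR is set, and then by the FUID domain strings, if any.
 */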

/*
 * Handles TX_SETSAXATTR transactions.
 */
void
zfs_log_setsaxattr(zilog_t *zilog, dmu_tx_t *tx, int txtype,
    znode_t *zp, const char *name, const void *value, size_t size)
{
	itx_t		*itx;
	lr_setsaxattr_t	*lr;
	size_t		recsize = sizeof (lr_setsaxattr_t);
	void		*xattrstart;
	int		namelen;

	if (zil_replaying(zilog, tx) || zp->z_unlinked)
		return;

	namelen = strlen(name) + 1;
	recsize += (namelen + size);
	itx = zil_itx_create(txtype, recsize);
	lr = (lr_setsaxattr_t *)&itx->itx_lr;
	lr->lr_foid = zp->z_id;
	xattrstart = (char *)(lr + 1);
	memcpy(xattrstart, name, namelen);
	if (value != NULL) {
		memcpy((char *)xattrstart + namelen, value, size);
		lr->lr_size = size;
	} else {
		lr->lr_size = 0;
	}

	itx->itx_sync = (zp->z_sync_cnt != 0);
	zil_itx_assign(zilog, itx, tx);
}
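
/*
 * Record layout used above (sketch): the lr_setsaxattr_t is followed by
 * the NUL-terminated xattr name and then by lr_size bytes of value;
 * lr_size is 0 when no value is supplied.
 */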

/*
 * Handles TX_ACL transactions.
 */
void
zfs_log_acl(zilog_t *zilog, dmu_tx_t *tx, znode_t *zp,
    vsecattr_t *vsecp, zfs_fuid_info_t *fuidp)
{
	itx_t *itx;
	lr_acl_v0_t *lrv0;
	lr_acl_t *lr;
	int txtype;
	int lrsize;
	size_t txsize;
	size_t aclbytes = vsecp->vsa_aclentsz;

	if (zil_replaying(zilog, tx) || zp->z_unlinked)
		return;

	txtype = (ZTOZSB(zp)->z_version < ZPL_VERSION_FUID) ?
	    TX_ACL_V0 : TX_ACL;

	if (txtype == TX_ACL)
		lrsize = sizeof (*lr);
	else
		lrsize = sizeof (*lrv0);

	txsize = lrsize +
	    ((txtype == TX_ACL) ? ZIL_ACE_LENGTH(aclbytes) : aclbytes) +
	    (fuidp ? fuidp->z_domain_str_sz : 0) +
	    sizeof (uint64_t) * (fuidp ? fuidp->z_fuid_cnt : 0);

	itx = zil_itx_create(txtype, txsize);

	lr = (lr_acl_t *)&itx->itx_lr;
	lr->lr_foid = zp->z_id;
	if (txtype == TX_ACL) {
		lr->lr_acl_bytes = aclbytes;
		lr->lr_domcnt = fuidp ? fuidp->z_domain_cnt : 0;
		lr->lr_fuidcnt = fuidp ? fuidp->z_fuid_cnt : 0;
		if (vsecp->vsa_mask & VSA_ACE_ACLFLAGS)
			lr->lr_acl_flags = (uint64_t)vsecp->vsa_aclflags;
		else
			lr->lr_acl_flags = 0;
	}
	lr->lr_aclcnt = (uint64_t)vsecp->vsa_aclcnt;

	if (txtype == TX_ACL_V0) {
		lrv0 = (lr_acl_v0_t *)lr;
		memcpy(lrv0 + 1, vsecp->vsa_aclentp, aclbytes);
	} else {
		void *start = (ace_t *)(lr + 1);

		memcpy(start, vsecp->vsa_aclentp, aclbytes);

		start = (caddr_t)start + ZIL_ACE_LENGTH(aclbytes);

		if (fuidp) {
			start = zfs_log_fuid_ids(fuidp, start);
			(void) zfs_log_fuid_domains(fuidp, start);
		}
	}

	itx->itx_sync = (zp->z_sync_cnt != 0);
	zil_itx_assign(zilog, itx, tx);
}

ZFS_MODULE_PARAM(zfs, zfs_, immediate_write_sz, LONG, ZMOD_RW,
	"Largest data block to write to zil");
825