/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2015 by Chunwei Chen. All rights reserved.
 * Copyright 2017 Nexenta Systems, Inc.
 */

/* Portions Copyright 2007 Jeremy Teo */
/* Portions Copyright 2010 Robert Milkowski */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include <sys/vfs.h>
#include <sys/uio_impl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_ioctl.h>
#include <sys/fs/zfs.h>
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/dbuf.h>
#include <sys/policy.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_quota.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_znode.h>


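/*
 * Published through thread-specific data (zfs_fsyncer_key) for the
 * duration of a zfs_fsync() call so that other parts of ZFS can
 * recognize threads that are actively fsync()ing.  (Descriptive note:
 * the key is consumed outside this file.)
 */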
static ulong_t zfs_fsync_sync_cnt = 4;

int
zfs_fsync(znode_t *zp, int syncflag, cred_t *cr)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);

	(void) tsd_set(zfs_fsyncer_key, (void *)zfs_fsync_sync_cnt);

	if (zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) {
		ZFS_ENTER(zfsvfs);
		ZFS_VERIFY_ZP(zp);
		zil_commit(zfsvfs->z_log, zp->z_id);
		ZFS_EXIT(zfsvfs);
	}
	tsd_set(zfs_fsyncer_key, NULL);

	return (0);
}


#if defined(SEEK_HOLE) && defined(SEEK_DATA)
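/*
 * Illustrative userspace view (not part of this file): these paths are
 * reached via lseek(2), e.g.
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);      (first data at/after 0)
 *	off_t hole = lseek(fd, data, SEEK_HOLE);   (first hole at/after data)
 *
 * which is how sparse-aware tools skip over holes when copying.
 */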
/*
 * Lseek support for finding holes (cmd == SEEK_HOLE) and
 * data (cmd == SEEK_DATA). "off" is an in/out parameter.
 */
static int
zfs_holey_common(znode_t *zp, ulong_t cmd, loff_t *off)
{
	uint64_t noff = (uint64_t)*off; /* new offset */
	uint64_t file_sz;
	int error;
	boolean_t hole;

	file_sz = zp->z_size;
	if (noff >= file_sz) {
		return (SET_ERROR(ENXIO));
	}

	if (cmd == F_SEEK_HOLE)
		hole = B_TRUE;
	else
		hole = B_FALSE;

	error = dmu_offset_next(ZTOZSB(zp)->z_os, zp->z_id, hole, &noff);

	if (error == ESRCH)
		return (SET_ERROR(ENXIO));

	/* file was dirty, so fall back to using generic logic */
	if (error == EBUSY) {
		if (hole)
			*off = file_sz;

		return (0);
	}

	/*
	 * We could find a hole that begins after the logical end-of-file,
	 * because dmu_offset_next() only works on whole blocks.  If the
	 * EOF falls mid-block, then indicate that the "virtual hole"
	 * at the end of the file begins at the logical EOF, rather than
	 * at the end of the last block.
	 */
	if (noff > file_sz) {
		ASSERT(hole);
		noff = file_sz;
	}

	if (noff < *off)
		return (error);
	*off = noff;
	return (error);
}

int
zfs_holey(znode_t *zp, ulong_t cmd, loff_t *off)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	int error;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	error = zfs_holey_common(zp, cmd, off);

	ZFS_EXIT(zfsvfs);
	return (error);
}
#endif /* SEEK_HOLE && SEEK_DATA */

/*ARGSUSED*/
int
zfs_access(znode_t *zp, int mode, int flag, cred_t *cr)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	int error;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	if (flag & V_ACE_MASK)
		error = zfs_zaccess(zp, mode, flag, B_FALSE, cr);
	else
		error = zfs_zaccess_rwx(zp, mode, flag, cr);

	ZFS_EXIT(zfsvfs);
	return (error);
}

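/*
 * Largest span zfs_read() passes to a single dmu_read_uio_dbuf() call;
 * bigger requests are split on chunk-size-aligned boundaries.  For
 * example, with the default 1MB chunk a 3MB read starting at offset
 * 512K is issued as chunks of 512K, 1MB, 1MB, and 512K.
 */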
static unsigned long zfs_vnops_read_chunk_size = 1024 * 1024; /* Tunable */

/*
 * Read bytes from specified file into supplied buffer.
 *
 *	IN:	zp	- inode of file to be read from.
 *		uio	- structure supplying read location, range info,
 *			  and return buffer.
 *		ioflag	- O_SYNC flags; used to provide FRSYNC semantics.
 *			  O_DIRECT flag; used to bypass page cache.
 *		cr	- credentials of caller.
 *
 *	OUT:	uio	- updated offset and range, buffer filled.
 *
 *	RETURN:	0 on success, error code on failure.
 *
 * Side Effects:
 *	inode - atime updated if byte count > 0
 */
/* ARGSUSED */
int
zfs_read(struct znode *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
{
	int error = 0;
	boolean_t frsync = B_FALSE;

	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	if (zp->z_pflags & ZFS_AV_QUARANTINED) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EACCES));
	}

	/* We don't copy out anything useful for directories. */
	if (Z_ISDIR(ZTOTYPE(zp))) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EISDIR));
	}

	/*
	 * Validate file offset
	 */
	if (zfs_uio_offset(uio) < (offset_t)0) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Fasttrack empty reads
	 */
	if (zfs_uio_resid(uio) == 0) {
		ZFS_EXIT(zfsvfs);
		return (0);
	}

#ifdef FRSYNC
	/*
	 * If we're in FRSYNC mode, sync out this znode before reading it.
	 * Only do this for non-snapshots.
	 *
	 * Some platforms do not support FRSYNC and instead map it
	 * to O_SYNC, which results in unnecessary calls to zil_commit. We
	 * only honor FRSYNC requests on platforms which support it.
	 */
	frsync = !!(ioflag & FRSYNC);
#endif
	if (zfsvfs->z_log &&
	    (frsync || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS))
		zil_commit(zfsvfs->z_log, zp->z_id);

	/*
	 * Lock the range against changes.
	 */
	zfs_locked_range_t *lr = zfs_rangelock_enter(&zp->z_rangelock,
	    zfs_uio_offset(uio), zfs_uio_resid(uio), RL_READER);

	/*
	 * If we are reading past end-of-file we can skip
	 * to the end; but we might still need to set atime.
	 */
	if (zfs_uio_offset(uio) >= zp->z_size) {
		error = 0;
		goto out;
	}

	ASSERT(zfs_uio_offset(uio) < zp->z_size);
	ssize_t n = MIN(zfs_uio_resid(uio), zp->z_size - zfs_uio_offset(uio));
	ssize_t start_resid = n;

	while (n > 0) {
		ssize_t nbytes = MIN(n, zfs_vnops_read_chunk_size -
		    P2PHASE(zfs_uio_offset(uio), zfs_vnops_read_chunk_size));
#ifdef UIO_NOCOPY
		if (zfs_uio_segflg(uio) == UIO_NOCOPY)
			error = mappedread_sf(zp, nbytes, uio);
		else
#endif
		if (zn_has_cached_data(zp) && !(ioflag & O_DIRECT)) {
			error = mappedread(zp, nbytes, uio);
		} else {
			error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
			    uio, nbytes);
		}

		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);
			break;
		}

		n -= nbytes;
	}

	int64_t nread = start_resid - n;
	dataset_kstats_update_read_kstats(&zfsvfs->z_kstat, nread);
	task_io_account_read(nread);
out:
	zfs_rangelock_exit(lr);

	ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
	ZFS_EXIT(zfsvfs);
	return (error);
}

/*
 * Write the bytes to a file.
 *
 *	IN:	zp	- znode of file to be written to.
 *		uio	- structure supplying write location, range info,
 *			  and data buffer.
 *		ioflag	- O_APPEND flag set if in append mode.
 *			  O_DIRECT flag; used to bypass page cache.
 *		cr	- credentials of caller.
 *
 *	OUT:	uio	- updated offset and range.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	ip - ctime|mtime updated if byte count > 0
 */

/* ARGSUSED */
int
zfs_write(znode_t *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
{
	int error = 0;
	ssize_t start_resid = zfs_uio_resid(uio);

	/*
	 * Fasttrack empty write
	 */
	ssize_t n = start_resid;
	if (n == 0)
		return (0);

	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	sa_bulk_attr_t bulk[4];
	int count = 0;
	uint64_t mtime[2], ctime[2];
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
	    &zp->z_size, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
	    &zp->z_pflags, 8);

	/*
	 * Callers might not be able to properly detect that the filesystem
	 * is read-only, so check it explicitly here.
	 */
	if (zfs_is_readonly(zfsvfs)) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EROFS));
	}

	/*
	 * If immutable or not appending then return EPERM.
	 * Intentionally allow ZFS_READONLY through here.
	 * See zfs_zaccess_common()
	 */
	if ((zp->z_pflags & ZFS_IMMUTABLE) ||
	    ((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & O_APPEND) &&
	    (zfs_uio_offset(uio) < zp->z_size))) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EPERM));
	}

	/*
	 * Validate file offset
	 */
	offset_t woff = ioflag & O_APPEND ? zp->z_size : zfs_uio_offset(uio);
	if (woff < 0) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EINVAL));
	}

	const uint64_t max_blksz = zfsvfs->z_max_blksz;

	/*
	 * Pre-fault the pages to ensure slow (e.g. NFS) pages
	 * don't hold up the txg.
	 * Skip this if the uio contains a loaned arc_buf.
	 */
	if (zfs_uio_prefaultpages(MIN(n, max_blksz), uio)) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EFAULT));
	}

	/*
	 * If in append mode, set the io offset pointer to eof.
	 */
	zfs_locked_range_t *lr;
	if (ioflag & O_APPEND) {
		/*
		 * Obtain an appending range lock to guarantee file append
		 * semantics.  We reset the write offset once we have the lock.
		 */
		lr = zfs_rangelock_enter(&zp->z_rangelock, 0, n, RL_APPEND);
		woff = lr->lr_offset;
		if (lr->lr_length == UINT64_MAX) {
			/*
			 * We overlocked the file because this write will cause
			 * the file block size to increase.
			 * Note that zp_size cannot change with this lock held.
			 */
			woff = zp->z_size;
		}
		zfs_uio_setoffset(uio, woff);
	} else {
		/*
		 * Note that if the file block size will change as a result of
		 * this write, then this range lock will lock the entire file
		 * so that we can re-write the block safely.
		 */
		lr = zfs_rangelock_enter(&zp->z_rangelock, woff, n, RL_WRITER);
	}

	if (zn_rlimit_fsize(zp, uio)) {
		zfs_rangelock_exit(lr);
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EFBIG));
	}

	const rlim64_t limit = MAXOFFSET_T;

	if (woff >= limit) {
		zfs_rangelock_exit(lr);
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EFBIG));
	}

	if (n > limit - woff)
		n = limit - woff;

	uint64_t end_size = MAX(zp->z_size, woff + n);
	zilog_t *zilog = zfsvfs->z_log;

	const uint64_t uid = KUID_TO_SUID(ZTOUID(zp));
	const uint64_t gid = KGID_TO_SGID(ZTOGID(zp));
	const uint64_t projid = zp->z_projid;

	/*
	 * Write the file in reasonable size chunks.  Each chunk is written
	 * in a separate transaction; this keeps the intent log records small
	 * and allows us to do more fine-grained space accounting.
	 */
	while (n > 0) {
		woff = zfs_uio_offset(uio);

		if (zfs_id_overblockquota(zfsvfs, DMU_USERUSED_OBJECT, uid) ||
		    zfs_id_overblockquota(zfsvfs, DMU_GROUPUSED_OBJECT, gid) ||
		    (projid != ZFS_DEFAULT_PROJID &&
		    zfs_id_overblockquota(zfsvfs, DMU_PROJECTUSED_OBJECT,
		    projid))) {
			error = SET_ERROR(EDQUOT);
			break;
		}

		arc_buf_t *abuf = NULL;
		if (n >= max_blksz && woff >= zp->z_size &&
		    P2PHASE(woff, max_blksz) == 0 &&
		    zp->z_blksz == max_blksz) {
			/*
			 * This write covers a full block.  "Borrow" a buffer
			 * from the dmu so that we can fill it before we enter
			 * a transaction.  This avoids the possibility of
			 * holding up the transaction if the data copy hangs
			 * up on a pagefault (e.g., from an NFS server mapping).
			 */
			size_t cbytes;

			abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
			    max_blksz);
			ASSERT(abuf != NULL);
			ASSERT(arc_buf_size(abuf) == max_blksz);
			if ((error = zfs_uiocopy(abuf->b_data, max_blksz,
			    UIO_WRITE, uio, &cbytes))) {
				dmu_return_arcbuf(abuf);
				break;
			}
			ASSERT3S(cbytes, ==, max_blksz);
		}

		/*
		 * Start a transaction.
		 */
		dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)sa_get_db(zp->z_sa_hdl);
		DB_DNODE_ENTER(db);
		dmu_tx_hold_write_by_dnode(tx, DB_DNODE(db), woff,
		    MIN(n, max_blksz));
		DB_DNODE_EXIT(db);
		zfs_sa_upgrade_txholds(tx, zp);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			if (abuf != NULL)
				dmu_return_arcbuf(abuf);
			break;
		}

		/*
		 * If rangelock_enter() over-locked, we grow the blocksize
		 * and then reduce the lock range.  This can only happen
		 * on the first iteration, since rangelock_reduce() will
		 * shrink lr_length down to the appropriate size.
		 */
		if (lr->lr_length == UINT64_MAX) {
			uint64_t new_blksz;

			if (zp->z_blksz > max_blksz) {
				/*
				 * File's blocksize is already larger than the
				 * "recordsize" property.  Only let it grow to
				 * the next power of 2.
				 */
				ASSERT(!ISP2(zp->z_blksz));
				new_blksz = MIN(end_size,
				    1 << highbit64(zp->z_blksz));
			} else {
				new_blksz = MIN(end_size, max_blksz);
			}
			zfs_grow_blocksize(zp, new_blksz, tx);
			zfs_rangelock_reduce(lr, woff, n);
		}

		/*
		 * XXX - should we really limit each write to z_max_blksz?
		 * Perhaps we should use SPA_MAXBLOCKSIZE chunks?
		 */
		const ssize_t nbytes =
		    MIN(n, max_blksz - P2PHASE(woff, max_blksz));

		ssize_t tx_bytes;
		if (abuf == NULL) {
			tx_bytes = zfs_uio_resid(uio);
			zfs_uio_fault_disable(uio, B_TRUE);
			error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),
			    uio, nbytes, tx);
			zfs_uio_fault_disable(uio, B_FALSE);
#ifdef __linux__
			if (error == EFAULT) {
				dmu_tx_commit(tx);
				/*
				 * Account for partial writes before
				 * continuing the loop.
				 * Update needs to occur before the next
				 * zfs_uio_prefaultpages, or prefaultpages may
				 * error, and we may break the loop early.
				 */
				if (tx_bytes != zfs_uio_resid(uio))
					n -= tx_bytes - zfs_uio_resid(uio);
				if (zfs_uio_prefaultpages(MIN(n, max_blksz),
				    uio)) {
					break;
				}
				continue;
			}
#endif
			if (error != 0) {
				dmu_tx_commit(tx);
				break;
			}
			tx_bytes -= zfs_uio_resid(uio);
		} else {
			/* Implied by abuf != NULL: */
			ASSERT3S(n, >=, max_blksz);
			ASSERT0(P2PHASE(woff, max_blksz));
			/*
			 * We can simplify nbytes to MIN(n, max_blksz) since
			 * P2PHASE(woff, max_blksz) is 0, and knowing
			 * n >= max_blksz lets us simplify further:
			 */
			ASSERT3S(nbytes, ==, max_blksz);
			/*
			 * Thus, we're writing a full block at a block-aligned
			 * offset and extending the file past EOF.
			 *
			 * dmu_assign_arcbuf_by_dbuf() will directly assign the
			 * arc buffer to a dbuf.
			 */
			error = dmu_assign_arcbuf_by_dbuf(
			    sa_get_db(zp->z_sa_hdl), woff, abuf, tx);
			if (error != 0) {
				dmu_return_arcbuf(abuf);
				dmu_tx_commit(tx);
				break;
			}
			ASSERT3S(nbytes, <=, zfs_uio_resid(uio));
			zfs_uioskip(uio, nbytes);
			tx_bytes = nbytes;
		}
		if (tx_bytes && zn_has_cached_data(zp) &&
		    !(ioflag & O_DIRECT)) {
			update_pages(zp, woff, tx_bytes, zfsvfs->z_os);
		}

		/*
		 * If we made no progress, we're done.  If we made even
		 * partial progress, update the znode and ZIL accordingly.
		 */
		if (tx_bytes == 0) {
			(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
			    (void *)&zp->z_size, sizeof (uint64_t), tx);
			dmu_tx_commit(tx);
			ASSERT(error != 0);
			break;
		}

		/*
		 * Clear Set-UID/Set-GID bits on successful write if not
		 * privileged and at least one of the execute bits is set.
		 *
		 * It would be nice to do this after all writes have
		 * been done, but that would still expose the ISUID/ISGID
		 * to another app after the partial write is committed.
		 *
		 * Note: we don't call zfs_fuid_map_id() here because
		 * user 0 is not an ephemeral uid.
		 */
		mutex_enter(&zp->z_acl_lock);
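		/* i.e., any execute bit: S_IXUSR | S_IXGRP | S_IXOTH */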
		if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) |
		    (S_IXUSR >> 6))) != 0 &&
		    (zp->z_mode & (S_ISUID | S_ISGID)) != 0 &&
		    secpolicy_vnode_setid_retain(zp, cr,
		    ((zp->z_mode & S_ISUID) != 0 && uid == 0)) != 0) {
			uint64_t newmode;
			zp->z_mode &= ~(S_ISUID | S_ISGID);
			newmode = zp->z_mode;
			(void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs),
			    (void *)&newmode, sizeof (uint64_t), tx);
		}
		mutex_exit(&zp->z_acl_lock);

		zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);

		/*
		 * Update the file size (zp_size) if it has changed;
		 * account for possible concurrent updates.
		 */
		while ((end_size = zp->z_size) < zfs_uio_offset(uio)) {
			(void) atomic_cas_64(&zp->z_size, end_size,
			    zfs_uio_offset(uio));
			ASSERT(error == 0);
		}
		/*
		 * If we are replaying and eof is non-zero, then force
		 * the file size to the specified eof.  Note that there is
		 * no concurrency during replay.
		 */
		if (zfsvfs->z_replay && zfsvfs->z_replay_eof != 0)
			zp->z_size = zfsvfs->z_replay_eof;

		error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);

		zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, ioflag,
		    NULL, NULL);
		dmu_tx_commit(tx);

		if (error != 0)
			break;
		ASSERT3S(tx_bytes, ==, nbytes);
		n -= nbytes;

		if (n > 0) {
			if (zfs_uio_prefaultpages(MIN(n, max_blksz), uio)) {
				error = SET_ERROR(EFAULT);
				break;
			}
		}
	}

	zfs_znode_update_vfs(zp);
	zfs_rangelock_exit(lr);

	/*
	 * If we're in replay mode, or we made no progress, or the
	 * uio data is inaccessible, return an error.  Otherwise, it's
	 * at least a partial write, so it's successful.
	 */
	if (zfsvfs->z_replay || zfs_uio_resid(uio) == start_resid ||
	    error == EFAULT) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	if (ioflag & (O_SYNC | O_DSYNC) ||
	    zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, zp->z_id);

	const int64_t nwritten = start_resid - zfs_uio_resid(uio);
	dataset_kstats_update_write_kstats(&zfsvfs->z_kstat, nwritten);
	task_io_account_write(nwritten);

	ZFS_EXIT(zfsvfs);
	return (0);
}

/*ARGSUSED*/
int
zfs_getsecattr(znode_t *zp, vsecattr_t *vsecp, int flag, cred_t *cr)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	int error;
	boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);
	error = zfs_getacl(zp, vsecp, skipaclchk, cr);
	ZFS_EXIT(zfsvfs);

	return (error);
}

/*ARGSUSED*/
int
zfs_setsecattr(znode_t *zp, vsecattr_t *vsecp, int flag, cred_t *cr)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	int error;
	boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
	zilog_t	*zilog = zfsvfs->z_log;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	error = zfs_setacl(zp, vsecp, skipaclchk, cr);

	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	ZFS_EXIT(zfsvfs);
	return (error);
}

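/*
 * Debug aid: when set non-zero (e.g. from a kernel debugger), the next
 * indirect-write fill in zfs_get_data() below fails with EIO and the
 * flag self-clears, exercising the ZIL's error handling for that path.
 */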
#ifdef ZFS_DEBUG
static int zil_fault_io = 0;
#endif

static void zfs_get_done(zgd_t *zgd, int error);

/*
 * Get data to generate a TX_WRITE intent log record.
 */
int
zfs_get_data(void *arg, uint64_t gen, lr_write_t *lr, char *buf,
    struct lwb *lwb, zio_t *zio)
{
	zfsvfs_t *zfsvfs = arg;
	objset_t *os = zfsvfs->z_os;
	znode_t *zp;
	uint64_t object = lr->lr_foid;
	uint64_t offset = lr->lr_offset;
	uint64_t size = lr->lr_length;
	dmu_buf_t *db;
	zgd_t *zgd;
	int error = 0;
	uint64_t zp_gen;

	ASSERT3P(lwb, !=, NULL);
	ASSERT3P(zio, !=, NULL);
	ASSERT3U(size, !=, 0);

	/*
	 * Nothing to do if the file has been removed
	 */
	if (zfs_zget(zfsvfs, object, &zp) != 0)
		return (SET_ERROR(ENOENT));
	if (zp->z_unlinked) {
		/*
		 * Release the vnode asynchronously as we currently have the
		 * txg stopped from syncing.
		 */
		zfs_zrele_async(zp);
		return (SET_ERROR(ENOENT));
	}
	/* check if generation number matches */
	if (sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs), &zp_gen,
	    sizeof (zp_gen)) != 0) {
		zfs_zrele_async(zp);
		return (SET_ERROR(EIO));
	}
	if (zp_gen != gen) {
		zfs_zrele_async(zp);
		return (SET_ERROR(ENOENT));
	}

	zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
	zgd->zgd_lwb = lwb;
	zgd->zgd_private = zp;

	/*
	 * Write records come in two flavors: immediate and indirect.
	 * For small writes it's cheaper to store the data with the
	 * log record (immediate); for large writes it's cheaper to
	 * sync the data and get a pointer to it (indirect) so that
	 * we don't have to write the data twice.
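	 *
	 * (The immediate-vs-indirect choice is made earlier, in the ZIL
	 * logging code (e.g. zfs_log_write() and its size-based cutover,
	 * at the time of writing); by the time we get here, buf != NULL
	 * simply means immediate.)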
	 */
	if (buf != NULL) { /* immediate write */
		zgd->zgd_lr = zfs_rangelock_enter(&zp->z_rangelock,
		    offset, size, RL_READER);
		/* test for truncation needs to be done while range locked */
		if (offset >= zp->z_size) {
			error = SET_ERROR(ENOENT);
		} else {
			error = dmu_read(os, object, offset, size, buf,
			    DMU_READ_NO_PREFETCH);
		}
		ASSERT(error == 0 || error == ENOENT);
	} else { /* indirect write */
		/*
		 * We have to lock the whole block so that no one can
		 * change the data while it is being written out and its
		 * checksum calculated.  We need to re-check the blocksize
		 * after we get the lock, in case it has changed!
		 */
		for (;;) {
			uint64_t blkoff;
			size = zp->z_blksz;
			blkoff = ISP2(size) ? P2PHASE(offset, size) : offset;
			offset -= blkoff;
			zgd->zgd_lr = zfs_rangelock_enter(&zp->z_rangelock,
			    offset, size, RL_READER);
			if (zp->z_blksz == size)
				break;
			offset += blkoff;
			zfs_rangelock_exit(zgd->zgd_lr);
		}
		/* test for truncation needs to be done while range locked */
		if (lr->lr_offset >= zp->z_size)
			error = SET_ERROR(ENOENT);
#ifdef ZFS_DEBUG
		if (zil_fault_io) {
			error = SET_ERROR(EIO);
			zil_fault_io = 0;
		}
#endif
		if (error == 0)
			error = dmu_buf_hold(os, object, offset, zgd, &db,
			    DMU_READ_NO_PREFETCH);

		if (error == 0) {
			blkptr_t *bp = &lr->lr_blkptr;

			zgd->zgd_db = db;
			zgd->zgd_bp = bp;

			ASSERT(db->db_offset == offset);
			ASSERT(db->db_size == size);

			error = dmu_sync(zio, lr->lr_common.lrc_txg,
			    zfs_get_done, zgd);
			ASSERT(error || lr->lr_length <= size);

			/*
			 * On success, we need to wait for the write I/O
			 * initiated by dmu_sync() to complete before we can
			 * release this dbuf.  We will finish everything up
			 * in the zfs_get_done() callback.
			 */
			if (error == 0)
				return (0);

			if (error == EALREADY) {
				lr->lr_common.lrc_txtype = TX_WRITE2;
				/*
				 * TX_WRITE2 relies on the data previously
				 * written by the TX_WRITE that caused
				 * EALREADY.  We zero out the BP because
				 * it is the old, currently-on-disk BP.
				 */
				zgd->zgd_bp = NULL;
				BP_ZERO(bp);
				error = 0;
			}
		}
	}

	zfs_get_done(zgd, error);

	return (error);
}


/* ARGSUSED */
static void
zfs_get_done(zgd_t *zgd, int error)
{
	znode_t *zp = zgd->zgd_private;

	if (zgd->zgd_db)
		dmu_buf_rele(zgd->zgd_db, zgd);

	zfs_rangelock_exit(zgd->zgd_lr);

	/*
	 * Release the vnode asynchronously as we currently have the
	 * txg stopped from syncing.
	 */
	zfs_zrele_async(zp);

	kmem_free(zgd, sizeof (zgd_t));
}

EXPORT_SYMBOL(zfs_access);
EXPORT_SYMBOL(zfs_fsync);
EXPORT_SYMBOL(zfs_holey);
EXPORT_SYMBOL(zfs_read);
EXPORT_SYMBOL(zfs_write);
EXPORT_SYMBOL(zfs_getsecattr);
EXPORT_SYMBOL(zfs_setsecattr);

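/*
 * On Linux this tunable typically appears as
 * /sys/module/zfs/parameters/zfs_vnops_read_chunk_size (path shown for
 * illustration; exact exposure is platform-dependent).
 */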
ZFS_MODULE_PARAM(zfs_vnops, zfs_vnops_, read_chunk_size, ULONG, ZMOD_RW,
	"Bytes to read per chunk");