xref: /linux/fs/xfs/xfs_file.c (revision 0be3ff0c)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_icache.h"
#include "xfs_pnfs.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"

#include <linux/falloc.h>
#include <linux/backing-dev.h>
#include <linux/mman.h>
#include <linux/fadvise.h>
#include <linux/mount.h>

static const struct vm_operations_struct xfs_file_vm_ops;

/*
 * Decide if the given file range is aligned to the size of the fundamental
 * allocation unit for the file.
 */
static bool
xfs_is_falloc_aligned(
	struct xfs_inode	*ip,
	loff_t			pos,
	long long int		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint64_t		mask;

	if (XFS_IS_REALTIME_INODE(ip)) {
		if (!is_power_of_2(mp->m_sb.sb_rextsize)) {
			u64	rextbytes;
			u32	mod;

			rextbytes = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize);
			div_u64_rem(pos, rextbytes, &mod);
			if (mod)
				return false;
			div_u64_rem(len, rextbytes, &mod);
			return mod == 0;
		}
		mask = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize) - 1;
	} else {
		mask = mp->m_sb.sb_blocksize - 1;
	}

	return !((pos | len) & mask);
}

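/*
 * Example (illustrative): on a filesystem with 4096-byte blocks and no
 * realtime device, pos = 8192, len = 16384 is aligned because
 * (8192 | 16384) & 4095 == 0, while pos = 8192, len = 6000 is not,
 * because 6000 & 4095 != 0.
 */
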
/*
 * Fsync operations on directories are much simpler than on regular files,
 * as there is no file data to flush, and thus also no need for explicit
 * cache flush operations, and there are no non-transaction metadata updates
 * on directories either.
 */
STATIC int
xfs_dir_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);

	trace_xfs_dir_fsync(ip);
	return xfs_log_force_inode(ip);
}

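/*
 * Return the log commit sequence that must be forced to make the most
 * recent modification of the inode stable, or 0 if no log force is needed
 * (the inode is not pinned, or only timestamps are dirty for fdatasync).
 */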
static xfs_csn_t
xfs_fsync_seq(
	struct xfs_inode	*ip,
	bool			datasync)
{
	if (!xfs_ipincount(ip))
		return 0;
	if (datasync && !(ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
		return 0;
	return ip->i_itemp->ili_commit_seq;
}

/*
 * All metadata updates are logged, which means that we just have to flush the
 * log up to the latest LSN that touched the inode.
 *
 * If we have concurrent fsync/fdatasync() calls, we need them to all block on
 * the log force before we clear the ili_fsync_fields field. This ensures that
 * we don't get a racing sync operation that does not wait for the metadata to
 * hit the journal before returning.  If we race with clearing ili_fsync_fields,
 * then all that will happen is the log force will do nothing as the lsn will
 * already be on disk.  We can't race with setting ili_fsync_fields because that
 * is done under XFS_ILOCK_EXCL, and that can't happen because we hold the lock
 * shared until after ili_fsync_fields is cleared.
 */
static int
xfs_fsync_flush_log(
	struct xfs_inode	*ip,
	bool			datasync,
	int			*log_flushed)
{
	int			error = 0;
	xfs_csn_t		seq;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	seq = xfs_fsync_seq(ip, datasync);
	if (seq) {
		error = xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC,
					  log_flushed);

		spin_lock(&ip->i_itemp->ili_lock);
		ip->i_itemp->ili_fsync_fields = 0;
		spin_unlock(&ip->i_itemp->ili_lock);
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	return error;
}

STATIC int
xfs_file_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
	struct xfs_mount	*mp = ip->i_mount;
	int			error = 0;
	int			log_flushed = 0;

	trace_xfs_file_fsync(ip);

	error = file_write_and_wait_range(file, start, end);
	if (error)
		return error;

	if (xfs_is_shutdown(mp))
		return -EIO;

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	/*
	 * If we have an RT and/or log subvolume we need to make sure to flush
	 * the write cache of the device used for file data first.  This is to
	 * ensure newly written file data makes it to disk before logging the
	 * new inode size in case of an extending write.
	 */
	if (XFS_IS_REALTIME_INODE(ip))
		blkdev_issue_flush(mp->m_rtdev_targp->bt_bdev);
	else if (mp->m_logdev_targp != mp->m_ddev_targp)
		blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);

	/*
	 * Any inode that has dirty modifications in the log is pinned.  The
	 * racy check here for a pinned inode will not catch modifications
	 * that happen concurrently to the fsync call, but fsync semantics
	 * only require us to sync previously completed I/O.
	 */
	if (xfs_ipincount(ip))
		error = xfs_fsync_flush_log(ip, datasync, &log_flushed);

	/*
	 * If we only have a single device, and the log force above was
	 * a no-op, we might have to flush the data device cache here.
	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
	 * an already allocated file and thus do not have any metadata to
	 * commit.
	 */
	if (!log_flushed && !XFS_IS_REALTIME_INODE(ip) &&
	    mp->m_logdev_targp == mp->m_ddev_targp)
		blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);

	return error;
}

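/*
 * Take the inode iolock for the given kiocb, honouring IOCB_NOWAIT:
 * trylock and return -EAGAIN instead of sleeping when the caller asked
 * for non-blocking semantics.
 */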
static int
xfs_ilock_iocb(
	struct kiocb		*iocb,
	unsigned int		lock_mode)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, lock_mode))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, lock_mode);
	}

	return 0;
}

STATIC ssize_t
xfs_file_dio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	ssize_t			ret;

	trace_xfs_file_direct_read(iocb, to);

	if (!iov_iter_count(to))
		return 0; /* skip atime */

	file_accessed(iocb->ki_filp);

	ret = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED);
	if (ret)
		return ret;
	ret = iomap_dio_rw(iocb, to, &xfs_read_iomap_ops, NULL, 0, 0);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

static noinline ssize_t
xfs_file_dax_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(iocb->ki_filp->f_mapping->host);
	ssize_t			ret = 0;

	trace_xfs_file_dax_read(iocb, to);

	if (!iov_iter_count(to))
		return 0; /* skip atime */

	ret = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED);
	if (ret)
		return ret;
	ret = dax_iomap_rw(iocb, to, &xfs_read_iomap_ops);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	file_accessed(iocb->ki_filp);
	return ret;
}

STATIC ssize_t
xfs_file_buffered_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	ssize_t			ret;

	trace_xfs_file_buffered_read(iocb, to);

	ret = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED);
	if (ret)
		return ret;
	ret = generic_file_read_iter(iocb, to);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

STATIC ssize_t
xfs_file_read_iter(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_mount	*mp = XFS_I(inode)->i_mount;
	ssize_t			ret = 0;

	XFS_STATS_INC(mp, xs_read_calls);

	if (xfs_is_shutdown(mp))
		return -EIO;

	if (IS_DAX(inode))
		ret = xfs_file_dax_read(iocb, to);
	else if (iocb->ki_flags & IOCB_DIRECT)
		ret = xfs_file_dio_read(iocb, to);
	else
		ret = xfs_file_buffered_read(iocb, to);

	if (ret > 0)
		XFS_STATS_ADD(mp, xs_read_bytes, ret);
	return ret;
}

/*
 * Common pre-write limit and setup checks.
 *
 * Called with the iolock held either shared or exclusive according to
 * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
 * if called for a direct write beyond i_size.
 */
STATIC ssize_t
xfs_file_write_checks(
	struct kiocb		*iocb,
	struct iov_iter		*from,
	int			*iolock)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			error = 0;
	size_t			count = iov_iter_count(from);
	bool			drained_dio = false;
	loff_t			isize;

restart:
	error = generic_write_checks(iocb, from);
	if (error <= 0)
		return error;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		error = break_layout(inode, false);
		if (error == -EWOULDBLOCK)
			error = -EAGAIN;
	} else {
		error = xfs_break_layouts(inode, iolock, BREAK_WRITE);
	}

	if (error)
		return error;

	/*
	 * For changing security info in file_remove_privs() we need i_rwsem
	 * exclusively.
	 */
	if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
		xfs_iunlock(ip, *iolock);
		*iolock = XFS_IOLOCK_EXCL;
		error = xfs_ilock_iocb(iocb, *iolock);
		if (error) {
			*iolock = 0;
			return error;
		}
		goto restart;
	}

	/*
	 * If the offset is beyond the size of the file, we need to zero any
	 * blocks that fall between the existing EOF and the start of this
	 * write.  If zeroing is needed and we are currently holding the iolock
	 * shared, we need to update it to exclusive which implies having to
	 * redo all checks before.
	 *
	 * We need to serialise against EOF updates that occur in IO completions
	 * here. We want to make sure that nobody is changing the size while we
	 * do this check until we have placed an IO barrier (i.e.  hold the
	 * XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.  The
	 * spinlock effectively forms a memory barrier once we have the
	 * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value and
	 * hence be able to correctly determine if we need to run zeroing.
	 *
	 * We can do an unlocked check here safely as IO completion can only
	 * extend EOF. Truncate is locked out at this point, so the EOF can
	 * not move backwards, only forwards. Hence we only need to take the
	 * slow path and spin locks when we are at or beyond the current EOF.
	 */
	if (iocb->ki_pos <= i_size_read(inode))
		goto out;

	spin_lock(&ip->i_flags_lock);
	isize = i_size_read(inode);
	if (iocb->ki_pos > isize) {
		spin_unlock(&ip->i_flags_lock);

		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;

		if (!drained_dio) {
			if (*iolock == XFS_IOLOCK_SHARED) {
				xfs_iunlock(ip, *iolock);
				*iolock = XFS_IOLOCK_EXCL;
				xfs_ilock(ip, *iolock);
				iov_iter_reexpand(from, count);
			}
			/*
			 * We now have an IO submission barrier in place, but
			 * AIO can do EOF updates during IO completion and hence
			 * we now need to wait for all of them to drain. Non-AIO
			 * DIO will have drained before we are given the
			 * XFS_IOLOCK_EXCL, and so for most cases this wait is a
			 * no-op.
			 */
			inode_dio_wait(inode);
			drained_dio = true;
			goto restart;
		}

		trace_xfs_zero_eof(ip, isize, iocb->ki_pos - isize);
		error = xfs_zero_range(ip, isize, iocb->ki_pos - isize, NULL);
		if (error)
			return error;
	} else
		spin_unlock(&ip->i_flags_lock);

out:
	return file_modified(file);
}

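/*
 * Direct I/O write completion: finish COW remapping if needed, convert
 * unwritten extents, and update the in-core and on-disk inode sizes for
 * extending writes.
 */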
static int
xfs_dio_write_end_io(
	struct kiocb		*iocb,
	ssize_t			size,
	int			error,
	unsigned		flags)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_inode	*ip = XFS_I(inode);
	loff_t			offset = iocb->ki_pos;
	unsigned int		nofs_flag;

	trace_xfs_end_io_direct_write(ip, offset, size);

	if (xfs_is_shutdown(ip->i_mount))
		return -EIO;

	if (error)
		return error;
	if (!size)
		return 0;

	/*
	 * Capture amount written on completion as we can't reliably account
	 * for it on submission.
	 */
	XFS_STATS_ADD(ip->i_mount, xs_write_bytes, size);

	/*
	 * We can allocate memory here while doing writeback on behalf of
	 * memory reclaim.  To avoid memory allocation deadlocks set the
	 * task-wide nofs context for the following operations.
	 */
	nofs_flag = memalloc_nofs_save();

	if (flags & IOMAP_DIO_COW) {
		error = xfs_reflink_end_cow(ip, offset, size);
		if (error)
			goto out;
	}

	/*
	 * Unwritten conversion updates the in-core isize after extent
	 * conversion but before updating the on-disk size. Updating isize any
	 * earlier allows a racing dio read to find unwritten extents before
	 * they are converted.
	 */
	if (flags & IOMAP_DIO_UNWRITTEN) {
		error = xfs_iomap_write_unwritten(ip, offset, size, true);
		goto out;
	}

	/*
	 * We need to update the in-core inode size here so that we don't end up
	 * with the on-disk inode size being outside the in-core inode size. We
	 * have no other method of updating EOF for AIO, so always do it here
	 * if necessary.
	 *
	 * We need to lock the test/set EOF update as we can be racing with
	 * other IO completions here to update the EOF. Failing to serialise
	 * here can result in EOF moving backwards and Bad Things Happen when
	 * that occurs.
	 *
	 * As IO completion only ever extends EOF, we can do an unlocked check
	 * here to avoid taking the spinlock. If we land within the current EOF,
	 * then we do not need to do an extending update at all, and we don't
	 * need to take the lock to check this. If we race with an update moving
	 * EOF, then we'll either still be beyond EOF and need to take the lock,
	 * or we'll be within EOF and we don't need to take it at all.
	 */
	if (offset + size <= i_size_read(inode))
		goto out;

	spin_lock(&ip->i_flags_lock);
	if (offset + size > i_size_read(inode)) {
		i_size_write(inode, offset + size);
		spin_unlock(&ip->i_flags_lock);
		error = xfs_setfilesize(ip, offset, size);
	} else {
		spin_unlock(&ip->i_flags_lock);
	}

out:
	memalloc_nofs_restore(nofs_flag);
	return error;
}

static const struct iomap_dio_ops xfs_dio_write_ops = {
	.end_io		= xfs_dio_write_end_io,
};

/*
 * Handle block aligned direct I/O writes
 */
static noinline ssize_t
xfs_file_dio_write_aligned(
	struct xfs_inode	*ip,
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	int			iolock = XFS_IOLOCK_SHARED;
	ssize_t			ret;

	ret = xfs_ilock_iocb(iocb, iolock);
	if (ret)
		return ret;
	ret = xfs_file_write_checks(iocb, from, &iolock);
	if (ret)
		goto out_unlock;

	/*
	 * We don't need to hold the IOLOCK exclusively across the IO, so demote
	 * the iolock back to shared if we had to take the exclusive lock in
	 * xfs_file_write_checks() for other reasons.
	 */
	if (iolock == XFS_IOLOCK_EXCL) {
		xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
		iolock = XFS_IOLOCK_SHARED;
	}
	trace_xfs_file_direct_write(iocb, from);
	ret = iomap_dio_rw(iocb, from, &xfs_direct_write_iomap_ops,
			   &xfs_dio_write_ops, 0, 0);
out_unlock:
	if (iolock)
		xfs_iunlock(ip, iolock);
	return ret;
}

/*
 * Handle block unaligned direct I/O writes
 *
 * In most cases direct I/O writes will be done holding IOLOCK_SHARED, allowing
 * them to be done in parallel with reads and other direct I/O writes.  However,
 * if the I/O is not aligned to filesystem blocks, the direct I/O layer may need
 * to do sub-block zeroing and that requires serialisation against other direct
 * I/O to the same block.  In this case we need to serialise the submission of
 * the unaligned I/O so that we don't get racing block zeroing in the dio layer.
 * In the case where sub-block zeroing is not required, we can do concurrent
 * sub-block dios to the same block successfully.
 *
 * Optimistically submit the I/O using the shared lock first, but use the
 * IOMAP_DIO_OVERWRITE_ONLY flag to tell the lower layers to return -EAGAIN
 * if block allocation or partial block zeroing would be required.  In that case
 * we try again with the exclusive lock.
 */
static noinline ssize_t
xfs_file_dio_write_unaligned(
	struct xfs_inode	*ip,
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	size_t			isize = i_size_read(VFS_I(ip));
	size_t			count = iov_iter_count(from);
	int			iolock = XFS_IOLOCK_SHARED;
	unsigned int		flags = IOMAP_DIO_OVERWRITE_ONLY;
	ssize_t			ret;

	/*
	 * Extending writes need exclusivity because of the sub-block zeroing
	 * that the DIO code always does for partial tail blocks beyond EOF, so
	 * don't even bother trying the fast path in this case.
	 */
	if (iocb->ki_pos > isize || iocb->ki_pos + count >= isize) {
retry_exclusive:
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		iolock = XFS_IOLOCK_EXCL;
		flags = IOMAP_DIO_FORCE_WAIT;
	}

	ret = xfs_ilock_iocb(iocb, iolock);
	if (ret)
		return ret;

	/*
	 * We can't properly handle unaligned direct I/O to reflink files yet,
	 * as we can't unshare a partial block.
	 */
	if (xfs_is_cow_inode(ip)) {
		trace_xfs_reflink_bounce_dio_write(iocb, from);
		ret = -ENOTBLK;
		goto out_unlock;
	}

	ret = xfs_file_write_checks(iocb, from, &iolock);
	if (ret)
		goto out_unlock;

	/*
	 * If we are doing exclusive unaligned I/O, this must be the only I/O
	 * in-flight.  Otherwise we risk data corruption due to unwritten extent
	 * conversions from the AIO end_io handler.  Wait for all other I/O to
	 * drain first.
	 */
	if (flags & IOMAP_DIO_FORCE_WAIT)
		inode_dio_wait(VFS_I(ip));

	trace_xfs_file_direct_write(iocb, from);
	ret = iomap_dio_rw(iocb, from, &xfs_direct_write_iomap_ops,
			   &xfs_dio_write_ops, flags, 0);

	/*
	 * Retry unaligned I/O with exclusive blocking semantics if the DIO
	 * layer rejected it for mapping or locking reasons. If we are doing
	 * nonblocking user I/O, propagate the error.
	 */
	if (ret == -EAGAIN && !(iocb->ki_flags & IOCB_NOWAIT)) {
		ASSERT(flags & IOMAP_DIO_OVERWRITE_ONLY);
		xfs_iunlock(ip, iolock);
		goto retry_exclusive;
	}

out_unlock:
	if (iolock)
		xfs_iunlock(ip, iolock);
	return ret;
}

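/*
 * Dispatch direct I/O writes by alignment: writes aligned to the filesystem
 * block size can take the shared-lock fast path, while anything block
 * unaligned goes through the serialised path above.
 */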
static ssize_t
xfs_file_dio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	struct xfs_buftarg	*target = xfs_inode_buftarg(ip);
	size_t			count = iov_iter_count(from);

	/* direct I/O must be aligned to device logical sector size */
	if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
		return -EINVAL;
	if ((iocb->ki_pos | count) & ip->i_mount->m_blockmask)
		return xfs_file_dio_write_unaligned(ip, iocb, from);
	return xfs_file_dio_write_aligned(ip, iocb, from);
}

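/*
 * DAX writes take the iolock exclusively; after a successful extending write
 * both the in-core and on-disk file sizes are updated before unlocking.
 */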
static noinline ssize_t
xfs_file_dax_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	int			iolock = XFS_IOLOCK_EXCL;
	ssize_t			ret, error = 0;
	loff_t			pos;

	ret = xfs_ilock_iocb(iocb, iolock);
	if (ret)
		return ret;
	ret = xfs_file_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	pos = iocb->ki_pos;

	trace_xfs_file_dax_write(iocb, from);
	ret = dax_iomap_rw(iocb, from, &xfs_direct_write_iomap_ops);
	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
		i_size_write(inode, iocb->ki_pos);
		error = xfs_setfilesize(ip, pos, ret);
	}
out:
	if (iolock)
		xfs_iunlock(ip, iolock);
	if (error)
		return error;

	if (ret > 0) {
		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);

		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}

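/*
 * Buffered writes run under the exclusive iolock and, on EDQUOT or ENOSPC,
 * free up speculative preallocations and retry the write once before giving
 * up; see the retry logic below.
 */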
STATIC ssize_t
xfs_file_buffered_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	bool			cleared_space = false;
	int			iolock;

	if (iocb->ki_flags & IOCB_NOWAIT)
		return -EOPNOTSUPP;

write_retry:
	iolock = XFS_IOLOCK_EXCL;
	xfs_ilock(ip, iolock);

	ret = xfs_file_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	trace_xfs_file_buffered_write(iocb, from);
	ret = iomap_file_buffered_write(iocb, from,
			&xfs_buffered_write_iomap_ops);
	if (likely(ret >= 0))
		iocb->ki_pos += ret;

	/*
	 * If we hit a space limit, try to free up some lingering preallocated
	 * space before returning an error. In the case of ENOSPC, first try to
	 * write back all dirty inodes to free up some of the excess reserved
	 * metadata space. This reduces the chances that the eofblocks scan
	 * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
	 * also behaves as a filter to prevent too many eofblocks scans from
	 * running at the same time.  Use a synchronous scan to increase the
	 * effectiveness of the scan.
	 */
	if (ret == -EDQUOT && !cleared_space) {
		xfs_iunlock(ip, iolock);
		xfs_blockgc_free_quota(ip, XFS_ICWALK_FLAG_SYNC);
		cleared_space = true;
		goto write_retry;
	} else if (ret == -ENOSPC && !cleared_space) {
		struct xfs_icwalk	icw = {0};

		cleared_space = true;
		xfs_flush_inodes(ip->i_mount);

		xfs_iunlock(ip, iolock);
		icw.icw_flags = XFS_ICWALK_FLAG_SYNC;
		xfs_blockgc_free_space(ip->i_mount, &icw);
		goto write_retry;
	}

	current->backing_dev_info = NULL;
out:
	if (iolock)
		xfs_iunlock(ip, iolock);

	if (ret > 0) {
		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);
		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}

STATIC ssize_t
xfs_file_write_iter(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	size_t			ocount = iov_iter_count(from);

	XFS_STATS_INC(ip->i_mount, xs_write_calls);

	if (ocount == 0)
		return 0;

	if (xfs_is_shutdown(ip->i_mount))
		return -EIO;

	if (IS_DAX(inode))
		return xfs_file_dax_write(iocb, from);

	if (iocb->ki_flags & IOCB_DIRECT) {
		/*
		 * Allow a directio write to fall back to a buffered
		 * write *only* in the case that we're doing a reflink
		 * CoW.  In all other directio scenarios we do not
		 * allow an operation to fall back to buffered mode.
		 */
		ret = xfs_file_dio_write(iocb, from);
		if (ret != -ENOTBLK)
			return ret;
	}

	return xfs_file_buffered_write(iocb, from);
}

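/*
 * Drop the mmap lock before sleeping and retake it on wakeup so the
 * layout-break retry loop can re-check for busy DAX pages.
 */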
static void
xfs_wait_dax_page(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
	schedule();
	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
}

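/*
 * Look for a DAX page with an elevated reference count (e.g. still pinned
 * by get_user_pages()) and, if one is found, wait for its refcount to drop
 * to one before letting the caller retry the layout break.
 */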
static int
xfs_break_dax_layouts(
	struct inode		*inode,
	bool			*retry)
{
	struct page		*page;

	ASSERT(xfs_isilocked(XFS_I(inode), XFS_MMAPLOCK_EXCL));

	page = dax_layout_busy_page(inode->i_mapping);
	if (!page)
		return 0;

	*retry = true;
	return ___wait_var_event(&page->_refcount,
			atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
			0, 0, xfs_wait_dax_page(inode));
}

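/*
 * Break any layouts that would be invalidated by the pending operation.
 * BREAK_UNMAP waits for busy DAX pages as well as breaking pNFS leases;
 * BREAK_WRITE only breaks the leases.
 */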
int
xfs_break_layouts(
	struct inode		*inode,
	uint			*iolock,
	enum layout_break_reason reason)
{
	bool			retry;
	int			error;

	ASSERT(xfs_isilocked(XFS_I(inode), XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL));

	do {
		retry = false;
		switch (reason) {
		case BREAK_UNMAP:
			error = xfs_break_dax_layouts(inode, &retry);
			if (error || retry)
				break;
			fallthrough;
		case BREAK_WRITE:
			error = xfs_break_leased_layouts(inode, iolock, &retry);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EINVAL;
		}
	} while (error == 0 && retry);

	return error;
}

/* Does this file, inode, or mount want synchronous writes? */
static inline bool xfs_file_sync_writes(struct file *filp)
{
	struct xfs_inode	*ip = XFS_I(file_inode(filp));

	if (xfs_has_wsync(ip->i_mount))
		return true;
	if (filp->f_flags & (__O_SYNC | O_DSYNC))
		return true;
	if (IS_SYNC(file_inode(filp)))
		return true;

	return false;
}

#define	XFS_FALLOC_FL_SUPPORTED						\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |	\
		 FALLOC_FL_INSERT_RANGE | FALLOC_FL_UNSHARE_RANGE)

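/*
 * Example (illustrative): punching out 8 KiB at offset 4 KiB without
 * changing the file size reaches this function via
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 4096, 8192);
 */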
STATIC long
xfs_file_fallocate(
	struct file		*file,
	int			mode,
	loff_t			offset,
	loff_t			len)
{
	struct inode		*inode = file_inode(file);
	struct xfs_inode	*ip = XFS_I(inode);
	long			error;
	uint			iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
	loff_t			new_size = 0;
	bool			do_file_insert = false;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	if (mode & ~XFS_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;

	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(inode, &iolock, BREAK_UNMAP);
	if (error)
		goto out_unlock;

	/*
	 * Must wait for all AIO to complete before we continue as AIO can
	 * change the file size on completion without holding any locks we
	 * currently hold. We must do this first because AIO can update both
	 * the on disk and in memory inode sizes, and the operations that follow
	 * require the in-memory size to be fully up-to-date.
	 */
	inode_dio_wait(inode);

	/*
	 * Now AIO and DIO have drained we flush and (if necessary) invalidate
	 * the cached range over the first operation we are about to run.
	 *
	 * We care about zero and collapse here because they both run a hole
	 * punch over the range first. Because that can zero data, and the range
	 * of invalidation for the shift operations is much larger, we still do
	 * the required flush for collapse in xfs_prepare_shift().
	 *
	 * Insert has the same range requirements as collapse, and we extend the
	 * file first which can zero data. Hence insert has the same
	 * flush/invalidate requirements as collapse and so they are both
	 * handled at the right time by xfs_prepare_shift().
	 */
	if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE |
		    FALLOC_FL_COLLAPSE_RANGE)) {
		error = xfs_flush_unmap_range(ip, offset, len);
		if (error)
			goto out_unlock;
	}

	error = file_modified(file);
	if (error)
		goto out_unlock;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		error = xfs_free_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		if (!xfs_is_falloc_aligned(ip, offset, len)) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * The collapse range must not reach or overlap EOF; collapsing
		 * up to EOF would effectively be a truncate operation.
		 */
		if (offset + len >= i_size_read(inode)) {
			error = -EINVAL;
			goto out_unlock;
		}

		new_size = i_size_read(inode) - len;

		error = xfs_collapse_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		loff_t		isize = i_size_read(inode);

		if (!xfs_is_falloc_aligned(ip, offset, len)) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * New inode size must not exceed ->s_maxbytes, accounting for
		 * possible signed overflow.
		 */
		if (inode->i_sb->s_maxbytes - isize < len) {
			error = -EFBIG;
			goto out_unlock;
		}
		new_size = isize + len;

		/* Offset should be less than i_size */
		if (offset >= isize) {
			error = -EINVAL;
			goto out_unlock;
		}
		do_file_insert = true;
	} else {
		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    offset + len > i_size_read(inode)) {
			new_size = offset + len;
			error = inode_newsize_ok(inode, new_size);
			if (error)
				goto out_unlock;
		}

		if (mode & FALLOC_FL_ZERO_RANGE) {
			/*
			 * Punch a hole and prealloc the range.  We use a hole
			 * punch rather than unwritten extent conversion for two
			 * reasons:
			 *
			 *   1.) Hole punch handles partial block zeroing for us.
			 *   2.) If prealloc returns ENOSPC, the file range is
			 *       still zero-valued by virtue of the hole punch.
			 */
			unsigned int blksize = i_blocksize(inode);

			trace_xfs_zero_file_space(ip);

			error = xfs_free_file_space(ip, offset, len);
			if (error)
				goto out_unlock;

			len = round_up(offset + len, blksize) -
			      round_down(offset, blksize);
			offset = round_down(offset, blksize);
		} else if (mode & FALLOC_FL_UNSHARE_RANGE) {
			error = xfs_reflink_unshare(ip, offset, len);
			if (error)
				goto out_unlock;
		} else {
			/*
			 * If always_cow mode we can't use preallocations and
			 * thus should not create them.
			 */
			if (xfs_is_always_cow_inode(ip)) {
				error = -EOPNOTSUPP;
				goto out_unlock;
			}
		}

		if (!xfs_is_always_cow_inode(ip)) {
			error = xfs_alloc_file_space(ip, offset, len);
			if (error)
				goto out_unlock;
		}
	}

	/* Change file size if needed */
	if (new_size) {
		struct iattr iattr;

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = new_size;
		error = xfs_vn_setattr_size(file_mnt_user_ns(file),
					    file_dentry(file), &iattr);
		if (error)
			goto out_unlock;
	}

	/*
	 * Perform hole insertion now that the file size has been
	 * updated so that if we crash during the operation we don't
	 * leave shifted extents past EOF and hence lose access to
	 * the data that is contained within them.
	 */
	if (do_file_insert) {
		error = xfs_insert_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	}

	if (xfs_file_sync_writes(file))
		error = xfs_log_force_inode(ip);

out_unlock:
	xfs_iunlock(ip, iolock);
	return error;
}

STATIC int
xfs_file_fadvise(
	struct file	*file,
	loff_t		start,
	loff_t		end,
	int		advice)
{
	struct xfs_inode *ip = XFS_I(file_inode(file));
	int ret;
	int lockflags = 0;

	/*
	 * Operations creating pages in page cache need protection from hole
	 * punching and similar ops
	 */
	if (advice == POSIX_FADV_WILLNEED) {
		lockflags = XFS_IOLOCK_SHARED;
		xfs_ilock(ip, lockflags);
	}
	ret = generic_fadvise(file, start, end, advice);
	if (lockflags)
		xfs_iunlock(ip, lockflags);
	return ret;
}

STATIC loff_t
xfs_file_remap_range(
	struct file		*file_in,
	loff_t			pos_in,
	struct file		*file_out,
	loff_t			pos_out,
	loff_t			len,
	unsigned int		remap_flags)
{
	struct inode		*inode_in = file_inode(file_in);
	struct xfs_inode	*src = XFS_I(inode_in);
	struct inode		*inode_out = file_inode(file_out);
	struct xfs_inode	*dest = XFS_I(inode_out);
	struct xfs_mount	*mp = src->i_mount;
	loff_t			remapped = 0;
	xfs_extlen_t		cowextsize;
	int			ret;

	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
		return -EINVAL;

	if (!xfs_has_reflink(mp))
		return -EOPNOTSUPP;

	if (xfs_is_shutdown(mp))
		return -EIO;

	/* Prepare and then clone file data. */
	ret = xfs_reflink_remap_prep(file_in, pos_in, file_out, pos_out,
			&len, remap_flags);
	if (ret || len == 0)
		return ret;

	trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);

	ret = xfs_reflink_remap_blocks(src, pos_in, dest, pos_out, len,
			&remapped);
	if (ret)
		goto out_unlock;

	/*
	 * Carry the cowextsize hint from src to dest if we're sharing the
	 * entire source file to the entire destination file, the source file
	 * has a cowextsize hint, and the destination file does not.
	 */
	cowextsize = 0;
	if (pos_in == 0 && len == i_size_read(inode_in) &&
	    (src->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) &&
	    pos_out == 0 && len >= i_size_read(inode_out) &&
	    !(dest->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE))
		cowextsize = src->i_cowextsize;

	ret = xfs_reflink_update_dest(dest, pos_out + len, cowextsize,
			remap_flags);
	if (ret)
		goto out_unlock;

	if (xfs_file_sync_writes(file_in) || xfs_file_sync_writes(file_out))
		xfs_log_force_inode(dest);
out_unlock:
	xfs_iunlock2_io_mmap(src, dest);
	if (ret)
		trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
	return remapped > 0 ? remapped : ret;
}

STATIC int
xfs_file_open(
	struct inode	*inode,
	struct file	*file)
{
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EFBIG;
	if (xfs_is_shutdown(XFS_M(inode->i_sb)))
		return -EIO;
	file->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;
	return 0;
}

STATIC int
xfs_dir_open(
	struct inode	*inode,
	struct file	*file)
{
	struct xfs_inode *ip = XFS_I(inode);
	int		mode;
	int		error;

	error = xfs_file_open(inode, file);
	if (error)
		return error;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_data_map_shared(ip);
	if (ip->i_df.if_nextents > 0)
		error = xfs_dir3_data_readahead(ip, 0, 0);
	xfs_iunlock(ip, mode);
	return error;
}

STATIC int
xfs_file_release(
	struct inode	*inode,
	struct file	*filp)
{
	return xfs_release(XFS_I(inode));
}

STATIC int
xfs_file_readdir(
	struct file	*file,
	struct dir_context *ctx)
{
	struct inode	*inode = file_inode(file);
	xfs_inode_t	*ip = XFS_I(inode);
	size_t		bufsize;

	/*
	 * The Linux API doesn't pass the total size of the buffer we read
	 * into down to the filesystem.  With the filldir concept it's not
	 * needed for correct information, but the XFS dir2 leaf code wants
	 * an estimate of the buffer size to calculate its readahead window
	 * and size the buffers used for mapping to physical blocks.
	 *
	 * Try to give it an estimate that's good enough, maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size.  For now we use the current glibc buffer size.
	 */
	bufsize = (size_t)min_t(loff_t, XFS_READDIR_BUFSIZE, ip->i_disk_size);

	return xfs_readdir(NULL, ip, ctx, bufsize);
}

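/*
 * Example (illustrative): userspace can locate the next run of data in a
 * sparse file with lseek(fd, offset, SEEK_DATA), or the next hole with
 * SEEK_HOLE; both cases end up in the iomap seek helpers below.
 */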
STATIC loff_t
xfs_file_llseek(
	struct file	*file,
	loff_t		offset,
	int		whence)
{
	struct inode		*inode = file->f_mapping->host;

	if (xfs_is_shutdown(XFS_I(inode)->i_mount))
		return -EIO;

	switch (whence) {
	default:
		return generic_file_llseek(file, offset, whence);
	case SEEK_HOLE:
		offset = iomap_seek_hole(inode, offset, &xfs_seek_iomap_ops);
		break;
	case SEEK_DATA:
		offset = iomap_seek_data(inode, offset, &xfs_seek_iomap_ops);
		break;
	}

	if (offset < 0)
		return offset;
	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}

/*
 * Locking for serialisation of IO during page faults. This results in a lock
 * ordering of:
 *
 * mmap_lock (MM)
 *   sb_start_pagefault(vfs, freeze)
 *     invalidate_lock (vfs/XFS_MMAPLOCK - truncate serialisation)
 *       page_lock (MM)
 *         i_lock (XFS - extent map serialisation)
 */
static vm_fault_t
__xfs_filemap_fault(
	struct vm_fault		*vmf,
	enum page_entry_size	pe_size,
	bool			write_fault)
{
	struct inode		*inode = file_inode(vmf->vma->vm_file);
	struct xfs_inode	*ip = XFS_I(inode);
	vm_fault_t		ret;

	trace_xfs_filemap_fault(ip, pe_size, write_fault);

	if (write_fault) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vmf->vma->vm_file);
	}

	if (IS_DAX(inode)) {
		pfn_t pfn;

		xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
		ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL,
				(write_fault && !vmf->cow_page) ?
				 &xfs_direct_write_iomap_ops :
				 &xfs_read_iomap_ops);
		if (ret & VM_FAULT_NEEDDSYNC)
			ret = dax_finish_sync_fault(vmf, pe_size, pfn);
		xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	} else {
		if (write_fault) {
			xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
			ret = iomap_page_mkwrite(vmf,
					&xfs_buffered_write_iomap_ops);
			xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
		} else {
			ret = filemap_fault(vmf);
		}
	}

	if (write_fault)
		sb_end_pagefault(inode->i_sb);
	return ret;
}

static inline bool
xfs_is_write_fault(
	struct vm_fault		*vmf)
{
	return (vmf->flags & FAULT_FLAG_WRITE) &&
	       (vmf->vma->vm_flags & VM_SHARED);
}

static vm_fault_t
xfs_filemap_fault(
	struct vm_fault		*vmf)
{
	/* DAX can shortcut the normal fault path on write faults! */
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE,
			IS_DAX(file_inode(vmf->vma->vm_file)) &&
			xfs_is_write_fault(vmf));
}

static vm_fault_t
xfs_filemap_huge_fault(
	struct vm_fault		*vmf,
	enum page_entry_size	pe_size)
{
	if (!IS_DAX(file_inode(vmf->vma->vm_file)))
		return VM_FAULT_FALLBACK;

	/* DAX can shortcut the normal fault path on write faults! */
	return __xfs_filemap_fault(vmf, pe_size,
			xfs_is_write_fault(vmf));
}

static vm_fault_t
xfs_filemap_page_mkwrite(
	struct vm_fault		*vmf)
{
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
}

/*
 * pfn_mkwrite was originally intended to ensure we capture time stamp updates
 * on write faults. In reality, it needs to serialise against truncate and
 * prepare memory for writing, so handle it as a standard write fault.
 */
static vm_fault_t
xfs_filemap_pfn_mkwrite(
	struct vm_fault		*vmf)
{
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
}

static vm_fault_t
xfs_filemap_map_pages(
	struct vm_fault		*vmf,
	pgoff_t			start_pgoff,
	pgoff_t			end_pgoff)
{
	struct inode		*inode = file_inode(vmf->vma->vm_file);
	vm_fault_t		ret;

	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	ret = filemap_map_pages(vmf, start_pgoff, end_pgoff);
	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	return ret;
}

static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= xfs_filemap_fault,
	.huge_fault	= xfs_filemap_huge_fault,
	.map_pages	= xfs_filemap_map_pages,
	.page_mkwrite	= xfs_filemap_page_mkwrite,
	.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,
};

STATIC int
xfs_file_mmap(
	struct file		*file,
	struct vm_area_struct	*vma)
{
	struct inode		*inode = file_inode(file);
	struct xfs_buftarg	*target = xfs_inode_buftarg(XFS_I(inode));

	/*
	 * We don't support synchronous mappings for non-DAX files, nor for
	 * DAX files if the underlying dax_device is not synchronous.
	 */
	if (!daxdev_mapping_supported(vma, target->bt_daxdev))
		return -EOPNOTSUPP;

	file_accessed(file);
	vma->vm_ops = &xfs_file_vm_ops;
	if (IS_DAX(inode))
		vma->vm_flags |= VM_HUGEPAGE;
	return 0;
}

const struct file_operations xfs_file_operations = {
	.llseek		= xfs_file_llseek,
	.read_iter	= xfs_file_read_iter,
	.write_iter	= xfs_file_write_iter,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.iopoll		= iocb_bio_iopoll,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.mmap		= xfs_file_mmap,
	.mmap_supported_flags = MAP_SYNC,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
	.get_unmapped_area = thp_get_unmapped_area,
	.fallocate	= xfs_file_fallocate,
	.fadvise	= xfs_file_fadvise,
	.remap_file_range = xfs_file_remap_range,
};

const struct file_operations xfs_dir_file_operations = {
	.open		= xfs_dir_open,
	.read		= generic_read_dir,
	.iterate_shared	= xfs_file_readdir,
	.llseek		= generic_file_llseek,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.fsync		= xfs_dir_fsync,
};