xref: /linux/fs/xfs/xfs_bmap_util.c (revision d642ef71)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2012 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"
#include "xfs_rtbitmap.h"

/* Kernel only BMAP related definitions and functions */

/*
 * Convert the given file system block to a disk block.  We have to treat it
 * differently based on whether the file is a real time file or not, because the
 * bmap code does.
 */
xfs_daddr_t
xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
{
	if (XFS_IS_REALTIME_INODE(ip))
		return XFS_FSB_TO_BB(ip->i_mount, fsb);
	return XFS_FSB_TO_DADDR(ip->i_mount, fsb);
}
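
/*
 * Illustrative note (not part of the original source): for a realtime inode
 * the fsb is already a linear block number on the rt device, so
 * XFS_FSB_TO_BB is a pure units conversion to 512-byte basic blocks.  For a
 * data-device inode the fsb encodes an (AG number, AG block) pair, so
 * XFS_FSB_TO_DADDR must decode it first.  With a hypothetical 4096-byte
 * block size, rt fsb 100 maps to basic block 100 * (4096 / 512) = 800.
 */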

/*
 * Routine to zero an extent on disk allocated to the specific inode.
 *
 * The VFS functions take a linearised filesystem block offset, so we have to
 * convert the sparse xfs fsb to the right format first.
 * VFS types are real funky, too.
 */
int
xfs_zero_extent(
	struct xfs_inode	*ip,
	xfs_fsblock_t		start_fsb,
	xfs_off_t		count_fsb)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_buftarg	*target = xfs_inode_buftarg(ip);
	xfs_daddr_t		sector = xfs_fsb_to_db(ip, start_fsb);
	sector_t		block = XFS_BB_TO_FSBT(mp, sector);

	return blkdev_issue_zeroout(target->bt_bdev,
		block << (mp->m_super->s_blocksize_bits - 9),
		count_fsb << (mp->m_super->s_blocksize_bits - 9),
		GFP_NOFS, 0);
}
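
/*
 * Worked example (illustrative, assuming 4096-byte filesystem blocks, i.e.
 * s_blocksize_bits == 12): the shift (12 - 9) == 3 converts filesystem
 * blocks to 512-byte sectors, so zeroing 2 blocks starting at linear block
 * 100 becomes blkdev_issue_zeroout(bdev, 800, 16, GFP_NOFS, 0).
 */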

#ifdef CONFIG_XFS_RT
int
xfs_bmap_rtalloc(
	struct xfs_bmalloca	*ap)
{
	struct xfs_mount	*mp = ap->ip->i_mount;
	xfs_fileoff_t		orig_offset = ap->offset;
	xfs_rtxnum_t		rtx;
	xfs_rtxlen_t		prod = 0;  /* product factor for allocators */
	xfs_extlen_t		mod = 0;   /* offset alignment remainder */
	xfs_rtxlen_t		ralen = 0; /* realtime allocation length */
	xfs_extlen_t		align;     /* minimum allocation alignment */
	xfs_extlen_t		orig_length = ap->length;
	xfs_extlen_t		minlen = mp->m_sb.sb_rextsize;
	xfs_rtxlen_t		raminlen;
	bool			rtlocked = false;
	bool			ignore_locality = false;
	int			error;

	align = xfs_get_extsz_hint(ap->ip);
retry:
	prod = xfs_extlen_to_rtxlen(mp, align);
	error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
					align, 1, ap->eof, 0,
					ap->conv, &ap->offset, &ap->length);
	if (error)
		return error;
	ASSERT(ap->length);
	ASSERT(xfs_extlen_to_rtxmod(mp, ap->length) == 0);

	/*
	 * If we shifted the file offset downward to satisfy an extent size
	 * hint, increase minlen by that amount so that the allocator won't
	 * give us an allocation that's too short to cover at least one of the
	 * blocks that the caller asked for.
	 */
	if (ap->offset != orig_offset)
		minlen += orig_offset - ap->offset;

	/*
	 * If the offset & length are not perfectly aligned
	 * then kill prod, it will just get us in trouble.
	 */
	div_u64_rem(ap->offset, align, &mod);
	if (mod || ap->length % align)
		prod = 1;
	/*
	 * Set ralen to be the actual requested length in rtextents.
	 *
	 * If the old value was close enough to XFS_BMBT_MAX_EXTLEN that
	 * we rounded up to it, cut it back so it's valid again.
	 * Note that if it's a really large request (bigger than
	 * XFS_BMBT_MAX_EXTLEN), we don't hear about that number, and can't
	 * adjust the starting point to match it.
	 */
	ralen = xfs_extlen_to_rtxlen(mp, min(ap->length, XFS_MAX_BMBT_EXTLEN));

	/*
	 * Lock out modifications to both the RT bitmap and summary inodes
	 */
	if (!rtlocked) {
		xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
		xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
		xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
		xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);
		rtlocked = true;
	}

	/*
	 * If it's an allocation to an empty file at offset 0,
	 * pick an extent that will space things out in the rt area.
	 */
	if (ap->eof && ap->offset == 0) {
		error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
		if (error)
			return error;
		ap->blkno = xfs_rtx_to_rtb(mp, rtx);
	} else {
		ap->blkno = 0;
	}

	xfs_bmap_adjacent(ap);

	/*
	 * Realtime allocation, done through xfs_rtallocate_extent.
	 */
	if (ignore_locality)
		rtx = 0;
	else
		rtx = xfs_rtb_to_rtx(mp, ap->blkno);
	raminlen = max_t(xfs_rtxlen_t, 1, xfs_extlen_to_rtxlen(mp, minlen));
	error = xfs_rtallocate_extent(ap->tp, rtx, raminlen, ralen, &ralen,
			ap->wasdel, prod, &rtx);
	if (error)
		return error;

	if (rtx != NULLRTEXTNO) {
		ap->blkno = xfs_rtx_to_rtb(mp, rtx);
		ap->length = xfs_rtxlen_to_extlen(mp, ralen);
		ap->ip->i_nblocks += ap->length;
		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
		if (ap->wasdel)
			ap->ip->i_delayed_blks -= ap->length;
		/*
		 * Adjust the disk quota also. This was reserved
		 * earlier.
		 */
		xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
			ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
					XFS_TRANS_DQ_RTBCOUNT, ap->length);
		return 0;
	}

	if (align > mp->m_sb.sb_rextsize) {
		/*
		 * We previously enlarged the request length to try to satisfy
		 * an extent size hint.  The allocator didn't return anything,
		 * so reset the parameters to the original values and try again
		 * without alignment criteria.
		 */
		ap->offset = orig_offset;
		ap->length = orig_length;
		minlen = align = mp->m_sb.sb_rextsize;
		goto retry;
	}

	if (!ignore_locality && ap->blkno != 0) {
		/*
		 * If we can't allocate near a specific rt extent, try again
		 * without locality criteria.
		 */
		ignore_locality = true;
		goto retry;
	}

	ap->blkno = NULLFSBLOCK;
	ap->length = 0;
	return 0;
}
#endif /* CONFIG_XFS_RT */
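
/*
 * Unit cheat-sheet for the realtime allocator above (illustrative numbers,
 * not from the original source): one rt extent is sb_rextsize filesystem
 * blocks, so with sb_rextsize == 4 a request for 32 fsblocks is ralen == 8
 * rt extents, and an extent size hint of align == 8 fsblocks yields
 * prod == 2, i.e. allocations are attempted in multiples of 2 rt extents
 * as long as the offset and length stay hint-aligned.
 */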

/*
 * Extent tree block counting routines.
 */

/*
 * Count leaf blocks given a range of extent records.  Delayed allocation
 * extents are not counted towards the totals.
 */
xfs_extnum_t
xfs_bmap_count_leaves(
	struct xfs_ifork	*ifp,
	xfs_filblks_t		*count)
{
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	got;
	xfs_extnum_t		numrecs = 0;

	for_each_xfs_iext(ifp, &icur, &got) {
		if (!isnullstartblock(got.br_startblock)) {
			*count += got.br_blockcount;
			numrecs++;
		}
	}

	return numrecs;
}

/*
 * Count fsblocks of the given fork.  Delayed allocation extents are
 * not counted towards the totals.
 */
int
xfs_bmap_count_blocks(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_extnum_t		*nextents,
	xfs_filblks_t		*count)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_btree_cur	*cur;
	xfs_extlen_t		btblocks = 0;
	int			error;

	*nextents = 0;
	*count = 0;

	if (!ifp)
		return 0;

	switch (ifp->if_format) {
	case XFS_DINODE_FMT_BTREE:
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;

		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		error = xfs_btree_count_blocks(cur, &btblocks);
		xfs_btree_del_cursor(cur, error);
		if (error)
			return error;

		/*
		 * xfs_btree_count_blocks includes the root block contained in
		 * the inode fork in @btblocks, so subtract one because we're
		 * only interested in allocated disk blocks.
		 */
		*count += btblocks - 1;

		fallthrough;
	case XFS_DINODE_FMT_EXTENTS:
		*nextents = xfs_bmap_count_leaves(ifp, count);
		break;
	}

	return 0;
}
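
/*
 * Worked example (illustrative): for a btree-format data fork whose bmbt
 * consists of the root in the inode fork plus two leaf blocks on disk,
 * xfs_btree_count_blocks() reports btblocks == 3.  The root lives in the
 * inode literal area rather than in an allocated disk block, so only 2 is
 * added to *count before the leaf records contribute their mapped blocks.
 */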

static int
xfs_getbmap_report_one(
	struct xfs_inode	*ip,
	struct getbmapx		*bmv,
	struct kgetbmap		*out,
	int64_t			bmv_end,
	struct xfs_bmbt_irec	*got)
{
	struct kgetbmap		*p = out + bmv->bmv_entries;
	bool			shared = false;
	int			error;

	error = xfs_reflink_trim_around_shared(ip, got, &shared);
	if (error)
		return error;

	if (isnullstartblock(got->br_startblock) ||
	    got->br_startblock == DELAYSTARTBLOCK) {
		/*
		 * Take the flush completion as being a point-in-time snapshot
		 * where there are no delalloc extents, and if any new ones
		 * have been created racily, just skip them as being 'after'
		 * the flush and so don't get reported.
		 */
		if (!(bmv->bmv_iflags & BMV_IF_DELALLOC))
			return 0;

		p->bmv_oflags |= BMV_OF_DELALLOC;
		p->bmv_block = -2;
	} else {
		p->bmv_block = xfs_fsb_to_db(ip, got->br_startblock);
	}

	if (got->br_state == XFS_EXT_UNWRITTEN &&
	    (bmv->bmv_iflags & BMV_IF_PREALLOC))
		p->bmv_oflags |= BMV_OF_PREALLOC;

	if (shared)
		p->bmv_oflags |= BMV_OF_SHARED;

	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, got->br_startoff);
	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, got->br_blockcount);

	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
	bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
	bmv->bmv_entries++;
	return 0;
}

static void
xfs_getbmap_report_hole(
	struct xfs_inode	*ip,
	struct getbmapx		*bmv,
	struct kgetbmap		*out,
	int64_t			bmv_end,
	xfs_fileoff_t		bno,
	xfs_fileoff_t		end)
{
	struct kgetbmap		*p = out + bmv->bmv_entries;

	if (bmv->bmv_iflags & BMV_IF_NO_HOLES)
		return;

	p->bmv_block = -1;
	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, bno);
	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, end - bno);

	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
	bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
	bmv->bmv_entries++;
}

static inline bool
xfs_getbmap_full(
	struct getbmapx		*bmv)
{
	return bmv->bmv_length == 0 || bmv->bmv_entries >= bmv->bmv_count - 1;
}
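
/*
 * Note (assumption based on the getbmapx ABI): bmv_count is the total
 * number of slots in the caller's array including the header slot, so at
 * most bmv_count - 1 records can be returned; hence the "- 1" in the
 * fullness check above.
 */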

static bool
xfs_getbmap_next_rec(
	struct xfs_bmbt_irec	*rec,
	xfs_fileoff_t		total_end)
{
	xfs_fileoff_t		end = rec->br_startoff + rec->br_blockcount;

	if (end == total_end)
		return false;

	rec->br_startoff += rec->br_blockcount;
	if (!isnullstartblock(rec->br_startblock) &&
	    rec->br_startblock != DELAYSTARTBLOCK)
		rec->br_startblock += rec->br_blockcount;
	rec->br_blockcount = total_end - end;
	return true;
}
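
/*
 * Illustrative example: if a single 10-block bmbt record has only its first
 * 4 blocks shared, xfs_reflink_trim_around_shared() trims the reported
 * mapping to those 4 blocks, and xfs_getbmap_next_rec() then advances the
 * record by 4 blocks so the remaining 6 unshared blocks are emitted as a
 * second getbmapx record.
 */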

/*
 * Get inode's extents as described in bmv, and format for output.
 * Calls formatter to fill the user's buffer until all extents
 * are mapped, until the passed-in bmv->bmv_count slots have
 * been filled, or until the formatter short-circuits the loop,
 * if it is tracking filled-in extents on its own.
 */
int						/* error code */
xfs_getbmap(
	struct xfs_inode	*ip,
	struct getbmapx		*bmv,		/* user bmap structure */
	struct kgetbmap		*out)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			iflags = bmv->bmv_iflags;
	int			whichfork, lock, error = 0;
	int64_t			bmv_end, max_len;
	xfs_fileoff_t		bno, first_bno;
	struct xfs_ifork	*ifp;
	struct xfs_bmbt_irec	got, rec;
	xfs_filblks_t		len;
	struct xfs_iext_cursor	icur;

	if (bmv->bmv_iflags & ~BMV_IF_VALID)
		return -EINVAL;
#ifndef DEBUG
	/* Only allow CoW fork queries if we're debugging. */
	if (iflags & BMV_IF_COWFORK)
		return -EINVAL;
#endif
	if ((iflags & BMV_IF_ATTRFORK) && (iflags & BMV_IF_COWFORK))
		return -EINVAL;

	if (bmv->bmv_length < -1)
		return -EINVAL;
	bmv->bmv_entries = 0;
	if (bmv->bmv_length == 0)
		return 0;

	if (iflags & BMV_IF_ATTRFORK)
		whichfork = XFS_ATTR_FORK;
	else if (iflags & BMV_IF_COWFORK)
		whichfork = XFS_COW_FORK;
	else
		whichfork = XFS_DATA_FORK;

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	switch (whichfork) {
	case XFS_ATTR_FORK:
		lock = xfs_ilock_attr_map_shared(ip);
		if (!xfs_inode_has_attr_fork(ip))
			goto out_unlock_ilock;

		max_len = 1LL << 32;
		break;
	case XFS_COW_FORK:
		lock = XFS_ILOCK_SHARED;
		xfs_ilock(ip, lock);

		/* No CoW fork? Just return */
		if (!xfs_ifork_ptr(ip, whichfork))
			goto out_unlock_ilock;

		if (xfs_get_cowextsz_hint(ip))
			max_len = mp->m_super->s_maxbytes;
		else
			max_len = XFS_ISIZE(ip);
		break;
	case XFS_DATA_FORK:
		if (!(iflags & BMV_IF_DELALLOC) &&
		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_disk_size)) {
			error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
			if (error)
				goto out_unlock_iolock;

			/*
			 * Even after flushing the inode, there can still be
			 * delalloc blocks on the inode beyond EOF due to
			 * speculative preallocation.  These are not removed
			 * until the release function is called or the inode
			 * is inactivated.  Hence we cannot assert here that
			 * ip->i_delayed_blks == 0.
			 */
		}

		if (xfs_get_extsz_hint(ip) ||
		    (ip->i_diflags &
		     (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))
			max_len = mp->m_super->s_maxbytes;
		else
			max_len = XFS_ISIZE(ip);

		lock = xfs_ilock_data_map_shared(ip);
		break;
	}

	ifp = xfs_ifork_ptr(ip, whichfork);

	switch (ifp->if_format) {
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		break;
	case XFS_DINODE_FMT_LOCAL:
		/* Local format inode forks report no extents. */
		goto out_unlock_ilock;
	default:
		error = -EINVAL;
		goto out_unlock_ilock;
	}

	if (bmv->bmv_length == -1) {
		max_len = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, max_len));
		bmv->bmv_length = max(0LL, max_len - bmv->bmv_offset);
	}

	bmv_end = bmv->bmv_offset + bmv->bmv_length;

	first_bno = bno = XFS_BB_TO_FSBT(mp, bmv->bmv_offset);
	len = XFS_BB_TO_FSB(mp, bmv->bmv_length);

	error = xfs_iread_extents(NULL, ip, whichfork);
	if (error)
		goto out_unlock_ilock;

	if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
		/*
		 * Report a whole-file hole if the delalloc flag is set to
		 * stay compatible with the old implementation.
		 */
		if (iflags & BMV_IF_DELALLOC)
			xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
					XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
		goto out_unlock_ilock;
	}

	while (!xfs_getbmap_full(bmv)) {
		xfs_trim_extent(&got, first_bno, len);

		/*
		 * Report an entry for a hole if this extent doesn't directly
		 * follow the previous one.
		 */
		if (got.br_startoff > bno) {
			xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
					got.br_startoff);
			if (xfs_getbmap_full(bmv))
				break;
		}

		/*
		 * In order to report shared extents accurately, we report each
		 * distinct shared / unshared part of a single bmbt record with
		 * an individual getbmapx record.
		 */
		bno = got.br_startoff + got.br_blockcount;
		rec = got;
		do {
			error = xfs_getbmap_report_one(ip, bmv, out, bmv_end,
					&rec);
			if (error || xfs_getbmap_full(bmv))
				goto out_unlock_ilock;
		} while (xfs_getbmap_next_rec(&rec, bno));

		if (!xfs_iext_next_extent(ifp, &icur, &got)) {
			xfs_fileoff_t	end = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));

			if (bmv->bmv_entries > 0)
				out[bmv->bmv_entries - 1].bmv_oflags |=
								BMV_OF_LAST;

			if (whichfork != XFS_ATTR_FORK && bno < end &&
			    !xfs_getbmap_full(bmv)) {
				xfs_getbmap_report_hole(ip, bmv, out, bmv_end,
						bno, end);
			}
			break;
		}

		if (bno >= first_bno + len)
			break;
	}

out_unlock_ilock:
	xfs_iunlock(ip, lock);
out_unlock_iolock:
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return error;
}
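
/*
 * Hedged userspace sketch (not part of the original source): the usual
 * caller of this path is the XFS_IOC_GETBMAPX ioctl, roughly:
 *
 *	struct getbmapx bmx[33] = { 0 };	// slot 0 is the header
 *
 *	bmx[0].bmv_offset = 0;			// start, in 512-byte units
 *	bmx[0].bmv_length = -1;			// -1 == to end of file
 *	bmx[0].bmv_count  = 33;			// total slots, incl. header
 *	bmx[0].bmv_iflags = BMV_IF_PREALLOC;
 *	if (ioctl(fd, XFS_IOC_GETBMAPX, bmx) == 0)
 *		// bmx[1] .. bmx[bmx[0].bmv_entries] hold the mappings
 *
 * The exact header semantics are described in xfsctl(3); treat this as a
 * sketch, not a reference.
 */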

/*
 * Dead simple method of punching delayed allocation blocks from a range in
 * the inode.  This will always punch out both the start and end blocks, even
 * if the range only partially overlaps them, so it is up to the caller to
 * ensure that partial blocks are not passed in.
 */
int
xfs_bmap_punch_delalloc_range(
	struct xfs_inode	*ip,
	xfs_off_t		start_byte,
	xfs_off_t		end_byte)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = &ip->i_df;
	xfs_fileoff_t		start_fsb = XFS_B_TO_FSBT(mp, start_byte);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, end_byte);
	struct xfs_bmbt_irec	got, del;
	struct xfs_iext_cursor	icur;
	int			error = 0;

	ASSERT(!xfs_need_iread_extents(ifp));

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
		goto out_unlock;

	while (got.br_startoff + got.br_blockcount > start_fsb) {
		del = got;
		xfs_trim_extent(&del, start_fsb, end_fsb - start_fsb);

		/*
		 * A delete can push the cursor forward. Step back to the
		 * previous extent on non-delalloc or extents outside the
		 * target range.
		 */
		if (!del.br_blockcount ||
		    !isnullstartblock(del.br_startblock)) {
			if (!xfs_iext_prev_extent(ifp, &icur, &got))
				break;
			continue;
		}

		error = xfs_bmap_del_extent_delay(ip, XFS_DATA_FORK, &icur,
						  &got, &del);
		if (error || !xfs_iext_get_extent(ifp, &icur, &got))
			break;
	}

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
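
/*
 * Rounding example (illustrative, assuming 4096-byte blocks): punching
 * bytes 1000..9000 computes start_fsb == 0 (rounded down) and end_fsb == 3
 * (rounded up), so blocks 0-2 are punched in full.  This is why the comment
 * above insists that callers not pass in partial blocks they still care
 * about.
 */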

/*
 * Test whether it is appropriate to check an inode for and free post EOF
 * blocks. The 'force' parameter determines whether we should also consider
 * regular files that are marked preallocated or append-only.
 */
bool
xfs_can_free_eofblocks(
	struct xfs_inode	*ip,
	bool			force)
{
	struct xfs_bmbt_irec	imap;
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		end_fsb;
	xfs_fileoff_t		last_fsb;
	int			nimaps = 1;
	int			error;

	/*
	 * The caller must either hold the exclusive IO lock or be inactivating
	 * the inode, which guarantees there are no other users of the inode.
	 */
	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL) ||
	       (VFS_I(ip)->i_state & I_FREEING));

	/* prealloc/delalloc exists only on regular files */
	if (!S_ISREG(VFS_I(ip)->i_mode))
		return false;

	/*
	 * Zero sized files with no cached pages and no delalloc blocks will
	 * not have speculative prealloc/delalloc blocks to remove.
	 */
	if (VFS_I(ip)->i_size == 0 &&
	    VFS_I(ip)->i_mapping->nrpages == 0 &&
	    ip->i_delayed_blks == 0)
		return false;

	/* If we haven't read in the extent list, then don't do it now. */
	if (xfs_need_iread_extents(&ip->i_df))
		return false;

	/*
	 * Do not free real preallocated or append-only files unless the file
	 * has delalloc blocks and we are forced to remove them.
	 */
	if (ip->i_diflags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
		if (!force || ip->i_delayed_blks == 0)
			return false;

	/*
	 * Do not try to free post-EOF blocks if EOF is beyond the end of the
	 * range supported by the page cache, because the truncation will loop
	 * forever.
	 */
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
	if (XFS_IS_REALTIME_INODE(ip) && mp->m_sb.sb_rextsize > 1)
		end_fsb = xfs_rtb_roundup_rtx(mp, end_fsb);
	last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	if (last_fsb <= end_fsb)
		return false;

	/*
	 * Look up the mapping for the first block past EOF.  If we can't find
	 * it, there's nothing to free.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	error = xfs_bmapi_read(ip, end_fsb, last_fsb - end_fsb, &imap, &nimaps,
			0);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	if (error || nimaps == 0)
		return false;

	/*
	 * If there's a real mapping there or there are delayed allocation
	 * reservations, then we have post-EOF blocks to try to free.
	 */
	return imap.br_startblock != HOLESTARTBLOCK || ip->i_delayed_blks;
}

/*
 * This is called to free any blocks beyond eof. The caller must hold
 * IOLOCK_EXCL unless we are in the inode reclaim path and have the only
 * reference to the inode.
 */
int
xfs_free_eofblocks(
	struct xfs_inode	*ip)
{
	struct xfs_trans	*tp;
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	/* Attach the dquots to the inode up front. */
	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	/* Wait on dio to ensure i_size has settled. */
	inode_dio_wait(VFS_I(ip));

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
	if (error) {
		ASSERT(xfs_is_shutdown(mp));
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * Do not update the on-disk file size.  If we update the on-disk file
	 * size and then the system crashes before the contents of the file are
	 * flushed to disk then the files may be full of holes (ie NULL files
	 * bug).
	 */
	error = xfs_itruncate_extents_flags(&tp, ip, XFS_DATA_FORK,
				XFS_ISIZE(ip), XFS_BMAPI_NODISCARD);
	if (error)
		goto err_cancel;

	error = xfs_trans_commit(tp);
	if (error)
		goto out_unlock;

	xfs_inode_clear_eofblocks_tag(ip);
	goto out_unlock;

err_cancel:
	/*
	 * If we get an error at this point we simply don't
	 * bother truncating the file.
	 */
	xfs_trans_cancel(tp);
out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

int
xfs_alloc_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	xfs_mount_t		*mp = ip->i_mount;
	xfs_off_t		count;
	xfs_filblks_t		allocatesize_fsb;
	xfs_extlen_t		extsz, temp;
	xfs_fileoff_t		startoffset_fsb;
	xfs_fileoff_t		endoffset_fsb;
	int			rt;
	xfs_trans_t		*tp;
	xfs_bmbt_irec_t		imaps[1], *imapp;
	int			error;

	trace_xfs_alloc_file_space(ip);

	if (xfs_is_shutdown(mp))
		return -EIO;

	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	if (len <= 0)
		return -EINVAL;

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);

	count = len;
	imapp = &imaps[0];
	startoffset_fsb	= XFS_B_TO_FSBT(mp, offset);
	endoffset_fsb = XFS_B_TO_FSB(mp, offset + count);
	allocatesize_fsb = endoffset_fsb - startoffset_fsb;

	/*
	 * Allocate file space until done or until there is an error
	 */
	while (allocatesize_fsb && !error) {
		xfs_fileoff_t	s, e;
		unsigned int	dblocks, rblocks, resblks;
		int		nimaps = 1;

		/*
		 * Determine space reservations for data/realtime.
		 */
		if (unlikely(extsz)) {
			s = startoffset_fsb;
			do_div(s, extsz);
			s *= extsz;
			e = startoffset_fsb + allocatesize_fsb;
			div_u64_rem(startoffset_fsb, extsz, &temp);
			if (temp)
				e += temp;
			div_u64_rem(e, extsz, &temp);
			if (temp)
				e += extsz - temp;
		} else {
			s = 0;
			e = allocatesize_fsb;
		}

		/*
		 * The transaction reservation is limited to a 32-bit block
		 * count, hence we need to limit the number of blocks we are
		 * trying to reserve to avoid an overflow. We can't allocate
		 * more than @nimaps extents, and an extent is limited on disk
		 * to XFS_BMBT_MAX_EXTLEN (21 bits), so use that to enforce the
		 * limit.
		 */
		resblks = min_t(xfs_fileoff_t, (e - s),
				(XFS_MAX_BMBT_EXTLEN * nimaps));
		if (unlikely(rt)) {
			dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
			rblocks = resblks;
		} else {
			dblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
			rblocks = 0;
		}

		error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write,
				dblocks, rblocks, false, &tp);
		if (error)
			break;

		error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
				XFS_IEXT_ADD_NOSPLIT_CNT);
		if (error == -EFBIG)
			error = xfs_iext_count_upgrade(tp, ip,
					XFS_IEXT_ADD_NOSPLIT_CNT);
		if (error)
			goto error;

		error = xfs_bmapi_write(tp, ip, startoffset_fsb,
				allocatesize_fsb, XFS_BMAPI_PREALLOC, 0, imapp,
				&nimaps);
		if (error)
			goto error;

		ip->i_diflags |= XFS_DIFLAG_PREALLOC;
		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			break;

		/*
		 * If the allocator cannot find a single free extent large
		 * enough to cover the start block of the requested range,
		 * xfs_bmapi_write will return 0 but leave *nimaps set to 0.
		 *
		 * In that case we simply need to keep looping with the same
		 * startoffset_fsb so that one of the following allocations
		 * will eventually reach the requested range.
		 */
		if (nimaps) {
			startoffset_fsb += imapp->br_blockcount;
			allocatesize_fsb -= imapp->br_blockcount;
		}
	}

	return error;

error:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
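
/*
 * Reservation example (illustrative numbers): with an extent size hint of
 * extsz == 16 fsblocks, startoffset_fsb == 10 and allocatesize_fsb == 20,
 * the code above computes s == 0 (rounded down to the hint boundary), then
 * e == 30 + 10 == 40, rounded up to e == 48, so the transaction reserves
 * for up to 48 blocks even though only 20 were asked for; the padding
 * covers hint-aligned allocation on both ends of the range.
 */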

static int
xfs_unmap_extent(
	struct xfs_inode	*ip,
	xfs_fileoff_t		startoffset_fsb,
	xfs_filblks_t		len_fsb,
	int			*done)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	uint			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
	int			error;

	error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, resblks, 0,
			false, &tp);
	if (error)
		return error;

	error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
			XFS_IEXT_PUNCH_HOLE_CNT);
	if (error == -EFBIG)
		error = xfs_iext_count_upgrade(tp, ip, XFS_IEXT_PUNCH_HOLE_CNT);
	if (error)
		goto out_trans_cancel;

	error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, done);
	if (error)
		goto out_trans_cancel;

	error = xfs_trans_commit(tp);
out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}
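
/*
 * Note (assumption about the xfs_bunmapi() signature): the literal 2 passed
 * above is the nexts argument, capping the number of extents unmapped per
 * transaction so each iteration of the caller's loop stays within its fixed
 * reservation; *done is set once the whole range has been unmapped.
 */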

/* Caller must first wait for the completion of any pending DIOs if required. */
int
xfs_flush_unmap_range(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);
	xfs_off_t		rounding, start, end;
	int			error;

	rounding = max_t(xfs_off_t, mp->m_sb.sb_blocksize, PAGE_SIZE);
	start = round_down(offset, rounding);
	end = round_up(offset + len, rounding) - 1;

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (error)
		return error;
	truncate_pagecache_range(inode, start, end);
	return 0;
}
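
/*
 * Worked example (illustrative, assuming 4096-byte blocks and 4096-byte
 * pages): rounding == 4096, so flushing offset 6000 for len 3000 writes
 * back and truncates the page cache over bytes 4096..12287, i.e. the whole
 * pages/blocks touched by the range rather than just the range itself.
 */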

int
xfs_free_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		startoffset_fsb;
	xfs_fileoff_t		endoffset_fsb;
	int			done = 0, error;

	trace_xfs_free_file_space(ip);

	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	if (len <= 0)	/* if nothing being freed */
		return 0;

	startoffset_fsb = XFS_B_TO_FSB(mp, offset);
	endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);

	/* We can only free complete realtime extents. */
	if (XFS_IS_REALTIME_INODE(ip) && mp->m_sb.sb_rextsize > 1) {
		startoffset_fsb = xfs_rtb_roundup_rtx(mp, startoffset_fsb);
		endoffset_fsb = xfs_rtb_rounddown_rtx(mp, endoffset_fsb);
	}

	/*
	 * Need to zero the stuff we're not freeing, on disk.
	 */
	if (endoffset_fsb > startoffset_fsb) {
		while (!done) {
			error = xfs_unmap_extent(ip, startoffset_fsb,
					endoffset_fsb - startoffset_fsb, &done);
			if (error)
				return error;
		}
	}

	/*
	 * Now that we've unmapped all full blocks we'll have to zero out any
	 * partial block at the beginning and/or end.  xfs_zero_range is smart
	 * enough to skip any holes, including those we just created, but we
	 * must take care not to zero beyond EOF and enlarge i_size.
	 */
	if (offset >= XFS_ISIZE(ip))
		return 0;
	if (offset + len > XFS_ISIZE(ip))
		len = XFS_ISIZE(ip) - offset;
	error = xfs_zero_range(ip, offset, len, NULL);
	if (error)
		return error;

	/*
	 * If we zeroed right up to EOF and EOF straddles a page boundary we
	 * must make sure that the post-EOF area is also zeroed because the
	 * page could be mmap'd and xfs_zero_range doesn't do that for us.
	 * Writeback of the eof page will do this, albeit clumsily.
	 */
	if (offset + len >= XFS_ISIZE(ip) && offset_in_page(offset + len) > 0) {
		error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
				round_down(offset + len, PAGE_SIZE), LLONG_MAX);
	}

	return error;
}

static int
xfs_prepare_shift(
	struct xfs_inode	*ip,
	loff_t			offset)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	/*
	 * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
	 * into the accessible region of the file.
	 */
	if (xfs_can_free_eofblocks(ip, true)) {
		error = xfs_free_eofblocks(ip);
		if (error)
			return error;
	}

	/*
	 * Shift operations must stabilize the start block offset boundary along
	 * with the full range of the operation. If we don't, a COW writeback
	 * completion could race with an insert, front merge with the start
	 * extent (after split) during the shift and corrupt the file. Start
	 * with the block just prior to the start to stabilize the boundary.
	 */
	offset = round_down(offset, mp->m_sb.sb_blocksize);
	if (offset)
		offset -= mp->m_sb.sb_blocksize;

	/*
	 * Writeback and invalidate cache for the remainder of the file as we're
	 * about to shift down every extent from offset to EOF.
	 */
	error = xfs_flush_unmap_range(ip, offset, XFS_ISIZE(ip));
	if (error)
		return error;

	/*
	 * Clean out anything hanging around in the cow fork now that
	 * we've flushed all the dirty data out to disk to avoid having
	 * CoW extents at the wrong offsets.
	 */
	if (xfs_inode_has_cow_data(ip)) {
		error = xfs_reflink_cancel_cow_range(ip, offset, NULLFILEOFF,
				true);
		if (error)
			return error;
	}

	return 0;
}

/*
 * xfs_collapse_file_space()
 *	This routine frees disk space and shifts extents for the given file.
 *	The first thing we do is free the data blocks in the specified range
 *	by calling xfs_free_file_space(), which also syncs dirty data and
 *	invalidates the page cache over the region the collapse range is
 *	working on.  Extent records are then shifted to the left to cover
 *	the hole.
 * RETURNS:
 *	0 on success
 *	errno on error
 */
int
xfs_collapse_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;
	xfs_fileoff_t		next_fsb = XFS_B_TO_FSB(mp, offset + len);
	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
	bool			done = false;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));

	trace_xfs_collapse_file_space(ip);

	error = xfs_free_file_space(ip, offset, len);
	if (error)
		return error;

	error = xfs_prepare_shift(ip, offset);
	if (error)
		return error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	while (!done) {
		error = xfs_bmap_collapse_extents(tp, ip, &next_fsb, shift_fsb,
				&done);
		if (error)
			goto out_trans_cancel;
		if (done)
			break;

		/* finish any deferred frees and roll the transaction */
		error = xfs_defer_finish(&tp);
		if (error)
			goto out_trans_cancel;
	}

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * xfs_insert_file_space()
 *	This routine creates hole space by shifting extents for the given
 *	file.  The first thing we do is sync dirty data and invalidate the
 *	page cache over the region the insert range is working on.  Then an
 *	extent is split in two at the given offset by calling
 *	xfs_bmap_split_extent, and all extent records lying between [offset,
 *	last allocated extent] are shifted to the right to make room for the
 *	hole.
 * RETURNS:
 *	0 on success
 *	errno on error
 */
int
xfs_insert_file_space(
	struct xfs_inode	*ip,
	loff_t			offset,
	loff_t			len)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;
	xfs_fileoff_t		stop_fsb = XFS_B_TO_FSB(mp, offset);
	xfs_fileoff_t		next_fsb = NULLFSBLOCK;
	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
	bool			done = false;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));

	trace_xfs_insert_file_space(ip);

	error = xfs_bmap_can_insert_extents(ip, stop_fsb, shift_fsb);
	if (error)
		return error;

	error = xfs_prepare_shift(ip, offset);
	if (error)
		return error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
			XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
			XFS_IEXT_PUNCH_HOLE_CNT);
	if (error == -EFBIG)
		error = xfs_iext_count_upgrade(tp, ip, XFS_IEXT_PUNCH_HOLE_CNT);
	if (error)
		goto out_trans_cancel;

	/*
	 * The extent shifting code works at extent granularity.  So, if
	 * stop_fsb is not the starting block of an extent, we need to split
	 * the extent at stop_fsb.
	 */
	error = xfs_bmap_split_extent(tp, ip, stop_fsb);
	if (error)
		goto out_trans_cancel;

	do {
		error = xfs_defer_finish(&tp);
		if (error)
			goto out_trans_cancel;

		error = xfs_bmap_insert_extents(tp, ip, &next_fsb, shift_fsb,
				&done, stop_fsb);
		if (error)
			goto out_trans_cancel;
	} while (!done);

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * We need to check that the format of the data fork in the temporary inode is
 * valid for the target inode before doing the swap. This is not a problem with
 * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
 * data fork depending on the space the attribute fork is taking so we can get
 * invalid formats on the target inode.
 *
 * E.g. target has space for 7 extents in extent format, temp inode only has
 * space for 6.  If we defragment down to 7 extents, then the tmp format is a
 * btree, but when swapped it needs to be in extent format. Hence we can't just
 * blindly swap data forks on attr2 filesystems.
 *
 * Note that we check the swap in both directions so that we don't end up with
 * a corrupt temporary inode, either.
 *
 * Note that fixing the way xfs_fsr sets up the attribute fork in the source
 * inode will prevent this situation from occurring, so all we do here is
 * reject and log the attempt.  Basically we are putting the responsibility on
 * userspace to get this right.
 */
static int
xfs_swap_extents_check_format(
	struct xfs_inode	*ip,	/* target inode */
	struct xfs_inode	*tip)	/* tmp inode */
{
	struct xfs_ifork	*ifp = &ip->i_df;
	struct xfs_ifork	*tifp = &tip->i_df;

	/* User/group/project quota ids must match if quotas are enforced. */
	if (XFS_IS_QUOTA_ON(ip->i_mount) &&
	    (!uid_eq(VFS_I(ip)->i_uid, VFS_I(tip)->i_uid) ||
	     !gid_eq(VFS_I(ip)->i_gid, VFS_I(tip)->i_gid) ||
	     ip->i_projid != tip->i_projid))
		return -EINVAL;

	/* Should never get a local format */
	if (ifp->if_format == XFS_DINODE_FMT_LOCAL ||
	    tifp->if_format == XFS_DINODE_FMT_LOCAL)
		return -EINVAL;

	/*
	 * If the target inode has fewer extents than the temporary inode,
	 * why did userspace call us?
	 */
	if (ifp->if_nextents < tifp->if_nextents)
		return -EINVAL;

	/*
	 * If we have to use the (expensive) rmap swap method, we can
	 * handle any number of extents and any format.
	 */
	if (xfs_has_rmapbt(ip->i_mount))
		return 0;

	/*
	 * If the target inode is in extent form and the temp inode is in btree
	 * form then we will end up with the target inode in the wrong format,
	 * as we already know there are fewer extents in the temp inode.
	 */
	if (ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
	    tifp->if_format == XFS_DINODE_FMT_BTREE)
		return -EINVAL;

	/* Check temp in extent form to max in target */
	if (tifp->if_format == XFS_DINODE_FMT_EXTENTS &&
	    tifp->if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
		return -EINVAL;

	/* Check target in extent form to max in temp */
	if (ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
	    ifp->if_nextents > XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
		return -EINVAL;

	/*
	 * If we are in a btree format, check that the temp root block will fit
	 * in the target and that it has enough extents to be in btree format
	 * in the target.
	 *
	 * Note that we have to be careful to allow btree->extent conversions
	 * (a common defrag case) which will occur when the temp inode is in
	 * extent format...
	 */
	if (tifp->if_format == XFS_DINODE_FMT_BTREE) {
		if (xfs_inode_has_attr_fork(ip) &&
		    XFS_BMAP_BMDR_SPACE(tifp->if_broot) > xfs_inode_fork_boff(ip))
			return -EINVAL;
		if (tifp->if_nextents <= XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
			return -EINVAL;
	}

	/* Reciprocal target->temp btree format checks */
	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
		if (xfs_inode_has_attr_fork(tip) &&
		    XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > xfs_inode_fork_boff(tip))
			return -EINVAL;
		if (ifp->if_nextents <= XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
			return -EINVAL;
	}

	return 0;
}

static int
xfs_swap_extent_flush(
	struct xfs_inode	*ip)
{
	int	error;

	error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
	if (error)
		return error;
	truncate_pagecache_range(VFS_I(ip), 0, -1);

	/* Verify O_DIRECT for ftmp */
	if (VFS_I(ip)->i_mapping->nrpages)
		return -EINVAL;
	return 0;
}

/*
 * Move extents from one file to another, when rmap is enabled.
 */
STATIC int
xfs_swap_extent_rmap(
	struct xfs_trans		**tpp,
	struct xfs_inode		*ip,
	struct xfs_inode		*tip)
{
	struct xfs_trans		*tp = *tpp;
	struct xfs_bmbt_irec		irec;
	struct xfs_bmbt_irec		uirec;
	struct xfs_bmbt_irec		tirec;
	xfs_fileoff_t			offset_fsb;
	xfs_fileoff_t			end_fsb;
	xfs_filblks_t			count_fsb;
	int				error;
	xfs_filblks_t			ilen;
	xfs_filblks_t			rlen;
	int				nimaps;
	uint64_t			tip_flags2;

	/*
	 * If the source file has shared blocks, we must flag the donor
	 * file as having shared blocks so that we get the shared-block
	 * rmap functions when we go to fix up the rmaps.  The flags
	 * will be switched for real later.
	 */
	tip_flags2 = tip->i_diflags2;
	if (ip->i_diflags2 & XFS_DIFLAG2_REFLINK)
		tip->i_diflags2 |= XFS_DIFLAG2_REFLINK;

	offset_fsb = 0;
	end_fsb = XFS_B_TO_FSB(ip->i_mount, i_size_read(VFS_I(ip)));
	count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);

	while (count_fsb) {
		/* Read extent from the donor file */
		nimaps = 1;
		error = xfs_bmapi_read(tip, offset_fsb, count_fsb, &tirec,
				&nimaps, 0);
		if (error)
			goto out;
		ASSERT(nimaps == 1);
		ASSERT(tirec.br_startblock != DELAYSTARTBLOCK);

		trace_xfs_swap_extent_rmap_remap(tip, &tirec);
		ilen = tirec.br_blockcount;

		/* Unmap the old blocks in the source file. */
		while (tirec.br_blockcount) {
			ASSERT(tp->t_highest_agno == NULLAGNUMBER);
			trace_xfs_swap_extent_rmap_remap_piece(tip, &tirec);

			/* Read extent from the source file */
			nimaps = 1;
			error = xfs_bmapi_read(ip, tirec.br_startoff,
					tirec.br_blockcount, &irec,
					&nimaps, 0);
			if (error)
				goto out;
			ASSERT(nimaps == 1);
			ASSERT(tirec.br_startoff == irec.br_startoff);
			trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);

			/* Trim the extent. */
			uirec = tirec;
			uirec.br_blockcount = rlen = min_t(xfs_filblks_t,
					tirec.br_blockcount,
					irec.br_blockcount);
			trace_xfs_swap_extent_rmap_remap_piece(tip, &uirec);

			if (xfs_bmap_is_real_extent(&uirec)) {
				error = xfs_iext_count_may_overflow(ip,
						XFS_DATA_FORK,
						XFS_IEXT_SWAP_RMAP_CNT);
				if (error == -EFBIG)
					error = xfs_iext_count_upgrade(tp, ip,
							XFS_IEXT_SWAP_RMAP_CNT);
				if (error)
					goto out;
			}

			if (xfs_bmap_is_real_extent(&irec)) {
				error = xfs_iext_count_may_overflow(tip,
						XFS_DATA_FORK,
						XFS_IEXT_SWAP_RMAP_CNT);
				if (error == -EFBIG)
					error = xfs_iext_count_upgrade(tp, ip,
							XFS_IEXT_SWAP_RMAP_CNT);
				if (error)
					goto out;
			}

			/* Remove the mapping from the donor file. */
			xfs_bmap_unmap_extent(tp, tip, &uirec);

			/* Remove the mapping from the source file. */
			xfs_bmap_unmap_extent(tp, ip, &irec);

			/* Map the donor file's blocks into the source file. */
			xfs_bmap_map_extent(tp, ip, &uirec);

			/* Map the source file's blocks into the donor file. */
			xfs_bmap_map_extent(tp, tip, &irec);

			error = xfs_defer_finish(tpp);
			tp = *tpp;
			if (error)
				goto out;

			tirec.br_startoff += rlen;
			if (tirec.br_startblock != HOLESTARTBLOCK &&
			    tirec.br_startblock != DELAYSTARTBLOCK)
				tirec.br_startblock += rlen;
			tirec.br_blockcount -= rlen;
		}

		/* Roll on... */
		count_fsb -= ilen;
		offset_fsb += ilen;
	}

	tip->i_diflags2 = tip_flags2;
	return 0;

out:
	trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
	tip->i_diflags2 = tip_flags2;
	return error;
}

/* Swap the extents of two files by swapping data forks. */
STATIC int
xfs_swap_extent_forks(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_inode	*tip,
	int			*src_log_flags,
	int			*target_log_flags)
{
	xfs_filblks_t		aforkblks = 0;
	xfs_filblks_t		taforkblks = 0;
	xfs_extnum_t		junk;
	uint64_t		tmp;
	int			error;

	/*
	 * Count the number of extended attribute blocks
	 */
	if (xfs_inode_has_attr_fork(ip) && ip->i_af.if_nextents > 0 &&
	    ip->i_af.if_format != XFS_DINODE_FMT_LOCAL) {
		error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &junk,
				&aforkblks);
		if (error)
			return error;
	}
	if (xfs_inode_has_attr_fork(tip) && tip->i_af.if_nextents > 0 &&
	    tip->i_af.if_format != XFS_DINODE_FMT_LOCAL) {
		error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK, &junk,
				&taforkblks);
		if (error)
			return error;
	}

	/*
	 * Btree format (v3) inodes have the inode number stamped in the bmbt
	 * block headers. We can't start changing the bmbt blocks until the
	 * inode owner change is logged so recovery does the right thing in the
	 * event of a crash. Set the owner change log flags now and leave the
	 * bmbt scan as the last step.
	 */
	if (xfs_has_v3inodes(ip->i_mount)) {
		if (ip->i_df.if_format == XFS_DINODE_FMT_BTREE)
			(*target_log_flags) |= XFS_ILOG_DOWNER;
		if (tip->i_df.if_format == XFS_DINODE_FMT_BTREE)
			(*src_log_flags) |= XFS_ILOG_DOWNER;
	}

	/*
	 * Swap the data forks of the inodes
	 */
	swap(ip->i_df, tip->i_df);

	/*
	 * Fix the on-disk inode values
	 */
	tmp = (uint64_t)ip->i_nblocks;
	ip->i_nblocks = tip->i_nblocks - taforkblks + aforkblks;
	tip->i_nblocks = tmp + taforkblks - aforkblks;

	/*
	 * The extents in the source inode could still contain speculative
	 * preallocation beyond EOF (e.g. the file is open but not modified
	 * while defrag is in progress). In that case, we need to copy over the
	 * number of delalloc blocks the data fork in the source inode is
	 * tracking beyond EOF so that when the fork is truncated away when the
	 * temporary inode is unlinked we don't underrun the i_delayed_blks
	 * counter on that inode.
	 */
	ASSERT(tip->i_delayed_blks == 0);
	tip->i_delayed_blks = ip->i_delayed_blks;
	ip->i_delayed_blks = 0;

	switch (ip->i_df.if_format) {
	case XFS_DINODE_FMT_EXTENTS:
		(*src_log_flags) |= XFS_ILOG_DEXT;
		break;
	case XFS_DINODE_FMT_BTREE:
		ASSERT(!xfs_has_v3inodes(ip->i_mount) ||
		       (*src_log_flags & XFS_ILOG_DOWNER));
		(*src_log_flags) |= XFS_ILOG_DBROOT;
		break;
	}

	switch (tip->i_df.if_format) {
	case XFS_DINODE_FMT_EXTENTS:
		(*target_log_flags) |= XFS_ILOG_DEXT;
		break;
	case XFS_DINODE_FMT_BTREE:
		(*target_log_flags) |= XFS_ILOG_DBROOT;
		ASSERT(!xfs_has_v3inodes(ip->i_mount) ||
		       (*target_log_flags & XFS_ILOG_DOWNER));
		break;
	}

	return 0;
}

/*
 * Fix up the owners of the bmbt blocks to refer to the current inode. The
 * change owner scan attempts to order all modified buffers in the current
 * transaction. In the event of ordered buffer failure, the offending buffer is
 * physically logged as a fallback and the scan returns -EAGAIN. We must roll
 * the transaction in this case to replenish the fallback log reservation and
 * restart the scan. This process repeats until the scan completes.
 */
static int
xfs_swap_change_owner(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	struct xfs_inode	*tmpip)
{
	int			error;
	struct xfs_trans	*tp = *tpp;

	do {
		error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK, ip->i_ino,
					      NULL);
		/* success or fatal error */
		if (error != -EAGAIN)
			break;

		error = xfs_trans_roll(tpp);
		if (error)
			break;
		tp = *tpp;

		/*
		 * Redirty both inodes so they can relog and keep the log tail
		 * moving forward.
		 */
		xfs_trans_ijoin(tp, ip, 0);
		xfs_trans_ijoin(tp, tmpip, 0);
		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		xfs_trans_log_inode(tp, tmpip, XFS_ILOG_CORE);
	} while (true);

	return error;
}

int
xfs_swap_extents(
	struct xfs_inode	*ip,	/* target inode */
	struct xfs_inode	*tip,	/* tmp inode */
	struct xfs_swapext	*sxp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	struct xfs_bstat	*sbp = &sxp->sx_stat;
	int			src_log_flags, target_log_flags;
	int			error = 0;
	uint64_t		f;
	int			resblks = 0;
	unsigned int		flags = 0;
	struct timespec64	ctime, mtime;

	/*
	 * Lock the inodes against other IO, page faults and truncate to
	 * begin with.  Then we can ensure the inodes are flushed and have no
	 * page cache safely. Once we have done this we can take the ilocks and
	 * do the rest of the checks.
	 */
	lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
	filemap_invalidate_lock_two(VFS_I(ip)->i_mapping,
				    VFS_I(tip)->i_mapping);

	/* Verify that both files have the same format */
	if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
		error = -EINVAL;
		goto out_unlock;
	}

	/* Verify both files are either real-time or non-realtime */
	if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
		error = -EINVAL;
		goto out_unlock;
	}

	error = xfs_qm_dqattach(ip);
	if (error)
		goto out_unlock;

	error = xfs_qm_dqattach(tip);
	if (error)
		goto out_unlock;

	error = xfs_swap_extent_flush(ip);
	if (error)
		goto out_unlock;
	error = xfs_swap_extent_flush(tip);
	if (error)
		goto out_unlock;

	if (xfs_inode_has_cow_data(tip)) {
		error = xfs_reflink_cancel_cow_range(tip, 0, NULLFILEOFF, true);
		if (error)
			goto out_unlock;
	}

	/*
	 * Extent "swapping" with rmap requires a permanent reservation and
	 * a block reservation because it's really just a remap operation
	 * performed with log redo items!
	 */
	if (xfs_has_rmapbt(mp)) {
		int		w = XFS_DATA_FORK;
		uint32_t	ipnext = ip->i_df.if_nextents;
		uint32_t	tipnext	= tip->i_df.if_nextents;

		/*
		 * Conceptually this shouldn't affect the shape of either bmbt,
		 * but since we atomically move extents one by one, we reserve
		 * enough space to rebuild both trees.
		 */
		resblks = XFS_SWAP_RMAP_SPACE_RES(mp, ipnext, w);
		resblks += XFS_SWAP_RMAP_SPACE_RES(mp, tipnext, w);

		/*
		 * If either inode straddles a bmapbt block allocation boundary,
		 * the rmapbt algorithm triggers repeated allocs and frees as
		 * extents are remapped. This can exhaust the block reservation
		 * prematurely and cause shutdown. Return freed blocks to the
		 * transaction reservation to counter this behavior.
		 */
		flags |= XFS_TRANS_RES_FDBLKS;
	}
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, flags,
				&tp);
	if (error)
		goto out_unlock;

	/*
	 * Lock and join the inodes to the transaction so that transaction commit
	 * or cancel will unlock the inodes from this point onwards.
	 */
	xfs_lock_two_inodes(ip, XFS_ILOCK_EXCL, tip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);
	xfs_trans_ijoin(tp, tip, 0);

	/* Verify all data are being swapped */
	if (sxp->sx_offset != 0 ||
	    sxp->sx_length != ip->i_disk_size ||
	    sxp->sx_length != tip->i_disk_size) {
		error = -EFAULT;
		goto out_trans_cancel;
	}

	trace_xfs_swap_extent_before(ip, 0);
	trace_xfs_swap_extent_before(tip, 1);

	/* check inode formats now that data is flushed */
	error = xfs_swap_extents_check_format(ip, tip);
	if (error) {
		xfs_notice(mp,
		    "%s: inode 0x%llx format is incompatible for exchanging.",
				__func__, ip->i_ino);
		goto out_trans_cancel;
	}

	/*
	 * Compare the current change & modify times with those passed in.
	 * If they differ, we abort this swap.  This is the mechanism used to
	 * assure the calling process that the file was not changed out from
	 * under it.
	 */
	ctime = inode_get_ctime(VFS_I(ip));
	mtime = inode_get_mtime(VFS_I(ip));
	if ((sbp->bs_ctime.tv_sec != ctime.tv_sec) ||
	    (sbp->bs_ctime.tv_nsec != ctime.tv_nsec) ||
	    (sbp->bs_mtime.tv_sec != mtime.tv_sec) ||
	    (sbp->bs_mtime.tv_nsec != mtime.tv_nsec)) {
		error = -EBUSY;
		goto out_trans_cancel;
	}

	/*
	 * Note the trickiness in setting the log flags - we set the owner log
	 * flag on the opposite inode (i.e. the inode we are setting the new
	 * owner to be) because once we swap the forks and log that, log
	 * recovery is going to see the fork as owned by the swapped inode,
	 * not the pre-swapped inodes.
	 */
	src_log_flags = XFS_ILOG_CORE;
	target_log_flags = XFS_ILOG_CORE;

	if (xfs_has_rmapbt(mp))
		error = xfs_swap_extent_rmap(&tp, ip, tip);
	else
		error = xfs_swap_extent_forks(tp, ip, tip, &src_log_flags,
				&target_log_flags);
	if (error)
		goto out_trans_cancel;

	/* Do we have to swap reflink flags? */
	if ((ip->i_diflags2 & XFS_DIFLAG2_REFLINK) ^
	    (tip->i_diflags2 & XFS_DIFLAG2_REFLINK)) {
		f = ip->i_diflags2 & XFS_DIFLAG2_REFLINK;
		ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
		ip->i_diflags2 |= tip->i_diflags2 & XFS_DIFLAG2_REFLINK;
		tip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
		tip->i_diflags2 |= f & XFS_DIFLAG2_REFLINK;
	}

	/* Swap the cow forks. */
	if (xfs_has_reflink(mp)) {
		ASSERT(!ip->i_cowfp ||
		       ip->i_cowfp->if_format == XFS_DINODE_FMT_EXTENTS);
		ASSERT(!tip->i_cowfp ||
		       tip->i_cowfp->if_format == XFS_DINODE_FMT_EXTENTS);

		swap(ip->i_cowfp, tip->i_cowfp);

		if (ip->i_cowfp && ip->i_cowfp->if_bytes)
			xfs_inode_set_cowblocks_tag(ip);
		else
			xfs_inode_clear_cowblocks_tag(ip);
		if (tip->i_cowfp && tip->i_cowfp->if_bytes)
			xfs_inode_set_cowblocks_tag(tip);
		else
			xfs_inode_clear_cowblocks_tag(tip);
	}

	xfs_trans_log_inode(tp, ip,  src_log_flags);
	xfs_trans_log_inode(tp, tip, target_log_flags);

	/*
	 * The extent forks have been swapped, but crc=1,rmapbt=0 filesystems
	 * have inode number owner values in the bmbt blocks that still refer to
	 * the old inode. Scan each bmbt to fix up the owner values with the
	 * inode number of the current inode.
	 */
	if (src_log_flags & XFS_ILOG_DOWNER) {
		error = xfs_swap_change_owner(&tp, ip, tip);
		if (error)
			goto out_trans_cancel;
	}
	if (target_log_flags & XFS_ILOG_DOWNER) {
		error = xfs_swap_change_owner(&tp, tip, ip);
		if (error)
			goto out_trans_cancel;
	}

	/*
	 * If this is a synchronous mount, make sure that the
	 * transaction goes to disk before returning to the user.
	 */
	if (xfs_has_wsync(mp))
		xfs_trans_set_sync(tp);

	error = xfs_trans_commit(tp);

	trace_xfs_swap_extent_after(ip, 0);
	trace_xfs_swap_extent_after(tip, 1);

out_unlock_ilock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	xfs_iunlock(tip, XFS_ILOCK_EXCL);
out_unlock:
	filemap_invalidate_unlock_two(VFS_I(ip)->i_mapping,
				      VFS_I(tip)->i_mapping);
	unlock_two_nondirectories(VFS_I(ip), VFS_I(tip));
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock_ilock;
}
1856