1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4  * Copyright (c) 2012 Red Hat, Inc.
5  * All Rights Reserved.
6  */
7 #include "xfs.h"
8 #include "xfs_fs.h"
9 #include "xfs_shared.h"
10 #include "xfs_format.h"
11 #include "xfs_log_format.h"
12 #include "xfs_trans_resv.h"
13 #include "xfs_bit.h"
14 #include "xfs_mount.h"
15 #include "xfs_defer.h"
16 #include "xfs_inode.h"
17 #include "xfs_btree.h"
18 #include "xfs_trans.h"
19 #include "xfs_alloc.h"
20 #include "xfs_bmap.h"
21 #include "xfs_bmap_util.h"
22 #include "xfs_bmap_btree.h"
23 #include "xfs_rtalloc.h"
24 #include "xfs_error.h"
25 #include "xfs_quota.h"
26 #include "xfs_trans_space.h"
27 #include "xfs_trace.h"
28 #include "xfs_icache.h"
29 #include "xfs_iomap.h"
30 #include "xfs_reflink.h"
31 
32 /* Kernel only BMAP related definitions and functions */
33 
34 /*
35  * Convert the given file system block to a disk block.  We have to treat it
36  * differently based on whether the file is a real time file or not, because the
37  * bmap code does.
38  */
39 xfs_daddr_t
40 xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
41 {
42 	if (XFS_IS_REALTIME_INODE(ip))
43 		return XFS_FSB_TO_BB(ip->i_mount, fsb);
44 	return XFS_FSB_TO_DADDR(ip->i_mount, fsb);
45 }
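
/*
 * Example: with 4k blocks (sb_blocklog == 12), a realtime block number is
 * already linear on the rt device and only needs scaling by
 * 2^(12 - 9) = 8 to yield 512 byte basic blocks, whereas a data device
 * fsb encodes an AG number and AG-relative block that XFS_FSB_TO_DADDR()
 * must decompose first.
 */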
46 
47 /*
48  * Routine to zero an extent on disk allocated to the specific inode.
49  *
50  * The VFS functions take a linearised filesystem block offset, so we have to
51  * convert the sparse xfs fsb to the right format first.
52  * VFS types are real funky, too.
53  */
54 int
55 xfs_zero_extent(
56 	struct xfs_inode	*ip,
57 	xfs_fsblock_t		start_fsb,
58 	xfs_off_t		count_fsb)
59 {
60 	struct xfs_mount	*mp = ip->i_mount;
61 	struct xfs_buftarg	*target = xfs_inode_buftarg(ip);
62 	xfs_daddr_t		sector = xfs_fsb_to_db(ip, start_fsb);
63 	sector_t		block = XFS_BB_TO_FSBT(mp, sector);
64 
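	/*
	 * The shifts below convert filesystem blocks to the 512 byte units
	 * the block layer expects: with 4k blocks, s_blocksize_bits - 9 is
	 * 3, so one fsb covers 8 sectors.
	 */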
65 	return blkdev_issue_zeroout(target->bt_bdev,
66 		block << (mp->m_super->s_blocksize_bits - 9),
67 		count_fsb << (mp->m_super->s_blocksize_bits - 9),
68 		GFP_NOFS, 0);
69 }
70 
71 #ifdef CONFIG_XFS_RT
72 int
73 xfs_bmap_rtalloc(
74 	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
75 {
76 	int		error;		/* error return value */
77 	xfs_mount_t	*mp;		/* mount point structure */
78 	xfs_extlen_t	prod = 0;	/* product factor for allocators */
79 	xfs_extlen_t	mod = 0;	/* alignment remainder */
80 	xfs_extlen_t	ralen = 0;	/* realtime allocation length */
81 	xfs_extlen_t	align;		/* minimum allocation alignment */
82 	xfs_rtblock_t	rtb;
83 
84 	mp = ap->ip->i_mount;
85 	align = xfs_get_extsz_hint(ap->ip);
86 	prod = align / mp->m_sb.sb_rextsize;
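	/*
	 * prod is the extent size hint expressed in rt extents, e.g. a 1MB
	 * hint over a 256k rt extent size gives prod = 4.  It is passed to
	 * the rt allocator as a length multiple, but only when the request
	 * is perfectly aligned (see below).
	 */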
87 	error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
88 					align, 1, ap->eof, 0,
89 					ap->conv, &ap->offset, &ap->length);
90 	if (error)
91 		return error;
92 	ASSERT(ap->length);
93 	ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);
94 
95 	/*
96 	 * If the offset & length are not perfectly aligned
97 	 * then kill prod, it will just get us in trouble.
98 	 */
99 	div_u64_rem(ap->offset, align, &mod);
100 	if (mod || ap->length % align)
101 		prod = 1;
102 	/*
103 	 * Set ralen to be the actual requested length in rtextents.
104 	 */
105 	ralen = ap->length / mp->m_sb.sb_rextsize;
106 	/*
107 	 * If the old value was close enough to MAXEXTLEN that
108 	 * we rounded up to it, cut it back so it's valid again.
109 	 * Note that if it's a really large request (bigger than
110 	 * MAXEXTLEN), we don't hear about that number, and can't
111 	 * adjust the starting point to match it.
112 	 */
113 	if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
114 		ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
115 
116 	/*
117 	 * Lock out modifications to both the RT bitmap and summary inodes
118 	 */
119 	xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
120 	xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
121 	xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
122 	xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);
123 
124 	/*
125 	 * If it's an allocation to an empty file at offset 0,
126 	 * pick an extent that will space things out in the rt area.
127 	 */
128 	if (ap->eof && ap->offset == 0) {
129 		xfs_rtblock_t rtx; /* realtime extent no */
130 
131 		error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
132 		if (error)
133 			return error;
134 		ap->blkno = rtx * mp->m_sb.sb_rextsize;
135 	} else {
136 		ap->blkno = 0;
137 	}
138 
139 	xfs_bmap_adjacent(ap);
140 
141 	/*
142 	 * Realtime allocation, done through xfs_rtallocate_extent.
143 	 */
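	/*
	 * ap->blkno and ap->length are converted to rt extent units here
	 * and scaled back up to filesystem blocks once the allocation
	 * succeeds.
	 */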
144 	do_div(ap->blkno, mp->m_sb.sb_rextsize);
145 	rtb = ap->blkno;
146 	ap->length = ralen;
147 	error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
148 				&ralen, ap->wasdel, prod, &rtb);
149 	if (error)
150 		return error;
151 
152 	ap->blkno = rtb;
153 	if (ap->blkno != NULLFSBLOCK) {
154 		ap->blkno *= mp->m_sb.sb_rextsize;
155 		ralen *= mp->m_sb.sb_rextsize;
156 		ap->length = ralen;
157 		ap->ip->i_nblocks += ralen;
158 		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
159 		if (ap->wasdel)
160 			ap->ip->i_delayed_blks -= ralen;
161 		/*
162 		 * Adjust the disk quota also. This was reserved
163 		 * earlier.
164 		 */
165 		xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
166 			ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
167 					XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
168 	} else {
169 		ap->length = 0;
170 	}
171 	return 0;
172 }
173 #endif /* CONFIG_XFS_RT */
174 
175 /*
176  * Extent tree block counting routines.
177  */
178 
179 /*
180  * Count leaf blocks given a range of extent records.  Delayed allocation
181  * extents are not counted towards the totals.
182  */
183 xfs_extnum_t
184 xfs_bmap_count_leaves(
185 	struct xfs_ifork	*ifp,
186 	xfs_filblks_t		*count)
187 {
188 	struct xfs_iext_cursor	icur;
189 	struct xfs_bmbt_irec	got;
190 	xfs_extnum_t		numrecs = 0;
191 
192 	for_each_xfs_iext(ifp, &icur, &got) {
193 		if (!isnullstartblock(got.br_startblock)) {
194 			*count += got.br_blockcount;
195 			numrecs++;
196 		}
197 	}
198 
199 	return numrecs;
200 }
201 
202 /*
203  * Count fsblocks of the given fork.  Delayed allocation extents are
204  * not counted towards the totals.
205  */
206 int
207 xfs_bmap_count_blocks(
208 	struct xfs_trans	*tp,
209 	struct xfs_inode	*ip,
210 	int			whichfork,
211 	xfs_extnum_t		*nextents,
212 	xfs_filblks_t		*count)
213 {
214 	struct xfs_mount	*mp = ip->i_mount;
215 	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
216 	struct xfs_btree_cur	*cur;
217 	xfs_extlen_t		btblocks = 0;
218 	int			error;
219 
220 	*nextents = 0;
221 	*count = 0;
222 
223 	if (!ifp)
224 		return 0;
225 
226 	switch (ifp->if_format) {
227 	case XFS_DINODE_FMT_BTREE:
228 		error = xfs_iread_extents(tp, ip, whichfork);
229 		if (error)
230 			return error;
231 
232 		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
233 		error = xfs_btree_count_blocks(cur, &btblocks);
234 		xfs_btree_del_cursor(cur, error);
235 		if (error)
236 			return error;
237 
238 		/*
239 		 * xfs_btree_count_blocks includes the root block contained in
240 		 * the inode fork in @btblocks, so subtract one because we're
241 		 * only interested in allocated disk blocks.
242 		 */
243 		*count += btblocks - 1;
244 
245 		/* fall through */
246 	case XFS_DINODE_FMT_EXTENTS:
247 		*nextents = xfs_bmap_count_leaves(ifp, count);
248 		break;
249 	}
250 
251 	return 0;
252 }
253 
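/*
 * Format one bmbt record as a getbmapx entry.  All getbmapx offsets and
 * lengths are expressed in 512 byte basic blocks, hence the
 * XFS_FSB_TO_BB() conversions below.
 */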
254 static int
255 xfs_getbmap_report_one(
256 	struct xfs_inode	*ip,
257 	struct getbmapx		*bmv,
258 	struct kgetbmap		*out,
259 	int64_t			bmv_end,
260 	struct xfs_bmbt_irec	*got)
261 {
262 	struct kgetbmap		*p = out + bmv->bmv_entries;
263 	bool			shared = false;
264 	int			error;
265 
266 	error = xfs_reflink_trim_around_shared(ip, got, &shared);
267 	if (error)
268 		return error;
269 
270 	if (isnullstartblock(got->br_startblock) ||
271 	    got->br_startblock == DELAYSTARTBLOCK) {
272 		/*
273 		 * Delalloc extents that start beyond EOF can occur due to
274 		 * speculative EOF allocation when the delalloc extent is larger
275 		 * than the largest freespace extent at conversion time.  These
276 		 * extents cannot be converted by data writeback, so can exist
277 		 * here even if we are not supposed to be finding delalloc
278 		 * extents.
279 		 */
280 		if (got->br_startoff < XFS_B_TO_FSB(ip->i_mount, XFS_ISIZE(ip)))
281 			ASSERT((bmv->bmv_iflags & BMV_IF_DELALLOC) != 0);
282 
283 		p->bmv_oflags |= BMV_OF_DELALLOC;
284 		p->bmv_block = -2;
285 	} else {
286 		p->bmv_block = xfs_fsb_to_db(ip, got->br_startblock);
287 	}
288 
289 	if (got->br_state == XFS_EXT_UNWRITTEN &&
290 	    (bmv->bmv_iflags & BMV_IF_PREALLOC))
291 		p->bmv_oflags |= BMV_OF_PREALLOC;
292 
293 	if (shared)
294 		p->bmv_oflags |= BMV_OF_SHARED;
295 
296 	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, got->br_startoff);
297 	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, got->br_blockcount);
298 
299 	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
300 	bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
301 	bmv->bmv_entries++;
302 	return 0;
303 }
304 
305 static void
306 xfs_getbmap_report_hole(
307 	struct xfs_inode	*ip,
308 	struct getbmapx		*bmv,
309 	struct kgetbmap		*out,
310 	int64_t			bmv_end,
311 	xfs_fileoff_t		bno,
312 	xfs_fileoff_t		end)
313 {
314 	struct kgetbmap		*p = out + bmv->bmv_entries;
315 
316 	if (bmv->bmv_iflags & BMV_IF_NO_HOLES)
317 		return;
318 
319 	p->bmv_block = -1;
320 	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, bno);
321 	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, end - bno);
322 
323 	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
324 	bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
325 	bmv->bmv_entries++;
326 }
327 
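/*
 * The userspace bmv_count includes the slot occupied by the getbmapx
 * header itself, so only bmv_count - 1 output entries are available.
 */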
328 static inline bool
329 xfs_getbmap_full(
330 	struct getbmapx		*bmv)
331 {
332 	return bmv->bmv_length == 0 || bmv->bmv_entries >= bmv->bmv_count - 1;
333 }
334 
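/*
 * Advance @rec past the piece just reported so that each shared/unshared
 * part of a single bmbt record can be emitted as its own entry.  Returns
 * false once the record, which ends at @total_end, is exhausted.
 */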
335 static bool
336 xfs_getbmap_next_rec(
337 	struct xfs_bmbt_irec	*rec,
338 	xfs_fileoff_t		total_end)
339 {
340 	xfs_fileoff_t		end = rec->br_startoff + rec->br_blockcount;
341 
342 	if (end == total_end)
343 		return false;
344 
345 	rec->br_startoff += rec->br_blockcount;
346 	if (!isnullstartblock(rec->br_startblock) &&
347 	    rec->br_startblock != DELAYSTARTBLOCK)
348 		rec->br_startblock += rec->br_blockcount;
349 	rec->br_blockcount = total_end - end;
350 	return true;
351 }
352 
353 /*
354  * Get inode's extents as described in bmv, and format for output.
355  * Calls formatter to fill the user's buffer until all extents
356  * are mapped, until the passed-in bmv->bmv_count slots have
357  * been filled, or until the formatter short-circuits the loop,
358  * if it is tracking filled-in extents on its own.
359  */
360 int						/* error code */
361 xfs_getbmap(
362 	struct xfs_inode	*ip,
363 	struct getbmapx		*bmv,		/* user bmap structure */
364 	struct kgetbmap		*out)
365 {
366 	struct xfs_mount	*mp = ip->i_mount;
367 	int			iflags = bmv->bmv_iflags;
368 	int			whichfork, lock, error = 0;
369 	int64_t			bmv_end, max_len;
370 	xfs_fileoff_t		bno, first_bno;
371 	struct xfs_ifork	*ifp;
372 	struct xfs_bmbt_irec	got, rec;
373 	xfs_filblks_t		len;
374 	struct xfs_iext_cursor	icur;
375 
376 	if (bmv->bmv_iflags & ~BMV_IF_VALID)
377 		return -EINVAL;
378 #ifndef DEBUG
379 	/* Only allow CoW fork queries if we're debugging. */
380 	if (iflags & BMV_IF_COWFORK)
381 		return -EINVAL;
382 #endif
383 	if ((iflags & BMV_IF_ATTRFORK) && (iflags & BMV_IF_COWFORK))
384 		return -EINVAL;
385 
386 	if (bmv->bmv_length < -1)
387 		return -EINVAL;
388 	bmv->bmv_entries = 0;
389 	if (bmv->bmv_length == 0)
390 		return 0;
391 
392 	if (iflags & BMV_IF_ATTRFORK)
393 		whichfork = XFS_ATTR_FORK;
394 	else if (iflags & BMV_IF_COWFORK)
395 		whichfork = XFS_COW_FORK;
396 	else
397 		whichfork = XFS_DATA_FORK;
398 	ifp = XFS_IFORK_PTR(ip, whichfork);
399 
400 	xfs_ilock(ip, XFS_IOLOCK_SHARED);
401 	switch (whichfork) {
402 	case XFS_ATTR_FORK:
403 		if (!XFS_IFORK_Q(ip))
404 			goto out_unlock_iolock;
405 
406 		max_len = 1LL << 32;
407 		lock = xfs_ilock_attr_map_shared(ip);
408 		break;
409 	case XFS_COW_FORK:
410 		/* No CoW fork? Just return */
411 		if (!ifp)
412 			goto out_unlock_iolock;
413 
414 		if (xfs_get_cowextsz_hint(ip))
415 			max_len = mp->m_super->s_maxbytes;
416 		else
417 			max_len = XFS_ISIZE(ip);
418 
419 		lock = XFS_ILOCK_SHARED;
420 		xfs_ilock(ip, lock);
421 		break;
422 	case XFS_DATA_FORK:
423 		if (!(iflags & BMV_IF_DELALLOC) &&
424 		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_disk_size)) {
425 			error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
426 			if (error)
427 				goto out_unlock_iolock;
428 
429 			/*
430 			 * Even after flushing the inode, there can still be
431 			 * delalloc blocks on the inode beyond EOF due to
432 			 * speculative preallocation.  These are not removed
433 			 * until the release function is called or the inode
434 			 * is inactivated.  Hence we cannot assert here that
435 			 * ip->i_delayed_blks == 0.
436 			 */
437 		}
438 
439 		if (xfs_get_extsz_hint(ip) ||
440 		    (ip->i_diflags &
441 		     (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))
442 			max_len = mp->m_super->s_maxbytes;
443 		else
444 			max_len = XFS_ISIZE(ip);
445 
446 		lock = xfs_ilock_data_map_shared(ip);
447 		break;
448 	}
449 
450 	switch (ifp->if_format) {
451 	case XFS_DINODE_FMT_EXTENTS:
452 	case XFS_DINODE_FMT_BTREE:
453 		break;
454 	case XFS_DINODE_FMT_LOCAL:
455 		/* Local format inode forks report no extents. */
456 		goto out_unlock_ilock;
457 	default:
458 		error = -EINVAL;
459 		goto out_unlock_ilock;
460 	}
461 
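	/* A bmv_length of -1 means "map to the end of the file". */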
462 	if (bmv->bmv_length == -1) {
463 		max_len = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, max_len));
464 		bmv->bmv_length = max(0LL, max_len - bmv->bmv_offset);
465 	}
466 
467 	bmv_end = bmv->bmv_offset + bmv->bmv_length;
468 
469 	first_bno = bno = XFS_BB_TO_FSBT(mp, bmv->bmv_offset);
470 	len = XFS_BB_TO_FSB(mp, bmv->bmv_length);
471 
472 	error = xfs_iread_extents(NULL, ip, whichfork);
473 	if (error)
474 		goto out_unlock_ilock;
475 
476 	if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
477 		/*
478 		 * Report a whole-file hole if the delalloc flag is set to
479 		 * stay compatible with the old implementation.
480 		 */
481 		if (iflags & BMV_IF_DELALLOC)
482 			xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
483 					XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
484 		goto out_unlock_ilock;
485 	}
486 
487 	while (!xfs_getbmap_full(bmv)) {
488 		xfs_trim_extent(&got, first_bno, len);
489 
490 		/*
491 		 * Report an entry for a hole if this extent doesn't directly
492 		 * follow the previous one.
493 		 */
494 		if (got.br_startoff > bno) {
495 			xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
496 					got.br_startoff);
497 			if (xfs_getbmap_full(bmv))
498 				break;
499 		}
500 
501 		/*
502 		 * In order to report shared extents accurately, we report each
503 		 * distinct shared / unshared part of a single bmbt record with
504 		 * an individual getbmapx record.
505 		 */
506 		bno = got.br_startoff + got.br_blockcount;
507 		rec = got;
508 		do {
509 			error = xfs_getbmap_report_one(ip, bmv, out, bmv_end,
510 					&rec);
511 			if (error || xfs_getbmap_full(bmv))
512 				goto out_unlock_ilock;
513 		} while (xfs_getbmap_next_rec(&rec, bno));
514 
515 		if (!xfs_iext_next_extent(ifp, &icur, &got)) {
516 			xfs_fileoff_t	end = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
517 
518 			out[bmv->bmv_entries - 1].bmv_oflags |= BMV_OF_LAST;
519 
520 			if (whichfork != XFS_ATTR_FORK && bno < end &&
521 			    !xfs_getbmap_full(bmv)) {
522 				xfs_getbmap_report_hole(ip, bmv, out, bmv_end,
523 						bno, end);
524 			}
525 			break;
526 		}
527 
528 		if (bno >= first_bno + len)
529 			break;
530 	}
531 
532 out_unlock_ilock:
533 	xfs_iunlock(ip, lock);
534 out_unlock_iolock:
535 	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
536 	return error;
537 }
538 
539 /*
540  * Dead simple method of punching delayed allocation blocks from a range in
541  * the inode.  This will always punch out both the start and end blocks, even
542  * if the ranges only partially overlap them, so it is up to the caller to
543  * ensure that partial blocks are not passed in.
544  */
545 int
546 xfs_bmap_punch_delalloc_range(
547 	struct xfs_inode	*ip,
548 	xfs_fileoff_t		start_fsb,
549 	xfs_fileoff_t		length)
550 {
551 	struct xfs_ifork	*ifp = &ip->i_df;
552 	xfs_fileoff_t		end_fsb = start_fsb + length;
553 	struct xfs_bmbt_irec	got, del;
554 	struct xfs_iext_cursor	icur;
555 	int			error = 0;
556 
557 	ASSERT(!xfs_need_iread_extents(ifp));
558 
559 	xfs_ilock(ip, XFS_ILOCK_EXCL);
560 	if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
561 		goto out_unlock;
562 
563 	while (got.br_startoff + got.br_blockcount > start_fsb) {
564 		del = got;
565 		xfs_trim_extent(&del, start_fsb, length);
566 
567 		/*
568 		 * A delete can push the cursor forward. Step back to the
569 		 * previous extent for non-delalloc extents or for extents
570 		 * outside the target range.
571 		 */
572 		if (!del.br_blockcount ||
573 		    !isnullstartblock(del.br_startblock)) {
574 			if (!xfs_iext_prev_extent(ifp, &icur, &got))
575 				break;
576 			continue;
577 		}
578 
579 		error = xfs_bmap_del_extent_delay(ip, XFS_DATA_FORK, &icur,
580 						  &got, &del);
581 		if (error || !xfs_iext_get_extent(ifp, &icur, &got))
582 			break;
583 	}
584 
585 out_unlock:
586 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
587 	return error;
588 }
589 
590 /*
591  * Test whether it is appropriate to check an inode for and free post EOF
592  * blocks. The 'force' parameter determines whether we should also consider
593  * regular files that are marked preallocated or append-only.
594  */
595 bool
596 xfs_can_free_eofblocks(
597 	struct xfs_inode	*ip,
598 	bool			force)
599 {
600 	struct xfs_bmbt_irec	imap;
601 	struct xfs_mount	*mp = ip->i_mount;
602 	xfs_fileoff_t		end_fsb;
603 	xfs_fileoff_t		last_fsb;
604 	int			nimaps = 1;
605 	int			error;
606 
607 	/*
608 	 * Caller must either hold the exclusive IO lock or be inactivating
609 	 * the inode, which guarantees there are no other users of the inode.
610 	 */
611 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL) ||
612 	       (VFS_I(ip)->i_state & I_FREEING));
613 
614 	/* prealloc/delalloc exists only on regular files */
615 	if (!S_ISREG(VFS_I(ip)->i_mode))
616 		return false;
617 
618 	/*
619 	 * Zero sized files with no cached pages and no delalloc blocks will
620 	 * not have speculative prealloc/delalloc blocks to remove.
621 	 */
622 	if (VFS_I(ip)->i_size == 0 &&
623 	    VFS_I(ip)->i_mapping->nrpages == 0 &&
624 	    ip->i_delayed_blks == 0)
625 		return false;
626 
627 	/* If we haven't read in the extent list, then don't do it now. */
628 	if (xfs_need_iread_extents(&ip->i_df))
629 		return false;
630 
631 	/*
632 	 * Do not free real preallocated or append-only files unless the file
633 	 * has delalloc blocks and we are forced to remove them.
634 	 */
635 	if (ip->i_diflags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
636 		if (!force || ip->i_delayed_blks == 0)
637 			return false;
638 
639 	/*
640 	 * Do not try to free post-EOF blocks if EOF is beyond the end of the
641 	 * range supported by the page cache, because the truncation will loop
642 	 * forever.
643 	 */
644 	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
645 	last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
646 	if (last_fsb <= end_fsb)
647 		return false;
648 
649 	/*
650 	 * Look up the mapping for the first block past EOF.  If we can't find
651 	 * it, there's nothing to free.
652 	 */
653 	xfs_ilock(ip, XFS_ILOCK_SHARED);
654 	error = xfs_bmapi_read(ip, end_fsb, last_fsb - end_fsb, &imap, &nimaps,
655 			0);
656 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
657 	if (error || nimaps == 0)
658 		return false;
659 
660 	/*
661 	 * If there's a real mapping there or there are delayed allocation
662 	 * reservations, then we have post-EOF blocks to try to free.
663 	 */
664 	return imap.br_startblock != HOLESTARTBLOCK || ip->i_delayed_blks;
665 }
666 
667 /*
668  * This is called to free any blocks beyond eof. The caller must hold
669  * IOLOCK_EXCL unless we are in the inode reclaim path and have the only
670  * reference to the inode.
671  */
672 int
673 xfs_free_eofblocks(
674 	struct xfs_inode	*ip)
675 {
676 	struct xfs_trans	*tp;
677 	struct xfs_mount	*mp = ip->i_mount;
678 	int			error;
679 
680 	/* Attach the dquots to the inode up front. */
681 	error = xfs_qm_dqattach(ip);
682 	if (error)
683 		return error;
684 
685 	/* Wait on dio to ensure i_size has settled. */
686 	inode_dio_wait(VFS_I(ip));
687 
688 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
689 	if (error) {
690 		ASSERT(XFS_FORCED_SHUTDOWN(mp));
691 		return error;
692 	}
693 
694 	xfs_ilock(ip, XFS_ILOCK_EXCL);
695 	xfs_trans_ijoin(tp, ip, 0);
696 
697 	/*
698 	 * Do not update the on-disk file size.  If we update the on-disk file
699 	 * size and then the system crashes before the contents of the file are
700 	 * flushed to disk then the file may be full of holes (i.e. the NULL
701 	 * files bug).
702 	 */
703 	error = xfs_itruncate_extents_flags(&tp, ip, XFS_DATA_FORK,
704 				XFS_ISIZE(ip), XFS_BMAPI_NODISCARD);
705 	if (error)
706 		goto err_cancel;
707 
708 	error = xfs_trans_commit(tp);
709 	if (error)
710 		goto out_unlock;
711 
712 	xfs_inode_clear_eofblocks_tag(ip);
713 	goto out_unlock;
714 
715 err_cancel:
716 	/*
717 	 * If we get an error at this point we simply don't
718 	 * bother truncating the file.
719 	 */
720 	xfs_trans_cancel(tp);
721 out_unlock:
722 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
723 	return error;
724 }
725 
726 int
727 xfs_alloc_file_space(
728 	struct xfs_inode	*ip,
729 	xfs_off_t		offset,
730 	xfs_off_t		len,
731 	int			alloc_type)
732 {
733 	xfs_mount_t		*mp = ip->i_mount;
734 	xfs_off_t		count;
735 	xfs_filblks_t		allocated_fsb;
736 	xfs_filblks_t		allocatesize_fsb;
737 	xfs_extlen_t		extsz, temp;
738 	xfs_fileoff_t		startoffset_fsb;
739 	xfs_fileoff_t		endoffset_fsb;
740 	int			nimaps;
741 	int			rt;
742 	xfs_trans_t		*tp;
743 	xfs_bmbt_irec_t		imaps[1], *imapp;
744 	int			error;
745 
746 	trace_xfs_alloc_file_space(ip);
747 
748 	if (XFS_FORCED_SHUTDOWN(mp))
749 		return -EIO;
750 
751 	error = xfs_qm_dqattach(ip);
752 	if (error)
753 		return error;
754 
755 	if (len <= 0)
756 		return -EINVAL;
757 
758 	rt = XFS_IS_REALTIME_INODE(ip);
759 	extsz = xfs_get_extsz_hint(ip);
760 
761 	count = len;
762 	imapp = &imaps[0];
763 	nimaps = 1;
764 	startoffset_fsb	= XFS_B_TO_FSBT(mp, offset);
765 	endoffset_fsb = XFS_B_TO_FSB(mp, offset + count);
766 	allocatesize_fsb = endoffset_fsb - startoffset_fsb;
767 
768 	/*
769 	 * Allocate file space until done or until there is an error
770 	 */
771 	while (allocatesize_fsb && !error) {
772 		xfs_fileoff_t	s, e;
773 		unsigned int	dblocks, rblocks, resblks;
774 
775 		/*
776 		 * Determine space reservations for data/realtime.
777 		 */
778 		if (unlikely(extsz)) {
779 			s = startoffset_fsb;
780 			do_div(s, extsz);
781 			s *= extsz;
782 			e = startoffset_fsb + allocatesize_fsb;
783 			div_u64_rem(startoffset_fsb, extsz, &temp);
784 			if (temp)
785 				e += temp;
786 			div_u64_rem(e, extsz, &temp);
787 			if (temp)
788 				e += extsz - temp;
789 		} else {
790 			s = 0;
791 			e = allocatesize_fsb;
792 		}
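
		/*
		 * s and e are now aligned down/up to the extent size hint:
		 * e.g. with a 16 block hint, a request covering blocks
		 * 10-20 reserves for the aligned range [0, 32).
		 */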
793 
794 		/*
795 		 * The transaction reservation is limited to a 32-bit block
796 		 * count, hence we need to limit the number of blocks we are
797 		 * trying to reserve to avoid an overflow. We can't allocate
798 		 * more than @nimaps extents, and an extent is limited on disk
799 		 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
800 		 */
801 		resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
802 		if (unlikely(rt)) {
803 			dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
804 			rblocks = resblks;
805 		} else {
806 			dblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
807 			rblocks = 0;
808 		}
809 
810 		/*
811 		 * Allocate and setup the transaction.
812 		 */
813 		error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write,
814 				dblocks, rblocks, false, &tp);
815 		if (error)
816 			break;
817 
818 		error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
819 				XFS_IEXT_ADD_NOSPLIT_CNT);
820 		if (error)
821 			goto error;
822 
823 		error = xfs_bmapi_write(tp, ip, startoffset_fsb,
824 					allocatesize_fsb, alloc_type, 0, imapp,
825 					&nimaps);
826 		if (error)
827 			goto error;
828 
829 		/*
830 		 * Complete the transaction
831 		 */
832 		error = xfs_trans_commit(tp);
833 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
834 		if (error)
835 			break;
836 
837 		allocated_fsb = imapp->br_blockcount;
838 
839 		if (nimaps == 0) {
840 			error = -ENOSPC;
841 			break;
842 		}
843 
844 		startoffset_fsb += allocated_fsb;
845 		allocatesize_fsb -= allocated_fsb;
846 	}
847 
848 	return error;
849 
850 error:
851 	xfs_trans_cancel(tp);
852 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
853 	return error;
854 }
855 
856 static int
857 xfs_unmap_extent(
858 	struct xfs_inode	*ip,
859 	xfs_fileoff_t		startoffset_fsb,
860 	xfs_filblks_t		len_fsb,
861 	int			*done)
862 {
863 	struct xfs_mount	*mp = ip->i_mount;
864 	struct xfs_trans	*tp;
865 	uint			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
866 	int			error;
867 
868 	error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, resblks, 0,
869 			false, &tp);
870 	if (error)
871 		return error;
872 
873 	error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
874 			XFS_IEXT_PUNCH_HOLE_CNT);
875 	if (error)
876 		goto out_trans_cancel;
877 
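	/*
	 * The nexts value of 2 bounds how many extents a single
	 * transaction may unmap; xfs_free_file_space() loops on @done to
	 * cover the remainder of the range.
	 */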
878 	error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, done);
879 	if (error)
880 		goto out_trans_cancel;
881 
882 	error = xfs_trans_commit(tp);
883 out_unlock:
884 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
885 	return error;
886 
887 out_trans_cancel:
888 	xfs_trans_cancel(tp);
889 	goto out_unlock;
890 }
891 
892 /* Caller must first wait for the completion of any pending DIOs if required. */
893 int
894 xfs_flush_unmap_range(
895 	struct xfs_inode	*ip,
896 	xfs_off_t		offset,
897 	xfs_off_t		len)
898 {
899 	struct xfs_mount	*mp = ip->i_mount;
900 	struct inode		*inode = VFS_I(ip);
901 	xfs_off_t		rounding, start, end;
902 	int			error;
903 
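	/*
	 * Round out to the larger of the filesystem block size and the
	 * page size so that whole pages and whole blocks are written back
	 * and then dropped from the page cache.
	 */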
904 	rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE);
905 	start = round_down(offset, rounding);
906 	end = round_up(offset + len, rounding) - 1;
907 
908 	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
909 	if (error)
910 		return error;
911 	truncate_pagecache_range(inode, start, end);
912 	return 0;
913 }
914 
915 int
916 xfs_free_file_space(
917 	struct xfs_inode	*ip,
918 	xfs_off_t		offset,
919 	xfs_off_t		len)
920 {
921 	struct xfs_mount	*mp = ip->i_mount;
922 	xfs_fileoff_t		startoffset_fsb;
923 	xfs_fileoff_t		endoffset_fsb;
924 	int			done = 0, error;
925 
926 	trace_xfs_free_file_space(ip);
927 
928 	error = xfs_qm_dqattach(ip);
929 	if (error)
930 		return error;
931 
932 	if (len <= 0)	/* if nothing being freed */
933 		return 0;
934 
935 	startoffset_fsb = XFS_B_TO_FSB(mp, offset);
936 	endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
937 
938 	/* We can only free complete realtime extents. */
939 	if (XFS_IS_REALTIME_INODE(ip) && mp->m_sb.sb_rextsize > 1) {
940 		startoffset_fsb = roundup_64(startoffset_fsb,
941 					     mp->m_sb.sb_rextsize);
942 		endoffset_fsb = rounddown_64(endoffset_fsb,
943 					     mp->m_sb.sb_rextsize);
944 	}
945 
946 	/*
947 	 * Need to zero the stuff we're not freeing, on disk.
948 	 */
949 	if (endoffset_fsb > startoffset_fsb) {
950 		while (!done) {
951 			error = xfs_unmap_extent(ip, startoffset_fsb,
952 					endoffset_fsb - startoffset_fsb, &done);
953 			if (error)
954 				return error;
955 		}
956 	}
957 
958 	/*
959 	 * Now that we've unmapped all full blocks we'll have to zero out any
960 	 * partial block at the beginning and/or end.  iomap_zero_range is smart
961 	 * enough to skip any holes, including those we just created, but we
962 	 * must take care not to zero beyond EOF and enlarge i_size.
963 	 */
964 	if (offset >= XFS_ISIZE(ip))
965 		return 0;
966 	if (offset + len > XFS_ISIZE(ip))
967 		len = XFS_ISIZE(ip) - offset;
968 	error = iomap_zero_range(VFS_I(ip), offset, len, NULL,
969 			&xfs_buffered_write_iomap_ops);
970 	if (error)
971 		return error;
972 
973 	/*
974 	 * If we zeroed right up to EOF and EOF straddles a page boundary we
975 	 * must make sure that the post-EOF area is also zeroed because the
976 	 * page could be mmap'd and iomap_zero_range doesn't do that for us.
977 	 * Writeback of the eof page will do this, albeit clumsily.
978 	 */
979 	if (offset + len >= XFS_ISIZE(ip) && offset_in_page(offset + len) > 0) {
980 		error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
981 				round_down(offset + len, PAGE_SIZE), LLONG_MAX);
982 	}
983 
984 	return error;
985 }
986 
987 static int
988 xfs_prepare_shift(
989 	struct xfs_inode	*ip,
990 	loff_t			offset)
991 {
992 	struct xfs_mount	*mp = ip->i_mount;
993 	int			error;
994 
995 	/*
996 	 * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
997 	 * into the accessible region of the file.
998 	 */
999 	if (xfs_can_free_eofblocks(ip, true)) {
1000 		error = xfs_free_eofblocks(ip);
1001 		if (error)
1002 			return error;
1003 	}
1004 
1005 	/*
1006 	 * Shift operations must stabilize the start block offset boundary along
1007 	 * with the full range of the operation. If we don't, a COW writeback
1008 	 * completion could race with an insert, front merge with the start
1009 	 * extent (after split) during the shift and corrupt the file. Start
1010 	 * with the block just prior to the start to stabilize the boundary.
1011 	 */
1012 	offset = round_down(offset, 1 << mp->m_sb.sb_blocklog);
1013 	if (offset)
1014 		offset -= (1 << mp->m_sb.sb_blocklog);
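	/*
	 * E.g. with 4k blocks, a shift at byte offset 10000 rounds down to
	 * 8192 and then steps back to 4096, so the extent containing the
	 * boundary block is stabilized as well.
	 */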
1015 
1016 	/*
1017 	 * Writeback and invalidate cache for the remainder of the file as we're
1018 	 * about to shift down every extent from offset to EOF.
1019 	 */
1020 	error = xfs_flush_unmap_range(ip, offset, XFS_ISIZE(ip));
1021 	if (error)
1022 		return error;
1023 
1024 	/*
1025 	 * Clean out anything hanging around in the cow fork now that
1026 	 * we've flushed all the dirty data out to disk to avoid having
1027 	 * CoW extents at the wrong offsets.
1028 	 */
1029 	if (xfs_inode_has_cow_data(ip)) {
1030 		error = xfs_reflink_cancel_cow_range(ip, offset, NULLFILEOFF,
1031 				true);
1032 		if (error)
1033 			return error;
1034 	}
1035 
1036 	return 0;
1037 }
1038 
1039 /*
1040  * xfs_collapse_file_space()
1041  *	This routine frees disk space and shifts extents for the given file.
1042  *	The first thing we do is free the data blocks in the specified range
1043  *	by calling xfs_free_file_space(), which also syncs dirty data and
1044  *	invalidates the page cache over the region on which the collapse is
1045  *	working.  Extent records are then shifted left to cover the hole.
1046  * RETURNS:
1047  *	0 on success
1048  *	errno on error
1049  *
1050  */
1051 int
1052 xfs_collapse_file_space(
1053 	struct xfs_inode	*ip,
1054 	xfs_off_t		offset,
1055 	xfs_off_t		len)
1056 {
1057 	struct xfs_mount	*mp = ip->i_mount;
1058 	struct xfs_trans	*tp;
1059 	int			error;
1060 	xfs_fileoff_t		next_fsb = XFS_B_TO_FSB(mp, offset + len);
1061 	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
1062 	bool			done = false;
1063 
1064 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1065 	ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
1066 
1067 	trace_xfs_collapse_file_space(ip);
1068 
1069 	error = xfs_free_file_space(ip, offset, len);
1070 	if (error)
1071 		return error;
1072 
1073 	error = xfs_prepare_shift(ip, offset);
1074 	if (error)
1075 		return error;
1076 
1077 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
1078 	if (error)
1079 		return error;
1080 
1081 	xfs_ilock(ip, XFS_ILOCK_EXCL);
1082 	xfs_trans_ijoin(tp, ip, 0);
1083 
1084 	while (!done) {
1085 		error = xfs_bmap_collapse_extents(tp, ip, &next_fsb, shift_fsb,
1086 				&done);
1087 		if (error)
1088 			goto out_trans_cancel;
1089 		if (done)
1090 			break;
1091 
1092 		/* finish any deferred frees and roll the transaction */
1093 		error = xfs_defer_finish(&tp);
1094 		if (error)
1095 			goto out_trans_cancel;
1096 	}
1097 
1098 	error = xfs_trans_commit(tp);
1099 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1100 	return error;
1101 
1102 out_trans_cancel:
1103 	xfs_trans_cancel(tp);
1104 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1105 	return error;
1106 }
1107 
1108 /*
1109  * xfs_insert_file_space()
1110  *	This routine creates hole space by shifting extents for the given file.
1111  *	The first thing we do is sync dirty data and invalidate the page cache
1112  *	over the region on which the insert range is working.  We then split
1113  *	an extent in two at the given offset by calling xfs_bmap_split_extent,
1114  *	and shift all extent records lying between [offset, last allocated
1115  *	extent] to the right to make room for the hole.
1116  * RETURNS:
1117  *	0 on success
1118  *	errno on error
1119  */
1120 int
1121 xfs_insert_file_space(
1122 	struct xfs_inode	*ip,
1123 	loff_t			offset,
1124 	loff_t			len)
1125 {
1126 	struct xfs_mount	*mp = ip->i_mount;
1127 	struct xfs_trans	*tp;
1128 	int			error;
1129 	xfs_fileoff_t		stop_fsb = XFS_B_TO_FSB(mp, offset);
1130 	xfs_fileoff_t		next_fsb = NULLFSBLOCK;
1131 	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
1132 	bool			done = false;
1133 
1134 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1135 	ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
1136 
1137 	trace_xfs_insert_file_space(ip);
1138 
1139 	error = xfs_bmap_can_insert_extents(ip, stop_fsb, shift_fsb);
1140 	if (error)
1141 		return error;
1142 
1143 	error = xfs_prepare_shift(ip, offset);
1144 	if (error)
1145 		return error;
1146 
1147 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
1148 			XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
1149 	if (error)
1150 		return error;
1151 
1152 	xfs_ilock(ip, XFS_ILOCK_EXCL);
1153 	xfs_trans_ijoin(tp, ip, 0);
1154 
1155 	error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
1156 			XFS_IEXT_PUNCH_HOLE_CNT);
1157 	if (error)
1158 		goto out_trans_cancel;
1159 
1160 	/*
1161 	 * The extent shifting code works on extent granularity. So, if stop_fsb
1162 	 * is not the starting block of an extent, we need to split the extent at
1163 	 * stop_fsb.
1164 	 */
1165 	error = xfs_bmap_split_extent(tp, ip, stop_fsb);
1166 	if (error)
1167 		goto out_trans_cancel;
1168 
1169 	do {
1170 		error = xfs_defer_finish(&tp);
1171 		if (error)
1172 			goto out_trans_cancel;
1173 
1174 		error = xfs_bmap_insert_extents(tp, ip, &next_fsb, shift_fsb,
1175 				&done, stop_fsb);
1176 		if (error)
1177 			goto out_trans_cancel;
1178 	} while (!done);
1179 
1180 	error = xfs_trans_commit(tp);
1181 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1182 	return error;
1183 
1184 out_trans_cancel:
1185 	xfs_trans_cancel(tp);
1186 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1187 	return error;
1188 }
1189 
1190 /*
1191  * We need to check that the format of the data fork in the temporary inode is
1192  * valid for the target inode before doing the swap. This is not a problem with
1193  * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
1194  * data fork depending on the space the attribute fork is taking so we can get
1195  * invalid formats on the target inode.
1196  *
1197  * E.g. target has space for 7 extents in extent format, temp inode only has
1198  * space for 6.  If we defragment down to 7 extents, then the tmp format is a
1199  * btree, but when swapped it needs to be in extent format. Hence we can't just
1200  * blindly swap data forks on attr2 filesystems.
1201  *
1202  * Note that we check the swap in both directions so that we don't end up with
1203  * a corrupt temporary inode, either.
1204  *
1205  * Note that fixing the way xfs_fsr sets up the attribute fork in the source
1206  * inode will prevent this situation from occurring, so all we do here is
1207 	 * reject and log the attempt.  Basically we are putting the
1208 	 * responsibility on userspace to get this right.
1209  */
1210 static int
1211 xfs_swap_extents_check_format(
1212 	struct xfs_inode	*ip,	/* target inode */
1213 	struct xfs_inode	*tip)	/* tmp inode */
1214 {
1215 	struct xfs_ifork	*ifp = &ip->i_df;
1216 	struct xfs_ifork	*tifp = &tip->i_df;
1217 
1218 	/* User/group/project quota ids must match if quotas are enforced. */
1219 	if (XFS_IS_QUOTA_ON(ip->i_mount) &&
1220 	    (!uid_eq(VFS_I(ip)->i_uid, VFS_I(tip)->i_uid) ||
1221 	     !gid_eq(VFS_I(ip)->i_gid, VFS_I(tip)->i_gid) ||
1222 	     ip->i_projid != tip->i_projid))
1223 		return -EINVAL;
1224 
1225 	/* Should never get a local format */
1226 	if (ifp->if_format == XFS_DINODE_FMT_LOCAL ||
1227 	    tifp->if_format == XFS_DINODE_FMT_LOCAL)
1228 		return -EINVAL;
1229 
1230 	/*
1231 	 * If the target inode has fewer extents than the temporary inode,
1232 	 * why did userspace call us?
1233 	 */
1234 	if (ifp->if_nextents < tifp->if_nextents)
1235 		return -EINVAL;
1236 
1237 	/*
1238 	 * If we have to use the (expensive) rmap swap method, we can
1239 	 * handle any number of extents and any format.
1240 	 */
1241 	if (xfs_sb_version_hasrmapbt(&ip->i_mount->m_sb))
1242 		return 0;
1243 
1244 	/*
1245 	 * if the target inode is in extent form and the temp inode is in btree
1246 	 * form then we will end up with the target inode in the wrong format
1247 	 * as we already know there are fewer extents in the temp inode.
1248 	 */
1249 	if (ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
1250 	    tifp->if_format == XFS_DINODE_FMT_BTREE)
1251 		return -EINVAL;
1252 
1253 	/* Check temp in extent form to max in target */
1254 	if (tifp->if_format == XFS_DINODE_FMT_EXTENTS &&
1255 	    tifp->if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1256 		return -EINVAL;
1257 
1258 	/* Check target in extent form to max in temp */
1259 	if (ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
1260 	    ifp->if_nextents > XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1261 		return -EINVAL;
1262 
1263 	/*
1264 	 * If we are in a btree format, check that the temp root block will fit
1265 	 * in the target and that it has enough extents to be in btree format
1266 	 * in the target.
1267 	 *
1268 	 * Note that we have to be careful to allow btree->extent conversions
1269 	 * (a common defrag case) which will occur when the temp inode is in
1270 	 * extent format...
1271 	 */
1272 	if (tifp->if_format == XFS_DINODE_FMT_BTREE) {
1273 		if (XFS_IFORK_Q(ip) &&
1274 		    XFS_BMAP_BMDR_SPACE(tifp->if_broot) > XFS_IFORK_BOFF(ip))
1275 			return -EINVAL;
1276 		if (tifp->if_nextents <= XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1277 			return -EINVAL;
1278 	}
1279 
1280 	/* Reciprocal target->temp btree format checks */
1281 	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
1282 		if (XFS_IFORK_Q(tip) &&
1283 		    XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
1284 			return -EINVAL;
1285 		if (ifp->if_nextents <= XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1286 			return -EINVAL;
1287 	}
1288 
1289 	return 0;
1290 }
1291 
1292 static int
1293 xfs_swap_extent_flush(
1294 	struct xfs_inode	*ip)
1295 {
1296 	int	error;
1297 
1298 	error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
1299 	if (error)
1300 		return error;
1301 	truncate_pagecache_range(VFS_I(ip), 0, -1);
1302 
1303 	/* Verify O_DIRECT for ftmp */
1304 	if (VFS_I(ip)->i_mapping->nrpages)
1305 		return -EINVAL;
1306 	return 0;
1307 }
1308 
1309 /*
1310  * Move extents from one file to another, when rmap is enabled.
1311  */
1312 STATIC int
1313 xfs_swap_extent_rmap(
1314 	struct xfs_trans		**tpp,
1315 	struct xfs_inode		*ip,
1316 	struct xfs_inode		*tip)
1317 {
1318 	struct xfs_trans		*tp = *tpp;
1319 	struct xfs_bmbt_irec		irec;
1320 	struct xfs_bmbt_irec		uirec;
1321 	struct xfs_bmbt_irec		tirec;
1322 	xfs_fileoff_t			offset_fsb;
1323 	xfs_fileoff_t			end_fsb;
1324 	xfs_filblks_t			count_fsb;
1325 	int				error;
1326 	xfs_filblks_t			ilen;
1327 	xfs_filblks_t			rlen;
1328 	int				nimaps;
1329 	uint64_t			tip_flags2;
1330 
1331 	/*
1332 	 * If the source file has shared blocks, we must flag the donor
1333 	 * file as having shared blocks so that we get the shared-block
1334 	 * rmap functions when we go to fix up the rmaps.  The flags
1335 	 * will be switched for real later.
1336 	 */
1337 	tip_flags2 = tip->i_diflags2;
1338 	if (ip->i_diflags2 & XFS_DIFLAG2_REFLINK)
1339 		tip->i_diflags2 |= XFS_DIFLAG2_REFLINK;
1340 
1341 	offset_fsb = 0;
1342 	end_fsb = XFS_B_TO_FSB(ip->i_mount, i_size_read(VFS_I(ip)));
1343 	count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);
1344 
1345 	while (count_fsb) {
1346 		/* Read extent from the donor file */
1347 		nimaps = 1;
1348 		error = xfs_bmapi_read(tip, offset_fsb, count_fsb, &tirec,
1349 				&nimaps, 0);
1350 		if (error)
1351 			goto out;
1352 		ASSERT(nimaps == 1);
1353 		ASSERT(tirec.br_startblock != DELAYSTARTBLOCK);
1354 
1355 		trace_xfs_swap_extent_rmap_remap(tip, &tirec);
1356 		ilen = tirec.br_blockcount;
1357 
1358 		/* Unmap the old blocks in the source file. */
1359 		while (tirec.br_blockcount) {
1360 			ASSERT(tp->t_firstblock == NULLFSBLOCK);
1361 			trace_xfs_swap_extent_rmap_remap_piece(tip, &tirec);
1362 
1363 			/* Read extent from the source file */
1364 			nimaps = 1;
1365 			error = xfs_bmapi_read(ip, tirec.br_startoff,
1366 					tirec.br_blockcount, &irec,
1367 					&nimaps, 0);
1368 			if (error)
1369 				goto out;
1370 			ASSERT(nimaps == 1);
1371 			ASSERT(tirec.br_startoff == irec.br_startoff);
1372 			trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);
1373 
1374 			/* Trim the extent. */
1375 			uirec = tirec;
1376 			uirec.br_blockcount = rlen = min_t(xfs_filblks_t,
1377 					tirec.br_blockcount,
1378 					irec.br_blockcount);
1379 			trace_xfs_swap_extent_rmap_remap_piece(tip, &uirec);
1380 
1381 			if (xfs_bmap_is_real_extent(&uirec)) {
1382 				error = xfs_iext_count_may_overflow(ip,
1383 						XFS_DATA_FORK,
1384 						XFS_IEXT_SWAP_RMAP_CNT);
1385 				if (error)
1386 					goto out;
1387 			}
1388 
1389 			if (xfs_bmap_is_real_extent(&irec)) {
1390 				error = xfs_iext_count_may_overflow(tip,
1391 						XFS_DATA_FORK,
1392 						XFS_IEXT_SWAP_RMAP_CNT);
1393 				if (error)
1394 					goto out;
1395 			}
1396 
1397 			/* Remove the mapping from the donor file. */
1398 			xfs_bmap_unmap_extent(tp, tip, &uirec);
1399 
1400 			/* Remove the mapping from the source file. */
1401 			xfs_bmap_unmap_extent(tp, ip, &irec);
1402 
1403 			/* Map the donor file's blocks into the source file. */
1404 			xfs_bmap_map_extent(tp, ip, &uirec);
1405 
1406 			/* Map the source file's blocks into the donor file. */
1407 			xfs_bmap_map_extent(tp, tip, &irec);
1408 
1409 			error = xfs_defer_finish(tpp);
1410 			tp = *tpp;
1411 			if (error)
1412 				goto out;
1413 
1414 			tirec.br_startoff += rlen;
1415 			if (tirec.br_startblock != HOLESTARTBLOCK &&
1416 			    tirec.br_startblock != DELAYSTARTBLOCK)
1417 				tirec.br_startblock += rlen;
1418 			tirec.br_blockcount -= rlen;
1419 		}
1420 
1421 		/* Roll on... */
1422 		count_fsb -= ilen;
1423 		offset_fsb += ilen;
1424 	}
1425 
1426 	tip->i_diflags2 = tip_flags2;
1427 	return 0;
1428 
1429 out:
1430 	trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
1431 	tip->i_diflags2 = tip_flags2;
1432 	return error;
1433 }
1434 
1435 /* Swap the extents of two files by swapping data forks. */
1436 STATIC int
1437 xfs_swap_extent_forks(
1438 	struct xfs_trans	*tp,
1439 	struct xfs_inode	*ip,
1440 	struct xfs_inode	*tip,
1441 	int			*src_log_flags,
1442 	int			*target_log_flags)
1443 {
1444 	xfs_filblks_t		aforkblks = 0;
1445 	xfs_filblks_t		taforkblks = 0;
1446 	xfs_extnum_t		junk;
1447 	uint64_t		tmp;
1448 	int			error;
1449 
1450 	/*
1451 	 * Count the number of extended attribute blocks
1452 	 */
1453 	if (XFS_IFORK_Q(ip) && ip->i_afp->if_nextents > 0 &&
1454 	    ip->i_afp->if_format != XFS_DINODE_FMT_LOCAL) {
1455 		error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &junk,
1456 				&aforkblks);
1457 		if (error)
1458 			return error;
1459 	}
1460 	if (XFS_IFORK_Q(tip) && tip->i_afp->if_nextents > 0 &&
1461 	    tip->i_afp->if_format != XFS_DINODE_FMT_LOCAL) {
1462 		error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK, &junk,
1463 				&taforkblks);
1464 		if (error)
1465 			return error;
1466 	}
1467 
1468 	/*
1469 	 * Btree format (v3) inodes have the inode number stamped in the bmbt
1470 	 * block headers. We can't start changing the bmbt blocks until the
1471 	 * inode owner change is logged so recovery does the right thing in the
1472 	 * event of a crash. Set the owner change log flags now and leave the
1473 	 * bmbt scan as the last step.
1474 	 */
1475 	if (xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) {
1476 		if (ip->i_df.if_format == XFS_DINODE_FMT_BTREE)
1477 			(*target_log_flags) |= XFS_ILOG_DOWNER;
1478 		if (tip->i_df.if_format == XFS_DINODE_FMT_BTREE)
1479 			(*src_log_flags) |= XFS_ILOG_DOWNER;
1480 	}
1481 
1482 	/*
1483 	 * Swap the data forks of the inodes
1484 	 */
1485 	swap(ip->i_df, tip->i_df);
1486 
1487 	/*
1488 	 * Fix the on-disk inode values
1489 	 */
1490 	tmp = (uint64_t)ip->i_nblocks;
1491 	ip->i_nblocks = tip->i_nblocks - taforkblks + aforkblks;
1492 	tip->i_nblocks = tmp + taforkblks - aforkblks;
1493 
1494 	/*
1495 	 * The extents in the source inode could still contain speculative
1496 	 * preallocation beyond EOF (e.g. the file is open but not modified
1497 	 * while defrag is in progress). In that case, we need to copy over the
1498 	 * number of delalloc blocks the data fork in the source inode is
1499 	 * tracking beyond EOF so that when the fork is truncated away when the
1500 	 * temporary inode is unlinked we don't underrun the i_delayed_blks
1501 	 * counter on that inode.
1502 	 */
1503 	ASSERT(tip->i_delayed_blks == 0);
1504 	tip->i_delayed_blks = ip->i_delayed_blks;
1505 	ip->i_delayed_blks = 0;
1506 
1507 	switch (ip->i_df.if_format) {
1508 	case XFS_DINODE_FMT_EXTENTS:
1509 		(*src_log_flags) |= XFS_ILOG_DEXT;
1510 		break;
1511 	case XFS_DINODE_FMT_BTREE:
1512 		ASSERT(!xfs_sb_version_has_v3inode(&ip->i_mount->m_sb) ||
1513 		       (*src_log_flags & XFS_ILOG_DOWNER));
1514 		(*src_log_flags) |= XFS_ILOG_DBROOT;
1515 		break;
1516 	}
1517 
1518 	switch (tip->i_df.if_format) {
1519 	case XFS_DINODE_FMT_EXTENTS:
1520 		(*target_log_flags) |= XFS_ILOG_DEXT;
1521 		break;
1522 	case XFS_DINODE_FMT_BTREE:
1523 		(*target_log_flags) |= XFS_ILOG_DBROOT;
1524 		ASSERT(!xfs_sb_version_has_v3inode(&ip->i_mount->m_sb) ||
1525 		       (*target_log_flags & XFS_ILOG_DOWNER));
1526 		break;
1527 	}
1528 
1529 	return 0;
1530 }
1531 
1532 /*
1533  * Fix up the owners of the bmbt blocks to refer to the current inode. The
1534  * change owner scan attempts to order all modified buffers in the current
1535  * transaction. In the event of ordered buffer failure, the offending buffer is
1536  * physically logged as a fallback and the scan returns -EAGAIN. We must roll
1537  * the transaction in this case to replenish the fallback log reservation and
1538  * restart the scan. This process repeats until the scan completes.
1539  */
1540 static int
1541 xfs_swap_change_owner(
1542 	struct xfs_trans	**tpp,
1543 	struct xfs_inode	*ip,
1544 	struct xfs_inode	*tmpip)
1545 {
1546 	int			error;
1547 	struct xfs_trans	*tp = *tpp;
1548 
1549 	do {
1550 		error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK, ip->i_ino,
1551 					      NULL);
1552 		/* success or fatal error */
1553 		if (error != -EAGAIN)
1554 			break;
1555 
1556 		error = xfs_trans_roll(tpp);
1557 		if (error)
1558 			break;
1559 		tp = *tpp;
1560 
1561 		/*
1562 		 * Redirty both inodes so they can relog and keep the log tail
1563 		 * moving forward.
1564 		 */
1565 		xfs_trans_ijoin(tp, ip, 0);
1566 		xfs_trans_ijoin(tp, tmpip, 0);
1567 		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1568 		xfs_trans_log_inode(tp, tmpip, XFS_ILOG_CORE);
1569 	} while (true);
1570 
1571 	return error;
1572 }
1573 
1574 int
1575 xfs_swap_extents(
1576 	struct xfs_inode	*ip,	/* target inode */
1577 	struct xfs_inode	*tip,	/* tmp inode */
1578 	struct xfs_swapext	*sxp)
1579 {
1580 	struct xfs_mount	*mp = ip->i_mount;
1581 	struct xfs_trans	*tp;
1582 	struct xfs_bstat	*sbp = &sxp->sx_stat;
1583 	int			src_log_flags, target_log_flags;
1584 	int			error = 0;
1585 	int			lock_flags;
1586 	uint64_t		f;
1587 	int			resblks = 0;
1588 	unsigned int		flags = 0;
1589 
1590 	/*
1591 	 * Lock the inodes against other IO, page faults and truncate to
1592 	 * begin with.  Then we can safely ensure the inodes are flushed and
1593 	 * have no page cache.  Once we have done this we can take the ilocks and
1594 	 * do the rest of the checks.
1595 	 */
1596 	lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
1597 	lock_flags = XFS_MMAPLOCK_EXCL;
1598 	xfs_lock_two_inodes(ip, XFS_MMAPLOCK_EXCL, tip, XFS_MMAPLOCK_EXCL);
1599 
1600 	/* Verify that both files have the same format */
1601 	if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
1602 		error = -EINVAL;
1603 		goto out_unlock;
1604 	}
1605 
1606 	/* Verify both files are either real-time or non-realtime */
1607 	if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
1608 		error = -EINVAL;
1609 		goto out_unlock;
1610 	}
1611 
1612 	error = xfs_qm_dqattach(ip);
1613 	if (error)
1614 		goto out_unlock;
1615 
1616 	error = xfs_qm_dqattach(tip);
1617 	if (error)
1618 		goto out_unlock;
1619 
1620 	error = xfs_swap_extent_flush(ip);
1621 	if (error)
1622 		goto out_unlock;
1623 	error = xfs_swap_extent_flush(tip);
1624 	if (error)
1625 		goto out_unlock;
1626 
1627 	if (xfs_inode_has_cow_data(tip)) {
1628 		error = xfs_reflink_cancel_cow_range(tip, 0, NULLFILEOFF, true);
1629 		if (error)
1630 			goto out_unlock;
1631 	}
1632 
1633 	/*
1634 	 * Extent "swapping" with rmap requires a permanent reservation and
1635 	 * a block reservation because it's really just a remap operation
1636 	 * performed with log redo items!
1637 	 */
1638 	if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
1639 		int		w = XFS_DATA_FORK;
1640 		uint32_t	ipnext = ip->i_df.if_nextents;
1641 		uint32_t	tipnext	= tip->i_df.if_nextents;
1642 
1643 		/*
1644 		 * Conceptually this shouldn't affect the shape of either bmbt,
1645 		 * but since we atomically move extents one by one, we reserve
1646 		 * enough space to rebuild both trees.
1647 		 */
1648 		resblks = XFS_SWAP_RMAP_SPACE_RES(mp, ipnext, w);
1649 		resblks +=  XFS_SWAP_RMAP_SPACE_RES(mp, tipnext, w);
1650 
1651 		/*
1652 		 * If either inode straddles a bmapbt block allocation boundary,
1653 		 * the rmapbt algorithm triggers repeated allocs and frees as
1654 		 * extents are remapped. This can exhaust the block reservation
1655 		 * prematurely and cause shutdown. Return freed blocks to the
1656 		 * transaction reservation to counter this behavior.
1657 		 */
1658 		flags |= XFS_TRANS_RES_FDBLKS;
1659 	}
1660 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, flags,
1661 				&tp);
1662 	if (error)
1663 		goto out_unlock;
1664 
1665 	/*
1666 	 * Lock and join the inodes to the transaction so that transaction commit
1667 	 * or cancel will unlock the inodes from this point onwards.
1668 	 */
1669 	xfs_lock_two_inodes(ip, XFS_ILOCK_EXCL, tip, XFS_ILOCK_EXCL);
1670 	lock_flags |= XFS_ILOCK_EXCL;
1671 	xfs_trans_ijoin(tp, ip, 0);
1672 	xfs_trans_ijoin(tp, tip, 0);
1673 
1674 
1675 	/* Verify all data are being swapped */
1676 	if (sxp->sx_offset != 0 ||
1677 	    sxp->sx_length != ip->i_disk_size ||
1678 	    sxp->sx_length != tip->i_disk_size) {
1679 		error = -EFAULT;
1680 		goto out_trans_cancel;
1681 	}
1682 
1683 	trace_xfs_swap_extent_before(ip, 0);
1684 	trace_xfs_swap_extent_before(tip, 1);
1685 
1686 	/* check inode formats now that data is flushed */
1687 	error = xfs_swap_extents_check_format(ip, tip);
1688 	if (error) {
1689 		xfs_notice(mp,
1690 		    "%s: inode 0x%llx format is incompatible for exchanging.",
1691 				__func__, ip->i_ino);
1692 		goto out_trans_cancel;
1693 	}
1694 
1695 	/*
1696 	 * Compare the current change & modify times with those
1697 	 * passed in.  If they differ, we abort this swap.
1698 	 * This is the mechanism used to assure the calling
1699 	 * process that the file was not changed out from
1700 	 * under it.
1701 	 */
1702 	if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
1703 	    (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
1704 	    (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
1705 	    (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
1706 		error = -EBUSY;
1707 		goto out_trans_cancel;
1708 	}
1709 
1710 	/*
1711 	 * Note the trickiness in setting the log flags - we set the owner log
1712 	 * flag on the opposite inode (i.e. the inode we are setting the new
1713 	 * owner to be) because once we swap the forks and log that, log
1714 	 * recovery is going to see the fork as owned by the swapped inode,
1715 	 * not the pre-swapped inodes.
1716 	 */
1717 	src_log_flags = XFS_ILOG_CORE;
1718 	target_log_flags = XFS_ILOG_CORE;
1719 
1720 	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
1721 		error = xfs_swap_extent_rmap(&tp, ip, tip);
1722 	else
1723 		error = xfs_swap_extent_forks(tp, ip, tip, &src_log_flags,
1724 				&target_log_flags);
1725 	if (error)
1726 		goto out_trans_cancel;
1727 
1728 	/* Do we have to swap reflink flags? */
1729 	if ((ip->i_diflags2 & XFS_DIFLAG2_REFLINK) ^
1730 	    (tip->i_diflags2 & XFS_DIFLAG2_REFLINK)) {
1731 		f = ip->i_diflags2 & XFS_DIFLAG2_REFLINK;
1732 		ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
1733 		ip->i_diflags2 |= tip->i_diflags2 & XFS_DIFLAG2_REFLINK;
1734 		tip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
1735 		tip->i_diflags2 |= f & XFS_DIFLAG2_REFLINK;
1736 	}
1737 
1738 	/* Swap the cow forks. */
1739 	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
1740 		ASSERT(!ip->i_cowfp ||
1741 		       ip->i_cowfp->if_format == XFS_DINODE_FMT_EXTENTS);
1742 		ASSERT(!tip->i_cowfp ||
1743 		       tip->i_cowfp->if_format == XFS_DINODE_FMT_EXTENTS);
1744 
1745 		swap(ip->i_cowfp, tip->i_cowfp);
1746 
1747 		if (ip->i_cowfp && ip->i_cowfp->if_bytes)
1748 			xfs_inode_set_cowblocks_tag(ip);
1749 		else
1750 			xfs_inode_clear_cowblocks_tag(ip);
1751 		if (tip->i_cowfp && tip->i_cowfp->if_bytes)
1752 			xfs_inode_set_cowblocks_tag(tip);
1753 		else
1754 			xfs_inode_clear_cowblocks_tag(tip);
1755 	}
1756 
1757 	xfs_trans_log_inode(tp, ip,  src_log_flags);
1758 	xfs_trans_log_inode(tp, tip, target_log_flags);
1759 
1760 	/*
1761 	 * The extent forks have been swapped, but crc=1,rmapbt=0 filesystems
1762 	 * have inode number owner values in the bmbt blocks that still refer to
1763 	 * the old inode. Scan each bmbt to fix up the owner values with the
1764 	 * inode number of the current inode.
1765 	 */
1766 	if (src_log_flags & XFS_ILOG_DOWNER) {
1767 		error = xfs_swap_change_owner(&tp, ip, tip);
1768 		if (error)
1769 			goto out_trans_cancel;
1770 	}
1771 	if (target_log_flags & XFS_ILOG_DOWNER) {
1772 		error = xfs_swap_change_owner(&tp, tip, ip);
1773 		if (error)
1774 			goto out_trans_cancel;
1775 	}
1776 
1777 	/*
1778 	 * If this is a synchronous mount, make sure that the
1779 	 * transaction goes to disk before returning to the user.
1780 	 */
1781 	if (mp->m_flags & XFS_MOUNT_WSYNC)
1782 		xfs_trans_set_sync(tp);
1783 
1784 	error = xfs_trans_commit(tp);
1785 
1786 	trace_xfs_swap_extent_after(ip, 0);
1787 	trace_xfs_swap_extent_after(tip, 1);
1788 
1789 out_unlock:
1790 	xfs_iunlock(ip, lock_flags);
1791 	xfs_iunlock(tip, lock_flags);
1792 	unlock_two_nondirectories(VFS_I(ip), VFS_I(tip));
1793 	return error;
1794 
1795 out_trans_cancel:
1796 	xfs_trans_cancel(tp);
1797 	goto out_unlock;
1798 }
1799