// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_rmap.h"
#include "xfs_alloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
#include "xfs_ag_resv.h"
#include "xfs_bmap.h"

extern kmem_zone_t	*xfs_bmap_free_item_zone;

struct workqueue_struct *xfs_alloc_wq;

#define XFS_ABSDIFF(a,b)	(((a) <= (b)) ? ((b) - (a)) : ((a) - (b)))

#define	XFSA_FIXUP_BNO_OK	1
#define	XFSA_FIXUP_CNT_OK	2

STATIC int xfs_alloc_ag_vextent_exact(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_near(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_size(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_small(xfs_alloc_arg_t *,
		xfs_btree_cur_t *, xfs_agblock_t *, xfs_extlen_t *, int *);

/*
 * Size of the AGFL.  For CRC-enabled filesystems we steal a couple of slots
 * in the beginning of the block for a proper header with the location
 * information and CRC.
 */
unsigned int
xfs_agfl_size(
	struct xfs_mount	*mp)
{
	unsigned int		size = mp->m_sb.sb_sectsize;

	if (xfs_sb_version_hascrc(&mp->m_sb))
		size -= sizeof(struct xfs_agfl);

	return size / sizeof(xfs_agblock_t);
}
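
/*
 * Editor's worked example (not upstream text): the on-disk v5 AGFL header
 * (magic + seqno + uuid + lsn + crc) is 36 bytes, so with the common
 * 512-byte sector a CRC-enabled AGFL holds
 * (512 - 36) / sizeof(xfs_agblock_t) = 476 / 4 = 119 slots, while a
 * pre-CRC AGFL uses the whole sector: 512 / 4 = 128 slots.
 */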

unsigned int
xfs_refc_block(
	struct xfs_mount	*mp)
{
	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		return XFS_RMAP_BLOCK(mp) + 1;
	if (xfs_sb_version_hasfinobt(&mp->m_sb))
		return XFS_FIBT_BLOCK(mp) + 1;
	return XFS_IBT_BLOCK(mp) + 1;
}

xfs_extlen_t
xfs_prealloc_blocks(
	struct xfs_mount	*mp)
{
	if (xfs_sb_version_hasreflink(&mp->m_sb))
		return xfs_refc_block(mp) + 1;
	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		return XFS_RMAP_BLOCK(mp) + 1;
	if (xfs_sb_version_hasfinobt(&mp->m_sb))
		return XFS_FIBT_BLOCK(mp) + 1;
	return XFS_IBT_BLOCK(mp) + 1;
}

/*
 * To avoid an ENOSPC-related deadlock caused by out-of-order locking of the
 * AGF buffer (PV 947395), we place constraints on the relationship among
 * actual allocations for data blocks, freelist blocks, and potential file data
 * bmap btree blocks. However, these restrictions may result in no actual space
 * being allocated for a delayed extent: for example, a data block in a certain
 * AG is allocated, but no additional block is available for the bmap btree
 * block that a split of the file's bmap btree would require. This can lead to
 * an infinite loop when the file gets flushed to disk and all delayed extents
 * need to be actually allocated. To get around this, we explicitly set aside a
 * few blocks which will not be reserved in delayed allocation.
 *
 * We need to reserve 4 fsbs _per AG_ for the freelist and 4 more to handle a
 * potential split of the file's bmap btree.
 */
unsigned int
xfs_alloc_set_aside(
	struct xfs_mount	*mp)
{
	return mp->m_sb.sb_agcount * (XFS_ALLOC_AGFL_RESERVE + 4);
}
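
/*
 * Editor's sketch: XFS_ALLOC_AGFL_RESERVE is 4 in this revision, so we set
 * aside 4 + 4 = 8 blocks per AG; e.g. a filesystem with sb_agcount = 4
 * keeps 32 blocks out of the delayed-allocation reservation pool.
 */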

/*
 * When deciding how much space to allocate out of an AG, we limit the
 * allocation maximum size to the size of the AG. However, we cannot use all
 * the blocks in the AG - some are permanently used by metadata. These
 * blocks are generally:
 *	- the AG superblock, AGF, AGI and AGFL
 *	- the AGF (bno and cnt) and AGI btree root blocks, and optionally
 *	  the AGI free inode btree root block
 *	- blocks on the AGFL according to xfs_alloc_set_aside() limits
 *	- the rmapbt root block
 *
 * The AG headers are sector sized, so the amount of space they take up is
 * dependent on filesystem geometry. The others are all single blocks.
 */
unsigned int
xfs_alloc_ag_max_usable(
	struct xfs_mount	*mp)
{
	unsigned int		blocks;

	blocks = XFS_BB_TO_FSB(mp, XFS_FSS_TO_BB(mp, 4)); /* ag headers */
	blocks += XFS_ALLOC_AGFL_RESERVE;
	blocks += 3;			/* AGF, AGI btree root blocks */
	if (xfs_sb_version_hasfinobt(&mp->m_sb))
		blocks++;		/* finobt root block */
	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		blocks++;		/* rmap root block */
	if (xfs_sb_version_hasreflink(&mp->m_sb))
		blocks++;		/* refcount root block */

	return mp->m_sb.sb_agblocks - blocks;
}
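
/*
 * Editor's sketch: with 512-byte sectors and 4k blocks, the four
 * sector-sized headers round up to a single fsb, so a v5 filesystem with
 * finobt, rmapbt and reflink all enabled reserves
 * 1 + 4 + 3 + 1 + 1 + 1 = 11 blocks and this returns sb_agblocks - 11.
 */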

/*
 * Lookup the record equal to [bno, len] in the btree given by cur.
 */
STATIC int				/* error */
xfs_alloc_lookup_eq(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len,	/* length of extent */
	int			*stat)	/* success/failure */
{
	cur->bc_rec.a.ar_startblock = bno;
	cur->bc_rec.a.ar_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}

/*
 * Lookup the first record greater than or equal to [bno, len]
 * in the btree given by cur.
 */
int				/* error */
xfs_alloc_lookup_ge(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len,	/* length of extent */
	int			*stat)	/* success/failure */
{
	cur->bc_rec.a.ar_startblock = bno;
	cur->bc_rec.a.ar_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}

/*
 * Lookup the first record less than or equal to [bno, len]
 * in the btree given by cur.
 */
int					/* error */
xfs_alloc_lookup_le(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len,	/* length of extent */
	int			*stat)	/* success/failure */
{
	cur->bc_rec.a.ar_startblock = bno;
	cur->bc_rec.a.ar_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
}
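
/*
 * Editor's note: these lookups are normally paired with
 * xfs_alloc_get_rec() below; e.g. xfs_alloc_ag_vextent_exact() does an LE
 * lookup on [args->agbno, minlen] because the record starting at or before
 * agbno is the only one that could possibly contain it.
 */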

/*
 * Update the record referred to by cur to the value given
 * by [bno, len].
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int				/* error */
xfs_alloc_update(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len)	/* length of extent */
{
	union xfs_btree_rec	rec;

	rec.alloc.ar_startblock = cpu_to_be32(bno);
	rec.alloc.ar_blockcount = cpu_to_be32(len);
	return xfs_btree_update(cur, &rec);
}

/*
 * Get the data from the pointed-to record.
 */
int					/* error */
xfs_alloc_get_rec(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		*bno,	/* output: starting block of extent */
	xfs_extlen_t		*len,	/* output: length of extent */
	int			*stat)	/* output: success/failure */
{
	struct xfs_mount	*mp = cur->bc_mp;
	xfs_agnumber_t		agno = cur->bc_private.a.agno;
	union xfs_btree_rec	*rec;
	int			error;

	error = xfs_btree_get_rec(cur, &rec, stat);
	if (error || !(*stat))
		return error;

	*bno = be32_to_cpu(rec->alloc.ar_startblock);
	*len = be32_to_cpu(rec->alloc.ar_blockcount);

	if (*len == 0)
		goto out_bad_rec;

	/* check for valid extent range, including overflow */
	if (!xfs_verify_agbno(mp, agno, *bno))
		goto out_bad_rec;
	if (*bno > *bno + *len)
		goto out_bad_rec;
	if (!xfs_verify_agbno(mp, agno, *bno + *len - 1))
		goto out_bad_rec;

	return 0;

out_bad_rec:
	xfs_warn(mp,
		"%s Freespace BTree record corruption in AG %d detected!",
		cur->bc_btnum == XFS_BTNUM_BNO ? "Block" : "Size", agno);
	xfs_warn(mp,
		"start block 0x%x block count 0x%x", *bno, *len);
	return -EFSCORRUPTED;
}
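
/*
 * Editor's note on the overflow check above: bno and len are 32-bit, so
 * for a corrupt record such as bno = 0xfffffff0, len = 0x20 the sum wraps
 * to 0x10 and the "*bno > *bno + *len" test catches it before the
 * end-of-extent xfs_verify_agbno() check could be fooled.
 */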

/*
 * Compute aligned version of the found extent.
 * Takes alignment and min length into account.
 */
STATIC bool
xfs_alloc_compute_aligned(
	xfs_alloc_arg_t	*args,		/* allocation argument structure */
	xfs_agblock_t	foundbno,	/* starting block in found extent */
	xfs_extlen_t	foundlen,	/* length in found extent */
	xfs_agblock_t	*resbno,	/* result block number */
	xfs_extlen_t	*reslen,	/* result length */
	unsigned	*busy_gen)
{
	xfs_agblock_t	bno = foundbno;
	xfs_extlen_t	len = foundlen;
	xfs_extlen_t	diff;
	bool		busy;

	/* Trim busy sections out of found extent */
	busy = xfs_extent_busy_trim(args, &bno, &len, busy_gen);

	/*
	 * If we have a largish extent that happens to start before min_agbno,
	 * see if we can shift it into range...
	 */
	if (bno < args->min_agbno && bno + len > args->min_agbno) {
		diff = args->min_agbno - bno;
		if (len > diff) {
			bno += diff;
			len -= diff;
		}
	}

	if (args->alignment > 1 && len >= args->minlen) {
		xfs_agblock_t	aligned_bno = roundup(bno, args->alignment);

		diff = aligned_bno - bno;

		*resbno = aligned_bno;
		*reslen = diff >= len ? 0 : len - diff;
	} else {
		*resbno = bno;
		*reslen = len;
	}

	return busy;
}
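
/*
 * Editor's sketch: with args->alignment = 4 and a found extent of
 * [bno = 6, len = 10], aligned_bno = roundup(6, 4) = 8 and diff = 2, so
 * the result is [8, 8]; had diff been >= len, the usable length would
 * collapse to 0 and the caller would skip this extent.
 */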

/*
 * Compute best start block and diff for "near" allocations.
 * freelen >= wantlen already checked by caller.
 */
STATIC xfs_extlen_t			/* difference value (absolute) */
xfs_alloc_compute_diff(
	xfs_agblock_t	wantbno,	/* target starting block */
	xfs_extlen_t	wantlen,	/* target length */
	xfs_extlen_t	alignment,	/* target alignment */
	int		datatype,	/* are we allocating data? */
	xfs_agblock_t	freebno,	/* freespace's starting block */
	xfs_extlen_t	freelen,	/* freespace's length */
	xfs_agblock_t	*newbnop)	/* result: best start block from free */
{
	xfs_agblock_t	freeend;	/* end of freespace extent */
	xfs_agblock_t	newbno1;	/* return block number */
	xfs_agblock_t	newbno2;	/* other new block number */
	xfs_extlen_t	newlen1=0;	/* length with newbno1 */
	xfs_extlen_t	newlen2=0;	/* length with newbno2 */
	xfs_agblock_t	wantend;	/* end of target extent */
	bool		userdata = xfs_alloc_is_userdata(datatype);

	ASSERT(freelen >= wantlen);
	freeend = freebno + freelen;
	wantend = wantbno + wantlen;
	/*
	 * We want to allocate from the start of a free extent if it is past
	 * the desired block or if we are allocating user data and the free
	 * extent is before desired block. The second case is there to allow
	 * for contiguous allocation from the remaining free space if the file
	 * grows in the short term.
	 */
	if (freebno >= wantbno || (userdata && freeend < wantend)) {
		if ((newbno1 = roundup(freebno, alignment)) >= freeend)
			newbno1 = NULLAGBLOCK;
	} else if (freeend >= wantend && alignment > 1) {
		newbno1 = roundup(wantbno, alignment);
		newbno2 = newbno1 - alignment;
		if (newbno1 >= freeend)
			newbno1 = NULLAGBLOCK;
		else
			newlen1 = XFS_EXTLEN_MIN(wantlen, freeend - newbno1);
		if (newbno2 < freebno)
			newbno2 = NULLAGBLOCK;
		else
			newlen2 = XFS_EXTLEN_MIN(wantlen, freeend - newbno2);
		if (newbno1 != NULLAGBLOCK && newbno2 != NULLAGBLOCK) {
			if (newlen1 < newlen2 ||
			    (newlen1 == newlen2 &&
			     XFS_ABSDIFF(newbno1, wantbno) >
			     XFS_ABSDIFF(newbno2, wantbno)))
				newbno1 = newbno2;
		} else if (newbno2 != NULLAGBLOCK)
			newbno1 = newbno2;
	} else if (freeend >= wantend) {
		newbno1 = wantbno;
	} else if (alignment > 1) {
		newbno1 = roundup(freeend - wantlen, alignment);
		if (newbno1 > freeend - wantlen &&
		    newbno1 - alignment >= freebno)
			newbno1 -= alignment;
		else if (newbno1 >= freeend)
			newbno1 = NULLAGBLOCK;
	} else
		newbno1 = freeend - wantlen;
	*newbnop = newbno1;
	return newbno1 == NULLAGBLOCK ? 0 : XFS_ABSDIFF(newbno1, wantbno);
}
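
/*
 * Editor's sketch of the first branch: with wantbno = 100, wantlen = 8,
 * alignment = 1 and a free extent [120, 32], freebno >= wantbno, so
 * newbno1 = 120 and the returned diff is 20.  For a userdata allocation
 * against [40, 64] (freeend = 104 < wantend = 108) the same branch fires
 * and we take the start of the free extent, newbno1 = 40, so the file can
 * keep growing contiguously into the remainder.
 */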

/*
 * Fix up the length, based on mod and prod.
 * len should be k * prod + mod for some k.
 * If len is too small it is returned unchanged.
 * If len hits maxlen it is left alone.
 */
STATIC void
xfs_alloc_fix_len(
	xfs_alloc_arg_t	*args)		/* allocation argument structure */
{
	xfs_extlen_t	k;
	xfs_extlen_t	rlen;

	ASSERT(args->mod < args->prod);
	rlen = args->len;
	ASSERT(rlen >= args->minlen);
	ASSERT(rlen <= args->maxlen);
	if (args->prod <= 1 || rlen < args->mod || rlen == args->maxlen ||
	    (args->mod == 0 && rlen < args->prod))
		return;
	k = rlen % args->prod;
	if (k == args->mod)
		return;
	if (k > args->mod)
		rlen = rlen - (k - args->mod);
	else
		rlen = rlen - args->prod + (args->mod - k);
	/* casts to (int) catch length underflows */
	if ((int)rlen < (int)args->minlen)
		return;
	ASSERT(rlen >= args->minlen && rlen <= args->maxlen);
	ASSERT(rlen % args->prod == args->mod);
	ASSERT(args->pag->pagf_freeblks + args->pag->pagf_flcount >=
		rlen + args->minleft);
	args->len = rlen;
}
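
/*
 * Editor's sketch (assuming minlen/maxlen permit): with prod = 4, mod = 1
 * the length must be k * 4 + 1.  rlen = 11 gives k = 3 > mod, so rlen
 * becomes 11 - (3 - 1) = 9; rlen = 12 gives k = 0 < mod, so rlen becomes
 * 12 - 4 + (1 - 0) = 9.  Either way 9 = 2 * 4 + 1 is the largest
 * conforming length that fits.
 */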

/*
 * Update the two btrees, logically removing from freespace the extent
 * starting at rbno, rlen blocks.  The extent is contained within the
 * actual (current) free extent fbno for flen blocks.
 * Flags are passed in indicating whether the cursors are set to the
 * relevant records.
 */
STATIC int				/* error code */
xfs_alloc_fixup_trees(
	xfs_btree_cur_t	*cnt_cur,	/* cursor for by-size btree */
	xfs_btree_cur_t	*bno_cur,	/* cursor for by-block btree */
	xfs_agblock_t	fbno,		/* starting block of free extent */
	xfs_extlen_t	flen,		/* length of free extent */
	xfs_agblock_t	rbno,		/* starting block of returned extent */
	xfs_extlen_t	rlen,		/* length of returned extent */
	int		flags)		/* flags, XFSA_FIXUP_... */
{
	int		error;		/* error code */
	int		i;		/* operation results */
	xfs_agblock_t	nfbno1;		/* first new free startblock */
	xfs_agblock_t	nfbno2;		/* second new free startblock */
	xfs_extlen_t	nflen1=0;	/* first new free length */
	xfs_extlen_t	nflen2=0;	/* second new free length */
	struct xfs_mount *mp;

	mp = cnt_cur->bc_mp;

	/*
	 * Look up the record in the by-size tree if necessary.
	 */
	if (flags & XFSA_FIXUP_CNT_OK) {
#ifdef DEBUG
		if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp,
			i == 1 && nfbno1 == fbno && nflen1 == flen);
#endif
	} else {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
	}
	/*
	 * Look up the record in the by-block tree if necessary.
	 */
	if (flags & XFSA_FIXUP_BNO_OK) {
#ifdef DEBUG
		if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp,
			i == 1 && nfbno1 == fbno && nflen1 == flen);
#endif
	} else {
		if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
	}

#ifdef DEBUG
	if (bno_cur->bc_nlevels == 1 && cnt_cur->bc_nlevels == 1) {
		struct xfs_btree_block	*bnoblock;
		struct xfs_btree_block	*cntblock;

		bnoblock = XFS_BUF_TO_BLOCK(bno_cur->bc_bufs[0]);
		cntblock = XFS_BUF_TO_BLOCK(cnt_cur->bc_bufs[0]);

		XFS_WANT_CORRUPTED_RETURN(mp,
			bnoblock->bb_numrecs == cntblock->bb_numrecs);
	}
#endif

	/*
	 * Deal with all four cases: the allocated record is contained
	 * within the freespace record, so we can have new freespace
	 * at either (or both) end, or no freespace remaining.
	 */
	if (rbno == fbno && rlen == flen)
		nfbno1 = nfbno2 = NULLAGBLOCK;
	else if (rbno == fbno) {
		nfbno1 = rbno + rlen;
		nflen1 = flen - rlen;
		nfbno2 = NULLAGBLOCK;
	} else if (rbno + rlen == fbno + flen) {
		nfbno1 = fbno;
		nflen1 = flen - rlen;
		nfbno2 = NULLAGBLOCK;
	} else {
		nfbno1 = fbno;
		nflen1 = rbno - fbno;
		nfbno2 = rbno + rlen;
		nflen2 = (fbno + flen) - nfbno2;
	}
	/*
	 * Delete the entry from the by-size btree.
	 */
	if ((error = xfs_btree_delete(cnt_cur, &i)))
		return error;
	XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
	/*
	 * Add new by-size btree entry(s).
	 */
	if (nfbno1 != NULLAGBLOCK) {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 0);
		if ((error = xfs_btree_insert(cnt_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
	}
	if (nfbno2 != NULLAGBLOCK) {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 0);
		if ((error = xfs_btree_insert(cnt_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
	}
	/*
	 * Fix up the by-block btree entry(s).
	 */
	if (nfbno1 == NULLAGBLOCK) {
		/*
		 * No remaining freespace, just delete the by-block tree entry.
		 */
		if ((error = xfs_btree_delete(bno_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
	} else {
		/*
		 * Update the by-block entry to start later|be shorter.
		 */
		if ((error = xfs_alloc_update(bno_cur, nfbno1, nflen1)))
			return error;
	}
	if (nfbno2 != NULLAGBLOCK) {
		/*
		 * 2 resulting free entries, need to add one.
		 */
		if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 0);
		if ((error = xfs_btree_insert(bno_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
	}
	return 0;
}
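
/*
 * Editor's diagram of the four cases above, removing [rbno, rlen] from the
 * free extent [fbno, flen]:
 *
 *   rbno == fbno && rlen == flen -> nothing left, delete both entries
 *   rbno == fbno                 -> tail only: [rbno + rlen, flen - rlen]
 *   rbno + rlen == fbno + flen   -> head only: [fbno, flen - rlen]
 *   otherwise                    -> head [fbno, rbno - fbno] plus
 *                                   tail [rbno + rlen, fbno + flen - (rbno + rlen)]
 */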

static xfs_failaddr_t
xfs_agfl_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;
	struct xfs_agfl	*agfl = XFS_BUF_TO_AGFL(bp);
	int		i;

	/*
	 * There is no verification of non-crc AGFLs because mkfs does not
	 * initialise the AGFL to zero or NULL. Hence the only valid part of the
	 * AGFL is what the AGF says is active. We can't get to the AGF, so we
	 * can't verify just those entries are valid.
	 */
	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return NULL;

	if (!xfs_verify_magic(bp, agfl->agfl_magicnum))
		return __this_address;
	if (!uuid_equal(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid))
		return __this_address;
	/*
	 * during growfs operations, the perag is not fully initialised,
	 * so we can't use it for any useful checking. growfs ensures we can't
	 * use it by using uncached buffers that don't have the perag attached
	 * so we can detect and avoid this problem.
	 */
	if (bp->b_pag && be32_to_cpu(agfl->agfl_seqno) != bp->b_pag->pag_agno)
		return __this_address;

	for (i = 0; i < xfs_agfl_size(mp); i++) {
		if (be32_to_cpu(agfl->agfl_bno[i]) != NULLAGBLOCK &&
		    be32_to_cpu(agfl->agfl_bno[i]) >= mp->m_sb.sb_agblocks)
			return __this_address;
	}

	if (!xfs_log_check_lsn(mp, be64_to_cpu(XFS_BUF_TO_AGFL(bp)->agfl_lsn)))
		return __this_address;
	return NULL;
}

static void
xfs_agfl_read_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;
	xfs_failaddr_t	fa;

	/*
	 * There is no verification of non-crc AGFLs because mkfs does not
	 * initialise the AGFL to zero or NULL. Hence the only valid part of the
	 * AGFL is what the AGF says is active. We can't get to the AGF, so we
	 * can't verify just those entries are valid.
	 */
	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	if (!xfs_buf_verify_cksum(bp, XFS_AGFL_CRC_OFF))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_agfl_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}
}

static void
xfs_agfl_write_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_buf_log_item	*bip = bp->b_log_item;
	xfs_failaddr_t		fa;

	/* no verification of non-crc AGFLs */
	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	fa = xfs_agfl_verify(bp);
	if (fa) {
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}

	if (bip)
		XFS_BUF_TO_AGFL(bp)->agfl_lsn = cpu_to_be64(bip->bli_item.li_lsn);

	xfs_buf_update_cksum(bp, XFS_AGFL_CRC_OFF);
}

const struct xfs_buf_ops xfs_agfl_buf_ops = {
	.name = "xfs_agfl",
	.magic = { cpu_to_be32(XFS_AGFL_MAGIC), cpu_to_be32(XFS_AGFL_MAGIC) },
	.verify_read = xfs_agfl_read_verify,
	.verify_write = xfs_agfl_write_verify,
	.verify_struct = xfs_agfl_verify,
};

/*
 * Read in the allocation group free block array.
 */
int					/* error */
xfs_alloc_read_agfl(
	xfs_mount_t	*mp,		/* mount point structure */
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_agnumber_t	agno,		/* allocation group number */
	xfs_buf_t	**bpp)		/* buffer for the ag free block array */
{
	xfs_buf_t	*bp;		/* return value */
	int		error;

	ASSERT(agno != NULLAGNUMBER);
	error = xfs_trans_read_buf(
			mp, tp, mp->m_ddev_targp,
			XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
			XFS_FSS_TO_BB(mp, 1), 0, &bp, &xfs_agfl_buf_ops);
	if (error)
		return error;
	xfs_buf_set_ref(bp, XFS_AGFL_REF);
	*bpp = bp;
	return 0;
}

STATIC int
xfs_alloc_update_counters(
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	struct xfs_buf		*agbp,
	long			len)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);

	pag->pagf_freeblks += len;
	be32_add_cpu(&agf->agf_freeblks, len);

	xfs_trans_agblocks_delta(tp, len);
	if (unlikely(be32_to_cpu(agf->agf_freeblks) >
		     be32_to_cpu(agf->agf_length)))
		return -EFSCORRUPTED;

	xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
	return 0;
}

/*
 * Allocation group level functions.
 */

/*
 * Allocate a variable extent in the allocation group agno.
 * Type and bno are used to determine where in the allocation group the
 * extent will start.
 * Extent's length (returned in *len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent(
	xfs_alloc_arg_t	*args)	/* argument structure for allocation */
{
	int		error=0;

	ASSERT(args->minlen > 0);
	ASSERT(args->maxlen > 0);
	ASSERT(args->minlen <= args->maxlen);
	ASSERT(args->mod < args->prod);
	ASSERT(args->alignment > 0);

	/*
	 * Branch to correct routine based on the type.
	 */
	args->wasfromfl = 0;
	switch (args->type) {
	case XFS_ALLOCTYPE_THIS_AG:
		error = xfs_alloc_ag_vextent_size(args);
		break;
	case XFS_ALLOCTYPE_NEAR_BNO:
		error = xfs_alloc_ag_vextent_near(args);
		break;
	case XFS_ALLOCTYPE_THIS_BNO:
		error = xfs_alloc_ag_vextent_exact(args);
		break;
	default:
		ASSERT(0);
		/* NOTREACHED */
	}

	if (error || args->agbno == NULLAGBLOCK)
		return error;

	ASSERT(args->len >= args->minlen);
	ASSERT(args->len <= args->maxlen);
	ASSERT(!args->wasfromfl || args->resv != XFS_AG_RESV_AGFL);
	ASSERT(args->agbno % args->alignment == 0);

	/* if not file data, insert new block into the reverse map btree */
	if (!xfs_rmap_should_skip_owner_update(&args->oinfo)) {
		error = xfs_rmap_alloc(args->tp, args->agbp, args->agno,
				       args->agbno, args->len, &args->oinfo);
		if (error)
			return error;
	}

	if (!args->wasfromfl) {
		error = xfs_alloc_update_counters(args->tp, args->pag,
						  args->agbp,
						  -((long)(args->len)));
		if (error)
			return error;

		ASSERT(!xfs_extent_busy_search(args->mp, args->agno,
					      args->agbno, args->len));
	}

	xfs_ag_resv_alloc_extent(args->pag, args->resv, args);

	XFS_STATS_INC(args->mp, xs_allocx);
	XFS_STATS_ADD(args->mp, xs_allocb, args->len);
	return error;
}

/*
 * Allocate a variable extent at exactly agno/bno.
 * Extent's length (returned in *len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block (bno), or NULLAGBLOCK if we can't do it.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent_exact(
	xfs_alloc_arg_t	*args)	/* allocation argument structure */
{
	xfs_btree_cur_t	*bno_cur;/* by block-number btree cursor */
	xfs_btree_cur_t	*cnt_cur;/* by count btree cursor */
	int		error;
	xfs_agblock_t	fbno;	/* start block of found extent */
	xfs_extlen_t	flen;	/* length of found extent */
	xfs_agblock_t	tbno;	/* start block of busy extent */
	xfs_extlen_t	tlen;	/* length of busy extent */
	xfs_agblock_t	tend;	/* end block of busy extent */
	int		i;	/* success/failure of operation */
	unsigned	busy_gen;

	ASSERT(args->alignment == 1);

	/*
	 * Allocate/initialize a cursor for the by-number freespace btree.
	 */
	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
					  args->agno, XFS_BTNUM_BNO);

	/*
	 * Lookup bno and minlen in the btree (minlen is irrelevant, really).
	 * Look for the closest free block <= bno, it must contain bno
	 * if any free block does.
	 */
	error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i);
	if (error)
		goto error0;
	if (!i)
		goto not_found;

	/*
	 * Grab the freespace record.
	 */
	error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i);
	if (error)
		goto error0;
	XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
	ASSERT(fbno <= args->agbno);

	/*
	 * Check for overlapping busy extents.
	 */
	tbno = fbno;
	tlen = flen;
	xfs_extent_busy_trim(args, &tbno, &tlen, &busy_gen);

	/*
	 * Give up if the start of the extent is busy, or the freespace isn't
	 * long enough for the minimum request.
	 */
	if (tbno > args->agbno)
		goto not_found;
	if (tlen < args->minlen)
		goto not_found;
	tend = tbno + tlen;
	if (tend < args->agbno + args->minlen)
		goto not_found;

	/*
	 * The end of the extent will be the smaller of the freespace end and
	 * the maximal requested end.
	 *
	 * Fix the length according to mod and prod if given.
	 */
	args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen)
						- args->agbno;
	xfs_alloc_fix_len(args);
	ASSERT(args->agbno + args->len <= tend);

	/*
	 * We are allocating agbno for args->len
	 * Allocate/initialize a cursor for the by-size btree.
	 */
	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_CNT);
	ASSERT(args->agbno + args->len <=
		be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
	error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
				      args->len, XFSA_FIXUP_BNO_OK);
	if (error) {
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
		goto error0;
	}

	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);

	args->wasfromfl = 0;
	trace_xfs_alloc_exact_done(args);
	return 0;

not_found:
	/* Didn't find it, return null. */
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	args->agbno = NULLAGBLOCK;
	trace_xfs_alloc_exact_notfound(args);
	return 0;

error0:
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	trace_xfs_alloc_exact_error(args);
	return error;
}

/*
 * Search the btree in a given direction via the search cursor and compare
 * the records found against the good extent we've already found.
 */
STATIC int
xfs_alloc_find_best_extent(
	struct xfs_alloc_arg	*args,	/* allocation argument structure */
	struct xfs_btree_cur	**gcur,	/* good cursor */
	struct xfs_btree_cur	**scur,	/* searching cursor */
	xfs_agblock_t		gdiff,	/* difference for search comparison */
	xfs_agblock_t		*sbno,	/* extent found by search */
	xfs_extlen_t		*slen,	/* extent length */
	xfs_agblock_t		*sbnoa,	/* aligned extent found by search */
	xfs_extlen_t		*slena,	/* aligned extent length */
	int			dir)	/* 0 = search right, 1 = search left */
{
	xfs_agblock_t		new;
	xfs_agblock_t		sdiff;
	int			error;
	int			i;
	unsigned		busy_gen;

	/* The good extent is perfect, no need to search. */
	if (!gdiff)
		goto out_use_good;

	/*
	 * Look until we find a better one, run out of space or run off the end.
	 */
	do {
		error = xfs_alloc_get_rec(*scur, sbno, slen, &i);
		if (error)
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
		xfs_alloc_compute_aligned(args, *sbno, *slen,
				sbnoa, slena, &busy_gen);

		/*
		 * The good extent is closer than this one.
		 */
		if (!dir) {
			if (*sbnoa > args->max_agbno)
				goto out_use_good;
			if (*sbnoa >= args->agbno + gdiff)
				goto out_use_good;
		} else {
			if (*sbnoa < args->min_agbno)
				goto out_use_good;
			if (*sbnoa <= args->agbno - gdiff)
				goto out_use_good;
		}

		/*
		 * Same distance, compare length and pick the best.
		 */
		if (*slena >= args->minlen) {
			args->len = XFS_EXTLEN_MIN(*slena, args->maxlen);
			xfs_alloc_fix_len(args);

			sdiff = xfs_alloc_compute_diff(args->agbno, args->len,
						       args->alignment,
						       args->datatype, *sbnoa,
						       *slena, &new);

			/*
			 * Choose closer size and invalidate other cursor.
			 */
			if (sdiff < gdiff)
				goto out_use_search;
			goto out_use_good;
		}

		if (!dir)
			error = xfs_btree_increment(*scur, 0, &i);
		else
			error = xfs_btree_decrement(*scur, 0, &i);
		if (error)
			goto error0;
	} while (i);

out_use_good:
	xfs_btree_del_cursor(*scur, XFS_BTREE_NOERROR);
	*scur = NULL;
	return 0;

out_use_search:
	xfs_btree_del_cursor(*gcur, XFS_BTREE_NOERROR);
	*gcur = NULL;
	return 0;

error0:
	/* caller invalidates cursors */
	return error;
}

/*
 * Allocate a variable extent near bno in the allocation group agno.
 * Extent's length (returned in len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
 */
STATIC int				/* error */
xfs_alloc_ag_vextent_near(
	xfs_alloc_arg_t	*args)		/* allocation argument structure */
{
	xfs_btree_cur_t	*bno_cur_gt;	/* cursor for bno btree, right side */
	xfs_btree_cur_t	*bno_cur_lt;	/* cursor for bno btree, left side */
	xfs_btree_cur_t	*cnt_cur;	/* cursor for count btree */
	xfs_agblock_t	gtbno;		/* start bno of right side entry */
	xfs_agblock_t	gtbnoa;		/* aligned ... */
	xfs_extlen_t	gtdiff;		/* difference to right side entry */
	xfs_extlen_t	gtlen;		/* length of right side entry */
	xfs_extlen_t	gtlena;		/* aligned ... */
	xfs_agblock_t	gtnew;		/* useful start bno of right side */
	int		error;		/* error code */
	int		i;		/* result code, temporary */
	int		j;		/* result code, temporary */
	xfs_agblock_t	ltbno;		/* start bno of left side entry */
	xfs_agblock_t	ltbnoa;		/* aligned ... */
	xfs_extlen_t	ltdiff;		/* difference to left side entry */
	xfs_extlen_t	ltlen;		/* length of left side entry */
	xfs_extlen_t	ltlena;		/* aligned ... */
	xfs_agblock_t	ltnew;		/* useful start bno of left side */
	xfs_extlen_t	rlen;		/* length of returned extent */
	bool		busy;
	unsigned	busy_gen;
#ifdef DEBUG
	/*
	 * Randomly don't execute the first algorithm.
	 */
	int		dofirst;	/* set to do first algorithm */

	dofirst = prandom_u32() & 1;
#endif

	/* handle uninitialized agbno range so the caller doesn't have to */
	if (!args->min_agbno && !args->max_agbno)
		args->max_agbno = args->mp->m_sb.sb_agblocks - 1;
	ASSERT(args->min_agbno <= args->max_agbno);

	/* clamp agbno to the range if it's outside */
	if (args->agbno < args->min_agbno)
		args->agbno = args->min_agbno;
	if (args->agbno > args->max_agbno)
		args->agbno = args->max_agbno;

restart:
	bno_cur_lt = NULL;
	bno_cur_gt = NULL;
	ltlen = 0;
	gtlena = 0;
	ltlena = 0;
	busy = false;

	/*
	 * Get a cursor for the by-size btree.
	 */
	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_CNT);

	/*
	 * See if there are any free extents as big as maxlen.
	 */
	if ((error = xfs_alloc_lookup_ge(cnt_cur, 0, args->maxlen, &i)))
		goto error0;
	/*
	 * If none, then pick up the last entry in the tree unless the
	 * tree is empty.
	 */
	if (!i) {
		if ((error = xfs_alloc_ag_vextent_small(args, cnt_cur, &ltbno,
				&ltlen, &i)))
			goto error0;
		if (i == 0 || ltlen == 0) {
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			trace_xfs_alloc_near_noentry(args);
			return 0;
		}
		ASSERT(i == 1);
	}
	args->wasfromfl = 0;

	/*
	 * First algorithm.
	 * If the requested extent is large wrt the freespaces available
	 * in this a.g., then the cursor will be pointing to a btree entry
	 * near the right edge of the tree.  If it's in the last btree leaf
	 * block, then we just examine all the entries in that block
	 * that are big enough, and pick the best one.
	 * This is written as a while loop so we can break out of it,
	 * but we never loop back to the top.
	 */
	while (xfs_btree_islastblock(cnt_cur, 0)) {
		xfs_extlen_t	bdiff;
		int		besti=0;
		xfs_extlen_t	blen=0;
		xfs_agblock_t	bnew=0;

#ifdef DEBUG
		if (dofirst)
			break;
#endif
		/*
		 * Start from the entry that lookup found, sequence through
		 * all larger free blocks.  If we're actually pointing at a
		 * record smaller than maxlen, go to the start of this block,
		 * and skip all those smaller than minlen.
		 */
		if (ltlen || args->alignment > 1) {
			cnt_cur->bc_ptrs[0] = 1;
			do {
				if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno,
						&ltlen, &i)))
					goto error0;
				XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
				if (ltlen >= args->minlen)
					break;
				if ((error = xfs_btree_increment(cnt_cur, 0, &i)))
					goto error0;
			} while (i);
			ASSERT(ltlen >= args->minlen);
			if (!i)
				break;
		}
		i = cnt_cur->bc_ptrs[0];
		for (j = 1, blen = 0, bdiff = 0;
		     !error && j && (blen < args->maxlen || bdiff > 0);
		     error = xfs_btree_increment(cnt_cur, 0, &j)) {
			/*
			 * For each entry, decide if it's better than
			 * the previous best entry.
			 */
			if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
			busy = xfs_alloc_compute_aligned(args, ltbno, ltlen,
					&ltbnoa, &ltlena, &busy_gen);
			if (ltlena < args->minlen)
				continue;
			if (ltbnoa < args->min_agbno || ltbnoa > args->max_agbno)
				continue;
			args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
			xfs_alloc_fix_len(args);
			ASSERT(args->len >= args->minlen);
			if (args->len < blen)
				continue;
			ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
				args->alignment, args->datatype, ltbnoa,
				ltlena, &ltnew);
			if (ltnew != NULLAGBLOCK &&
			    (args->len > blen || ltdiff < bdiff)) {
				bdiff = ltdiff;
				bnew = ltnew;
				blen = args->len;
				besti = cnt_cur->bc_ptrs[0];
			}
		}
		/*
		 * It didn't work.  We COULD be in a case where
		 * there's a good record somewhere, so try again.
		 */
		if (blen == 0)
			break;
		/*
		 * Point at the best entry, and retrieve it again.
		 */
		cnt_cur->bc_ptrs[0] = besti;
		if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
		ASSERT(ltbno + ltlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
		args->len = blen;

		/*
		 * We are allocating starting at bnew for blen blocks.
		 */
		args->agbno = bnew;
		ASSERT(bnew >= ltbno);
		ASSERT(bnew + blen <= ltbno + ltlen);
		/*
		 * Set up a cursor for the by-bno tree.
		 */
		bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp,
			args->agbp, args->agno, XFS_BTNUM_BNO);
		/*
		 * Fix up the btree entries.
		 */
		if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno,
				ltlen, bnew, blen, XFSA_FIXUP_CNT_OK)))
			goto error0;
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
		xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);

		trace_xfs_alloc_near_first(args);
		return 0;
	}
	/*
	 * Second algorithm.
	 * Search in the by-bno tree to the left and to the right
	 * simultaneously, until in each case we find a space big enough,
	 * or run into the edge of the tree.  When we run into the edge,
	 * we deallocate that cursor.
	 * If both searches succeed, we compare the two spaces and pick
	 * the better one.
	 * With alignment, it's possible for both to fail; the upper
	 * level algorithm that picks allocation groups for allocations
	 * is not supposed to do this.
	 */
	/*
	 * Allocate and initialize the cursor for the leftward search.
	 */
	bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_BNO);
	/*
	 * Lookup <= bno to find the leftward search's starting point.
	 */
	if ((error = xfs_alloc_lookup_le(bno_cur_lt, args->agbno, args->maxlen, &i)))
		goto error0;
	if (!i) {
		/*
		 * Didn't find anything; use this cursor for the rightward
		 * search.
		 */
		bno_cur_gt = bno_cur_lt;
		bno_cur_lt = NULL;
	}
	/*
	 * Found something.  Duplicate the cursor for the rightward search.
	 */
	else if ((error = xfs_btree_dup_cursor(bno_cur_lt, &bno_cur_gt)))
		goto error0;
	/*
	 * Increment the cursor, so we will point at the entry just right
	 * of the leftward entry if any, or to the leftmost entry.
	 */
	if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
		goto error0;
	if (!i) {
		/*
		 * It failed, there are no rightward entries.
		 */
		xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_NOERROR);
		bno_cur_gt = NULL;
	}
	/*
	 * Loop going left with the leftward cursor, right with the
	 * rightward cursor, until either both directions give up or
	 * we find an entry at least as big as minlen.
	 */
	do {
		if (bno_cur_lt) {
			if ((error = xfs_alloc_get_rec(bno_cur_lt, &ltbno, &ltlen, &i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
			busy |= xfs_alloc_compute_aligned(args, ltbno, ltlen,
					&ltbnoa, &ltlena, &busy_gen);
			if (ltlena >= args->minlen && ltbnoa >= args->min_agbno)
				break;
			if ((error = xfs_btree_decrement(bno_cur_lt, 0, &i)))
				goto error0;
			if (!i || ltbnoa < args->min_agbno) {
				xfs_btree_del_cursor(bno_cur_lt,
						     XFS_BTREE_NOERROR);
				bno_cur_lt = NULL;
			}
		}
		if (bno_cur_gt) {
			if ((error = xfs_alloc_get_rec(bno_cur_gt, &gtbno, &gtlen, &i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
			busy |= xfs_alloc_compute_aligned(args, gtbno, gtlen,
					&gtbnoa, &gtlena, &busy_gen);
			if (gtlena >= args->minlen && gtbnoa <= args->max_agbno)
				break;
			if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
				goto error0;
			if (!i || gtbnoa > args->max_agbno) {
				xfs_btree_del_cursor(bno_cur_gt,
						     XFS_BTREE_NOERROR);
				bno_cur_gt = NULL;
			}
		}
	} while (bno_cur_lt || bno_cur_gt);

	/*
	 * Got both cursors still active, need to find better entry.
	 */
	if (bno_cur_lt && bno_cur_gt) {
		if (ltlena >= args->minlen) {
			/*
			 * Left side is good, look for a right side entry.
			 */
			args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
			xfs_alloc_fix_len(args);
			ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
				args->alignment, args->datatype, ltbnoa,
				ltlena, &ltnew);

			error = xfs_alloc_find_best_extent(args,
						&bno_cur_lt, &bno_cur_gt,
						ltdiff, &gtbno, &gtlen,
						&gtbnoa, &gtlena,
						0 /* search right */);
		} else {
			ASSERT(gtlena >= args->minlen);

			/*
			 * Right side is good, look for a left side entry.
			 */
			args->len = XFS_EXTLEN_MIN(gtlena, args->maxlen);
			xfs_alloc_fix_len(args);
			gtdiff = xfs_alloc_compute_diff(args->agbno, args->len,
				args->alignment, args->datatype, gtbnoa,
				gtlena, &gtnew);

			error = xfs_alloc_find_best_extent(args,
						&bno_cur_gt, &bno_cur_lt,
						gtdiff, &ltbno, &ltlen,
						&ltbnoa, &ltlena,
						1 /* search left */);
		}

		if (error)
			goto error0;
	}

	/*
	 * If we couldn't get anything, give up.
	 */
	if (bno_cur_lt == NULL && bno_cur_gt == NULL) {
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);

		if (busy) {
			trace_xfs_alloc_near_busy(args);
			xfs_extent_busy_flush(args->mp, args->pag, busy_gen);
			goto restart;
		}
		trace_xfs_alloc_size_neither(args);
		args->agbno = NULLAGBLOCK;
		return 0;
	}

	/*
	 * At this point we have selected a freespace entry, either to the
	 * left or to the right.  If it's on the right, copy all the
	 * useful variables to the "left" set so we only have one
	 * copy of this code.
	 */
	if (bno_cur_gt) {
		bno_cur_lt = bno_cur_gt;
		bno_cur_gt = NULL;
		ltbno = gtbno;
		ltbnoa = gtbnoa;
		ltlen = gtlen;
		ltlena = gtlena;
		j = 1;
	} else
		j = 0;

	/*
	 * Fix up the length and compute the useful address.
	 */
	args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
	xfs_alloc_fix_len(args);
	rlen = args->len;
	(void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment,
				     args->datatype, ltbnoa, ltlena, &ltnew);
	ASSERT(ltnew >= ltbno);
	ASSERT(ltnew + rlen <= ltbnoa + ltlena);
	ASSERT(ltnew + rlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
	ASSERT(ltnew >= args->min_agbno && ltnew <= args->max_agbno);
	args->agbno = ltnew;

	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen,
			ltnew, rlen, XFSA_FIXUP_BNO_OK)))
		goto error0;

	if (j)
		trace_xfs_alloc_near_greater(args);
	else
		trace_xfs_alloc_near_lesser(args);

	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
	return 0;

 error0:
	trace_xfs_alloc_near_error(args);
	if (cnt_cur != NULL)
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
	if (bno_cur_lt != NULL)
		xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_ERROR);
	if (bno_cur_gt != NULL)
		xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_ERROR);
	return error;
}

/*
 * Allocate a variable extent anywhere in the allocation group agno.
 * Extent's length (returned in len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
 */
STATIC int				/* error */
xfs_alloc_ag_vextent_size(
	xfs_alloc_arg_t	*args)		/* allocation argument structure */
{
	xfs_btree_cur_t	*bno_cur;	/* cursor for bno btree */
	xfs_btree_cur_t	*cnt_cur;	/* cursor for cnt btree */
	int		error;		/* error result */
	xfs_agblock_t	fbno;		/* start of found freespace */
	xfs_extlen_t	flen;		/* length of found freespace */
	int		i;		/* temp status variable */
	xfs_agblock_t	rbno;		/* returned block number */
	xfs_extlen_t	rlen;		/* length of returned extent */
	bool		busy;
	unsigned	busy_gen;

restart:
	/*
	 * Allocate and initialize a cursor for the by-size btree.
	 */
	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_CNT);
	bno_cur = NULL;
	busy = false;

	/*
	 * Look for an entry >= maxlen+alignment-1 blocks.
	 */
	if ((error = xfs_alloc_lookup_ge(cnt_cur, 0,
			args->maxlen + args->alignment - 1, &i)))
		goto error0;

	/*
	 * If none then we have to settle for a smaller extent. In the case that
	 * there are no large extents, this will return the last entry in the
	 * tree unless the tree is empty. In the case that there are only busy
	 * large extents, this will return the largest small extent unless there
	 * are no smaller extents available.
	 */
	if (!i) {
		error = xfs_alloc_ag_vextent_small(args, cnt_cur,
						   &fbno, &flen, &i);
		if (error)
			goto error0;
		if (i == 0 || flen == 0) {
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			trace_xfs_alloc_size_noentry(args);
			return 0;
		}
		ASSERT(i == 1);
		busy = xfs_alloc_compute_aligned(args, fbno, flen, &rbno,
				&rlen, &busy_gen);
	} else {
		/*
		 * Search for a non-busy extent that is large enough.
		 */
		for (;;) {
			error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i);
			if (error)
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);

			busy = xfs_alloc_compute_aligned(args, fbno, flen,
					&rbno, &rlen, &busy_gen);

			if (rlen >= args->maxlen)
				break;

			error = xfs_btree_increment(cnt_cur, 0, &i);
			if (error)
				goto error0;
			if (i == 0) {
				/*
				 * Our only valid extents must have been busy.
				 * Make it unbusy by forcing the log out and
				 * retrying.
				 */
				xfs_btree_del_cursor(cnt_cur,
						     XFS_BTREE_NOERROR);
				trace_xfs_alloc_size_busy(args);
				xfs_extent_busy_flush(args->mp,
							args->pag, busy_gen);
				goto restart;
			}
		}
	}

	/*
	 * In the first case above, we got the last entry in the
	 * by-size btree.  Now we check to see if the space hits maxlen
	 * once aligned; if not, we search left for something better.
	 * This can't happen in the second case above.
	 */
	rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
	XFS_WANT_CORRUPTED_GOTO(args->mp, rlen == 0 ||
			(rlen <= flen && rbno + rlen <= fbno + flen), error0);
	if (rlen < args->maxlen) {
		xfs_agblock_t	bestfbno;
		xfs_extlen_t	bestflen;
		xfs_agblock_t	bestrbno;
		xfs_extlen_t	bestrlen;

		bestrlen = rlen;
		bestrbno = rbno;
		bestflen = flen;
		bestfbno = fbno;
		for (;;) {
			if ((error = xfs_btree_decrement(cnt_cur, 0, &i)))
				goto error0;
			if (i == 0)
				break;
			if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen,
					&i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
			if (flen < bestrlen)
				break;
			busy = xfs_alloc_compute_aligned(args, fbno, flen,
					&rbno, &rlen, &busy_gen);
			rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
			XFS_WANT_CORRUPTED_GOTO(args->mp, rlen == 0 ||
				(rlen <= flen && rbno + rlen <= fbno + flen),
				error0);
			if (rlen > bestrlen) {
				bestrlen = rlen;
				bestrbno = rbno;
				bestflen = flen;
				bestfbno = fbno;
				if (rlen == args->maxlen)
					break;
			}
		}
		if ((error = xfs_alloc_lookup_eq(cnt_cur, bestfbno, bestflen,
				&i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
		rlen = bestrlen;
		rbno = bestrbno;
		flen = bestflen;
		fbno = bestfbno;
	}
	args->wasfromfl = 0;
	/*
	 * Fix up the length.
	 */
	args->len = rlen;
	if (rlen < args->minlen) {
		if (busy) {
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			trace_xfs_alloc_size_busy(args);
			xfs_extent_busy_flush(args->mp, args->pag, busy_gen);
			goto restart;
		}
		goto out_nominleft;
	}
	xfs_alloc_fix_len(args);

	rlen = args->len;
	XFS_WANT_CORRUPTED_GOTO(args->mp, rlen <= flen, error0);
	/*
	 * Allocate and initialize a cursor for the by-block tree.
	 */
	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_BNO);
	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
			rbno, rlen, XFSA_FIXUP_CNT_OK)))
		goto error0;
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	cnt_cur = bno_cur = NULL;
	args->len = rlen;
	args->agbno = rbno;
	XFS_WANT_CORRUPTED_GOTO(args->mp,
		args->agbno + args->len <=
			be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
		error0);
	trace_xfs_alloc_size_done(args);
	return 0;

error0:
	trace_xfs_alloc_size_error(args);
	if (cnt_cur)
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
	if (bno_cur)
		xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	return error;

out_nominleft:
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	trace_xfs_alloc_size_nominleft(args);
	args->agbno = NULLAGBLOCK;
	return 0;
}

/*
 * Deal with the case where only small freespaces remain.
 * Either return the contents of the last freespace record,
 * or allocate space from the freelist if there is nothing in the tree.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent_small(
	xfs_alloc_arg_t	*args,	/* allocation argument structure */
	xfs_btree_cur_t	*ccur,	/* by-size cursor */
	xfs_agblock_t	*fbnop,	/* result block number */
	xfs_extlen_t	*flenp,	/* result length */
	int		*stat)	/* status: 0-freelist, 1-normal/none */
{
	int		error;
	xfs_agblock_t	fbno;
	xfs_extlen_t	flen;
	int		i;

	if ((error = xfs_btree_decrement(ccur, 0, &i)))
		goto error0;
	if (i) {
		if ((error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
	}
	/*
	 * Nothing in the btree, try the freelist.  Make sure
	 * to respect minleft even when pulling from the
	 * freelist.
	 */
	else if (args->minlen == 1 && args->alignment == 1 &&
		 args->resv != XFS_AG_RESV_AGFL &&
		 (be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_flcount)
		  > args->minleft)) {
		error = xfs_alloc_get_freelist(args->tp, args->agbp, &fbno, 0);
		if (error)
			goto error0;
		if (fbno != NULLAGBLOCK) {
			xfs_extent_busy_reuse(args->mp, args->agno, fbno, 1,
			      xfs_alloc_allow_busy_reuse(args->datatype));

			if (xfs_alloc_is_userdata(args->datatype)) {
				xfs_buf_t	*bp;

				bp = xfs_btree_get_bufs(args->mp, args->tp,
					args->agno, fbno, 0);
				if (!bp) {
					error = -EFSCORRUPTED;
					goto error0;
				}
				xfs_trans_binval(args->tp, bp);
			}
			args->len = 1;
			args->agbno = fbno;
			XFS_WANT_CORRUPTED_GOTO(args->mp,
				args->agbno + args->len <=
				be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
				error0);
			args->wasfromfl = 1;
			trace_xfs_alloc_small_freelist(args);

			/*
			 * If we're feeding an AGFL block to something that
			 * doesn't live in the free space, we need to clear
			 * out the OWN_AG rmap.
			 */
			error = xfs_rmap_free(args->tp, args->agbp, args->agno,
					fbno, 1, &XFS_RMAP_OINFO_AG);
			if (error)
				goto error0;

			*stat = 0;
			return 0;
		}
		/*
		 * Nothing in the freelist.
		 */
		else
			flen = 0;
	}
	/*
	 * Can't allocate from the freelist for some reason.
	 */
	else {
		fbno = NULLAGBLOCK;
		flen = 0;
	}
	/*
	 * Can't do the allocation, give up.
	 */
	if (flen < args->minlen) {
		args->agbno = NULLAGBLOCK;
		trace_xfs_alloc_small_notenough(args);
		flen = 0;
	}
	*fbnop = fbno;
	*flenp = flen;
	*stat = 1;
	trace_xfs_alloc_small_done(args);
	return 0;

error0:
	trace_xfs_alloc_small_error(args);
	return error;
}
1690 
1691 /*
1692  * Free the extent starting at agno/bno for length.
1693  */
1694 STATIC int
1695 xfs_free_ag_extent(
1696 	struct xfs_trans		*tp,
1697 	struct xfs_buf			*agbp,
1698 	xfs_agnumber_t			agno,
1699 	xfs_agblock_t			bno,
1700 	xfs_extlen_t			len,
1701 	const struct xfs_owner_info	*oinfo,
1702 	enum xfs_ag_resv_type		type)
1703 {
1704 	struct xfs_mount		*mp;
1705 	struct xfs_perag		*pag;
1706 	struct xfs_btree_cur		*bno_cur;
1707 	struct xfs_btree_cur		*cnt_cur;
1708 	xfs_agblock_t			gtbno; /* start of right neighbor */
1709 	xfs_extlen_t			gtlen; /* length of right neighbor */
1710 	xfs_agblock_t			ltbno; /* start of left neighbor */
1711 	xfs_extlen_t			ltlen; /* length of left neighbor */
1712 	xfs_agblock_t			nbno; /* new starting block of freesp */
1713 	xfs_extlen_t			nlen; /* new length of freespace */
1714 	int				haveleft; /* have a left neighbor */
1715 	int				haveright; /* have a right neighbor */
1716 	int				i;
1717 	int				error;
1718 
1719 	bno_cur = cnt_cur = NULL;
1720 	mp = tp->t_mountp;
1721 
1722 	if (!xfs_rmap_should_skip_owner_update(oinfo)) {
1723 		error = xfs_rmap_free(tp, agbp, agno, bno, len, oinfo);
1724 		if (error)
1725 			goto error0;
1726 	}
1727 
1728 	/*
1729 	 * Allocate and initialize a cursor for the by-block btree.
1730 	 */
1731 	bno_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO);
1732 	/*
1733 	 * Look for a neighboring block on the left (lower block numbers)
1734 	 * that is contiguous with this space.
1735 	 */
1736 	if ((error = xfs_alloc_lookup_le(bno_cur, bno, len, &haveleft)))
1737 		goto error0;
1738 	if (haveleft) {
1739 		/*
1740 		 * There is a block to our left.
1741 		 */
1742 		if ((error = xfs_alloc_get_rec(bno_cur, &ltbno, &ltlen, &i)))
1743 			goto error0;
1744 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1745 		/*
1746 		 * It's not contiguous, though.
1747 		 */
1748 		if (ltbno + ltlen < bno)
1749 			haveleft = 0;
1750 		else {
1751 			/*
1752 			 * If this failure happens, the request to free this
1753 			 * space was invalid; it's (partly) already free.
1754 			 * Very bad.
1755 			 */
1756 			XFS_WANT_CORRUPTED_GOTO(mp,
1757 						ltbno + ltlen <= bno, error0);
1758 		}
1759 	}
1760 	/*
1761 	 * Look for a neighboring block on the right (higher block numbers)
1762 	 * that is contiguous with this space.
1763 	 */
1764 	if ((error = xfs_btree_increment(bno_cur, 0, &haveright)))
1765 		goto error0;
1766 	if (haveright) {
1767 		/*
1768 		 * There is a block to our right.
1769 		 */
1770 		if ((error = xfs_alloc_get_rec(bno_cur, &gtbno, &gtlen, &i)))
1771 			goto error0;
1772 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1773 		/*
1774 		 * It's not contiguous, though.
1775 		 */
1776 		if (bno + len < gtbno)
1777 			haveright = 0;
1778 		else {
1779 			/*
1780 			 * If this failure happens, the request to free this
1781 			 * space was invalid; it's (partly) already free.
1782 			 * Very bad.
1783 			 */
1784 			XFS_WANT_CORRUPTED_GOTO(mp, gtbno >= bno + len, error0);
1785 		}
1786 	}
1787 	/*
1788 	 * Now allocate and initialize a cursor for the by-size tree.
1789 	 */
1790 	cnt_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_CNT);
1791 	/*
1792 	 * Have both left and right contiguous neighbors.
1793 	 * Merge all three into a single free block.
1794 	 */
1795 	if (haveleft && haveright) {
1796 		/*
1797 		 * Delete the old by-size entry on the left.
1798 		 */
1799 		if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
1800 			goto error0;
1801 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1802 		if ((error = xfs_btree_delete(cnt_cur, &i)))
1803 			goto error0;
1804 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1805 		/*
1806 		 * Delete the old by-size entry on the right.
1807 		 */
1808 		if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
1809 			goto error0;
1810 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1811 		if ((error = xfs_btree_delete(cnt_cur, &i)))
1812 			goto error0;
1813 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1814 		/*
1815 		 * Delete the old by-block entry for the right block.
1816 		 */
1817 		if ((error = xfs_btree_delete(bno_cur, &i)))
1818 			goto error0;
1819 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1820 		/*
1821 		 * Move the by-block cursor back to the left neighbor.
1822 		 */
1823 		if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
1824 			goto error0;
1825 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1826 #ifdef DEBUG
1827 		/*
1828 		 * Check that this is the right record: delete didn't
1829 		 * mangle the cursor.
1830 		 */
1831 		{
1832 			xfs_agblock_t	xxbno;
1833 			xfs_extlen_t	xxlen;
1834 
1835 			if ((error = xfs_alloc_get_rec(bno_cur, &xxbno, &xxlen,
1836 					&i)))
1837 				goto error0;
1838 			XFS_WANT_CORRUPTED_GOTO(mp,
1839 				i == 1 && xxbno == ltbno && xxlen == ltlen,
1840 				error0);
1841 		}
1842 #endif
1843 		/*
1844 		 * Update remaining by-block entry to the new, joined block.
1845 		 */
1846 		nbno = ltbno;
1847 		nlen = len + ltlen + gtlen;
1848 		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
1849 			goto error0;
1850 	}
1851 	/*
1852 	 * Have only a left contiguous neighbor.
1853 	 * Merge it together with the new freespace.
1854 	 */
1855 	else if (haveleft) {
1856 		/*
1857 		 * Delete the old by-size entry on the left.
1858 		 */
1859 		if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
1860 			goto error0;
1861 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1862 		if ((error = xfs_btree_delete(cnt_cur, &i)))
1863 			goto error0;
1864 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1865 		/*
1866 		 * Back up the by-block cursor to the left neighbor, and
1867 		 * update its length.
1868 		 */
1869 		if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
1870 			goto error0;
1871 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1872 		nbno = ltbno;
1873 		nlen = len + ltlen;
1874 		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
1875 			goto error0;
1876 	}
1877 	/*
1878 	 * Have only a right contiguous neighbor.
1879 	 * Merge it together with the new freespace.
1880 	 */
1881 	else if (haveright) {
1882 		/*
1883 		 * Delete the old by-size entry on the right.
1884 		 */
1885 		if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
1886 			goto error0;
1887 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1888 		if ((error = xfs_btree_delete(cnt_cur, &i)))
1889 			goto error0;
1890 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1891 		/*
1892 		 * Update the starting block and length of the right
1893 		 * neighbor in the by-block tree.
1894 		 */
1895 		nbno = bno;
1896 		nlen = len + gtlen;
1897 		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
1898 			goto error0;
1899 	}
1900 	/*
1901 	 * No contiguous neighbors.
1902 	 * Insert the new freespace into the by-block tree.
1903 	 */
1904 	else {
1905 		nbno = bno;
1906 		nlen = len;
1907 		if ((error = xfs_btree_insert(bno_cur, &i)))
1908 			goto error0;
1909 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1910 	}
1911 	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
1912 	bno_cur = NULL;
1913 	/*
1914 	 * In all cases we need to insert the new freespace in the by-size tree.
1915 	 */
1916 	if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i)))
1917 		goto error0;
1918 	XFS_WANT_CORRUPTED_GOTO(mp, i == 0, error0);
1919 	if ((error = xfs_btree_insert(cnt_cur, &i)))
1920 		goto error0;
1921 	XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1922 	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1923 	cnt_cur = NULL;
1924 
1925 	/*
1926 	 * Update the freespace totals in the ag and superblock.
1927 	 */
1928 	pag = xfs_perag_get(mp, agno);
1929 	error = xfs_alloc_update_counters(tp, pag, agbp, len);
1930 	xfs_ag_resv_free_extent(pag, type, tp, len);
1931 	xfs_perag_put(pag);
1932 	if (error)
1933 		goto error0;
1934 
1935 	XFS_STATS_INC(mp, xs_freex);
1936 	XFS_STATS_ADD(mp, xs_freeb, len);
1937 
1938 	trace_xfs_free_extent(mp, agno, bno, len, type, haveleft, haveright);
1939 
1940 	return 0;
1941 
1942  error0:
1943 	trace_xfs_free_extent(mp, agno, bno, len, type, -1, -1);
1944 	if (bno_cur)
1945 		xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
1946 	if (cnt_cur)
1947 		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1948 	return error;
1949 }
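
/*
 * The four merge cases above reduce to simple interval arithmetic once
 * the btree bookkeeping is stripped away.  A minimal sketch of that
 * core logic, using plain integers in place of the AG block/length
 * types; illustrative only, not part of the kernel sources:
 */
#if 0	/* illustrative sketch, not compiled */
static void
merge_free_extent(
	uint32_t	bno,		/* extent being freed */
	uint32_t	len,
	int		haveleft,	/* left neighbor is contiguous */
	uint32_t	ltbno,
	uint32_t	ltlen,
	int		haveright,	/* right neighbor is contiguous */
	uint32_t	gtlen,
	uint32_t	*nbno,		/* resulting free space record */
	uint32_t	*nlen)
{
	if (haveleft && haveright) {	/* join all three pieces */
		*nbno = ltbno;
		*nlen = ltlen + len + gtlen;
	} else if (haveleft) {		/* extend the left neighbor */
		*nbno = ltbno;
		*nlen = ltlen + len;
	} else if (haveright) {		/* absorb the right neighbor */
		*nbno = bno;
		*nlen = len + gtlen;
	} else {			/* standalone record */
		*nbno = bno;
		*nlen = len;
	}
}
#endif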
1950 
1951 /*
1952  * Visible (exported) allocation/free functions.
1953  * Some of these are used just by xfs_alloc_btree.c and this file.
1954  */
1955 
1956 /*
1957  * Compute and fill in value of m_ag_maxlevels.
1958  */
1959 void
1960 xfs_alloc_compute_maxlevels(
1961 	xfs_mount_t	*mp)	/* file system mount structure */
1962 {
1963 	mp->m_ag_maxlevels = xfs_btree_compute_maxlevels(mp->m_alloc_mnr,
1964 			(mp->m_sb.sb_agblocks + 1) / 2);
1965 }
1966 
1967 /*
1968  * Find the length of the longest extent in an AG.  The 'need' parameter
1969  * specifies how much space we're going to need for the AGFL and the
1970  * 'reserved' parameter tells us how many blocks in this AG are reserved for
1971  * other callers.
1972  */
1973 xfs_extlen_t
1974 xfs_alloc_longest_free_extent(
1975 	struct xfs_perag	*pag,
1976 	xfs_extlen_t		need,
1977 	xfs_extlen_t		reserved)
1978 {
1979 	xfs_extlen_t		delta = 0;
1980 
1981 	/*
1982 	 * If the AGFL needs a recharge, we'll have to subtract that from the
1983 	 * longest extent.
1984 	 */
1985 	if (need > pag->pagf_flcount)
1986 		delta = need - pag->pagf_flcount;
1987 
1988 	/*
1989 	 * If we cannot maintain others' reservations with space from the
1990 	 * not-longest freesp extents, we'll have to subtract /that/ from
1991 	 * the longest extent too.
1992 	 */
1993 	if (pag->pagf_freeblks - pag->pagf_longest < reserved)
1994 		delta += reserved - (pag->pagf_freeblks - pag->pagf_longest);
1995 
1996 	/*
1997 	 * If the longest extent is long enough to satisfy all the
1998 	 * reservations and AGFL rules in place, we can return this extent.
1999 	 */
2000 	if (pag->pagf_longest > delta)
2001 		return pag->pagf_longest - delta;
2002 
2003 	/* Otherwise, let the caller try for 1 block if there's space. */
2004 	return pag->pagf_flcount > 0 || pag->pagf_longest > 0;
2005 }
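
/*
 * Worked example for the function above, with assumed values: if
 * pagf_longest = 100, pagf_freeblks = 120, the AGFL needs 8 blocks but
 * holds 3 (delta = 5 so far), and reserved = 30, then only 20 blocks of
 * non-longest free space exist, so another 10 reserved blocks must come
 * out of the longest extent: 100 - 5 - 10 = 85 usable blocks.  A
 * standalone sketch of the same arithmetic (not kernel code):
 */
#if 0	/* illustrative sketch, not compiled */
static uint32_t
longest_usable(
	uint32_t	longest,	/* pagf_longest */
	uint32_t	freeblks,	/* pagf_freeblks */
	uint32_t	flcount,	/* pagf_flcount */
	uint32_t	need,
	uint32_t	reserved)
{
	uint32_t	delta = 0;

	if (need > flcount)		/* AGFL refill comes first */
		delta = need - flcount;
	if (freeblks - longest < reserved)
		delta += reserved - (freeblks - longest);
	if (longest > delta)
		return longest - delta;
	/* at best a single block may still be allocatable */
	return flcount > 0 || longest > 0;
}
/* longest_usable(100, 120, 3, 8, 30) == 85 */
#endif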
2006 
2007 unsigned int
2008 xfs_alloc_min_freelist(
2009 	struct xfs_mount	*mp,
2010 	struct xfs_perag	*pag)
2011 {
2012 	unsigned int		min_free;
2013 
2014 	/* space needed by the by-bno freespace btree */
2015 	min_free = min_t(unsigned int, pag->pagf_levels[XFS_BTNUM_BNOi] + 1,
2016 				       mp->m_ag_maxlevels);
2017 	/* space needed by the by-size freespace btree */
2018 	min_free += min_t(unsigned int, pag->pagf_levels[XFS_BTNUM_CNTi] + 1,
2019 				       mp->m_ag_maxlevels);
2020 	/* space needed by the reverse mapping btree */
2021 	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
2022 		min_free += min_t(unsigned int,
2023 				  pag->pagf_levels[XFS_BTNUM_RMAPi] + 1,
2024 				  mp->m_rmap_maxlevels);
2025 
2026 	return min_free;
2027 }
2028 
2029 /*
2030  * Check if the operation we are fixing up the freelist for should go ahead or
2031  * not. If we are freeing blocks, we always allow it; otherwise the allocation
2032  * is dependent on whether the size and shape of free space available will
2033  * permit the requested allocation to take place.
2034  */
2035 static bool
2036 xfs_alloc_space_available(
2037 	struct xfs_alloc_arg	*args,
2038 	xfs_extlen_t		min_free,
2039 	int			flags)
2040 {
2041 	struct xfs_perag	*pag = args->pag;
2042 	xfs_extlen_t		alloc_len, longest;
2043 	xfs_extlen_t		reservation; /* blocks that are still reserved */
2044 	int			available;
2045 	xfs_extlen_t		agflcount;
2046 
2047 	if (flags & XFS_ALLOC_FLAG_FREEING)
2048 		return true;
2049 
2050 	reservation = xfs_ag_resv_needed(pag, args->resv);
2051 
2052 	/* do we have enough contiguous free space for the allocation? */
2053 	alloc_len = args->minlen + (args->alignment - 1) + args->minalignslop;
2054 	longest = xfs_alloc_longest_free_extent(pag, min_free, reservation);
2055 	if (longest < alloc_len)
2056 		return false;
2057 
2058 	/*
2059 	 * Do we have enough free space remaining for the allocation? Don't
2060 	 * account extra agfl blocks because we are about to defer freeing them,
2061 	 * making them unavailable until the current transaction commits.
2062 	 */
2063 	agflcount = min_t(xfs_extlen_t, pag->pagf_flcount, min_free);
2064 	available = (int)(pag->pagf_freeblks + agflcount -
2065 			  reservation - min_free - args->minleft);
2066 	if (available < (int)max(args->total, alloc_len))
2067 		return false;
2068 
2069 	/*
2070 	 * Clamp maxlen to the amount of free space available for the actual
2071 	 * extent allocation.
2072 	 */
2073 	if (available < (int)args->maxlen && !(flags & XFS_ALLOC_FLAG_CHECK)) {
2074 		args->maxlen = available;
2075 		ASSERT(args->maxlen > 0);
2076 		ASSERT(args->maxlen >= args->minlen);
2077 	}
2078 
2079 	return true;
2080 }
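
/*
 * A condensed sketch of the checks above (not kernel code): the
 * worst-case request length includes room to slide the extent to an
 * aligned start, and only AGFL blocks that survive the refill to
 * min_free count as available, since the excess is being defer-freed.
 * Here 'longest' stands in for the xfs_alloc_longest_free_extent()
 * result.
 */
#if 0	/* illustrative sketch, not compiled */
static int
space_available(
	int	freeblks,	/* pag->pagf_freeblks */
	int	longest,	/* longest usable free extent */
	int	flcount,	/* pag->pagf_flcount */
	int	min_free,	/* freelist target size */
	int	reservation,	/* blocks still reserved to others */
	int	minleft,
	int	minlen,
	int	alignment,
	int	minalignslop,
	int	total)
{
	int	alloc_len = minlen + (alignment - 1) + minalignslop;
	int	agflcount = flcount < min_free ? flcount : min_free;
	int	available = freeblks + agflcount -
			    reservation - min_free - minleft;

	if (longest < alloc_len)
		return 0;
	return available >= (total > alloc_len ? total : alloc_len);
}
#endif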
2081 
2082 int
2083 xfs_free_agfl_block(
2084 	struct xfs_trans	*tp,
2085 	xfs_agnumber_t		agno,
2086 	xfs_agblock_t		agbno,
2087 	struct xfs_buf		*agbp,
2088 	struct xfs_owner_info	*oinfo)
2089 {
2090 	int			error;
2091 	struct xfs_buf		*bp;
2092 
2093 	error = xfs_free_ag_extent(tp, agbp, agno, agbno, 1, oinfo,
2094 				   XFS_AG_RESV_AGFL);
2095 	if (error)
2096 		return error;
2097 
2098 	bp = xfs_btree_get_bufs(tp->t_mountp, tp, agno, agbno, 0);
2099 	if (!bp)
2100 		return -EFSCORRUPTED;
2101 	xfs_trans_binval(tp, bp);
2102 
2103 	return 0;
2104 }
2105 
2106 /*
2107  * Check the agfl fields of the agf for inconsistency or corruption. The purpose
2108  * is to detect an agfl header padding mismatch between current and early v5
2109  * kernels. This problem manifests as a 1-slot size difference between the
2110  * on-disk flcount and the active [first, last] range of a wrapped agfl. This
2111  * may also catch variants of agfl count corruption unrelated to padding. Either
2112  * way, we'll reset the agfl and warn the user.
2113  *
2114  * Return true if a reset is required before the agfl can be used, false
2115  * otherwise.
2116  */
2117 static bool
2118 xfs_agfl_needs_reset(
2119 	struct xfs_mount	*mp,
2120 	struct xfs_agf		*agf)
2121 {
2122 	uint32_t		f = be32_to_cpu(agf->agf_flfirst);
2123 	uint32_t		l = be32_to_cpu(agf->agf_fllast);
2124 	uint32_t		c = be32_to_cpu(agf->agf_flcount);
2125 	int			agfl_size = xfs_agfl_size(mp);
2126 	int			active;
2127 
2128 	/* no agfl header on v4 supers */
2129 	if (!xfs_sb_version_hascrc(&mp->m_sb))
2130 		return false;
2131 
2132 	/*
2133 	 * The agf read verifier catches severe corruption of these fields.
2134 	 * Repeat some sanity checks to cover a packed -> unpacked mismatch if
2135 	 * the verifier allows it.
2136 	 */
2137 	if (f >= agfl_size || l >= agfl_size)
2138 		return true;
2139 	if (c > agfl_size)
2140 		return true;
2141 
2142 	/*
2143 	 * Check consistency between the on-disk count and the active range. An
2144 	 * agfl padding mismatch manifests as an inconsistent flcount.
2145 	 */
2146 	if (c && l >= f)
2147 		active = l - f + 1;
2148 	else if (c)
2149 		active = agfl_size - f + l + 1;
2150 	else
2151 		active = 0;
2152 
2153 	return active != c;
2154 }
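
/*
 * The active-slot count above is circular-buffer arithmetic: when the
 * list wraps (fllast < flfirst), the active range runs from flfirst to
 * the end of the array and then from slot 0 to fllast.  A standalone
 * sketch with assumed values (not kernel code):
 */
#if 0	/* illustrative sketch, not compiled */
static int
agfl_active_slots(
	uint32_t	f,	/* agf_flfirst */
	uint32_t	l,	/* agf_fllast */
	uint32_t	c,	/* agf_flcount */
	uint32_t	size)	/* xfs_agfl_size() */
{
	if (!c)
		return 0;		/* empty list; range is stale */
	if (l >= f)
		return l - f + 1;	/* simple contiguous range */
	return size - f + l + 1;	/* wrapped: f..size-1, then 0..l */
}
/*
 * e.g. size = 118, f = 116, l = 2, c = 5: active = 118 - 116 + 2 + 1
 * = 5, consistent; a padding-mismatched kernel would have written an
 * flcount off by one, and the check above would demand a reset.
 */
#endif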
2155 
2156 /*
2157  * Reset the agfl to an empty state. Ignore/drop any existing blocks since the
2158  * agfl content cannot be trusted. Warn the user that a repair is required to
2159  * recover leaked blocks.
2160  *
2161  * The purpose of this mechanism is to handle filesystems affected by the agfl
2162  * header padding mismatch problem. A reset keeps the filesystem online with a
2163  * relatively minor free space accounting inconsistency rather than suffer the
2164  * inevitable crash from use of an invalid agfl block.
2165  */
2166 static void
2167 xfs_agfl_reset(
2168 	struct xfs_trans	*tp,
2169 	struct xfs_buf		*agbp,
2170 	struct xfs_perag	*pag)
2171 {
2172 	struct xfs_mount	*mp = tp->t_mountp;
2173 	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
2174 
2175 	ASSERT(pag->pagf_agflreset);
2176 	trace_xfs_agfl_reset(mp, agf, 0, _RET_IP_);
2177 
2178 	xfs_warn(mp,
2179 	       "WARNING: Reset corrupted AGFL on AG %u. %d blocks leaked. "
2180 	       "Please unmount and run xfs_repair.",
2181 	         pag->pag_agno, pag->pagf_flcount);
2182 
2183 	agf->agf_flfirst = 0;
2184 	agf->agf_fllast = cpu_to_be32(xfs_agfl_size(mp) - 1);
2185 	agf->agf_flcount = 0;
2186 	xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLFIRST | XFS_AGF_FLLAST |
2187 				    XFS_AGF_FLCOUNT);
2188 
2189 	pag->pagf_flcount = 0;
2190 	pag->pagf_agflreset = false;
2191 }
2192 
2193 /*
2194  * Defer an AGFL block free. This is effectively equivalent to
2195  * xfs_bmap_add_free() with some special handling particular to AGFL blocks.
2196  *
2197  * Deferring AGFL frees helps prevent log reservation overruns due to too many
2198  * allocation operations in a transaction. AGFL frees are prone to this problem
2199  * because for one they are always freed one at a time. Further, an immediate
2200  * AGFL block free can cause a btree join and require another block free before
2201  * the real allocation can proceed. Deferring the free disconnects freeing up
2202  * the AGFL slot from freeing the block.
2203  */
2204 STATIC void
2205 xfs_defer_agfl_block(
2206 	struct xfs_trans		*tp,
2207 	xfs_agnumber_t			agno,
2208 	xfs_fsblock_t			agbno,
2209 	struct xfs_owner_info		*oinfo)
2210 {
2211 	struct xfs_mount		*mp = tp->t_mountp;
2212 	struct xfs_extent_free_item	*new;		/* new element */
2213 
2214 	ASSERT(xfs_bmap_free_item_zone != NULL);
2215 	ASSERT(oinfo != NULL);
2216 
2217 	new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
2218 	new->xefi_startblock = XFS_AGB_TO_FSB(mp, agno, agbno);
2219 	new->xefi_blockcount = 1;
2220 	new->xefi_oinfo = *oinfo;
2221 
2222 	trace_xfs_agfl_free_defer(mp, agno, 0, agbno, 1);
2223 
2224 	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_AGFL_FREE, &new->xefi_list);
2225 }
2226 
2227 /*
2228  * Decide whether to use this allocation group for this allocation.
2229  * If so, fix up the btree freelist's size.
2230  */
2231 int			/* error */
2232 xfs_alloc_fix_freelist(
2233 	struct xfs_alloc_arg	*args,	/* allocation argument structure */
2234 	int			flags)	/* XFS_ALLOC_FLAG_... */
2235 {
2236 	struct xfs_mount	*mp = args->mp;
2237 	struct xfs_perag	*pag = args->pag;
2238 	struct xfs_trans	*tp = args->tp;
2239 	struct xfs_buf		*agbp = NULL;
2240 	struct xfs_buf		*agflbp = NULL;
2241 	struct xfs_alloc_arg	targs;	/* local allocation arguments */
2242 	xfs_agblock_t		bno;	/* freelist block */
2243 	xfs_extlen_t		need;	/* total blocks needed in freelist */
2244 	int			error = 0;
2245 
2246 	/* deferred ops (AGFL block frees) require permanent transactions */
2247 	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
2248 
2249 	if (!pag->pagf_init) {
2250 		error = xfs_alloc_read_agf(mp, tp, args->agno, flags, &agbp);
2251 		if (error)
2252 			goto out_no_agbp;
2253 		if (!pag->pagf_init) {
2254 			ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
2255 			ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
2256 			goto out_agbp_relse;
2257 		}
2258 	}
2259 
2260 	/*
2261 	 * If this is a metadata-preferred pag and we are allocating user
2262 	 * data, try somewhere else unless we are being asked to try harder
2263 	 * at this point.
2264 	 */
2265 	if (pag->pagf_metadata && xfs_alloc_is_userdata(args->datatype) &&
2266 	    (flags & XFS_ALLOC_FLAG_TRYLOCK)) {
2267 		ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
2268 		goto out_agbp_relse;
2269 	}
2270 
2271 	need = xfs_alloc_min_freelist(mp, pag);
2272 	if (!xfs_alloc_space_available(args, need, flags |
2273 			XFS_ALLOC_FLAG_CHECK))
2274 		goto out_agbp_relse;
2275 
2276 	/*
2277 	 * Get the a.g. freespace buffer.
2278 	 * Can fail if we're not blocking on locks and it's already held.
2279 	 */
2280 	if (!agbp) {
2281 		error = xfs_alloc_read_agf(mp, tp, args->agno, flags, &agbp);
2282 		if (error)
2283 			goto out_no_agbp;
2284 		if (!agbp) {
2285 			ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
2286 			ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
2287 			goto out_no_agbp;
2288 		}
2289 	}
2290 
2291 	/* reset a padding mismatched agfl before final free space check */
2292 	if (pag->pagf_agflreset)
2293 		xfs_agfl_reset(tp, agbp, pag);
2294 
2295 	/* If there isn't enough total space or a long enough extent, reject it. */
2296 	need = xfs_alloc_min_freelist(mp, pag);
2297 	if (!xfs_alloc_space_available(args, need, flags))
2298 		goto out_agbp_relse;
2299 
2300 	/*
2301 	 * Make the freelist shorter if it's too long.
2302 	 *
2303 	 * Note that from this point onwards, we will always release the agf and
2304 	 * agfl buffers on error. This handles the case where we error out and
2305 	 * the buffers are clean or may not have been joined to the transaction
2306 	 * and hence need to be released manually. If they have been joined to
2307 	 * the transaction, then xfs_trans_brelse() will handle them
2308 	 * appropriately based on the recursion count and dirty state of the
2309 	 * buffer.
2310 	 *
2311 	 * XXX (dgc): When we have lots of free space, does this buy us
2312 	 * anything other than extra overhead when we need to put more blocks
2313 	 * back on the free list? Maybe we should only do this when space is
2314 	 * getting low or the AGFL is more than half full?
2315 	 *
2316 	 * The NOSHRINK flag prevents the AGFL from being shrunk if it's too
2317 	 * big; the NORMAP flag prevents AGFL expand/shrink operations from
2318 	 * updating the rmapbt.  Both flags are used in xfs_repair while we're
2319 	 * rebuilding the rmapbt, and neither is used by the kernel.  They're
2320 	 * both required to ensure that rmaps are correctly recorded for the
2321 	 * regenerated AGFL, bnobt, and cntbt.  See repair/phase5.c and
2322 	 * repair/rmap.c in xfsprogs for details.
2323 	 */
2324 	memset(&targs, 0, sizeof(targs));
2325 	/* struct copy below */
2326 	if (flags & XFS_ALLOC_FLAG_NORMAP)
2327 		targs.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
2328 	else
2329 		targs.oinfo = XFS_RMAP_OINFO_AG;
2330 	while (!(flags & XFS_ALLOC_FLAG_NOSHRINK) && pag->pagf_flcount > need) {
2331 		error = xfs_alloc_get_freelist(tp, agbp, &bno, 0);
2332 		if (error)
2333 			goto out_agbp_relse;
2334 
2335 		/* defer agfl frees */
2336 		xfs_defer_agfl_block(tp, args->agno, bno, &targs.oinfo);
2337 	}
2338 
2339 	targs.tp = tp;
2340 	targs.mp = mp;
2341 	targs.agbp = agbp;
2342 	targs.agno = args->agno;
2343 	targs.alignment = targs.minlen = targs.prod = 1;
2344 	targs.type = XFS_ALLOCTYPE_THIS_AG;
2345 	targs.pag = pag;
2346 	error = xfs_alloc_read_agfl(mp, tp, targs.agno, &agflbp);
2347 	if (error)
2348 		goto out_agbp_relse;
2349 
2350 	/* Make the freelist longer if it's too short. */
2351 	while (pag->pagf_flcount < need) {
2352 		targs.agbno = 0;
2353 		targs.maxlen = need - pag->pagf_flcount;
2354 		targs.resv = XFS_AG_RESV_AGFL;
2355 
2356 		/* Allocate as many blocks as possible at once. */
2357 		error = xfs_alloc_ag_vextent(&targs);
2358 		if (error)
2359 			goto out_agflbp_relse;
2360 
2361 		/*
2362 		 * Stop if we run out.  Won't happen if callers are obeying
2363 		 * the restrictions correctly.  Can happen for free calls
2364 		 * on a completely full ag.
2365 		 */
2366 		if (targs.agbno == NULLAGBLOCK) {
2367 			if (flags & XFS_ALLOC_FLAG_FREEING)
2368 				break;
2369 			goto out_agflbp_relse;
2370 		}
2371 		/*
2372 		 * Put each allocated block on the list.
2373 		 */
2374 		for (bno = targs.agbno; bno < targs.agbno + targs.len; bno++) {
2375 			error = xfs_alloc_put_freelist(tp, agbp,
2376 							agflbp, bno, 0);
2377 			if (error)
2378 				goto out_agflbp_relse;
2379 		}
2380 	}
2381 	xfs_trans_brelse(tp, agflbp);
2382 	args->agbp = agbp;
2383 	return 0;
2384 
2385 out_agflbp_relse:
2386 	xfs_trans_brelse(tp, agflbp);
2387 out_agbp_relse:
2388 	if (agbp)
2389 		xfs_trans_brelse(tp, agbp);
2390 out_no_agbp:
2391 	args->agbp = NULL;
2392 	return error;
2393 }
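
/*
 * Stripped of buffer and transaction handling, the sizing policy in
 * xfs_alloc_fix_freelist() is symmetric: pop and defer-free blocks
 * while the list is over target, allocate and push while it is under.
 * In this sketch, struct fl, fl_pop(), fl_push(), alloc_run() and
 * defer_free_block() are hypothetical stand-ins for the AGFL state,
 * xfs_alloc_get_freelist(), xfs_alloc_put_freelist(),
 * xfs_alloc_ag_vextent() and xfs_defer_agfl_block().
 */
#if 0	/* illustrative sketch, not compiled */
static void
fix_freelist_sketch(
	struct fl	*fl,		/* hypothetical freelist state */
	unsigned int	need)		/* xfs_alloc_min_freelist() */
{
	while (fl->count > need)	/* too long: shrink */
		defer_free_block(fl_pop(fl));

	while (fl->count < need) {	/* too short: grow */
		unsigned int	bno;
		unsigned int	len;

		/* grab as many blocks as possible in one allocation */
		if (!alloc_run(need - fl->count, &bno, &len))
			break;		/* the AG is out of space */
		while (len--)		/* push them one at a time */
			fl_push(fl, bno++);
	}
}
#endif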
2394 
2395 /*
2396  * Get a block from the freelist.
2397  * Returns the block address in *bnop; NULLAGBLOCK if the list is empty.
2398  */
2399 int				/* error */
2400 xfs_alloc_get_freelist(
2401 	xfs_trans_t	*tp,	/* transaction pointer */
2402 	xfs_buf_t	*agbp,	/* buffer containing the agf structure */
2403 	xfs_agblock_t	*bnop,	/* block address retrieved from freelist */
2404 	int		btreeblk) /* destination is an AGF btree */
2405 {
2406 	xfs_agf_t	*agf;	/* a.g. freespace structure */
2407 	xfs_buf_t	*agflbp;/* buffer for a.g. freelist structure */
2408 	xfs_agblock_t	bno;	/* block number returned */
2409 	__be32		*agfl_bno;
2410 	int		error;
2411 	int		logflags;
2412 	xfs_mount_t	*mp = tp->t_mountp;
2413 	xfs_perag_t	*pag;	/* per allocation group data */
2414 
2415 	/*
2416 	 * Freelist is empty, give up.
2417 	 */
2418 	agf = XFS_BUF_TO_AGF(agbp);
2419 	if (!agf->agf_flcount) {
2420 		*bnop = NULLAGBLOCK;
2421 		return 0;
2422 	}
2423 	/*
2424 	 * Read the array of free blocks.
2425 	 */
2426 	error = xfs_alloc_read_agfl(mp, tp, be32_to_cpu(agf->agf_seqno),
2427 				    &agflbp);
2428 	if (error)
2429 		return error;
2430 
2432 	/*
2433 	 * Get the block number and update the data structures.
2434 	 */
2435 	agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
2436 	bno = be32_to_cpu(agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
2437 	be32_add_cpu(&agf->agf_flfirst, 1);
2438 	xfs_trans_brelse(tp, agflbp);
2439 	if (be32_to_cpu(agf->agf_flfirst) == xfs_agfl_size(mp))
2440 		agf->agf_flfirst = 0;
2441 
2442 	pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
2443 	ASSERT(!pag->pagf_agflreset);
2444 	be32_add_cpu(&agf->agf_flcount, -1);
2445 	xfs_trans_agflist_delta(tp, -1);
2446 	pag->pagf_flcount--;
2447 
2448 	logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT;
2449 	if (btreeblk) {
2450 		be32_add_cpu(&agf->agf_btreeblks, 1);
2451 		pag->pagf_btreeblks++;
2452 		logflags |= XFS_AGF_BTREEBLKS;
2453 	}
2454 	xfs_perag_put(pag);
2455 
2456 	xfs_alloc_log_agf(tp, agbp, logflags);
2457 	*bnop = bno;
2458 
2459 	return 0;
2460 }
2461 
2462 /*
2463  * Log the given fields from the agf structure.
2464  */
2465 void
2466 xfs_alloc_log_agf(
2467 	xfs_trans_t	*tp,	/* transaction pointer */
2468 	xfs_buf_t	*bp,	/* buffer for a.g. freelist header */
2469 	int		fields)	/* mask of fields to be logged (XFS_AGF_...) */
2470 {
2471 	int	first;		/* first byte offset */
2472 	int	last;		/* last byte offset */
2473 	static const short	offsets[] = {
2474 		offsetof(xfs_agf_t, agf_magicnum),
2475 		offsetof(xfs_agf_t, agf_versionnum),
2476 		offsetof(xfs_agf_t, agf_seqno),
2477 		offsetof(xfs_agf_t, agf_length),
2478 		offsetof(xfs_agf_t, agf_roots[0]),
2479 		offsetof(xfs_agf_t, agf_levels[0]),
2480 		offsetof(xfs_agf_t, agf_flfirst),
2481 		offsetof(xfs_agf_t, agf_fllast),
2482 		offsetof(xfs_agf_t, agf_flcount),
2483 		offsetof(xfs_agf_t, agf_freeblks),
2484 		offsetof(xfs_agf_t, agf_longest),
2485 		offsetof(xfs_agf_t, agf_btreeblks),
2486 		offsetof(xfs_agf_t, agf_uuid),
2487 		offsetof(xfs_agf_t, agf_rmap_blocks),
2488 		offsetof(xfs_agf_t, agf_refcount_blocks),
2489 		offsetof(xfs_agf_t, agf_refcount_root),
2490 		offsetof(xfs_agf_t, agf_refcount_level),
2491 		/* needed so that we don't log the whole rest of the structure: */
2492 		offsetof(xfs_agf_t, agf_spare64),
2493 		sizeof(xfs_agf_t)
2494 	};
2495 
2496 	trace_xfs_agf(tp->t_mountp, XFS_BUF_TO_AGF(bp), fields, _RET_IP_);
2497 
2498 	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_AGF_BUF);
2499 
2500 	xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last);
2501 	xfs_trans_log_buf(tp, bp, (uint)first, (uint)last);
2502 }
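
/*
 * xfs_btree_offsets() turns the field mask into one contiguous byte
 * range: the lowest set bit selects the first logged byte and the
 * highest set bit selects the last (the next table entry minus one),
 * which is why the offsets[] array above carries trailing bound
 * entries.  A sketch of the same idea (not the kernel implementation):
 */
#if 0	/* illustrative sketch, not compiled */
static void
fields_to_byte_range(
	int		fields,		/* XFS_AGF_... bit mask */
	const short	*offsets,	/* per-field byte offsets */
	int		nbits,		/* number of valid field bits */
	int		*first,		/* first byte to log */
	int		*last)		/* last byte to log */
{
	int		i;

	for (i = 0; i < nbits; i++)		/* lowest set bit */
		if (fields & (1 << i)) {
			*first = offsets[i];
			break;
		}
	for (i = nbits - 1; i >= 0; i--)	/* highest set bit */
		if (fields & (1 << i)) {
			*last = offsets[i + 1] - 1;
			break;
		}
}
#endif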
2503 
2504 /*
2505  * Interface for inode allocation to force the pag data to be initialized.
2506  */
2507 int					/* error */
2508 xfs_alloc_pagf_init(
2509 	xfs_mount_t		*mp,	/* file system mount structure */
2510 	xfs_trans_t		*tp,	/* transaction pointer */
2511 	xfs_agnumber_t		agno,	/* allocation group number */
2512 	int			flags)	/* XFS_ALLOC_FLAGS_... */
2513 {
2514 	xfs_buf_t		*bp;
2515 	int			error;
2516 
2517 	if ((error = xfs_alloc_read_agf(mp, tp, agno, flags, &bp)))
2518 		return error;
2519 	if (bp)
2520 		xfs_trans_brelse(tp, bp);
2521 	return 0;
2522 }
2523 
2524 /*
2525  * Put the block on the freelist for the allocation group.
2526  */
2527 int					/* error */
2528 xfs_alloc_put_freelist(
2529 	xfs_trans_t		*tp,	/* transaction pointer */
2530 	xfs_buf_t		*agbp,	/* buffer for a.g. freelist header */
2531 	xfs_buf_t		*agflbp,/* buffer for a.g. free block array */
2532 	xfs_agblock_t		bno,	/* block being freed */
2533 	int			btreeblk) /* block came from an AGF btree */
2534 {
2535 	xfs_agf_t		*agf;	/* a.g. freespace structure */
2536 	__be32			*blockp;/* pointer to array entry */
2537 	int			error;
2538 	int			logflags;
2539 	xfs_mount_t		*mp;	/* mount structure */
2540 	xfs_perag_t		*pag;	/* per allocation group data */
2541 	__be32			*agfl_bno;
2542 	int			startoff;
2543 
2544 	agf = XFS_BUF_TO_AGF(agbp);
2545 	mp = tp->t_mountp;
2546 
2547 	if (!agflbp && (error = xfs_alloc_read_agfl(mp, tp,
2548 			be32_to_cpu(agf->agf_seqno), &agflbp)))
2549 		return error;
2550 	be32_add_cpu(&agf->agf_fllast, 1);
2551 	if (be32_to_cpu(agf->agf_fllast) == xfs_agfl_size(mp))
2552 		agf->agf_fllast = 0;
2553 
2554 	pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
2555 	ASSERT(!pag->pagf_agflreset);
2556 	be32_add_cpu(&agf->agf_flcount, 1);
2557 	xfs_trans_agflist_delta(tp, 1);
2558 	pag->pagf_flcount++;
2559 
2560 	logflags = XFS_AGF_FLLAST | XFS_AGF_FLCOUNT;
2561 	if (btreeblk) {
2562 		be32_add_cpu(&agf->agf_btreeblks, -1);
2563 		pag->pagf_btreeblks--;
2564 		logflags |= XFS_AGF_BTREEBLKS;
2565 	}
2566 	xfs_perag_put(pag);
2567 
2568 	xfs_alloc_log_agf(tp, agbp, logflags);
2569 
2570 	ASSERT(be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp));
2571 
2572 	agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
2573 	blockp = &agfl_bno[be32_to_cpu(agf->agf_fllast)];
2574 	*blockp = cpu_to_be32(bno);
2575 	startoff = (char *)blockp - (char *)agflbp->b_addr;
2576 
2577 	xfs_alloc_log_agf(tp, agbp, logflags);
2578 
2579 	xfs_trans_buf_set_type(tp, agflbp, XFS_BLFT_AGFL_BUF);
2580 	xfs_trans_log_buf(tp, agflbp, startoff,
2581 			  startoff + sizeof(xfs_agblock_t) - 1);
2582 	return 0;
2583 }
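
/*
 * xfs_alloc_get_freelist() and xfs_alloc_put_freelist() treat the AGFL
 * as a fixed-size ring: agf_flfirst chases the head, agf_fllast the
 * tail, and both wrap back to slot 0 at xfs_agfl_size().  A sketch of
 * just the index arithmetic (not kernel code):
 */
#if 0	/* illustrative sketch, not compiled */
static uint32_t
agfl_pop(
	uint32_t	*first,		/* agf_flfirst */
	uint32_t	*count,		/* agf_flcount */
	const uint32_t	*slots,		/* the block number array */
	uint32_t	size)		/* xfs_agfl_size() */
{
	uint32_t	bno = slots[*first];

	if (++(*first) == size)		/* wrap past the last slot */
		*first = 0;
	(*count)--;
	return bno;
}

static void
agfl_push(
	uint32_t	*last,		/* agf_fllast */
	uint32_t	*count,		/* agf_flcount */
	uint32_t	*slots,
	uint32_t	size,
	uint32_t	bno)
{
	if (++(*last) == size)		/* wrap past the last slot */
		*last = 0;
	slots[*last] = bno;
	(*count)++;
}
#endif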
2584 
2585 static xfs_failaddr_t
2586 xfs_agf_verify(
2587 	struct xfs_buf		*bp)
2588 {
2589 	struct xfs_mount	*mp = bp->b_target->bt_mount;
2590 	struct xfs_agf		*agf = XFS_BUF_TO_AGF(bp);
2591 
2592 	if (xfs_sb_version_hascrc(&mp->m_sb)) {
2593 		if (!uuid_equal(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid))
2594 			return __this_address;
2595 		if (!xfs_log_check_lsn(mp, be64_to_cpu(agf->agf_lsn)))
2597 			return __this_address;
2598 	}
2599 
2600 	if (!xfs_verify_magic(bp, agf->agf_magicnum))
2601 		return __this_address;
2602 
2603 	if (!(XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) &&
2604 	      be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) &&
2605 	      be32_to_cpu(agf->agf_flfirst) < xfs_agfl_size(mp) &&
2606 	      be32_to_cpu(agf->agf_fllast) < xfs_agfl_size(mp) &&
2607 	      be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp)))
2608 		return __this_address;
2609 
2610 	if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) < 1 ||
2611 	    be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) < 1 ||
2612 	    be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) > XFS_BTREE_MAXLEVELS ||
2613 	    be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) > XFS_BTREE_MAXLEVELS)
2614 		return __this_address;
2615 
2616 	if (xfs_sb_version_hasrmapbt(&mp->m_sb) &&
2617 	    (be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) < 1 ||
2618 	     be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) > XFS_BTREE_MAXLEVELS))
2619 		return __this_address;
2620 
2621 	/*
2622 	 * during growfs operations, the perag is not fully initialised,
2623 	 * so we can't use it for any useful checking. growfs ensures we can't
2624 	 * use it by using uncached buffers that don't have the perag attached
2625 	 * so we can detect and avoid this problem.
2626 	 */
2627 	if (bp->b_pag && be32_to_cpu(agf->agf_seqno) != bp->b_pag->pag_agno)
2628 		return __this_address;
2629 
2630 	if (xfs_sb_version_haslazysbcount(&mp->m_sb) &&
2631 	    be32_to_cpu(agf->agf_btreeblks) > be32_to_cpu(agf->agf_length))
2632 		return __this_address;
2633 
2634 	if (xfs_sb_version_hasreflink(&mp->m_sb) &&
2635 	    (be32_to_cpu(agf->agf_refcount_level) < 1 ||
2636 	     be32_to_cpu(agf->agf_refcount_level) > XFS_BTREE_MAXLEVELS))
2637 		return __this_address;
2638 
2639 	return NULL;
2641 }
2642 
2643 static void
2644 xfs_agf_read_verify(
2645 	struct xfs_buf	*bp)
2646 {
2647 	struct xfs_mount *mp = bp->b_target->bt_mount;
2648 	xfs_failaddr_t	fa;
2649 
2650 	if (xfs_sb_version_hascrc(&mp->m_sb) &&
2651 	    !xfs_buf_verify_cksum(bp, XFS_AGF_CRC_OFF))
2652 		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
2653 	else {
2654 		fa = xfs_agf_verify(bp);
2655 		if (XFS_TEST_ERROR(fa, mp, XFS_ERRTAG_ALLOC_READ_AGF))
2656 			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
2657 	}
2658 }
2659 
2660 static void
2661 xfs_agf_write_verify(
2662 	struct xfs_buf	*bp)
2663 {
2664 	struct xfs_mount	*mp = bp->b_target->bt_mount;
2665 	struct xfs_buf_log_item	*bip = bp->b_log_item;
2666 	xfs_failaddr_t		fa;
2667 
2668 	fa = xfs_agf_verify(bp);
2669 	if (fa) {
2670 		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
2671 		return;
2672 	}
2673 
2674 	if (!xfs_sb_version_hascrc(&mp->m_sb))
2675 		return;
2676 
2677 	if (bip)
2678 		XFS_BUF_TO_AGF(bp)->agf_lsn = cpu_to_be64(bip->bli_item.li_lsn);
2679 
2680 	xfs_buf_update_cksum(bp, XFS_AGF_CRC_OFF);
2681 }
2682 
2683 const struct xfs_buf_ops xfs_agf_buf_ops = {
2684 	.name = "xfs_agf",
2685 	.magic = { cpu_to_be32(XFS_AGF_MAGIC), cpu_to_be32(XFS_AGF_MAGIC) },
2686 	.verify_read = xfs_agf_read_verify,
2687 	.verify_write = xfs_agf_write_verify,
2688 	.verify_struct = xfs_agf_verify,
2689 };
2690 
2691 /*
2692  * Read in the allocation group header (free/alloc section).
2693  */
2694 int					/* error */
2695 xfs_read_agf(
2696 	struct xfs_mount	*mp,	/* mount point structure */
2697 	struct xfs_trans	*tp,	/* transaction pointer */
2698 	xfs_agnumber_t		agno,	/* allocation group number */
2699 	int			flags,	/* XFS_BUF_ */
2700 	struct xfs_buf		**bpp)	/* buffer for the ag freelist header */
2701 {
2702 	int		error;
2703 
2704 	trace_xfs_read_agf(mp, agno);
2705 
2706 	ASSERT(agno != NULLAGNUMBER);
2707 	error = xfs_trans_read_buf(
2708 			mp, tp, mp->m_ddev_targp,
2709 			XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
2710 			XFS_FSS_TO_BB(mp, 1), flags, bpp, &xfs_agf_buf_ops);
2711 	if (error)
2712 		return error;
2713 	if (!*bpp)
2714 		return 0;
2715 
2716 	ASSERT(!(*bpp)->b_error);
2717 	xfs_buf_set_ref(*bpp, XFS_AGF_REF);
2718 	return 0;
2719 }
2720 
2721 /*
2722  * Read in the allocation group header (free/alloc section) and set up
 * the cached per-ag data from it.
2723  */
2724 int					/* error */
2725 xfs_alloc_read_agf(
2726 	struct xfs_mount	*mp,	/* mount point structure */
2727 	struct xfs_trans	*tp,	/* transaction pointer */
2728 	xfs_agnumber_t		agno,	/* allocation group number */
2729 	int			flags,	/* XFS_ALLOC_FLAG_... */
2730 	struct xfs_buf		**bpp)	/* buffer for the ag freelist header */
2731 {
2732 	struct xfs_agf		*agf;		/* ag freelist header */
2733 	struct xfs_perag	*pag;		/* per allocation group data */
2734 	int			error;
2735 
2736 	trace_xfs_alloc_read_agf(mp, agno);
2737 
2738 	ASSERT(agno != NULLAGNUMBER);
2739 	error = xfs_read_agf(mp, tp, agno,
2740 			(flags & XFS_ALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
2741 			bpp);
2742 	if (error)
2743 		return error;
2744 	if (!*bpp)
2745 		return 0;
2746 	ASSERT(!(*bpp)->b_error);
2747 
2748 	agf = XFS_BUF_TO_AGF(*bpp);
2749 	pag = xfs_perag_get(mp, agno);
2750 	if (!pag->pagf_init) {
2751 		pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
2752 		pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
2753 		pag->pagf_flcount = be32_to_cpu(agf->agf_flcount);
2754 		pag->pagf_longest = be32_to_cpu(agf->agf_longest);
2755 		pag->pagf_levels[XFS_BTNUM_BNOi] =
2756 			be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]);
2757 		pag->pagf_levels[XFS_BTNUM_CNTi] =
2758 			be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
2759 		pag->pagf_levels[XFS_BTNUM_RMAPi] =
2760 			be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAPi]);
2761 		pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level);
2762 		pag->pagf_init = 1;
2763 		pag->pagf_agflreset = xfs_agfl_needs_reset(mp, agf);
2764 	}
2765 #ifdef DEBUG
2766 	else if (!XFS_FORCED_SHUTDOWN(mp)) {
2767 		ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks));
2768 		ASSERT(pag->pagf_btreeblks == be32_to_cpu(agf->agf_btreeblks));
2769 		ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount));
2770 		ASSERT(pag->pagf_longest == be32_to_cpu(agf->agf_longest));
2771 		ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] ==
2772 		       be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]));
2773 		ASSERT(pag->pagf_levels[XFS_BTNUM_CNTi] ==
2774 		       be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]));
2775 	}
2776 #endif
2777 	xfs_perag_put(pag);
2778 	return 0;
2779 }
2780 
2781 /*
2782  * Allocate an extent (variable-size).
2783  * Depending on the allocation type, we either look in a single allocation
2784  * group or loop over the allocation groups to find the result.
2785  */
2786 int				/* error */
2787 xfs_alloc_vextent(
2788 	struct xfs_alloc_arg	*args)	/* allocation argument structure */
2789 {
2790 	xfs_agblock_t		agsize;	/* allocation group size */
2791 	int			error;
2792 	int			flags;	/* XFS_ALLOC_FLAG_... locking flags */
2793 	struct xfs_mount	*mp;	/* mount structure pointer */
2794 	xfs_agnumber_t		sagno;	/* starting allocation group number */
2795 	xfs_alloctype_t		type;	/* input allocation type */
2796 	int			bump_rotor = 0;
2797 	xfs_agnumber_t		rotorstep = xfs_rotorstep; /* inode32 agf stepper */
2798 
2799 	mp = args->mp;
2800 	type = args->otype = args->type;
2801 	args->agbno = NULLAGBLOCK;
2802 	/*
2803 	 * Just fix this up for the case where the last a.g. is shorter
2804 	 * (or there's only one a.g.) and the caller couldn't easily figure
2805 	 * that out (xfs_bmap_alloc).
2806 	 */
2807 	agsize = mp->m_sb.sb_agblocks;
2808 	if (args->maxlen > agsize)
2809 		args->maxlen = agsize;
2810 	if (args->alignment == 0)
2811 		args->alignment = 1;
2812 	ASSERT(XFS_FSB_TO_AGNO(mp, args->fsbno) < mp->m_sb.sb_agcount);
2813 	ASSERT(XFS_FSB_TO_AGBNO(mp, args->fsbno) < agsize);
2814 	ASSERT(args->minlen <= args->maxlen);
2815 	ASSERT(args->minlen <= agsize);
2816 	ASSERT(args->mod < args->prod);
2817 	if (XFS_FSB_TO_AGNO(mp, args->fsbno) >= mp->m_sb.sb_agcount ||
2818 	    XFS_FSB_TO_AGBNO(mp, args->fsbno) >= agsize ||
2819 	    args->minlen > args->maxlen || args->minlen > agsize ||
2820 	    args->mod >= args->prod) {
2821 		args->fsbno = NULLFSBLOCK;
2822 		trace_xfs_alloc_vextent_badargs(args);
2823 		return 0;
2824 	}
2825 
2826 	switch (type) {
2827 	case XFS_ALLOCTYPE_THIS_AG:
2828 	case XFS_ALLOCTYPE_NEAR_BNO:
2829 	case XFS_ALLOCTYPE_THIS_BNO:
2830 		/*
2831 		 * These three force us into a single a.g.
2832 		 */
2833 		args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
2834 		args->pag = xfs_perag_get(mp, args->agno);
2835 		error = xfs_alloc_fix_freelist(args, 0);
2836 		if (error) {
2837 			trace_xfs_alloc_vextent_nofix(args);
2838 			goto error0;
2839 		}
2840 		if (!args->agbp) {
2841 			trace_xfs_alloc_vextent_noagbp(args);
2842 			break;
2843 		}
2844 		args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
2845 		if ((error = xfs_alloc_ag_vextent(args)))
2846 			goto error0;
2847 		break;
2848 	case XFS_ALLOCTYPE_START_BNO:
2849 		/*
2850 		 * Try near allocation first, then anywhere-in-ag after
2851 		 * the first a.g. fails.
2852 		 */
2853 		if ((args->datatype & XFS_ALLOC_INITIAL_USER_DATA) &&
2854 		    (mp->m_flags & XFS_MOUNT_32BITINODES)) {
2855 			args->fsbno = XFS_AGB_TO_FSB(mp,
2856 					((mp->m_agfrotor / rotorstep) %
2857 					mp->m_sb.sb_agcount), 0);
2858 			bump_rotor = 1;
2859 		}
2860 		args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
2861 		args->type = XFS_ALLOCTYPE_NEAR_BNO;
2862 		/* FALLTHROUGH */
2863 	case XFS_ALLOCTYPE_FIRST_AG:
2864 		/*
2865 		 * Rotate through the allocation groups looking for a winner.
2866 		 */
2867 		if (type == XFS_ALLOCTYPE_FIRST_AG) {
2868 			/*
2869 			 * Start with allocation group given by bno.
2870 			 */
2871 			args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
2872 			args->type = XFS_ALLOCTYPE_THIS_AG;
2873 			sagno = 0;
2874 			flags = 0;
2875 		} else {
2876 			/*
2877 			 * Start with the given allocation group.
2878 			 */
2879 			args->agno = sagno = XFS_FSB_TO_AGNO(mp, args->fsbno);
2880 			flags = XFS_ALLOC_FLAG_TRYLOCK;
2881 		}
2882 		/*
2883 		 * Loop over allocation groups twice; first time with
2884 		 * trylock set, second time without.
2885 		 */
2886 		for (;;) {
2887 			args->pag = xfs_perag_get(mp, args->agno);
2888 			error = xfs_alloc_fix_freelist(args, flags);
2889 			if (error) {
2890 				trace_xfs_alloc_vextent_nofix(args);
2891 				goto error0;
2892 			}
2893 			/*
2894 			 * If we get a buffer back then the allocation will fly.
2895 			 */
2896 			if (args->agbp) {
2897 				if ((error = xfs_alloc_ag_vextent(args)))
2898 					goto error0;
2899 				break;
2900 			}
2901 
2902 			trace_xfs_alloc_vextent_loopfailed(args);
2903 
2904 			/*
2905 			 * Didn't work, figure out the next iteration.
2906 			 */
2907 			if (args->agno == sagno &&
2908 			    type == XFS_ALLOCTYPE_START_BNO)
2909 				args->type = XFS_ALLOCTYPE_THIS_AG;
2910 			/*
2911 			 * For the first allocation, we can try any AG to get
2912 			 * space.  However, if we have already allocated a
2913 			 * block, we don't want to try AGs whose number is below
2914 			 * sagno. Otherwise, we may end up with out-of-order
2915 			 * locking of the AGF, which might cause deadlock.
2916 			 */
2917 			if (++(args->agno) == mp->m_sb.sb_agcount) {
2918 				if (args->tp->t_firstblock != NULLFSBLOCK)
2919 					args->agno = sagno;
2920 				else
2921 					args->agno = 0;
2922 			}
2923 			/*
2924 			 * Reached the starting a.g., must either be done
2925 			 * or switch to non-trylock mode.
2926 			 */
2927 			if (args->agno == sagno) {
2928 				if (flags == 0) {
2929 					args->agbno = NULLAGBLOCK;
2930 					trace_xfs_alloc_vextent_allfailed(args);
2931 					break;
2932 				}
2933 
2934 				flags = 0;
2935 				if (type == XFS_ALLOCTYPE_START_BNO) {
2936 					args->agbno = XFS_FSB_TO_AGBNO(mp,
2937 						args->fsbno);
2938 					args->type = XFS_ALLOCTYPE_NEAR_BNO;
2939 				}
2940 			}
2941 			xfs_perag_put(args->pag);
2942 		}
2943 		if (bump_rotor) {
2944 			if (args->agno == sagno)
2945 				mp->m_agfrotor = (mp->m_agfrotor + 1) %
2946 					(mp->m_sb.sb_agcount * rotorstep);
2947 			else
2948 				mp->m_agfrotor = (args->agno * rotorstep + 1) %
2949 					(mp->m_sb.sb_agcount * rotorstep);
2950 		}
2951 		break;
2952 	default:
2953 		ASSERT(0);
2954 		/* NOTREACHED */
2955 	}
2956 	if (args->agbno == NULLAGBLOCK)
2957 		args->fsbno = NULLFSBLOCK;
2958 	else {
2959 		args->fsbno = XFS_AGB_TO_FSB(mp, args->agno, args->agbno);
2960 #ifdef DEBUG
2961 		ASSERT(args->len >= args->minlen);
2962 		ASSERT(args->len <= args->maxlen);
2963 		ASSERT(args->agbno % args->alignment == 0);
2964 		XFS_AG_CHECK_DADDR(mp, XFS_FSB_TO_DADDR(mp, args->fsbno),
2965 			args->len);
2966 #endif
2967 
2968 		/* Zero the extent if we were asked to do so */
2969 		if (args->datatype & XFS_ALLOC_USERDATA_ZERO) {
2970 			error = xfs_zero_extent(args->ip, args->fsbno, args->len);
2971 			if (error)
2972 				goto error0;
2973 		}
2974 
2975 	}
2976 	xfs_perag_put(args->pag);
2977 	return 0;
2978 error0:
2979 	xfs_perag_put(args->pag);
2980 	return error;
2981 }
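
/*
 * For XFS_ALLOCTYPE_START_BNO with 32-bit inodes, the starting AG
 * comes from a rotor that advances once per allocation; dividing by
 * xfs_rotorstep means each AG is reused rotorstep times before moving
 * on.  A simplified sketch (the real code above also jumps the rotor
 * ahead when the allocation lands in a later AG):
 */
#if 0	/* illustrative sketch, not compiled */
/* agcount = 4, rotorstep = 2: start AGs go 0, 0, 1, 1, 2, 2, 3, 3, 0... */
static unsigned int
next_start_ag(
	unsigned int	*rotor,		/* mp->m_agfrotor */
	unsigned int	agcount,	/* mp->m_sb.sb_agcount */
	unsigned int	rotorstep)	/* xfs_rotorstep */
{
	unsigned int	agno = (*rotor / rotorstep) % agcount;

	*rotor = (*rotor + 1) % (agcount * rotorstep);
	return agno;
}
#endif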
2982 
2983 /* Ensure that the freelist is at full capacity. */
2984 int
2985 xfs_free_extent_fix_freelist(
2986 	struct xfs_trans	*tp,
2987 	xfs_agnumber_t		agno,
2988 	struct xfs_buf		**agbp)
2989 {
2990 	struct xfs_alloc_arg	args;
2991 	int			error;
2992 
2993 	memset(&args, 0, sizeof(struct xfs_alloc_arg));
2994 	args.tp = tp;
2995 	args.mp = tp->t_mountp;
2996 	args.agno = agno;
2997 
2998 	/*
2999 	 * validate that the block number is legal - this enables us to detect
3000 	 * and handle silent filesystem corruption rather than crashing.
3001 	 */
3002 	if (args.agno >= args.mp->m_sb.sb_agcount)
3003 		return -EFSCORRUPTED;
3004 
3005 	args.pag = xfs_perag_get(args.mp, args.agno);
3006 	ASSERT(args.pag);
3007 
3008 	error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
3009 	if (error)
3010 		goto out;
3011 
3012 	*agbp = args.agbp;
3013 out:
3014 	xfs_perag_put(args.pag);
3015 	return error;
3016 }
3017 
3018 /*
3019  * Free an extent.
3020  * Just break up the extent address and hand off to xfs_free_ag_extent
3021  * after fixing up the freelist.
3022  */
3023 int
3024 __xfs_free_extent(
3025 	struct xfs_trans		*tp,
3026 	xfs_fsblock_t			bno,
3027 	xfs_extlen_t			len,
3028 	const struct xfs_owner_info	*oinfo,
3029 	enum xfs_ag_resv_type		type,
3030 	bool				skip_discard)
3031 {
3032 	struct xfs_mount		*mp = tp->t_mountp;
3033 	struct xfs_buf			*agbp;
3034 	xfs_agnumber_t			agno = XFS_FSB_TO_AGNO(mp, bno);
3035 	xfs_agblock_t			agbno = XFS_FSB_TO_AGBNO(mp, bno);
3036 	int				error;
3037 	unsigned int			busy_flags = 0;
3038 
3039 	ASSERT(len != 0);
3040 	ASSERT(type != XFS_AG_RESV_AGFL);
3041 
3042 	if (XFS_TEST_ERROR(false, mp,
3043 			XFS_ERRTAG_FREE_EXTENT))
3044 		return -EIO;
3045 
3046 	error = xfs_free_extent_fix_freelist(tp, agno, &agbp);
3047 	if (error)
3048 		return error;
3049 
3050 	XFS_WANT_CORRUPTED_GOTO(mp, agbno < mp->m_sb.sb_agblocks, err);
3051 
3052 	/* validate the extent size is legal now we have the agf locked */
3053 	XFS_WANT_CORRUPTED_GOTO(mp,
3054 		agbno + len <= be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_length),
3055 				err);
3056 
3057 	error = xfs_free_ag_extent(tp, agbp, agno, agbno, len, oinfo, type);
3058 	if (error)
3059 		goto err;
3060 
3061 	if (skip_discard)
3062 		busy_flags |= XFS_EXTENT_BUSY_SKIP_DISCARD;
3063 	xfs_extent_busy_insert(tp, agno, agbno, len, busy_flags);
3064 	return 0;
3065 
3066 err:
3067 	xfs_trans_brelse(tp, agbp);
3068 	return error;
3069 }
3070 
3071 struct xfs_alloc_query_range_info {
3072 	xfs_alloc_query_range_fn	fn;
3073 	void				*priv;
3074 };
3075 
3076 /* Format btree record and pass to our callback. */
3077 STATIC int
3078 xfs_alloc_query_range_helper(
3079 	struct xfs_btree_cur		*cur,
3080 	union xfs_btree_rec		*rec,
3081 	void				*priv)
3082 {
3083 	struct xfs_alloc_query_range_info	*query = priv;
3084 	struct xfs_alloc_rec_incore		irec;
3085 
3086 	irec.ar_startblock = be32_to_cpu(rec->alloc.ar_startblock);
3087 	irec.ar_blockcount = be32_to_cpu(rec->alloc.ar_blockcount);
3088 	return query->fn(cur, &irec, query->priv);
3089 }
3090 
3091 /* Find all free space within a given range of blocks. */
3092 int
3093 xfs_alloc_query_range(
3094 	struct xfs_btree_cur			*cur,
3095 	struct xfs_alloc_rec_incore		*low_rec,
3096 	struct xfs_alloc_rec_incore		*high_rec,
3097 	xfs_alloc_query_range_fn		fn,
3098 	void					*priv)
3099 {
3100 	union xfs_btree_irec			low_brec;
3101 	union xfs_btree_irec			high_brec;
3102 	struct xfs_alloc_query_range_info	query;
3103 
3104 	ASSERT(cur->bc_btnum == XFS_BTNUM_BNO);
3105 	low_brec.a = *low_rec;
3106 	high_brec.a = *high_rec;
3107 	query.priv = priv;
3108 	query.fn = fn;
3109 	return xfs_btree_query_range(cur, &low_brec, &high_brec,
3110 			xfs_alloc_query_range_helper, &query);
3111 }
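
/*
 * A hedged usage sketch: the caller supplies a function matching
 * xfs_alloc_query_range_fn, which receives each record already
 * unpacked into an xfs_alloc_rec_incore.  count_free_helper,
 * sum_free_in_range and the agbno bounds below are hypothetical.
 */
#if 0	/* illustrative sketch, not compiled */
/* tally the free blocks recorded in the queried range */
static int
count_free_helper(
	struct xfs_btree_cur		*cur,
	struct xfs_alloc_rec_incore	*rec,
	void				*priv)
{
	*(xfs_extlen_t *)priv += rec->ar_blockcount;
	return 0;	/* nonzero would stop the walk */
}

/* sum free space whose startblock lies between agbno 100 and 200 */
static int
sum_free_in_range(
	struct xfs_btree_cur		*cur,
	xfs_extlen_t			*freeblks)
{
	struct xfs_alloc_rec_incore	low = { .ar_startblock = 100 };
	struct xfs_alloc_rec_incore	high = { .ar_startblock = 200 };

	*freeblks = 0;
	return xfs_alloc_query_range(cur, &low, &high,
			count_free_helper, freeblks);
}
#endif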
3112 
3113 /* Find all free space records. */
3114 int
3115 xfs_alloc_query_all(
3116 	struct xfs_btree_cur			*cur,
3117 	xfs_alloc_query_range_fn		fn,
3118 	void					*priv)
3119 {
3120 	struct xfs_alloc_query_range_info	query;
3121 
3122 	ASSERT(cur->bc_btnum == XFS_BTNUM_BNO);
3123 	query.priv = priv;
3124 	query.fn = fn;
3125 	return xfs_btree_query_all(cur, xfs_alloc_query_range_helper, &query);
3126 }
3127 
3128 /* Is there a record covering a given extent? */
3129 int
3130 xfs_alloc_has_record(
3131 	struct xfs_btree_cur	*cur,
3132 	xfs_agblock_t		bno,
3133 	xfs_extlen_t		len,
3134 	bool			*exists)
3135 {
3136 	union xfs_btree_irec	low;
3137 	union xfs_btree_irec	high;
3138 
3139 	memset(&low, 0, sizeof(low));
3140 	low.a.ar_startblock = bno;
3141 	memset(&high, 0xFF, sizeof(high));
3142 	high.a.ar_startblock = bno + len - 1;
3143 
3144 	return xfs_btree_has_record(cur, &low, &high, exists);
3145 }
3146 
3147 /*
3148  * Walk all the blocks in the AGFL.  The @walk_fn can return any negative
3149  * error code or XFS_BTREE_QUERY_RANGE_ABORT.
3150  */
3151 int
3152 xfs_agfl_walk(
3153 	struct xfs_mount	*mp,
3154 	struct xfs_agf		*agf,
3155 	struct xfs_buf		*agflbp,
3156 	xfs_agfl_walk_fn	walk_fn,
3157 	void			*priv)
3158 {
3159 	__be32			*agfl_bno;
3160 	unsigned int		i;
3161 	int			error;
3162 
3163 	agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
3164 	i = be32_to_cpu(agf->agf_flfirst);
3165 
3166 	/* Nothing to walk in an empty AGFL. */
3167 	if (agf->agf_flcount == cpu_to_be32(0))
3168 		return 0;
3169 
3170 	/* Otherwise, walk from first to last, wrapping as needed. */
3171 	for (;;) {
3172 		error = walk_fn(mp, be32_to_cpu(agfl_bno[i]), priv);
3173 		if (error)
3174 			return error;
3175 		if (i == be32_to_cpu(agf->agf_fllast))
3176 			break;
3177 		if (++i == xfs_agfl_size(mp))
3178 			i = 0;
3179 	}
3180 
3181 	return 0;
3182 }
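
/*
 * A hedged usage sketch for the walker above: walk_fn sees each AGFL
 * block number in list order, and a nonzero return stops the walk.
 * count_agfl_blocks and count_agfl are hypothetical.
 */
#if 0	/* illustrative sketch, not compiled */
/* count the blocks currently sitting on the AGFL */
static int
count_agfl_blocks(
	struct xfs_mount	*mp,
	xfs_agblock_t		agbno,
	void			*priv)
{
	(*(unsigned int *)priv)++;
	return 0;	/* negative errno or ..._ABORT would stop us */
}

static int
count_agfl(
	struct xfs_mount	*mp,
	struct xfs_buf		*agbp,
	struct xfs_buf		*agflbp,
	unsigned int		*nr)
{
	*nr = 0;
	return xfs_agfl_walk(mp, XFS_BUF_TO_AGF(agbp), agflbp,
			count_agfl_blocks, nr);
}
#endif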
3183