// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_dir2.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtbitmap.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_buf_item.h"
#include "xfs_trace.h"
#include "xfs_attr_leaf.h"
#include "xfs_filestream.h"
#include "xfs_rmap.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_refcount.h"
#include "xfs_icache.h"
#include "xfs_iomap.h"
#include "xfs_health.h"
#include "xfs_bmap_item.h"
#include "xfs_symlink_remote.h"

struct kmem_cache		*xfs_bmap_intent_cache;

/*
 * Miscellaneous helper functions
 */

/*
 * Compute and fill in the value of the maximum depth of a bmap btree
 * in this filesystem.  Done once, during mount.
 */
void
xfs_bmap_compute_maxlevels(
	xfs_mount_t	*mp,		/* file system mount structure */
	int		whichfork)	/* data or attr fork */
{
	uint64_t	maxblocks;	/* max blocks at this level */
	xfs_extnum_t	maxleafents;	/* max leaf entries possible */
	int		level;		/* btree level */
	int		maxrootrecs;	/* max records in root block */
	int		minleafrecs;	/* min records in leaf block */
	int		minnoderecs;	/* min records in node block */
	int		sz;		/* root block size */

	/*
	 * The maximum number of extents in a fork, hence the maximum number of
	 * leaf entries, is controlled by the size of the on-disk extent count.
	 *
	 * Note that we can no longer assume that if we are in ATTR1 that the
	 * fork offset of all the inodes will be
	 * (xfs_default_attroffset(ip) >> 3) because we could have mounted with
	 * ATTR2 and then mounted back with ATTR1, keeping the i_forkoff's
	 * fixed but probably at various positions. Therefore, for both ATTR1
	 * and ATTR2 we have to assume the worst case scenario of a minimum
	 * size available.
	 */
	maxleafents = xfs_iext_max_nextents(xfs_has_large_extent_counts(mp),
				whichfork);
	if (whichfork == XFS_DATA_FORK)
		sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
	else
		sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);

	maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
	minleafrecs = mp->m_bmap_dmnr[0];
	minnoderecs = mp->m_bmap_dmnr[1];
	maxblocks = howmany_64(maxleafents, minleafrecs);
	for (level = 1; maxblocks > 1; level++) {
		if (maxblocks <= maxrootrecs)
			maxblocks = 1;
		else
			maxblocks = howmany_64(maxblocks, minnoderecs);
	}
	mp->m_bm_maxlevels[whichfork] = level;
	ASSERT(mp->m_bm_maxlevels[whichfork] <= xfs_bmbt_maxlevels_ondisk());
}
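
/*
 * Worked example of the loop above.  The numbers are assumed for
 * illustration and are not authoritative for any particular geometry:
 * with 32-bit extent counters maxleafents is roughly 2^31, and assuming
 * ~125 min records per block at both leaf and node levels:
 *
 *	leaves:			howmany(2^31, 125) ~= 17.2M blocks
 *	after 1st node level:	~137k blocks
 *	after 2nd node level:	~1100 blocks
 *	after 3rd node level:	~9 blocks
 *	9 <= maxrootrecs, so the 4th iteration collapses into the root
 *
 * and the loop exits with level == 5, i.e. m_bm_maxlevels = 5 for that
 * assumed configuration.
 */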

unsigned int
xfs_bmap_compute_attr_offset(
	struct xfs_mount	*mp)
{
	if (mp->m_sb.sb_inodesize == 256)
		return XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(MINABTPTRS);
	return XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
}

STATIC int				/* error */
xfs_bmbt_lookup_eq(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b = *irec;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}

STATIC int				/* error */
xfs_bmbt_lookup_first(
	struct xfs_btree_cur	*cur,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b.br_startoff = 0;
	cur->bc_rec.b.br_startblock = 0;
	cur->bc_rec.b.br_blockcount = 0;
	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}

/*
 * Check if the inode needs to be converted to btree format.
 */
static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
{
	struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);

	return whichfork != XFS_COW_FORK &&
		ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
		ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork);
}

/*
 * Check if the inode should be converted to extent format.
 */
static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
{
	struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);

	return whichfork != XFS_COW_FORK &&
		ifp->if_format == XFS_DINODE_FMT_BTREE &&
		ifp->if_nextents <= XFS_IFORK_MAXEXT(ip, whichfork);
}

/*
 * Update the record referred to by cur to the value given by irec.
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int
xfs_bmbt_update(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec)
{
	union xfs_btree_rec	rec;

	xfs_bmbt_disk_set_all(&rec.bmbt, irec);
	return xfs_btree_update(cur, &rec);
}

/*
 * Compute the worst-case number of indirect blocks that will be used
 * for ip's delayed extent of length "len".
 */
STATIC xfs_filblks_t
xfs_bmap_worst_indlen(
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_filblks_t	len)		/* delayed extent length */
{
	int		level;		/* btree level number */
	int		maxrecs;	/* maximum record count at this level */
	xfs_mount_t	*mp;		/* mount structure */
	xfs_filblks_t	rval;		/* return value */

	mp = ip->i_mount;
	maxrecs = mp->m_bmap_dmxr[0];
	for (level = 0, rval = 0;
	     level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
	     level++) {
		len += maxrecs - 1;
		do_div(len, maxrecs);
		rval += len;
		if (len == 1)
			return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
				level - 1;
		if (level == 0)
			maxrecs = mp->m_bmap_dmxr[1];
	}
	return rval;
}

/*
 * Calculate the default attribute fork offset for newly created inodes.
 */
uint
xfs_default_attroffset(
	struct xfs_inode	*ip)
{
	if (ip->i_df.if_format == XFS_DINODE_FMT_DEV)
		return roundup(sizeof(xfs_dev_t), 8);
	return M_IGEO(ip->i_mount)->attr_fork_offset;
}

/*
 * Helper routine to reset inode i_forkoff field when switching attribute fork
 * from local to extent format - we reset it where possible to make space
 * available for inline data fork extents.
 */
STATIC void
xfs_bmap_forkoff_reset(
	xfs_inode_t	*ip,
	int		whichfork)
{
	if (whichfork == XFS_ATTR_FORK &&
	    ip->i_df.if_format != XFS_DINODE_FMT_DEV &&
	    ip->i_df.if_format != XFS_DINODE_FMT_BTREE) {
		uint	dfl_forkoff = xfs_default_attroffset(ip) >> 3;

		if (dfl_forkoff > ip->i_forkoff)
			ip->i_forkoff = dfl_forkoff;
	}
}

static int
xfs_bmap_read_buf(
	struct xfs_mount	*mp,		/* file system mount point */
	struct xfs_trans	*tp,		/* transaction pointer */
	xfs_fsblock_t		fsbno,		/* file system block number */
	struct xfs_buf		**bpp)		/* buffer for fsbno */
{
	struct xfs_buf		*bp;		/* return value */
	int			error;

	if (!xfs_verify_fsbno(mp, fsbno))
		return -EFSCORRUPTED;
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
			XFS_FSB_TO_DADDR(mp, fsbno), mp->m_bsize, 0, &bp,
			&xfs_bmbt_buf_ops);
	if (!error) {
		xfs_buf_set_ref(bp, XFS_BMAP_BTREE_REF);
		*bpp = bp;
	}
	return error;
}

#ifdef DEBUG
STATIC struct xfs_buf *
xfs_bmap_get_bp(
	struct xfs_btree_cur	*cur,
	xfs_fsblock_t		bno)
{
	struct xfs_log_item	*lip;
	int			i;

	if (!cur)
		return NULL;

	for (i = 0; i < cur->bc_maxlevels; i++) {
		if (!cur->bc_levels[i].bp)
			break;
		if (xfs_buf_daddr(cur->bc_levels[i].bp) == bno)
			return cur->bc_levels[i].bp;
	}

	/* Chase down all the log items to see if the bp is there */
	list_for_each_entry(lip, &cur->bc_tp->t_items, li_trans) {
		struct xfs_buf_log_item	*bip = (struct xfs_buf_log_item *)lip;

		if (bip->bli_item.li_type == XFS_LI_BUF &&
		    xfs_buf_daddr(bip->bli_buf) == bno)
			return bip->bli_buf;
	}

	return NULL;
}

STATIC void
xfs_check_block(
	struct xfs_btree_block	*block,
	xfs_mount_t		*mp,
	int			root,
	short			sz)
{
	int			i, j, dmxr;
	__be64			*pp, *thispa;	/* pointer to block address */
	xfs_bmbt_key_t		*prevp, *keyp;

	ASSERT(be16_to_cpu(block->bb_level) > 0);

	prevp = NULL;
	for (i = 1; i <= xfs_btree_get_numrecs(block); i++) {
		dmxr = mp->m_bmap_dmxr[0];
		keyp = XFS_BMBT_KEY_ADDR(mp, block, i);

		if (prevp) {
			ASSERT(be64_to_cpu(prevp->br_startoff) <
			       be64_to_cpu(keyp->br_startoff));
		}
		prevp = keyp;

		/*
		 * Compare the block numbers to see if there are dups.
		 */
		if (root)
			pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
		else
			pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);

		for (j = i + 1; j <= be16_to_cpu(block->bb_numrecs); j++) {
			if (root)
				thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
			else
				thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
			if (*thispa == *pp) {
				xfs_warn(mp, "%s: thispa(%d) == pp(%d) %lld",
					__func__, j, i,
					(unsigned long long)be64_to_cpu(*thispa));
				xfs_err(mp, "%s: ptrs are equal in node\n",
					__func__);
				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
			}
		}
	}
}

/*
 * Check that the extents for the inode ip are in the right order in all
 * btree leaves. This becomes prohibitively expensive for large extent count
 * files, so don't bother with inodes that have more than 10,000 extents in
 * them. The btree record ordering checks will still be done, and for such
 * large bmapbt constructs those will catch most corruptions.
 */
STATIC void
xfs_bmap_check_leaf_extents(
	struct xfs_btree_cur	*cur,	/* btree cursor or null */
	xfs_inode_t		*ip,	/* incore inode pointer */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	struct xfs_buf		*bp;	/* buffer for "block" */
	int			error;	/* error return value */
	xfs_extnum_t		i = 0, j;	/* index into the extents list */
	int			level;	/* btree level, for checking */
	__be64			*pp;	/* pointer to block address */
	xfs_bmbt_rec_t		*ep;	/* pointer to current extent */
	xfs_bmbt_rec_t		last = {0, 0}; /* last extent in prev block */
	xfs_bmbt_rec_t		*nextp;	/* pointer to next extent */
	int			bp_release = 0;

	if (ifp->if_format != XFS_DINODE_FMT_BTREE)
		return;

	/* skip large extent count inodes */
	if (ip->i_df.if_nextents > 10000)
		return;

	bno = NULLFSBLOCK;
	block = ifp->if_broot;
	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);

	ASSERT(bno != NULLFSBLOCK);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		/* See if buf is in cur first */
		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_bmap_read_buf(mp, NULL, bno, &bp);
			if (xfs_metadata_is_sick(error))
				xfs_btree_mark_sick(cur);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
		if (level == 0)
			break;

		/*
		 * Check this block for basic sanity (increasing keys and
		 * no duplicate blocks).
		 */
		xfs_check_block(block, mp, 0, 0);
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		if (XFS_IS_CORRUPT(mp, !xfs_verify_fsbno(mp, bno))) {
			xfs_btree_mark_sick(cur);
			error = -EFSCORRUPTED;
			goto error0;
		}
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
	}

	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */
	i = 0;

	/*
	 * Loop over all leaf nodes checking that all extents are in the
	 * right order.
	 */
	for (;;) {
		xfs_fsblock_t	nextbno;
		xfs_extnum_t	num_recs;

		num_recs = xfs_btree_get_numrecs(block);

		/*
		 * Read-ahead the next leaf block, if any.
		 */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);

		/*
		 * Check all the extents to make sure they are OK.
		 * If we had a previous block, the last entry should
		 * conform with the first entry in this one.
		 */
		ep = XFS_BMBT_REC_ADDR(mp, block, 1);
		if (i) {
			ASSERT(xfs_bmbt_disk_get_startoff(&last) +
			       xfs_bmbt_disk_get_blockcount(&last) <=
			       xfs_bmbt_disk_get_startoff(ep));
		}
		for (j = 1; j < num_recs; j++) {
			nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
			ASSERT(xfs_bmbt_disk_get_startoff(ep) +
			       xfs_bmbt_disk_get_blockcount(ep) <=
			       xfs_bmbt_disk_get_startoff(nextp));
			ep = nextp;
		}

		last = *ep;
		i += num_recs;
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
		bno = nextbno;
		/*
		 * If we've reached the end, stop.
		 */
		if (bno == NULLFSBLOCK)
			break;

		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_bmap_read_buf(mp, NULL, bno, &bp);
			if (xfs_metadata_is_sick(error))
				xfs_btree_mark_sick(cur);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
	}

	return;

error0:
	xfs_warn(mp, "%s: at error0", __func__);
	if (bp_release)
		xfs_trans_brelse(NULL, bp);
error_norelse:
	xfs_warn(mp, "%s: BAD after btree leaves for %llu extents",
		__func__, i);
	xfs_err(mp, "%s: CORRUPTED BTREE OR SOMETHING", __func__);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	return;
}

/*
 * Validate that the bmbt_irecs being returned from bmapi are valid
 * given the caller's original parameters.  Specifically check the
 * ranges of the returned irecs to ensure that they only extend beyond
 * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
 */
STATIC void
xfs_bmap_validate_ret(
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	uint32_t		flags,
	xfs_bmbt_irec_t		*mval,
	int			nmap,
	int			ret_nmap)
{
	int			i;	/* index to map values */

	ASSERT(ret_nmap <= nmap);

	for (i = 0; i < ret_nmap; i++) {
		ASSERT(mval[i].br_blockcount > 0);
		if (!(flags & XFS_BMAPI_ENTIRE)) {
			ASSERT(mval[i].br_startoff >= bno);
			ASSERT(mval[i].br_blockcount <= len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
			       bno + len);
		} else {
			ASSERT(mval[i].br_startoff < bno + len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
			       bno);
		}
		ASSERT(i == 0 ||
		       mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
		       mval[i].br_startoff);
		ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
		       mval[i].br_startblock != HOLESTARTBLOCK);
		ASSERT(mval[i].br_state == XFS_EXT_NORM ||
		       mval[i].br_state == XFS_EXT_UNWRITTEN);
	}
}

#else
#define xfs_bmap_check_leaf_extents(cur, ip, whichfork)		do { } while (0)
#define xfs_bmap_validate_ret(bno, len, flags, mval, onmap, nmap)	do { } while (0)
#endif /* DEBUG */

/*
 * Inode fork format manipulation functions
 */

/*
 * Convert the inode format to extent format if it currently is in btree
 * format, but the extent list is small enough that it fits into the extent
 * format.
 *
 * Since the extents are already in-core, all we have to do is give up the
 * space for the btree root and pitch the leaf block.
 */
STATIC int				/* error */
xfs_bmap_btree_to_extents(
	struct xfs_trans	*tp,	/* transaction pointer */
	struct xfs_inode	*ip,	/* incore inode pointer */
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			*logflagsp, /* inode logging flags */
	int			whichfork)  /* data or attr fork */
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_btree_block	*rblock = ifp->if_broot;
	struct xfs_btree_block	*cblock;/* child btree block */
	xfs_fsblock_t		cbno;	/* child block number */
	struct xfs_buf		*cbp;	/* child block's buffer */
	int			error;	/* error return value */
	__be64			*pp;	/* ptr to block address */
	struct xfs_owner_info	oinfo;

	/* check if we actually need the extent format first: */
	if (!xfs_bmap_wants_extents(ip, whichfork))
		return 0;

	ASSERT(cur);
	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE);
	ASSERT(be16_to_cpu(rblock->bb_level) == 1);
	ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
	ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);

	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
	cbno = be64_to_cpu(*pp);
#ifdef DEBUG
	if (XFS_IS_CORRUPT(cur->bc_mp, !xfs_verify_fsbno(mp, cbno))) {
		xfs_btree_mark_sick(cur);
		return -EFSCORRUPTED;
	}
#endif
	error = xfs_bmap_read_buf(mp, tp, cbno, &cbp);
	if (xfs_metadata_is_sick(error))
		xfs_btree_mark_sick(cur);
	if (error)
		return error;
	cblock = XFS_BUF_TO_BLOCK(cbp);
	if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
		return error;

	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
	error = xfs_free_extent_later(cur->bc_tp, cbno, 1, &oinfo,
			XFS_AG_RESV_NONE, false);
	if (error)
		return error;

	ip->i_nblocks--;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
	xfs_trans_binval(tp, cbp);
	if (cur->bc_levels[0].bp == cbp)
		cur->bc_levels[0].bp = NULL;
	xfs_iroot_realloc(ip, -1, whichfork);
	ASSERT(ifp->if_broot == NULL);
	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
	*logflagsp |= XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
	return 0;
}

/*
 * Convert an extents-format file into a btree-format file.
 * The new file will have a root block (in the inode) and a single child block.
 */
STATIC int				/* error */
xfs_bmap_extents_to_btree(
	struct xfs_trans	*tp,	/* transaction pointer */
	struct xfs_inode	*ip,	/* incore inode pointer */
	struct xfs_btree_cur	**curp,	/* cursor returned to caller */
	int			wasdel,	/* converting a delayed alloc */
	int			*logflagsp, /* inode logging flags */
	int			whichfork)  /* data or attr fork */
{
	struct xfs_btree_block	*ablock;	/* allocated (child) bt block */
	struct xfs_buf		*abp;		/* buffer for ablock */
	struct xfs_alloc_arg	args;		/* allocation arguments */
	struct xfs_bmbt_rec	*arp;		/* child record pointer */
	struct xfs_btree_block	*block;		/* btree root block */
	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
	int			error;		/* error return value */
	struct xfs_ifork	*ifp;		/* inode fork pointer */
	struct xfs_bmbt_key	*kp;		/* root block key pointer */
	struct xfs_mount	*mp;		/* mount structure */
	xfs_bmbt_ptr_t		*pp;		/* root block address pointer */
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	rec;
	xfs_extnum_t		cnt = 0;

	mp = ip->i_mount;
	ASSERT(whichfork != XFS_COW_FORK);
	ifp = xfs_ifork_ptr(ip, whichfork);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_EXTENTS);

	/*
	 * Make space in the inode incore. This needs to be undone if we fail
	 * to expand the root.
	 */
	xfs_iroot_realloc(ip, 1, whichfork);

	/*
	 * Fill in the root.
	 */
	block = ifp->if_broot;
	xfs_bmbt_init_block(ip, block, NULL, 1, 1);
	/*
	 * Need a cursor.  Can't allocate until bb_level is filled in.
	 */
	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	if (wasdel)
		cur->bc_flags |= XFS_BTREE_BMBT_WASDEL;
	/*
	 * Convert to a btree with two levels, one record in root.
	 */
	ifp->if_format = XFS_DINODE_FMT_BTREE;
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = mp;
	xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork);

	args.minlen = args.maxlen = args.prod = 1;
	args.wasdel = wasdel;
	*logflagsp = 0;
	error = xfs_alloc_vextent_start_ag(&args,
			XFS_INO_TO_FSB(mp, ip->i_ino));
	if (error)
		goto out_root_realloc;

	/*
	 * Allocation can't fail, the space was reserved.
	 */
	if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
		error = -ENOSPC;
		goto out_root_realloc;
	}

	cur->bc_bmap.allocated++;
	ip->i_nblocks++;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
	error = xfs_trans_get_buf(tp, mp->m_ddev_targp,
			XFS_FSB_TO_DADDR(mp, args.fsbno),
			mp->m_bsize, 0, &abp);
	if (error)
		goto out_unreserve_dquot;

	/*
	 * Fill in the child block.
	 */
	ablock = XFS_BUF_TO_BLOCK(abp);
	xfs_bmbt_init_block(ip, ablock, abp, 0, 0);

	for_each_xfs_iext(ifp, &icur, &rec) {
		if (isnullstartblock(rec.br_startblock))
			continue;
		arp = XFS_BMBT_REC_ADDR(mp, ablock, 1 + cnt);
		xfs_bmbt_disk_set_all(arp, &rec);
		cnt++;
	}
	ASSERT(cnt == ifp->if_nextents);
	xfs_btree_set_numrecs(ablock, cnt);

	/*
	 * Fill in the root key and pointer.
	 */
	kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
	arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
	kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
	pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
						be16_to_cpu(block->bb_level)));
	*pp = cpu_to_be64(args.fsbno);

	/*
	 * Do all this logging at the end so that
	 * the root is at the right level.
	 */
	xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
	xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
	ASSERT(*curp == NULL);
	*curp = cur;
	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
	return 0;

out_unreserve_dquot:
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
out_root_realloc:
	xfs_iroot_realloc(ip, -1, whichfork);
	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
	ASSERT(ifp->if_broot == NULL);
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);

	return error;
}

/*
 * Convert a local file to an extents file.
 * This code is out of bounds for data forks of regular files,
 * since the file data needs to get logged so things will stay consistent.
 * (The bmap-level manipulations are ok, though).
 */
void
xfs_bmap_local_to_extents_empty(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);

	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);
	ASSERT(ifp->if_bytes == 0);
	ASSERT(ifp->if_nextents == 0);

	xfs_bmap_forkoff_reset(ip, whichfork);
	ifp->if_data = NULL;
	ifp->if_height = 0;
	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}


int					/* error */
xfs_bmap_local_to_extents(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_extlen_t	total,		/* total blocks needed by transaction */
	int		*logflagsp,	/* inode logging flags */
	int		whichfork,
	void		(*init_fn)(struct xfs_trans *tp,
				   struct xfs_buf *bp,
				   struct xfs_inode *ip,
				   struct xfs_ifork *ifp, void *priv),
	void		*priv)
{
	int		error = 0;
	int		flags;		/* logging flags returned */
	struct xfs_ifork *ifp;		/* inode fork pointer */
	xfs_alloc_arg_t	args;		/* allocation arguments */
	struct xfs_buf	*bp;		/* buffer for extent block */
	struct xfs_bmbt_irec rec;
	struct xfs_iext_cursor icur;

	/*
	 * We don't want to deal with the case of keeping inode data inline
	 * yet.  So sending the data fork of a regular inode is invalid.
	 */
	ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
	ifp = xfs_ifork_ptr(ip, whichfork);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);

	if (!ifp->if_bytes) {
		xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
		flags = XFS_ILOG_CORE;
		goto done;
	}

	flags = 0;
	error = 0;
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = ip->i_mount;
	args.total = total;
	args.minlen = args.maxlen = args.prod = 1;
	xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0);

	/*
	 * Allocate a block.  We know we need only one, since the
	 * file currently fits in an inode.  args.total and the extent
	 * length bounds were already set up above.
	 */
	error = xfs_alloc_vextent_start_ag(&args,
			XFS_INO_TO_FSB(args.mp, ip->i_ino));
	if (error)
		goto done;

	/* Can't fail, the space was reserved. */
	ASSERT(args.fsbno != NULLFSBLOCK);
	ASSERT(args.len == 1);
	error = xfs_trans_get_buf(tp, args.mp->m_ddev_targp,
			XFS_FSB_TO_DADDR(args.mp, args.fsbno),
			args.mp->m_bsize, 0, &bp);
	if (error)
		goto done;

	/*
	 * Initialize the block, copy the data and log the remote buffer.
	 *
	 * The callout is responsible for logging because the remote format
	 * might differ from the local format and thus we don't know how much
	 * to log here. Note that init_fn must also set the buffer log item
	 * type correctly.
	 */
	init_fn(tp, bp, ip, ifp, priv);

	/* account for the change in fork size */
	xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
	xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
	flags |= XFS_ILOG_CORE;

	ifp->if_data = NULL;
	ifp->if_height = 0;

	rec.br_startoff = 0;
	rec.br_startblock = args.fsbno;
	rec.br_blockcount = 1;
	rec.br_state = XFS_EXT_NORM;
	xfs_iext_first(ifp, &icur);
	xfs_iext_insert(ip, &icur, &rec, 0);

	ifp->if_nextents = 1;
	ip->i_nblocks = 1;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
	flags |= xfs_ilog_fext(whichfork);

done:
	*logflagsp = flags;
	return error;
}

/*
 * Called from xfs_bmap_add_attrfork to handle btree format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_btree(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	struct xfs_btree_block	*block = ip->i_df.if_broot;
	struct xfs_btree_cur	*cur;		/* btree cursor */
	int			error;		/* error return value */
	xfs_mount_t		*mp;		/* file system mount struct */
	int			stat;		/* newroot status */

	mp = ip->i_mount;

	if (XFS_BMAP_BMDR_SPACE(block) <= xfs_inode_data_fork_size(ip))
		*flags |= XFS_ILOG_DBROOT;
	else {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
		error = xfs_bmbt_lookup_first(cur, &stat);
		if (error)
			goto error0;
		/* must be at least one entry */
		if (XFS_IS_CORRUPT(mp, stat != 1)) {
			xfs_btree_mark_sick(cur);
			error = -EFSCORRUPTED;
			goto error0;
		}
		if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
			goto error0;
		if (stat == 0) {
			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
			return -ENOSPC;
		}
		cur->bc_bmap.allocated = 0;
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	}
	return 0;
error0:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}

/*
 * Called from xfs_bmap_add_attrfork to handle extents format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_extents(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
	int			error;		/* error return value */

	if (ip->i_df.if_nextents * sizeof(struct xfs_bmbt_rec) <=
	    xfs_inode_data_fork_size(ip))
		return 0;
	cur = NULL;
	error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, flags,
					  XFS_DATA_FORK);
	if (cur) {
		cur->bc_bmap.allocated = 0;
		xfs_btree_del_cursor(cur, error);
	}
	return error;
}
/*
 * Called from xfs_bmap_add_attrfork to handle local format files. Each
 * different data fork content type needs a different callout to do the
 * conversion. Some are basic and only require special block initialisation
 * callouts for the data formatting, others (directories) are so specialised
 * they handle everything themselves.
 *
 * XXX (dgc): investigate whether directory conversion can use the generic
 * formatting callout. It should be possible - it's just a very complex
 * formatter.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_local(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	struct xfs_da_args	dargs;		/* args for dir/attr code */

	if (ip->i_df.if_bytes <= xfs_inode_data_fork_size(ip))
		return 0;

	if (S_ISDIR(VFS_I(ip)->i_mode)) {
		memset(&dargs, 0, sizeof(dargs));
		dargs.geo = ip->i_mount->m_dir_geo;
		dargs.dp = ip;
		dargs.total = dargs.geo->fsbcount;
		dargs.whichfork = XFS_DATA_FORK;
		dargs.trans = tp;
		dargs.owner = ip->i_ino;
		return xfs_dir2_sf_to_block(&dargs);
	}

	if (S_ISLNK(VFS_I(ip)->i_mode))
		return xfs_bmap_local_to_extents(tp, ip, 1, flags,
				XFS_DATA_FORK, xfs_symlink_local_to_remote,
				NULL);

	/* should only be called for types that support local format data */
	ASSERT(0);
	xfs_bmap_mark_sick(ip, XFS_ATTR_FORK);
	return -EFSCORRUPTED;
}

/*
 * Set an inode attr fork offset based on the format of the data fork.
 */
static int
xfs_bmap_set_attrforkoff(
	struct xfs_inode	*ip,
	int			size,
	int			*version)
{
	int			default_size = xfs_default_attroffset(ip) >> 3;

	switch (ip->i_df.if_format) {
	case XFS_DINODE_FMT_DEV:
		ip->i_forkoff = default_size;
		break;
	case XFS_DINODE_FMT_LOCAL:
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		ip->i_forkoff = xfs_attr_shortform_bytesfit(ip, size);
		if (!ip->i_forkoff)
			ip->i_forkoff = default_size;
		else if (xfs_has_attr2(ip->i_mount) && version)
			*version = 2;
		break;
	default:
		ASSERT(0);
		return -EINVAL;
	}

	return 0;
}
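
/*
 * Example of the units above (a sketch inferred from the code, not from
 * any on-disk documentation): i_forkoff counts 8-byte units.  Assuming
 * xfs_dev_t is a 4-byte value, a device node's data fork needs only
 * roundup(sizeof(xfs_dev_t), 8) = 8 bytes, so the ">> 3" places the
 * attr fork just one unit into the inode literal area for that case.
 */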

/*
 * Convert inode from non-attributed to attributed. Caller must hold the
 * ILOCK_EXCL and the file cannot have an attr fork.
 */
int						/* error code */
xfs_bmap_add_attrfork(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,		/* incore inode pointer */
	int			size,		/* space new attribute needs */
	int			rsvd)		/* xact may use reserved blks */
{
	struct xfs_mount	*mp = tp->t_mountp;
	int			version = 1;	/* superblock attr version */
	int			logflags;	/* logging flags */
	int			error;		/* error return value */

	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
	ASSERT(!xfs_inode_has_attr_fork(ip));

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	error = xfs_bmap_set_attrforkoff(ip, size, &version);
	if (error)
		return error;

	xfs_ifork_init_attr(ip, XFS_DINODE_FMT_EXTENTS, 0);
	logflags = 0;
	switch (ip->i_df.if_format) {
	case XFS_DINODE_FMT_LOCAL:
		error = xfs_bmap_add_attrfork_local(tp, ip, &logflags);
		break;
	case XFS_DINODE_FMT_EXTENTS:
		error = xfs_bmap_add_attrfork_extents(tp, ip, &logflags);
		break;
	case XFS_DINODE_FMT_BTREE:
		error = xfs_bmap_add_attrfork_btree(tp, ip, &logflags);
		break;
	default:
		error = 0;
		break;
	}
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (error)
		return error;
	if (!xfs_has_attr(mp) ||
	    (!xfs_has_attr2(mp) && version == 2)) {
		bool log_sb = false;

		spin_lock(&mp->m_sb_lock);
		if (!xfs_has_attr(mp)) {
			xfs_add_attr(mp);
			log_sb = true;
		}
		if (!xfs_has_attr2(mp) && version == 2) {
			xfs_add_attr2(mp);
			log_sb = true;
		}
		spin_unlock(&mp->m_sb_lock);
		if (log_sb)
			xfs_log_sb(tp);
	}

	return 0;
}

/*
 * Internal and external extent tree search functions.
 */

struct xfs_iread_state {
	struct xfs_iext_cursor	icur;
	xfs_extnum_t		loaded;
};

int
xfs_bmap_complain_bad_rec(
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_failaddr_t		fa,
	const struct xfs_bmbt_irec *irec)
{
	struct xfs_mount	*mp = ip->i_mount;
	const char		*forkname;

	switch (whichfork) {
	case XFS_DATA_FORK:	forkname = "data"; break;
	case XFS_ATTR_FORK:	forkname = "attr"; break;
	case XFS_COW_FORK:	forkname = "CoW"; break;
	default:		forkname = "???"; break;
	}

	xfs_warn(mp,
 "Bmap BTree record corruption in inode 0x%llx %s fork detected at %pS!",
		ip->i_ino, forkname, fa);
	xfs_warn(mp,
		"Offset 0x%llx, start block 0x%llx, block count 0x%llx state 0x%x",
		irec->br_startoff, irec->br_startblock, irec->br_blockcount,
		irec->br_state);

	return -EFSCORRUPTED;
}

/* Stuff every bmbt record from this block into the incore extent map. */
static int
xfs_iread_bmbt_block(
	struct xfs_btree_cur	*cur,
	int			level,
	void			*priv)
{
	struct xfs_iread_state	*ir = priv;
	struct xfs_mount	*mp = cur->bc_mp;
	struct xfs_inode	*ip = cur->bc_ino.ip;
	struct xfs_btree_block	*block;
	struct xfs_buf		*bp;
	struct xfs_bmbt_rec	*frp;
	xfs_extnum_t		num_recs;
	xfs_extnum_t		j;
	int			whichfork = cur->bc_ino.whichfork;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);

	block = xfs_btree_get_block(cur, level, &bp);

	/* Abort if we find more records than nextents. */
	num_recs = xfs_btree_get_numrecs(block);
	if (unlikely(ir->loaded + num_recs > ifp->if_nextents)) {
		xfs_warn(ip->i_mount, "corrupt dinode %llu, (btree extents).",
				(unsigned long long)ip->i_ino);
		xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, block,
				sizeof(*block), __this_address);
		xfs_bmap_mark_sick(ip, whichfork);
		return -EFSCORRUPTED;
	}

	/* Copy records into the incore cache. */
	frp = XFS_BMBT_REC_ADDR(mp, block, 1);
	for (j = 0; j < num_recs; j++, frp++, ir->loaded++) {
		struct xfs_bmbt_irec	new;
		xfs_failaddr_t		fa;

		xfs_bmbt_disk_get_all(frp, &new);
		fa = xfs_bmap_validate_extent(ip, whichfork, &new);
		if (fa) {
			xfs_inode_verifier_error(ip, -EFSCORRUPTED,
					"xfs_iread_extents(2)", frp,
					sizeof(*frp), fa);
			xfs_bmap_mark_sick(ip, whichfork);
			return xfs_bmap_complain_bad_rec(ip, whichfork, fa,
					&new);
		}
		xfs_iext_insert(ip, &ir->icur, &new,
				xfs_bmap_fork_to_state(whichfork));
		trace_xfs_read_extent(ip, &ir->icur,
				xfs_bmap_fork_to_state(whichfork), _THIS_IP_);
		xfs_iext_next(ifp, &ir->icur);
	}

	return 0;
}

/*
 * Read in extents from a btree-format inode.
 */
int
xfs_iread_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_iread_state	ir;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_btree_cur	*cur;
	int			error;

	if (!xfs_need_iread_extents(ifp))
		return 0;

	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);

	ir.loaded = 0;
	xfs_iext_first(ifp, &ir.icur);
	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	error = xfs_btree_visit_blocks(cur, xfs_iread_bmbt_block,
			XFS_BTREE_VISIT_RECORDS, &ir);
	xfs_btree_del_cursor(cur, error);
	if (error)
		goto out;

	if (XFS_IS_CORRUPT(mp, ir.loaded != ifp->if_nextents)) {
		xfs_bmap_mark_sick(ip, whichfork);
		error = -EFSCORRUPTED;
		goto out;
	}
	ASSERT(ir.loaded == xfs_iext_count(ifp));
	/*
	 * Use release semantics so that we can use acquire semantics in
	 * xfs_need_iread_extents and be guaranteed to see a valid mapping tree
	 * after that load.
	 */
	smp_store_release(&ifp->if_needextents, 0);
	return 0;
out:
	if (xfs_metadata_is_sick(error))
		xfs_bmap_mark_sick(ip, whichfork);
	xfs_iext_destroy(ifp);
	return error;
}
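
/*
 * Typical usage sketch (illustrative only, matching the locking assert
 * above): callers must hold the ILOCK exclusively before pulling the
 * extent list incore, e.g.
 *
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
 *	if (!error)
 *		... walk the incore extent tree ...
 */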

/*
 * Returns the relative block number of the first unused block(s) in the given
 * fork with at least "len" logically contiguous blocks free. This is the
 * lowest-address hole if the fork has holes, else the first block past the
 * end of the fork. Return 0 if the fork is currently local (in-inode).
 */
int						/* error */
xfs_bmap_first_unused(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_extlen_t		len,		/* size of hole to find */
	xfs_fileoff_t		*first_unused,	/* unused block */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		lastaddr = 0;
	xfs_fileoff_t		lowest, max;
	int			error;

	if (ifp->if_format == XFS_DINODE_FMT_LOCAL) {
		*first_unused = 0;
		return 0;
	}

	ASSERT(xfs_ifork_has_extents(ifp));

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	lowest = max = *first_unused;
	for_each_xfs_iext(ifp, &icur, &got) {
		/*
		 * See if the hole before this extent will work.
		 */
		if (got.br_startoff >= lowest + len &&
		    got.br_startoff - max >= len)
			break;
		lastaddr = got.br_startoff + got.br_blockcount;
		max = XFS_FILEOFF_MAX(lastaddr, lowest);
	}

	*first_unused = max;
	return 0;
}
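
/*
 * Worked example of the scan above (offsets assumed for illustration):
 * searching from 0 with mappings at [0, 10) and [15, 20) and len = 4,
 * the first extent only advances max to 10; the second starts at 15,
 * and since 15 - 10 leaves a 5 block hole >= len, the loop breaks and
 * *first_unused is set to 10.
 */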

/*
 * Returns the file-relative block number of the last block - 1 before
 * last_block (input value) in the file.
 * This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int						/* error */
xfs_bmap_last_before(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_fileoff_t		*last_block,	/* last block */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	int			error;

	switch (ifp->if_format) {
	case XFS_DINODE_FMT_LOCAL:
		*last_block = 0;
		return 0;
	case XFS_DINODE_FMT_BTREE:
	case XFS_DINODE_FMT_EXTENTS:
		break;
	default:
		ASSERT(0);
		xfs_bmap_mark_sick(ip, whichfork);
		return -EFSCORRUPTED;
	}

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	if (!xfs_iext_lookup_extent_before(ip, ifp, last_block, &icur, &got))
		*last_block = 0;
	return 0;
}

int
xfs_bmap_last_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*rec,
	int			*is_empty)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_iext_cursor	icur;
	int			error;

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	xfs_iext_last(ifp, &icur);
	if (!xfs_iext_get_extent(ifp, &icur, rec))
		*is_empty = 1;
	else
		*is_empty = 0;
	return 0;
}

/*
 * Check the last inode extent to determine whether this allocation will result
 * in blocks being allocated at the end of the file. When we allocate new data
 * blocks at the end of the file which do not start at the previous data block,
 * we will try to align the new blocks at stripe unit boundaries.
 *
 * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be
 * at, or past the EOF.
 */
STATIC int
xfs_bmap_isaeof(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	bma->aeof = false;
	error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
			&is_empty);
	if (error)
		return error;

	if (is_empty) {
		bma->aeof = true;
		return 0;
	}

	/*
	 * Check if we are allocating past the last extent, or at least into
	 * the last delayed allocated extent.
	 */
	bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
		(bma->offset >= rec.br_startoff &&
		 isnullstartblock(rec.br_startblock));
	return 0;
}

/*
 * Returns the file-relative block number of the first block past eof in
 * the file.  This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int
xfs_bmap_last_offset(
	struct xfs_inode	*ip,
	xfs_fileoff_t		*last_block,
	int			whichfork)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	*last_block = 0;

	if (ifp->if_format == XFS_DINODE_FMT_LOCAL)
		return 0;

	if (XFS_IS_CORRUPT(ip->i_mount, !xfs_ifork_has_extents(ifp))) {
		xfs_bmap_mark_sick(ip, whichfork);
		return -EFSCORRUPTED;
	}

	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
	if (error || is_empty)
		return error;

	*last_block = rec.br_startoff + rec.br_blockcount;
	return 0;
}

/*
 * Extent tree manipulation functions used during allocation.
 */

/*
 * Convert a delayed allocation to a real allocation.
 */
STATIC int				/* error */
xfs_bmap_add_extent_delay_real(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_mount	*mp = bma->ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);
	struct xfs_bmbt_irec	*new = &bma->got;
	int			error;	/* error return value */
	int			i;	/* temp state */
	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
					/* left is 0, right is 1, prev is 2 */
	int			rval = 0;	/* return value (logging flags) */
	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
	xfs_filblks_t		da_new;	/* new count del alloc blocks used */
	xfs_filblks_t		da_old;	/* old count del alloc blocks used */
	xfs_filblks_t		temp = 0; /* value for da_new calculations */
	int			tmp_rval;	/* partial logging flags */
	struct xfs_bmbt_irec	old;

	ASSERT(whichfork != XFS_ATTR_FORK);
	ASSERT(!isnullstartblock(new->br_startblock));
	ASSERT(!bma->cur || (bma->cur->bc_flags & XFS_BTREE_BMBT_WASDEL));

	XFS_STATS_INC(mp, xs_add_exlist);

#define	LEFT		r[0]
#define	RIGHT		r[1]
#define	PREV		r[2]

	/*
	 * Set up a bunch of variables to make the tests simpler.
	 */
	xfs_iext_get_extent(ifp, &bma->icur, &PREV);
	new_endoff = new->br_startoff + new->br_blockcount;
	ASSERT(isnullstartblock(PREV.br_startblock));
	ASSERT(PREV.br_startoff <= new->br_startoff);
	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);

	da_old = startblockval(PREV.br_startblock);
	da_new = 0;

	/*
	 * Set flags determining what part of the previous delayed allocation
	 * extent is being replaced by a real allocation.
	 */
	if (PREV.br_startoff == new->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
		state |= BMAP_RIGHT_FILLING;

	/*
	 * Check and set flags if this segment has a left neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 */
	if (xfs_iext_peek_prev_extent(ifp, &bma->icur, &LEFT)) {
		state |= BMAP_LEFT_VALID;
		if (isnullstartblock(LEFT.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
	    LEFT.br_state == new->br_state &&
	    LEFT.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN)
		state |= BMAP_LEFT_CONTIG;

	/*
	 * Check and set flags if this segment has a right neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 * Also check for all-three-contiguous being too large.
	 */
	if (xfs_iext_peek_next_extent(ifp, &bma->icur, &RIGHT)) {
		state |= BMAP_RIGHT_VALID;
		if (isnullstartblock(RIGHT.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new_endoff == RIGHT.br_startoff &&
	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
	    new->br_state == RIGHT.br_state &&
	    new->br_blockcount + RIGHT.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING)) !=
		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING) ||
	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
			<= XFS_MAX_BMBT_EXTLEN))
		state |= BMAP_RIGHT_CONTIG;

	error = 0;
	/*
	 * Switch out based on the FILLING and CONTIG state bits.
	 */
	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left and right neighbors are both contiguous with new.
		 */
		LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;

		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
		ifp->if_nextents--;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_delete(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_decrement(bma->cur, 0, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		ASSERT(da_new <= da_old);
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left neighbor is contiguous, the right is not.
		 */
		old = LEFT;
		LEFT.br_blockcount += PREV.br_blockcount;

		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		ASSERT(da_new <= da_old);
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The right neighbor is contiguous, the left is not. Take care
		 * with delay -> unwritten extent allocation here because the
		 * delalloc record we are overwriting is always written.
		 */
		PREV.br_startblock = new->br_startblock;
		PREV.br_blockcount += RIGHT.br_blockcount;
		PREV.br_state = new->br_state;

		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &PREV);
			if (error)
				goto done;
		}
		ASSERT(da_new <= da_old);
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * Neither the left nor right neighbors are contiguous with
		 * the new one.
		 */
		PREV.br_startblock = new->br_startblock;
		PREV.br_state = new->br_state;
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		ifp->if_nextents++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
		}
		ASSERT(da_new <= da_old);
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is contiguous.
		 */
		old = LEFT;
		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
				startblockval(PREV.br_startblock));

		LEFT.br_blockcount += new->br_blockcount;

		PREV.br_blockcount = temp;
		PREV.br_startoff += new->br_blockcount;
		PREV.br_startblock = nullstartblock(da_new);

		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		ASSERT(da_new <= da_old);
		break;

	case BMAP_LEFT_FILLING:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is not contiguous.
		 */
		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
		ifp->if_nextents++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
					&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
				startblockval(PREV.br_startblock) -
				(bma->cur ? bma->cur->bc_bmap.allocated : 0));

		PREV.br_startoff = new_endoff;
		PREV.br_blockcount = temp;
		PREV.br_startblock = nullstartblock(da_new);
		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
		xfs_iext_prev(ifp, &bma->icur);
		break;

	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is contiguous with the new allocation.
		 */
		old = RIGHT;
		RIGHT.br_startoff = new->br_startoff;
		RIGHT.br_startblock = new->br_startblock;
		RIGHT.br_blockcount += new->br_blockcount;

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &RIGHT);
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
				startblockval(PREV.br_startblock));

		PREV.br_blockcount = temp;
		PREV.br_startblock = nullstartblock(da_new);

		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &RIGHT);
		ASSERT(da_new <= da_old);
		break;

	case BMAP_RIGHT_FILLING:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is not contiguous.
		 */
		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
		ifp->if_nextents++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
					&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
				startblockval(PREV.br_startblock) -
				(bma->cur ? bma->cur->bc_bmap.allocated : 0));

		PREV.br_startblock = nullstartblock(da_new);
		PREV.br_blockcount = temp;
		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
		xfs_iext_next(ifp, &bma->icur);
		ASSERT(da_new <= da_old);
		break;

	case 0:
		/*
		 * Filling in the middle part of a previous delayed allocation.
		 * Contiguity is impossible here.
		 * This case is avoided almost all the time.
		 *
		 * We start with a delayed allocation:
		 *
		 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
		 *  PREV @ idx
		 *
		 * and we are allocating:
		 *                     +rrrrrrrrrrrrrrrrr+
		 *                            new
		 *
		 * and we set it up for insertion as:
		 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
		 *                            new
		 *  PREV @ idx          LEFT              RIGHT
		 *                      inserted at idx + 1
		 */
		old = PREV;

		/* LEFT is the new middle */
		LEFT = *new;

		/* RIGHT is the new right */
		RIGHT.br_state = PREV.br_state;
		RIGHT.br_startoff = new_endoff;
		RIGHT.br_blockcount =
			PREV.br_startoff + PREV.br_blockcount - new_endoff;
		RIGHT.br_startblock =
			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
					RIGHT.br_blockcount));

		/* truncate PREV */
		PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
		PREV.br_startblock =
			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
					PREV.br_blockcount));
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);

		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_insert(bma->ip, &bma->icur, &RIGHT, state);
		xfs_iext_insert(bma->ip, &bma->icur, &LEFT, state);
		ifp->if_nextents++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				xfs_btree_mark_sick(bma->cur);
				error = -EFSCORRUPTED;
1922 goto done;
1923 }
1924 }
1925
1926 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1927 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1928 &bma->cur, 1, &tmp_rval, whichfork);
1929 rval |= tmp_rval;
1930 if (error)
1931 goto done;
1932 }
1933
1934 da_new = startblockval(PREV.br_startblock) +
1935 startblockval(RIGHT.br_startblock);
1936 break;
1937
1938 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1939 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1940 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
1941 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1942 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1943 case BMAP_LEFT_CONTIG:
1944 case BMAP_RIGHT_CONTIG:
1945 /*
1946 * These cases are all impossible.
1947 */
1948 ASSERT(0);
1949 }
1950
1951 /* add reverse mapping unless caller opted out */
1952 if (!(bma->flags & XFS_BMAPI_NORMAP))
1953 xfs_rmap_map_extent(bma->tp, bma->ip, whichfork, new);
1954
1955 /* convert to a btree if necessary */
1956 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1957 int tmp_logflags; /* partial log flag return val */
1958
1959 ASSERT(bma->cur == NULL);
1960 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1961 &bma->cur, da_old > 0, &tmp_logflags,
1962 whichfork);
1963 bma->logflags |= tmp_logflags;
1964 if (error)
1965 goto done;
1966 }
1967
1968 if (da_new != da_old)
1969 xfs_mod_delalloc(bma->ip, 0, (int64_t)da_new - da_old);
1970
1971 if (bma->cur) {
1972 da_new += bma->cur->bc_bmap.allocated;
1973 bma->cur->bc_bmap.allocated = 0;
1974 }
1975
1976 /* adjust for changes in reserved delayed indirect blocks */
1977 if (da_new < da_old)
1978 xfs_add_fdblocks(mp, da_old - da_new);
1979 else if (da_new > da_old)
1980 error = xfs_dec_fdblocks(mp, da_new - da_old, true);
1981
1982 xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
1983 done:
1984 if (whichfork != XFS_COW_FORK)
1985 bma->logflags |= rval;
1986 return error;
1987 #undef LEFT
1988 #undef RIGHT
1989 #undef PREV
1990 }
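
/*
 * Worked example of the reservation adjustment above (illustrative numbers,
 * not taken from any real trace): if the delayed extent's worst-case
 * reservation was da_old = 8 indirect blocks and the remaining delalloc
 * pieces plus any blocks the btree cursor consumed add up to da_new = 3,
 * the 5 surplus blocks go back to the free space counters via
 * xfs_add_fdblocks().  Only when da_new ends up larger (typically the
 * middle-split case 0, where PREV and RIGHT each get a fresh
 * xfs_bmap_worst_indlen() reservation) do we need xfs_dec_fdblocks(),
 * which may dip into the reserve pool rather than fail outright.
 */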
1991
1992 /*
1993 * Convert an unwritten allocation to a real allocation or vice versa.
1994 */
1995 int /* error */
1996 xfs_bmap_add_extent_unwritten_real(
1997 struct xfs_trans *tp,
1998 xfs_inode_t *ip, /* incore inode pointer */
1999 int whichfork,
2000 struct xfs_iext_cursor *icur,
2001 struct xfs_btree_cur **curp, /* if *curp is null, not a btree */
2002 xfs_bmbt_irec_t *new, /* new data to add to file extents */
2003 int *logflagsp) /* inode logging flags */
2004 {
2005 struct xfs_btree_cur *cur; /* btree cursor */
2006 int error; /* error return value */
2007 int i; /* temp state */
2008 struct xfs_ifork *ifp; /* inode fork pointer */
2009 xfs_fileoff_t new_endoff; /* end offset of new entry */
2010 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */
2011 /* left is 0, right is 1, prev is 2 */
2012 int rval=0; /* return value (logging flags) */
2013 uint32_t state = xfs_bmap_fork_to_state(whichfork);
2014 struct xfs_mount *mp = ip->i_mount;
2015 struct xfs_bmbt_irec old;
2016
2017 *logflagsp = 0;
2018
2019 cur = *curp;
2020 ifp = xfs_ifork_ptr(ip, whichfork);
2021
2022 ASSERT(!isnullstartblock(new->br_startblock));
2023
2024 XFS_STATS_INC(mp, xs_add_exlist);
2025
2026 #define LEFT r[0]
2027 #define RIGHT r[1]
2028 #define PREV r[2]
2029
2030 /*
2031 * Set up a bunch of variables to make the tests simpler.
2032 */
2033 error = 0;
2034 xfs_iext_get_extent(ifp, icur, &PREV);
2035 ASSERT(new->br_state != PREV.br_state);
2036 new_endoff = new->br_startoff + new->br_blockcount;
2037 ASSERT(PREV.br_startoff <= new->br_startoff);
2038 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
2039
2040 /*
2041 * Set flags determining what part of the previous oldext allocation
2042 * extent is being replaced by a newext allocation.
2043 */
2044 if (PREV.br_startoff == new->br_startoff)
2045 state |= BMAP_LEFT_FILLING;
2046 if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
2047 state |= BMAP_RIGHT_FILLING;
2048
2049 /*
2050 * Check and set flags if this segment has a left neighbor.
2051 * Don't set contiguous if the combined extent would be too large.
2052 */
2053 if (xfs_iext_peek_prev_extent(ifp, icur, &LEFT)) {
2054 state |= BMAP_LEFT_VALID;
2055 if (isnullstartblock(LEFT.br_startblock))
2056 state |= BMAP_LEFT_DELAY;
2057 }
2058
2059 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2060 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
2061 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
2062 LEFT.br_state == new->br_state &&
2063 LEFT.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN)
2064 state |= BMAP_LEFT_CONTIG;
2065
2066 /*
2067 * Check and set flags if this segment has a right neighbor.
2068 * Don't set contiguous if the combined extent would be too large.
2069 * Also check for all-three-contiguous being too large.
2070 */
2071 if (xfs_iext_peek_next_extent(ifp, icur, &RIGHT)) {
2072 state |= BMAP_RIGHT_VALID;
2073 if (isnullstartblock(RIGHT.br_startblock))
2074 state |= BMAP_RIGHT_DELAY;
2075 }
2076
2077 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2078 new_endoff == RIGHT.br_startoff &&
2079 new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
2080 new->br_state == RIGHT.br_state &&
2081 new->br_blockcount + RIGHT.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
2082 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
2083 BMAP_RIGHT_FILLING)) !=
2084 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
2085 BMAP_RIGHT_FILLING) ||
2086 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
2087 <= XFS_MAX_BMBT_EXTLEN))
2088 state |= BMAP_RIGHT_CONTIG;
2089
2090 /*
2091 * Switch out based on the FILLING and CONTIG state bits.
2092 */
2093 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
2094 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
2095 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
2096 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2097 /*
2098 * Setting all of a previous oldext extent to newext.
2099 * The left and right neighbors are both contiguous with new.
2100 */
2101 LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;
2102
2103 xfs_iext_remove(ip, icur, state);
2104 xfs_iext_remove(ip, icur, state);
2105 xfs_iext_prev(ifp, icur);
2106 xfs_iext_update_extent(ip, state, icur, &LEFT);
2107 ifp->if_nextents -= 2;
2108 if (cur == NULL)
2109 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2110 else {
2111 rval = XFS_ILOG_CORE;
2112 error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
2113 if (error)
2114 goto done;
2115 if (XFS_IS_CORRUPT(mp, i != 1)) {
2116 xfs_btree_mark_sick(cur);
2117 error = -EFSCORRUPTED;
2118 goto done;
2119 }
2120 if ((error = xfs_btree_delete(cur, &i)))
2121 goto done;
2122 if (XFS_IS_CORRUPT(mp, i != 1)) {
2123 xfs_btree_mark_sick(cur);
2124 error = -EFSCORRUPTED;
2125 goto done;
2126 }
2127 if ((error = xfs_btree_decrement(cur, 0, &i)))
2128 goto done;
2129 if (XFS_IS_CORRUPT(mp, i != 1)) {
2130 xfs_btree_mark_sick(cur);
2131 error = -EFSCORRUPTED;
2132 goto done;
2133 }
2134 if ((error = xfs_btree_delete(cur, &i)))
2135 goto done;
2136 if (XFS_IS_CORRUPT(mp, i != 1)) {
2137 xfs_btree_mark_sick(cur);
2138 error = -EFSCORRUPTED;
2139 goto done;
2140 }
2141 if ((error = xfs_btree_decrement(cur, 0, &i)))
2142 goto done;
2143 if (XFS_IS_CORRUPT(mp, i != 1)) {
2144 xfs_btree_mark_sick(cur);
2145 error = -EFSCORRUPTED;
2146 goto done;
2147 }
2148 error = xfs_bmbt_update(cur, &LEFT);
2149 if (error)
2150 goto done;
2151 }
2152 break;
2153
2154 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2155 /*
2156 * Setting all of a previous oldext extent to newext.
2157 * The left neighbor is contiguous, the right is not.
2158 */
2159 LEFT.br_blockcount += PREV.br_blockcount;
2160
2161 xfs_iext_remove(ip, icur, state);
2162 xfs_iext_prev(ifp, icur);
2163 xfs_iext_update_extent(ip, state, icur, &LEFT);
2164 ifp->if_nextents--;
2165 if (cur == NULL)
2166 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2167 else {
2168 rval = XFS_ILOG_CORE;
2169 error = xfs_bmbt_lookup_eq(cur, &PREV, &i);
2170 if (error)
2171 goto done;
2172 if (XFS_IS_CORRUPT(mp, i != 1)) {
2173 xfs_btree_mark_sick(cur);
2174 error = -EFSCORRUPTED;
2175 goto done;
2176 }
2177 if ((error = xfs_btree_delete(cur, &i)))
2178 goto done;
2179 if (XFS_IS_CORRUPT(mp, i != 1)) {
2180 xfs_btree_mark_sick(cur);
2181 error = -EFSCORRUPTED;
2182 goto done;
2183 }
2184 if ((error = xfs_btree_decrement(cur, 0, &i)))
2185 goto done;
2186 if (XFS_IS_CORRUPT(mp, i != 1)) {
2187 xfs_btree_mark_sick(cur);
2188 error = -EFSCORRUPTED;
2189 goto done;
2190 }
2191 error = xfs_bmbt_update(cur, &LEFT);
2192 if (error)
2193 goto done;
2194 }
2195 break;
2196
2197 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2198 /*
2199 * Setting all of a previous oldext extent to newext.
2200 * The right neighbor is contiguous, the left is not.
2201 */
2202 PREV.br_blockcount += RIGHT.br_blockcount;
2203 PREV.br_state = new->br_state;
2204
2205 xfs_iext_next(ifp, icur);
2206 xfs_iext_remove(ip, icur, state);
2207 xfs_iext_prev(ifp, icur);
2208 xfs_iext_update_extent(ip, state, icur, &PREV);
2209 ifp->if_nextents--;
2210
2211 if (cur == NULL)
2212 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2213 else {
2214 rval = XFS_ILOG_CORE;
2215 error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
2216 if (error)
2217 goto done;
2218 if (XFS_IS_CORRUPT(mp, i != 1)) {
2219 xfs_btree_mark_sick(cur);
2220 error = -EFSCORRUPTED;
2221 goto done;
2222 }
2223 if ((error = xfs_btree_delete(cur, &i)))
2224 goto done;
2225 if (XFS_IS_CORRUPT(mp, i != 1)) {
2226 xfs_btree_mark_sick(cur);
2227 error = -EFSCORRUPTED;
2228 goto done;
2229 }
2230 if ((error = xfs_btree_decrement(cur, 0, &i)))
2231 goto done;
2232 if (XFS_IS_CORRUPT(mp, i != 1)) {
2233 xfs_btree_mark_sick(cur);
2234 error = -EFSCORRUPTED;
2235 goto done;
2236 }
2237 error = xfs_bmbt_update(cur, &PREV);
2238 if (error)
2239 goto done;
2240 }
2241 break;
2242
2243 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
2244 /*
2245 * Setting all of a previous oldext extent to newext.
2246 * Neither the left nor right neighbors are contiguous with
2247 * the new one.
2248 */
2249 PREV.br_state = new->br_state;
2250 xfs_iext_update_extent(ip, state, icur, &PREV);
2251
2252 if (cur == NULL)
2253 rval = XFS_ILOG_DEXT;
2254 else {
2255 rval = 0;
2256 error = xfs_bmbt_lookup_eq(cur, new, &i);
2257 if (error)
2258 goto done;
2259 if (XFS_IS_CORRUPT(mp, i != 1)) {
2260 xfs_btree_mark_sick(cur);
2261 error = -EFSCORRUPTED;
2262 goto done;
2263 }
2264 error = xfs_bmbt_update(cur, &PREV);
2265 if (error)
2266 goto done;
2267 }
2268 break;
2269
2270 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
2271 /*
2272 * Setting the first part of a previous oldext extent to newext.
2273 * The left neighbor is contiguous.
2274 */
2275 LEFT.br_blockcount += new->br_blockcount;
2276
2277 old = PREV;
2278 PREV.br_startoff += new->br_blockcount;
2279 PREV.br_startblock += new->br_blockcount;
2280 PREV.br_blockcount -= new->br_blockcount;
2281
2282 xfs_iext_update_extent(ip, state, icur, &PREV);
2283 xfs_iext_prev(ifp, icur);
2284 xfs_iext_update_extent(ip, state, icur, &LEFT);
2285
2286 if (cur == NULL)
2287 rval = XFS_ILOG_DEXT;
2288 else {
2289 rval = 0;
2290 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2291 if (error)
2292 goto done;
2293 if (XFS_IS_CORRUPT(mp, i != 1)) {
2294 xfs_btree_mark_sick(cur);
2295 error = -EFSCORRUPTED;
2296 goto done;
2297 }
2298 error = xfs_bmbt_update(cur, &PREV);
2299 if (error)
2300 goto done;
2301 error = xfs_btree_decrement(cur, 0, &i);
2302 if (error)
2303 goto done;
2304 error = xfs_bmbt_update(cur, &LEFT);
2305 if (error)
2306 goto done;
2307 }
2308 break;
2309
2310 case BMAP_LEFT_FILLING:
2311 /*
2312 * Setting the first part of a previous oldext extent to newext.
2313 * The left neighbor is not contiguous.
2314 */
2315 old = PREV;
2316 PREV.br_startoff += new->br_blockcount;
2317 PREV.br_startblock += new->br_blockcount;
2318 PREV.br_blockcount -= new->br_blockcount;
2319
2320 xfs_iext_update_extent(ip, state, icur, &PREV);
2321 xfs_iext_insert(ip, icur, new, state);
2322 ifp->if_nextents++;
2323
2324 if (cur == NULL)
2325 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2326 else {
2327 rval = XFS_ILOG_CORE;
2328 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2329 if (error)
2330 goto done;
2331 if (XFS_IS_CORRUPT(mp, i != 1)) {
2332 xfs_btree_mark_sick(cur);
2333 error = -EFSCORRUPTED;
2334 goto done;
2335 }
2336 error = xfs_bmbt_update(cur, &PREV);
2337 if (error)
2338 goto done;
2339 cur->bc_rec.b = *new;
2340 if ((error = xfs_btree_insert(cur, &i)))
2341 goto done;
2342 if (XFS_IS_CORRUPT(mp, i != 1)) {
2343 xfs_btree_mark_sick(cur);
2344 error = -EFSCORRUPTED;
2345 goto done;
2346 }
2347 }
2348 break;
2349
2350 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2351 /*
2352 * Setting the last part of a previous oldext extent to newext.
2353 * The right neighbor is contiguous with the new allocation.
2354 */
2355 old = PREV;
2356 PREV.br_blockcount -= new->br_blockcount;
2357
2358 RIGHT.br_startoff = new->br_startoff;
2359 RIGHT.br_startblock = new->br_startblock;
2360 RIGHT.br_blockcount += new->br_blockcount;
2361
2362 xfs_iext_update_extent(ip, state, icur, &PREV);
2363 xfs_iext_next(ifp, icur);
2364 xfs_iext_update_extent(ip, state, icur, &RIGHT);
2365
2366 if (cur == NULL)
2367 rval = XFS_ILOG_DEXT;
2368 else {
2369 rval = 0;
2370 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2371 if (error)
2372 goto done;
2373 if (XFS_IS_CORRUPT(mp, i != 1)) {
2374 xfs_btree_mark_sick(cur);
2375 error = -EFSCORRUPTED;
2376 goto done;
2377 }
2378 error = xfs_bmbt_update(cur, &PREV);
2379 if (error)
2380 goto done;
2381 error = xfs_btree_increment(cur, 0, &i);
2382 if (error)
2383 goto done;
2384 error = xfs_bmbt_update(cur, &RIGHT);
2385 if (error)
2386 goto done;
2387 }
2388 break;
2389
2390 case BMAP_RIGHT_FILLING:
2391 /*
2392 * Setting the last part of a previous oldext extent to newext.
2393 * The right neighbor is not contiguous.
2394 */
2395 old = PREV;
2396 PREV.br_blockcount -= new->br_blockcount;
2397
2398 xfs_iext_update_extent(ip, state, icur, &PREV);
2399 xfs_iext_next(ifp, icur);
2400 xfs_iext_insert(ip, icur, new, state);
2401 ifp->if_nextents++;
2402
2403 if (cur == NULL)
2404 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2405 else {
2406 rval = XFS_ILOG_CORE;
2407 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2408 if (error)
2409 goto done;
2410 if (XFS_IS_CORRUPT(mp, i != 1)) {
2411 xfs_btree_mark_sick(cur);
2412 error = -EFSCORRUPTED;
2413 goto done;
2414 }
2415 error = xfs_bmbt_update(cur, &PREV);
2416 if (error)
2417 goto done;
2418 error = xfs_bmbt_lookup_eq(cur, new, &i);
2419 if (error)
2420 goto done;
2421 if (XFS_IS_CORRUPT(mp, i != 0)) {
2422 xfs_btree_mark_sick(cur);
2423 error = -EFSCORRUPTED;
2424 goto done;
2425 }
2426 if ((error = xfs_btree_insert(cur, &i)))
2427 goto done;
2428 if (XFS_IS_CORRUPT(mp, i != 1)) {
2429 xfs_btree_mark_sick(cur);
2430 error = -EFSCORRUPTED;
2431 goto done;
2432 }
2433 }
2434 break;
2435
2436 case 0:
2437 /*
2438 * Setting the middle part of a previous oldext extent to
2439 * newext. Contiguity is impossible here.
2440 * One extent becomes three extents.
2441 */
2442 old = PREV;
2443 PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
2444
2445 r[0] = *new;
2446 r[1].br_startoff = new_endoff;
2447 r[1].br_blockcount =
2448 old.br_startoff + old.br_blockcount - new_endoff;
2449 r[1].br_startblock = new->br_startblock + new->br_blockcount;
2450 r[1].br_state = PREV.br_state;
2451
2452 xfs_iext_update_extent(ip, state, icur, &PREV);
2453 xfs_iext_next(ifp, icur);
2454 xfs_iext_insert(ip, icur, &r[1], state);
2455 xfs_iext_insert(ip, icur, &r[0], state);
2456 ifp->if_nextents += 2;
2457
2458 if (cur == NULL)
2459 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2460 else {
2461 rval = XFS_ILOG_CORE;
2462 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2463 if (error)
2464 goto done;
2465 if (XFS_IS_CORRUPT(mp, i != 1)) {
2466 xfs_btree_mark_sick(cur);
2467 error = -EFSCORRUPTED;
2468 goto done;
2469 }
2470 /* new right extent - oldext */
2471 error = xfs_bmbt_update(cur, &r[1]);
2472 if (error)
2473 goto done;
2474 /* new left extent - oldext */
2475 cur->bc_rec.b = PREV;
2476 if ((error = xfs_btree_insert(cur, &i)))
2477 goto done;
2478 if (XFS_IS_CORRUPT(mp, i != 1)) {
2479 xfs_btree_mark_sick(cur);
2480 error = -EFSCORRUPTED;
2481 goto done;
2482 }
2483 /*
2484 * Reset the cursor to the position of the new extent
2485 * we are about to insert as we can't trust it after
2486 * the previous insert.
2487 */
2488 error = xfs_bmbt_lookup_eq(cur, new, &i);
2489 if (error)
2490 goto done;
2491 if (XFS_IS_CORRUPT(mp, i != 0)) {
2492 xfs_btree_mark_sick(cur);
2493 error = -EFSCORRUPTED;
2494 goto done;
2495 }
2496 /* new middle extent - newext */
2497 if ((error = xfs_btree_insert(cur, &i)))
2498 goto done;
2499 if (XFS_IS_CORRUPT(mp, i != 1)) {
2500 xfs_btree_mark_sick(cur);
2501 error = -EFSCORRUPTED;
2502 goto done;
2503 }
2504 }
2505 break;
2506
2507 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2508 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2509 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
2510 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2511 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2512 case BMAP_LEFT_CONTIG:
2513 case BMAP_RIGHT_CONTIG:
2514 /*
2515 * These cases are all impossible.
2516 */
2517 ASSERT(0);
2518 }
2519
2520 /* update reverse mappings */
2521 xfs_rmap_convert_extent(mp, tp, ip, whichfork, new);
2522
2523 /* convert to a btree if necessary */
2524 if (xfs_bmap_needs_btree(ip, whichfork)) {
2525 int tmp_logflags; /* partial log flag return val */
2526
2527 ASSERT(cur == NULL);
2528 error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
2529 &tmp_logflags, whichfork);
2530 *logflagsp |= tmp_logflags;
2531 if (error)
2532 goto done;
2533 }
2534
2535 /* clear out the allocated field, done with it now in any case. */
2536 if (cur) {
2537 cur->bc_bmap.allocated = 0;
2538 *curp = cur;
2539 }
2540
2541 xfs_bmap_check_leaf_extents(*curp, ip, whichfork);
2542 done:
2543 *logflagsp |= rval;
2544 return error;
2545 #undef LEFT
2546 #undef RIGHT
2547 #undef PREV
2548 }
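
/*
 * Illustrative walk-through of the FILLING/CONTIG dispatch above (made-up
 * ranges): with PREV covering file offsets [100, 120) and the conversion
 * range new covering [100, 108), the start offsets match so only
 * BMAP_LEFT_FILLING is set.  That case trims PREV to [108, 120) and either
 * merges [100, 108) into a contiguous left neighbour that already has the
 * new state or inserts it as a fresh record.  If new had covered all of
 * [100, 120), both FILLING bits would be set and the record would simply
 * flip state, merging with whichever neighbours the CONTIG bits allow.
 */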
2549
2550 /*
2551 * Convert a hole to a delayed allocation.
2552 */
2553 STATIC void
2554 xfs_bmap_add_extent_hole_delay(
2555 xfs_inode_t *ip, /* incore inode pointer */
2556 int whichfork,
2557 struct xfs_iext_cursor *icur,
2558 xfs_bmbt_irec_t *new) /* new data to add to file extents */
2559 {
2560 struct xfs_ifork *ifp; /* inode fork pointer */
2561 xfs_bmbt_irec_t left; /* left neighbor extent entry */
2562 xfs_filblks_t newlen=0; /* new indirect size */
2563 xfs_filblks_t oldlen=0; /* old indirect size */
2564 xfs_bmbt_irec_t right; /* right neighbor extent entry */
2565 uint32_t state = xfs_bmap_fork_to_state(whichfork);
2566 xfs_filblks_t temp; /* temp for indirect calculations */
2567
2568 ifp = xfs_ifork_ptr(ip, whichfork);
2569 ASSERT(isnullstartblock(new->br_startblock));
2570
2571 /*
2572 * Check and set flags if this segment has a left neighbor
2573 */
2574 if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
2575 state |= BMAP_LEFT_VALID;
2576 if (isnullstartblock(left.br_startblock))
2577 state |= BMAP_LEFT_DELAY;
2578 }
2579
2580 /*
2581 * Check and set flags if the current (right) segment exists.
2582 * If it doesn't exist, we're converting the hole at end-of-file.
2583 */
2584 if (xfs_iext_get_extent(ifp, icur, &right)) {
2585 state |= BMAP_RIGHT_VALID;
2586 if (isnullstartblock(right.br_startblock))
2587 state |= BMAP_RIGHT_DELAY;
2588 }
2589
2590 /*
2591 * Set contiguity flags on the left and right neighbors.
2592 * Don't let extents get too large, even if the pieces are contiguous.
2593 */
2594 if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
2595 left.br_startoff + left.br_blockcount == new->br_startoff &&
2596 left.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN)
2597 state |= BMAP_LEFT_CONTIG;
2598
2599 if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
2600 new->br_startoff + new->br_blockcount == right.br_startoff &&
2601 new->br_blockcount + right.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
2602 (!(state & BMAP_LEFT_CONTIG) ||
2603 (left.br_blockcount + new->br_blockcount +
2604 right.br_blockcount <= XFS_MAX_BMBT_EXTLEN)))
2605 state |= BMAP_RIGHT_CONTIG;
2606
2607 /*
2608 * Switch out based on the contiguity flags.
2609 */
2610 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2611 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2612 /*
2613 * New allocation is contiguous with delayed allocations
2614 * on the left and on the right.
2615 * Merge all three into a single extent record.
2616 */
2617 temp = left.br_blockcount + new->br_blockcount +
2618 right.br_blockcount;
2619
2620 oldlen = startblockval(left.br_startblock) +
2621 startblockval(new->br_startblock) +
2622 startblockval(right.br_startblock);
2623 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2624 oldlen);
2625 left.br_startblock = nullstartblock(newlen);
2626 left.br_blockcount = temp;
2627
2628 xfs_iext_remove(ip, icur, state);
2629 xfs_iext_prev(ifp, icur);
2630 xfs_iext_update_extent(ip, state, icur, &left);
2631 break;
2632
2633 case BMAP_LEFT_CONTIG:
2634 /*
2635 * New allocation is contiguous with a delayed allocation
2636 * on the left.
2637 * Merge the new allocation with the left neighbor.
2638 */
2639 temp = left.br_blockcount + new->br_blockcount;
2640
2641 oldlen = startblockval(left.br_startblock) +
2642 startblockval(new->br_startblock);
2643 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2644 oldlen);
2645 left.br_blockcount = temp;
2646 left.br_startblock = nullstartblock(newlen);
2647
2648 xfs_iext_prev(ifp, icur);
2649 xfs_iext_update_extent(ip, state, icur, &left);
2650 break;
2651
2652 case BMAP_RIGHT_CONTIG:
2653 /*
2654 * New allocation is contiguous with a delayed allocation
2655 * on the right.
2656 * Merge the new allocation with the right neighbor.
2657 */
2658 temp = new->br_blockcount + right.br_blockcount;
2659 oldlen = startblockval(new->br_startblock) +
2660 startblockval(right.br_startblock);
2661 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2662 oldlen);
2663 right.br_startoff = new->br_startoff;
2664 right.br_startblock = nullstartblock(newlen);
2665 right.br_blockcount = temp;
2666 xfs_iext_update_extent(ip, state, icur, &right);
2667 break;
2668
2669 case 0:
2670 /*
2671 * New allocation is not contiguous with another
2672 * delayed allocation.
2673 * Insert a new entry.
2674 */
2675 oldlen = newlen = 0;
2676 xfs_iext_insert(ip, icur, new, state);
2677 break;
2678 }
2679 if (oldlen != newlen) {
2680 ASSERT(oldlen > newlen);
2681 xfs_add_fdblocks(ip->i_mount, oldlen - newlen);
2682
2683 /*
2684 * Nothing to do for disk quota accounting here.
2685 */
2686 xfs_mod_delalloc(ip, 0, (int64_t)newlen - oldlen);
2687 }
2688 }
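
/*
 * Worked example of the oldlen/newlen trimming above (illustrative
 * numbers): merging a left delalloc extent holding 2 reserved indirect
 * blocks with a new reservation holding 2 gives oldlen = 4.  If
 * xfs_bmap_worst_indlen() says the combined extent only ever needs 3,
 * newlen = min(3, 4) = 3 and one block is returned to the free space
 * counter.  Because newlen is clamped to oldlen, the reservation can only
 * shrink here, which is what the ASSERT checks.
 */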
2689
2690 /*
2691 * Convert a hole to a real allocation.
2692 */
2693 STATIC int /* error */
2694 xfs_bmap_add_extent_hole_real(
2695 struct xfs_trans *tp,
2696 struct xfs_inode *ip,
2697 int whichfork,
2698 struct xfs_iext_cursor *icur,
2699 struct xfs_btree_cur **curp,
2700 struct xfs_bmbt_irec *new,
2701 int *logflagsp,
2702 uint32_t flags)
2703 {
2704 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
2705 struct xfs_mount *mp = ip->i_mount;
2706 struct xfs_btree_cur *cur = *curp;
2707 int error; /* error return value */
2708 int i; /* temp state */
2709 xfs_bmbt_irec_t left; /* left neighbor extent entry */
2710 xfs_bmbt_irec_t right; /* right neighbor extent entry */
2711 int rval=0; /* return value (logging flags) */
2712 uint32_t state = xfs_bmap_fork_to_state(whichfork);
2713 struct xfs_bmbt_irec old;
2714
2715 ASSERT(!isnullstartblock(new->br_startblock));
2716 ASSERT(!cur || !(cur->bc_flags & XFS_BTREE_BMBT_WASDEL));
2717
2718 XFS_STATS_INC(mp, xs_add_exlist);
2719
2720 /*
2721 * Check and set flags if this segment has a left neighbor.
2722 */
2723 if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
2724 state |= BMAP_LEFT_VALID;
2725 if (isnullstartblock(left.br_startblock))
2726 state |= BMAP_LEFT_DELAY;
2727 }
2728
2729 /*
2730 * Check and set flags if this segment has a current value.
2731 * Not true if we're inserting into the "hole" at eof.
2732 */
2733 if (xfs_iext_get_extent(ifp, icur, &right)) {
2734 state |= BMAP_RIGHT_VALID;
2735 if (isnullstartblock(right.br_startblock))
2736 state |= BMAP_RIGHT_DELAY;
2737 }
2738
2739 /*
2740 * We're inserting a real allocation between "left" and "right".
2741 * Set the contiguity flags. Don't let extents get too large.
2742 */
2743 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2744 left.br_startoff + left.br_blockcount == new->br_startoff &&
2745 left.br_startblock + left.br_blockcount == new->br_startblock &&
2746 left.br_state == new->br_state &&
2747 left.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN)
2748 state |= BMAP_LEFT_CONTIG;
2749
2750 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2751 new->br_startoff + new->br_blockcount == right.br_startoff &&
2752 new->br_startblock + new->br_blockcount == right.br_startblock &&
2753 new->br_state == right.br_state &&
2754 new->br_blockcount + right.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
2755 (!(state & BMAP_LEFT_CONTIG) ||
2756 left.br_blockcount + new->br_blockcount +
2757 right.br_blockcount <= XFS_MAX_BMBT_EXTLEN))
2758 state |= BMAP_RIGHT_CONTIG;
2759
2760 error = 0;
2761 /*
2762 * Select which case we're in here, and implement it.
2763 */
2764 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2765 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2766 /*
2767 * New allocation is contiguous with real allocations on the
2768 * left and on the right.
2769 * Merge all three into a single extent record.
2770 */
2771 left.br_blockcount += new->br_blockcount + right.br_blockcount;
2772
2773 xfs_iext_remove(ip, icur, state);
2774 xfs_iext_prev(ifp, icur);
2775 xfs_iext_update_extent(ip, state, icur, &left);
2776 ifp->if_nextents--;
2777
2778 if (cur == NULL) {
2779 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2780 } else {
2781 rval = XFS_ILOG_CORE;
2782 error = xfs_bmbt_lookup_eq(cur, &right, &i);
2783 if (error)
2784 goto done;
2785 if (XFS_IS_CORRUPT(mp, i != 1)) {
2786 xfs_btree_mark_sick(cur);
2787 error = -EFSCORRUPTED;
2788 goto done;
2789 }
2790 error = xfs_btree_delete(cur, &i);
2791 if (error)
2792 goto done;
2793 if (XFS_IS_CORRUPT(mp, i != 1)) {
2794 xfs_btree_mark_sick(cur);
2795 error = -EFSCORRUPTED;
2796 goto done;
2797 }
2798 error = xfs_btree_decrement(cur, 0, &i);
2799 if (error)
2800 goto done;
2801 if (XFS_IS_CORRUPT(mp, i != 1)) {
2802 xfs_btree_mark_sick(cur);
2803 error = -EFSCORRUPTED;
2804 goto done;
2805 }
2806 error = xfs_bmbt_update(cur, &left);
2807 if (error)
2808 goto done;
2809 }
2810 break;
2811
2812 case BMAP_LEFT_CONTIG:
2813 /*
2814 * New allocation is contiguous with a real allocation
2815 * on the left.
2816 * Merge the new allocation with the left neighbor.
2817 */
2818 old = left;
2819 left.br_blockcount += new->br_blockcount;
2820
2821 xfs_iext_prev(ifp, icur);
2822 xfs_iext_update_extent(ip, state, icur, &left);
2823
2824 if (cur == NULL) {
2825 rval = xfs_ilog_fext(whichfork);
2826 } else {
2827 rval = 0;
2828 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2829 if (error)
2830 goto done;
2831 if (XFS_IS_CORRUPT(mp, i != 1)) {
2832 xfs_btree_mark_sick(cur);
2833 error = -EFSCORRUPTED;
2834 goto done;
2835 }
2836 error = xfs_bmbt_update(cur, &left);
2837 if (error)
2838 goto done;
2839 }
2840 break;
2841
2842 case BMAP_RIGHT_CONTIG:
2843 /*
2844 * New allocation is contiguous with a real allocation
2845 * on the right.
2846 * Merge the new allocation with the right neighbor.
2847 */
2848 old = right;
2849
2850 right.br_startoff = new->br_startoff;
2851 right.br_startblock = new->br_startblock;
2852 right.br_blockcount += new->br_blockcount;
2853 xfs_iext_update_extent(ip, state, icur, &right);
2854
2855 if (cur == NULL) {
2856 rval = xfs_ilog_fext(whichfork);
2857 } else {
2858 rval = 0;
2859 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2860 if (error)
2861 goto done;
2862 if (XFS_IS_CORRUPT(mp, i != 1)) {
2863 xfs_btree_mark_sick(cur);
2864 error = -EFSCORRUPTED;
2865 goto done;
2866 }
2867 error = xfs_bmbt_update(cur, &right);
2868 if (error)
2869 goto done;
2870 }
2871 break;
2872
2873 case 0:
2874 /*
2875 * New allocation is not contiguous with another
2876 * real allocation.
2877 * Insert a new entry.
2878 */
2879 xfs_iext_insert(ip, icur, new, state);
2880 ifp->if_nextents++;
2881
2882 if (cur == NULL) {
2883 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2884 } else {
2885 rval = XFS_ILOG_CORE;
2886 error = xfs_bmbt_lookup_eq(cur, new, &i);
2887 if (error)
2888 goto done;
2889 if (XFS_IS_CORRUPT(mp, i != 0)) {
2890 xfs_btree_mark_sick(cur);
2891 error = -EFSCORRUPTED;
2892 goto done;
2893 }
2894 error = xfs_btree_insert(cur, &i);
2895 if (error)
2896 goto done;
2897 if (XFS_IS_CORRUPT(mp, i != 1)) {
2898 xfs_btree_mark_sick(cur);
2899 error = -EFSCORRUPTED;
2900 goto done;
2901 }
2902 }
2903 break;
2904 }
2905
2906 /* add reverse mapping unless caller opted out */
2907 if (!(flags & XFS_BMAPI_NORMAP))
2908 xfs_rmap_map_extent(tp, ip, whichfork, new);
2909
2910 /* convert to a btree if necessary */
2911 if (xfs_bmap_needs_btree(ip, whichfork)) {
2912 int tmp_logflags; /* partial log flag return val */
2913
2914 ASSERT(cur == NULL);
2915 error = xfs_bmap_extents_to_btree(tp, ip, curp, 0,
2916 &tmp_logflags, whichfork);
2917 *logflagsp |= tmp_logflags;
2918 cur = *curp;
2919 if (error)
2920 goto done;
2921 }
2922
2923 /* clear out the allocated field, done with it now in any case. */
2924 if (cur)
2925 cur->bc_bmap.allocated = 0;
2926
2927 xfs_bmap_check_leaf_extents(cur, ip, whichfork);
2928 done:
2929 *logflagsp |= rval;
2930 return error;
2931 }
2932
2933 /*
2934 * Functions used in the extent read, allocate and remove paths
2935 */
2936
2937 /*
2938 * Adjust the size of the new extent based on i_extsize and rt extsize.
2939 */
2940 int
2941 xfs_bmap_extsize_align(
2942 xfs_mount_t *mp,
2943 xfs_bmbt_irec_t *gotp, /* next extent pointer */
2944 xfs_bmbt_irec_t *prevp, /* previous extent pointer */
2945 xfs_extlen_t extsz, /* align to this extent size */
2946 int rt, /* is this a realtime inode? */
2947 int eof, /* is extent at end-of-file? */
2948 int delay, /* creating delalloc extent? */
2949 int convert, /* overwriting unwritten extent? */
2950 xfs_fileoff_t *offp, /* in/out: aligned offset */
2951 xfs_extlen_t *lenp) /* in/out: aligned length */
2952 {
2953 xfs_fileoff_t orig_off; /* original offset */
2954 xfs_extlen_t orig_alen; /* original length */
2955 xfs_fileoff_t orig_end; /* original off+len */
2956 xfs_fileoff_t nexto; /* next file offset */
2957 xfs_fileoff_t prevo; /* previous file offset */
2958 xfs_fileoff_t align_off; /* temp for offset */
2959 xfs_extlen_t align_alen; /* temp for length */
2960 xfs_extlen_t temp; /* temp for calculations */
2961
2962 if (convert)
2963 return 0;
2964
2965 orig_off = align_off = *offp;
2966 orig_alen = align_alen = *lenp;
2967 orig_end = orig_off + orig_alen;
2968
2969 /*
2970 * If this request overlaps an existing extent, then don't
2971 * attempt to perform any additional alignment.
2972 */
2973 if (!delay && !eof &&
2974 (orig_off >= gotp->br_startoff) &&
2975 (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
2976 return 0;
2977 }
2978
2979 /*
2980 * If the file offset is unaligned vs. the extent size
2981 * we need to align it. This will be possible unless
2982 * the file was previously written with a kernel that didn't
2983 * perform this alignment, or if a truncate shot us in the
2984 * foot.
2985 */
2986 div_u64_rem(orig_off, extsz, &temp);
2987 if (temp) {
2988 align_alen += temp;
2989 align_off -= temp;
2990 }
2991
2992 /* Same adjustment for the end of the requested area. */
2993 temp = (align_alen % extsz);
2994 if (temp)
2995 align_alen += extsz - temp;
2996
2997 /*
2998 * For large extent hint sizes, the aligned extent might be larger than
2999 * XFS_MAX_BMBT_EXTLEN. In that case, reduce the size by an extsz so
3000 * that it pulls the length back under XFS_MAX_BMBT_EXTLEN. The outer
3001 * allocation loops handle short allocation just fine, so it is safe to
3002 * do this. We only want to do it when we are forced to, though, because
3003 * it means more allocation operations are required.
3004 */
3005 while (align_alen > XFS_MAX_BMBT_EXTLEN)
3006 align_alen -= extsz;
3007 ASSERT(align_alen <= XFS_MAX_BMBT_EXTLEN);
3008
3009 /*
3010 * If the previous block overlaps with this proposed allocation
3011 * then move the start forward without adjusting the length.
3012 */
3013 if (prevp->br_startoff != NULLFILEOFF) {
3014 if (prevp->br_startblock == HOLESTARTBLOCK)
3015 prevo = prevp->br_startoff;
3016 else
3017 prevo = prevp->br_startoff + prevp->br_blockcount;
3018 } else
3019 prevo = 0;
3020 if (align_off != orig_off && align_off < prevo)
3021 align_off = prevo;
3022 /*
3023 * If the next block overlaps with this proposed allocation
3024 * then move the start back without adjusting the length,
3025 * but not before offset 0.
3026 * This may of course make the start overlap previous block,
3027 * and if we hit the offset 0 limit then the next block
3028 * can still overlap too.
3029 */
3030 if (!eof && gotp->br_startoff != NULLFILEOFF) {
3031 if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
3032 (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
3033 nexto = gotp->br_startoff + gotp->br_blockcount;
3034 else
3035 nexto = gotp->br_startoff;
3036 } else
3037 nexto = NULLFILEOFF;
3038 if (!eof &&
3039 align_off + align_alen != orig_end &&
3040 align_off + align_alen > nexto)
3041 align_off = nexto > align_alen ? nexto - align_alen : 0;
3042 /*
3043 * If we're now overlapping the next or previous extent that
3044 * means we can't fit an extsz piece in this hole. Just move
3045 * the start forward to the first valid spot and set
3046 * the length so we hit the end.
3047 */
3048 if (align_off != orig_off && align_off < prevo)
3049 align_off = prevo;
3050 if (align_off + align_alen != orig_end &&
3051 align_off + align_alen > nexto &&
3052 nexto != NULLFILEOFF) {
3053 ASSERT(nexto > prevo);
3054 align_alen = nexto - align_off;
3055 }
3056
3057 /*
3058 * If realtime, and the result isn't a multiple of the realtime
3059 * extent size we need to remove blocks until it is.
3060 */
3061 if (rt && (temp = xfs_extlen_to_rtxmod(mp, align_alen))) {
3062 /*
3063 * We're not covering the original request, or
3064 * we won't be able to once we fix the length.
3065 */
3066 if (orig_off < align_off ||
3067 orig_end > align_off + align_alen ||
3068 align_alen - temp < orig_alen)
3069 return -EINVAL;
3070 /*
3071 * Try to fix it by moving the start up.
3072 */
3073 if (align_off + temp <= orig_off) {
3074 align_alen -= temp;
3075 align_off += temp;
3076 }
3077 /*
3078 * Try to fix it by moving the end in.
3079 */
3080 else if (align_off + align_alen - temp >= orig_end)
3081 align_alen -= temp;
3082 /*
3083 * Set the start to the minimum then trim the length.
3084 */
3085 else {
3086 align_alen -= orig_off - align_off;
3087 align_off = orig_off;
3088 align_alen -= xfs_extlen_to_rtxmod(mp, align_alen);
3089 }
3090 /*
3091 * Result doesn't cover the request, fail it.
3092 */
3093 if (orig_off < align_off || orig_end > align_off + align_alen)
3094 return -EINVAL;
3095 } else {
3096 ASSERT(orig_off >= align_off);
3097 /* see XFS_MAX_BMBT_EXTLEN handling above */
3098 ASSERT(orig_end <= align_off + align_alen ||
3099 align_alen + extsz > XFS_MAX_BMBT_EXTLEN);
3100 }
3101
3102 #ifdef DEBUG
3103 if (!eof && gotp->br_startoff != NULLFILEOFF)
3104 ASSERT(align_off + align_alen <= gotp->br_startoff);
3105 if (prevp->br_startoff != NULLFILEOFF)
3106 ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
3107 #endif
3108
3109 *lenp = align_alen;
3110 *offp = align_off;
3111 return 0;
3112 }
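
/*
 * Worked example of the alignment arithmetic above (illustrative values):
 * with extsz = 16, *offp = 5 and *lenp = 10, div_u64_rem() yields temp = 5,
 * so align_off drops to 0 and align_alen grows to 15; the end-rounding then
 * adds extsz - (15 % 16) = 1, producing the aligned request [0, 16).  The
 * neighbour checks may subsequently pull that range back in if it would
 * overlap prevp or gotp, and the realtime fixups trim the result to a
 * whole number of rt extents.
 */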
3113
3114 #define XFS_ALLOC_GAP_UNITS 4
3115
3116 /* returns true if ap->blkno was modified */
3117 bool
3118 xfs_bmap_adjacent(
3119 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
3120 {
3121 xfs_fsblock_t adjust; /* adjustment to block numbers */
3122 xfs_mount_t *mp; /* mount point structure */
3123 int rt; /* true if inode is realtime */
3124
3125 #define ISVALID(x,y) \
3126 (rt ? \
3127 (x) < mp->m_sb.sb_rblocks : \
3128 XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \
3129 XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \
3130 XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)
3131
3132 mp = ap->ip->i_mount;
3133 rt = XFS_IS_REALTIME_INODE(ap->ip) &&
3134 (ap->datatype & XFS_ALLOC_USERDATA);
3135 /*
3136 * If allocating at eof, and there's a previous real block,
3137 * try to use its last block as our starting point.
3138 */
3139 if (ap->eof && ap->prev.br_startoff != NULLFILEOFF &&
3140 !isnullstartblock(ap->prev.br_startblock) &&
3141 ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount,
3142 ap->prev.br_startblock)) {
3143 ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount;
3144 /*
3145 * Adjust for the gap between prevp and us.
3146 */
3147 adjust = ap->offset -
3148 (ap->prev.br_startoff + ap->prev.br_blockcount);
3149 if (adjust &&
3150 ISVALID(ap->blkno + adjust, ap->prev.br_startblock))
3151 ap->blkno += adjust;
3152 return true;
3153 }
3154 /*
3155 * If not at eof, then compare the two neighbor blocks.
3156 * Figure out whether either one gives us a good starting point,
3157 * and pick the better one.
3158 */
3159 if (!ap->eof) {
3160 xfs_fsblock_t gotbno; /* right side block number */
3161 xfs_fsblock_t gotdiff=0; /* right side difference */
3162 xfs_fsblock_t prevbno; /* left side block number */
3163 xfs_fsblock_t prevdiff=0; /* left side difference */
3164
3165 /*
3166 * If there's a previous (left) block, select a requested
3167 * start block based on it.
3168 */
3169 if (ap->prev.br_startoff != NULLFILEOFF &&
3170 !isnullstartblock(ap->prev.br_startblock) &&
3171 (prevbno = ap->prev.br_startblock +
3172 ap->prev.br_blockcount) &&
3173 ISVALID(prevbno, ap->prev.br_startblock)) {
3174 /*
3175 * Calculate gap to end of previous block.
3176 */
3177 adjust = prevdiff = ap->offset -
3178 (ap->prev.br_startoff +
3179 ap->prev.br_blockcount);
3180 /*
3181 * Figure the startblock based on the previous block's
3182 * end and the gap size.
3183 * Heuristic!
3184 * If the gap is large relative to the piece we're
3185 * allocating, or using it gives us an invalid block
3186 * number, then just use the end of the previous block.
3187 */
3188 if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3189 ISVALID(prevbno + prevdiff,
3190 ap->prev.br_startblock))
3191 prevbno += adjust;
3192 else
3193 prevdiff += adjust;
3194 }
3195 /*
3196 * No previous block or can't follow it, just default.
3197 */
3198 else
3199 prevbno = NULLFSBLOCK;
3200 /*
3201 * If there's a following (right) block, select a requested
3202 * start block based on it.
3203 */
3204 if (!isnullstartblock(ap->got.br_startblock)) {
3205 /*
3206 * Calculate gap to start of next block.
3207 */
3208 adjust = gotdiff = ap->got.br_startoff - ap->offset;
3209 /*
3210 * Figure the startblock based on the next block's
3211 * start and the gap size.
3212 */
3213 gotbno = ap->got.br_startblock;
3214 /*
3215 * Heuristic!
3216 * If the gap is large relative to the piece we're
3217 * allocating, or using it gives us an invalid block
3218 * number, then just use the start of the next block
3219 * offset by our length.
3220 */
3221 if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3222 ISVALID(gotbno - gotdiff, gotbno))
3223 gotbno -= adjust;
3224 else if (ISVALID(gotbno - ap->length, gotbno)) {
3225 gotbno -= ap->length;
3226 gotdiff += adjust - ap->length;
3227 } else
3228 gotdiff += adjust;
3229 }
3230 /*
3231 * No next block, just default.
3232 */
3233 else
3234 gotbno = NULLFSBLOCK;
3235 /*
3236 * If both valid, pick the better one, else the only good
3237 * one, else ap->blkno is already set (to 0 or the inode block).
3238 */
3239 if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK) {
3240 ap->blkno = prevdiff <= gotdiff ? prevbno : gotbno;
3241 return true;
3242 }
3243 if (prevbno != NULLFSBLOCK) {
3244 ap->blkno = prevbno;
3245 return true;
3246 }
3247 if (gotbno != NULLFSBLOCK) {
3248 ap->blkno = gotbno;
3249 return true;
3250 }
3251 }
3252 #undef ISVALID
3253 return false;
3254 }
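
/*
 * Illustrative example of the gap heuristic above (made-up numbers): not
 * at EOF, with a left neighbour ending at block 1008 and a 4-block file
 * offset gap before our 8-block request, prevdiff = 4.  Since
 * 4 <= XFS_ALLOC_GAP_UNITS * 8 and block 1012 is valid, prevbno becomes
 * 1008 + 4 = 1012, leaving the hole room to be filled contiguously later.
 * If the right neighbour also produces a candidate, the one with the
 * smaller distance (prevdiff vs. gotdiff) wins.
 */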
3255
3256 int
3257 xfs_bmap_longest_free_extent(
3258 struct xfs_perag *pag,
3259 struct xfs_trans *tp,
3260 xfs_extlen_t *blen)
3261 {
3262 xfs_extlen_t longest;
3263 int error = 0;
3264
3265 if (!xfs_perag_initialised_agf(pag)) {
3266 error = xfs_alloc_read_agf(pag, tp, XFS_ALLOC_FLAG_TRYLOCK,
3267 NULL);
3268 if (error)
3269 return error;
3270 }
3271
3272 longest = xfs_alloc_longest_free_extent(pag,
3273 xfs_alloc_min_freelist(pag->pag_mount, pag),
3274 xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE));
3275 if (*blen < longest)
3276 *blen = longest;
3277
3278 return 0;
3279 }
3280
3281 static xfs_extlen_t
3282 xfs_bmap_select_minlen(
3283 struct xfs_bmalloca *ap,
3284 struct xfs_alloc_arg *args,
3285 xfs_extlen_t blen)
3286 {
3287
3288 /*
3289 * Since we used XFS_ALLOC_FLAG_TRYLOCK in _longest_free_extent(), busy AGs
3290 * were skipped and blen may underestimate the space available, so there may
 * still be enough contiguous free space for this request.
3291 */
3292 if (blen < ap->minlen)
3293 return ap->minlen;
3294
3295 /*
3296 * If the best seen length is less than the request length,
3297 * use the best as the minimum, otherwise we've got the maxlen we
3298 * were asked for.
3299 */
3300 if (blen < args->maxlen)
3301 return blen;
3302 return args->maxlen;
3303 }
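
/*
 * In effect the above clamps the minimum allocation length to
 * min(max(blen, ap->minlen), args->maxlen).  For example (illustrative),
 * with ap->minlen = 4 and args->maxlen = 64: blen = 2 yields 4, blen = 40
 * yields 40, and blen = 100 yields 64.
 */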
3304
3305 static int
3306 xfs_bmap_btalloc_select_lengths(
3307 struct xfs_bmalloca *ap,
3308 struct xfs_alloc_arg *args,
3309 xfs_extlen_t *blen)
3310 {
3311 struct xfs_mount *mp = args->mp;
3312 struct xfs_perag *pag;
3313 xfs_agnumber_t agno, startag;
3314 int error = 0;
3315
3316 if (ap->tp->t_flags & XFS_TRANS_LOWMODE) {
3317 args->total = ap->minlen;
3318 args->minlen = ap->minlen;
3319 return 0;
3320 }
3321
3322 args->total = ap->total;
3323 startag = XFS_FSB_TO_AGNO(mp, ap->blkno);
3324 if (startag == NULLAGNUMBER)
3325 startag = 0;
3326
3327 *blen = 0;
3328 for_each_perag_wrap(mp, startag, agno, pag) {
3329 error = xfs_bmap_longest_free_extent(pag, args->tp, blen);
3330 if (error && error != -EAGAIN)
3331 break;
3332 error = 0;
3333 if (*blen >= args->maxlen)
3334 break;
3335 }
3336 if (pag)
3337 xfs_perag_rele(pag);
3338
3339 args->minlen = xfs_bmap_select_minlen(ap, args, *blen);
3340 return error;
3341 }
3342
3343 /* Update all inode and quota accounting for the allocation we just did. */
3344 void
3345 xfs_bmap_alloc_account(
3346 struct xfs_bmalloca *ap)
3347 {
3348 bool isrt = XFS_IS_REALTIME_INODE(ap->ip) &&
3349 !(ap->flags & XFS_BMAPI_ATTRFORK);
3350 uint fld;
3351
3352 if (ap->flags & XFS_BMAPI_COWFORK) {
3353 /*
3354 * COW fork blocks are in-core only and thus are treated as
3355 * in-core quota reservation (like delalloc blocks) even when
3356 * converted to real blocks. The quota reservation is not
3357 * accounted to disk until blocks are remapped to the data
3358 * fork. So if these blocks were previously delalloc, we
3359 * already have quota reservation and there's nothing to do
3360 * yet.
3361 */
3362 if (ap->wasdel) {
3363 xfs_mod_delalloc(ap->ip, -(int64_t)ap->length, 0);
3364 return;
3365 }
3366
3367 /*
3368 * Otherwise, we've allocated blocks in a hole. The transaction
3369 * has acquired in-core quota reservation for this extent.
3370 * Rather than account these as real blocks, however, we reduce
3371 * the transaction quota reservation based on the allocation.
3372 * This essentially transfers the transaction quota reservation
3373 * to that of a delalloc extent.
3374 */
3375 ap->ip->i_delayed_blks += ap->length;
3376 xfs_trans_mod_dquot_byino(ap->tp, ap->ip, isrt ?
3377 XFS_TRANS_DQ_RES_RTBLKS : XFS_TRANS_DQ_RES_BLKS,
3378 -(long)ap->length);
3379 return;
3380 }
3381
3382 /* data/attr fork only */
3383 ap->ip->i_nblocks += ap->length;
3384 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
3385 if (ap->wasdel) {
3386 ap->ip->i_delayed_blks -= ap->length;
3387 xfs_mod_delalloc(ap->ip, -(int64_t)ap->length, 0);
3388 fld = isrt ? XFS_TRANS_DQ_DELRTBCOUNT : XFS_TRANS_DQ_DELBCOUNT;
3389 } else {
3390 fld = isrt ? XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
3391 }
3392
3393 xfs_trans_mod_dquot_byino(ap->tp, ap->ip, fld, ap->length);
3394 }
3395
3396 static int
3397 xfs_bmap_compute_alignments(
3398 struct xfs_bmalloca *ap,
3399 struct xfs_alloc_arg *args)
3400 {
3401 struct xfs_mount *mp = args->mp;
3402 xfs_extlen_t align = 0; /* minimum allocation alignment */
3403 int stripe_align = 0;
3404
3405 /* stripe alignment for allocation is determined by mount parameters */
3406 if (mp->m_swidth && xfs_has_swalloc(mp))
3407 stripe_align = mp->m_swidth;
3408 else if (mp->m_dalign)
3409 stripe_align = mp->m_dalign;
3410
3411 if (ap->flags & XFS_BMAPI_COWFORK)
3412 align = xfs_get_cowextsz_hint(ap->ip);
3413 else if (ap->datatype & XFS_ALLOC_USERDATA)
3414 align = xfs_get_extsz_hint(ap->ip);
3415 if (align) {
3416 if (xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, align, 0,
3417 ap->eof, 0, ap->conv, &ap->offset,
3418 &ap->length))
3419 ASSERT(0);
3420 ASSERT(ap->length);
3421 }
3422
3423 /* apply extent size hints if obtained earlier */
3424 if (align) {
3425 args->prod = align;
3426 div_u64_rem(ap->offset, args->prod, &args->mod);
3427 if (args->mod)
3428 args->mod = args->prod - args->mod;
3429 } else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
3430 args->prod = 1;
3431 args->mod = 0;
3432 } else {
3433 args->prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
3434 div_u64_rem(ap->offset, args->prod, &args->mod);
3435 if (args->mod)
3436 args->mod = args->prod - args->mod;
3437 }
3438
3439 return stripe_align;
3440 }
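
/*
 * Worked example of the prod/mod setup above (illustrative, assuming a
 * 64k page kernel on a 4k block filesystem with no extent size hint):
 * args->prod = 65536 >> 12 = 16 blocks.  For ap->offset = 5, div_u64_rem()
 * leaves mod = 5, which is flipped to args->mod = 16 - 5 = 11, i.e. the
 * number of blocks needed to carry the allocation to the next
 * page-aligned file offset.
 */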
3441
3442 static void
3443 xfs_bmap_process_allocated_extent(
3444 struct xfs_bmalloca *ap,
3445 struct xfs_alloc_arg *args,
3446 xfs_fileoff_t orig_offset,
3447 xfs_extlen_t orig_length)
3448 {
3449 ap->blkno = args->fsbno;
3450 ap->length = args->len;
3451 /*
3452 * If the extent size hint is active, we tried to round the
3453 * caller's allocation request offset down to extsz and the
3454 * length up to another extsz boundary. If we found a free
3455 * extent we mapped it in starting at this new offset. If the
3456 * newly mapped space isn't long enough to cover any of the
3457 * range of offsets that was originally requested, move the
3458 * mapping up so that we can fill as much of the caller's
3459 * original request as possible. Free space is apparently
3460 * very fragmented so we're unlikely to be able to satisfy the
3461 * hints anyway.
3462 */
3463 if (ap->length <= orig_length)
3464 ap->offset = orig_offset;
3465 else if (ap->offset + ap->length < orig_offset + orig_length)
3466 ap->offset = orig_offset + orig_length - ap->length;
3467 xfs_bmap_alloc_account(ap);
3468 }
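
/*
 * Illustrative example of the offset fixup above (made-up numbers): the
 * caller asked for [100, 110) and the extent size hint widened the request
 * to start at 96.  If the allocator returns only 8 blocks, 8 <= 10, so the
 * mapping is placed back at the original offset and covers [100, 108).
 * If it returns 12 blocks at offset 96, the end (108) falls short of 110,
 * so ap->offset moves up to 110 - 12 = 98 and the mapping [98, 110)
 * covers the caller's entire original range.
 */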
3469
3470 #ifdef DEBUG
3471 static int
3472 xfs_bmap_exact_minlen_extent_alloc(
3473 struct xfs_bmalloca *ap)
3474 {
3475 struct xfs_mount *mp = ap->ip->i_mount;
3476 struct xfs_alloc_arg args = { .tp = ap->tp, .mp = mp };
3477 xfs_fileoff_t orig_offset;
3478 xfs_extlen_t orig_length;
3479 int error;
3480
3481 ASSERT(ap->length);
3482
3483 if (ap->minlen != 1) {
3484 ap->blkno = NULLFSBLOCK;
3485 ap->length = 0;
3486 return 0;
3487 }
3488
3489 orig_offset = ap->offset;
3490 orig_length = ap->length;
3491
3492 args.alloc_minlen_only = 1;
3493
3494 xfs_bmap_compute_alignments(ap, &args);
3495
3496 /*
3497 * Unlike the longest extent available in an AG, we don't track
3498 * the length of an AG's shortest extent.
3499 * XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT is a debug only knob and
3500 * hence we can afford to start traversing from the 0th AG since
3501 * we need not be concerned about a drop in performance in
3502 * "debug only" code paths.
3503 */
3504 ap->blkno = XFS_AGB_TO_FSB(mp, 0, 0);
3505
3506 args.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
3507 args.minlen = args.maxlen = ap->minlen;
3508 args.total = ap->total;
3509
3510 args.alignment = 1;
3511 args.minalignslop = 0;
3512
3513 args.minleft = ap->minleft;
3514 args.wasdel = ap->wasdel;
3515 args.resv = XFS_AG_RESV_NONE;
3516 args.datatype = ap->datatype;
3517
3518 error = xfs_alloc_vextent_first_ag(&args, ap->blkno);
3519 if (error)
3520 return error;
3521
3522 if (args.fsbno != NULLFSBLOCK) {
3523 xfs_bmap_process_allocated_extent(ap, &args, orig_offset,
3524 orig_length);
3525 } else {
3526 ap->blkno = NULLFSBLOCK;
3527 ap->length = 0;
3528 }
3529
3530 return 0;
3531 }
3532 #else
3533
3534 #define xfs_bmap_exact_minlen_extent_alloc(bma) (-EFSCORRUPTED)
3535
3536 #endif
3537
3538 /*
3539 * If we are not low on available data blocks and we are allocating at
3540 * EOF, optimise allocation for contiguous file extension and/or stripe
3541 * alignment of the new extent.
3542 *
3543 * NOTE: ap->aeof is only set if the allocation length is >= the
3544 * stripe unit and the allocation offset is at the end of file.
3545 */
3546 static int
3547 xfs_bmap_btalloc_at_eof(
3548 struct xfs_bmalloca *ap,
3549 struct xfs_alloc_arg *args,
3550 xfs_extlen_t blen,
3551 int stripe_align,
3552 bool ag_only)
3553 {
3554 struct xfs_mount *mp = args->mp;
3555 struct xfs_perag *caller_pag = args->pag;
3556 int error;
3557
3558 /*
3559 * If there are already extents in the file, try an exact EOF block
3560 * allocation to extend the file as a contiguous extent. If that fails,
3561 * or it's the first allocation in a file, just try for a stripe aligned
3562 * allocation.
3563 */
3564 if (ap->offset) {
3565 xfs_extlen_t nextminlen = 0;
3566
3567 /*
3568 * Compute the minlen+alignment for the next case. Set slop so
3569 * that the value of minlen+alignment+slop doesn't go up between
3570 * the calls.
3571 */
3572 args->alignment = 1;
3573 if (blen > stripe_align && blen <= args->maxlen)
3574 nextminlen = blen - stripe_align;
3575 else
3576 nextminlen = args->minlen;
3577 if (nextminlen + stripe_align > args->minlen + 1)
3578 args->minalignslop = nextminlen + stripe_align -
3579 args->minlen - 1;
3580 else
3581 args->minalignslop = 0;
3582
3583 if (!caller_pag)
3584 args->pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, ap->blkno));
3585 error = xfs_alloc_vextent_exact_bno(args, ap->blkno);
3586 if (!caller_pag) {
3587 xfs_perag_put(args->pag);
3588 args->pag = NULL;
3589 }
3590 if (error)
3591 return error;
3592
3593 if (args->fsbno != NULLFSBLOCK)
3594 return 0;
3595 /*
3596 * Exact allocation failed. Reset to try an aligned allocation
3597 * according to the original allocation specification.
3598 */
3599 args->alignment = stripe_align;
3600 args->minlen = nextminlen;
3601 args->minalignslop = 0;
3602 } else {
3603 /*
3604 * Adjust minlen to try and preserve alignment if we
3605 * can't guarantee an aligned maxlen extent.
3606 */
3607 args->alignment = stripe_align;
3608 if (blen > args->alignment &&
3609 blen <= args->maxlen + args->alignment)
3610 args->minlen = blen - args->alignment;
3611 args->minalignslop = 0;
3612 }
3613
3614 if (ag_only) {
3615 error = xfs_alloc_vextent_near_bno(args, ap->blkno);
3616 } else {
3617 args->pag = NULL;
3618 error = xfs_alloc_vextent_start_ag(args, ap->blkno);
3619 ASSERT(args->pag == NULL);
3620 args->pag = caller_pag;
3621 }
3622 if (error)
3623 return error;
3624
3625 if (args->fsbno != NULLFSBLOCK)
3626 return 0;
3627
3628 /*
3629 * Allocation failed, so restore the allocation args to their
3630 * original non-aligned state so the caller can proceed on allocation
3631 * failure as if this function was never called.
3632 */
3633 args->alignment = 1;
3634 return 0;
3635 }
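
/*
 * Worked example of the minlen/minalignslop interplay above (illustrative
 * numbers): with stripe_align = 8, blen = 20, args->maxlen = 32 and an
 * original args->minlen of 4, the fallback aligned attempt would use
 * nextminlen = 20 - 8 = 12, so the exact-bno attempt sets
 * minalignslop = 12 + 8 - 4 - 1 = 15.  Both attempts then reserve
 * minlen + alignment + slop = 20 blocks (4 + 1 + 15 versus 12 + 8 + 0),
 * so the space reservation never grows between the two calls.
 */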
3636
3637 /*
3638 * We have failed multiple allocation attempts so we are now in a low space
3639 * allocation situation. Try a locality-first, full-filesystem, minimum-length
3640 * allocation whilst still maintaining the necessary total block reservation
3641 * requirements.
3642 *
3643 * If that fails, we are now critically low on space, so perform a last resort
3644 * allocation attempt: no reserve, no locality, blocking, minimum length, full
3645 * filesystem free space scan. We also indicate to future allocations in this
3646 * transaction that we are critically low on space so they don't waste time on
3647 * allocation modes that are unlikely to succeed.
3648 */
3649 int
3650 xfs_bmap_btalloc_low_space(
3651 struct xfs_bmalloca *ap,
3652 struct xfs_alloc_arg *args)
3653 {
3654 int error;
3655
3656 if (args->minlen > ap->minlen) {
3657 args->minlen = ap->minlen;
3658 error = xfs_alloc_vextent_start_ag(args, ap->blkno);
3659 if (error || args->fsbno != NULLFSBLOCK)
3660 return error;
3661 }
3662
3663 /* Last ditch attempt before failure is declared. */
3664 args->total = ap->minlen;
3665 error = xfs_alloc_vextent_first_ag(args, 0);
3666 if (error)
3667 return error;
3668 ap->tp->t_flags |= XFS_TRANS_LOWMODE;
3669 return 0;
3670 }
3671
3672 static int
3673 xfs_bmap_btalloc_filestreams(
3674 struct xfs_bmalloca *ap,
3675 struct xfs_alloc_arg *args,
3676 int stripe_align)
3677 {
3678 xfs_extlen_t blen = 0;
3679 int error = 0;
3680
3682 error = xfs_filestream_select_ag(ap, args, &blen);
3683 if (error)
3684 return error;
3685 ASSERT(args->pag);
3686
3687 /*
3688 * If we are in low space mode, then optimal allocation will fail so
3689 * prepare for minimal allocation and jump to the low space algorithm
3690 * immediately.
3691 */
3692 if (ap->tp->t_flags & XFS_TRANS_LOWMODE) {
3693 args->minlen = ap->minlen;
3694 ASSERT(args->fsbno == NULLFSBLOCK);
3695 goto out_low_space;
3696 }
3697
3698 args->minlen = xfs_bmap_select_minlen(ap, args, blen);
3699 if (ap->aeof)
3700 error = xfs_bmap_btalloc_at_eof(ap, args, blen, stripe_align,
3701 true);
3702
3703 if (!error && args->fsbno == NULLFSBLOCK)
3704 error = xfs_alloc_vextent_near_bno(args, ap->blkno);
3705
3706 out_low_space:
3707 /*
3708 * We are now done with the perag reference for the filestreams
3709 * association provided by xfs_filestream_select_ag(). Release it now as
3710 * we've either succeeded, had a fatal error or we are out of space and
3711 * need to do a full filesystem scan for free space which will take its
3712 * own references.
3713 */
3714 xfs_perag_rele(args->pag);
3715 args->pag = NULL;
3716 if (error || args->fsbno != NULLFSBLOCK)
3717 return error;
3718
3719 return xfs_bmap_btalloc_low_space(ap, args);
3720 }
3721
3722 static int
3723 xfs_bmap_btalloc_best_length(
3724 struct xfs_bmalloca *ap,
3725 struct xfs_alloc_arg *args,
3726 int stripe_align)
3727 {
3728 xfs_extlen_t blen = 0;
3729 int error;
3730
3731 ap->blkno = XFS_INO_TO_FSB(args->mp, ap->ip->i_ino);
3732 xfs_bmap_adjacent(ap);
3733
3734 /*
3735 * Search for an allocation group with a single extent large enough for
3736 * the request. If one isn't found, then adjust the minimum allocation
3737 * size to the largest space found.
3738 */
3739 error = xfs_bmap_btalloc_select_lengths(ap, args, &blen);
3740 if (error)
3741 return error;
3742
3743 /*
3744 * Don't attempt optimal EOF allocation if previous allocations barely
3745 * succeeded due to being near ENOSPC. It is highly unlikely we'll get
3746 * optimal or even aligned allocations in this case, so don't waste time
3747 * trying.
3748 */
3749 if (ap->aeof && !(ap->tp->t_flags & XFS_TRANS_LOWMODE)) {
3750 error = xfs_bmap_btalloc_at_eof(ap, args, blen, stripe_align,
3751 false);
3752 if (error || args->fsbno != NULLFSBLOCK)
3753 return error;
3754 }
3755
3756 error = xfs_alloc_vextent_start_ag(args, ap->blkno);
3757 if (error || args->fsbno != NULLFSBLOCK)
3758 return error;
3759
3760 return xfs_bmap_btalloc_low_space(ap, args);
3761 }
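
/*
 * Summary of the fallback ladder implemented above (descriptive only,
 * no extra logic): xfs_bmap_btalloc_best_length() roughly tries an
 * exact-bno allocation at EOF, then a stripe aligned allocation near
 * the target bno, then an unaligned allocation starting at the target
 * AG, and finally hands over to xfs_bmap_btalloc_low_space(), which
 * drops locality and then the length/reservation requirements before
 * giving up entirely.
 */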
3762
3763 static int
3764 xfs_bmap_btalloc(
3765 struct xfs_bmalloca *ap)
3766 {
3767 struct xfs_mount *mp = ap->ip->i_mount;
3768 struct xfs_alloc_arg args = {
3769 .tp = ap->tp,
3770 .mp = mp,
3771 .fsbno = NULLFSBLOCK,
3772 .oinfo = XFS_RMAP_OINFO_SKIP_UPDATE,
3773 .minleft = ap->minleft,
3774 .wasdel = ap->wasdel,
3775 .resv = XFS_AG_RESV_NONE,
3776 .datatype = ap->datatype,
3777 .alignment = 1,
3778 .minalignslop = 0,
3779 };
3780 xfs_fileoff_t orig_offset;
3781 xfs_extlen_t orig_length;
3782 int error;
3783 int stripe_align;
3784
3785 ASSERT(ap->length);
3786 orig_offset = ap->offset;
3787 orig_length = ap->length;
3788
3789 stripe_align = xfs_bmap_compute_alignments(ap, &args);
3790
3791 /* Trim the allocation back to the maximum an AG can fit. */
3792 args.maxlen = min(ap->length, mp->m_ag_max_usable);
3793
3794 if ((ap->datatype & XFS_ALLOC_USERDATA) &&
3795 xfs_inode_is_filestream(ap->ip))
3796 error = xfs_bmap_btalloc_filestreams(ap, &args, stripe_align);
3797 else
3798 error = xfs_bmap_btalloc_best_length(ap, &args, stripe_align);
3799 if (error)
3800 return error;
3801
3802 if (args.fsbno != NULLFSBLOCK) {
3803 xfs_bmap_process_allocated_extent(ap, &args, orig_offset,
3804 orig_length);
3805 } else {
3806 ap->blkno = NULLFSBLOCK;
3807 ap->length = 0;
3808 }
3809 return 0;
3810 }
3811
3812 /* Trim extent to fit a logical block range. */
3813 void
3814 xfs_trim_extent(
3815 struct xfs_bmbt_irec *irec,
3816 xfs_fileoff_t bno,
3817 xfs_filblks_t len)
3818 {
3819 xfs_fileoff_t distance;
3820 xfs_fileoff_t end = bno + len;
3821
3822 if (irec->br_startoff + irec->br_blockcount <= bno ||
3823 irec->br_startoff >= end) {
3824 irec->br_blockcount = 0;
3825 return;
3826 }
3827
3828 if (irec->br_startoff < bno) {
3829 distance = bno - irec->br_startoff;
3830 if (isnullstartblock(irec->br_startblock))
3831 irec->br_startblock = DELAYSTARTBLOCK;
3832 if (irec->br_startblock != DELAYSTARTBLOCK &&
3833 irec->br_startblock != HOLESTARTBLOCK)
3834 irec->br_startblock += distance;
3835 irec->br_startoff += distance;
3836 irec->br_blockcount -= distance;
3837 }
3838
3839 if (end < irec->br_startoff + irec->br_blockcount) {
3840 distance = irec->br_startoff + irec->br_blockcount - end;
3841 irec->br_blockcount -= distance;
3842 }
3843 }
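
/*
 * Worked example (illustrative values): an extent with br_startoff = 10,
 * br_startblock = 100 and br_blockcount = 20, trimmed to bno = 15 and
 * len = 10 (so end = 25), becomes br_startoff = 15, br_startblock = 105,
 * br_blockcount = 10: first the front is clipped by distance 5, then
 * the tail by distance 5.
 */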
3844
3845 /*
3846 * Trim the returned map to the required bounds
3847 */
3848 STATIC void
3849 xfs_bmapi_trim_map(
3850 struct xfs_bmbt_irec *mval,
3851 struct xfs_bmbt_irec *got,
3852 xfs_fileoff_t *bno,
3853 xfs_filblks_t len,
3854 xfs_fileoff_t obno,
3855 xfs_fileoff_t end,
3856 int n,
3857 uint32_t flags)
3858 {
3859 if ((flags & XFS_BMAPI_ENTIRE) ||
3860 got->br_startoff + got->br_blockcount <= obno) {
3861 *mval = *got;
3862 if (isnullstartblock(got->br_startblock))
3863 mval->br_startblock = DELAYSTARTBLOCK;
3864 return;
3865 }
3866
3867 if (obno > *bno)
3868 *bno = obno;
3869 ASSERT((*bno >= obno) || (n == 0));
3870 ASSERT(*bno < end);
3871 mval->br_startoff = *bno;
3872 if (isnullstartblock(got->br_startblock))
3873 mval->br_startblock = DELAYSTARTBLOCK;
3874 else
3875 mval->br_startblock = got->br_startblock +
3876 (*bno - got->br_startoff);
3877 /*
3878 * Return the minimum of what we got and what we asked for as
3879 * the length. We can use the len variable here because it is
3880 * modified below, and we may already have passed through this
3881 * code on a previous iteration if the first part of the
3882 * allocation didn't overlap what was asked for.
3883 */
3884 mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno,
3885 got->br_blockcount - (*bno - got->br_startoff));
3886 mval->br_state = got->br_state;
3887 ASSERT(mval->br_blockcount <= len);
3888 return;
3889 }
3890
3891 /*
3892 * Update and validate the extent map to return
3893 */
3894 STATIC void
3895 xfs_bmapi_update_map(
3896 struct xfs_bmbt_irec **map,
3897 xfs_fileoff_t *bno,
3898 xfs_filblks_t *len,
3899 xfs_fileoff_t obno,
3900 xfs_fileoff_t end,
3901 int *n,
3902 uint32_t flags)
3903 {
3904 xfs_bmbt_irec_t *mval = *map;
3905
3906 ASSERT((flags & XFS_BMAPI_ENTIRE) ||
3907 ((mval->br_startoff + mval->br_blockcount) <= end));
3908 ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) ||
3909 (mval->br_startoff < obno));
3910
3911 *bno = mval->br_startoff + mval->br_blockcount;
3912 *len = end - *bno;
3913 if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) {
3914 /* update previous map with new information */
3915 ASSERT(mval->br_startblock == mval[-1].br_startblock);
3916 ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
3917 ASSERT(mval->br_state == mval[-1].br_state);
3918 mval[-1].br_blockcount = mval->br_blockcount;
3919 mval[-1].br_state = mval->br_state;
3920 } else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
3921 mval[-1].br_startblock != DELAYSTARTBLOCK &&
3922 mval[-1].br_startblock != HOLESTARTBLOCK &&
3923 mval->br_startblock == mval[-1].br_startblock +
3924 mval[-1].br_blockcount &&
3925 mval[-1].br_state == mval->br_state) {
3926 ASSERT(mval->br_startoff ==
3927 mval[-1].br_startoff + mval[-1].br_blockcount);
3928 mval[-1].br_blockcount += mval->br_blockcount;
3929 } else if (*n > 0 &&
3930 mval->br_startblock == DELAYSTARTBLOCK &&
3931 mval[-1].br_startblock == DELAYSTARTBLOCK &&
3932 mval->br_startoff ==
3933 mval[-1].br_startoff + mval[-1].br_blockcount) {
3934 mval[-1].br_blockcount += mval->br_blockcount;
3935 mval[-1].br_state = mval->br_state;
3936 } else if (!((*n == 0) &&
3937 ((mval->br_startoff + mval->br_blockcount) <=
3938 obno))) {
3939 mval++;
3940 (*n)++;
3941 }
3942 *map = mval;
3943 }
3944
3945 /*
3946 * Map file blocks to filesystem blocks without allocation.
3947 */
3948 int
3949 xfs_bmapi_read(
3950 struct xfs_inode *ip,
3951 xfs_fileoff_t bno,
3952 xfs_filblks_t len,
3953 struct xfs_bmbt_irec *mval,
3954 int *nmap,
3955 uint32_t flags)
3956 {
3957 struct xfs_mount *mp = ip->i_mount;
3958 int whichfork = xfs_bmapi_whichfork(flags);
3959 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
3960 struct xfs_bmbt_irec got;
3961 xfs_fileoff_t obno;
3962 xfs_fileoff_t end;
3963 struct xfs_iext_cursor icur;
3964 int error;
3965 bool eof = false;
3966 int n = 0;
3967
3968 ASSERT(*nmap >= 1);
3969 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_ENTIRE)));
3970 xfs_assert_ilocked(ip, XFS_ILOCK_SHARED | XFS_ILOCK_EXCL);
3971
3972 if (WARN_ON_ONCE(!ifp)) {
3973 xfs_bmap_mark_sick(ip, whichfork);
3974 return -EFSCORRUPTED;
3975 }
3976
3977 if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
3978 XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
3979 xfs_bmap_mark_sick(ip, whichfork);
3980 return -EFSCORRUPTED;
3981 }
3982
3983 if (xfs_is_shutdown(mp))
3984 return -EIO;
3985
3986 XFS_STATS_INC(mp, xs_blk_mapr);
3987
3988 error = xfs_iread_extents(NULL, ip, whichfork);
3989 if (error)
3990 return error;
3991
3992 if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got))
3993 eof = true;
3994 end = bno + len;
3995 obno = bno;
3996
3997 while (bno < end && n < *nmap) {
3998 /* Reading past eof, act as though there's a hole up to end. */
3999 if (eof)
4000 got.br_startoff = end;
4001 if (got.br_startoff > bno) {
4002 /* Reading in a hole. */
4003 mval->br_startoff = bno;
4004 mval->br_startblock = HOLESTARTBLOCK;
4005 mval->br_blockcount =
4006 XFS_FILBLKS_MIN(len, got.br_startoff - bno);
4007 mval->br_state = XFS_EXT_NORM;
4008 bno += mval->br_blockcount;
4009 len -= mval->br_blockcount;
4010 mval++;
4011 n++;
4012 continue;
4013 }
4014
4015 /* set up the extent map to return. */
4016 xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
4017 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4018
4019 /* If we're done, stop now. */
4020 if (bno >= end || n >= *nmap)
4021 break;
4022
4023 /* Else go on to the next record. */
4024 if (!xfs_iext_next_extent(ifp, &icur, &got))
4025 eof = true;
4026 }
4027 *nmap = n;
4028 return 0;
4029 }
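
/*
 * Example call pattern (illustrative only; bno and len are the
 * caller's values):
 *
 *	struct xfs_bmbt_irec	mval[4];
 *	int			nmap = ARRAY_SIZE(mval);
 *
 *	error = xfs_bmapi_read(ip, bno, len, mval, &nmap, 0);
 *
 * On success nmap holds the number of mappings actually filled in, and
 * holes come back as mappings with br_startblock == HOLESTARTBLOCK.
 */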
4030
4031 /*
4032 * Add a delayed allocation extent to an inode. Blocks are reserved from the
4033 * global pool and the extent inserted into the inode in-core extent tree.
4034 *
4035 * On entry, got refers to the first extent beyond the offset of the extent to
4036 * allocate or eof is specified if no such extent exists. On return, got refers
4037 * to the extent record that was inserted into the inode fork.
4038 *
4039 * Note that the allocated extent may have been merged with contiguous extents
4040 * during insertion into the inode fork. Thus, got does not reflect the current
4041 * state of the inode fork on return. If necessary, the caller can use icur to
4042 * look up the updated record in the inode fork.
4043 */
4044 int
4045 xfs_bmapi_reserve_delalloc(
4046 struct xfs_inode *ip,
4047 int whichfork,
4048 xfs_fileoff_t off,
4049 xfs_filblks_t len,
4050 xfs_filblks_t prealloc,
4051 struct xfs_bmbt_irec *got,
4052 struct xfs_iext_cursor *icur,
4053 int eof)
4054 {
4055 struct xfs_mount *mp = ip->i_mount;
4056 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
4057 xfs_extlen_t alen;
4058 xfs_extlen_t indlen;
4059 uint64_t fdblocks;
4060 int error;
4061 xfs_fileoff_t aoff = off;
4062
4063 /*
4064 * Cap the alloc length. Keep track of prealloc so we know whether to
4065 * tag the inode before we return.
4066 */
4067 alen = XFS_FILBLKS_MIN(len + prealloc, XFS_MAX_BMBT_EXTLEN);
4068 if (!eof)
4069 alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
4070 if (prealloc && alen >= len)
4071 prealloc = alen - len;
4072
4073 /* Figure out the extent size, adjust alen */
4074 if (whichfork == XFS_COW_FORK) {
4075 struct xfs_bmbt_irec prev;
4076 xfs_extlen_t extsz = xfs_get_cowextsz_hint(ip);
4077
4078 if (!xfs_iext_peek_prev_extent(ifp, icur, &prev))
4079 prev.br_startoff = NULLFILEOFF;
4080
4081 error = xfs_bmap_extsize_align(mp, got, &prev, extsz, 0, eof,
4082 1, 0, &aoff, &alen);
4083 ASSERT(!error);
4084 }
4085
4086 /*
4087 * Make a transaction-less quota reservation for delayed allocation
4088 * blocks. This number gets adjusted later. If the reservation fails we
4089 * can return immediately, since no blocks have been allocated yet.
4090 */
4091 error = xfs_quota_reserve_blkres(ip, alen);
4092 if (error)
4093 return error;
4094
4095 /*
4096 * Account the data (alen) and indirect (indlen) blocks against the sb
4097 * counters separately, since they may come from different pools.
4098 */
4099 indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
4100 ASSERT(indlen > 0);
4101
4102 fdblocks = indlen;
4103 if (XFS_IS_REALTIME_INODE(ip)) {
4104 error = xfs_dec_frextents(mp, xfs_rtb_to_rtx(mp, alen));
4105 if (error)
4106 goto out_unreserve_quota;
4107 } else {
4108 fdblocks += alen;
4109 }
4110
4111 error = xfs_dec_fdblocks(mp, fdblocks, false);
4112 if (error)
4113 goto out_unreserve_frextents;
4114
4115 ip->i_delayed_blks += alen;
4116 xfs_mod_delalloc(ip, alen, indlen);
4117
4118 got->br_startoff = aoff;
4119 got->br_startblock = nullstartblock(indlen);
4120 got->br_blockcount = alen;
4121 got->br_state = XFS_EXT_NORM;
4122
4123 xfs_bmap_add_extent_hole_delay(ip, whichfork, icur, got);
4124
4125 /*
4126 * Tag the inode if blocks were preallocated. Note that COW fork
4127 * preallocation can occur at the start or end of the extent, even when
4128 * prealloc == 0, so we must also check the aligned offset and length.
4129 */
4130 if (whichfork == XFS_DATA_FORK && prealloc)
4131 xfs_inode_set_eofblocks_tag(ip);
4132 if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len))
4133 xfs_inode_set_cowblocks_tag(ip);
4134
4135 return 0;
4136
4137 out_unreserve_frextents:
4138 if (XFS_IS_REALTIME_INODE(ip))
4139 xfs_add_frextents(mp, xfs_rtb_to_rtx(mp, alen));
4140 out_unreserve_quota:
4141 if (XFS_IS_QUOTA_ON(mp))
4142 xfs_quota_unreserve_blkres(ip, alen);
4143 return error;
4144 }
4145
4146 static int
4147 xfs_bmap_alloc_userdata(
4148 struct xfs_bmalloca *bma)
4149 {
4150 struct xfs_mount *mp = bma->ip->i_mount;
4151 int whichfork = xfs_bmapi_whichfork(bma->flags);
4152 int error;
4153
4154 /*
4155 * Set the data type being allocated. For the data fork, the first data
4156 * in the file is treated differently to all other allocations. For the
4157 * attribute fork, we only need to ensure the allocated range is not on
4158 * the busy list.
4159 */
4160 bma->datatype = XFS_ALLOC_NOBUSY;
4161 if (whichfork == XFS_DATA_FORK || whichfork == XFS_COW_FORK) {
4162 bma->datatype |= XFS_ALLOC_USERDATA;
4163 if (bma->offset == 0)
4164 bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA;
4165
4166 if (mp->m_dalign && bma->length >= mp->m_dalign) {
4167 error = xfs_bmap_isaeof(bma, whichfork);
4168 if (error)
4169 return error;
4170 }
4171
4172 if (XFS_IS_REALTIME_INODE(bma->ip))
4173 return xfs_bmap_rtalloc(bma);
4174 }
4175
4176 if (unlikely(XFS_TEST_ERROR(false, mp,
4177 XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT)))
4178 return xfs_bmap_exact_minlen_extent_alloc(bma);
4179
4180 return xfs_bmap_btalloc(bma);
4181 }
4182
4183 static int
4184 xfs_bmapi_allocate(
4185 struct xfs_bmalloca *bma)
4186 {
4187 struct xfs_mount *mp = bma->ip->i_mount;
4188 int whichfork = xfs_bmapi_whichfork(bma->flags);
4189 struct xfs_ifork *ifp = xfs_ifork_ptr(bma->ip, whichfork);
4190 int error;
4191
4192 ASSERT(bma->length > 0);
4193 ASSERT(bma->length <= XFS_MAX_BMBT_EXTLEN);
4194
4195 if (bma->flags & XFS_BMAPI_CONTIG)
4196 bma->minlen = bma->length;
4197 else
4198 bma->minlen = 1;
4199
4200 if (bma->flags & XFS_BMAPI_METADATA) {
4201 if (unlikely(XFS_TEST_ERROR(false, mp,
4202 XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT)))
4203 error = xfs_bmap_exact_minlen_extent_alloc(bma);
4204 else
4205 error = xfs_bmap_btalloc(bma);
4206 } else {
4207 error = xfs_bmap_alloc_userdata(bma);
4208 }
4209 if (error)
4210 return error;
4211 if (bma->blkno == NULLFSBLOCK)
4212 return -ENOSPC;
4213
4214 if (WARN_ON_ONCE(!xfs_valid_startblock(bma->ip, bma->blkno))) {
4215 xfs_bmap_mark_sick(bma->ip, whichfork);
4216 return -EFSCORRUPTED;
4217 }
4218
4219 if (bma->flags & XFS_BMAPI_ZERO) {
4220 error = xfs_zero_extent(bma->ip, bma->blkno, bma->length);
4221 if (error)
4222 return error;
4223 }
4224
4225 if (ifp->if_format == XFS_DINODE_FMT_BTREE && !bma->cur)
4226 bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork);
4227 /*
4228 * Bump the number of extents we've allocated
4229 * in this call.
4230 */
4231 bma->nallocs++;
4232
4233 if (bma->cur && bma->wasdel)
4234 bma->cur->bc_flags |= XFS_BTREE_BMBT_WASDEL;
4235
4236 bma->got.br_startoff = bma->offset;
4237 bma->got.br_startblock = bma->blkno;
4238 bma->got.br_blockcount = bma->length;
4239 bma->got.br_state = XFS_EXT_NORM;
4240
4241 if (bma->flags & XFS_BMAPI_PREALLOC)
4242 bma->got.br_state = XFS_EXT_UNWRITTEN;
4243
4244 if (bma->wasdel)
4245 error = xfs_bmap_add_extent_delay_real(bma, whichfork);
4246 else
4247 error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip,
4248 whichfork, &bma->icur, &bma->cur, &bma->got,
4249 &bma->logflags, bma->flags);
4250 if (error)
4251 return error;
4252
4253 /*
4254 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real
4255 * or xfs_bmap_add_extent_hole_real might have merged it into one of
4256 * the neighbouring ones.
4257 */
4258 xfs_iext_get_extent(ifp, &bma->icur, &bma->got);
4259
4260 ASSERT(bma->got.br_startoff <= bma->offset);
4261 ASSERT(bma->got.br_startoff + bma->got.br_blockcount >=
4262 bma->offset + bma->length);
4263 ASSERT(bma->got.br_state == XFS_EXT_NORM ||
4264 bma->got.br_state == XFS_EXT_UNWRITTEN);
4265 return 0;
4266 }
4267
4268 STATIC int
4269 xfs_bmapi_convert_unwritten(
4270 struct xfs_bmalloca *bma,
4271 struct xfs_bmbt_irec *mval,
4272 xfs_filblks_t len,
4273 uint32_t flags)
4274 {
4275 int whichfork = xfs_bmapi_whichfork(flags);
4276 struct xfs_ifork *ifp = xfs_ifork_ptr(bma->ip, whichfork);
4277 int tmp_logflags = 0;
4278 int error;
4279
4280 /* check if we need to do unwritten->real conversion */
4281 if (mval->br_state == XFS_EXT_UNWRITTEN &&
4282 (flags & XFS_BMAPI_PREALLOC))
4283 return 0;
4284
4285 /* check if we need to do real->unwritten conversion */
4286 if (mval->br_state == XFS_EXT_NORM &&
4287 (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) !=
4288 (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
4289 return 0;
4290
4291 /*
4292 * Modify (by adding) the state flag, if writing.
4293 */
4294 ASSERT(mval->br_blockcount <= len);
4295 if (ifp->if_format == XFS_DINODE_FMT_BTREE && !bma->cur) {
4296 bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
4297 bma->ip, whichfork);
4298 }
4299 mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
4300 ? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
4301
4302 /*
4303 * Before insertion into the bmbt, zero the range being converted
4304 * if required.
4305 */
4306 if (flags & XFS_BMAPI_ZERO) {
4307 error = xfs_zero_extent(bma->ip, mval->br_startblock,
4308 mval->br_blockcount);
4309 if (error)
4310 return error;
4311 }
4312
4313 error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork,
4314 &bma->icur, &bma->cur, mval, &tmp_logflags);
4315 /*
4316 * Log the inode core unconditionally in the unwritten extent conversion
4317 * path because the conversion might not have done so (e.g., if the
4318 * extent count hasn't changed). We need to make sure the inode is dirty
4319 * in the transaction for the sake of fsync(), even if nothing has
4320 * changed, because fsync() will not force the log for this transaction
4321 * unless it sees the inode pinned.
4322 *
4323 * Note: If we're only converting cow fork extents, there aren't
4324 * any on-disk updates to make, so we don't need to log anything.
4325 */
4326 if (whichfork != XFS_COW_FORK)
4327 bma->logflags |= tmp_logflags | XFS_ILOG_CORE;
4328 if (error)
4329 return error;
4330
4331 /*
4332 * Update our extent pointer, given that
4333 * xfs_bmap_add_extent_unwritten_real might have merged it into one
4334 * of the neighbouring ones.
4335 */
4336 xfs_iext_get_extent(ifp, &bma->icur, &bma->got);
4337
4338 /*
4339 * We may have combined previously unwritten space with written space,
4340 * so generate another request.
4341 */
4342 if (mval->br_blockcount < len)
4343 return -EAGAIN;
4344 return 0;
4345 }
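
/*
 * Example of the -EAGAIN case above (illustrative numbers): if the
 * caller asked for 100 blocks but the converted extent only covers the
 * first 60, mval->br_blockcount (60) is less than len (100), so we
 * return -EAGAIN and the loop in xfs_bmapi_write() generates another
 * request for the remaining 40 blocks.
 */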
4346
4347 xfs_extlen_t
4348 xfs_bmapi_minleft(
4349 struct xfs_trans *tp,
4350 struct xfs_inode *ip,
4351 int fork)
4352 {
4353 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, fork);
4354
4355 if (tp && tp->t_highest_agno != NULLAGNUMBER)
4356 return 0;
4357 if (ifp->if_format != XFS_DINODE_FMT_BTREE)
4358 return 1;
4359 return be16_to_cpu(ifp->if_broot->bb_level) + 1;
4360 }
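
/*
 * For example (descriptive only): a btree format fork whose incore
 * root is at bb_level == 2 returns 3 here, i.e. enough free blocks to
 * split one block at each level of the bmbt during the allocation,
 * while an extent format fork needs just one block for a possible
 * conversion to btree format.
 */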
4361
4362 /*
4363 * Log whatever the flags say, even if error. Otherwise we might miss detecting
4364 * a case where the data is changed, there's an error, and it's not logged so we
4365 * don't shutdown when we should. Don't bother logging extents/btree changes if
4366 * we converted to the other format.
4367 */
4368 static void
4369 xfs_bmapi_finish(
4370 struct xfs_bmalloca *bma,
4371 int whichfork,
4372 int error)
4373 {
4374 struct xfs_ifork *ifp = xfs_ifork_ptr(bma->ip, whichfork);
4375
4376 if ((bma->logflags & xfs_ilog_fext(whichfork)) &&
4377 ifp->if_format != XFS_DINODE_FMT_EXTENTS)
4378 bma->logflags &= ~xfs_ilog_fext(whichfork);
4379 else if ((bma->logflags & xfs_ilog_fbroot(whichfork)) &&
4380 ifp->if_format != XFS_DINODE_FMT_BTREE)
4381 bma->logflags &= ~xfs_ilog_fbroot(whichfork);
4382
4383 if (bma->logflags)
4384 xfs_trans_log_inode(bma->tp, bma->ip, bma->logflags);
4385 if (bma->cur)
4386 xfs_btree_del_cursor(bma->cur, error);
4387 }
4388
4389 /*
4390 * Map file blocks to filesystem blocks, and allocate blocks or convert the
4391 * extent state if necessary. Detailed behaviour is controlled by the flags
4392 * parameter. Only allocates blocks from a single allocation group, to avoid
4393 * locking problems.
4394 *
4395 * Returns 0 on success and places the extent mappings in mval. nmap is used
4396 * as an input/output parameter where the caller specifies the maximum number
4397 * of mappings that may be returned and xfs_bmapi_write passes back the number
4398 * of mappings (including existing mappings) it found.
4399 *
4400 * Returns a negative error code on failure, including -ENOSPC when it could not
4401 * allocate any blocks and -ENOSR when it did allocate blocks to convert a
4402 * delalloc range, but those blocks were before the passed in range.
4403 */
4404 int
4405 xfs_bmapi_write(
4406 struct xfs_trans *tp, /* transaction pointer */
4407 struct xfs_inode *ip, /* incore inode */
4408 xfs_fileoff_t bno, /* starting file offs. mapped */
4409 xfs_filblks_t len, /* length to map in file */
4410 uint32_t flags, /* XFS_BMAPI_... */
4411 xfs_extlen_t total, /* total blocks needed */
4412 struct xfs_bmbt_irec *mval, /* output: map values */
4413 int *nmap) /* i/o: mval size/count */
4414 {
4415 struct xfs_bmalloca bma = {
4416 .tp = tp,
4417 .ip = ip,
4418 .total = total,
4419 };
4420 struct xfs_mount *mp = ip->i_mount;
4421 int whichfork = xfs_bmapi_whichfork(flags);
4422 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
4423 xfs_fileoff_t end; /* end of mapped file region */
4424 bool eof = false; /* after the end of extents */
4425 int error; /* error return */
4426 int n; /* current extent index */
4427 xfs_fileoff_t obno; /* old block number (offset) */
4428
4429 #ifdef DEBUG
4430 xfs_fileoff_t orig_bno; /* original block number value */
4431 int orig_flags; /* original flags arg value */
4432 xfs_filblks_t orig_len; /* original value of len arg */
4433 struct xfs_bmbt_irec *orig_mval; /* original value of mval */
4434 int orig_nmap; /* original value of *nmap */
4435
4436 orig_bno = bno;
4437 orig_len = len;
4438 orig_flags = flags;
4439 orig_mval = mval;
4440 orig_nmap = *nmap;
4441 #endif
4442
4443 ASSERT(*nmap >= 1);
4444 ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
4445 ASSERT(tp != NULL);
4446 ASSERT(len > 0);
4447 ASSERT(ifp->if_format != XFS_DINODE_FMT_LOCAL);
4448 xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
4449 ASSERT(!(flags & XFS_BMAPI_REMAP));
4450
4451 /* zeroing is currently only for data extents, not metadata */
4452 ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) !=
4453 (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO));
4454 /*
4455 * we can allocate unwritten extents or pre-zero allocated blocks,
4456 * but it makes no sense to do both at once. This would result in
4457 * zeroing the unwritten extent twice, but it still being an
4458 * unwritten extent....
4459 */
4460 ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) !=
4461 (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO));
4462
4463 if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
4464 XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
4465 xfs_bmap_mark_sick(ip, whichfork);
4466 return -EFSCORRUPTED;
4467 }
4468
4469 if (xfs_is_shutdown(mp))
4470 return -EIO;
4471
4472 XFS_STATS_INC(mp, xs_blk_mapw);
4473
4474 error = xfs_iread_extents(tp, ip, whichfork);
4475 if (error)
4476 goto error0;
4477
4478 if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.icur, &bma.got))
4479 eof = true;
4480 if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
4481 bma.prev.br_startoff = NULLFILEOFF;
4482 bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
4483
4484 n = 0;
4485 end = bno + len;
4486 obno = bno;
4487 while (bno < end && n < *nmap) {
4488 bool need_alloc = false, wasdelay = false;
4489
4490 /* in hole or beyond EOF? */
4491 if (eof || bma.got.br_startoff > bno) {
4492 /*
4493 * CoW fork conversions should /never/ hit EOF or
4494 * holes. There should always be something for us
4495 * to work on.
4496 */
4497 ASSERT(!((flags & XFS_BMAPI_CONVERT) &&
4498 (flags & XFS_BMAPI_COWFORK)));
4499
4500 need_alloc = true;
4501 } else if (isnullstartblock(bma.got.br_startblock)) {
4502 wasdelay = true;
4503 }
4504
4505 /*
4506 * First, deal with the hole before the allocated space
4507 * that we found, if any.
4508 */
4509 if (need_alloc || wasdelay) {
4510 bma.eof = eof;
4511 bma.conv = !!(flags & XFS_BMAPI_CONVERT);
4512 bma.wasdel = wasdelay;
4513 bma.offset = bno;
4514 bma.flags = flags;
4515
4516 /*
4517 * There's a 32/64 bit type mismatch between the
4518 * allocation length request (which can be 64 bits in
4519 * length) and the bma length request, which is
4520 * xfs_extlen_t and therefore 32 bits. Hence we have to
4521 * be careful and do the min() using the larger type to
4522 * avoid overflows.
4523 */
4524 bma.length = XFS_FILBLKS_MIN(len, XFS_MAX_BMBT_EXTLEN);
4525
4526 if (wasdelay) {
4527 bma.length = XFS_FILBLKS_MIN(bma.length,
4528 bma.got.br_blockcount -
4529 (bno - bma.got.br_startoff));
4530 } else {
4531 if (!eof)
4532 bma.length = XFS_FILBLKS_MIN(bma.length,
4533 bma.got.br_startoff - bno);
4534 }
4535
4536 ASSERT(bma.length > 0);
4537 error = xfs_bmapi_allocate(&bma);
4538 if (error) {
4539 /*
4540 * If we already allocated space in a previous
4541 * iteration, return what we got so far when
4542 * running out of space.
4543 */
4544 if (error == -ENOSPC && bma.nallocs)
4545 break;
4546 goto error0;
4547 }
4548
4549 /*
4550 * If this is a CoW allocation, record the data in
4551 * the refcount btree for orphan recovery.
4552 */
4553 if (whichfork == XFS_COW_FORK)
4554 xfs_refcount_alloc_cow_extent(tp, bma.blkno,
4555 bma.length);
4556 }
4557
4558 /* Deal with the allocated space we found. */
4559 xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno,
4560 end, n, flags);
4561
4562 /* Execute unwritten extent conversion if necessary */
4563 error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags);
4564 if (error == -EAGAIN)
4565 continue;
4566 if (error)
4567 goto error0;
4568
4569 /* update the extent map to return */
4570 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4571
4572 /*
4573 * If we're done, stop now. Stop when we've allocated
4574 * XFS_BMAP_MAX_NMAP extents no matter what. Otherwise
4575 * the transaction may get too big.
4576 */
4577 if (bno >= end || n >= *nmap || bma.nallocs >= *nmap)
4578 break;
4579
4580 /* Else go on to the next record. */
4581 bma.prev = bma.got;
4582 if (!xfs_iext_next_extent(ifp, &bma.icur, &bma.got))
4583 eof = true;
4584 }
4585
4586 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
4587 whichfork);
4588 if (error)
4589 goto error0;
4590
4591 ASSERT(ifp->if_format != XFS_DINODE_FMT_BTREE ||
4592 ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork));
4593 xfs_bmapi_finish(&bma, whichfork, 0);
4594 xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
4595 orig_nmap, n);
4596
4597 /*
4598 * When converting delayed allocations, xfs_bmapi_allocate ignores
4599 * the passed in bno and always converts from the start of the found
4600 * delalloc extent.
4601 *
4602 * To avoid a successful return with *nmap set to 0, return the magic
4603 * -ENOSR error code for this particular case so that the caller can
4604 * handle it.
4605 */
4606 if (!n) {
4607 ASSERT(bma.nallocs >= *nmap);
4608 return -ENOSR;
4609 }
4610 *nmap = n;
4611 return 0;
4612 error0:
4613 xfs_bmapi_finish(&bma, whichfork, error);
4614 return error;
4615 }
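
/*
 * Example call pattern (illustrative only; offset_fsb, count_fsb and
 * resblks are the caller's values):
 *
 *	struct xfs_bmbt_irec	map;
 *	int			nimaps = 1;
 *
 *	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb, 0,
 *			resblks, &map, &nimaps);
 *
 * -ENOSPC here means no blocks could be allocated at all, while -ENOSR
 * means blocks were allocated to convert a delalloc extent, but they
 * all landed before the requested range.
 */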
4616
4617 /*
4618 * Convert an existing delalloc extent to real blocks based on file offset. This
4619 * attempts to allocate the entire delalloc extent and may require multiple
4620 * invocations to allocate the target offset if a large enough physical extent
4621 * is not available.
4622 */
4623 static int
4624 xfs_bmapi_convert_one_delalloc(
4625 struct xfs_inode *ip,
4626 int whichfork,
4627 xfs_off_t offset,
4628 struct iomap *iomap,
4629 unsigned int *seq)
4630 {
4631 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
4632 struct xfs_mount *mp = ip->i_mount;
4633 xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
4634 struct xfs_bmalloca bma = { NULL };
4635 uint16_t flags = 0;
4636 struct xfs_trans *tp;
4637 int error;
4638
4639 if (whichfork == XFS_COW_FORK)
4640 flags |= IOMAP_F_SHARED;
4641
4642 /*
4643 * Space for the extent and indirect blocks was reserved when the
4644 * delalloc extent was created so there's no need to do so here.
4645 */
4646 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0,
4647 XFS_TRANS_RESERVE, &tp);
4648 if (error)
4649 return error;
4650
4651 xfs_ilock(ip, XFS_ILOCK_EXCL);
4652 xfs_trans_ijoin(tp, ip, 0);
4653
4654 error = xfs_iext_count_extend(tp, ip, whichfork,
4655 XFS_IEXT_ADD_NOSPLIT_CNT);
4656 if (error)
4657 goto out_trans_cancel;
4658
4659 if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &bma.icur, &bma.got) ||
4660 bma.got.br_startoff > offset_fsb) {
4661 /*
4662 * No extent found in the range we are trying to convert. This
4663 * should only happen for the COW fork, where another thread
4664 * might have moved the extent to the data fork in the meantime.
4665 */
4666 WARN_ON_ONCE(whichfork != XFS_COW_FORK);
4667 error = -EAGAIN;
4668 goto out_trans_cancel;
4669 }
4670
4671 /*
4672 * If we find a real extent here we raced with another thread converting
4673 * the extent. Just return the real extent at this offset.
4674 */
4675 if (!isnullstartblock(bma.got.br_startblock)) {
4676 xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags,
4677 xfs_iomap_inode_sequence(ip, flags));
4678 if (seq)
4679 *seq = READ_ONCE(ifp->if_seq);
4680 goto out_trans_cancel;
4681 }
4682
4683 bma.tp = tp;
4684 bma.ip = ip;
4685 bma.wasdel = true;
4686 bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
4687
4688 /*
4689 * Always convert from the start of the delalloc extent, even if that
4690 * is outside the passed in range, so that we create large contiguous
4691 * extents on disk.
4692 */
4693 bma.offset = bma.got.br_startoff;
4694 bma.length = bma.got.br_blockcount;
4695
4696 /*
4697 * When we're converting the delalloc reservations backing dirty pages
4698 * in the page cache, we must be careful about how we create the new
4699 * extents:
4700 *
4701 * New CoW fork extents are created unwritten, turned into real extents
4702 * when we're about to write the data to disk, and mapped into the data
4703 * fork after the write finishes. End of story.
4704 *
4705 * New data fork extents must be mapped in as unwritten and converted
4706 * to real extents after the write succeeds to avoid exposing stale
4707 * disk contents if we crash.
4708 */
4709 bma.flags = XFS_BMAPI_PREALLOC;
4710 if (whichfork == XFS_COW_FORK)
4711 bma.flags |= XFS_BMAPI_COWFORK;
4712
4713 if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
4714 bma.prev.br_startoff = NULLFILEOFF;
4715
4716 error = xfs_bmapi_allocate(&bma);
4717 if (error)
4718 goto out_finish;
4719
4720 XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, bma.length));
4721 XFS_STATS_INC(mp, xs_xstrat_quick);
4722
4723 ASSERT(!isnullstartblock(bma.got.br_startblock));
4724 xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags,
4725 xfs_iomap_inode_sequence(ip, flags));
4726 if (seq)
4727 *seq = READ_ONCE(ifp->if_seq);
4728
4729 if (whichfork == XFS_COW_FORK)
4730 xfs_refcount_alloc_cow_extent(tp, bma.blkno, bma.length);
4731
4732 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
4733 whichfork);
4734 if (error)
4735 goto out_finish;
4736
4737 xfs_bmapi_finish(&bma, whichfork, 0);
4738 error = xfs_trans_commit(tp);
4739 xfs_iunlock(ip, XFS_ILOCK_EXCL);
4740 return error;
4741
4742 out_finish:
4743 xfs_bmapi_finish(&bma, whichfork, error);
4744 out_trans_cancel:
4745 xfs_trans_cancel(tp);
4746 xfs_iunlock(ip, XFS_ILOCK_EXCL);
4747 return error;
4748 }
4749
4750 /*
4751 * Pass in a delalloc extent and convert it to real extents, return the real
4752 * extent that maps offset_fsb in iomap.
4753 */
4754 int
4755 xfs_bmapi_convert_delalloc(
4756 struct xfs_inode *ip,
4757 int whichfork,
4758 loff_t offset,
4759 struct iomap *iomap,
4760 unsigned int *seq)
4761 {
4762 int error;
4763
4764 /*
4765 * Attempt to allocate whatever delalloc extent currently backs offset
4766 * and put the result into iomap. Allocate in a loop because it may
4767 * take several attempts to allocate real blocks for a contiguous
4768 * delalloc extent if free space is sufficiently fragmented.
4769 */
4770 do {
4771 error = xfs_bmapi_convert_one_delalloc(ip, whichfork, offset,
4772 iomap, seq);
4773 if (error)
4774 return error;
4775 } while (iomap->offset + iomap->length <= offset);
4776
4777 return 0;
4778 }
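
/*
 * For example (illustrative): with a delalloc extent backing file
 * blocks 0-99 and badly fragmented free space, the first pass might
 * only convert blocks 0-39. iomap then ends below the requested
 * offset, so the loop calls xfs_bmapi_convert_one_delalloc() again
 * until the returned mapping covers offset.
 */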
4779
4780 int
4781 xfs_bmapi_remap(
4782 struct xfs_trans *tp,
4783 struct xfs_inode *ip,
4784 xfs_fileoff_t bno,
4785 xfs_filblks_t len,
4786 xfs_fsblock_t startblock,
4787 uint32_t flags)
4788 {
4789 struct xfs_mount *mp = ip->i_mount;
4790 struct xfs_ifork *ifp;
4791 struct xfs_btree_cur *cur = NULL;
4792 struct xfs_bmbt_irec got;
4793 struct xfs_iext_cursor icur;
4794 int whichfork = xfs_bmapi_whichfork(flags);
4795 int logflags = 0, error;
4796
4797 ifp = xfs_ifork_ptr(ip, whichfork);
4798 ASSERT(len > 0);
4799 ASSERT(len <= (xfs_filblks_t)XFS_MAX_BMBT_EXTLEN);
4800 xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
4801 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC |
4802 XFS_BMAPI_NORMAP)));
4803 ASSERT((flags & (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC)) !=
4804 (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC));
4805
4806 if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
4807 XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
4808 xfs_bmap_mark_sick(ip, whichfork);
4809 return -EFSCORRUPTED;
4810 }
4811
4812 if (xfs_is_shutdown(mp))
4813 return -EIO;
4814
4815 error = xfs_iread_extents(tp, ip, whichfork);
4816 if (error)
4817 return error;
4818
4819 if (xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
4820 /* make sure we only reflink into a hole. */
4821 ASSERT(got.br_startoff > bno);
4822 ASSERT(got.br_startoff - bno >= len);
4823 }
4824
4825 ip->i_nblocks += len;
4826 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
4827
4828 if (ifp->if_format == XFS_DINODE_FMT_BTREE)
4829 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
4830
4831 got.br_startoff = bno;
4832 got.br_startblock = startblock;
4833 got.br_blockcount = len;
4834 if (flags & XFS_BMAPI_PREALLOC)
4835 got.br_state = XFS_EXT_UNWRITTEN;
4836 else
4837 got.br_state = XFS_EXT_NORM;
4838
4839 error = xfs_bmap_add_extent_hole_real(tp, ip, whichfork, &icur,
4840 &cur, &got, &logflags, flags);
4841 if (error)
4842 goto error0;
4843
4844 error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags, whichfork);
4845
4846 error0:
4847 if (ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS)
4848 logflags &= ~XFS_ILOG_DEXT;
4849 else if (ip->i_df.if_format != XFS_DINODE_FMT_BTREE)
4850 logflags &= ~XFS_ILOG_DBROOT;
4851
4852 if (logflags)
4853 xfs_trans_log_inode(tp, ip, logflags);
4854 if (cur)
4855 xfs_btree_del_cursor(cur, error);
4856 return error;
4857 }
4858
4859 /*
4860 * When a delalloc extent is split (e.g., due to a hole punch), the original
4861 * indlen reservation must be shared across the two new extents that are left
4862 * behind.
4863 *
4864 * Given the original reservation and the worst case indlen for the two new
4865 * extents (as calculated by xfs_bmap_worst_indlen()), split the original
4866 * reservation fairly across the two new extents. Stealing blocks from the
4867 * deleted extent to make up a reservation deficiency (e.g., if ores == 1)
4868 * is done by the caller before calling this function; the availability and
4869 * subsequent accounting of stolen blocks is the caller's responsibility.
4870 */
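/*
 * Worked example (illustrative numbers): ores = 5, *indlen1 = 4 and
 * *indlen2 = 3 give nres = 7 and resfactor = 500 / 7 = 71, so
 * len1 = 4 * 71 / 100 = 2 and len2 = 3 * 71 / 100 = 2. The remaining
 * 5 - 4 = 1 block is handed out by the distribution loop, leaving
 * *indlen1 = 3 and *indlen2 = 2, which together consume exactly ores.
 */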
4871 static void
4872 xfs_bmap_split_indlen(
4873 xfs_filblks_t ores, /* original res. */
4874 xfs_filblks_t *indlen1, /* ext1 worst indlen */
4875 xfs_filblks_t *indlen2) /* ext2 worst indlen */
4876 {
4877 xfs_filblks_t len1 = *indlen1;
4878 xfs_filblks_t len2 = *indlen2;
4879 xfs_filblks_t nres = len1 + len2; /* new total res. */
4880 xfs_filblks_t resfactor;
4881
4882 /*
4883 * We can't meet the total required reservation for the two extents.
4884 * Calculate the percent of the overall shortage between both extents
4885 * and apply this percentage to each of the requested indlen values.
4886 * This distributes the shortage fairly and reduces the chances that one
4887 * of the two extents is left with nothing when extents are repeatedly
4888 * split.
4889 */
4890 resfactor = (ores * 100);
4891 do_div(resfactor, nres);
4892 len1 *= resfactor;
4893 do_div(len1, 100);
4894 len2 *= resfactor;
4895 do_div(len2, 100);
4896 ASSERT(len1 + len2 <= ores);
4897 ASSERT(len1 < *indlen1 && len2 < *indlen2);
4898
4899 /*
4900 * Hand out the remainder to each extent. If one of the two reservations
4901 * is zero, we want to make sure that one gets a block first. The loop
4902 * below starts with len1, so hand len2 a block right off the bat if it
4903 * is zero.
4904 */
4905 ores -= (len1 + len2);
4906 ASSERT((*indlen1 - len1) + (*indlen2 - len2) >= ores);
4907 if (ores && !len2 && *indlen2) {
4908 len2++;
4909 ores--;
4910 }
4911 while (ores) {
4912 if (len1 < *indlen1) {
4913 len1++;
4914 ores--;
4915 }
4916 if (!ores)
4917 break;
4918 if (len2 < *indlen2) {
4919 len2++;
4920 ores--;
4921 }
4922 }
4923
4924 *indlen1 = len1;
4925 *indlen2 = len2;
4926 }
4927
4928 void
4929 xfs_bmap_del_extent_delay(
4930 struct xfs_inode *ip,
4931 int whichfork,
4932 struct xfs_iext_cursor *icur,
4933 struct xfs_bmbt_irec *got,
4934 struct xfs_bmbt_irec *del)
4935 {
4936 struct xfs_mount *mp = ip->i_mount;
4937 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
4938 struct xfs_bmbt_irec new;
4939 int64_t da_old, da_new, da_diff = 0;
4940 xfs_fileoff_t del_endoff, got_endoff;
4941 xfs_filblks_t got_indlen, new_indlen, stolen = 0;
4942 uint32_t state = xfs_bmap_fork_to_state(whichfork);
4943 uint64_t fdblocks;
4944 bool isrt;
4945
4946 XFS_STATS_INC(mp, xs_del_exlist);
4947
4948 isrt = xfs_ifork_is_realtime(ip, whichfork);
4949 del_endoff = del->br_startoff + del->br_blockcount;
4950 got_endoff = got->br_startoff + got->br_blockcount;
4951 da_old = startblockval(got->br_startblock);
4952 da_new = 0;
4953
4954 ASSERT(del->br_blockcount > 0);
4955 ASSERT(got->br_startoff <= del->br_startoff);
4956 ASSERT(got_endoff >= del_endoff);
4957
4958 /*
4959 * Update the inode delalloc counter now and wait to update the
4960 * sb counters as we might have to borrow some blocks for the
4961 * indirect block accounting.
4962 */
4963 xfs_quota_unreserve_blkres(ip, del->br_blockcount);
4964 ip->i_delayed_blks -= del->br_blockcount;
4965
4966 if (got->br_startoff == del->br_startoff)
4967 state |= BMAP_LEFT_FILLING;
4968 if (got_endoff == del_endoff)
4969 state |= BMAP_RIGHT_FILLING;
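/*
 * For example (illustrative): with got covering file offsets [10, 30),
 * del of [10, 20) sets only BMAP_LEFT_FILLING and keeps the tail;
 * del of [10, 30) sets both flags and removes the whole record;
 * del of [15, 25) sets neither and splits the extent in two.
 */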
4970
4971 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
4972 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
4973 /*
4974 * Matches the whole extent. Delete the entry.
4975 */
4976 xfs_iext_remove(ip, icur, state);
4977 xfs_iext_prev(ifp, icur);
4978 break;
4979 case BMAP_LEFT_FILLING:
4980 /*
4981 * Deleting the first part of the extent.
4982 */
4983 got->br_startoff = del_endoff;
4984 got->br_blockcount -= del->br_blockcount;
4985 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
4986 got->br_blockcount), da_old);
4987 got->br_startblock = nullstartblock((int)da_new);
4988 xfs_iext_update_extent(ip, state, icur, got);
4989 break;
4990 case BMAP_RIGHT_FILLING:
4991 /*
4992 * Deleting the last part of the extent.
4993 */
4994 got->br_blockcount = got->br_blockcount - del->br_blockcount;
4995 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
4996 got->br_blockcount), da_old);
4997 got->br_startblock = nullstartblock((int)da_new);
4998 xfs_iext_update_extent(ip, state, icur, got);
4999 break;
5000 case 0:
5001 /*
5002 * Deleting the middle of the extent.
5003 *
5004 * Distribute the original indlen reservation across the two new
5005 * extents. Steal blocks from the deleted extent if necessary.
5006 * Stealing blocks simply fudges the fdblocks accounting below.
5007 * Warn if either of the new indlen reservations is zero as this
5008 * can lead to delalloc problems.
5009 */
5010 got->br_blockcount = del->br_startoff - got->br_startoff;
5011 got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount);
5012
5013 new.br_blockcount = got_endoff - del_endoff;
5014 new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount);
5015
5016 WARN_ON_ONCE(!got_indlen || !new_indlen);
5017 /*
5018 * Steal as many blocks as we can to try and satisfy the worst
5019 * case indlen for both new extents.
5020 *
5021 * However, we can't just steal reservations from the data
5022 * blocks if this is an RT inode as the data and metadata
5023 * blocks come from different pools. We'll have to live with
5024 * an under-filled indirect reservation in this case.
5025 */
5026 da_new = got_indlen + new_indlen;
5027 if (da_new > da_old && !isrt) {
5028 stolen = XFS_FILBLKS_MIN(da_new - da_old,
5029 del->br_blockcount);
5030 da_old += stolen;
5031 }
5032 if (da_new > da_old)
5033 xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen);
5034 da_new = got_indlen + new_indlen;
5035
5036 got->br_startblock = nullstartblock((int)got_indlen);
5037
5038 new.br_startoff = del_endoff;
5039 new.br_state = got->br_state;
5040 new.br_startblock = nullstartblock((int)new_indlen);
5041
5042 xfs_iext_update_extent(ip, state, icur, got);
5043 xfs_iext_next(ifp, icur);
5044 xfs_iext_insert(ip, icur, &new, state);
5045
5046 del->br_blockcount -= stolen;
5047 break;
5048 }
5049
5050 ASSERT(da_old >= da_new);
5051 da_diff = da_old - da_new;
5052 fdblocks = da_diff;
5053
5054 if (isrt)
5055 xfs_add_frextents(mp, xfs_rtb_to_rtx(mp, del->br_blockcount));
5056 else
5057 fdblocks += del->br_blockcount;
5058
5059 xfs_add_fdblocks(mp, fdblocks);
5060 xfs_mod_delalloc(ip, -(int64_t)del->br_blockcount, -da_diff);
5061 }
5062
5063 void
5064 xfs_bmap_del_extent_cow(
5065 struct xfs_inode *ip,
5066 struct xfs_iext_cursor *icur,
5067 struct xfs_bmbt_irec *got,
5068 struct xfs_bmbt_irec *del)
5069 {
5070 struct xfs_mount *mp = ip->i_mount;
5071 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_COW_FORK);
5072 struct xfs_bmbt_irec new;
5073 xfs_fileoff_t del_endoff, got_endoff;
5074 uint32_t state = BMAP_COWFORK;
5075
5076 XFS_STATS_INC(mp, xs_del_exlist);
5077
5078 del_endoff = del->br_startoff + del->br_blockcount;
5079 got_endoff = got->br_startoff + got->br_blockcount;
5080
5081 ASSERT(del->br_blockcount > 0);
5082 ASSERT(got->br_startoff <= del->br_startoff);
5083 ASSERT(got_endoff >= del_endoff);
5084 ASSERT(!isnullstartblock(got->br_startblock));
5085
5086 if (got->br_startoff == del->br_startoff)
5087 state |= BMAP_LEFT_FILLING;
5088 if (got_endoff == del_endoff)
5089 state |= BMAP_RIGHT_FILLING;
5090
5091 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
5092 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
5093 /*
5094 * Matches the whole extent. Delete the entry.
5095 */
5096 xfs_iext_remove(ip, icur, state);
5097 xfs_iext_prev(ifp, icur);
5098 break;
5099 case BMAP_LEFT_FILLING:
5100 /*
5101 * Deleting the first part of the extent.
5102 */
5103 got->br_startoff = del_endoff;
5104 got->br_blockcount -= del->br_blockcount;
5105 got->br_startblock = del->br_startblock + del->br_blockcount;
5106 xfs_iext_update_extent(ip, state, icur, got);
5107 break;
5108 case BMAP_RIGHT_FILLING:
5109 /*
5110 * Deleting the last part of the extent.
5111 */
5112 got->br_blockcount -= del->br_blockcount;
5113 xfs_iext_update_extent(ip, state, icur, got);
5114 break;
5115 case 0:
5116 /*
5117 * Deleting the middle of the extent.
5118 */
5119 got->br_blockcount = del->br_startoff - got->br_startoff;
5120
5121 new.br_startoff = del_endoff;
5122 new.br_blockcount = got_endoff - del_endoff;
5123 new.br_state = got->br_state;
5124 new.br_startblock = del->br_startblock + del->br_blockcount;
5125
5126 xfs_iext_update_extent(ip, state, icur, got);
5127 xfs_iext_next(ifp, icur);
5128 xfs_iext_insert(ip, icur, &new, state);
5129 break;
5130 }
5131 ip->i_delayed_blks -= del->br_blockcount;
5132 }
5133
5134 /*
5135 * Called by xfs_bmapi to update file extent records and the btree
5136 * after removing space.
5137 */
5138 STATIC int /* error */
5139 xfs_bmap_del_extent_real(
5140 xfs_inode_t *ip, /* incore inode pointer */
5141 xfs_trans_t *tp, /* current transaction pointer */
5142 struct xfs_iext_cursor *icur,
5143 struct xfs_btree_cur *cur, /* if null, not a btree */
5144 xfs_bmbt_irec_t *del, /* data to remove from extents */
5145 int *logflagsp, /* inode logging flags */
5146 int whichfork, /* data or attr fork */
5147 uint32_t bflags) /* bmapi flags */
5148 {
5149 xfs_fsblock_t del_endblock=0; /* first block past del */
5150 xfs_fileoff_t del_endoff; /* first offset past del */
5151 int error = 0; /* error return value */
5152 struct xfs_bmbt_irec got; /* current extent entry */
5153 xfs_fileoff_t got_endoff; /* first offset past got */
5154 int i; /* temp state */
5155 struct xfs_ifork *ifp; /* inode fork pointer */
5156 xfs_mount_t *mp; /* mount structure */
5157 xfs_filblks_t nblks; /* quota/sb block count */
5158 xfs_bmbt_irec_t new; /* new record to be inserted */
5159 /* REFERENCED */
5160 uint qfield; /* quota field to update */
5161 uint32_t state = xfs_bmap_fork_to_state(whichfork);
5162 struct xfs_bmbt_irec old;
5163
5164 *logflagsp = 0;
5165
5166 mp = ip->i_mount;
5167 XFS_STATS_INC(mp, xs_del_exlist);
5168
5169 ifp = xfs_ifork_ptr(ip, whichfork);
5170 ASSERT(del->br_blockcount > 0);
5171 xfs_iext_get_extent(ifp, icur, &got);
5172 ASSERT(got.br_startoff <= del->br_startoff);
5173 del_endoff = del->br_startoff + del->br_blockcount;
5174 got_endoff = got.br_startoff + got.br_blockcount;
5175 ASSERT(got_endoff >= del_endoff);
5176 ASSERT(!isnullstartblock(got.br_startblock));
5177 qfield = 0;
5178
5179 /*
5180 * If it's the case where the directory code is running with no block
5181 * reservation, and the deleted block is in the middle of its extent,
5182 * and the resulting insert of an extent would cause transformation to
5183 * btree format, then reject it. The calling code will then swap blocks
5184 * around instead. We have to do this now, rather than waiting for the
5185 * conversion to btree format, since the transaction will be dirty then.
5186 */
5187 if (tp->t_blk_res == 0 &&
5188 ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
5189 ifp->if_nextents >= XFS_IFORK_MAXEXT(ip, whichfork) &&
5190 del->br_startoff > got.br_startoff && del_endoff < got_endoff)
5191 return -ENOSPC;
5192
5193 *logflagsp = XFS_ILOG_CORE;
5194 if (xfs_ifork_is_realtime(ip, whichfork))
5195 qfield = XFS_TRANS_DQ_RTBCOUNT;
5196 else
5197 qfield = XFS_TRANS_DQ_BCOUNT;
5198 nblks = del->br_blockcount;
5199
5200 del_endblock = del->br_startblock + del->br_blockcount;
5201 if (cur) {
5202 error = xfs_bmbt_lookup_eq(cur, &got, &i);
5203 if (error)
5204 return error;
5205 if (XFS_IS_CORRUPT(mp, i != 1)) {
5206 xfs_btree_mark_sick(cur);
5207 return -EFSCORRUPTED;
5208 }
5209 }
5210
5211 if (got.br_startoff == del->br_startoff)
5212 state |= BMAP_LEFT_FILLING;
5213 if (got_endoff == del_endoff)
5214 state |= BMAP_RIGHT_FILLING;
5215
5216 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
5217 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
5218 /*
5219 * Matches the whole extent. Delete the entry.
5220 */
5221 xfs_iext_remove(ip, icur, state);
5222 xfs_iext_prev(ifp, icur);
5223 ifp->if_nextents--;
5224
5225 *logflagsp |= XFS_ILOG_CORE;
5226 if (!cur) {
5227 *logflagsp |= xfs_ilog_fext(whichfork);
5228 break;
5229 }
5230 if ((error = xfs_btree_delete(cur, &i)))
5231 return error;
5232 if (XFS_IS_CORRUPT(mp, i != 1)) {
5233 xfs_btree_mark_sick(cur);
5234 return -EFSCORRUPTED;
5235 }
5236 break;
5237 case BMAP_LEFT_FILLING:
5238 /*
5239 * Deleting the first part of the extent.
5240 */
5241 got.br_startoff = del_endoff;
5242 got.br_startblock = del_endblock;
5243 got.br_blockcount -= del->br_blockcount;
5244 xfs_iext_update_extent(ip, state, icur, &got);
5245 if (!cur) {
5246 *logflagsp |= xfs_ilog_fext(whichfork);
5247 break;
5248 }
5249 error = xfs_bmbt_update(cur, &got);
5250 if (error)
5251 return error;
5252 break;
5253 case BMAP_RIGHT_FILLING:
5254 /*
5255 * Deleting the last part of the extent.
5256 */
5257 got.br_blockcount -= del->br_blockcount;
5258 xfs_iext_update_extent(ip, state, icur, &got);
5259 if (!cur) {
5260 *logflagsp |= xfs_ilog_fext(whichfork);
5261 break;
5262 }
5263 error = xfs_bmbt_update(cur, &got);
5264 if (error)
5265 return error;
5266 break;
5267 case 0:
5268 /*
5269 * Deleting the middle of the extent.
5270 */
5271
5272 old = got;
5273
5274 got.br_blockcount = del->br_startoff - got.br_startoff;
5275 xfs_iext_update_extent(ip, state, icur, &got);
5276
5277 new.br_startoff = del_endoff;
5278 new.br_blockcount = got_endoff - del_endoff;
5279 new.br_state = got.br_state;
5280 new.br_startblock = del_endblock;
5281
5282 *logflagsp |= XFS_ILOG_CORE;
5283 if (cur) {
5284 error = xfs_bmbt_update(cur, &got);
5285 if (error)
5286 return error;
5287 error = xfs_btree_increment(cur, 0, &i);
5288 if (error)
5289 return error;
5290 cur->bc_rec.b = new;
5291 error = xfs_btree_insert(cur, &i);
5292 if (error && error != -ENOSPC)
5293 return error;
5294 /*
5295 * If we get no-space back from the btree insert, it tried a
5296 * split, and we have a zero block reservation. Fix up
5297 * our state and return the error.
5298 */
5299 if (error == -ENOSPC) {
5300 /*
5301 * Reset the cursor, don't trust it after any
5302 * insert operation.
5303 */
5304 error = xfs_bmbt_lookup_eq(cur, &got, &i);
5305 if (error)
5306 return error;
5307 if (XFS_IS_CORRUPT(mp, i != 1)) {
5308 xfs_btree_mark_sick(cur);
5309 return -EFSCORRUPTED;
5310 }
5311 /*
5312 * Update the btree record back
5313 * to the original value.
5314 */
5315 error = xfs_bmbt_update(cur, &old);
5316 if (error)
5317 return error;
5318 /*
5319 * Reset the extent record back
5320 * to the original value.
5321 */
5322 xfs_iext_update_extent(ip, state, icur, &old);
5323 *logflagsp = 0;
5324 return -ENOSPC;
5325 }
5326 if (XFS_IS_CORRUPT(mp, i != 1)) {
5327 xfs_btree_mark_sick(cur);
5328 return -EFSCORRUPTED;
5329 }
5330 } else
5331 *logflagsp |= xfs_ilog_fext(whichfork);
5332
5333 ifp->if_nextents++;
5334 xfs_iext_next(ifp, icur);
5335 xfs_iext_insert(ip, icur, &new, state);
5336 break;
5337 }
5338
5339 /* remove reverse mapping */
5340 xfs_rmap_unmap_extent(tp, ip, whichfork, del);
5341
5342 /*
5343 * If we need to, add to list of extents to delete.
5344 */
5345 if (!(bflags & XFS_BMAPI_REMAP)) {
5346 if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) {
5347 xfs_refcount_decrease_extent(tp, del);
5348 } else if (xfs_ifork_is_realtime(ip, whichfork)) {
5349 /*
5350 * Ensure the bitmap and summary inodes are locked
5351 * and joined to the transaction before modifying them.
5352 */
5353 if (!(tp->t_flags & XFS_TRANS_RTBITMAP_LOCKED)) {
5354 tp->t_flags |= XFS_TRANS_RTBITMAP_LOCKED;
5355 xfs_rtbitmap_lock(tp, mp);
5356 }
5357 error = xfs_rtfree_blocks(tp, del->br_startblock,
5358 del->br_blockcount);
5359 } else {
5360 error = xfs_free_extent_later(tp, del->br_startblock,
5361 del->br_blockcount, NULL,
5362 XFS_AG_RESV_NONE,
5363 ((bflags & XFS_BMAPI_NODISCARD) ||
5364 del->br_state == XFS_EXT_UNWRITTEN));
5365 }
5366 if (error)
5367 return error;
5368 }
5369
5370 /*
5371 * Adjust inode # blocks in the file.
5372 */
5373 if (nblks)
5374 ip->i_nblocks -= nblks;
5375 /*
5376 * Adjust quota data.
5377 */
5378 if (qfield && !(bflags & XFS_BMAPI_REMAP))
5379 xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);
5380
5381 return 0;
5382 }
5383
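/*
 * Worked example for the filling cases above (illustrative, made-up
 * numbers): with got = [startoff 10, 10 blocks] and del = [startoff 14,
 * 2 blocks], neither end of got is removed, so we hit case 0: got is
 * trimmed to 4 blocks at offset 10, a new 4-block record is inserted at
 * offset 16 starting at del_endblock, and if_nextents grows by one.
 * Deleting [10, 2 blocks] instead would be BMAP_LEFT_FILLING (trim the
 * front), [18, 2 blocks] BMAP_RIGHT_FILLING (trim the back), and
 * [10, 10 blocks] both, i.e. removal of the whole record.
 */
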
5384 /*
5385 * Unmap (remove) blocks from a file.
5386 * If nexts is nonzero then the number of extents to remove is limited to
5387 * that value. If not all extents in the block range can be removed then
5388  * *rlen is set to the length remaining; otherwise it is set to zero.
5389 */
5390 static int
5391 __xfs_bunmapi(
5392 struct xfs_trans *tp, /* transaction pointer */
5393 struct xfs_inode *ip, /* incore inode */
5394 xfs_fileoff_t start, /* first file offset deleted */
5395 xfs_filblks_t *rlen, /* i/o: amount remaining */
5396 uint32_t flags, /* misc flags */
5397 xfs_extnum_t nexts) /* number of extents max */
5398 {
5399 struct xfs_btree_cur *cur; /* bmap btree cursor */
5400 struct xfs_bmbt_irec del; /* extent being deleted */
5401 int error; /* error return value */
5402 xfs_extnum_t extno; /* extent number in list */
5403 struct xfs_bmbt_irec got; /* current extent record */
5404 struct xfs_ifork *ifp; /* inode fork pointer */
5405 int isrt; /* freeing in rt area */
5406 int logflags; /* transaction logging flags */
5407 xfs_extlen_t mod; /* rt extent offset */
5408 struct xfs_mount *mp = ip->i_mount;
5409 int tmp_logflags; /* partial logging flags */
5410 int wasdel; /* was a delayed alloc extent */
5411 int whichfork; /* data or attribute fork */
5412 xfs_filblks_t len = *rlen; /* length to unmap in file */
5413 xfs_fileoff_t end;
5414 struct xfs_iext_cursor icur;
5415 bool done = false;
5416
5417 trace_xfs_bunmap(ip, start, len, flags, _RET_IP_);
5418
5419 whichfork = xfs_bmapi_whichfork(flags);
5420 ASSERT(whichfork != XFS_COW_FORK);
5421 ifp = xfs_ifork_ptr(ip, whichfork);
5422 if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp))) {
5423 xfs_bmap_mark_sick(ip, whichfork);
5424 return -EFSCORRUPTED;
5425 }
5426 if (xfs_is_shutdown(mp))
5427 return -EIO;
5428
5429 xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
5430 ASSERT(len > 0);
5431 ASSERT(nexts >= 0);
5432
5433 error = xfs_iread_extents(tp, ip, whichfork);
5434 if (error)
5435 return error;
5436
5437 if (xfs_iext_count(ifp) == 0) {
5438 *rlen = 0;
5439 return 0;
5440 }
5441 XFS_STATS_INC(mp, xs_blk_unmap);
5442 isrt = xfs_ifork_is_realtime(ip, whichfork);
5443 end = start + len;
5444
5445 if (!xfs_iext_lookup_extent_before(ip, ifp, &end, &icur, &got)) {
5446 *rlen = 0;
5447 return 0;
5448 }
5449 end--;
5450
5451 logflags = 0;
5452 	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
5454 		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5455 	} else
5456 		cur = NULL;
5457
5458 extno = 0;
5459 while (end != (xfs_fileoff_t)-1 && end >= start &&
5460 (nexts == 0 || extno < nexts)) {
5461 /*
5462 * Is the found extent after a hole in which end lives?
5463 * Just back up to the previous extent, if so.
5464 */
5465 if (got.br_startoff > end &&
5466 !xfs_iext_prev_extent(ifp, &icur, &got)) {
5467 done = true;
5468 break;
5469 }
5470 /*
5471 * Is the last block of this extent before the range
5472 * we're supposed to delete? If so, we're done.
5473 */
5474 end = XFS_FILEOFF_MIN(end,
5475 got.br_startoff + got.br_blockcount - 1);
5476 if (end < start)
5477 break;
5478 /*
5479 * Then deal with the (possibly delayed) allocated space
5480 * we found.
5481 */
5482 del = got;
5483 wasdel = isnullstartblock(del.br_startblock);
5484
5485 if (got.br_startoff < start) {
5486 del.br_startoff = start;
5487 del.br_blockcount -= start - got.br_startoff;
5488 if (!wasdel)
5489 del.br_startblock += start - got.br_startoff;
5490 }
5491 if (del.br_startoff + del.br_blockcount > end + 1)
5492 del.br_blockcount = end + 1 - del.br_startoff;
5493
5494 if (!isrt || (flags & XFS_BMAPI_REMAP))
5495 goto delete;
5496
5497 mod = xfs_rtb_to_rtxoff(mp,
5498 del.br_startblock + del.br_blockcount);
5499 if (mod) {
5500 /*
5501 * Realtime extent not lined up at the end.
5502 * The extent could have been split into written
5503 * and unwritten pieces, or we could just be
5504 * unmapping part of it. But we can't really
5505 * get rid of part of a realtime extent.
5506 */
5507 if (del.br_state == XFS_EXT_UNWRITTEN) {
5508 /*
5509 * This piece is unwritten, or we're not
5510 * using unwritten extents. Skip over it.
5511 */
5512 ASSERT((flags & XFS_BMAPI_REMAP) || end >= mod);
5513 end -= mod > del.br_blockcount ?
5514 del.br_blockcount : mod;
5515 if (end < got.br_startoff &&
5516 !xfs_iext_prev_extent(ifp, &icur, &got)) {
5517 done = true;
5518 break;
5519 }
5520 continue;
5521 }
5522 /*
5523 * It's written, turn it unwritten.
5524 * This is better than zeroing it.
5525 */
5526 ASSERT(del.br_state == XFS_EXT_NORM);
5527 ASSERT(tp->t_blk_res > 0);
5528 /*
5529 * If this spans a realtime extent boundary,
5530 * chop it back to the start of the one we end at.
5531 */
5532 if (del.br_blockcount > mod) {
5533 del.br_startoff += del.br_blockcount - mod;
5534 del.br_startblock += del.br_blockcount - mod;
5535 del.br_blockcount = mod;
5536 }
5537 del.br_state = XFS_EXT_UNWRITTEN;
5538 error = xfs_bmap_add_extent_unwritten_real(tp, ip,
5539 whichfork, &icur, &cur, &del,
5540 &logflags);
5541 if (error)
5542 goto error0;
5543 goto nodelete;
5544 }
5545
5546 mod = xfs_rtb_to_rtxoff(mp, del.br_startblock);
5547 if (mod) {
5548 xfs_extlen_t off = mp->m_sb.sb_rextsize - mod;
5549
5550 /*
5551 * Realtime extent is lined up at the end but not
5552 * at the front. We'll get rid of full extents if
5553 * we can.
5554 */
5555 if (del.br_blockcount > off) {
5556 del.br_blockcount -= off;
5557 del.br_startoff += off;
5558 del.br_startblock += off;
5559 } else if (del.br_startoff == start &&
5560 (del.br_state == XFS_EXT_UNWRITTEN ||
5561 tp->t_blk_res == 0)) {
5562 /*
5563 * Can't make it unwritten. There isn't
5564 * a full extent here so just skip it.
5565 */
5566 ASSERT(end >= del.br_blockcount);
5567 end -= del.br_blockcount;
5568 if (got.br_startoff > end &&
5569 !xfs_iext_prev_extent(ifp, &icur, &got)) {
5570 done = true;
5571 break;
5572 }
5573 continue;
5574 } else if (del.br_state == XFS_EXT_UNWRITTEN) {
5575 struct xfs_bmbt_irec prev;
5576 xfs_fileoff_t unwrite_start;
5577
5578 /*
5579 * This one is already unwritten.
5580 * It must have a written left neighbor.
5581 * Unwrite the killed part of that one and
5582 * try again.
5583 */
5584 if (!xfs_iext_prev_extent(ifp, &icur, &prev))
5585 ASSERT(0);
5586 ASSERT(prev.br_state == XFS_EXT_NORM);
5587 ASSERT(!isnullstartblock(prev.br_startblock));
5588 ASSERT(del.br_startblock ==
5589 prev.br_startblock + prev.br_blockcount);
5590 unwrite_start = max3(start,
5591 del.br_startoff - mod,
5592 prev.br_startoff);
5593 mod = unwrite_start - prev.br_startoff;
5594 prev.br_startoff = unwrite_start;
5595 prev.br_startblock += mod;
5596 prev.br_blockcount -= mod;
5597 prev.br_state = XFS_EXT_UNWRITTEN;
5598 error = xfs_bmap_add_extent_unwritten_real(tp,
5599 ip, whichfork, &icur, &cur,
5600 &prev, &logflags);
5601 if (error)
5602 goto error0;
5603 goto nodelete;
5604 } else {
5605 ASSERT(del.br_state == XFS_EXT_NORM);
5606 del.br_state = XFS_EXT_UNWRITTEN;
5607 error = xfs_bmap_add_extent_unwritten_real(tp,
5608 ip, whichfork, &icur, &cur,
5609 &del, &logflags);
5610 if (error)
5611 goto error0;
5612 goto nodelete;
5613 }
5614 }
5615
5616 delete:
5617 if (wasdel) {
5618 xfs_bmap_del_extent_delay(ip, whichfork, &icur, &got, &del);
5619 } else {
5620 error = xfs_bmap_del_extent_real(ip, tp, &icur, cur,
5621 &del, &tmp_logflags, whichfork,
5622 flags);
5623 logflags |= tmp_logflags;
5624 if (error)
5625 goto error0;
5626 }
5627
5628 end = del.br_startoff - 1;
5629 nodelete:
5630 /*
5631 		 * If not done, go on to the next (previous) record.
5632 */
5633 if (end != (xfs_fileoff_t)-1 && end >= start) {
5634 if (!xfs_iext_get_extent(ifp, &icur, &got) ||
5635 (got.br_startoff > end &&
5636 !xfs_iext_prev_extent(ifp, &icur, &got))) {
5637 done = true;
5638 break;
5639 }
5640 extno++;
5641 }
5642 }
5643 if (done || end == (xfs_fileoff_t)-1 || end < start)
5644 *rlen = 0;
5645 else
5646 *rlen = end - start + 1;
5647
5648 /*
5649 * Convert to a btree if necessary.
5650 */
5651 if (xfs_bmap_needs_btree(ip, whichfork)) {
5652 ASSERT(cur == NULL);
5653 error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
5654 &tmp_logflags, whichfork);
5655 logflags |= tmp_logflags;
5656 } else {
5657 error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags,
5658 whichfork);
5659 }
5660
5661 error0:
5662 /*
5663 	 * Log everything.  Do this after conversion; there's no point in
5664 * logging the extent records if we've converted to btree format.
5665 */
5666 if ((logflags & xfs_ilog_fext(whichfork)) &&
5667 ifp->if_format != XFS_DINODE_FMT_EXTENTS)
5668 logflags &= ~xfs_ilog_fext(whichfork);
5669 else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
5670 ifp->if_format != XFS_DINODE_FMT_BTREE)
5671 logflags &= ~xfs_ilog_fbroot(whichfork);
5672 /*
5673 	 * Log the inode even in the error case; if the transaction
5674 * is dirty we'll need to shut down the filesystem.
5675 */
5676 if (logflags)
5677 xfs_trans_log_inode(tp, ip, logflags);
5678 if (cur) {
5679 if (!error)
5680 cur->bc_bmap.allocated = 0;
5681 xfs_btree_del_cursor(cur, error);
5682 }
5683 return error;
5684 }
5685
5686 /* Unmap a range of a file. */
5687 int
5688 xfs_bunmapi(
5689 xfs_trans_t *tp,
5690 struct xfs_inode *ip,
5691 xfs_fileoff_t bno,
5692 xfs_filblks_t len,
5693 uint32_t flags,
5694 xfs_extnum_t nexts,
5695 int *done)
5696 {
5697 int error;
5698
5699 error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts);
5700 *done = (len == 0);
5701 return error;
5702 }
5703
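/*
 * Example (illustrative sketch, not part of this file): a caller that wants
 * to remove a whole range, a few extents per transaction, might drive
 * xfs_bunmapi() like this, finishing deferred frees between calls.  Locking
 * and transaction setup are omitted; xfs_bunmapi_range() below is the real
 * version of this loop, built on __xfs_bunmapi():
 *
 *	int done = 0, error = 0;
 *
 *	while (!done && !error) {
 *		error = xfs_bunmapi(tp, ip, bno, len, 0, 2, &done);
 *		if (!error)
 *			error = xfs_defer_finish(&tp);
 *	}
 */
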
5704 /*
5705 * Determine whether an extent shift can be accomplished by a merge with the
5706 * extent that precedes the target hole of the shift.
5707 */
5708 STATIC bool
5709 xfs_bmse_can_merge(
5710 struct xfs_bmbt_irec *left, /* preceding extent */
5711 struct xfs_bmbt_irec *got, /* current extent to shift */
5712 xfs_fileoff_t shift) /* shift fsb */
5713 {
5714 xfs_fileoff_t startoff;
5715
5716 startoff = got->br_startoff - shift;
5717
5718 /*
5719 * The extent, once shifted, must be adjacent in-file and on-disk with
5720 * the preceding extent.
5721 */
5722 if ((left->br_startoff + left->br_blockcount != startoff) ||
5723 (left->br_startblock + left->br_blockcount != got->br_startblock) ||
5724 (left->br_state != got->br_state) ||
5725 (left->br_blockcount + got->br_blockcount > XFS_MAX_BMBT_EXTLEN))
5726 return false;
5727
5728 return true;
5729 }
5730
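/*
 * Worked example (illustrative, made-up numbers): left = [startoff 0,
 * startblock 100, 10 blocks] and got = [startoff 15, startblock 110,
 * 5 blocks] with shift = 5.  Shifting got to startoff 10 makes it
 * contiguous with left both in-file (0 + 10 == 10) and on-disk
 * (100 + 10 == 110); the states match and the combined length of 15 blocks
 * is below XFS_MAX_BMBT_EXTLEN, so the two records can be merged instead
 * of shifted.
 */
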
5731 /*
5732 * A bmap extent shift adjusts the file offset of an extent to fill a preceding
5733 * hole in the file. If an extent shift would result in the extent being fully
5734 * adjacent to the extent that currently precedes the hole, we can merge with
5735 * the preceding extent rather than do the shift.
5736 *
5737 * This function assumes the caller has verified a shift-by-merge is possible
5738 * with the provided extents via xfs_bmse_can_merge().
5739 */
5740 STATIC int
5741 xfs_bmse_merge(
5742 struct xfs_trans *tp,
5743 struct xfs_inode *ip,
5744 int whichfork,
5745 xfs_fileoff_t shift, /* shift fsb */
5746 struct xfs_iext_cursor *icur,
5747 struct xfs_bmbt_irec *got, /* extent to shift */
5748 struct xfs_bmbt_irec *left, /* preceding extent */
5749 struct xfs_btree_cur *cur,
5750 int *logflags) /* output */
5751 {
5752 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
5753 struct xfs_bmbt_irec new;
5754 xfs_filblks_t blockcount;
5755 int error, i;
5756 struct xfs_mount *mp = ip->i_mount;
5757
5758 blockcount = left->br_blockcount + got->br_blockcount;
5759
5760 xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
5761 ASSERT(xfs_bmse_can_merge(left, got, shift));
5762
5763 new = *left;
5764 new.br_blockcount = blockcount;
5765
5766 /*
5767 * Update the on-disk extent count, the btree if necessary and log the
5768 * inode.
5769 */
5770 ifp->if_nextents--;
5771 *logflags |= XFS_ILOG_CORE;
5772 if (!cur) {
5773 *logflags |= XFS_ILOG_DEXT;
5774 goto done;
5775 }
5776
5777 /* lookup and remove the extent to merge */
5778 error = xfs_bmbt_lookup_eq(cur, got, &i);
5779 if (error)
5780 return error;
5781 if (XFS_IS_CORRUPT(mp, i != 1)) {
5782 xfs_btree_mark_sick(cur);
5783 return -EFSCORRUPTED;
5784 }
5785
5786 error = xfs_btree_delete(cur, &i);
5787 if (error)
5788 return error;
5789 if (XFS_IS_CORRUPT(mp, i != 1)) {
5790 xfs_btree_mark_sick(cur);
5791 return -EFSCORRUPTED;
5792 }
5793
5794 /* lookup and update size of the previous extent */
5795 error = xfs_bmbt_lookup_eq(cur, left, &i);
5796 if (error)
5797 return error;
5798 if (XFS_IS_CORRUPT(mp, i != 1)) {
5799 xfs_btree_mark_sick(cur);
5800 return -EFSCORRUPTED;
5801 }
5802
5803 error = xfs_bmbt_update(cur, &new);
5804 if (error)
5805 return error;
5806
5807 /* change to extent format if required after extent removal */
5808 error = xfs_bmap_btree_to_extents(tp, ip, cur, logflags, whichfork);
5809 if (error)
5810 return error;
5811
5812 done:
5813 xfs_iext_remove(ip, icur, 0);
5814 xfs_iext_prev(ifp, icur);
5815 xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
5816 &new);
5817
5818 /* update reverse mapping. rmap functions merge the rmaps for us */
5819 xfs_rmap_unmap_extent(tp, ip, whichfork, got);
5820 memcpy(&new, got, sizeof(new));
5821 new.br_startoff = left->br_startoff + left->br_blockcount;
5822 xfs_rmap_map_extent(tp, ip, whichfork, &new);
5823 return 0;
5824 }
5825
5826 static int
5827 xfs_bmap_shift_update_extent(
5828 struct xfs_trans *tp,
5829 struct xfs_inode *ip,
5830 int whichfork,
5831 struct xfs_iext_cursor *icur,
5832 struct xfs_bmbt_irec *got,
5833 struct xfs_btree_cur *cur,
5834 int *logflags,
5835 xfs_fileoff_t startoff)
5836 {
5837 struct xfs_mount *mp = ip->i_mount;
5838 struct xfs_bmbt_irec prev = *got;
5839 int error, i;
5840
5841 *logflags |= XFS_ILOG_CORE;
5842
5843 got->br_startoff = startoff;
5844
5845 if (cur) {
5846 error = xfs_bmbt_lookup_eq(cur, &prev, &i);
5847 if (error)
5848 return error;
5849 if (XFS_IS_CORRUPT(mp, i != 1)) {
5850 xfs_btree_mark_sick(cur);
5851 return -EFSCORRUPTED;
5852 }
5853
5854 error = xfs_bmbt_update(cur, got);
5855 if (error)
5856 return error;
5857 } else {
5858 *logflags |= XFS_ILOG_DEXT;
5859 }
5860
5861 xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
5862 got);
5863
5864 /* update reverse mapping */
5865 xfs_rmap_unmap_extent(tp, ip, whichfork, &prev);
5866 xfs_rmap_map_extent(tp, ip, whichfork, got);
5867 return 0;
5868 }
5869
5870 int
5871 xfs_bmap_collapse_extents(
5872 struct xfs_trans *tp,
5873 struct xfs_inode *ip,
5874 xfs_fileoff_t *next_fsb,
5875 xfs_fileoff_t offset_shift_fsb,
5876 bool *done)
5877 {
5878 int whichfork = XFS_DATA_FORK;
5879 struct xfs_mount *mp = ip->i_mount;
5880 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
5881 struct xfs_btree_cur *cur = NULL;
5882 struct xfs_bmbt_irec got, prev;
5883 struct xfs_iext_cursor icur;
5884 xfs_fileoff_t new_startoff;
5885 int error = 0;
5886 int logflags = 0;
5887
5888 if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
5889 XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
5890 xfs_bmap_mark_sick(ip, whichfork);
5891 return -EFSCORRUPTED;
5892 }
5893
5894 if (xfs_is_shutdown(mp))
5895 return -EIO;
5896
5897 xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
5898
5899 error = xfs_iread_extents(tp, ip, whichfork);
5900 if (error)
5901 return error;
5902
5903 if (ifp->if_format == XFS_DINODE_FMT_BTREE)
5904 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5905
5906 if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
5907 *done = true;
5908 goto del_cursor;
5909 }
5910 if (XFS_IS_CORRUPT(mp, isnullstartblock(got.br_startblock))) {
5911 xfs_bmap_mark_sick(ip, whichfork);
5912 error = -EFSCORRUPTED;
5913 goto del_cursor;
5914 }
5915
5916 new_startoff = got.br_startoff - offset_shift_fsb;
5917 if (xfs_iext_peek_prev_extent(ifp, &icur, &prev)) {
5918 if (new_startoff < prev.br_startoff + prev.br_blockcount) {
5919 error = -EINVAL;
5920 goto del_cursor;
5921 }
5922
5923 if (xfs_bmse_can_merge(&prev, &got, offset_shift_fsb)) {
5924 error = xfs_bmse_merge(tp, ip, whichfork,
5925 offset_shift_fsb, &icur, &got, &prev,
5926 cur, &logflags);
5927 if (error)
5928 goto del_cursor;
5929 goto done;
5930 }
5931 } else {
5932 if (got.br_startoff < offset_shift_fsb) {
5933 error = -EINVAL;
5934 goto del_cursor;
5935 }
5936 }
5937
5938 error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
5939 cur, &logflags, new_startoff);
5940 if (error)
5941 goto del_cursor;
5942
5943 done:
5944 if (!xfs_iext_next_extent(ifp, &icur, &got)) {
5945 *done = true;
5946 goto del_cursor;
5947 }
5948
5949 *next_fsb = got.br_startoff;
5950 del_cursor:
5951 if (cur)
5952 xfs_btree_del_cursor(cur, error);
5953 if (logflags)
5954 xfs_trans_log_inode(tp, ip, logflags);
5955 return error;
5956 }
5957
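/*
 * Example (illustrative sketch): a collapse is driven by calling
 * xfs_bmap_collapse_extents() repeatedly until it reports *done, rolling
 * the transaction between shifts.  This is loosely modeled on
 * xfs_collapse_file_space(); the locals are hypothetical and error
 * handling is abbreviated:
 *
 *	xfs_fileoff_t next_fsb = start_fsb;
 *	bool done = false;
 *
 *	while (!done && !error) {
 *		error = xfs_bmap_collapse_extents(tp, ip, &next_fsb,
 *				shift_fsb, &done);
 *		if (!error)
 *			error = xfs_trans_roll_inode(&tp, ip);
 *	}
 */
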
5958 /* Make sure we won't be right-shifting an extent past the maximum bound. */
5959 int
5960 xfs_bmap_can_insert_extents(
5961 struct xfs_inode *ip,
5962 xfs_fileoff_t off,
5963 xfs_fileoff_t shift)
5964 {
5965 struct xfs_bmbt_irec got;
5966 int is_empty;
5967 int error = 0;
5968
5969 xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL);
5970
5971 if (xfs_is_shutdown(ip->i_mount))
5972 return -EIO;
5973
5974 xfs_ilock(ip, XFS_ILOCK_EXCL);
5975 error = xfs_bmap_last_extent(NULL, ip, XFS_DATA_FORK, &got, &is_empty);
5976 if (!error && !is_empty && got.br_startoff >= off &&
5977 ((got.br_startoff + shift) & BMBT_STARTOFF_MASK) < got.br_startoff)
5978 error = -EINVAL;
5979 xfs_iunlock(ip, XFS_ILOCK_EXCL);
5980
5981 return error;
5982 }
5983
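/*
 * Worked example for the check above (illustrative): the on-disk bmbt
 * startoff field is 54 bits wide, so BMBT_STARTOFF_MASK is a 54-bit mask.
 * If the last extent starts at (1ULL << 54) - 8 and the shift is 16, the
 * masked sum wraps around to 8, which is less than the current startoff,
 * so the right shift is rejected with -EINVAL instead of silently
 * truncating the file offset.
 */
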
5984 int
5985 xfs_bmap_insert_extents(
5986 struct xfs_trans *tp,
5987 struct xfs_inode *ip,
5988 xfs_fileoff_t *next_fsb,
5989 xfs_fileoff_t offset_shift_fsb,
5990 bool *done,
5991 xfs_fileoff_t stop_fsb)
5992 {
5993 int whichfork = XFS_DATA_FORK;
5994 struct xfs_mount *mp = ip->i_mount;
5995 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
5996 struct xfs_btree_cur *cur = NULL;
5997 struct xfs_bmbt_irec got, next;
5998 struct xfs_iext_cursor icur;
5999 xfs_fileoff_t new_startoff;
6000 int error = 0;
6001 int logflags = 0;
6002
6003 if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
6004 XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
6005 xfs_bmap_mark_sick(ip, whichfork);
6006 return -EFSCORRUPTED;
6007 }
6008
6009 if (xfs_is_shutdown(mp))
6010 return -EIO;
6011
6012 xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
6013
6014 error = xfs_iread_extents(tp, ip, whichfork);
6015 if (error)
6016 return error;
6017
6018 if (ifp->if_format == XFS_DINODE_FMT_BTREE)
6019 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
6020
6021 if (*next_fsb == NULLFSBLOCK) {
6022 xfs_iext_last(ifp, &icur);
6023 if (!xfs_iext_get_extent(ifp, &icur, &got) ||
6024 stop_fsb > got.br_startoff) {
6025 *done = true;
6026 goto del_cursor;
6027 }
6028 } else {
6029 if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
6030 *done = true;
6031 goto del_cursor;
6032 }
6033 }
6034 if (XFS_IS_CORRUPT(mp, isnullstartblock(got.br_startblock))) {
6035 xfs_bmap_mark_sick(ip, whichfork);
6036 error = -EFSCORRUPTED;
6037 goto del_cursor;
6038 }
6039
6040 if (XFS_IS_CORRUPT(mp, stop_fsb > got.br_startoff)) {
6041 xfs_bmap_mark_sick(ip, whichfork);
6042 error = -EFSCORRUPTED;
6043 goto del_cursor;
6044 }
6045
6046 new_startoff = got.br_startoff + offset_shift_fsb;
6047 if (xfs_iext_peek_next_extent(ifp, &icur, &next)) {
6048 if (new_startoff + got.br_blockcount > next.br_startoff) {
6049 error = -EINVAL;
6050 goto del_cursor;
6051 }
6052
6053 /*
6054 * Unlike a left shift (which involves a hole punch), a right
6055 * shift does not modify extent neighbors in any way. We should
6056 		 * never find mergeable extents in this scenario. Check anyway
6057 * and warn if we encounter two extents that could be one.
6058 */
6059 if (xfs_bmse_can_merge(&got, &next, offset_shift_fsb))
6060 WARN_ON_ONCE(1);
6061 }
6062
6063 error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
6064 cur, &logflags, new_startoff);
6065 if (error)
6066 goto del_cursor;
6067
6068 if (!xfs_iext_prev_extent(ifp, &icur, &got) ||
6069 stop_fsb >= got.br_startoff + got.br_blockcount) {
6070 *done = true;
6071 goto del_cursor;
6072 }
6073
6074 *next_fsb = got.br_startoff;
6075 del_cursor:
6076 if (cur)
6077 xfs_btree_del_cursor(cur, error);
6078 if (logflags)
6079 xfs_trans_log_inode(tp, ip, logflags);
6080 return error;
6081 }
6082
6083 /*
6084  * Split an extent into two extents at split_fsb, such that split_fsb
6085  * becomes the first block of the new extent.  @split_fsb is the block at
6086  * which the extent is split.  If split_fsb lies in a hole or at the first
6087  * block of an extent, there is nothing to split, so just return 0.
6088 */
6089 int
6090 xfs_bmap_split_extent(
6091 struct xfs_trans *tp,
6092 struct xfs_inode *ip,
6093 xfs_fileoff_t split_fsb)
6094 {
6095 int whichfork = XFS_DATA_FORK;
6096 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
6097 struct xfs_btree_cur *cur = NULL;
6098 struct xfs_bmbt_irec got;
6099 struct xfs_bmbt_irec new; /* split extent */
6100 struct xfs_mount *mp = ip->i_mount;
6101 xfs_fsblock_t gotblkcnt; /* new block count for got */
6102 struct xfs_iext_cursor icur;
6103 int error = 0;
6104 int logflags = 0;
6105 int i = 0;
6106
6107 if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
6108 XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
6109 xfs_bmap_mark_sick(ip, whichfork);
6110 return -EFSCORRUPTED;
6111 }
6112
6113 if (xfs_is_shutdown(mp))
6114 return -EIO;
6115
6116 /* Read in all the extents */
6117 error = xfs_iread_extents(tp, ip, whichfork);
6118 if (error)
6119 return error;
6120
6121 /*
6122 	 * If there are no extents, or split_fsb lies in a hole or at the
6123 	 * start of an extent, we are done.
6123 */
6124 if (!xfs_iext_lookup_extent(ip, ifp, split_fsb, &icur, &got) ||
6125 got.br_startoff >= split_fsb)
6126 return 0;
6127
6128 gotblkcnt = split_fsb - got.br_startoff;
6129 new.br_startoff = split_fsb;
6130 new.br_startblock = got.br_startblock + gotblkcnt;
6131 new.br_blockcount = got.br_blockcount - gotblkcnt;
6132 new.br_state = got.br_state;
6133
6134 if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
6135 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
6136 error = xfs_bmbt_lookup_eq(cur, &got, &i);
6137 if (error)
6138 goto del_cursor;
6139 if (XFS_IS_CORRUPT(mp, i != 1)) {
6140 xfs_btree_mark_sick(cur);
6141 error = -EFSCORRUPTED;
6142 goto del_cursor;
6143 }
6144 }
6145
6146 got.br_blockcount = gotblkcnt;
6147 xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), &icur,
6148 &got);
6149
6150 logflags = XFS_ILOG_CORE;
6151 if (cur) {
6152 error = xfs_bmbt_update(cur, &got);
6153 if (error)
6154 goto del_cursor;
6155 } else
6156 logflags |= XFS_ILOG_DEXT;
6157
6158 /* Add new extent */
6159 xfs_iext_next(ifp, &icur);
6160 xfs_iext_insert(ip, &icur, &new, 0);
6161 ifp->if_nextents++;
6162
6163 if (cur) {
6164 error = xfs_bmbt_lookup_eq(cur, &new, &i);
6165 if (error)
6166 goto del_cursor;
6167 if (XFS_IS_CORRUPT(mp, i != 0)) {
6168 xfs_btree_mark_sick(cur);
6169 error = -EFSCORRUPTED;
6170 goto del_cursor;
6171 }
6172 error = xfs_btree_insert(cur, &i);
6173 if (error)
6174 goto del_cursor;
6175 if (XFS_IS_CORRUPT(mp, i != 1)) {
6176 xfs_btree_mark_sick(cur);
6177 error = -EFSCORRUPTED;
6178 goto del_cursor;
6179 }
6180 }
6181
6182 /*
6183 * Convert to a btree if necessary.
6184 */
6185 if (xfs_bmap_needs_btree(ip, whichfork)) {
6186 int tmp_logflags; /* partial log flag return val */
6187
6188 ASSERT(cur == NULL);
6189 error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
6190 &tmp_logflags, whichfork);
6191 logflags |= tmp_logflags;
6192 }
6193
6194 del_cursor:
6195 if (cur) {
6196 cur->bc_bmap.allocated = 0;
6197 xfs_btree_del_cursor(cur, error);
6198 }
6199
6200 if (logflags)
6201 xfs_trans_log_inode(tp, ip, logflags);
6202 return error;
6203 }
6204
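/*
 * Worked example (illustrative, made-up numbers): splitting
 * got = [startoff 10, startblock 200, 20 blocks] at split_fsb = 15 gives
 * gotblkcnt = 5; got is trimmed to [10, 200, 5 blocks] and the new record
 * becomes [15, 205, 15 blocks], so the block mapping is unchanged but the
 * fork now carries one more extent record.
 */
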
6205 /* Record a bmap intent. */
6206 static inline void
6207 __xfs_bmap_add(
6208 struct xfs_trans *tp,
6209 enum xfs_bmap_intent_type type,
6210 struct xfs_inode *ip,
6211 int whichfork,
6212 struct xfs_bmbt_irec *bmap)
6213 {
6214 struct xfs_bmap_intent *bi;
6215
6216 if ((whichfork != XFS_DATA_FORK && whichfork != XFS_ATTR_FORK) ||
6217 bmap->br_startblock == HOLESTARTBLOCK ||
6218 bmap->br_startblock == DELAYSTARTBLOCK)
6219 return;
6220
6221 bi = kmem_cache_alloc(xfs_bmap_intent_cache, GFP_KERNEL | __GFP_NOFAIL);
6222 INIT_LIST_HEAD(&bi->bi_list);
6223 bi->bi_type = type;
6224 bi->bi_owner = ip;
6225 bi->bi_whichfork = whichfork;
6226 bi->bi_bmap = *bmap;
6227
6228 xfs_bmap_defer_add(tp, bi);
6229 }
6230
6231 /* Map an extent into a file. */
6232 void
6233 xfs_bmap_map_extent(
6234 struct xfs_trans *tp,
6235 struct xfs_inode *ip,
6236 int whichfork,
6237 struct xfs_bmbt_irec *PREV)
6238 {
6239 __xfs_bmap_add(tp, XFS_BMAP_MAP, ip, whichfork, PREV);
6240 }
6241
6242 /* Unmap an extent out of a file. */
6243 void
6244 xfs_bmap_unmap_extent(
6245 struct xfs_trans *tp,
6246 struct xfs_inode *ip,
6247 int whichfork,
6248 struct xfs_bmbt_irec *PREV)
6249 {
6250 __xfs_bmap_add(tp, XFS_BMAP_UNMAP, ip, whichfork, PREV);
6251 }
6252
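/*
 * Example (illustrative sketch): a typical deferred-mapping sequence logs
 * an intent via the helpers above and lets the defer machinery call
 * xfs_bmap_finish_one() below when the transaction chain is finished:
 *
 *	xfs_bmap_unmap_extent(tp, ip, XFS_DATA_FORK, &irec);
 *	error = xfs_defer_finish(&tp);
 */
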
6253 /*
6254  * Process one of the deferred bmap operations.
6256 */
6257 int
6258 xfs_bmap_finish_one(
6259 struct xfs_trans *tp,
6260 struct xfs_bmap_intent *bi)
6261 {
6262 struct xfs_bmbt_irec *bmap = &bi->bi_bmap;
6263 int error = 0;
6264 int flags = 0;
6265
6266 if (bi->bi_whichfork == XFS_ATTR_FORK)
6267 flags |= XFS_BMAPI_ATTRFORK;
6268
6269 ASSERT(tp->t_highest_agno == NULLAGNUMBER);
6270
6271 trace_xfs_bmap_deferred(bi);
6272
6273 if (XFS_TEST_ERROR(false, tp->t_mountp, XFS_ERRTAG_BMAP_FINISH_ONE))
6274 return -EIO;
6275
6276 switch (bi->bi_type) {
6277 case XFS_BMAP_MAP:
6278 if (bi->bi_bmap.br_state == XFS_EXT_UNWRITTEN)
6279 flags |= XFS_BMAPI_PREALLOC;
6280 error = xfs_bmapi_remap(tp, bi->bi_owner, bmap->br_startoff,
6281 bmap->br_blockcount, bmap->br_startblock,
6282 flags);
6283 bmap->br_blockcount = 0;
6284 break;
6285 case XFS_BMAP_UNMAP:
6286 error = __xfs_bunmapi(tp, bi->bi_owner, bmap->br_startoff,
6287 &bmap->br_blockcount, flags | XFS_BMAPI_REMAP,
6288 1);
6289 break;
6290 default:
6291 ASSERT(0);
6292 xfs_bmap_mark_sick(bi->bi_owner, bi->bi_whichfork);
6293 error = -EFSCORRUPTED;
6294 }
6295
6296 return error;
6297 }
6298
6299 /* Check that an extent does not have invalid flags or bad ranges. */
6300 xfs_failaddr_t
6301 xfs_bmap_validate_extent_raw(
6302 struct xfs_mount *mp,
6303 bool rtfile,
6304 int whichfork,
6305 struct xfs_bmbt_irec *irec)
6306 {
6307 if (!xfs_verify_fileext(mp, irec->br_startoff, irec->br_blockcount))
6308 return __this_address;
6309
6310 if (rtfile && whichfork == XFS_DATA_FORK) {
6311 if (!xfs_verify_rtbext(mp, irec->br_startblock,
6312 irec->br_blockcount))
6313 return __this_address;
6314 } else {
6315 if (!xfs_verify_fsbext(mp, irec->br_startblock,
6316 irec->br_blockcount))
6317 return __this_address;
6318 }
6319 if (irec->br_state != XFS_EXT_NORM && whichfork != XFS_DATA_FORK)
6320 return __this_address;
6321 return NULL;
6322 }
6323
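/*
 * Example (illustrative): an attr-fork record with br_state ==
 * XFS_EXT_UNWRITTEN fails the final check above, since only data-fork
 * extents may be unwritten; the returned failure address is then reported
 * via helpers such as xfs_bmap_complain_bad_rec().
 */
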
6324 int __init
6325 xfs_bmap_intent_init_cache(void)
6326 {
6327 xfs_bmap_intent_cache = kmem_cache_create("xfs_bmap_intent",
6328 sizeof(struct xfs_bmap_intent),
6329 0, 0, NULL);
6330
6331 return xfs_bmap_intent_cache != NULL ? 0 : -ENOMEM;
6332 }
6333
6334 void
6335 xfs_bmap_intent_destroy_cache(void)
6336 {
6337 kmem_cache_destroy(xfs_bmap_intent_cache);
6338 xfs_bmap_intent_cache = NULL;
6339 }
6340
6341 /* Check that an inode's extent does not have invalid flags or bad ranges. */
6342 xfs_failaddr_t
6343 xfs_bmap_validate_extent(
6344 struct xfs_inode *ip,
6345 int whichfork,
6346 struct xfs_bmbt_irec *irec)
6347 {
6348 return xfs_bmap_validate_extent_raw(ip->i_mount,
6349 XFS_IS_REALTIME_INODE(ip), whichfork, irec);
6350 }
6351
6352 /*
6353 * Used in xfs_itruncate_extents(). This is the maximum number of extents
6354 * freed from a file in a single transaction.
6355 */
6356 #define XFS_ITRUNC_MAX_EXTENTS 2
6357
6358 /*
6359 * Unmap every extent in part of an inode's fork. We don't do any higher level
6360 * invalidation work at all.
6361 */
6362 int
6363 xfs_bunmapi_range(
6364 struct xfs_trans **tpp,
6365 struct xfs_inode *ip,
6366 uint32_t flags,
6367 xfs_fileoff_t startoff,
6368 xfs_fileoff_t endoff)
6369 {
6370 xfs_filblks_t unmap_len = endoff - startoff + 1;
6371 int error = 0;
6372
6373 xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
6374
6375 while (unmap_len > 0) {
6376 ASSERT((*tpp)->t_highest_agno == NULLAGNUMBER);
6377 error = __xfs_bunmapi(*tpp, ip, startoff, &unmap_len, flags,
6378 XFS_ITRUNC_MAX_EXTENTS);
6379 if (error)
6380 goto out;
6381
6382 /* free the just unmapped extents */
6383 error = xfs_defer_finish(tpp);
6384 if (error)
6385 goto out;
6386 cond_resched();
6387 }
6388 out:
6389 return error;
6390 }
6391
6392 struct xfs_bmap_query_range {
6393 xfs_bmap_query_range_fn fn;
6394 void *priv;
6395 };
6396
6397 /* Format btree record and pass to our callback. */
6398 STATIC int
6399 xfs_bmap_query_range_helper(
6400 struct xfs_btree_cur *cur,
6401 const union xfs_btree_rec *rec,
6402 void *priv)
6403 {
6404 struct xfs_bmap_query_range *query = priv;
6405 struct xfs_bmbt_irec irec;
6406 xfs_failaddr_t fa;
6407
6408 xfs_bmbt_disk_get_all(&rec->bmbt, &irec);
6409 fa = xfs_bmap_validate_extent(cur->bc_ino.ip, cur->bc_ino.whichfork,
6410 &irec);
6411 if (fa) {
6412 xfs_btree_mark_sick(cur);
6413 return xfs_bmap_complain_bad_rec(cur->bc_ino.ip,
6414 cur->bc_ino.whichfork, fa, &irec);
6415 }
6416
6417 return query->fn(cur, &irec, query->priv);
6418 }
6419
6420 /* Find all bmaps. */
6421 int
6422 xfs_bmap_query_all(
6423 struct xfs_btree_cur *cur,
6424 xfs_bmap_query_range_fn fn,
6425 void *priv)
6426 {
6427 struct xfs_bmap_query_range query = {
6428 .priv = priv,
6429 .fn = fn,
6430 };
6431
6432 return xfs_btree_query_all(cur, xfs_bmap_query_range_helper, &query);
6433 }
6434
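/*
 * Example (illustrative sketch): a callback for xfs_bmap_query_all() that
 * counts written extents.  The names count_written and nr are hypothetical,
 * not part of this file:
 *
 *	static int
 *	count_written(struct xfs_btree_cur *cur, struct xfs_bmbt_irec *irec,
 *			void *priv)
 *	{
 *		uint64_t *nr = priv;
 *
 *		if (irec->br_state == XFS_EXT_NORM)
 *			(*nr)++;
 *		return 0;
 *	}
 *
 *	uint64_t nr = 0;
 *	error = xfs_bmap_query_all(cur, count_written, &nr);
 */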