xref: /linux/fs/xfs/xfs_bmap_item.c (revision 021bc4b9)
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_shared.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_bmap_item.h"
#include "xfs_log.h"
#include "xfs_bmap.h"
#include "xfs_icache.h"
#include "xfs_bmap_btree.h"
#include "xfs_trans_space.h"
#include "xfs_error.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_ag.h"

struct kmem_cache	*xfs_bui_cache;
struct kmem_cache	*xfs_bud_cache;

static const struct xfs_item_ops xfs_bui_item_ops;

static inline struct xfs_bui_log_item *BUI_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_bui_log_item, bui_item);
}

STATIC void
xfs_bui_item_free(
	struct xfs_bui_log_item	*buip)
{
	kmem_free(buip->bui_item.li_lv_shadow);
	kmem_cache_free(xfs_bui_cache, buip);
}

/*
 * Freeing the BUI requires that we remove it from the AIL if it has already
 * been placed there. However, the BUI may not yet have been placed in the AIL
 * when xfs_bui_release() is called from BUD processing, due to the ordering of
 * committed vs. unpin operations in bulk insert operations. Hence the
 * reference count, to ensure that only the last caller frees the BUI.
 */
STATIC void
xfs_bui_release(
	struct xfs_bui_log_item	*buip)
{
	ASSERT(atomic_read(&buip->bui_refcount) > 0);
	if (!atomic_dec_and_test(&buip->bui_refcount))
		return;

	xfs_trans_ail_delete(&buip->bui_item, 0);
	xfs_bui_item_free(buip);
}
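
/*
 * Reference-count sketch (illustrative only, not code from this file): a BUI
 * is born in xfs_bui_init() with a count of 2.  One reference belongs to the
 * log and is dropped at unpin time; the other is handed to the BUD and is
 * dropped when the BUD commits or aborts:
 *
 *	buip = xfs_bui_init(mp);		// bui_refcount == 2
 *	xfs_bui_item_unpin(lip, 0);		// log's ref, count -> 1
 *	xfs_bud_item_release(bud_lip);		// BUD's ref, count -> 0, freed
 *
 * Whichever caller drops the last reference also removes the BUI from the
 * AIL before freeing it.
 */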

STATIC void
xfs_bui_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_bui_log_item	*buip = BUI_ITEM(lip);

	*nvecs += 1;
	*nbytes += xfs_bui_log_format_sizeof(buip->bui_format.bui_nextents);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given bui log item. We use only 1 iovec, and we point that
 * at the bui_log_format structure embedded in the bui item.
 * It is at this point that we assert that all of the extent
 * slots in the bui item have been filled.
 */
STATIC void
xfs_bui_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_bui_log_item	*buip = BUI_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	ASSERT(atomic_read(&buip->bui_next_extent) ==
			buip->bui_format.bui_nextents);

	buip->bui_format.bui_type = XFS_LI_BUI;
	buip->bui_format.bui_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_BUI_FORMAT, &buip->bui_format,
			xfs_bui_log_format_sizeof(buip->bui_format.bui_nextents));
}

/*
 * The unpin operation is the last place a BUI is manipulated in the log. It is
 * either inserted in the AIL or aborted in the event of a log I/O error. In
 * either case, the BUI transaction has been successfully committed to make it
 * this far. Therefore, we expect whoever committed the BUI to either construct
 * and commit the BUD or drop the BUD's reference in the event of error. Simply
 * drop the log's BUI reference now that the log is done with it.
 */
STATIC void
xfs_bui_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_bui_log_item	*buip = BUI_ITEM(lip);

	xfs_bui_release(buip);
}

/*
 * The BUI has been either committed or aborted if the transaction has been
 * cancelled. If the transaction was cancelled, a BUD isn't going to be
 * constructed and thus we free the BUI here directly.
 */
STATIC void
xfs_bui_item_release(
	struct xfs_log_item	*lip)
{
	xfs_bui_release(BUI_ITEM(lip));
}

/*
 * Allocate and initialize a bui item. The number of extents is fixed at
 * XFS_BUI_MAX_FAST_EXTENTS.
 */
STATIC struct xfs_bui_log_item *
xfs_bui_init(
	struct xfs_mount		*mp)
{
	struct xfs_bui_log_item		*buip;

	buip = kmem_cache_zalloc(xfs_bui_cache, GFP_KERNEL | __GFP_NOFAIL);

	xfs_log_item_init(mp, &buip->bui_item, XFS_LI_BUI, &xfs_bui_item_ops);
	buip->bui_format.bui_nextents = XFS_BUI_MAX_FAST_EXTENTS;
	buip->bui_format.bui_id = (uintptr_t)(void *)buip;
	atomic_set(&buip->bui_next_extent, 0);
	atomic_set(&buip->bui_refcount, 2);

	return buip;
}
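
/*
 * Editorial note: bui_id above is just the BUI's kernel address reused as a
 * 64-bit identifier.  It only has to be unique for the lifetime of the log
 * item, because log recovery reunites a BUD with its BUI by comparing the
 * BUD's id against this value (see xfs_bui_item_match() below), e.g.:
 *
 *	budp->bud_format.bud_bui_id = buip->bui_format.bui_id;
 */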

static inline struct xfs_bud_log_item *BUD_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_bud_log_item, bud_item);
}

STATIC void
xfs_bud_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	*nvecs += 1;
	*nbytes += sizeof(struct xfs_bud_log_format);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given bud log item. We use only 1 iovec, and we point that
 * at the bud_log_format structure embedded in the bud item.
 * Unlike the BUI, the BUD carries no extent array, so there
 * are no slots to check here.
 */
STATIC void
xfs_bud_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_bud_log_item	*budp = BUD_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	budp->bud_format.bud_type = XFS_LI_BUD;
	budp->bud_format.bud_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_BUD_FORMAT, &budp->bud_format,
			sizeof(struct xfs_bud_log_format));
}

/*
 * The BUD is either committed or aborted if the transaction is cancelled. If
 * the transaction is cancelled, drop our reference to the BUI and free the
 * BUD.
 */
STATIC void
xfs_bud_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_bud_log_item	*budp = BUD_ITEM(lip);

	xfs_bui_release(budp->bud_buip);
	kmem_free(budp->bud_item.li_lv_shadow);
	kmem_cache_free(xfs_bud_cache, budp);
}

static struct xfs_log_item *
xfs_bud_item_intent(
	struct xfs_log_item	*lip)
{
	return &BUD_ITEM(lip)->bud_buip->bui_item;
}

static const struct xfs_item_ops xfs_bud_item_ops = {
	.flags		= XFS_ITEM_RELEASE_WHEN_COMMITTED |
			  XFS_ITEM_INTENT_DONE,
	.iop_size	= xfs_bud_item_size,
	.iop_format	= xfs_bud_item_format,
	.iop_release	= xfs_bud_item_release,
	.iop_intent	= xfs_bud_item_intent,
};

/* Sort bmap intents by inode. */
static int
xfs_bmap_update_diff_items(
	void				*priv,
	const struct list_head		*a,
	const struct list_head		*b)
{
	struct xfs_bmap_intent		*ba;
	struct xfs_bmap_intent		*bb;

	ba = container_of(a, struct xfs_bmap_intent, bi_list);
	bb = container_of(b, struct xfs_bmap_intent, bi_list);

	/*
	 * Compare explicitly: subtracting the two 64-bit inode numbers and
	 * truncating the difference to int could return the wrong sign.
	 */
	if (ba->bi_owner->i_ino < bb->bi_owner->i_ino)
		return -1;
	if (ba->bi_owner->i_ino > bb->bi_owner->i_ino)
		return 1;
	return 0;
}

/* Set the map extent flags for this mapping. */
static void
xfs_trans_set_bmap_flags(
	struct xfs_map_extent		*map,
	enum xfs_bmap_intent_type	type,
	int				whichfork,
	xfs_exntst_t			state)
{
	map->me_flags = 0;
	switch (type) {
	case XFS_BMAP_MAP:
	case XFS_BMAP_UNMAP:
		map->me_flags = type;
		break;
	default:
		ASSERT(0);
	}
	if (state == XFS_EXT_UNWRITTEN)
		map->me_flags |= XFS_BMAP_EXTENT_UNWRITTEN;
	if (whichfork == XFS_ATTR_FORK)
		map->me_flags |= XFS_BMAP_EXTENT_ATTR_FORK;
}
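
/*
 * Worked example (illustrative only): unmapping an unwritten extent from an
 * inode's attr fork would encode as
 *
 *	xfs_trans_set_bmap_flags(map, XFS_BMAP_UNMAP, XFS_ATTR_FORK,
 *			XFS_EXT_UNWRITTEN);
 *	// map->me_flags == XFS_BMAP_UNMAP | XFS_BMAP_EXTENT_UNWRITTEN |
 *	//		    XFS_BMAP_EXTENT_ATTR_FORK
 *
 * xfs_bui_validate() below performs the inverse checks when such a record is
 * read back from the log.
 */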

/* Log bmap updates in the intent item. */
STATIC void
xfs_bmap_update_log_item(
	struct xfs_trans		*tp,
	struct xfs_bui_log_item		*buip,
	struct xfs_bmap_intent		*bi)
{
	uint				next_extent;
	struct xfs_map_extent		*map;

	/*
	 * atomic_inc_return gives us the value after the increment;
	 * we want to use it as an array index so we need to subtract 1 from
	 * it.
	 */
	next_extent = atomic_inc_return(&buip->bui_next_extent) - 1;
	ASSERT(next_extent < buip->bui_format.bui_nextents);
	map = &buip->bui_format.bui_extents[next_extent];
	map->me_owner = bi->bi_owner->i_ino;
	map->me_startblock = bi->bi_bmap.br_startblock;
	map->me_startoff = bi->bi_bmap.br_startoff;
	map->me_len = bi->bi_bmap.br_blockcount;
	xfs_trans_set_bmap_flags(map, bi->bi_type, bi->bi_whichfork,
			bi->bi_bmap.br_state);
}

static struct xfs_log_item *
xfs_bmap_update_create_intent(
	struct xfs_trans		*tp,
	struct list_head		*items,
	unsigned int			count,
	bool				sort)
{
	struct xfs_mount		*mp = tp->t_mountp;
	struct xfs_bui_log_item		*buip = xfs_bui_init(mp);
	struct xfs_bmap_intent		*bi;

	ASSERT(count == XFS_BUI_MAX_FAST_EXTENTS);

	if (sort)
		list_sort(mp, items, xfs_bmap_update_diff_items);
	list_for_each_entry(bi, items, bi_list)
		xfs_bmap_update_log_item(tp, buip, bi);
	return &buip->bui_item;
}

/* Get a BUD so we can process all the deferred bmap updates. */
static struct xfs_log_item *
xfs_bmap_update_create_done(
	struct xfs_trans		*tp,
	struct xfs_log_item		*intent,
	unsigned int			count)
{
	struct xfs_bui_log_item		*buip = BUI_ITEM(intent);
	struct xfs_bud_log_item		*budp;

	budp = kmem_cache_zalloc(xfs_bud_cache, GFP_KERNEL | __GFP_NOFAIL);
	xfs_log_item_init(tp->t_mountp, &budp->bud_item, XFS_LI_BUD,
			  &xfs_bud_item_ops);
	budp->bud_buip = buip;
	budp->bud_format.bud_bui_id = buip->bui_format.bui_id;

	return &budp->bud_item;
}

/* Take a passive ref to the AG containing the space we're mapping. */
void
xfs_bmap_update_get_group(
	struct xfs_mount	*mp,
	struct xfs_bmap_intent	*bi)
{
	xfs_agnumber_t		agno;

	agno = XFS_FSB_TO_AGNO(mp, bi->bi_bmap.br_startblock);

	/*
	 * Bump the intent count on behalf of the deferred rmap and refcount
	 * intent items that we can queue when we finish this bmap work.
	 * This new intent item will bump the intent count before the bmap
	 * intent drops the intent count, ensuring that the intent count
	 * remains nonzero across the transaction roll.
	 */
	bi->bi_pag = xfs_perag_intent_get(mp, agno);
}

/* Release a passive AG ref after finishing mapping work. */
static inline void
xfs_bmap_update_put_group(
	struct xfs_bmap_intent	*bi)
{
	xfs_perag_intent_put(bi->bi_pag);
}

/* Process a deferred bmap update. */
STATIC int
xfs_bmap_update_finish_item(
	struct xfs_trans		*tp,
	struct xfs_log_item		*done,
	struct list_head		*item,
	struct xfs_btree_cur		**state)
{
	struct xfs_bmap_intent		*bi;
	int				error;

	bi = container_of(item, struct xfs_bmap_intent, bi_list);

	error = xfs_bmap_finish_one(tp, bi);
	if (!error && bi->bi_bmap.br_blockcount > 0) {
		ASSERT(bi->bi_type == XFS_BMAP_UNMAP);
		return -EAGAIN;
	}

	xfs_bmap_update_put_group(bi);
	kmem_cache_free(xfs_bmap_intent_cache, bi);
	return error;
}
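
/*
 * Note on the -EAGAIN above (informal sketch of how the deferred-ops caller
 * is assumed to consume it): xfs_bmap_finish_one() may unmap only part of
 * the requested range in one transaction, leaving bi_bmap describing the
 * blocks still to be unmapped.  Returning -EAGAIN asks the defer machinery
 * to relog the remainder as a fresh intent and retry, conceptually:
 *
 *	do {
 *		error = xfs_bmap_update_finish_item(tp, done, item, state);
 *	} while (error == -EAGAIN);	// the real loop also rolls the
 *					// transaction on each pass
 */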

/* Abort all pending BUIs. */
STATIC void
xfs_bmap_update_abort_intent(
	struct xfs_log_item		*intent)
{
	xfs_bui_release(BUI_ITEM(intent));
}

/* Cancel a deferred bmap update. */
STATIC void
xfs_bmap_update_cancel_item(
	struct list_head		*item)
{
	struct xfs_bmap_intent		*bi;

	bi = container_of(item, struct xfs_bmap_intent, bi_list);

	xfs_bmap_update_put_group(bi);
	kmem_cache_free(xfs_bmap_intent_cache, bi);
}

/* Is this recovered BUI ok? */
static inline bool
xfs_bui_validate(
	struct xfs_mount		*mp,
	struct xfs_bui_log_item		*buip)
{
	struct xfs_map_extent		*map;

	/* Only one mapping operation per BUI... */
	if (buip->bui_format.bui_nextents != XFS_BUI_MAX_FAST_EXTENTS)
		return false;

	map = &buip->bui_format.bui_extents[0];

	if (map->me_flags & ~XFS_BMAP_EXTENT_FLAGS)
		return false;

	switch (map->me_flags & XFS_BMAP_EXTENT_TYPE_MASK) {
	case XFS_BMAP_MAP:
	case XFS_BMAP_UNMAP:
		break;
	default:
		return false;
	}

	if (!xfs_verify_ino(mp, map->me_owner))
		return false;

	if (!xfs_verify_fileext(mp, map->me_startoff, map->me_len))
		return false;

	return xfs_verify_fsbext(mp, map->me_startblock, map->me_len);
}

static inline struct xfs_bmap_intent *
xfs_bui_recover_work(
	struct xfs_mount		*mp,
	struct xfs_defer_pending	*dfp,
	struct xfs_inode		**ipp,
	struct xfs_map_extent		*map)
{
	struct xfs_bmap_intent		*bi;
	int				error;

	error = xlog_recover_iget(mp, map->me_owner, ipp);
	if (error)
		return ERR_PTR(error);

	bi = kmem_cache_zalloc(xfs_bmap_intent_cache, GFP_NOFS | __GFP_NOFAIL);
	bi->bi_whichfork = (map->me_flags & XFS_BMAP_EXTENT_ATTR_FORK) ?
			XFS_ATTR_FORK : XFS_DATA_FORK;
	bi->bi_type = map->me_flags & XFS_BMAP_EXTENT_TYPE_MASK;
	bi->bi_bmap.br_startblock = map->me_startblock;
	bi->bi_bmap.br_startoff = map->me_startoff;
	bi->bi_bmap.br_blockcount = map->me_len;
	bi->bi_bmap.br_state = (map->me_flags & XFS_BMAP_EXTENT_UNWRITTEN) ?
			XFS_EXT_UNWRITTEN : XFS_EXT_NORM;
	bi->bi_owner = *ipp;
	xfs_bmap_update_get_group(mp, bi);

	xfs_defer_add_item(dfp, &bi->bi_list);
	return bi;
}

/*
 * Process a bmap update intent item that was recovered from the log.
 * We need to update some inode's bmbt.
 */
STATIC int
xfs_bmap_recover_work(
	struct xfs_defer_pending	*dfp,
	struct list_head		*capture_list)
{
	struct xfs_trans_res		resv;
	struct xfs_log_item		*lip = dfp->dfp_intent;
	struct xfs_bui_log_item		*buip = BUI_ITEM(lip);
	struct xfs_trans		*tp;
	struct xfs_inode		*ip = NULL;
	struct xfs_mount		*mp = lip->li_log->l_mp;
	struct xfs_map_extent		*map;
	struct xfs_bmap_intent		*work;
	int				iext_delta;
	int				error = 0;

	if (!xfs_bui_validate(mp, buip)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				&buip->bui_format, sizeof(buip->bui_format));
		return -EFSCORRUPTED;
	}

	map = &buip->bui_format.bui_extents[0];
	work = xfs_bui_recover_work(mp, dfp, &ip, map);
	if (IS_ERR(work))
		return PTR_ERR(work);

	/* Allocate transaction and do the work. */
	resv = xlog_recover_resv(&M_RES(mp)->tr_itruncate);
	error = xfs_trans_alloc(mp, &resv,
			XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK), 0, 0, &tp);
	if (error)
		goto err_rele;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	if (work->bi_type == XFS_BMAP_MAP)
		iext_delta = XFS_IEXT_ADD_NOSPLIT_CNT;
	else
		iext_delta = XFS_IEXT_PUNCH_HOLE_CNT;

	error = xfs_iext_count_may_overflow(ip, work->bi_whichfork, iext_delta);
	if (error == -EFBIG)
		error = xfs_iext_count_upgrade(tp, ip, iext_delta);
	if (error)
		goto err_cancel;

	error = xlog_recover_finish_intent(tp, dfp);
	if (error == -EFSCORRUPTED)
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				&buip->bui_format, sizeof(buip->bui_format));
	if (error)
		goto err_cancel;

	/*
	 * Commit transaction, which frees the transaction and saves the inode
	 * for later replay activities.
	 */
	error = xfs_defer_ops_capture_and_commit(tp, capture_list);
	if (error)
		goto err_unlock;

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	xfs_irele(ip);
	return 0;

err_cancel:
	xfs_trans_cancel(tp);
err_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
err_rele:
	xfs_irele(ip);
	return error;
}

/* Relog an intent item to push the log tail forward. */
static struct xfs_log_item *
xfs_bmap_relog_intent(
	struct xfs_trans		*tp,
	struct xfs_log_item		*intent,
	struct xfs_log_item		*done_item)
{
	struct xfs_bui_log_item		*buip;
	struct xfs_map_extent		*map;
	unsigned int			count;

	count = BUI_ITEM(intent)->bui_format.bui_nextents;
	map = BUI_ITEM(intent)->bui_format.bui_extents;

	buip = xfs_bui_init(tp->t_mountp);
	memcpy(buip->bui_format.bui_extents, map, count * sizeof(*map));
	atomic_set(&buip->bui_next_extent, count);

	return &buip->bui_item;
}
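
/*
 * Editorial note: relogging exists because a long-unfinished BUI pins the
 * log tail at the LSN where it was first written.  The defer machinery
 * (assumed caller; not shown in this file) logs done_item against the old
 * intent and logs this freshly initialized copy in the current transaction,
 * so the original record no longer holds up the tail:
 *
 *	new_lip = xfs_bmap_relog_intent(tp, old_lip, done_lip);
 */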

const struct xfs_defer_op_type xfs_bmap_update_defer_type = {
	.name		= "bmap",
	.max_items	= XFS_BUI_MAX_FAST_EXTENTS,
	.create_intent	= xfs_bmap_update_create_intent,
	.abort_intent	= xfs_bmap_update_abort_intent,
	.create_done	= xfs_bmap_update_create_done,
	.finish_item	= xfs_bmap_update_finish_item,
	.cancel_item	= xfs_bmap_update_cancel_item,
	.recover_work	= xfs_bmap_recover_work,
	.relog_intent	= xfs_bmap_relog_intent,
};

STATIC bool
xfs_bui_item_match(
	struct xfs_log_item	*lip,
	uint64_t		intent_id)
{
	return BUI_ITEM(lip)->bui_format.bui_id == intent_id;
}

static const struct xfs_item_ops xfs_bui_item_ops = {
	.flags		= XFS_ITEM_INTENT,
	.iop_size	= xfs_bui_item_size,
	.iop_format	= xfs_bui_item_format,
	.iop_unpin	= xfs_bui_item_unpin,
	.iop_release	= xfs_bui_item_release,
	.iop_match	= xfs_bui_item_match,
};

static inline void
xfs_bui_copy_format(
	struct xfs_bui_log_format	*dst,
	const struct xfs_bui_log_format	*src)
{
	unsigned int			i;

	memcpy(dst, src, offsetof(struct xfs_bui_log_format, bui_extents));

	for (i = 0; i < src->bui_nextents; i++)
		memcpy(&dst->bui_extents[i], &src->bui_extents[i],
				sizeof(struct xfs_map_extent));
}

/*
 * This routine is called to create an in-core extent bmap update
 * item from the bui format structure which was logged on disk.
 * It allocates an in-core bui, copies the extents from the format
 * structure into it, and adds the bui to the AIL with the given
 * LSN.
 */
STATIC int
xlog_recover_bui_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_mount		*mp = log->l_mp;
	struct xfs_bui_log_item		*buip;
	struct xfs_bui_log_format	*bui_formatp;
	size_t				len;

	bui_formatp = item->ri_buf[0].i_addr;

	if (item->ri_buf[0].i_len < xfs_bui_log_format_sizeof(0)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	if (bui_formatp->bui_nextents != XFS_BUI_MAX_FAST_EXTENTS) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	len = xfs_bui_log_format_sizeof(bui_formatp->bui_nextents);
	if (item->ri_buf[0].i_len != len) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	buip = xfs_bui_init(mp);
	xfs_bui_copy_format(&buip->bui_format, bui_formatp);
	atomic_set(&buip->bui_next_extent, bui_formatp->bui_nextents);

	xlog_recover_intent_item(log, &buip->bui_item, lsn,
			&xfs_bmap_update_defer_type);
	return 0;
}
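
/*
 * Illustrative recovery pairing (not code from this file): pass 2 walks the
 * committed transactions in LSN order, so a BUI whose BUD also made it to
 * disk is created here and then immediately cancelled when the BUD is seen:
 *
 *	xlog_recover_bui_commit_pass2(...);	// BUI queued for replay
 *	xlog_recover_bud_commit_pass2(...);	// matches bud_bui_id against
 *						// bui_id and releases the BUI
 *
 * Only BUIs with no matching BUD survive to xfs_bmap_recover_work().
 */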

const struct xlog_recover_item_ops xlog_bui_item_ops = {
	.item_type		= XFS_LI_BUI,
	.commit_pass2		= xlog_recover_bui_commit_pass2,
};

/*
 * This routine is called when a BUD format structure is found in a committed
 * transaction in the log. Its purpose is to cancel the corresponding BUI if it
 * was still in the log. To do this it searches the AIL for the BUI with an id
 * equal to that in the BUD format structure. If we find it we drop the BUD
 * reference, which removes the BUI from the AIL and frees it.
 */
STATIC int
xlog_recover_bud_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_bud_log_format	*bud_formatp;

	bud_formatp = item->ri_buf[0].i_addr;
	if (item->ri_buf[0].i_len != sizeof(struct xfs_bud_log_format)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp,
				item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	xlog_recover_release_intent(log, XFS_LI_BUI, bud_formatp->bud_bui_id);
	return 0;
}

const struct xlog_recover_item_ops xlog_bud_item_ops = {
	.item_type		= XFS_LI_BUD,
	.commit_pass2		= xlog_recover_bud_commit_pass2,
};
693