// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_log.h"

/*
 * Deferred Operations in XFS
 *
 * Due to the way locking rules work in XFS, certain transactions (block
 * mapping and unmapping, typically) have permanent reservations so that
 * we can roll the transaction to adhere to AG locking order rules and
 * to unlock buffers between metadata updates.  Prior to rmap/reflink,
 * the mapping code had a mechanism to perform these deferrals for
 * extents that were going to be freed; this code makes that facility
 * more generic.
 *
 * When adding the reverse mapping and reflink features, it became
 * necessary to perform complex remapping multi-transactions to comply
 * with AG locking order rules, and to be able to spread a single
 * refcount update operation (an operation on an n-block extent can
 * update as many as n records!) among multiple transactions.  XFS can
 * roll a transaction to facilitate this, but using this facility
 * requires us to log "intent" items in case log recovery needs to
 * redo the operation, and to log "done" items to indicate that redo
 * is not necessary.
 *
 * Deferred work is tracked in xfs_defer_pending items.  Each pending
 * item tracks one type of deferred work.  Incoming work items (which
 * have not yet had an intent logged) are attached to a pending item
 * on the dop_intake list, where they wait for the caller to finish
 * the deferred operations.
 *
 * Finishing a set of deferred operations is an involved process.  To
 * start, we define "rolling a deferred-op transaction" as follows:
 *
 * > For each xfs_defer_pending item on the dop_intake list,
 *   - Sort the work items in AG order.  XFS locking
 *     order rules require us to lock buffers in AG order.
 *   - Create a log intent item for that type.
 *   - Attach it to the pending item.
 *   - Move the pending item from the dop_intake list to the
 *     dop_pending list.
 * > Roll the transaction.
 *
 * NOTE: To avoid exceeding the transaction reservation, we limit the
 * number of items that we attach to a given xfs_defer_pending.
 *
 * The actual finishing process looks like this:
 *
 * > For each xfs_defer_pending in the dop_pending list,
 *   - Roll the deferred-op transaction as above.
 *   - Create a log done item for that type, and attach it to the
 *     log intent item.
 *   - For each work item attached to the log intent item,
 *     * Perform the described action.
 *     * Attach the work item to the log done item.
 *     * If the result of doing the work was -EAGAIN, ->finish_item
 *       wants a new transaction.  See the "Requesting a Fresh
 *       Transaction while Finishing Deferred Work" section below for
 *       details.
 *
 * The key here is that we must log an intent item for all pending
 * work items every time we roll the transaction, and that we must log
 * a done item as soon as the work is completed.  With this mechanism
 * we can perform complex remapping operations, chaining intent items
 * as needed.
 *
 * Requesting a Fresh Transaction while Finishing Deferred Work
 *
 * If ->finish_item decides that it needs a fresh transaction to
 * finish the work, it must ask its caller (xfs_defer_finish) for a
 * continuation.  The most likely cause of this circumstance is the
 * refcount adjust functions deciding that they've logged enough items
 * to be at risk of exceeding the transaction reservation.
 *
 * To get a fresh transaction, we want to log the existing log done
 * item to prevent the log intent item from replaying, immediately log
 * a new log intent item with the unfinished work items, roll the
 * transaction, and re-call ->finish_item wherever it left off.  The
 * log done item and the new log intent item must be in the same
 * transaction or atomicity cannot be guaranteed; defer_finish ensures
 * that this happens.
 *
 * This requires some coordination between ->finish_item and
 * defer_finish.  Upon deciding to request a new transaction,
 * ->finish_item should update the current work item to reflect the
 * unfinished work.  Next, it should reset the log done item's list
 * count to the number of items finished, and return -EAGAIN.
 * defer_finish sees the -EAGAIN, logs the new log intent item
 * with the remaining work items, and leaves the xfs_defer_pending
 * item at the head of the dop_work queue.  Then it rolls the
 * transaction and picks up processing where it left off.
 * ->finish_item must take care to leave enough transaction
 * reservation to fit the new log intent item.
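 *
 * As a rough sketch of that contract (not lifted from any particular
 * ->finish_item implementation; the helper name is hypothetical), the
 * flow inside a ->finish_item that can run out of reservation is:
 *
 *	error = xfs_hypothetical_adjust(tp, item);
 *	if (error == -EAGAIN) {
 *		(trim item so it describes only the unfinished range)
 *		(shrink the done item's count to the work completed)
 *		return -EAGAIN;		(defer_finish relogs and rolls)
 *	}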
 *
 * This is an example of remapping the extent (E, E+B) into file X at
 * offset A and dealing with the extent (C, C+B) already being mapped
 * there:
 * +-------------------------------------------------+
 * | Unmap file X startblock C offset A length B     | t0
 * | Intent to reduce refcount for extent (C, B)     |
 * | Intent to remove rmap (X, C, A, B)              |
 * | Intent to free extent (D, 1) (bmbt block)       |
 * | Intent to map (X, A, B) at startblock E         |
 * +-------------------------------------------------+
 * | Map file X startblock E offset A length B       | t1
 * | Done mapping (X, E, A, B)                       |
 * | Intent to increase refcount for extent (E, B)   |
 * | Intent to add rmap (X, E, A, B)                 |
 * +-------------------------------------------------+
 * | Reduce refcount for extent (C, B)               | t2
 * | Done reducing refcount for extent (C, 9)        |
 * | Intent to reduce refcount for extent (C+9, B-9) |
 * | (ran out of space after 9 refcount updates)     |
 * +-------------------------------------------------+
 * | Reduce refcount for extent (C+9, B-9)           | t3
 * | Done reducing refcount for extent (C+9, B-9)    |
 * | Increase refcount for extent (E, B)             |
 * | Done increasing refcount for extent (E, B)      |
 * | Intent to free extent (C, B)                    |
 * | Intent to free extent (F, 1) (refcountbt block) |
 * | Intent to remove rmap (F, 1, REFC)              |
 * +-------------------------------------------------+
 * | Remove rmap (X, C, A, B)                        | t4
 * | Done removing rmap (X, C, A, B)                 |
 * | Add rmap (X, E, A, B)                           |
 * | Done adding rmap (X, E, A, B)                   |
 * | Remove rmap (F, 1, REFC)                        |
 * | Done removing rmap (F, 1, REFC)                 |
 * +-------------------------------------------------+
 * | Free extent (C, B)                              | t5
 * | Done freeing extent (C, B)                      |
 * | Free extent (D, 1)                              |
 * | Done freeing extent (D, 1)                      |
 * | Free extent (F, 1)                              |
 * | Done freeing extent (F, 1)                      |
 * +-------------------------------------------------+
 *
 * If we should crash before t2 commits, log recovery replays
 * the following intent items:
 *
 * - Intent to reduce refcount for extent (C, B)
 * - Intent to remove rmap (X, C, A, B)
 * - Intent to free extent (D, 1) (bmbt block)
 * - Intent to increase refcount for extent (E, B)
 * - Intent to add rmap (X, E, A, B)
 *
 * In the process of recovering, it should also generate and take care
 * of these intent items:
 *
 * - Intent to free extent (C, B)
 * - Intent to free extent (F, 1) (refcountbt block)
 * - Intent to remove rmap (F, 1, REFC)
 *
 * Note that the continuation requested between t2 and t3 is likely to
 * reoccur.
 */
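
/*
 * A minimal sketch of the caller-side lifecycle described above,
 * assuming a permanent-reservation transaction.  Real callers queue
 * work through type-specific helpers (e.g. __xfs_bmap_add_free())
 * rather than calling xfs_defer_add() directly, and "item" here is a
 * hypothetical work item embedding a list_head:
 *
 *	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
 *	if (error)
 *		return error;
 *	(make some metadata updates that queue deferred work...)
 *	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_FREE, &item->list);
 *	(...then finish everything before the final commit)
 *	error = xfs_defer_finish(&tp);
 *	if (error) {
 *		xfs_trans_cancel(tp);
 *		return error;
 *	}
 *	return xfs_trans_commit(tp);
 *
 * xfs_trans_commit() also finishes any dfops still attached to the
 * transaction, so the explicit xfs_defer_finish() call matters mostly
 * to callers that need a clean transaction handed back.
 */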

static const struct xfs_defer_op_type *defer_op_types[] = {
	[XFS_DEFER_OPS_TYPE_BMAP]	= &xfs_bmap_update_defer_type,
	[XFS_DEFER_OPS_TYPE_REFCOUNT]	= &xfs_refcount_update_defer_type,
	[XFS_DEFER_OPS_TYPE_RMAP]	= &xfs_rmap_update_defer_type,
	[XFS_DEFER_OPS_TYPE_FREE]	= &xfs_extent_free_defer_type,
	[XFS_DEFER_OPS_TYPE_AGFL_FREE]	= &xfs_agfl_free_defer_type,
};

static void
xfs_defer_create_intent(
	struct xfs_trans		*tp,
	struct xfs_defer_pending	*dfp,
	bool				sort)
{
	const struct xfs_defer_op_type	*ops = defer_op_types[dfp->dfp_type];

	if (!dfp->dfp_intent)
		dfp->dfp_intent = ops->create_intent(tp, &dfp->dfp_work,
						     dfp->dfp_count, sort);
}

/*
 * For each pending item in the intake list, log its intent item and the
 * associated extents, then add the entire intake list to the end of
 * the pending list.
 */
STATIC void
xfs_defer_create_intents(
	struct xfs_trans		*tp)
{
	struct xfs_defer_pending	*dfp;

	list_for_each_entry(dfp, &tp->t_dfops, dfp_list) {
		trace_xfs_defer_create_intent(tp->t_mountp, dfp);
		xfs_defer_create_intent(tp, dfp, true);
	}
}

/* Abort all the intents that were committed. */
STATIC void
xfs_defer_trans_abort(
	struct xfs_trans		*tp,
	struct list_head		*dop_pending)
{
	struct xfs_defer_pending	*dfp;
	const struct xfs_defer_op_type	*ops;

	trace_xfs_defer_trans_abort(tp, _RET_IP_);

	/* Abort intent items that don't have a done item. */
	list_for_each_entry(dfp, dop_pending, dfp_list) {
		ops = defer_op_types[dfp->dfp_type];
		trace_xfs_defer_pending_abort(tp->t_mountp, dfp);
		if (dfp->dfp_intent && !dfp->dfp_done) {
			ops->abort_intent(dfp->dfp_intent);
			dfp->dfp_intent = NULL;
		}
	}
}

/* Roll a transaction so we can do some deferred op processing. */
STATIC int
xfs_defer_trans_roll(
	struct xfs_trans		**tpp)
{
	struct xfs_trans		*tp = *tpp;
	struct xfs_buf_log_item		*bli;
	struct xfs_inode_log_item	*ili;
	struct xfs_log_item		*lip;
	struct xfs_buf			*bplist[XFS_DEFER_OPS_NR_BUFS];
	struct xfs_inode		*iplist[XFS_DEFER_OPS_NR_INODES];
	unsigned int			ordered = 0; /* bitmap */
	int				bpcount = 0, ipcount = 0;
	int				i;
	int				error;

	BUILD_BUG_ON(NBBY * sizeof(ordered) < XFS_DEFER_OPS_NR_BUFS);

	list_for_each_entry(lip, &tp->t_items, li_trans) {
		switch (lip->li_type) {
		case XFS_LI_BUF:
			bli = container_of(lip, struct xfs_buf_log_item,
					   bli_item);
			if (bli->bli_flags & XFS_BLI_HOLD) {
				if (bpcount >= XFS_DEFER_OPS_NR_BUFS) {
					ASSERT(0);
					return -EFSCORRUPTED;
				}
				if (bli->bli_flags & XFS_BLI_ORDERED)
					ordered |= (1U << bpcount);
				else
					xfs_trans_dirty_buf(tp, bli->bli_buf);
				bplist[bpcount++] = bli->bli_buf;
			}
			break;
		case XFS_LI_INODE:
			ili = container_of(lip, struct xfs_inode_log_item,
					   ili_item);
			if (ili->ili_lock_flags == 0) {
				if (ipcount >= XFS_DEFER_OPS_NR_INODES) {
					ASSERT(0);
					return -EFSCORRUPTED;
				}
				xfs_trans_log_inode(tp, ili->ili_inode,
						    XFS_ILOG_CORE);
				iplist[ipcount++] = ili->ili_inode;
			}
			break;
		default:
			break;
		}
	}

	trace_xfs_defer_trans_roll(tp, _RET_IP_);

	/*
	 * Roll the transaction.  Rolling always gives back a new transaction
	 * (even if committing the old one fails!) to hand to the caller, so
	 * we join the held resources to that new transaction and always
	 * return with them attached to @tpp, no matter what happened.
	 */
	error = xfs_trans_roll(tpp);
	tp = *tpp;

	/* Rejoin the joined inodes. */
	for (i = 0; i < ipcount; i++)
		xfs_trans_ijoin(tp, iplist[i], 0);

	/* Rejoin the buffers and dirty them so the log moves forward. */
	for (i = 0; i < bpcount; i++) {
		xfs_trans_bjoin(tp, bplist[i]);
		if (ordered & (1U << i))
			xfs_trans_ordered_buf(tp, bplist[i]);
		xfs_trans_bhold(tp, bplist[i]);
	}

	if (error)
		trace_xfs_defer_trans_roll_error(tp, error);
	return error;
}
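
/*
 * A sketch of the hold-across-roll pattern that the rejoin logic above
 * preserves for callers (hypothetical caller code, not from this file):
 *
 *	xfs_trans_bjoin(tp, bp);
 *	xfs_trans_bhold(tp, bp);	  (keep bp locked across rolls)
 *	error = xfs_defer_finish(&tp);	  (bp comes back joined to *tp)
 *	xfs_trans_bhold_release(tp, bp);  (or hold it to the next trans)
 */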

/*
 * Free up any items left in the list.
 */
static void
xfs_defer_cancel_list(
	struct xfs_mount		*mp,
	struct list_head		*dop_list)
{
	struct xfs_defer_pending	*dfp;
	struct xfs_defer_pending	*pli;
	struct list_head		*pwi;
	struct list_head		*n;
	const struct xfs_defer_op_type	*ops;

	/*
	 * Free the pending items.  Caller should already have arranged
	 * for the intent items to be released.
	 */
	list_for_each_entry_safe(dfp, pli, dop_list, dfp_list) {
		ops = defer_op_types[dfp->dfp_type];
		trace_xfs_defer_cancel_list(mp, dfp);
		list_del(&dfp->dfp_list);
		list_for_each_safe(pwi, n, &dfp->dfp_work) {
			list_del(pwi);
			dfp->dfp_count--;
			ops->cancel_item(pwi);
		}
		ASSERT(dfp->dfp_count == 0);
		kmem_free(dfp);
	}
}

/*
 * Prevent a log intent item from pinning the tail of the log by logging a
 * done item to release the intent item; and then log a new intent item.
 * The caller should provide a fresh transaction and roll it after we're done.
 */
static int
xfs_defer_relog(
	struct xfs_trans		**tpp,
	struct list_head		*dfops)
{
	struct xlog			*log = (*tpp)->t_mountp->m_log;
	struct xfs_defer_pending	*dfp;
	xfs_lsn_t			threshold_lsn = NULLCOMMITLSN;

	ASSERT((*tpp)->t_flags & XFS_TRANS_PERM_LOG_RES);

	list_for_each_entry(dfp, dfops, dfp_list) {
		/*
		 * If the log intent item for this deferred op is not a part of
		 * the current log checkpoint, relog the intent item to keep
		 * the log tail moving forward.  We're ok with this being racy
		 * because an incorrect decision means we'll be a little slower
		 * at pushing the tail.
		 */
		if (dfp->dfp_intent == NULL ||
		    xfs_log_item_in_current_chkpt(dfp->dfp_intent))
			continue;

		/*
		 * Figure out where we need the tail to be in order to maintain
		 * the minimum required free space in the log.  Only sample
		 * the log threshold once per call.
		 */
		if (threshold_lsn == NULLCOMMITLSN) {
			threshold_lsn = xlog_grant_push_threshold(log, 0);
			if (threshold_lsn == NULLCOMMITLSN)
				break;
		}
		if (XFS_LSN_CMP(dfp->dfp_intent->li_lsn, threshold_lsn) >= 0)
			continue;

		trace_xfs_defer_relog_intent((*tpp)->t_mountp, dfp);
		XFS_STATS_INC((*tpp)->t_mountp, defer_relog);
		dfp->dfp_intent = xfs_trans_item_relog(dfp->dfp_intent, *tpp);
	}

	if ((*tpp)->t_flags & XFS_TRANS_DIRTY)
		return xfs_defer_trans_roll(tpp);
	return 0;
}

/*
 * Log an intent-done item for the first pending intent, and finish the work
 * items.
 */
static int
xfs_defer_finish_one(
	struct xfs_trans		*tp,
	struct xfs_defer_pending	*dfp)
{
	const struct xfs_defer_op_type	*ops = defer_op_types[dfp->dfp_type];
	struct xfs_btree_cur		*state = NULL;
	struct list_head		*li, *n;
	int				error;

	trace_xfs_defer_pending_finish(tp->t_mountp, dfp);

	dfp->dfp_done = ops->create_done(tp, dfp->dfp_intent, dfp->dfp_count);
	list_for_each_safe(li, n, &dfp->dfp_work) {
		list_del(li);
		dfp->dfp_count--;
		error = ops->finish_item(tp, dfp->dfp_done, li, &state);
		if (error == -EAGAIN) {
			/*
			 * Caller wants a fresh transaction; put the work item
			 * back on the list and log a new log intent item to
			 * replace the old one.  See "Requesting a Fresh
			 * Transaction while Finishing Deferred Work" above.
			 */
			list_add(li, &dfp->dfp_work);
			dfp->dfp_count++;
			dfp->dfp_done = NULL;
			dfp->dfp_intent = NULL;
			xfs_defer_create_intent(tp, dfp, false);
		}

		if (error)
			goto out;
	}

	/* Done with the dfp, free it. */
	list_del(&dfp->dfp_list);
	kmem_free(dfp);
out:
	if (ops->finish_cleanup)
		ops->finish_cleanup(tp, state, error);
	return error;
}

/*
 * Finish all the pending work.  This involves logging intent items for
 * any work items that wandered in since the last transaction roll (if
 * one has even happened), rolling the transaction, and finishing the
 * work items in the first item on the logged-and-pending list.
 */
int
xfs_defer_finish_noroll(
	struct xfs_trans		**tp)
{
	struct xfs_defer_pending	*dfp;
	int				error = 0;
	LIST_HEAD(dop_pending);

	ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);

	trace_xfs_defer_finish(*tp, _RET_IP_);

	/* Until we run out of pending work to finish... */
	while (!list_empty(&dop_pending) || !list_empty(&(*tp)->t_dfops)) {
		/*
		 * Deferred items that are created in the process of finishing
		 * other deferred work items should be queued at the head of
		 * the pending list, which puts them ahead of the deferred work
		 * that was created by the caller.  This keeps the number of
		 * pending work items to a minimum, which decreases the amount
		 * of time that any one intent item can stick around in memory,
		 * pinning the log tail.
		 */
		xfs_defer_create_intents(*tp);
		list_splice_init(&(*tp)->t_dfops, &dop_pending);

		error = xfs_defer_trans_roll(tp);
		if (error)
			goto out_shutdown;

		/* Possibly relog intent items to keep the log moving. */
		error = xfs_defer_relog(tp, &dop_pending);
		if (error)
			goto out_shutdown;

		dfp = list_first_entry(&dop_pending, struct xfs_defer_pending,
				       dfp_list);
		error = xfs_defer_finish_one(*tp, dfp);
		if (error && error != -EAGAIN)
			goto out_shutdown;
	}

	trace_xfs_defer_finish_done(*tp, _RET_IP_);
	return 0;

out_shutdown:
	xfs_defer_trans_abort(*tp, &dop_pending);
	xfs_force_shutdown((*tp)->t_mountp, SHUTDOWN_CORRUPT_INCORE);
	trace_xfs_defer_finish_error(*tp, error);
	xfs_defer_cancel_list((*tp)->t_mountp, &dop_pending);
	xfs_defer_cancel(*tp);
	return error;
}

int
xfs_defer_finish(
	struct xfs_trans	**tp)
{
	int			error;

	/*
	 * Finish and roll the transaction once more to avoid returning to the
	 * caller with a dirty transaction.
	 */
	error = xfs_defer_finish_noroll(tp);
	if (error)
		return error;
	if ((*tp)->t_flags & XFS_TRANS_DIRTY) {
		error = xfs_defer_trans_roll(tp);
		if (error) {
			xfs_force_shutdown((*tp)->t_mountp,
					   SHUTDOWN_CORRUPT_INCORE);
			return error;
		}
	}

	/* Reset LOWMODE now that we've finished all the dfops. */
	ASSERT(list_empty(&(*tp)->t_dfops));
	(*tp)->t_flags &= ~XFS_TRANS_LOWMODE;
	return 0;
}

void
xfs_defer_cancel(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;

	trace_xfs_defer_cancel(tp, _RET_IP_);
	xfs_defer_cancel_list(mp, &tp->t_dfops);
}

/* Add an item for later deferred processing. */
void
xfs_defer_add(
	struct xfs_trans		*tp,
	enum xfs_defer_ops_type		type,
	struct list_head		*li)
{
	struct xfs_defer_pending	*dfp = NULL;
	const struct xfs_defer_op_type	*ops;

	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	BUILD_BUG_ON(ARRAY_SIZE(defer_op_types) != XFS_DEFER_OPS_TYPE_MAX);

	/*
	 * Add the item to a pending item at the end of the intake list.
	 * If the last pending item has the same type, reuse it.  Else,
	 * create a new pending item at the end of the intake list.
	 */
	if (!list_empty(&tp->t_dfops)) {
		dfp = list_last_entry(&tp->t_dfops,
				struct xfs_defer_pending, dfp_list);
		ops = defer_op_types[dfp->dfp_type];
		if (dfp->dfp_type != type ||
		    (ops->max_items && dfp->dfp_count >= ops->max_items))
			dfp = NULL;
	}
	if (!dfp) {
		dfp = kmem_alloc(sizeof(struct xfs_defer_pending),
				KM_NOFS);
		dfp->dfp_type = type;
		dfp->dfp_intent = NULL;
		dfp->dfp_done = NULL;
		dfp->dfp_count = 0;
		INIT_LIST_HEAD(&dfp->dfp_work);
		list_add_tail(&dfp->dfp_list, &tp->t_dfops);
	}

	list_add_tail(li, &dfp->dfp_work);
	dfp->dfp_count++;
}
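
/*
 * A sketch of how a work item typically reaches xfs_defer_add().  This
 * mirrors the shape of the extent-free helper (__xfs_bmap_add_free());
 * treat the field names as illustrative rather than as a definition:
 *
 *	new = kmem_cache_alloc(xfs_bmap_free_item_zone,
 *			GFP_KERNEL | __GFP_NOFAIL);
 *	new->xefi_startblock = bno;
 *	new->xefi_blockcount = (xfs_extlen_t)len;
 *	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_FREE, &new->xefi_list);
 *
 * Because of the max_items check above, back-to-back adds of the same
 * type may land in separate xfs_defer_pending items, which is what
 * bounds the size of any one log intent item.
 */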

/*
 * Move deferred ops from one transaction to another and reset the source to
 * initial state. This is primarily used to carry state forward across
 * transaction rolls with pending dfops.
 */
void
xfs_defer_move(
	struct xfs_trans	*dtp,
	struct xfs_trans	*stp)
{
	list_splice_init(&stp->t_dfops, &dtp->t_dfops);

	/*
	 * Low free space mode was historically controlled by a dfops field.
	 * This meant that low mode state potentially carried across multiple
	 * transaction rolls. Transfer low mode on a dfops move to preserve
	 * that behavior.
	 */
	dtp->t_flags |= (stp->t_flags & XFS_TRANS_LOWMODE);
	stp->t_flags &= ~XFS_TRANS_LOWMODE;
}

/*
 * Prepare a chain of fresh deferred ops work items to be completed later.  Log
 * recovery requires the ability to put off until later the actual finishing
 * work so that it can process unfinished items recovered from the log in
 * correct order.
 *
 * Create and log intent items for all the work that we're capturing so that we
 * can be assured that the items will get replayed if the system goes down
 * before log recovery gets a chance to finish the work it put off.  The entire
 * deferred ops state is transferred to the capture structure and the
 * transaction is then ready for the caller to commit it.  If there are no
 * intent items to capture, this function returns NULL.
 *
 * If capture_ip is not NULL, the capture structure will obtain an extra
 * reference to the inode.
 */
static struct xfs_defer_capture *
xfs_defer_ops_capture(
	struct xfs_trans		*tp,
	struct xfs_inode		*capture_ip)
{
	struct xfs_defer_capture	*dfc;

	if (list_empty(&tp->t_dfops))
		return NULL;

	/* Create an object to capture the defer ops. */
	dfc = kmem_zalloc(sizeof(*dfc), KM_NOFS);
	INIT_LIST_HEAD(&dfc->dfc_list);
	INIT_LIST_HEAD(&dfc->dfc_dfops);

	xfs_defer_create_intents(tp);

	/* Move the dfops chain and transaction state to the capture struct. */
	list_splice_init(&tp->t_dfops, &dfc->dfc_dfops);
	dfc->dfc_tpflags = tp->t_flags & XFS_TRANS_LOWMODE;
	tp->t_flags &= ~XFS_TRANS_LOWMODE;

	/* Capture the remaining block reservations along with the dfops. */
	dfc->dfc_blkres = tp->t_blk_res - tp->t_blk_res_used;
	dfc->dfc_rtxres = tp->t_rtx_res - tp->t_rtx_res_used;

	/* Preserve the log reservation size. */
	dfc->dfc_logres = tp->t_log_res;

	/*
	 * Grab an extra reference to this inode and attach it to the capture
	 * structure.
	 */
	if (capture_ip) {
		ihold(VFS_I(capture_ip));
		dfc->dfc_capture_ip = capture_ip;
	}

	return dfc;
}

/* Release all resources that we used to capture deferred ops. */
void
xfs_defer_ops_release(
	struct xfs_mount		*mp,
	struct xfs_defer_capture	*dfc)
{
	xfs_defer_cancel_list(mp, &dfc->dfc_dfops);
	if (dfc->dfc_capture_ip)
		xfs_irele(dfc->dfc_capture_ip);
	kmem_free(dfc);
}

/*
 * Capture any deferred ops and commit the transaction.  This is the last step
 * needed to finish a log intent item that we recovered from the log.  If any
 * of the deferred ops operate on an inode, the caller must pass in that inode
 * so that the reference can be transferred to the capture structure.  The
 * caller must hold ILOCK_EXCL on the inode, and must unlock it before calling
 * xfs_defer_ops_continue.
 */
int
xfs_defer_ops_capture_and_commit(
	struct xfs_trans		*tp,
	struct xfs_inode		*capture_ip,
	struct list_head		*capture_list)
{
	struct xfs_mount		*mp = tp->t_mountp;
	struct xfs_defer_capture	*dfc;
	int				error;

	ASSERT(!capture_ip || xfs_isilocked(capture_ip, XFS_ILOCK_EXCL));

	/* If we don't capture anything, commit transaction and exit. */
	dfc = xfs_defer_ops_capture(tp, capture_ip);
	if (!dfc)
		return xfs_trans_commit(tp);

	/* Commit the transaction and add the capture structure to the list. */
	error = xfs_trans_commit(tp);
	if (error) {
		xfs_defer_ops_release(mp, dfc);
		return error;
	}

	list_add_tail(&dfc->dfc_list, capture_list);
	return 0;
}

/*
 * Attach a chain of captured deferred ops to a new transaction and free the
 * capture structure.  If an inode was captured, it will be passed back to the
 * caller with ILOCK_EXCL held and joined to the transaction with lockflags==0.
 * The caller now owns the inode reference.
 */
void
xfs_defer_ops_continue(
	struct xfs_defer_capture	*dfc,
	struct xfs_trans		*tp,
	struct xfs_inode		**captured_ipp)
{
	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(!(tp->t_flags & XFS_TRANS_DIRTY));

	/* Lock and join the captured inode to the new transaction. */
	if (dfc->dfc_capture_ip) {
		xfs_ilock(dfc->dfc_capture_ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, dfc->dfc_capture_ip, 0);
	}
	*captured_ipp = dfc->dfc_capture_ip;

	/* Move captured dfops chain and state to the transaction. */
	list_splice_init(&dfc->dfc_dfops, &tp->t_dfops);
	tp->t_flags |= dfc->dfc_tpflags;

	kmem_free(dfc);
}
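
/*
 * A condensed sketch of how recovery is expected to drive the
 * capture/continue pair above.  The real loop lives in log recovery
 * (xlog_finish_defer_ops() at this code's vintage) and carries more
 * error handling; treat the reservation details here as assumptions:
 *
 *	list_for_each_entry_safe(dfc, next, capture_list, dfc_list) {
 *		struct xfs_trans_res	resv = {
 *			.tr_logres	= dfc->dfc_logres,
 *			.tr_logcount	= 1,
 *			.tr_logflags	= XFS_TRANS_PERM_LOG_RES,
 *		};
 *
 *		error = xfs_trans_alloc(mp, &resv, dfc->dfc_blkres,
 *				dfc->dfc_rtxres, XFS_TRANS_RESERVE, &tp);
 *		if (error)
 *			return error;
 *		list_del_init(&dfc->dfc_list);
 *		xfs_defer_ops_continue(dfc, tp, &ip);
 *		error = xfs_trans_commit(tp);
 *		if (ip) {
 *			xfs_iunlock(ip, XFS_ILOCK_EXCL);
 *			xfs_irele(ip);
 *		}
 *		if (error)
 *			return error;
 *	}
 */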