1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4 * All Rights Reserved.
5 */
6 #include "xfs.h"
7 #include <linux/backing-dev.h>
8 #include <linux/dax.h>
9
10 #include "xfs_shared.h"
11 #include "xfs_format.h"
12 #include "xfs_log_format.h"
13 #include "xfs_trans_resv.h"
14 #include "xfs_mount.h"
15 #include "xfs_trace.h"
16 #include "xfs_log.h"
17 #include "xfs_log_recover.h"
18 #include "xfs_log_priv.h"
19 #include "xfs_trans.h"
20 #include "xfs_buf_item.h"
21 #include "xfs_errortag.h"
22 #include "xfs_error.h"
23 #include "xfs_ag.h"
24 #include "xfs_buf_mem.h"
25
26 struct kmem_cache *xfs_buf_cache;
27
28 /*
29 * Locking orders
30 *
31 * xfs_buf_ioacct_inc:
32 * xfs_buf_ioacct_dec:
33 * b_sema (caller holds)
34 * b_lock
35 *
36 * xfs_buf_stale:
37 * b_sema (caller holds)
38 * b_lock
39 * lru_lock
40 *
41 * xfs_buf_rele:
42 * b_lock
43 * pag_buf_lock
44 * lru_lock
45 *
46 * xfs_buftarg_drain_rele
47 * lru_lock
48 * b_lock (trylock due to inversion)
49 *
50 * xfs_buftarg_isolate
51 * lru_lock
52 * b_lock (trylock due to inversion)
53 */
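/*
 * For illustration only: the xfs_buf_rele order above corresponds to the
 * nesting in xfs_buf_rele_cached() below, roughly:
 *
 *	spin_lock(&bp->b_lock);
 *	atomic_dec_and_lock(&bp->b_hold, &bch->bc_lock);	<- cache ("pag_buf") lock
 *	list_lru_add_obj() / list_lru_del_obj()			<- takes lru_lock internally
 */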
54
55 static int __xfs_buf_submit(struct xfs_buf *bp, bool wait);
56
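/* Submit buffer I/O, waiting for completion unless the buffer is XBF_ASYNC. */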
57 static inline int
58 xfs_buf_submit(
59 struct xfs_buf *bp)
60 {
61 return __xfs_buf_submit(bp, !(bp->b_flags & XBF_ASYNC));
62 }
63
64 static inline bool xfs_buf_is_uncached(struct xfs_buf *bp)
65 {
66 return bp->b_rhash_key == XFS_BUF_DADDR_NULL;
67 }
68
69 static inline int
70 xfs_buf_is_vmapped(
71 struct xfs_buf *bp)
72 {
73 /*
74 * Return true if the buffer is vmapped.
75 *
76 * b_addr is null if the buffer is not mapped, but the code is clever
77 * enough to know it doesn't have to map a single page, so the check has
78 * to be both for b_addr and bp->b_page_count > 1.
79 */
80 return bp->b_addr && bp->b_page_count > 1;
81 }
82
83 static inline int
84 xfs_buf_vmap_len(
85 struct xfs_buf *bp)
86 {
87 return (bp->b_page_count * PAGE_SIZE);
88 }
89
90 /*
91 * Bump the I/O in flight count on the buftarg if we haven't yet done so for
92 * this buffer. The count is incremented once per buffer (per hold cycle)
93 * because the corresponding decrement is deferred to buffer release. Buffers
94 * can undergo I/O multiple times in a hold-release cycle and per buffer I/O
95 * tracking adds unnecessary overhead. This is used for synchronization purposes
96 * with unmount (see xfs_buftarg_drain()), so all we really need is a count of
97 * in-flight buffers.
98 *
99 * Buffers that are never released (e.g., superblock, iclog buffers) must set
100 * the XBF_NO_IOACCT flag before I/O submission. Otherwise, the buftarg count
101 * never reaches zero and unmount hangs indefinitely.
102 */
103 static inline void
104 xfs_buf_ioacct_inc(
105 struct xfs_buf *bp)
106 {
107 if (bp->b_flags & XBF_NO_IOACCT)
108 return;
109
110 ASSERT(bp->b_flags & XBF_ASYNC);
111 spin_lock(&bp->b_lock);
112 if (!(bp->b_state & XFS_BSTATE_IN_FLIGHT)) {
113 bp->b_state |= XFS_BSTATE_IN_FLIGHT;
114 percpu_counter_inc(&bp->b_target->bt_io_count);
115 }
116 spin_unlock(&bp->b_lock);
117 }
118
119 /*
120 * Clear the in-flight state on a buffer about to be released to the LRU or
121 * freed and unaccount from the buftarg.
122 */
123 static inline void
124 __xfs_buf_ioacct_dec(
125 struct xfs_buf *bp)
126 {
127 lockdep_assert_held(&bp->b_lock);
128
129 if (bp->b_state & XFS_BSTATE_IN_FLIGHT) {
130 bp->b_state &= ~XFS_BSTATE_IN_FLIGHT;
131 percpu_counter_dec(&bp->b_target->bt_io_count);
132 }
133 }
134
135 static inline void
136 xfs_buf_ioacct_dec(
137 struct xfs_buf *bp)
138 {
139 spin_lock(&bp->b_lock);
140 __xfs_buf_ioacct_dec(bp);
141 spin_unlock(&bp->b_lock);
142 }
143
144 /*
145 * When we mark a buffer stale, we remove the buffer from the LRU and clear the
146 * b_lru_ref count so that the buffer is freed immediately when the buffer
147 * reference count falls to zero. If the buffer is already on the LRU, we need
148 * to remove the reference that LRU holds on the buffer.
149 *
150 * This prevents build-up of stale buffers on the LRU.
151 */
152 void
153 xfs_buf_stale(
154 struct xfs_buf *bp)
155 {
156 ASSERT(xfs_buf_islocked(bp));
157
158 bp->b_flags |= XBF_STALE;
159
160 /*
161 * Clear the delwri status so that a delwri queue walker will not
162 * flush this buffer to disk now that it is stale. The delwri queue has
163 * a reference to the buffer, so this is safe to do.
164 */
165 bp->b_flags &= ~_XBF_DELWRI_Q;
166
167 /*
168 * Once the buffer is marked stale and unlocked, a subsequent lookup
169 * could reset b_flags. There is no guarantee that the buffer is
170 * unaccounted (released to LRU) before that occurs. Drop in-flight
171 * status now to preserve accounting consistency.
172 */
173 spin_lock(&bp->b_lock);
174 __xfs_buf_ioacct_dec(bp);
175
176 atomic_set(&bp->b_lru_ref, 0);
177 if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
178 (list_lru_del_obj(&bp->b_target->bt_lru, &bp->b_lru)))
179 atomic_dec(&bp->b_hold);
180
181 ASSERT(atomic_read(&bp->b_hold) >= 1);
182 spin_unlock(&bp->b_lock);
183 }
184
185 static int
186 xfs_buf_get_maps(
187 struct xfs_buf *bp,
188 int map_count)
189 {
190 ASSERT(bp->b_maps == NULL);
191 bp->b_map_count = map_count;
192
193 if (map_count == 1) {
194 bp->b_maps = &bp->__b_map;
195 return 0;
196 }
197
198 bp->b_maps = kzalloc(map_count * sizeof(struct xfs_buf_map),
199 GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL);
200 if (!bp->b_maps)
201 return -ENOMEM;
202 return 0;
203 }
204
205 /*
206 * Frees b_pages if it was allocated.
207 */
208 static void
209 xfs_buf_free_maps(
210 struct xfs_buf *bp)
211 {
212 if (bp->b_maps != &bp->__b_map) {
213 kfree(bp->b_maps);
214 bp->b_maps = NULL;
215 }
216 }
217
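/*
 * Allocate an xfs_buf covering the range described by @map/@nmaps. The buffer
 * is returned locked and with a single hold; no data pages are attached yet.
 */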
218 static int
219 _xfs_buf_alloc(
220 struct xfs_buftarg *target,
221 struct xfs_buf_map *map,
222 int nmaps,
223 xfs_buf_flags_t flags,
224 struct xfs_buf **bpp)
225 {
226 struct xfs_buf *bp;
227 int error;
228 int i;
229
230 *bpp = NULL;
231 bp = kmem_cache_zalloc(xfs_buf_cache,
232 GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL);
233
234 /*
235 * We don't want certain flags to appear in b_flags unless they are
236 * specifically set by later operations on the buffer.
237 */
238 flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);
239
240 atomic_set(&bp->b_hold, 1);
241 atomic_set(&bp->b_lru_ref, 1);
242 init_completion(&bp->b_iowait);
243 INIT_LIST_HEAD(&bp->b_lru);
244 INIT_LIST_HEAD(&bp->b_list);
245 INIT_LIST_HEAD(&bp->b_li_list);
246 sema_init(&bp->b_sema, 0); /* held, no waiters */
247 spin_lock_init(&bp->b_lock);
248 bp->b_target = target;
249 bp->b_mount = target->bt_mount;
250 bp->b_flags = flags;
251
252 /*
253 * Set length and io_length to the same value initially.
254 * I/O routines should use io_length, which will be the same in
255 * most cases but may be reset (e.g. XFS recovery).
256 */
257 error = xfs_buf_get_maps(bp, nmaps);
258 if (error) {
259 kmem_cache_free(xfs_buf_cache, bp);
260 return error;
261 }
262
263 bp->b_rhash_key = map[0].bm_bn;
264 bp->b_length = 0;
265 for (i = 0; i < nmaps; i++) {
266 bp->b_maps[i].bm_bn = map[i].bm_bn;
267 bp->b_maps[i].bm_len = map[i].bm_len;
268 bp->b_length += map[i].bm_len;
269 }
270
271 atomic_set(&bp->b_pin_count, 0);
272 init_waitqueue_head(&bp->b_waiters);
273
274 XFS_STATS_INC(bp->b_mount, xb_create);
275 trace_xfs_buf_init(bp, _RET_IP_);
276
277 *bpp = bp;
278 return 0;
279 }
280
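/*
 * Free the pages backing a buffer: tear down the vmap area if one was set up,
 * release the pages, and free any externally allocated page pointer array.
 */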
281 static void
282 xfs_buf_free_pages(
283 struct xfs_buf *bp)
284 {
285 uint i;
286
287 ASSERT(bp->b_flags & _XBF_PAGES);
288
289 if (xfs_buf_is_vmapped(bp))
290 vm_unmap_ram(bp->b_addr, bp->b_page_count);
291
292 for (i = 0; i < bp->b_page_count; i++) {
293 if (bp->b_pages[i])
294 __free_page(bp->b_pages[i]);
295 }
296 mm_account_reclaimed_pages(bp->b_page_count);
297
298 if (bp->b_pages != bp->b_page_array)
299 kfree(bp->b_pages);
300 bp->b_pages = NULL;
301 bp->b_flags &= ~_XBF_PAGES;
302 }
303
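/*
 * RCU callback: free the map array and the buffer structure itself once
 * concurrent rhashtable lookups can no longer find the buffer.
 */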
304 static void
305 xfs_buf_free_callback(
306 struct callback_head *cb)
307 {
308 struct xfs_buf *bp = container_of(cb, struct xfs_buf, b_rcu);
309
310 xfs_buf_free_maps(bp);
311 kmem_cache_free(xfs_buf_cache, bp);
312 }
313
314 static void
315 xfs_buf_free(
316 struct xfs_buf *bp)
317 {
318 trace_xfs_buf_free(bp, _RET_IP_);
319
320 ASSERT(list_empty(&bp->b_lru));
321
322 if (xfs_buftarg_is_mem(bp->b_target))
323 xmbuf_unmap_page(bp);
324 else if (bp->b_flags & _XBF_PAGES)
325 xfs_buf_free_pages(bp);
326 else if (bp->b_flags & _XBF_KMEM)
327 kfree(bp->b_addr);
328
329 call_rcu(&bp->b_rcu, xfs_buf_free_callback);
330 }
331
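/*
 * Try to back a sub-page-sized buffer with heap memory. Returns -ENOMEM if
 * kmalloc() hands back memory that straddles a page boundary, in which case
 * the caller falls back to xfs_buf_alloc_pages().
 */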
332 static int
333 xfs_buf_alloc_kmem(
334 struct xfs_buf *bp,
335 xfs_buf_flags_t flags)
336 {
337 gfp_t gfp_mask = GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL;
338 size_t size = BBTOB(bp->b_length);
339
340 /* Assure zeroed buffer for non-read cases. */
341 if (!(flags & XBF_READ))
342 gfp_mask |= __GFP_ZERO;
343
344 bp->b_addr = kmalloc(size, gfp_mask);
345 if (!bp->b_addr)
346 return -ENOMEM;
347
348 if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
349 ((unsigned long)bp->b_addr & PAGE_MASK)) {
350 /* b_addr spans two pages - use alloc_page instead */
351 kfree(bp->b_addr);
352 bp->b_addr = NULL;
353 return -ENOMEM;
354 }
355 bp->b_offset = offset_in_page(bp->b_addr);
356 bp->b_pages = bp->b_page_array;
357 bp->b_pages[0] = kmem_to_page(bp->b_addr);
358 bp->b_page_count = 1;
359 bp->b_flags |= _XBF_KMEM;
360 return 0;
361 }
362
363 static int
364 xfs_buf_alloc_pages(
365 struct xfs_buf *bp,
366 xfs_buf_flags_t flags)
367 {
368 gfp_t gfp_mask = GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOWARN;
369 long filled = 0;
370
371 if (flags & XBF_READ_AHEAD)
372 gfp_mask |= __GFP_NORETRY;
373
374 /* Make sure that we have a page list */
375 bp->b_page_count = DIV_ROUND_UP(BBTOB(bp->b_length), PAGE_SIZE);
376 if (bp->b_page_count <= XB_PAGES) {
377 bp->b_pages = bp->b_page_array;
378 } else {
379 bp->b_pages = kzalloc(sizeof(struct page *) * bp->b_page_count,
380 gfp_mask);
381 if (!bp->b_pages)
382 return -ENOMEM;
383 }
384 bp->b_flags |= _XBF_PAGES;
385
386 /* Assure zeroed buffer for non-read cases. */
387 if (!(flags & XBF_READ))
388 gfp_mask |= __GFP_ZERO;
389
390 /*
391 * Bulk filling of pages can take multiple calls. Not filling the entire
392 * array is not an allocation failure, so don't back off if we get at
393 * least one extra page.
394 */
395 for (;;) {
396 long last = filled;
397
398 filled = alloc_pages_bulk_array(gfp_mask, bp->b_page_count,
399 bp->b_pages);
400 if (filled == bp->b_page_count) {
401 XFS_STATS_INC(bp->b_mount, xb_page_found);
402 break;
403 }
404
405 if (filled != last)
406 continue;
407
408 if (flags & XBF_READ_AHEAD) {
409 xfs_buf_free_pages(bp);
410 return -ENOMEM;
411 }
412
413 XFS_STATS_INC(bp->b_mount, xb_page_retries);
414 memalloc_retry_wait(gfp_mask);
415 }
416 return 0;
417 }
418
419 /*
420 * Map buffer into kernel address-space if necessary.
421 */
422 STATIC int
423 _xfs_buf_map_pages(
424 struct xfs_buf *bp,
425 xfs_buf_flags_t flags)
426 {
427 ASSERT(bp->b_flags & _XBF_PAGES);
428 if (bp->b_page_count == 1) {
429 /* A single page buffer is always mappable */
430 bp->b_addr = page_address(bp->b_pages[0]);
431 } else if (flags & XBF_UNMAPPED) {
432 bp->b_addr = NULL;
433 } else {
434 int retried = 0;
435 unsigned nofs_flag;
436
437 /*
438 * vm_map_ram() will allocate auxiliary structures (e.g.
439 * pagetables) with GFP_KERNEL, yet we are often under a scoped nofs
440 * context here. Mixing GFP_KERNEL with GFP_NOFS allocations
441 * from the same call site that can be run from both above and
442 * below memory reclaim causes lockdep false positives. Hence we
443 * always need to force this allocation to nofs context because
444 * we can't pass __GFP_NOLOCKDEP down to auxiliary structures to
445 * prevent false positive lockdep reports.
446 *
447 * XXX(dgc): I think dquot reclaim is the only place we can get
448 * to this function from memory reclaim context now. If we fix
449 * that like we've fixed inode reclaim to avoid writeback from
450 * reclaim, this nofs wrapping can go away.
451 */
452 nofs_flag = memalloc_nofs_save();
453 do {
454 bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
455 -1);
456 if (bp->b_addr)
457 break;
458 vm_unmap_aliases();
459 } while (retried++ <= 1);
460 memalloc_nofs_restore(nofs_flag);
461
462 if (!bp->b_addr)
463 return -ENOMEM;
464 }
465
466 return 0;
467 }
468
469 /*
470 * Finding and Reading Buffers
471 */
472 static int
473 _xfs_buf_obj_cmp(
474 struct rhashtable_compare_arg *arg,
475 const void *obj)
476 {
477 const struct xfs_buf_map *map = arg->key;
478 const struct xfs_buf *bp = obj;
479
480 /*
481 * The key hashing in the lookup path depends on the key being the
482 * first element of the compare_arg, make sure to assert this.
483 */
484 BUILD_BUG_ON(offsetof(struct xfs_buf_map, bm_bn) != 0);
485
486 if (bp->b_rhash_key != map->bm_bn)
487 return 1;
488
489 if (unlikely(bp->b_length != map->bm_len)) {
490 /*
491 * found a block number match. If the range doesn't
492 * match, the only way this is allowed is if the buffer
493 * in the cache is stale and the transaction that made
494 * it stale has not yet committed. i.e. we are
495 * reallocating a busy extent. Skip this buffer and
496 * continue searching for an exact match.
497 *
498 * Note: If we're scanning for incore buffers to stale, don't
499 * complain if we find non-stale buffers.
500 */
501 if (!(map->bm_flags & XBM_LIVESCAN))
502 ASSERT(bp->b_flags & XBF_STALE);
503 return 1;
504 }
505 return 0;
506 }
507
508 static const struct rhashtable_params xfs_buf_hash_params = {
509 .min_size = 32, /* empty AGs have minimal footprint */
510 .nelem_hint = 16,
511 .key_len = sizeof(xfs_daddr_t),
512 .key_offset = offsetof(struct xfs_buf, b_rhash_key),
513 .head_offset = offsetof(struct xfs_buf, b_rhash_head),
514 .automatic_shrinking = true,
515 .obj_cmpfn = _xfs_buf_obj_cmp,
516 };
517
518 int
519 xfs_buf_cache_init(
520 struct xfs_buf_cache *bch)
521 {
522 spin_lock_init(&bch->bc_lock);
523 return rhashtable_init(&bch->bc_hash, &xfs_buf_hash_params);
524 }
525
526 void
527 xfs_buf_cache_destroy(
528 struct xfs_buf_cache *bch)
529 {
530 rhashtable_destroy(&bch->bc_hash);
531 }
532
533 static int
534 xfs_buf_map_verify(
535 struct xfs_buftarg *btp,
536 struct xfs_buf_map *map)
537 {
538 xfs_daddr_t eofs;
539
540 /* Check for IOs smaller than the sector size / not sector aligned */
541 ASSERT(!(BBTOB(map->bm_len) < btp->bt_meta_sectorsize));
542 ASSERT(!(BBTOB(map->bm_bn) & (xfs_off_t)btp->bt_meta_sectormask));
543
544 /*
545 * Corrupted block numbers can get through to here, unfortunately, so we
546 * have to check that the buffer falls within the filesystem bounds.
547 */
548 eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
549 if (map->bm_bn < 0 || map->bm_bn >= eofs) {
550 xfs_alert(btp->bt_mount,
551 "%s: daddr 0x%llx out of range, EOFS 0x%llx",
552 __func__, map->bm_bn, eofs);
553 WARN_ON(1);
554 return -EFSCORRUPTED;
555 }
556 return 0;
557 }
558
559 static int
560 xfs_buf_find_lock(
561 struct xfs_buf *bp,
562 xfs_buf_flags_t flags)
563 {
564 if (flags & XBF_TRYLOCK) {
565 if (!xfs_buf_trylock(bp)) {
566 XFS_STATS_INC(bp->b_mount, xb_busy_locked);
567 return -EAGAIN;
568 }
569 } else {
570 xfs_buf_lock(bp);
571 XFS_STATS_INC(bp->b_mount, xb_get_locked_waited);
572 }
573
574 /*
575 * if the buffer is stale, clear all the external state associated with
576 * it. We need to keep flags such as how we allocated the buffer memory
577 * intact here.
578 */
579 if (bp->b_flags & XBF_STALE) {
580 if (flags & XBF_LIVESCAN) {
581 xfs_buf_unlock(bp);
582 return -ENOENT;
583 }
584 ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
585 bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
586 bp->b_ops = NULL;
587 }
588 return 0;
589 }
590
591 static inline int
592 xfs_buf_lookup(
593 struct xfs_buf_cache *bch,
594 struct xfs_buf_map *map,
595 xfs_buf_flags_t flags,
596 struct xfs_buf **bpp)
597 {
598 struct xfs_buf *bp;
599 int error;
600
601 rcu_read_lock();
602 bp = rhashtable_lookup(&bch->bc_hash, map, xfs_buf_hash_params);
603 if (!bp || !atomic_inc_not_zero(&bp->b_hold)) {
604 rcu_read_unlock();
605 return -ENOENT;
606 }
607 rcu_read_unlock();
608
609 error = xfs_buf_find_lock(bp, flags);
610 if (error) {
611 xfs_buf_rele(bp);
612 return error;
613 }
614
615 trace_xfs_buf_find(bp, flags, _RET_IP_);
616 *bpp = bp;
617 return 0;
618 }
619
620 /*
621 * Insert the new_bp into the hash table. This consumes the perag reference
622 * taken for the lookup regardless of the result of the insert.
623 */
624 static int
625 xfs_buf_find_insert(
626 struct xfs_buftarg *btp,
627 struct xfs_buf_cache *bch,
628 struct xfs_perag *pag,
629 struct xfs_buf_map *cmap,
630 struct xfs_buf_map *map,
631 int nmaps,
632 xfs_buf_flags_t flags,
633 struct xfs_buf **bpp)
634 {
635 struct xfs_buf *new_bp;
636 struct xfs_buf *bp;
637 int error;
638
639 error = _xfs_buf_alloc(btp, map, nmaps, flags, &new_bp);
640 if (error)
641 goto out_drop_pag;
642
643 if (xfs_buftarg_is_mem(new_bp->b_target)) {
644 error = xmbuf_map_page(new_bp);
645 } else if (BBTOB(new_bp->b_length) >= PAGE_SIZE ||
646 xfs_buf_alloc_kmem(new_bp, flags) < 0) {
647 /*
648 * For buffers that fit entirely within a single page, first
649 * attempt to allocate the memory from the heap to minimise
650 * memory usage. If we can't get heap memory for these small
651 * buffers, we fall back to using the page allocator.
652 */
653 error = xfs_buf_alloc_pages(new_bp, flags);
654 }
655 if (error)
656 goto out_free_buf;
657
658 spin_lock(&bch->bc_lock);
659 bp = rhashtable_lookup_get_insert_fast(&bch->bc_hash,
660 &new_bp->b_rhash_head, xfs_buf_hash_params);
661 if (IS_ERR(bp)) {
662 error = PTR_ERR(bp);
663 spin_unlock(&bch->bc_lock);
664 goto out_free_buf;
665 }
666 if (bp) {
667 /* found an existing buffer */
668 atomic_inc(&bp->b_hold);
669 spin_unlock(&bch->bc_lock);
670 error = xfs_buf_find_lock(bp, flags);
671 if (error)
672 xfs_buf_rele(bp);
673 else
674 *bpp = bp;
675 goto out_free_buf;
676 }
677
678 /* The new buffer keeps the perag reference until it is freed. */
679 new_bp->b_pag = pag;
680 spin_unlock(&bch->bc_lock);
681 *bpp = new_bp;
682 return 0;
683
684 out_free_buf:
685 xfs_buf_free(new_bp);
686 out_drop_pag:
687 if (pag)
688 xfs_perag_put(pag);
689 return error;
690 }
691
692 static inline struct xfs_perag *
693 xfs_buftarg_get_pag(
694 struct xfs_buftarg *btp,
695 const struct xfs_buf_map *map)
696 {
697 struct xfs_mount *mp = btp->bt_mount;
698
699 if (xfs_buftarg_is_mem(btp))
700 return NULL;
701 return xfs_perag_get(mp, xfs_daddr_to_agno(mp, map->bm_bn));
702 }
703
704 static inline struct xfs_buf_cache *
705 xfs_buftarg_buf_cache(
706 struct xfs_buftarg *btp,
707 struct xfs_perag *pag)
708 {
709 if (pag)
710 return &pag->pag_bcache;
711 return btp->bt_cache;
712 }
713
714 /*
715 * Assembles a buffer covering the specified range. The code is optimised for
716 * cache hits, as metadata intensive workloads will see 3 orders of magnitude
717 * more hits than misses.
718 */
719 int
720 xfs_buf_get_map(
721 struct xfs_buftarg *btp,
722 struct xfs_buf_map *map,
723 int nmaps,
724 xfs_buf_flags_t flags,
725 struct xfs_buf **bpp)
726 {
727 struct xfs_buf_cache *bch;
728 struct xfs_perag *pag;
729 struct xfs_buf *bp = NULL;
730 struct xfs_buf_map cmap = { .bm_bn = map[0].bm_bn };
731 int error;
732 int i;
733
734 if (flags & XBF_LIVESCAN)
735 cmap.bm_flags |= XBM_LIVESCAN;
736 for (i = 0; i < nmaps; i++)
737 cmap.bm_len += map[i].bm_len;
738
739 error = xfs_buf_map_verify(btp, &cmap);
740 if (error)
741 return error;
742
743 pag = xfs_buftarg_get_pag(btp, &cmap);
744 bch = xfs_buftarg_buf_cache(btp, pag);
745
746 error = xfs_buf_lookup(bch, &cmap, flags, &bp);
747 if (error && error != -ENOENT)
748 goto out_put_perag;
749
750 /* cache hits always outnumber misses by at least 10:1 */
751 if (unlikely(!bp)) {
752 XFS_STATS_INC(btp->bt_mount, xb_miss_locked);
753
754 if (flags & XBF_INCORE)
755 goto out_put_perag;
756
757 /* xfs_buf_find_insert() consumes the perag reference. */
758 error = xfs_buf_find_insert(btp, bch, pag, &cmap, map, nmaps,
759 flags, &bp);
760 if (error)
761 return error;
762 } else {
763 XFS_STATS_INC(btp->bt_mount, xb_get_locked);
764 if (pag)
765 xfs_perag_put(pag);
766 }
767
768 /* We do not hold a perag reference anymore. */
769 if (!bp->b_addr) {
770 error = _xfs_buf_map_pages(bp, flags);
771 if (unlikely(error)) {
772 xfs_warn_ratelimited(btp->bt_mount,
773 "%s: failed to map %u pages", __func__,
774 bp->b_page_count);
775 xfs_buf_relse(bp);
776 return error;
777 }
778 }
779
780 /*
781 * Clear b_error if this is a lookup from a caller that doesn't expect
782 * valid data to be found in the buffer.
783 */
784 if (!(flags & XBF_READ))
785 xfs_buf_ioerror(bp, 0);
786
787 XFS_STATS_INC(btp->bt_mount, xb_get);
788 trace_xfs_buf_get(bp, flags, _RET_IP_);
789 *bpp = bp;
790 return 0;
791
792 out_put_perag:
793 if (pag)
794 xfs_perag_put(pag);
795 return error;
796 }
797
798 int
799 _xfs_buf_read(
800 struct xfs_buf *bp,
801 xfs_buf_flags_t flags)
802 {
803 ASSERT(!(flags & XBF_WRITE));
804 ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL);
805
806 bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD | XBF_DONE);
807 bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
808
809 return xfs_buf_submit(bp);
810 }
811
812 /*
813 * Reverify a buffer found in cache without an attached ->b_ops.
814 *
815 * If the caller passed an ops structure and the buffer doesn't have ops
816 * assigned, set the ops and use it to verify the contents. If verification
817 * fails, clear XBF_DONE. We assume the buffer has no recorded errors and is
818 * already in XBF_DONE state on entry.
819 *
820 * Under normal operations, every in-core buffer is verified on read I/O
821 * completion. There are two scenarios that can lead to in-core buffers without
822 * an assigned ->b_ops. The first is during log recovery of buffers on a V4
823 * filesystem, though these buffers are purged at the end of recovery. The
824 * other is online repair, which intentionally reads with a NULL buffer ops to
825 * run several verifiers across an in-core buffer in order to establish buffer
826 * type. If repair can't establish that, the buffer will be left in memory
827 * with NULL buffer ops.
828 */
829 int
830 xfs_buf_reverify(
831 struct xfs_buf *bp,
832 const struct xfs_buf_ops *ops)
833 {
834 ASSERT(bp->b_flags & XBF_DONE);
835 ASSERT(bp->b_error == 0);
836
837 if (!ops || bp->b_ops)
838 return 0;
839
840 bp->b_ops = ops;
841 bp->b_ops->verify_read(bp);
842 if (bp->b_error)
843 bp->b_flags &= ~XBF_DONE;
844 return bp->b_error;
845 }
846
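/*
 * Read (or find in cache) a buffer covering the given map and verify it with
 * @ops. For illustration only (the target, block number and verifier below
 * are hypothetical examples, not taken from this file), a typical metadata
 * read looks roughly like:
 *
 *	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
 *	error = xfs_buf_read_map(mp->m_ddev_targp, &map, 1, 0, &bp,
 *				 &xfs_sb_buf_ops, __this_address);
 */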
847 int
848 xfs_buf_read_map(
849 struct xfs_buftarg *target,
850 struct xfs_buf_map *map,
851 int nmaps,
852 xfs_buf_flags_t flags,
853 struct xfs_buf **bpp,
854 const struct xfs_buf_ops *ops,
855 xfs_failaddr_t fa)
856 {
857 struct xfs_buf *bp;
858 int error;
859
860 flags |= XBF_READ;
861 *bpp = NULL;
862
863 error = xfs_buf_get_map(target, map, nmaps, flags, &bp);
864 if (error)
865 return error;
866
867 trace_xfs_buf_read(bp, flags, _RET_IP_);
868
869 if (!(bp->b_flags & XBF_DONE)) {
870 /* Initiate the buffer read and wait. */
871 XFS_STATS_INC(target->bt_mount, xb_get_read);
872 bp->b_ops = ops;
873 error = _xfs_buf_read(bp, flags);
874
875 /* Readahead iodone already dropped the buffer, so exit. */
876 if (flags & XBF_ASYNC)
877 return 0;
878 } else {
879 /* Buffer already read; all we need to do is check it. */
880 error = xfs_buf_reverify(bp, ops);
881
882 /* Readahead already finished; drop the buffer and exit. */
883 if (flags & XBF_ASYNC) {
884 xfs_buf_relse(bp);
885 return 0;
886 }
887
888 /* We do not want read in the flags */
889 bp->b_flags &= ~XBF_READ;
890 ASSERT(bp->b_ops != NULL || ops == NULL);
891 }
892
893 /*
894 * If we've had a read error, then the contents of the buffer are
895 * invalid and should not be used. To ensure that a followup read tries
896 * to pull the buffer from disk again, we clear the XBF_DONE flag and
897 * mark the buffer stale. This ensures that anyone who has a current
898 * reference to the buffer will interpret its contents correctly and
899 * future cache lookups will also treat it as an empty, uninitialised
900 * buffer.
901 */
902 if (error) {
903 /*
904 * Check against log shutdown for error reporting because
905 * metadata writeback may require a read first and we need to
906 * report errors in metadata writeback until the log is shut
907 * down. High level transaction read functions already check
908 * against mount shutdown, anyway, so we only need to be
909 * concerned about low level IO interactions here.
910 */
911 if (!xlog_is_shutdown(target->bt_mount->m_log))
912 xfs_buf_ioerror_alert(bp, fa);
913
914 bp->b_flags &= ~XBF_DONE;
915 xfs_buf_stale(bp);
916 xfs_buf_relse(bp);
917
918 /* bad CRC means corrupted metadata */
919 if (error == -EFSBADCRC)
920 error = -EFSCORRUPTED;
921 return error;
922 }
923
924 *bpp = bp;
925 return 0;
926 }
927
928 /*
929 * If we are not low on memory then do the readahead in a deadlock
930 * safe manner.
931 */
932 void
933 xfs_buf_readahead_map(
934 struct xfs_buftarg *target,
935 struct xfs_buf_map *map,
936 int nmaps,
937 const struct xfs_buf_ops *ops)
938 {
939 struct xfs_buf *bp;
940
941 /*
942 * Currently we don't have a good means or justification for performing
943 * xmbuf_map_page asynchronously, so we don't do readahead.
944 */
945 if (xfs_buftarg_is_mem(target))
946 return;
947
948 xfs_buf_read_map(target, map, nmaps,
949 XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD, &bp, ops,
950 __this_address);
951 }
952
953 /*
954 * Read an uncached buffer from disk. Allocates and returns a locked
955 * buffer containing the disk contents or nothing. Uncached buffers always have
956 * a cache index of XFS_BUF_DADDR_NULL so we can easily determine if the buffer
957 * is cached or uncached during fault diagnosis.
958 */
959 int
960 xfs_buf_read_uncached(
961 struct xfs_buftarg *target,
962 xfs_daddr_t daddr,
963 size_t numblks,
964 xfs_buf_flags_t flags,
965 struct xfs_buf **bpp,
966 const struct xfs_buf_ops *ops)
967 {
968 struct xfs_buf *bp;
969 int error;
970
971 *bpp = NULL;
972
973 error = xfs_buf_get_uncached(target, numblks, flags, &bp);
974 if (error)
975 return error;
976
977 /* set up the buffer for a read IO */
978 ASSERT(bp->b_map_count == 1);
979 bp->b_rhash_key = XFS_BUF_DADDR_NULL;
980 bp->b_maps[0].bm_bn = daddr;
981 bp->b_flags |= XBF_READ;
982 bp->b_ops = ops;
983
984 xfs_buf_submit(bp);
985 if (bp->b_error) {
986 error = bp->b_error;
987 xfs_buf_relse(bp);
988 return error;
989 }
990
991 *bpp = bp;
992 return 0;
993 }
994
995 int
996 xfs_buf_get_uncached(
997 struct xfs_buftarg *target,
998 size_t numblks,
999 xfs_buf_flags_t flags,
1000 struct xfs_buf **bpp)
1001 {
1002 int error;
1003 struct xfs_buf *bp;
1004 DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);
1005
1006 *bpp = NULL;
1007
1008 /* flags might contain irrelevant bits, pass only what we care about */
1009 error = _xfs_buf_alloc(target, &map, 1, flags & XBF_NO_IOACCT, &bp);
1010 if (error)
1011 return error;
1012
1013 if (xfs_buftarg_is_mem(bp->b_target))
1014 error = xmbuf_map_page(bp);
1015 else
1016 error = xfs_buf_alloc_pages(bp, flags);
1017 if (error)
1018 goto fail_free_buf;
1019
1020 error = _xfs_buf_map_pages(bp, 0);
1021 if (unlikely(error)) {
1022 xfs_warn(target->bt_mount,
1023 "%s: failed to map pages", __func__);
1024 goto fail_free_buf;
1025 }
1026
1027 trace_xfs_buf_get_uncached(bp, _RET_IP_);
1028 *bpp = bp;
1029 return 0;
1030
1031 fail_free_buf:
1032 xfs_buf_free(bp);
1033 return error;
1034 }
1035
1036 /*
1037 * Increment reference count on buffer, to hold the buffer concurrently
1038 * with another thread which may release (free) the buffer asynchronously.
1039 * Must hold the buffer already to call this function.
1040 */
1041 void
1042 xfs_buf_hold(
1043 struct xfs_buf *bp)
1044 {
1045 trace_xfs_buf_hold(bp, _RET_IP_);
1046 atomic_inc(&bp->b_hold);
1047 }
1048
1049 static void
1050 xfs_buf_rele_uncached(
1051 struct xfs_buf *bp)
1052 {
1053 ASSERT(list_empty(&bp->b_lru));
1054 if (atomic_dec_and_test(&bp->b_hold)) {
1055 xfs_buf_ioacct_dec(bp);
1056 xfs_buf_free(bp);
1057 }
1058 }
1059
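/*
 * Drop a reference on a cached buffer. The final release either parks the
 * buffer on the LRU (if b_lru_ref is still set and the buffer is not stale)
 * or removes it from the cache hash and frees it.
 */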
1060 static void
1061 xfs_buf_rele_cached(
1062 struct xfs_buf *bp)
1063 {
1064 struct xfs_buftarg *btp = bp->b_target;
1065 struct xfs_perag *pag = bp->b_pag;
1066 struct xfs_buf_cache *bch = xfs_buftarg_buf_cache(btp, pag);
1067 bool release;
1068 bool freebuf = false;
1069
1070 trace_xfs_buf_rele(bp, _RET_IP_);
1071
1072 ASSERT(atomic_read(&bp->b_hold) > 0);
1073
1074 /*
1075 * We grab the b_lock here first to serialise racing xfs_buf_rele()
1076 * calls. The pag_buf_lock being taken on the last reference only
1077 * serialises against racing lookups in xfs_buf_find(). IOWs, the second
1078 * to last reference we drop here is not serialised against the last
1079 * reference until we take bp->b_lock. Hence if we don't grab b_lock
1080 * first, the last "release" reference can win the race to the lock and
1081 * free the buffer before the second-to-last reference is processed,
1082 * leading to a use-after-free scenario.
1083 */
1084 spin_lock(&bp->b_lock);
1085 release = atomic_dec_and_lock(&bp->b_hold, &bch->bc_lock);
1086 if (!release) {
1087 /*
1088 * Drop the in-flight state if the buffer is already on the LRU
1089 * and it holds the only reference. This is racy because we
1090 * haven't acquired the pag lock, but the use of XFS_BSTATE_IN_FLIGHT
1091 * ensures the decrement occurs only once per-buf.
1092 */
1093 if ((atomic_read(&bp->b_hold) == 1) && !list_empty(&bp->b_lru))
1094 __xfs_buf_ioacct_dec(bp);
1095 goto out_unlock;
1096 }
1097
1098 /* the last reference has been dropped ... */
1099 __xfs_buf_ioacct_dec(bp);
1100 if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
1101 /*
1102 * If the buffer is added to the LRU take a new reference to the
1103 * buffer for the LRU and clear the (now stale) dispose list
1104 * state flag
1105 */
1106 if (list_lru_add_obj(&btp->bt_lru, &bp->b_lru)) {
1107 bp->b_state &= ~XFS_BSTATE_DISPOSE;
1108 atomic_inc(&bp->b_hold);
1109 }
1110 spin_unlock(&bch->bc_lock);
1111 } else {
1112 /*
1113 * most of the time buffers will already be removed from the
1114 * LRU, so optimise that case by checking for the
1115 * XFS_BSTATE_DISPOSE flag indicating the last list the buffer
1116 * was on was the disposal list
1117 */
1118 if (!(bp->b_state & XFS_BSTATE_DISPOSE)) {
1119 list_lru_del_obj(&btp->bt_lru, &bp->b_lru);
1120 } else {
1121 ASSERT(list_empty(&bp->b_lru));
1122 }
1123
1124 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
1125 rhashtable_remove_fast(&bch->bc_hash, &bp->b_rhash_head,
1126 xfs_buf_hash_params);
1127 spin_unlock(&bch->bc_lock);
1128 if (pag)
1129 xfs_perag_put(pag);
1130 freebuf = true;
1131 }
1132
1133 out_unlock:
1134 spin_unlock(&bp->b_lock);
1135
1136 if (freebuf)
1137 xfs_buf_free(bp);
1138 }
1139
1140 /*
1141 * Release a hold on the specified buffer.
1142 */
1143 void
1144 xfs_buf_rele(
1145 struct xfs_buf *bp)
1146 {
1147 trace_xfs_buf_rele(bp, _RET_IP_);
1148 if (xfs_buf_is_uncached(bp))
1149 xfs_buf_rele_uncached(bp);
1150 else
1151 xfs_buf_rele_cached(bp);
1152 }
1153
1154 /*
1155 * Lock a buffer object, if it is not already locked.
1156 *
1157 * If we come across a stale, pinned, locked buffer, we know that we are
1158 * being asked to lock a buffer that has been reallocated. Because it is
1159 * pinned, we know that the log has not been pushed to disk and hence it
1160 * will still be locked. Rather than continuing to have trylock attempts
1161 * fail until someone else pushes the log, push it ourselves before
1162 * returning. This means that the xfsaild will not get stuck trying
1163 * to push on stale inode buffers.
1164 */
1165 int
1166 xfs_buf_trylock(
1167 struct xfs_buf *bp)
1168 {
1169 int locked;
1170
1171 locked = down_trylock(&bp->b_sema) == 0;
1172 if (locked)
1173 trace_xfs_buf_trylock(bp, _RET_IP_);
1174 else
1175 trace_xfs_buf_trylock_fail(bp, _RET_IP_);
1176 return locked;
1177 }
1178
1179 /*
1180 * Lock a buffer object.
1181 *
1182 * If we come across a stale, pinned, locked buffer, we know that we
1183 * are being asked to lock a buffer that has been reallocated. Because
1184 * it is pinned, we know that the log has not been pushed to disk and
1185 * hence it will still be locked. Rather than sleeping until someone
1186 * else pushes the log, push it ourselves before trying to get the lock.
1187 */
1188 void
1189 xfs_buf_lock(
1190 struct xfs_buf *bp)
1191 {
1192 trace_xfs_buf_lock(bp, _RET_IP_);
1193
1194 if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
1195 xfs_log_force(bp->b_mount, 0);
1196 down(&bp->b_sema);
1197
1198 trace_xfs_buf_lock_done(bp, _RET_IP_);
1199 }
1200
1201 void
1202 xfs_buf_unlock(
1203 struct xfs_buf *bp)
1204 {
1205 ASSERT(xfs_buf_islocked(bp));
1206
1207 up(&bp->b_sema);
1208 trace_xfs_buf_unlock(bp, _RET_IP_);
1209 }
1210
1211 STATIC void
1212 xfs_buf_wait_unpin(
1213 struct xfs_buf *bp)
1214 {
1215 DECLARE_WAITQUEUE (wait, current);
1216
1217 if (atomic_read(&bp->b_pin_count) == 0)
1218 return;
1219
1220 add_wait_queue(&bp->b_waiters, &wait);
1221 for (;;) {
1222 set_current_state(TASK_UNINTERRUPTIBLE);
1223 if (atomic_read(&bp->b_pin_count) == 0)
1224 break;
1225 io_schedule();
1226 }
1227 remove_wait_queue(&bp->b_waiters, &wait);
1228 set_current_state(TASK_RUNNING);
1229 }
1230
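/*
 * Rate-limit I/O error alerts so a failing device does not flood the log:
 * report at most once every five seconds unless the target changes.
 */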
1231 static void
1232 xfs_buf_ioerror_alert_ratelimited(
1233 struct xfs_buf *bp)
1234 {
1235 static unsigned long lasttime;
1236 static struct xfs_buftarg *lasttarg;
1237
1238 if (bp->b_target != lasttarg ||
1239 time_after(jiffies, (lasttime + 5*HZ))) {
1240 lasttime = jiffies;
1241 xfs_buf_ioerror_alert(bp, __this_address);
1242 }
1243 lasttarg = bp->b_target;
1244 }
1245
1246 /*
1247 * Account for this latest trip around the retry handler, and decide if
1248 * we've failed enough times to constitute a permanent failure.
1249 */
1250 static bool
1251 xfs_buf_ioerror_permanent(
1252 struct xfs_buf *bp,
1253 struct xfs_error_cfg *cfg)
1254 {
1255 struct xfs_mount *mp = bp->b_mount;
1256
1257 if (cfg->max_retries != XFS_ERR_RETRY_FOREVER &&
1258 ++bp->b_retries > cfg->max_retries)
1259 return true;
1260 if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER &&
1261 time_after(jiffies, cfg->retry_timeout + bp->b_first_retry_time))
1262 return true;
1263
1264 /* At unmount we may treat errors differently */
1265 if (xfs_is_unmounting(mp) && mp->m_fail_unmount)
1266 return true;
1267
1268 return false;
1269 }
1270
1271 /*
1272 * On a sync write or shutdown we just want to stale the buffer and let the
1273 * caller handle the error in bp->b_error appropriately.
1274 *
1275 * If the write was asynchronous then no one will be looking for the error. If
1276 * this is the first failure of this type, clear the error state and write the
1277 * buffer out again. This means we always retry an async write failure at least
1278 * once, but we also need to set the buffer up to behave correctly now for
1279 * repeated failures.
1280 *
1281 * If we get repeated async write failures, then we take action according to the
1282 * error configuration we have been set up to use.
1283 *
1284 * Returns true if this function took care of error handling and the caller must
1285 * not touch the buffer again. Return false if the caller should proceed with
1286 * normal I/O completion handling.
1287 */
1288 static bool
1289 xfs_buf_ioend_handle_error(
1290 struct xfs_buf *bp)
1291 {
1292 struct xfs_mount *mp = bp->b_mount;
1293 struct xfs_error_cfg *cfg;
1294
1295 /*
1296 * If we've already shutdown the journal because of I/O errors, there's
1297 * no point in giving this a retry.
1298 */
1299 if (xlog_is_shutdown(mp->m_log))
1300 goto out_stale;
1301
1302 xfs_buf_ioerror_alert_ratelimited(bp);
1303
1304 /*
1305 * We're not going to bother about retrying this during recovery.
1306 * One strike!
1307 */
1308 if (bp->b_flags & _XBF_LOGRECOVERY) {
1309 xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
1310 return false;
1311 }
1312
1313 /*
1314 * Synchronous writes will have callers process the error.
1315 */
1316 if (!(bp->b_flags & XBF_ASYNC))
1317 goto out_stale;
1318
1319 trace_xfs_buf_iodone_async(bp, _RET_IP_);
1320
1321 cfg = xfs_error_get_cfg(mp, XFS_ERR_METADATA, bp->b_error);
1322 if (bp->b_last_error != bp->b_error ||
1323 !(bp->b_flags & (XBF_STALE | XBF_WRITE_FAIL))) {
1324 bp->b_last_error = bp->b_error;
1325 if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER &&
1326 !bp->b_first_retry_time)
1327 bp->b_first_retry_time = jiffies;
1328 goto resubmit;
1329 }
1330
1331 /*
1332 * Permanent error - we need to trigger a shutdown if we haven't already
1333 * to indicate that inconsistency will result from this action.
1334 */
1335 if (xfs_buf_ioerror_permanent(bp, cfg)) {
1336 xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
1337 goto out_stale;
1338 }
1339
1340 /* Still considered a transient error. Caller will schedule retries. */
1341 if (bp->b_flags & _XBF_INODES)
1342 xfs_buf_inode_io_fail(bp);
1343 else if (bp->b_flags & _XBF_DQUOTS)
1344 xfs_buf_dquot_io_fail(bp);
1345 else
1346 ASSERT(list_empty(&bp->b_li_list));
1347 xfs_buf_ioerror(bp, 0);
1348 xfs_buf_relse(bp);
1349 return true;
1350
1351 resubmit:
1352 xfs_buf_ioerror(bp, 0);
1353 bp->b_flags |= (XBF_DONE | XBF_WRITE_FAIL);
1354 xfs_buf_submit(bp);
1355 return true;
1356 out_stale:
1357 xfs_buf_stale(bp);
1358 bp->b_flags |= XBF_DONE;
1359 bp->b_flags &= ~XBF_WRITE;
1360 trace_xfs_buf_error_relse(bp, _RET_IP_);
1361 return false;
1362 }
1363
1364 static void
1365 xfs_buf_ioend(
1366 struct xfs_buf *bp)
1367 {
1368 trace_xfs_buf_iodone(bp, _RET_IP_);
1369
1370 /*
1371 * Pull in IO completion errors now. We are guaranteed to be running
1372 * single threaded, so we don't need the lock to read b_io_error.
1373 */
1374 if (!bp->b_error && bp->b_io_error)
1375 xfs_buf_ioerror(bp, bp->b_io_error);
1376
1377 if (bp->b_flags & XBF_READ) {
1378 if (!bp->b_error && bp->b_ops)
1379 bp->b_ops->verify_read(bp);
1380 if (!bp->b_error)
1381 bp->b_flags |= XBF_DONE;
1382 } else {
1383 if (!bp->b_error) {
1384 bp->b_flags &= ~XBF_WRITE_FAIL;
1385 bp->b_flags |= XBF_DONE;
1386 }
1387
1388 if (unlikely(bp->b_error) && xfs_buf_ioend_handle_error(bp))
1389 return;
1390
1391 /* clear the retry state */
1392 bp->b_last_error = 0;
1393 bp->b_retries = 0;
1394 bp->b_first_retry_time = 0;
1395
1396 /*
1397 * Note that for things like remote attribute buffers, there may
1398 * not be a buffer log item here, so processing the buffer log
1399 * item must remain optional.
1400 */
1401 if (bp->b_log_item)
1402 xfs_buf_item_done(bp);
1403
1404 if (bp->b_flags & _XBF_INODES)
1405 xfs_buf_inode_iodone(bp);
1406 else if (bp->b_flags & _XBF_DQUOTS)
1407 xfs_buf_dquot_iodone(bp);
1408
1409 }
1410
1411 bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD |
1412 _XBF_LOGRECOVERY);
1413
1414 if (bp->b_flags & XBF_ASYNC)
1415 xfs_buf_relse(bp);
1416 else
1417 complete(&bp->b_iowait);
1418 }
1419
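/*
 * Async completions are bounced to the buffer workqueue so that iodone
 * processing (read verifiers, log item callbacks) runs in process context
 * rather than in the bio completion path.
 */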
1420 static void
1421 xfs_buf_ioend_work(
1422 struct work_struct *work)
1423 {
1424 struct xfs_buf *bp =
1425 container_of(work, struct xfs_buf, b_ioend_work);
1426
1427 xfs_buf_ioend(bp);
1428 }
1429
1430 static void
1431 xfs_buf_ioend_async(
1432 struct xfs_buf *bp)
1433 {
1434 INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work);
1435 queue_work(bp->b_mount->m_buf_workqueue, &bp->b_ioend_work);
1436 }
1437
1438 void
1439 __xfs_buf_ioerror(
1440 struct xfs_buf *bp,
1441 int error,
1442 xfs_failaddr_t failaddr)
1443 {
1444 ASSERT(error <= 0 && error >= -1000);
1445 bp->b_error = error;
1446 trace_xfs_buf_ioerror(bp, error, failaddr);
1447 }
1448
1449 void
1450 xfs_buf_ioerror_alert(
1451 struct xfs_buf *bp,
1452 xfs_failaddr_t func)
1453 {
1454 xfs_buf_alert_ratelimited(bp, "XFS: metadata IO error",
1455 "metadata I/O error in \"%pS\" at daddr 0x%llx len %d error %d",
1456 func, (uint64_t)xfs_buf_daddr(bp),
1457 bp->b_length, -bp->b_error);
1458 }
1459
1460 /*
1461 * To simulate an I/O failure, the buffer must be locked and held with at least
1462 * three references. The LRU reference is dropped by the stale call. The buf
1463 * item reference is dropped via ioend processing. The third reference is owned
1464 * by the caller and is dropped on I/O completion if the buffer is XBF_ASYNC.
1465 */
1466 void
1467 xfs_buf_ioend_fail(
1468 struct xfs_buf *bp)
1469 {
1470 bp->b_flags &= ~XBF_DONE;
1471 xfs_buf_stale(bp);
1472 xfs_buf_ioerror(bp, -EIO);
1473 xfs_buf_ioend(bp);
1474 }
1475
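/*
 * Write a locked buffer synchronously and return the I/O result. A failed
 * write triggers a filesystem shutdown because the dirty metadata can no
 * longer be reconciled with what is on disk.
 */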
1476 int
1477 xfs_bwrite(
1478 struct xfs_buf *bp)
1479 {
1480 int error;
1481
1482 ASSERT(xfs_buf_islocked(bp));
1483
1484 bp->b_flags |= XBF_WRITE;
1485 bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q |
1486 XBF_DONE);
1487
1488 error = xfs_buf_submit(bp);
1489 if (error)
1490 xfs_force_shutdown(bp->b_mount, SHUTDOWN_META_IO_ERROR);
1491 return error;
1492 }
1493
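/*
 * Per-bio completion handler: record the first error seen (a buffer may span
 * several bios) and run end-of-I/O processing once the last bio completes.
 */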
1494 static void
1495 xfs_buf_bio_end_io(
1496 struct bio *bio)
1497 {
1498 struct xfs_buf *bp = (struct xfs_buf *)bio->bi_private;
1499
1500 if (!bio->bi_status &&
1501 (bp->b_flags & XBF_WRITE) && (bp->b_flags & XBF_ASYNC) &&
1502 XFS_TEST_ERROR(false, bp->b_mount, XFS_ERRTAG_BUF_IOERROR))
1503 bio->bi_status = BLK_STS_IOERR;
1504
1505 /*
1506 * don't overwrite existing errors - otherwise we can lose errors on
1507 * buffers that require multiple bios to complete.
1508 */
1509 if (bio->bi_status) {
1510 int error = blk_status_to_errno(bio->bi_status);
1511
1512 cmpxchg(&bp->b_io_error, 0, error);
1513 }
1514
1515 if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
1516 invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
1517
1518 if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
1519 xfs_buf_ioend_async(bp);
1520 bio_put(bio);
1521 }
1522
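/*
 * Issue the bio(s) for a single map vector, walking the buffer's page array
 * from *buf_offset and trimming *count as we go. A large vector may require
 * multiple bios (see next_chunk).
 */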
1523 static void
1524 xfs_buf_ioapply_map(
1525 struct xfs_buf *bp,
1526 int map,
1527 int *buf_offset,
1528 int *count,
1529 blk_opf_t op)
1530 {
1531 int page_index;
1532 unsigned int total_nr_pages = bp->b_page_count;
1533 int nr_pages;
1534 struct bio *bio;
1535 sector_t sector = bp->b_maps[map].bm_bn;
1536 int size;
1537 int offset;
1538
1539 /* skip the pages in the buffer before the start offset */
1540 page_index = 0;
1541 offset = *buf_offset;
1542 while (offset >= PAGE_SIZE) {
1543 page_index++;
1544 offset -= PAGE_SIZE;
1545 }
1546
1547 /*
1548 * Limit the IO size to the length of the current vector, and update the
1549 * remaining IO count for the next time around.
1550 */
1551 size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count);
1552 *count -= size;
1553 *buf_offset += size;
1554
1555 next_chunk:
1556 atomic_inc(&bp->b_io_remaining);
1557 nr_pages = bio_max_segs(total_nr_pages);
1558
1559 bio = bio_alloc(bp->b_target->bt_bdev, nr_pages, op, GFP_NOIO);
1560 bio->bi_iter.bi_sector = sector;
1561 bio->bi_end_io = xfs_buf_bio_end_io;
1562 bio->bi_private = bp;
1563
1564 for (; size && nr_pages; nr_pages--, page_index++) {
1565 int rbytes, nbytes = PAGE_SIZE - offset;
1566
1567 if (nbytes > size)
1568 nbytes = size;
1569
1570 rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes,
1571 offset);
1572 if (rbytes < nbytes)
1573 break;
1574
1575 offset = 0;
1576 sector += BTOBB(nbytes);
1577 size -= nbytes;
1578 total_nr_pages--;
1579 }
1580
1581 if (likely(bio->bi_iter.bi_size)) {
1582 if (xfs_buf_is_vmapped(bp)) {
1583 flush_kernel_vmap_range(bp->b_addr,
1584 xfs_buf_vmap_len(bp));
1585 }
1586 submit_bio(bio);
1587 if (size)
1588 goto next_chunk;
1589 } else {
1590 /*
1591 * This is guaranteed not to be the last io reference count
1592 * because the caller (xfs_buf_submit) holds a count itself.
1593 */
1594 atomic_dec(&bp->b_io_remaining);
1595 xfs_buf_ioerror(bp, -EIO);
1596 bio_put(bio);
1597 }
1598
1599 }
1600
1601 STATIC void
1602 _xfs_buf_ioapply(
1603 struct xfs_buf *bp)
1604 {
1605 struct blk_plug plug;
1606 blk_opf_t op;
1607 int offset;
1608 int size;
1609 int i;
1610
1611 /*
1612 * Make sure we capture only current IO errors rather than stale errors
1613 * left over from previous use of the buffer (e.g. failed readahead).
1614 */
1615 bp->b_error = 0;
1616
1617 if (bp->b_flags & XBF_WRITE) {
1618 op = REQ_OP_WRITE;
1619
1620 /*
1621 * Run the write verifier callback function if it exists. If
1622 * this function fails it will mark the buffer with an error and
1623 * the IO should not be dispatched.
1624 */
1625 if (bp->b_ops) {
1626 bp->b_ops->verify_write(bp);
1627 if (bp->b_error) {
1628 xfs_force_shutdown(bp->b_mount,
1629 SHUTDOWN_CORRUPT_INCORE);
1630 return;
1631 }
1632 } else if (bp->b_rhash_key != XFS_BUF_DADDR_NULL) {
1633 struct xfs_mount *mp = bp->b_mount;
1634
1635 /*
1636 * non-crc filesystems don't attach verifiers during
1637 * log recovery, so don't warn for such filesystems.
1638 */
1639 if (xfs_has_crc(mp)) {
1640 xfs_warn(mp,
1641 "%s: no buf ops on daddr 0x%llx len %d",
1642 __func__, xfs_buf_daddr(bp),
1643 bp->b_length);
1644 xfs_hex_dump(bp->b_addr,
1645 XFS_CORRUPTION_DUMP_LEN);
1646 dump_stack();
1647 }
1648 }
1649 } else {
1650 op = REQ_OP_READ;
1651 if (bp->b_flags & XBF_READ_AHEAD)
1652 op |= REQ_RAHEAD;
1653 }
1654
1655 /* we only use the buffer cache for meta-data */
1656 op |= REQ_META;
1657
1658 /* in-memory targets are directly mapped, no IO required. */
1659 if (xfs_buftarg_is_mem(bp->b_target)) {
1660 xfs_buf_ioend(bp);
1661 return;
1662 }
1663
1664 /*
1665 * Walk all the vectors issuing IO on them. Set up the initial offset
1666 * into the buffer and the desired IO size before we start -
1667 * xfs_buf_ioapply_map() will modify them appropriately for each
1668 * subsequent call.
1669 */
1670 offset = bp->b_offset;
1671 size = BBTOB(bp->b_length);
1672 blk_start_plug(&plug);
1673 for (i = 0; i < bp->b_map_count; i++) {
1674 xfs_buf_ioapply_map(bp, i, &offset, &size, op);
1675 if (bp->b_error)
1676 break;
1677 if (size <= 0)
1678 break; /* all done */
1679 }
1680 blk_finish_plug(&plug);
1681 }
1682
1683 /*
1684 * Wait for I/O completion of a sync buffer and return the I/O error code.
1685 */
1686 static int
1687 xfs_buf_iowait(
1688 struct xfs_buf *bp)
1689 {
1690 ASSERT(!(bp->b_flags & XBF_ASYNC));
1691
1692 trace_xfs_buf_iowait(bp, _RET_IP_);
1693 wait_for_completion(&bp->b_iowait);
1694 trace_xfs_buf_iowait_done(bp, _RET_IP_);
1695
1696 return bp->b_error;
1697 }
1698
1699 /*
1700 * Buffer I/O submission path, read or write. Asynchronous submission transfers
1701 * the buffer lock ownership and the current reference to the IO. It is not
1702 * safe to reference the buffer after a call to this function unless the caller
1703 * holds an additional reference itself.
1704 */
1705 static int
1706 __xfs_buf_submit(
1707 struct xfs_buf *bp,
1708 bool wait)
1709 {
1710 int error = 0;
1711
1712 trace_xfs_buf_submit(bp, _RET_IP_);
1713
1714 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
1715
1716 /*
1717 * On log shutdown we stale and complete the buffer immediately. We can
1718 * be called to read the superblock before the log has been set up, so
1719 * be careful checking the log state.
1720 *
1721 * Checking the mount shutdown state here can result in the log tail
1722 * moving inappropriately on disk as the log may not yet be shut down.
1723 * i.e. failing this buffer on mount shutdown can remove it from the AIL
1724 * and move the tail of the log forwards without having written this
1725 * buffer to disk. This corrupts the log tail state in memory, and
1726 * because the log may not be shut down yet, it can then be propagated
1727 * to disk before the log is shutdown. Hence we check log shutdown
1728 * state here rather than mount state to avoid corrupting the log tail
1729 * on shutdown.
1730 */
1731 if (bp->b_mount->m_log &&
1732 xlog_is_shutdown(bp->b_mount->m_log)) {
1733 xfs_buf_ioend_fail(bp);
1734 return -EIO;
1735 }
1736
1737 /*
1738 * Grab a reference so the buffer does not go away underneath us. For
1739 * async buffers, I/O completion drops the callers reference, which
1740 * could occur before submission returns.
1741 */
1742 xfs_buf_hold(bp);
1743
1744 if (bp->b_flags & XBF_WRITE)
1745 xfs_buf_wait_unpin(bp);
1746
1747 /* clear the internal error state to avoid spurious errors */
1748 bp->b_io_error = 0;
1749
1750 /*
1751 * Set the count to 1 initially, this will stop an I/O completion
1752 * callout which happens before we have started all the I/O from calling
1753 * xfs_buf_ioend too early.
1754 */
1755 atomic_set(&bp->b_io_remaining, 1);
1756 if (bp->b_flags & XBF_ASYNC)
1757 xfs_buf_ioacct_inc(bp);
1758 _xfs_buf_ioapply(bp);
1759
1760 /*
1761 * If _xfs_buf_ioapply failed, we can get back here with only the IO
1762 * reference we took above. If we drop it to zero, run completion so
1763 * that we don't return to the caller with completion still pending.
1764 */
1765 if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
1766 if (bp->b_error || !(bp->b_flags & XBF_ASYNC))
1767 xfs_buf_ioend(bp);
1768 else
1769 xfs_buf_ioend_async(bp);
1770 }
1771
1772 if (wait)
1773 error = xfs_buf_iowait(bp);
1774
1775 /*
1776 * Release the hold that keeps the buffer referenced for the entire
1777 * I/O. Note that if the buffer is async, it is not safe to reference
1778 * after this release.
1779 */
1780 xfs_buf_rele(bp);
1781 return error;
1782 }
1783
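/*
 * Return a pointer to the byte at @offset within the buffer, handling both
 * mapped buffers (b_addr set) and unmapped multi-page buffers.
 */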
1784 void *
1785 xfs_buf_offset(
1786 struct xfs_buf *bp,
1787 size_t offset)
1788 {
1789 struct page *page;
1790
1791 if (bp->b_addr)
1792 return bp->b_addr + offset;
1793
1794 page = bp->b_pages[offset >> PAGE_SHIFT];
1795 return page_address(page) + (offset & (PAGE_SIZE-1));
1796 }
1797
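/*
 * Zero @bsize bytes of the buffer starting at offset @boff, working one page
 * at a time because the buffer may not be virtually contiguous.
 */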
1798 void
1799 xfs_buf_zero(
1800 struct xfs_buf *bp,
1801 size_t boff,
1802 size_t bsize)
1803 {
1804 size_t bend;
1805
1806 bend = boff + bsize;
1807 while (boff < bend) {
1808 struct page *page;
1809 int page_index, page_offset, csize;
1810
1811 page_index = (boff + bp->b_offset) >> PAGE_SHIFT;
1812 page_offset = (boff + bp->b_offset) & ~PAGE_MASK;
1813 page = bp->b_pages[page_index];
1814 csize = min_t(size_t, PAGE_SIZE - page_offset,
1815 BBTOB(bp->b_length) - boff);
1816
1817 ASSERT((csize + page_offset) <= PAGE_SIZE);
1818
1819 memset(page_address(page) + page_offset, 0, csize);
1820
1821 boff += csize;
1822 }
1823 }
1824
1825 /*
1826 * Log a message about and stale a buffer that a caller has decided is corrupt.
1827 *
1828 * This function should be called for the kinds of metadata corruption that
1829 * cannot be detected from a verifier, such as incorrect inter-block relationship
1830 * data. Do /not/ call this function from a verifier function.
1831 *
1832 * The buffer must be XBF_DONE prior to the call. Afterwards, the buffer will
1833 * be marked stale, but b_error will not be set. The caller is responsible for
1834 * releasing the buffer or fixing it.
1835 */
1836 void
1837 __xfs_buf_mark_corrupt(
1838 struct xfs_buf *bp,
1839 xfs_failaddr_t fa)
1840 {
1841 ASSERT(bp->b_flags & XBF_DONE);
1842
1843 xfs_buf_corruption_error(bp, fa);
1844 xfs_buf_stale(bp);
1845 }
1846
1847 /*
1848 * Handling of buffer targets (buftargs).
1849 */
1850
1851 /*
1852 * Wait for any bufs with callbacks that have been submitted but have not yet
1853 * returned. These buffers will have an elevated hold count, so wait on those
1854 * while freeing all the buffers only held by the LRU.
1855 */
1856 static enum lru_status
1857 xfs_buftarg_drain_rele(
1858 struct list_head *item,
1859 struct list_lru_one *lru,
1860 spinlock_t *lru_lock,
1861 void *arg)
1862
1863 {
1864 struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru);
1865 struct list_head *dispose = arg;
1866
1867 if (atomic_read(&bp->b_hold) > 1) {
1868 /* need to wait, so skip it this pass */
1869 trace_xfs_buf_drain_buftarg(bp, _RET_IP_);
1870 return LRU_SKIP;
1871 }
1872 if (!spin_trylock(&bp->b_lock))
1873 return LRU_SKIP;
1874
1875 /*
1876 * clear the LRU reference count so the buffer doesn't get
1877 * ignored in xfs_buf_rele().
1878 */
1879 atomic_set(&bp->b_lru_ref, 0);
1880 bp->b_state |= XFS_BSTATE_DISPOSE;
1881 list_lru_isolate_move(lru, item, dispose);
1882 spin_unlock(&bp->b_lock);
1883 return LRU_REMOVED;
1884 }
1885
1886 /*
1887 * Wait for outstanding I/O on the buftarg to complete.
1888 */
1889 void
1890 xfs_buftarg_wait(
1891 struct xfs_buftarg *btp)
1892 {
1893 /*
1894 * First wait on the buftarg I/O count for all in-flight buffers to be
1895 * released. This is critical as new buffers do not make the LRU until
1896 * they are released.
1897 *
1898 * Next, flush the buffer workqueue to ensure all completion processing
1899 * has finished. Just waiting on buffer locks is not sufficient for
1900 * async IO as the reference count held over IO is not released until
1901 * after the buffer lock is dropped. Hence we need to ensure here that
1902 * all reference counts have been dropped before we start walking the
1903 * LRU list.
1904 */
1905 while (percpu_counter_sum(&btp->bt_io_count))
1906 delay(100);
1907 flush_workqueue(btp->bt_mount->m_buf_workqueue);
1908 }
1909
1910 void
1911 xfs_buftarg_drain(
1912 struct xfs_buftarg *btp)
1913 {
1914 LIST_HEAD(dispose);
1915 int loop = 0;
1916 bool write_fail = false;
1917
1918 xfs_buftarg_wait(btp);
1919
1920 /* loop until there is nothing left on the lru list. */
1921 while (list_lru_count(&btp->bt_lru)) {
1922 list_lru_walk(&btp->bt_lru, xfs_buftarg_drain_rele,
1923 &dispose, LONG_MAX);
1924
1925 while (!list_empty(&dispose)) {
1926 struct xfs_buf *bp;
1927 bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
1928 list_del_init(&bp->b_lru);
1929 if (bp->b_flags & XBF_WRITE_FAIL) {
1930 write_fail = true;
1931 xfs_buf_alert_ratelimited(bp,
1932 "XFS: Corruption Alert",
1933 "Corruption Alert: Buffer at daddr 0x%llx had permanent write failures!",
1934 (long long)xfs_buf_daddr(bp));
1935 }
1936 xfs_buf_rele(bp);
1937 }
1938 if (loop++ != 0)
1939 delay(100);
1940 }
1941
1942 /*
1943 * If one or more failed buffers were freed, that means dirty metadata
1944 * was thrown away. This should only ever happen after I/O completion
1945 * handling has elevated I/O error(s) to permanent failures and shuts
1946 * down the journal.
1947 */
1948 if (write_fail) {
1949 ASSERT(xlog_is_shutdown(btp->bt_mount->m_log));
1950 xfs_alert(btp->bt_mount,
1951 "Please run xfs_repair to determine the extent of the problem.");
1952 }
1953 }
1954
1955 static enum lru_status
1956 xfs_buftarg_isolate(
1957 struct list_head *item,
1958 struct list_lru_one *lru,
1959 spinlock_t *lru_lock,
1960 void *arg)
1961 {
1962 struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru);
1963 struct list_head *dispose = arg;
1964
1965 /*
1966 * we are inverting the lru lock/bp->b_lock here, so use a trylock.
1967 * If we fail to get the lock, just skip it.
1968 */
1969 if (!spin_trylock(&bp->b_lock))
1970 return LRU_SKIP;
1971 /*
1972 * Decrement the b_lru_ref count unless the value is already
1973 * zero. If the value is already zero, we need to reclaim the
1974 * buffer, otherwise it gets another trip through the LRU.
1975 */
1976 if (atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
1977 spin_unlock(&bp->b_lock);
1978 return LRU_ROTATE;
1979 }
1980
1981 bp->b_state |= XFS_BSTATE_DISPOSE;
1982 list_lru_isolate_move(lru, item, dispose);
1983 spin_unlock(&bp->b_lock);
1984 return LRU_REMOVED;
1985 }
1986
1987 static unsigned long
1988 xfs_buftarg_shrink_scan(
1989 struct shrinker *shrink,
1990 struct shrink_control *sc)
1991 {
1992 struct xfs_buftarg *btp = shrink->private_data;
1993 LIST_HEAD(dispose);
1994 unsigned long freed;
1995
1996 freed = list_lru_shrink_walk(&btp->bt_lru, sc,
1997 xfs_buftarg_isolate, &dispose);
1998
1999 while (!list_empty(&dispose)) {
2000 struct xfs_buf *bp;
2001 bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
2002 list_del_init(&bp->b_lru);
2003 xfs_buf_rele(bp);
2004 }
2005
2006 return freed;
2007 }
2008
2009 static unsigned long
2010 xfs_buftarg_shrink_count(
2011 struct shrinker *shrink,
2012 struct shrink_control *sc)
2013 {
2014 struct xfs_buftarg *btp = shrink->private_data;
2015 return list_lru_shrink_count(&btp->bt_lru, sc);
2016 }
2017
2018 void
2019 xfs_destroy_buftarg(
2020 struct xfs_buftarg *btp)
2021 {
2022 shrinker_free(btp->bt_shrinker);
2023 ASSERT(percpu_counter_sum(&btp->bt_io_count) == 0);
2024 percpu_counter_destroy(&btp->bt_io_count);
2025 list_lru_destroy(&btp->bt_lru);
2026 }
2027
2028 void
2029 xfs_free_buftarg(
2030 struct xfs_buftarg *btp)
2031 {
2032 xfs_destroy_buftarg(btp);
2033 fs_put_dax(btp->bt_daxdev, btp->bt_mount);
2034 /* the main block device is closed by kill_block_super */
2035 if (btp->bt_bdev != btp->bt_mount->m_super->s_bdev)
2036 bdev_fput(btp->bt_bdev_file);
2037 kfree(btp);
2038 }
2039
2040 int
2041 xfs_setsize_buftarg(
2042 struct xfs_buftarg *btp,
2043 unsigned int sectorsize)
2044 {
2045 /* Set up metadata sector size info */
2046 btp->bt_meta_sectorsize = sectorsize;
2047 btp->bt_meta_sectormask = sectorsize - 1;
2048
2049 if (set_blocksize(btp->bt_bdev_file, sectorsize)) {
2050 xfs_warn(btp->bt_mount,
2051 "Cannot set_blocksize to %u on device %pg",
2052 sectorsize, btp->bt_bdev);
2053 return -EINVAL;
2054 }
2055
2056 return 0;
2057 }
2058
2059 int
2060 xfs_init_buftarg(
2061 struct xfs_buftarg *btp,
2062 size_t logical_sectorsize,
2063 const char *descr)
2064 {
2065 /* Set up device logical sector size mask */
2066 btp->bt_logical_sectorsize = logical_sectorsize;
2067 btp->bt_logical_sectormask = logical_sectorsize - 1;
2068
2069 /*
2070 * Buffer IO error rate limiting. Limit it to no more than 10 messages
2071 * per 30 seconds so as to not spam logs too much on repeated errors.
2072 */
2073 ratelimit_state_init(&btp->bt_ioerror_rl, 30 * HZ,
2074 DEFAULT_RATELIMIT_BURST);
2075
2076 if (list_lru_init(&btp->bt_lru))
2077 return -ENOMEM;
2078 if (percpu_counter_init(&btp->bt_io_count, 0, GFP_KERNEL))
2079 goto out_destroy_lru;
2080
2081 btp->bt_shrinker =
2082 shrinker_alloc(SHRINKER_NUMA_AWARE, "xfs-buf:%s", descr);
2083 if (!btp->bt_shrinker)
2084 goto out_destroy_io_count;
2085 btp->bt_shrinker->count_objects = xfs_buftarg_shrink_count;
2086 btp->bt_shrinker->scan_objects = xfs_buftarg_shrink_scan;
2087 btp->bt_shrinker->private_data = btp;
2088 shrinker_register(btp->bt_shrinker);
2089 return 0;
2090
2091 out_destroy_io_count:
2092 percpu_counter_destroy(&btp->bt_io_count);
2093 out_destroy_lru:
2094 list_lru_destroy(&btp->bt_lru);
2095 return -ENOMEM;
2096 }
2097
2098 struct xfs_buftarg *
2099 xfs_alloc_buftarg(
2100 struct xfs_mount *mp,
2101 struct file *bdev_file)
2102 {
2103 struct xfs_buftarg *btp;
2104 const struct dax_holder_operations *ops = NULL;
2105
2106 #if defined(CONFIG_FS_DAX) && defined(CONFIG_MEMORY_FAILURE)
2107 ops = &xfs_dax_holder_operations;
2108 #endif
2109 btp = kzalloc(sizeof(*btp), GFP_KERNEL | __GFP_NOFAIL);
2110
2111 btp->bt_mount = mp;
2112 btp->bt_bdev_file = bdev_file;
2113 btp->bt_bdev = file_bdev(bdev_file);
2114 btp->bt_dev = btp->bt_bdev->bd_dev;
2115 btp->bt_daxdev = fs_dax_get_by_bdev(btp->bt_bdev, &btp->bt_dax_part_off,
2116 mp, ops);
2117
2118 /*
2119 * When allocating the buftargs we have not yet read the super block and
2120 * thus don't know the file system sector size yet.
2121 */
2122 if (xfs_setsize_buftarg(btp, bdev_logical_block_size(btp->bt_bdev)))
2123 goto error_free;
2124 if (xfs_init_buftarg(btp, bdev_logical_block_size(btp->bt_bdev),
2125 mp->m_super->s_id))
2126 goto error_free;
2127
2128 return btp;
2129
2130 error_free:
2131 kfree(btp);
2132 return NULL;
2133 }
2134
2135 static inline void
2136 xfs_buf_list_del(
2137 struct xfs_buf *bp)
2138 {
2139 list_del_init(&bp->b_list);
2140 wake_up_var(&bp->b_list);
2141 }
2142
2143 /*
2144 * Cancel a delayed write list.
2145 *
2146 * Remove each buffer from the list, clear the delwri queue flag and drop the
2147 * associated buffer reference.
2148 */
2149 void
2150 xfs_buf_delwri_cancel(
2151 struct list_head *list)
2152 {
2153 struct xfs_buf *bp;
2154
2155 while (!list_empty(list)) {
2156 bp = list_first_entry(list, struct xfs_buf, b_list);
2157
2158 xfs_buf_lock(bp);
2159 bp->b_flags &= ~_XBF_DELWRI_Q;
2160 xfs_buf_list_del(bp);
2161 xfs_buf_relse(bp);
2162 }
2163 }
2164
2165 /*
2166 * Add a buffer to the delayed write list.
2167 *
2168 * This queues a buffer for writeout if it hasn't already been queued. Note that
2169 * neither this routine nor the buffer list submission functions perform
2170 * any internal synchronization. It is expected that the lists are thread-local
2171 * to the callers.
2172 *
2173 * Returns true if we queued up the buffer, or false if it already had
2174 * been on the buffer list.
2175 */
2176 bool
2177 xfs_buf_delwri_queue(
2178 struct xfs_buf *bp,
2179 struct list_head *list)
2180 {
2181 ASSERT(xfs_buf_islocked(bp));
2182 ASSERT(!(bp->b_flags & XBF_READ));
2183
2184 /*
2185 * If the buffer is already marked delwri it is already queued up
2186 * by someone else for immediate writeout. Just ignore it in that
2187 * case.
2188 */
2189 if (bp->b_flags & _XBF_DELWRI_Q) {
2190 trace_xfs_buf_delwri_queued(bp, _RET_IP_);
2191 return false;
2192 }
2193
2194 trace_xfs_buf_delwri_queue(bp, _RET_IP_);
2195
2196 /*
2197 * If a buffer gets written out synchronously or marked stale while it
2198 * is on a delwri list we lazily remove it. To do this, the other party
2199 * clears the _XBF_DELWRI_Q flag but otherwise leaves the buffer alone.
2200 * It remains referenced and on the list. In a rare corner case it
2201 * might get re-added to a delwri list after the synchronous writeout, in
2202 * which case we just need to re-add the flag here.
2203 */
2204 bp->b_flags |= _XBF_DELWRI_Q;
2205 if (list_empty(&bp->b_list)) {
2206 atomic_inc(&bp->b_hold);
2207 list_add_tail(&bp->b_list, list);
2208 }
2209
2210 return true;
2211 }
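
/*
 * Illustrative sketch, not part of this file: the usual calling pattern is
 * to accumulate buffers on a stack-local list and then flush it in one go
 * (the loop and variable names below are hypothetical):
 *
 *	LIST_HEAD(buffer_list);
 *
 *	for each locked buffer that needs writing:
 *		xfs_buf_delwri_queue(bp, &buffer_list);
 *		xfs_buf_unlock(bp);
 *
 *	error = xfs_buf_delwri_submit(&buffer_list);
 *
 * Because the list lives on the caller's stack, no extra locking is needed
 * around the queue and submit calls.
 */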
2212
2213 /*
2214 * Queue a buffer to this delwri list as part of a data integrity operation.
2215 * If the buffer is on any other delwri list, we'll wait for that to clear
2216 * so that the caller can submit the buffer for IO and wait for the result.
2217 * Callers must ensure the buffer is not already on the list.
2218 */
2219 void
2220 xfs_buf_delwri_queue_here(
2221 struct xfs_buf *bp,
2222 struct list_head *buffer_list)
2223 {
2224 /*
2225 * We need this buffer to end up on the /caller's/ delwri list, not any
2226 * old list. This can happen if the buffer is marked stale (which
2227 * clears DELWRI_Q) after the AIL queues the buffer to its list but
2228 * before the AIL has a chance to submit the list.
2229 */
2230 while (!list_empty(&bp->b_list)) {
2231 xfs_buf_unlock(bp);
2232 wait_var_event(&bp->b_list, list_empty(&bp->b_list));
2233 xfs_buf_lock(bp);
2234 }
2235
2236 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
2237
2238 xfs_buf_delwri_queue(bp, buffer_list);
2239 }
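
/*
 * Illustrative sketch, not part of this file: a data integrity caller that
 * has to see the write complete through its own list would typically follow
 * the queueing with an immediate synchronous submission (buffer_list is a
 * hypothetical caller-owned list):
 *
 *	xfs_buf_delwri_queue_here(bp, &buffer_list);
 *	xfs_buf_unlock(bp);
 *	error = xfs_buf_delwri_submit(&buffer_list);
 *
 * The wait loop above guarantees the buffer is no longer riding on some
 * other thread's delwri list by the time it is added here.
 */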
2240
2241 /*
2242 * The compare function is more complex than it needs to be because the
2243 * return value is only 32 bits and we are comparing 64 bit values: simply
2244 * truncating the 64 bit difference to an int could flip its sign.
2245 */
2246 static int
2247 xfs_buf_cmp(
2248 void *priv,
2249 const struct list_head *a,
2250 const struct list_head *b)
2251 {
2252 struct xfs_buf *ap = container_of(a, struct xfs_buf, b_list);
2253 struct xfs_buf *bp = container_of(b, struct xfs_buf, b_list);
2254 xfs_daddr_t diff;
2255
2256 diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn;
2257 if (diff < 0)
2258 return -1;
2259 if (diff > 0)
2260 return 1;
2261 return 0;
2262 }
2263
2264 /*
2265 * Submit buffers for write. If wait_list is specified, the buffers are
2266 * submitted using sync I/O and placed on the wait list such that the caller can
2267 * iowait each buffer. Otherwise async I/O is used and the buffers are released
2268 * at I/O completion time. In either case, buffers remain locked until I/O
2269 * completes and the buffer is released from the queue.
2270 */
2271 static int
2272 xfs_buf_delwri_submit_buffers(
2273 struct list_head *buffer_list,
2274 struct list_head *wait_list)
2275 {
2276 struct xfs_buf *bp, *n;
2277 int pinned = 0;
2278 struct blk_plug plug;
2279
2280 list_sort(NULL, buffer_list, xfs_buf_cmp);
2281
2282 blk_start_plug(&plug);
2283 list_for_each_entry_safe(bp, n, buffer_list, b_list) {
2284 if (!wait_list) {
2285 if (!xfs_buf_trylock(bp))
2286 continue;
2287 if (xfs_buf_ispinned(bp)) {
2288 xfs_buf_unlock(bp);
2289 pinned++;
2290 continue;
2291 }
2292 } else {
2293 xfs_buf_lock(bp);
2294 }
2295
2296 /*
2297 * Someone else might have written the buffer synchronously or
2298 * marked it stale in the meantime. In that case only the
2299 * _XBF_DELWRI_Q flag got cleared, and we have to drop the
2300 * reference and remove it from the list here.
2301 */
2302 if (!(bp->b_flags & _XBF_DELWRI_Q)) {
2303 xfs_buf_list_del(bp);
2304 xfs_buf_relse(bp);
2305 continue;
2306 }
2307
2308 trace_xfs_buf_delwri_split(bp, _RET_IP_);
2309
2310 /*
2311 * If we have a wait list, each buffer (and associated delwri
2312 * queue reference) transfers to it and is submitted
2313 * synchronously. Otherwise, drop the buffer from the delwri
2314 * queue and submit async.
2315 */
2316 bp->b_flags &= ~_XBF_DELWRI_Q;
2317 bp->b_flags |= XBF_WRITE;
2318 if (wait_list) {
2319 bp->b_flags &= ~XBF_ASYNC;
2320 list_move_tail(&bp->b_list, wait_list);
2321 } else {
2322 bp->b_flags |= XBF_ASYNC;
2323 xfs_buf_list_del(bp);
2324 }
2325 __xfs_buf_submit(bp, false);
2326 }
2327 blk_finish_plug(&plug);
2328
2329 return pinned;
2330 }
2331
2332 /*
2333 * Write out a buffer list asynchronously.
2334 *
2335 * This will take the @buffer_list, write all non-locked and non-pinned buffers
2336 * out and not wait for I/O completion on any of the buffers. This interface
2337 * is only safely usable for callers that can track I/O completion by higher
2338 * level means, e.g. AIL pushing as the @buffer_list is consumed in this
2339 * function.
2340 *
2341 * Note: this function will skip buffers it would block on, and in doing so
2342 * leaves them on @buffer_list so they can be retried on a later pass. As such,
2343 * it is up to the caller to ensure that the buffer list is fully submitted or
2344 * cancelled appropriately when they are finished with the list. Failure to
2345 * cancel or resubmit the list until it is empty will result in leaked buffers
2346 * at unmount time.
2347 */
2348 int
2349 xfs_buf_delwri_submit_nowait(
2350 struct list_head *buffer_list)
2351 {
2352 return xfs_buf_delwri_submit_buffers(buffer_list, NULL);
2353 }
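
/*
 * Illustrative sketch, not part of this file: because skipped buffers stay
 * on @buffer_list, a caller is expected to keep resubmitting until the list
 * drains, or to cancel whatever is left on an error/shutdown path.  The
 * back-off policy below is hypothetical:
 *
 *	while (!list_empty(&buffer_list)) {
 *		if (xfs_buf_delwri_submit_nowait(&buffer_list))
 *			back off (e.g. force the log) before retrying;
 *	}
 *
 * and on a bail-out path:
 *
 *	xfs_buf_delwri_cancel(&buffer_list);
 */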
2354
2355 /*
2356 * Write out a buffer list synchronously.
2357 *
2358 * This will take the @buffer_list, write all buffers out and wait for I/O
2359 * completion on all of the buffers. @buffer_list is consumed by the function,
2360 * so callers must have some other way of tracking buffers if they require such
2361 * functionality.
2362 */
2363 int
2364 xfs_buf_delwri_submit(
2365 struct list_head *buffer_list)
2366 {
2367 LIST_HEAD (wait_list);
2368 int error = 0, error2;
2369 struct xfs_buf *bp;
2370
2371 xfs_buf_delwri_submit_buffers(buffer_list, &wait_list);
2372
2373 /* Wait for IO to complete. */
2374 while (!list_empty(&wait_list)) {
2375 bp = list_first_entry(&wait_list, struct xfs_buf, b_list);
2376
2377 xfs_buf_list_del(bp);
2378
2379 /*
2380 * Wait on the locked buffer, check for errors and unlock and
2381 * release the delwri queue reference.
2382 */
2383 error2 = xfs_buf_iowait(bp);
2384 xfs_buf_relse(bp);
2385 if (!error)
2386 error = error2;
2387 }
2388
2389 return error;
2390 }
2391
2392 /*
2393 * Push a single buffer on a delwri queue.
2394 *
2395 * The purpose of this function is to submit a single buffer of a delwri queue
2396 * and return with the buffer still on the original queue. The waiting delwri
2397 * buffer submission infrastructure guarantees transfer of the delwri queue
2398 * buffer reference to a temporary wait list. We reuse this infrastructure to
2399 * transfer the buffer back to the original queue.
2400 *
2401 * Note the buffer transitions from the queued state, to the submitted and wait
2402 * listed state and back to the queued state during this call. The buffer
2403 * locking and queue management logic between _delwri_pushbuf() and
2404 * _delwri_queue() guarantees that the buffer cannot be queued to another list
2405 * before returning.
2406 */
2407 int
2408 xfs_buf_delwri_pushbuf(
2409 struct xfs_buf *bp,
2410 struct list_head *buffer_list)
2411 {
2412 LIST_HEAD (submit_list);
2413 int error;
2414
2415 ASSERT(bp->b_flags & _XBF_DELWRI_Q);
2416
2417 trace_xfs_buf_delwri_pushbuf(bp, _RET_IP_);
2418
2419 /*
2420 * Isolate the buffer to a new local list so we can submit it for I/O
2421 * independently from the rest of the original list.
2422 */
2423 xfs_buf_lock(bp);
2424 list_move(&bp->b_list, &submit_list);
2425 xfs_buf_unlock(bp);
2426
2427 /*
2428 * Delwri submission clears the DELWRI_Q buffer flag and returns with
2429 * the buffer on the wait list with the original reference. Rather than
2430 * bounce the buffer from a local wait list back to the original list
2431 * after I/O completion, reuse the original list as the wait list.
2432 */
2433 xfs_buf_delwri_submit_buffers(&submit_list, buffer_list);
2434
2435 /*
2436 * The buffer is now locked, under I/O and wait listed on the original
2437 * delwri queue. Wait for I/O completion, restore the DELWRI_Q flag and
2438 * return with the buffer unlocked and on the original queue.
2439 */
2440 error = xfs_buf_iowait(bp);
2441 bp->b_flags |= _XBF_DELWRI_Q;
2442 xfs_buf_unlock(bp);
2443
2444 return error;
2445 }
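
/*
 * Illustrative sketch, not part of this file: a caller that needs one
 * specific buffer written out immediately while keeping its queue intact
 * might do (buffer_list is the caller's hypothetical delwri queue):
 *
 *	error = xfs_buf_delwri_pushbuf(bp, &buffer_list);
 *
 * On return bp has been written (or the I/O failed with an error), is
 * unlocked, and is once again queued on @buffer_list for the eventual bulk
 * submission.
 */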
2446
2447 void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref)
2448 {
2449 /*
2450 * Set the lru reference count to 0 based on the error injection tag.
2451 * This allows userspace to disrupt buffer caching for debug/testing
2452 * purposes.
2453 */
2454 if (XFS_TEST_ERROR(false, bp->b_mount, XFS_ERRTAG_BUF_LRU_REF))
2455 lru_ref = 0;
2456
2457 atomic_set(&bp->b_lru_ref, lru_ref);
2458 }
2459
2460 /*
2461 * Verify an on-disk magic value against the magic value specified in the
2462 * verifier structure. The verifier magic is in disk byte order so the caller is
2463 * expected to pass the value directly from disk.
2464 */
2465 bool
2466 xfs_verify_magic(
2467 struct xfs_buf *bp,
2468 __be32 dmagic)
2469 {
2470 struct xfs_mount *mp = bp->b_mount;
2471 int idx;
2472
2473 idx = xfs_has_crc(mp);
2474 if (WARN_ON(!bp->b_ops || !bp->b_ops->magic[idx]))
2475 return false;
2476 return dmagic == bp->b_ops->magic[idx];
2477 }
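
/*
 * Illustrative sketch, not part of this file: a read verifier for a block
 * whose magic number differs between the v4 and v5 on-disk formats can let
 * this helper pick the right value (the struct and field names here are
 * hypothetical):
 *
 *	struct xfs_some_hdr	*hdr = bp->b_addr;
 *
 *	if (!xfs_verify_magic(bp, hdr->magic))
 *		return __this_address;
 *
 * provided the buffer's b_ops fills in both slots of the magic[] array with
 * disk-byte-order values.
 */
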
2478 /*
2479 * Verify an on-disk magic value against the magic value specified in the
2480 * verifier structure. The verifier magic is in disk byte order so the caller is
2481 * expected to pass the value directly from disk.
2482 */
2483 bool
2484 xfs_verify_magic16(
2485 struct xfs_buf *bp,
2486 __be16 dmagic)
2487 {
2488 struct xfs_mount *mp = bp->b_mount;
2489 int idx;
2490
2491 idx = xfs_has_crc(mp);
2492 if (WARN_ON(!bp->b_ops || !bp->b_ops->magic16[idx]))
2493 return false;
2494 return dmagic == bp->b_ops->magic16[idx];
2495 }
2496