// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>
#include <linux/pagevec.h>
#include <linux/sched/mm.h>
#include <trace/events/block.h>
#include <linux/fscrypt.h>
#include <linux/fsverity.h>
#include <linux/sched/isolation.h>

#include "internal.h"

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
			  enum rw_hint hint, struct writeback_control *wbc);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void touch_buffer(struct buffer_head *bh)
{
	trace_block_touch_buffer(bh);
	folio_mark_accessed(bh->b_folio);
}
EXPORT_SYMBOL(touch_buffer);

void __lock_buffer(struct buffer_head *bh)
{
	wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void unlock_buffer(struct buffer_head *bh)
{
	clear_bit_unlock(BH_Lock, &bh->b_state);
	smp_mb__after_atomic();
	wake_up_bit(&bh->b_state, BH_Lock);
}
EXPORT_SYMBOL(unlock_buffer);

/*
 * Returns whether the folio has dirty or writeback buffers. If all the
 * buffers are unlocked and clean then the folio_test_dirty information is
 * stale. If any of the buffers are locked, it is assumed they are locked
 * for IO.
 */
void buffer_check_dirty_writeback(struct folio *folio,
				  bool *dirty, bool *writeback)
{
	struct buffer_head *head, *bh;
	*dirty = false;
	*writeback = false;

	BUG_ON(!folio_test_locked(folio));

	head = folio_buffers(folio);
	if (!head)
		return;

	if (folio_test_writeback(folio))
		*writeback = true;

	bh = head;
	do {
		if (buffer_locked(bh))
			*writeback = true;

		if (buffer_dirty(bh))
			*dirty = true;

		bh = bh->b_this_page;
	} while (bh != head);
}

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head *bh)
{
	wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__wait_on_buffer);
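
/*
 * A minimal usage sketch (illustration only): callers that need the buffer
 * to stay in a known state must take the lock themselves rather than merely
 * waiting for it, e.g.
 *
 *	lock_buffer(bh);
 *	... examine or modify the buffer contents ...
 *	unlock_buffer(bh);
 *
 * wait_on_buffer(bh) alone only guarantees that the buffer was unlocked at
 * some point; another thread may lock it again immediately afterwards.
 */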

static void buffer_io_error(struct buffer_head *bh, char *msg)
{
	if (!test_bit(BH_Quiet, &bh->b_state))
		printk_ratelimited(KERN_ERR
			"Buffer I/O error on dev %pg, logical block %llu%s\n",
			bh->b_bdev, (unsigned long long)bh->b_blocknr, msg);
}

/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		/* This happens, due to failed read-ahead attempts. */
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler.  Just mark it up-to-date and
 * unlock the buffer.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
	__end_buffer_read_notouch(bh, uptodate);
	put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_read_sync);

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		buffer_io_error(bh, ", lost sync page write");
		mark_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_write_sync);

/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * i_private_lock.
 *
 * Hack idea: for the blockdev mapping, i_private_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take i_private_lock.
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
	struct address_space *bd_mapping = bdev->bd_mapping;
	const int blkbits = bd_mapping->host->i_blkbits;
	struct buffer_head *ret = NULL;
	pgoff_t index;
	struct buffer_head *bh;
	struct buffer_head *head;
	struct folio *folio;
	int all_mapped = 1;
	static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);

	index = ((loff_t)block << blkbits) / PAGE_SIZE;
	folio = __filemap_get_folio(bd_mapping, index, FGP_ACCESSED, 0);
	if (IS_ERR(folio))
		goto out;

	spin_lock(&bd_mapping->i_private_lock);
	head = folio_buffers(folio);
	if (!head)
		goto out_unlock;
	bh = head;
	do {
		if (!buffer_mapped(bh))
			all_mapped = 0;
		else if (bh->b_blocknr == block) {
			ret = bh;
			get_bh(bh);
			goto out_unlock;
		}
		bh = bh->b_this_page;
	} while (bh != head);

	/* we might be here because some of the buffers on this page are
	 * not mapped.  This is due to various races between
	 * file io on the block device and getblk.  It gets dealt with
	 * elsewhere, don't buffer_error if we had some unmapped buffers
	 */
	ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
	if (all_mapped && __ratelimit(&last_warned)) {
		printk("__find_get_block_slow() failed. block=%llu, "
		       "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
		       "device %pg blocksize: %d\n",
		       (unsigned long long)block,
		       (unsigned long long)bh->b_blocknr,
		       bh->b_state, bh->b_size, bdev,
		       1 << blkbits);
	}
out_unlock:
	spin_unlock(&bd_mapping->i_private_lock);
	folio_put(folio);
out:
	return ret;
}

static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct folio *folio;
	int folio_uptodate = 1;

	BUG_ON(!buffer_async_read(bh));

	folio = bh->b_folio;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		clear_buffer_uptodate(bh);
		buffer_io_error(bh, ", async page read");
	}

	/*
	 * Be _very_ careful from here on. Bad things can happen if
	 * two buffer heads end IO at almost the same time and both
	 * decide that the page is now completely done.
	 */
	first = folio_buffers(folio);
	spin_lock_irqsave(&first->b_uptodate_lock, flags);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			folio_uptodate = 0;
		if (buffer_async_read(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);

	folio_end_read(folio, folio_uptodate);
	return;

still_busy:
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	return;
}

struct postprocess_bh_ctx {
	struct work_struct work;
	struct buffer_head *bh;
};

static void verify_bh(struct work_struct *work)
{
	struct postprocess_bh_ctx *ctx =
		container_of(work, struct postprocess_bh_ctx, work);
	struct buffer_head *bh = ctx->bh;
	bool valid;

	valid = fsverity_verify_blocks(bh->b_folio, bh->b_size, bh_offset(bh));
	end_buffer_async_read(bh, valid);
	kfree(ctx);
}

static bool need_fsverity(struct buffer_head *bh)
{
	struct folio *folio = bh->b_folio;
	struct inode *inode = folio->mapping->host;

	return fsverity_active(inode) &&
		/* needed by ext4 */
		folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}

static void decrypt_bh(struct work_struct *work)
{
	struct postprocess_bh_ctx *ctx =
		container_of(work, struct postprocess_bh_ctx, work);
	struct buffer_head *bh = ctx->bh;
	int err;

	err = fscrypt_decrypt_pagecache_blocks(bh->b_folio, bh->b_size,
					       bh_offset(bh));
	if (err == 0 && need_fsverity(bh)) {
		/*
		 * We use different work queues for decryption and for verity
		 * because verity may require reading metadata pages that need
		 * decryption, and we shouldn't recurse to the same workqueue.
		 */
		INIT_WORK(&ctx->work, verify_bh);
		fsverity_enqueue_verify_work(&ctx->work);
		return;
	}
	end_buffer_async_read(bh, err == 0);
	kfree(ctx);
}

/*
 * I/O completion handler for block_read_full_folio() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
{
	struct inode *inode = bh->b_folio->mapping->host;
	bool decrypt = fscrypt_inode_uses_fs_layer_crypto(inode);
	bool verify = need_fsverity(bh);

	/* Decrypt (with fscrypt) and/or verify (with fsverity) if needed. */
	if (uptodate && (decrypt || verify)) {
		struct postprocess_bh_ctx *ctx =
			kmalloc(sizeof(*ctx), GFP_ATOMIC);

		if (ctx) {
			ctx->bh = bh;
			if (decrypt) {
				INIT_WORK(&ctx->work, decrypt_bh);
				fscrypt_enqueue_decrypt_work(&ctx->work);
			} else {
				INIT_WORK(&ctx->work, verify_bh);
				fsverity_enqueue_verify_work(&ctx->work);
			}
			return;
		}
		uptodate = 0;
	}
	end_buffer_async_read(bh, uptodate);
}

/*
 * Completion handler for block_write_full_folio() - folios which are unlocked
 * during I/O, and which have the writeback flag cleared upon I/O completion.
 */
static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct folio *folio;

	BUG_ON(!buffer_async_write(bh));

	folio = bh->b_folio;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		buffer_io_error(bh, ", lost async page write");
		mark_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}

	first = folio_buffers(folio);
	spin_lock_irqsave(&first->b_uptodate_lock, flags);

	clear_buffer_async_write(bh);
	unlock_buffer(bh);
	tmp = bh->b_this_page;
	while (tmp != bh) {
		if (buffer_async_write(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	}
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	folio_end_writeback(folio);
	return;

still_busy:
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	return;
}

/*
 * If a page's buffers are under async read-in (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async I/O against any of
 * the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_read_io;
	set_buffer_async_read(bh);
}

static void mark_buffer_async_write_endio(struct buffer_head *bh,
					  bh_end_io_t *handler)
{
	bh->b_end_io = handler;
	set_buffer_async_write(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
	mark_buffer_async_write_endio(bh, end_buffer_async_write);
}
EXPORT_SYMBOL(mark_buffer_async_write);

/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_dirty_inode(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->i_private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for i_private_list is via the i_private_lock in the
 * address_space which backs the buffers.  Which is different from the
 * address_space against which the buffers are listed.  So for a particular
 * address_space, mapping->i_private_lock does *not* protect
 * mapping->i_private_list!  In fact, mapping->i_private_list will always be
 * protected by the backing blockdev's ->i_private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->i_private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->i_private_list via these
 * utility functions are free to use i_private_lock and i_private_list for
 * whatever they want.  The only requirement is that list_empty(i_private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */

/*
 * The buffer's backing address_space's i_private_lock must be held
 */
static void __remove_assoc_queue(struct buffer_head *bh)
{
	list_del_init(&bh->b_assoc_buffers);
	WARN_ON(!bh->b_assoc_map);
	bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
	return !list_empty(&inode->i_data.i_private_list);
}

/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with write_dirty_buffer
 * as you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head *p;
	int err = 0;

	spin_lock(lock);
repeat:
	list_for_each_prev(p, list) {
		bh = BH_ENTRY(p);
		if (buffer_locked(bh)) {
			get_bh(bh);
			spin_unlock(lock);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				err = -EIO;
			brelse(bh);
			spin_lock(lock);
			goto repeat;
		}
	}
	spin_unlock(lock);
	return err;
}

/**
 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->i_private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
	struct address_space *buffer_mapping = mapping->i_private_data;

	if (buffer_mapping == NULL || list_empty(&mapping->i_private_list))
		return 0;

	return fsync_buffers_list(&buffer_mapping->i_private_lock,
					&mapping->i_private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);

/**
 * generic_buffers_fsync_noflush - generic buffer fsync implementation
 * for simple filesystems with no inode lock
 *
 * @file:	file to synchronize
 * @start:	start offset in bytes
 * @end:	end offset in bytes (inclusive)
 * @datasync:	only synchronize essential metadata if true
 *
 * This is a generic implementation of the fsync method for simple
 * filesystems which track all non-inode metadata in the buffers list
 * hanging off the address_space structure.
 */
int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end,
				  bool datasync)
{
	struct inode *inode = file->f_mapping->host;
	int err;
	int ret;

	err = file_write_and_wait_range(file, start, end);
	if (err)
		return err;

	ret = sync_mapping_buffers(inode->i_mapping);
	if (!(inode->i_state & I_DIRTY_ALL))
		goto out;
	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
		goto out;

	err = sync_inode_metadata(inode, 1);
	if (ret == 0)
		ret = err;

out:
	/* check and advance again to catch errors after syncing out buffers */
	err = file_check_and_advance_wb_err(file);
	if (ret == 0)
		ret = err;
	return ret;
}
EXPORT_SYMBOL(generic_buffers_fsync_noflush);

/**
 * generic_buffers_fsync - generic buffer fsync implementation
 * for simple filesystems with no inode lock
 *
 * @file:	file to synchronize
 * @start:	start offset in bytes
 * @end:	end offset in bytes (inclusive)
 * @datasync:	only synchronize essential metadata if true
 *
 * This is a generic implementation of the fsync method for simple
 * filesystems which track all non-inode metadata in the buffers list
 * hanging off the address_space structure.  This also makes sure that
 * a device cache flush operation is called at the end.
 */
int generic_buffers_fsync(struct file *file, loff_t start, loff_t end,
			  bool datasync)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	ret = generic_buffers_fsync_noflush(file, start, end, datasync);
	if (!ret)
		ret = blkdev_issue_flush(inode->i_sb->s_bdev);
	return ret;
}
EXPORT_SYMBOL(generic_buffers_fsync);
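
/*
 * A minimal sketch (illustration only) of how a simple buffer-based
 * filesystem might wire this up; the "myfs_" names are hypothetical:
 *
 *	static int myfs_fsync(struct file *file, loff_t start, loff_t end,
 *			      int datasync)
 *	{
 *		return generic_buffers_fsync(file, start, end, datasync);
 *	}
 *
 *	const struct file_operations myfs_file_operations = {
 *		...
 *		.fsync	= myfs_fsync,
 *	};
 */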

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
			  sector_t bblock, unsigned blocksize)
{
	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
	if (bh) {
		if (buffer_dirty(bh))
			write_dirty_buffer(bh, 0);
		put_bh(bh);
	}
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct address_space *buffer_mapping = bh->b_folio->mapping;

	mark_buffer_dirty(bh);
	if (!mapping->i_private_data) {
		mapping->i_private_data = buffer_mapping;
	} else {
		BUG_ON(mapping->i_private_data != buffer_mapping);
	}
	if (!bh->b_assoc_map) {
		spin_lock(&buffer_mapping->i_private_lock);
		list_move_tail(&bh->b_assoc_buffers,
				&mapping->i_private_list);
		bh->b_assoc_map = mapping;
		spin_unlock(&buffer_mapping->i_private_lock);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);
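
/*
 * A minimal sketch (illustration only) of the dependency pattern described
 * above: a filesystem dirties a blockdev-backed metadata buffer on behalf
 * of a regular file, then relies on sync_mapping_buffers() at fsync time.
 * "indirect_bh" and "inode" are assumed to be supplied by the caller:
 *
 *	mark_buffer_dirty_inode(indirect_bh, inode);
 *	...
 *	err = sync_mapping_buffers(inode->i_mapping);
 */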

/**
 * block_dirty_folio - Mark a folio as dirty.
 * @mapping: The address space containing this folio.
 * @folio: The folio to mark dirty.
 *
 * Filesystems which use buffer_heads can use this function as their
 * ->dirty_folio implementation.  Some filesystems need to do a little
 * work before calling this function.  Filesystems which do not use
 * buffer_heads should call filemap_dirty_folio() instead.
 *
 * If the folio has buffers, the uptodate buffers are set dirty, to
 * preserve dirty-state coherency between the folio and the buffers.
 * Buffers added to a dirty folio are created dirty.
 *
 * The buffers are dirtied before the folio is dirtied.  There's a small
 * race window in which writeback may see the folio cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the folio
 * dirty before the buffers, writeback could clear the folio dirty flag,
 * see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * folio on the dirty folio list.
 *
 * We use i_private_lock to lock against try_to_free_buffers() while
 * using the folio's buffer list.  This also prevents clean buffers
 * being added to the folio after it was set dirty.
 *
 * Context: May only be called from process context.  Does not sleep.
 * Caller must ensure that @folio cannot be truncated during this call,
 * typically by holding the folio lock or having a page in the folio
 * mapped and holding the page table lock.
 *
 * Return: True if the folio was dirtied; false if it was already dirtied.
 */
bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	struct buffer_head *head;
	bool newly_dirty;

	spin_lock(&mapping->i_private_lock);
	head = folio_buffers(folio);
	if (head) {
		struct buffer_head *bh = head;

		do {
			set_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	/*
	 * Lock out page's memcg migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
	 */
	folio_memcg_lock(folio);
	newly_dirty = !folio_test_set_dirty(folio);
	spin_unlock(&mapping->i_private_lock);

	if (newly_dirty)
		__folio_mark_dirty(folio, mapping, 1);

	folio_memcg_unlock(folio);

	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	return newly_dirty;
}
EXPORT_SYMBOL(block_dirty_folio);

/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head tmp;
	struct address_space *mapping;
	int err = 0, err2;
	struct blk_plug plug;

	INIT_LIST_HEAD(&tmp);
	blk_start_plug(&plug);

	spin_lock(lock);
	while (!list_empty(list)) {
		bh = BH_ENTRY(list->next);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh) || buffer_locked(bh)) {
			list_add(&bh->b_assoc_buffers, &tmp);
			bh->b_assoc_map = mapping;
			if (buffer_dirty(bh)) {
				get_bh(bh);
				spin_unlock(lock);
				/*
				 * Ensure any pending I/O completes so that
				 * write_dirty_buffer() actually writes the
				 * current contents - it is a noop if I/O is
				 * still in flight on potentially older
				 * contents.
				 */
				write_dirty_buffer(bh, REQ_SYNC);

				/*
				 * Kick off IO for the previous mapping. Note
				 * that we will not run the very last mapping,
				 * wait_on_buffer() will do that for us
				 * through sync_buffer().
				 */
				brelse(bh);
				spin_lock(lock);
			}
		}
	}

	spin_unlock(lock);
	blk_finish_plug(&plug);
	spin_lock(lock);

	while (!list_empty(&tmp)) {
		bh = BH_ENTRY(tmp.prev);
		get_bh(bh);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh)) {
			list_add(&bh->b_assoc_buffers,
				 &mapping->i_private_list);
			bh->b_assoc_map = mapping;
		}
		spin_unlock(lock);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			err = -EIO;
		brelse(bh);
		spin_lock(lock);
	}

	spin_unlock(lock);
	err2 = osync_buffers_list(lock, list);
	if (err)
		return err;
	else
		return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's i_private_lock.  Which
 * assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->i_private_list;
		struct address_space *buffer_mapping = mapping->i_private_data;

		spin_lock(&buffer_mapping->i_private_lock);
		while (!list_empty(list))
			__remove_assoc_queue(BH_ENTRY(list->next));
		spin_unlock(&buffer_mapping->i_private_lock);
	}
}
EXPORT_SYMBOL(invalidate_inode_buffers);

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
	int ret = 1;

	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->i_private_list;
		struct address_space *buffer_mapping = mapping->i_private_data;

		spin_lock(&buffer_mapping->i_private_lock);
		while (!list_empty(list)) {
			struct buffer_head *bh = BH_ENTRY(list->next);
			if (buffer_dirty(bh)) {
				ret = 0;
				break;
			}
			__remove_assoc_queue(bh);
		}
		spin_unlock(&buffer_mapping->i_private_lock);
	}
	return ret;
}

/*
 * Create the appropriate buffers when given a folio for the data area and
 * the size of each buffer.  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping)
 * which may not fail from ordinary buffer allocations.
 */
struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
					gfp_t gfp)
{
	struct buffer_head *bh, *head;
	long offset;
	struct mem_cgroup *memcg, *old_memcg;

	/* The folio lock pins the memcg */
	memcg = folio_memcg(folio);
	old_memcg = set_active_memcg(memcg);

	head = NULL;
	offset = folio_size(folio);
	while ((offset -= size) >= 0) {
		bh = alloc_buffer_head(gfp);
		if (!bh)
			goto no_grow;

		bh->b_this_page = head;
		bh->b_blocknr = -1;
		head = bh;

		bh->b_size = size;

		/* Link the buffer to its folio */
		folio_set_bh(bh, folio, offset);
	}
out:
	set_active_memcg(old_memcg);
	return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
	if (head) {
		do {
			bh = head;
			head = head->b_this_page;
			free_buffer_head(bh);
		} while (head);
	}

	goto out;
}
EXPORT_SYMBOL_GPL(folio_alloc_buffers);

struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
				       bool retry)
{
	gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT;
	if (retry)
		gfp |= __GFP_NOFAIL;

	return folio_alloc_buffers(page_folio(page), size, gfp);
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);

static inline void link_dev_buffers(struct folio *folio,
		struct buffer_head *head)
{
	struct buffer_head *bh, *tail;

	bh = head;
	do {
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;
	folio_attach_private(folio, head);
}

static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
{
	sector_t retval = ~((sector_t)0);
	loff_t sz = bdev_nr_bytes(bdev);

	if (sz) {
		unsigned int sizebits = blksize_bits(size);
		retval = (sz >> sizebits);
	}
	return retval;
}

/*
 * Initialise the state of a blockdev folio's buffers.
 */
static sector_t folio_init_buffers(struct folio *folio,
		struct block_device *bdev, unsigned size)
{
	struct buffer_head *head = folio_buffers(folio);
	struct buffer_head *bh = head;
	bool uptodate = folio_test_uptodate(folio);
	sector_t block = div_u64(folio_pos(folio), size);
	sector_t end_block = blkdev_max_block(bdev, size);

	do {
		if (!buffer_mapped(bh)) {
			bh->b_end_io = NULL;
			bh->b_private = NULL;
			bh->b_bdev = bdev;
			bh->b_blocknr = block;
			if (uptodate)
				set_buffer_uptodate(bh);
			if (block < end_block)
				set_buffer_mapped(bh);
		}
		block++;
		bh = bh->b_this_page;
	} while (bh != head);

	/*
	 * Caller needs to validate requested block against end of device.
	 */
	return end_block;
}

/*
 * Create the page-cache folio that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 *
 * Returns false if we have a failure which cannot be cured by retrying
 * without sleeping.  Returns true if we succeeded, or the caller should retry.
 */
static bool grow_dev_folio(struct block_device *bdev, sector_t block,
		pgoff_t index, unsigned size, gfp_t gfp)
{
	struct address_space *mapping = bdev->bd_mapping;
	struct folio *folio;
	struct buffer_head *bh;
	sector_t end_block = 0;

	folio = __filemap_get_folio(mapping, index,
			FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
	if (IS_ERR(folio))
		return false;

	bh = folio_buffers(folio);
	if (bh) {
		if (bh->b_size == size) {
			end_block = folio_init_buffers(folio, bdev, size);
			goto unlock;
		}

		/*
		 * Retrying may succeed; for example the folio may finish
		 * writeback, or buffers may be cleaned.  This should not
		 * happen very often; maybe we have old buffers attached to
		 * this blockdev's page cache and we're trying to change
		 * the block size?
		 */
		if (!try_to_free_buffers(folio)) {
			end_block = ~0ULL;
			goto unlock;
		}
	}

	bh = folio_alloc_buffers(folio, size, gfp | __GFP_ACCOUNT);
	if (!bh)
		goto unlock;

	/*
	 * Link the folio to the buffers and initialise them.  Take the
	 * lock to be atomic wrt __find_get_block(), which does not
	 * run under the folio lock.
	 */
	spin_lock(&mapping->i_private_lock);
	link_dev_buffers(folio, bh);
	end_block = folio_init_buffers(folio, bdev, size);
	spin_unlock(&mapping->i_private_lock);
unlock:
	folio_unlock(folio);
	folio_put(folio);
	return block < end_block;
}

/*
 * Create buffers for the specified block device block's folio.  If
 * that folio was dirty, the buffers are set dirty also.  Returns false
 * if we've hit a permanent error.
 */
static bool grow_buffers(struct block_device *bdev, sector_t block,
		unsigned size, gfp_t gfp)
{
	loff_t pos;

	/*
	 * Check for a block which lies outside our maximum possible
	 * pagecache index.
	 */
	if (check_mul_overflow(block, (sector_t)size, &pos) || pos > MAX_LFS_FILESIZE) {
		printk(KERN_ERR "%s: requested out-of-range block %llu for device %pg\n",
			__func__, (unsigned long long)block,
			bdev);
		return false;
	}

	/* Create a folio with the proper size buffers */
	return grow_dev_folio(bdev, block, pos / PAGE_SIZE, size, gfp);
}

static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block,
	      unsigned size, gfp_t gfp)
{
	/* Size must be multiple of hard sectorsize */
	if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
			(size < 512 || size > PAGE_SIZE))) {
		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
					size);
		printk(KERN_ERR "logical block size: %d\n",
					bdev_logical_block_size(bdev));

		dump_stack();
		return NULL;
	}

	for (;;) {
		struct buffer_head *bh;

		bh = __find_get_block(bdev, block, size);
		if (bh)
			return bh;

		if (!grow_buffers(bdev, block, size, gfp))
			return NULL;
	}
}

/*
 * The relationship between dirty buffers and dirty pages:
 *
 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
 * the page is tagged dirty in the page cache.
 *
 * At all times, the dirtiness of the buffers represents the dirtiness of
 * subsections of the page.  If the page has buffers, the page dirty bit is
 * merely a hint about the true dirty state.
 *
 * When a page is set dirty in its entirety, all its buffers are marked dirty
 * (if the page has buffers).
 *
 * When a buffer is marked dirty, its page is dirtied, but the page's other
 * buffers are not.
 *
 * Also.  When blockdev buffers are explicitly read with bread(), they
 * individually become uptodate.  But their backing page remains not
 * uptodate - even if all of its buffers are uptodate.  A subsequent
 * block_read_full_folio() against that folio will discover all the uptodate
 * buffers, will set the folio uptodate and will perform no I/O.
 */

/**
 * mark_buffer_dirty - mark a buffer_head as needing writeout
 * @bh: the buffer_head to mark dirty
 *
 * mark_buffer_dirty() will set the dirty bit against the buffer, then set
 * its backing page dirty, then tag the page as dirty in the page cache
 * and then attach the address_space's inode to its superblock's dirty
 * inode list.
 *
 * mark_buffer_dirty() is atomic.  It takes bh->b_folio->mapping->i_private_lock,
 * i_pages lock and mapping->host->i_lock.
 */
void mark_buffer_dirty(struct buffer_head *bh)
{
	WARN_ON_ONCE(!buffer_uptodate(bh));

	trace_block_dirty_buffer(bh);

	/*
	 * Very *carefully* optimize the it-is-already-dirty case.
	 *
	 * Don't let the final "is it dirty" escape to before we
	 * perhaps modified the buffer.
	 */
	if (buffer_dirty(bh)) {
		smp_mb();
		if (buffer_dirty(bh))
			return;
	}

	if (!test_set_buffer_dirty(bh)) {
		struct folio *folio = bh->b_folio;
		struct address_space *mapping = NULL;

		folio_memcg_lock(folio);
		if (!folio_test_set_dirty(folio)) {
			mapping = folio->mapping;
			if (mapping)
				__folio_mark_dirty(folio, mapping, 0);
		}
		folio_memcg_unlock(folio);
		if (mapping)
			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty);
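
/*
 * A minimal usage sketch (illustration only): after modifying the contents
 * of an uptodate buffer, a filesystem typically does
 *
 *	... modify bh->b_data ...
 *	mark_buffer_dirty(bh);
 *	brelse(bh);
 *
 * and leaves it to writeback (or an explicit sync) to push the block out.
 */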

void mark_buffer_write_io_error(struct buffer_head *bh)
{
	set_buffer_write_io_error(bh);
	/* FIXME: do we need to set this in both places? */
	if (bh->b_folio && bh->b_folio->mapping)
		mapping_set_error(bh->b_folio->mapping, -EIO);
	if (bh->b_assoc_map) {
		mapping_set_error(bh->b_assoc_map, -EIO);
		errseq_set(&bh->b_assoc_map->host->i_sb->s_wb_err, -EIO);
	}
}
EXPORT_SYMBOL(mark_buffer_write_io_error);

/**
 * __brelse - Release a buffer.
 * @bh: The buffer to release.
 *
 * This variant of brelse() can be called if @bh is guaranteed to not be NULL.
 */
void __brelse(struct buffer_head *bh)
{
	if (atomic_read(&bh->b_count)) {
		put_bh(bh);
		return;
	}
	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
}
EXPORT_SYMBOL(__brelse);

/**
 * __bforget - Discard any dirty data in a buffer.
 * @bh: The buffer to forget.
 *
 * This variant of bforget() can be called if @bh is guaranteed to not
 * be NULL.
 */
void __bforget(struct buffer_head *bh)
{
	clear_buffer_dirty(bh);
	if (bh->b_assoc_map) {
		struct address_space *buffer_mapping = bh->b_folio->mapping;

		spin_lock(&buffer_mapping->i_private_lock);
		list_del_init(&bh->b_assoc_buffers);
		bh->b_assoc_map = NULL;
		spin_unlock(&buffer_mapping->i_private_lock);
	}
	__brelse(bh);
}
EXPORT_SYMBOL(__bforget);

static struct buffer_head *__bread_slow(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return bh;
	} else {
		get_bh(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(REQ_OP_READ, bh);
		wait_on_buffer(bh);
		if (buffer_uptodate(bh))
			return bh;
	}
	brelse(bh);
	return NULL;
}

/*
 * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
 * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
 * refcount elevated by one when they're in an LRU.  A buffer can only appear
 * once in a particular CPU's LRU.  A single buffer can be present in multiple
 * CPU's LRUs at the same time.
 *
 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
 * sb_find_get_block().
 *
 * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
 * a local interrupt disable for that.
 */

#define BH_LRU_SIZE	16

struct bh_lru {
	struct buffer_head *bhs[BH_LRU_SIZE];
};

static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};

#ifdef CONFIG_SMP
#define bh_lru_lock()	local_irq_disable()
#define bh_lru_unlock()	local_irq_enable()
#else
#define bh_lru_lock()	preempt_disable()
#define bh_lru_unlock()	preempt_enable()
#endif

static inline void check_irqs_on(void)
{
#ifdef irqs_disabled
	BUG_ON(irqs_disabled());
#endif
}

/*
 * Install a buffer_head into this cpu's LRU.  If not already in the LRU, it is
 * inserted at the front, and the buffer_head at the back if any is evicted.
 * Or, if already in the LRU it is moved to the front.
 */
static void bh_lru_install(struct buffer_head *bh)
{
	struct buffer_head *evictee = bh;
	struct bh_lru *b;
	int i;

	check_irqs_on();
	bh_lru_lock();

	/*
	 * The refcount of a buffer_head in the bh_lru prevents dropping the
	 * attached page (i.e., try_to_free_buffers), so it could cause page
	 * migration to fail.
	 * Skip putting upcoming bh into bh_lru until migration is done.
	 */
	if (lru_cache_disabled() || cpu_is_isolated(smp_processor_id())) {
		bh_lru_unlock();
		return;
	}

	b = this_cpu_ptr(&bh_lrus);
	for (i = 0; i < BH_LRU_SIZE; i++) {
		swap(evictee, b->bhs[i]);
		if (evictee == bh) {
			bh_lru_unlock();
			return;
		}
	}

	get_bh(bh);
	bh_lru_unlock();
	brelse(evictee);
}

/*
 * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
 */
static struct buffer_head *
lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *ret = NULL;
	unsigned int i;

	check_irqs_on();
	bh_lru_lock();
	if (cpu_is_isolated(smp_processor_id())) {
		bh_lru_unlock();
		return NULL;
	}
	for (i = 0; i < BH_LRU_SIZE; i++) {
		struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);

		if (bh && bh->b_blocknr == block && bh->b_bdev == bdev &&
		    bh->b_size == size) {
			if (i) {
				while (i) {
					__this_cpu_write(bh_lrus.bhs[i],
						__this_cpu_read(bh_lrus.bhs[i - 1]));
					i--;
				}
				__this_cpu_write(bh_lrus.bhs[0], bh);
			}
			get_bh(bh);
			ret = bh;
			break;
		}
	}
	bh_lru_unlock();
	return ret;
}

/*
 * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
 * it in the LRU and mark it as accessed.  If it is not present then return
 * NULL
 */
struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);

	if (bh == NULL) {
		/* __find_get_block_slow will mark the page accessed */
		bh = __find_get_block_slow(bdev, block);
		if (bh)
			bh_lru_install(bh);
	} else
		touch_buffer(bh);

	return bh;
}
EXPORT_SYMBOL(__find_get_block);

/**
 * bdev_getblk - Get a buffer_head in a block device's buffer cache.
 * @bdev: The block device.
 * @block: The block number.
 * @size: The size of buffer_heads for this @bdev.
 * @gfp: The memory allocation flags to use.
 *
 * The returned buffer head has its reference count incremented, but is
 * not locked.  The caller should call brelse() when it has finished
 * with the buffer.  The buffer may not be uptodate.  If needed, the
 * caller can bring it uptodate either by reading it or overwriting it.
 *
 * Return: The buffer head, or NULL if memory could not be allocated.
 */
struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block,
		unsigned size, gfp_t gfp)
{
	struct buffer_head *bh = __find_get_block(bdev, block, size);

	might_alloc(gfp);
	if (bh)
		return bh;

	return __getblk_slow(bdev, block, size, gfp);
}
EXPORT_SYMBOL(bdev_getblk);
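
/*
 * A minimal usage sketch (illustration only), following the kernel-doc
 * above; "bdev" and "blocknr" are assumed to come from the caller, and a
 * 4096-byte block size is assumed here:
 *
 *	struct buffer_head *bh;
 *
 *	bh = bdev_getblk(bdev, blocknr, 4096, GFP_NOFS);
 *	if (!bh)
 *		return -ENOMEM;
 *	... read the block or overwrite bh->b_data and set it uptodate ...
 *	brelse(bh);
 */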

/*
 * Do async read-ahead on a buffer.
 */
void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = bdev_getblk(bdev, block, size,
			GFP_NOWAIT | __GFP_MOVABLE);

	if (likely(bh)) {
		bh_readahead(bh, REQ_RAHEAD);
		brelse(bh);
	}
}
EXPORT_SYMBOL(__breadahead);

/**
 * __bread_gfp() - Read a block.
 * @bdev: The block device to read from.
 * @block: Block number in units of block size.
 * @size: The block size of this device in bytes.
 * @gfp: Not page allocation flags; see below.
 *
 * You are not expected to call this function.  You should use one of
 * sb_bread(), sb_bread_unmovable() or __bread().
 *
 * Read a specified block, and return the buffer head that refers to it.
 * If @gfp is 0, the memory will be allocated using the block device's
 * default GFP flags.  If @gfp is __GFP_MOVABLE, the memory may be
 * allocated from a movable area.  Do not pass in a complete set of
 * GFP flags.
 *
 * The returned buffer head has its refcount increased.  The caller should
 * call brelse() when it has finished with the buffer.
 *
 * Context: May sleep waiting for I/O.
 * Return: NULL if the block was unreadable.
 */
struct buffer_head *__bread_gfp(struct block_device *bdev, sector_t block,
		unsigned size, gfp_t gfp)
{
	struct buffer_head *bh;

	gfp |= mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS);

	/*
	 * Prefer looping in the allocator rather than here, at least that
	 * code knows what it's doing.
	 */
	gfp |= __GFP_NOFAIL;

	bh = bdev_getblk(bdev, block, size, gfp);

	if (likely(bh) && !buffer_uptodate(bh))
		bh = __bread_slow(bh);
	return bh;
}
EXPORT_SYMBOL(__bread_gfp);
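
/*
 * A minimal usage sketch (illustration only) of the sb_bread() wrapper
 * mentioned above, reading one filesystem block from a superblock's block
 * device; "sb" and "blocknr" are assumed to come from the caller:
 *
 *	struct buffer_head *bh = sb_bread(sb, blocknr);
 *
 *	if (!bh)
 *		return -EIO;
 *	... bh->b_data now holds the on-disk block contents ...
 *	brelse(bh);
 */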

static void __invalidate_bh_lrus(struct bh_lru *b)
{
	int i;

	for (i = 0; i < BH_LRU_SIZE; i++) {
		brelse(b->bhs[i]);
		b->bhs[i] = NULL;
	}
}
/*
 * invalidate_bh_lrus() is called rarely - but not only at unmount.
 * This doesn't race because it runs in each cpu either in irq
 * or with preempt disabled.
 */
static void invalidate_bh_lru(void *arg)
{
	struct bh_lru *b = &get_cpu_var(bh_lrus);

	__invalidate_bh_lrus(b);
	put_cpu_var(bh_lrus);
}

bool has_bh_in_lru(int cpu, void *dummy)
{
	struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
	int i;

	for (i = 0; i < BH_LRU_SIZE; i++) {
		if (b->bhs[i])
			return true;
	}

	return false;
}

void invalidate_bh_lrus(void)
{
	on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1);
}
EXPORT_SYMBOL_GPL(invalidate_bh_lrus);

/*
 * It's called from workqueue context so we need a bh_lru_lock to close
 * the race with preemption/irq.
 */
void invalidate_bh_lrus_cpu(void)
{
	struct bh_lru *b;

	bh_lru_lock();
	b = this_cpu_ptr(&bh_lrus);
	__invalidate_bh_lrus(b);
	bh_lru_unlock();
}

void folio_set_bh(struct buffer_head *bh, struct folio *folio,
		  unsigned long offset)
{
	bh->b_folio = folio;
	BUG_ON(offset >= folio_size(folio));
	if (folio_test_highmem(folio))
		/*
		 * This catches illegal uses and preserves the offset:
		 */
		bh->b_data = (char *)(0 + offset);
	else
		bh->b_data = folio_address(folio) + offset;
}
EXPORT_SYMBOL(folio_set_bh);

/*
 * Called when truncating a buffer on a page completely.
 */

/* Bits that are cleared during an invalidate */
#define BUFFER_FLAGS_DISCARD \
	(1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \
	 1 << BH_Delay | 1 << BH_Unwritten)

static void discard_buffer(struct buffer_head *bh)
{
	unsigned long b_state;

	lock_buffer(bh);
	clear_buffer_dirty(bh);
	bh->b_bdev = NULL;
	b_state = READ_ONCE(bh->b_state);
	do {
	} while (!try_cmpxchg(&bh->b_state, &b_state,
			      b_state & ~BUFFER_FLAGS_DISCARD));
	unlock_buffer(bh);
}

/**
 * block_invalidate_folio - Invalidate part or all of a buffer-backed folio.
 * @folio: The folio which is affected.
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * block_invalidate_folio() is called when all or part of the folio has been
 * invalidated by a truncate operation.
 *
 * block_invalidate_folio() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point.  Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void block_invalidate_folio(struct folio *folio, size_t offset, size_t length)
{
	struct buffer_head *head, *bh, *next;
	size_t curr_off = 0;
	size_t stop = length + offset;

	BUG_ON(!folio_test_locked(folio));

	/*
	 * Check for overflow
	 */
	BUG_ON(stop > folio_size(folio) || stop < length);

	head = folio_buffers(folio);
	if (!head)
		return;

	bh = head;
	do {
		size_t next_off = curr_off + bh->b_size;
		next = bh->b_this_page;

		/*
		 * Are we still fully in range ?
		 */
		if (next_off > stop)
			goto out;

		/*
		 * is this block fully invalidated?
		 */
		if (offset <= curr_off)
			discard_buffer(bh);
		curr_off = next_off;
		bh = next;
	} while (bh != head);

	/*
	 * We release buffers only if the entire folio is being invalidated.
	 * The get_block cached value has been unconditionally invalidated,
	 * so real IO is not possible anymore.
	 */
	if (length == folio_size(folio))
		filemap_release_folio(folio, 0);
out:
	return;
}
EXPORT_SYMBOL(block_invalidate_folio);

/*
 * We attach and possibly dirty the buffers atomically wrt
 * block_dirty_folio() via i_private_lock.  try_to_free_buffers
 * is already excluded via the folio lock.
 */
struct buffer_head *create_empty_buffers(struct folio *folio,
		unsigned long blocksize, unsigned long b_state)
{
	struct buffer_head *bh, *head, *tail;
	gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT | __GFP_NOFAIL;

	head = folio_alloc_buffers(folio, blocksize, gfp);
	bh = head;
	do {
		bh->b_state |= b_state;
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;

	spin_lock(&folio->mapping->i_private_lock);
	if (folio_test_uptodate(folio) || folio_test_dirty(folio)) {
		bh = head;
		do {
			if (folio_test_dirty(folio))
				set_buffer_dirty(bh);
			if (folio_test_uptodate(folio))
				set_buffer_uptodate(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	folio_attach_private(folio, head);
	spin_unlock(&folio->mapping->i_private_lock);

	return head;
}
EXPORT_SYMBOL(create_empty_buffers);
1697
1698 /**
1699 * clean_bdev_aliases: clean a range of buffers in block device
1700 * @bdev: Block device to clean buffers in
1701 * @block: Start of a range of blocks to clean
1702 * @len: Number of blocks to clean
1703 *
1704 * We are taking a range of blocks for data and we don't want writeback of any
1705 * buffer-cache aliases starting from return from this function and until the
1706 * moment when something will explicitly mark the buffer dirty (hopefully that
1707 * will not happen until we will free that block ;-) We don't even need to mark
1708 * it not-uptodate - nobody can expect anything from a newly allocated buffer
1709 * anyway. We used to use unmap_buffer() for such invalidation, but that was
1710 * wrong. We definitely don't want to mark the alias unmapped, for example - it
1711 * would confuse anyone who might pick it with bread() afterwards...
1712 *
1713 * Also.. Note that bforget() doesn't lock the buffer. So there can be
1714 * writeout I/O going on against recently-freed buffers. We don't wait on that
1715 * I/O in bforget() - it's more efficient to wait on the I/O only if we really
1716 * need to. That happens here.
1717 */
clean_bdev_aliases(struct block_device * bdev,sector_t block,sector_t len)1718 void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
1719 {
1720 struct address_space *bd_mapping = bdev->bd_mapping;
1721 const int blkbits = bd_mapping->host->i_blkbits;
1722 struct folio_batch fbatch;
1723 pgoff_t index = ((loff_t)block << blkbits) / PAGE_SIZE;
1724 pgoff_t end;
1725 int i, count;
1726 struct buffer_head *bh;
1727 struct buffer_head *head;
1728
1729 end = ((loff_t)(block + len - 1) << blkbits) / PAGE_SIZE;
1730 folio_batch_init(&fbatch);
1731 while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) {
1732 count = folio_batch_count(&fbatch);
1733 for (i = 0; i < count; i++) {
1734 struct folio *folio = fbatch.folios[i];
1735
1736 if (!folio_buffers(folio))
1737 continue;
1738 /*
1739 * We use the folio lock instead of bd_mapping->i_private_lock
1740 * to pin buffers here since we can afford to sleep and
1741 * it scales better than a global spinlock.
1742 */
1743 folio_lock(folio);
1744 /* Recheck when the folio is locked which pins bhs */
1745 head = folio_buffers(folio);
1746 if (!head)
1747 goto unlock_page;
1748 bh = head;
1749 do {
1750 if (!buffer_mapped(bh) || (bh->b_blocknr < block))
1751 goto next;
1752 if (bh->b_blocknr >= block + len)
1753 break;
1754 clear_buffer_dirty(bh);
1755 wait_on_buffer(bh);
1756 clear_buffer_req(bh);
1757 next:
1758 bh = bh->b_this_page;
1759 } while (bh != head);
1760 unlock_page:
1761 folio_unlock(folio);
1762 }
1763 folio_batch_release(&fbatch);
1764 cond_resched();
1765 /* End of range already reached? */
1766 if (index > end || !index)
1767 break;
1768 }
1769 }
1770 EXPORT_SYMBOL(clean_bdev_aliases);
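/*
 * Usage sketch: once a get_block callback hands back freshly allocated
 * blocks (buffer_new() set), the caller drops any stale aliases in the
 * block device's page cache before writing. This is done one block at a
 * time via clean_bdev_bh_alias() (a one-block wrapper around this
 * function), as __block_write_full_folio() does below, or for a whole
 * extent at once ("first_block" and "nr_blocks" are hypothetical names):
 *
 *	if (buffer_new(bh)) {
 *		clear_buffer_new(bh);
 *		clean_bdev_bh_alias(bh);
 *	}
 *
 *	clean_bdev_aliases(sb->s_bdev, first_block, nr_blocks);
 */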
1771
1772 static struct buffer_head *folio_create_buffers(struct folio *folio,
1773 struct inode *inode,
1774 unsigned int b_state)
1775 {
1776 struct buffer_head *bh;
1777
1778 BUG_ON(!folio_test_locked(folio));
1779
1780 bh = folio_buffers(folio);
1781 if (!bh)
1782 bh = create_empty_buffers(folio,
1783 1 << READ_ONCE(inode->i_blkbits), b_state);
1784 return bh;
1785 }
1786
1787 /*
1788 * NOTE! All mapped/uptodate combinations are valid:
1789 *
1790 * Mapped Uptodate Meaning
1791 *
1792 * No No "unknown" - must do get_block()
1793 * No Yes "hole" - zero-filled
1794 * Yes No "allocated" - allocated on disk, not read in
1795 * Yes Yes "valid" - allocated and up-to-date in memory.
1796 *
1797 * "Dirty" is valid only with the last case (mapped+uptodate).
1798 */
1799
1800 /*
1801 * While block_write_full_folio is writing back the dirty buffers under
1802 * the page lock, whoever dirtied the buffers may decide to clean them
1803 * again at any time. We handle that by only looking at the buffer
1804 * state inside lock_buffer().
1805 *
1806 * If block_write_full_folio() is called for regular writeback
1807 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1808 * locked buffer. This only can happen if someone has written the buffer
1809 * directly, with submit_bh(). At the address_space level PageWriteback
1810 * prevents this contention from occurring.
1811 *
1812 * If block_write_full_folio() is called with wbc->sync_mode ==
1813 * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this
1814 * causes the writes to be flagged as synchronous writes.
1815 */
1816 int __block_write_full_folio(struct inode *inode, struct folio *folio,
1817 get_block_t *get_block, struct writeback_control *wbc)
1818 {
1819 int err;
1820 sector_t block;
1821 sector_t last_block;
1822 struct buffer_head *bh, *head;
1823 size_t blocksize;
1824 int nr_underway = 0;
1825 blk_opf_t write_flags = wbc_to_write_flags(wbc);
1826
1827 head = folio_create_buffers(folio, inode,
1828 (1 << BH_Dirty) | (1 << BH_Uptodate));
1829
1830 /*
1831 * Be very careful. We have no exclusion from block_dirty_folio
1832 * here, and the (potentially unmapped) buffers may become dirty at
1833 * any time. If a buffer becomes dirty here after we've inspected it
1834 * then we just miss that fact, and the folio stays dirty.
1835 *
1836 * Buffers outside i_size may be dirtied by block_dirty_folio;
1837 * handle that here by just cleaning them.
1838 */
1839
1840 bh = head;
1841 blocksize = bh->b_size;
1842
1843 block = div_u64(folio_pos(folio), blocksize);
1844 last_block = div_u64(i_size_read(inode) - 1, blocksize);
1845
1846 /*
1847 * Get all the dirty buffers mapped to disk addresses and
1848 * handle any aliases from the underlying blockdev's mapping.
1849 */
1850 do {
1851 if (block > last_block) {
1852 /*
1853 * mapped buffers outside i_size will occur, because
1854 * this folio can be outside i_size when there is a
1855 * truncate in progress.
1856 */
1857 /*
1858 * The buffer was zeroed by block_write_full_folio()
1859 */
1860 clear_buffer_dirty(bh);
1861 set_buffer_uptodate(bh);
1862 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1863 buffer_dirty(bh)) {
1864 WARN_ON(bh->b_size != blocksize);
1865 err = get_block(inode, block, bh, 1);
1866 if (err)
1867 goto recover;
1868 clear_buffer_delay(bh);
1869 if (buffer_new(bh)) {
1870 /* blockdev mappings never come here */
1871 clear_buffer_new(bh);
1872 clean_bdev_bh_alias(bh);
1873 }
1874 }
1875 bh = bh->b_this_page;
1876 block++;
1877 } while (bh != head);
1878
1879 do {
1880 if (!buffer_mapped(bh))
1881 continue;
1882 /*
1883 * If it's a fully non-blocking write attempt and we cannot
1884 * lock the buffer then redirty the folio. Note that this can
1885 * potentially cause a busy-wait loop from writeback threads
1886 * and kswapd activity, but those code paths have their own
1887 * higher-level throttling.
1888 */
1889 if (wbc->sync_mode != WB_SYNC_NONE) {
1890 lock_buffer(bh);
1891 } else if (!trylock_buffer(bh)) {
1892 folio_redirty_for_writepage(wbc, folio);
1893 continue;
1894 }
1895 if (test_clear_buffer_dirty(bh)) {
1896 mark_buffer_async_write_endio(bh,
1897 end_buffer_async_write);
1898 } else {
1899 unlock_buffer(bh);
1900 }
1901 } while ((bh = bh->b_this_page) != head);
1902
1903 /*
1904 * The folio and its buffers are protected by the writeback flag,
1905 * so we can drop the bh refcounts early.
1906 */
1907 BUG_ON(folio_test_writeback(folio));
1908 folio_start_writeback(folio);
1909
1910 do {
1911 struct buffer_head *next = bh->b_this_page;
1912 if (buffer_async_write(bh)) {
1913 submit_bh_wbc(REQ_OP_WRITE | write_flags, bh,
1914 inode->i_write_hint, wbc);
1915 nr_underway++;
1916 }
1917 bh = next;
1918 } while (bh != head);
1919 folio_unlock(folio);
1920
1921 err = 0;
1922 done:
1923 if (nr_underway == 0) {
1924 /*
1925 * The folio was marked dirty, but the buffers were
1926 * clean. Someone wrote them back by hand with
1927 * write_dirty_buffer/submit_bh. A rare case.
1928 */
1929 folio_end_writeback(folio);
1930
1931 /*
1932 * The folio and buffer_heads can be released at any time from
1933 * here on.
1934 */
1935 }
1936 return err;
1937
1938 recover:
1939 /*
1940 * ENOSPC, or some other error. We may already have added some
1941 * blocks to the file, so we need to write these out to avoid
1942 * exposing stale data.
1943 * The folio is currently locked and not marked for writeback
1944 */
1945 bh = head;
1946 /* Recovery: lock and submit the mapped buffers */
1947 do {
1948 if (buffer_mapped(bh) && buffer_dirty(bh) &&
1949 !buffer_delay(bh)) {
1950 lock_buffer(bh);
1951 mark_buffer_async_write_endio(bh,
1952 end_buffer_async_write);
1953 } else {
1954 /*
1955 * The buffer may have been set dirty during
1956 * attachment to a dirty folio.
1957 */
1958 clear_buffer_dirty(bh);
1959 }
1960 } while ((bh = bh->b_this_page) != head);
1961 BUG_ON(folio_test_writeback(folio));
1962 mapping_set_error(folio->mapping, err);
1963 folio_start_writeback(folio);
1964 do {
1965 struct buffer_head *next = bh->b_this_page;
1966 if (buffer_async_write(bh)) {
1967 clear_buffer_dirty(bh);
1968 submit_bh_wbc(REQ_OP_WRITE | write_flags, bh,
1969 inode->i_write_hint, wbc);
1970 nr_underway++;
1971 }
1972 bh = next;
1973 } while (bh != head);
1974 folio_unlock(folio);
1975 goto done;
1976 }
1977 EXPORT_SYMBOL(__block_write_full_folio);
1978
1979 /*
1980 * If a folio has any new buffers, zero them out here, and mark them uptodate
1981 * and dirty so they'll be written out (in order to prevent uninitialised
1982 * block data from leaking). And clear the new bit.
1983 */
1984 void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to)
1985 {
1986 size_t block_start, block_end;
1987 struct buffer_head *head, *bh;
1988
1989 BUG_ON(!folio_test_locked(folio));
1990 head = folio_buffers(folio);
1991 if (!head)
1992 return;
1993
1994 bh = head;
1995 block_start = 0;
1996 do {
1997 block_end = block_start + bh->b_size;
1998
1999 if (buffer_new(bh)) {
2000 if (block_end > from && block_start < to) {
2001 if (!folio_test_uptodate(folio)) {
2002 size_t start, xend;
2003
2004 start = max(from, block_start);
2005 xend = min(to, block_end);
2006
2007 folio_zero_segment(folio, start, xend);
2008 set_buffer_uptodate(bh);
2009 }
2010
2011 clear_buffer_new(bh);
2012 mark_buffer_dirty(bh);
2013 }
2014 }
2015
2016 block_start = block_end;
2017 bh = bh->b_this_page;
2018 } while (bh != head);
2019 }
2020 EXPORT_SYMBOL(folio_zero_new_buffers);
2021
2022 static int
2023 iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
2024 const struct iomap *iomap)
2025 {
2026 loff_t offset = (loff_t)block << inode->i_blkbits;
2027
2028 bh->b_bdev = iomap->bdev;
2029
2030 /*
2031 * Block points to offset in file we need to map, iomap contains
2032 * the offset at which the map starts. If the map ends before the
2033 * current block, then do not map the buffer and let the caller
2034 * handle it.
2035 */
2036 if (offset >= iomap->offset + iomap->length)
2037 return -EIO;
2038
2039 switch (iomap->type) {
2040 case IOMAP_HOLE:
2041 /*
2042 * If the buffer is not up to date or beyond the current EOF,
2043 * we need to mark it as new to ensure sub-block zeroing is
2044 * executed if necessary.
2045 */
2046 if (!buffer_uptodate(bh) ||
2047 (offset >= i_size_read(inode)))
2048 set_buffer_new(bh);
2049 return 0;
2050 case IOMAP_DELALLOC:
2051 if (!buffer_uptodate(bh) ||
2052 (offset >= i_size_read(inode)))
2053 set_buffer_new(bh);
2054 set_buffer_uptodate(bh);
2055 set_buffer_mapped(bh);
2056 set_buffer_delay(bh);
2057 return 0;
2058 case IOMAP_UNWRITTEN:
2059 /*
2060 * For unwritten regions, we always need to ensure that regions
2061 * in the block we are not writing to are zeroed. Mark the
2062 * buffer as new to ensure this.
2063 */
2064 set_buffer_new(bh);
2065 set_buffer_unwritten(bh);
2066 fallthrough;
2067 case IOMAP_MAPPED:
2068 if ((iomap->flags & IOMAP_F_NEW) ||
2069 offset >= i_size_read(inode)) {
2070 /*
2071 * This can happen if truncating the block device races
2072 * with the check in the caller as i_size updates on
2073 * block devices aren't synchronized by i_rwsem for
2074 * block devices.
2075 */
2076 if (S_ISBLK(inode->i_mode))
2077 return -EIO;
2078 set_buffer_new(bh);
2079 }
2080 bh->b_blocknr = (iomap->addr + offset - iomap->offset) >>
2081 inode->i_blkbits;
2082 set_buffer_mapped(bh);
2083 return 0;
2084 default:
2085 WARN_ON_ONCE(1);
2086 return -EIO;
2087 }
2088 }
2089
2090 int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
2091 get_block_t *get_block, const struct iomap *iomap)
2092 {
2093 size_t from = offset_in_folio(folio, pos);
2094 size_t to = from + len;
2095 struct inode *inode = folio->mapping->host;
2096 size_t block_start, block_end;
2097 sector_t block;
2098 int err = 0;
2099 size_t blocksize;
2100 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
2101
2102 BUG_ON(!folio_test_locked(folio));
2103 BUG_ON(to > folio_size(folio));
2104 BUG_ON(from > to);
2105
2106 head = folio_create_buffers(folio, inode, 0);
2107 blocksize = head->b_size;
2108 block = div_u64(folio_pos(folio), blocksize);
2109
2110 for (bh = head, block_start = 0; bh != head || !block_start;
2111 block++, block_start=block_end, bh = bh->b_this_page) {
2112 block_end = block_start + blocksize;
2113 if (block_end <= from || block_start >= to) {
2114 if (folio_test_uptodate(folio)) {
2115 if (!buffer_uptodate(bh))
2116 set_buffer_uptodate(bh);
2117 }
2118 continue;
2119 }
2120 if (buffer_new(bh))
2121 clear_buffer_new(bh);
2122 if (!buffer_mapped(bh)) {
2123 WARN_ON(bh->b_size != blocksize);
2124 if (get_block)
2125 err = get_block(inode, block, bh, 1);
2126 else
2127 err = iomap_to_bh(inode, block, bh, iomap);
2128 if (err)
2129 break;
2130
2131 if (buffer_new(bh)) {
2132 clean_bdev_bh_alias(bh);
2133 if (folio_test_uptodate(folio)) {
2134 clear_buffer_new(bh);
2135 set_buffer_uptodate(bh);
2136 mark_buffer_dirty(bh);
2137 continue;
2138 }
2139 if (block_end > to || block_start < from)
2140 folio_zero_segments(folio,
2141 to, block_end,
2142 block_start, from);
2143 continue;
2144 }
2145 }
2146 if (folio_test_uptodate(folio)) {
2147 if (!buffer_uptodate(bh))
2148 set_buffer_uptodate(bh);
2149 continue;
2150 }
2151 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
2152 !buffer_unwritten(bh) &&
2153 (block_start < from || block_end > to)) {
2154 bh_read_nowait(bh, 0);
2155 *wait_bh++=bh;
2156 }
2157 }
2158 /*
2159 * If we issued read requests - let them complete.
2160 */
2161 while(wait_bh > wait) {
2162 wait_on_buffer(*--wait_bh);
2163 if (!buffer_uptodate(*wait_bh))
2164 err = -EIO;
2165 }
2166 if (unlikely(err))
2167 folio_zero_new_buffers(folio, from, to);
2168 return err;
2169 }
2170
2171 int __block_write_begin(struct page *page, loff_t pos, unsigned len,
2172 get_block_t *get_block)
2173 {
2174 return __block_write_begin_int(page_folio(page), pos, len, get_block,
2175 NULL);
2176 }
2177 EXPORT_SYMBOL(__block_write_begin);
2178
2179 static void __block_commit_write(struct folio *folio, size_t from, size_t to)
2180 {
2181 size_t block_start, block_end;
2182 bool partial = false;
2183 unsigned blocksize;
2184 struct buffer_head *bh, *head;
2185
2186 bh = head = folio_buffers(folio);
2187 if (!bh)
2188 return;
2189 blocksize = bh->b_size;
2190
2191 block_start = 0;
2192 do {
2193 block_end = block_start + blocksize;
2194 if (block_end <= from || block_start >= to) {
2195 if (!buffer_uptodate(bh))
2196 partial = true;
2197 } else {
2198 set_buffer_uptodate(bh);
2199 mark_buffer_dirty(bh);
2200 }
2201 if (buffer_new(bh))
2202 clear_buffer_new(bh);
2203
2204 block_start = block_end;
2205 bh = bh->b_this_page;
2206 } while (bh != head);
2207
2208 /*
2209 * If this is a partial write which happened to make all buffers
2210 * uptodate then we can optimize away a bogus read_folio() for
2211 * the next read(). Here we 'discover' whether the folio went
2212 * uptodate as a result of this (potentially partial) write.
2213 */
2214 if (!partial)
2215 folio_mark_uptodate(folio);
2216 }
2217
2218 /*
2219 * block_write_begin takes care of the basic task of block allocation and
2220 * bringing partial write blocks uptodate first.
2221 *
2222 * The filesystem needs to handle block truncation upon failure.
2223 */
2224 int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
2225 struct page **pagep, get_block_t *get_block)
2226 {
2227 pgoff_t index = pos >> PAGE_SHIFT;
2228 struct page *page;
2229 int status;
2230
2231 page = grab_cache_page_write_begin(mapping, index);
2232 if (!page)
2233 return -ENOMEM;
2234
2235 status = __block_write_begin(page, pos, len, get_block);
2236 if (unlikely(status)) {
2237 unlock_page(page);
2238 put_page(page);
2239 page = NULL;
2240 }
2241
2242 *pagep = page;
2243 return status;
2244 }
2245 EXPORT_SYMBOL(block_write_begin);
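/*
 * Usage sketch (hedged; "myfs_get_block" and "myfs_write_failed" are
 * hypothetical): a simple buffer_head based filesystem's ->write_begin is
 * little more than a call to block_write_begin(), plus the failure-path
 * truncation of blocks instantiated beyond i_size that the comment above
 * requires:
 *
 *	static int myfs_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, struct page **pagep, void **fsdata)
 *	{
 *		int ret;
 *
 *		ret = block_write_begin(mapping, pos, len, pagep,
 *					myfs_get_block);
 *		if (ret)
 *			myfs_write_failed(mapping, pos + len);
 *		return ret;
 *	}
 */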
2246
2247 int block_write_end(struct file *file, struct address_space *mapping,
2248 loff_t pos, unsigned len, unsigned copied,
2249 struct page *page, void *fsdata)
2250 {
2251 struct folio *folio = page_folio(page);
2252 size_t start = pos - folio_pos(folio);
2253
2254 if (unlikely(copied < len)) {
2255 /*
2256 * The buffers that were written will now be uptodate, so
2257 * we don't have to worry about a read_folio reading them
2258 * and overwriting a partial write. However if we have
2259 * encountered a short write and only partially written
2260 * into a buffer, it will not be marked uptodate, so a
2261 * read_folio might come in and destroy our partial write.
2262 *
2263 * Do the simplest thing, and just treat any short write to a
2264 * non uptodate folio as a zero-length write, and force the
2265 * caller to redo the whole thing.
2266 */
2267 if (!folio_test_uptodate(folio))
2268 copied = 0;
2269
2270 folio_zero_new_buffers(folio, start+copied, start+len);
2271 }
2272 flush_dcache_folio(folio);
2273
2274 /* This could be a short (even 0-length) commit */
2275 __block_commit_write(folio, start, start + copied);
2276
2277 return copied;
2278 }
2279 EXPORT_SYMBOL(block_write_end);
2280
2281 int generic_write_end(struct file *file, struct address_space *mapping,
2282 loff_t pos, unsigned len, unsigned copied,
2283 struct page *page, void *fsdata)
2284 {
2285 struct inode *inode = mapping->host;
2286 loff_t old_size = inode->i_size;
2287 bool i_size_changed = false;
2288
2289 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2290
2291 /*
2292 * No need to use i_size_read() here, the i_size cannot change under us
2293 * because we hold i_rwsem.
2294 *
2295 * But it's important to update i_size while still holding page lock:
2296 * page writeout could otherwise come in and zero beyond i_size.
2297 */
2298 if (pos + copied > inode->i_size) {
2299 i_size_write(inode, pos + copied);
2300 i_size_changed = true;
2301 }
2302
2303 unlock_page(page);
2304 put_page(page);
2305
2306 if (old_size < pos)
2307 pagecache_isize_extended(inode, old_size, pos);
2308 /*
2309 * Don't mark the inode dirty under page lock. First, it unnecessarily
2310 * makes the holding time of page lock longer. Second, it forces lock
2311 * ordering of page lock and transaction start for journaling
2312 * filesystems.
2313 */
2314 if (i_size_changed)
2315 mark_inode_dirty(inode);
2316 return copied;
2317 }
2318 EXPORT_SYMBOL(generic_write_end);
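/*
 * Wiring sketch (hypothetical "myfs" names): filesystems with no
 * post-copy bookkeeping of their own typically pair a thin
 * block_write_begin() wrapper with generic_write_end() directly in their
 * address_space_operations:
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.dirty_folio		= block_dirty_folio,
 *		.invalidate_folio	= block_invalidate_folio,
 *		.read_folio		= myfs_read_folio,
 *		.write_begin		= myfs_write_begin,
 *		.write_end		= generic_write_end,
 *		.bmap			= myfs_bmap,
 *	};
 */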
2319
2320 /*
2321 * block_is_partially_uptodate checks whether buffers within a folio are
2322 * uptodate or not.
2323 *
2324 * Returns true if all buffers which correspond to the specified part
2325 * of the folio are uptodate.
2326 */
2327 bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
2328 {
2329 unsigned block_start, block_end, blocksize;
2330 unsigned to;
2331 struct buffer_head *bh, *head;
2332 bool ret = true;
2333
2334 head = folio_buffers(folio);
2335 if (!head)
2336 return false;
2337 blocksize = head->b_size;
2338 to = min_t(unsigned, folio_size(folio) - from, count);
2339 to = from + to;
2340 if (from < blocksize && to > folio_size(folio) - blocksize)
2341 return false;
2342
2343 bh = head;
2344 block_start = 0;
2345 do {
2346 block_end = block_start + blocksize;
2347 if (block_end > from && block_start < to) {
2348 if (!buffer_uptodate(bh)) {
2349 ret = false;
2350 break;
2351 }
2352 if (block_end >= to)
2353 break;
2354 }
2355 block_start = block_end;
2356 bh = bh->b_this_page;
2357 } while (bh != head);
2358
2359 return ret;
2360 }
2361 EXPORT_SYMBOL(block_is_partially_uptodate);
2362
2363 /*
2364 * Generic "read_folio" function for block devices that have the normal
2365 * get_block functionality. This is most of the block device filesystems.
2366 * Reads the folio asynchronously --- the unlock_buffer() and
2367 * set/clear_buffer_uptodate() functions propagate buffer state into the
2368 * folio once IO has completed.
2369 */
2370 int block_read_full_folio(struct folio *folio, get_block_t *get_block)
2371 {
2372 struct inode *inode = folio->mapping->host;
2373 sector_t iblock, lblock;
2374 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2375 size_t blocksize;
2376 int nr, i;
2377 int fully_mapped = 1;
2378 bool page_error = false;
2379 loff_t limit = i_size_read(inode);
2380
2381 /* This is needed for ext4. */
2382 if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
2383 limit = inode->i_sb->s_maxbytes;
2384
2385 VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
2386
2387 head = folio_create_buffers(folio, inode, 0);
2388 blocksize = head->b_size;
2389
2390 iblock = div_u64(folio_pos(folio), blocksize);
2391 lblock = div_u64(limit + blocksize - 1, blocksize);
2392 bh = head;
2393 nr = 0;
2394 i = 0;
2395
2396 do {
2397 if (buffer_uptodate(bh))
2398 continue;
2399
2400 if (!buffer_mapped(bh)) {
2401 int err = 0;
2402
2403 fully_mapped = 0;
2404 if (iblock < lblock) {
2405 WARN_ON(bh->b_size != blocksize);
2406 err = get_block(inode, iblock, bh, 0);
2407 if (err)
2408 page_error = true;
2409 }
2410 if (!buffer_mapped(bh)) {
2411 folio_zero_range(folio, i * blocksize,
2412 blocksize);
2413 if (!err)
2414 set_buffer_uptodate(bh);
2415 continue;
2416 }
2417 /*
2418 * get_block() might have updated the buffer
2419 * synchronously
2420 */
2421 if (buffer_uptodate(bh))
2422 continue;
2423 }
2424 arr[nr++] = bh;
2425 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2426
2427 if (fully_mapped)
2428 folio_set_mappedtodisk(folio);
2429
2430 if (!nr) {
2431 /*
2432 * All buffers are uptodate or get_block() returned an
2433 * error when trying to map them - we can finish the read.
2434 */
2435 folio_end_read(folio, !page_error);
2436 return 0;
2437 }
2438
2439 /* Stage two: lock the buffers */
2440 for (i = 0; i < nr; i++) {
2441 bh = arr[i];
2442 lock_buffer(bh);
2443 mark_buffer_async_read(bh);
2444 }
2445
2446 /*
2447 * Stage 3: start the IO. Check for uptodateness
2448 * inside the buffer lock in case another process reading
2449 * the underlying blockdev brought it uptodate (the sct fix).
2450 */
2451 for (i = 0; i < nr; i++) {
2452 bh = arr[i];
2453 if (buffer_uptodate(bh))
2454 end_buffer_async_read(bh, 1);
2455 else
2456 submit_bh(REQ_OP_READ, bh);
2457 }
2458 return 0;
2459 }
2460 EXPORT_SYMBOL(block_read_full_folio);
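/*
 * Usage sketch ("myfs_get_block" hypothetical): a get_block based
 * filesystem's ->read_folio is normally a one-line wrapper around this
 * helper:
 *
 *	static int myfs_read_folio(struct file *file, struct folio *folio)
 *	{
 *		return block_read_full_folio(folio, myfs_get_block);
 *	}
 */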
2461
2462 /* utility function for filesystems that need to do work on expanding
2463 * truncates. Uses filesystem pagecache writes to allow the filesystem to
2464 * deal with the hole.
2465 */
2466 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2467 {
2468 struct address_space *mapping = inode->i_mapping;
2469 const struct address_space_operations *aops = mapping->a_ops;
2470 struct page *page;
2471 void *fsdata = NULL;
2472 int err;
2473
2474 err = inode_newsize_ok(inode, size);
2475 if (err)
2476 goto out;
2477
2478 err = aops->write_begin(NULL, mapping, size, 0, &page, &fsdata);
2479 if (err)
2480 goto out;
2481
2482 err = aops->write_end(NULL, mapping, size, 0, 0, page, fsdata);
2483 BUG_ON(err > 0);
2484
2485 out:
2486 return err;
2487 }
2488 EXPORT_SYMBOL(generic_cont_expand_simple);
2489
2490 static int cont_expand_zero(struct file *file, struct address_space *mapping,
2491 loff_t pos, loff_t *bytes)
2492 {
2493 struct inode *inode = mapping->host;
2494 const struct address_space_operations *aops = mapping->a_ops;
2495 unsigned int blocksize = i_blocksize(inode);
2496 struct page *page;
2497 void *fsdata = NULL;
2498 pgoff_t index, curidx;
2499 loff_t curpos;
2500 unsigned zerofrom, offset, len;
2501 int err = 0;
2502
2503 index = pos >> PAGE_SHIFT;
2504 offset = pos & ~PAGE_MASK;
2505
2506 while (index > (curidx = (curpos = *bytes)>>PAGE_SHIFT)) {
2507 zerofrom = curpos & ~PAGE_MASK;
2508 if (zerofrom & (blocksize-1)) {
2509 *bytes |= (blocksize-1);
2510 (*bytes)++;
2511 }
2512 len = PAGE_SIZE - zerofrom;
2513
2514 err = aops->write_begin(file, mapping, curpos, len,
2515 &page, &fsdata);
2516 if (err)
2517 goto out;
2518 zero_user(page, zerofrom, len);
2519 err = aops->write_end(file, mapping, curpos, len, len,
2520 page, fsdata);
2521 if (err < 0)
2522 goto out;
2523 BUG_ON(err != len);
2524 err = 0;
2525
2526 balance_dirty_pages_ratelimited(mapping);
2527
2528 if (fatal_signal_pending(current)) {
2529 err = -EINTR;
2530 goto out;
2531 }
2532 }
2533
2534 /* page covers the boundary, find the boundary offset */
2535 if (index == curidx) {
2536 zerofrom = curpos & ~PAGE_MASK;
2537 /* if we are expanding the file, the last block will be filled */
2538 if (offset <= zerofrom) {
2539 goto out;
2540 }
2541 if (zerofrom & (blocksize-1)) {
2542 *bytes |= (blocksize-1);
2543 (*bytes)++;
2544 }
2545 len = offset - zerofrom;
2546
2547 err = aops->write_begin(file, mapping, curpos, len,
2548 &page, &fsdata);
2549 if (err)
2550 goto out;
2551 zero_user(page, zerofrom, len);
2552 err = aops->write_end(file, mapping, curpos, len, len,
2553 page, fsdata);
2554 if (err < 0)
2555 goto out;
2556 BUG_ON(err != len);
2557 err = 0;
2558 }
2559 out:
2560 return err;
2561 }
2562
2563 /*
2564 * For moronic filesystems that do not allow holes in files.
2565 * We may have to extend the file.
2566 */
2567 int cont_write_begin(struct file *file, struct address_space *mapping,
2568 loff_t pos, unsigned len,
2569 struct page **pagep, void **fsdata,
2570 get_block_t *get_block, loff_t *bytes)
2571 {
2572 struct inode *inode = mapping->host;
2573 unsigned int blocksize = i_blocksize(inode);
2574 unsigned int zerofrom;
2575 int err;
2576
2577 err = cont_expand_zero(file, mapping, pos, bytes);
2578 if (err)
2579 return err;
2580
2581 zerofrom = *bytes & ~PAGE_MASK;
2582 if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2583 *bytes |= (blocksize-1);
2584 (*bytes)++;
2585 }
2586
2587 return block_write_begin(mapping, pos, len, pagep, get_block);
2588 }
2589 EXPORT_SYMBOL(cont_write_begin);
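/*
 * Usage sketch (hypothetical "myfs" names): a hole-less filesystem keeps
 * an "initialised up to here" watermark, usually in its in-core inode,
 * and passes its address so cont_expand_zero() can advance it as the
 * intervening range is zeroed:
 *
 *	static int myfs_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, struct page **pagep, void **fsdata)
 *	{
 *		return cont_write_begin(file, mapping, pos, len, pagep,
 *					fsdata, myfs_get_block,
 *					&myfs_i(mapping->host)->i_allocated_size);
 *	}
 */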
2590
2591 void block_commit_write(struct page *page, unsigned from, unsigned to)
2592 {
2593 struct folio *folio = page_folio(page);
2594 __block_commit_write(folio, from, to);
2595 }
2596 EXPORT_SYMBOL(block_commit_write);
2597
2598 /*
2599 * block_page_mkwrite() is not allowed to change the file size as it gets
2600 * called from a page fault handler when a page is first dirtied. Hence we must
2601 * be careful to check for EOF conditions here. We set the page up correctly
2602 * for a written page which means we get ENOSPC checking when writing into
2603 * holes and correct delalloc and unwritten extent mapping on filesystems that
2604 * support these features.
2605 *
2606 * We are not allowed to take the i_mutex here so we have to play games to
2607 * protect against truncate races as the page could now be beyond EOF. Because
2608 * truncate writes the inode size before removing pages, once we have the
2609 * page lock we can determine safely if the page is beyond EOF. If it is not
2610 * beyond EOF, then the page is guaranteed safe against truncation until we
2611 * unlock the page.
2612 *
2613 * Direct callers of this function should protect against filesystem freezing
2614 * using sb_start_pagefault() - sb_end_pagefault() functions.
2615 */
2616 int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2617 get_block_t get_block)
2618 {
2619 struct folio *folio = page_folio(vmf->page);
2620 struct inode *inode = file_inode(vma->vm_file);
2621 unsigned long end;
2622 loff_t size;
2623 int ret;
2624
2625 folio_lock(folio);
2626 size = i_size_read(inode);
2627 if ((folio->mapping != inode->i_mapping) ||
2628 (folio_pos(folio) >= size)) {
2629 /* We overload EFAULT to mean page got truncated */
2630 ret = -EFAULT;
2631 goto out_unlock;
2632 }
2633
2634 end = folio_size(folio);
2635 /* folio is wholly or partially inside EOF */
2636 if (folio_pos(folio) + end > size)
2637 end = size - folio_pos(folio);
2638
2639 ret = __block_write_begin_int(folio, 0, end, get_block, NULL);
2640 if (unlikely(ret))
2641 goto out_unlock;
2642
2643 __block_commit_write(folio, 0, end);
2644
2645 folio_mark_dirty(folio);
2646 folio_wait_stable(folio);
2647 return 0;
2648 out_unlock:
2649 folio_unlock(folio);
2650 return ret;
2651 }
2652 EXPORT_SYMBOL(block_page_mkwrite);
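/*
 * Usage sketch (hypothetical "myfs" names), following the freeze
 * protection rule stated above; a real implementation would map -EFAULT
 * to VM_FAULT_NOPAGE and -ENOMEM to VM_FAULT_OOM instead of returning
 * SIGBUS for every error:
 *
 *	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct super_block *sb = file_inode(vmf->vma->vm_file)->i_sb;
 *		int err;
 *
 *		sb_start_pagefault(sb);
 *		err = block_page_mkwrite(vmf->vma, vmf, myfs_get_block);
 *		sb_end_pagefault(sb);
 *		return err ? VM_FAULT_SIGBUS : VM_FAULT_LOCKED;
 *	}
 */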
2653
2654 int block_truncate_page(struct address_space *mapping,
2655 loff_t from, get_block_t *get_block)
2656 {
2657 pgoff_t index = from >> PAGE_SHIFT;
2658 unsigned blocksize;
2659 sector_t iblock;
2660 size_t offset, length, pos;
2661 struct inode *inode = mapping->host;
2662 struct folio *folio;
2663 struct buffer_head *bh;
2664 int err = 0;
2665
2666 blocksize = i_blocksize(inode);
2667 length = from & (blocksize - 1);
2668
2669 /* Block boundary? Nothing to do */
2670 if (!length)
2671 return 0;
2672
2673 length = blocksize - length;
2674 iblock = ((loff_t)index * PAGE_SIZE) >> inode->i_blkbits;
2675
2676 folio = filemap_grab_folio(mapping, index);
2677 if (IS_ERR(folio))
2678 return PTR_ERR(folio);
2679
2680 bh = folio_buffers(folio);
2681 if (!bh)
2682 bh = create_empty_buffers(folio, blocksize, 0);
2683
2684 /* Find the buffer that contains "offset" */
2685 offset = offset_in_folio(folio, from);
2686 pos = blocksize;
2687 while (offset >= pos) {
2688 bh = bh->b_this_page;
2689 iblock++;
2690 pos += blocksize;
2691 }
2692
2693 if (!buffer_mapped(bh)) {
2694 WARN_ON(bh->b_size != blocksize);
2695 err = get_block(inode, iblock, bh, 0);
2696 if (err)
2697 goto unlock;
2698 /* unmapped? It's a hole - nothing to do */
2699 if (!buffer_mapped(bh))
2700 goto unlock;
2701 }
2702
2703 /* Ok, it's mapped. Make sure it's up-to-date */
2704 if (folio_test_uptodate(folio))
2705 set_buffer_uptodate(bh);
2706
2707 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2708 err = bh_read(bh, 0);
2709 /* Uhhuh. Read error. Complain and punt. */
2710 if (err < 0)
2711 goto unlock;
2712 }
2713
2714 folio_zero_range(folio, offset, length);
2715 mark_buffer_dirty(bh);
2716
2717 unlock:
2718 folio_unlock(folio);
2719 folio_put(folio);
2720
2721 return err;
2722 }
2723 EXPORT_SYMBOL(block_truncate_page);
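/*
 * Usage sketch ("myfs_get_block" hypothetical): when shrinking a file to
 * a size that is not block aligned, filesystems zero the tail of the new
 * last block before updating i_size and freeing the blocks beyond it:
 *
 *	err = block_truncate_page(inode->i_mapping, newsize, myfs_get_block);
 *	if (err)
 *		return err;
 *	truncate_setsize(inode, newsize);
 */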
2724
2725 /*
2726 * The generic ->writepage function for buffer-backed address_spaces
2727 */
2728 int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
2729 void *get_block)
2730 {
2731 struct inode * const inode = folio->mapping->host;
2732 loff_t i_size = i_size_read(inode);
2733
2734 /* Is the folio fully inside i_size? */
2735 if (folio_pos(folio) + folio_size(folio) <= i_size)
2736 return __block_write_full_folio(inode, folio, get_block, wbc);
2737
2738 /* Is the folio fully outside i_size? (truncate in progress) */
2739 if (folio_pos(folio) >= i_size) {
2740 folio_unlock(folio);
2741 return 0; /* don't care */
2742 }
2743
2744 /*
2745 * The folio straddles i_size. It must be zeroed out on each and every
2746 * writepage invocation because it may be mmapped. "A file is mapped
2747 * in multiples of the page size. For a file that is not a multiple of
2748 * the page size, the remaining memory is zeroed when mapped, and
2749 * writes to that region are not written out to the file."
2750 */
2751 folio_zero_segment(folio, offset_in_folio(folio, i_size),
2752 folio_size(folio));
2753 return __block_write_full_folio(inode, folio, get_block, wbc);
2754 }
2755
2756 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2757 get_block_t *get_block)
2758 {
2759 struct inode *inode = mapping->host;
2760 struct buffer_head tmp = {
2761 .b_size = i_blocksize(inode),
2762 };
2763
2764 get_block(inode, block, &tmp, 0);
2765 return tmp.b_blocknr;
2766 }
2767 EXPORT_SYMBOL(generic_block_bmap);
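/*
 * Usage sketch ("myfs_get_block" hypothetical): the legacy ->bmap hook is
 * usually just a thin wrapper around this helper:
 *
 *	static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
 *	{
 *		return generic_block_bmap(mapping, block, myfs_get_block);
 *	}
 */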
2768
2769 static void end_bio_bh_io_sync(struct bio *bio)
2770 {
2771 struct buffer_head *bh = bio->bi_private;
2772
2773 if (unlikely(bio_flagged(bio, BIO_QUIET)))
2774 set_bit(BH_Quiet, &bh->b_state);
2775
2776 bh->b_end_io(bh, !bio->bi_status);
2777 bio_put(bio);
2778 }
2779
2780 static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
2781 enum rw_hint write_hint,
2782 struct writeback_control *wbc)
2783 {
2784 const enum req_op op = opf & REQ_OP_MASK;
2785 struct bio *bio;
2786
2787 BUG_ON(!buffer_locked(bh));
2788 BUG_ON(!buffer_mapped(bh));
2789 BUG_ON(!bh->b_end_io);
2790 BUG_ON(buffer_delay(bh));
2791 BUG_ON(buffer_unwritten(bh));
2792
2793 /*
2794 * Only clear out a write error when rewriting
2795 */
2796 if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE))
2797 clear_buffer_write_io_error(bh);
2798
2799 if (buffer_meta(bh))
2800 opf |= REQ_META;
2801 if (buffer_prio(bh))
2802 opf |= REQ_PRIO;
2803
2804 bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO);
2805
2806 fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
2807
2808 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2809 bio->bi_write_hint = write_hint;
2810
2811 __bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
2812
2813 bio->bi_end_io = end_bio_bh_io_sync;
2814 bio->bi_private = bh;
2815
2816 /* Take care of bh's that straddle the end of the device */
2817 guard_bio_eod(bio);
2818
2819 if (wbc) {
2820 wbc_init_bio(wbc, bio);
2821 wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size);
2822 }
2823
2824 submit_bio(bio);
2825 }
2826
2827 void submit_bh(blk_opf_t opf, struct buffer_head *bh)
2828 {
2829 submit_bh_wbc(opf, bh, WRITE_LIFE_NOT_SET, NULL);
2830 }
2831 EXPORT_SYMBOL(submit_bh);
2832
2833 void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
2834 {
2835 lock_buffer(bh);
2836 if (!test_clear_buffer_dirty(bh)) {
2837 unlock_buffer(bh);
2838 return;
2839 }
2840 bh->b_end_io = end_buffer_write_sync;
2841 get_bh(bh);
2842 submit_bh(REQ_OP_WRITE | op_flags, bh);
2843 }
2844 EXPORT_SYMBOL(write_dirty_buffer);
2845
2846 /*
2847 * For a data-integrity writeout, we need to wait upon any in-progress I/O
2848 * and then start new I/O and then wait upon it. The caller must have a ref on
2849 * the buffer_head.
2850 */
2851 int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
2852 {
2853 WARN_ON(atomic_read(&bh->b_count) < 1);
2854 lock_buffer(bh);
2855 if (test_clear_buffer_dirty(bh)) {
2856 /*
2857 * The bh should be mapped, but it might not be if the
2858 * device was hot-removed. Not much we can do but fail the I/O.
2859 */
2860 if (!buffer_mapped(bh)) {
2861 unlock_buffer(bh);
2862 return -EIO;
2863 }
2864
2865 get_bh(bh);
2866 bh->b_end_io = end_buffer_write_sync;
2867 submit_bh(REQ_OP_WRITE | op_flags, bh);
2868 wait_on_buffer(bh);
2869 if (!buffer_uptodate(bh))
2870 return -EIO;
2871 } else {
2872 unlock_buffer(bh);
2873 }
2874 return 0;
2875 }
2876 EXPORT_SYMBOL(__sync_dirty_buffer);
2877
2878 int sync_dirty_buffer(struct buffer_head *bh)
2879 {
2880 return __sync_dirty_buffer(bh, REQ_SYNC);
2881 }
2882 EXPORT_SYMBOL(sync_dirty_buffer);
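/*
 * Usage sketch: the classic synchronous metadata update on a buffer
 * obtained from sb_bread() (the actual on-disk update is filesystem
 * specific and elided here):
 *
 *	lock_buffer(bh);
 *	... modify the structure in bh->b_data ...
 *	unlock_buffer(bh);
 *	mark_buffer_dirty(bh);
 *	err = sync_dirty_buffer(bh);	(waits; -EIO on write failure)
 *	brelse(bh);
 */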
2883
2884 static inline int buffer_busy(struct buffer_head *bh)
2885 {
2886 return atomic_read(&bh->b_count) |
2887 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2888 }
2889
2890 static bool
2891 drop_buffers(struct folio *folio, struct buffer_head **buffers_to_free)
2892 {
2893 struct buffer_head *head = folio_buffers(folio);
2894 struct buffer_head *bh;
2895
2896 bh = head;
2897 do {
2898 if (buffer_busy(bh))
2899 goto failed;
2900 bh = bh->b_this_page;
2901 } while (bh != head);
2902
2903 do {
2904 struct buffer_head *next = bh->b_this_page;
2905
2906 if (bh->b_assoc_map)
2907 __remove_assoc_queue(bh);
2908 bh = next;
2909 } while (bh != head);
2910 *buffers_to_free = head;
2911 folio_detach_private(folio);
2912 return true;
2913 failed:
2914 return false;
2915 }
2916
2917 /**
2918 * try_to_free_buffers - Release buffers attached to this folio.
2919 * @folio: The folio.
2920 *
2921 * If any buffers are in use (dirty, under writeback, elevated refcount),
2922 * no buffers will be freed.
2923 *
2924 * If the folio is dirty but all the buffers are clean then we need to
2925 * be sure to mark the folio clean as well. This is because the folio
2926 * may be against a block device, and a later reattachment of buffers
2927 * to a dirty folio will set *all* buffers dirty. Which would corrupt
2928 * filesystem data on the same device.
2929 *
2930 * The same applies to regular filesystem folios: if all the buffers are
2931 * clean then we set the folio clean and proceed. To do that, we require
2932 * total exclusion from block_dirty_folio(). That is obtained with
2933 * i_private_lock.
2934 *
2935 * Exclusion against try_to_free_buffers may be obtained by either
2936 * locking the folio or by holding its mapping's i_private_lock.
2937 *
2938 * Context: Process context. @folio must be locked. Will not sleep.
2939 * Return: true if all buffers attached to this folio were freed.
2940 */
2941 bool try_to_free_buffers(struct folio *folio)
2942 {
2943 struct address_space * const mapping = folio->mapping;
2944 struct buffer_head *buffers_to_free = NULL;
2945 bool ret = false;
2946
2947 BUG_ON(!folio_test_locked(folio));
2948 if (folio_test_writeback(folio))
2949 return false;
2950
2951 if (mapping == NULL) { /* can this still happen? */
2952 ret = drop_buffers(folio, &buffers_to_free);
2953 goto out;
2954 }
2955
2956 spin_lock(&mapping->i_private_lock);
2957 ret = drop_buffers(folio, &buffers_to_free);
2958
2959 /*
2960 * If the filesystem writes its buffers by hand (eg ext3)
2961 * then we can have clean buffers against a dirty folio. We
2962 * clean the folio here; otherwise the VM will never notice
2963 * that the filesystem did any IO at all.
2964 *
2965 * Also, during truncate, discard_buffer will have marked all
2966 * the folio's buffers clean. We discover that here and clean
2967 * the folio also.
2968 *
2969 * i_private_lock must be held over this entire operation in order
2970 * to synchronise against block_dirty_folio and prevent the
2971 * dirty bit from being lost.
2972 */
2973 if (ret)
2974 folio_cancel_dirty(folio);
2975 spin_unlock(&mapping->i_private_lock);
2976 out:
2977 if (buffers_to_free) {
2978 struct buffer_head *bh = buffers_to_free;
2979
2980 do {
2981 struct buffer_head *next = bh->b_this_page;
2982 free_buffer_head(bh);
2983 bh = next;
2984 } while (bh != buffers_to_free);
2985 }
2986 return ret;
2987 }
2988 EXPORT_SYMBOL(try_to_free_buffers);
2989
2990 /*
2991 * Buffer-head allocation
2992 */
2993 static struct kmem_cache *bh_cachep __ro_after_init;
2994
2995 /*
2996 * Once the number of bh's in the machine exceeds this level, we start
2997 * stripping them in writeback.
2998 */
2999 static unsigned long max_buffer_heads __ro_after_init;
3000
3001 int buffer_heads_over_limit;
3002
3003 struct bh_accounting {
3004 int nr; /* Number of live bh's */
3005 int ratelimit; /* Limit cacheline bouncing */
3006 };
3007
3008 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3009
3010 static void recalc_bh_state(void)
3011 {
3012 int i;
3013 int tot = 0;
3014
3015 if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
3016 return;
3017 __this_cpu_write(bh_accounting.ratelimit, 0);
3018 for_each_online_cpu(i)
3019 tot += per_cpu(bh_accounting, i).nr;
3020 buffer_heads_over_limit = (tot > max_buffer_heads);
3021 }
3022
3023 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3024 {
3025 struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
3026 if (ret) {
3027 INIT_LIST_HEAD(&ret->b_assoc_buffers);
3028 spin_lock_init(&ret->b_uptodate_lock);
3029 preempt_disable();
3030 __this_cpu_inc(bh_accounting.nr);
3031 recalc_bh_state();
3032 preempt_enable();
3033 }
3034 return ret;
3035 }
3036 EXPORT_SYMBOL(alloc_buffer_head);
3037
3038 void free_buffer_head(struct buffer_head *bh)
3039 {
3040 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3041 kmem_cache_free(bh_cachep, bh);
3042 preempt_disable();
3043 __this_cpu_dec(bh_accounting.nr);
3044 recalc_bh_state();
3045 preempt_enable();
3046 }
3047 EXPORT_SYMBOL(free_buffer_head);
3048
3049 static int buffer_exit_cpu_dead(unsigned int cpu)
3050 {
3051 int i;
3052 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3053
3054 for (i = 0; i < BH_LRU_SIZE; i++) {
3055 brelse(b->bhs[i]);
3056 b->bhs[i] = NULL;
3057 }
3058 this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
3059 per_cpu(bh_accounting, cpu).nr = 0;
3060 return 0;
3061 }
3062
3063 /**
3064 * bh_uptodate_or_lock - Test whether the buffer is uptodate
3065 * @bh: struct buffer_head
3066 *
3067 * Return true if the buffer is up-to-date and false,
3068 * with the buffer locked, if not.
3069 */
3070 int bh_uptodate_or_lock(struct buffer_head *bh)
3071 {
3072 if (!buffer_uptodate(bh)) {
3073 lock_buffer(bh);
3074 if (!buffer_uptodate(bh))
3075 return 0;
3076 unlock_buffer(bh);
3077 }
3078 return 1;
3079 }
3080 EXPORT_SYMBOL(bh_uptodate_or_lock);
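/*
 * Usage sketch: the usual "read the block unless it is already cached"
 * pattern pairs this helper with __bh_read(), which consumes the lock
 * taken here (bh_read() in buffer_head.h bundles both steps):
 *
 *	if (!bh_uptodate_or_lock(bh)) {
 *		err = __bh_read(bh, 0, true);
 *		if (err < 0)
 *			return err;
 *	}
 */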
3081
3082 /**
3083 * __bh_read - Submit read for a locked buffer
3084 * @bh: struct buffer_head
3085 * @op_flags: extra REQ_* flags to OR into REQ_OP_READ
3086 * @wait: wait until reading finishes
3087 *
3088 * Returns zero on success (or when @wait is false), and -EIO on error.
3089 */
3090 int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait)
3091 {
3092 int ret = 0;
3093
3094 BUG_ON(!buffer_locked(bh));
3095
3096 get_bh(bh);
3097 bh->b_end_io = end_buffer_read_sync;
3098 submit_bh(REQ_OP_READ | op_flags, bh);
3099 if (wait) {
3100 wait_on_buffer(bh);
3101 if (!buffer_uptodate(bh))
3102 ret = -EIO;
3103 }
3104 return ret;
3105 }
3106 EXPORT_SYMBOL(__bh_read);
3107
3108 /**
3109 * __bh_read_batch - Submit read for a batch of unlocked buffers
3110 * @nr: entry number of the buffer batch
3111 * @bhs: a batch of struct buffer_head
3112 * @op_flags: extra REQ_* flags to OR into REQ_OP_READ
3113 * @force_lock: force to get a lock on the buffer if set, otherwise
3114 * skip any buffer that cannot be locked without blocking.
3115 *
3116 * Submits the reads asynchronously and does not wait for them to complete.
3117 */
3118 void __bh_read_batch(int nr, struct buffer_head *bhs[],
3119 blk_opf_t op_flags, bool force_lock)
3120 {
3121 int i;
3122
3123 for (i = 0; i < nr; i++) {
3124 struct buffer_head *bh = bhs[i];
3125
3126 if (buffer_uptodate(bh))
3127 continue;
3128
3129 if (force_lock)
3130 lock_buffer(bh);
3131 else
3132 if (!trylock_buffer(bh))
3133 continue;
3134
3135 if (buffer_uptodate(bh)) {
3136 unlock_buffer(bh);
3137 continue;
3138 }
3139
3140 bh->b_end_io = end_buffer_read_sync;
3141 get_bh(bh);
3142 submit_bh(REQ_OP_READ | op_flags, bh);
3143 }
3144 }
3145 EXPORT_SYMBOL(__bh_read_batch);
3146
3147 void __init buffer_init(void)
3148 {
3149 unsigned long nrpages;
3150 int ret;
3151
3152 bh_cachep = KMEM_CACHE(buffer_head,
3153 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC);
3154 /*
3155 * Limit the bh occupancy to 10% of ZONE_NORMAL
3156 */
3157 nrpages = (nr_free_buffer_pages() * 10) / 100;
3158 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3159 ret = cpuhp_setup_state_nocalls(CPUHP_FS_BUFF_DEAD, "fs/buffer:dead",
3160 NULL, buffer_exit_cpu_dead);
3161 WARN_ON(ret < 0);
3162 }
3163