// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/buffer.c
 *
 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations. Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
 *
 * Added 32k buffer block sizes - these are required on older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>
#include <linux/pagevec.h>
#include <linux/sched/mm.h>
#include <trace/events/block.h>
#include <linux/fscrypt.h>
#include <linux/fsverity.h>
#include <linux/sched/isolation.h>

#include "internal.h"

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
			  enum rw_hint hint, struct writeback_control *wbc);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void touch_buffer(struct buffer_head *bh)
{
	trace_block_touch_buffer(bh);
	folio_mark_accessed(bh->b_folio);
}
EXPORT_SYMBOL(touch_buffer);

void __lock_buffer(struct buffer_head *bh)
{
	wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void unlock_buffer(struct buffer_head *bh)
{
	clear_bit_unlock(BH_Lock, &bh->b_state);
	smp_mb__after_atomic();
	wake_up_bit(&bh->b_state, BH_Lock);
}
EXPORT_SYMBOL(unlock_buffer);

/*
 * Returns whether the folio has dirty or writeback buffers. If all the buffers
 * are unlocked and clean then the folio_test_dirty information is stale. If
 * any of the buffers are locked, it is assumed they are locked for IO.
 */
void buffer_check_dirty_writeback(struct folio *folio,
				  bool *dirty, bool *writeback)
{
	struct buffer_head *head, *bh;
	*dirty = false;
	*writeback = false;

	BUG_ON(!folio_test_locked(folio));

	head = folio_buffers(folio);
	if (!head)
		return;

	if (folio_test_writeback(folio))
		*writeback = true;

	bh = head;
	do {
		if (buffer_locked(bh))
			*writeback = true;

		if (buffer_dirty(bh))
			*dirty = true;

		bh = bh->b_this_page;
	} while (bh != head);
}

/*
 * Block until a buffer comes unlocked. This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head *bh)
{
	wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__wait_on_buffer);

static void buffer_io_error(struct buffer_head *bh, char *msg)
{
	if (!test_bit(BH_Quiet, &bh->b_state))
		printk_ratelimited(KERN_ERR
			"Buffer I/O error on dev %pg, logical block %llu%s\n",
			bh->b_bdev, (unsigned long long)bh->b_blocknr, msg);
}

/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		/* This happens, due to failed read-ahead attempts. */
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler. Just mark it up-to-date and
 * unlock the buffer.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
	__end_buffer_read_notouch(bh, uptodate);
	put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_read_sync);

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		buffer_io_error(bh, ", lost sync page write");
		mark_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_write_sync);
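
/*
 * Illustrative sketch (not a definition in this file): a caller that wants a
 * synchronous write of a single buffer pairs submit_bh() with
 * end_buffer_write_sync(), which is essentially what __sync_dirty_buffer()
 * does elsewhere in this file:
 *
 *	lock_buffer(bh);
 *	if (test_clear_buffer_dirty(bh)) {
 *		get_bh(bh);	// extra ref is dropped by end_buffer_write_sync()
 *		bh->b_end_io = end_buffer_write_sync;
 *		submit_bh(REQ_OP_WRITE | REQ_SYNC, bh);
 *		wait_on_buffer(bh);
 *		if (!buffer_uptodate(bh))
 *			err = -EIO;
 *	} else {
 *		unlock_buffer(bh);
 *	}
 */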

/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers. To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * i_private_lock.
 *
 * Hack idea: for the blockdev mapping, i_private_lock contention
 * may be quite high. This code could TryLock the page, and if that
 * succeeds, there is no need to take i_private_lock.
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
	struct address_space *bd_mapping = bdev->bd_mapping;
	const int blkbits = bd_mapping->host->i_blkbits;
	struct buffer_head *ret = NULL;
	pgoff_t index;
	struct buffer_head *bh;
	struct buffer_head *head;
	struct folio *folio;
	int all_mapped = 1;
	static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);

	index = ((loff_t)block << blkbits) / PAGE_SIZE;
	folio = __filemap_get_folio(bd_mapping, index, FGP_ACCESSED, 0);
	if (IS_ERR(folio))
		goto out;

	spin_lock(&bd_mapping->i_private_lock);
	head = folio_buffers(folio);
	if (!head)
		goto out_unlock;
	bh = head;
	do {
		if (!buffer_mapped(bh))
			all_mapped = 0;
		else if (bh->b_blocknr == block) {
			ret = bh;
			get_bh(bh);
			goto out_unlock;
		}
		bh = bh->b_this_page;
	} while (bh != head);

	/* we might be here because some of the buffers on this page are
	 * not mapped. This is due to various races between
	 * file io on the block device and getblk. It gets dealt with
	 * elsewhere, don't buffer_error if we had some unmapped buffers
	 */
	ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
	if (all_mapped && __ratelimit(&last_warned)) {
		printk("__find_get_block_slow() failed. block=%llu, "
		       "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
		       "device %pg blocksize: %d\n",
		       (unsigned long long)block,
		       (unsigned long long)bh->b_blocknr,
		       bh->b_state, bh->b_size, bdev,
		       1 << blkbits);
	}
out_unlock:
	spin_unlock(&bd_mapping->i_private_lock);
	folio_put(folio);
out:
	return ret;
}

static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct folio *folio;
	int folio_uptodate = 1;

	BUG_ON(!buffer_async_read(bh));

	folio = bh->b_folio;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		clear_buffer_uptodate(bh);
		buffer_io_error(bh, ", async page read");
		folio_set_error(folio);
	}

	/*
	 * Be _very_ careful from here on. Bad things can happen if
	 * two buffer heads end IO at almost the same time and both
	 * decide that the page is now completely done.
	 */
	first = folio_buffers(folio);
	spin_lock_irqsave(&first->b_uptodate_lock, flags);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			folio_uptodate = 0;
		if (buffer_async_read(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);

	folio_end_read(folio, folio_uptodate);
	return;

still_busy:
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	return;
}

struct postprocess_bh_ctx {
	struct work_struct work;
	struct buffer_head *bh;
};

static void verify_bh(struct work_struct *work)
{
	struct postprocess_bh_ctx *ctx =
		container_of(work, struct postprocess_bh_ctx, work);
	struct buffer_head *bh = ctx->bh;
	bool valid;

	valid = fsverity_verify_blocks(bh->b_folio, bh->b_size, bh_offset(bh));
	end_buffer_async_read(bh, valid);
	kfree(ctx);
}

static bool need_fsverity(struct buffer_head *bh)
{
	struct folio *folio = bh->b_folio;
	struct inode *inode = folio->mapping->host;

	return fsverity_active(inode) &&
		/* needed by ext4 */
		folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}

static void decrypt_bh(struct work_struct *work)
{
	struct postprocess_bh_ctx *ctx =
		container_of(work, struct postprocess_bh_ctx, work);
	struct buffer_head *bh = ctx->bh;
	int err;

	err = fscrypt_decrypt_pagecache_blocks(bh->b_folio, bh->b_size,
					       bh_offset(bh));
	if (err == 0 && need_fsverity(bh)) {
		/*
		 * We use different work queues for decryption and for verity
		 * because verity may require reading metadata pages that need
		 * decryption, and we shouldn't recurse to the same workqueue.
		 */
		INIT_WORK(&ctx->work, verify_bh);
		fsverity_enqueue_verify_work(&ctx->work);
		return;
	}
	end_buffer_async_read(bh, err == 0);
	kfree(ctx);
}

/*
 * I/O completion handler for block_read_full_folio() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
{
	struct inode *inode = bh->b_folio->mapping->host;
	bool decrypt = fscrypt_inode_uses_fs_layer_crypto(inode);
	bool verify = need_fsverity(bh);

	/* Decrypt (with fscrypt) and/or verify (with fsverity) if needed. */
	if (uptodate && (decrypt || verify)) {
		struct postprocess_bh_ctx *ctx =
			kmalloc(sizeof(*ctx), GFP_ATOMIC);

		if (ctx) {
			ctx->bh = bh;
			if (decrypt) {
				INIT_WORK(&ctx->work, decrypt_bh);
				fscrypt_enqueue_decrypt_work(&ctx->work);
			} else {
				INIT_WORK(&ctx->work, verify_bh);
				fsverity_enqueue_verify_work(&ctx->work);
			}
			return;
		}
		uptodate = 0;
	}
	end_buffer_async_read(bh, uptodate);
}

/*
 * Completion handler for block_write_full_folio() - folios which are unlocked
 * during I/O, and which have the writeback flag cleared upon I/O completion.
 */
static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct folio *folio;

	BUG_ON(!buffer_async_write(bh));

	folio = bh->b_folio;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		buffer_io_error(bh, ", lost async page write");
		mark_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
		folio_set_error(folio);
	}

	first = folio_buffers(folio);
	spin_lock_irqsave(&first->b_uptodate_lock, flags);

	clear_buffer_async_write(bh);
	unlock_buffer(bh);
	tmp = bh->b_this_page;
	while (tmp != bh) {
		if (buffer_async_write(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	}
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	folio_end_writeback(folio);
	return;

still_busy:
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	return;
}

/*
 * If a page's buffers are under async read-in (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed. This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async I/O reads against any
 * of the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_read_io;
	set_buffer_async_read(bh);
}

static void mark_buffer_async_write_endio(struct buffer_head *bh,
					  bh_end_io_t *handler)
{
	bh->b_end_io = handler;
	set_buffer_async_write(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
	mark_buffer_async_write_endio(bh, end_buffer_async_write);
}
EXPORT_SYMBOL(mark_buffer_async_write);


/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions. A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync(). For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_dirty_inode(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->i_private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed. But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for i_private_list is via the i_private_lock in the address_space
 * which backs the buffers. Which is different from the address_space
 * against which the buffers are listed. So for a particular address_space,
 * mapping->i_private_lock does *not* protect mapping->i_private_list! In fact,
 * mapping->i_private_list will always be protected by the backing blockdev's
 * ->i_private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->i_private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->i_private_list via these
 * utility functions are free to use i_private_lock and i_private_list for
 * whatever they want. The only requirement is that list_empty(i_private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
 * filesystems should do that. invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
 * take an address_space, not an inode. And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list. Because if the buffer is on a list,
 * it *must* already be on the right one. If not, the filesystem is being
 * silly. This will save a ton of locking. But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate). That requires careful auditing of all
 * filesystems (do it inside bforget()). It could also be done by bringing
 * b_inode back.
 */
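
/*
 * Illustrative sketch (assumed filesystem code, not part of this file): a
 * filesystem that updates an on-disk indirect/metadata block for an inode and
 * wants it covered by a later fsync() of that inode would typically do
 * something like the following (blocknr is a placeholder):
 *
 *	struct buffer_head *bh = sb_bread(inode->i_sb, blocknr);
 *
 *	if (bh) {
 *		... modify bh->b_data ...
 *		mark_buffer_dirty_inode(bh, inode);	// queue on i_private_list
 *		brelse(bh);
 *	}
 *
 * and its ->fsync() would then call sync_mapping_buffers(inode->i_mapping),
 * directly or via generic_buffers_fsync() below, to write out and wait upon
 * those dependent blockdev buffers.
 */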

/*
 * The buffer's backing address_space's i_private_lock must be held
 */
static void __remove_assoc_queue(struct buffer_head *bh)
{
	list_del_init(&bh->b_assoc_buffers);
	WARN_ON(!bh->b_assoc_map);
	bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
	return !list_empty(&inode->i_data.i_private_list);
}

/*
 * osync is designed to support O_SYNC io. It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with write_dirty_buffer
 * as you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion. Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head *p;
	int err = 0;

	spin_lock(lock);
repeat:
	list_for_each_prev(p, list) {
		bh = BH_ENTRY(p);
		if (buffer_locked(bh)) {
			get_bh(bh);
			spin_unlock(lock);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				err = -EIO;
			brelse(bh);
			spin_lock(lock);
			goto repeat;
		}
	}
	spin_unlock(lock);
	return err;
}

/**
 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->i_private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
	struct address_space *buffer_mapping = mapping->i_private_data;

	if (buffer_mapping == NULL || list_empty(&mapping->i_private_list))
		return 0;

	return fsync_buffers_list(&buffer_mapping->i_private_lock,
				  &mapping->i_private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);

/**
 * generic_buffers_fsync_noflush - generic buffer fsync implementation
 * for simple filesystems with no inode lock
 *
 * @file: file to synchronize
 * @start: start offset in bytes
 * @end: end offset in bytes (inclusive)
 * @datasync: only synchronize essential metadata if true
 *
 * This is a generic implementation of the fsync method for simple
 * filesystems which track all non-inode metadata in the buffers list
 * hanging off the address_space structure.
 */
int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end,
				  bool datasync)
{
	struct inode *inode = file->f_mapping->host;
	int err;
	int ret;

	err = file_write_and_wait_range(file, start, end);
	if (err)
		return err;

	ret = sync_mapping_buffers(inode->i_mapping);
	if (!(inode->i_state & I_DIRTY_ALL))
		goto out;
	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
		goto out;

	err = sync_inode_metadata(inode, 1);
	if (ret == 0)
		ret = err;

out:
	/* check and advance again to catch errors after syncing out buffers */
	err = file_check_and_advance_wb_err(file);
	if (ret == 0)
		ret = err;
	return ret;
}
EXPORT_SYMBOL(generic_buffers_fsync_noflush);

/**
 * generic_buffers_fsync - generic buffer fsync implementation
 * for simple filesystems with no inode lock
 *
 * @file: file to synchronize
 * @start: start offset in bytes
 * @end: end offset in bytes (inclusive)
 * @datasync: only synchronize essential metadata if true
 *
 * This is a generic implementation of the fsync method for simple
 * filesystems which track all non-inode metadata in the buffers list
 * hanging off the address_space structure. This also makes sure that
 * a device cache flush operation is called at the end.
 */
int generic_buffers_fsync(struct file *file, loff_t start, loff_t end,
			  bool datasync)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	ret = generic_buffers_fsync_noflush(file, start, end, datasync);
	if (!ret)
		ret = blkdev_issue_flush(inode->i_sb->s_bdev);
	return ret;
}
EXPORT_SYMBOL(generic_buffers_fsync);
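
/*
 * Illustrative sketch (assumed filesystem code, names marked example_* are
 * hypothetical): a simple buffer_head based filesystem can implement its
 * fsync method as a thin wrapper around the helper above:
 *
 *	static int example_fsync(struct file *file, loff_t start, loff_t end,
 *				 int datasync)
 *	{
 *		return generic_buffers_fsync(file, start, end, datasync);
 *	}
 *
 *	const struct file_operations example_file_operations = {
 *		.fsync		= example_fsync,
 *		...
 *	};
 *
 * Filesystems whose journalling or write path already issues the device
 * cache flush can call generic_buffers_fsync_noflush() instead and avoid a
 * redundant flush.
 */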

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer. This means that the block at
 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
 * dirty, schedule it for IO. So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
			  sector_t bblock, unsigned blocksize)
{
	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
	if (bh) {
		if (buffer_dirty(bh))
			write_dirty_buffer(bh, 0);
		put_bh(bh);
	}
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct address_space *buffer_mapping = bh->b_folio->mapping;

	mark_buffer_dirty(bh);
	if (!mapping->i_private_data) {
		mapping->i_private_data = buffer_mapping;
	} else {
		BUG_ON(mapping->i_private_data != buffer_mapping);
	}
	if (!bh->b_assoc_map) {
		spin_lock(&buffer_mapping->i_private_lock);
		list_move_tail(&bh->b_assoc_buffers,
			       &mapping->i_private_list);
		bh->b_assoc_map = mapping;
		spin_unlock(&buffer_mapping->i_private_lock);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);

/**
 * block_dirty_folio - Mark a folio as dirty.
 * @mapping: The address space containing this folio.
 * @folio: The folio to mark dirty.
 *
 * Filesystems which use buffer_heads can use this function as their
 * ->dirty_folio implementation. Some filesystems need to do a little
 * work before calling this function. Filesystems which do not use
 * buffer_heads should call filemap_dirty_folio() instead.
 *
 * If the folio has buffers, the uptodate buffers are set dirty, to
 * preserve dirty-state coherency between the folio and the buffers.
 * Buffers added to a dirty folio are created dirty.
 *
 * The buffers are dirtied before the folio is dirtied. There's a small
 * race window in which writeback may see the folio cleanness but not the
 * buffer dirtiness. That's fine. If this code were to set the folio
 * dirty before the buffers, writeback could clear the folio dirty flag,
 * see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * folio on the dirty folio list.
 *
 * We use i_private_lock to lock against try_to_free_buffers() while
 * using the folio's buffer list. This also prevents clean buffers
 * being added to the folio after it was set dirty.
 *
 * Context: May only be called from process context. Does not sleep.
 * Caller must ensure that @folio cannot be truncated during this call,
 * typically by holding the folio lock or having a page in the folio
 * mapped and holding the page table lock.
 *
 * Return: True if the folio was dirtied; false if it was already dirtied.
 */
bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	struct buffer_head *head;
	bool newly_dirty;

	spin_lock(&mapping->i_private_lock);
	head = folio_buffers(folio);
	if (head) {
		struct buffer_head *bh = head;

		do {
			set_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	/*
	 * Lock out page's memcg migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
	 */
	folio_memcg_lock(folio);
	newly_dirty = !folio_test_set_dirty(folio);
	spin_unlock(&mapping->i_private_lock);

	if (newly_dirty)
		__folio_mark_dirty(folio, mapping, 1);

	folio_memcg_unlock(folio);

	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	return newly_dirty;
}
EXPORT_SYMBOL(block_dirty_folio);
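
/*
 * Illustrative sketch (assumed filesystem code, example_* names are
 * hypothetical): a buffer_head based filesystem typically wires this up
 * alongside the other helpers from this file in its
 * address_space_operations:
 *
 *	const struct address_space_operations example_aops = {
 *		.dirty_folio		= block_dirty_folio,
 *		.invalidate_folio	= block_invalidate_folio,
 *		.read_folio		= example_read_folio,
 *		.writepages		= example_writepages,
 *		.write_begin		= example_write_begin,
 *		.write_end		= generic_write_end,
 *	};
 */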

/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't. After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go. Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list. So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head tmp;
	struct address_space *mapping;
	int err = 0, err2;
	struct blk_plug plug;

	INIT_LIST_HEAD(&tmp);
	blk_start_plug(&plug);

	spin_lock(lock);
	while (!list_empty(list)) {
		bh = BH_ENTRY(list->next);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh) || buffer_locked(bh)) {
			list_add(&bh->b_assoc_buffers, &tmp);
			bh->b_assoc_map = mapping;
			if (buffer_dirty(bh)) {
				get_bh(bh);
				spin_unlock(lock);
				/*
				 * Ensure any pending I/O completes so that
				 * write_dirty_buffer() actually writes the
				 * current contents - it is a noop if I/O is
				 * still in flight on potentially older
				 * contents.
				 */
				write_dirty_buffer(bh, REQ_SYNC);

				/*
				 * Kick off IO for the previous mapping. Note
				 * that we will not run the very last mapping,
				 * wait_on_buffer() will do that for us
				 * through sync_buffer().
				 */
				brelse(bh);
				spin_lock(lock);
			}
		}
	}

	spin_unlock(lock);
	blk_finish_plug(&plug);
	spin_lock(lock);

	while (!list_empty(&tmp)) {
		bh = BH_ENTRY(tmp.prev);
		get_bh(bh);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh)) {
			list_add(&bh->b_assoc_buffers,
				 &mapping->i_private_list);
			bh->b_assoc_map = mapping;
		}
		spin_unlock(lock);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			err = -EIO;
		brelse(bh);
		spin_lock(lock);
	}

	spin_unlock(lock);
	err2 = osync_buffers_list(lock, list);
	if (err)
		return err;
	else
		return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode. We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync(). Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's i_private_lock. Which
 * assumes that all the buffers are against the blockdev. Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->i_private_list;
		struct address_space *buffer_mapping = mapping->i_private_data;

		spin_lock(&buffer_mapping->i_private_lock);
		while (!list_empty(list))
			__remove_assoc_queue(BH_ENTRY(list->next));
		spin_unlock(&buffer_mapping->i_private_lock);
	}
}
EXPORT_SYMBOL(invalidate_inode_buffers);

/*
 * Remove any clean buffers from the inode's buffer list. This is called
 * when we're trying to free the inode itself. Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
	int ret = 1;

	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->i_private_list;
		struct address_space *buffer_mapping = mapping->i_private_data;

		spin_lock(&buffer_mapping->i_private_lock);
		while (!list_empty(list)) {
			struct buffer_head *bh = BH_ENTRY(list->next);
			if (buffer_dirty(bh)) {
				ret = 0;
				break;
			}
			__remove_assoc_queue(bh);
		}
		spin_unlock(&buffer_mapping->i_private_lock);
	}
	return ret;
}

/*
 * Create the appropriate buffers when given a folio for data area and
 * the size of each buffer. Use the bh->b_this_page linked list to
 * follow the buffers created. Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping),
 * which may not fail, from ordinary buffer allocations.
 */
struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
					gfp_t gfp)
{
	struct buffer_head *bh, *head;
	long offset;
	struct mem_cgroup *memcg, *old_memcg;

	/* The folio lock pins the memcg */
	memcg = folio_memcg(folio);
	old_memcg = set_active_memcg(memcg);

	head = NULL;
	offset = folio_size(folio);
	while ((offset -= size) >= 0) {
		bh = alloc_buffer_head(gfp);
		if (!bh)
			goto no_grow;

		bh->b_this_page = head;
		bh->b_blocknr = -1;
		head = bh;

		bh->b_size = size;

		/* Link the buffer to its folio */
		folio_set_bh(bh, folio, offset);
	}
out:
	set_active_memcg(old_memcg);
	return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
	if (head) {
		do {
			bh = head;
			head = head->b_this_page;
			free_buffer_head(bh);
		} while (head);
	}

	goto out;
}
EXPORT_SYMBOL_GPL(folio_alloc_buffers);

struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
				       bool retry)
{
	gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT;
	if (retry)
		gfp |= __GFP_NOFAIL;

	return folio_alloc_buffers(page_folio(page), size, gfp);
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);

static inline void link_dev_buffers(struct folio *folio,
				    struct buffer_head *head)
{
	struct buffer_head *bh, *tail;

	bh = head;
	do {
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;
	folio_attach_private(folio, head);
}

static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
{
	sector_t retval = ~((sector_t)0);
	loff_t sz = bdev_nr_bytes(bdev);

	if (sz) {
		unsigned int sizebits = blksize_bits(size);
		retval = (sz >> sizebits);
	}
	return retval;
}

/*
 * Initialise the state of a blockdev folio's buffers.
 */
static sector_t folio_init_buffers(struct folio *folio,
		struct block_device *bdev, unsigned size)
{
	struct buffer_head *head = folio_buffers(folio);
	struct buffer_head *bh = head;
	bool uptodate = folio_test_uptodate(folio);
	sector_t block = div_u64(folio_pos(folio), size);
	sector_t end_block = blkdev_max_block(bdev, size);

	do {
		if (!buffer_mapped(bh)) {
			bh->b_end_io = NULL;
			bh->b_private = NULL;
			bh->b_bdev = bdev;
			bh->b_blocknr = block;
			if (uptodate)
				set_buffer_uptodate(bh);
			if (block < end_block)
				set_buffer_mapped(bh);
		}
		block++;
		bh = bh->b_this_page;
	} while (bh != head);

	/*
	 * Caller needs to validate requested block against end of device.
	 */
	return end_block;
}

/*
 * Create the page-cache folio that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 *
 * Returns false if we have a failure which cannot be cured by retrying
 * without sleeping. Returns true if we succeeded, or the caller should retry.
 */
static bool grow_dev_folio(struct block_device *bdev, sector_t block,
		pgoff_t index, unsigned size, gfp_t gfp)
{
	struct address_space *mapping = bdev->bd_mapping;
	struct folio *folio;
	struct buffer_head *bh;
	sector_t end_block = 0;

	folio = __filemap_get_folio(mapping, index,
			FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
	if (IS_ERR(folio))
		return false;

	bh = folio_buffers(folio);
	if (bh) {
		if (bh->b_size == size) {
			end_block = folio_init_buffers(folio, bdev, size);
			goto unlock;
		}

		/*
		 * Retrying may succeed; for example the folio may finish
		 * writeback, or buffers may be cleaned. This should not
		 * happen very often; maybe we have old buffers attached to
		 * this blockdev's page cache and we're trying to change
		 * the block size?
		 */
		if (!try_to_free_buffers(folio)) {
			end_block = ~0ULL;
			goto unlock;
		}
	}

	bh = folio_alloc_buffers(folio, size, gfp | __GFP_ACCOUNT);
	if (!bh)
		goto unlock;

	/*
	 * Link the folio to the buffers and initialise them. Take the
	 * lock to be atomic wrt __find_get_block(), which does not
	 * run under the folio lock.
	 */
	spin_lock(&mapping->i_private_lock);
	link_dev_buffers(folio, bh);
	end_block = folio_init_buffers(folio, bdev, size);
	spin_unlock(&mapping->i_private_lock);
unlock:
	folio_unlock(folio);
	folio_put(folio);
	return block < end_block;
}

/*
 * Create buffers for the specified block device block's folio. If
 * that folio was dirty, the buffers are set dirty also. Returns false
 * if we've hit a permanent error.
 */
static bool grow_buffers(struct block_device *bdev, sector_t block,
		unsigned size, gfp_t gfp)
{
	loff_t pos;

	/*
	 * Check for a block which lies outside our maximum possible
	 * pagecache index.
	 */
	if (check_mul_overflow(block, (sector_t)size, &pos) || pos > MAX_LFS_FILESIZE) {
		printk(KERN_ERR "%s: requested out-of-range block %llu for device %pg\n",
			__func__, (unsigned long long)block,
			bdev);
		return false;
	}

	/* Create a folio with the proper size buffers */
	return grow_dev_folio(bdev, block, pos / PAGE_SIZE, size, gfp);
}

static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block,
	      unsigned size, gfp_t gfp)
{
	/* Size must be multiple of hard sectorsize */
	if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
			(size < 512 || size > PAGE_SIZE))) {
		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
					size);
		printk(KERN_ERR "logical block size: %d\n",
					bdev_logical_block_size(bdev));

		dump_stack();
		return NULL;
	}

	for (;;) {
		struct buffer_head *bh;

		bh = __find_get_block(bdev, block, size);
		if (bh)
			return bh;

		if (!grow_buffers(bdev, block, size, gfp))
			return NULL;
	}
}

/*
 * The relationship between dirty buffers and dirty pages:
 *
 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
 * the page is tagged dirty in the page cache.
 *
 * At all times, the dirtiness of the buffers represents the dirtiness of
 * subsections of the page. If the page has buffers, the page dirty bit is
 * merely a hint about the true dirty state.
 *
 * When a page is set dirty in its entirety, all its buffers are marked dirty
 * (if the page has buffers).
 *
 * When a buffer is marked dirty, its page is dirtied, but the page's other
 * buffers are not.
 *
 * Also. When blockdev buffers are explicitly read with bread(), they
 * individually become uptodate. But their backing page remains not
 * uptodate - even if all of its buffers are uptodate. A subsequent
 * block_read_full_folio() against that folio will discover all the uptodate
 * buffers, will set the folio uptodate and will perform no I/O.
 */

/**
 * mark_buffer_dirty - mark a buffer_head as needing writeout
 * @bh: the buffer_head to mark dirty
 *
 * mark_buffer_dirty() will set the dirty bit against the buffer, then set
 * its backing page dirty, then tag the page as dirty in the page cache
 * and then attach the address_space's inode to its superblock's dirty
 * inode list.
 *
 * mark_buffer_dirty() is atomic. It takes bh->b_folio->mapping->i_private_lock,
 * i_pages lock and mapping->host->i_lock.
 */
void mark_buffer_dirty(struct buffer_head *bh)
{
	WARN_ON_ONCE(!buffer_uptodate(bh));

	trace_block_dirty_buffer(bh);

	/*
	 * Very *carefully* optimize the it-is-already-dirty case.
	 *
	 * Don't let the final "is it dirty" escape to before we
	 * perhaps modified the buffer.
	 */
	if (buffer_dirty(bh)) {
		smp_mb();
		if (buffer_dirty(bh))
			return;
	}

	if (!test_set_buffer_dirty(bh)) {
		struct folio *folio = bh->b_folio;
		struct address_space *mapping = NULL;

		folio_memcg_lock(folio);
		if (!folio_test_set_dirty(folio)) {
			mapping = folio->mapping;
			if (mapping)
				__folio_mark_dirty(folio, mapping, 0);
		}
		folio_memcg_unlock(folio);
		if (mapping)
			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty);
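
/*
 * Illustrative sketch (assumed caller code): the usual metadata
 * read-modify-write cycle built on this helper looks like the following,
 * where blocknr is a placeholder:
 *
 *	struct buffer_head *bh = sb_bread(sb, blocknr);
 *
 *	if (!bh)
 *		return -EIO;
 *	... modify bh->b_data ...
 *	mark_buffer_dirty(bh);	// folio and owning inode are dirtied as well
 *	brelse(bh);		// writeback happens later, or use sync_dirty_buffer()
 */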

void mark_buffer_write_io_error(struct buffer_head *bh)
{
	set_buffer_write_io_error(bh);
	/* FIXME: do we need to set this in both places? */
	if (bh->b_folio && bh->b_folio->mapping)
		mapping_set_error(bh->b_folio->mapping, -EIO);
	if (bh->b_assoc_map) {
		mapping_set_error(bh->b_assoc_map, -EIO);
		errseq_set(&bh->b_assoc_map->host->i_sb->s_wb_err, -EIO);
	}
}
EXPORT_SYMBOL(mark_buffer_write_io_error);

/**
 * __brelse - Release a buffer.
 * @bh: The buffer to release.
 *
 * This variant of brelse() can be called if @bh is guaranteed to not be NULL.
 */
void __brelse(struct buffer_head *bh)
{
	if (atomic_read(&bh->b_count)) {
		put_bh(bh);
		return;
	}
	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
}
EXPORT_SYMBOL(__brelse);

/**
 * __bforget - Discard any dirty data in a buffer.
 * @bh: The buffer to forget.
 *
 * This variant of bforget() can be called if @bh is guaranteed to not
 * be NULL.
 */
void __bforget(struct buffer_head *bh)
{
	clear_buffer_dirty(bh);
	if (bh->b_assoc_map) {
		struct address_space *buffer_mapping = bh->b_folio->mapping;

		spin_lock(&buffer_mapping->i_private_lock);
		list_del_init(&bh->b_assoc_buffers);
		bh->b_assoc_map = NULL;
		spin_unlock(&buffer_mapping->i_private_lock);
	}
	__brelse(bh);
}
EXPORT_SYMBOL(__bforget);

static struct buffer_head *__bread_slow(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return bh;
	} else {
		get_bh(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(REQ_OP_READ, bh);
		wait_on_buffer(bh);
		if (buffer_uptodate(bh))
			return bh;
	}
	brelse(bh);
	return NULL;
}

/*
 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
 * refcount elevated by one when they're in an LRU. A buffer can only appear
 * once in a particular CPU's LRU. A single buffer can be present in multiple
 * CPU's LRUs at the same time.
 *
 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
 * sb_find_get_block().
 *
 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
 * a local interrupt disable for that.
 */

#define BH_LRU_SIZE 16

struct bh_lru {
	struct buffer_head *bhs[BH_LRU_SIZE];
};

static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};

#ifdef CONFIG_SMP
#define bh_lru_lock() local_irq_disable()
#define bh_lru_unlock() local_irq_enable()
#else
#define bh_lru_lock() preempt_disable()
#define bh_lru_unlock() preempt_enable()
#endif

static inline void check_irqs_on(void)
{
#ifdef irqs_disabled
	BUG_ON(irqs_disabled());
#endif
}

/*
 * Install a buffer_head into this cpu's LRU. If not already in the LRU, it is
 * inserted at the front, and the buffer_head at the back if any is evicted.
 * Or, if already in the LRU it is moved to the front.
 */
static void bh_lru_install(struct buffer_head *bh)
{
	struct buffer_head *evictee = bh;
	struct bh_lru *b;
	int i;

	check_irqs_on();
	bh_lru_lock();

	/*
	 * the refcount of buffer_head in bh_lru prevents dropping the
	 * attached page(i.e., try_to_free_buffers) so it could cause
	 * failing page migration.
	 * Skip putting upcoming bh into bh_lru until migration is done.
	 */
	if (lru_cache_disabled() || cpu_is_isolated(smp_processor_id())) {
		bh_lru_unlock();
		return;
	}

	b = this_cpu_ptr(&bh_lrus);
	for (i = 0; i < BH_LRU_SIZE; i++) {
		swap(evictee, b->bhs[i]);
		if (evictee == bh) {
			bh_lru_unlock();
			return;
		}
	}

	get_bh(bh);
	bh_lru_unlock();
	brelse(evictee);
}

/*
 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
 */
static struct buffer_head *
lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *ret = NULL;
	unsigned int i;

	check_irqs_on();
	bh_lru_lock();
	if (cpu_is_isolated(smp_processor_id())) {
		bh_lru_unlock();
		return NULL;
	}
	for (i = 0; i < BH_LRU_SIZE; i++) {
		struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);

		if (bh && bh->b_blocknr == block && bh->b_bdev == bdev &&
		    bh->b_size == size) {
			if (i) {
				while (i) {
					__this_cpu_write(bh_lrus.bhs[i],
						__this_cpu_read(bh_lrus.bhs[i - 1]));
					i--;
				}
				__this_cpu_write(bh_lrus.bhs[0], bh);
			}
			get_bh(bh);
			ret = bh;
			break;
		}
	}
	bh_lru_unlock();
	return ret;
}

/*
 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
 * it in the LRU and mark it as accessed. If it is not present then return
 * NULL
 */
struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);

	if (bh == NULL) {
		/* __find_get_block_slow will mark the page accessed */
		bh = __find_get_block_slow(bdev, block);
		if (bh)
			bh_lru_install(bh);
	} else
		touch_buffer(bh);

	return bh;
}
EXPORT_SYMBOL(__find_get_block);

/**
 * bdev_getblk - Get a buffer_head in a block device's buffer cache.
 * @bdev: The block device.
 * @block: The block number.
 * @size: The size of buffer_heads for this @bdev.
 * @gfp: The memory allocation flags to use.
 *
 * The returned buffer head has its reference count incremented, but is
 * not locked. The caller should call brelse() when it has finished
 * with the buffer. The buffer may not be uptodate. If needed, the
 * caller can bring it uptodate either by reading it or overwriting it.
 *
 * Return: The buffer head, or NULL if memory could not be allocated.
 */
struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block,
		unsigned size, gfp_t gfp)
{
	struct buffer_head *bh = __find_get_block(bdev, block, size);

	might_alloc(gfp);
	if (bh)
		return bh;

	return __getblk_slow(bdev, block, size, gfp);
}
EXPORT_SYMBOL(bdev_getblk);
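
/*
 * Illustrative sketch (assumed caller code; bdev, block, size and data are
 * placeholders): a caller that will overwrite the whole block does not need
 * to read it first:
 *
 *	struct buffer_head *bh = bdev_getblk(bdev, block, size, GFP_NOFS);
 *
 *	if (!bh)
 *		return -ENOMEM;
 *	memcpy(bh->b_data, data, size);
 *	set_buffer_uptodate(bh);
 *	mark_buffer_dirty(bh);
 *	brelse(bh);
 *
 * A caller that needs the existing contents should instead bring the buffer
 * uptodate first, e.g. with bh_read() or with __bread_gfp() below.
 */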

/*
 * Do async read-ahead on a buffer.
 */
void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = bdev_getblk(bdev, block, size,
			GFP_NOWAIT | __GFP_MOVABLE);

	if (likely(bh)) {
		bh_readahead(bh, REQ_RAHEAD);
		brelse(bh);
	}
}
EXPORT_SYMBOL(__breadahead);

/**
 * __bread_gfp() - Read a block.
 * @bdev: The block device to read from.
 * @block: Block number in units of block size.
 * @size: The block size of this device in bytes.
 * @gfp: Not page allocation flags; see below.
 *
 * You are not expected to call this function. You should use one of
 * sb_bread(), sb_bread_unmovable() or __bread().
 *
 * Read a specified block, and return the buffer head that refers to it.
 * If @gfp is 0, the memory will be allocated using the block device's
 * default GFP flags. If @gfp is __GFP_MOVABLE, the memory may be
 * allocated from a movable area. Do not pass in a complete set of
 * GFP flags.
 *
 * The returned buffer head has its refcount increased. The caller should
 * call brelse() when it has finished with the buffer.
 *
 * Context: May sleep waiting for I/O.
 * Return: NULL if the block was unreadable.
 */
struct buffer_head *__bread_gfp(struct block_device *bdev, sector_t block,
		unsigned size, gfp_t gfp)
{
	struct buffer_head *bh;

	gfp |= mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS);

	/*
	 * Prefer looping in the allocator rather than here, at least that
	 * code knows what it's doing.
	 */
	gfp |= __GFP_NOFAIL;

	bh = bdev_getblk(bdev, block, size, gfp);

	if (likely(bh) && !buffer_uptodate(bh))
		bh = __bread_slow(bh);
	return bh;
}
EXPORT_SYMBOL(__bread_gfp);

static void __invalidate_bh_lrus(struct bh_lru *b)
{
	int i;

	for (i = 0; i < BH_LRU_SIZE; i++) {
		brelse(b->bhs[i]);
		b->bhs[i] = NULL;
	}
}
/*
 * invalidate_bh_lrus() is called rarely - but not only at unmount.
 * This doesn't race because it runs in each cpu either in irq
 * or with preempt disabled.
 */
static void invalidate_bh_lru(void *arg)
{
	struct bh_lru *b = &get_cpu_var(bh_lrus);

	__invalidate_bh_lrus(b);
	put_cpu_var(bh_lrus);
}

bool has_bh_in_lru(int cpu, void *dummy)
{
	struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
	int i;

	for (i = 0; i < BH_LRU_SIZE; i++) {
		if (b->bhs[i])
			return true;
	}

	return false;
}

void invalidate_bh_lrus(void)
{
	on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1);
}
EXPORT_SYMBOL_GPL(invalidate_bh_lrus);

/*
 * It's called from workqueue context so we need a bh_lru_lock to close
 * the race with preemption/irq.
 */
void invalidate_bh_lrus_cpu(void)
{
	struct bh_lru *b;

	bh_lru_lock();
	b = this_cpu_ptr(&bh_lrus);
	__invalidate_bh_lrus(b);
	bh_lru_unlock();
}

void folio_set_bh(struct buffer_head *bh, struct folio *folio,
		  unsigned long offset)
{
	bh->b_folio = folio;
	BUG_ON(offset >= folio_size(folio));
	if (folio_test_highmem(folio))
		/*
		 * This catches illegal uses and preserves the offset:
		 */
		bh->b_data = (char *)(0 + offset);
	else
		bh->b_data = folio_address(folio) + offset;
}
EXPORT_SYMBOL(folio_set_bh);

/*
 * Called when truncating a buffer on a page completely.
 */

/* Bits that are cleared during an invalidate */
#define BUFFER_FLAGS_DISCARD \
	(1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \
	 1 << BH_Delay | 1 << BH_Unwritten)

static void discard_buffer(struct buffer_head *bh)
{
	unsigned long b_state;

	lock_buffer(bh);
	clear_buffer_dirty(bh);
	bh->b_bdev = NULL;
	b_state = READ_ONCE(bh->b_state);
	do {
	} while (!try_cmpxchg(&bh->b_state, &b_state,
			      b_state & ~BUFFER_FLAGS_DISCARD));
	unlock_buffer(bh);
}

/**
 * block_invalidate_folio - Invalidate part or all of a buffer-backed folio.
 * @folio: The folio which is affected.
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * block_invalidate_folio() is called when all or part of the folio has been
 * invalidated by a truncate operation.
 *
 * block_invalidate_folio() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point. Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void block_invalidate_folio(struct folio *folio, size_t offset, size_t length)
{
	struct buffer_head *head, *bh, *next;
	size_t curr_off = 0;
	size_t stop = length + offset;

	BUG_ON(!folio_test_locked(folio));

	/*
	 * Check for overflow
	 */
	BUG_ON(stop > folio_size(folio) || stop < length);

	head = folio_buffers(folio);
	if (!head)
		return;

	bh = head;
	do {
		size_t next_off = curr_off + bh->b_size;
		next = bh->b_this_page;

		/*
		 * Are we still fully in range ?
		 */
		if (next_off > stop)
			goto out;

		/*
		 * is this block fully invalidated?
		 */
		if (offset <= curr_off)
			discard_buffer(bh);
		curr_off = next_off;
		bh = next;
	} while (bh != head);

	/*
	 * We release buffers only if the entire folio is being invalidated.
	 * The get_block cached value has been unconditionally invalidated,
	 * so real IO is not possible anymore.
	 */
	if (length == folio_size(folio))
		filemap_release_folio(folio, 0);
out:
	return;
}
EXPORT_SYMBOL(block_invalidate_folio);

/*
 * We attach and possibly dirty the buffers atomically wrt
 * block_dirty_folio() via i_private_lock. try_to_free_buffers
 * is already excluded via the folio lock.
 */
struct buffer_head *create_empty_buffers(struct folio *folio,
		unsigned long blocksize, unsigned long b_state)
{
	struct buffer_head *bh, *head, *tail;
	gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT | __GFP_NOFAIL;

	head = folio_alloc_buffers(folio, blocksize, gfp);
	bh = head;
	do {
		bh->b_state |= b_state;
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;

	spin_lock(&folio->mapping->i_private_lock);
	if (folio_test_uptodate(folio) || folio_test_dirty(folio)) {
		bh = head;
		do {
			if (folio_test_dirty(folio))
				set_buffer_dirty(bh);
			if (folio_test_uptodate(folio))
				set_buffer_uptodate(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	folio_attach_private(folio, head);
	spin_unlock(&folio->mapping->i_private_lock);

	return head;
}
EXPORT_SYMBOL(create_empty_buffers);
1699
1700 /**
1701 * clean_bdev_aliases: clean a range of buffers in block device
1702 * @bdev: Block device to clean buffers in
1703 * @block: Start of a range of blocks to clean
1704 * @len: Number of blocks to clean
1705 *
1706 * We are taking a range of blocks for data and we don't want writeback of any
1707 * buffer-cache aliases starting from return from this function and until the
1708 * moment when something will explicitly mark the buffer dirty (hopefully that
1709 * will not happen until we will free that block ;-) We don't even need to mark
1710 * it not-uptodate - nobody can expect anything from a newly allocated buffer
1711 * anyway. We used to use unmap_buffer() for such invalidation, but that was
1712 * wrong. We definitely don't want to mark the alias unmapped, for example - it
1713 * would confuse anyone who might pick it with bread() afterwards...
1714 *
1715 * Also.. Note that bforget() doesn't lock the buffer. So there can be
1716 * writeout I/O going on against recently-freed buffers. We don't wait on that
1717 * I/O in bforget() - it's more efficient to wait on the I/O only if we really
1718 * need to. That happens here.
1719 */
clean_bdev_aliases(struct block_device * bdev,sector_t block,sector_t len)1720 void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
1721 {
1722 struct address_space *bd_mapping = bdev->bd_mapping;
1723 const int blkbits = bd_mapping->host->i_blkbits;
1724 struct folio_batch fbatch;
1725 pgoff_t index = ((loff_t)block << blkbits) / PAGE_SIZE;
1726 pgoff_t end;
1727 int i, count;
1728 struct buffer_head *bh;
1729 struct buffer_head *head;
1730
1731 end = ((loff_t)(block + len - 1) << blkbits) / PAGE_SIZE;
1732 folio_batch_init(&fbatch);
1733 while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) {
1734 count = folio_batch_count(&fbatch);
1735 for (i = 0; i < count; i++) {
1736 struct folio *folio = fbatch.folios[i];
1737
1738 if (!folio_buffers(folio))
1739 continue;
1740 /*
1741 * We use folio lock instead of bd_mapping->i_private_lock
1742 * to pin buffers here since we can afford to sleep and
1743 * it scales better than a global spinlock.
1744 */
1745 folio_lock(folio);
1746 /* Recheck when the folio is locked which pins bhs */
1747 head = folio_buffers(folio);
1748 if (!head)
1749 goto unlock_page;
1750 bh = head;
1751 do {
1752 if (!buffer_mapped(bh) || (bh->b_blocknr < block))
1753 goto next;
1754 if (bh->b_blocknr >= block + len)
1755 break;
1756 clear_buffer_dirty(bh);
1757 wait_on_buffer(bh);
1758 clear_buffer_req(bh);
1759 next:
1760 bh = bh->b_this_page;
1761 } while (bh != head);
1762 unlock_page:
1763 folio_unlock(folio);
1764 }
1765 folio_batch_release(&fbatch);
1766 cond_resched();
1767 /* End of range already reached? */
1768 if (index > end || !index)
1769 break;
1770 }
1771 }
1772 EXPORT_SYMBOL(clean_bdev_aliases);
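
/*
 * Illustrative sketch (not part of this file): a filesystem typically reaches
 * this through clean_bdev_bh_alias() right after get_block() hands it a
 * freshly allocated block, so that any stale buffer_head the block device's
 * own mapping still holds for that block cannot be written back later.
 * myfs_get_block() is hypothetical:
 *
 *	err = myfs_get_block(inode, block, bh, 1);
 *	if (!err && buffer_new(bh)) {
 *		clear_buffer_new(bh);
 *		clean_bdev_bh_alias(bh);	// clean_bdev_aliases(bh->b_bdev, bh->b_blocknr, 1)
 *	}
 */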
1773
1774 static struct buffer_head *folio_create_buffers(struct folio *folio,
1775 struct inode *inode,
1776 unsigned int b_state)
1777 {
1778 struct buffer_head *bh;
1779
1780 BUG_ON(!folio_test_locked(folio));
1781
1782 bh = folio_buffers(folio);
1783 if (!bh)
1784 bh = create_empty_buffers(folio,
1785 1 << READ_ONCE(inode->i_blkbits), b_state);
1786 return bh;
1787 }
1788
1789 /*
1790 * NOTE! All mapped/uptodate combinations are valid:
1791 *
1792 * Mapped Uptodate Meaning
1793 *
1794 * No No "unknown" - must do get_block()
1795 * No Yes "hole" - zero-filled
1796 * Yes No "allocated" - allocated on disk, not read in
1797 * Yes Yes "valid" - allocated and up-to-date in memory.
1798 *
1799 * "Dirty" is valid only with the last case (mapped+uptodate).
1800 */
1801
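/*
 * Illustrative sketch (not part of this file): a get_block_t callback that
 * produces the states above.  myfs_lookup_block() and myfs_alloc_block() are
 * hypothetical helpers of an imaginary filesystem:
 *
 *	static int myfs_get_block(struct inode *inode, sector_t iblock,
 *				  struct buffer_head *bh, int create)
 *	{
 *		sector_t phys = myfs_lookup_block(inode, iblock);
 *
 *		if (phys) {
 *			map_bh(bh, inode->i_sb, phys);	// "allocated" or "valid"
 *			return 0;
 *		}
 *		if (!create)
 *			return 0;			// left unmapped: a hole
 *		phys = myfs_alloc_block(inode, iblock);
 *		if (!phys)
 *			return -ENOSPC;
 *		set_buffer_new(bh);			// callers must zero/clean aliases
 *		map_bh(bh, inode->i_sb, phys);
 *		return 0;
 *	}
 */
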
1802 /*
1803 * While block_write_full_folio is writing back the dirty buffers under
1804 * the page lock, whoever dirtied the buffers may decide to clean them
1805 * again at any time. We handle that by only looking at the buffer
1806 * state inside lock_buffer().
1807 *
1808 * If block_write_full_folio() is called for regular writeback
1809 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1810 * locked buffer. This only can happen if someone has written the buffer
1811 * directly, with submit_bh(). At the address_space level PageWriteback
1812 * prevents this contention from occurring.
1813 *
1814 * If block_write_full_folio() is called with wbc->sync_mode ==
1815 * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this
1816 * causes the writes to be flagged as synchronous writes.
1817 */
1818 int __block_write_full_folio(struct inode *inode, struct folio *folio,
1819 get_block_t *get_block, struct writeback_control *wbc)
1820 {
1821 int err;
1822 sector_t block;
1823 sector_t last_block;
1824 struct buffer_head *bh, *head;
1825 size_t blocksize;
1826 int nr_underway = 0;
1827 blk_opf_t write_flags = wbc_to_write_flags(wbc);
1828
1829 head = folio_create_buffers(folio, inode,
1830 (1 << BH_Dirty) | (1 << BH_Uptodate));
1831
1832 /*
1833 * Be very careful. We have no exclusion from block_dirty_folio
1834 * here, and the (potentially unmapped) buffers may become dirty at
1835 * any time. If a buffer becomes dirty here after we've inspected it
1836 * then we just miss that fact, and the folio stays dirty.
1837 *
1838 * Buffers outside i_size may be dirtied by block_dirty_folio;
1839 * handle that here by just cleaning them.
1840 */
1841
1842 bh = head;
1843 blocksize = bh->b_size;
1844
1845 block = div_u64(folio_pos(folio), blocksize);
1846 last_block = div_u64(i_size_read(inode) - 1, blocksize);
1847
1848 /*
1849 * Get all the dirty buffers mapped to disk addresses and
1850 * handle any aliases from the underlying blockdev's mapping.
1851 */
1852 do {
1853 if (block > last_block) {
1854 /*
1855 * mapped buffers outside i_size will occur, because
1856 * this folio can be outside i_size when there is a
1857 * truncate in progress.
1858 */
1859 /*
1860 * The buffer was zeroed by block_write_full_folio()
1861 */
1862 clear_buffer_dirty(bh);
1863 set_buffer_uptodate(bh);
1864 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1865 buffer_dirty(bh)) {
1866 WARN_ON(bh->b_size != blocksize);
1867 err = get_block(inode, block, bh, 1);
1868 if (err)
1869 goto recover;
1870 clear_buffer_delay(bh);
1871 if (buffer_new(bh)) {
1872 /* blockdev mappings never come here */
1873 clear_buffer_new(bh);
1874 clean_bdev_bh_alias(bh);
1875 }
1876 }
1877 bh = bh->b_this_page;
1878 block++;
1879 } while (bh != head);
1880
1881 do {
1882 if (!buffer_mapped(bh))
1883 continue;
1884 /*
1885 * If it's a fully non-blocking write attempt and we cannot
1886 * lock the buffer then redirty the folio. Note that this can
1887 * potentially cause a busy-wait loop from writeback threads
1888 * and kswapd activity, but those code paths have their own
1889 * higher-level throttling.
1890 */
1891 if (wbc->sync_mode != WB_SYNC_NONE) {
1892 lock_buffer(bh);
1893 } else if (!trylock_buffer(bh)) {
1894 folio_redirty_for_writepage(wbc, folio);
1895 continue;
1896 }
1897 if (test_clear_buffer_dirty(bh)) {
1898 mark_buffer_async_write_endio(bh,
1899 end_buffer_async_write);
1900 } else {
1901 unlock_buffer(bh);
1902 }
1903 } while ((bh = bh->b_this_page) != head);
1904
1905 /*
1906 * The folio and its buffers are protected by the writeback flag,
1907 * so we can drop the bh refcounts early.
1908 */
1909 BUG_ON(folio_test_writeback(folio));
1910 folio_start_writeback(folio);
1911
1912 do {
1913 struct buffer_head *next = bh->b_this_page;
1914 if (buffer_async_write(bh)) {
1915 submit_bh_wbc(REQ_OP_WRITE | write_flags, bh,
1916 inode->i_write_hint, wbc);
1917 nr_underway++;
1918 }
1919 bh = next;
1920 } while (bh != head);
1921 folio_unlock(folio);
1922
1923 err = 0;
1924 done:
1925 if (nr_underway == 0) {
1926 /*
1927 * The folio was marked dirty, but the buffers were
1928 * clean. Someone wrote them back by hand with
1929 * write_dirty_buffer/submit_bh. A rare case.
1930 */
1931 folio_end_writeback(folio);
1932
1933 /*
1934 * The folio and buffer_heads can be released at any time from
1935 * here on.
1936 */
1937 }
1938 return err;
1939
1940 recover:
1941 /*
1942 * ENOSPC, or some other error. We may already have added some
1943 * blocks to the file, so we need to write these out to avoid
1944 * exposing stale data.
1945 * The folio is currently locked and not marked for writeback
1946 */
1947 bh = head;
1948 /* Recovery: lock and submit the mapped buffers */
1949 do {
1950 if (buffer_mapped(bh) && buffer_dirty(bh) &&
1951 !buffer_delay(bh)) {
1952 lock_buffer(bh);
1953 mark_buffer_async_write_endio(bh,
1954 end_buffer_async_write);
1955 } else {
1956 /*
1957 * The buffer may have been set dirty during
1958 * attachment to a dirty folio.
1959 */
1960 clear_buffer_dirty(bh);
1961 }
1962 } while ((bh = bh->b_this_page) != head);
1963 folio_set_error(folio);
1964 BUG_ON(folio_test_writeback(folio));
1965 mapping_set_error(folio->mapping, err);
1966 folio_start_writeback(folio);
1967 do {
1968 struct buffer_head *next = bh->b_this_page;
1969 if (buffer_async_write(bh)) {
1970 clear_buffer_dirty(bh);
1971 submit_bh_wbc(REQ_OP_WRITE | write_flags, bh,
1972 inode->i_write_hint, wbc);
1973 nr_underway++;
1974 }
1975 bh = next;
1976 } while (bh != head);
1977 folio_unlock(folio);
1978 goto done;
1979 }
1980 EXPORT_SYMBOL(__block_write_full_folio);
1981
1982 /*
1983 * If a folio has any new buffers, zero them out here, and mark them uptodate
1984 * and dirty so they'll be written out (in order to prevent uninitialised
1985 * block data from leaking). And clear the new bit.
1986 */
1987 void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to)
1988 {
1989 size_t block_start, block_end;
1990 struct buffer_head *head, *bh;
1991
1992 BUG_ON(!folio_test_locked(folio));
1993 head = folio_buffers(folio);
1994 if (!head)
1995 return;
1996
1997 bh = head;
1998 block_start = 0;
1999 do {
2000 block_end = block_start + bh->b_size;
2001
2002 if (buffer_new(bh)) {
2003 if (block_end > from && block_start < to) {
2004 if (!folio_test_uptodate(folio)) {
2005 size_t start, xend;
2006
2007 start = max(from, block_start);
2008 xend = min(to, block_end);
2009
2010 folio_zero_segment(folio, start, xend);
2011 set_buffer_uptodate(bh);
2012 }
2013
2014 clear_buffer_new(bh);
2015 mark_buffer_dirty(bh);
2016 }
2017 }
2018
2019 block_start = block_end;
2020 bh = bh->b_this_page;
2021 } while (bh != head);
2022 }
2023 EXPORT_SYMBOL(folio_zero_new_buffers);
2024
2025 static int
2026 iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
2027 const struct iomap *iomap)
2028 {
2029 loff_t offset = (loff_t)block << inode->i_blkbits;
2030
2031 bh->b_bdev = iomap->bdev;
2032
2033 /*
2034 * Block points to offset in file we need to map, iomap contains
2035 * the offset at which the map starts. If the map ends before the
2036 * current block, then do not map the buffer and let the caller
2037 * handle it.
2038 */
2039 if (offset >= iomap->offset + iomap->length)
2040 return -EIO;
2041
2042 switch (iomap->type) {
2043 case IOMAP_HOLE:
2044 /*
2045 * If the buffer is not up to date or beyond the current EOF,
2046 * we need to mark it as new to ensure sub-block zeroing is
2047 * executed if necessary.
2048 */
2049 if (!buffer_uptodate(bh) ||
2050 (offset >= i_size_read(inode)))
2051 set_buffer_new(bh);
2052 return 0;
2053 case IOMAP_DELALLOC:
2054 if (!buffer_uptodate(bh) ||
2055 (offset >= i_size_read(inode)))
2056 set_buffer_new(bh);
2057 set_buffer_uptodate(bh);
2058 set_buffer_mapped(bh);
2059 set_buffer_delay(bh);
2060 return 0;
2061 case IOMAP_UNWRITTEN:
2062 /*
2063 * For unwritten regions, we always need to ensure that regions
2064 * in the block we are not writing to are zeroed. Mark the
2065 * buffer as new to ensure this.
2066 */
2067 set_buffer_new(bh);
2068 set_buffer_unwritten(bh);
2069 fallthrough;
2070 case IOMAP_MAPPED:
2071 if ((iomap->flags & IOMAP_F_NEW) ||
2072 offset >= i_size_read(inode)) {
2073 /*
2074 * This can happen if truncating the block device races
2075 * with the check in the caller, as i_size updates on
2076 * block devices aren't synchronized by i_rwsem.
2078 */
2079 if (S_ISBLK(inode->i_mode))
2080 return -EIO;
2081 set_buffer_new(bh);
2082 }
2083 bh->b_blocknr = (iomap->addr + offset - iomap->offset) >>
2084 inode->i_blkbits;
2085 set_buffer_mapped(bh);
2086 return 0;
2087 default:
2088 WARN_ON_ONCE(1);
2089 return -EIO;
2090 }
2091 }
2092
2093 int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
2094 get_block_t *get_block, const struct iomap *iomap)
2095 {
2096 size_t from = offset_in_folio(folio, pos);
2097 size_t to = from + len;
2098 struct inode *inode = folio->mapping->host;
2099 size_t block_start, block_end;
2100 sector_t block;
2101 int err = 0;
2102 size_t blocksize;
2103 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
2104
2105 BUG_ON(!folio_test_locked(folio));
2106 BUG_ON(to > folio_size(folio));
2107 BUG_ON(from > to);
2108
2109 head = folio_create_buffers(folio, inode, 0);
2110 blocksize = head->b_size;
2111 block = div_u64(folio_pos(folio), blocksize);
2112
2113 for (bh = head, block_start = 0; bh != head || !block_start;
2114 block++, block_start=block_end, bh = bh->b_this_page) {
2115 block_end = block_start + blocksize;
2116 if (block_end <= from || block_start >= to) {
2117 if (folio_test_uptodate(folio)) {
2118 if (!buffer_uptodate(bh))
2119 set_buffer_uptodate(bh);
2120 }
2121 continue;
2122 }
2123 if (buffer_new(bh))
2124 clear_buffer_new(bh);
2125 if (!buffer_mapped(bh)) {
2126 WARN_ON(bh->b_size != blocksize);
2127 if (get_block)
2128 err = get_block(inode, block, bh, 1);
2129 else
2130 err = iomap_to_bh(inode, block, bh, iomap);
2131 if (err)
2132 break;
2133
2134 if (buffer_new(bh)) {
2135 clean_bdev_bh_alias(bh);
2136 if (folio_test_uptodate(folio)) {
2137 clear_buffer_new(bh);
2138 set_buffer_uptodate(bh);
2139 mark_buffer_dirty(bh);
2140 continue;
2141 }
2142 if (block_end > to || block_start < from)
2143 folio_zero_segments(folio,
2144 to, block_end,
2145 block_start, from);
2146 continue;
2147 }
2148 }
2149 if (folio_test_uptodate(folio)) {
2150 if (!buffer_uptodate(bh))
2151 set_buffer_uptodate(bh);
2152 continue;
2153 }
2154 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
2155 !buffer_unwritten(bh) &&
2156 (block_start < from || block_end > to)) {
2157 bh_read_nowait(bh, 0);
2158 *wait_bh++=bh;
2159 }
2160 }
2161 /*
2162 * If we issued read requests - let them complete.
2163 */
2164 while(wait_bh > wait) {
2165 wait_on_buffer(*--wait_bh);
2166 if (!buffer_uptodate(*wait_bh))
2167 err = -EIO;
2168 }
2169 if (unlikely(err))
2170 folio_zero_new_buffers(folio, from, to);
2171 return err;
2172 }
2173
2174 int __block_write_begin(struct page *page, loff_t pos, unsigned len,
2175 get_block_t *get_block)
2176 {
2177 return __block_write_begin_int(page_folio(page), pos, len, get_block,
2178 NULL);
2179 }
2180 EXPORT_SYMBOL(__block_write_begin);
2181
2182 static void __block_commit_write(struct folio *folio, size_t from, size_t to)
2183 {
2184 size_t block_start, block_end;
2185 bool partial = false;
2186 unsigned blocksize;
2187 struct buffer_head *bh, *head;
2188
2189 bh = head = folio_buffers(folio);
2190 blocksize = bh->b_size;
2191
2192 block_start = 0;
2193 do {
2194 block_end = block_start + blocksize;
2195 if (block_end <= from || block_start >= to) {
2196 if (!buffer_uptodate(bh))
2197 partial = true;
2198 } else {
2199 set_buffer_uptodate(bh);
2200 mark_buffer_dirty(bh);
2201 }
2202 if (buffer_new(bh))
2203 clear_buffer_new(bh);
2204
2205 block_start = block_end;
2206 bh = bh->b_this_page;
2207 } while (bh != head);
2208
2209 /*
2210 * If this is a partial write which happened to make all buffers
2211 * uptodate then we can optimize away a bogus read_folio() for
2212 * the next read(). Here we 'discover' whether the folio went
2213 * uptodate as a result of this (potentially partial) write.
2214 */
2215 if (!partial)
2216 folio_mark_uptodate(folio);
2217 }
2218
2219 /*
2220 * block_write_begin takes care of the basic task of block allocation and
2221 * bringing partial write blocks uptodate first.
2222 *
2223 * The filesystem needs to handle block truncation upon failure.
2224 */
2225 int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
2226 struct page **pagep, get_block_t *get_block)
2227 {
2228 pgoff_t index = pos >> PAGE_SHIFT;
2229 struct page *page;
2230 int status;
2231
2232 page = grab_cache_page_write_begin(mapping, index);
2233 if (!page)
2234 return -ENOMEM;
2235
2236 status = __block_write_begin(page, pos, len, get_block);
2237 if (unlikely(status)) {
2238 unlock_page(page);
2239 put_page(page);
2240 page = NULL;
2241 }
2242
2243 *pagep = page;
2244 return status;
2245 }
2246 EXPORT_SYMBOL(block_write_begin);
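
/*
 * Illustrative sketch (not part of this file): a minimal ->write_begin()
 * built on block_write_begin().  myfs_get_block() is hypothetical, and
 * myfs_write_failed() stands for whatever the filesystem does to truncate
 * blocks instantiated beyond i_size on failure:
 *
 *	static int myfs_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, struct page **pagep, void **fsdata)
 *	{
 *		int ret = block_write_begin(mapping, pos, len, pagep,
 *					    myfs_get_block);
 *		if (unlikely(ret))
 *			myfs_write_failed(mapping, pos + len);
 *		return ret;
 *	}
 */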
2247
2248 int block_write_end(struct file *file, struct address_space *mapping,
2249 loff_t pos, unsigned len, unsigned copied,
2250 struct page *page, void *fsdata)
2251 {
2252 struct folio *folio = page_folio(page);
2253 size_t start = pos - folio_pos(folio);
2254
2255 if (unlikely(copied < len)) {
2256 /*
2257 * The buffers that were written will now be uptodate, so
2258 * we don't have to worry about a read_folio reading them
2259 * and overwriting a partial write. However if we have
2260 * encountered a short write and only partially written
2261 * into a buffer, it will not be marked uptodate, so a
2262 * read_folio might come in and destroy our partial write.
2263 *
2264 * Do the simplest thing, and just treat any short write to a
2265 * non uptodate folio as a zero-length write, and force the
2266 * caller to redo the whole thing.
2267 */
2268 if (!folio_test_uptodate(folio))
2269 copied = 0;
2270
2271 folio_zero_new_buffers(folio, start+copied, start+len);
2272 }
2273 flush_dcache_folio(folio);
2274
2275 /* This could be a short (even 0-length) commit */
2276 __block_commit_write(folio, start, start + copied);
2277
2278 return copied;
2279 }
2280 EXPORT_SYMBOL(block_write_end);
2281
2282 int generic_write_end(struct file *file, struct address_space *mapping,
2283 loff_t pos, unsigned len, unsigned copied,
2284 struct page *page, void *fsdata)
2285 {
2286 struct inode *inode = mapping->host;
2287 loff_t old_size = inode->i_size;
2288 bool i_size_changed = false;
2289
2290 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2291
2292 /*
2293 * No need to use i_size_read() here, the i_size cannot change under us
2294 * because we hold i_rwsem.
2295 *
2296 * But it's important to update i_size while still holding page lock:
2297 * page writeout could otherwise come in and zero beyond i_size.
2298 */
2299 if (pos + copied > inode->i_size) {
2300 i_size_write(inode, pos + copied);
2301 i_size_changed = true;
2302 }
2303
2304 unlock_page(page);
2305 put_page(page);
2306
2307 if (old_size < pos)
2308 pagecache_isize_extended(inode, old_size, pos);
2309 /*
2310 * Don't mark the inode dirty under page lock. First, it unnecessarily
2311 * makes the holding time of page lock longer. Second, it forces lock
2312 * ordering of page lock and transaction start for journaling
2313 * filesystems.
2314 */
2315 if (i_size_changed)
2316 mark_inode_dirty(inode);
2317 return copied;
2318 }
2319 EXPORT_SYMBOL(generic_write_end);
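
/*
 * Illustrative sketch (not part of this file): how the helpers in this file
 * are typically wired together in a filesystem's address_space_operations.
 * All myfs_* entries are hypothetical:
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.dirty_folio		= block_dirty_folio,
 *		.invalidate_folio	= block_invalidate_folio,
 *		.read_folio		= myfs_read_folio,
 *		.writepages		= myfs_writepages,
 *		.write_begin		= myfs_write_begin,
 *		.write_end		= generic_write_end,
 *		.migrate_folio		= buffer_migrate_folio,
 *		.is_partially_uptodate	= block_is_partially_uptodate,
 *		.bmap			= myfs_bmap,
 *	};
 */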
2320
2321 /*
2322 * block_is_partially_uptodate checks whether buffers within a folio are
2323 * uptodate or not.
2324 *
2325 * Returns true if all buffers which correspond to the specified part
2326 * of the folio are uptodate.
2327 */
2328 bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
2329 {
2330 unsigned block_start, block_end, blocksize;
2331 unsigned to;
2332 struct buffer_head *bh, *head;
2333 bool ret = true;
2334
2335 head = folio_buffers(folio);
2336 if (!head)
2337 return false;
2338 blocksize = head->b_size;
2339 to = min_t(unsigned, folio_size(folio) - from, count);
2340 to = from + to;
2341 if (from < blocksize && to > folio_size(folio) - blocksize)
2342 return false;
2343
2344 bh = head;
2345 block_start = 0;
2346 do {
2347 block_end = block_start + blocksize;
2348 if (block_end > from && block_start < to) {
2349 if (!buffer_uptodate(bh)) {
2350 ret = false;
2351 break;
2352 }
2353 if (block_end >= to)
2354 break;
2355 }
2356 block_start = block_end;
2357 bh = bh->b_this_page;
2358 } while (bh != head);
2359
2360 return ret;
2361 }
2362 EXPORT_SYMBOL(block_is_partially_uptodate);
2363
2364 /*
2365 * Generic "read_folio" function for block devices that have the normal
2366 * get_block functionality. This covers most block-device-backed filesystems.
2367 * Reads the folio asynchronously --- the unlock_buffer() and
2368 * set/clear_buffer_uptodate() functions propagate buffer state into the
2369 * folio once IO has completed.
2370 */
2371 int block_read_full_folio(struct folio *folio, get_block_t *get_block)
2372 {
2373 struct inode *inode = folio->mapping->host;
2374 sector_t iblock, lblock;
2375 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2376 size_t blocksize;
2377 int nr, i;
2378 int fully_mapped = 1;
2379 bool page_error = false;
2380 loff_t limit = i_size_read(inode);
2381
2382 /* This is needed for ext4. */
2383 if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
2384 limit = inode->i_sb->s_maxbytes;
2385
2386 VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
2387
2388 head = folio_create_buffers(folio, inode, 0);
2389 blocksize = head->b_size;
2390
2391 iblock = div_u64(folio_pos(folio), blocksize);
2392 lblock = div_u64(limit + blocksize - 1, blocksize);
2393 bh = head;
2394 nr = 0;
2395 i = 0;
2396
2397 do {
2398 if (buffer_uptodate(bh))
2399 continue;
2400
2401 if (!buffer_mapped(bh)) {
2402 int err = 0;
2403
2404 fully_mapped = 0;
2405 if (iblock < lblock) {
2406 WARN_ON(bh->b_size != blocksize);
2407 err = get_block(inode, iblock, bh, 0);
2408 if (err) {
2409 folio_set_error(folio);
2410 page_error = true;
2411 }
2412 }
2413 if (!buffer_mapped(bh)) {
2414 folio_zero_range(folio, i * blocksize,
2415 blocksize);
2416 if (!err)
2417 set_buffer_uptodate(bh);
2418 continue;
2419 }
2420 /*
2421 * get_block() might have updated the buffer
2422 * synchronously
2423 */
2424 if (buffer_uptodate(bh))
2425 continue;
2426 }
2427 arr[nr++] = bh;
2428 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2429
2430 if (fully_mapped)
2431 folio_set_mappedtodisk(folio);
2432
2433 if (!nr) {
2434 /*
2435 * All buffers are uptodate or get_block() returned an
2436 * error when trying to map them - we can finish the read.
2437 */
2438 folio_end_read(folio, !page_error);
2439 return 0;
2440 }
2441
2442 /* Stage two: lock the buffers */
2443 for (i = 0; i < nr; i++) {
2444 bh = arr[i];
2445 lock_buffer(bh);
2446 mark_buffer_async_read(bh);
2447 }
2448
2449 /*
2450 * Stage 3: start the IO. Check for uptodateness
2451 * inside the buffer lock in case another process reading
2452 * the underlying blockdev brought it uptodate (the sct fix).
2453 */
2454 for (i = 0; i < nr; i++) {
2455 bh = arr[i];
2456 if (buffer_uptodate(bh))
2457 end_buffer_async_read(bh, 1);
2458 else
2459 submit_bh(REQ_OP_READ, bh);
2460 }
2461 return 0;
2462 }
2463 EXPORT_SYMBOL(block_read_full_folio);
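
/*
 * Illustrative sketch (not part of this file): a filesystem's ->read_folio()
 * usually just delegates here with its own (hypothetical) get_block:
 *
 *	static int myfs_read_folio(struct file *file, struct folio *folio)
 *	{
 *		return block_read_full_folio(folio, myfs_get_block);
 *	}
 */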
2464
2465 /* Utility function for filesystems that need to do work on expanding
2466 * truncates. Uses filesystem pagecache writes to allow the filesystem to
2467 * deal with the resulting hole.
2468 */
2469 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2470 {
2471 struct address_space *mapping = inode->i_mapping;
2472 const struct address_space_operations *aops = mapping->a_ops;
2473 struct page *page;
2474 void *fsdata = NULL;
2475 int err;
2476
2477 err = inode_newsize_ok(inode, size);
2478 if (err)
2479 goto out;
2480
2481 err = aops->write_begin(NULL, mapping, size, 0, &page, &fsdata);
2482 if (err)
2483 goto out;
2484
2485 err = aops->write_end(NULL, mapping, size, 0, 0, page, fsdata);
2486 BUG_ON(err > 0);
2487
2488 out:
2489 return err;
2490 }
2491 EXPORT_SYMBOL(generic_cont_expand_simple);
2492
2493 static int cont_expand_zero(struct file *file, struct address_space *mapping,
2494 loff_t pos, loff_t *bytes)
2495 {
2496 struct inode *inode = mapping->host;
2497 const struct address_space_operations *aops = mapping->a_ops;
2498 unsigned int blocksize = i_blocksize(inode);
2499 struct page *page;
2500 void *fsdata = NULL;
2501 pgoff_t index, curidx;
2502 loff_t curpos;
2503 unsigned zerofrom, offset, len;
2504 int err = 0;
2505
2506 index = pos >> PAGE_SHIFT;
2507 offset = pos & ~PAGE_MASK;
2508
2509 while (index > (curidx = (curpos = *bytes)>>PAGE_SHIFT)) {
2510 zerofrom = curpos & ~PAGE_MASK;
2511 if (zerofrom & (blocksize-1)) {
2512 *bytes |= (blocksize-1);
2513 (*bytes)++;
2514 }
2515 len = PAGE_SIZE - zerofrom;
2516
2517 err = aops->write_begin(file, mapping, curpos, len,
2518 &page, &fsdata);
2519 if (err)
2520 goto out;
2521 zero_user(page, zerofrom, len);
2522 err = aops->write_end(file, mapping, curpos, len, len,
2523 page, fsdata);
2524 if (err < 0)
2525 goto out;
2526 BUG_ON(err != len);
2527 err = 0;
2528
2529 balance_dirty_pages_ratelimited(mapping);
2530
2531 if (fatal_signal_pending(current)) {
2532 err = -EINTR;
2533 goto out;
2534 }
2535 }
2536
2537 /* page covers the boundary, find the boundary offset */
2538 if (index == curidx) {
2539 zerofrom = curpos & ~PAGE_MASK;
2540 /* if we are expanding the file, the last block will be filled */
2541 if (offset <= zerofrom) {
2542 goto out;
2543 }
2544 if (zerofrom & (blocksize-1)) {
2545 *bytes |= (blocksize-1);
2546 (*bytes)++;
2547 }
2548 len = offset - zerofrom;
2549
2550 err = aops->write_begin(file, mapping, curpos, len,
2551 &page, &fsdata);
2552 if (err)
2553 goto out;
2554 zero_user(page, zerofrom, len);
2555 err = aops->write_end(file, mapping, curpos, len, len,
2556 page, fsdata);
2557 if (err < 0)
2558 goto out;
2559 BUG_ON(err != len);
2560 err = 0;
2561 }
2562 out:
2563 return err;
2564 }
2565
2566 /*
2567 * For moronic filesystems that do not allow holes in files.
2568 * We may have to extend the file.
2569 */
2570 int cont_write_begin(struct file *file, struct address_space *mapping,
2571 loff_t pos, unsigned len,
2572 struct page **pagep, void **fsdata,
2573 get_block_t *get_block, loff_t *bytes)
2574 {
2575 struct inode *inode = mapping->host;
2576 unsigned int blocksize = i_blocksize(inode);
2577 unsigned int zerofrom;
2578 int err;
2579
2580 err = cont_expand_zero(file, mapping, pos, bytes);
2581 if (err)
2582 return err;
2583
2584 zerofrom = *bytes & ~PAGE_MASK;
2585 if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2586 *bytes |= (blocksize-1);
2587 (*bytes)++;
2588 }
2589
2590 return block_write_begin(mapping, pos, len, pagep, get_block);
2591 }
2592 EXPORT_SYMBOL(cont_write_begin);
2593
2594 void block_commit_write(struct page *page, unsigned from, unsigned to)
2595 {
2596 struct folio *folio = page_folio(page);
2597 __block_commit_write(folio, from, to);
2598 }
2599 EXPORT_SYMBOL(block_commit_write);
2600
2601 /*
2602 * block_page_mkwrite() is not allowed to change the file size as it gets
2603 * called from a page fault handler when a page is first dirtied. Hence we must
2604 * be careful to check for EOF conditions here. We set the page up correctly
2605 * for a written page which means we get ENOSPC checking when writing into
2606 * holes and correct delalloc and unwritten extent mapping on filesystems that
2607 * support these features.
2608 *
2609 * We are not allowed to take i_rwsem here so we have to play games to
2610 * protect against truncate races as the page could now be beyond EOF. Because
2611 * truncate writes the inode size before removing pages, once we have the
2612 * page lock we can determine safely if the page is beyond EOF. If it is not
2613 * beyond EOF, then the page is guaranteed safe against truncation until we
2614 * unlock the page.
2615 *
2616 * Direct callers of this function should protect against filesystem freezing
2617 * using sb_start_pagefault() - sb_end_pagefault() functions.
2618 */
2619 int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2620 get_block_t get_block)
2621 {
2622 struct folio *folio = page_folio(vmf->page);
2623 struct inode *inode = file_inode(vma->vm_file);
2624 unsigned long end;
2625 loff_t size;
2626 int ret;
2627
2628 folio_lock(folio);
2629 size = i_size_read(inode);
2630 if ((folio->mapping != inode->i_mapping) ||
2631 (folio_pos(folio) >= size)) {
2632 /* We overload EFAULT to mean page got truncated */
2633 ret = -EFAULT;
2634 goto out_unlock;
2635 }
2636
2637 end = folio_size(folio);
2638 /* folio is wholly or partially inside EOF */
2639 if (folio_pos(folio) + end > size)
2640 end = size - folio_pos(folio);
2641
2642 ret = __block_write_begin_int(folio, 0, end, get_block, NULL);
2643 if (unlikely(ret))
2644 goto out_unlock;
2645
2646 __block_commit_write(folio, 0, end);
2647
2648 folio_mark_dirty(folio);
2649 folio_wait_stable(folio);
2650 return 0;
2651 out_unlock:
2652 folio_unlock(folio);
2653 return ret;
2654 }
2655 EXPORT_SYMBOL(block_page_mkwrite);
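
/*
 * Illustrative sketch (not part of this file): a ->page_mkwrite() handler
 * wrapping block_page_mkwrite() with the freeze protection described above.
 * myfs_get_block() is hypothetical and the error mapping is deliberately
 * simplistic:
 *
 *	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct super_block *sb = file_inode(vmf->vma->vm_file)->i_sb;
 *		int err;
 *
 *		sb_start_pagefault(sb);
 *		err = block_page_mkwrite(vmf->vma, vmf, myfs_get_block);
 *		sb_end_pagefault(sb);
 *		if (err)
 *			return err == -EFAULT ? VM_FAULT_NOPAGE : VM_FAULT_SIGBUS;
 *		return VM_FAULT_LOCKED;		// folio is locked, dirty and stable
 *	}
 */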
2656
2657 int block_truncate_page(struct address_space *mapping,
2658 loff_t from, get_block_t *get_block)
2659 {
2660 pgoff_t index = from >> PAGE_SHIFT;
2661 unsigned blocksize;
2662 sector_t iblock;
2663 size_t offset, length, pos;
2664 struct inode *inode = mapping->host;
2665 struct folio *folio;
2666 struct buffer_head *bh;
2667 int err = 0;
2668
2669 blocksize = i_blocksize(inode);
2670 length = from & (blocksize - 1);
2671
2672 /* Block boundary? Nothing to do */
2673 if (!length)
2674 return 0;
2675
2676 length = blocksize - length;
2677 iblock = ((loff_t)index * PAGE_SIZE) >> inode->i_blkbits;
2678
2679 folio = filemap_grab_folio(mapping, index);
2680 if (IS_ERR(folio))
2681 return PTR_ERR(folio);
2682
2683 bh = folio_buffers(folio);
2684 if (!bh)
2685 bh = create_empty_buffers(folio, blocksize, 0);
2686
2687 /* Find the buffer that contains "offset" */
2688 offset = offset_in_folio(folio, from);
2689 pos = blocksize;
2690 while (offset >= pos) {
2691 bh = bh->b_this_page;
2692 iblock++;
2693 pos += blocksize;
2694 }
2695
2696 if (!buffer_mapped(bh)) {
2697 WARN_ON(bh->b_size != blocksize);
2698 err = get_block(inode, iblock, bh, 0);
2699 if (err)
2700 goto unlock;
2701 /* unmapped? It's a hole - nothing to do */
2702 if (!buffer_mapped(bh))
2703 goto unlock;
2704 }
2705
2706 /* Ok, it's mapped. Make sure it's up-to-date */
2707 if (folio_test_uptodate(folio))
2708 set_buffer_uptodate(bh);
2709
2710 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2711 err = bh_read(bh, 0);
2712 /* Uhhuh. Read error. Complain and punt. */
2713 if (err < 0)
2714 goto unlock;
2715 }
2716
2717 folio_zero_range(folio, offset, length);
2718 mark_buffer_dirty(bh);
2719
2720 unlock:
2721 folio_unlock(folio);
2722 folio_put(folio);
2723
2724 return err;
2725 }
2726 EXPORT_SYMBOL(block_truncate_page);
2727
2728 /*
2729 * The generic ->writepage function for buffer-backed address_spaces
2730 */
2731 int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
2732 void *get_block)
2733 {
2734 struct inode * const inode = folio->mapping->host;
2735 loff_t i_size = i_size_read(inode);
2736
2737 /* Is the folio fully inside i_size? */
2738 if (folio_pos(folio) + folio_size(folio) <= i_size)
2739 return __block_write_full_folio(inode, folio, get_block, wbc);
2740
2741 /* Is the folio fully outside i_size? (truncate in progress) */
2742 if (folio_pos(folio) >= i_size) {
2743 folio_unlock(folio);
2744 return 0; /* don't care */
2745 }
2746
2747 /*
2748 * The folio straddles i_size. It must be zeroed out on each and every
2749 * writepage invocation because it may be mmapped. "A file is mapped
2750 * in multiples of the page size. For a file that is not a multiple of
2751 * the page size, the remaining memory is zeroed when mapped, and
2752 * writes to that region are not written out to the file."
2753 */
2754 folio_zero_segment(folio, offset_in_folio(folio, i_size),
2755 folio_size(folio));
2756 return __block_write_full_folio(inode, folio, get_block, wbc);
2757 }
2758
2759 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2760 get_block_t *get_block)
2761 {
2762 struct inode *inode = mapping->host;
2763 struct buffer_head tmp = {
2764 .b_size = i_blocksize(inode),
2765 };
2766
2767 get_block(inode, block, &tmp, 0);
2768 return tmp.b_blocknr;
2769 }
2770 EXPORT_SYMBOL(generic_block_bmap);
2771
2772 static void end_bio_bh_io_sync(struct bio *bio)
2773 {
2774 struct buffer_head *bh = bio->bi_private;
2775
2776 if (unlikely(bio_flagged(bio, BIO_QUIET)))
2777 set_bit(BH_Quiet, &bh->b_state);
2778
2779 bh->b_end_io(bh, !bio->bi_status);
2780 bio_put(bio);
2781 }
2782
2783 static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
2784 enum rw_hint write_hint,
2785 struct writeback_control *wbc)
2786 {
2787 const enum req_op op = opf & REQ_OP_MASK;
2788 struct bio *bio;
2789
2790 BUG_ON(!buffer_locked(bh));
2791 BUG_ON(!buffer_mapped(bh));
2792 BUG_ON(!bh->b_end_io);
2793 BUG_ON(buffer_delay(bh));
2794 BUG_ON(buffer_unwritten(bh));
2795
2796 /*
2797 * Only clear out a write error when rewriting
2798 */
2799 if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE))
2800 clear_buffer_write_io_error(bh);
2801
2802 if (buffer_meta(bh))
2803 opf |= REQ_META;
2804 if (buffer_prio(bh))
2805 opf |= REQ_PRIO;
2806
2807 bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO);
2808
2809 fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
2810
2811 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2812 bio->bi_write_hint = write_hint;
2813
2814 __bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
2815
2816 bio->bi_end_io = end_bio_bh_io_sync;
2817 bio->bi_private = bh;
2818
2819 /* Take care of bh's that straddle the end of the device */
2820 guard_bio_eod(bio);
2821
2822 if (wbc) {
2823 wbc_init_bio(wbc, bio);
2824 wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size);
2825 }
2826
2827 submit_bio(bio);
2828 }
2829
2830 void submit_bh(blk_opf_t opf, struct buffer_head *bh)
2831 {
2832 submit_bh_wbc(opf, bh, WRITE_LIFE_NOT_SET, NULL);
2833 }
2834 EXPORT_SYMBOL(submit_bh);
2835
2836 void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
2837 {
2838 lock_buffer(bh);
2839 if (!test_clear_buffer_dirty(bh)) {
2840 unlock_buffer(bh);
2841 return;
2842 }
2843 bh->b_end_io = end_buffer_write_sync;
2844 get_bh(bh);
2845 submit_bh(REQ_OP_WRITE | op_flags, bh);
2846 }
2847 EXPORT_SYMBOL(write_dirty_buffer);
2848
2849 /*
2850 * For a data-integrity writeout, we need to wait upon any in-progress I/O
2851 * and then start new I/O and then wait upon it. The caller must have a ref on
2852 * the buffer_head.
2853 */
2854 int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
2855 {
2856 WARN_ON(atomic_read(&bh->b_count) < 1);
2857 lock_buffer(bh);
2858 if (test_clear_buffer_dirty(bh)) {
2859 /*
2860 * The bh should be mapped, but it might not be if the
2861 * device was hot-removed. Not much we can do but fail the I/O.
2862 */
2863 if (!buffer_mapped(bh)) {
2864 unlock_buffer(bh);
2865 return -EIO;
2866 }
2867
2868 get_bh(bh);
2869 bh->b_end_io = end_buffer_write_sync;
2870 submit_bh(REQ_OP_WRITE | op_flags, bh);
2871 wait_on_buffer(bh);
2872 if (!buffer_uptodate(bh))
2873 return -EIO;
2874 } else {
2875 unlock_buffer(bh);
2876 }
2877 return 0;
2878 }
2879 EXPORT_SYMBOL(__sync_dirty_buffer);
2880
2881 int sync_dirty_buffer(struct buffer_head *bh)
2882 {
2883 return __sync_dirty_buffer(bh, REQ_SYNC);
2884 }
2885 EXPORT_SYMBOL(sync_dirty_buffer);
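
/*
 * Illustrative sketch (not part of this file): the classic metadata-update
 * pattern built on sync_dirty_buffer().  "myfs_sb_block" and "new_super"
 * are hypothetical:
 *
 *	bh = sb_bread(sb, myfs_sb_block);
 *	if (!bh)
 *		return -EIO;
 *	memcpy(bh->b_data, new_super, sizeof(*new_super));
 *	mark_buffer_dirty(bh);
 *	err = sync_dirty_buffer(bh);	// submit with REQ_SYNC and wait
 *	brelse(bh);
 */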
2886
2887 static inline int buffer_busy(struct buffer_head *bh)
2888 {
2889 return atomic_read(&bh->b_count) |
2890 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2891 }
2892
2893 static bool
2894 drop_buffers(struct folio *folio, struct buffer_head **buffers_to_free)
2895 {
2896 struct buffer_head *head = folio_buffers(folio);
2897 struct buffer_head *bh;
2898
2899 bh = head;
2900 do {
2901 if (buffer_busy(bh))
2902 goto failed;
2903 bh = bh->b_this_page;
2904 } while (bh != head);
2905
2906 do {
2907 struct buffer_head *next = bh->b_this_page;
2908
2909 if (bh->b_assoc_map)
2910 __remove_assoc_queue(bh);
2911 bh = next;
2912 } while (bh != head);
2913 *buffers_to_free = head;
2914 folio_detach_private(folio);
2915 return true;
2916 failed:
2917 return false;
2918 }
2919
2920 /**
2921 * try_to_free_buffers - Release buffers attached to this folio.
2922 * @folio: The folio.
2923 *
2924 * If any buffers are in use (dirty, under writeback, elevated refcount),
2925 * no buffers will be freed.
2926 *
2927 * If the folio is dirty but all the buffers are clean then we need to
2928 * be sure to mark the folio clean as well. This is because the folio
2929 * may be against a block device, and a later reattachment of buffers
2930 * to a dirty folio will set *all* buffers dirty, which would corrupt
2931 * filesystem data on the same device.
2932 *
2933 * The same applies to regular filesystem folios: if all the buffers are
2934 * clean then we set the folio clean and proceed. To do that, we require
2935 * total exclusion from block_dirty_folio(). That is obtained with
2936 * i_private_lock.
2937 *
2938 * Exclusion against try_to_free_buffers may be obtained by either
2939 * locking the folio or by holding its mapping's i_private_lock.
2940 *
2941 * Context: Process context. @folio must be locked. Will not sleep.
2942 * Return: true if all buffers attached to this folio were freed.
2943 */
2944 bool try_to_free_buffers(struct folio *folio)
2945 {
2946 struct address_space * const mapping = folio->mapping;
2947 struct buffer_head *buffers_to_free = NULL;
2948 bool ret = false;
2949
2950 BUG_ON(!folio_test_locked(folio));
2951 if (folio_test_writeback(folio))
2952 return false;
2953
2954 if (mapping == NULL) { /* can this still happen? */
2955 ret = drop_buffers(folio, &buffers_to_free);
2956 goto out;
2957 }
2958
2959 spin_lock(&mapping->i_private_lock);
2960 ret = drop_buffers(folio, &buffers_to_free);
2961
2962 /*
2963 * If the filesystem writes its buffers by hand (eg ext3)
2964 * then we can have clean buffers against a dirty folio. We
2965 * clean the folio here; otherwise the VM will never notice
2966 * that the filesystem did any IO at all.
2967 *
2968 * Also, during truncate, discard_buffer will have marked all
2969 * the folio's buffers clean. We discover that here and clean
2970 * the folio also.
2971 *
2972 * i_private_lock must be held over this entire operation in order
2973 * to synchronise against block_dirty_folio and prevent the
2974 * dirty bit from being lost.
2975 */
2976 if (ret)
2977 folio_cancel_dirty(folio);
2978 spin_unlock(&mapping->i_private_lock);
2979 out:
2980 if (buffers_to_free) {
2981 struct buffer_head *bh = buffers_to_free;
2982
2983 do {
2984 struct buffer_head *next = bh->b_this_page;
2985 free_buffer_head(bh);
2986 bh = next;
2987 } while (bh != buffers_to_free);
2988 }
2989 return ret;
2990 }
2991 EXPORT_SYMBOL(try_to_free_buffers);
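
/*
 * Illustrative sketch (not part of this file): a ->release_folio()
 * implementation normally ends up here once the filesystem has decided it
 * holds no other references to the buffers (the journal check is
 * hypothetical):
 *
 *	static bool myfs_release_folio(struct folio *folio, gfp_t gfp)
 *	{
 *		if (myfs_folio_pinned_by_journal(folio))
 *			return false;
 *		return try_to_free_buffers(folio);
 *	}
 */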
2992
2993 /*
2994 * Buffer-head allocation
2995 */
2996 static struct kmem_cache *bh_cachep __ro_after_init;
2997
2998 /*
2999 * Once the number of bh's in the machine exceeds this level, we start
3000 * stripping them in writeback.
3001 */
3002 static unsigned long max_buffer_heads __ro_after_init;
3003
3004 int buffer_heads_over_limit;
3005
3006 struct bh_accounting {
3007 int nr; /* Number of live bh's */
3008 int ratelimit; /* Limit cacheline bouncing */
3009 };
3010
3011 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3012
3013 static void recalc_bh_state(void)
3014 {
3015 int i;
3016 int tot = 0;
3017
3018 if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
3019 return;
3020 __this_cpu_write(bh_accounting.ratelimit, 0);
3021 for_each_online_cpu(i)
3022 tot += per_cpu(bh_accounting, i).nr;
3023 buffer_heads_over_limit = (tot > max_buffer_heads);
3024 }
3025
3026 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3027 {
3028 struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
3029 if (ret) {
3030 INIT_LIST_HEAD(&ret->b_assoc_buffers);
3031 spin_lock_init(&ret->b_uptodate_lock);
3032 preempt_disable();
3033 __this_cpu_inc(bh_accounting.nr);
3034 recalc_bh_state();
3035 preempt_enable();
3036 }
3037 return ret;
3038 }
3039 EXPORT_SYMBOL(alloc_buffer_head);
3040
3041 void free_buffer_head(struct buffer_head *bh)
3042 {
3043 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3044 kmem_cache_free(bh_cachep, bh);
3045 preempt_disable();
3046 __this_cpu_dec(bh_accounting.nr);
3047 recalc_bh_state();
3048 preempt_enable();
3049 }
3050 EXPORT_SYMBOL(free_buffer_head);
3051
3052 static int buffer_exit_cpu_dead(unsigned int cpu)
3053 {
3054 int i;
3055 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3056
3057 for (i = 0; i < BH_LRU_SIZE; i++) {
3058 brelse(b->bhs[i]);
3059 b->bhs[i] = NULL;
3060 }
3061 this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
3062 per_cpu(bh_accounting, cpu).nr = 0;
3063 return 0;
3064 }
3065
3066 /**
3067 * bh_uptodate_or_lock - Test whether the buffer is uptodate
3068 * @bh: struct buffer_head
3069 *
3070 * Returns 1 if the buffer is up-to-date.  Returns 0, with the buffer
3071 * locked, if it is not.
3072 */
3073 int bh_uptodate_or_lock(struct buffer_head *bh)
3074 {
3075 if (!buffer_uptodate(bh)) {
3076 lock_buffer(bh);
3077 if (!buffer_uptodate(bh))
3078 return 0;
3079 unlock_buffer(bh);
3080 }
3081 return 1;
3082 }
3083 EXPORT_SYMBOL(bh_uptodate_or_lock);
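
/*
 * Illustrative sketch (not part of this file): the usual read-if-needed
 * pattern pairs bh_uptodate_or_lock() with __bh_read(), which expects the
 * buffer locked and unlocks it when the I/O completes:
 *
 *	if (!bh_uptodate_or_lock(bh)) {
 *		err = __bh_read(bh, 0, true);
 *		if (err < 0)
 *			return err;
 *	}
 *	// bh is up to date here
 */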
3084
3085 /**
3086 * __bh_read - Submit read for a locked buffer
3087 * @bh: struct buffer_head
3088 * @op_flags: extra request flags to OR with REQ_OP_READ
3089 * @wait: wait for the read to complete
3090 *
3091 * Returns zero on success (or when not waiting), and -EIO if the read failed.
3092 */
3093 int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait)
3094 {
3095 int ret = 0;
3096
3097 BUG_ON(!buffer_locked(bh));
3098
3099 get_bh(bh);
3100 bh->b_end_io = end_buffer_read_sync;
3101 submit_bh(REQ_OP_READ | op_flags, bh);
3102 if (wait) {
3103 wait_on_buffer(bh);
3104 if (!buffer_uptodate(bh))
3105 ret = -EIO;
3106 }
3107 return ret;
3108 }
3109 EXPORT_SYMBOL(__bh_read);
3110
3111 /**
3112 * __bh_read_batch - Submit read for a batch of unlocked buffers
3113 * @nr: number of buffers in the batch
3114 * @bhs: a batch of struct buffer_head
3115 * @op_flags: extra request flags to OR with REQ_OP_READ
3116 * @force_lock: if set, wait to lock each buffer; otherwise skip any
3117 * buffer that cannot be locked immediately.
3118 *
3119 * The reads are submitted asynchronously; callers that need the data must wait on each buffer themselves.
3120 */
3121 void __bh_read_batch(int nr, struct buffer_head *bhs[],
3122 blk_opf_t op_flags, bool force_lock)
3123 {
3124 int i;
3125
3126 for (i = 0; i < nr; i++) {
3127 struct buffer_head *bh = bhs[i];
3128
3129 if (buffer_uptodate(bh))
3130 continue;
3131
3132 if (force_lock)
3133 lock_buffer(bh);
3134 else
3135 if (!trylock_buffer(bh))
3136 continue;
3137
3138 if (buffer_uptodate(bh)) {
3139 unlock_buffer(bh);
3140 continue;
3141 }
3142
3143 bh->b_end_io = end_buffer_read_sync;
3144 get_bh(bh);
3145 submit_bh(REQ_OP_READ | op_flags, bh);
3146 }
3147 }
3148 EXPORT_SYMBOL(__bh_read_batch);
3149
3150 void __init buffer_init(void)
3151 {
3152 unsigned long nrpages;
3153 int ret;
3154
3155 bh_cachep = KMEM_CACHE(buffer_head,
3156 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC);
3157 /*
3158 * Limit the bh occupancy to 10% of ZONE_NORMAL
3159 */
3160 nrpages = (nr_free_buffer_pages() * 10) / 100;
3161 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3162 ret = cpuhp_setup_state_nocalls(CPUHP_FS_BUFF_DEAD, "fs/buffer:dead",
3163 NULL, buffer_exit_cpu_dead);
3164 WARN_ON(ret < 0);
3165 }
3166