xref: /linux/fs/mpage.c (revision d6fd48ef)
// SPDX-License-Identifier: GPL-2.0
/*
 * fs/mpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains functions related to preparing and submitting BIOs which contain
 * multiple pagecache pages.
 *
 * 15May2002	Andrew Morton
 *		Initial version
 * 27Jun2002	axboe@suse.de
 *		use bio_add_page() to build bio's just the right size
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/mm_inline.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include "internal.h"

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_folio().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bv, bio, iter_all) {
		struct page *page = bv->bv_page;
		page_endio(page, bio_op(bio),
			   blk_status_to_errno(bio->bi_status));
	}

	bio_put(bio);
}

static struct bio *mpage_bio_submit(struct bio *bio)
{
	bio->bi_end_io = mpage_end_io;
	guard_bio_eod(bio);
	submit_bio(bio);
	return NULL;
}

/*
 * Support function for mpage_readahead.  The fs-supplied get_block might
 * return an up-to-date buffer.  This is used to map that buffer into
 * the page, which allows read_folio to avoid triggering a duplicate call
 * to get_block.
 *
 * The idea is to avoid adding buffers to pages that don't already have
 * them.  So when the buffer is up to date and the page size == block size,
 * this marks the page up to date instead of adding new buffers.
 */
static void map_buffer_to_folio(struct folio *folio, struct buffer_head *bh,
		int page_block)
{
	struct inode *inode = folio->mapping->host;
	struct buffer_head *page_bh, *head;
	int block = 0;

	head = folio_buffers(folio);
	if (!head) {
		/*
		 * Don't make any buffers if there would be just one buffer
		 * on the folio and the folio only needs to be marked uptodate.
		 */
		if (inode->i_blkbits == PAGE_SHIFT &&
		    buffer_uptodate(bh)) {
			folio_mark_uptodate(folio);
			return;
		}
		create_empty_buffers(&folio->page, i_blocksize(inode), 0);
		head = folio_buffers(folio);
	}

	page_bh = head;
	do {
		if (block == page_block) {
			page_bh->b_state = bh->b_state;
			page_bh->b_bdev = bh->b_bdev;
			page_bh->b_blocknr = bh->b_blocknr;
			break;
		}
		page_bh = page_bh->b_this_page;
		block++;
	} while (page_bh != head);
}

struct mpage_readpage_args {
	struct bio *bio;
	struct folio *folio;
	unsigned int nr_pages;
	bool is_readahead;
	sector_t last_block_in_bio;
	struct buffer_head map_bh;
	unsigned long first_logical_block;
	get_block_t *get_block;
};

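/*
 * For reference (a sketch, not a definition from this file): the mapping
 * callback passed around below is get_block_t, declared in <linux/fs.h>:
 *
 *	int get_block(struct inode *inode, sector_t iblock,
 *			struct buffer_head *bh_result, int create);
 *
 * On success the filesystem marks bh_result mapped and fills in b_bdev and
 * b_blocknr.  Callers here preload bh_result->b_size with the maximum
 * mapping length they are interested in, and the filesystem may return a
 * multi-block mapping by leaving a correspondingly large b_size.
 */
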
/*
 * This is the worker routine which does all the work of mapping the disk
 * blocks and constructing the largest possible bios, submitting them for IO
 * when blocks turn out not to be contiguous on disk.
 *
 * We pass a buffer_head back and forth and use its buffer_mapped() flag to
 * represent the validity of its disk mapping and to decide when to do the next
 * get_block() call.
 */
static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
{
	struct folio *folio = args->folio;
	struct inode *inode = folio->mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	struct buffer_head *map_bh = &args->map_bh;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	unsigned first_hole = blocks_per_page;
	struct block_device *bdev = NULL;
	int length;
	int fully_mapped = 1;
	blk_opf_t opf = REQ_OP_READ;
	unsigned nblocks;
	unsigned relative_block;
	gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);

	/* MAX_BUF_PER_PAGE, for example */
	VM_BUG_ON_FOLIO(folio_test_large(folio), folio);

	if (args->is_readahead) {
		opf |= REQ_RAHEAD;
		gfp |= __GFP_NORETRY | __GFP_NOWARN;
	}

	if (folio_buffers(folio))
		goto confused;

	block_in_file = (sector_t)folio->index << (PAGE_SHIFT - blkbits);
	last_block = block_in_file + args->nr_pages * blocks_per_page;
	last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
	if (last_block > last_block_in_file)
		last_block = last_block_in_file;
	page_block = 0;

	/*
	 * Map blocks using the result from the previous get_blocks call first.
	 */
	nblocks = map_bh->b_size >> blkbits;
	if (buffer_mapped(map_bh) &&
			block_in_file > args->first_logical_block &&
			block_in_file < (args->first_logical_block + nblocks)) {
		unsigned map_offset = block_in_file - args->first_logical_block;
		unsigned last = nblocks - map_offset;

		for (relative_block = 0; ; relative_block++) {
			if (relative_block == last) {
				clear_buffer_mapped(map_bh);
				break;
			}
			if (page_block == blocks_per_page)
				break;
			blocks[page_block] = map_bh->b_blocknr + map_offset +
						relative_block;
			page_block++;
			block_in_file++;
		}
		bdev = map_bh->b_bdev;
	}

	/*
	 * Then do more get_blocks calls until we are done with this folio.
	 */
	map_bh->b_folio = folio;
	while (page_block < blocks_per_page) {
		map_bh->b_state = 0;
		map_bh->b_size = 0;

		if (block_in_file < last_block) {
			map_bh->b_size = (last_block-block_in_file) << blkbits;
			if (args->get_block(inode, block_in_file, map_bh, 0))
				goto confused;
			args->first_logical_block = block_in_file;
		}

		if (!buffer_mapped(map_bh)) {
			fully_mapped = 0;
			if (first_hole == blocks_per_page)
				first_hole = page_block;
			page_block++;
			block_in_file++;
			continue;
		}

		/* some filesystems will copy data into the page during
		 * the get_block call, in which case we don't want to
		 * read it again.  map_buffer_to_folio copies the data
		 * we just collected from get_block into the folio's buffers
		 * so read_folio doesn't have to repeat the get_block call
		 */
		if (buffer_uptodate(map_bh)) {
			map_buffer_to_folio(folio, map_bh, page_block);
			goto confused;
		}

		if (first_hole != blocks_per_page)
			goto confused;		/* hole -> non-hole */

		/* Contiguous blocks? */
		if (page_block && blocks[page_block-1] != map_bh->b_blocknr-1)
			goto confused;
		nblocks = map_bh->b_size >> blkbits;
		for (relative_block = 0; ; relative_block++) {
			if (relative_block == nblocks) {
				clear_buffer_mapped(map_bh);
				break;
			} else if (page_block == blocks_per_page)
				break;
			blocks[page_block] = map_bh->b_blocknr+relative_block;
			page_block++;
			block_in_file++;
		}
		bdev = map_bh->b_bdev;
	}

	if (first_hole != blocks_per_page) {
		folio_zero_segment(folio, first_hole << blkbits, PAGE_SIZE);
		if (first_hole == 0) {
			folio_mark_uptodate(folio);
			folio_unlock(folio);
			goto out;
		}
	} else if (fully_mapped) {
		folio_set_mappedtodisk(folio);
	}

	/*
	 * This folio will go to BIO.  Do we need to send this BIO off first?
	 */
	if (args->bio && (args->last_block_in_bio != blocks[0] - 1))
		args->bio = mpage_bio_submit(args->bio);

alloc_new:
	if (args->bio == NULL) {
		args->bio = bio_alloc(bdev, bio_max_segs(args->nr_pages), opf,
				      gfp);
		if (args->bio == NULL)
			goto confused;
		args->bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
	}

	length = first_hole << blkbits;
	if (!bio_add_folio(args->bio, folio, length, 0)) {
		args->bio = mpage_bio_submit(args->bio);
		goto alloc_new;
	}

	relative_block = block_in_file - args->first_logical_block;
	nblocks = map_bh->b_size >> blkbits;
	if ((buffer_boundary(map_bh) && relative_block == nblocks) ||
	    (first_hole != blocks_per_page))
		args->bio = mpage_bio_submit(args->bio);
	else
		args->last_block_in_bio = blocks[blocks_per_page - 1];
out:
	return args->bio;

confused:
	if (args->bio)
		args->bio = mpage_bio_submit(args->bio);
	if (!folio_test_uptodate(folio))
		block_read_full_folio(folio, args->get_block);
	else
		folio_unlock(folio);
	goto out;
}

/**
 * mpage_readahead - start reads against pages
 * @rac: Describes which pages to read.
 * @get_block: The filesystem's block mapper function.
 *
 * This function walks the pages and the blocks within each page, building and
 * emitting large BIOs.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_SIZE setups.
 *
 * BH_Boundary explanation:
 *
 * There is a problem.  The mpage read code assembles several pages, gets all
 * their disk mappings, and then submits them all.  That's fine, but obtaining
 * the disk mappings may require I/O.  Reads of indirect blocks, for example.
 *
 * So an mpage read of the first 16 blocks of an ext2 file will cause I/O to be
 * submitted in the following order:
 *
 * 	12 0 1 2 3 4 5 6 7 8 9 10 11 13 14 15 16
 *
 * because the indirect block has to be read to get the mappings of blocks
 * 13,14,15,16.  Obviously, this impacts performance.
 *
 * So what we do is allow the filesystem's get_block() function to set
 * BH_Boundary when it maps block 11.  BH_Boundary says: mapping of the block
 * after this one will require I/O against a block which is probably close to
 * this one.  So you should push what I/O you have currently accumulated.
 *
 * This all causes the disk requests to be issued in the correct order.
 */
void mpage_readahead(struct readahead_control *rac, get_block_t get_block)
{
	struct folio *folio;
	struct mpage_readpage_args args = {
		.get_block = get_block,
		.is_readahead = true,
	};

	while ((folio = readahead_folio(rac))) {
		prefetchw(&folio->flags);
		args.folio = folio;
		args.nr_pages = readahead_count(rac);
		args.bio = do_mpage_readpage(&args);
	}
	if (args.bio)
		mpage_bio_submit(args.bio);
}
EXPORT_SYMBOL(mpage_readahead);
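
/*
 * Illustrative sketch (not part of this file): a filesystem typically wires
 * mpage_readahead() into its address_space_operations through a thin wrapper
 * around its block mapper.  All "example_" names below are hypothetical:
 *
 *	static int example_get_block(struct inode *inode, sector_t iblock,
 *			struct buffer_head *bh_result, int create)
 *	{
 *		sector_t phys = ...;	// look up iblock in fs metadata
 *
 *		map_bh(bh_result, inode->i_sb, phys);
 *		// Call set_buffer_boundary(bh_result) when mapping the next
 *		// block will require a metadata read, so that accumulated
 *		// I/O gets pushed first (see BH_Boundary above).
 *		return 0;
 *	}
 *
 *	static void example_readahead(struct readahead_control *rac)
 *	{
 *		mpage_readahead(rac, example_get_block);
 *	}
 *
 *	static const struct address_space_operations example_aops = {
 *		.readahead	= example_readahead,
 *	};
 */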

/*
 * This isn't called much at all
 */
int mpage_read_folio(struct folio *folio, get_block_t get_block)
{
	struct mpage_readpage_args args = {
		.folio = folio,
		.nr_pages = 1,
		.get_block = get_block,
	};

	args.bio = do_mpage_readpage(&args);
	if (args.bio)
		mpage_bio_submit(args.bio);
	return 0;
}
EXPORT_SYMBOL(mpage_read_folio);
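
/*
 * Illustrative sketch (not part of this file): the read_folio side is wired
 * up the same way, here with the hypothetical names from the sketch above:
 *
 *	static int example_read_folio(struct file *file, struct folio *folio)
 *	{
 *		return mpage_read_folio(folio, example_get_block);
 *	}
 */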

/*
 * Writing is not so simple.
 *
 * If the page has buffers then they will be used for obtaining the disk
 * mapping.  We only support pages which are fully mapped-and-dirty, with a
 * special case for pages which are unmapped at the end: end-of-file.
 *
 * If the page has no buffers (preferred) then the page is mapped here.
 *
 * If all blocks are found to be contiguous then the page can go into the
 * BIO.  Otherwise fall back to the mapping's writepage().
 *
 * FIXME: This code wants an estimate of how many pages are still to be
 * written, so it can intelligently allocate a suitably-sized BIO.  For now,
 * just allocate full-size (16-page) BIOs.
 */

struct mpage_data {
	struct bio *bio;
	sector_t last_block_in_bio;
	get_block_t *get_block;
};

/*
 * We have our BIO, so we can now mark the buffers clean.  Make
 * sure to only clean buffers which we know we'll be writing.
 */
static void clean_buffers(struct page *page, unsigned first_unmapped)
{
	unsigned buffer_counter = 0;
	struct buffer_head *bh, *head;
	if (!page_has_buffers(page))
		return;
	head = page_buffers(page);
	bh = head;

	do {
		if (buffer_counter++ == first_unmapped)
			break;
		clear_buffer_dirty(bh);
		bh = bh->b_this_page;
	} while (bh != head);

	/*
	 * We cannot drop the bh if the page is not uptodate, or a concurrent
	 * read_folio would fail to serialize with the bh and would read from
	 * disk before our write reaches the platter.
	 */
	if (buffer_heads_over_limit && PageUptodate(page))
		try_to_free_buffers(page_folio(page));
}

/*
 * Used when we want to clean all buffers attached to a page.  We don't need
 * to calculate how many buffers are attached to the page; we just need to
 * specify a number larger than the maximum number of buffers.
 */
void clean_page_buffers(struct page *page)
{
	clean_buffers(page, ~0U);
}

static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
		      void *data)
{
	struct mpage_data *mpd = data;
	struct bio *bio = mpd->bio;
	struct address_space *mapping = folio->mapping;
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	sector_t last_block;
	sector_t block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	unsigned first_unmapped = blocks_per_page;
	struct block_device *bdev = NULL;
	int boundary = 0;
	sector_t boundary_block = 0;
	struct block_device *boundary_bdev = NULL;
	size_t length;
	struct buffer_head map_bh;
	loff_t i_size = i_size_read(inode);
	int ret = 0;
	struct buffer_head *head = folio_buffers(folio);

	if (head) {
		struct buffer_head *bh = head;

		/* If they're all mapped and dirty, do it */
		page_block = 0;
		do {
			BUG_ON(buffer_locked(bh));
			if (!buffer_mapped(bh)) {
				/*
				 * unmapped dirty buffers are created by
				 * block_dirty_folio -> mmapped data
				 */
				if (buffer_dirty(bh))
					goto confused;
				if (first_unmapped == blocks_per_page)
					first_unmapped = page_block;
				continue;
			}

			if (first_unmapped != blocks_per_page)
				goto confused;	/* hole -> non-hole */

			if (!buffer_dirty(bh) || !buffer_uptodate(bh))
				goto confused;
			if (page_block) {
				if (bh->b_blocknr != blocks[page_block-1] + 1)
					goto confused;
			}
			blocks[page_block++] = bh->b_blocknr;
			boundary = buffer_boundary(bh);
			if (boundary) {
				boundary_block = bh->b_blocknr;
				boundary_bdev = bh->b_bdev;
			}
			bdev = bh->b_bdev;
		} while ((bh = bh->b_this_page) != head);

		if (first_unmapped)
			goto page_is_mapped;

		/*
		 * Page has buffers, but they are all unmapped. The page was
		 * created by pagein or read over a hole which was handled by
		 * block_read_full_folio().  If this address_space is also
		 * using mpage_readahead then this can rarely happen.
		 */
		goto confused;
	}

	/*
	 * The page has no buffers: map it to disk
	 */
	BUG_ON(!folio_test_uptodate(folio));
	block_in_file = (sector_t)folio->index << (PAGE_SHIFT - blkbits);
	/*
	 * Whole page beyond EOF? Skip allocating blocks to avoid leaking
	 * space.
	 */
	if (block_in_file >= (i_size + (1 << blkbits) - 1) >> blkbits)
		goto page_is_mapped;
	last_block = (i_size - 1) >> blkbits;
	map_bh.b_folio = folio;
	for (page_block = 0; page_block < blocks_per_page; ) {

		map_bh.b_state = 0;
		map_bh.b_size = 1 << blkbits;
		if (mpd->get_block(inode, block_in_file, &map_bh, 1))
			goto confused;
		if (!buffer_mapped(&map_bh))
			goto confused;
		if (buffer_new(&map_bh))
			clean_bdev_bh_alias(&map_bh);
		if (buffer_boundary(&map_bh)) {
			boundary_block = map_bh.b_blocknr;
			boundary_bdev = map_bh.b_bdev;
		}
		if (page_block) {
			if (map_bh.b_blocknr != blocks[page_block-1] + 1)
				goto confused;
		}
		blocks[page_block++] = map_bh.b_blocknr;
		boundary = buffer_boundary(&map_bh);
		bdev = map_bh.b_bdev;
		if (block_in_file == last_block)
			break;
		block_in_file++;
	}
	BUG_ON(page_block == 0);

	first_unmapped = page_block;

page_is_mapped:
	/* Don't bother writing beyond EOF, truncate will discard the folio */
	if (folio_pos(folio) >= i_size)
		goto confused;
	length = folio_size(folio);
	if (folio_pos(folio) + length > i_size) {
		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining memory
		 * is zeroed when mapped, and writes to that region are not
		 * written out to the file."
		 */
		length = i_size - folio_pos(folio);
		folio_zero_segment(folio, length, folio_size(folio));
	}

	/*
	 * This page will go to BIO.  Do we need to send this BIO off first?
	 */
	if (bio && mpd->last_block_in_bio != blocks[0] - 1)
		bio = mpage_bio_submit(bio);

alloc_new:
	if (bio == NULL) {
		bio = bio_alloc(bdev, BIO_MAX_VECS,
				REQ_OP_WRITE | wbc_to_write_flags(wbc),
				GFP_NOFS);
		bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
		wbc_init_bio(wbc, bio);
	}

	/*
	 * Must try to add the page before marking the buffer clean or
	 * the confused fail path above (OOM) will be very confused when
	 * it finds all bh marked clean (i.e. it will not write anything)
	 */
	wbc_account_cgroup_owner(wbc, &folio->page, folio_size(folio));
	length = first_unmapped << blkbits;
	if (!bio_add_folio(bio, folio, length, 0)) {
		bio = mpage_bio_submit(bio);
		goto alloc_new;
	}

	clean_buffers(&folio->page, first_unmapped);

	BUG_ON(folio_test_writeback(folio));
	folio_start_writeback(folio);
	folio_unlock(folio);
	if (boundary || (first_unmapped != blocks_per_page)) {
		bio = mpage_bio_submit(bio);
		if (boundary_block) {
			write_boundary_block(boundary_bdev,
					boundary_block, 1 << blkbits);
		}
	} else {
		mpd->last_block_in_bio = blocks[blocks_per_page - 1];
	}
	goto out;

confused:
	if (bio)
		bio = mpage_bio_submit(bio);

	/*
	 * The caller has a ref on the inode, so *mapping is stable
	 */
	ret = block_write_full_page(&folio->page, mpd->get_block, wbc);
	mapping_set_error(mapping, ret);
out:
	mpd->bio = bio;
	return ret;
}

/**
 * mpage_writepages - walk the list of dirty pages of the given address space & writepage() all of them
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @get_block: the filesystem's block mapper function.
 *
 * This is a library function, which implements the writepages()
 * address_space_operation.
 */
int
mpage_writepages(struct address_space *mapping,
		struct writeback_control *wbc, get_block_t get_block)
{
	struct mpage_data mpd = {
		.get_block	= get_block,
	};
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd);
	if (mpd.bio)
		mpage_bio_submit(mpd.bio);
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(mpage_writepages);
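
/*
 * Illustrative sketch (not part of this file): a typical writepages hookup,
 * again with hypothetical "example_" names:
 *
 *	static int example_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		return mpage_writepages(mapping, wbc, example_get_block);
 *	}
 */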