// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/readpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2015, Google, Inc.
 *
 * This was originally taken from fs/mpage.c
 *
 * The ext4_mpage_readpages() function here is intended to replace
 * mpage_readpages() in the general case, not just for encrypted
 * files.  It has some limitations (see below), where it will fall
 * back to block_read_full_page(), but these limitations should only
 * be hit when page_size != block_size.
 *
 * This will allow us to attach a callback function to support ext4
 * encryption.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_SIZE setups.
 *
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/cleancache.h>

#include "ext4.h"

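/*
 * For reads of encrypted files, bio->bi_private carries the fscrypt
 * context attached at submission time; it is NULL for all other bios.
 */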
static inline bool ext4_bio_encrypted(struct bio *bio)
{
#ifdef CONFIG_FS_ENCRYPTION
	return unlikely(bio->bi_private != NULL);
#else
	return false;
#endif
}

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;

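	/*
	 * A successfully read encrypted bio is handed off to the fscrypt
	 * workqueue for decryption, which completes the pages later; on
	 * I/O error, release the context and fall through to the normal
	 * completion handling below.
	 */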
	if (ext4_bio_encrypted(bio)) {
		if (bio->bi_status) {
			fscrypt_release_ctx(bio->bi_private);
		} else {
			fscrypt_enqueue_decrypt_bio(bio->bi_private, bio);
			return;
		}
	}
	bio_for_each_segment_all(bv, bio, iter_all) {
		struct page *page = bv->bv_page;

		if (!bio->bi_status) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}

	bio_put(bio);
}

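/*
 * Read pages for @mapping: either the single locked @page (->readpage)
 * or @nr_pages pages on the @pages list (->readpages); exactly one of
 * the two is supplied by the caller.
 */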
int ext4_mpage_readpages(struct address_space *mapping,
			 struct list_head *pages, struct page *page,
			 unsigned nr_pages, bool is_readahead)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;

	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	struct block_device *bdev = inode->i_sb->s_bdev;
	int length;
	unsigned relative_block = 0;
	struct ext4_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;

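	/*
	 * map is carried across loop iterations: a single ext4_map_blocks()
	 * call can describe an extent that spans several of the pages being
	 * read, so its result is reused before mapping again.
	 */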
	for (; nr_pages; nr_pages--) {
		int fully_mapped = 1;
		unsigned first_hole = blocks_per_page;

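		/*
		 * In readahead mode, take the next page off the list and add
		 * it to the page cache; if that fails (e.g. the page is
		 * already cached), skip to the next one.
		 */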
		if (pages) {
			page = lru_to_page(pages);

			prefetchw(&page->flags);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping, page->index,
				  readahead_gfp_mask(mapping)))
				goto next_page;
		}

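		/* A page with attached buffers is left to the slow path. */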
		if (page_has_buffers(page))
			goto confused;

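		/*
		 * Compute the first file block of this page and the end of
		 * the current read window, clamped to EOF so blocks past
		 * i_size are treated as a tail hole.
		 */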
		block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
		last_block = block_in_file + nr_pages * blocks_per_page;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;
		page_block = 0;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & EXT4_MAP_MAPPED) &&
		    block_in_file > map.m_lblk &&
		    block_in_file < (map.m_lblk + map.m_len)) {
			unsigned map_offset = block_in_file - map.m_lblk;
			unsigned last = map.m_len - map_offset;

			for (relative_block = 0; ; relative_block++) {
				if (relative_block == last) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				}
				if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk + map_offset +
					relative_block;
				page_block++;
				block_in_file++;
			}
		}

		/*
		 * Then do more ext4_map_blocks() calls until we are
		 * done with this page.
		 */
		while (page_block < blocks_per_page) {
			if (block_in_file < last_block) {
				map.m_lblk = block_in_file;
				map.m_len = last_block - block_in_file;

				if (ext4_map_blocks(NULL, inode, &map, 0) < 0) {
				set_error_page:
					SetPageError(page);
					zero_user_segment(page, 0,
							  PAGE_SIZE);
					unlock_page(page);
					goto next_page;
				}
			}
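			/*
			 * Unmapped block: note where the first hole in the
			 * page begins so the tail can be zeroed out before
			 * submission.
			 */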
			if ((map.m_flags & EXT4_MAP_MAPPED) == 0) {
				fully_mapped = 0;
				if (first_hole == blocks_per_page)
					first_hole = page_block;
				page_block++;
				block_in_file++;
				continue;
			}
			if (first_hole != blocks_per_page)
				goto confused;		/* hole -> non-hole */

			/* Contiguous blocks? */
			if (page_block && blocks[page_block-1] != map.m_pblk-1)
				goto confused;
			for (relative_block = 0; ; relative_block++) {
				if (relative_block == map.m_len) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				} else if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk+relative_block;
				page_block++;
				block_in_file++;
			}
		}
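		/*
		 * Zero the tail hole.  A page that is entirely a hole needs
		 * no I/O at all and can be marked up to date immediately.
		 */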
		if (first_hole != blocks_per_page) {
			zero_user_segment(page, first_hole << blkbits,
					  PAGE_SIZE);
			if (first_hole == 0) {
				SetPageUptodate(page);
				unlock_page(page);
				goto next_page;
			}
		} else if (fully_mapped) {
			SetPageMappedToDisk(page);
		}
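		/*
		 * Try cleancache before doing real I/O; on a hit the page is
		 * already populated, and the "confused" path below will just
		 * unlock it since it is now up to date.
		 */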
		if (fully_mapped && blocks_per_page == 1 &&
		    !PageUptodate(page) && cleancache_get_page(page) == 0) {
			SetPageUptodate(page);
			goto confused;
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != blocks[0] - 1)) {
		submit_and_realloc:
			submit_bio(bio);
			bio = NULL;
		}
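		/*
		 * Allocate a new bio, attaching a decryption context for
		 * encrypted regular files; mpage_end_io() keys off bi_private
		 * to tell encrypted bios apart.
		 */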
		if (bio == NULL) {
			struct fscrypt_ctx *ctx = NULL;

			if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)) {
				ctx = fscrypt_get_ctx(GFP_NOFS);
				if (IS_ERR(ctx))
					goto set_error_page;
			}
			bio = bio_alloc(GFP_KERNEL,
				min_t(int, nr_pages, BIO_MAX_PAGES));
			if (!bio) {
				if (ctx)
					fscrypt_release_ctx(ctx);
				goto set_error_page;
			}
			bio_set_dev(bio, bdev);
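			/* blocks[] holds fs blocks; bi_sector wants 512-byte sectors. */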
			bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
			bio->bi_end_io = mpage_end_io;
			bio->bi_private = ctx;
			bio_set_op_attrs(bio, REQ_OP_READ,
						is_readahead ? REQ_RAHEAD : 0);
		}

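		/*
		 * Add the mapped prefix of the page to the bio; if it does
		 * not fit, submit the bio and retry with a fresh one.
		 */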
		length = first_hole << blkbits;
		if (bio_add_page(bio, page, length, 0) < length)
			goto submit_and_realloc;

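		/*
		 * Submit now if the mapping ends at a boundary or the page
		 * has a tail hole; in either case this bio cannot simply be
		 * extended with the next page.
		 */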
		if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
		     (relative_block == map.m_len)) ||
		    (first_hole != blocks_per_page)) {
			submit_bio(bio);
			bio = NULL;
		} else
			last_block_in_bio = blocks[blocks_per_page - 1];
		goto next_page;
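	/*
	 * "confused": fall back to the buffer_head-based path for this page,
	 * after flushing any bio built up so far.
	 */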
	confused:
		if (bio) {
			submit_bio(bio);
			bio = NULL;
		}
		if (!PageUptodate(page))
			block_read_full_page(page, ext4_get_block);
		else
			unlock_page(page);
	next_page:
		if (pages)
			put_page(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		submit_bio(bio);
	return 0;
}