1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2010 Red Hat, Inc.
4 * Copyright (C) 2016-2023 Christoph Hellwig.
5 */
6 #include <linux/module.h>
7 #include <linux/compiler.h>
8 #include <linux/fs.h>
9 #include <linux/iomap.h>
10 #include <linux/pagemap.h>
11 #include <linux/uio.h>
12 #include <linux/buffer_head.h>
13 #include <linux/dax.h>
14 #include <linux/writeback.h>
15 #include <linux/list_sort.h>
16 #include <linux/swap.h>
17 #include <linux/bio.h>
18 #include <linux/sched/signal.h>
19 #include <linux/migrate.h>
20 #include "trace.h"
21
22 #include "../internal.h"
23
24 #define IOEND_BATCH_SIZE 4096
25
26 typedef int (*iomap_punch_t)(struct inode *inode, loff_t offset, loff_t length);
27 /*
 * Structure allocated for each folio to track per-block uptodate and dirty
 * state, and I/O completions.
30 */
31 struct iomap_folio_state {
32 spinlock_t state_lock;
33 unsigned int read_bytes_pending;
34 atomic_t write_bytes_pending;
35
36 /*
37 * Each block has two bits in this bitmap:
38 * Bits [0..blocks_per_folio) has the uptodate status.
39 * Bits [b_p_f...(2*b_p_f)) has the dirty status.
40 */
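	/* e.g. 4k blocks in a 16k folio: bits 0-3 = uptodate, bits 4-7 = dirty */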
41 unsigned long state[];
42 };
43
44 static struct bio_set iomap_ioend_bioset;
45
static inline bool ifs_is_fully_uptodate(struct folio *folio,
47 struct iomap_folio_state *ifs)
48 {
49 struct inode *inode = folio->mapping->host;
50
51 return bitmap_full(ifs->state, i_blocks_per_folio(inode, folio));
52 }
53
static inline bool ifs_block_is_uptodate(struct iomap_folio_state *ifs,
55 unsigned int block)
56 {
57 return test_bit(block, ifs->state);
58 }
59
static bool ifs_set_range_uptodate(struct folio *folio,
61 struct iomap_folio_state *ifs, size_t off, size_t len)
62 {
63 struct inode *inode = folio->mapping->host;
64 unsigned int first_blk = off >> inode->i_blkbits;
65 unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
66 unsigned int nr_blks = last_blk - first_blk + 1;
67
68 bitmap_set(ifs->state, first_blk, nr_blks);
69 return ifs_is_fully_uptodate(folio, ifs);
70 }
71
static void iomap_set_range_uptodate(struct folio *folio, size_t off,
73 size_t len)
74 {
75 struct iomap_folio_state *ifs = folio->private;
76 unsigned long flags;
77 bool uptodate = true;
78
79 if (ifs) {
80 spin_lock_irqsave(&ifs->state_lock, flags);
81 uptodate = ifs_set_range_uptodate(folio, ifs, off, len);
82 spin_unlock_irqrestore(&ifs->state_lock, flags);
83 }
84
85 if (uptodate)
86 folio_mark_uptodate(folio);
87 }
88
static inline bool ifs_block_is_dirty(struct folio *folio,
90 struct iomap_folio_state *ifs, int block)
91 {
92 struct inode *inode = folio->mapping->host;
93 unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
94
95 return test_bit(block + blks_per_folio, ifs->state);
96 }
97
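/*
 * Find the next run of dirty blocks in the folio, starting the search at
 * *range_start and stopping at range_end. On success *range_start is moved
 * forward to the first dirty byte and the length of the contiguous dirty
 * range is returned in bytes; a return of 0 means no dirty blocks were found.
 */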
static unsigned ifs_find_dirty_range(struct folio *folio,
99 struct iomap_folio_state *ifs, u64 *range_start, u64 range_end)
100 {
101 struct inode *inode = folio->mapping->host;
102 unsigned start_blk =
103 offset_in_folio(folio, *range_start) >> inode->i_blkbits;
104 unsigned end_blk = min_not_zero(
105 offset_in_folio(folio, range_end) >> inode->i_blkbits,
106 i_blocks_per_folio(inode, folio));
107 unsigned nblks = 1;
108
109 while (!ifs_block_is_dirty(folio, ifs, start_blk))
110 if (++start_blk == end_blk)
111 return 0;
112
113 while (start_blk + nblks < end_blk) {
114 if (!ifs_block_is_dirty(folio, ifs, start_blk + nblks))
115 break;
116 nblks++;
117 }
118
119 *range_start = folio_pos(folio) + (start_blk << inode->i_blkbits);
120 return nblks << inode->i_blkbits;
121 }
122
static unsigned iomap_find_dirty_range(struct folio *folio, u64 *range_start,
124 u64 range_end)
125 {
126 struct iomap_folio_state *ifs = folio->private;
127
128 if (*range_start >= range_end)
129 return 0;
130
131 if (ifs)
132 return ifs_find_dirty_range(folio, ifs, range_start, range_end);
133 return range_end - *range_start;
134 }
135
static void ifs_clear_range_dirty(struct folio *folio,
137 struct iomap_folio_state *ifs, size_t off, size_t len)
138 {
139 struct inode *inode = folio->mapping->host;
140 unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
141 unsigned int first_blk = (off >> inode->i_blkbits);
142 unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
143 unsigned int nr_blks = last_blk - first_blk + 1;
144 unsigned long flags;
145
146 spin_lock_irqsave(&ifs->state_lock, flags);
147 bitmap_clear(ifs->state, first_blk + blks_per_folio, nr_blks);
148 spin_unlock_irqrestore(&ifs->state_lock, flags);
149 }
150
static void iomap_clear_range_dirty(struct folio *folio, size_t off, size_t len)
152 {
153 struct iomap_folio_state *ifs = folio->private;
154
155 if (ifs)
156 ifs_clear_range_dirty(folio, ifs, off, len);
157 }
158
static void ifs_set_range_dirty(struct folio *folio,
160 struct iomap_folio_state *ifs, size_t off, size_t len)
161 {
162 struct inode *inode = folio->mapping->host;
163 unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
164 unsigned int first_blk = (off >> inode->i_blkbits);
165 unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
166 unsigned int nr_blks = last_blk - first_blk + 1;
167 unsigned long flags;
168
169 spin_lock_irqsave(&ifs->state_lock, flags);
170 bitmap_set(ifs->state, first_blk + blks_per_folio, nr_blks);
171 spin_unlock_irqrestore(&ifs->state_lock, flags);
172 }
173
static void iomap_set_range_dirty(struct folio *folio, size_t off, size_t len)
175 {
176 struct iomap_folio_state *ifs = folio->private;
177
178 if (ifs)
179 ifs_set_range_dirty(folio, ifs, off, len);
180 }
181
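/*
 * Attach per-block state tracking to the folio, or return the state that is
 * already attached. Returns NULL for folios that only span a single block,
 * or if a non-blocking (IOMAP_NOWAIT) allocation fails.
 */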
static struct iomap_folio_state *ifs_alloc(struct inode *inode,
183 struct folio *folio, unsigned int flags)
184 {
185 struct iomap_folio_state *ifs = folio->private;
186 unsigned int nr_blocks = i_blocks_per_folio(inode, folio);
187 gfp_t gfp;
188
189 if (ifs || nr_blocks <= 1)
190 return ifs;
191
192 if (flags & IOMAP_NOWAIT)
193 gfp = GFP_NOWAIT;
194 else
195 gfp = GFP_NOFS | __GFP_NOFAIL;
196
197 /*
198 * ifs->state tracks two sets of state flags when the
199 * filesystem block size is smaller than the folio size.
200 * The first state tracks per-block uptodate and the
201 * second tracks per-block dirty state.
202 */
203 ifs = kzalloc(struct_size(ifs, state,
204 BITS_TO_LONGS(2 * nr_blocks)), gfp);
205 if (!ifs)
206 return ifs;
207
208 spin_lock_init(&ifs->state_lock);
209 if (folio_test_uptodate(folio))
210 bitmap_set(ifs->state, 0, nr_blocks);
211 if (folio_test_dirty(folio))
212 bitmap_set(ifs->state, nr_blocks, nr_blocks);
213 folio_attach_private(folio, ifs);
214
215 return ifs;
216 }
217
static void ifs_free(struct folio *folio)
219 {
220 struct iomap_folio_state *ifs = folio_detach_private(folio);
221
222 if (!ifs)
223 return;
224 WARN_ON_ONCE(ifs->read_bytes_pending != 0);
225 WARN_ON_ONCE(atomic_read(&ifs->write_bytes_pending));
226 WARN_ON_ONCE(ifs_is_fully_uptodate(folio, ifs) !=
227 folio_test_uptodate(folio));
228 kfree(ifs);
229 }
230
231 /*
232 * Calculate the range inside the folio that we actually need to read.
233 */
static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
235 loff_t *pos, loff_t length, size_t *offp, size_t *lenp)
236 {
237 struct iomap_folio_state *ifs = folio->private;
238 loff_t orig_pos = *pos;
239 loff_t isize = i_size_read(inode);
240 unsigned block_bits = inode->i_blkbits;
241 unsigned block_size = (1 << block_bits);
242 size_t poff = offset_in_folio(folio, *pos);
243 size_t plen = min_t(loff_t, folio_size(folio) - poff, length);
244 unsigned first = poff >> block_bits;
245 unsigned last = (poff + plen - 1) >> block_bits;
246
247 /*
248 * If the block size is smaller than the page size, we need to check the
249 * per-block uptodate status and adjust the offset and length if needed
250 * to avoid reading in already uptodate ranges.
251 */
252 if (ifs) {
253 unsigned int i;
254
255 /* move forward for each leading block marked uptodate */
256 for (i = first; i <= last; i++) {
257 if (!ifs_block_is_uptodate(ifs, i))
258 break;
259 *pos += block_size;
260 poff += block_size;
261 plen -= block_size;
262 first++;
263 }
264
265 /* truncate len if we find any trailing uptodate block(s) */
266 for ( ; i <= last; i++) {
267 if (ifs_block_is_uptodate(ifs, i)) {
268 plen -= (last - i + 1) * block_size;
269 last = i - 1;
270 break;
271 }
272 }
273 }
274
275 /*
276 * If the extent spans the block that contains the i_size, we need to
277 * handle both halves separately so that we properly zero data in the
278 * page cache for blocks that are entirely outside of i_size.
279 */
280 if (orig_pos <= isize && orig_pos + length > isize) {
281 unsigned end = offset_in_folio(folio, isize - 1) >> block_bits;
282
283 if (first <= end && last > end)
284 plen -= (last - end) * block_size;
285 }
286
287 *offp = poff;
288 *lenp = plen;
289 }
290
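/*
 * Complete a read into part of a folio: update per-block uptodate state and,
 * once no reads remain outstanding for the folio, end the read (marking the
 * folio uptodate on success and unlocking it) via folio_end_read().
 */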
static void iomap_finish_folio_read(struct folio *folio, size_t off,
292 size_t len, int error)
293 {
294 struct iomap_folio_state *ifs = folio->private;
295 bool uptodate = !error;
296 bool finished = true;
297
298 if (ifs) {
299 unsigned long flags;
300
301 spin_lock_irqsave(&ifs->state_lock, flags);
302 if (!error)
303 uptodate = ifs_set_range_uptodate(folio, ifs, off, len);
304 ifs->read_bytes_pending -= len;
305 finished = !ifs->read_bytes_pending;
306 spin_unlock_irqrestore(&ifs->state_lock, flags);
307 }
308
309 if (error)
310 folio_set_error(folio);
311 if (finished)
312 folio_end_read(folio, uptodate);
313 }
314
static void iomap_read_end_io(struct bio *bio)
316 {
317 int error = blk_status_to_errno(bio->bi_status);
318 struct folio_iter fi;
319
320 bio_for_each_folio_all(fi, bio)
321 iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
322 bio_put(bio);
323 }
324
325 struct iomap_readpage_ctx {
326 struct folio *cur_folio;
327 bool cur_folio_in_bio;
328 struct bio *bio;
329 struct readahead_control *rac;
330 };
331
332 /**
333 * iomap_read_inline_data - copy inline data into the page cache
334 * @iter: iteration structure
335 * @folio: folio to copy to
336 *
337 * Copy the inline data in @iter into @folio and zero out the rest of the folio.
338 * Only a single IOMAP_INLINE extent is allowed at the end of each file.
339 * Returns zero for success to complete the read, or the usual negative errno.
340 */
static int iomap_read_inline_data(const struct iomap_iter *iter,
342 struct folio *folio)
343 {
344 const struct iomap *iomap = iomap_iter_srcmap(iter);
345 size_t size = i_size_read(iter->inode) - iomap->offset;
346 size_t offset = offset_in_folio(folio, iomap->offset);
347
348 if (folio_test_uptodate(folio))
349 return 0;
350
351 if (WARN_ON_ONCE(size > iomap->length))
352 return -EIO;
353 if (offset > 0)
354 ifs_alloc(iter->inode, folio, iter->flags);
355
356 folio_fill_tail(folio, offset, iomap->inline_data, size);
357 iomap_set_range_uptodate(folio, offset, folio_size(folio) - offset);
358 return 0;
359 }
360
static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
362 loff_t pos)
363 {
364 const struct iomap *srcmap = iomap_iter_srcmap(iter);
365
366 return srcmap->type != IOMAP_MAPPED ||
367 (srcmap->flags & IOMAP_F_NEW) ||
368 pos >= i_size_read(iter->inode);
369 }
370
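/*
 * Handle one mapping's worth of a folio for a read: copy inline data, zero
 * blocks that need no I/O (e.g. holes or post-EOF ranges), and queue the
 * remaining blocks in the read bio. The return value tells the caller how
 * far to advance.
 */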
static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
372 struct iomap_readpage_ctx *ctx, loff_t offset)
373 {
374 const struct iomap *iomap = &iter->iomap;
375 loff_t pos = iter->pos + offset;
376 loff_t length = iomap_length(iter) - offset;
377 struct folio *folio = ctx->cur_folio;
378 struct iomap_folio_state *ifs;
379 loff_t orig_pos = pos;
380 size_t poff, plen;
381 sector_t sector;
382
383 if (iomap->type == IOMAP_INLINE)
384 return iomap_read_inline_data(iter, folio);
385
386 /* zero post-eof blocks as the page may be mapped */
387 ifs = ifs_alloc(iter->inode, folio, iter->flags);
388 iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen);
389 if (plen == 0)
390 goto done;
391
392 if (iomap_block_needs_zeroing(iter, pos)) {
393 folio_zero_range(folio, poff, plen);
394 iomap_set_range_uptodate(folio, poff, plen);
395 goto done;
396 }
397
398 ctx->cur_folio_in_bio = true;
399 if (ifs) {
400 spin_lock_irq(&ifs->state_lock);
401 ifs->read_bytes_pending += plen;
402 spin_unlock_irq(&ifs->state_lock);
403 }
404
405 sector = iomap_sector(iomap, pos);
406 if (!ctx->bio ||
407 bio_end_sector(ctx->bio) != sector ||
408 !bio_add_folio(ctx->bio, folio, plen, poff)) {
409 gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
410 gfp_t orig_gfp = gfp;
411 unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);
412
413 if (ctx->bio)
414 submit_bio(ctx->bio);
415
416 if (ctx->rac) /* same as readahead_gfp_mask */
417 gfp |= __GFP_NORETRY | __GFP_NOWARN;
418 ctx->bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs),
419 REQ_OP_READ, gfp);
420 /*
421 * If the bio_alloc fails, try it again for a single page to
422 * avoid having to deal with partial page reads. This emulates
423 * what do_mpage_read_folio does.
424 */
425 if (!ctx->bio) {
426 ctx->bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ,
427 orig_gfp);
428 }
429 if (ctx->rac)
430 ctx->bio->bi_opf |= REQ_RAHEAD;
431 ctx->bio->bi_iter.bi_sector = sector;
432 ctx->bio->bi_end_io = iomap_read_end_io;
433 bio_add_folio_nofail(ctx->bio, folio, plen, poff);
434 }
435
436 done:
437 /*
438 * Move the caller beyond our range so that it keeps making progress.
439 * For that, we have to include any leading non-uptodate ranges, but
440 * we can skip trailing ones as they will be handled in the next
441 * iteration.
442 */
443 return pos - orig_pos + plen;
444 }
445
int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops)
447 {
448 struct iomap_iter iter = {
449 .inode = folio->mapping->host,
450 .pos = folio_pos(folio),
451 .len = folio_size(folio),
452 };
453 struct iomap_readpage_ctx ctx = {
454 .cur_folio = folio,
455 };
456 int ret;
457
458 trace_iomap_readpage(iter.inode, 1);
459
460 while ((ret = iomap_iter(&iter, ops)) > 0)
461 iter.processed = iomap_readpage_iter(&iter, &ctx, 0);
462
463 if (ret < 0)
464 folio_set_error(folio);
465
466 if (ctx.bio) {
467 submit_bio(ctx.bio);
468 WARN_ON_ONCE(!ctx.cur_folio_in_bio);
469 } else {
470 WARN_ON_ONCE(ctx.cur_folio_in_bio);
471 folio_unlock(folio);
472 }
473
474 /*
475 * Just like mpage_readahead and block_read_full_folio, we always
476 * return 0 and just set the folio error flag on errors. This
477 * should be cleaned up throughout the stack eventually.
478 */
479 return 0;
480 }
481 EXPORT_SYMBOL_GPL(iomap_read_folio);
482
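/*
 * Process one mapping's worth of readahead, pulling the next folio from the
 * readahead control each time the previous one has been fully handled.
 */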
static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
484 struct iomap_readpage_ctx *ctx)
485 {
486 loff_t length = iomap_length(iter);
487 loff_t done, ret;
488
489 for (done = 0; done < length; done += ret) {
490 if (ctx->cur_folio &&
491 offset_in_folio(ctx->cur_folio, iter->pos + done) == 0) {
492 if (!ctx->cur_folio_in_bio)
493 folio_unlock(ctx->cur_folio);
494 ctx->cur_folio = NULL;
495 }
496 if (!ctx->cur_folio) {
497 ctx->cur_folio = readahead_folio(ctx->rac);
498 ctx->cur_folio_in_bio = false;
499 }
500 ret = iomap_readpage_iter(iter, ctx, done);
501 if (ret <= 0)
502 return ret;
503 }
504
505 return done;
506 }
507
508 /**
509 * iomap_readahead - Attempt to read pages from a file.
510 * @rac: Describes the pages to be read.
511 * @ops: The operations vector for the filesystem.
512 *
513 * This function is for filesystems to call to implement their readahead
514 * address_space operation.
515 *
516 * Context: The @ops callbacks may submit I/O (eg to read the addresses of
517 * blocks from disc), and may wait for it. The caller may be trying to
518 * access a different page, and so sleeping excessively should be avoided.
519 * It may allocate memory, but should avoid costly allocations. This
520 * function is called with memalloc_nofs set, so allocations will not cause
521 * the filesystem to be reentered.
522 */
void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
524 {
525 struct iomap_iter iter = {
526 .inode = rac->mapping->host,
527 .pos = readahead_pos(rac),
528 .len = readahead_length(rac),
529 };
530 struct iomap_readpage_ctx ctx = {
531 .rac = rac,
532 };
533
534 trace_iomap_readahead(rac->mapping->host, readahead_count(rac));
535
536 while (iomap_iter(&iter, ops) > 0)
537 iter.processed = iomap_readahead_iter(&iter, &ctx);
538
539 if (ctx.bio)
540 submit_bio(ctx.bio);
541 if (ctx.cur_folio) {
542 if (!ctx.cur_folio_in_bio)
543 folio_unlock(ctx.cur_folio);
544 }
545 }
546 EXPORT_SYMBOL_GPL(iomap_readahead);
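
/*
 * A typical caller (sketch only, names are illustrative): a filesystem points
 * its ->readahead address_space operation at a thin wrapper that supplies its
 * own iomap_ops, e.g.:
 *
 *	static void foo_readahead(struct readahead_control *rac)
 *	{
 *		iomap_readahead(rac, &foo_iomap_ops);
 *	}
 */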
547
548 /*
549 * iomap_is_partially_uptodate checks whether blocks within a folio are
550 * uptodate or not.
551 *
552 * Returns true if all blocks which correspond to the specified part
553 * of the folio are uptodate.
554 */
bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
556 {
557 struct iomap_folio_state *ifs = folio->private;
558 struct inode *inode = folio->mapping->host;
559 unsigned first, last, i;
560
561 if (!ifs)
562 return false;
563
564 /* Caller's range may extend past the end of this folio */
565 count = min(folio_size(folio) - from, count);
566
567 /* First and last blocks in range within folio */
568 first = from >> inode->i_blkbits;
569 last = (from + count - 1) >> inode->i_blkbits;
570
571 for (i = first; i <= last; i++)
572 if (!ifs_block_is_uptodate(ifs, i))
573 return false;
574 return true;
575 }
576 EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
577
578 /**
579 * iomap_get_folio - get a folio reference for writing
580 * @iter: iteration structure
581 * @pos: start offset of write
582 * @len: Suggested size of folio to create.
583 *
584 * Returns a locked reference to the folio at @pos, or an error pointer if the
585 * folio could not be obtained.
586 */
struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len)
588 {
589 fgf_t fgp = FGP_WRITEBEGIN | FGP_NOFS;
590
591 if (iter->flags & IOMAP_NOWAIT)
592 fgp |= FGP_NOWAIT;
593 fgp |= fgf_set_order(len);
594
595 return __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
596 fgp, mapping_gfp_mask(iter->inode->i_mapping));
597 }
598 EXPORT_SYMBOL_GPL(iomap_get_folio);
599
bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags)
601 {
602 trace_iomap_release_folio(folio->mapping->host, folio_pos(folio),
603 folio_size(folio));
604
605 /*
606 * If the folio is dirty, we refuse to release our metadata because
607 * it may be partially dirty. Once we track per-block dirty state,
608 * we can release the metadata if every block is dirty.
609 */
610 if (folio_test_dirty(folio))
611 return false;
612 ifs_free(folio);
613 return true;
614 }
615 EXPORT_SYMBOL_GPL(iomap_release_folio);
616
void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
618 {
619 trace_iomap_invalidate_folio(folio->mapping->host,
620 folio_pos(folio) + offset, len);
621
622 /*
623 * If we're invalidating the entire folio, clear the dirty state
624 * from it and release it to avoid unnecessary buildup of the LRU.
625 */
626 if (offset == 0 && len == folio_size(folio)) {
627 WARN_ON_ONCE(folio_test_writeback(folio));
628 folio_cancel_dirty(folio);
629 ifs_free(folio);
630 }
631 }
632 EXPORT_SYMBOL_GPL(iomap_invalidate_folio);
633
bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio)
635 {
636 struct inode *inode = mapping->host;
637 size_t len = folio_size(folio);
638
639 ifs_alloc(inode, folio, 0);
640 iomap_set_range_dirty(folio, 0, len);
641 return filemap_dirty_folio(mapping, folio);
642 }
643 EXPORT_SYMBOL_GPL(iomap_dirty_folio);
644
645 static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
647 {
648 loff_t i_size = i_size_read(inode);
649
650 /*
 * Only truncate newly allocated pages beyond EOF, even if the
652 * write started inside the existing inode size.
653 */
654 if (pos + len > i_size)
655 truncate_pagecache_range(inode, max(pos, i_size),
656 pos + len - 1);
657 }
658
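/* Synchronously read one range of the folio using an on-stack bio. */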
static int iomap_read_folio_sync(loff_t block_start, struct folio *folio,
660 size_t poff, size_t plen, const struct iomap *iomap)
661 {
662 struct bio_vec bvec;
663 struct bio bio;
664
665 bio_init(&bio, iomap->bdev, &bvec, 1, REQ_OP_READ);
666 bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
667 bio_add_folio_nofail(&bio, folio, plen, poff);
668 return submit_bio_wait(&bio);
669 }
670
static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
672 size_t len, struct folio *folio)
673 {
674 const struct iomap *srcmap = iomap_iter_srcmap(iter);
675 struct iomap_folio_state *ifs;
676 loff_t block_size = i_blocksize(iter->inode);
677 loff_t block_start = round_down(pos, block_size);
678 loff_t block_end = round_up(pos + len, block_size);
679 unsigned int nr_blocks = i_blocks_per_folio(iter->inode, folio);
680 size_t from = offset_in_folio(folio, pos), to = from + len;
681 size_t poff, plen;
682
683 /*
684 * If the write or zeroing completely overlaps the current folio, then
 * the entire folio will be dirtied, so there is no need for
686 * per-block state tracking structures to be attached to this folio.
687 * For the unshare case, we must read in the ondisk contents because we
688 * are not changing pagecache contents.
689 */
690 if (!(iter->flags & IOMAP_UNSHARE) && pos <= folio_pos(folio) &&
691 pos + len >= folio_pos(folio) + folio_size(folio))
692 return 0;
693
694 ifs = ifs_alloc(iter->inode, folio, iter->flags);
695 if ((iter->flags & IOMAP_NOWAIT) && !ifs && nr_blocks > 1)
696 return -EAGAIN;
697
698 if (folio_test_uptodate(folio))
699 return 0;
700 folio_clear_error(folio);
701
702 do {
703 iomap_adjust_read_range(iter->inode, folio, &block_start,
704 block_end - block_start, &poff, &plen);
705 if (plen == 0)
706 break;
707
708 if (!(iter->flags & IOMAP_UNSHARE) &&
709 (from <= poff || from >= poff + plen) &&
710 (to <= poff || to >= poff + plen))
711 continue;
712
713 if (iomap_block_needs_zeroing(iter, block_start)) {
714 if (WARN_ON_ONCE(iter->flags & IOMAP_UNSHARE))
715 return -EIO;
716 folio_zero_segments(folio, poff, from, to, poff + plen);
717 } else {
718 int status;
719
720 if (iter->flags & IOMAP_NOWAIT)
721 return -EAGAIN;
722
723 status = iomap_read_folio_sync(block_start, folio,
724 poff, plen, srcmap);
725 if (status)
726 return status;
727 }
728 iomap_set_range_uptodate(folio, poff, plen);
729 } while ((block_start += plen) < block_end);
730
731 return 0;
732 }
733
static struct folio *__iomap_get_folio(struct iomap_iter *iter, loff_t pos,
735 size_t len)
736 {
737 const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
738
739 if (folio_ops && folio_ops->get_folio)
740 return folio_ops->get_folio(iter, pos, len);
741 else
742 return iomap_get_folio(iter, pos, len);
743 }
744
static void __iomap_put_folio(struct iomap_iter *iter, loff_t pos, size_t ret,
746 struct folio *folio)
747 {
748 const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
749
750 if (folio_ops && folio_ops->put_folio) {
751 folio_ops->put_folio(iter->inode, pos, ret, folio);
752 } else {
753 folio_unlock(folio);
754 folio_put(folio);
755 }
756 }
757
static int iomap_write_begin_inline(const struct iomap_iter *iter,
759 struct folio *folio)
760 {
761 /* needs more work for the tailpacking case; disable for now */
762 if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0))
763 return -EIO;
764 return iomap_read_inline_data(iter, folio);
765 }
766
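/*
 * Get and prepare a locked folio for a buffered write at @pos: look the folio
 * up (allocating it if necessary), revalidate the cached iomap against it,
 * and bring any blocks that the write only partially covers uptodate.
 */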
static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
768 size_t len, struct folio **foliop)
769 {
770 const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
771 const struct iomap *srcmap = iomap_iter_srcmap(iter);
772 struct folio *folio;
773 int status = 0;
774
775 BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length);
776 if (srcmap != &iter->iomap)
777 BUG_ON(pos + len > srcmap->offset + srcmap->length);
778
779 if (fatal_signal_pending(current))
780 return -EINTR;
781
782 if (!mapping_large_folio_support(iter->inode->i_mapping))
783 len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));
784
785 folio = __iomap_get_folio(iter, pos, len);
786 if (IS_ERR(folio))
787 return PTR_ERR(folio);
788
789 /*
790 * Now we have a locked folio, before we do anything with it we need to
791 * check that the iomap we have cached is not stale. The inode extent
792 * mapping can change due to concurrent IO in flight (e.g.
793 * IOMAP_UNWRITTEN state can change and memory reclaim could have
794 * reclaimed a previously partially written page at this index after IO
795 * completion before this write reaches this file offset) and hence we
796 * could do the wrong thing here (zero a page range incorrectly or fail
797 * to zero) and corrupt data.
798 */
799 if (folio_ops && folio_ops->iomap_valid) {
800 bool iomap_valid = folio_ops->iomap_valid(iter->inode,
801 &iter->iomap);
802 if (!iomap_valid) {
803 iter->iomap.flags |= IOMAP_F_STALE;
804 status = 0;
805 goto out_unlock;
806 }
807 }
808
809 if (pos + len > folio_pos(folio) + folio_size(folio))
810 len = folio_pos(folio) + folio_size(folio) - pos;
811
812 if (srcmap->type == IOMAP_INLINE)
813 status = iomap_write_begin_inline(iter, folio);
814 else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
815 status = __block_write_begin_int(folio, pos, len, NULL, srcmap);
816 else
817 status = __iomap_write_begin(iter, pos, len, folio);
818
819 if (unlikely(status))
820 goto out_unlock;
821
822 *foliop = folio;
823 return 0;
824
825 out_unlock:
826 __iomap_put_folio(iter, pos, 0, folio);
827
828 return status;
829 }
830
static bool __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
832 size_t copied, struct folio *folio)
833 {
834 flush_dcache_folio(folio);
835
836 /*
837 * The blocks that were entirely written will now be uptodate, so we
838 * don't have to worry about a read_folio reading them and overwriting a
839 * partial write. However, if we've encountered a short write and only
840 * partially written into a block, it will not be marked uptodate, so a
841 * read_folio might come in and destroy our partial write.
842 *
843 * Do the simplest thing and just treat any short write to a
844 * non-uptodate page as a zero-length write, and force the caller to
845 * redo the whole thing.
846 */
847 if (unlikely(copied < len && !folio_test_uptodate(folio)))
848 return false;
849 iomap_set_range_uptodate(folio, offset_in_folio(folio, pos), len);
850 iomap_set_range_dirty(folio, offset_in_folio(folio, pos), copied);
851 filemap_dirty_folio(inode->i_mapping, folio);
852 return true;
853 }
854
static void iomap_write_end_inline(const struct iomap_iter *iter,
856 struct folio *folio, loff_t pos, size_t copied)
857 {
858 const struct iomap *iomap = &iter->iomap;
859 void *addr;
860
861 WARN_ON_ONCE(!folio_test_uptodate(folio));
862 BUG_ON(!iomap_inline_data_valid(iomap));
863
864 flush_dcache_folio(folio);
865 addr = kmap_local_folio(folio, pos);
866 memcpy(iomap_inline_data(iomap, pos), addr, copied);
867 kunmap_local(addr);
868
869 mark_inode_dirty(iter->inode);
870 }
871
872 /*
873 * Returns true if all copied bytes have been written to the pagecache,
874 * otherwise return false.
875 */
static bool iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
877 size_t copied, struct folio *folio)
878 {
879 const struct iomap *srcmap = iomap_iter_srcmap(iter);
880
881 if (srcmap->type == IOMAP_INLINE) {
882 iomap_write_end_inline(iter, folio, pos, copied);
883 return true;
884 }
885
886 if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
887 size_t bh_written;
888
889 bh_written = block_write_end(NULL, iter->inode->i_mapping, pos,
890 len, copied, &folio->page, NULL);
891 WARN_ON_ONCE(bh_written != copied && bh_written != 0);
892 return bh_written == copied;
893 }
894
895 return __iomap_write_end(iter->inode, pos, len, copied, folio);
896 }
897
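/*
 * Copy data from the iov_iter into the page cache one folio at a time,
 * shrinking the copy chunk size on short copies and stopping on errors.
 * Returns the number of bytes written or a negative errno.
 */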
static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
899 {
900 loff_t length = iomap_length(iter);
901 size_t chunk = PAGE_SIZE << MAX_PAGECACHE_ORDER;
902 loff_t pos = iter->pos;
903 ssize_t total_written = 0;
904 long status = 0;
905 struct address_space *mapping = iter->inode->i_mapping;
906 unsigned int bdp_flags = (iter->flags & IOMAP_NOWAIT) ? BDP_ASYNC : 0;
907
908 do {
909 struct folio *folio;
910 loff_t old_size;
911 size_t offset; /* Offset into folio */
912 size_t bytes; /* Bytes to write to folio */
913 size_t copied; /* Bytes copied from user */
914 size_t written; /* Bytes have been written */
915
916 bytes = iov_iter_count(i);
917 retry:
918 offset = pos & (chunk - 1);
919 bytes = min(chunk - offset, bytes);
920 status = balance_dirty_pages_ratelimited_flags(mapping,
921 bdp_flags);
922 if (unlikely(status))
923 break;
924
925 if (bytes > length)
926 bytes = length;
927
928 /*
929 * Bring in the user page that we'll copy from _first_.
930 * Otherwise there's a nasty deadlock on copying from the
931 * same page as we're writing to, without it being marked
932 * up-to-date.
933 *
934 * For async buffered writes the assumption is that the user
935 * page has already been faulted in. This can be optimized by
936 * faulting the user page.
937 */
938 if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
939 status = -EFAULT;
940 break;
941 }
942
943 status = iomap_write_begin(iter, pos, bytes, &folio);
944 if (unlikely(status)) {
945 iomap_write_failed(iter->inode, pos, bytes);
946 break;
947 }
948 if (iter->iomap.flags & IOMAP_F_STALE)
949 break;
950
951 offset = offset_in_folio(folio, pos);
952 if (bytes > folio_size(folio) - offset)
953 bytes = folio_size(folio) - offset;
954
955 if (mapping_writably_mapped(mapping))
956 flush_dcache_folio(folio);
957
958 copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
959 written = iomap_write_end(iter, pos, bytes, copied, folio) ?
960 copied : 0;
961
962 /*
963 * Update the in-memory inode size after copying the data into
964 * the page cache. It's up to the file system to write the
965 * updated size to disk, preferably after I/O completion so that
966 * no stale data is exposed. Only once that's done can we
967 * unlock and release the folio.
968 */
969 old_size = iter->inode->i_size;
970 if (pos + written > old_size) {
971 i_size_write(iter->inode, pos + written);
972 iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
973 }
974 __iomap_put_folio(iter, pos, written, folio);
975
976 if (old_size < pos)
977 pagecache_isize_extended(iter->inode, old_size, pos);
978
979 cond_resched();
980 if (unlikely(written == 0)) {
981 /*
982 * A short copy made iomap_write_end() reject the
983 * thing entirely. Might be memory poisoning
984 * halfway through, might be a race with munmap,
985 * might be severe memory pressure.
986 */
987 iomap_write_failed(iter->inode, pos, bytes);
988 iov_iter_revert(i, copied);
989
990 if (chunk > PAGE_SIZE)
991 chunk /= 2;
992 if (copied) {
993 bytes = copied;
994 goto retry;
995 }
996 } else {
997 pos += written;
998 total_written += written;
999 length -= written;
1000 }
1001 } while (iov_iter_count(i) && length);
1002
1003 if (status == -EAGAIN) {
1004 iov_iter_revert(i, total_written);
1005 return -EAGAIN;
1006 }
1007 return total_written ? total_written : status;
1008 }
1009
1010 ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
1012 const struct iomap_ops *ops)
1013 {
1014 struct iomap_iter iter = {
1015 .inode = iocb->ki_filp->f_mapping->host,
1016 .pos = iocb->ki_pos,
1017 .len = iov_iter_count(i),
1018 .flags = IOMAP_WRITE,
1019 };
1020 ssize_t ret;
1021
1022 if (iocb->ki_flags & IOCB_NOWAIT)
1023 iter.flags |= IOMAP_NOWAIT;
1024
1025 while ((ret = iomap_iter(&iter, ops)) > 0)
1026 iter.processed = iomap_write_iter(&iter, i);
1027
1028 if (unlikely(iter.pos == iocb->ki_pos))
1029 return ret;
1030 ret = iter.pos - iocb->ki_pos;
1031 iocb->ki_pos = iter.pos;
1032 return ret;
1033 }
1034 EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
1035
static int iomap_write_delalloc_ifs_punch(struct inode *inode,
1037 struct folio *folio, loff_t start_byte, loff_t end_byte,
1038 iomap_punch_t punch)
1039 {
1040 unsigned int first_blk, last_blk, i;
1041 loff_t last_byte;
1042 u8 blkbits = inode->i_blkbits;
1043 struct iomap_folio_state *ifs;
1044 int ret = 0;
1045
1046 /*
1047 * When we have per-block dirty tracking, there can be
1048 * blocks within a folio which are marked uptodate
1049 * but not dirty. In that case it is necessary to punch
1050 * out such blocks to avoid leaking any delalloc blocks.
1051 */
1052 ifs = folio->private;
1053 if (!ifs)
1054 return ret;
1055
1056 last_byte = min_t(loff_t, end_byte - 1,
1057 folio_pos(folio) + folio_size(folio) - 1);
1058 first_blk = offset_in_folio(folio, start_byte) >> blkbits;
1059 last_blk = offset_in_folio(folio, last_byte) >> blkbits;
1060 for (i = first_blk; i <= last_blk; i++) {
1061 if (!ifs_block_is_dirty(folio, ifs, i)) {
1062 ret = punch(inode, folio_pos(folio) + (i << blkbits),
1063 1 << blkbits);
1064 if (ret)
1065 return ret;
1066 }
1067 }
1068
1069 return ret;
1070 }
1071
1072
static int iomap_write_delalloc_punch(struct inode *inode, struct folio *folio,
1074 loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
1075 iomap_punch_t punch)
1076 {
1077 int ret = 0;
1078
1079 if (!folio_test_dirty(folio))
1080 return ret;
1081
1082 /* if dirty, punch up to offset */
1083 if (start_byte > *punch_start_byte) {
1084 ret = punch(inode, *punch_start_byte,
1085 start_byte - *punch_start_byte);
1086 if (ret)
1087 return ret;
1088 }
1089
1090 /* Punch non-dirty blocks within folio */
1091 ret = iomap_write_delalloc_ifs_punch(inode, folio, start_byte,
1092 end_byte, punch);
1093 if (ret)
1094 return ret;
1095
1096 /*
1097 * Make sure the next punch start is correctly bound to
1098 * the end of this data range, not the end of the folio.
1099 */
1100 *punch_start_byte = min_t(loff_t, end_byte,
1101 folio_pos(folio) + folio_size(folio));
1102
1103 return ret;
1104 }
1105
1106 /*
1107 * Scan the data range passed to us for dirty page cache folios. If we find a
1108 * dirty folio, punch out the preceding range and update the offset from which
1109 * the next punch will start from.
1110 *
1111 * We can punch out storage reservations under clean pages because they either
1112 * contain data that has been written back - in which case the delalloc punch
 * over that range is a no-op - or they were instantiated by read faults, in which case they
1114 * contain zeroes and we can remove the delalloc backing range and any new
1115 * writes to those pages will do the normal hole filling operation...
1116 *
 * This makes the logic simple: we only need to keep the delalloc extents
1118 * over the dirty ranges of the page cache.
1119 *
1120 * This function uses [start_byte, end_byte) intervals (i.e. open ended) to
1121 * simplify range iterations.
1122 */
static int iomap_write_delalloc_scan(struct inode *inode,
1124 loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
1125 iomap_punch_t punch)
1126 {
1127 while (start_byte < end_byte) {
1128 struct folio *folio;
1129 int ret;
1130
1131 /* grab locked page */
1132 folio = filemap_lock_folio(inode->i_mapping,
1133 start_byte >> PAGE_SHIFT);
1134 if (IS_ERR(folio)) {
1135 start_byte = ALIGN_DOWN(start_byte, PAGE_SIZE) +
1136 PAGE_SIZE;
1137 continue;
1138 }
1139
1140 ret = iomap_write_delalloc_punch(inode, folio, punch_start_byte,
1141 start_byte, end_byte, punch);
1142 if (ret) {
1143 folio_unlock(folio);
1144 folio_put(folio);
1145 return ret;
1146 }
1147
1148 /* move offset to start of next folio in range */
1149 start_byte = folio_next_index(folio) << PAGE_SHIFT;
1150 folio_unlock(folio);
1151 folio_put(folio);
1152 }
1153 return 0;
1154 }
1155
1156 /*
1157 * Punch out all the delalloc blocks in the range given except for those that
1158 * have dirty data still pending in the page cache - those are going to be
1159 * written and so must still retain the delalloc backing for writeback.
1160 *
 * As we are scanning the page cache for data, we don't need to reinvent the
1162 * wheel - mapping_seek_hole_data() does exactly what we need to identify the
1163 * start and end of data ranges correctly even for sub-folio block sizes. This
1164 * byte range based iteration is especially convenient because it means we
1165 * don't have to care about variable size folios, nor where the start or end of
1166 * the data range lies within a folio, if they lie within the same folio or even
1167 * if there are multiple discontiguous data ranges within the folio.
1168 *
1169 * It should be noted that mapping_seek_hole_data() is not aware of EOF, and so
1170 * can return data ranges that exist in the cache beyond EOF. e.g. a page fault
1171 * spanning EOF will initialise the post-EOF data to zeroes and mark it up to
1172 * date. A write page fault can then mark it dirty. If we then fail a write()
1173 * beyond EOF into that up to date cached range, we allocate a delalloc block
1174 * beyond EOF and then have to punch it out. Because the range is up to date,
1175 * mapping_seek_hole_data() will return it, and we will skip the punch because
 * the folio is dirty. This is incorrect - we always need to punch out delalloc
 * beyond EOF in this case as writeback will never write back and convert that
1178 * delalloc block beyond EOF. Hence we limit the cached data scan range to EOF,
1179 * resulting in always punching out the range from the EOF to the end of the
1180 * range the iomap spans.
1181 *
1182 * Intervals are of the form [start_byte, end_byte) (i.e. open ended) because it
1183 * matches the intervals returned by mapping_seek_hole_data(). i.e. SEEK_DATA
1184 * returns the start of a data range (start_byte), and SEEK_HOLE(start_byte)
1185 * returns the end of the data range (data_end). Using closed intervals would
1186 * require sprinkling this code with magic "+ 1" and "- 1" arithmetic and expose
1187 * the code to subtle off-by-one bugs....
1188 */
static int iomap_write_delalloc_release(struct inode *inode,
1190 loff_t start_byte, loff_t end_byte, iomap_punch_t punch)
1191 {
1192 loff_t punch_start_byte = start_byte;
1193 loff_t scan_end_byte = min(i_size_read(inode), end_byte);
1194 int error = 0;
1195
1196 /*
1197 * Lock the mapping to avoid races with page faults re-instantiating
1198 * folios and dirtying them via ->page_mkwrite whilst we walk the
1199 * cache and perform delalloc extent removal. Failing to do this can
1200 * leave dirty pages with no space reservation in the cache.
1201 */
1202 filemap_invalidate_lock(inode->i_mapping);
1203 while (start_byte < scan_end_byte) {
1204 loff_t data_end;
1205
1206 start_byte = mapping_seek_hole_data(inode->i_mapping,
1207 start_byte, scan_end_byte, SEEK_DATA);
1208 /*
1209 * If there is no more data to scan, all that is left is to
1210 * punch out the remaining range.
1211 */
1212 if (start_byte == -ENXIO || start_byte == scan_end_byte)
1213 break;
1214 if (start_byte < 0) {
1215 error = start_byte;
1216 goto out_unlock;
1217 }
1218 WARN_ON_ONCE(start_byte < punch_start_byte);
1219 WARN_ON_ONCE(start_byte > scan_end_byte);
1220
1221 /*
1222 * We find the end of this contiguous cached data range by
1223 * seeking from start_byte to the beginning of the next hole.
1224 */
1225 data_end = mapping_seek_hole_data(inode->i_mapping, start_byte,
1226 scan_end_byte, SEEK_HOLE);
1227 if (data_end < 0) {
1228 error = data_end;
1229 goto out_unlock;
1230 }
1231 WARN_ON_ONCE(data_end <= start_byte);
1232 WARN_ON_ONCE(data_end > scan_end_byte);
1233
1234 error = iomap_write_delalloc_scan(inode, &punch_start_byte,
1235 start_byte, data_end, punch);
1236 if (error)
1237 goto out_unlock;
1238
1239 /* The next data search starts at the end of this one. */
1240 start_byte = data_end;
1241 }
1242
1243 if (punch_start_byte < end_byte)
1244 error = punch(inode, punch_start_byte,
1245 end_byte - punch_start_byte);
1246 out_unlock:
1247 filemap_invalidate_unlock(inode->i_mapping);
1248 return error;
1249 }
1250
1251 /*
1252 * When a short write occurs, the filesystem may need to remove reserved space
 * that was allocated in ->iomap_begin from its ->iomap_end method. For
1254 * filesystems that use delayed allocation, we need to punch out delalloc
1255 * extents from the range that are not dirty in the page cache. As the write can
1256 * race with page faults, there can be dirty pages over the delalloc extent
1257 * outside the range of a short write but still within the delalloc extent
1258 * allocated for this iomap.
1259 *
1260 * This function uses [start_byte, end_byte) intervals (i.e. open ended) to
1261 * simplify range iterations.
1262 *
1263 * The punch() callback *must* only punch delalloc extents in the range passed
1264 * to it. It must skip over all other types of extents in the range and leave
1265 * them completely unchanged. It must do this punch atomically with respect to
1266 * other extent modifications.
1267 *
1268 * The punch() callback may be called with a folio locked to prevent writeback
1269 * extent allocation racing at the edge of the range we are currently punching.
1270 * The locked folio may or may not cover the range being punched, so it is not
1271 * safe for the punch() callback to lock folios itself.
1272 *
1273 * Lock order is:
1274 *
1275 * inode->i_rwsem (shared or exclusive)
1276 * inode->i_mapping->invalidate_lock (exclusive)
1277 * folio_lock()
1278 * ->punch
1279 * internal filesystem allocation lock
1280 */
int iomap_file_buffered_write_punch_delalloc(struct inode *inode,
1282 struct iomap *iomap, loff_t pos, loff_t length,
1283 ssize_t written, iomap_punch_t punch)
1284 {
1285 loff_t start_byte;
1286 loff_t end_byte;
1287 unsigned int blocksize = i_blocksize(inode);
1288
1289 if (iomap->type != IOMAP_DELALLOC)
1290 return 0;
1291
1292 /* If we didn't reserve the blocks, we're not allowed to punch them. */
1293 if (!(iomap->flags & IOMAP_F_NEW))
1294 return 0;
1295
1296 /*
1297 * start_byte refers to the first unused block after a short write. If
1298 * nothing was written, round offset down to point at the first block in
1299 * the range.
1300 */
1301 if (unlikely(!written))
1302 start_byte = round_down(pos, blocksize);
1303 else
1304 start_byte = round_up(pos + written, blocksize);
1305 end_byte = round_up(pos + length, blocksize);
1306
1307 /* Nothing to do if we've written the entire delalloc extent */
1308 if (start_byte >= end_byte)
1309 return 0;
1310
1311 return iomap_write_delalloc_release(inode, start_byte, end_byte,
1312 punch);
1313 }
1314 EXPORT_SYMBOL_GPL(iomap_file_buffered_write_punch_delalloc);
1315
static loff_t iomap_unshare_iter(struct iomap_iter *iter)
1317 {
1318 struct iomap *iomap = &iter->iomap;
1319 const struct iomap *srcmap = iomap_iter_srcmap(iter);
1320 loff_t pos = iter->pos;
1321 loff_t length = iomap_length(iter);
1322 loff_t written = 0;
1323
1324 /* don't bother with blocks that are not shared to start with */
1325 if (!(iomap->flags & IOMAP_F_SHARED))
1326 return length;
1327 /* don't bother with holes or unwritten extents */
1328 if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
1329 return length;
1330
1331 do {
1332 struct folio *folio;
1333 int status;
1334 size_t offset;
1335 size_t bytes = min_t(u64, SIZE_MAX, length);
1336 bool ret;
1337
1338 status = iomap_write_begin(iter, pos, bytes, &folio);
1339 if (unlikely(status))
1340 return status;
1341 if (iomap->flags & IOMAP_F_STALE)
1342 break;
1343
1344 offset = offset_in_folio(folio, pos);
1345 if (bytes > folio_size(folio) - offset)
1346 bytes = folio_size(folio) - offset;
1347
1348 ret = iomap_write_end(iter, pos, bytes, bytes, folio);
1349 __iomap_put_folio(iter, pos, bytes, folio);
1350 if (WARN_ON_ONCE(!ret))
1351 return -EIO;
1352
1353 cond_resched();
1354
1355 pos += bytes;
1356 written += bytes;
1357 length -= bytes;
1358
1359 balance_dirty_pages_ratelimited(iter->inode->i_mapping);
1360 } while (length > 0);
1361
1362 return written;
1363 }
1364
1365 int
iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
1367 const struct iomap_ops *ops)
1368 {
1369 struct iomap_iter iter = {
1370 .inode = inode,
1371 .pos = pos,
1372 .len = len,
1373 .flags = IOMAP_WRITE | IOMAP_UNSHARE,
1374 };
1375 int ret;
1376
1377 while ((ret = iomap_iter(&iter, ops)) > 0)
1378 iter.processed = iomap_unshare_iter(&iter);
1379 return ret;
1380 }
1381 EXPORT_SYMBOL_GPL(iomap_file_unshare);
1382
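/*
 * Zero the page cache over the current mapping and dirty the folios so that
 * the zeroed data makes it to disk through the normal writeback path.
 */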
static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
1384 {
1385 const struct iomap *srcmap = iomap_iter_srcmap(iter);
1386 loff_t pos = iter->pos;
1387 loff_t length = iomap_length(iter);
1388 loff_t written = 0;
1389
1390 /* already zeroed? we're done. */
1391 if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
1392 return length;
1393
1394 do {
1395 struct folio *folio;
1396 int status;
1397 size_t offset;
1398 size_t bytes = min_t(u64, SIZE_MAX, length);
1399 bool ret;
1400
1401 status = iomap_write_begin(iter, pos, bytes, &folio);
1402 if (status)
1403 return status;
1404 if (iter->iomap.flags & IOMAP_F_STALE)
1405 break;
1406
1407 offset = offset_in_folio(folio, pos);
1408 if (bytes > folio_size(folio) - offset)
1409 bytes = folio_size(folio) - offset;
1410
1411 folio_zero_range(folio, offset, bytes);
1412 folio_mark_accessed(folio);
1413
1414 ret = iomap_write_end(iter, pos, bytes, bytes, folio);
1415 __iomap_put_folio(iter, pos, bytes, folio);
1416 if (WARN_ON_ONCE(!ret))
1417 return -EIO;
1418
1419 pos += bytes;
1420 length -= bytes;
1421 written += bytes;
1422 } while (length > 0);
1423
1424 if (did_zero)
1425 *did_zero = true;
1426 return written;
1427 }
1428
1429 int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
1431 const struct iomap_ops *ops)
1432 {
1433 struct iomap_iter iter = {
1434 .inode = inode,
1435 .pos = pos,
1436 .len = len,
1437 .flags = IOMAP_ZERO,
1438 };
1439 int ret;
1440
1441 while ((ret = iomap_iter(&iter, ops)) > 0)
1442 iter.processed = iomap_zero_iter(&iter, did_zero);
1443 return ret;
1444 }
1445 EXPORT_SYMBOL_GPL(iomap_zero_range);
1446
1447 int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
1449 const struct iomap_ops *ops)
1450 {
1451 unsigned int blocksize = i_blocksize(inode);
1452 unsigned int off = pos & (blocksize - 1);
1453
1454 /* Block boundary? Nothing to do */
1455 if (!off)
1456 return 0;
1457 return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
1458 }
1459 EXPORT_SYMBOL_GPL(iomap_truncate_page);
1460
static loff_t iomap_folio_mkwrite_iter(struct iomap_iter *iter,
1462 struct folio *folio)
1463 {
1464 loff_t length = iomap_length(iter);
1465 int ret;
1466
1467 if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) {
1468 ret = __block_write_begin_int(folio, iter->pos, length, NULL,
1469 &iter->iomap);
1470 if (ret)
1471 return ret;
1472 block_commit_write(&folio->page, 0, length);
1473 } else {
1474 WARN_ON_ONCE(!folio_test_uptodate(folio));
1475 folio_mark_dirty(folio);
1476 }
1477
1478 return length;
1479 }
1480
vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
1482 {
1483 struct iomap_iter iter = {
1484 .inode = file_inode(vmf->vma->vm_file),
1485 .flags = IOMAP_WRITE | IOMAP_FAULT,
1486 };
1487 struct folio *folio = page_folio(vmf->page);
1488 ssize_t ret;
1489
1490 folio_lock(folio);
1491 ret = folio_mkwrite_check_truncate(folio, iter.inode);
1492 if (ret < 0)
1493 goto out_unlock;
1494 iter.pos = folio_pos(folio);
1495 iter.len = ret;
1496 while ((ret = iomap_iter(&iter, ops)) > 0)
1497 iter.processed = iomap_folio_mkwrite_iter(&iter, folio);
1498
1499 if (ret < 0)
1500 goto out_unlock;
1501 folio_wait_stable(folio);
1502 return VM_FAULT_LOCKED;
1503 out_unlock:
1504 folio_unlock(folio);
1505 return vmf_fs_error(ret);
1506 }
1507 EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
1508
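/*
 * Account completion of @len bytes of writeback against the folio and end
 * folio writeback once no bytes remain outstanding.
 */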
static void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
1510 size_t len)
1511 {
1512 struct iomap_folio_state *ifs = folio->private;
1513
1514 WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !ifs);
1515 WARN_ON_ONCE(ifs && atomic_read(&ifs->write_bytes_pending) <= 0);
1516
1517 if (!ifs || atomic_sub_and_test(len, &ifs->write_bytes_pending))
1518 folio_end_writeback(folio);
1519 }
1520
1521 /*
1522 * We're now finished for good with this ioend structure. Update the page
1523 * state, release holds on bios, and finally free up memory. Do not use the
1524 * ioend after this.
1525 */
1526 static u32
iomap_finish_ioend(struct iomap_ioend *ioend, int error)
1528 {
1529 struct inode *inode = ioend->io_inode;
1530 struct bio *bio = &ioend->io_bio;
1531 struct folio_iter fi;
1532 u32 folio_count = 0;
1533
1534 if (error) {
1535 mapping_set_error(inode->i_mapping, error);
1536 if (!bio_flagged(bio, BIO_QUIET)) {
1537 pr_err_ratelimited(
1538 "%s: writeback error on inode %lu, offset %lld, sector %llu",
1539 inode->i_sb->s_id, inode->i_ino,
1540 ioend->io_offset, ioend->io_sector);
1541 }
1542 }
1543
1544 /* walk all folios in bio, ending page IO on them */
1545 bio_for_each_folio_all(fi, bio) {
1546 if (error)
1547 folio_set_error(fi.folio);
1548 iomap_finish_folio_write(inode, fi.folio, fi.length);
1549 folio_count++;
1550 }
1551
1552 bio_put(bio); /* frees the ioend */
1553 return folio_count;
1554 }
1555
1556 /*
1557 * Ioend completion routine for merged bios. This can only be called from task
 * contexts as merged ioends can be of unbounded length. Hence we have to break up
1559 * the writeback completions into manageable chunks to avoid long scheduler
1560 * holdoffs. We aim to keep scheduler holdoffs down below 10ms so that we get
1561 * good batch processing throughput without creating adverse scheduler latency
1562 * conditions.
1563 */
1564 void
iomap_finish_ioends(struct iomap_ioend *ioend, int error)
1566 {
1567 struct list_head tmp;
1568 u32 completions;
1569
1570 might_sleep();
1571
1572 list_replace_init(&ioend->io_list, &tmp);
1573 completions = iomap_finish_ioend(ioend, error);
1574
1575 while (!list_empty(&tmp)) {
1576 if (completions > IOEND_BATCH_SIZE * 8) {
1577 cond_resched();
1578 completions = 0;
1579 }
1580 ioend = list_first_entry(&tmp, struct iomap_ioend, io_list);
1581 list_del_init(&ioend->io_list);
1582 completions += iomap_finish_ioend(ioend, error);
1583 }
1584 }
1585 EXPORT_SYMBOL_GPL(iomap_finish_ioends);
1586
1587 /*
1588 * We can merge two adjacent ioends if they have the same set of work to do.
1589 */
1590 static bool
iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next)
1592 {
1593 if (ioend->io_bio.bi_status != next->io_bio.bi_status)
1594 return false;
1595 if ((ioend->io_flags & IOMAP_F_SHARED) ^
1596 (next->io_flags & IOMAP_F_SHARED))
1597 return false;
1598 if ((ioend->io_type == IOMAP_UNWRITTEN) ^
1599 (next->io_type == IOMAP_UNWRITTEN))
1600 return false;
1601 if (ioend->io_offset + ioend->io_size != next->io_offset)
1602 return false;
1603 /*
1604 * Do not merge physically discontiguous ioends. The filesystem
1605 * completion functions will have to iterate the physical
1606 * discontiguities even if we merge the ioends at a logical level, so
1607 * we don't gain anything by merging physical discontiguities here.
1608 *
1609 * We cannot use bio->bi_iter.bi_sector here as it is modified during
1610 * submission so does not point to the start sector of the bio at
1611 * completion.
1612 */
1613 if (ioend->io_sector + (ioend->io_size >> 9) != next->io_sector)
1614 return false;
1615 return true;
1616 }
1617
1618 void
iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends)
1620 {
1621 struct iomap_ioend *next;
1622
1623 INIT_LIST_HEAD(&ioend->io_list);
1624
1625 while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend,
1626 io_list))) {
1627 if (!iomap_ioend_can_merge(ioend, next))
1628 break;
1629 list_move_tail(&next->io_list, &ioend->io_list);
1630 ioend->io_size += next->io_size;
1631 }
1632 }
1633 EXPORT_SYMBOL_GPL(iomap_ioend_try_merge);
1634
1635 static int
iomap_ioend_compare(void *priv, const struct list_head *a,
1637 const struct list_head *b)
1638 {
1639 struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list);
1640 struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list);
1641
1642 if (ia->io_offset < ib->io_offset)
1643 return -1;
1644 if (ia->io_offset > ib->io_offset)
1645 return 1;
1646 return 0;
1647 }
1648
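/*
 * Sort a list of ioends by ascending file offset.  Completion callers sort
 * before calling iomap_ioend_try_merge() so that logically contiguous
 * ioends end up adjacent in the list and can be merged.
 */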
1649 void
1650 iomap_sort_ioends(struct list_head *ioend_list)
1651 {
1652 list_sort(NULL, ioend_list, iomap_ioend_compare);
1653 }
1654 EXPORT_SYMBOL_GPL(iomap_sort_ioends);
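/*
 * Example of how the three exported helpers above fit together.  This is an
 * illustrative sketch only, not code from this file; myfs_finish_ioends()
 * and the list it drains are hypothetical, but the call sequence mirrors how
 * filesystem completion workqueues typically drive the ioend helpers:
 *
 *	static void myfs_finish_ioends(struct list_head *completed, int error)
 *	{
 *		struct iomap_ioend *ioend;
 *
 *		// Sort by file offset, then merge runs of compatible ioends so
 *		// that each iomap_finish_ioends() call covers a larger range.
 *		iomap_sort_ioends(completed);
 *		while ((ioend = list_first_entry_or_null(completed,
 *				struct iomap_ioend, io_list))) {
 *			list_del_init(&ioend->io_list);
 *			iomap_ioend_try_merge(ioend, completed);
 *			iomap_finish_ioends(ioend, error);
 *		}
 *	}
 */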
1655
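/*
 * I/O completion handler for writeback bios: recover the ioend embedded in
 * the bio and finish it with the block layer status translated to an errno.
 */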
1656 static void iomap_writepage_end_bio(struct bio *bio)
1657 {
1658 iomap_finish_ioend(iomap_ioend_from_bio(bio),
1659 blk_status_to_errno(bio->bi_status));
1660 }
1661
1662 /*
1663 * Submit the final bio for an ioend.
1664 *
1665 * If @error is non-zero, it means that we have a situation where some part of
1666 * the submission process has failed after we've marked pages for writeback.
1667 * We cannot cancel the ioend directly in that case, so call the bio end I/O handler
1668 * with the error status here to run the normal I/O completion handler to clear
1669 * the writeback bit and let the file system process the errors.
1670 */
1671 static int iomap_submit_ioend(struct iomap_writepage_ctx *wpc, int error)
1672 {
1673 if (!wpc->ioend)
1674 return error;
1675
1676 /*
1677 * Let the file systems prepare the I/O submission and hook in an I/O
1678 * completion handler. This also needs to happen after a failure so
1679 * that the file system end I/O handler still gets called in order
1680 * to clean up.
1681 */
1682 if (wpc->ops->prepare_ioend)
1683 error = wpc->ops->prepare_ioend(wpc->ioend, error);
1684
1685 if (error) {
1686 wpc->ioend->io_bio.bi_status = errno_to_blk_status(error);
1687 bio_endio(&wpc->ioend->io_bio);
1688 } else {
1689 submit_bio(&wpc->ioend->io_bio);
1690 }
1691
1692 wpc->ioend = NULL;
1693 return error;
1694 }
1695
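/*
 * Allocate a new ioend for writeback starting at @pos.  The ioend is
 * embedded in a bio allocated from iomap_ioend_bioset and is initialised
 * from the current mapping in @wpc; the caller grows it one folio at a
 * time via iomap_add_to_ioend().
 */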
1696 static struct iomap_ioend *iomap_alloc_ioend(struct iomap_writepage_ctx *wpc,
1697 struct writeback_control *wbc, struct inode *inode, loff_t pos)
1698 {
1699 struct iomap_ioend *ioend;
1700 struct bio *bio;
1701
1702 bio = bio_alloc_bioset(wpc->iomap.bdev, BIO_MAX_VECS,
1703 REQ_OP_WRITE | wbc_to_write_flags(wbc),
1704 GFP_NOFS, &iomap_ioend_bioset);
1705 bio->bi_iter.bi_sector = iomap_sector(&wpc->iomap, pos);
1706 bio->bi_end_io = iomap_writepage_end_bio;
1707 wbc_init_bio(wbc, bio);
1708 bio->bi_write_hint = inode->i_write_hint;
1709
1710 ioend = iomap_ioend_from_bio(bio);
1711 INIT_LIST_HEAD(&ioend->io_list);
1712 ioend->io_type = wpc->iomap.type;
1713 ioend->io_flags = wpc->iomap.flags;
1714 ioend->io_inode = inode;
1715 ioend->io_size = 0;
1716 ioend->io_offset = pos;
1717 ioend->io_sector = bio->bi_iter.bi_sector;
1718
1719 wpc->nr_folios = 0;
1720 return ioend;
1721 }
1722
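/*
 * Return true if the blocks at @pos can be appended to the ioend currently
 * cached in @wpc: the shared flag and mapping type must match, @pos must be
 * both logically and physically contiguous with the end of the ioend, and
 * the ioend must not already have reached the folio batch limit.
 */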
1723 static bool iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t pos)
1724 {
1725 if ((wpc->iomap.flags & IOMAP_F_SHARED) !=
1726 (wpc->ioend->io_flags & IOMAP_F_SHARED))
1727 return false;
1728 if (wpc->iomap.type != wpc->ioend->io_type)
1729 return false;
1730 if (pos != wpc->ioend->io_offset + wpc->ioend->io_size)
1731 return false;
1732 if (iomap_sector(&wpc->iomap, pos) !=
1733 bio_end_sector(&wpc->ioend->io_bio))
1734 return false;
1735 /*
1736 * Limit ioend bio chain lengths to minimise IO completion latency. This
1737 * also prevents long tight loops ending page writeback on all the
1738 * folios in the ioend.
1739 */
1740 if (wpc->nr_folios >= IOEND_BATCH_SIZE)
1741 return false;
1742 return true;
1743 }
1744
1745 /*
1746 * Test to see if we have an existing ioend structure that we could append to
1747 * first; otherwise finish off the current ioend and start another.
1748 *
1749 * If a new ioend is created and cached, the old ioend is submitted to the block
1750 * layer instantly. Batching optimisations are provided by higher level block
1751 * plugging.
1752 *
1753 * At the end of a writeback pass, there will be a cached ioend remaining on the
1754 * writepage context that the caller will need to submit.
1755 */
1756 static int iomap_add_to_ioend(struct iomap_writepage_ctx *wpc,
1757 struct writeback_control *wbc, struct folio *folio,
1758 struct inode *inode, loff_t pos, unsigned len)
1759 {
1760 struct iomap_folio_state *ifs = folio->private;
1761 size_t poff = offset_in_folio(folio, pos);
1762 int error;
1763
1764 if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, pos)) {
1765 new_ioend:
1766 error = iomap_submit_ioend(wpc, 0);
1767 if (error)
1768 return error;
1769 wpc->ioend = iomap_alloc_ioend(wpc, wbc, inode, pos);
1770 }
1771
1772 if (!bio_add_folio(&wpc->ioend->io_bio, folio, len, poff))
1773 goto new_ioend;
1774
1775 if (ifs)
1776 atomic_add(len, &ifs->write_bytes_pending);
1777 wpc->ioend->io_size += len;
1778 wbc_account_cgroup_owner(wbc, &folio->page, len);
1779 return 0;
1780 }
1781
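/*
 * Map a dirty range of the folio and queue the resulting blocks for
 * writeback.  ->map_blocks is called repeatedly until the whole
 * [pos, pos + dirty_len) range is covered, adding each non-hole mapping to
 * an ioend and bumping @count for every range queued for I/O.
 */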
1782 static int iomap_writepage_map_blocks(struct iomap_writepage_ctx *wpc,
1783 struct writeback_control *wbc, struct folio *folio,
1784 struct inode *inode, u64 pos, unsigned dirty_len,
1785 unsigned *count)
1786 {
1787 int error;
1788
1789 do {
1790 unsigned map_len;
1791
1792 error = wpc->ops->map_blocks(wpc, inode, pos, dirty_len);
1793 if (error)
1794 break;
1795 trace_iomap_writepage_map(inode, pos, dirty_len, &wpc->iomap);
1796
1797 map_len = min_t(u64, dirty_len,
1798 wpc->iomap.offset + wpc->iomap.length - pos);
1799 WARN_ON_ONCE(!folio->private && map_len < dirty_len);
1800
1801 switch (wpc->iomap.type) {
1802 case IOMAP_INLINE:
1803 WARN_ON_ONCE(1);
1804 error = -EIO;
1805 break;
1806 case IOMAP_HOLE:
1807 break;
1808 default:
1809 error = iomap_add_to_ioend(wpc, wbc, folio, inode, pos,
1810 map_len);
1811 if (!error)
1812 (*count)++;
1813 break;
1814 }
1815 dirty_len -= map_len;
1816 pos += map_len;
1817 } while (dirty_len && !error);
1818
1819 /*
1820 * We cannot cancel the ioend directly here on error. We may have
1821 * already set other pages under writeback and hence we have to run I/O
1822 * completion to mark the error state of the pages under writeback
1823 * appropriately.
1824 *
1825 * Just let the file system know what portion of the folio failed to
1826 * map.
1827 */
1828 if (error && wpc->ops->discard_folio)
1829 wpc->ops->discard_folio(folio, pos);
1830 return error;
1831 }
1832
1833 /*
1834 * Check interaction of the folio with the file end.
1835 *
1836 * If the folio is entirely beyond i_size, return false. If it straddles
1837 * i_size, adjust end_pos and zero all data beyond i_size.
1838 */
1839 static bool iomap_writepage_handle_eof(struct folio *folio, struct inode *inode,
1840 u64 *end_pos)
1841 {
1842 u64 isize = i_size_read(inode);
1843
1844 if (*end_pos > isize) {
1845 size_t poff = offset_in_folio(folio, isize);
1846 pgoff_t end_index = isize >> PAGE_SHIFT;
1847
1848 /*
1849 * If the folio is entirely outside of i_size, skip it.
1850 *
1851 * This can happen due to a truncate operation that is in
1852 * progress and in that case truncate will finish it off once
1853 * we've dropped the folio lock.
1854 *
1855 * Note that the pgoff_t used for end_index is an unsigned long.
1856 * If the given offset is greater than 16TB on a 32-bit system,
1857 * then if we checked if the folio is fully outside i_size with
1858 * "if (folio->index >= end_index + 1)", "end_index + 1" would
1859 * overflow and evaluate to 0. Hence this folio would be
1860 * redirtied and written out repeatedly, which would result in
1861 * an infinite loop; the user program performing this operation
1862 * would hang. Instead, we can detect this situation by
1863 * checking if the folio is totally beyond i_size or if its
1864 * offset is just equal to the EOF.
1865 */
1866 if (folio->index > end_index ||
1867 (folio->index == end_index && poff == 0))
1868 return false;
1869
1870 /*
1871 * The folio straddles i_size.
1872 *
1873 * It must be zeroed out on each and every writepage invocation
1874 * because it may be mmapped:
1875 *
1876 * A file is mapped in multiples of the page size. For a
1877 * file that is not a multiple of the page size, the
1878 * remaining memory is zeroed when mapped, and writes to that
1879 * region are not written out to the file.
1880 *
1881 * Also adjust the writeback range to skip all blocks entirely
1882 * beyond i_size.
1883 */
1884 folio_zero_segment(folio, poff, folio_size(folio));
1885 *end_pos = round_up(isize, i_blocksize(inode));
1886 }
1887
1888 return true;
1889 }
1890
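/*
 * Write back a single locked folio: handle the EOF corner cases, mark the
 * folio under writeback, walk its dirty ranges and add them to ioends, and
 * clear the per-block dirty state.  The folio is unlocked before returning,
 * and the writeback bit is cleared here if no blocks were submitted or all
 * submitted I/O has already completed (otherwise the completion handler
 * clears it).
 */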
1891 static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
1892 struct writeback_control *wbc, struct folio *folio)
1893 {
1894 struct iomap_folio_state *ifs = folio->private;
1895 struct inode *inode = folio->mapping->host;
1896 u64 pos = folio_pos(folio);
1897 u64 end_pos = pos + folio_size(folio);
1898 unsigned count = 0;
1899 int error = 0;
1900 u32 rlen;
1901
1902 WARN_ON_ONCE(!folio_test_locked(folio));
1903 WARN_ON_ONCE(folio_test_dirty(folio));
1904 WARN_ON_ONCE(folio_test_writeback(folio));
1905
1906 trace_iomap_writepage(inode, pos, folio_size(folio));
1907
1908 if (!iomap_writepage_handle_eof(folio, inode, &end_pos)) {
1909 folio_unlock(folio);
1910 return 0;
1911 }
1912 WARN_ON_ONCE(end_pos <= pos);
1913
1914 if (i_blocks_per_folio(inode, folio) > 1) {
1915 if (!ifs) {
1916 ifs = ifs_alloc(inode, folio, 0);
1917 iomap_set_range_dirty(folio, 0, end_pos - pos);
1918 }
1919
1920 /*
1921 * Keep the I/O completion handler from clearing the writeback
1922 * bit until we have submitted all blocks by adding a bias to
1923 * ifs->write_bytes_pending, which is dropped after submitting
1924 * all blocks.
1925 */
1926 WARN_ON_ONCE(atomic_read(&ifs->write_bytes_pending) != 0);
1927 atomic_inc(&ifs->write_bytes_pending);
1928 }
1929
1930 /*
1931 * Set the writeback bit ASAP, as the I/O completion for the single
1932 * block per folio case happens as soon as we submit the bio.
1933 */
1934 folio_start_writeback(folio);
1935
1936 /*
1937 * Walk through the folio to find dirty areas to write back.
1938 */
1939 while ((rlen = iomap_find_dirty_range(folio, &pos, end_pos))) {
1940 error = iomap_writepage_map_blocks(wpc, wbc, folio, inode,
1941 pos, rlen, &count);
1942 if (error)
1943 break;
1944 pos += rlen;
1945 }
1946
1947 if (count)
1948 wpc->nr_folios++;
1949
1950 /*
1951 * We can have dirty bits set past end of file in the page_mkwrite path
1952 * while mapping the last partial folio. Hence it's better to clear
1953 * all the dirty bits in the folio here.
1954 */
1955 iomap_clear_range_dirty(folio, 0, folio_size(folio));
1956
1957 /*
1958 * Usually the writeback bit is cleared by the I/O completion handler.
1959 * But we may end up either not actually writing any blocks, or (when
1960 * there are multiple blocks in a folio) all I/O might have finished
1961 * already at this point. In that case we need to clear the writeback
1962 * bit ourselves right after unlocking the page.
1963 */
1964 folio_unlock(folio);
1965 if (ifs) {
1966 if (atomic_dec_and_test(&ifs->write_bytes_pending))
1967 folio_end_writeback(folio);
1968 } else {
1969 if (!count)
1970 folio_end_writeback(folio);
1971 }
1972 mapping_set_error(inode->i_mapping, error);
1973 return error;
1974 }
1975
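/*
 * Write back the dirty folios of an address_space using the iomap
 * infrastructure.  The caller passes a writepage context carrying the
 * filesystem's writeback_ops; any ioend left cached on the context after
 * the folio walk is submitted here.
 */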
1976 int
1977 iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
1978 struct iomap_writepage_ctx *wpc,
1979 const struct iomap_writeback_ops *ops)
1980 {
1981 struct folio *folio = NULL;
1982 int error;
1983
1984 /*
1985 * Writeback from reclaim context should never happen except in the case
1986 * of a VM regression, so warn about it and refuse to write the data.
1987 */
1988 if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC | PF_KSWAPD)) ==
1989 PF_MEMALLOC))
1990 return -EIO;
1991
1992 wpc->ops = ops;
1993 while ((folio = writeback_iter(mapping, wbc, folio, &error)))
1994 error = iomap_writepage_map(wpc, wbc, folio);
1995 return iomap_submit_ioend(wpc, error);
1996 }
1997 EXPORT_SYMBOL_GPL(iomap_writepages);
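/*
 * Illustrative sketch (not code from this file): a filesystem typically
 * calls iomap_writepages() from its ->writepages method, embedding the
 * generic writepage context in its own.  The myfs_* names below are
 * hypothetical; only the iomap_* calls are real:
 *
 *	struct myfs_writepage_ctx {
 *		struct iomap_writepage_ctx ctx;
 *		// filesystem-private state used by ->map_blocks goes here
 *	};
 *
 *	static int myfs_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		struct myfs_writepage_ctx wpc = { };
 *
 *		return iomap_writepages(mapping, wbc, &wpc.ctx,
 *				&myfs_writeback_ops);
 *	}
 */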
1998
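/*
 * Set up the bioset used for writeback ioends.  The front padding places a
 * struct iomap_ioend ahead of every bio allocated from it, so the ioend can
 * be recovered from the bio at completion time.
 */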
1999 static int __init iomap_init(void)
2000 {
2001 return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
2002 offsetof(struct iomap_ioend, io_bio),
2003 BIOSET_NEED_BVECS);
2004 }
2005 fs_initcall(iomap_init);
2006