// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include "trace.h"

/*
 * Execute an iomap write on a segment of the mapping that spans a
 * contiguous range of pages that have identical block mapping state.
 *
 * This avoids the need to map pages individually, do individual allocations
 * for each page and, most importantly, avoids the need for filesystem-specific
 * locking per page.  Instead, all the operations are amortised over the
 * entire range of pages.  It is assumed that the filesystems will lock
 * whatever resources they require in the iomap_begin call, and release them
 * in the iomap_end call.  A disabled sketch of such an ops implementation
 * and of a caller follows this function.
 */
loff_t
iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
		const struct iomap_ops *ops, void *data, iomap_actor_t actor)
{
	struct iomap iomap = { .type = IOMAP_HOLE };
	struct iomap srcmap = { .type = IOMAP_HOLE };
	loff_t written = 0, ret;
	u64 end;

	trace_iomap_apply(inode, pos, length, flags, ops, actor, _RET_IP_);

	/*
	 * Need to map a range from start position for length bytes. This can
	 * span multiple pages - it is only guaranteed to return a range of a
	 * single type of pages (e.g. all into a hole, all mapped or all
	 * unwritten). Failure at this point has nothing to undo.
	 *
	 * If allocation is required for this range, reserve the space now so
	 * that the allocation is guaranteed to succeed later on. Once we copy
	 * the data into the page cache pages, we cannot fail, otherwise we
	 * would expose transient stale data. If the reserve fails, we can
	 * safely back out at this point as there is nothing to undo (see the
	 * disabled iomap_begin sketch after this function).
	 */
	ret = ops->iomap_begin(inode, pos, length, flags, &iomap, &srcmap);
	if (ret)
		return ret;
	if (WARN_ON(iomap.offset > pos)) {
		written = -EIO;
		goto out;
	}
	if (WARN_ON(iomap.length == 0)) {
		written = -EIO;
		goto out;
	}

	trace_iomap_apply_dstmap(inode, &iomap);
	if (srcmap.type != IOMAP_HOLE)
		trace_iomap_apply_srcmap(inode, &srcmap);

	/*
	 * Cut down the length to the one actually provided by the filesystem,
	 * as it might not be able to give us the whole size that we requested.
	 */
	end = iomap.offset + iomap.length;
	if (srcmap.type != IOMAP_HOLE)
		end = min(end, srcmap.offset + srcmap.length);
	if (pos + length > end)
		length = end - pos;

	/*
	 * Now that we have guaranteed that the space allocation will succeed,
	 * we can do the copy-in page by page without having to worry about
	 * failures exposing transient data.
	 *
	 * To support COW operations, we read in data for partial blocks from
	 * the srcmap if the file system filled it in.  In that case the
	 * length needs to be limited to the earlier of the ends of the two
	 * iomaps.  If the file system did not provide a srcmap we pass the
	 * normal iomap to the actors so that they don't need special handling
	 * for the two cases (the disabled actor sketch at the end of this
	 * file shows the convention).
	 */
	written = actor(inode, pos, length, data, &iomap,
			srcmap.type != IOMAP_HOLE ? &srcmap : &iomap);

out:
	/*
	 * Now the data has been copied, commit the range we've copied.  This
	 * should not fail unless the filesystem has had a fatal error.
	 */
	if (ops->iomap_end) {
		ret = ops->iomap_end(inode, pos, length,
				     written > 0 ? written : 0,
				     flags, &iomap);
	}

	return written ? written : ret;
}
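
/*
 * A minimal, disabled sketch of the contract described above; it is not
 * part of this file.  The "example_fs_*" names and the single-block
 * mapping below are hypothetical: a real filesystem would take its own
 * locks and reserve or allocate space in ->iomap_begin, and release them
 * (trimming any reservation beyond "written") in ->iomap_end.
 */
#if 0
static int example_fs_iomap_begin(struct inode *inode, loff_t pos,
		loff_t length, unsigned flags, struct iomap *iomap,
		struct iomap *srcmap)
{
	/*
	 * Lock and reserve here so that the copy-in performed by the actor
	 * cannot fail for lack of space later on.
	 */
	iomap->offset = round_down(pos, i_blocksize(inode));
	iomap->length = i_blocksize(inode);
	iomap->type = IOMAP_MAPPED;	/* or IOMAP_HOLE, IOMAP_UNWRITTEN, ... */
	iomap->addr = 0;		/* disk byte address, fs specific */
	iomap->bdev = inode->i_sb->s_bdev;
	return 0;
}

static int example_fs_iomap_end(struct inode *inode, loff_t pos,
		loff_t length, ssize_t written, unsigned flags,
		struct iomap *iomap)
{
	/* Drop the locks taken in ->iomap_begin; undo unused reservations. */
	return 0;
}

static const struct iomap_ops example_fs_iomap_ops = {
	.iomap_begin	= example_fs_iomap_begin,
	.iomap_end	= example_fs_iomap_end,
};
#endif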
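
/*
 * A disabled sketch of how callers typically drive iomap_apply(): loop
 * until the requested range is consumed, since each call may map and
 * process less than was asked for.  "example_actor" and
 * "example_apply_range" are hypothetical and mirror the pattern used by
 * the iomap helpers built on top of this function; note that the srcmap
 * argument aliases the iomap whenever the filesystem did not provide a
 * separate source mapping.
 */
#if 0
static loff_t example_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap, struct iomap *srcmap)
{
	/*
	 * Operate on up to "length" bytes of one uniform mapping.  A short
	 * return makes iomap_apply() report partial progress, and the
	 * caller's loop retries the remainder.
	 */
	return length;
}

static int example_apply_range(struct inode *inode, loff_t pos, loff_t len)
{
	loff_t ret;

	while (len > 0) {
		ret = iomap_apply(inode, pos, len, IOMAP_WRITE,
				&example_fs_iomap_ops, NULL, example_actor);
		if (ret <= 0)
			return ret;

		pos += ret;
		len -= ret;
	}

	return 0;
}
#endif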