/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *	Berkeley style UIO structures	-	Alan Cox 1994.
 */
#ifndef __LINUX_UIO_H
#define __LINUX_UIO_H

#include <linux/kernel.h>
#include <linux/thread_info.h>
#include <uapi/linux/uio.h>

struct page;
struct pipe_inode_info;

struct kvec {
	void *iov_base; /* and that should *never* hold a userland pointer */
	size_t iov_len;
};

enum iter_type {
	/* iter types */
	ITER_IOVEC = 4,
	ITER_KVEC = 8,
	ITER_BVEC = 16,
	ITER_PIPE = 32,
	ITER_DISCARD = 64,
	ITER_XARRAY = 128,
};

struct iov_iter {
	/*
	 * Bit 0 is the read/write bit, set if we're writing.
	 * Bit 1 is the BVEC_FLAG_NO_REF bit, set if type is a bvec and
	 * the caller isn't expecting to drop a page reference when done.
	 */
	unsigned int type;
	size_t iov_offset;
	size_t count;
	union {
		const struct iovec *iov;
		const struct kvec *kvec;
		const struct bio_vec *bvec;
		struct xarray *xarray;
		struct pipe_inode_info *pipe;
	};
	union {
		unsigned long nr_segs;
		struct {
			unsigned int head;
			unsigned int start_head;
		};
		loff_t xarray_start;
	};
};
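
/*
 * Illustrative sketch (not part of this header): wrapping a kernel buffer
 * in an ITER_KVEC iterator via the constructors declared further down,
 * where "buf", "len" and "src" are assumed names.  READ marks the iterator
 * as a destination, so data is copied *into* it with copy_to_iter():
 *
 *	struct kvec kv = { .iov_base = buf, .iov_len = len };
 *	struct iov_iter iter;
 *	size_t n;
 *
 *	iov_iter_kvec(&iter, READ, &kv, 1, len);
 *	n = copy_to_iter(src, len, &iter);
 */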

static inline enum iter_type iov_iter_type(const struct iov_iter *i)
{
	return i->type & ~(READ | WRITE);
}

static inline bool iter_is_iovec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_IOVEC;
}

static inline bool iov_iter_is_kvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_KVEC;
}

static inline bool iov_iter_is_bvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_BVEC;
}

static inline bool iov_iter_is_pipe(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_PIPE;
}

static inline bool iov_iter_is_discard(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_DISCARD;
}

static inline bool iov_iter_is_xarray(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_XARRAY;
}

static inline unsigned char iov_iter_rw(const struct iov_iter *i)
{
	return i->type & (READ | WRITE);
}

/*
 * Total number of bytes covered by an iovec.
 *
 * NOTE that it is not safe to use this function until all the iovec's
 * segment lengths have been validated, because the individual lengths can
 * overflow a size_t when added together.
 */
static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
	unsigned long seg;
	size_t ret = 0;

	for (seg = 0; seg < nr_segs; seg++)
		ret += iov[seg].iov_len;
	return ret;
}
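
/*
 * Illustrative sketch (an assumption, not kernel code): the kind of
 * per-segment validation the NOTE above presumes, rejecting a vector
 * whose total length would overflow when summed:
 *
 *	size_t total = 0;
 *	unsigned long seg;
 *
 *	for (seg = 0; seg < nr_segs; seg++) {
 *		if (iov[seg].iov_len > MAX_RW_COUNT - total)
 *			return -EINVAL;
 *		total += iov[seg].iov_len;
 *	}
 */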

static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
{
	return (struct iovec) {
		.iov_base = iter->iov->iov_base + iter->iov_offset,
		.iov_len = min(iter->count,
			       iter->iov->iov_len - iter->iov_offset),
	};
}

size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i);

static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, true)))
		return 0;
	else
		return _copy_to_iter(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	else
		return _copy_from_iter(addr, bytes, i);
}
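
/*
 * Illustrative sketch (not part of this header): both wrappers return the
 * number of bytes actually copied, which may be short if a fault is hit
 * mid-copy, so callers usually compare against the requested size:
 *
 *	size_t copied = copy_to_iter(kbuf, len, iter);
 *
 *	if (copied != len)
 *		return -EFAULT;
 */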

static __always_inline __must_check
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return false;
	else
		return _copy_from_iter_full(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	else
		return _copy_from_iter_nocache(addr, bytes, i);
}

static __always_inline __must_check
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return false;
	else
		return _copy_from_iter_full_nocache(addr, bytes, i);
}

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
 * Note: users like pmem that depend on the stricter semantics of
 * copy_from_iter_flushcache() (relative to copy_from_iter_nocache()) must
 * check for IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming
 * that the destination is flushed from the cache on return.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif
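
/*
 * Illustrative sketch (an assumption, not kernel code): the check the
 * comment above calls for, in a pmem-style caller that relies on the
 * flush-on-return guarantee:
 *
 *	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE))
 *		return -EOPNOTSUPP;
 *	copied = copy_from_iter_flushcache(dst, len, iter);
 */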

#ifdef CONFIG_ARCH_HAS_COPY_MC
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_mc_to_iter _copy_to_iter
#endif

static __always_inline __must_check
size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	else
		return _copy_from_iter_flushcache(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_mc_to_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, true)))
		return 0;
	else
		return _copy_mc_to_iter(addr, bytes, i);
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
			unsigned long nr_segs, size_t count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec,
			unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
			unsigned long nr_segs, size_t count);
void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe,
			size_t count);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray,
		     loff_t start, size_t count);
ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
			size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
			size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);

static inline size_t iov_iter_count(const struct iov_iter *i)
{
	return i->count;
}

/*
 * Cap the iov_iter by the given limit; note that the second argument is
 * *not* the new size - it's an upper bound for it.  Passing it a value
 * greater than the amount of data in the iov_iter is fine - it'll just do
 * nothing in that case.
 */
static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
{
	/*
	 * count doesn't have to fit in size_t - comparison extends both
	 * operands to u64 here and any value that would be truncated by
	 * conversion in assignment is by definition greater than all
	 * values of size_t, including old i->count.
	 */
	if (i->count > count)
		i->count = count;
}

/*
 * reexpand a previously truncated iterator; count must be no more than how much
 * we had shrunk it.
 */
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
	i->count = count;
}
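
/*
 * Illustrative sketch (not part of this header; "limit" and "do_io" are
 * assumed names): the usual truncate/consume/reexpand pairing, which
 * restores whatever the truncation hid without resurrecting consumed data:
 *
 *	size_t old = iov_iter_count(iter);
 *	size_t cap = min_t(size_t, old, limit);
 *
 *	iov_iter_truncate(iter, limit);
 *	do_io(iter);
 *	iov_iter_reexpand(iter, iov_iter_count(iter) + (old - cap));
 */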

struct csum_state {
	__wsum csum;
	size_t off;
};

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csstate, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
		struct iov_iter *i);

struct iovec *iovec_from_user(const struct iovec __user *uvector,
		unsigned long nr_segs, unsigned long fast_segs,
		struct iovec *fast_iov, bool compat);
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i);
ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i, bool compat);
int import_single_range(int type, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i);

int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
			    int (*f)(struct kvec *vec, void *context),
			    void *context);
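
/*
 * Illustrative sketch (not part of this header): a minimal callback for
 * iov_iter_for_each_range(), assuming a non-zero return stops the walk:
 *
 *	static int count_ranges(struct kvec *vec, void *context)
 *	{
 *		*(size_t *)context += vec->iov_len;
 *		return 0;
 *	}
 *
 *	size_t total = 0;
 *	int err = iov_iter_for_each_range(iter, bytes, count_ranges, &total);
 */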

#endif