/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Berkeley style UIO structures - Alan Cox 1994.
 */
#ifndef __LINUX_UIO_H
#define __LINUX_UIO_H

#include <linux/kernel.h>
#include <linux/thread_info.h>
#include <linux/mm_types.h>
#include <uapi/linux/uio.h>

struct page;

typedef unsigned int __bitwise iov_iter_extraction_t;

struct kvec {
	void *iov_base; /* and that should *never* hold a userland pointer */
	size_t iov_len;
};

enum iter_type {
	/* iter types */
	ITER_UBUF,
	ITER_IOVEC,
	ITER_BVEC,
	ITER_KVEC,
	ITER_XARRAY,
	ITER_DISCARD,
};

#define ITER_SOURCE	1	// == WRITE
#define ITER_DEST	0	// == READ

struct iov_iter_state {
	size_t iov_offset;
	size_t count;
	unsigned long nr_segs;
};

struct iov_iter {
	u8 iter_type;
	bool nofault;
	bool data_source;
	size_t iov_offset;
	/*
	 * Hack alert: overlay ubuf_iovec with iovec + count, so
	 * that the members resolve correctly regardless of the type
	 * of iterator used. This means that you can use:
	 *
	 * &iter->__ubuf_iovec or iter->__iov
	 *
	 * interchangeably for the user_backed cases, hence simplifying
	 * some of the cases that need to deal with both.
	 */
	union {
		/*
		 * This really should be a const, but we cannot do that without
		 * also modifying any of the zero-filling iter init functions.
		 * Leave it non-const for now, but it should be treated as such.
		 */
		struct iovec __ubuf_iovec;
		struct {
			union {
				/* use iter_iov() to get the current vec */
				const struct iovec *__iov;
				const struct kvec *kvec;
				const struct bio_vec *bvec;
				struct xarray *xarray;
				void __user *ubuf;
			};
			size_t count;
		};
	};
	union {
		unsigned long nr_segs;
		loff_t xarray_start;
	};
};

static inline const struct iovec *iter_iov(const struct iov_iter *iter)
{
	if (iter->iter_type == ITER_UBUF)
		return (const struct iovec *) &iter->__ubuf_iovec;
	return iter->__iov;
}

#define iter_iov_addr(iter)	(iter_iov(iter)->iov_base + (iter)->iov_offset)
#define iter_iov_len(iter)	(iter_iov(iter)->iov_len - (iter)->iov_offset)
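
/*
 * Illustrative sketch (not part of this header): inspecting the current
 * segment with the accessors above. Because ITER_UBUF overlays
 * __ubuf_iovec with the iovec/count pair, iter_iov() resolves correctly
 * for both single-buffer and vectored user-backed iterators. The helper
 * name dump_cur_seg() is hypothetical.
 *
 *	static void dump_cur_seg(const struct iov_iter *iter)
 *	{
 *		pr_debug("seg base=%p len=%zu\n",
 *			 iter_iov_addr(iter), iter_iov_len(iter));
 *	}
 */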

static inline enum iter_type iov_iter_type(const struct iov_iter *i)
{
	return i->iter_type;
}

static inline void iov_iter_save_state(struct iov_iter *iter,
				       struct iov_iter_state *state)
{
	state->iov_offset = iter->iov_offset;
	state->count = iter->count;
	state->nr_segs = iter->nr_segs;
}
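
/*
 * Illustrative sketch: snapshotting the iterator before an operation
 * that may have to be retried from the beginning, then rewinding with
 * iov_iter_restore() (declared below). do_send() is a hypothetical
 * consumer that advances the iterator as it copies.
 *
 *	struct iov_iter_state state;
 *	ssize_t ret;
 *
 *	iov_iter_save_state(iter, &state);
 *	ret = do_send(iter);
 *	if (ret == -EAGAIN)
 *		iov_iter_restore(iter, &state);
 */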

static inline bool iter_is_ubuf(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_UBUF;
}

static inline bool iter_is_iovec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_IOVEC;
}

static inline bool iov_iter_is_kvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_KVEC;
}

static inline bool iov_iter_is_bvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_BVEC;
}

static inline bool iov_iter_is_discard(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_DISCARD;
}

static inline bool iov_iter_is_xarray(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_XARRAY;
}

static inline unsigned char iov_iter_rw(const struct iov_iter *i)
{
	return i->data_source ? WRITE : READ;
}

static inline bool user_backed_iter(const struct iov_iter *i)
{
	return iter_is_ubuf(i) || iter_is_iovec(i);
}

/*
 * Total number of bytes covered by an iovec.
 *
 * NOTE that it is not safe to use this function until all the iovec's
 * segment lengths have been validated, because the individual lengths can
 * overflow a size_t when added together.
 */
static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
	unsigned long seg;
	size_t ret = 0;

	for (seg = 0; seg < nr_segs; seg++)
		ret += iov[seg].iov_len;
	return ret;
}
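
/*
 * Illustrative sketch of the validation the NOTE above requires: reject
 * the vector if the running total would exceed MAX_RW_COUNT (the usual
 * I/O cap, from linux/fs.h) before ever trusting iov_length(). The
 * helper name iov_lengths_ok() is hypothetical.
 *
 *	static bool iov_lengths_ok(const struct iovec *iov,
 *				   unsigned long nr_segs)
 *	{
 *		size_t total = 0;
 *		unsigned long seg;
 *
 *		for (seg = 0; seg < nr_segs; seg++) {
 *			if (iov[seg].iov_len > MAX_RW_COUNT - total)
 *				return false;
 *			total += iov[seg].iov_len;
 *		}
 *		return true;
 *	}
 */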

size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
				  size_t bytes, struct iov_iter *i);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t bytes);
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			   struct iov_iter *i);

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);

static inline size_t copy_folio_to_iter(struct folio *folio, size_t offset,
					size_t bytes, struct iov_iter *i)
{
	return copy_page_to_iter(&folio->page, offset, bytes, i);
}

static inline size_t copy_folio_from_iter_atomic(struct folio *folio,
		size_t offset, size_t bytes, struct iov_iter *i)
{
	return copy_page_from_iter_atomic(&folio->page, offset, bytes, i);
}

size_t copy_page_to_iter_nofault(struct page *page, unsigned offset,
				 size_t bytes, struct iov_iter *i);

static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, true))
		return _copy_to_iter(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, false))
		return _copy_from_iter(addr, bytes, i);
	return 0;
}
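
/*
 * Illustrative sketch: a minimal ->read_iter()-style use of
 * copy_to_iter(). struct my_dev and its buf/buf_len members are
 * hypothetical. copy_to_iter() returns the number of bytes actually
 * copied, which may be short if a user page faults, so the caller
 * decides whether a short copy is an error.
 *
 *	static ssize_t my_read_iter(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		struct my_dev *dev = iocb->ki_filp->private_data;
 *		size_t n = min(iov_iter_count(to), dev->buf_len);
 *
 *		return copy_to_iter(dev->buf, n, to);
 *	}
 */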

static __always_inline __must_check
bool copy_to_iter_full(const void *addr, size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_to_iter(addr, bytes, i);
	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}

static __always_inline __must_check
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_from_iter(addr, bytes, i);
	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}
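
/*
 * Illustrative sketch: the _full variants give all-or-nothing semantics
 * by reverting the iterator on a short copy, so a failed copy leaves the
 * iterator exactly where it started and the caller never has to account
 * for a partial transfer. struct my_hdr is hypothetical.
 *
 *	struct my_hdr hdr;
 *
 *	if (!copy_from_iter_full(&hdr, sizeof(hdr), from))
 *		return -EFAULT;
 */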

static __always_inline __must_check
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, false))
		return _copy_from_iter_nocache(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_from_iter_nocache(addr, bytes, i);
	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
 * Note, users like pmem that depend on the stricter semantics of
 * _copy_from_iter_flushcache() compared to _copy_from_iter_nocache() must
 * check for IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming
 * that the destination is flushed from the cache on return.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif
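
/*
 * Illustrative sketch of the check described in the note above: without
 * CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE the call degrades to the nocache
 * variant, so a pmem-style user must flush explicitly. The fallback
 * my_flush_range() is hypothetical.
 *
 *	len = _copy_from_iter_flushcache(dst, bytes, i);
 *	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE))
 *		my_flush_range(dst, len);
 */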

#ifdef CONFIG_ARCH_HAS_COPY_MC
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_mc_to_iter _copy_to_iter
#endif

size_t iov_iter_zero(size_t bytes, struct iov_iter *);
bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
			 unsigned len_mask);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
		   unsigned long nr_segs, size_t count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec,
		   unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
		   unsigned long nr_segs, size_t count);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray,
		     loff_t start, size_t count);
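
/*
 * Illustrative sketch: building a kernel-memory iterator over a single
 * buffer with iov_iter_kvec(). ITER_DEST marks the iterator as the
 * destination that data will be read into; buf/len are hypothetical.
 *
 *	struct kvec kv = { .iov_base = buf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, ITER_DEST, &kv, 1, len);
 */
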
ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
			size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i, struct page ***pages,
			size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);

static inline size_t iov_iter_count(const struct iov_iter *i)
{
	return i->count;
}

/*
 * Cap the iov_iter by the given limit; note that the second argument is
 * *not* the new size - it's an upper limit for it. Passing a value
 * greater than the amount of data in the iov_iter is fine - it'll just do
 * nothing in that case.
 */
static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
{
	/*
	 * count doesn't have to fit in size_t - comparison extends both
	 * operands to u64 here and any value that would be truncated by
	 * conversion in assignment is by definition greater than all
	 * values of size_t, including old i->count.
	 */
	if (i->count > count)
		i->count = count;
}

/*
 * reexpand a previously truncated iterator; count must be no more than how much
 * we had shrunk it.
 */
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
	i->count = count;
}

static inline int
iov_iter_npages_cap(struct iov_iter *i, int maxpages, size_t max_bytes)
{
	size_t shorted = 0;
	int npages;

	if (iov_iter_count(i) > max_bytes) {
		shorted = iov_iter_count(i) - max_bytes;
		iov_iter_truncate(i, max_bytes);
	}
	npages = iov_iter_npages(i, maxpages);
	if (shorted)
		iov_iter_reexpand(i, iov_iter_count(i) + shorted);

	return npages;
}

struct iovec *iovec_from_user(const struct iovec __user *uvector,
		unsigned long nr_segs, unsigned long fast_segs,
		struct iovec *fast_iov, bool compat);
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i);
ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i, bool compat);
int import_ubuf(int type, void __user *buf, size_t len, struct iov_iter *i);
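
/*
 * Illustrative sketch: the usual import pattern in a syscall path.
 * UIO_FASTIOV (from uapi/linux/uio.h) sizes the on-stack fast path;
 * import_iovec() may replace @iov with an allocated array, so it is
 * freed afterwards (kfree(NULL) is safe). do_it() is hypothetical.
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(ITER_SOURCE, uvec, nr_segs,
 *			   ARRAY_SIZE(iovstack), &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	ret = do_it(&iter);
 *	kfree(iov);
 */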

static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction,
				 void __user *buf, size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter) {
		.iter_type = ITER_UBUF,
		.data_source = direction,
		.ubuf = buf,
		.count = count,
		.nr_segs = 1
	};
}

/* Flags for iov_iter_get/extract_pages*() */
/* Allow P2PDMA on the extracted pages */
#define ITER_ALLOW_P2PDMA	((__force iov_iter_extraction_t)0x01)

ssize_t iov_iter_extract_pages(struct iov_iter *i, struct page ***pages,
			       size_t maxsize, unsigned int maxpages,
			       iov_iter_extraction_t extraction_flags,
			       size_t *offset0);

/**
 * iov_iter_extract_will_pin - Indicate how pages from the iterator will be retained
 * @iter: The iterator
 *
 * Examine the iterator and indicate by returning true or false whether, and
 * how, pages extracted from the iterator will be retained by the extraction
 * function.
 *
 * %true indicates that the pages will have a pin placed in them that the
 * caller must unpin. This must be done for DMA/async DIO to force fork()
 * to copy the page for the child (the parent must retain the original
 * page).
 *
 * %false indicates that no measures are taken and that it's up to the caller
 * to retain the pages.
 */
static inline bool iov_iter_extract_will_pin(const struct iov_iter *iter)
{
	return user_backed_iter(iter);
}
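
/*
 * Illustrative sketch: pairing iov_iter_extract_pages() with
 * iov_iter_extract_will_pin() for cleanup. Passing a NULL page list asks
 * the extractor to allocate one, which the caller then kvfree()s;
 * len/maxpages are hypothetical.
 *
 *	struct page **pages = NULL;
 *	size_t offset;
 *	ssize_t n;
 *
 *	n = iov_iter_extract_pages(iter, &pages, len, maxpages, 0, &offset);
 *	if (n < 0)
 *		return n;
 *	... do I/O on the pages ...
 *	if (iov_iter_extract_will_pin(iter))
 *		unpin_user_pages(pages, DIV_ROUND_UP(offset + n, PAGE_SIZE));
 *	kvfree(pages);
 */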

struct sg_table;
ssize_t extract_iter_to_sg(struct iov_iter *iter, size_t len,
			   struct sg_table *sgtable, unsigned int sg_max,
			   iov_iter_extraction_t extraction_flags);

#endif