/*
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_subr.c	8.3 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_subr.c,v 1.31.2.2 2002/04/21 08:09:37 bde Exp $
 */

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/vnode.h>
#include <sys/thread2.h>
#include <machine/limits.h>

#include <cpu/lwbuf.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

SYSCTL_INT(_kern, KERN_IOV_MAX, iov_max, CTLFLAG_RD, NULL, UIO_MAXIOV,
	"Maximum number of elements in an I/O vector; sysconf(_SC_IOV_MAX)");

/*
 * UIO_READ:	copy the kernelspace cp to the user or kernelspace UIO
 * UIO_WRITE:	copy the user or kernelspace UIO to the kernelspace cp
 *
 * For userspace UIO's, uio_td must be the current thread.
 *
 * The syscall interface is responsible for limiting the length to
 * ssize_t for things like read() or write() which return the bytes
 * read or written as ssize_t.  These functions work with unsigned
 * lengths.
 */
int
uiomove(caddr_t cp, size_t n, struct uio *uio)
{
	thread_t td = curthread;
	struct iovec *iov;
	size_t cnt;
	int error = 0;
	int save = 0;

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
	    ("uiomove: mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == td,
	    ("uiomove proc"));

	crit_enter();
	save = td->td_flags & TDF_DEADLKTREAT;
	td->td_flags |= TDF_DEADLKTREAT;
	crit_exit();

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		switch (uio->uio_segflg) {

		case UIO_USERSPACE:
			lwkt_user_yield();
			if (uio->uio_rw == UIO_READ)
				error = copyout(cp, iov->iov_base, cnt);
			else
				error = copyin(iov->iov_base, cp, cnt);
			if (error)
				goto out;	/* stop before advancing the uio */
			break;

		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				bcopy((caddr_t)cp, iov->iov_base, cnt);
			else
				bcopy(iov->iov_base, (caddr_t)cp, cnt);
			break;
		case UIO_NOCOPY:
			break;
		}
		iov->iov_base = (char *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		cp += cnt;
		n -= cnt;
	}
out:
	crit_enter();
	td->td_flags = (td->td_flags & ~TDF_DEADLKTREAT) | save;
	crit_exit();
	return (error);
}
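
/*
 * Illustrative sketch (assumption, not part of the original file): a driver
 * or filesystem returning data from a kernel buffer typically clamps the
 * length against uio_offset and lets uiomove() do the user/kernel copy in
 * the direction given by uio_rw.  foo_copy() and its arguments are
 * hypothetical; uiomove_frombuf() below packages the same range checking.
 *
 *	static int
 *	foo_copy(char *kbuf, size_t kbuflen, struct uio *uio)
 *	{
 *		size_t off = (size_t)uio->uio_offset;
 *
 *		if (off >= kbuflen)
 *			return (0);
 *		return (uiomove(kbuf + off, kbuflen - off, uio));
 *	}
 */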

/*
 * Like uiomove() but zero-fills the destination instead of copying from
 * a kernel buffer.  Only allowed for UIO_READ, for obvious reasons.
 */
int
uiomovez(size_t n, struct uio *uio)
{
	struct iovec *iov;
	size_t cnt;
	int error = 0;

	KASSERT(uio->uio_rw == UIO_READ, ("uiomovez: mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
		("uiomove proc"));

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		switch (uio->uio_segflg) {
		case UIO_USERSPACE:
			error = copyout(ZeroPage, iov->iov_base, cnt);
			break;
		case UIO_SYSSPACE:
			bzero(iov->iov_base, cnt);
			break;
		case UIO_NOCOPY:
			break;
		}
		if (error)
			break;	/* stop before advancing the uio */
		iov->iov_base = (char *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		n -= cnt;
	}
	return (error);
}
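
/*
 * Illustrative sketch (assumption, not part of the original file): a
 * filesystem read that crosses a hole in a sparse file can satisfy the
 * unbacked portion with uiomovez() instead of materializing a zeroed
 * buffer; hole_len is a hypothetical byte count for the hole.
 *
 *	if (uio->uio_rw == UIO_READ)
 *		error = uiomovez(hole_len, uio);
 */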

/*
 * Wrapper for uiomove() that validates the arguments against a known-good
 * kernel buffer.  This function automatically indexes the buffer by
 * uio_offset and handles all range checking.
 */
int
uiomove_frombuf(void *buf, size_t buflen, struct uio *uio)
{
	size_t offset;

	offset = (size_t)uio->uio_offset;
	if ((off_t)offset != uio->uio_offset)
		return (EINVAL);
	if (buflen == 0 || offset >= buflen)
		return (0);
	return (uiomove((char *)buf + offset, buflen - offset, uio));
}
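
/*
 * Illustrative sketch (assumption, not part of the original file): exposing
 * a fixed kernel string through a read handler reduces to a single call;
 * foo_msg and foo_msg_read() are hypothetical.
 *
 *	static const char foo_msg[] = "hello from the kernel\n";
 *
 *	static int
 *	foo_msg_read(struct uio *uio)
 *	{
 *		return (uiomove_frombuf(__DECONST(void *, foo_msg),
 *					sizeof(foo_msg) - 1, uio));
 *	}
 */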

/*
 * Give the next character to the user as the result of a read.
 */
int
ureadc(int c, struct uio *uio)
{
	struct iovec *iov;
	char *iov_base;

again:
	if (uio->uio_iovcnt == 0 || uio->uio_resid == 0)
		panic("ureadc");
	iov = uio->uio_iov;
	if (iov->iov_len == 0) {
		uio->uio_iovcnt--;
		uio->uio_iov++;
		goto again;
	}
	switch (uio->uio_segflg) {

	case UIO_USERSPACE:
		if (subyte(iov->iov_base, c) < 0)
			return (EFAULT);
		break;

	case UIO_SYSSPACE:
		/*
		 * iov_base is a void *, so bounce the store through a
		 * char * local.
		 */
		iov_base = iov->iov_base;
		*iov_base = c;
		iov->iov_base = iov_base;
		break;

	case UIO_NOCOPY:
		break;
	}
	iov->iov_base = (char *)iov->iov_base + 1;
	iov->iov_len--;
	uio->uio_resid--;
	uio->uio_offset++;
	return (0);
}
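
/*
 * Illustrative sketch (assumption, not part of the original file): a driver
 * that produces data one byte at a time can feed a read request with
 * ureadc(); foo_getc() is a hypothetical character source returning -1
 * when empty.
 *
 *	while (uio->uio_resid > 0) {
 *		c = foo_getc(sc);
 *		if (c < 0)
 *			break;
 *		error = ureadc(c, uio);
 *		if (error)
 *			break;
 *	}
 */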

/*
 * General routine to allocate a hash table.  Make the hash table size a
 * power of 2 greater than or equal to the number of elements requested,
 * and store the masking value in *hashmask.
 */
void *
hashinit(int elements, struct malloc_type *type, u_long *hashmask)
{
	long hashsize;
	LIST_HEAD(generic, generic) *hashtbl;
	int i;

	if (elements <= 0)
		panic("hashinit: bad elements");
	for (hashsize = 2; hashsize < elements; hashsize <<= 1)
		continue;
	hashtbl = kmalloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
	for (i = 0; i < hashsize; i++)
		LIST_INIT(&hashtbl[i]);
	*hashmask = hashsize - 1;
	return (hashtbl);
}
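
/*
 * Illustrative sketch (assumption, not part of the original file): a
 * power-of-2 table from hashinit() is indexed by masking a hash value with
 * *hashmask; the foo_* names, M_FOOHASH, nelements and key are hypothetical.
 *
 *	LIST_HEAD(foo_list, foo) *foo_hashtbl;
 *	u_long foo_hashmask;
 *
 *	foo_hashtbl = hashinit(nelements, M_FOOHASH, &foo_hashmask);
 *	bucket = &foo_hashtbl[key & foo_hashmask];
 */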

/*
 * This is a newer version which allocates a hash table of structures.
 *
 * The returned array will be zeroed.  The caller is responsible for
 * initializing the structures.
 */
void *
hashinit_ext(int elements, size_t size, struct malloc_type *type,
	     u_long *hashmask)
{
	long hashsize;
	void *hashtbl;

	if (elements <= 0)
		panic("hashinit: bad elements");
	for (hashsize = 2; hashsize < elements; hashsize <<= 1)
		continue;
	hashtbl = kmalloc((size_t)hashsize * size, type, M_WAITOK | M_ZERO);
	*hashmask = hashsize - 1;
	return (hashtbl);
}

static int primes[] = { 1, 13, 31, 61, 127, 251, 509, 761, 1021, 1531, 2039,
			2557, 3067, 3583, 4093, 4603, 5119, 5623, 6143, 6653,
			7159, 7673, 8191, 12281, 16381, 24571, 32749 };
#define NPRIMES NELEM(primes)

/*
 * General routine to allocate a prime number sized hash table.
 */
void *
phashinit(int elements, struct malloc_type *type, u_long *nentries)
{
	long hashsize;
	LIST_HEAD(generic, generic) *hashtbl;
	int i;

	if (elements <= 0)
		panic("phashinit: bad elements");
	for (i = 1, hashsize = primes[1]; hashsize <= elements;) {
		i++;
		if (i == NPRIMES)
			break;
		hashsize = primes[i];
	}
	hashsize = primes[i - 1];
	hashtbl = kmalloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
	for (i = 0; i < hashsize; i++)
		LIST_INIT(&hashtbl[i]);
	*nentries = hashsize;
	return (hashtbl);
}
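
/*
 * Illustrative sketch (assumption, not part of the original file): a table
 * from phashinit() has a prime number of buckets, so it is indexed with a
 * modulo of *nentries rather than a mask; the foo_* names, M_FOOHASH,
 * nelements and key are hypothetical.
 *
 *	LIST_HEAD(foo_list, foo) *foo_hashtbl;
 *	u_long foo_nbuckets;
 *
 *	foo_hashtbl = phashinit(nelements, M_FOOHASH, &foo_nbuckets);
 *	bucket = &foo_hashtbl[key % foo_nbuckets];
 */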

/*
 * This is a newer version which allocates a hash table of structures
 * in a prime-number size.
 *
 * The returned array will be zeroed.  The caller is responsible for
 * initializing the structures.
 */
void *
phashinit_ext(int elements, size_t size, struct malloc_type *type,
	      u_long *nentries)
{
	long hashsize;
	void *hashtbl;
	int i;

	if (elements <= 0)
		panic("phashinit: bad elements");
	for (i = 1, hashsize = primes[1]; hashsize <= elements;) {
		i++;
		if (i == NPRIMES)
			break;
		hashsize = primes[i];
	}
	hashsize = primes[i - 1];
	hashtbl = kmalloc((size_t)hashsize * size, type, M_WAITOK | M_ZERO);
	*nentries = hashsize;
	return (hashtbl);
}

/*
 * Copyin an iovec.  If the iovec array fits, use the preallocated small
 * iovec structure.  If it is too big, dynamically allocate an iovec array
 * of sufficient size.
 *
 * MPSAFE
 */
int
iovec_copyin(struct iovec *uiov, struct iovec **kiov, struct iovec *siov,
	     size_t iov_cnt, size_t *iov_len)
{
	struct iovec *iovp;
	int error, i;
	size_t len;

	if (iov_cnt > UIO_MAXIOV)
		return EMSGSIZE;
	if (iov_cnt > UIO_SMALLIOV) {
		*kiov = kmalloc(sizeof(struct iovec) * iov_cnt, M_IOV,
				M_WAITOK);
	} else {
		*kiov = siov;
	}
	error = copyin(uiov, *kiov, iov_cnt * sizeof(struct iovec));
	if (error == 0) {
		*iov_len = 0;
		for (i = 0, iovp = *kiov; i < iov_cnt; i++, iovp++) {
			/*
			 * Check for both *iov_len overflows and out of
			 * range iovp->iov_len's.  We limit to the
			 * capabilities of signed integers.
			 *
			 * GCC4 - overflow check opt requires assign/test.
			 */
			len = *iov_len + iovp->iov_len;
			if (len < *iov_len)
				error = EINVAL;
			*iov_len = len;
		}
	}

	/*
	 * From userland, disallow iovecs whose total length exceeds the
	 * signed size limit, since the system calls return ssize_t.
	 *
	 * NOTE: Internal kernel interfaces can handle the unsigned
	 *	 limit.
	 */
	if (error == 0 && (ssize_t)*iov_len < 0)
		error = EINVAL;

	if (error)
		iovec_free(kiov, siov);
	return (error);
}
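
/*
 * Illustrative sketch (assumption, not part of the original file): a
 * scatter-read path would typically pair iovec_copyin() with iovec_free().
 * Note that iovec_copyin() already frees on failure, so the caller only
 * frees on the success path; uap->iovp and uap->iovcnt stand in for the
 * user-supplied arguments.
 *
 *	struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
 *	struct uio auio;
 *	size_t len;
 *	int error;
 *
 *	error = iovec_copyin(uap->iovp, &iov, aiov, uap->iovcnt, &len);
 *	if (error)
 *		return (error);
 *	auio.uio_iov = iov;
 *	auio.uio_iovcnt = uap->iovcnt;
 *	auio.uio_resid = len;
 *	... perform the I/O on &auio ...
 *	iovec_free(&iov, aiov);
 */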


/*
 * Copyright (c) 2004 Alan L. Cox <alc@cs.rice.edu>
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_subr.c	8.3 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/i386/i386/uio_machdep.c,v 1.1 2004/03/21 20:28:36 alc Exp $
 */

/*
 * Implement uiomove(9) from physical memory using lwbuf's to reduce
 * the creation and destruction of ephemeral mappings.
 */
int
uiomove_fromphys(vm_page_t *ma, vm_offset_t offset, size_t n, struct uio *uio)
{
	struct lwbuf lwb_cache;
	struct lwbuf *lwb;
	struct thread *td = curthread;
	struct iovec *iov;
	void *cp;
	vm_offset_t page_offset;
	vm_page_t m;
	size_t cnt;
	int error = 0;
	int save = 0;

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
	    ("uiomove_fromphys: mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
	    ("uiomove_fromphys proc"));

	crit_enter();
	save = td->td_flags & TDF_DEADLKTREAT;
	td->td_flags |= TDF_DEADLKTREAT;
	crit_exit();

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;
		page_offset = offset & PAGE_MASK;
		cnt = min(cnt, PAGE_SIZE - page_offset);
		m = ma[offset >> PAGE_SHIFT];
		lwb = lwbuf_alloc(m, &lwb_cache);
		cp = (char *)lwbuf_kva(lwb) + page_offset;
		switch (uio->uio_segflg) {
		case UIO_USERSPACE:
			/*
			 * note: removed uioyield (it was the wrong place to
			 * put it).
			 */
			if (uio->uio_rw == UIO_READ)
				error = copyout(cp, iov->iov_base, cnt);
			else
				error = copyin(iov->iov_base, cp, cnt);
			if (error) {
				lwbuf_free(lwb);
				goto out;
			}
			break;
		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				bcopy(cp, iov->iov_base, cnt);
			else
				bcopy(iov->iov_base, cp, cnt);
			break;
		case UIO_NOCOPY:
			break;
		}
		lwbuf_free(lwb);
		iov->iov_base = (char *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		offset += cnt;
		n -= cnt;
	}
out:
	if (save == 0) {
		crit_enter();
		td->td_flags &= ~TDF_DEADLKTREAT;
		crit_exit();
	}
	return (error);
}
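
/*
 * Illustrative sketch (assumption, not part of the original file): given an
 * array of held vm_page_t's backing a buffer, a pager or driver can satisfy
 * a read directly from physical memory without a persistent kernel mapping;
 * pages and bytes_valid are hypothetical.
 *
 *	vm_page_t pages[MAXPHYS / PAGE_SIZE];
 *	...
 *	error = uiomove_fromphys(pages, uio->uio_offset & PAGE_MASK,
 *				 bytes_valid, uio);
 */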