xref: /dragonfly/sys/kern/kern_subr.c (revision 92fc8b5c)
1 /*
2  * Copyright (c) 1982, 1986, 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the University of
21  *	California, Berkeley and its contributors.
22  * 4. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  *	@(#)kern_subr.c	8.3 (Berkeley) 1/21/94
39  * $FreeBSD: src/sys/kern/kern_subr.c,v 1.31.2.2 2002/04/21 08:09:37 bde Exp $
40  */
41 
42 #include "opt_ddb.h"
43 
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/kernel.h>
47 #include <sys/proc.h>
48 #include <sys/malloc.h>
49 #include <sys/lock.h>
50 #include <sys/resourcevar.h>
51 #include <sys/sysctl.h>
52 #include <sys/uio.h>
53 #include <sys/vnode.h>
54 #include <sys/thread2.h>
55 #include <machine/limits.h>
56 
57 #include <cpu/lwbuf.h>
58 
59 #include <vm/vm.h>
60 #include <vm/vm_page.h>
61 #include <vm/vm_map.h>
62 
63 SYSCTL_INT(_kern, KERN_IOV_MAX, iov_max, CTLFLAG_RD, NULL, UIO_MAXIOV,
64 	"Maximum number of elements in an I/O vector; sysconf(_SC_IOV_MAX)");
65 
66 /*
67  * UIO_READ:	copy the kernelspace cp to the user or kernelspace UIO
68  * UIO_WRITE:	copy the user or kernelspace UIO to the kernelspace cp
69  *
70  * For userspace UIO's, uio_td must be the current thread.
71  *
72  * The syscall interface is responsible for limiting the length to
73  * ssize_t for things like read() or write() which return the bytes
74  * read or written as ssize_t.  These functions work with unsigned
75  * lengths.
76  */
77 int
78 uiomove(caddr_t cp, size_t n, struct uio *uio)
79 {
80 	struct iovec *iov;
81 	size_t cnt;
82 	int error = 0;
83 	int save = 0;
84 
85 	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
86 	    ("uiomove: mode"));
87 	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
88 	    ("uiomove proc"));
89 
90 	if (curproc) {
91 		save = curproc->p_flag & P_DEADLKTREAT;
92 		curproc->p_flag |= P_DEADLKTREAT;
93 	}
94 
95 	while (n > 0 && uio->uio_resid) {
96 		iov = uio->uio_iov;
97 		cnt = iov->iov_len;
98 		if (cnt == 0) {
99 			uio->uio_iov++;
100 			uio->uio_iovcnt--;
101 			continue;
102 		}
103 		if (cnt > n)
104 			cnt = n;
105 
106 		switch (uio->uio_segflg) {
107 
108 		case UIO_USERSPACE:
109 			lwkt_user_yield();
110 			if (uio->uio_rw == UIO_READ)
111 				error = copyout(cp, iov->iov_base, cnt);
112 			else
113 				error = copyin(iov->iov_base, cp, cnt);
114 			if (error)
115 				break;
116 			break;
117 
118 		case UIO_SYSSPACE:
119 			if (uio->uio_rw == UIO_READ)
120 				bcopy((caddr_t)cp, iov->iov_base, cnt);
121 			else
122 				bcopy(iov->iov_base, (caddr_t)cp, cnt);
123 			break;
124 		case UIO_NOCOPY:
125 			break;
126 		}
127 		iov->iov_base = (char *)iov->iov_base + cnt;
128 		iov->iov_len -= cnt;
129 		uio->uio_resid -= cnt;
130 		uio->uio_offset += cnt;
131 		cp += cnt;
132 		n -= cnt;
133 	}
134 	if (curproc)
135 		curproc->p_flag = (curproc->p_flag & ~P_DEADLKTREAT) | save;
136 	return (error);
137 }
138 
139 /*
140  * Like uiomove() but copies zero-fill.  Only allowed for UIO_READ,
141  * for obvious reasons.
142  */
143 int
144 uiomovez(size_t n, struct uio *uio)
145 {
146 	struct iovec *iov;
147 	size_t cnt;
148 	int error = 0;
149 
150 	KASSERT(uio->uio_rw == UIO_READ, ("uiomovez: mode"));
151 	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
152 		("uiomove proc"));
153 
154 	while (n > 0 && uio->uio_resid) {
155 		iov = uio->uio_iov;
156 		cnt = iov->iov_len;
157 		if (cnt == 0) {
158 			uio->uio_iov++;
159 			uio->uio_iovcnt--;
160 			continue;
161 		}
162 		if (cnt > n)
163 			cnt = n;
164 
165 		switch (uio->uio_segflg) {
166 		case UIO_USERSPACE:
167 			error = copyout(ZeroPage, iov->iov_base, cnt);
168 			if (error)
169 				break;
170 			break;
171 		case UIO_SYSSPACE:
172 			bzero(iov->iov_base, cnt);
173 			break;
174 		case UIO_NOCOPY:
175 			break;
176 		}
177 		iov->iov_base = (char *)iov->iov_base + cnt;
178 		iov->iov_len -= cnt;
179 		uio->uio_resid -= cnt;
180 		uio->uio_offset += cnt;
181 		n -= cnt;
182 	}
183 	return (error);
184 }
185 
186 /*
187  * Wrapper for uiomove() that validates the arguments against a known-good
188  * kernel buffer.
189  */
190 int
191 uiomove_frombuf(void *buf, size_t buflen, struct uio *uio)
192 {
193 	size_t offset;
194 
195 	offset = (size_t)uio->uio_offset;
196 	if ((off_t)offset != uio->uio_offset)
197 		return (EINVAL);
198 	if (buflen == 0 || offset >= buflen)
199 		return (0);
200 	return (uiomove((char *)buf + offset, buflen - offset, uio));
201 }
202 
203 /*
204  * Give next character to user as result of read.
205  */
206 int
207 ureadc(int c, struct uio *uio)
208 {
209 	struct iovec *iov;
210 	char *iov_base;
211 
212 again:
213 	if (uio->uio_iovcnt == 0 || uio->uio_resid == 0)
214 		panic("ureadc");
215 	iov = uio->uio_iov;
216 	if (iov->iov_len == 0) {
217 		uio->uio_iovcnt--;
218 		uio->uio_iov++;
219 		goto again;
220 	}
221 	switch (uio->uio_segflg) {
222 
223 	case UIO_USERSPACE:
224 		if (subyte(iov->iov_base, c) < 0)
225 			return (EFAULT);
226 		break;
227 
228 	case UIO_SYSSPACE:
229 		iov_base = iov->iov_base;
230 		*iov_base = c;
231 		iov->iov_base = iov_base;
232 		break;
233 
234 	case UIO_NOCOPY:
235 		break;
236 	}
237 	iov->iov_base = (char *)iov->iov_base + 1;
238 	iov->iov_len--;
239 	uio->uio_resid--;
240 	uio->uio_offset++;
241 	return (0);
242 }
243 
244 /*
245  * General routine to allocate a hash table.  Make the hash table size a
246  * power of 2 greater or equal to the number of elements requested, and
247  * store the masking value in *hashmask.
248  */
249 void *
250 hashinit(int elements, struct malloc_type *type, u_long *hashmask)
251 {
252 	long hashsize;
253 	LIST_HEAD(generic, generic) *hashtbl;
254 	int i;
255 
256 	if (elements <= 0)
257 		panic("hashinit: bad elements");
258 	for (hashsize = 2; hashsize < elements; hashsize <<= 1)
259 		continue;
260 	hashtbl = kmalloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
261 	for (i = 0; i < hashsize; i++)
262 		LIST_INIT(&hashtbl[i]);
263 	*hashmask = hashsize - 1;
264 	return (hashtbl);
265 }
266 
267 /*
268  * This is a newer version which allocates a hash table of structures.
269  *
270  * The returned array will be zero'd.  The caller is responsible for
271  * initializing the structures.
272  */
273 void *
274 hashinit_ext(int elements, size_t size, struct malloc_type *type,
275 	     u_long *hashmask)
276 {
277 	long hashsize;
278 	void *hashtbl;
279 
280 	if (elements <= 0)
281 		panic("hashinit: bad elements");
282 	for (hashsize = 2; hashsize < elements; hashsize <<= 1)
283 		continue;
284 	hashtbl = kmalloc((size_t)hashsize * size, type, M_WAITOK | M_ZERO);
285 	*hashmask = hashsize - 1;
286 	return (hashtbl);
287 }
288 
/*
 * Prime table used by phashinit()/phashinit_ext().  The search loops there
 * start scanning at index 1 and finish with primes[i - 1], so entry 0 (the
 * non-prime 1) is only selected when elements < primes[1], yielding a
 * single-bucket table.
 */
static int primes[] = { 1, 13, 31, 61, 127, 251, 509, 761, 1021, 1531, 2039,
			2557, 3067, 3583, 4093, 4603, 5119, 5623, 6143, 6653,
			7159, 7673, 8191, 12281, 16381, 24571, 32749 };
#define NPRIMES NELEM(primes)
293 
294 /*
295  * General routine to allocate a prime number sized hash table.
296  */
void *
phashinit(int elements, struct malloc_type *type, u_long *nentries)
{
	long hashsize;
	LIST_HEAD(generic, generic) *hashtbl;
	int i;

	if (elements <= 0)
		panic("phashinit: bad elements");
	/*
	 * Walk the prime table until primes[i] exceeds elements (or the
	 * table is exhausted); primes[i - 1] is then the largest table
	 * entry <= elements.  Falls back to primes[0] == 1 when
	 * elements < primes[1].
	 */
	for (i = 1, hashsize = primes[1]; hashsize <= elements;) {
		i++;
		if (i == NPRIMES)
			break;
		hashsize = primes[i];
	}
	hashsize = primes[i - 1];
	hashtbl = kmalloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
	for (i = 0; i < hashsize; i++)
		LIST_INIT(&hashtbl[i]);
	/* Prime-sized table: caller must use modulo, not masking. */
	*nentries = hashsize;
	return (hashtbl);
}
319 
320 /*
321  * This is a newer version which allocates a hash table of structures
322  * in a prime-number size.
323  *
324  * The returned array will be zero'd.  The caller is responsible for
325  * initializing the structures.
326  */
void *
phashinit_ext(int elements, size_t size, struct malloc_type *type,
	      u_long *nentries)
{
	long hashsize;
	void *hashtbl;
	int i;

	if (elements <= 0)
		panic("phashinit: bad elements");
	/*
	 * Walk the prime table until primes[i] exceeds elements (or the
	 * table is exhausted); primes[i - 1] is then the largest table
	 * entry <= elements.  Falls back to primes[0] == 1 when
	 * elements < primes[1].
	 */
	for (i = 1, hashsize = primes[1]; hashsize <= elements;) {
		i++;
		if (i == NPRIMES)
			break;
		hashsize = primes[i];
	}
	hashsize = primes[i - 1];
	/* Zeroed array of caller-defined structures of `size` bytes each. */
	hashtbl = kmalloc((size_t)hashsize * size, type, M_WAITOK | M_ZERO);
	/* Prime-sized table: caller must use modulo, not masking. */
	*nentries = hashsize;
	return (hashtbl);
}
348 
349 /*
350  * Copyin an iovec.  If the iovec array fits, use the preallocated small
351  * iovec structure.  If it is too big, dynamically allocate an iovec array
352  * of sufficient size.
353  *
354  * MPSAFE
355  */
356 int
357 iovec_copyin(struct iovec *uiov, struct iovec **kiov, struct iovec *siov,
358 	     size_t iov_cnt, size_t *iov_len)
359 {
360 	struct iovec *iovp;
361 	int error, i;
362 	size_t len;
363 
364 	if (iov_cnt > UIO_MAXIOV)
365 		return EMSGSIZE;
366 	if (iov_cnt > UIO_SMALLIOV) {
367 		MALLOC(*kiov, struct iovec *, sizeof(struct iovec) * iov_cnt,
368 		    M_IOV, M_WAITOK);
369 	} else {
370 		*kiov = siov;
371 	}
372 	error = copyin(uiov, *kiov, iov_cnt * sizeof(struct iovec));
373 	if (error == 0) {
374 		*iov_len = 0;
375 		for (i = 0, iovp = *kiov; i < iov_cnt; i++, iovp++) {
376 			/*
377 			 * Check for both *iov_len overflows and out of
378 			 * range iovp->iov_len's.  We limit to the
379 			 * capabilities of signed integers.
380 			 *
381 			 * GCC4 - overflow check opt requires assign/test.
382 			 */
383 			len = *iov_len + iovp->iov_len;
384 			if (len < *iov_len)
385 				error = EINVAL;
386 			*iov_len = len;
387 		}
388 	}
389 
390 	/*
391 	 * From userland disallow iovec's which exceed the sized size
392 	 * limit as the system calls return ssize_t.
393 	 *
394 	 * NOTE: Internal kernel interfaces can handle the unsigned
395 	 *	 limit.
396 	 */
397 	if (error == 0 && (ssize_t)*iov_len < 0)
398 		error = EINVAL;
399 
400 	if (error)
401 		iovec_free(kiov, siov);
402 	return (error);
403 }
404 
405 
406 /*
407  * Copyright (c) 2004 Alan L. Cox <alc@cs.rice.edu>
408  * Copyright (c) 1982, 1986, 1991, 1993
409  *	The Regents of the University of California.  All rights reserved.
410  * (c) UNIX System Laboratories, Inc.
411  * All or some portions of this file are derived from material licensed
412  * to the University of California by American Telephone and Telegraph
413  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
414  * the permission of UNIX System Laboratories, Inc.
415  *
416  * Redistribution and use in source and binary forms, with or without
417  * modification, are permitted provided that the following conditions
418  * are met:
419  * 1. Redistributions of source code must retain the above copyright
420  *    notice, this list of conditions and the following disclaimer.
421  * 2. Redistributions in binary form must reproduce the above copyright
422  *    notice, this list of conditions and the following disclaimer in the
423  *    documentation and/or other materials provided with the distribution.
424  * 4. Neither the name of the University nor the names of its contributors
425  *    may be used to endorse or promote products derived from this software
426  *    without specific prior written permission.
427  *
428  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
429  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
430  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
431  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
432  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
433  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
434  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
435  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
436  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
437  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
438  * SUCH DAMAGE.
439  *
440  * @(#)kern_subr.c	8.3 (Berkeley) 1/21/94
441  * $FreeBSD: src/sys/i386/i386/uio_machdep.c,v 1.1 2004/03/21 20:28:36 alc Exp $
442  */
443 
444 /*
445  * Implement uiomove(9) from physical memory using lwbuf's to reduce
446  * the creation and destruction of ephemeral mappings.
447  */
int
uiomove_fromphys(vm_page_t *ma, vm_offset_t offset, size_t n, struct uio *uio)
{
	struct lwbuf lwb_cache;
	struct lwbuf *lwb;
	struct thread *td = curthread;
	struct iovec *iov;
	void *cp;
	vm_offset_t page_offset;
	vm_page_t m;
	size_t cnt;
	int error = 0;
	int save = 0;

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
	    ("uiomove_fromphys: mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
	    ("uiomove_fromphys proc"));

	/*
	 * Set TDF_DEADLKTREAT for the duration of the copy, remembering
	 * whether it was already set so the original state can be
	 * restored on exit.  The flag updates are done inside a critical
	 * section.
	 */
	crit_enter();
	save = td->td_flags & TDF_DEADLKTREAT;
	td->td_flags |= TDF_DEADLKTREAT;
	crit_exit();

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			/* skip exhausted iovec entries */
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;
		/*
		 * Clip the chunk to the current page and map that page
		 * through an ephemeral lwbuf mapping; cp is the kernel
		 * virtual address of the data within the page.
		 */
		page_offset = offset & PAGE_MASK;
		cnt = min(cnt, PAGE_SIZE - page_offset);
		m = ma[offset >> PAGE_SHIFT];
		lwb = lwbuf_alloc(m, &lwb_cache);
		cp = (char *)lwbuf_kva(lwb) + page_offset;
		switch (uio->uio_segflg) {
		case UIO_USERSPACE:
			/*
			 * note: removed uioyield (it was the wrong place to
			 * put it).
			 */
			if (uio->uio_rw == UIO_READ)
				error = copyout(cp, iov->iov_base, cnt);
			else
				error = copyin(iov->iov_base, cp, cnt);
			/* release the page mapping before bailing out */
			if (error) {
				lwbuf_free(lwb);
				goto out;
			}
			break;
		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				bcopy(cp, iov->iov_base, cnt);
			else
				bcopy(iov->iov_base, cp, cnt);
			break;
		case UIO_NOCOPY:
			/* caller only wants the uio advanced */
			break;
		}
		lwbuf_free(lwb);
		iov->iov_base = (char *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		offset += cnt;
		n -= cnt;
	}
out:
	/* Clear TDF_DEADLKTREAT only if it was not set on entry. */
	if (save == 0) {
		crit_enter();
		td->td_flags &= ~TDF_DEADLKTREAT;
		crit_exit();
	}
	return (error);
}
527 
528