/*
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_subr.c	8.3 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_subr.c,v 1.31.2.2 2002/04/21 08:09:37 bde Exp $
 * $DragonFly: src/sys/kern/kern_subr.c,v 1.18 2004/07/27 13:50:15 hmp Exp $
 */

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

/*
 * UIO_READ:	copy the kernelspace cp to the user or kernelspace UIO
 * UIO_WRITE:	copy the user or kernelspace UIO to cp
 *
 * For userspace UIO's, uio_td must be the current thread.
 */
int
uiomove(caddr_t cp, int n, struct uio *uio)
{
	struct iovec *iov;
	u_int cnt;
	int error = 0;
	int save = 0;
	int baseticks = ticks;

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
	    ("uiomove: mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
	    ("uiomove proc"));

	if (curproc) {
		save = curproc->p_flag & P_DEADLKTREAT;
		curproc->p_flag |= P_DEADLKTREAT;
	}

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		switch (uio->uio_segflg) {

		case UIO_USERSPACE:
			if (ticks - baseticks >= hogticks) {
				uio_yield();
				baseticks = ticks;
			}
			if (uio->uio_rw == UIO_READ)
				error = copyout(cp, iov->iov_base, cnt);
			else
				error = copyin(iov->iov_base, cp, cnt);
			if (error)
				goto out;	/* abort the transfer on a copy fault */
			break;

		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				bcopy((caddr_t)cp, iov->iov_base, cnt);
			else
				bcopy(iov->iov_base, (caddr_t)cp, cnt);
			break;
		case UIO_NOCOPY:
			break;
		}
		iov->iov_base += cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		cp += cnt;
		n -= cnt;
	}
out:
	if (curproc)
		curproc->p_flag = (curproc->p_flag & ~P_DEADLKTREAT) | save;
	return (error);
}
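
/*
 * Example (not part of the original source): a minimal character-device
 * read routine built on uiomove().  example_read()/example_buf are
 * hypothetical names used only for illustration.  The pattern is: clamp
 * the request to the data available at uio_offset, then let uiomove()
 * walk the iovec array, advancing uio_offset and uio_resid as it goes.
 */
#if 0
static char example_buf[512];		/* hypothetical backing store */

static int
example_read(dev_t dev, struct uio *uio, int ioflag)
{
	int avail, n;

	if (uio->uio_offset < 0 ||
	    uio->uio_offset >= (off_t)sizeof(example_buf))
		return (0);		/* at or past end: report EOF */
	avail = (int)(sizeof(example_buf) - uio->uio_offset);
	n = (uio->uio_resid < avail) ? uio->uio_resid : avail;
	return (uiomove(example_buf + uio->uio_offset, n, uio));
}
#endif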

/*
 * Wrapper for uiomove() that validates the arguments against a known-good
 * kernel buffer.  Currently, uiomove() accepts a signed length argument (n),
 * which is almost certainly a bad thing, so we catch that here as well.  We
 * return a runtime failure, but it might be desirable to generate a runtime
 * assertion failure instead.
 */
int
uiomove_frombuf(void *buf, int buflen, struct uio *uio)
{
	unsigned int offset, n;

	if (uio->uio_offset < 0 || uio->uio_resid < 0 ||
	    (offset = uio->uio_offset) != uio->uio_offset)
		return (EINVAL);
	if (buflen <= 0 || offset >= buflen)
		return (0);
	if ((n = buflen - offset) > INT_MAX)
		return (EINVAL);
	return (uiomove((char *)buf + offset, n, uio));
}
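
/*
 * Example (hypothetical): uiomove_frombuf() folds the bounds checking
 * done by hand in the sketch above into the wrapper itself.  A handler
 * exposing a fixed kernel buffer (reusing the hypothetical example_buf)
 * reduces to a single call; reads at or past the end return 0 bytes.
 */
#if 0
static int
example_read2(dev_t dev, struct uio *uio, int ioflag)
{
	return (uiomove_frombuf(example_buf, sizeof(example_buf), uio));
}
#endif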

/*
 * Like uiomove(), but when ENABLE_VFS_IOOPT is configured, page-aligned
 * reads may be optimized by moving pages of the backing VM object (obj)
 * directly into the user address space instead of copying.
 */
int
uiomoveco(caddr_t cp, int n, struct uio *uio, struct vm_object *obj)
{
	struct iovec *iov;
	u_int cnt;
	int error;
	int baseticks = ticks;

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
	    ("uiomoveco: mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
	    ("uiomoveco proc"));

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		switch (uio->uio_segflg) {

		case UIO_USERSPACE:
			if (ticks - baseticks >= hogticks) {
				uio_yield();
				baseticks = ticks;
			}
			if (uio->uio_rw == UIO_READ) {
#ifdef ENABLE_VFS_IOOPT
				if (vfs_ioopt && ((cnt & PAGE_MASK) == 0) &&
				    ((((intptr_t)iov->iov_base) & PAGE_MASK) == 0) &&
				    ((uio->uio_offset & PAGE_MASK) == 0) &&
				    ((((intptr_t)cp) & PAGE_MASK) == 0)) {
					error = vm_uiomove(
					    &curproc->p_vmspace->vm_map, obj,
					    uio->uio_offset, cnt,
					    (vm_offset_t)iov->iov_base, NULL);
				} else
#endif
				{
					error = copyout(cp, iov->iov_base, cnt);
				}
			} else {
				error = copyin(iov->iov_base, cp, cnt);
			}
			if (error)
				return (error);
			break;

		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				bcopy((caddr_t)cp, iov->iov_base, cnt);
			else
				bcopy(iov->iov_base, (caddr_t)cp, cnt);
			break;
		case UIO_NOCOPY:
			break;
		}
		iov->iov_base += cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		cp += cnt;
		n -= cnt;
	}
	return (0);
}

#ifdef ENABLE_VFS_IOOPT

/*
 * Optimized page-aligned read into user space: move pages of the VM
 * object directly rather than copying.  Only active when vfs_ioopt >= 2;
 * the number of bytes actually transferred is returned in *nread.
 */
int
uioread(int n, struct uio *uio, struct vm_object *obj, int *nread)
{
	int npagesmoved;
	struct iovec *iov;
	u_int cnt, tcnt;
	int error;
	int baseticks = ticks;

	*nread = 0;
	if (vfs_ioopt < 2)
		return (0);

	error = 0;

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		if ((uio->uio_segflg == UIO_USERSPACE) &&
		    ((((intptr_t)iov->iov_base) & PAGE_MASK) == 0) &&
		    ((uio->uio_offset & PAGE_MASK) == 0)) {

			if (cnt < PAGE_SIZE)
				break;

			cnt &= ~PAGE_MASK;

			if (ticks - baseticks >= hogticks) {
				uio_yield();
				baseticks = ticks;
			}
			error = vm_uiomove(&curproc->p_vmspace->vm_map, obj,
			    uio->uio_offset, cnt,
			    (vm_offset_t)iov->iov_base, &npagesmoved);

			if (npagesmoved == 0)
				break;

			tcnt = npagesmoved * PAGE_SIZE;
			cnt = tcnt;

			if (error)
				break;

			iov->iov_base += cnt;
			iov->iov_len -= cnt;
			uio->uio_resid -= cnt;
			uio->uio_offset += cnt;
			*nread += cnt;
			n -= cnt;
		} else {
			break;
		}
	}
	return (error);
}

#endif

/*
 * Give next character to user as result of read.
 */
int
ureadc(int c, struct uio *uio)
{
	struct iovec *iov;

again:
	if (uio->uio_iovcnt == 0 || uio->uio_resid == 0)
		panic("ureadc");
	iov = uio->uio_iov;
	if (iov->iov_len == 0) {
		uio->uio_iovcnt--;
		uio->uio_iov++;
		goto again;
	}
	switch (uio->uio_segflg) {

	case UIO_USERSPACE:
		if (subyte(iov->iov_base, c) < 0)
			return (EFAULT);
		break;

	case UIO_SYSSPACE:
		*iov->iov_base = c;
		break;

	case UIO_NOCOPY:
		break;
	}
	iov->iov_base++;
	iov->iov_len--;
	uio->uio_resid--;
	uio->uio_offset++;
	return (0);
}
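
/*
 * Example (hypothetical): ureadc() suits producers that generate data a
 * byte at a time, such as draining a small software FIFO into a read
 * request.  example_fifo_getc() is a stand-in that returns the next
 * byte or -1 when the FIFO is empty; each successful ureadc() call
 * advances the uio by exactly one byte.
 */
#if 0
static int
example_drain(struct example_fifo *fifo, struct uio *uio)
{
	int c, error = 0;

	while (uio->uio_resid > 0 &&
	    (c = example_fifo_getc(fifo)) != -1) {
		error = ureadc(c, uio);
		if (error)
			break;
	}
	return (error);
}
#endif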

/*
 * General routine to allocate a hash table.  Make the hash table size a
 * power of 2 greater than or equal to the number of elements requested,
 * and store the masking value in *hashmask.
 */
void *
hashinit(int elements, struct malloc_type *type, u_long *hashmask)
{
	long hashsize;
	LIST_HEAD(generic, generic) *hashtbl;
	int i;

	if (elements <= 0)
		panic("hashinit: bad elements");
	for (hashsize = 2; hashsize < elements; hashsize <<= 1)
		continue;
	hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
	for (i = 0; i < hashsize; i++)
		LIST_INIT(&hashtbl[i]);
	*hashmask = hashsize - 1;
	return (hashtbl);
}
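
/*
 * Example (hypothetical names): a typical hashinit() consumer.  Since
 * the table size is a power of 2, a bucket is selected by ANDing the
 * hash value with *hashmask rather than by a modulo operation.
 */
#if 0
static LIST_HEAD(example_head, example_node) *example_tbl;
static u_long example_mask;

static void
example_init(void)
{
	/* Requesting 100 elements rounds up to 128 buckets (mask 127). */
	example_tbl = hashinit(100, M_TEMP, &example_mask);
}

static struct example_head *
example_bucket(u_long key)
{
	return (&example_tbl[key & example_mask]);
}
#endif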

static int primes[] = { 1, 13, 31, 61, 127, 251, 509, 761, 1021, 1531, 2039,
			2557, 3067, 3583, 4093, 4603, 5119, 5623, 6143, 6653,
			7159, 7673, 8191, 12281, 16381, 24571, 32749 };
#define NPRIMES (sizeof(primes) / sizeof(primes[0]))

/*
 * General routine to allocate a prime number sized hash table.
 */
void *
phashinit(int elements, struct malloc_type *type, u_long *nentries)
{
	long hashsize;
	LIST_HEAD(generic, generic) *hashtbl;
	int i;

	if (elements <= 0)
		panic("phashinit: bad elements");
	for (i = 1, hashsize = primes[1]; hashsize <= elements;) {
		i++;
		if (i == NPRIMES)
			break;
		hashsize = primes[i];
	}
	hashsize = primes[i - 1];
	hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
	for (i = 0; i < hashsize; i++)
		LIST_INIT(&hashtbl[i]);
	*nentries = hashsize;
	return (hashtbl);
}
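
/*
 * Example (hypothetical): phashinit() is used the same way, except the
 * table size is prime, so a bucket must be selected with a modulo by
 * the value returned in *nentries.
 */
#if 0
static LIST_HEAD(example_phead, example_pnode) *example_ptbl;
static u_long example_nbuckets;

static void
example_pinit(void)
{
	/* Requesting 1000 elements selects 761 buckets, the largest
	   prime in the internal table not exceeding 1000. */
	example_ptbl = phashinit(1000, M_TEMP, &example_nbuckets);
}

static struct example_phead *
example_pbucket(u_long key)
{
	return (&example_ptbl[key % example_nbuckets]);
}
#endif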

/*
 * Copyin an iovec.  If the iovec array fits, use the preallocated small
 * iovec structure.  If it is too big, dynamically allocate an iovec array
 * of sufficient size.
 */
int
iovec_copyin(struct iovec *uiov, struct iovec **kiov, struct iovec *siov,
    size_t iov_cnt, size_t *iov_len)
{
	struct iovec *iovp;
	int error, i;

	if (iov_cnt >= UIO_MAXIOV)
		return (EMSGSIZE);
	if (iov_cnt >= UIO_SMALLIOV) {
		MALLOC(*kiov, struct iovec *, sizeof(struct iovec) * iov_cnt,
		    M_IOV, M_WAITOK);
	} else {
		*kiov = siov;
	}
	error = copyin(uiov, *kiov, iov_cnt * sizeof(struct iovec));
	if (error)
		goto cleanup;
	*iov_len = 0;
	for (i = 0, iovp = *kiov; i < iov_cnt; i++, iovp++)
		*iov_len += iovp->iov_len;

cleanup:
	if (error)
		iovec_free(kiov, siov);
	return (error);
}
433