/*
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_subr.c	8.3 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_subr.c,v 1.31.2.2 2002/04/21 08:09:37 bde Exp $
 * $DragonFly: src/sys/kern/kern_subr.c,v 1.10 2003/08/03 12:29:05 hmp Exp $
 */

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

/*
 * uiomove() - Copy up to n bytes between the kernel buffer cp and the
 * iovecs described by uio, in the direction given by uio->uio_rw,
 * advancing uio_offset, uio_resid and the iovec pointers as it goes.
 */
int
uiomove(cp, n, uio)
	caddr_t cp;
	int n;
	struct uio *uio;
{
	struct iovec *iov;
	u_int cnt;
	int error = 0;
	int save = 0;
	int baseticks = ticks;

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
	    ("uiomove: mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
	    ("uiomove proc"));

	if (curproc) {
		save = curproc->p_flag & P_DEADLKTREAT;
		curproc->p_flag |= P_DEADLKTREAT;
	}

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		switch (uio->uio_segflg) {

		case UIO_USERSPACE:
		case UIO_USERISPACE:
			if (ticks - baseticks >= hogticks) {
				uio_yield();
				baseticks = ticks;
			}
			if (uio->uio_rw == UIO_READ)
				error = copyout(cp, iov->iov_base, cnt);
			else
				error = copyin(iov->iov_base, cp, cnt);
			break;

		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				bcopy((caddr_t)cp, iov->iov_base, cnt);
			else
				bcopy(iov->iov_base, (caddr_t)cp, cnt);
			break;
		case UIO_NOCOPY:
			break;
		}
		/*
		 * Stop on the first copyin/copyout fault so the error is
		 * not masked by later, successful transfers.
		 */
		if (error)
			break;
		iov->iov_base += cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		cp += cnt;
		n -= cnt;
	}
	if (curproc)
		curproc->p_flag = (curproc->p_flag & ~P_DEADLKTREAT) | save;
	return (error);
}
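
/*
 * Illustrative sketch (not part of the original file): a typical way a
 * character device read routine hands a kernel buffer to userspace via
 * uiomove().  The device, its backing buffer (foo_buf/foo_len) and the
 * function name are hypothetical; only the uiomove() interface above is
 * assumed.
 *
 *	static char foo_buf[512];
 *	static int foo_len;
 *
 *	static int
 *	foo_read(dev_t dev, struct uio *uio, int ioflag)
 *	{
 *		int cnt;
 *
 *		if (uio->uio_offset >= foo_len)
 *			return (0);
 *		cnt = foo_len - uio->uio_offset;
 *		if (cnt > uio->uio_resid)
 *			cnt = uio->uio_resid;
 *		return (uiomove(foo_buf + uio->uio_offset, cnt, uio));
 *	}
 *
 * uiomove() itself advances uio_offset, uio_resid and the iovec pointers,
 * so the caller only has to bound the transfer length.
 */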

/*
 * Variant of uiomove() used by the VFS I/O optimization code: when
 * ENABLE_VFS_IOOPT is configured and the transfer is page aligned, reads
 * can be satisfied by handing pages from obj to the VM system via
 * vm_uiomove() instead of copying.
 */
int
uiomoveco(cp, n, uio, obj)
	caddr_t cp;
	int n;
	struct uio *uio;
	struct vm_object *obj;
{
	struct iovec *iov;
	u_int cnt;
	int error;
	int baseticks = ticks;

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
	    ("uiomoveco: mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
	    ("uiomoveco proc"));

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		switch (uio->uio_segflg) {

		case UIO_USERSPACE:
		case UIO_USERISPACE:
			if (ticks - baseticks >= hogticks) {
				uio_yield();
				baseticks = ticks;
			}
			if (uio->uio_rw == UIO_READ) {
#ifdef ENABLE_VFS_IOOPT
				if (vfs_ioopt && ((cnt & PAGE_MASK) == 0) &&
					((((intptr_t) iov->iov_base) & PAGE_MASK) == 0) &&
					((uio->uio_offset & PAGE_MASK) == 0) &&
					((((intptr_t) cp) & PAGE_MASK) == 0)) {
						error = vm_uiomove(&curproc->p_vmspace->vm_map, obj,
								uio->uio_offset, cnt,
								(vm_offset_t) iov->iov_base, NULL);
				} else
#endif
				{
					error = copyout(cp, iov->iov_base, cnt);
				}
			} else {
				error = copyin(iov->iov_base, cp, cnt);
			}
			if (error)
				return (error);
			break;

		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				bcopy((caddr_t)cp, iov->iov_base, cnt);
			else
				bcopy(iov->iov_base, (caddr_t)cp, cnt);
			break;
		case UIO_NOCOPY:
			break;
		}
		iov->iov_base += cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		cp += cnt;
		n -= cnt;
	}
	return (0);
}

#ifdef ENABLE_VFS_IOOPT

/*
 * Page-granular read path for the optional VFS I/O optimization (only
 * active when vfs_ioopt >= 2): moves whole pages from obj into the user's
 * address space with vm_uiomove() and reports the number of bytes handled
 * through *nread.
 */
int
uioread(n, uio, obj, nread)
	int n;
	struct uio *uio;
	struct vm_object *obj;
	int *nread;
{
	int npagesmoved;
	struct iovec *iov;
	u_int cnt, tcnt;
	int error;
	int baseticks = ticks;

	*nread = 0;
	if (vfs_ioopt < 2)
		return (0);

	error = 0;

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		if ((uio->uio_segflg == UIO_USERSPACE) &&
			((((intptr_t) iov->iov_base) & PAGE_MASK) == 0) &&
				 ((uio->uio_offset & PAGE_MASK) == 0) ) {

			if (cnt < PAGE_SIZE)
				break;

			cnt &= ~PAGE_MASK;

			if (ticks - baseticks >= hogticks) {
				uio_yield();
				baseticks = ticks;
			}
			error = vm_uiomove(&curproc->p_vmspace->vm_map, obj,
						uio->uio_offset, cnt,
						(vm_offset_t) iov->iov_base, &npagesmoved);

			if (npagesmoved == 0)
				break;

			tcnt = npagesmoved * PAGE_SIZE;
			cnt = tcnt;

			if (error)
				break;

			iov->iov_base += cnt;
			iov->iov_len -= cnt;
			uio->uio_resid -= cnt;
			uio->uio_offset += cnt;
			*nread += cnt;
			n -= cnt;
		} else {
			break;
		}
	}
	return (error);
}

#endif

/*
 * Give next character to user as result of read.
 */
int
ureadc(c, uio)
	int c;
	struct uio *uio;
{
	struct iovec *iov;

again:
	if (uio->uio_iovcnt == 0 || uio->uio_resid == 0)
		panic("ureadc");
	iov = uio->uio_iov;
	if (iov->iov_len == 0) {
		uio->uio_iovcnt--;
		uio->uio_iov++;
		goto again;
	}
	switch (uio->uio_segflg) {

	case UIO_USERSPACE:
		if (subyte(iov->iov_base, c) < 0)
			return (EFAULT);
		break;

	case UIO_SYSSPACE:
		*iov->iov_base = c;
		break;

	case UIO_USERISPACE:
		if (suibyte(iov->iov_base, c) < 0)
			return (EFAULT);
		break;
	case UIO_NOCOPY:
		break;
	}
	iov->iov_base++;
	iov->iov_len--;
	uio->uio_resid--;
	uio->uio_offset++;
	return (0);
}
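
/*
 * Illustrative sketch (not part of the original file): ureadc() suits read
 * paths that generate their output a byte at a time.  The function name and
 * the message it returns are hypothetical.
 *
 *	static int
 *	bar_read(dev_t dev, struct uio *uio, int ioflag)
 *	{
 *		const char *msg = "hello, world\n";
 *		int error = 0;
 *
 *		while (*msg != '\0' && uio->uio_resid > 0 &&
 *		    (error = ureadc(*msg, uio)) == 0)
 *			msg++;
 *		return (error);
 *	}
 *
 * Note that ureadc() panics if called with no residual, hence the explicit
 * uio_resid check before each character.
 */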

#ifdef vax	/* unused except by ct.c, other oddities XXX */
/*
 * Get next character written in by user from uio.
 */
int
uwritec(uio)
	struct uio *uio;
{
	struct iovec *iov;
	int c;

	if (uio->uio_resid <= 0)
		return (-1);
again:
	if (uio->uio_iovcnt <= 0)
		panic("uwritec");
	iov = uio->uio_iov;
	if (iov->iov_len == 0) {
		uio->uio_iov++;
		if (--uio->uio_iovcnt == 0)
			return (-1);
		goto again;
	}
	switch (uio->uio_segflg) {

	case UIO_USERSPACE:
		c = fubyte(iov->iov_base);
		break;

	case UIO_SYSSPACE:
		c = *(u_char *) iov->iov_base;
		break;

	case UIO_USERISPACE:
		c = fuibyte(iov->iov_base);
		break;
	}
	if (c < 0)
		return (-1);
	iov->iov_base++;
	iov->iov_len--;
	uio->uio_resid--;
	uio->uio_offset++;
	return (c);
}
#endif /* vax */

/*
 * General routine to allocate a hash table.
 */
void *
hashinit(elements, type, hashmask)
	int elements;
	struct malloc_type *type;
	u_long *hashmask;
{
	long hashsize;
	LIST_HEAD(generic, generic) *hashtbl;
	int i;

	if (elements <= 0)
		panic("hashinit: bad elements");
	for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
		continue;
	hashsize >>= 1;
	hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
	for (i = 0; i < hashsize; i++)
		LIST_INIT(&hashtbl[i]);
	*hashmask = hashsize - 1;
	return (hashtbl);
}
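
/*
 * Illustrative sketch (not part of the original file): hashinit() returns an
 * array of LIST_HEADs whose size is the largest power of two not greater
 * than elements, and writes (size - 1) into *hashmask, so callers pick a
 * bucket by masking their hash value.  The names below (struct myobj,
 * myobj_*) and the use of M_TEMP with 128 expected elements are
 * hypothetical.
 *
 *	struct myobj {
 *		LIST_ENTRY(myobj) mo_link;
 *		u_long mo_key;
 *	};
 *	static LIST_HEAD(myobj_head, myobj) *myobj_hashtbl;
 *	static u_long myobj_hashmask;
 *
 *	static void
 *	myobj_init(void)
 *	{
 *		myobj_hashtbl = hashinit(128, M_TEMP, &myobj_hashmask);
 *	}
 *
 *	static struct myobj_head *
 *	myobj_bucket(u_long key)
 *	{
 *		return (&myobj_hashtbl[key & myobj_hashmask]);
 *	}
 */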

static int primes[] = { 1, 13, 31, 61, 127, 251, 509, 761, 1021, 1531, 2039,
			2557, 3067, 3583, 4093, 4603, 5119, 5623, 6143, 6653,
			7159, 7673, 8191, 12281, 16381, 24571, 32749 };
#define NPRIMES (sizeof(primes) / sizeof(primes[0]))

/*
 * General routine to allocate a prime number sized hash table.
 */
void *
phashinit(elements, type, nentries)
	int elements;
	struct malloc_type *type;
	u_long *nentries;
{
	long hashsize;
	LIST_HEAD(generic, generic) *hashtbl;
	int i;

	if (elements <= 0)
		panic("phashinit: bad elements");
	for (i = 1, hashsize = primes[1]; hashsize <= elements;) {
		i++;
		if (i == NPRIMES)
			break;
		hashsize = primes[i];
	}
	hashsize = primes[i - 1];
	hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
	for (i = 0; i < hashsize; i++)
		LIST_INIT(&hashtbl[i]);
	*nentries = hashsize;
	return (hashtbl);
}
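
/*
 * Illustrative sketch (not part of the original file): phashinit() differs
 * from hashinit() in that the bucket count is a prime, reported through
 * *nentries rather than a mask, so callers index with a modulo.  The names
 * below and the use of M_TEMP with 100 expected elements are hypothetical.
 *
 *	static LIST_HEAD(pobj_head, pobj) *pobj_hashtbl;
 *	static u_long pobj_nbuckets;
 *
 *	static void
 *	pobj_init(void)
 *	{
 *		pobj_hashtbl = phashinit(100, M_TEMP, &pobj_nbuckets);
 *	}
 *
 *	static struct pobj_head *
 *	pobj_bucket(u_long key)
 *	{
 *		return (&pobj_hashtbl[key % pobj_nbuckets]);
 *	}
 */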