xref: /dragonfly/sys/kern/kern_subr.c (revision 1de703da)
1 /*
2  * Copyright (c) 1982, 1986, 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the University of
21  *	California, Berkeley and its contributors.
22  * 4. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  *	@(#)kern_subr.c	8.3 (Berkeley) 1/21/94
39  * $FreeBSD: src/sys/kern/kern_subr.c,v 1.31.2.2 2002/04/21 08:09:37 bde Exp $
40  * $DragonFly: src/sys/kern/kern_subr.c,v 1.2 2003/06/17 04:28:41 dillon Exp $
41  */
42 
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/proc.h>
47 #include <sys/malloc.h>
48 #include <sys/lock.h>
49 #include <sys/resourcevar.h>
50 #include <sys/vnode.h>
51 
52 #include <vm/vm.h>
53 #include <vm/vm_page.h>
54 #include <vm/vm_map.h>
55 
56 int
57 uiomove(cp, n, uio)
58 	register caddr_t cp;
59 	register int n;
60 	register struct uio *uio;
61 {
62 	register struct iovec *iov;
63 	u_int cnt;
64 	int error = 0;
65 	int save = 0;
66 
67 	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
68 	    ("uiomove: mode"));
69 	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_procp == curproc,
70 	    ("uiomove proc"));
71 
72 	if (curproc) {
73 		save = curproc->p_flag & P_DEADLKTREAT;
74 		curproc->p_flag |= P_DEADLKTREAT;
75 	}
76 
77 	while (n > 0 && uio->uio_resid) {
78 		iov = uio->uio_iov;
79 		cnt = iov->iov_len;
80 		if (cnt == 0) {
81 			uio->uio_iov++;
82 			uio->uio_iovcnt--;
83 			continue;
84 		}
85 		if (cnt > n)
86 			cnt = n;
87 
88 		switch (uio->uio_segflg) {
89 
90 		case UIO_USERSPACE:
91 		case UIO_USERISPACE:
92 			if (ticks - switchticks >= hogticks)
93 				uio_yield();
94 			if (uio->uio_rw == UIO_READ)
95 				error = copyout(cp, iov->iov_base, cnt);
96 			else
97 				error = copyin(iov->iov_base, cp, cnt);
98 			if (error)
99 				break;
100 			break;
101 
102 		case UIO_SYSSPACE:
103 			if (uio->uio_rw == UIO_READ)
104 				bcopy((caddr_t)cp, iov->iov_base, cnt);
105 			else
106 				bcopy(iov->iov_base, (caddr_t)cp, cnt);
107 			break;
108 		case UIO_NOCOPY:
109 			break;
110 		}
111 		iov->iov_base += cnt;
112 		iov->iov_len -= cnt;
113 		uio->uio_resid -= cnt;
114 		uio->uio_offset += cnt;
115 		cp += cnt;
116 		n -= cnt;
117 	}
118 	if (curproc)
119 		curproc->p_flag = (curproc->p_flag & ~P_DEADLKTREAT) | save;
120 	return (error);
121 }
122 
/*
 * uiomoveco:
 *
 *	Like uiomove(), but when ENABLE_VFS_IOOPT is configured and the
 *	chunk is fully page-aligned (size, user address, uio offset, and
 *	kernel buffer), a UIO_READ into user space is satisfied by
 *	remapping pages from the VM object obj via vm_uiomove() instead
 *	of copying.  All other cases fall back to plain copyin/copyout
 *	or bcopy.  Returns 0 on success or an errno value on failure.
 *
 *	Unlike uiomove(), this routine does not set P_DEADLKTREAT, and
 *	it returns immediately on a fault without advancing the uio past
 *	the failed chunk.
 */
int
uiomoveco(cp, n, uio, obj)
	caddr_t cp;
	int n;
	struct uio *uio;
	struct vm_object *obj;
{
	struct iovec *iov;
	u_int cnt;
	int error;

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
	    ("uiomoveco: mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_procp == curproc,
	    ("uiomoveco proc"));

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			/* this iovec is exhausted, advance to the next */
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		switch (uio->uio_segflg) {

		case UIO_USERSPACE:
		case UIO_USERISPACE:
			if (ticks - switchticks >= hogticks)
				uio_yield();
			if (uio->uio_rw == UIO_READ) {
#ifdef ENABLE_VFS_IOOPT
				/*
				 * Page-flipping fast path: everything must
				 * be page-aligned for vm_uiomove().
				 */
				if (vfs_ioopt && ((cnt & PAGE_MASK) == 0) &&
					((((intptr_t) iov->iov_base) & PAGE_MASK) == 0) &&
					((uio->uio_offset & PAGE_MASK) == 0) &&
					((((intptr_t) cp) & PAGE_MASK) == 0)) {
						error = vm_uiomove(&curproc->p_vmspace->vm_map, obj,
								uio->uio_offset, cnt,
								(vm_offset_t) iov->iov_base, NULL);
				} else
#endif
				{
					error = copyout(cp, iov->iov_base, cnt);
				}
			} else {
				error = copyin(iov->iov_base, cp, cnt);
			}
			if (error)
				return (error);
			break;

		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				bcopy((caddr_t)cp, iov->iov_base, cnt);
			else
				bcopy(iov->iov_base, (caddr_t)cp, cnt);
			break;
		case UIO_NOCOPY:
			break;
		}
		iov->iov_base += cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		cp += cnt;
		n -= cnt;
	}
	return (0);
}
195 
196 #ifdef ENABLE_VFS_IOOPT
197 
/*
 * uioread:
 *
 *	Optimized read: attempt to satisfy up to n bytes of the uio by
 *	remapping whole pages from the VM object obj into user space via
 *	vm_uiomove() rather than copying.  Only page-aligned UIO_USERSPACE
 *	chunks of at least a page are handled; the routine stops at the
 *	first chunk it cannot handle (the caller is expected to finish
 *	the transfer by other means).  *nread receives the number of
 *	bytes actually moved.  Only active when vfs_ioopt >= 2; returns
 *	0 or an errno value from vm_uiomove().
 */
int
uioread(n, uio, obj, nread)
	int n;
	struct uio *uio;
	struct vm_object *obj;
	int *nread;
{
	int npagesmoved;
	struct iovec *iov;
	u_int cnt, tcnt;
	int error;

	*nread = 0;
	if (vfs_ioopt < 2)
		return 0;

	error = 0;

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			/* this iovec is exhausted, advance to the next */
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		if ((uio->uio_segflg == UIO_USERSPACE) &&
			((((intptr_t) iov->iov_base) & PAGE_MASK) == 0) &&
				 ((uio->uio_offset & PAGE_MASK) == 0) ) {

			if (cnt < PAGE_SIZE)
				break;

			/* round down to whole pages */
			cnt &= ~PAGE_MASK;

			if (ticks - switchticks >= hogticks)
				uio_yield();
			error = vm_uiomove(&curproc->p_vmspace->vm_map, obj,
						uio->uio_offset, cnt,
						(vm_offset_t) iov->iov_base, &npagesmoved);

			if (npagesmoved == 0)
				break;

			/*
			 * vm_uiomove() may move fewer pages than asked;
			 * account only for what actually moved before
			 * checking for an error.
			 */
			tcnt = npagesmoved * PAGE_SIZE;
			cnt = tcnt;

			if (error)
				break;

			iov->iov_base += cnt;
			iov->iov_len -= cnt;
			uio->uio_resid -= cnt;
			uio->uio_offset += cnt;
			*nread += cnt;
			n -= cnt;
		} else {
			break;
		}
	}
	return error;
}
263 
264 #endif
265 
266 /*
267  * Give next character to user as result of read.
268  */
269 int
270 ureadc(c, uio)
271 	register int c;
272 	register struct uio *uio;
273 {
274 	register struct iovec *iov;
275 
276 again:
277 	if (uio->uio_iovcnt == 0 || uio->uio_resid == 0)
278 		panic("ureadc");
279 	iov = uio->uio_iov;
280 	if (iov->iov_len == 0) {
281 		uio->uio_iovcnt--;
282 		uio->uio_iov++;
283 		goto again;
284 	}
285 	switch (uio->uio_segflg) {
286 
287 	case UIO_USERSPACE:
288 		if (subyte(iov->iov_base, c) < 0)
289 			return (EFAULT);
290 		break;
291 
292 	case UIO_SYSSPACE:
293 		*iov->iov_base = c;
294 		break;
295 
296 	case UIO_USERISPACE:
297 		if (suibyte(iov->iov_base, c) < 0)
298 			return (EFAULT);
299 		break;
300 	case UIO_NOCOPY:
301 		break;
302 	}
303 	iov->iov_base++;
304 	iov->iov_len--;
305 	uio->uio_resid--;
306 	uio->uio_offset++;
307 	return (0);
308 }
309 
310 #ifdef vax	/* unused except by ct.c, other oddities XXX */
311 /*
312  * Get next character written in by user from uio.
313  */
/*
 * Fetch one byte from the current position of the uio and advance it.
 * Returns the byte (0-255) on success or -1 when the uio is exhausted
 * or the user address cannot be read.
 */
int
uwritec(uio)
	struct uio *uio;
{
	register struct iovec *iov;
	register int c;

	if (uio->uio_resid <= 0)
		return (-1);
again:
	if (uio->uio_iovcnt <= 0)
		panic("uwritec");
	iov = uio->uio_iov;
	if (iov->iov_len == 0) {
		/* this iovec is exhausted, advance to the next */
		uio->uio_iov++;
		if (--uio->uio_iovcnt == 0)
			return (-1);
		goto again;
	}
	/*
	 * NOTE(review): there is no UIO_NOCOPY (or default) case below,
	 * so for that segflg c would be read uninitialized — harmless
	 * today only because this function is compiled out (#ifdef vax).
	 */
	switch (uio->uio_segflg) {

	case UIO_USERSPACE:
		c = fubyte(iov->iov_base);
		break;

	case UIO_SYSSPACE:
		c = *(u_char *) iov->iov_base;
		break;

	case UIO_USERISPACE:
		c = fuibyte(iov->iov_base);
		break;
	}
	if (c < 0)
		return (-1);
	iov->iov_base++;
	iov->iov_len--;
	uio->uio_resid--;
	uio->uio_offset++;
	return (c);
}
355 #endif /* vax */
356 
357 /*
358  * General routine to allocate a hash table.
359  */
360 void *
361 hashinit(elements, type, hashmask)
362 	int elements;
363 	struct malloc_type *type;
364 	u_long *hashmask;
365 {
366 	long hashsize;
367 	LIST_HEAD(generic, generic) *hashtbl;
368 	int i;
369 
370 	if (elements <= 0)
371 		panic("hashinit: bad elements");
372 	for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
373 		continue;
374 	hashsize >>= 1;
375 	hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
376 	for (i = 0; i < hashsize; i++)
377 		LIST_INIT(&hashtbl[i]);
378 	*hashmask = hashsize - 1;
379 	return (hashtbl);
380 }
381 
/*
 * Prime bucket counts used by phashinit().  The leading 1 is a sentinel
 * so primes[0] serves as the fallback size when the requested element
 * count is smaller than primes[1].
 */
static int primes[] = { 1, 13, 31, 61, 127, 251, 509, 761, 1021, 1531, 2039,
			2557, 3067, 3583, 4093, 4603, 5119, 5623, 6143, 6653,
			7159, 7673, 8191, 12281, 16381, 24571, 32749 };
/* Number of entries in the primes[] table. */
#define NPRIMES (sizeof(primes) / sizeof(primes[0]))
386 
387 /*
388  * General routine to allocate a prime number sized hash table.
389  */
390 void *
391 phashinit(elements, type, nentries)
392 	int elements;
393 	struct malloc_type *type;
394 	u_long *nentries;
395 {
396 	long hashsize;
397 	LIST_HEAD(generic, generic) *hashtbl;
398 	int i;
399 
400 	if (elements <= 0)
401 		panic("phashinit: bad elements");
402 	for (i = 1, hashsize = primes[1]; hashsize <= elements;) {
403 		i++;
404 		if (i == NPRIMES)
405 			break;
406 		hashsize = primes[i];
407 	}
408 	hashsize = primes[i - 1];
409 	hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
410 	for (i = 0; i < hashsize; i++)
411 		LIST_INIT(&hashtbl[i]);
412 	*nentries = hashsize;
413 	return (hashtbl);
414 }
415 
/*
 * uio_yield:
 *
 *	Surrender the CPU in the middle of a long uio transfer so other
 *	runnable processes are not starved.  The current process is put
 *	back on the run queue at its user priority and mi_switch() is
 *	called with interrupts blocked via splhigh().
 */
void
uio_yield()
{
	struct proc *p;
	int s;

	p = curproc;
	s = splhigh();			/* run queue/switch must be atomic */
	p->p_priority = p->p_usrpri;	/* drop back to user priority */
	setrunqueue(p);
	p->p_stats->p_ru.ru_nivcsw++;	/* accounted as involuntary switch */
	mi_switch();
	splx(s);
}
430