/*	$OpenBSD: kern_subr.c,v 1.52 2023/01/31 15:18:56 deraadt Exp $	*/
/*	$NetBSD: kern_subr.c,v 1.15 1996/04/09 17:21:56 ragge Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_subr.c	8.3 (Berkeley) 1/21/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <uvm/uvm_extern.h>

#ifdef PMAP_CHECK_COPYIN

static inline int check_copyin(struct proc *, const void *, size_t);
extern int _copyinstr(const void *, void *, size_t, size_t *);
extern int _copyin(const void *uaddr, void *kaddr, size_t len);

/*
 * If the range overlaps a check_copyin region, return EFAULT.
 */
static inline int
check_copyin(struct proc *p, const void *vstart, size_t len)
{
	struct vm_map *map = &p->p_vmspace->vm_map;
	const vaddr_t start = (vaddr_t)vstart;
	const vaddr_t end = start + len;
	int i, max;

	/* XXX if the array was sorted, we could shortcut */
	max = map->check_copyin_count;
	membar_consumer();
	for (i = 0; i < max; i++) {
		vaddr_t s = map->check_copyin[i].start;
		vaddr_t e = map->check_copyin[i].end;
		if ((start >= s && start < e) || (end > s && end < e))
			return EFAULT;
	}
	return (0);
}

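/*
 * copyinstr(): copy a NUL-terminated string in from user space with the
 * MD _copyinstr(), then reject the transfer if the range actually read
 * overlaps a check_copyin region.
 */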
int
copyinstr(const void *uaddr, void *kaddr, size_t len, size_t *done)
{
	size_t alen;
	int error;

	/*
	 * Must do the copyin check after figuring out the string length:
	 * the full buffer of size len may cross into another ELF segment,
	 * so only the range actually copied (alen) is checked.
	 */
	error = _copyinstr(uaddr, kaddr, len, &alen);
	if (PMAP_CHECK_COPYIN && error == 0)
		error = check_copyin(curproc, uaddr, alen);
	if (done)
		*done = alen;
	return (error);
}

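/*
 * copyin(): the requested length is known up front, so the check_copyin
 * range check can be done before handing off to the MD _copyin().
 */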
int
copyin(const void *uaddr, void *kaddr, size_t len)
{
	int error = 0;

	if (PMAP_CHECK_COPYIN)
		error = check_copyin(curproc, uaddr, len);
	if (error == 0)
		error = _copyin(uaddr, kaddr, len);
	return (error);
}
#endif /* PMAP_CHECK_COPYIN */

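/*
 * uiomove(): move up to n bytes between the kernel buffer cp and the
 * I/O vectors described by uio, in the direction given by uio_rw,
 * advancing the iovec, resid and offset bookkeeping as it goes.
 */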
int
uiomove(void *cp, size_t n, struct uio *uio)
{
	struct iovec *iov;
	size_t cnt;
	int error = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ && uio->uio_rw != UIO_WRITE)
		panic("uiomove: mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("uiomove: proc");
#endif

	if (n > uio->uio_resid)
		n = uio->uio_resid;

	while (n > 0) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			KASSERT(uio->uio_iovcnt > 0);
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;
		switch (uio->uio_segflg) {

		case UIO_USERSPACE:
			sched_pause(preempt);
			if (uio->uio_rw == UIO_READ)
				error = copyout(cp, iov->iov_base, cnt);
			else
				error = copyin(iov->iov_base, cp, cnt);
			if (error)
				return (error);
			break;

		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				error = kcopy(cp, iov->iov_base, cnt);
			else
				error = kcopy(iov->iov_base, cp, cnt);
			if (error)
				return (error);
		}
		iov->iov_base = (caddr_t)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		cp = (caddr_t)cp + cnt;
		n -= cnt;
	}
	return (error);
}

/*
 * Give next character to user as result of read.
 */
int
ureadc(int c, struct uio *uio)
{
	struct iovec *iov;

	if (uio->uio_resid == 0)
#ifdef DIAGNOSTIC
		panic("ureadc: zero resid");
#else
		return (EINVAL);
#endif
again:
	if (uio->uio_iovcnt <= 0)
#ifdef DIAGNOSTIC
		panic("ureadc: non-positive iovcnt");
#else
		return (EINVAL);
#endif
	iov = uio->uio_iov;
	if (iov->iov_len <= 0) {
		uio->uio_iovcnt--;
		uio->uio_iov++;
		goto again;
	}
	switch (uio->uio_segflg) {

	case UIO_USERSPACE:
	{
		char tmp = c;

		if (copyout(&tmp, iov->iov_base, sizeof(char)) != 0)
			return (EFAULT);
	}
		break;

	case UIO_SYSSPACE:
		*(char *)iov->iov_base = c;
		break;
	}
	iov->iov_base = (caddr_t)iov->iov_base + 1;
	iov->iov_len--;
	uio->uio_resid--;
	uio->uio_offset++;
	return (0);
}

/*
 * General routine to allocate a hash table.
 */
void *
hashinit(int elements, int type, int flags, u_long *hashmask)
{
	u_long hashsize, i;
	LIST_HEAD(generic, generic) *hashtbl;

	if (elements <= 0)
		panic("hashinit: bad cnt");
	if ((elements & (elements - 1)) == 0)
		hashsize = elements;
	else
		for (hashsize = 1; hashsize < elements; hashsize <<= 1)
			continue;
	hashtbl = mallocarray(hashsize, sizeof(*hashtbl), type, flags);
	if (hashtbl == NULL)
		return NULL;
	for (i = 0; i < hashsize; i++)
		LIST_INIT(&hashtbl[i]);
	*hashmask = hashsize - 1;
	return (hashtbl);
}

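/*
 * Free a hash table allocated with hashinit().  The element count must
 * match the one given to hashinit() so the same table size is recomputed.
 */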
void
hashfree(void *hash, int elements, int type)
{
	u_long hashsize;
	LIST_HEAD(generic, generic) *hashtbl = hash;

	if (elements <= 0)
		panic("hashfree: bad cnt");
	if ((elements & (elements - 1)) == 0)
		hashsize = elements;
	else
		for (hashsize = 1; hashsize < elements; hashsize <<= 1)
			continue;

	free(hashtbl, type, sizeof(*hashtbl) * hashsize);
}

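/*
 * Usage sketch (illustrative, not part of this file): a hypothetical
 * caller sizes the table from an expected element count and indexes it
 * by masking a hash value with the returned hashmask; example_tbl,
 * example_mask, example_entry and the M_TEMP type are assumed names
 * chosen for illustration.
 *
 *	u_long example_mask;
 *	LIST_HEAD(, example_entry) *example_tbl;
 *
 *	example_tbl = hashinit(128, M_TEMP, M_WAITOK, &example_mask);
 *	head = &example_tbl[key & example_mask];
 *	...
 *	hashfree(example_tbl, 128, M_TEMP);
 */
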
/*
 * "startup hook" types, functions, and variables.
 */

struct hook_desc_head startuphook_list =
    TAILQ_HEAD_INITIALIZER(startuphook_list);

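/*
 * Register a hook: allocate a descriptor for fn/arg and queue it at the
 * tail or head of the given list.  Returns an opaque cookie for
 * hook_disestablish(), or NULL if the allocation fails.
 */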
void *
hook_establish(struct hook_desc_head *head, int tail, void (*fn)(void *),
    void *arg)
{
	struct hook_desc *hdp;

	hdp = malloc(sizeof(*hdp), M_DEVBUF, M_NOWAIT);
	if (hdp == NULL)
		return (NULL);

	hdp->hd_fn = fn;
	hdp->hd_arg = arg;
	if (tail)
		TAILQ_INSERT_TAIL(head, hdp, hd_list);
	else
		TAILQ_INSERT_HEAD(head, hdp, hd_list);

	return (hdp);
}

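/*
 * Remove a hook previously registered with hook_establish(), identified
 * by the cookie it returned, and free its descriptor.
 */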
void
hook_disestablish(struct hook_desc_head *head, void *vhook)
{
	struct hook_desc *hdp;

#ifdef DIAGNOSTIC
	for (hdp = TAILQ_FIRST(head); hdp != NULL;
	    hdp = TAILQ_NEXT(hdp, hd_list))
		if (hdp == vhook)
			break;
	if (hdp == NULL)
		return;
#endif
	hdp = vhook;
	TAILQ_REMOVE(head, hdp, hd_list);
	free(hdp, M_DEVBUF, sizeof(*hdp));
}

/*
 * Run hooks.  Startup hooks are invoked right after scheduler_start but
 * before root is mounted.  Shutdown hooks are invoked immediately before
 * the system is halted or rebooted, i.e. after file systems have been
 * unmounted, after the crash dump has been written, etc.
 */
void
dohooks(struct hook_desc_head *head, int flags)
{
	struct hook_desc *hdp, *hdp_temp;

	if ((flags & HOOK_REMOVE) == 0) {
		TAILQ_FOREACH_SAFE(hdp, head, hd_list, hdp_temp) {
			(*hdp->hd_fn)(hdp->hd_arg);
		}
	} else {
		while ((hdp = TAILQ_FIRST(head)) != NULL) {
			TAILQ_REMOVE(head, hdp, hd_list);
			(*hdp->hd_fn)(hdp->hd_arg);
			if ((flags & HOOK_FREE) != 0)
				free(hdp, M_DEVBUF, sizeof(*hdp));
		}
	}
}
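
/*
 * Usage sketch (illustrative, not part of this file): a hypothetical
 * driver registers a function to run once at startup, and the kernel
 * later fires and releases the whole list; example_startup and sc are
 * assumed names.
 *
 *	void *cookie;
 *
 *	cookie = hook_establish(&startuphook_list, 1, example_startup, sc);
 *	if (cookie == NULL)
 *		printf("example: can't establish startup hook\n");
 *	...
 *	dohooks(&startuphook_list, HOOK_REMOVE|HOOK_FREE);
 */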