/*-
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and code derived from software contributed to
 * Berkeley by William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: Utah $Hdr: mem.c 1.13 89/10/08$
 *	from: @(#)mem.c	7.2 (Berkeley) 5/9/91
 * $FreeBSD: src/sys/i386/i386/mem.c,v 1.79.2.9 2003/01/04 22:58:01 njl Exp $
 * $DragonFly: src/sys/kern/kern_memio.c,v 1.32 2008/07/23 16:39:28 dillon Exp $
 */

/*
 * Memory special file
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/random.h>
#include <sys/signalvar.h>
#include <sys/signal2.h>
#include <sys/uio.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>

static	d_open_t	mmopen;
static	d_close_t	mmclose;
static	d_read_t	mmread;
static	d_write_t	mmwrite;
static	d_ioctl_t	mmioctl;
static	d_mmap_t	memmmap;
static	d_poll_t	mmpoll;

#define CDEV_MAJOR 2
static struct dev_ops mem_ops = {
	{ "mem", CDEV_MAJOR, D_MEM },
	.d_open =	mmopen,
	.d_close =	mmclose,
	.d_read =	mmread,
	.d_write =	mmwrite,
	.d_ioctl =	mmioctl,
	.d_poll =	mmpoll,
	.d_mmap =	memmmap,
};

static int rand_bolt;
static caddr_t	zbuf;

MALLOC_DEFINE(M_MEMDESC, "memdesc", "memory range descriptors");
static int mem_ioctl (cdev_t, u_long, caddr_t, int, struct ucred *);
static int random_ioctl (cdev_t, u_long, caddr_t, int, struct ucred *);

struct mem_range_softc mem_range_softc;

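/*
 * mmopen:	open one of the memory special files.
 *
 * Write opens of /dev/mem and /dev/kmem (minors 0 and 1) are refused
 * when the securelevel is raised or kernel memory is read-only.
 * Opening /dev/io (minor 14) requires root privilege and raises the
 * caller's I/O privilege level via cpu_set_iopl().
 */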
static int
mmopen(struct dev_open_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	int error;

	switch (minor(dev)) {
	case 0:
	case 1:
		if (ap->a_oflags & FWRITE) {
			if (securelevel > 0 || kernel_mem_readonly)
				return (EPERM);
		}
		error = 0;
		break;
	case 14:
		error = priv_check_cred(ap->a_cred, PRIV_ROOT, 0);
		if (error != 0)
			break;
		if (securelevel > 0 || kernel_mem_readonly) {
			error = EPERM;
			break;
		}
		error = cpu_set_iopl();
		break;
	default:
		error = 0;
		break;
	}
	return (error);
}

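/*
 * mmclose:	close one of the memory special files.
 *
 * Closing /dev/io (minor 14) drops the I/O privilege level acquired
 * at open time; the other minors need no teardown.
 */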
static int
mmclose(struct dev_close_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	int error;

	switch (minor(dev)) {
	case 14:
		error = cpu_clr_iopl();
		break;
	default:
		error = 0;
		break;
	}
	return (error);
}

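/*
 * mmrw:	common read/write backend for all the memory minors.
 *
 * Dispatches on the minor number: 0 is /dev/mem, 1 is /dev/kmem,
 * 2 is /dev/null, 3 is /dev/random, 4 is /dev/urandom and 12 is
 * /dev/zero.  The transfer proceeds one iovec chunk at a time, at
 * most PAGE_SIZE bytes per iteration for the buffered minors.
 */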
static int
mmrw(cdev_t dev, struct uio *uio, int flags)
{
	int o;
	u_int c, v;
	u_int poolsize;
	struct iovec *iov;
	int error = 0;
	caddr_t buf = NULL;

	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("mmrw");
			continue;
		}
		switch (minor(dev)) {
		case 0:
			/*
			 * minor device 0 is physical memory, /dev/mem
			 */
			v = uio->uio_offset;
			v &= ~PAGE_MASK;
			pmap_kenter((vm_offset_t)ptvmmap, v);
			o = (int)uio->uio_offset & PAGE_MASK;
			c = (u_int)(PAGE_SIZE -
				    ((uintptr_t)iov->iov_base & PAGE_MASK));
			c = min(c, (u_int)(PAGE_SIZE - o));
			c = min(c, (u_int)iov->iov_len);
			error = uiomove((caddr_t)&ptvmmap[o], (int)c, uio);
			pmap_kremove((vm_offset_t)ptvmmap);
			continue;

		case 1: {
			/*
			 * minor device 1 is kernel memory, /dev/kmem
			 */
			vm_offset_t saddr, eaddr;
			int prot;

			c = iov->iov_len;

			/*
			 * Make sure that all of the pages are currently
			 * resident so that we don't create any zero-fill
			 * pages.
			 */
			saddr = trunc_page(uio->uio_offset);
			eaddr = round_page(uio->uio_offset + c);
			if (saddr > eaddr)
				return (EFAULT);

			/*
			 * Make sure the kernel addresses are mapped.
			 * platform_direct_mapped() can be used to bypass
			 * default mapping via the page table (virtual kernels
			 * contain a lot of out-of-band data).
			 */
			prot = VM_PROT_READ;
			if (uio->uio_rw != UIO_READ)
				prot |= VM_PROT_WRITE;
			error = kvm_access_check(saddr, eaddr, prot);
			if (error)
				return (error);
			error = uiomove((caddr_t)(vm_offset_t)uio->uio_offset,
					(int)c, uio);
			continue;
		}
		case 2:
			/*
			 * minor device 2 is EOF/RATHOLE
			 */
			if (uio->uio_rw == UIO_READ)
				return (0);
			c = iov->iov_len;
			break;
		case 3:
			/*
			 * minor device 3 (/dev/random) is source of filth
			 * on read, seeder on write
			 */
			if (buf == NULL)
				buf = kmalloc(PAGE_SIZE, M_TEMP, M_WAITOK);
			c = min(iov->iov_len, PAGE_SIZE);
			if (uio->uio_rw == UIO_WRITE) {
				error = uiomove(buf, (int)c, uio);
				if (error == 0)
					error = add_buffer_randomness(buf, c);
			} else {
				poolsize = read_random(buf, c);
				if (poolsize == 0) {
					if (buf)
						kfree(buf, M_TEMP);
					if ((flags & IO_NDELAY) != 0)
						return (EWOULDBLOCK);
					return (0);
				}
				c = min(c, poolsize);
				error = uiomove(buf, (int)c, uio);
			}
			continue;
		case 4:
			/*
			 * minor device 4 (/dev/urandom) is source of muck
			 * on read, writes are disallowed.
			 */
			c = min(iov->iov_len, PAGE_SIZE);
			if (uio->uio_rw == UIO_WRITE) {
				error = EPERM;
				break;
			}
			if (CURSIG(curthread->td_lwp) != 0) {
				/*
				 * Use tsleep() to get the error code right.
				 * It should return immediately.
				 */
				error = tsleep(&rand_bolt, PCATCH, "urand", 1);
				if (error != 0 && error != EWOULDBLOCK)
					continue;
			}
			if (buf == NULL)
				buf = kmalloc(PAGE_SIZE, M_TEMP, M_WAITOK);
			poolsize = read_random_unlimited(buf, c);
			c = min(c, poolsize);
			error = uiomove(buf, (int)c, uio);
			continue;
		case 12:
			/*
			 * minor device 12 (/dev/zero) is a source of nulls
			 * on read; writes are accepted and discarded.
			 */
			if (uio->uio_rw == UIO_WRITE) {
				c = iov->iov_len;
				break;
			}
			if (zbuf == NULL) {
				zbuf = (caddr_t)kmalloc(PAGE_SIZE, M_TEMP,
				    M_WAITOK | M_ZERO);
			}
			c = min(iov->iov_len, PAGE_SIZE);
			error = uiomove(zbuf, (int)c, uio);
			continue;
		default:
			return (ENODEV);
		}
		if (error)
			break;
		iov->iov_base = (char *)iov->iov_base + c;
		iov->iov_len -= c;
		uio->uio_offset += c;
		uio->uio_resid -= c;
	}
	if (buf)
		kfree(buf, M_TEMP);
	return (error);
}

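/*
 * mmread/mmwrite:	thin shims routing the dev_read/dev_write entry
 * points into the shared mmrw() backend.
 */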
static int
mmread(struct dev_read_args *ap)
{
	return(mmrw(ap->a_head.a_dev, ap->a_uio, ap->a_ioflag));
}

static int
mmwrite(struct dev_write_args *ap)
{
	return(mmrw(ap->a_head.a_dev, ap->a_uio, ap->a_ioflag));
}

/*******************************************************\
* allow user processes to MMAP some memory sections	*
* instead of going through read/write			*
\*******************************************************/

static int
memmmap(struct dev_mmap_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;

	switch (minor(dev)) {
	case 0:
		/*
		 * minor device 0 is physical memory
		 */
#if defined(__i386__)
		ap->a_result = i386_btop(ap->a_offset);
#elif defined(__amd64__)
		ap->a_result = amd64_btop(ap->a_offset);
#endif
		return (0);
	case 1:
		/*
		 * minor device 1 is kernel memory
		 */
#if defined(__i386__)
		ap->a_result = i386_btop(vtophys(ap->a_offset));
#elif defined(__amd64__)
		ap->a_result = amd64_btop(vtophys(ap->a_offset));
#endif
		return (0);

	default:
		return (EINVAL);
	}
}

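/*
 * mmioctl:	ioctl dispatch for the memory special files.
 *
 * Memory range (MTRR) operations apply to /dev/mem (minor 0); the
 * entropy-related operations apply to /dev/random and /dev/urandom
 * (minors 3 and 4).
 */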
static int
mmioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;

	switch (minor(dev)) {
	case 0:
		return mem_ioctl(dev, ap->a_cmd, ap->a_data,
				 ap->a_fflag, ap->a_cred);
	case 3:
	case 4:
		return random_ioctl(dev, ap->a_cmd, ap->a_data,
				    ap->a_fflag, ap->a_cred);
	}
	return (ENODEV);
}

/*
 * Operations for changing memory attributes.
 *
 * This is basically just an ioctl shim for mem_range_attr_get
 * and mem_range_attr_set.
 */
static int
mem_ioctl(cdev_t dev, u_long cmd, caddr_t data, int flags, struct ucred *cred)
{
	int nd, error = 0;
	struct mem_range_op *mo = (struct mem_range_op *)data;
	struct mem_range_desc *md;

	/* is this for us? */
	if ((cmd != MEMRANGE_GET) &&
	    (cmd != MEMRANGE_SET))
		return (ENOTTY);

	/* any chance we can handle this? */
	if (mem_range_softc.mr_op == NULL)
		return (EOPNOTSUPP);

	/* do we have any descriptors? */
	if (mem_range_softc.mr_ndesc == 0)
		return (ENXIO);

	switch (cmd) {
	case MEMRANGE_GET:
		nd = imin(mo->mo_arg[0], mem_range_softc.mr_ndesc);
		if (nd > 0) {
			md = (struct mem_range_desc *)
				kmalloc(nd * sizeof(struct mem_range_desc),
					M_MEMDESC, M_WAITOK);
			error = mem_range_attr_get(md, &nd);
			if (!error)
				error = copyout(md, mo->mo_desc,
					nd * sizeof(struct mem_range_desc));
			kfree(md, M_MEMDESC);
		} else {
			nd = mem_range_softc.mr_ndesc;
		}
		mo->mo_arg[0] = nd;
		break;

	case MEMRANGE_SET:
		md = (struct mem_range_desc *)kmalloc(sizeof(struct mem_range_desc),
						      M_MEMDESC, M_WAITOK);
		error = copyin(mo->mo_desc, md, sizeof(struct mem_range_desc));
		/* clamp description string */
		md->mr_owner[sizeof(md->mr_owner) - 1] = 0;
		if (error == 0)
			error = mem_range_attr_set(md, &mo->mo_arg[0]);
		kfree(md, M_MEMDESC);
		break;
	}
	return (error);
}

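/*
 * Example (a hypothetical userland sketch, not part of the kernel):
 * querying the memory range descriptors via MEMRANGE_GET on /dev/mem.
 * Passing mo_arg[0] == 0 asks only for the descriptor count; a second
 * call with a buffer retrieves the entries.
 *
 *	struct mem_range_op mo;
 *	struct mem_range_desc *md;
 *	int fd = open("/dev/mem", O_RDONLY);
 *
 *	mo.mo_arg[0] = 0;
 *	mo.mo_desc = NULL;
 *	ioctl(fd, MEMRANGE_GET, &mo);	// mo.mo_arg[0] = descriptor count
 *	md = calloc(mo.mo_arg[0], sizeof(*md));
 *	mo.mo_desc = md;
 *	ioctl(fd, MEMRANGE_GET, &mo);	// fills md[0 .. mo.mo_arg[0]-1]
 */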
/*
 * Implementation-neutral, kernel-callable functions for manipulating
 * memory range attributes.
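 *
 * For mem_range_attr_get(), *arg == 0 asks only for the number of
 * descriptors; a non-zero *arg copies that many entries out into mrd.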
 */
int
mem_range_attr_get(struct mem_range_desc *mrd, int *arg)
{
	/* can we handle this? */
	if (mem_range_softc.mr_op == NULL)
		return (EOPNOTSUPP);

	if (*arg == 0) {
		*arg = mem_range_softc.mr_ndesc;
	} else {
		bcopy(mem_range_softc.mr_desc, mrd,
		      (*arg) * sizeof(struct mem_range_desc));
	}
	return (0);
}

int
mem_range_attr_set(struct mem_range_desc *mrd, int *arg)
{
	/* can we handle this? */
	if (mem_range_softc.mr_op == NULL)
		return (EOPNOTSUPP);

	return (mem_range_softc.mr_op->set(&mem_range_softc, mrd, arg));
}

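/*
 * mem_range_AP_init:	replicate the memory range attributes (MTRRs)
 * onto an application processor as it is brought up.
 */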
#ifdef SMP
void
mem_range_AP_init(void)
{
	if (mem_range_softc.mr_op && mem_range_softc.mr_op->initAP)
		mem_range_softc.mr_op->initAP(&mem_range_softc);
}
#endif

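/*
 * random_ioctl:	ioctl backend for /dev/random and /dev/urandom.
 *
 * MEM_SETIRQ, MEM_CLEARIRQ and MEM_FINDIRQ manage which interrupts
 * feed the entropy pool; all three require root privilege.
 */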
static int
random_ioctl(cdev_t dev, u_long cmd, caddr_t data, int flags, struct ucred *cred)
{
	int error;
	int intr;

	/*
	 * Even inspecting the state is privileged, since it gives a hint
	 * about how easily the randomness might be guessed.
	 */
	error = 0;

	switch (cmd) {
	/* Really handled in upper layer */
	case FIOASYNC:
		break;
	case MEM_SETIRQ:
		intr = *(int16_t *)data;
		if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
			break;
		if (intr < 0 || intr >= MAX_INTS)
			return (EINVAL);
		register_randintr(intr);
		break;
	case MEM_CLEARIRQ:
		intr = *(int16_t *)data;
		if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
			break;
		if (intr < 0 || intr >= MAX_INTS)
			return (EINVAL);
		unregister_randintr(intr);
		break;
	case MEM_RETURNIRQ:
		error = ENOTSUP;
		break;
	case MEM_FINDIRQ:
		intr = *(int16_t *)data;
		if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
			break;
		if (intr < 0 || intr >= MAX_INTS)
			return (EINVAL);
		intr = next_registered_randintr(intr);
		if (intr == MAX_INTS)
			return (ENOENT);
		*(u_int16_t *)data = intr;
		break;
	default:
		error = ENOTSUP;
		break;
	}
	return (error);
}

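/*
 * mmpoll:	poll backend.  Only /dev/random (minor 3) can block;
 * every other minor is always ready.
 */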
static int
mmpoll(struct dev_poll_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	int revents;

	switch (minor(dev)) {
	case 3:		/* /dev/random */
		revents = random_poll(dev, ap->a_events);
		break;
	case 4:		/* /dev/urandom */
	default:
		revents = seltrue(dev, ap->a_events);
		break;
	}
	ap->a_events = revents;
	return (0);
}

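/*
 * iszerodev:	return non-zero if the device is our /dev/zero minor.
 */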
int
iszerodev(cdev_t dev)
{
	return ((major(dev) == mem_ops.head.maj)
	  && minor(dev) == 12);
}

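/*
 * mem_drvinit:	driver initialization.  Set up memory range handling,
 * register the dev_ops and create the device nodes.
 */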
static void
mem_drvinit(void *unused)
{
	/* Initialise memory range handling */
	if (mem_range_softc.mr_op != NULL)
		mem_range_softc.mr_op->init(&mem_range_softc);

	dev_ops_add(&mem_ops, 0xf0, 0);
	make_dev(&mem_ops, 0, UID_ROOT, GID_KMEM, 0640, "mem");
	make_dev(&mem_ops, 1, UID_ROOT, GID_KMEM, 0640, "kmem");
	make_dev(&mem_ops, 2, UID_ROOT, GID_WHEEL, 0666, "null");
	make_dev(&mem_ops, 3, UID_ROOT, GID_WHEEL, 0644, "random");
	make_dev(&mem_ops, 4, UID_ROOT, GID_WHEEL, 0644, "urandom");
	make_dev(&mem_ops, 12, UID_ROOT, GID_WHEEL, 0666, "zero");
	make_dev(&mem_ops, 14, UID_ROOT, GID_WHEEL, 0600, "io");
}

SYSINIT(memdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE + CDEV_MAJOR, mem_drvinit, NULL)