/*-
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and code derived from software contributed to
 * Berkeley by William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: mem.c 1.13 89/10/08$
 * from: @(#)mem.c	7.2 (Berkeley) 5/9/91
 * $FreeBSD: src/sys/i386/i386/mem.c,v 1.79.2.9 2003/01/04 22:58:01 njl Exp $
 */

/*
 * Memory special file
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/random.h>
#include <sys/signalvar.h>
#include <sys/uio.h>
#include <sys/vnode.h>
#include <sys/sysctl.h>

#include <sys/signal2.h>
#include <sys/mplock2.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>

static	d_open_t	mmopen;
static	d_close_t	mmclose;
static	d_read_t	mmread;
static	d_write_t	mmwrite;
static	d_ioctl_t	mmioctl;
static	d_mmap_t	memmmap;
static	d_kqfilter_t	mmkqfilter;

#define CDEV_MAJOR 2
static struct dev_ops mem_ops = {
	{ "mem", 0, D_MPSAFE },
	.d_open =	mmopen,
	.d_close =	mmclose,
	.d_read =	mmread,
	.d_write =	mmwrite,
	.d_ioctl =	mmioctl,
	.d_kqfilter =	mmkqfilter,
	.d_mmap =	memmmap,
};

static int rand_bolt;
static caddr_t	zbuf;
static cdev_t	zerodev = NULL;

MALLOC_DEFINE(M_MEMDESC, "memdesc", "memory range descriptors");
static int mem_ioctl (cdev_t, u_long, caddr_t, int, struct ucred *);
static int random_ioctl (cdev_t, u_long, caddr_t, int, struct ucred *);

struct mem_range_softc mem_range_softc;

static int seedenable;
SYSCTL_INT(_kern, OID_AUTO, seedenable, CTLFLAG_RW, &seedenable, 0, "");

static int
mmopen(struct dev_open_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	int error;

	switch (minor(dev)) {
	case 0:
	case 1:
		if (ap->a_oflags & FWRITE) {
			if (securelevel > 0 || kernel_mem_readonly)
				return (EPERM);
		}
		error = 0;
		break;
	case 14:
		error = priv_check_cred(ap->a_cred, PRIV_ROOT, 0);
		if (error != 0)
			break;
		if (securelevel > 0 || kernel_mem_readonly) {
			error = EPERM;
			break;
		}
		error = cpu_set_iopl();
		break;
	default:
		error = 0;
		break;
	}
	return (error);
}

static int
mmclose(struct dev_close_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	int error;

	switch (minor(dev)) {
	case 14:
		error = cpu_clr_iopl();
		break;
	default:
		error = 0;
		break;
	}
	return (error);
}

static int
mmrw(cdev_t dev, struct uio *uio, int flags)
{
	int o;
	u_int c;
	u_int poolsize;
	u_long v;
	struct iovec *iov;
	int error = 0;
	caddr_t buf = NULL;

	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("mmrw");
			continue;
		}
		switch (minor(dev)) {
		case 0:
			/*
			 * minor device 0 is physical memory, /dev/mem
			 */
			v = uio->uio_offset;
			v &= ~(long)PAGE_MASK;
			pmap_kenter((vm_offset_t)ptvmmap, v);
			o = (int)uio->uio_offset & PAGE_MASK;
			c = (u_int)(PAGE_SIZE - ((uintptr_t)iov->iov_base & PAGE_MASK));
			c = min(c, (u_int)(PAGE_SIZE - o));
			c = min(c, (u_int)iov->iov_len);
			error = uiomove((caddr_t)&ptvmmap[o], (int)c, uio);
			pmap_kremove((vm_offset_t)ptvmmap);
			continue;

		case 1: {
			/*
			 * minor device 1 is kernel memory, /dev/kmem
			 */
			vm_offset_t saddr, eaddr;
			int prot;

			c = iov->iov_len;

			/*
			 * Make sure that all of the pages are currently
			 * resident so that we don't create any zero-fill
			 * pages.
			 */
			saddr = trunc_page(uio->uio_offset);
			eaddr = round_page(uio->uio_offset + c);
			if (saddr > eaddr)
				return EFAULT;

			/*
			 * Make sure the kernel addresses are mapped.
			 * platform_direct_mapped() can be used to bypass
			 * default mapping via the page table (virtual kernels
			 * contain a lot of out-of-band data).
			 */
			prot = VM_PROT_READ;
			if (uio->uio_rw != UIO_READ)
				prot |= VM_PROT_WRITE;
			error = kvm_access_check(saddr, eaddr, prot);
			if (error)
				return (error);
			error = uiomove((caddr_t)(vm_offset_t)uio->uio_offset,
					(int)c, uio);
			continue;
		}
		case 2:
			/*
			 * minor device 2 (/dev/null) is EOF/RATHOLE
			 */
			if (uio->uio_rw == UIO_READ)
				return (0);
			c = iov->iov_len;
			break;
		case 3:
			/*
			 * minor device 3 (/dev/random) is source of filth
			 * on read, seeder on write
			 */
			if (buf == NULL)
				buf = kmalloc(PAGE_SIZE, M_TEMP, M_WAITOK);
			c = min(iov->iov_len, PAGE_SIZE);
			if (uio->uio_rw == UIO_WRITE) {
				error = uiomove(buf, (int)c, uio);
				if (error == 0 &&
				    seedenable &&
				    securelevel <= 0) {
					error = add_buffer_randomness_src(buf, c, RAND_SRC_SEEDING);
				} else if (error == 0) {
					error = EPERM;
				}
			} else {
				poolsize = read_random(buf, c);
				if (poolsize == 0) {
					if (buf)
						kfree(buf, M_TEMP);
					if ((flags & IO_NDELAY) != 0)
						return (EWOULDBLOCK);
					return (0);
				}
				c = min(c, poolsize);
				error = uiomove(buf, (int)c, uio);
			}
			continue;
		case 4:
			/*
			 * minor device 4 (/dev/urandom) is source of muck
			 * on read, writes are disallowed.
			 */
			c = min(iov->iov_len, PAGE_SIZE);
			if (uio->uio_rw == UIO_WRITE) {
				error = EPERM;
				break;
			}
			if (CURSIG(curthread->td_lwp) != 0) {
				/*
				 * Use tsleep() to get the error code right.
				 * It should return immediately.
				 */
				error = tsleep(&rand_bolt, PCATCH, "urand", 1);
				if (error != 0 && error != EWOULDBLOCK)
					continue;
			}
			if (buf == NULL)
				buf = kmalloc(PAGE_SIZE, M_TEMP, M_WAITOK);
			poolsize = read_random_unlimited(buf, c);
			c = min(c, poolsize);
			error = uiomove(buf, (int)c, uio);
			continue;
		case 12:
			/*
			 * minor device 12 (/dev/zero) is source of nulls
			 * on read, writes are disallowed.
			 */
			if (uio->uio_rw == UIO_WRITE) {
				c = iov->iov_len;
				break;
			}
			if (zbuf == NULL) {
				zbuf = (caddr_t)kmalloc(PAGE_SIZE, M_TEMP,
				    M_WAITOK | M_ZERO);
			}
			c = min(iov->iov_len, PAGE_SIZE);
			error = uiomove(zbuf, (int)c, uio);
			continue;
		default:
			return (ENODEV);
		}
		if (error)
			break;
		iov->iov_base = (char *)iov->iov_base + c;
		iov->iov_len -= c;
		uio->uio_offset += c;
		uio->uio_resid -= c;
	}
	if (buf)
		kfree(buf, M_TEMP);
	return (error);
}

static int
mmread(struct dev_read_args *ap)
{
	return(mmrw(ap->a_head.a_dev, ap->a_uio, ap->a_ioflag));
}

static int
mmwrite(struct dev_write_args *ap)
{
	return(mmrw(ap->a_head.a_dev, ap->a_uio, ap->a_ioflag));
}

/*******************************************************\
* allow user processes to MMAP some memory sections	*
* instead of going through read/write			*
\*******************************************************/

static int
memmmap(struct dev_mmap_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;

	switch (minor(dev)) {
	case 0:
		/*
		 * minor device 0 is physical memory
		 */
#if defined(__i386__)
		ap->a_result = i386_btop(ap->a_offset);
#elif defined(__x86_64__)
		ap->a_result = x86_64_btop(ap->a_offset);
#endif
		return 0;
	case 1:
		/*
		 * minor device 1 is kernel memory
		 */
#if defined(__i386__)
		ap->a_result = i386_btop(vtophys(ap->a_offset));
#elif defined(__x86_64__)
		ap->a_result = x86_64_btop(vtophys(ap->a_offset));
#endif
		return 0;

	default:
		return EINVAL;
	}
}

static int
mmioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	int error;

	get_mplock();

	switch (minor(dev)) {
	case 0:
		error = mem_ioctl(dev, ap->a_cmd, ap->a_data,
				  ap->a_fflag, ap->a_cred);
		break;
	case 3:
	case 4:
		error = random_ioctl(dev, ap->a_cmd, ap->a_data,
				     ap->a_fflag, ap->a_cred);
		break;
	default:
		error = ENODEV;
		break;
	}

	rel_mplock();
	return (error);
}

/*
 * Operations for changing memory attributes.
 *
 * This is basically just an ioctl shim for mem_range_attr_get
 * and mem_range_attr_set.
 */
static int
mem_ioctl(cdev_t dev, u_long cmd, caddr_t data, int flags, struct ucred *cred)
{
	int nd, error = 0;
	struct mem_range_op *mo = (struct mem_range_op *)data;
	struct mem_range_desc *md;

	/* is this for us? */
	if ((cmd != MEMRANGE_GET) &&
	    (cmd != MEMRANGE_SET))
		return (ENOTTY);

	/* any chance we can handle this? */
	if (mem_range_softc.mr_op == NULL)
		return (EOPNOTSUPP);

	/* do we have any descriptors? */
	if (mem_range_softc.mr_ndesc == 0)
		return (ENXIO);

	switch (cmd) {
	case MEMRANGE_GET:
		nd = imin(mo->mo_arg[0], mem_range_softc.mr_ndesc);
		if (nd > 0) {
			md = (struct mem_range_desc *)
				kmalloc(nd * sizeof(struct mem_range_desc),
					M_MEMDESC, M_WAITOK);
			error = mem_range_attr_get(md, &nd);
			if (!error)
				error = copyout(md, mo->mo_desc,
						nd * sizeof(struct mem_range_desc));
			kfree(md, M_MEMDESC);
		} else {
			nd = mem_range_softc.mr_ndesc;
		}
		mo->mo_arg[0] = nd;
		break;

	case MEMRANGE_SET:
		md = (struct mem_range_desc *)kmalloc(sizeof(struct mem_range_desc),
						      M_MEMDESC, M_WAITOK);
		error = copyin(mo->mo_desc, md, sizeof(struct mem_range_desc));
		/* clamp description string */
		md->mr_owner[sizeof(md->mr_owner) - 1] = 0;
		if (error == 0)
			error = mem_range_attr_set(md, &mo->mo_arg[0]);
		kfree(md, M_MEMDESC);
		break;
	}
	return (error);
}

/*
 * Implementation-neutral, kernel-callable functions for manipulating
 * memory range attributes.
 */
int
mem_range_attr_get(struct mem_range_desc *mrd, int *arg)
{
	/* can we handle this? */
	if (mem_range_softc.mr_op == NULL)
		return (EOPNOTSUPP);

	if (*arg == 0) {
		*arg = mem_range_softc.mr_ndesc;
	} else {
		bcopy(mem_range_softc.mr_desc, mrd,
		      (*arg) * sizeof(struct mem_range_desc));
	}
	return (0);
}

int
mem_range_attr_set(struct mem_range_desc *mrd, int *arg)
{
	/* can we handle this? */
	if (mem_range_softc.mr_op == NULL)
		return (EOPNOTSUPP);

	return (mem_range_softc.mr_op->set(&mem_range_softc, mrd, arg));
}

void
mem_range_AP_init(void)
{
	if (mem_range_softc.mr_op && mem_range_softc.mr_op->initAP)
		mem_range_softc.mr_op->initAP(&mem_range_softc);
}

static int
random_ioctl(cdev_t dev, u_long cmd, caddr_t data, int flags, struct ucred *cred)
{
	int error;
	int intr;

	/*
	 * Even inspecting the state is privileged, since it gives a hint
	 * about how easily the randomness might be guessed.
	 */
500 */ 501 error = 0; 502 503 switch (cmd) { 504 /* Really handled in upper layer */ 505 case FIOASYNC: 506 break; 507 case MEM_SETIRQ: 508 intr = *(int16_t *)data; 509 if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0) 510 break; 511 if (intr < 0 || intr >= MAX_INTS) 512 return (EINVAL); 513 register_randintr(intr); 514 break; 515 case MEM_CLEARIRQ: 516 intr = *(int16_t *)data; 517 if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0) 518 break; 519 if (intr < 0 || intr >= MAX_INTS) 520 return (EINVAL); 521 unregister_randintr(intr); 522 break; 523 case MEM_RETURNIRQ: 524 error = ENOTSUP; 525 break; 526 case MEM_FINDIRQ: 527 intr = *(int16_t *)data; 528 if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0) 529 break; 530 if (intr < 0 || intr >= MAX_INTS) 531 return (EINVAL); 532 intr = next_registered_randintr(intr); 533 if (intr == MAX_INTS) 534 return (ENOENT); 535 *(u_int16_t *)data = intr; 536 break; 537 default: 538 error = ENOTSUP; 539 break; 540 } 541 return (error); 542 } 543 544 static int 545 mm_filter_read(struct knote *kn, long hint) 546 { 547 return (1); 548 } 549 550 static int 551 mm_filter_write(struct knote *kn, long hint) 552 { 553 return (1); 554 } 555 556 static void 557 dummy_filter_detach(struct knote *kn) {} 558 559 /* Implemented in kern_nrandom.c */ 560 static struct filterops random_read_filtops = 561 { FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, dummy_filter_detach, random_filter_read }; 562 563 static struct filterops mm_read_filtops = 564 { FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, dummy_filter_detach, mm_filter_read }; 565 566 static struct filterops mm_write_filtops = 567 { FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, dummy_filter_detach, mm_filter_write }; 568 569 int 570 mmkqfilter(struct dev_kqfilter_args *ap) 571 { 572 struct knote *kn = ap->a_kn; 573 cdev_t dev = ap->a_head.a_dev; 574 575 ap->a_result = 0; 576 switch (kn->kn_filter) { 577 case EVFILT_READ: 578 switch (minor(dev)) { 579 case 3: 580 kn->kn_fop = &random_read_filtops; 581 break; 582 default: 583 kn->kn_fop = &mm_read_filtops; 584 break; 585 } 586 break; 587 case EVFILT_WRITE: 588 kn->kn_fop = &mm_write_filtops; 589 break; 590 default: 591 ap->a_result = EOPNOTSUPP; 592 return (0); 593 } 594 595 return (0); 596 } 597 598 int 599 iszerodev(cdev_t dev) 600 { 601 return (zerodev == dev); 602 } 603 604 static void 605 mem_drvinit(void *unused) 606 { 607 608 /* Initialise memory range handling */ 609 if (mem_range_softc.mr_op != NULL) 610 mem_range_softc.mr_op->init(&mem_range_softc); 611 612 make_dev(&mem_ops, 0, UID_ROOT, GID_KMEM, 0640, "mem"); 613 make_dev(&mem_ops, 1, UID_ROOT, GID_KMEM, 0640, "kmem"); 614 make_dev(&mem_ops, 2, UID_ROOT, GID_WHEEL, 0666, "null"); 615 make_dev(&mem_ops, 3, UID_ROOT, GID_WHEEL, 0644, "random"); 616 make_dev(&mem_ops, 4, UID_ROOT, GID_WHEEL, 0644, "urandom"); 617 zerodev = make_dev(&mem_ops, 12, UID_ROOT, GID_WHEEL, 0666, "zero"); 618 make_dev(&mem_ops, 14, UID_ROOT, GID_WHEEL, 0600, "io"); 619 } 620 621 SYSINIT(memdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,mem_drvinit,NULL) 622 623