/*-
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and code derived from software contributed to
 * Berkeley by William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: mem.c 1.13 89/10/08$
 * from: @(#)mem.c	7.2 (Berkeley) 5/9/91
 * $FreeBSD: src/sys/i386/i386/mem.c,v 1.79.2.9 2003/01/04 22:58:01 njl Exp $
 */

/*
 * Memory special file
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/random.h>
#include <sys/signalvar.h>
#include <sys/uio.h>
#include <sys/vnode.h>
#include <sys/sysctl.h>

#include <sys/signal2.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>


static	d_open_t	mmopen;
static	d_close_t	mmclose;
static	d_read_t	mmread;
static	d_write_t	mmwrite;
static	d_ioctl_t	mmioctl;
#if 0
static	d_mmap_t	memmmap;
#endif
static	d_kqfilter_t	mmkqfilter;
static int memuksmap(cdev_t dev, vm_page_t fake);

#define CDEV_MAJOR 2
static struct dev_ops mem_ops = {
	{ "mem", 0, D_MPSAFE | D_QUICK },
	.d_open =	mmopen,
	.d_close =	mmclose,
	.d_read =	mmread,
	.d_write =	mmwrite,
	.d_ioctl =	mmioctl,
	.d_kqfilter =	mmkqfilter,
#if 0
	.d_mmap =	memmmap,
#endif
	.d_uksmap =	memuksmap
};

static struct dev_ops mem_ops_mem = {
	{ "mem", 0, D_MEM | D_MPSAFE | D_QUICK },
	.d_open =	mmopen,
	.d_close =	mmclose,
	.d_read =	mmread,
	.d_write =	mmwrite,
	.d_ioctl =	mmioctl,
	.d_kqfilter =	mmkqfilter,
#if 0
	.d_mmap =	memmmap,
#endif
	.d_uksmap =	memuksmap
};

static struct dev_ops mem_ops_noq = {
	{ "mem", 0, D_MPSAFE },
	.d_open =	mmopen,
	.d_close =	mmclose,
	.d_read =	mmread,
	.d_write =	mmwrite,
	.d_ioctl =	mmioctl,
	.d_kqfilter =	mmkqfilter,
#if 0
	.d_mmap =	memmmap,
#endif
	.d_uksmap =	memuksmap
};

static int rand_bolt;
static caddr_t	zbuf;
static cdev_t	zerodev = NULL;
static struct lock mem_lock = LOCK_INITIALIZER("memlk", 0, 0);

MALLOC_DEFINE(M_MEMDESC, "memdesc", "memory range descriptors");
static int mem_ioctl (cdev_t, u_long, caddr_t, int, struct ucred *);
static int random_ioctl (cdev_t, u_long, caddr_t, int, struct ucred *);

struct mem_range_softc mem_range_softc;

static int seedenable;
SYSCTL_INT(_kern, OID_AUTO, seedenable, CTLFLAG_RW, &seedenable, 0, "");

static int
mmopen(struct dev_open_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	int error;

	switch (minor(dev)) {
	case 0:
	case 1:
		/*
		 * /dev/mem and /dev/kmem
		 */
		if (ap->a_oflags & FWRITE) {
			if (securelevel > 0 || kernel_mem_readonly)
				return (EPERM);
		}
		error = 0;
		break;
	case 6:
		/*
		 * /dev/kpmap can only be opened for reading.
		 */
		if (ap->a_oflags & FWRITE)
			return (EPERM);
		error = 0;
		break;
	case 14:
		error = priv_check_cred(ap->a_cred, PRIV_ROOT, 0);
		if (error != 0)
			break;
		if (securelevel > 0 || kernel_mem_readonly) {
			error = EPERM;
			break;
		}
		error = cpu_set_iopl();
		break;
	default:
		error = 0;
		break;
	}
	return (error);
}

static int
mmclose(struct dev_close_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	int error;

	switch (minor(dev)) {
	case 14:
		error = cpu_clr_iopl();
		break;
	default:
		error = 0;
		break;
	}
	return (error);
}

/*
 * Common read/write backend for the memory special devices; dispatches
 * on the minor number of the device being accessed.
 */
static int
mmrw(cdev_t dev, struct uio *uio, int flags)
{
	int o;
	u_int c;
	u_int poolsize;
	u_long v;
	struct iovec *iov;
	int error = 0;
	caddr_t buf = NULL;

	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("mmrw");
			continue;
		}
		switch (minor(dev)) {
		case 0:
			/*
			 * minor device 0 is physical memory, /dev/mem
			 */
			v = uio->uio_offset;
			v &= ~(long)PAGE_MASK;
			pmap_kenter((vm_offset_t)ptvmmap, v);
			o = (int)uio->uio_offset & PAGE_MASK;
			c = (u_int)(PAGE_SIZE - ((uintptr_t)iov->iov_base & PAGE_MASK));
			c = min(c, (u_int)(PAGE_SIZE - o));
			c = min(c, (u_int)iov->iov_len);
			error = uiomove((caddr_t)&ptvmmap[o], (int)c, uio);
			pmap_kremove((vm_offset_t)ptvmmap);
			continue;

		case 1: {
			/*
			 * minor device 1 is kernel memory, /dev/kmem
			 */
			vm_offset_t saddr, eaddr;
			int prot;

			c = iov->iov_len;

			/*
			 * Make sure that all of the pages are currently
			 * resident so that we don't create any zero-fill
			 * pages.
			 */
			saddr = trunc_page(uio->uio_offset);
			eaddr = round_page(uio->uio_offset + c);
			if (saddr > eaddr)
				return EFAULT;

			/*
			 * Make sure the kernel addresses are mapped.
			 * platform_direct_mapped() can be used to bypass
			 * default mapping via the page table (virtual kernels
			 * contain a lot of out-of-band data).
			 */
			prot = VM_PROT_READ;
			if (uio->uio_rw != UIO_READ)
				prot |= VM_PROT_WRITE;
			error = kvm_access_check(saddr, eaddr, prot);
			if (error)
				return (error);
			error = uiomove((caddr_t)(vm_offset_t)uio->uio_offset,
					(int)c, uio);
			continue;
		}
		case 2:
			/*
			 * minor device 2 (/dev/null) is EOF/RATHOLE
			 */
			if (uio->uio_rw == UIO_READ)
				return (0);
			c = iov->iov_len;
			break;
		case 3:
			/*
			 * minor device 3 (/dev/random) is source of filth
			 * on read, seeder on write
			 */
			if (buf == NULL)
				buf = kmalloc(PAGE_SIZE, M_TEMP, M_WAITOK);
			c = min(iov->iov_len, PAGE_SIZE);
			if (uio->uio_rw == UIO_WRITE) {
				error = uiomove(buf, (int)c, uio);
				if (error == 0 &&
				    seedenable &&
				    securelevel <= 0) {
					error = add_buffer_randomness_src(buf, c, RAND_SRC_SEEDING);
				} else if (error == 0) {
					error = EPERM;
				}
			} else {
				poolsize = read_random(buf, c);
				if (poolsize == 0) {
					if (buf)
						kfree(buf, M_TEMP);
					if ((flags & IO_NDELAY) != 0)
						return (EWOULDBLOCK);
					return (0);
				}
				c = min(c, poolsize);
				error = uiomove(buf, (int)c, uio);
			}
			continue;
		case 4:
			/*
			 * minor device 4 (/dev/urandom) is source of muck
			 * on read, writes are disallowed.
			 */
			c = min(iov->iov_len, PAGE_SIZE);
			if (uio->uio_rw == UIO_WRITE) {
				error = EPERM;
				break;
			}
			if (CURSIG(curthread->td_lwp) != 0) {
				/*
				 * Use tsleep() to get the error code right.
				 * It should return immediately.
				 */
				error = tsleep(&rand_bolt, PCATCH, "urand", 1);
				if (error != 0 && error != EWOULDBLOCK)
					continue;
			}
			if (buf == NULL)
				buf = kmalloc(PAGE_SIZE, M_TEMP, M_WAITOK);
			poolsize = read_random_unlimited(buf, c);
			c = min(c, poolsize);
			error = uiomove(buf, (int)c, uio);
			continue;
		/* case 5: read/write not supported, mmap only */
		/* case 6: read/write not supported, mmap only */
		case 12:
			/*
			 * minor device 12 (/dev/zero) is source of nulls
			 * on read, writes are disallowed.
			 */
			if (uio->uio_rw == UIO_WRITE) {
				c = iov->iov_len;
				break;
			}
			if (zbuf == NULL) {
				zbuf = (caddr_t)kmalloc(PAGE_SIZE, M_TEMP,
							M_WAITOK | M_ZERO);
			}
			c = min(iov->iov_len, PAGE_SIZE);
			error = uiomove(zbuf, (int)c, uio);
			continue;
		default:
			return (ENODEV);
		}
		if (error)
			break;
		iov->iov_base = (char *)iov->iov_base + c;
		iov->iov_len -= c;
		uio->uio_offset += c;
		uio->uio_resid -= c;
	}
	if (buf)
		kfree(buf, M_TEMP);
	return (error);
}

static int
mmread(struct dev_read_args *ap)
{
	return(mmrw(ap->a_head.a_dev, ap->a_uio, ap->a_ioflag));
}

static int
mmwrite(struct dev_write_args *ap)
{
	return(mmrw(ap->a_head.a_dev, ap->a_uio, ap->a_ioflag));
}

/*******************************************************\
* allow user processes to MMAP some memory sections	*
* instead of going through read/write			*
\*******************************************************/

static int user_kernel_mapping(int num, vm_ooffset_t offset,
				vm_ooffset_t *resultp);

#if 0

static int
memmmap(struct dev_mmap_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	vm_ooffset_t result;
	int error;

	switch (minor(dev)) {
	case 0:
		/*
		 * minor device 0 is physical memory
		 */
		ap->a_result = atop(ap->a_offset);
		error = 0;
		break;
	case 1:
		/*
		 * minor device 1 is kernel memory
		 */
		ap->a_result = atop(vtophys(ap->a_offset));
		error = 0;
		break;
	case 5:
	case 6:
		/*
		 * minor device 5 is /dev/upmap (see sys/upmap.h)
		 * minor device 6 is /dev/kpmap (see sys/upmap.h)
		 */
		result = 0;
		error = user_kernel_mapping(minor(dev), ap->a_offset, &result);
		ap->a_result = atop(result);
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
}

#endif

/*
 * d_uksmap callback: translate the page index of a fault on one of the
 * memory special devices into the physical address backing that page.
 */
static int
memuksmap(cdev_t dev, vm_page_t fake)
{
	vm_ooffset_t result;
	int error;

	switch (minor(dev)) {
	case 0:
		/*
		 * minor device 0 is physical memory
		 */
		fake->phys_addr = ptoa(fake->pindex);
		error = 0;
		break;
	case 1:
		/*
		 * minor device 1 is kernel memory
		 */
		fake->phys_addr = vtophys(ptoa(fake->pindex));
		error = 0;
		break;
	case 5:
	case 6:
		/*
		 * minor device 5 is /dev/upmap (see sys/upmap.h)
		 * minor device 6 is /dev/kpmap (see sys/upmap.h)
		 */
		result = 0;
		error = user_kernel_mapping(minor(dev),
					    ptoa(fake->pindex), &result);
		fake->phys_addr = result;
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
}

static int
mmioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	int error;

	lockmgr(&mem_lock, LK_EXCLUSIVE);

	switch (minor(dev)) {
	case 0:
		error = mem_ioctl(dev, ap->a_cmd, ap->a_data,
				  ap->a_fflag, ap->a_cred);
		break;
	case 3:
	case 4:
		error = random_ioctl(dev, ap->a_cmd, ap->a_data,
				     ap->a_fflag, ap->a_cred);
		break;
	default:
		error = ENODEV;
		break;
	}

	lockmgr(&mem_lock, LK_RELEASE);

	return (error);
}

/*
 * Operations for changing memory attributes.
 *
 * This is basically just an ioctl shim for mem_range_attr_get
 * and mem_range_attr_set.
 */
static int
mem_ioctl(cdev_t dev, u_long cmd, caddr_t data, int flags, struct ucred *cred)
{
	int nd, error = 0;
	struct mem_range_op *mo = (struct mem_range_op *)data;
	struct mem_range_desc *md;

	/* is this for us? */
	if ((cmd != MEMRANGE_GET) &&
	    (cmd != MEMRANGE_SET))
		return (ENOTTY);

	/* any chance we can handle this? */
	if (mem_range_softc.mr_op == NULL)
		return (EOPNOTSUPP);

	/* do we have any descriptors? */
	if (mem_range_softc.mr_ndesc == 0)
		return (ENXIO);

	switch (cmd) {
	case MEMRANGE_GET:
		nd = imin(mo->mo_arg[0], mem_range_softc.mr_ndesc);
		if (nd > 0) {
			md = (struct mem_range_desc *)
				kmalloc(nd * sizeof(struct mem_range_desc),
					M_MEMDESC, M_WAITOK);
			error = mem_range_attr_get(md, &nd);
			if (!error)
				error = copyout(md, mo->mo_desc,
					nd * sizeof(struct mem_range_desc));
			kfree(md, M_MEMDESC);
		} else {
			nd = mem_range_softc.mr_ndesc;
		}
		mo->mo_arg[0] = nd;
		break;

	case MEMRANGE_SET:
		md = (struct mem_range_desc *)kmalloc(sizeof(struct mem_range_desc),
						      M_MEMDESC, M_WAITOK);
		error = copyin(mo->mo_desc, md, sizeof(struct mem_range_desc));
		/* clamp description string */
		md->mr_owner[sizeof(md->mr_owner) - 1] = 0;
		if (error == 0)
			error = mem_range_attr_set(md, &mo->mo_arg[0]);
		kfree(md, M_MEMDESC);
		break;
	}
	return (error);
}

/*
 * Implementation-neutral, kernel-callable functions for manipulating
 * memory range attributes.
 */
int
mem_range_attr_get(struct mem_range_desc *mrd, int *arg)
{
	/* can we handle this? */
	if (mem_range_softc.mr_op == NULL)
		return (EOPNOTSUPP);

	if (*arg == 0) {
		*arg = mem_range_softc.mr_ndesc;
	} else {
		bcopy(mem_range_softc.mr_desc, mrd,
		      (*arg) * sizeof(struct mem_range_desc));
	}
	return (0);
}

int
mem_range_attr_set(struct mem_range_desc *mrd, int *arg)
{
	/* can we handle this? */
	if (mem_range_softc.mr_op == NULL)
		return (EOPNOTSUPP);

	return (mem_range_softc.mr_op->set(&mem_range_softc, mrd, arg));
}

void
mem_range_AP_init(void)
{
	if (mem_range_softc.mr_op && mem_range_softc.mr_op->initAP)
		mem_range_softc.mr_op->initAP(&mem_range_softc);
}

static int
random_ioctl(cdev_t dev, u_long cmd, caddr_t data, int flags, struct ucred *cred)
{
	int error;
	int intr;

	/*
	 * Even inspecting the state is privileged, since it gives a hint
	 * about how easily the randomness might be guessed.
	 */
	error = 0;

	switch (cmd) {
	/* Really handled in upper layer */
	case FIOASYNC:
		break;
	case MEM_SETIRQ:
		intr = *(int16_t *)data;
		if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
			break;
		if (intr < 0 || intr >= MAX_INTS)
			return (EINVAL);
		register_randintr(intr);
		break;
	case MEM_CLEARIRQ:
		intr = *(int16_t *)data;
		if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
			break;
		if (intr < 0 || intr >= MAX_INTS)
			return (EINVAL);
		unregister_randintr(intr);
		break;
	case MEM_RETURNIRQ:
		error = ENOTSUP;
		break;
	case MEM_FINDIRQ:
		intr = *(int16_t *)data;
		if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
			break;
		if (intr < 0 || intr >= MAX_INTS)
			return (EINVAL);
		intr = next_registered_randintr(intr);
		if (intr == MAX_INTS)
			return (ENOENT);
		*(u_int16_t *)data = intr;
		break;
	default:
		error = ENOTSUP;
		break;
	}
	return (error);
}

static int
mm_filter_read(struct knote *kn, long hint)
{
	return (1);
}

static int
mm_filter_write(struct knote *kn, long hint)
{
	return (1);
}

static void
dummy_filter_detach(struct knote *kn) {}

/* Implemented in kern_nrandom.c */
static struct filterops random_read_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, dummy_filter_detach, random_filter_read };

static struct filterops mm_read_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, dummy_filter_detach, mm_filter_read };

static struct filterops mm_write_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, dummy_filter_detach, mm_filter_write };

static int
mmkqfilter(struct dev_kqfilter_args *ap)
{
	struct knote *kn = ap->a_kn;
	cdev_t dev = ap->a_head.a_dev;

	ap->a_result = 0;
	switch (kn->kn_filter) {
	case EVFILT_READ:
		switch (minor(dev)) {
		case 3:
			kn->kn_fop = &random_read_filtops;
			break;
		default:
			kn->kn_fop = &mm_read_filtops;
			break;
		}
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &mm_write_filtops;
		break;
	default:
		ap->a_result = EOPNOTSUPP;
		return (0);
	}

	return (0);
}

int
iszerodev(cdev_t dev)
{
	return (zerodev == dev);
}

/*
 * /dev/upmap and /dev/kpmap.
 */
static int
user_kernel_mapping(int num, vm_ooffset_t offset, vm_ooffset_t *resultp)
{
	struct proc *p;
	int error;
	int invfork;

	if ((p = curproc) == NULL)
		return (EINVAL);

	/*
	 * If this is a child currently in vfork the pmap is shared with
	 * the parent!  We need to actually set-up the parent's p_upmap,
	 * not the child's, and we need to set the invfork flag.  Userland
	 * will probably adjust its static state so it must be consistent
	 * with the parent or userland will be really badly confused.
	 *
	 * (this situation can happen when user code in vfork() calls
	 *  libc's getpid() or some other function which then decides
	 *  it wants the upmap).
	 */
	if (p->p_flags & P_PPWAIT) {
		p = p->p_pptr;
		if (p == NULL)
			return (EINVAL);
		invfork = 1;
	} else {
		invfork = 0;
	}

	error = EINVAL;

	switch(num) {
	case 5:
		/*
		 * /dev/upmap - maps RW per-process shared user-kernel area.
		 */
		if (p->p_upmap == NULL)
			proc_usermap(p, invfork);
		else if (invfork)
			p->p_upmap->invfork = invfork;

		if (p->p_upmap &&
		    offset < roundup2(sizeof(*p->p_upmap), PAGE_SIZE)) {
			/* only good for current process */
			*resultp = pmap_kextract((vm_offset_t)p->p_upmap +
						 offset);
			error = 0;
		}
		break;
	case 6:
		/*
		 * /dev/kpmap - maps RO shared kernel global page
		 */
		if (kpmap &&
		    offset < roundup2(sizeof(*kpmap), PAGE_SIZE)) {
			*resultp = pmap_kextract((vm_offset_t)kpmap +
						 offset);
			error = 0;
		}
		break;
	default:
		break;
	}
	return error;
}

static void
mem_drvinit(void *unused)
{

	/* Initialise memory range handling */
	if (mem_range_softc.mr_op != NULL)
		mem_range_softc.mr_op->init(&mem_range_softc);

	make_dev(&mem_ops_mem, 0, UID_ROOT, GID_KMEM, 0640, "mem");
	make_dev(&mem_ops_mem, 1, UID_ROOT, GID_KMEM, 0640, "kmem");
	make_dev(&mem_ops, 2, UID_ROOT, GID_WHEEL, 0666, "null");
	make_dev(&mem_ops, 3, UID_ROOT, GID_WHEEL, 0644, "random");
	make_dev(&mem_ops, 4, UID_ROOT, GID_WHEEL, 0644, "urandom");
	make_dev(&mem_ops, 5, UID_ROOT, GID_WHEEL, 0666, "upmap");
	make_dev(&mem_ops, 6, UID_ROOT, GID_WHEEL, 0444, "kpmap");
	zerodev = make_dev(&mem_ops, 12, UID_ROOT, GID_WHEEL, 0666, "zero");
	make_dev(&mem_ops_noq, 14, UID_ROOT, GID_WHEEL, 0600, "io");
}

SYSINIT(memdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE + CDEV_MAJOR, mem_drvinit,
    NULL);
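
/*
 * Usage sketch (illustrative comment only, not compiled into the driver):
 * the nodes created in mem_drvinit() are consumed from userland through
 * the usual file and mmap interfaces.  For example, a hypothetical program
 * mapping a page of /dev/zero needs nothing beyond standard POSIX calls;
 * the rest of the kernel identifies the zero device through the
 * iszerodev() helper exported above and supplies zero-filled memory
 * (error checking omitted):
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/dev/zero", O_RDWR);
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE, fd, 0);
 *	p[0] = 1;			-- private, zero-filled page
 *	munmap(p, 4096);
 *	close(fd);
 *
 * /dev/upmap and /dev/kpmap are mmap-only (their read/write cases are
 * deliberately absent from mmrw() above); faults on such mappings are
 * resolved through memuksmap() and user_kernel_mapping().
 */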
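
/*
 * Similarly, the MEMRANGE_GET/MEMRANGE_SET shim in mem_ioctl() is driven
 * by ioctl(2) on /dev/mem using the structures from <sys/memrange.h>.
 * A hypothetical sketch that first asks how many range descriptors exist
 * and then fetches them (error handling omitted):
 *
 *	#include <fcntl.h>
 *	#include <stdlib.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/memrange.h>
 *
 *	struct mem_range_op mo;
 *	struct mem_range_desc *md;
 *	int fd = open("/dev/mem", O_RDONLY);
 *
 *	mo.mo_arg[0] = 0;		-- 0 asks for the descriptor count
 *	ioctl(fd, MEMRANGE_GET, &mo);	-- count returned in mo.mo_arg[0]
 *	md = calloc(mo.mo_arg[0], sizeof(*md));
 *	mo.mo_desc = md;		-- second call copies them out
 *	ioctl(fd, MEMRANGE_GET, &mo);
 */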