/*-
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and code derived from software contributed to
 * Berkeley by William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: mem.c 1.13 89/10/08$
 * from: @(#)mem.c	7.2 (Berkeley) 5/9/91
 * $FreeBSD: src/sys/i386/i386/mem.c,v 1.79.2.9 2003/01/04 22:58:01 njl Exp $
 */

/*
 * Memory special file
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/random.h>
#include <sys/signalvar.h>
#include <sys/uio.h>
#include <sys/vnode.h>
#include <sys/sysctl.h>

#include <sys/signal2.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>

static d_open_t		mmopen;
static d_close_t	mmclose;
static d_read_t		mmread;
static d_write_t	mmwrite;
static d_ioctl_t	mmioctl;
#if 0
static d_mmap_t		memmmap;
#endif
static d_kqfilter_t	mmkqfilter;
static int memuksmap(cdev_t dev, vm_page_t fake);

#define CDEV_MAJOR 2
static struct dev_ops mem_ops = {
	{ "mem", 0, D_MPSAFE },
	.d_open =	mmopen,
	.d_close =	mmclose,
	.d_read =	mmread,
	.d_write =	mmwrite,
	.d_ioctl =	mmioctl,
	.d_kqfilter =	mmkqfilter,
#if 0
	.d_mmap =	memmmap,
#endif
	.d_uksmap =	memuksmap
};

static int rand_bolt;
static caddr_t zbuf;
static cdev_t zerodev = NULL;
static struct lock mem_lock = LOCK_INITIALIZER("memlk", 0, 0);

MALLOC_DEFINE(M_MEMDESC, "memdesc", "memory range descriptors");
static int mem_ioctl (cdev_t, u_long, caddr_t, int, struct ucred *);
static int random_ioctl (cdev_t, u_long, caddr_t, int, struct ucred *);

struct mem_range_softc mem_range_softc;

static int seedenable;
SYSCTL_INT(_kern, OID_AUTO, seedenable, CTLFLAG_RW, &seedenable, 0, "");
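
/*
 * Minor number map for this driver, summarized from the switch
 * statements below and from mem_drvinit() at the bottom of this file:
 *
 *	0	/dev/mem	physical memory
 *	1	/dev/kmem	kernel virtual memory
 *	2	/dev/null	EOF on read, rathole on write
 *	3	/dev/random	entropy source; writes may seed the RNG
 *	4	/dev/urandom	entropy source, writes disallowed
 *	5	/dev/upmap	per-process user/kernel shared page (mmap only)
 *	6	/dev/kpmap	global kernel shared page (mmap only, RO)
 *	12	/dev/zero	source of nul bytes
 *	14	/dev/io		grants I/O privilege while open
 */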

static int
mmopen(struct dev_open_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	int error;

	switch (minor(dev)) {
	case 0:
	case 1:
		/*
		 * /dev/mem and /dev/kmem
		 */
		if (ap->a_oflags & FWRITE) {
			if (securelevel > 0 || kernel_mem_readonly)
				return (EPERM);
		}
		error = 0;
		break;
	case 6:
		/*
		 * /dev/kpmap can only be opened for reading.
		 */
		if (ap->a_oflags & FWRITE)
			return (EPERM);
		error = 0;
		break;
	case 14:
		error = priv_check_cred(ap->a_cred, PRIV_ROOT, 0);
		if (error != 0)
			break;
		if (securelevel > 0 || kernel_mem_readonly) {
			error = EPERM;
			break;
		}
		error = cpu_set_iopl();
		break;
	default:
		error = 0;
		break;
	}
	return (error);
}

static int
mmclose(struct dev_close_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	int error;

	switch (minor(dev)) {
	case 14:
		error = cpu_clr_iopl();
		break;
	default:
		error = 0;
		break;
	}
	return (error);
}

static int
mmrw(cdev_t dev, struct uio *uio, int flags)
{
	int o;
	u_int c;
	u_int poolsize;
	u_long v;
	struct iovec *iov;
	int error = 0;
	caddr_t buf = NULL;

	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("mmrw");
			continue;
		}
		switch (minor(dev)) {
		case 0:
			/*
			 * minor device 0 is physical memory, /dev/mem
			 */
			v = uio->uio_offset;
			v &= ~(long)PAGE_MASK;
			pmap_kenter((vm_offset_t)ptvmmap, v);
			o = (int)uio->uio_offset & PAGE_MASK;
			c = (u_int)(PAGE_SIZE - ((uintptr_t)iov->iov_base & PAGE_MASK));
			c = min(c, (u_int)(PAGE_SIZE - o));
			c = min(c, (u_int)iov->iov_len);
			error = uiomove((caddr_t)&ptvmmap[o], (int)c, uio);
			pmap_kremove((vm_offset_t)ptvmmap);
			continue;

		case 1: {
			/*
			 * minor device 1 is kernel memory, /dev/kmem
			 */
			vm_offset_t saddr, eaddr;
			int prot;

			c = iov->iov_len;

			/*
			 * Make sure that all of the pages are currently
			 * resident so that we don't create any zero-fill
			 * pages.
			 */
			saddr = trunc_page(uio->uio_offset);
			eaddr = round_page(uio->uio_offset + c);
			if (saddr > eaddr)
				return EFAULT;

			/*
			 * Make sure the kernel addresses are mapped.
			 * platform_direct_mapped() can be used to bypass
			 * default mapping via the page table (virtual kernels
			 * contain a lot of out-of-band data).
			 */
			prot = VM_PROT_READ;
			if (uio->uio_rw != UIO_READ)
				prot |= VM_PROT_WRITE;
			error = kvm_access_check(saddr, eaddr, prot);
			if (error)
				return (error);
			error = uiomove((caddr_t)(vm_offset_t)uio->uio_offset,
					(int)c, uio);
			continue;
		}
		case 2:
			/*
			 * minor device 2 (/dev/null) is EOF/RATHOLE
			 */
			if (uio->uio_rw == UIO_READ)
				return (0);
			c = iov->iov_len;
			break;
		case 3:
			/*
			 * minor device 3 (/dev/random) is source of filth
			 * on read, seeder on write
			 */
			if (buf == NULL)
				buf = kmalloc(PAGE_SIZE, M_TEMP, M_WAITOK);
			c = min(iov->iov_len, PAGE_SIZE);
			if (uio->uio_rw == UIO_WRITE) {
				error = uiomove(buf, (int)c, uio);
				if (error == 0 &&
				    seedenable &&
				    securelevel <= 0) {
					error = add_buffer_randomness_src(buf, c, RAND_SRC_SEEDING);
				} else if (error == 0) {
					error = EPERM;
				}
			} else {
				poolsize = read_random(buf, c);
				if (poolsize == 0) {
					if (buf)
						kfree(buf, M_TEMP);
					if ((flags & IO_NDELAY) != 0)
						return (EWOULDBLOCK);
					return (0);
				}
				c = min(c, poolsize);
				error = uiomove(buf, (int)c, uio);
			}
			continue;
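
		/*
		 * Illustrative userland sketch (not part of this driver):
		 * with the kern.seedenable sysctl set and securelevel <= 0,
		 * a write to /dev/random feeds the pool via
		 * add_buffer_randomness_src() above.  Roughly:
		 *
		 *	int fd = open("/dev/random", O_WRONLY);
		 *	write(fd, seedbuf, seedlen);	// EPERM unless seeding
		 *					// is enabled
		 *	close(fd);
		 */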
		case 4:
			/*
			 * minor device 4 (/dev/urandom) is source of muck
			 * on read, writes are disallowed.
			 */
			c = min(iov->iov_len, PAGE_SIZE);
			if (uio->uio_rw == UIO_WRITE) {
				error = EPERM;
				break;
			}
			if (CURSIG(curthread->td_lwp) != 0) {
				/*
				 * Use tsleep() to get the error code right.
				 * It should return immediately.
				 */
				error = tsleep(&rand_bolt, PCATCH, "urand", 1);
				if (error != 0 && error != EWOULDBLOCK)
					continue;
			}
			if (buf == NULL)
				buf = kmalloc(PAGE_SIZE, M_TEMP, M_WAITOK);
			poolsize = read_random_unlimited(buf, c);
			c = min(c, poolsize);
			error = uiomove(buf, (int)c, uio);
			continue;
		/* case 5: read/write not supported, mmap only */
		/* case 6: read/write not supported, mmap only */
		case 12:
			/*
			 * minor device 12 (/dev/zero) is source of nulls
			 * on read, writes are disallowed.
			 */
			if (uio->uio_rw == UIO_WRITE) {
				c = iov->iov_len;
				break;
			}
			if (zbuf == NULL) {
				zbuf = (caddr_t)kmalloc(PAGE_SIZE, M_TEMP,
				    M_WAITOK | M_ZERO);
			}
			c = min(iov->iov_len, PAGE_SIZE);
			error = uiomove(zbuf, (int)c, uio);
			continue;
		default:
			return (ENODEV);
		}
		if (error)
			break;
		iov->iov_base = (char *)iov->iov_base + c;
		iov->iov_len -= c;
		uio->uio_offset += c;
		uio->uio_resid -= c;
	}
	if (buf)
		kfree(buf, M_TEMP);
	return (error);
}

static int
mmread(struct dev_read_args *ap)
{
	return(mmrw(ap->a_head.a_dev, ap->a_uio, ap->a_ioflag));
}

static int
mmwrite(struct dev_write_args *ap)
{
	return(mmrw(ap->a_head.a_dev, ap->a_uio, ap->a_ioflag));
}

/*******************************************************\
* allow user processes to MMAP some memory sections	*
* instead of going through read/write			*
\*******************************************************/

static int user_kernel_mapping(int num, vm_ooffset_t offset,
				vm_ooffset_t *resultp);

#if 0

static int
memmmap(struct dev_mmap_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	vm_ooffset_t result;
	int error;

	switch (minor(dev)) {
	case 0:
		/*
		 * minor device 0 is physical memory
		 */
		ap->a_result = atop(ap->a_offset);
		error = 0;
		break;
	case 1:
		/*
		 * minor device 1 is kernel memory
		 */
		ap->a_result = atop(vtophys(ap->a_offset));
		error = 0;
		break;
	case 5:
	case 6:
		/*
		 * minor device 5 is /dev/upmap (see sys/upmap.h)
		 * minor device 6 is /dev/kpmap (see sys/upmap.h)
		 */
		result = 0;
		error = user_kernel_mapping(minor(dev), ap->a_offset, &result);
		ap->a_result = atop(result);
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
}

#endif

static int
memuksmap(cdev_t dev, vm_page_t fake)
{
	vm_ooffset_t result;
	int error;

	switch (minor(dev)) {
	case 0:
		/*
		 * minor device 0 is physical memory
		 */
		fake->phys_addr = ptoa(fake->pindex);
		error = 0;
		break;
	case 1:
		/*
		 * minor device 1 is kernel memory
		 */
		fake->phys_addr = vtophys(ptoa(fake->pindex));
		error = 0;
		break;
	case 5:
	case 6:
		/*
		 * minor device 5 is /dev/upmap (see sys/upmap.h)
		 * minor device 6 is /dev/kpmap (see sys/upmap.h)
		 */
		result = 0;
		error = user_kernel_mapping(minor(dev),
					    ptoa(fake->pindex), &result);
		fake->phys_addr = result;
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
}
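
/*
 * Illustrative userland sketch (not part of this driver): the uksmap
 * path above is what services an mmap() of these devices, e.g. mapping
 * one page of physical memory through /dev/mem:
 *
 *	int fd = open("/dev/mem", O_RDONLY);
 *	void *p = mmap(NULL, PAGE_SIZE, PROT_READ, MAP_SHARED,
 *		       fd, (off_t)phys_addr);	// phys_addr is hypothetical
 */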

static int
mmioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	int error;

	lockmgr(&mem_lock, LK_EXCLUSIVE);

	switch (minor(dev)) {
	case 0:
		error = mem_ioctl(dev, ap->a_cmd, ap->a_data,
				  ap->a_fflag, ap->a_cred);
		break;
	case 3:
	case 4:
		error = random_ioctl(dev, ap->a_cmd, ap->a_data,
				     ap->a_fflag, ap->a_cred);
		break;
	default:
		error = ENODEV;
		break;
	}

	lockmgr(&mem_lock, LK_RELEASE);

	return (error);
}

/*
 * Operations for changing memory attributes.
 *
 * This is basically just an ioctl shim for mem_range_attr_get
 * and mem_range_attr_set.
 */
static int
mem_ioctl(cdev_t dev, u_long cmd, caddr_t data, int flags, struct ucred *cred)
{
	int nd, error = 0;
	struct mem_range_op *mo = (struct mem_range_op *)data;
	struct mem_range_desc *md;

	/* is this for us? */
	if ((cmd != MEMRANGE_GET) &&
	    (cmd != MEMRANGE_SET))
		return (ENOTTY);

	/* any chance we can handle this? */
	if (mem_range_softc.mr_op == NULL)
		return (EOPNOTSUPP);

	/* do we have any descriptors? */
	if (mem_range_softc.mr_ndesc == 0)
		return (ENXIO);

	switch (cmd) {
	case MEMRANGE_GET:
		nd = imin(mo->mo_arg[0], mem_range_softc.mr_ndesc);
		if (nd > 0) {
			md = (struct mem_range_desc *)
				kmalloc(nd * sizeof(struct mem_range_desc),
					M_MEMDESC, M_WAITOK);
			error = mem_range_attr_get(md, &nd);
			if (!error)
				error = copyout(md, mo->mo_desc,
						nd * sizeof(struct mem_range_desc));
			kfree(md, M_MEMDESC);
		} else {
			nd = mem_range_softc.mr_ndesc;
		}
		mo->mo_arg[0] = nd;
		break;

	case MEMRANGE_SET:
		md = (struct mem_range_desc *)kmalloc(sizeof(struct mem_range_desc),
						      M_MEMDESC, M_WAITOK);
		error = copyin(mo->mo_desc, md, sizeof(struct mem_range_desc));
		/* clamp description string */
		md->mr_owner[sizeof(md->mr_owner) - 1] = 0;
		if (error == 0)
			error = mem_range_attr_set(md, &mo->mo_arg[0]);
		kfree(md, M_MEMDESC);
		break;
	}
	return (error);
}

/*
 * Implementation-neutral, kernel-callable functions for manipulating
 * memory range attributes.
 */
int
mem_range_attr_get(struct mem_range_desc *mrd, int *arg)
{
	/* can we handle this? */
	if (mem_range_softc.mr_op == NULL)
		return (EOPNOTSUPP);

	if (*arg == 0) {
		*arg = mem_range_softc.mr_ndesc;
	} else {
		bcopy(mem_range_softc.mr_desc, mrd,
		      (*arg) * sizeof(struct mem_range_desc));
	}
	return (0);
}

int
mem_range_attr_set(struct mem_range_desc *mrd, int *arg)
{
	/* can we handle this? */
	if (mem_range_softc.mr_op == NULL)
		return (EOPNOTSUPP);

	return (mem_range_softc.mr_op->set(&mem_range_softc, mrd, arg));
}

void
mem_range_AP_init(void)
{
	if (mem_range_softc.mr_op && mem_range_softc.mr_op->initAP)
		mem_range_softc.mr_op->initAP(&mem_range_softc);
}
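
/*
 * Illustrative userland sketch (not part of this driver) of the
 * MEMRANGE_GET two-call pattern handled above: pass mo_arg[0] == 0 to
 * learn the descriptor count, then call again with a buffer:
 *
 *	struct mem_range_op mo = { 0 };
 *	int fd = open("/dev/mem", O_RDONLY);
 *	ioctl(fd, MEMRANGE_GET, &mo);		// mo.mo_arg[0] = count
 *	mo.mo_desc = calloc(mo.mo_arg[0], sizeof(struct mem_range_desc));
 *	ioctl(fd, MEMRANGE_GET, &mo);		// fills mo.mo_desc
 */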

static int
random_ioctl(cdev_t dev, u_long cmd, caddr_t data, int flags, struct ucred *cred)
{
	int error;
	int intr;

	/*
	 * Even inspecting the state is privileged, since it gives a hint
	 * about how easily the randomness might be guessed.
	 */
	error = 0;

	switch (cmd) {
	/* Really handled in upper layer */
	case FIOASYNC:
		break;
	case MEM_SETIRQ:
		intr = *(int16_t *)data;
		if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
			break;
		if (intr < 0 || intr >= MAX_INTS)
			return (EINVAL);
		register_randintr(intr);
		break;
	case MEM_CLEARIRQ:
		intr = *(int16_t *)data;
		if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
			break;
		if (intr < 0 || intr >= MAX_INTS)
			return (EINVAL);
		unregister_randintr(intr);
		break;
	case MEM_RETURNIRQ:
		error = ENOTSUP;
		break;
	case MEM_FINDIRQ:
		intr = *(int16_t *)data;
		if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
			break;
		if (intr < 0 || intr >= MAX_INTS)
			return (EINVAL);
		intr = next_registered_randintr(intr);
		if (intr == MAX_INTS)
			return (ENOENT);
		*(u_int16_t *)data = intr;
		break;
	default:
		error = ENOTSUP;
		break;
	}
	return (error);
}

static int
mm_filter_read(struct knote *kn, long hint)
{
	return (1);
}

static int
mm_filter_write(struct knote *kn, long hint)
{
	return (1);
}

static void
dummy_filter_detach(struct knote *kn) {}

/* Implemented in kern_nrandom.c */
static struct filterops random_read_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, dummy_filter_detach, random_filter_read };

static struct filterops mm_read_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, dummy_filter_detach, mm_filter_read };

static struct filterops mm_write_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, dummy_filter_detach, mm_filter_write };

static int
mmkqfilter(struct dev_kqfilter_args *ap)
{
	struct knote *kn = ap->a_kn;
	cdev_t dev = ap->a_head.a_dev;

	ap->a_result = 0;
	switch (kn->kn_filter) {
	case EVFILT_READ:
		switch (minor(dev)) {
		case 3:
			kn->kn_fop = &random_read_filtops;
			break;
		default:
			kn->kn_fop = &mm_read_filtops;
			break;
		}
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &mm_write_filtops;
		break;
	default:
		ap->a_result = EOPNOTSUPP;
		return (0);
	}

	return (0);
}

int
iszerodev(cdev_t dev)
{
	return (zerodev == dev);
}
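
/*
 * Illustrative userland sketch (not part of this driver): the shared
 * pages below are reached by mmap()ing the device; see sys/upmap.h for
 * the layout of each page.  For the read-only kernel page, roughly:
 *
 *	int fd = open("/dev/kpmap", O_RDONLY);
 *	void *kp = mmap(NULL, PAGE_SIZE, PROT_READ, MAP_SHARED, fd, 0);
 */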

/*
 * /dev/upmap and /dev/kpmap.
 */
static int
user_kernel_mapping(int num, vm_ooffset_t offset, vm_ooffset_t *resultp)
{
	struct proc *p;
	int error;
	int invfork;

	if ((p = curproc) == NULL)
		return (EINVAL);

	/*
	 * If this is a child currently in vfork the pmap is shared with
	 * the parent!  We need to actually set-up the parent's p_upmap,
	 * not the child's, and we need to set the invfork flag.  Userland
	 * will probably adjust its static state so it must be consistent
	 * with the parent or userland will be really badly confused.
	 *
	 * (this situation can happen when user code in vfork() calls
	 *  libc's getpid() or some other function which then decides
	 *  it wants the upmap).
	 */
	if (p->p_flags & P_PPWAIT) {
		p = p->p_pptr;
		if (p == NULL)
			return (EINVAL);
		invfork = 1;
	} else {
		invfork = 0;
	}

	error = EINVAL;

	switch(num) {
	case 5:
		/*
		 * /dev/upmap - maps RW per-process shared user-kernel area.
		 */
		if (p->p_upmap == NULL)
			proc_usermap(p, invfork);
		else if (invfork)
			p->p_upmap->invfork = invfork;

		if (p->p_upmap &&
		    offset < roundup2(sizeof(*p->p_upmap), PAGE_SIZE)) {
			/* only good for current process */
			*resultp = pmap_kextract((vm_offset_t)p->p_upmap +
						 offset);
			error = 0;
		}
		break;
	case 6:
		/*
		 * /dev/kpmap - maps RO shared kernel global page
		 */
		if (kpmap &&
		    offset < roundup2(sizeof(*kpmap), PAGE_SIZE)) {
			*resultp = pmap_kextract((vm_offset_t)kpmap +
						 offset);
			error = 0;
		}
		break;
	default:
		break;
	}
	return error;
}

static void
mem_drvinit(void *unused)
{
	/* Initialise memory range handling */
	if (mem_range_softc.mr_op != NULL)
		mem_range_softc.mr_op->init(&mem_range_softc);

	make_dev(&mem_ops, 0, UID_ROOT, GID_KMEM, 0640, "mem");
	make_dev(&mem_ops, 1, UID_ROOT, GID_KMEM, 0640, "kmem");
	make_dev(&mem_ops, 2, UID_ROOT, GID_WHEEL, 0666, "null");
	make_dev(&mem_ops, 3, UID_ROOT, GID_WHEEL, 0644, "random");
	make_dev(&mem_ops, 4, UID_ROOT, GID_WHEEL, 0644, "urandom");
	make_dev(&mem_ops, 5, UID_ROOT, GID_WHEEL, 0666, "upmap");
	make_dev(&mem_ops, 6, UID_ROOT, GID_WHEEL, 0444, "kpmap");
	zerodev = make_dev(&mem_ops, 12, UID_ROOT, GID_WHEEL, 0666, "zero");
	make_dev(&mem_ops, 14, UID_ROOT, GID_WHEEL, 0600, "io");
}

SYSINIT(memdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE + CDEV_MAJOR, mem_drvinit,
	NULL);
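
/*
 * Illustrative userland sketch (not part of this driver): holding
 * /dev/io open raises the caller's I/O privilege level (see the
 * cpu_set_iopl()/cpu_clr_iopl() calls in mmopen()/mmclose() above),
 * allowing direct port access while the descriptor stays open:
 *
 *	int fd = open("/dev/io", O_RDWR);	// requires root and
 *						// securelevel <= 0
 *	...use machine/cpufunc.h inb()/outb()...
 *	close(fd);				// drops the privilege
 */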