/* This file contains a collection of miscellaneous procedures. Some of them
 * perform simple system calls. Some others do a little part of system calls
 * that are mostly performed by the Memory Manager.
 *
 * The entry points into this file are
 *   do_fcntl:	  perform the FCNTL system call
 *   do_sync:	  perform the SYNC system call
 *   do_fsync:	  perform the FSYNC system call
 *   pm_setsid:	  perform VFS's side of setsid system call
 *   pm_reboot:	  sync disks and prepare for shutdown
 *   pm_fork:	  adjust the tables after PM has performed a FORK system call
 *   do_exec:	  handle files with FD_CLOEXEC on after PM has done an EXEC
 *   do_exit:	  a process has exited; note that in the tables
 *   do_set:	  set uid or gid for some process
 *   do_revive:	  revive a process that was waiting for something (e.g. TTY)
 *   do_svrctl:	  file system control
 *   do_getsysinfo:	request copy of FS data structure
 *   pm_dumpcore:	create a core dump
 */

#include "fs.h"
#include <fcntl.h>
#include <assert.h>
#include <unistd.h>
#include <string.h>
#include <minix/callnr.h>
#include <minix/safecopies.h>
#include <minix/endpoint.h>
#include <minix/com.h>
#include <minix/sysinfo.h>
#include <minix/u64.h>
#include <sys/ptrace.h>
#include <sys/svrctl.h>
#include <sys/resource.h>
#include "file.h"
#include <minix/vfsif.h>
#include "vnode.h"
#include "vmnt.h"

#define CORE_NAME	"core"
#define CORE_MODE	0777	/* mode to use on core image files */

#if ENABLE_SYSCALL_STATS
/* Per-call-number invocation counters, exported via SI_CALL_STATS below. */
unsigned long calls_stats[NR_VFS_CALLS];
#endif

static void free_proc(int flags);

/*===========================================================================*
 *				do_getsysinfo				     *
 *===========================================================================*/
int do_getsysinfo(void)
{
/* Copy one of VFS's internal tables to a (privileged) caller.  The request
 * message selects which table ('what'), and provides the destination buffer
 * address and size.  Returns OK on success, or EPERM/EINVAL on failure.
 */
  struct fproc *rfp;
  struct fproc_light *rfpl;
  struct smap *sp;
  vir_bytes src_addr, dst_addr;
  size_t len, buf_size;
  int what;

  what = job_m_in.m_lsys_getsysinfo.what;
  dst_addr = job_m_in.m_lsys_getsysinfo.where;
  buf_size = job_m_in.m_lsys_getsysinfo.size;

  /* Only su may call do_getsysinfo. This call may leak information (and is not
   * stable enough to be part of the API/ABI). In the future, requests from
   * non-system processes should be denied.
   */
  if (!super_user) return(EPERM);

  switch(what) {
    case SI_PROC_TAB:
	src_addr = (vir_bytes) fproc;
	len = sizeof(struct fproc) * NR_PROCS;
	break;
    case SI_DMAP_TAB:
	src_addr = (vir_bytes) dmap;
	len = sizeof(struct dmap) * NR_DEVICES;
	break;
    case SI_PROCLIGHT_TAB:
	/* Fill the light process table for the MIB service upon request.
	 * For each slot, resolve the endpoint of the task the process is
	 * blocked on (character device, socket device, or none).
	 */
	rfpl = &fproc_light[0];
	for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++, rfpl++) {
		rfpl->fpl_tty = rfp->fp_tty;
		rfpl->fpl_blocked_on = rfp->fp_blocked_on;
		if (rfp->fp_blocked_on == FP_BLOCKED_ON_CDEV)
			rfpl->fpl_task = rfp->fp_cdev.endpt;
		else if (rfp->fp_blocked_on == FP_BLOCKED_ON_SDEV &&
		    (sp = get_smap_by_dev(rfp->fp_sdev.dev, NULL)) != NULL)
			rfpl->fpl_task = sp->smap_endpt;
		else
			rfpl->fpl_task = NONE;
	}
	src_addr = (vir_bytes) fproc_light;
	len = sizeof(fproc_light);
	break;
#if ENABLE_SYSCALL_STATS
    case SI_CALL_STATS:
	src_addr = (vir_bytes) calls_stats;
	len = sizeof(calls_stats);
	break;
#endif
    default:
	return(EINVAL);
  }

  /* The caller's buffer must match the table size exactly; a mismatch means
   * the caller was built against a different VFS layout.
   */
  if (len != buf_size)
	return(EINVAL);

  return sys_datacopy_wrapper(SELF, src_addr, who_e, dst_addr, len);
}

/*===========================================================================*
 *				do_fcntl				     *
 *===========================================================================*/
int do_fcntl(void)
{
/* Perform the fcntl(fd, cmd, ...) system call.
 */
  struct filp *f;
  int fd, new_fd, fl, r = OK, fcntl_req, fcntl_argx;
  vir_bytes addr;
  tll_access_t locktype;

  fd = job_m_in.m_lc_vfs_fcntl.fd;
  fcntl_req = job_m_in.m_lc_vfs_fcntl.cmd;
  fcntl_argx = job_m_in.m_lc_vfs_fcntl.arg_int;
  addr = job_m_in.m_lc_vfs_fcntl.arg_ptr;

  /* Is the file descriptor valid?  F_FREESP modifies the file, so it needs a
   * write lock on the vnode; everything else gets by with a read lock.  On
   * failure get_filp() has set the global err_code.
   */
  locktype = (fcntl_req == F_FREESP) ? VNODE_WRITE : VNODE_READ;
  if ((f = get_filp(fd, locktype)) == NULL)
	return(err_code);

  switch (fcntl_req) {
    case F_DUPFD:
    case F_DUPFD_CLOEXEC:
	/* This replaces the old dup() system call.  Find a free descriptor at
	 * or above fcntl_argx and make it share the filp object with fd.
	 */
	if (fcntl_argx < 0 || fcntl_argx >= OPEN_MAX) r = EINVAL;
	else if ((r = get_fd(fp, fcntl_argx, 0, &new_fd, NULL)) == OK) {
		f->filp_count++;
		fp->fp_filp[new_fd] = f;
		assert(!FD_ISSET(new_fd, &fp->fp_cloexec_set));
		if (fcntl_req == F_DUPFD_CLOEXEC)
			FD_SET(new_fd, &fp->fp_cloexec_set);
		r = new_fd;
	}
	break;

    case F_GETFD:
	/* Get close-on-exec flag (FD_CLOEXEC in POSIX Table 6-2). */
	r = 0;
	if (FD_ISSET(fd, &fp->fp_cloexec_set))
		r = FD_CLOEXEC;
	break;

    case F_SETFD:
	/* Set close-on-exec flag (FD_CLOEXEC in POSIX Table 6-2). */
	if (fcntl_argx & FD_CLOEXEC)
		FD_SET(fd, &fp->fp_cloexec_set);
	else
		FD_CLR(fd, &fp->fp_cloexec_set);
	break;

    case F_GETFL:
	/* Get file status flags (O_NONBLOCK and O_APPEND) plus the access
	 * mode bits (O_ACCMODE).
	 */
	fl = f->filp_flags & (O_NONBLOCK | O_APPEND | O_ACCMODE);
	r = fl;
	break;

    case F_SETFL:
	/* Set file status flags (O_NONBLOCK and O_APPEND); all other flag
	 * bits in the argument are ignored.
	 */
	fl = O_NONBLOCK | O_APPEND;
	f->filp_flags = (f->filp_flags & ~fl) | (fcntl_argx & fl);
	break;

    case F_GETLK:
    case F_SETLK:
    case F_SETLKW:
	/* Set or clear a file lock; 'addr' points to the caller's flock
	 * structure.
	 */
	r = lock_op(fd, fcntl_req, addr);
	break;

    case F_FREESP:
     {
	/* Free a section of a file. */
	off_t start, end, offset;
	struct flock flock_arg;

	/* Check if it's a regular file that is open for writing. */
	if (!S_ISREG(f->filp_vno->v_mode)) r = EINVAL;
	else if (!(f->filp_mode & W_BIT)) r = EBADF;
	else {
		/* Copy flock data from userspace. */
		r = sys_datacopy_wrapper(who_e, addr, SELF,
			(vir_bytes)&flock_arg, sizeof(flock_arg));
	}

	if (r != OK) break;

	/* Convert starting offset to signed. */
	offset = (off_t) flock_arg.l_start;

	/* Figure out starting position base. */
	switch(flock_arg.l_whence) {
	  case SEEK_SET: start = 0; break;
	  case SEEK_CUR: start = f->filp_pos; break;
	  case SEEK_END: start = f->filp_vno->v_size; break;
	  default: r = EINVAL;
	}
	if (r != OK) break;

	/* Check for overflow or underflow.
	 * NOTE(review): these tests rely on signed wrap-around of start +
	 * offset, which is undefined behavior in C — TODO: rewrite using
	 * range checks against the type limits instead.
	 */
	if (offset > 0 && start + offset < start) r = EINVAL;
	else if (offset < 0 && start + offset > start) r = EINVAL;
	else {
		start += offset;
		if (start < 0) r = EINVAL;
	}
	if (r != OK) break;

	/* l_len != 0: free the byte range [start, end); l_len == 0: truncate
	 * the file at 'start' (end = 0 tells the FS to truncate).
	 */
	if (flock_arg.l_len != 0) {
		if (start >= f->filp_vno->v_size) r = EINVAL;
		else if ((end = start + flock_arg.l_len) <= start) r = EINVAL;
		else if (end > f->filp_vno->v_size) end = f->filp_vno->v_size;
	} else {
		end = 0;
	}
	if (r != OK) break;

	r = req_ftrunc(f->filp_vno->v_fs_e, f->filp_vno->v_inode_nr,start,end);

	/* Keep the cached vnode size in sync after a truncation. */
	if (r == OK && flock_arg.l_len == 0)
		f->filp_vno->v_size = start;

	break;
     }
    case F_GETNOSIGPIPE:
	/* Report whether writes to a broken pipe raise SIGPIPE (0) or
	 * just return EPIPE (1).
	 */
	r = !!(f->filp_flags & O_NOSIGPIPE);
	break;
    case F_SETNOSIGPIPE:
	if (fcntl_argx)
		f->filp_flags |= O_NOSIGPIPE;
	else
		f->filp_flags &= ~O_NOSIGPIPE;
	break;
    case F_FLUSH_FS_CACHE:
     {
	/* Privileged request to flush a file system's block cache. */
	struct vnode *vn = f->filp_vno;
	mode_t mode = f->filp_vno->v_mode;
	if (!super_user) {
		r = EPERM;
	} else if (S_ISBLK(mode)) {
		/* Block device; flush corresponding device blocks. */
		r = req_flush(vn->v_bfs_e, vn->v_sdev);
	} else if (S_ISREG(mode) || S_ISDIR(mode)) {
		/* Directory or regular file; flush hosting FS blocks. */
		r = req_flush(vn->v_fs_e, vn->v_dev);
	} else {
		/* Remaining cases.. Meaning unclear. */
		r = ENODEV;
	}
	break;
     }
    default:
	r = EINVAL;
  }

  unlock_filp(f);
  return(r);
}

/*===========================================================================*
 *				do_sync					     *
 *===========================================================================*/
int do_sync(void)
{
/* Perform the sync() system call: ask every mounted, rooted file system to
 * write its dirty data to disk.  A failure to lock a vmnt aborts the loop
 * and its error code is returned.
 */
  struct vmnt *vmp;
  int r = OK;

  for (vmp = &vmnt[0]; vmp < &vmnt[NR_MNTS]; ++vmp) {
	if ((r = lock_vmnt(vmp, VMNT_READ)) != OK)
		break;
	/* Skip vmnt slots that are unused or not fully mounted. */
	if (vmp->m_dev != NO_DEV && vmp->m_fs_e != NONE &&
			vmp->m_root_node != NULL) {
		req_sync(vmp->m_fs_e);
	}
	unlock_vmnt(vmp);
  }

  return(r);
}

/*===========================================================================*
 *				do_fsync				     *
 *===========================================================================*/
int do_fsync(void)
{
/* Perform the fsync() system call.
 * Note that this syncs the entire file system hosting the given file
 * descriptor's vnode, not just that one file.
 */
  struct filp *rfilp;
  struct vmnt *vmp;
  dev_t dev;
  int fd, r = OK;

  fd = job_m_in.m_lc_vfs_fsync.fd;

  if ((rfilp = get_filp(fd, VNODE_READ)) == NULL)
	return(err_code);

  /* Remember the device and drop the filp lock before taking vmnt locks. */
  dev = rfilp->filp_vno->v_dev;
  unlock_filp(rfilp);

  for (vmp = &vmnt[0]; vmp < &vmnt[NR_MNTS]; ++vmp) {
	if (vmp->m_dev != dev) continue;
	if ((r = lock_vmnt(vmp, VMNT_READ)) != OK)
		break;
	/* Re-check under the lock: the mount may have changed meanwhile. */
	if (vmp->m_dev != NO_DEV && vmp->m_dev == dev &&
		vmp->m_fs_e != NONE && vmp->m_root_node != NULL) {

		req_sync(vmp->m_fs_e);
	}
	unlock_vmnt(vmp);
  }

  return(r);
}

/* Duplicate the file descriptor 'pfd' of process 'rfp' into VM's descriptor
 * table, for use by VM's mmap machinery.  On success, returns OK with the
 * new VM-side descriptor in *vmfd and the (locked) shared filp in *newfilp;
 * the caller is responsible for unlocking it.  Fails with EBADF/EINVAL if
 * the descriptor is invalid, the hosting FS does not support peeking, or the
 * file is not a regular file or block device.
 */
int dupvm(struct fproc *rfp, int pfd, int *vmfd, struct filp **newfilp)
{
  int result, procfd;
  struct filp *f = NULL;
  struct fproc *vmf = fproc_addr(VM_PROC_NR);

  *newfilp = NULL;

  if ((f = get_filp2(rfp, pfd, VNODE_READ)) == NULL) {
	printf("VFS dupvm: get_filp2 failed\n");
	return EBADF;
  }

  /* mmap requires the hosting FS to support the PEEK request. */
  if(!(f->filp_vno->v_vmnt->m_fs_flags & RES_HASPEEK)) {
	unlock_filp(f);
#if 0	/* Noisy diagnostic for mmap() by ld.so */
	printf("VFS dupvm: no peek available\n");
#endif
	return EINVAL;
  }

  assert(f->filp_vno);
  assert(f->filp_vno->v_vmnt);

  if (!S_ISREG(f->filp_vno->v_mode) && !S_ISBLK(f->filp_vno->v_mode)) {
	printf("VFS: mmap regular/blockdev only; dev 0x%llx ino %llu has mode 0%o\n",
		f->filp_vno->v_dev, f->filp_vno->v_inode_nr, f->filp_vno->v_mode);
	unlock_filp(f);
	return EINVAL;
  }

  /* get free FD in VM */
  if((result=get_fd(vmf, 0, 0, &procfd, NULL)) != OK) {
	unlock_filp(f);
	printf("VFS dupvm: getfd failed\n");
	return result;
  }

  *vmfd = procfd;

  /* Share the filp between the owning process and VM. */
  f->filp_count++;
  assert(f->filp_count > 0);
  vmf->fp_filp[procfd] = f;

  *newfilp = f;

  return OK;
}

/*===========================================================================*
 *				do_vm_call				     *
 *===========================================================================*/
int do_vm_call(void)
{
/* A call that VM does to VFS.
 * We must reply with the fixed type VM_VFS_REPLY (and put our result info
 * in the rest of the message) so VM can tell the difference between a
 * request from VFS and a reply to this call.
 */
  int req = job_m_in.VFS_VMCALL_REQ;
  int req_fd = job_m_in.VFS_VMCALL_FD;
  u32_t req_id = job_m_in.VFS_VMCALL_REQID;
  endpoint_t ep = job_m_in.VFS_VMCALL_ENDPOINT;
  u64_t offset = job_m_in.VFS_VMCALL_OFFSET;
  u32_t length = job_m_in.VFS_VMCALL_LENGTH;
  int result = OK;
  int slot;
  struct fproc *rfp;
#if !defined(NDEBUG)
  struct fproc *vmf;
#endif /* !defined(NDEBUG) */
  struct filp *f = NULL;
  int r;

  /* Only VM itself may make this call. */
  if(job_m_in.m_source != VM_PROC_NR)
	return ENOSYS;

  if(isokendpt(ep, &slot) != OK) rfp = NULL;
  else rfp = &fproc[slot];

#if !defined(NDEBUG)
  vmf = fproc_addr(VM_PROC_NR);
#endif /* !defined(NDEBUG) */
  assert(fp == vmf);
  assert(rfp != vmf);

  switch(req) {
	case VMVFSREQ_FDLOOKUP:
	{
		int procfd;

		/* Lookup fd in referenced process; duplicate it into VM's
		 * descriptor table and report device/inode/size so VM can
		 * set up the mapping.
		 */
		if(!rfp) {
			printf("VFS: why isn't ep %d here?!\n", ep);
			result = ESRCH;
			goto reqdone;
		}

		if((result = dupvm(rfp, req_fd, &procfd, &f)) != OK) {
#if 0	/* Noisy diagnostic for mmap() by ld.so */
			printf("vfs: dupvm failed\n");
#endif
			goto reqdone;
		}

		if(S_ISBLK(f->filp_vno->v_mode)) {
			/* Block devices have no meaningful inode or size for
			 * VM's purposes; report "unbounded".
			 */
			assert(f->filp_vno->v_sdev != NO_DEV);
			job_m_out.VMV_DEV = f->filp_vno->v_sdev;
			job_m_out.VMV_INO = VMC_NO_INODE;
			job_m_out.VMV_SIZE_PAGES = LONG_MAX;
		} else {
			job_m_out.VMV_DEV = f->filp_vno->v_dev;
			job_m_out.VMV_INO = f->filp_vno->v_inode_nr;
			job_m_out.VMV_SIZE_PAGES =
				roundup(f->filp_vno->v_size,
					PAGE_SIZE)/PAGE_SIZE;
		}

		job_m_out.VMV_FD = procfd;

		result = OK;

		break;
	}
	case VMVFSREQ_FDCLOSE:
	{
		/* Close a descriptor in VM's own table. */
		result = close_fd(fp, req_fd, FALSE /*may_suspend*/);
		if(result != OK) {
			printf("VFS: VM fd close for fd %d, %d (%d)\n",
				req_fd, fp->fp_endpoint, result);
		}
		break;
	}
	case VMVFSREQ_FDIO:
	{
		/* Seek, then peek: fault the requested range into the FS
		 * cache on VM's behalf.
		 */
		result = actual_lseek(fp, req_fd, SEEK_SET, offset,
			NULL);

		if(result == OK) {
			result = actual_read_write_peek(fp, PEEKING,
				req_fd, /* vir_bytes */ 0, length);
		}

		break;
	}
	default:
		panic("VFS: bad request code from VM\n");
		break;
  }

reqdone:
  if(f)
	unlock_filp(f);

  /* fp is VM still. */
  assert(fp == vmf);
  job_m_out.VMV_ENDPOINT = ep;
  job_m_out.VMV_RESULT = result;
  job_m_out.VMV_REQID = req_id;

  /* Reply asynchronously as VM may not be able to receive
   * an ipc_sendnb() message.
   */
  job_m_out.m_type = VM_VFS_REPLY;
  r = asynsend3(VM_PROC_NR, &job_m_out, 0);
  if(r != OK) printf("VFS: couldn't asynsend3() to VM\n");

  /* VFS does not reply any further */
  return SUSPEND;
}

/*===========================================================================*
 *				pm_reboot				     *
 *===========================================================================*/
void
pm_reboot(void)
{
/* Perform the VFS side of the reboot call. This call is performed from the PM
 * process context.  Sync all file systems, tear down all processes, unmount
 * everything (forcing on the last pass), and finally tell PM we are done.
 */
  message m_out;
  int i, r;
  struct fproc *rfp, *pmfp;

  pmfp = fp;

  do_sync();

  /* Do exit processing for all leftover processes and servers, but don't
   * actually exit them (if they were really gone, PM will tell us about it).
   * Skip processes that handle parts of the file system; we first need to give
   * them the chance to unmount (which should be possible as all normal
   * processes have no open files anymore).
   */
  /* This is the only place where we allow special modification of "fp". The
   * reboot procedure should really be implemented as a PM message broadcasted
   * to all processes, so that each process will be shut down cleanly by a
   * thread operating on its behalf. Doing everything here is simpler, but it
   * requires an exception to the strict model of having "fp" be the process
   * that owns the current worker thread.
   */
  for (i = 0; i < NR_PROCS; i++) {
	rfp = &fproc[i];

	/* Don't just free the proc right away, but let it finish what it was
	 * doing first */
	if (rfp != fp) lock_proc(rfp);
	if (rfp->fp_endpoint != NONE && find_vmnt(rfp->fp_endpoint) == NULL) {
		worker_set_proc(rfp);	/* temporarily fake process context */
		free_proc(0);
		worker_set_proc(pmfp);	/* restore original process context */
	}
	if (rfp != fp) unlock_proc(rfp);
  }

  do_sync();
  unmount_all(0 /* Don't force */);

  /* Try to exit all processes again including File Servers */
  for (i = 0; i < NR_PROCS; i++) {
	rfp = &fproc[i];

	/* Don't just free the proc right away, but let it finish what it was
	 * doing first */
	if (rfp != fp) lock_proc(rfp);
	if (rfp->fp_endpoint != NONE) {
		worker_set_proc(rfp);	/* temporarily fake process context */
		free_proc(0);
		worker_set_proc(pmfp);	/* restore original process context */
	}
	if (rfp != fp) unlock_proc(rfp);
  }

  do_sync();
  unmount_all(1 /* Force */);

  /* Reply to PM for synchronization */
  memset(&m_out, 0, sizeof(m_out));

  m_out.m_type = VFS_PM_REBOOT_REPLY;

  if ((r = ipc_send(PM_PROC_NR, &m_out)) != OK)
	panic("pm_reboot: ipc_send failed: %d", r);
}

/*===========================================================================*
 *				pm_fork					     *
 *===========================================================================*/
void pm_fork(endpoint_t pproc, endpoint_t cproc, pid_t cpid)
{
/* Perform those aspects of the fork() system call that relate to files.
 * In particular, let the child inherit its parent's file descriptors.
 * The parent and child parameters tell who forked off whom. The file
 * system uses the same slot numbers as the kernel. Only PM makes this call.
 */
  struct fproc *cp, *pp;
  int i, parentno, childno;
  mutex_t c_fp_lock;

  /* Check up-to-dateness of fproc. */
  okendpt(pproc, &parentno);

  /* PM gives child endpoint, which implies process slot information.
   * Don't call isokendpt, because that will verify if the endpoint
   * number is correct in fproc, which it won't be.
   */
  childno = _ENDPOINT_P(cproc);
  if (childno < 0 || childno >= NR_PROCS)
	panic("VFS: bogus child for forking: %d", cproc);
  if (fproc[childno].fp_pid != PID_FREE)
	panic("VFS: forking on top of in-use child: %d", childno);

  /* Copy the parent's fproc struct to the child. */
  /* However, the mutex variables belong to a slot and must stay the same. */
  c_fp_lock = fproc[childno].fp_lock;
  fproc[childno] = fproc[parentno];
  fproc[childno].fp_lock = c_fp_lock;

  /* Increase the counters in the 'filp' table. */
  cp = &fproc[childno];
  pp = &fproc[parentno];

  for (i = 0; i < OPEN_MAX; i++)
	if (cp->fp_filp[i] != NULL) cp->fp_filp[i]->filp_count++;

  /* Fill in new process and endpoint id. */
  cp->fp_pid = cpid;
  cp->fp_endpoint = cproc;

  /* A forking process cannot possibly be suspended on anything. */
  assert(pp->fp_blocked_on == FP_BLOCKED_ON_NONE);

  /* A child is not a process leader, not being revived, etc. */
  cp->fp_flags = FP_NOFLAGS;

  /* Record the fact that both root and working dir have another user.
 */
  if (cp->fp_rd) dup_vnode(cp->fp_rd);
  if (cp->fp_wd) dup_vnode(cp->fp_wd);
}

/*===========================================================================*
 *				free_proc				     *
 *===========================================================================*/
static void free_proc(int flags)
{
/* Release all VFS resources held by the current process ("fp"): open files
 * and the root/working directory references.  If FP_EXITING is set in
 * 'flags', additionally perform full exit processing: unmap driver entries,
 * revoke a session leader's controlling tty, and free the fproc slot.
 */
  int i;
  register struct fproc *rfp;
  register struct filp *rfilp;
  register struct vnode *vp;
  dev_t dev;

  if (fp->fp_endpoint == NONE)
	panic("free_proc: already free");

  /* If the process is blocked on a device or pipe, cancel that first. */
  if (fp_is_blocked(fp))
	unpause();

  /* Loop on file descriptors, closing any that are open. */
  for (i = 0; i < OPEN_MAX; i++) {
	(void) close_fd(fp, i, FALSE /*may_suspend*/);
  }

  /* Release root and working directories. */
  if (fp->fp_rd) { put_vnode(fp->fp_rd); fp->fp_rd = NULL; }
  if (fp->fp_wd) { put_vnode(fp->fp_wd); fp->fp_wd = NULL; }

  /* The rest of these actions is only done when processes actually exit. */
  if (!(flags & FP_EXITING)) return;

  fp->fp_flags |= FP_EXITING;

  /* Check if any process is SUSPENDed on this driver.
   * If a driver exits, unmap its entries in the dmap table.
   * (unmapping has to be done after the first step, because the
   * dmap/smap tables are used in the first step.)
   */
  unsuspend_by_endpt(fp->fp_endpoint);
  dmap_unmap_by_endpt(fp->fp_endpoint);
  smap_unmap_by_endpt(fp->fp_endpoint);

  worker_stop_by_endpt(fp->fp_endpoint); /* Unblock waiting threads */
  vmnt_unmap_by_endpt(fp->fp_endpoint);	/* Invalidate open files if this
					 * was an active FS */

  /* If a session leader exits and it has a controlling tty, then revoke
   * access to its controlling tty from all other processes using it.
   */
  if ((fp->fp_flags & FP_SESLDR) && fp->fp_tty != 0) {
	dev = fp->fp_tty;
	for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++) {
		if(rfp->fp_pid == PID_FREE) continue;
		if (rfp->fp_tty == dev) rfp->fp_tty = 0;

		/* Mark every open filp on the controlling tty as closed. */
		for (i = 0; i < OPEN_MAX; i++) {
			if ((rfilp = rfp->fp_filp[i]) == NULL) continue;
			if (rfilp->filp_mode == FILP_CLOSED) continue;
			vp = rfilp->filp_vno;
			if (!S_ISCHR(vp->v_mode)) continue;
			if (vp->v_sdev != dev) continue;
			lock_filp(rfilp, VNODE_READ);
			(void) cdev_close(dev);	/* Ignore any errors. */
			/* FIXME: missing select check */
			rfilp->filp_mode = FILP_CLOSED;
			unlock_filp(rfilp);
		}
	}
  }

  /* Exit done. Mark slot as free. */
  fp->fp_endpoint = NONE;
  fp->fp_pid = PID_FREE;
  fp->fp_flags = FP_NOFLAGS;
}

/*===========================================================================*
 *				pm_exit					     *
 *===========================================================================*/
void pm_exit(void)
{
/* Perform the file system portion of the exit(status) system call.
 * This function is called from the context of the exiting process.
710 */ 711 712 free_proc(FP_EXITING); 713 } 714 715 /*===========================================================================* 716 * pm_setgid * 717 *===========================================================================*/ 718 void 719 pm_setgid(endpoint_t proc_e, int egid, int rgid) 720 { 721 register struct fproc *tfp; 722 int slot; 723 724 okendpt(proc_e, &slot); 725 tfp = &fproc[slot]; 726 727 tfp->fp_effgid = egid; 728 tfp->fp_realgid = rgid; 729 } 730 731 732 /*===========================================================================* 733 * pm_setgroups * 734 *===========================================================================*/ 735 void 736 pm_setgroups(endpoint_t proc_e, int ngroups, gid_t *groups) 737 { 738 struct fproc *rfp; 739 int slot; 740 741 okendpt(proc_e, &slot); 742 rfp = &fproc[slot]; 743 if (ngroups * sizeof(gid_t) > sizeof(rfp->fp_sgroups)) 744 panic("VFS: pm_setgroups: too much data to copy"); 745 if (sys_datacopy_wrapper(who_e, (vir_bytes) groups, SELF, (vir_bytes) rfp->fp_sgroups, 746 ngroups * sizeof(gid_t)) == OK) { 747 rfp->fp_ngroups = ngroups; 748 } else 749 panic("VFS: pm_setgroups: datacopy failed"); 750 } 751 752 753 /*===========================================================================* 754 * pm_setuid * 755 *===========================================================================*/ 756 void 757 pm_setuid(endpoint_t proc_e, int euid, int ruid) 758 { 759 struct fproc *tfp; 760 int slot; 761 762 okendpt(proc_e, &slot); 763 tfp = &fproc[slot]; 764 765 tfp->fp_effuid = euid; 766 tfp->fp_realuid = ruid; 767 } 768 769 /*===========================================================================* 770 * pm_setsid * 771 *===========================================================================*/ 772 void pm_setsid(endpoint_t proc_e) 773 { 774 /* Perform the VFS side of the SETSID call, i.e. get rid of the controlling 775 * terminal of a process, and make the process a session leader. 
776 */ 777 struct fproc *rfp; 778 int slot; 779 780 /* Make the process a session leader with no controlling tty. */ 781 okendpt(proc_e, &slot); 782 rfp = &fproc[slot]; 783 rfp->fp_flags |= FP_SESLDR; 784 rfp->fp_tty = 0; 785 } 786 787 /*===========================================================================* 788 * do_svrctl * 789 *===========================================================================*/ 790 int do_svrctl(void) 791 { 792 unsigned long svrctl; 793 vir_bytes ptr; 794 795 svrctl = job_m_in.m_lc_svrctl.request; 796 ptr = job_m_in.m_lc_svrctl.arg; 797 798 if (IOCGROUP(svrctl) != 'F') return(EINVAL); 799 800 switch (svrctl) { 801 case VFSSETPARAM: 802 case VFSGETPARAM: 803 { 804 struct sysgetenv sysgetenv; 805 char search_key[64]; 806 char val[64]; 807 int r, s; 808 809 /* Copy sysgetenv structure to VFS */ 810 if (sys_datacopy_wrapper(who_e, ptr, SELF, (vir_bytes) &sysgetenv, 811 sizeof(sysgetenv)) != OK) 812 return(EFAULT); 813 814 /* Basic sanity checking */ 815 if (svrctl == VFSSETPARAM) { 816 if (sysgetenv.keylen <= 0 || 817 sysgetenv.keylen > (sizeof(search_key) - 1) || 818 sysgetenv.vallen <= 0 || 819 sysgetenv.vallen >= sizeof(val)) { 820 return(EINVAL); 821 } 822 } 823 824 /* Copy parameter "key" */ 825 if ((s = sys_datacopy_wrapper(who_e, (vir_bytes) sysgetenv.key, 826 SELF, (vir_bytes) search_key, 827 sysgetenv.keylen)) != OK) 828 return(s); 829 search_key[sysgetenv.keylen] = '\0'; /* Limit string */ 830 831 /* Is it a parameter we know? 
		 */
		if (svrctl == VFSSETPARAM) {
			if (!strcmp(search_key, "verbose")) {
				/* Set the VFS debug verbosity level (0-4). */
				int verbose_val;
				if ((s = sys_datacopy_wrapper(who_e,
					(vir_bytes) sysgetenv.val, SELF,
					(vir_bytes) &val, sysgetenv.vallen)) != OK)
					return(s);
				val[sysgetenv.vallen] = '\0'; /* Limit string */
				verbose_val = atoi(val);
				if (verbose_val < 0 || verbose_val > 4) {
					return(EINVAL);
				}
				verbose = verbose_val;
				r = OK;
			} else {
				r = ESRCH;
			}
		} else { /* VFSGETPARAM */
			char small_buf[60];

			r = ESRCH;
			if (!strcmp(search_key, "print_traces")) {
				/* Diagnostic: dump worker thread stacks. */
				mthread_stacktraces();
				sysgetenv.val = 0;
				sysgetenv.vallen = 0;
				r = OK;
			} else if (!strcmp(search_key, "print_select")) {
				/* Diagnostic: dump select() state. */
				select_dump();
				sysgetenv.val = 0;
				sysgetenv.vallen = 0;
				r = OK;
			} else if (!strcmp(search_key, "active_threads")) {
				/* Report the number of busy worker threads,
				 * copied back below into the caller's buffer.
				 */
				int active = NR_WTHREADS - worker_available();
				snprintf(small_buf, sizeof(small_buf) - 1,
					"%d", active);
				sysgetenv.vallen = strlen(small_buf);
				r = OK;
			}

			if (r == OK) {
				/* Copy back the (updated) sysgetenv header,
				 * then the value string if one was produced.
				 */
				if ((s = sys_datacopy_wrapper(SELF,
					(vir_bytes) &sysgetenv, who_e, ptr,
					sizeof(sysgetenv))) != OK)
					return(s);
				if (sysgetenv.val != 0) {
					if ((s = sys_datacopy_wrapper(SELF,
						(vir_bytes) small_buf, who_e,
						(vir_bytes) sysgetenv.val,
						sysgetenv.vallen)) != OK)
						return(s);
				}
			}
		}

		return(r);
	}
    default:
	return(EINVAL);
  }
}

/*===========================================================================*
 *				pm_dumpcore				     *
 *===========================================================================*/
int pm_dumpcore(int csig, vir_bytes exe_name)
{
/* Create a core dump ("core.<pid>") for the current process, which was
 * terminated with signal 'csig'.  'exe_name' is a pointer into PM's address
 * space holding the process name.  The process is fully exited afterwards.
 * Returns OK, or a negative error code if the dump could not be written.
 */
  int r, core_fd;
  struct filp *f;
  char core_path[PATH_MAX];
  char proc_name[PROC_NAME_LEN];

  /* In effect, the coredump is generated through the use of calls as if made
   * by the process itself.  As such, the process must not be doing anything
   * else.  Therefore, if the process was blocked on anything, unblock it
   * first.  This step is the reason we cannot use this function to generate a
   * core dump of a process while it is still running (i.e., without
   * terminating it), as it changes the state of the process.
   */
  if (fp_is_blocked(fp))
	unpause();

  /* open core file */
  snprintf(core_path, PATH_MAX, "%s.%d", CORE_NAME, fp->fp_pid);
  r = core_fd = common_open(core_path, O_WRONLY | O_CREAT | O_TRUNC,
	CORE_MODE, FALSE /*for_exec*/);
  if (r < 0) goto core_exit;

  /* get process name */
  r = sys_datacopy_wrapper(PM_PROC_NR, exe_name, VFS_PROC_NR,
	(vir_bytes) proc_name, PROC_NAME_LEN);
  if (r != OK) goto core_exit;
  proc_name[PROC_NAME_LEN - 1] = '\0';

  /* write the core dump */
  f = get_filp(core_fd, VNODE_WRITE);
  assert(f != NULL);
  write_elf_core_file(f, csig, proc_name);
  unlock_filp(f);

core_exit:
  /* The core file descriptor will be closed as part of the process exit. */
  free_proc(FP_EXITING);

  return(r);
}

/*===========================================================================*
 *				ds_event				     *
 *===========================================================================*/
void
ds_event(void)
{
/* Drain all pending DS (Data Store) events and, for each "driver up" event
 * of a block, character, or socket driver, update the corresponding device
 * mapping table.  ds_check() returning ENOENT is the normal end-of-queue
 * condition; any other failure is reported.
 */
  char key[DS_MAX_KEYLEN];
  char *blkdrv_prefix = "drv.blk.";
  char *chrdrv_prefix = "drv.chr.";
  char *sckdrv_prefix = "drv.sck.";
  u32_t value;
  int type, ftype, r;
  endpoint_t owner_endpoint;

  /* Get the event and the owner from DS. */
  while ((r = ds_check(key, &type, &owner_endpoint)) == OK) {
	/* Only check for block, character, socket driver up events. */
	if (!strncmp(key, blkdrv_prefix, strlen(blkdrv_prefix))) {
		ftype = S_IFBLK;
	} else if (!strncmp(key, chrdrv_prefix, strlen(chrdrv_prefix))) {
		ftype = S_IFCHR;
	} else if (!strncmp(key, sckdrv_prefix, strlen(sckdrv_prefix))) {
		ftype = S_IFSOCK;
	} else {
		continue;
	}

	if ((r = ds_retrieve_u32(key, &value)) != OK) {
		printf("VFS: ds_event: ds_retrieve_u32 failed\n");
		break;
	}
	if (value != DS_DRIVER_UP) continue;

	/* Perform up. */
	if (ftype == S_IFBLK || ftype == S_IFCHR)
		dmap_endpt_up(owner_endpoint, (ftype == S_IFBLK));
	else
		smap_endpt_up(owner_endpoint);
  }

  if (r != ENOENT) printf("VFS: ds_event: ds_check failed: %d\n", r);
}

/* A function to be called on panic(). */
void panic_hook(void)
{
  printf("VFS mthread stacktraces:\n");
  mthread_stacktraces();
}

/*===========================================================================*
 *				do_getrusage				     *
 *===========================================================================*/
int do_getrusage(void)
{
/* Obsolete vfs_getrusage(2) call from userland. The getrusage call is
 * now fully handled by PM, and for any future fields that should be
 * supplied by VFS, VFS should be queried by PM rather than by the user
 * program directly. TODO: remove this call after the next release.
 */
  return OK;
}