/*	kern_proc.c	3.25	10/11/80	*/

/*
 * Process management system calls: exec/exece, exit, wait, fork/vfork
 * support, and brk.  VAX/4BSD kernel code, pre-ANSI K&R C: undeclared
 * return types and parameters default to int, and user-space addresses
 * are carried around in plain ints (ap, ucp below).
 */

#include "../h/param.h"
#include "../h/systm.h"
#include "../h/map.h"
#include "../h/mtpr.h"
#include "../h/dir.h"
#include "../h/user.h"
#include "../h/proc.h"
#include "../h/buf.h"
#include "../h/reg.h"
#include "../h/inode.h"
#include "../h/seg.h"
#include "../h/acct.h"
#include "/usr/include/wait.h"	/* NOTE(review): absolute path, unlike the ../h/ includes — confirm build intent */
#include "../h/pte.h"
#include "../h/vm.h"
#include "../h/text.h"
#include "../h/psl.h"
#include "../h/vlimit.h"
#include "../h/file.h"

/*
 * exec system call, with and without environments.
 */
struct execa {
	char	*fname;		/* path of file to execute */
	char	**argp;		/* argument vector (user space) */
	char	**envp;		/* environment vector, or NULL for plain exec */
};

/*
 * Plain exec: same as exece but with no environment.
 * Clears envp in the argument area and falls through to exece().
 */
exec()
{
	((struct execa *)u.u_ap)->envp = NULL;
	exece();
}

/*
 * exece: execute a file with arguments and environment.
 *
 * Phases:
 *  1. namei/permission checks on the target file;
 *  2. copy the argument and environment strings out of the old image
 *     into swap space (buffers on argdev), since the old address space
 *     is destroyed before the new one exists;
 *  3. getxfile() replaces the process image;
 *  4. copy the saved strings back onto the top of the new user stack
 *     and build the argc/argv/envp frame.
 * Errors are reported through u.u_error; cleanup is at "bad".
 */
exece()
{
	register nc;			/* byte count of strings gathered */
	register char *cp;		/* cursor into current cache buffer */
	register struct buf *bp;	/* current argdev cache buffer, or 0 */
	register struct execa *uap;	/* user's syscall arguments */
	int na, ne, ucp, ap, c;		/* arg count, env count, user addrs, char */
	struct inode *ip;
	swblk_t bno;			/* swap-map block holding the strings */

	if ((ip = namei(uchar, 0)) == NULL)
		return;
	bno = 0;
	bp = 0;
	if(access(ip, IEXEC))
		goto bad;
	/* must be a regular file with at least one execute bit set */
	if((ip->i_mode & IFMT) != IFREG ||
	   (ip->i_mode & (IEXEC|(IEXEC>>3)|(IEXEC>>6))) == 0) {
		u.u_error = EACCES;
		goto bad;
	}
	/*
	 * Collect arguments on "file" in swap space.
	 */
	na = 0;
	ne = 0;
	nc = 0;
	uap = (struct execa *)u.u_ap;
	/* reserve NCARGS worth of swap to stage the strings */
	if ((bno = malloc(argmap, ctod(clrnd((int) btoc(NCARGS))))) == 0) {
		swkill(u.u_procp, "exece");
		goto bad;
	}
	if (bno % CLSIZE)
		panic("execa malloc");
	if (uap->argp) for (;;) {
		ap = NULL;
		/* fetch next argv pointer; when argv is exhausted (NULL
		 * terminator), switch over to fetching envp pointers */
		if (uap->argp) {
			ap = fuword((caddr_t)uap->argp);
			uap->argp++;
		}
		if (ap==NULL && uap->envp) {
			uap->argp = NULL;
			if ((ap = fuword((caddr_t)uap->envp)) == NULL)
				break;
			uap->envp++;
			ne++;		/* count environment strings */
		}
		if (ap==NULL)
			break;
		na++;			/* total strings (args + env) */
		if(ap == -1)		/* fuword returns -1 on fault */
			u.u_error = EFAULT;
		/* copy one NUL-terminated string into the staging buffers */
		do {
			if (nc >= NCARGS-1)
				u.u_error = E2BIG;
			if ((c = fubyte((caddr_t)ap++)) < 0)
				u.u_error = EFAULT;
			if (u.u_error) {
				if (bp)
					brelse(bp);
				bp = 0;
				goto badarg;
			}
			/* crossing a block boundary: push the full buffer
			 * (delayed write) and get the next one */
			if ((nc&BMASK) == 0) {
				if (bp)
					bdwrite(bp);
				bp = getblk(argdev,
				    (daddr_t)(dbtofsb(bno)+(nc>>BSHIFT)));
				cp = bp->b_un.b_addr;
			}
			nc++;
			*cp++ = c;
		} while (c>0);
	}
	if (bp)
		bdwrite(bp);
	bp = 0;
	nc = (nc + NBPW-1) & ~(NBPW-1);	/* round string bytes up to a word */
	/* replace the process image; stack arg space = strings + pointers */
	getxfile(ip, nc + (na+4)*NBPW);
	if (u.u_error) {
badarg:
		/* discard any staged-argument buffers still in the cache */
		for (c = 0; c < nc; c += BSIZE)
			if (bp = baddr(argdev, dbtofsb(bno)+(c>>BSHIFT))) {
				bp->b_flags |= B_AGE;		/* throw away */
				bp->b_flags &= ~B_DELWRI;	/* cancel io */
				brelse(bp);
				bp = 0;
			}
		goto bad;
	}

	/*
	 * copy back arglist
	 */

	/* strings go at the very top of the new stack; below them the
	 * pointer frame: argc, argv[], NULL, envp[], NULL */
	ucp = USRSTACK - nc - NBPW;
	ap = ucp - na*NBPW - 3*NBPW;
	u.u_ar0[SP] = ap;
	(void) suword((caddr_t)ap, na-ne);	/* argc */
	nc = 0;
	for (;;) {
		ap += NBPW;
		if (na==ne) {
			/* NULL separator between argv[] and envp[] */
			(void) suword((caddr_t)ap, 0);
			ap += NBPW;
		}
		if (--na < 0)
			break;
		(void) suword((caddr_t)ap, ucp);	/* pointer to string */
		/* stream the string bytes back out of the staging buffers */
		do {
			if ((nc&BMASK) == 0) {
				if (bp)
					brelse(bp);
				bp = bread(argdev,
				    (daddr_t)(dbtofsb(bno)+(nc>>BSHIFT)));
				bp->b_flags |= B_AGE;		/* throw away */
				bp->b_flags &= ~B_DELWRI;	/* cancel io */
				cp = bp->b_un.b_addr;
			}
			(void) subyte((caddr_t)ucp++, (c = *cp++));
			nc++;
		} while(c&0377);
	}
	(void) suword((caddr_t)ap, 0);		/* terminate envp[] */
	(void) suword((caddr_t)ucp, 0);
	setregs();
bad:
	if (bp)
		brelse(bp);
	if (bno)
		mfree(argmap, ctod(clrnd((int) btoc(NCARGS))), bno);
	iput(ip);
}

/*
 * Read in and set up memory for executed file.
 *
 * ip    - locked inode of the executable
 * nargc - bytes of stack space needed for the argument list
 *         (K&R: undeclared, so it defaults to int)
 *
 * Past the swpexpand() call the process is committed to the new image;
 * errors before that point leave the old image intact.
 */
getxfile(ip, nargc)
register struct inode *ip;
{
	register size_t ts, ds, ss;	/* text/data/stack sizes, in clicks */
	int pagi = 0;			/* SPAGI if demand-paged (0413) */

	/*
	 * read in first few bytes
	 * of file for segment
	 * sizes:
	 * ux_mag = 407/410/413
	 *  407 is plain executable
	 *  410 is RO text
	 *  413 is demand paged RO text
	 */

	u.u_base = (caddr_t)&u.u_exdata;
	u.u_count = sizeof(u.u_exdata);
	u.u_offset = 0;
	u.u_segflg = 1;		/* read into kernel space */
	readi(ip);
	u.u_segflg = 0;
	if(u.u_error)
		goto bad;
	if (u.u_count!=0) {
		/* file shorter than an exec header */
		u.u_error = ENOEXEC;
		goto bad;
	}
	switch (u.u_exdata.ux_mag) {

	case 0407:
		/* impure text: fold text into the (writable) data segment */
		u.u_exdata.ux_dsize += u.u_exdata.ux_tsize;
		u.u_exdata.ux_tsize = 0;
		break;

	case 0413:
		pagi = SPAGI;
		/* fall into ... */

	case 0410:
		if (u.u_exdata.ux_tsize == 0) {
			u.u_error = ENOEXEC;
			goto bad;
		}
		break;

	default:
		u.u_error = ENOEXEC;
		goto bad;
	}
	/*
	 * Shared text may not currently be open for writing anywhere;
	 * only need the scan when the inode isn't already a text image
	 * and someone else holds a reference.
	 */
	if(u.u_exdata.ux_tsize!=0 && (ip->i_flag&ITEXT)==0 && ip->i_count!=1) {
		register struct file *fp;

		for (fp = file; fp < &file[NFILE]; fp++)
			if (fp->f_inode == ip && (fp->f_flag&FWRITE)) {
				u.u_error = ETXTBSY;
				goto bad;
			}
	}

	/*
	 * find text and data sizes
	 * try them out for possible
	 * exceed of max sizes
	 */

	ts = clrnd(btoc(u.u_exdata.ux_tsize));
	ds = clrnd(btoc((u.u_exdata.ux_dsize+u.u_exdata.ux_bsize)));
	ss = clrnd(SSIZE + btoc(nargc));
	if (chksize(ts, ds, ss))
		goto bad;
	/* get swap space for the new image before destroying the old */
	u.u_cdmap = zdmap;
	u.u_csmap = zdmap;
	if (swpexpand(ds, ss, &u.u_cdmap, &u.u_csmap) == NULL)
		goto bad;

	/*
	 * At this point, committed to the new image!
	 * Release virtual memory resources of old process, and
	 * initialize the virtual memory of the new process.
	 * If we resulted from vfork(), instead wakeup our
	 * parent who will set SVFDONE when he has taken back
	 * our resources.
	 */
	u.u_prof.pr_scale = 0;
	if ((u.u_procp->p_flag & SVFORK) == 0)
		vrelvm();
	else {
		u.u_procp->p_flag &= ~SVFORK;
		u.u_procp->p_flag |= SKEEP;
		wakeup((caddr_t)u.u_procp);
		while ((u.u_procp->p_flag & SVFDONE) == 0)
			sleep((caddr_t)u.u_procp, PZERO - 1);
		u.u_procp->p_flag &= ~(SVFDONE|SKEEP);
	}
	u.u_procp->p_flag &= ~(SPAGI|SANOM|SUANOM|SNUSIG);
	u.u_procp->p_flag |= pagi;
	u.u_dmap = u.u_cdmap;
	u.u_smap = u.u_csmap;
	vgetvm(ts, ds, ss);

	if (pagi == 0) {
		/*
		 * Read in data segment.
		 * (Demand-paged images fault their data in instead.)
		 */
		u.u_base = (char *)ctob(ts);
		u.u_offset = sizeof(u.u_exdata)+u.u_exdata.ux_tsize;
		u.u_count = u.u_exdata.ux_dsize;
		readi(ip);
	}
	xalloc(ip, pagi);
	/* for 413 images, set up fill-on-demand ptes for the data pages */
	if (pagi && u.u_procp->p_textp)
		vinifod((struct fpte *)dptopte(u.u_procp, 0),
		    PG_FTEXT, u.u_procp->p_textp->x_iptr,
		    1 + ts/CLSIZE, (int)btoc(u.u_exdata.ux_dsize));

	/* THIS SHOULD BE DONE AT A LOWER LEVEL, IF AT ALL */
	mtpr(TBIA, 0);		/* flush translation buffer */

	/*
	 * set SUID/SGID protections, if no tracing
	 */
	if ((u.u_procp->p_flag&STRC)==0) {
		if(ip->i_mode&ISUID)
			if(u.u_uid != 0) {
				u.u_uid = ip->i_uid;
				u.u_procp->p_uid = ip->i_uid;
			}
		if(ip->i_mode&ISGID)
			u.u_gid = ip->i_gid;
	} else
		psignal(u.u_procp, SIGTRAP);
	u.u_tsize = ts;
	u.u_dsize = ds;
	u.u_ssize = ss;
bad:
	return;
}

/*
 * Clear registers on exec:
 * revert caught signals to default, set the new pc,
 * and close files marked close-on-exec.
 */
setregs()
{
	register int (**rp)();
	register i;
	long sigmask;

	for(rp = &u.u_signal[0], sigmask = 1L; rp < &u.u_signal[NSIG];
	    sigmask <<= 1, rp++) {
		switch (*rp) {

		case SIG_IGN:
		case SIG_DFL:
		case SIG_HOLD:
			continue;

		default:
			/*
			 * Normal or deferring catch; revert to default.
			 * The siga0/siga1 bit pair encodes the handler
			 * disposition; bits tested here are those of the
			 * just-stored SIG_DFL.
			 */
			(void) spl6();
			*rp = SIG_DFL;
			if ((int)*rp & 1)
				u.u_procp->p_siga0 |= sigmask;
			else
				u.u_procp->p_siga1 &= ~sigmask;	/* NOTE(review): asymmetric — clears siga1 where the branch sets siga0; looks like it should clear siga0. Confirm against siga encoding. */
			if ((int)*rp & 2)
				u.u_procp->p_siga1 |= sigmask;
			else
				u.u_procp->p_siga1 &= ~sigmask;
			(void) spl0();
			continue;
		}
	}
/*
	for(rp = &u.u_ar0[0]; rp < &u.u_ar0[16];)
		*rp++ = 0;
*/
	u.u_ar0[PC] = u.u_exdata.ux_entloc + 2;	/* skip over entry mask */
	for(i=0; i<NOFILE; i++) {
		if (u.u_pofile[i]&EXCLOSE) {
			closef(u.u_ofile[i]);
			u.u_ofile[i] = NULL;
			u.u_pofile[i] &= ~EXCLOSE;
		}
	}
	/*
	 * Remember file name for accounting.
	 */
	u.u_acflag &= ~AFORK;
	bcopy((caddr_t)u.u_dbuf, (caddr_t)u.u_comm, DIRSIZ);
}

/*
 * exit system call:
 * pass back caller's arg
 * (packed into the wait-status byte, hence the shift).
 */
rexit()
{
	register struct a {
		int	rval;
	} *uap;

	uap = (struct a *)u.u_ap;
	exit((uap->rval & 0377) << 8);
}

/*
 * Release resources.
 * Save u. area for parent to look at.
 * Enter zombie state.
 * Wake up parent and init processes,
 * and dispose of children.
 */
exit(rv)
{
	register int i;
	register struct proc *p, *q;
	register struct file *f;
	register int x;

#ifdef PGINPROF
	vmsizmon();
#endif
	p = u.u_procp;
	p->p_flag &= ~(STRC|SULOCK);
	p->p_flag |= SWEXIT;
	p->p_clktim = 0;
	/* ignore all signals from here on (encode SIG_IGN in siga0/siga1) */
	(void) spl6();
	if ((int)SIG_IGN & 1)
		p->p_siga0 = ~0;
	else
		p->p_siga0 = 0;
	if ((int)SIG_IGN & 2)
		p->p_siga1 = ~0;
	else
		p->p_siga1 = 0;
	(void) spl0();
	p->p_cpticks = 0;
	p->p_pctcpu = 0;
	for(i=0; i<NSIG; i++)
		u.u_signal[i] = SIG_IGN;
	/*
	 * Release virtual memory. If we resulted from
	 * a vfork(), instead give the resources back to
	 * the parent.
	 */
	if ((p->p_flag & SVFORK) == 0)
		vrelvm();
	else {
		p->p_flag &= ~SVFORK;
		wakeup((caddr_t)p);
		while ((p->p_flag & SVFDONE) == 0)
			sleep((caddr_t)p, PZERO - 1);
		p->p_flag &= ~SVFDONE;
	}
	for(i=0; i<NOFILE; i++) {
		f = u.u_ofile[i];
		u.u_ofile[i] = NULL;
		closef(f);
	}
	plock(u.u_cdir);
	iput(u.u_cdir);
	if (u.u_rdir) {
		plock(u.u_rdir);
		iput(u.u_rdir);
	}
	/* allow accounting record to be written regardless of file limit */
	u.u_limit[LIM_FSIZE] = INFINITY;
	acct();
	vrelpt(u.u_procp);
	vrelu(u.u_procp, 0);
	multprog--;
/*	spl7();		/* clock will get mad because of overlaying */
	p->p_stat = SZOMB;
	noproc = 1;
	/* unlink from the pid hash chain */
	i = PIDHASH(p->p_pid);
	x = p - proc;
	if (pidhash[i] == x)
		pidhash[i] = p->p_idhash;
	else {
		for (i = pidhash[i]; i != 0; i = proc[i].p_idhash)
			if (proc[i].p_idhash == x) {
				proc[i].p_idhash = p->p_idhash;
				goto done;
			}
		panic("exit");
	}
done:
	/* proc entry now doubles as struct xproc for the parent's wait() */
	((struct xproc *)p)->xp_xstat = rv;		/* overlay */
	((struct xproc *)p)->xp_vm = u.u_vm;		/* overlay */
	vmsadd(&((struct xproc *)p)->xp_vm, &u.u_cvm);
	/* re-parent children to init (proc[1]) */
	for(q = &proc[0]; q < &proc[NPROC]; q++)
		if(q->p_pptr == p) {
			q->p_pptr = &proc[1];
			q->p_ppid = 1;
			wakeup((caddr_t)&proc[1]);
			/*
			 * Traced processes are killed
			 * since their existence means someone is screwing up.
			 * Stopped processes are sent a hangup and a continue.
			 * This is designed to be ``safe'' for setuid
			 * processes since they must be willing to tolerate
			 * hangups anyways.
			 */
			if (q->p_flag&STRC) {
				q->p_flag &= ~STRC;
				psignal(q, SIGKILL);
			} else if (q->p_stat == SSTOP) {
				psignal(q, SIGHUP);
				psignal(q, SIGCONT);
			}
			/*
			 * Protect this process from future
			 * tty signals, clear TSTP/TTIN/TTOU if pending,
			 * and set SDETACH bit on procs.
			 */
			spgrp(q, -1);
		}
	wakeup((caddr_t)p->p_pptr);
	psignal(p->p_pptr, SIGCHLD);
	swtch();
}

/*
 * wait system call front end.
 * The PSL condition-code convention distinguishes the old-style call
 * (no options/vtimes) from the new: only when all cc bits are set are
 * options taken from R0 and a vtimes pointer from R1.
 */
wait()
{
	struct vtimes vm;
	struct vtimes *vp;

	if ((u.u_ar0[PS] & PSL_ALLCC) != PSL_ALLCC) {
		wait1(0, (struct vtimes *)0);
		return;
	}
	vp = (struct vtimes *)u.u_ar0[R1];
	wait1(u.u_ar0[R0], &vm);
	if (u.u_error)
		return;
	(void) copyout((caddr_t)&vm, (caddr_t)vp, sizeof (struct vtimes));
}

/*
 * Wait system call.
 * Search for a terminated (zombie) child,
 * finally lay it to rest, and collect its status.
 * Look also for stopped (traced) children,
 * and pass back status from them.
 */
wait1(options, vp)
	register options;
	struct vtimes *vp;
{
	register f;			/* count of our children found */
	register struct proc *p;

	f = 0;
loop:
	for(p = &proc[0]; p < &proc[NPROC]; p++)
	if(p->p_pptr == u.u_procp) {
		f++;
		if(p->p_stat == SZOMB) {
			/* reap: return pid/status, then free the slot */
			u.u_r.r_val1 = p->p_pid;
			u.u_r.r_val2 = ((struct xproc *)p)->xp_xstat;
			((struct xproc *)p)->xp_xstat = 0;
			if (vp)
				*vp = ((struct xproc *)p)->xp_vm;
			vmsadd(&u.u_cvm, &((struct xproc *)p)->xp_vm);
			((struct xproc *)p)->xp_vm = zvms;
			p->p_stat = NULL;
			p->p_pid = 0;
			p->p_ppid = 0;
			p->p_pptr = 0;
			p->p_sig = 0;
			p->p_siga0 = 0;
			p->p_siga1 = 0;
			p->p_pgrp = 0;
			p->p_flag = 0;
			p->p_wchan = 0;
			p->p_cursig = 0;
			return;
		}
		/* report a stopped child only once (SWTED latches it) */
		if (p->p_stat == SSTOP && (p->p_flag&SWTED)==0 &&
		    (p->p_flag&STRC || options&WUNTRACED)) {
			p->p_flag |= SWTED;
			u.u_r.r_val1 = p->p_pid;
			u.u_r.r_val2 = (p->p_cursig<<8) | WSTOPPED;
			return;
		}
	}
	if (f==0) {
		u.u_error = ECHILD;
		return;
	}
	if (options&WNOHANG) {
		u.u_r.r_val1 = 0;
		return;
	}
	/* new-signal processes restart the call after a caught signal */
	if ((u.u_procp->p_flag&SNUSIG) && setjmp(u.u_qsav)) {
		u.u_eosys = RESTARTSYS;
		return;
	}
	sleep((caddr_t)u.u_procp, PWAIT);
	goto loop;
}

/*
 * fork system call.
 * Pre-reserves swap for a copy of the current data and stack
 * before attempting the fork proper.
 */
fork()
{

	u.u_cdmap = zdmap;
	u.u_csmap = zdmap;
	if (swpexpand(u.u_dsize, u.u_ssize, &u.u_cdmap, &u.u_csmap) == 0) {
		u.u_r.r_val2 = 0;
		return;
	}
	fork1(0);
}

/*
 * Common fork bottom half (isvfork != 0 for vfork).
 * On success the child returns with r_val2 == 1, the parent with the
 * child's pid in r_val1 and r_val2 == 0.
 */
fork1(isvfork)
{
	register struct proc *p1, *p2;
	register a;			/* procs already owned by this uid */

	a = 0;
	p2 = NULL;
	/* find a free slot and count this user's processes in one pass */
	for(p1 = &proc[0]; p1 < &proc[NPROC]; p1++) {
		if (p1->p_stat==NULL && p2==NULL)
			p2 = p1;
		else {
			if (p1->p_uid==u.u_uid && p1->p_stat!=NULL)
				a++;
		}
	}
	/*
	 * Disallow if
	 *  No processes at all;
	 *  not su and too many procs owned; or
	 *  not su and would take last slot.
	 */
	if (p2==NULL || (u.u_uid!=0 && (p2==&proc[NPROC-1] || a>MAXUPRC))) {
		u.u_error = EAGAIN;
		if (!isvfork) {
			/* give back the swap reserved by fork() */
			(void) vsexpand(0, &u.u_cdmap, 1);
			(void) vsexpand(0, &u.u_csmap, 1);
		}
		goto out;
	}
	p1 = u.u_procp;
	if(newproc(isvfork)) {
		/* child */
		u.u_r.r_val1 = p1->p_pid;
		u.u_r.r_val2 = 1;	/* child */
		u.u_start = time;
		u.u_acflag = AFORK;
		return;
	}
	/* parent */
	u.u_r.r_val1 = p2->p_pid;

out:
	u.u_r.r_val2 = 0;
}

/*
 * break system call.
 *  -- bad planning: "break" is a dirty word in C.
 * Grows or shrinks the data segment to the requested address,
 * checking the data-size resource limit first.
 */
sbreak()
{
	struct a {
		char	*nsiz;
	};
	register int n, d;

	/*
	 * set n to new data size
	 * set d to new-old
	 */

	n = btoc(((struct a *)u.u_ap)->nsiz);
	if (!u.u_sep)
		/* non-separate I/D: requested address includes text */
		n -= ctos(u.u_tsize) * stoc(1);
	if (n < 0)
		n = 0;
	d = clrnd(n - u.u_dsize);
	if (ctob(u.u_dsize+d) > u.u_limit[LIM_DATA]) {
		u.u_error = ENOMEM;
		return;
	}
	if (chksize(u.u_tsize, u.u_dsize+d, u.u_ssize))
		return;
	if (swpexpand(u.u_dsize+d, u.u_ssize, &u.u_dmap, &u.u_smap)==0)
		return;
	expand(d, P0BR);
}