/*	kern_synch.c	4.16	82/01/24	*/

/*
 * Process synchronization: sleep/wakeup channels, run-queue setup,
 * priority computation, and process creation (newproc).
 */

#include "../h/param.h"
#include "../h/systm.h"
#include "../h/dir.h"
#include "../h/user.h"
#include "../h/proc.h"
#include "../h/file.h"
#include "../h/inode.h"
#include "../h/vm.h"
#include "../h/pte.h"
#include "../h/inline.h"
#include "../h/mtpr.h"

#define SQSIZE 0100	/* Must be power of 2 */
/*
 * Hash a wait channel (an arbitrary kernel address) to a sleep-queue
 * bucket.  The >>5 discards the low address bits before masking;
 * presumably because channels are addresses of kernel structures and
 * the low bits carry little entropy -- TODO confirm.
 */
#define HASH(x)	(( (int) x >> 5) & (SQSIZE-1))
/* Hashed sleep queues: each bucket is a singly-linked list of procs
 * chained through p_link, keyed by p_wchan. */
struct proc *slpque[SQSIZE];

/*
 * Give up the processor till a wakeup occurs
 * on chan, at which time the process
 * enters the scheduling queue at priority pri.
 * The most important effect of pri is that when
 * pri<=PZERO a signal cannot disturb the sleep;
 * if pri>PZERO signals will be processed.
 * Callers of this routine must be prepared for
 * premature return, and check that the reason for
 * sleeping has gone away.
 */
sleep(chan, pri)
	caddr_t chan;
{
	register struct proc *rp, **hp;
	register s;

	rp = u.u_procp;
	s = spl6();		/* raised ipl is restored via splx(s) at "out" */
	/* Sanity: must have a channel, be the running process, and not
	 * already be on a queue (p_rlink set would mean on the run queue). */
	if (chan==0 || rp->p_stat != SRUN || rp->p_rlink)
		panic("sleep");
	rp->p_wchan = chan;
	rp->p_slptime = 0;
	rp->p_pri = pri;
	/* Push onto the front of the hashed sleep queue for chan. */
	hp = &slpque[HASH(chan)];
	rp->p_link = *hp;
	*hp = rp;
	if (pri > PZERO) {
		/* Interruptible sleep: a signal already pending means we
		 * never actually block; undo the enqueue and abort. */
		if (ISSIG(rp)) {
			if (rp->p_wchan)
				unsleep(rp);
			rp->p_stat = SRUN;
			(void) spl0();
			goto psig;
		}
		/* A wakeup may have cleared p_wchan between the enqueue and
		 * here (once interrupts were effectively reenabled). */
		if (rp->p_wchan == 0)
			goto out;
		rp->p_stat = SSLEEP;
		(void) spl0();
		swtch();	/* block; returns after wakeup() makes us runnable */
		if (ISSIG(rp))
			goto psig;
	} else {
		/* Uninterruptible sleep: signals are not checked. */
		rp->p_stat = SSLEEP;
		(void) spl0();
		swtch();
	}
out:
	splx(s);
	return;

/*
 * If priority was low (>PZERO) and
 * there has been a signal, execute non-local goto through
 * u.u_qsav, aborting the system call in progress (see trap.c)
 * (or finishing a tsleep, see below)
 */
psig:
	longjmp(u.u_qsav);
	/*NOTREACHED*/
}

/*
 * Sleep on chan at pri.
 * Return in no more than the indicated number of seconds.
 * (If seconds==0, no timeout implied)
 * Return	TS_OK if chan was awakened normally
 *		TS_TIME if timeout occurred
 *		TS_SIG if asynchronous signal occurred
 *
 * SHOULD HAVE OPTION TO SLEEP TO ABSOLUTE TIME OR AN
 * INCREMENT IN MILLISECONDS!
 */
tsleep(chan, pri, seconds)
	caddr_t chan;
	int pri, seconds;
{
	label_t lqsav;		/* saved copy of u.u_qsav, restored on exit */
	register struct proc *pp;
	register sec, n, rval;

	pp = u.u_procp;
	n = spl7();		/* restored via splx(n) before return */
	sec = 0;
	rval = 0;
	/* If an alarm (p_clktim) is already pending and will fire sooner
	 * than our timeout, keep it and request no timeout of our own. */
	if (pp->p_clktim && pp->p_clktim<seconds)
		seconds = 0;
	if (seconds) {
		pp->p_flag |= STIMO;
		/* sec remembers the remainder of any pre-existing (longer)
		 * alarm so it can be re-armed after the sleep. */
		sec = pp->p_clktim-seconds;
		pp->p_clktim = seconds;
	}
	/*
	 * Redirect the signal longjmp in sleep() to land here instead of
	 * aborting the whole system call: save the caller's u_qsav, plant
	 * our own setjmp, and restore the original below.
	 */
	bcopy((caddr_t)u.u_qsav, (caddr_t)lqsav, sizeof (label_t));
	if (setjmp(u.u_qsav))
		rval = TS_SIG;	/* got here via longjmp from sleep() */
	else {
		sleep(chan, pri);
		/* The timeout handler clears STIMO when it fires; if it is
		 * still clear and we asked for a timeout, we timed out. */
		if ((pp->p_flag&STIMO)==0 && seconds)
			rval = TS_TIME;
		else
			rval = TS_OK;
	}
	pp->p_flag &= ~STIMO;
	bcopy((caddr_t)lqsav, (caddr_t)u.u_qsav, sizeof (label_t));
	/* Re-arm the remainder of a pre-existing alarm, if any. */
	if (sec > 0)
		pp->p_clktim += sec;
	else
		pp->p_clktim = 0;
	splx(n);
	return (rval);
}

/*
 * Remove a process from its wait queue
 */
unsleep(p)
	register struct proc *p;
{
	register struct proc **hp;
	register s;

	s = spl6();
	if (p->p_wchan) {
		/* Walk the hash chain to find the link pointing at p,
		 * then splice p out.  Assumes p is on the queue. */
		hp = &slpque[HASH(p->p_wchan)];
		while (*hp != p)
			hp = &(*hp)->p_link;
		*hp = p->p_link;
		p->p_wchan = 0;
	}
	splx(s);
}

/*
 * Wake up all processes sleeping on chan.
 */
wakeup(chan)
	register caddr_t chan;
{
	register struct proc *p, **q, **h;
	int s;

	s = spl6();
	h = &slpque[HASH(chan)];
restart:
	/* Walk the hash bucket; q trails as the link to patch on removal. */
	for (q = h; p = *q; ) {
		/* Anyone on a sleep queue must be SSLEEP or SSTOP and not
		 * simultaneously on the run queue (p_rlink). */
		if (p->p_rlink || p->p_stat != SSLEEP && p->p_stat != SSTOP)
			panic("wakeup");
		if (p->p_wchan==chan) {
			p->p_wchan = 0;
			*q = p->p_link;		/* unlink from sleep queue */
			p->p_slptime = 0;
			if (p->p_stat == SSLEEP) {
				/* OPTIMIZED INLINE EXPANSION OF setrun(p) */
				/* NOTE(review): keep in sync with setrun() below. */
				p->p_stat = SRUN;
				if (p->p_flag & SLOAD)
					setrq(p);
				if (p->p_pri < curpri) {
					runrun++;
					aston();
				}
				/* Process is swapped out: poke the scheduler
				 * (note the recursive wakeup on &runout). */
				if ((p->p_flag&SLOAD) == 0) {
					if (runout != 0) {
						runout = 0;
						wakeup((caddr_t)&runout);
					}
					wantin++;
				}
				/* END INLINE EXPANSION */
				/* The recursive wakeup may have altered the
				 * chains; rescan this bucket from the top. */
				goto restart;
			}
			/* SSTOP procs are removed from the queue but left
			 * stopped; fall through without advancing q since
			 * *q was just replaced. */
		} else
			q = &p->p_link;
	}
	splx(s);
}

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
rqinit()
{
	register int i;

	/* Each queue header points to itself: empty circular list. */
	for (i = 0; i < NQS; i++)
		qs[i].ph_link = qs[i].ph_rlink = (struct proc *)&qs[i];
}

/*
 * Set the process running;
 * arrange for it to be swapped in if necessary.
 */
setrun(p)
	register struct proc *p;
{
	register int s;

	s = spl6();
	switch (p->p_stat) {

	case 0:
	case SWAIT:
	case SRUN:
	case SZOMB:
	default:
		/* Already running, dead, or nonsense state: caller bug. */
		panic("setrun");

	case SSTOP:
	case SSLEEP:
		unsleep(p);		/* e.g. when sending signals */
		break;

	case SIDL:
		break;			/* being created; nothing to undo */
	}
	p->p_stat = SRUN;
	if (p->p_flag & SLOAD)
		setrq(p);		/* in core: onto the run queue */
	splx(s);
	/* Preempt if the awakened process is more deserving. */
	if (p->p_pri < curpri) {
		runrun++;
		aston();
	}
	/* Swapped out: wake the swap scheduler so it gets brought in. */
	if ((p->p_flag&SLOAD) == 0) {
		if (runout != 0) {
			runout = 0;
			wakeup((caddr_t)&runout);
		}
		wantin++;
	}
}

/*
 * Set user priority.
 * The rescheduling flag (runrun)
 * is set if the priority is better
 * than the currently running process.
 */
setpri(pp)
	register struct proc *pp;
{
	register int p;

	/* Base priority from recent cpu usage plus nice bias. */
	p = (pp->p_cpu & 0377)/4;
	p += PUSER + 2*(pp->p_nice - NZERO);
	/* Penalize processes over their resident-set limit when memory
	 * is tight. */
	if (pp->p_rssize > pp->p_maxrss && freemem < desfree)
		p += 2*4;	/* effectively, nice(4) */
	if (p > 127)
		p = 127;	/* clamp to the worst (highest) priority */
	if (p < curpri) {
		runrun++;
		aston();
	}
	pp->p_usrpri = p;
	return (p);
}

/*
 * Create a new process-- the internal version of
 * sys fork.
 * It returns 1 in the new process, 0 in the old.
 */
newproc(isvfork)
	int isvfork;
{
	register struct proc *p;
	register struct proc *rpp, *rip;
	register int n;

	p = NULL;
	/*
	 * First, just locate a slot for a process
	 * and copy the useful info from this process into it.
	 * The panic "cannot happen" because fork has already
	 * checked for the existence of a slot.
	 */
retry:
	mpid++;
	if (mpid >= 30000) {
		mpid = 0;	/* wrap; retry bumps it to 1 */
		goto retry;
	}
	/* Scan the whole table: remember the first free slot, and restart
	 * with a new pid if mpid collides with any pid or pgrp. */
	for (rpp = proc; rpp < procNPROC; rpp++) {
		if (rpp->p_stat == NULL && p==NULL)
			p = rpp;
		if (rpp->p_pid==mpid || rpp->p_pgrp==mpid)
			goto retry;
	}
	if ((rpp = p) == NULL)
		panic("no procs");

	/*
	 * Make a proc table entry for the new process.
	 */
	rip = u.u_procp;
	rpp->p_stat = SIDL;	/* intermediate state: not yet runnable */
	rpp->p_clktim = 0;
	rpp->p_flag = SLOAD | (rip->p_flag & (SPAGI|SNUSIG));
	if (isvfork) {
		rpp->p_flag |= SVFORK;
		rpp->p_ndx = rip->p_ndx;	/* child shares parent's index while borrowing its VM */
	} else
		rpp->p_ndx = rpp - proc;
	rpp->p_uid = rip->p_uid;
	rpp->p_pgrp = rip->p_pgrp;
	rpp->p_nice = rip->p_nice;
	rpp->p_textp = isvfork ? 0 : rip->p_textp;
	rpp->p_pid = mpid;
	rpp->p_ppid = rip->p_pid;
	rpp->p_pptr = rip;
	rpp->p_time = 0;
	rpp->p_cpu = 0;
	rpp->p_siga0 = rip->p_siga0;
	rpp->p_siga1 = rip->p_siga1;
	/* take along any pending signals, like stops? */
	if (isvfork) {
		/* vfork child starts with no VM of its own; it borrows the
		 * parent's below. */
		rpp->p_tsize = rpp->p_dsize = rpp->p_ssize = 0;
		rpp->p_szpt = clrnd(ctopt(UPAGES));
		forkstat.cntvfork++;
		forkstat.sizvfork += rip->p_dsize + rip->p_ssize;
	} else {
		rpp->p_tsize = rip->p_tsize;
		rpp->p_dsize = rip->p_dsize;
		rpp->p_ssize = rip->p_ssize;
		rpp->p_szpt = rip->p_szpt;
		forkstat.cntfork++;
		forkstat.sizfork += rip->p_dsize + rip->p_ssize;
	}
	rpp->p_rssize = 0;
	rpp->p_maxrss = rip->p_maxrss;
	rpp->p_wchan = 0;
	rpp->p_slptime = 0;
	rpp->p_pctcpu = 0;
	rpp->p_cpticks = 0;
	/* Enter the new pid in the pid hash chain. */
	n = PIDHASH(rpp->p_pid);
	/* NOTE(review): p == rpp here (assigned above), so this is
	 * equivalent to rpp->p_idhash; written inconsistently. */
	p->p_idhash = pidhash[n];
	pidhash[n] = rpp - proc;
	multprog++;

	/*
	 * Increase reference counts on shared objects.
	 */
	for(n=0; n<NOFILE; n++)
		if (u.u_ofile[n] != NULL)
			u.u_ofile[n]->f_count++;
	u.u_cdir->i_count++;
	if (u.u_rdir)
		u.u_rdir->i_count++;

	/*
	 * Partially simulate the environment
	 * of the new process so that when it is actually
	 * created (by copying) it will look right.
	 * This begins the section where we must prevent the parent
	 * from being swapped.
	 */
	rip->p_flag |= SKEEP;
	if (procdup(rpp, isvfork))
		return (1);	/* this is the child's return path */

	/*
	 * Make child runnable and add to run queue.
	 */
	(void) spl6();
	rpp->p_stat = SRUN;
	setrq(rpp);
	(void) spl0();

	/*
	 * Cause child to take a non-local goto as soon as it runs.
	 * On older systems this was done with SSWAP bit in proc
	 * table; on VAX we use u.u_pcb.pcb_sswap so don't need
	 * to do rpp->p_flag |= SSWAP.  Actually do nothing here.
	 */
	/* rpp->p_flag |= SSWAP; */

	/*
	 * Now can be swapped.
	 */
	rip->p_flag &= ~SKEEP;

	/*
	 * If vfork make chain from parent process to child
	 * (where virtal memory is temporarily).  Wait for
	 * child to finish, steal virtual memory back,
	 * and wakeup child to let it die.
	 */
	if (isvfork) {
		u.u_procp->p_xlink = rpp;
		u.u_procp->p_flag |= SNOVM;
		/* Parent blocks (uninterruptibly, PZERO-1) until the child
		 * execs or exits and clears SVFORK. */
		while (rpp->p_flag & SVFORK)
			sleep((caddr_t)rpp, PZERO - 1);
		if ((rpp->p_flag & SLOAD) == 0)
			panic("newproc vfork");
		uaccess(rpp, Vfmap, &vfutl);
		u.u_procp->p_xlink = 0;
		/* Reclaim the virtual memory lent to the child. */
		vpassvm(rpp, u.u_procp, &vfutl, &u, Vfmap);
		u.u_procp->p_flag &= ~SNOVM;
		rpp->p_ndx = rpp - proc;
		rpp->p_flag |= SVFDONE;
		wakeup((caddr_t)rpp);	/* let the child finish dying */
	}

	/*
	 * 0 return means parent.
	 */
	return (0);
}