xref: /xv6-public/proc.c (revision 7c00ce81)
#include "types.h"
#include "defs.h"
#include "param.h"
#include "memlayout.h"
#include "mmu.h"
#include "x86.h"
#include "proc.h"
#include "spinlock.h"

struct {
  struct spinlock lock;
  struct proc proc[NPROC];
} ptable;

static struct proc *initproc;

int nextpid = 1;
extern void forkret(void);
extern void trapret(void);

static void wakeup1(void *chan);

void
pinit(void)
{
  initlock(&ptable.lock, "ptable");
}

// Must be called with interrupts disabled
int
cpuid() {
  return mycpu()-cpus;
}

// Must be called with interrupts disabled
struct cpu*
mycpu(void)
{
  if(readeflags()&FL_IF){
    // Would prefer to panic but panic calls mycpu().
    cprintf("mycpu called from %x with interrupts enabled\n",
        __builtin_return_address(0));
  }

  return &cpus[lapiccpunum()];
}

// Disable interrupts so that we are not rescheduled
// while reading proc from the cpu structure
struct proc*
myproc(void) {
  struct cpu *c;
  struct proc *p;
  pushcli();
  c = mycpu();
  p = c->proc;
  popcli();
  return p;
}

//PAGEBREAK: 32
// Look in the process table for an UNUSED proc.
// If found, change state to EMBRYO and initialize
// state required to run in the kernel.
// Otherwise return 0.
static struct proc*
allocproc(void)
{
  struct proc *p;
  char *sp;

  acquire(&ptable.lock);

  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++)
    if(p->state == UNUSED)
      goto found;

  release(&ptable.lock);
  return 0;

found:
  p->state = EMBRYO;
  p->pid = nextpid++;

  release(&ptable.lock);

  // Allocate kernel stack.
  if((p->kstack = kalloc()) == 0){
    p->state = UNUSED;
    return 0;
  }
  sp = p->kstack + KSTACKSIZE;

  // Leave room for trap frame.
  sp -= sizeof *p->tf;
  p->tf = (struct trapframe*)sp;

  // Set up new context to start executing at forkret,
  // which returns to trapret.
  sp -= 4;
  *(uint*)sp = (uint)trapret;

  sp -= sizeof *p->context;
  p->context = (struct context*)sp;
  memset(p->context, 0, sizeof *p->context);
  p->context->eip = (uint)forkret;

  return p;
}
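
// Illustrative sketch (not part of the original file): after allocproc()
// returns, the new process's kernel stack is arranged so that the first
// swtch() into p->context "returns" into forkret, and forkret's own
// return then lands in trapret:
//
//   p->kstack + KSTACKSIZE -> +----------------------+
//                             |      trap frame      | <- p->tf
//                             +----------------------+
//                             |  address of trapret  | <- popped when forkret returns
//                             +----------------------+
//                             |       context        | <- p->context, eip = forkret
//                             +----------------------+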

//PAGEBREAK: 32
// Set up first user process.
void
userinit(void)
{
  struct proc *p;
  extern char _binary_initcode_start[], _binary_initcode_size[];

  p = allocproc();

  initproc = p;
  if((p->pgdir = setupkvm()) == 0)
    panic("userinit: out of memory?");
  inituvm(p->pgdir, _binary_initcode_start, (int)_binary_initcode_size);
  p->sz = PGSIZE;
  memset(p->tf, 0, sizeof(*p->tf));
  p->tf->cs = (SEG_UCODE << 3) | DPL_USER;
  p->tf->ds = (SEG_UDATA << 3) | DPL_USER;
  p->tf->es = p->tf->ds;
  p->tf->ss = p->tf->ds;
  p->tf->eflags = FL_IF;
  p->tf->esp = PGSIZE;
  p->tf->eip = 0;  // beginning of initcode.S

  safestrcpy(p->name, "initcode", sizeof(p->name));
  p->cwd = namei("/");

  // this assignment to p->state lets other cores
  // run this process. the acquire forces the above
  // writes to be visible, and the lock is also needed
  // because the assignment might not be atomic.
  acquire(&ptable.lock);

  p->state = RUNNABLE;

  release(&ptable.lock);
}

// Grow current process's memory by n bytes.
// Return 0 on success, -1 on failure.
int
growproc(int n)
{
  uint sz;
  struct proc *curproc = myproc();

  sz = curproc->sz;
  if(n > 0){
    if((sz = allocuvm(curproc->pgdir, sz, sz + n)) == 0)
      return -1;
  } else if(n < 0){
    if((sz = deallocuvm(curproc->pgdir, sz, sz + n)) == 0)
      return -1;
  }
  curproc->sz = sz;
  switchuvm(curproc);
  return 0;
}
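
// Illustrative sketch (not part of the original file): growproc() is the
// kernel side of the sbrk() system call, so user code grows its heap with
// something like
//
//   char *old = sbrk(4096);        // request one more page
//   if(old == (char*)-1)
//     printf(2, "sbrk failed\n");  // growproc() returned -1
//
// where sbrk() returns the previous size (the start of the newly
// allocated memory) on success.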

// Create a new process, copying the current process as the parent.
// Sets up the child's kernel stack to return as if from a system call,
// and marks the child RUNNABLE.
int
fork(void)
{
  int i, pid;
  struct proc *np;
  struct proc *curproc = myproc();

  // Allocate process.
  if((np = allocproc()) == 0){
    return -1;
  }

  // Copy process state from the parent.
  if((np->pgdir = copyuvm(curproc->pgdir, curproc->sz)) == 0){
    kfree(np->kstack);
    np->kstack = 0;
    np->state = UNUSED;
    return -1;
  }
  np->sz = curproc->sz;
  np->parent = curproc;
  *np->tf = *curproc->tf;

  // Clear %eax so that fork returns 0 in the child.
  np->tf->eax = 0;

  for(i = 0; i < NOFILE; i++)
    if(curproc->ofile[i])
      np->ofile[i] = filedup(curproc->ofile[i]);
  np->cwd = idup(curproc->cwd);

  safestrcpy(np->name, curproc->name, sizeof(curproc->name));

  pid = np->pid;

  acquire(&ptable.lock);

  np->state = RUNNABLE;

  release(&ptable.lock);

  return pid;
}
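
// Illustrative sketch (not part of the original file): because the child
// gets a copy of the parent's trap frame with only %eax cleared, the same
// fork() call returns twice: 0 in the child and the child's pid in the
// parent, so user code typically looks like
//
//   char *argv[] = { "echo", "hello", 0 };
//   int pid = fork();
//   if(pid == 0){
//     exec("echo", argv);          // child: replace image with a new program
//     printf(2, "exec failed\n");  // only reached if exec() fails
//     exit();
//   } else if(pid > 0){
//     wait();                      // parent: reap the child when it exits
//   } else {
//     printf(2, "fork failed\n");
//   }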

// Exit the current process.  Does not return.
// An exited process remains in the zombie state
// until its parent calls wait() to find out it exited.
void
exit(void)
{
  struct proc *curproc = myproc();
  struct proc *p;
  int fd;

  if(curproc == initproc)
    panic("init exiting");

  // Close all open files.
  for(fd = 0; fd < NOFILE; fd++){
    if(curproc->ofile[fd]){
      fileclose(curproc->ofile[fd]);
      curproc->ofile[fd] = 0;
    }
  }

  begin_op();
  iput(curproc->cwd);
  end_op();
  curproc->cwd = 0;

  acquire(&ptable.lock);

  // Parent might be sleeping in wait().
  wakeup1(curproc->parent);

  // Pass abandoned children to init.
  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
    if(p->parent == curproc){
      p->parent = initproc;
      if(p->state == ZOMBIE)
        wakeup1(initproc);
    }
  }

  // Jump into the scheduler, never to return.
  curproc->state = ZOMBIE;
  sched();
  panic("zombie exit");
}

// Wait for a child process to exit and return its pid.
// Return -1 if this process has no children.
int
wait(void)
{
  struct proc *p;
  int havekids, pid;
  struct proc *curproc = myproc();

  acquire(&ptable.lock);
  for(;;){
    // Scan through table looking for exited children.
    havekids = 0;
    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
      if(p->parent != curproc)
        continue;
      havekids = 1;
      if(p->state == ZOMBIE){
        // Found one.
        pid = p->pid;
        kfree(p->kstack);
        p->kstack = 0;
        freevm(p->pgdir);
        p->pid = 0;
        p->parent = 0;
        p->name[0] = 0;
        p->killed = 0;
        p->state = UNUSED;
        release(&ptable.lock);
        return pid;
      }
    }

    // No point waiting if we don't have any children.
    if(!havekids || curproc->killed){
      release(&ptable.lock);
      return -1;
    }

    // Wait for children to exit.  (See wakeup1 call in exit.)
    sleep(curproc, &ptable.lock);  //DOC: wait-sleep
  }
}
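
// Illustrative sketch (not part of the original file): a parent that has
// forked several children can reap them all by calling wait() until it
// reports that no children remain:
//
//   while(wait() != -1)
//     ;   // each successful call frees one zombie child's resources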

//PAGEBREAK: 42
// Per-CPU process scheduler.
// Each CPU calls scheduler() after setting itself up.
// Scheduler never returns.  It loops, doing:
//  - choose a process to run
//  - swtch to start running that process
//  - eventually that process transfers control
//      via swtch back to the scheduler.
void
scheduler(void)
{
  struct proc *p;
  struct cpu *c = mycpu();
  c->proc = 0;

  for(;;){
    // Enable interrupts on this processor.
    sti();

    // Loop over process table looking for process to run.
    acquire(&ptable.lock);
    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
      if(p->state != RUNNABLE)
        continue;

      // Switch to chosen process.  It is the process's job
      // to release ptable.lock and then reacquire it
      // before jumping back to us.
      c->proc = p;
      switchuvm(p);
      p->state = RUNNING;

      swtch(&(c->scheduler), p->context);
      switchkvm();

      // Process is done running for now.
      // It should have changed its p->state before coming back.
      c->proc = 0;
    }
    release(&ptable.lock);
  }
}
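
// Illustrative sketch (not part of the original file): one scheduling
// round trip for a process p is two context switches, with ptable.lock
// held across both:
//
//   scheduler(): swtch(&c->scheduler, p->context);       // CPU -> p's kernel thread
//     ... p runs, then calls yield()/sleep()/exit(), which call sched() ...
//   sched():     swtch(&p->context, mycpu()->scheduler); // p -> back to this loop
//
// so p->state is always updated under ptable.lock before another CPU's
// scheduler can observe it.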

// Enter scheduler.  Must hold only ptable.lock
// and have changed proc->state. Saves and restores
// intena because intena is a property of this
// kernel thread, not this CPU. It should
// be proc->intena and proc->ncli, but that would
// break in the few places where a lock is held but
// there's no process.
void
sched(void)
{
  int intena;
  struct proc *p = myproc();

  if(!holding(&ptable.lock))
    panic("sched ptable.lock");
  if(mycpu()->ncli != 1)
    panic("sched locks");
  if(p->state == RUNNING)
    panic("sched running");
  if(readeflags()&FL_IF)
    panic("sched interruptible");
  intena = mycpu()->intena;
  swtch(&p->context, mycpu()->scheduler);
  mycpu()->intena = intena;
}

// Give up the CPU for one scheduling round.
void
yield(void)
{
  acquire(&ptable.lock);  //DOC: yieldlock
  myproc()->state = RUNNABLE;
  sched();
  release(&ptable.lock);
}

// A fork child's very first scheduling by scheduler()
// will swtch here.  "Return" to user space.
void
forkret(void)
{
  static int first = 1;
  // Still holding ptable.lock from scheduler.
  release(&ptable.lock);

  if (first) {
    // Some initialization functions must be run in the context
    // of a regular process (e.g., they call sleep), and thus cannot
    // be run from main().
    first = 0;
    iinit(ROOTDEV);
    initlog(ROOTDEV);
  }

  // Return to "caller", actually trapret (see allocproc).
}

// Atomically release lock and sleep on chan.
// Reacquires lock when awakened.
void
sleep(void *chan, struct spinlock *lk)
{
  struct proc *p = myproc();

  if(p == 0)
    panic("sleep");

  if(lk == 0)
    panic("sleep without lk");

  // Must acquire ptable.lock in order to
  // change p->state and then call sched.
  // Once we hold ptable.lock, we can be
  // guaranteed that we won't miss any wakeup
  // (wakeup runs with ptable.lock locked),
  // so it's okay to release lk.
  if(lk != &ptable.lock){  //DOC: sleeplock0
    acquire(&ptable.lock);  //DOC: sleeplock1
    release(lk);
  }
  // Go to sleep.
  p->chan = chan;
  p->state = SLEEPING;

  sched();

  // Tidy up.
  p->chan = 0;

  // Reacquire original lock.
  if(lk != &ptable.lock){  //DOC: sleeplock2
    release(&ptable.lock);
    acquire(lk);
  }
}
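
// Illustrative sketch (not part of the original file): callers pair
// sleep() with wakeup() around a condition protected by their own lock,
// and re-check the condition in a loop because wakeup() wakes every
// process sleeping on the channel:
//
//   acquire(&q.lock);            // q is a hypothetical shared structure
//   while(q.count == 0)          // re-check: wakeup wakes all sleepers
//     sleep(&q, &q.lock);        // releases q.lock while asleep
//   q.count--;
//   release(&q.lock);
//
// and, on the other side, after making the condition true:
//
//   acquire(&q.lock);
//   q.count++;
//   wakeup(&q);                  // same channel the consumer sleeps on
//   release(&q.lock);
//
// xv6's pipe.c (piperead/pipewrite) follows this same pattern.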

//PAGEBREAK!
// Wake up all processes sleeping on chan.
// The ptable lock must be held.
static void
wakeup1(void *chan)
{
  struct proc *p;

  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++)
    if(p->state == SLEEPING && p->chan == chan)
      p->state = RUNNABLE;
}

// Wake up all processes sleeping on chan.
void
wakeup(void *chan)
{
  acquire(&ptable.lock);
  wakeup1(chan);
  release(&ptable.lock);
}

// Kill the process with the given pid.
// Process won't exit until it returns
// to user space (see trap in trap.c).
int
kill(int pid)
{
  struct proc *p;

  acquire(&ptable.lock);
  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
    if(p->pid == pid){
      p->killed = 1;
      // Wake process from sleep if necessary.
      if(p->state == SLEEPING)
        p->state = RUNNABLE;
      release(&ptable.lock);
      return 0;
    }
  }
  release(&ptable.lock);
  return -1;
}
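
// Illustrative sketch (not part of this file): the check that kill()'s
// comment refers to lives in trap() in trap.c, which looks roughly like
//
//   if(myproc() && myproc()->killed && (tf->cs&3) == DPL_USER)
//     exit();
//
// so a killed process actually dies the next time it is about to return
// to user space (or when a loop such as wait() notices p->killed).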

//PAGEBREAK: 36
// Print a process listing to console.  For debugging.
// Runs when user types ^P on console.
// No lock to avoid wedging a stuck machine further.
void
procdump(void)
{
  static char *states[] = {
  [UNUSED]    "unused",
  [EMBRYO]    "embryo",
  [SLEEPING]  "sleep ",
  [RUNNABLE]  "runble",
  [RUNNING]   "run   ",
  [ZOMBIE]    "zombie"
  };
  int i;
  struct proc *p;
  char *state;
  uint pc[10];

  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
    if(p->state == UNUSED)
      continue;
    if(p->state >= 0 && p->state < NELEM(states) && states[p->state])
      state = states[p->state];
    else
      state = "???";
    cprintf("%d %s %s", p->pid, state, p->name);
    if(p->state == SLEEPING){
      getcallerpcs((uint*)p->context->ebp+2, pc);
      for(i=0; i<10 && pc[i] != 0; i++)
        cprintf(" %p", pc[i]);
    }
    cprintf("\n");
  }
}