xref: /xv6-public/proc.c (revision bc8221a5)
1 #include "types.h"
2 #include "defs.h"
3 #include "param.h"
4 #include "memlayout.h"
5 #include "mmu.h"
6 #include "x86.h"
7 #include "proc.h"
8 #include "spinlock.h"
9 
10 struct {
11   struct spinlock lock;
12   struct proc proc[NPROC];
13 } ptable;
14 
15 static struct proc *initproc;
16 
17 int nextpid = 1;
18 extern void forkret(void);
19 extern void trapret(void);
20 
21 static void wakeup1(void *chan);
22 
23 void
24 pinit(void)
25 {
26   initlock(&ptable.lock, "ptable");
27 }
28 
29 //PAGEBREAK: 32
30 // Look in the process table for an UNUSED proc.
31 // If found, change state to EMBRYO and initialize
32 // state required to run in the kernel.
33 // Otherwise return 0.
34 // Must hold ptable.lock.
35 static struct proc*
36 allocproc(void)
37 {
38   struct proc *p;
39   char *sp;
40 
41   for(p = ptable.proc; p < &ptable.proc[NPROC]; p++)
42     if(p->state == UNUSED)
43       goto found;
44   return 0;
45 
46 found:
47   p->state = EMBRYO;
48   p->pid = nextpid++;
49 
50   // Allocate kernel stack.
51   if((p->kstack = kalloc()) == 0){
52     p->state = UNUSED;
53     return 0;
54   }
55   sp = p->kstack + KSTACKSIZE;
56 
57   // Leave room for trap frame.
58   sp -= sizeof *p->tf;
59   p->tf = (struct trapframe*)sp;
60 
61   // Set up new context to start executing at forkret,
62   // which returns to trapret.
63   sp -= 4;
64   *(uint*)sp = (uint)trapret;
65 
66   sp -= sizeof *p->context;
67   p->context = (struct context*)sp;
68   memset(p->context, 0, sizeof *p->context);
69   p->context->eip = (uint)forkret;
70 
71   return p;
72 }
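
// The kernel stack prepared by allocproc looks roughly like this
// (high addresses at the top); a sketch, not an authoritative layout:
//
//   p->kstack + KSTACKSIZE -> +------------------+
//                             |    trapframe     |  <- p->tf (popped by trapret)
//                             +------------------+
//                             | addr of trapret  |  return address used when
//                             +------------------+  forkret returns
//                             |     context      |  <- p->context (eip = forkret)
//                             +------------------+
//                             |       ...        |  unused stack space
//   p->kstack              -> +------------------+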
73 
74 //PAGEBREAK: 32
75 // Set up first user process.
76 void
77 userinit(void)
78 {
79   struct proc *p;
80   extern char _binary_initcode_start[], _binary_initcode_size[];
81 
82   acquire(&ptable.lock);
83 
84   p = allocproc();
85   initproc = p;
86   if((p->pgdir = setupkvm()) == 0)
87     panic("userinit: out of memory?");
88   inituvm(p->pgdir, _binary_initcode_start, (int)_binary_initcode_size);
89   p->sz = PGSIZE;
90   memset(p->tf, 0, sizeof(*p->tf));
91   p->tf->cs = (SEG_UCODE << 3) | DPL_USER;
92   p->tf->ds = (SEG_UDATA << 3) | DPL_USER;
93   p->tf->es = p->tf->ds;
94   p->tf->ss = p->tf->ds;
95   p->tf->eflags = FL_IF;
96   p->tf->esp = PGSIZE;
97   p->tf->eip = 0;  // beginning of initcode.S
98 
99   safestrcpy(p->name, "initcode", sizeof(p->name));
100   p->cwd = namei("/");
101 
102   p->state = RUNNABLE;
103 
104   release(&ptable.lock);
105 }
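
// Note: inituvm above copies initcode (initcode.S, linked into the kernel
// as _binary_initcode_start/_binary_initcode_size) to virtual address 0,
// and the trapframe sets eip = 0 and esp = PGSIZE, so the first return to
// user space begins executing initcode, which in turn execs /init.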
106 
107 // Grow current process's memory by n bytes.
108 // Return 0 on success, -1 on failure.
109 int
110 growproc(int n)
111 {
112   uint sz;
113 
114   sz = proc->sz;
115   if(n > 0){
116     if((sz = allocuvm(proc->pgdir, sz, sz + n)) == 0)
117       return -1;
118   } else if(n < 0){
119     if((sz = deallocuvm(proc->pgdir, sz, sz + n)) == 0)
120       return -1;
121   }
122   proc->sz = sz;
123   switchuvm(proc);
124   return 0;
125 }
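
// For context: the sbrk system call is the main caller of growproc().
// A sketch of how sysproc.c uses it (roughly the stock xv6 code; not
// verified against this revision):
//
//   int
//   sys_sbrk(void)
//   {
//     int addr;
//     int n;
//
//     if(argint(0, &n) < 0)
//       return -1;
//     addr = proc->sz;        // old break is the return value
//     if(growproc(n) < 0)
//       return -1;
//     return addr;
//   }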
126 
127 // Create a new process copying the current process as the parent.
128 // Sets up stack to return as if from system call.
129 // Marks the new process RUNNABLE before returning.
130 int
131 fork(void)
132 {
133   int i, pid;
134   struct proc *np;
135 
136   acquire(&ptable.lock);
137 
138   // Allocate process.
139   if((np = allocproc()) == 0){
140     release(&ptable.lock);
141     return -1;
142   }
143 
144   // Copy process state from the parent (proc).
145   if((np->pgdir = copyuvm(proc->pgdir, proc->sz)) == 0){
146     kfree(np->kstack);
147     np->kstack = 0;
148     np->state = UNUSED;
149     release(&ptable.lock);
150     return -1;
151   }
152   np->sz = proc->sz;
153   np->parent = proc;
154   *np->tf = *proc->tf;
155 
156   // Clear %eax so that fork returns 0 in the child.
157   np->tf->eax = 0;
158 
159   for(i = 0; i < NOFILE; i++)
160     if(proc->ofile[i])
161       np->ofile[i] = filedup(proc->ofile[i]);
162   np->cwd = idup(proc->cwd);
163 
164   safestrcpy(np->name, proc->name, sizeof(proc->name));
165 
166   pid = np->pid;
167 
168   np->state = RUNNABLE;
169 
170   release(&ptable.lock);
171 
172   return pid;
173 }
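
// Typical user-level use of fork (a sketch; error handling elided, and
// the program name is just an example):
//
//   char *argv[] = { "echo", "hello", 0 };
//   int pid = fork();
//   if(pid == 0){
//     exec("echo", argv);   // child: replaced by a new image
//     exit();               // only reached if exec fails
//   } else if(pid > 0){
//     wait();               // parent: reap the child (see wait below)
//   }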
174 
175 // Exit the current process.  Does not return.
176 // An exited process remains in the zombie state
177 // until its parent calls wait() to find out it exited.
178 void
179 exit(void)
180 {
181   struct proc *p;
182   int fd;
183 
184   if(proc == initproc)
185     panic("init exiting");
186 
187   // Close all open files.
188   for(fd = 0; fd < NOFILE; fd++){
189     if(proc->ofile[fd]){
190       fileclose(proc->ofile[fd]);
191       proc->ofile[fd] = 0;
192     }
193   }
194 
195   begin_op();
196   iput(proc->cwd);
197   end_op();
198   proc->cwd = 0;
199 
200   acquire(&ptable.lock);
201 
202   // Parent might be sleeping in wait().
203   wakeup1(proc->parent);
204 
205   // Pass abandoned children to init.
206   for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
207     if(p->parent == proc){
208       p->parent = initproc;
209       if(p->state == ZOMBIE)
210         wakeup1(initproc);
211     }
212   }
213 
214   // Jump into the scheduler, never to return.
215   proc->state = ZOMBIE;
216   sched();
217   panic("zombie exit");
218 }
219 
220 // Wait for a child process to exit and return its pid.
221 // Return -1 if this process has no children.
222 int
223 wait(void)
224 {
225   struct proc *p;
226   int havekids, pid;
227 
228   acquire(&ptable.lock);
229   for(;;){
230     // Scan through table looking for zombie children.
231     havekids = 0;
232     for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
233       if(p->parent != proc)
234         continue;
235       havekids = 1;
236       if(p->state == ZOMBIE){
237         // Found one.
238         pid = p->pid;
239         kfree(p->kstack);
240         p->kstack = 0;
241         freevm(p->pgdir);
242         p->pid = 0;
243         p->parent = 0;
244         p->name[0] = 0;
245         p->killed = 0;
246         p->state = UNUSED;
247         release(&ptable.lock);
248         return pid;
249       }
250     }
251 
252     // No point waiting if we don't have any children.
253     if(!havekids || proc->killed){
254       release(&ptable.lock);
255       return -1;
256     }
257 
258     // Wait for children to exit.  (See wakeup1 call in exit.)
259     sleep(proc, &ptable.lock);  //DOC: wait-sleep
260   }
261 }
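
// Note that the child's kernel stack and page directory are freed here by
// the parent rather than in exit(): the exiting child is still running on
// that stack, with that page directory loaded, until its final swtch()
// into the scheduler.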
262 
263 //PAGEBREAK: 42
264 // Per-CPU process scheduler.
265 // Each CPU calls scheduler() after setting itself up.
266 // Scheduler never returns.  It loops, doing:
267 //  - choose a process to run
268 //  - swtch to start running that process
269 //  - eventually that process transfers control
270 //      via swtch back to the scheduler.
271 void
272 scheduler(void)
273 {
274   struct proc *p;
275 
276   for(;;){
277     // Enable interrupts on this processor.
278     sti();
279 
280     // Loop over process table looking for process to run.
281     acquire(&ptable.lock);
282     for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
283       if(p->state != RUNNABLE)
284         continue;
285 
286       // Switch to chosen process.  It is the process's job
287       // to release ptable.lock and then reacquire it
288       // before jumping back to us.
289       proc = p;
290       switchuvm(p);
291       p->state = RUNNING;
292       swtch(&cpu->scheduler, p->context);
293       switchkvm();
294 
295       // Process is done running for now.
296       // It should have changed its p->state before coming back.
297       proc = 0;
298     }
299     release(&ptable.lock);
300 
301   }
302 }
303 
304 // Enter scheduler.  Must hold only ptable.lock
305 // and have changed proc->state. Saves and restores
306 // intena because intena is a property of this
307 // kernel thread, not this CPU. It should
308 // be proc->intena and proc->ncli, but that would
309 // break in the few places where a lock is held but
310 // there's no process.
311 void
312 sched(void)
313 {
314   int intena;
315 
316   if(!holding(&ptable.lock))
317     panic("sched ptable.lock");
318   if(cpu->ncli != 1)
319     panic("sched locks");
320   if(proc->state == RUNNING)
321     panic("sched running");
322   if(readeflags()&FL_IF)
323     panic("sched interruptible");
324   intena = cpu->intena;
325   swtch(&proc->context, cpu->scheduler);
326   cpu->intena = intena;
327 }
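
// For reference, swtch() (swtch.S) saves and restores the registers that
// make up a struct context; roughly as declared in proc.h:
//
//   struct context {
//     uint edi;
//     uint esi;
//     uint ebx;
//     uint ebp;
//     uint eip;   // saved/restored implicitly by the call/ret in swtch
//   };
//
// Caller-saved registers are preserved by the C calling convention, and
// %esp is implied by where the context itself lives on the stack.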
328 
329 // Give up the CPU for one scheduling round.
330 void
331 yield(void)
332 {
333   acquire(&ptable.lock);  //DOC: yieldlock
334   proc->state = RUNNABLE;
335   sched();
336   release(&ptable.lock);
337 }
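
// yield() is reached from trap() on every timer interrupt; roughly
// (see trap.c, not verified against this revision):
//
//   if(proc && proc->state == RUNNING && tf->trapno == T_IRQ0+IRQ_TIMER)
//     yield();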
338 
339 // A fork child's very first scheduling by scheduler()
340 // will swtch here.  "Return" to user space.
341 void
342 forkret(void)
343 {
344   static int first = 1;
345   // Still holding ptable.lock from scheduler.
346   release(&ptable.lock);
347 
348   if (first) {
349     // Some initialization functions must be run in the context
350     // of a regular process (e.g., they call sleep), and thus cannot
351     // be run from main().
352     first = 0;
353     iinit(ROOTDEV);
354     initlog(ROOTDEV);
355   }
356 
357   // Return to "caller", actually trapret (see allocproc).
358 }
359 
360 // Atomically release lock and sleep on chan.
361 // Reacquires lock when awakened.
362 void
363 sleep(void *chan, struct spinlock *lk)
364 {
365   if(proc == 0)
366     panic("sleep");
367 
368   if(lk == 0)
369     panic("sleep without lk");
370 
371   // Must acquire ptable.lock in order to
372   // change p->state and then call sched.
373   // Once we hold ptable.lock, we can be
374   // guaranteed that we won't miss any wakeup
375   // (wakeup runs with ptable.lock locked),
376   // so it's okay to release lk.
377   if(lk != &ptable.lock){  //DOC: sleeplock0
378     acquire(&ptable.lock);  //DOC: sleeplock1
379     release(lk);
380   }
381 
382   // Go to sleep.
383   proc->chan = chan;
384   proc->state = SLEEPING;
385   sched();
386 
387   // Tidy up.
388   proc->chan = 0;
389 
390   // Reacquire original lock.
391   if(lk != &ptable.lock){  //DOC: sleeplock2
392     release(&ptable.lock);
393     acquire(lk);
394   }
395 }
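
// Typical sleep/wakeup pattern (a sketch using a hypothetical lock lk and
// condition flag; see wait() above or pipe.c for real examples):
//
//   acquire(&lk);
//   while(!condition)       // recheck after every wakeup: the condition
//     sleep(chan, &lk);     // may already have been consumed by another process
//   ...                     // condition holds and lk is held
//   release(&lk);
//
// and on the waking side:
//
//   acquire(&lk);
//   condition = 1;
//   wakeup(chan);
//   release(&lk);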
396 
397 //PAGEBREAK!
398 // Wake up all processes sleeping on chan.
399 // The ptable lock must be held.
400 static void
401 wakeup1(void *chan)
402 {
403   struct proc *p;
404 
405   for(p = ptable.proc; p < &ptable.proc[NPROC]; p++)
406     if(p->state == SLEEPING && p->chan == chan)
407       p->state = RUNNABLE;
408 }
409 
410 // Wake up all processes sleeping on chan.
411 void
412 wakeup(void *chan)
413 {
414   acquire(&ptable.lock);
415   wakeup1(chan);
416   release(&ptable.lock);
417 }
418 
419 // Kill the process with the given pid.
420 // Process won't exit until it returns
421 // to user space (see trap in trap.c).
422 int
423 kill(int pid)
424 {
425   struct proc *p;
426 
427   acquire(&ptable.lock);
428   for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
429     if(p->pid == pid){
430       p->killed = 1;
431       // Wake process from sleep if necessary.
432       if(p->state == SLEEPING)
433         p->state = RUNNABLE;
434       release(&ptable.lock);
435       return 0;
436     }
437   }
438   release(&ptable.lock);
439   return -1;
440 }
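
// A killed process does not die immediately: trap() checks p->killed on the
// way back to user space and calls exit() from there, roughly
// (see trap.c, not verified against this revision):
//
//   if(proc && proc->killed && (tf->cs&3) == DPL_USER)
//     exit();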
441 
442 //PAGEBREAK: 36
443 // Print a process listing to console.  For debugging.
444 // Runs when user types ^P on console.
445 // No lock to avoid wedging a stuck machine further.
446 void
447 procdump(void)
448 {
449   static char *states[] = {
450   [UNUSED]    "unused",
451   [EMBRYO]    "embryo",
452   [SLEEPING]  "sleep ",
453   [RUNNABLE]  "runble",
454   [RUNNING]   "run   ",
455   [ZOMBIE]    "zombie"
456   };
457   int i;
458   struct proc *p;
459   char *state;
460   uint pc[10];
461 
462   for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
463     if(p->state == UNUSED)
464       continue;
465     if(p->state >= 0 && p->state < NELEM(states) && states[p->state])
466       state = states[p->state];
467     else
468       state = "???";
469     cprintf("%d %s %s", p->pid, state, p->name);
470     if(p->state == SLEEPING){
471       getcallerpcs((uint*)p->context->ebp+2, pc);
472       for(i=0; i<10 && pc[i] != 0; i++)
473         cprintf(" %p", pc[i]);
474     }
475     cprintf("\n");
476   }
477 }
478