xref: /xv6-public/proc.c (revision 7894fcd2)
1 #include "types.h"
2 #include "defs.h"
3 #include "param.h"
4 #include "memlayout.h"
5 #include "mmu.h"
6 #include "x86.h"
7 #include "proc.h"
8 #include "spinlock.h"
9 
10 struct {
11   struct spinlock lock;
12   struct proc proc[NPROC];
13 } ptable;
14 
15 static struct proc *initproc;
16 
17 int nextpid = 1;
18 extern void forkret(void);
19 extern void trapret(void);
20 
21 static void wakeup1(void *chan);
22 
23 void
24 pinit(void)
25 {
26   initlock(&ptable.lock, "ptable");
27 }
28 
29 //PAGEBREAK: 32
30 // Look in the process table for an UNUSED proc.
31 // If found, change state to EMBRYO and initialize
32 // state required to run in the kernel.
33 // Otherwise return 0.
34 // Must hold ptable.lock.
35 static struct proc*
36 allocproc(void)
37 {
38   struct proc *p;
39   char *sp;
40 
41   for(p = ptable.proc; p < &ptable.proc[NPROC]; p++)
42     if(p->state == UNUSED)
43       goto found;
44   return 0;
45 
46 found:
47   p->state = EMBRYO;
48   p->pid = nextpid++;
49 
50   // Allocate kernel stack.
51   if((p->kstack = kalloc()) == 0){
52     p->state = UNUSED;
53     return 0;
54   }
55   sp = p->kstack + KSTACKSIZE;
56 
57   // Leave room for trap frame.
58   sp -= sizeof *p->tf;
59   p->tf = (struct trapframe*)sp;
60 
61   // Set up new context to start executing at forkret,
62   // which returns to trapret.
63   sp -= 4;
64   *(uint*)sp = (uint)trapret;
65 
66   sp -= sizeof *p->context;
67   p->context = (struct context*)sp;
68   memset(p->context, 0, sizeof *p->context);
69   p->context->eip = (uint)forkret;
70 
71   return p;
72 }
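// A sketch of the kernel stack that allocproc() leaves behind, drawn top
// down from p->kstack + KSTACKSIZE (layout only; addresses are illustrative):
//
//       +----------------------+  <- p->kstack + KSTACKSIZE
//       | struct trapframe     |  <- p->tf
//       +----------------------+
//       | address of trapret   |  fake return PC for forkret
//       +----------------------+
//       | struct context       |  <- p->context, with eip = forkret
//       +----------------------+  (lower addresses: rest of the stack)
//
// When the scheduler swtch()es to p->context for the first time, execution
// begins in forkret(); forkret's return pops the trapret address and falls
// into trapret, which restores p->tf and drops into user mode.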
73 
74 //PAGEBREAK: 32
75 // Set up first user process.
76 void
77 userinit(void)
78 {
79   struct proc *p;
80   extern char _binary_initcode_start[], _binary_initcode_size[];
81 
82   acquire(&ptable.lock);
83 
84   p = allocproc();
85   initproc = p;
86   if((p->pgdir = setupkvm()) == 0)
87     panic("userinit: out of memory?");
88   inituvm(p->pgdir, _binary_initcode_start, (int)_binary_initcode_size);
89   p->sz = PGSIZE;
90   memset(p->tf, 0, sizeof(*p->tf));
91   p->tf->cs = (SEG_UCODE << 3) | DPL_USER;
92   p->tf->ds = (SEG_UDATA << 3) | DPL_USER;
93   p->tf->es = p->tf->ds;
94   p->tf->ss = p->tf->ds;
95   p->tf->eflags = FL_IF;
96   p->tf->esp = PGSIZE;
97   p->tf->eip = 0;  // beginning of initcode.S
98 
99   safestrcpy(p->name, "initcode", sizeof(p->name));
100   p->cwd = namei("/");
101 
102   p->state = RUNNABLE;
103 
104   release(&ptable.lock);
105 }
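// userinit() is called once from main(); the page installed by inituvm()
// holds the compiled contents of initcode.S, which behaves roughly like the
// C sketch below (not code in this file):
//
//   char *argv[] = { "/init", 0 };
//   exec("/init", argv);        // replace initcode with the real /init
//   for(;;) exit();             // only reached if exec fails
//
// /init in turn opens the console and spawns the shell.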
106 
107 // Grow current process's memory by n bytes.
108 // Return 0 on success, -1 on failure.
109 int
110 growproc(int n)
111 {
112   uint sz;
113 
114   sz = proc->sz;
115   if(n > 0){
116     if((sz = allocuvm(proc->pgdir, sz, sz + n)) == 0)
117       return -1;
118   } else if(n < 0){
119     if((sz = deallocuvm(proc->pgdir, sz, sz + n)) == 0)
120       return -1;
121   }
122   proc->sz = sz;
123   switchuvm(proc);
124   return 0;
125 }
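// growproc() is reached from the sbrk system call; the handler in sysproc.c
// looks roughly like this sketch (argint fetches the syscall's integer
// argument):
//
//   int
//   sys_sbrk(void)
//   {
//     int addr, n;
//
//     if(argint(0, &n) < 0)
//       return -1;
//     addr = proc->sz;          // old break is sbrk's return value
//     if(growproc(n) < 0)
//       return -1;
//     return addr;
//   }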
126 
127 // Create a new process, copying the current process as the parent.
128 // Sets up the child's stack to return as if from a system call.
129 // Marks the child RUNNABLE and returns its pid, or -1 on failure.
130 int
131 fork(void)
132 {
133   int i, pid;
134   struct proc *np;
135 
136   acquire(&ptable.lock);
137 
138   // Allocate process.
139   if((np = allocproc()) == 0){
140     release(&ptable.lock);
141     return -1;
142   }
143 
144   // Copy process state from the current process.
145   if((np->pgdir = copyuvm(proc->pgdir, proc->sz)) == 0){
146     kfree(np->kstack);
147     np->kstack = 0;
148     np->state = UNUSED;
149     release(&ptable.lock);
150     return -1;
151   }
152   np->sz = proc->sz;
153   np->parent = proc;
154   *np->tf = *proc->tf;
155 
156   // Clear %eax so that fork returns 0 in the child.
157   np->tf->eax = 0;
158 
159   for(i = 0; i < NOFILE; i++)
160     if(proc->ofile[i])
161       np->ofile[i] = filedup(proc->ofile[i]);
162   np->cwd = idup(proc->cwd);
163 
164   safestrcpy(np->name, proc->name, sizeof(proc->name));
165 
166   pid = np->pid;
167 
168   np->state = RUNNABLE;
169 
170   release(&ptable.lock);
171 
172   return pid;
173 }
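// From user space, the eax trick above is what makes the familiar idiom
// work; a hypothetical user program (the program name and argv are
// illustrative only):
//
//   char *argv[] = { "echo", "hi", 0 };
//   int pid = fork();
//   if(pid == 0){
//     exec("echo", argv);       // child: fork() returned 0 via np->tf->eax
//     exit();                   // only reached if exec fails
//   } else if(pid > 0){
//     wait();                   // parent: reap the child when it exits
//   }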
174 
175 // Exit the current process.  Does not return.
176 // An exited process remains in the zombie state
177 // until its parent calls wait() to find out it exited.
178 void
179 exit(void)
180 {
181   struct proc *p;
182   int fd;
183 
184   if(proc == initproc)
185     panic("init exiting");
186 
187   // Close all open files.
188   for(fd = 0; fd < NOFILE; fd++){
189     if(proc->ofile[fd]){
190       fileclose(proc->ofile[fd]);
191       proc->ofile[fd] = 0;
192     }
193   }
194 
195   begin_op();
196   iput(proc->cwd);
197   end_op();
198   proc->cwd = 0;
199 
200   acquire(&ptable.lock);
201 
202   // Parent might be sleeping in wait().
203   wakeup1(proc->parent);
204 
205   // Pass abandoned children to init.
206   for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
207     if(p->parent == proc){
208       p->parent = initproc;
209       if(p->state == ZOMBIE)
210         wakeup1(initproc);
211     }
212   }
213 
214   // Jump into the scheduler, never to return.
215   proc->state = ZOMBIE;
216   sched();
217   panic("zombie exit");
218 }
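// Note that exit() frees neither p->kstack nor p->pgdir: the exiting process
// is still running on that kernel stack until the final sched(), so those
// resources are torn down later by the parent in wait() (or by init, after
// the reparenting above).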
219 
220 // Wait for a child process to exit and return its pid.
221 // Return -1 if this process has no children.
222 int
223 wait(void)
224 {
225   struct proc *p;
226   int havekids, pid;
227 
228   acquire(&ptable.lock);
229   for(;;){
230     // Scan through table looking for zombie children.
231     havekids = 0;
232     for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
233       if(p->parent != proc)
234         continue;
235       havekids = 1;
236       if(p->state == ZOMBIE){
237         // Found one.
238         pid = p->pid;
239         kfree(p->kstack);
240         p->kstack = 0;
241         freevm(p->pgdir);
242         p->pid = 0;
243         p->parent = 0;
244         p->name[0] = 0;
245         p->killed = 0;
246         p->state = UNUSED;
247         release(&ptable.lock);
248         return pid;
249       }
250     }
251 
252     // No point waiting if we don't have any children.
253     if(!havekids || proc->killed){
254       release(&ptable.lock);
255       return -1;
256     }
257 
258     // Wait for children to exit.  (See wakeup1 call in exit.)
259     sleep(proc, &ptable.lock);  //DOC: wait-sleep
260   }
261 }
262 
263 //PAGEBREAK: 42
264 // Per-CPU process scheduler.
265 // Each CPU calls scheduler() after setting itself up.
266 // Scheduler never returns.  It loops, doing:
267 //  - choose a process to run
268 //  - swtch to start running that process
269 //  - eventually that process transfers control
270 //      via swtch back to the scheduler.
271 void
272 scheduler(void)
273 {
274   struct proc *p;
275 
276   for(;;){
277     // Enable interrupts on this processor.
278     sti();
279 
280     // Loop over process table looking for process to run.
281     acquire(&ptable.lock);
282     for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
283       if(p->state != RUNNABLE)
284         continue;
285 
286       // Switch to chosen process.  It is the process's job
287       // to release ptable.lock and then reacquire it
288       // before jumping back to us.
289       proc = p;
290       switchuvm(p);
291       p->state = RUNNING;
292       swtch(&cpu->scheduler, p->context);
293       switchkvm();
294 
295       // Process is done running for now.
296       // It should have changed its p->state before coming back.
297       proc = 0;
298     }
299     release(&ptable.lock);
300 
301   }
302 }
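// One pass of the hand-off looks roughly like this (a control-flow sketch,
// not code):
//
//   scheduler()                            process p
//   -----------                            ---------
//   acquire(&ptable.lock)
//   swtch(&cpu->scheduler, p->context) ->  resumes inside sched() (or in
//                                          forkret() on p's first run);
//                                          p releases ptable.lock, runs,
//                                          later reacquires it and calls
//   resumes after its swtch()          <-  swtch(&proc->context, cpu->scheduler)
//   release(&ptable.lock)
//
// so ptable.lock is held across every swtch() in both directions, which is
// exactly what the comment above asks of the process.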
303 
304 // Enter scheduler.  Must hold only ptable.lock
305 // and have changed proc->state.
306 void
307 sched(void)
308 {
309   int intena;
310 
311   if(!holding(&ptable.lock))
312     panic("sched ptable.lock");
313   if(cpu->ncli != 1)
314     panic("sched locks");
315   if(proc->state == RUNNING)
316     panic("sched running");
317   if(readeflags()&FL_IF)
318     panic("sched interruptible");
319   intena = cpu->intena;
320   swtch(&proc->context, cpu->scheduler);
321   cpu->intena = intena;
322 }
323 
324 // Give up the CPU for one scheduling round.
325 void
326 yield(void)
327 {
328   acquire(&ptable.lock);  //DOC: yieldlock
329   proc->state = RUNNABLE;
330   sched();
331   release(&ptable.lock);
332 }
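// yield() is how the timer interrupt preempts a running process; the
// relevant branch in trap() (trap.c) is roughly:
//
//   if(proc && proc->state == RUNNING && tf->trapno == T_IRQ0+IRQ_TIMER)
//     yield();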
333 
334 // A fork child's very first scheduling by scheduler()
335 // will swtch here.  "Return" to user space.
336 void
337 forkret(void)
338 {
339   static int first = 1;
340   // Still holding ptable.lock from scheduler.
341   release(&ptable.lock);
342 
343   if (first) {
344     // Some initialization functions must be run in the context
345     // of a regular process (e.g., they call sleep), and thus cannot
346     // be run from main().
347     first = 0;
348     iinit(ROOTDEV);
349     initlog(ROOTDEV);
350   }
351 
352   // Return to "caller", actually trapret (see allocproc).
353 }
354 
355 // Atomically release lock and sleep on chan.
356 // Reacquires lock when awakened.
357 void
358 sleep(void *chan, struct spinlock *lk)
359 {
360   if(proc == 0)
361     panic("sleep");
362 
363   if(lk == 0)
364     panic("sleep without lk");
365 
366   // Must acquire ptable.lock in order to
367   // change p->state and then call sched.
368   // Once we hold ptable.lock, we can be
369   // guaranteed that we won't miss any wakeup
370   // (wakeup runs with ptable.lock locked),
371   // so it's okay to release lk.
372   if(lk != &ptable.lock){  //DOC: sleeplock0
373     acquire(&ptable.lock);  //DOC: sleeplock1
374     release(lk);
375   }
376 
377   // Go to sleep.
378   proc->chan = chan;
379   proc->state = SLEEPING;
380   sched();
381 
382   // Tidy up.
383   proc->chan = 0;
384 
385   // Reacquire original lock.
386   if(lk != &ptable.lock){  //DOC: sleeplock2
387     release(&ptable.lock);
388     acquire(lk);
389   }
390 }
391 
392 //PAGEBREAK!
393 // Wake up all processes sleeping on chan.
394 // The ptable lock must be held.
395 static void
396 wakeup1(void *chan)
397 {
398   struct proc *p;
399 
400   for(p = ptable.proc; p < &ptable.proc[NPROC]; p++)
401     if(p->state == SLEEPING && p->chan == chan)
402       p->state = RUNNABLE;
403 }
404 
405 // Wake up all processes sleeping on chan.
406 void
407 wakeup(void *chan)
408 {
409   acquire(&ptable.lock);
410   wakeup1(chan);
411   release(&ptable.lock);
412 }
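// The usual pattern built on sleep()/wakeup() pairs a condition with a lock,
// as in this hypothetical sketch (box, produce, and consume are made-up
// names, not part of xv6):
//
//   struct {
//     struct spinlock lock;
//     int ready;
//   } box;
//
//   void
//   produce(void)
//   {
//     acquire(&box.lock);
//     box.ready = 1;
//     wakeup(&box.ready);              // wake anyone sleeping on &box.ready
//     release(&box.lock);
//   }
//
//   void
//   consume(void)
//   {
//     acquire(&box.lock);
//     while(box.ready == 0)
//       sleep(&box.ready, &box.lock);  // atomically releases box.lock,
//                                      // reacquires it before returning
//     box.ready = 0;
//     release(&box.lock);
//   }
//
// The while loop matters: wakeup() marks every sleeper RUNNABLE, so a woken
// consumer must recheck the condition before proceeding.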
413 
414 // Kill the process with the given pid.
415 // Process won't exit until it returns
416 // to user space (see trap in trap.c).
417 int
418 kill(int pid)
419 {
420   struct proc *p;
421 
422   acquire(&ptable.lock);
423   for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
424     if(p->pid == pid){
425       p->killed = 1;
426       // Wake process from sleep if necessary.
427       if(p->state == SLEEPING)
428         p->state = RUNNABLE;
429       release(&ptable.lock);
430       return 0;
431     }
432   }
433   release(&ptable.lock);
434   return -1;
435 }
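// The victim notices p->killed only when it next crosses the kernel/user
// boundary; trap() (trap.c) checks roughly:
//
//   if(proc && proc->killed && (tf->cs&3) == DPL_USER)
//     exit();
//
// which is also why loops that sleep, such as wait() above, test
// proc->killed themselves after waking.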
436 
437 //PAGEBREAK: 36
438 // Print a process listing to console.  For debugging.
439 // Runs when user types ^P on console.
440 // No lock to avoid wedging a stuck machine further.
441 void
442 procdump(void)
443 {
444   static char *states[] = {
445   [UNUSED]    "unused",
446   [EMBRYO]    "embryo",
447   [SLEEPING]  "sleep ",
448   [RUNNABLE]  "runble",
449   [RUNNING]   "run   ",
450   [ZOMBIE]    "zombie"
451   };
452   int i;
453   struct proc *p;
454   char *state;
455   uint pc[10];
456 
457   for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
458     if(p->state == UNUSED)
459       continue;
460     if(p->state >= 0 && p->state < NELEM(states) && states[p->state])
461       state = states[p->state];
462     else
463       state = "???";
464     cprintf("%d %s %s", p->pid, state, p->name);
465     if(p->state == SLEEPING){
466       getcallerpcs((uint*)p->context->ebp+2, pc);
467       for(i=0; i<10 && pc[i] != 0; i++)
468         cprintf(" %p", pc[i]);
469     }
470     cprintf("\n");
471   }
472 }
473