xref: /xv6-public/proc.c (revision 2e2d14c2)
#include "types.h"
#include "defs.h"
#include "param.h"
#include "memlayout.h"
#include "mmu.h"
#include "x86.h"
#include "proc.h"
#include "spinlock.h"

struct {
  struct spinlock lock;
  struct proc proc[NPROC];
} ptable;

static struct proc *initproc;

int nextpid = 1;
extern void forkret(void);
extern void trapret(void);

static void wakeup1(void *chan);

void
pinit(void)
{
  initlock(&ptable.lock, "ptable");
}

// Must be called with interrupts disabled
int
cpuid() {
  return mycpu()-cpus;
}

// Must be called with interrupts disabled
struct cpu*
mycpu(void)
{
  if(readeflags()&FL_IF)
    panic("mycpu called with interrupts enabled\n");
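  // With interrupts off, this thread cannot be moved to another CPU between
  // reading the local APIC ID and indexing cpus[], so the returned pointer
  // refers to the CPU we are actually running on.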
  return &cpus[lapiccpunum()];
}

// Disable interrupts so that we are not rescheduled
// while reading proc from the cpu structure
struct proc*
myproc(void) {
  struct cpu *c;
  struct proc *p;
  pushcli();
  c = mycpu();
  p = c->proc;
  popcli();
  return p;
}

//PAGEBREAK: 32
// Look in the process table for an UNUSED proc.
// If found, change state to EMBRYO and initialize
// state required to run in the kernel.
// Otherwise return 0.
static struct proc*
allocproc(void)
{
  struct proc *p;
  char *sp;

  acquire(&ptable.lock);

  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++)
    if(p->state == UNUSED)
      goto found;

  release(&ptable.lock);
  return 0;

found:
  p->state = EMBRYO;
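  // ptable.lock is still held here, so incrementing nextpid yields a unique pid.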
  p->pid = nextpid++;

  release(&ptable.lock);

  // Allocate kernel stack.
  if((p->kstack = kalloc()) == 0){
    p->state = UNUSED;
    return 0;
  }
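  // Lay out the new kernel stack, growing down from the top of the page:
  // a trap frame, then the address of trapret as a fake return address,
  // then a context whose saved eip makes the first swtch start in forkret.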
  sp = p->kstack + KSTACKSIZE;

  // Leave room for trap frame.
  sp -= sizeof *p->tf;
  p->tf = (struct trapframe*)sp;

  // Set up new context to start executing at forkret,
  // which returns to trapret.
  sp -= 4;
  *(uint*)sp = (uint)trapret;

  sp -= sizeof *p->context;
  p->context = (struct context*)sp;
  memset(p->context, 0, sizeof *p->context);
  p->context->eip = (uint)forkret;

  return p;
}

//PAGEBREAK: 32
// Set up first user process.
void
userinit(void)
{
  struct proc *p;
  extern char _binary_initcode_start[], _binary_initcode_size[];

  p = allocproc();

  initproc = p;
  if((p->pgdir = setupkvm()) == 0)
    panic("userinit: out of memory?");
  inituvm(p->pgdir, _binary_initcode_start, (int)_binary_initcode_size);
  p->sz = PGSIZE;
  memset(p->tf, 0, sizeof(*p->tf));
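  // Segment selectors are (GDT index << 3) | privilege level; DPL_USER (3)
  // in the low bits makes these user-mode selectors.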
  p->tf->cs = (SEG_UCODE << 3) | DPL_USER;
  p->tf->ds = (SEG_UDATA << 3) | DPL_USER;
  p->tf->es = p->tf->ds;
  p->tf->ss = p->tf->ds;
  p->tf->eflags = FL_IF;
  p->tf->esp = PGSIZE;
  p->tf->eip = 0;  // beginning of initcode.S

  safestrcpy(p->name, "initcode", sizeof(p->name));
  p->cwd = namei("/");

  // this assignment to p->state lets other cores
  // run this process. the acquire forces the above
  // writes to be visible, and the lock is also needed
  // because the assignment might not be atomic.
  acquire(&ptable.lock);

  p->state = RUNNABLE;

  release(&ptable.lock);
}

// Grow current process's memory by n bytes.
// Return 0 on success, -1 on failure.
int
growproc(int n)
{
  uint sz;
  struct proc *curproc = myproc();

  sz = curproc->sz;
  if(n > 0){
    if((sz = allocuvm(curproc->pgdir, sz, sz + n)) == 0)
      return -1;
  } else if(n < 0){
    if((sz = deallocuvm(curproc->pgdir, sz, sz + n)) == 0)
      return -1;
  }
  curproc->sz = sz;
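  // switchuvm reloads %cr3, dropping any stale TLB entries for pages that
  // deallocuvm may have just freed.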
  switchuvm(curproc);
  return 0;
}

// Create a new process as a copy of the current process, which becomes its parent.
// Sets up stack to return as if from system call.
// The child is marked RUNNABLE before fork returns.
int
fork(void)
{
  int i, pid;
  struct proc *np;
  struct proc *curproc = myproc();

  // Allocate process.
  if((np = allocproc()) == 0){
    return -1;
  }

  // Copy process state from the current process.
  if((np->pgdir = copyuvm(curproc->pgdir, curproc->sz)) == 0){
    kfree(np->kstack);
    np->kstack = 0;
    np->state = UNUSED;
    return -1;
  }
  np->sz = curproc->sz;
  np->parent = curproc;
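  // Duplicate the parent's trap frame so the child resumes at the same
  // user-space instruction with the same register values.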
  *np->tf = *curproc->tf;

  // Clear %eax so that fork returns 0 in the child.
  np->tf->eax = 0;

  for(i = 0; i < NOFILE; i++)
    if(curproc->ofile[i])
      np->ofile[i] = filedup(curproc->ofile[i]);
  np->cwd = idup(curproc->cwd);

  safestrcpy(np->name, curproc->name, sizeof(curproc->name));

  pid = np->pid;

  acquire(&ptable.lock);

  np->state = RUNNABLE;

  release(&ptable.lock);

  return pid;
}

// Exit the current process.  Does not return.
// An exited process remains in the zombie state
// until its parent calls wait() to find out it exited.
void
exit(void)
{
  struct proc *curproc = myproc();
  struct proc *p;
  int fd;

  if(curproc == initproc)
    panic("init exiting");

  // Close all open files.
  for(fd = 0; fd < NOFILE; fd++){
    if(curproc->ofile[fd]){
      fileclose(curproc->ofile[fd]);
      curproc->ofile[fd] = 0;
    }
  }

  begin_op();
  iput(curproc->cwd);
  end_op();
  curproc->cwd = 0;

  acquire(&ptable.lock);

  // Parent might be sleeping in wait().
  wakeup1(curproc->parent);

  // Pass abandoned children to init.
  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
    if(p->parent == curproc){
      p->parent = initproc;
      if(p->state == ZOMBIE)
        wakeup1(initproc);
    }
  }

  // Jump into the scheduler, never to return.
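  // exit() is still running on this process's own kernel stack, so it cannot
  // free its kstack or page table here; it becomes a ZOMBIE and the parent
  // releases those resources in wait().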
  curproc->state = ZOMBIE;
  sched();
  panic("zombie exit");
}

// Wait for a child process to exit and return its pid.
// Return -1 if this process has no children.
int
wait(void)
{
  struct proc *p;
  int havekids, pid;
  struct proc *curproc = myproc();

  acquire(&ptable.lock);
  for(;;){
    // Scan through table looking for exited children.
    havekids = 0;
    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
      if(p->parent != curproc)
        continue;
      havekids = 1;
      if(p->state == ZOMBIE){
        // Found one.
        pid = p->pid;
        kfree(p->kstack);
        p->kstack = 0;
        freevm(p->pgdir);
        p->pid = 0;
        p->parent = 0;
        p->name[0] = 0;
        p->killed = 0;
        p->state = UNUSED;
        release(&ptable.lock);
        return pid;
      }
    }

    // No point waiting if we don't have any children.
    if(!havekids || curproc->killed){
      release(&ptable.lock);
      return -1;
    }

    // Wait for children to exit.  (See wakeup1 call in exit.)
    sleep(curproc, &ptable.lock);  //DOC: wait-sleep
  }
}

//PAGEBREAK: 42
// Per-CPU process scheduler.
// Each CPU calls scheduler() after setting itself up.
// Scheduler never returns.  It loops, doing:
//  - choose a process to run
//  - swtch to start running that process
//  - eventually that process transfers control
//      via swtch back to the scheduler.
void
scheduler(void)
{
  struct proc *p;
  struct cpu *c = mycpu();
  c->proc = 0;

  for(;;){
    // Enable interrupts on this processor.
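    // With interrupts enabled, a device interrupt can make a sleeping
    // process RUNNABLE even while this loop finds nothing to run, so an
    // otherwise idle CPU does not spin forever with interrupts off.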
    sti();

    // Loop over process table looking for process to run.
    acquire(&ptable.lock);
    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
      if(p->state != RUNNABLE)
        continue;

      // Switch to chosen process.  It is the process's job
      // to release ptable.lock and then reacquire it
      // before jumping back to us.
      c->proc = p;
      switchuvm(p);
      p->state = RUNNING;

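      // swtch saves this CPU's scheduler registers in c->scheduler and loads
      // p's context; it returns here only after the process switches back
      // via sched().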
      swtch(&(c->scheduler), p->context);
      switchkvm();

      // Process is done running for now.
      // It should have changed its p->state before coming back.
      c->proc = 0;
    }
    release(&ptable.lock);

  }
}

// Enter scheduler.  Must hold only ptable.lock
// and have changed proc->state. Saves and restores
// intena because intena is a property of this
// kernel thread, not this CPU. It should
// be proc->intena and proc->ncli, but that would
// break in the few places where a lock is held but
// there's no process.
void
sched(void)
{
  int intena;
  struct proc *p = myproc();

  if(!holding(&ptable.lock))
    panic("sched ptable.lock");
  if(mycpu()->ncli != 1)
    panic("sched locks");
  if(p->state == RUNNING)
    panic("sched running");
  if(readeflags()&FL_IF)
    panic("sched interruptible");
  intena = mycpu()->intena;
  swtch(&p->context, mycpu()->scheduler);
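  // Execution resumes here when the scheduler picks this thread again,
  // possibly on a different CPU, so mycpu() is re-read rather than cached
  // across the switch.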
  mycpu()->intena = intena;
}

// Give up the CPU for one scheduling round.
void
yield(void)
{
  acquire(&ptable.lock);  //DOC: yieldlock
  myproc()->state = RUNNABLE;
  sched();
  release(&ptable.lock);
}

// A fork child's very first scheduling by scheduler()
// will swtch here.  "Return" to user space.
void
forkret(void)
{
  static int first = 1;
  // Still holding ptable.lock from scheduler.
  release(&ptable.lock);

  if (first) {
    // Some initialization functions must be run in the context
    // of a regular process (e.g., they call sleep), and thus cannot
    // be run from main().
    first = 0;
    iinit(ROOTDEV);
    initlog(ROOTDEV);
  }

  // Return to "caller", actually trapret (see allocproc).
}

// Atomically release lock and sleep on chan.
// Reacquires lock when awakened.
void
sleep(void *chan, struct spinlock *lk)
{
  struct proc *p = myproc();

  if(p == 0)
    panic("sleep");

  if(lk == 0)
    panic("sleep without lk");

  // Must acquire ptable.lock in order to
  // change p->state and then call sched.
  // Once we hold ptable.lock, we can be
  // guaranteed that we won't miss any wakeup
  // (wakeup runs with ptable.lock locked),
  // so it's okay to release lk.
  if(lk != &ptable.lock){  //DOC: sleeplock0
    acquire(&ptable.lock);  //DOC: sleeplock1
    release(lk);
  }
  // Go to sleep.
  p->chan = chan;
  p->state = SLEEPING;

  sched();
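  // We return here after another thread's wakeup(chan) marks this process
  // RUNNABLE and the scheduler runs it again.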

  // Tidy up.
  p->chan = 0;

  // Reacquire original lock.
  if(lk != &ptable.lock){  //DOC: sleeplock2
    release(&ptable.lock);
    acquire(lk);
  }
}

//PAGEBREAK!
// Wake up all processes sleeping on chan.
// The ptable lock must be held.
static void
wakeup1(void *chan)
{
  struct proc *p;

  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++)
    if(p->state == SLEEPING && p->chan == chan)
      p->state = RUNNABLE;
}

// Wake up all processes sleeping on chan.
void
wakeup(void *chan)
{
  acquire(&ptable.lock);
  wakeup1(chan);
  release(&ptable.lock);
}

// Kill the process with the given pid.
// Process won't exit until it returns
// to user space (see trap in trap.c).
int
kill(int pid)
{
  struct proc *p;

  acquire(&ptable.lock);
  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
    if(p->pid == pid){
      p->killed = 1;
      // Wake process from sleep if necessary.
      if(p->state == SLEEPING)
        p->state = RUNNABLE;
      release(&ptable.lock);
      return 0;
    }
  }
  release(&ptable.lock);
  return -1;
}

//PAGEBREAK: 36
// Print a process listing to console.  For debugging.
// Runs when user types ^P on console.
// No lock to avoid wedging a stuck machine further.
void
procdump(void)
{
  static char *states[] = {
  [UNUSED]    "unused",
  [EMBRYO]    "embryo",
  [SLEEPING]  "sleep ",
  [RUNNABLE]  "runble",
  [RUNNING]   "run   ",
  [ZOMBIE]    "zombie"
  };
  int i;
  struct proc *p;
  char *state;
  uint pc[10];

  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
    if(p->state == UNUSED)
      continue;
    if(p->state >= 0 && p->state < NELEM(states) && states[p->state])
      state = states[p->state];
    else
      state = "???";
    cprintf("%d %s %s", p->pid, state, p->name);
    if(p->state == SLEEPING){
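      // For a sleeping process, print its kernel call stack by walking the
      // saved %ebp chain recorded in its context.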
      getcallerpcs((uint*)p->context->ebp+2, pc);
      for(i=0; i<10 && pc[i] != 0; i++)
        cprintf(" %p", pc[i]);
    }
    cprintf("\n");
  }
}