xref: /xv6-public/proc.c (revision fbb4c094)
1 #include "types.h"
2 #include "defs.h"
3 #include "param.h"
4 #include "memlayout.h"
5 #include "mmu.h"
6 #include "x86.h"
7 #include "proc.h"
8 #include "spinlock.h"
9 
10 struct {
11   struct spinlock lock;
12   struct proc proc[NPROC];
13 } ptable;
14 
15 static struct proc *initproc;
16 
17 int nextpid = 1;
18 extern void forkret(void);
19 extern void trapret(void);
20 
21 static void wakeup1(void *chan);
22 
23 void
24 pinit(void)
25 {
26   initlock(&ptable.lock, "ptable");
27 }
28 
29 // XXX get rid of?
30 int
31 cpuid() {
32   return mycpu()-cpus;
33 }
34 
35 // Disable interrupts so that we are not rescheduled
36 // while reading proc from the cpu structure
37 struct proc*
38 myproc(void) {
39   struct cpu *c;
40   struct proc *p;
41   pushcli();
42   c = mycpu();
43   p = c->proc;
44   popcli();
45   return p;
46 }
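
// pushcli()/popcli() (see spinlock.c) are the nesting-aware counterparts of
// cli/sti: interrupts stay disabled until the outermost popcli(), so the
// cpu/proc pair read above cannot be split by a timer interrupt that would
// reschedule this code onto a different CPU.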
47 
48 //PAGEBREAK: 32
49 // Look in the process table for an UNUSED proc.
50 // If found, change state to EMBRYO and initialize
51 // state required to run in the kernel.
52 // Otherwise return 0.
53 static struct proc*
54 allocproc(void)
55 {
56   struct proc *p;
57   char *sp;
58 
59   acquire(&ptable.lock);
60 
61   for(p = ptable.proc; p < &ptable.proc[NPROC]; p++)
62     if(p->state == UNUSED)
63       goto found;
64 
65   release(&ptable.lock);
66   return 0;
67 
68 found:
69   p->state = EMBRYO;
70   p->pid = nextpid++;
71 
72   release(&ptable.lock);
73 
74   // Allocate kernel stack.
75   if((p->kstack = kalloc()) == 0){
76     p->state = UNUSED;
77     return 0;
78   }
79   sp = p->kstack + KSTACKSIZE;
80 
81   // Leave room for trap frame.
82   sp -= sizeof *p->tf;
83   p->tf = (struct trapframe*)sp;
84 
85   // Set up new context to start executing at forkret,
86   // which returns to trapret.
87   sp -= 4;
88   *(uint*)sp = (uint)trapret;
89 
90   sp -= sizeof *p->context;
91   p->context = (struct context*)sp;
92   memset(p->context, 0, sizeof *p->context);
93   p->context->eip = (uint)forkret;
94 
95   return p;
96 }
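
// Layout of the new kernel stack built above (a sketch of what the code
// implies; the stack grows downward from the top):
//
//   p->kstack + KSTACKSIZE -> +--------------------+
//                             |     trapframe      |  <- p->tf
//                             +--------------------+
//                             | address of trapret |  (fake return address)
//                             +--------------------+
//                             |      context       |  <- p->context, eip = forkret
//                             +--------------------+
//   p->kstack             ->  |       unused       |
//                             +--------------------+
//
// When scheduler() swtch()es to p->context, execution "returns" into forkret;
// forkret's own return then pops the trapret address, and trapret restores the
// trapframe and drops into user mode.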
97 
98 //PAGEBREAK: 32
99 // Set up first user process.
100 void
101 userinit(void)
102 {
103   struct proc *p;
104   extern char _binary_initcode_start[], _binary_initcode_size[];
105 
106   p = allocproc();
107 
108   initproc = p;
109   if((p->pgdir = setupkvm()) == 0)
110     panic("userinit: out of memory?");
111   inituvm(p->pgdir, _binary_initcode_start, (int)_binary_initcode_size);
112   p->sz = PGSIZE;
113   memset(p->tf, 0, sizeof(*p->tf));
114   p->tf->cs = (SEG_UCODE << 3) | DPL_USER;
115   p->tf->ds = (SEG_UDATA << 3) | DPL_USER;
116   p->tf->es = p->tf->ds;
117   p->tf->ss = p->tf->ds;
118   p->tf->eflags = FL_IF;
119   p->tf->esp = PGSIZE;
120   p->tf->eip = 0;  // beginning of initcode.S
121 
122   safestrcpy(p->name, "initcode", sizeof(p->name));
123   p->cwd = namei("/");
124 
125   // this assignment to p->state lets other cores
126   // run this process. the acquire forces the above
127   // writes to be visible, and the lock is also needed
128   // because the assignment might not be atomic.
129   acquire(&ptable.lock);
130 
131   p->state = RUNNABLE;
132 
133   release(&ptable.lock);
134 }
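
// initcode.S is a tiny user program assembled into the kernel image (hence the
// _binary_initcode_* linker symbols). inituvm() maps it at virtual address 0,
// which is why eip is set to 0 above; its only job is to invoke the exec system
// call on "/init", the ancestor of every other user process.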
135 
136 // Grow current process's memory by n bytes.
137 // Return 0 on success, -1 on failure.
138 int
139 growproc(int n)
140 {
141   uint sz;
142   struct proc *curproc = myproc();
143 
144   sz = curproc->sz;
145   if(n > 0){
146     if((sz = allocuvm(curproc->pgdir, sz, sz + n)) == 0)
147       return -1;
148   } else if(n < 0){
149     if((sz = deallocuvm(curproc->pgdir, sz, sz + n)) == 0)
150       return -1;
151   }
152   curproc->sz = sz;
153   switchuvm(curproc);
154   return 0;
155 }
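
// growproc() is the kernel side of the sbrk system call. Roughly (a sketch of
// the sys_sbrk wrapper in sysproc.c, quoted from memory rather than this tree):
//
//   int
//   sys_sbrk(void)
//   {
//     int addr, n;
//
//     if(argint(0, &n) < 0)
//       return -1;
//     addr = myproc()->sz;        // old break, returned to the caller
//     if(growproc(n) < 0)
//       return -1;
//     return addr;
//   }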
156 
157 // Create a new process copying p as the parent.
158 // Sets up stack to return as if from system call.
159 // Caller must set state of returned proc to RUNNABLE.
160 int
161 fork(void)
162 {
163   int i, pid;
164   struct proc *np;
165   struct proc *curproc = myproc();
166 
167   // Allocate process.
168   if((np = allocproc()) == 0){
169     return -1;
170   }
171 
172   // Copy process state from proc.
173   if((np->pgdir = copyuvm(curproc->pgdir, curproc->sz)) == 0){
174     kfree(np->kstack);
175     np->kstack = 0;
176     np->state = UNUSED;
177     return -1;
178   }
179   np->sz = curproc->sz;
180   np->parent = curproc;
181   *np->tf = *curproc->tf;
182 
183   // Clear %eax so that fork returns 0 in the child.
184   np->tf->eax = 0;
185 
186   for(i = 0; i < NOFILE; i++)
187     if(curproc->ofile[i])
188       np->ofile[i] = filedup(curproc->ofile[i]);
189   np->cwd = idup(curproc->cwd);
190 
191   safestrcpy(np->name, curproc->name, sizeof(curproc->name));
192 
193   pid = np->pid;
194 
195   acquire(&ptable.lock);
196 
197   np->state = RUNNABLE;
198 
199   release(&ptable.lock);
200 
201   return pid;
202 }
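
// Because np->tf->eax is cleared above, the child observes fork() == 0 while
// the parent gets the child's pid. A typical user-level caller (sketch, not
// part of this file; the program path and argv are made up for illustration):
//
//   int pid = fork();
//   if(pid == 0){
//     // child
//     exec("echo", argv);
//     exit();                 // only reached if exec fails
//   } else if(pid > 0){
//     wait();                 // parent reaps the child; see wait() below
//   }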
203 
204 // Exit the current process.  Does not return.
205 // An exited process remains in the zombie state
206 // until its parent calls wait() to find out it exited.
207 void
208 exit(void)
209 {
210   struct proc *curproc = myproc();
211   struct proc *p;
212   int fd;
213 
214   if(curproc == initproc)
215     panic("init exiting");
216 
217   // Close all open files.
218   for(fd = 0; fd < NOFILE; fd++){
219     if(curproc->ofile[fd]){
220       fileclose(curproc->ofile[fd]);
221       curproc->ofile[fd] = 0;
222     }
223   }
224 
225   begin_op();
226   iput(curproc->cwd);
227   end_op();
228   curproc->cwd = 0;
229 
230   acquire(&ptable.lock);
231 
232   // Parent might be sleeping in wait().
233   wakeup1(curproc->parent);
234 
235   // Pass abandoned children to init.
236   for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
237     if(p->parent == curproc){
238       p->parent = initproc;
239       if(p->state == ZOMBIE)
240         wakeup1(initproc);
241     }
242   }
243 
244   // Jump into the scheduler, never to return.
245   curproc->state = ZOMBIE;
246   sched();
247   panic("zombie exit");
248 }
249 
250 // Wait for a child process to exit and return its pid.
251 // Return -1 if this process has no children.
252 int
253 wait(void)
254 {
255   struct proc *p;
256   int havekids, pid;
257   struct proc *curproc = myproc();
258 
259   acquire(&ptable.lock);
260   for(;;){
261     // Scan through table looking for exited children.
262     havekids = 0;
263     for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
264       if(p->parent != curproc)
265         continue;
266       havekids = 1;
267       if(p->state == ZOMBIE){
268         // Found one.
269         pid = p->pid;
270         kfree(p->kstack);
271         p->kstack = 0;
272         freevm(p->pgdir);
273         p->pid = 0;
274         p->parent = 0;
275         p->name[0] = 0;
276         p->killed = 0;
277         p->state = UNUSED;
278         release(&ptable.lock);
279         return pid;
280       }
281     }
282 
283     // No point waiting if we don't have any children.
284     if(!havekids || curproc->killed){
285       release(&ptable.lock);
286       return -1;
287     }
288 
289   // Wait for children to exit.  (See wakeup1 call in exit().)
290     sleep(curproc, &ptable.lock);  //DOC: wait-sleep
291   }
292 }
293 
294 //PAGEBREAK: 42
295 // Per-CPU process scheduler.
296 // Each CPU calls scheduler() after setting itself up.
297 // Scheduler never returns.  It loops, doing:
298 //  - choose a process to run
299 //  - swtch to start running that process
300 //  - eventually that process transfers control
301 //      via swtch back to the scheduler.
302 void
303 scheduler(void)
304 {
305   struct proc *p;
306   struct cpu *c = mycpu();
307 
308   for(;;){
309     // Enable interrupts on this processor.
310     sti();
311 
312     // Loop over process table looking for process to run.
313     acquire(&ptable.lock);
314     for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
315       if(p->state != RUNNABLE)
316         continue;
317 
318       // Switch to chosen process.  It is the process's job
319       // to release ptable.lock and then reacquire it
320       // before jumping back to us.
321       c->proc = p;
322       switchuvm(p);
323       p->state = RUNNING;
324       p->cpu = c;
325       // cprintf("%d: switch to %d\n", c-cpus, p->pid);
326       swtch(&(p->cpu->scheduler), p->context);
327       switchkvm();
328 
329       // Process is done running for now.
330       // It should have changed its p->state before coming back.
331       c->proc = 0;
332       p->cpu = 0;
333     }
334     release(&ptable.lock);
335 
336   }
337 }
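
// Note on locking: ptable.lock is held across both directions of the context
// switch. scheduler() acquires it before swtch()ing into a process, and that
// process releases it (in forkret() the first time, or shortly after sched()
// returns in yield()/sleep() thereafter); symmetrically, the process reacquires
// it before swtch()ing back here. The lock is effectively handed off between
// the scheduler thread and the process it chooses to run.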
338 
339 // Enter scheduler.  Must hold only ptable.lock
340 // and have changed proc->state. Saves and restores
341 // intena because intena is a property of this
342 // kernel thread, not this CPU. It should
343 // be proc->intena and proc->ncli, but that would
344 // break in the few places where a lock is held but
345 // there's no process.
346 void
347 sched(void)
348 {
349   int intena;
350   struct proc *p = myproc();
351 
352   if(!holding(&ptable.lock))
353     panic("sched ptable.lock");
354   if(mycpu()->ncli != 1)
355     panic("sched locks");
356   if(p->state == RUNNING)
357     panic("sched running");
358   if(readeflags()&FL_IF)
359     panic("sched interruptible");
360   intena = mycpu()->intena;
361   // cprintf("%d: before swtch %d %x\n", p->cpu-cpus, p->pid, * (int *) 0x1d);
362   swtch(&p->context, p->cpu->scheduler);
363   // cprintf("%d/%d: after swtch %d %x\n", cpuid(), p->cpu-cpus, p->pid, * (int *) 0x1d);
364   mycpu()->intena = intena;
365 }
366 
367 // Give up the CPU for one scheduling round.
368 void
369 yield(void)
370 {
371   acquire(&ptable.lock);  //DOC: yieldlock
372   myproc()->state = RUNNABLE;
373   sched();
374   release(&ptable.lock);
375 }
376 
377 // A fork child's very first scheduling by scheduler()
378 // will swtch here.  "Return" to user space.
379 void
380 forkret(void)
381 {
382   static int first = 1;
383   // Still holding ptable.lock from scheduler.
384   release(&ptable.lock);
385 
386   if (first) {
387     // Some initialization functions must be run in the context
388     // of a regular process (e.g., they call sleep), and thus cannot
389     // be run from main().
390     first = 0;
391     iinit(ROOTDEV);
392     initlog(ROOTDEV);
393   }
394 
395   // Return to "caller", actually trapret (see allocproc).
396 }
397 
398 // Atomically release lock and sleep on chan.
399 // Reacquires lock when awakened.
400 void
401 sleep(void *chan, struct spinlock *lk)
402 {
403   struct proc *p = myproc();
404 
405   if(p == 0)
406     panic("sleep");
407 
408   if(lk == 0)
409     panic("sleep without lk");
410 
411   // Must acquire ptable.lock in order to
412   // change p->state and then call sched.
413   // Once we hold ptable.lock, we can be
414   // guaranteed that we won't miss any wakeup
415   // (wakeup runs with ptable.lock locked),
416   // so it's okay to release lk.
417   if(lk != &ptable.lock){  //DOC: sleeplock0
418     acquire(&ptable.lock);  //DOC: sleeplock1
419     release(lk);
420   }
421   // Go to sleep.
422   p->chan = chan;
423   p->state = SLEEPING;
424 
425   // cprintf("sleep %d\n", p->pid);
426 
427   sched();
428 
429   // Tidy up.
430   p->chan = 0;
431 
432   // Reacquire original lock.
433   if(lk != &ptable.lock){  //DOC: sleeplock2
434     release(&ptable.lock);
435     acquire(lk);
436   }
437 }
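
// Typical sleep/wakeup pattern (sketch; "lk" and "cond" are hypothetical names,
// not anything defined in xv6). The sleeper loops because wakeup() wakes every
// process sleeping on chan, and kill() can also make a sleeper RUNNABLE:
//
//   acquire(&lk);
//   while(!cond)
//     sleep(&cond, &lk);    // lk is dropped while asleep, held again on return
//   ...use the condition...
//   release(&lk);
//
// and the producer, under the same lock:
//
//   acquire(&lk);
//   cond = 1;
//   wakeup(&cond);
//   release(&lk);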
438 
439 //PAGEBREAK!
440 // Wake up all processes sleeping on chan.
441 // The ptable lock must be held.
442 static void
443 wakeup1(void *chan)
444 {
445   struct proc *p;
446 
447   for(p = ptable.proc; p < &ptable.proc[NPROC]; p++)
448     if(p->state == SLEEPING && p->chan == chan)
449       p->state = RUNNABLE;
450 }
451 
452 // Wake up all processes sleeping on chan.
453 void
454 wakeup(void *chan)
455 {
456   acquire(&ptable.lock);
457   wakeup1(chan);
458   release(&ptable.lock);
459 }
460 
461 // Kill the process with the given pid.
462 // Process won't exit until it returns
463 // to user space (see trap in trap.c).
464 int
465 kill(int pid)
466 {
467   struct proc *p;
468 
469   acquire(&ptable.lock);
470   for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
471     if(p->pid == pid){
472       p->killed = 1;
473       // Wake process from sleep if necessary.
474       if(p->state == SLEEPING)
475         p->state = RUNNABLE;
476       release(&ptable.lock);
477       return 0;
478     }
479   }
480   release(&ptable.lock);
481   return -1;
482 }
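
// kill() only sets a flag and makes a SLEEPING victim RUNNABLE; the victim then
// returns from sleep() with p->killed set. Code that can sleep indefinitely
// (pipe and console reads, for example) is expected to recheck p->killed after
// sleep() returns, and trap() calls exit() on the victim's behalf the next time
// it crosses the user/kernel boundary.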
483 
484 //PAGEBREAK: 36
485 // Print a process listing to console.  For debugging.
486 // Runs when user types ^P on console.
487 // No lock to avoid wedging a stuck machine further.
488 void
489 procdump(void)
490 {
491   static char *states[] = {
492   [UNUSED]    = "unused",
493   [EMBRYO]    = "embryo",
494   [SLEEPING]  = "sleep ",
495   [RUNNABLE]  = "runble",
496   [RUNNING]   = "run   ",
497   [ZOMBIE]    = "zombie"
498   };
499   int i;
500   struct proc *p;
501   char *state;
502   uint pc[10];
503 
504   for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
505     if(p->state == UNUSED)
506       continue;
507     if(p->state >= 0 && p->state < NELEM(states) && states[p->state])
508       state = states[p->state];
509     else
510       state = "???";
511     cprintf("%d %s %s", p->pid, state, p->name);
512     if(p->state == SLEEPING){
513       getcallerpcs((uint*)p->context->ebp+2, pc);
514       for(i=0; i<10 && pc[i] != 0; i++)
515         cprintf(" %p", pc[i]);
516     }
517     cprintf("\n");
518   }
519 }
520