xref: /xv6-public/spinlock.c (revision be38c841)
// Mutual exclusion spin locks.

#include "types.h"
#include "defs.h"
#include "param.h"
#include "x86.h"
#include "mmu.h"
#include "proc.h"
#include "spinlock.h"

extern int use_console_lock;

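// For reference, the lock structure this file manipulates.  The real
// definition lives in spinlock.h; this sketch is an assumption about
// that header, restated here for readability:
//
//   struct spinlock {
//     uint locked;     // Is the lock held?
//     // For debugging:
//     char *name;      // Name of lock.
//     int cpu;         // Cpu number holding the lock, +10 (see acquire).
//     uint pcs[10];    // Call stack (program counters) that locked it.
//   };
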
// Initialize lock with a name (for debugging) and mark it unheld.
void
initlock(struct spinlock *lock, char *name)
{
  lock->name = name;
  lock->locked = 0;
  lock->cpu = 0xffffffff;  // Not held by any cpu.
}

// Acquire the lock.
// Loops (spins) until the lock is acquired.
// Holding a lock for a long time may cause
// other CPUs to waste time spinning to acquire it.
void
acquire(struct spinlock *lock)
{
  pushcli();
  if(holding(lock))
    panic("acquire");

  // The xchg is atomic.
  // It also serializes, so that reads after acquire are not
  // reordered before it.
  while(xchg(&lock->locked, 1) == 1)
    ;

  // Record info about lock acquisition for debugging.
  // The +10 is only so that we can tell the difference
  // between forgetting to initialize lock->cpu
  // and holding a lock on cpu 0.
  lock->cpu = cpu() + 10;
  getcallerpcs(&lock, lock->pcs);
}
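
// For reference, a sketch of the xchg primitive used above.  The real
// definition lives in x86.h; this restatement is an assumption about
// its shape and is compiled out so it cannot conflict with it.  The
// lock prefix makes the exchange atomic, and xchgl with a memory
// operand is a serializing instruction on x86.
#if 0
static inline uint
xchg(volatile uint *addr, uint newval)
{
  uint result;

  // The + in "+m" denotes a read-modify-write operand.
  asm volatile("lock; xchgl %0, %1" :
               "+m" (*addr), "=a" (result) :
               "1" (newval) :
               "cc");
  return result;
}
#endif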

// Release the lock.
void
release(struct spinlock *lock)
{
  if(!holding(lock))
    panic("release");

  lock->pcs[0] = 0;
  lock->cpu = 0xffffffff;

  // The xchg serializes, so that reads before release are
  // not reordered after it.  The 1996 Pentium Pro manual (Volume 3,
  // 7.2) says reads can be carried out speculatively and in
  // any order, which implies we need to serialize here.
  // But the 2007 Intel 64 Architecture Memory Ordering White
  // Paper says that Intel 64 and IA-32 will not move a load
  // after a store. So lock->locked = 0 would work here.
  // The xchg being asm volatile ensures gcc emits it after
  // the above assignments (and after the critical section).
  xchg(&lock->locked, 0);

  popcli();
}
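
// Illustrative only: a minimal sketch of how these locks are used.
// The lock and counter here (ticks_lock, ticks) are hypothetical
// stand-ins, not part of this file, and the code is compiled out.
// initlock(&ticks_lock, "ticks") must run once before the first acquire.
#if 0
struct spinlock ticks_lock;   // hypothetical lock
uint ticks;                   // hypothetical shared counter

void
tick(void)
{
  acquire(&ticks_lock);   // disables interrupts (pushcli), then spins
  ticks++;                // critical section: at most one cpu here
  release(&ticks_lock);   // clears locked, then popcli
}
#endif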

// Record the current call stack in pcs[] by following the %ebp chain.
void
getcallerpcs(void *v, uint pcs[])
{
  uint *ebp;
  int i;

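  // v is the address of the caller's first argument, which on x86
  // sits just above the caller's saved %eip and %ebp:
  //   ebp+8: first argument  <- v points here
  //   ebp+4: saved %eip (return address)
  //   ebp+0: saved %ebp (previous frame)
  // so backing up two words from v recovers the caller's %ebp.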
  ebp = (uint*)v - 2;
  for(i = 0; i < 10; i++){
    if(ebp == 0 || ebp == (uint*)0xffffffff)
      break;
    pcs[i] = ebp[1];     // saved %eip
    ebp = (uint*)ebp[0]; // saved %ebp
  }
  for(; i < 10; i++)
    pcs[i] = 0;
}

// Check whether this cpu is holding the lock.
int
holding(struct spinlock *lock)
{
  return lock->locked && lock->cpu == cpu() + 10;  // +10: see acquire().
}

// Pushcli/popcli are like cli/sti except that they are matched:
// it takes two popcli to undo two pushcli.  Also, if interrupts
// are off, then a pushcli/popcli pair leaves them off.

void
pushcli(void)
{
  int eflags;

  eflags = read_eflags();
  cli();
  if(cpus[cpu()].ncli++ == 0)
    cpus[cpu()].intena = eflags & FL_IF;
}

void
popcli(void)
{
  if(read_eflags()&FL_IF)
    panic("popcli - interruptible");
  if(--cpus[cpu()].ncli < 0)
    panic("popcli");
  if(cpus[cpu()].ncli == 0 && cpus[cpu()].intena)
    sti();
}
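
// Illustrative only: nesting behavior of pushcli/popcli, compiled out.
// Interrupts stay off until the outermost popcli, and are re-enabled
// then only if they were on at the first pushcli.
#if 0
void
nested_example(void)   // hypothetical
{
  pushcli();           // interrupts off; remembers whether they were on
  pushcli();           // still off; just increments ncli
  popcli();            // still off: ncli is 1, not 0
  popcli();            // ncli hits 0; sti() only if FL_IF was set before
}
#endif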