xref: /xv6-public/spinlock.c (revision c780dbf9)
1 // Mutual exclusion spin locks.
2 
3 #include "types.h"
4 #include "defs.h"
5 #include "param.h"
6 #include "x86.h"
7 #include "mmu.h"
8 #include "proc.h"
9 #include "spinlock.h"
10 
11 void
12 initlock(struct spinlock *lock, char *name)
13 {
14   lock->name = name;
15   lock->locked = 0;
16   lock->cpu = 0xffffffff;
17 }
18 
// Acquire the lock.
// Loops (spins) until the lock is acquired.
// Holding a lock for a long time may cause
// other CPUs to waste time spinning to acquire it.
// Disables interrupts on this CPU (via pushcli) for the duration
// of the hold; release() re-enables them via popcli.
// Panics if this CPU already holds the lock (no recursive acquire).
void
acquire(struct spinlock *lock)
{
  pushcli();
  if(holding(lock))
    panic("acquire");

  // The xchg is atomic.
  // It also serializes, so that reads after acquire are not
  // reordered before it.
  // Spin until we swap a 1 in and see the old value was 0.
  while(xchg(&lock->locked, 1) == 1)
    ;

  // Record info about lock acquisition for debugging.
  // The +10 is only so that we can tell the difference
  // between forgetting to initialize lock->cpu
  // and holding a lock on cpu 0.
  lock->cpu = cpu() + 10;
  // &lock is the address of this function's argument slot on the
  // stack, from which getcallerpcs locates the caller's %ebp frame.
  getcallerpcs(&lock, lock->pcs);
}
43 
// Release the lock.
// Panics if this CPU does not hold it.  The debug fields (pcs, cpu)
// are cleared before the lock word, while the lock is still held.
void
release(struct spinlock *lock)
{
  if(!holding(lock))
    panic("release");

  lock->pcs[0] = 0;
  lock->cpu = 0xffffffff;  // back to the "no holder" sentinel

  // The xchg serializes, so that reads before release are
  // not reordered after it.  The 1996 PentiumPro manual (Volume 3,
  // 7.2) says reads can be carried out speculatively and in
  // any order, which implies we need to serialize here.
  // But the 2007 Intel 64 Architecture Memory Ordering White
  // Paper says that Intel 64 and IA-32 will not move a load
  // after a store. So lock->locked = 0 would work here.
  // The xchg being asm volatile ensures gcc emits it after
  // the above assignments (and after the critical section).
  xchg(&lock->locked, 0);

  // Undo the pushcli from acquire; may re-enable interrupts.
  popcli();
}
67 
68 // Record the current call stack in pcs[] by following the %ebp chain.
69 void
70 getcallerpcs(void *v, uint pcs[])
71 {
72   uint *ebp;
73   int i;
74 
75   ebp = (uint*)v - 2;
76   for(i = 0; i < 10; i++){
77     if(ebp == 0 || ebp == (uint*)0xffffffff)
78       break;
79     pcs[i] = ebp[1];     // saved %eip
80     ebp = (uint*)ebp[0]; // saved %ebp
81   }
82   for(; i < 10; i++)
83     pcs[i] = 0;
84 }
85 
86 // Check whether this cpu is holding the lock.
87 int
88 holding(struct spinlock *lock)
89 {
90   return lock->locked && lock->cpu == cpu() + 10;
91 }
92 
93 
94 // Pushcli/popcli are like cli/sti except that they are matched:
95 // it takes two popcli to undo two pushcli.  Also, if interrupts
96 // are off, then pushcli, popcli leaves them off.
97 
98 void
99 pushcli(void)
100 {
101   int eflags;
102 
103   eflags = read_eflags();
104   cli();
105   if(cpus[cpu()].ncli++ == 0)
106     cpus[cpu()].intena = eflags & FL_IF;
107 }
108 
109 void
110 popcli(void)
111 {
112   if(read_eflags()&FL_IF)
113     panic("popcli - interruptible");
114   if(--cpus[cpu()].ncli < 0)
115     panic("popcli");
116   if(cpus[cpu()].ncli == 0 && cpus[cpu()].intena)
117     sti();
118 }
119 
120