xref: /xv6-public/spinlock.c (revision 1d19081e)
// Mutual exclusion spin locks.

#include "types.h"
#include "defs.h"
#include "param.h"
#include "x86.h"
#include "memlayout.h"
#include "mmu.h"
#include "proc.h"
#include "spinlock.h"

void
initlock(struct spinlock *lk, char *name)
{
  lk->name = name;
  lk->locked = 0;
  lk->cpu = 0;
}
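
// Example usage (a sketch; "examplelock" is hypothetical, though xv6's
// real locks, such as ptable.lock, are set up the same way at boot):
//
//   struct spinlock examplelock;
//   initlock(&examplelock, "example");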

// Acquire the lock.
// Loops (spins) until the lock is acquired.
// Holding a lock for a long time may cause
// other CPUs to waste time spinning to acquire it.
void
acquire(struct spinlock *lk)
{
  pushcli(); // disable interrupts to avoid deadlock.
  if(holding(lk))
    panic("acquire");

  // The xchg is atomic.
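  // It swaps 1 into lk->locked and returns the old value, so the loop
  // below exits only on the iteration that observed 0 and thereby took
  // the lock; every other CPU keeps seeing 1 and keeps spinning.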
  while(xchg(&lk->locked, 1) != 0)
    ;

  // Tell the C compiler and the processor to not move loads or stores
  // past this point, to ensure that the critical section's memory
  // references happen after the lock is acquired.
  __sync_synchronize();

  // Record info about lock acquisition for debugging.
  lk->cpu = mycpu();
  getcallerpcs(&lk, lk->pcs);
}
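
// Typical critical-section pattern (a sketch; examplelock and the data
// it protects are hypothetical):
//   acquire(&examplelock);
//   ... read and write the shared data ...
//   release(&examplelock);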

// Release the lock.
void
release(struct spinlock *lk)
{
  if(!holding(lk))
    panic("release");

  lk->pcs[0] = 0;
  lk->cpu = 0;

  // Tell the C compiler and the processor to not move loads or stores
  // past this point, to ensure that all the stores in the critical
  // section are visible to other cores before the lock is released.
  // Both the C compiler and the hardware may re-order loads and
  // stores; __sync_synchronize() tells them both not to.
  __sync_synchronize();

  // Release the lock, equivalent to lk->locked = 0.
  // This code can't use a C assignment, since it might
  // not be atomic. A real OS would use C atomics here.
  asm volatile("movl $0, %0" : "+m" (lk->locked) : );

  popcli();
}
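
// The "C atomics" alluded to above would look roughly like this GCC
// builtin (a sketch of the idea, not what xv6 actually uses):
//   __atomic_store_n(&lk->locked, 0, __ATOMIC_RELEASE);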

// Record the current call stack in pcs[] by following the %ebp chain.
void
getcallerpcs(void *v, uint pcs[])
{
  uint *ebp;
  int i;

  ebp = (uint*)v - 2;
  for(i = 0; i < 10; i++){
    if(ebp == 0 || ebp < (uint*)KERNBASE || ebp == (uint*)0xffffffff)
      break;
    pcs[i] = ebp[1];     // saved %eip
    ebp = (uint*)ebp[0]; // saved %ebp
  }
  for(; i < 10; i++)
    pcs[i] = 0;
}
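
// The walk above relies on the standard x86 frame layout: ebp[0] holds
// the caller's saved %ebp (the next frame in the chain) and ebp[1]
// holds the return address pushed by the call instruction. The first
// argument sits two words above the saved %ebp, hence ebp = (uint*)v - 2,
// where v is the address of the first argument of the frame being walked.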

// Check whether this cpu is holding the lock.
int
holding(struct spinlock *lock)
{
  int r;
  pushcli();
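  // With interrupts off, this code cannot migrate to another CPU, so
  // mycpu() stays stable for the comparison below.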
  r = lock->locked && lock->cpu == mycpu();
  popcli();
  return r;
}


// Pushcli/popcli are like cli/sti except that they are matched:
// it takes two popcli to undo two pushcli.  Also, if interrupts
// are off, then pushcli, popcli leaves them off.
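//
// For example (a sketch of the nesting semantics):
//   pushcli();  // interrupts off, ncli == 1
//   pushcli();  // still off,      ncli == 2
//   popcli();   // still off,      ncli == 1
//   popcli();   // interrupts restored to their pre-pushcli state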

void
pushcli(void)
{
  int eflags;

  eflags = readeflags();
  cli();
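  // Remember whether interrupts were enabled, but only at the
  // outermost pushcli; nested calls just deepen the count.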
  if(mycpu()->ncli == 0)
    mycpu()->intena = eflags & FL_IF;
  mycpu()->ncli += 1;
}

void
popcli(void)
{
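  // Interrupts must already be off here: a matched pushcli disabled
  // them, so arriving with FL_IF set means someone re-enabled them
  // inside the critical region.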
  if(readeflags()&FL_IF)
    panic("popcli - interruptible");
  if(--mycpu()->ncli < 0)
    panic("popcli");
  if(mycpu()->ncli == 0 && mycpu()->intena)
    sti();
}