// Mutual exclusion spin locks.

#include "types.h"
#include "defs.h"
#include "param.h"
#include "x86.h"
#include "memlayout.h"
#include "mmu.h"
#include "proc.h"
#include "spinlock.h"

void
initlock(struct spinlock *lk, char *name)
{
  lk->name = name;
  lk->locked = 0;
  lk->cpu = 0;
}
195be0039cSrtm 
201ddfbbb1SFrans Kaashoek // Acquire the lock.
211ddfbbb1SFrans Kaashoek // Loops (spins) until the lock is acquired.
221ddfbbb1SFrans Kaashoek // Holding a lock for a long time may cause
231ddfbbb1SFrans Kaashoek // other CPUs to waste time spinning to acquire it.
2421a88fd4Skaashoek void
acquire(struct spinlock * lk)25b121486cSRuss Cox acquire(struct spinlock *lk)
2621a88fd4Skaashoek {
27faad047aSRobert Morris   pushcli(); // disable interrupts to avoid deadlock.
28b121486cSRuss Cox   if(holding(lk))
290dd42537Srsc     panic("acquire");
300dd42537Srsc 
31943fd378Srsc   // The xchg is atomic.
32b121486cSRuss Cox   while(xchg(&lk->locked, 1) != 0)
3365bd8e13Srsc     ;
34e7a5b3c5Srsc 
3520d05d44SRobert Morris   // Tell the C compiler and the processor to not move loads or stores
3620d05d44SRobert Morris   // past this point, to ensure that the critical section's memory
3720d05d44SRobert Morris   // references happen after the lock is acquired.
3820d05d44SRobert Morris   __sync_synchronize();
3920d05d44SRobert Morris 
4031085bb4Srsc   // Record info about lock acquisition for debugging.
41abf847a0SFrans Kaashoek   lk->cpu = mycpu();
42b121486cSRuss Cox   getcallerpcs(&lk, lk->pcs);
4321a88fd4Skaashoek }
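
// The spin loop above relies on xchg() (x86.h), which wraps the x86
// lock-prefixed xchgl instruction: it atomically stores 1 into
// lk->locked and returns the old value. Sketched as plain C (the real
// thing is a single atomic instruction, not separate steps):
//
//   old = lk->locked;
//   lk->locked = 1;
//   return old;          // 0 means this CPU just took the lock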

// Release the lock.
void
release(struct spinlock *lk)
{
  if(!holding(lk))
    panic("release");

  lk->pcs[0] = 0;
  lk->cpu = 0;

  // Tell the C compiler and the processor to not move loads or stores
  // past this point, to ensure that all the stores in the critical
  // section are visible to other cores before the lock is released.
  // Both the C compiler and the hardware may re-order loads and
  // stores; __sync_synchronize() tells them both not to.
  __sync_synchronize();

  // Release the lock, equivalent to lk->locked = 0.
  // This code can't use a C assignment, since it might
  // not be atomic. A real OS would use C atomics here.
  asm volatile("movl $0, %0" : "+m" (lk->locked) : );

  popcli();
}
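
// Typical caller pattern (illustrative sketch; examplelock is
// hypothetical, not part of this file):
//
//   acquire(&examplelock);
//   // ... read and write the data the lock protects ...
//   release(&examplelock);
//
// Interrupts stay disabled on this CPU from the pushcli() in acquire()
// until the matching popcli() in release().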

// Record the current call stack in pcs[] by following the %ebp chain.
void
getcallerpcs(void *v, uint pcs[])
{
  uint *ebp;
  int i;

  ebp = (uint*)v - 2;
  for(i = 0; i < 10; i++){
    if(ebp == 0 || ebp < (uint*)KERNBASE || ebp == (uint*)0xffffffff)
      break;
    pcs[i] = ebp[1];     // saved %eip
    ebp = (uint*)ebp[0]; // saved %ebp
  }
  for(; i < 10; i++)
    pcs[i] = 0;
}
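
// Frame layout this walk assumes (standard x86 calling convention with
// frame pointers; v is the address of the first argument of the
// function whose stack is being walked):
//
//   ebp+8 : first argument       <- v, so (uint*)v - 2 == ebp
//   ebp+4 : return %eip          <- ebp[1], recorded in pcs[]
//   ebp+0 : caller's saved %ebp  <- ebp[0], the next link in the chain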

// Check whether this cpu is holding the lock.
int
holding(struct spinlock *lock)
{
  int r;
  pushcli();
  r = lock->locked && lock->cpu == mycpu();
  popcli();
  return r;
}


// Pushcli/popcli are like cli/sti except that they are matched:
// it takes two popcli to undo two pushcli.  Also, if interrupts
// are off, then pushcli, popcli leaves them off.

void
pushcli(void)
{
  int eflags;

  eflags = readeflags();
  cli();
  if(mycpu()->ncli == 0)
    mycpu()->intena = eflags & FL_IF;
  mycpu()->ncli += 1;
}

void
popcli(void)
{
  if(readeflags()&FL_IF)
    panic("popcli - interruptible");
  if(--mycpu()->ncli < 0)
    panic("popcli");
  if(mycpu()->ncli == 0 && mycpu()->intena)
    sti();
}
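
// Example of the matched behavior described above (illustrative sketch):
//
//   pushcli();   // interrupts now off on this CPU, ncli = 1
//   pushcli();   // still off, ncli = 2
//   popcli();    // still off, ncli = 1
//   popcli();    // ncli = 0; sti() runs only if interrupts were
//                // enabled (FL_IF set) at the outermost pushcli()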