// Mutual exclusion spin locks.

#include "types.h"
#include "defs.h"
#include "param.h"
#include "x86.h"
#include "memlayout.h"
#include "mmu.h"
#include "proc.h"
#include "spinlock.h"

// Initialize lk as an unlocked spinlock.  name is kept for debugging
// only; the pointer is stored, not copied, so it must outlive the lock.
void
initlock(struct spinlock *lk, char *name)
{
  lk->name = name;
  lk->locked = 0;   // 0 = free, 1 = held
  lk->cpu = 0;      // no owner yet
}

// Acquire the lock.
// Loops (spins) until the lock is acquired.
// Holding a lock for a long time may cause
// other CPUs to waste time spinning to acquire it.
void
acquire(struct spinlock *lk)
{
  pushcli(); // disable interrupts to avoid deadlock.
  // Re-acquiring a lock this CPU already holds would spin forever.
  if(holding(lk))
    panic("acquire");

  // The xchg is atomic.
  // It swaps 1 into lk->locked and returns the old value; a return of 0
  // means we were the one to take the lock.
  while(xchg(&lk->locked, 1) != 0)
    ;

  // Tell the C compiler and the processor to not move loads or stores
  // past this point, to ensure that the critical section's memory
  // references happen after the lock is acquired.
  __sync_synchronize();

  // Record info about lock acquisition for debugging.
  lk->cpu = cpu;
  getcallerpcs(&lk, lk->pcs);
}

// Release the lock.
void
release(struct spinlock *lk)
{
  // Releasing a lock this CPU doesn't hold is a usage bug.
  if(!holding(lk))
    panic("release");

  // Clear the debug info while the lock is still held, so no other CPU
  // can observe stale owner data paired with a free lock.
  lk->pcs[0] = 0;
  lk->cpu = 0;

  // Tell the C compiler and the processor to not move loads or stores
  // past this point, to ensure that all the stores in the critical
  // section are visible to other cores before the lock is released.
  // Both the C compiler and the hardware may re-order loads and
  // stores; __sync_synchronize() tells them both to not re-order.
  __sync_synchronize();

  // Release the lock, equivalent to lk->locked = 0.
  // This code can't use a C assignment, since it might
  // not be atomic. A real OS would use C atomics here.
  asm volatile("movl $0, %0" : "+m" (lk->locked) : );

  popcli();
}

// Record the current call stack in pcs[] by following the %ebp chain.
// Walk the saved-%ebp chain starting from v (the address of a function
// argument) and record up to 10 saved return addresses into pcs[].
// Unused trailing slots are zeroed so readers can find the end.
void
getcallerpcs(void *v, uint pcs[])
{
  uint *ebp;
  int i;

  // v points at an argument on the caller's stack; the frame's %ebp
  // sits two words below it, hence the -2.
  ebp = (uint*)v - 2;
  for(i = 0; i < 10; i++){
    // Stop on a null, non-kernel, or 0xffffffff sentinel frame pointer —
    // anything that would not be a valid kernel stack frame.
    if(ebp == 0 || ebp < (uint*)KERNBASE || ebp == (uint*)0xffffffff)
      break;
    pcs[i] = ebp[1]; // saved %eip
    ebp = (uint*)ebp[0]; // saved %ebp
  }
  // Zero-fill the rest of pcs[].
  for(; i < 10; i++)
    pcs[i] = 0;
}

// Check whether this cpu is holding the lock.
// NOTE(review): reads lock->locked and lock->cpu without disabling
// interrupts here; presumably callers invoke this with interrupts
// already off (acquire/release do, via pushcli) — confirm no other
// callers rely on it being safe standalone.
int
holding(struct spinlock *lock)
{
  return lock->locked && lock->cpu == cpu;
}


// Pushcli/popcli are like cli/sti except that they are matched:
// it takes two popcli to undo two pushcli. Also, if interrupts
// are off, then pushcli, popcli leaves them off.

// Disable interrupts on this CPU and bump the nesting count.  On the
// outermost call, remember whether interrupts were enabled (FL_IF) so
// the matching popcli can restore that state.
void
pushcli(void)
{
  int eflags;

  // Must read eflags before cli() clears FL_IF.
  eflags = readeflags();
  cli();
  if(cpu->ncli == 0)
    cpu->intena = eflags & FL_IF;
  cpu->ncli += 1;
}

// Undo one pushcli: decrement the nesting count and re-enable
// interrupts only when the count reaches zero and they were enabled
// at the time of the outermost pushcli.
void
popcli(void)
{
  // Interrupts must still be off here; popcli with them on means
  // someone enabled interrupts inside a pushcli/popcli region.
  if(readeflags()&FL_IF)
    panic("popcli - interruptible");
  // More popcli calls than pushcli calls.
  if(--cpu->ncli < 0)
    panic("popcli");
  if(cpu->ncli == 0 && cpu->intena)
    sti();
}