// Mutual exclusion spin locks.

#include "types.h"
#include "defs.h"
#include "param.h"
#include "x86.h"
#include "memlayout.h"
#include "mmu.h"
#include "proc.h"
#include "spinlock.h"

void
initlock(struct spinlock *lk, char *name)
{
  lk->name = name;
  lk->locked = 0;
  lk->cpu = 0;
}

// Acquire a spin lock. Loops (spins) until the lock is acquired.
// Holding a lock for a long time may cause other CPUs to waste time
// spinning to acquire it. Spinlocks shouldn't be held across sleep();
// for those cases, use sleeplocks.
void
acquire(struct spinlock *lk)
{
  pushcli(); // disable interrupts to avoid deadlock.
  if(holding(lk))
    panic("acquire");

  // The xchg is atomic.
  // It also serializes, so that reads after acquire are not
  // reordered before it.
  while(xchg(&lk->locked, 1) != 0)
    ;

  // Record info about lock acquisition for debugging.
  lk->cpu = cpu;
  getcallerpcs(&lk, lk->pcs);
}

// Release the lock.
void
release(struct spinlock *lk)
{
  if(!holding(lk))
    panic("release");

  lk->pcs[0] = 0;
  lk->cpu = 0;

  // The xchg serializes, so that reads before release are
  // not reordered after it. The 1996 PentiumPro manual (Volume 3,
  // 7.2) says reads can be carried out speculatively and in
  // any order, which implies we need to serialize here.
  // But the 2007 Intel 64 Architecture Memory Ordering White
  // Paper says that Intel 64 and IA-32 will not move a load
  // after a store. So lk->locked = 0 would work here.
  // The xchg being asm volatile ensures gcc emits it after
  // the above assignments (and after the critical section).
  xchg(&lk->locked, 0);

  popcli();
}

// Record the current call stack in pcs[] by following the %ebp chain.
void
getcallerpcs(void *v, uint pcs[])
{
  uint *ebp;
  int i;

  ebp = (uint*)v - 2;
  for(i = 0; i < 10; i++){
    if(ebp == 0 || ebp < (uint*)KERNBASE || ebp == (uint*)0xffffffff)
      break;
    pcs[i] = ebp[1];     // saved %eip
    ebp = (uint*)ebp[0]; // saved %ebp
  }
  for(; i < 10; i++)
    pcs[i] = 0;
}

// Check whether this cpu is holding the lock.
int
holding(struct spinlock *lock)
{
  return lock->locked && lock->cpu == cpu;
}

// Pushcli/popcli are like cli/sti except that they are matched:
// it takes two popcli to undo two pushcli. Also, if interrupts
// are off, then pushcli, popcli leaves them off.

void
pushcli(void)
{
  int eflags;

  eflags = readeflags();
  cli();
  if(cpu->ncli++ == 0)
    cpu->intena = eflags & FL_IF;
}

void
popcli(void)
{
  if(readeflags()&FL_IF)
    panic("popcli - interruptible");
  if(--cpu->ncli < 0)
    panic("popcli");
  if(cpu->ncli == 0 && cpu->intena)
    sti();
}
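
// Usage sketch (illustrative only, not part of xv6 itself; the names
// counterlock, counter, counterinit, and counterinc are hypothetical).
// The typical pattern is to initialize the lock once, then bracket
// every access to the shared data with acquire/release:
//
//   struct spinlock counterlock;
//   int counter;
//
//   void
//   counterinit(void)
//   {
//     initlock(&counterlock, "counter");
//   }
//
//   void
//   counterinc(void)
//   {
//     acquire(&counterlock);   // spins; interrupts off on this CPU
//     counter++;               // critical section: one CPU at a time
//     release(&counterlock);   // store 0, then pop one cli level
//   }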

void
initsleeplock(struct sleeplock *l)
{
  l->locked = 0;
}

// Grab the sleeplock that is protected by spinl. Sleeplocks let a
// process lock a data structure for a long time, including across
// sleeps. Other processes that try to acquire a sleeplock are put to
// sleep while another process holds it. To update the sleeplock's
// state atomically, the caller must hold spinl.
void
acquire_sleeplock(struct sleeplock *sleepl, struct spinlock *spinl)
{
  while (sleepl->locked) {
    sleep(sleepl, spinl);
  }
  sleepl->locked = 1;
}

// Release the sleeplock that is protected by a spinlock.
// Caller must hold the spinlock that protects the sleeplock.
void
release_sleeplock(struct sleeplock *sleepl)
{
  sleepl->locked = 0;
  wakeup(sleepl);
}

// Is the sleeplock acquired?
// Caller must hold the spinlock that protects the sleeplock.
int
acquired_sleeplock(struct sleeplock *sleepl)
{
  return sleepl->locked;
}
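
// Usage sketch (illustrative only; the names buflock, buf, and
// bufwrite are hypothetical). The spinlock passed to acquire_sleeplock
// must be the one guarding the sleeplock's state: sleep() releases it
// while the process waits and reacquires it before returning, which is
// what makes the check-then-set loop above atomic.
//
//   struct spinlock buflock;      // protects buf.lock
//   struct {
//     struct sleeplock lock;
//     // ... shared data ...
//   } buf;
//
//   void
//   bufwrite(void)
//   {
//     acquire(&buflock);
//     acquire_sleeplock(&buf.lock, &buflock);
//     release(&buflock);
//
//     // ... long operation that may sleep ...
//
//     acquire(&buflock);
//     release_sleeplock(&buf.lock);
//     release(&buflock);
//   }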