1 /*-------------------------------------------------------------------------
2  *
3  * spin.c
4  *	   Hardware-independent implementation of spinlocks.
5  *
6  *
7  * For machines that have test-and-set (TAS) instructions, s_lock.h/.c
8  * define the spinlock implementation.  This file contains only a stub
9  * implementation for spinlocks using PGSemaphores.  Unless semaphores
10  * are implemented in a way that doesn't involve a kernel call, this
11  * is too slow to be very useful :-(
12  *
13  *
14  * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
15  * Portions Copyright (c) 1994, Regents of the University of California
16  *
17  *
18  * IDENTIFICATION
19  *	  src/backend/storage/lmgr/spin.c
20  *
21  *-------------------------------------------------------------------------
22  */
23 #include "postgres.h"
24 
25 #include "storage/pg_sema.h"
26 #include "storage/shmem.h"
27 #include "storage/spin.h"
28 
29 
30 #ifndef HAVE_SPINLOCKS
31 
32 /*
33  * No TAS, so spinlocks are implemented as PGSemaphores.
34  */
35 
36 #ifndef HAVE_ATOMICS
37 #define NUM_EMULATION_SEMAPHORES (NUM_SPINLOCK_SEMAPHORES + NUM_ATOMICS_SEMAPHORES)
38 #else
39 #define NUM_EMULATION_SEMAPHORES (NUM_SPINLOCK_SEMAPHORES)
#endif							/* HAVE_ATOMICS */
41 
42 PGSemaphore *SpinlockSemaArray;
43 
44 #else							/* !HAVE_SPINLOCKS */
45 
46 #define NUM_EMULATION_SEMAPHORES 0
47 
48 #endif							/* HAVE_SPINLOCKS */
49 
50 /*
51  * Report the amount of shared memory needed to store semaphores for spinlock
52  * support.
53  */
54 Size
SpinlockSemaSize(void)55 SpinlockSemaSize(void)
56 {
57 	return NUM_EMULATION_SEMAPHORES * sizeof(PGSemaphore);
58 }
59 
60 /*
61  * Report number of semaphores needed to support spinlocks.
62  */
63 int
SpinlockSemas(void)64 SpinlockSemas(void)
65 {
66 	return NUM_EMULATION_SEMAPHORES;
67 }
68 
69 #ifndef HAVE_SPINLOCKS
70 
71 /*
72  * Initialize spinlock emulation.
73  *
74  * This must be called after PGReserveSemaphores().
75  */
76 void
SpinlockSemaInit(void)77 SpinlockSemaInit(void)
78 {
79 	PGSemaphore *spinsemas;
80 	int			nsemas = SpinlockSemas();
81 	int			i;
82 
83 	/*
84 	 * We must use ShmemAllocUnlocked(), since the spinlock protecting
85 	 * ShmemAlloc() obviously can't be ready yet.
86 	 */
87 	spinsemas = (PGSemaphore *) ShmemAllocUnlocked(SpinlockSemaSize());
88 	for (i = 0; i < nsemas; ++i)
89 		spinsemas[i] = PGSemaphoreCreate();
90 	SpinlockSemaArray = spinsemas;
91 }
92 
93 /*
94  * s_lock.h hardware-spinlock emulation using semaphores
95  *
96  * We map all spinlocks onto NUM_EMULATION_SEMAPHORES semaphores.  It's okay to
97  * map multiple spinlocks onto one semaphore because no process should ever
98  * hold more than one at a time.  We just need enough semaphores so that we
99  * aren't adding too much extra contention from that.
100  *
101  * There is one exception to the restriction of only holding one spinlock at a
102  * time, which is that it's ok if emulated atomic operations are nested inside
103  * spinlocks. To avoid the danger of spinlocks and atomic using the same sema,
104  * we make sure "normal" spinlocks and atomics backed by spinlocks use
105  * distinct semaphores (see the nested argument to s_init_lock_sema).
106  *
107  * slock_t is just an int for this implementation; it holds the spinlock
108  * number from 1..NUM_EMULATION_SEMAPHORES.  We intentionally ensure that 0
109  * is not a valid value, so that testing with this code can help find
110  * failures to initialize spinlocks.
111  */
112 
113 static inline void
s_check_valid(int lockndx)114 s_check_valid(int lockndx)
115 {
116 	if (unlikely(lockndx <= 0 || lockndx > NUM_EMULATION_SEMAPHORES))
117 		elog(ERROR, "invalid spinlock number: %d", lockndx);
118 }
119 
120 void
s_init_lock_sema(volatile slock_t * lock,bool nested)121 s_init_lock_sema(volatile slock_t *lock, bool nested)
122 {
123 	static uint32 counter = 0;
124 	uint32		offset;
125 	uint32		sema_total;
126 	uint32		idx;
127 
128 	if (nested)
129 	{
130 		/*
131 		 * To allow nesting atomics inside spinlocked sections, use a
132 		 * different spinlock. See comment above.
133 		 */
134 		offset = 1 + NUM_SPINLOCK_SEMAPHORES;
135 		sema_total = NUM_ATOMICS_SEMAPHORES;
136 	}
137 	else
138 	{
139 		offset = 1;
140 		sema_total = NUM_SPINLOCK_SEMAPHORES;
141 	}
142 
143 	idx = (counter++ % sema_total) + offset;
144 
145 	/* double check we did things correctly */
146 	s_check_valid(idx);
147 
148 	*lock = idx;
149 }
150 
151 void
s_unlock_sema(volatile slock_t * lock)152 s_unlock_sema(volatile slock_t *lock)
153 {
154 	int			lockndx = *lock;
155 
156 	s_check_valid(lockndx);
157 
158 	PGSemaphoreUnlock(SpinlockSemaArray[lockndx - 1]);
159 }
160 
161 bool
s_lock_free_sema(volatile slock_t * lock)162 s_lock_free_sema(volatile slock_t *lock)
163 {
164 	/* We don't currently use S_LOCK_FREE anyway */
165 	elog(ERROR, "spin.c does not support S_LOCK_FREE()");
166 	return false;
167 }
168 
169 int
tas_sema(volatile slock_t * lock)170 tas_sema(volatile slock_t *lock)
171 {
172 	int			lockndx = *lock;
173 
174 	s_check_valid(lockndx);
175 
176 	/* Note that TAS macros return 0 if *success* */
177 	return !PGSemaphoreTryLock(SpinlockSemaArray[lockndx - 1]);
178 }
179 
180 #endif							/* !HAVE_SPINLOCKS */
181