1 /* Copyright (C) 2006 MySQL AB
2
3 This program is free software; you can redistribute it and/or modify
4 it under the terms of the GNU General Public License as published by
5 the Free Software Foundation; version 2 of the License.
6
7 This program is distributed in the hope that it will be useful,
8 but WITHOUT ANY WARRANTY; without even the implied warranty of
9 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 GNU General Public License for more details.
11
12 You should have received a copy of the GNU General Public License
13 along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */
15
16 /*
17 lockman for row locks, tablockman for table locks
18 */
19
20 /* #define EXTRA_VERBOSE */
21
22 #include <tap.h>
23
24 #include <my_global.h>
25 #include <my_sys.h>
26 #include <lf.h>
27 #include "../lockman.h"
28 #include "../tablockman.h"
29
#define Nlos 100   /* number of lock owners == stress-test threads */
#define Ntbls 10   /* number of lockable tables */
LOCK_OWNER loarray[Nlos];          /* row-lock owners, one per owner id */
TABLE_LOCK_OWNER loarray1[Nlos];   /* table-lock owners, one per owner id */
pthread_mutex_t mutexes[Nlos];     /* per-owner mutex/cond pairs, shared */
pthread_cond_t conds[Nlos];        /*   between loarray[i] and loarray1[i] */
LOCKED_TABLE ltarray[Ntbls];       /* the tables being locked */
LOCKMAN lockman;                   /* row-lock manager under test */
TABLOCKMAN tablockman;             /* table-lock manager under test */

/* verbose diagnostics are compiled out unless EXTRA_VERBOSE is defined */
#ifndef EXTRA_VERBOSE
#define print_lo1(X) /* no-op */
#define DIAG(X) /* no-op */
#else
/* NOTE(review): print_lo1 has no definition in this branch; presumably it
   is provided by a project header -- verify before enabling EXTRA_VERBOSE */
#define DIAG(X) diag X
#endif
46
loid2lo(uint16 loid)47 LOCK_OWNER *loid2lo(uint16 loid)
48 {
49 return loarray+loid-1;
50 }
loid2lo1(uint16 loid)51 TABLE_LOCK_OWNER *loid2lo1(uint16 loid)
52 {
53 return loarray1+loid-1;
54 }
55
/*
  Test helpers. Each lock_* macro counts as one ok() towards the plan.
  All multi-statement macros are wrapped in do { } while (0) so they
  expand to exactly one statement and remain safe in unbraced if/else.

    unlock_all(O)     owner O releases all of its table locks
    test_lock(...)    owner O requests an L-lock on table R, expect RES
    lock_ok_a         expect the lock granted outright
    lock_ok_i         expect the lock, subresource lock still needed
    lock_ok_l         expect the lock, instant subresource lock needed
    lock_conflict     expect the request to time out
*/
#define unlock_all(O) do { \
  diag("lo" #O "> release all locks"); \
  tablockman_release_locks(&tablockman, loid2lo1(O)); } while (0)
#define test_lock(O, R, L, S, RES) do { \
  ok(tablockman_getlock(&tablockman, loid2lo1(O), &ltarray[R], L) == RES, \
     "lo" #O "> " S "lock resource " #R " with " #L "-lock"); \
  print_lo1(loid2lo1(O)); } while (0)
#define lock_ok_a(O, R, L) \
  test_lock(O, R, L, "", GOT_THE_LOCK)
#define lock_ok_i(O, R, L) \
  test_lock(O, R, L, "", GOT_THE_LOCK_NEED_TO_LOCK_A_SUBRESOURCE)
#define lock_ok_l(O, R, L) \
  test_lock(O, R, L, "", GOT_THE_LOCK_NEED_TO_INSTANT_LOCK_A_SUBRESOURCE)
#define lock_conflict(O, R, L) \
  test_lock(O, R, L, "cannot ", LOCK_TIMEOUT)
70
/*
  Single-threaded sanity checks for tablockman: plain grants, lock
  escalation, conflicts, and loose (LS) lock interaction. The first
  macro argument is a 1-based owner id, the second an index into
  ltarray. Contributes 33 ok() results to the test plan.
*/
void test_tablockman_simple()
{
  /* simple */
  lock_ok_a(1, 1, S);
  lock_ok_i(2, 2, IS);
  lock_ok_i(1, 2, IX);
  /* lock escalation */
  lock_ok_a(1, 1, X);
  lock_ok_i(2, 2, IX);
  /* failures */
  lock_conflict(2, 1, X);
  unlock_all(2);
  lock_ok_a(1, 2, S);
  lock_ok_a(1, 2, IS);
  lock_ok_a(1, 2, LS);
  lock_ok_i(1, 3, IX);
  lock_ok_a(2, 3, LS);
  lock_ok_i(1, 3, IX);
  lock_ok_l(2, 3, IS);
  unlock_all(1);
  unlock_all(2);

  /* owner 1's IX blocks S from owner 2, but not owner 1's own LS */
  lock_ok_i(1, 1, IX);
  lock_conflict(2, 1, S);
  lock_ok_a(1, 1, LS);
  unlock_all(1);
  unlock_all(2);

  /* LS locks from several owners coexist with IX/IS on the same table */
  lock_ok_i(1, 1, IX);
  lock_ok_a(2, 1, LS);
  lock_ok_a(1, 1, LS);
  lock_ok_i(1, 1, IX);
  lock_ok_i(3, 1, IS);
  unlock_all(1);
  unlock_all(2);
  unlock_all(3);

  /* IX is refused while other owners hold LS/IS on the table */
  lock_ok_i(1, 4, IS);
  lock_ok_i(2, 4, IS);
  lock_ok_i(3, 4, IS);
  lock_ok_a(3, 4, LS);
  lock_ok_i(4, 4, IS);
  lock_conflict(4, 4, IX);
  lock_conflict(2, 4, IX);
  lock_ok_a(1, 4, LS);
  unlock_all(1);
  unlock_all(2);
  unlock_all(3);
  unlock_all(4);

  /* two owners holding IX block each other's S/X upgrades */
  lock_ok_i(1, 1, IX);
  lock_ok_i(2, 1, IX);
  lock_conflict(1, 1, S);
  lock_conflict(2, 1, X);
  unlock_all(1);
  unlock_all(2);
}
128
/* shared state for the multi-threaded stress tests (guarded by rt_mutex) */
int rt_num_threads;                /* worker threads still running */
int litmus;                        /* non-zero => the stress test failed */
int thread_number= 0, timeouts= 0; /* next owner id; total lock timeouts */
/*
  Spawn n threads all running handler(&m), join them, and report the
  wall-clock time. The single ok() passes iff no thread set litmus.

  test     test name, used only in diagnostics
  handler  thread entry point; receives a pointer to m
  n        number of threads to create
  m        iteration count handed to every thread
*/
void run_test(const char *test, pthread_handler handler, int n, int m)
{
  pthread_t *threads;
  ulonglong now= my_getsystime();
  int i;

  thread_number= timeouts= 0;
  litmus= 0;

  /*
    sizeof(*threads), not sizeof(void *): pthread_t is not guaranteed
    to be pointer-sized on every platform, so the old expression could
    under-allocate the array.
  */
  threads= (pthread_t *)my_malloc(n * sizeof(*threads), MYF(0));
  if (!threads)
  {
    diag("Out of memory");
    abort();
  }

  diag("Running %s with %d threads, %d iterations... ", test, n, m);
  rt_num_threads= n;
  for (i= 0; i < n; i++)
    if (pthread_create(threads + i, 0, handler, &m))
    {
      diag("Could not create thread");
      abort();
    }
  for (i= 0; i < n; i++)
    pthread_join(threads[i], 0);
  now= my_getsystime() - now;
  /* my_getsystime() counts 100ns units, hence the 1e7 divisor */
  ok(litmus == 0, "Finished %s in %g secs (%d)", test, ((double)now)/1e7, litmus);
  my_free((void*)threads, MYF(0));
}
162
pthread_mutex_t rt_mutex;   /* protects thread_number/rt_num_threads/timeouts */
int Nrows= 100;             /* rows per run (row ids span Nrows values) */
int Ntables= 10;            /* tables used; must not exceed Ntbls */
int table_lock_ratio= 10;   /* ~every ratio-th request is a table lock; 0 = rows only */
/* lock types and their printable names, index-aligned with each other */
enum lockman_lock_type lock_array[6]= {S, X, LS, LX, IS, IX};
char *lock2str[6]= {"S", "X", "LS", "LX", "IS", "IX"};
/* printable names for enum lockman_getlock_result, in enum order */
char *res2str[]= {
  "DIDN'T GET THE LOCK",
  "OUT OF MEMORY",
  "DEADLOCK",
  "LOCK TIMEOUT",
  "GOT THE LOCK",
  "GOT THE LOCK NEED TO LOCK A SUBRESOURCE",
  "GOT THE LOCK NEED TO INSTANT LOCK A SUBRESOURCE"};
/*
  Stress-test thread body: performs m pseudo-random lock requests.

  Each iteration picks a row and a lock level. Roughly every
  table_lock_ratio-th request takes a table lock through tablockman; the
  rest take a row lock (intention lock on the table via tablockman, then
  the row lock itself via lockman). On any timeout or failure the owner
  releases everything it holds and counts the iteration as a timeout.
*/
pthread_handler_t test_lockman(void *arg)
{
  int m= (*(int *)arg);
  uint x, loid, row, table, res, locklevel, timeout= 0;
  LOCK_OWNER *lo; TABLE_LOCK_OWNER *lo1; DBUG_ASSERT(Ntables <= Ntbls);

  /* owner ids are handed out sequentially starting from 1 */
  pthread_mutex_lock(&rt_mutex);
  loid= ++thread_number;
  pthread_mutex_unlock(&rt_mutex);
  lo= loid2lo(loid); lo1= loid2lo1(loid);

  /* seed the generator from this thread's stack address */
  for (x= ((int)(intptr)(&m)); m > 0; m--)
  {
    /* quick-and-dirty LCG; NOTE(review): the constants exceed 32 bits, so
       on a 32-bit uint they are silently truncated -- fine for a fuzzer,
       but the "prime" property does not survive the truncation */
    x= (x*3628273133 + 1500450271) % 9576890767; /* three prime numbers */
    row= x % Nrows + Ntables;
    table= row % Ntables;
    locklevel= (x/Nrows) & 3; /* 0..3 => S, X, LS, LX */
    if (table_lock_ratio && (x/Nrows/4) % table_lock_ratio == 0)
    { /* table lock */
      res= tablockman_getlock(&tablockman, lo1, ltarray+table, lock_array[locklevel]);
      DIAG(("loid %2d, table %d, lock %s, res %s", loid, table,
            lock2str[locklevel], res2str[res]));
      if (res < GOT_THE_LOCK)
      {
        /* didn't get it: back off completely and count a timeout */
        lockman_release_locks(&lockman, lo); tablockman_release_locks(&tablockman, lo1);
        DIAG(("loid %2d, release all locks", loid));
        timeout++;
        continue;
      }
      DBUG_ASSERT(res == GOT_THE_LOCK);
    }
    else
    { /* row lock */
      locklevel&= 1; /* keep only S(0)/X(1); +4 below maps to IS/IX */
      res= tablockman_getlock(&tablockman, lo1, ltarray+table, lock_array[locklevel + 4]);
      DIAG(("loid %2d, row %d, lock %s, res %s", loid, row,
            lock2str[locklevel+4], res2str[res]));
      switch (res)
      {
      case GOT_THE_LOCK:
        continue;
      case GOT_THE_LOCK_NEED_TO_INSTANT_LOCK_A_SUBRESOURCE:
        /* not implemented, so take a regular lock */
      case GOT_THE_LOCK_NEED_TO_LOCK_A_SUBRESOURCE:
        /* intention lock granted: now take the row lock itself */
        res= lockman_getlock(&lockman, lo, row, lock_array[locklevel]);
        DIAG(("loid %2d, ROW %d, lock %s, res %s", loid, row,
              lock2str[locklevel], res2str[res]));
        if (res == DIDNT_GET_THE_LOCK)
        {
          lockman_release_locks(&lockman, lo);
          tablockman_release_locks(&tablockman, lo1);
          DIAG(("loid %2d, release all locks", loid));
          timeout++;
          continue;
        }
        DBUG_ASSERT(res == GOT_THE_LOCK);
        continue;
      default:
        /* intention lock refused (timeout/deadlock/OOM): back off */
        lockman_release_locks(&lockman, lo); tablockman_release_locks(&tablockman, lo1);
        DIAG(("loid %2d, release all locks", loid));
        timeout++;
        continue;
      }
    }
  }

  /* all iterations done: drop everything still held */
  lockman_release_locks(&lockman, lo);
  tablockman_release_locks(&tablockman, lo1);

  pthread_mutex_lock(&rt_mutex);
  rt_num_threads--;
  timeouts+= timeout;
  if (!rt_num_threads)
    diag("number of timeouts: %d", timeouts);
  pthread_mutex_unlock(&rt_mutex);

  return 0;
}
255
/*
  Test driver: single-threaded sanity checks first, then two
  multi-threaded stress runs (mixed row/table load, then rows only).
  plan(35) = 33 ok()s from test_tablockman_simple() + 1 per run_test().
*/
int main()
{
  int i;

  my_init();
  pthread_mutex_init(&rt_mutex, 0);

  plan(35);

  /* 50 is the lock wait timeout -- presumably milliseconds; verify
     against lockman_init()'s contract */
  lockman_init(&lockman, &loid2lo, 50);
  tablockman_init(&tablockman, &loid2lo1, 50);

  /* owner id i+1 gets slot i in both owner arrays; the row- and
     table-lock owners share the same mutex/cond pair */
  for (i= 0; i < Nlos; i++)
  {
    pthread_mutex_init(&mutexes[i], MY_MUTEX_INIT_FAST);
    pthread_cond_init (&conds[i], 0);

    loarray[i].pins= lf_alloc_get_pins(&lockman.alloc);
    loarray[i].all_locks= 0;
    loarray[i].waiting_for= 0;
    loarray[i].mutex= &mutexes[i];
    loarray[i].cond= &conds[i];
    loarray[i].loid= i+1;

    loarray1[i].active_locks= 0;
    loarray1[i].waiting_lock= 0;
    loarray1[i].waiting_for= 0;
    loarray1[i].mutex= &mutexes[i];
    loarray1[i].cond= &conds[i];
    loarray1[i].loid= i+1;
  }

  for (i= 0; i < Ntbls; i++)
  {
    tablockman_init_locked_table(ltarray+i, Nlos);
  }

  test_tablockman_simple();

#define CYCLES 10000
#define THREADS Nlos /* don't change this line */

  /* mixed load, stress-test with random locks */
  Nrows= 100;
  Ntables= 10;
  table_lock_ratio= 10;
  run_test("\"random lock\" stress test", test_lockman, THREADS, CYCLES);

  /* "real-life" simulation - many rows, no table locks */
  Nrows= 1000000;
  Ntables= 10;
  table_lock_ratio= 0;
  run_test("\"real-life\" simulation test", test_lockman, THREADS, CYCLES*10);

  /* NOTE(review): only the row-lock (lockman) side is torn down below;
     tablockman and the LOCKED_TABLEs are never destroyed -- presumably
     acceptable for a unit test process that exits anyway, but verify */
  for (i= 0; i < Nlos; i++)
  {
    lockman_release_locks(&lockman, &loarray[i]);
    pthread_mutex_destroy(loarray[i].mutex);
    pthread_cond_destroy(loarray[i].cond);
    lf_pinbox_put_pins(loarray[i].pins);
  }

  {
    ulonglong now= my_getsystime();
    lockman_destroy(&lockman);
    now= my_getsystime()-now;
    /* my_getsystime() counts 100ns units, hence the 1e7 divisor */
    diag("lockman_destroy: %g secs", ((double)now)/1e7);
  }

  pthread_mutex_destroy(&rt_mutex);
  my_end(0);
  return exit_status();
}
329
330