/* Copyright (C) 2008 MySQL AB, 2008-2009 Sun Microsystems, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA */

#include "thr_template.c"
#include <waiting_threads.h>
#include <m_string.h>
19 
20 struct test_wt_thd {
21   WT_THD thd;
22   pthread_mutex_t lock;
23 } thds[THREADS];
24 
25 uint i, cnt;
26 pthread_mutex_t lock;
27 pthread_cond_t thread_sync;
28 
29 ulong wt_timeout_short=100, wt_deadlock_search_depth_short=4;
30 ulong wt_timeout_long=10000, wt_deadlock_search_depth_long=15;
31 
32 #define reset(ARRAY) bzero(ARRAY, sizeof(ARRAY))
33 
34 /* see explanation of the kill strategies in waiting_threads.h */
35 enum { LATEST, RANDOM, YOUNGEST, LOCKS } kill_strategy;
36 
37 WT_RESOURCE_TYPE restype={ wt_resource_id_memcmp, 0};
38 
39 #define rnd() ((uint)(my_rnd(&rand) * INT_MAX32))
40 
/*
  stress test: wait on a random number of random threads.
  it always succeeds (unless crashes or hangs).
*/
test_wt(void * arg)45 pthread_handler_t test_wt(void *arg)
46 {
47   int    m, n, i, id, res;
48   struct my_rnd_struct rand;
49 
50   my_thread_init();
51 
52   pthread_mutex_lock(&mutex);
53   id= cnt++;
54   wt_thd_lazy_init(& thds[id].thd,
55                    & wt_deadlock_search_depth_short, & wt_timeout_short,
56                    & wt_deadlock_search_depth_long, & wt_timeout_long);
57 
58   /* now, wait for everybody to be ready to run */
59   if (cnt >= THREADS)
60     pthread_cond_broadcast(&thread_sync);
61   else
62     while (cnt < THREADS)
63       pthread_cond_wait(&thread_sync, &mutex);
64   pthread_mutex_unlock(&mutex);
65 
66   my_rnd_init(&rand, (ulong)(intptr)&m, id);
67   if (kill_strategy == YOUNGEST)
68     thds[id].thd.weight= (ulong) ~ my_interval_timer();
69   if (kill_strategy == LOCKS)
70     thds[id].thd.weight= 0;
71 
72   for (m= *(int *)arg; m ; m--)
73   {
74     WT_RESOURCE_ID resid;
75     int blockers[THREADS/10], j, k;
76 
77     resid.value= id;
78     resid.type= &restype;
79 
80     res= 0;
81 
82     /* prepare for waiting for a random number of random threads */
83     for (j= n= (rnd() % THREADS)/10; !res && j >= 0; j--)
84     {
85 retry:
86       i= rnd() % (THREADS-1); /* pick a random thread */
87       if (i >= id) i++;   /* with a number from 0 to THREADS-1 excluding ours */
88 
89       for (k=n; k >=j; k--) /* the one we didn't pick before */
90         if (blockers[k] == i)
91           goto retry;
92       blockers[j]= i;
93 
94       if (kill_strategy == RANDOM)
95         thds[id].thd.weight= rnd();
96 
97       pthread_mutex_lock(& thds[i].lock);
98       res= wt_thd_will_wait_for(& thds[id].thd, & thds[i].thd, &resid);
99       pthread_mutex_unlock(& thds[i].lock);
100     }
101 
102     if (!res)
103     {
104       pthread_mutex_lock(&lock);
105       res= wt_thd_cond_timedwait(& thds[id].thd, &lock);
106       pthread_mutex_unlock(&lock);
107     }
108 
109     if (res)
110     {
111       pthread_mutex_lock(& thds[id].lock);
112       pthread_mutex_lock(&lock);
113       wt_thd_release_all(& thds[id].thd);
114       pthread_mutex_unlock(&lock);
115       pthread_mutex_unlock(& thds[id].lock);
116       if (kill_strategy == LOCKS)
117         thds[id].thd.weight= 0;
118       if (kill_strategy == YOUNGEST)
119         thds[id].thd.weight= (ulong)~ my_interval_timer();
120     }
121     else if (kill_strategy == LOCKS)
122       thds[id].thd.weight++;
123   }
124 
125   pthread_mutex_lock(&mutex);
126   /* wait for everybody to finish */
127   if (!--cnt)
128     pthread_cond_broadcast(&thread_sync);
129   else
130     while (cnt)
131       pthread_cond_wait(&thread_sync, &mutex);
132 
133   pthread_mutex_lock(& thds[id].lock);
134   pthread_mutex_lock(&lock);
135   wt_thd_release_all(& thds[id].thd);
136   pthread_mutex_unlock(&lock);
137   pthread_mutex_unlock(& thds[id].lock);
138   wt_thd_destroy(& thds[id].thd);
139   pthread_mutex_unlock(&mutex);
140 
141   DBUG_PRINT("wt", ("exiting"));
142   my_thread_end();
143   return 0;
144 }
145 
do_one_test()146 void do_one_test()
147 {
148   double sum, sum0;
149   DBUG_ENTER("do_one_test");
150 
151   reset(wt_cycle_stats);
152   reset(wt_wait_stats);
153   wt_success_stats=0;
154   cnt=0;
155   test_concurrently("waiting_threads", test_wt, THREADS, CYCLES);
156 
157   sum=sum0=0;
158   for (cnt=0; cnt < WT_CYCLE_STATS; cnt++)
159     sum+= wt_cycle_stats[0][cnt] + wt_cycle_stats[1][cnt];
160   for (cnt=0; cnt < WT_CYCLE_STATS; cnt++)
161     if (wt_cycle_stats[0][cnt] + wt_cycle_stats[1][cnt] > 0)
162     {
163       sum0+=wt_cycle_stats[0][cnt] + wt_cycle_stats[1][cnt];
164       diag("deadlock cycles of length %2u: %4u %4u %8.2f %%", cnt,
165            wt_cycle_stats[0][cnt], wt_cycle_stats[1][cnt], 1e2*sum0/sum);
166     }
167   diag("depth exceeded: %u %u",
168        wt_cycle_stats[0][cnt], wt_cycle_stats[1][cnt]);
169   for (cnt=0; cnt < WT_WAIT_STATS; cnt++)
170     if (wt_wait_stats[cnt]>0)
171       diag("deadlock waits up to %7llu us: %5u",
172            wt_wait_table[cnt], wt_wait_stats[cnt]);
173   diag("timed out: %u", wt_wait_stats[cnt]);
174   diag("successes: %u", wt_success_stats);
175 
176   DBUG_VOID_RETURN;
177 }
178 
do_tests()179 void do_tests()
180 {
181   DBUG_ENTER("do_tests");
182   if (skip_big_tests)
183   {
184     skip(1, "Big test skipped");
185     return;
186   }
187   plan(13);
188   compile_time_assert(THREADS >= 4);
189 
190   DBUG_PRINT("wt", ("================= initialization ==================="));
191 
192   pthread_cond_init(&thread_sync, 0);
193   pthread_mutex_init(&lock, 0);
194   wt_init();
195   for (cnt=0; cnt < THREADS; cnt++)
196     pthread_mutex_init(& thds[cnt].lock, 0);
197   {
198     WT_RESOURCE_ID resid[4];
199     for (i=0; i < array_elements(resid); i++)
200     {
201       wt_thd_lazy_init(& thds[i].thd,
202                        & wt_deadlock_search_depth_short, & wt_timeout_short,
203                        & wt_deadlock_search_depth_long, & wt_timeout_long);
204       resid[i].value= i+1;
205       resid[i].type= &restype;
206     }
207 
208     DBUG_PRINT("wt", ("================= manual test ==================="));
209 
210 #define ok_wait(X,Y, R) \
211     ok(wt_thd_will_wait_for(& thds[X].thd, & thds[Y].thd, &resid[R]) == 0, \
212       "thd[" #X "] will wait for thd[" #Y "]")
213 #define ok_deadlock(X,Y,R) \
214     ok(wt_thd_will_wait_for(& thds[X].thd, & thds[Y].thd, &resid[R]) == WT_DEADLOCK, \
215       "thd[" #X "] will wait for thd[" #Y "] - deadlock")
216 
217     ok_wait(0,1,0);
218     ok_wait(0,2,0);
219     ok_wait(0,3,0);
220 
221     pthread_mutex_lock(&lock);
222     bad= wt_thd_cond_timedwait(& thds[0].thd, &lock);
223     pthread_mutex_unlock(&lock);
224     ok(bad == WT_TIMEOUT, "timeout test returned %d", bad);
225 
226     ok_wait(0,1,0);
227     ok_wait(1,2,1);
228     ok_deadlock(2,0,2);
229 
230     pthread_mutex_lock(&lock);
231     ok(wt_thd_cond_timedwait(& thds[0].thd, &lock) == WT_TIMEOUT, "as always");
232     ok(wt_thd_cond_timedwait(& thds[1].thd, &lock) == WT_TIMEOUT, "as always");
233     wt_thd_release_all(& thds[0].thd);
234     wt_thd_release_all(& thds[1].thd);
235     wt_thd_release_all(& thds[2].thd);
236     wt_thd_release_all(& thds[3].thd);
237 
238     for (i=0; i < array_elements(resid); i++)
239     {
240       wt_thd_release_all(& thds[i].thd);
241       wt_thd_destroy(& thds[i].thd);
242     }
243     pthread_mutex_unlock(&lock);
244   }
245 
246   wt_deadlock_search_depth_short=6;
247   wt_timeout_short=1000;
248   wt_timeout_long= 100;
249   wt_deadlock_search_depth_long=16;
250   DBUG_PRINT("wt", ("================= stress test ==================="));
251 
252   diag("timeout_short=%lu us, deadlock_search_depth_short=%lu",
253        wt_timeout_short, wt_deadlock_search_depth_short);
254   diag("timeout_long=%lu us, deadlock_search_depth_long=%lu",
255        wt_timeout_long, wt_deadlock_search_depth_long);
256 
257 #ifndef _WIN32
258 #define test_kill_strategy(X)                   \
259   diag("kill strategy: " #X);                   \
260   DBUG_EXECUTE("reset_file",                    \
261                { rewind(DBUG_FILE); my_chsize(fileno(DBUG_FILE), 0, 0, MYF(MY_WME)); }); \
262   DBUG_PRINT("info", ("kill strategy: " #X));   \
263   kill_strategy=X;                              \
264   do_one_test();
265 #else
266 #define test_kill_strategy(X)                   \
267   diag("kill strategy: " #X);                   \
268   DBUG_PRINT("info", ("kill strategy: " #X));   \
269   kill_strategy=X;                              \
270   do_one_test();
271 #endif
272 
273   test_kill_strategy(LATEST);
274   test_kill_strategy(RANDOM);
275   test_kill_strategy(YOUNGEST);
276   test_kill_strategy(LOCKS);
277 
278   DBUG_PRINT("wt", ("================= cleanup ==================="));
279   for (cnt=0; cnt < THREADS; cnt++)
280     pthread_mutex_destroy(& thds[cnt].lock);
281   wt_end();
282   pthread_mutex_destroy(&lock);
283   pthread_cond_destroy(&thread_sync);
284   DBUG_VOID_RETURN;
285 }
286 
287