1 /*-
2  * See the file LICENSE for redistribution information.
3  *
4  * Copyright (c) 1999, 2013 Oracle and/or its affiliates.  All rights reserved.
5  *
6  * Standalone mutex tester for Berkeley DB mutexes.
7  *
8  * $Id$
9  */
10 
11 #include "db_config.h"
12 
13 #include "db_int.h"
14 
#ifdef DB_WIN32
#define	MUTEX_THREAD_TEST	1

extern int getopt(int, char * const *, const char *);

/* On Windows, both processes and threads are identified by HANDLEs. */
typedef HANDLE os_pid_t;
typedef HANDLE os_thread_t;

/* Thin wrappers mapping the Win32 thread API onto a pthread-like API. */
#define	os_thread_create(thrp, attr, func, arg)				\
    (((*(thrp) = CreateThread(NULL, 0,					\
	(LPTHREAD_START_ROUTINE)(func), (arg), 0, NULL)) == NULL) ? -1 : 0)
#define	os_thread_join(thr, statusp)					\
    ((WaitForSingleObject((thr), INFINITE) == WAIT_OBJECT_0) &&		\
    GetExitCodeThread((thr), (LPDWORD)(statusp)) ? 0 : -1)
#define	os_thread_self() GetCurrentThreadId()

#else /* !DB_WIN32 */

#include <sys/wait.h>

typedef pid_t os_pid_t;

/*
 * There's only one mutex implementation that can't support thread-level
 * locking: UNIX/fcntl mutexes.
 *
 * The general Berkeley DB library configuration doesn't look for the POSIX
 * pthread functions, with one exception -- pthread_yield.
 *
 * Use these two facts to decide if we're going to build with or without
 * threads.
 */
#if !defined(HAVE_MUTEX_FCNTL) && defined(HAVE_PTHREAD_YIELD)
#define	MUTEX_THREAD_TEST	1

#include <pthread.h>

typedef pthread_t os_thread_t;

#define	os_thread_create(thrp, attr, func, arg)				\
    pthread_create((thrp), (attr), (func), (arg))
#define	os_thread_join(thr, statusp) pthread_join((thr), (statusp))
#define	os_thread_self() pthread_self()
#endif /* HAVE_PTHREAD_YIELD */
#endif /* !DB_WIN32 */

/* Sentinel returned by os_spawn/spawn_proc on failure. */
#define	OS_BAD_PID ((os_pid_t)-1)

#define	TESTDIR		"TESTDIR"		/* Working area */
#define	MT_FILE		"TESTDIR/mutex.file"
#define	MT_FILE_QUIT	"TESTDIR/mutex.file.quit"
66 
/*
 * The backing data layout:
 *	TM[1]			per-thread mutex array lock
 *	TM[nthreads]		per-thread mutex array
 *	TM[maxlocks]		per-lock mutex array
 */
typedef struct {
	db_mutex_t mutex;			/* Mutex. */
	u_long	   id;				/* Holder's ID. */
	u_int	   wakeme;			/* Request to awake. */
} TM;

DB_ENV	*dbenv;					/* Backing environment */
ENV	*env;
size_t	 len;					/* Backing data chunk size. */

u_int8_t *gm_addr;				/* Global mutex */
u_int8_t *lm_addr;				/* Locker mutexes */
u_int8_t *tm_addr;				/* Thread mutexes */

#ifdef MUTEX_THREAD_TEST
os_thread_t *kidsp;				/* Locker threads */
os_thread_t  wakep;				/* Wakeup thread */
#endif

/* Default process/thread counts depend on the platform's capabilities. */
#ifndef	HAVE_MMAP
u_int	nprocs = 1;				/* -p: Processes. */
u_int	nthreads = 20;				/* -t: Threads. */
#elif	MUTEX_THREAD_TEST
u_int	nprocs = 5;				/* -p: Processes. */
u_int	nthreads = 4;				/* -t: Threads. */
#else
u_int	nprocs = 20;				/* -p: Processes. */
u_int	nthreads = 1;				/* -t: Threads. */
#endif

u_int	maxlocks = 20;				/* -l: Backing locks. */
u_int	nlocks = 10000;				/* -n: Locks per process. */
int	verbose;				/* -v: Verbosity. */

const char *progname;				/* Program name for messages. */

/* Forward declarations. */
void	 data_off(u_int8_t *, DB_FH *);
void	 data_on(u_int8_t **, u_int8_t **, u_int8_t **, DB_FH **, int);
int	 locker_start(u_long);
int	 locker_wait(void);
os_pid_t os_spawn(const char *, char *const[]);
int	 os_wait(os_pid_t *, u_int);
void	*run_lthread(void *);
void	*run_wthread(void *);
os_pid_t spawn_proc(u_long, char *, char *);
void	 tm_env_close(void);
int	 tm_env_init(void);
void	 tm_mutex_destroy(void);
void	 tm_mutex_init(void);
void	 tm_mutex_stats(void);
int	 usage(void);
int	 wakeup_start(u_long);
int	 wakeup_wait(void);
126 
/*
 * main --
 *	Parse options, then run in one of three roles: the top-level
 *	parent that sets up the test, or a "locker"/"wakeup" child
 *	(selected internally via -T when the parent re-execs this binary).
 */
int
main(argc, argv)
	int argc;
	char *argv[];
{
	enum {LOCKER, WAKEUP, PARENT} rtype;
	extern int optind;
	extern char *optarg;
	os_pid_t wakeup_pid, *pids;
	u_long id;
	u_int i;
	DB_FH *fhp, *map_fhp;
	int ch, err;
	char *p, *tmpath, cmd[1024];

	/* Strip any leading path from the program name. */
	if ((progname = __db_rpath(argv[0])) == NULL)
		progname = argv[0];
	else
		++progname;

	rtype = PARENT;
	id = 0;
	tmpath = argv[0];	/* Saved so children can re-exec this binary. */
	while ((ch = getopt(argc, argv, "l:n:p:T:t:v")) != EOF)
		switch (ch) {
		case 'l':
			maxlocks = (u_int)atoi(optarg);
			break;
		case 'n':
			nlocks = (u_int)atoi(optarg);
			break;
		case 'p':
			nprocs = (u_int)atoi(optarg);
			break;
		case 't':
			if ((nthreads = (u_int)atoi(optarg)) == 0)
				nthreads = 1;
#if !defined(MUTEX_THREAD_TEST)
			if (nthreads != 1) {
				fprintf(stderr,
    "%s: thread support not available or not compiled for this platform.\n",
				    progname);
				return (EXIT_FAILURE);
			}
#endif
			break;
		case 'T':
			/*
			 * Internal option: "-T locker=ID" or "-T wakeup=ID",
			 * passed by the parent when spawning children.
			 */
			if (!memcmp(optarg, "locker", sizeof("locker") - 1))
				rtype = LOCKER;
			else if (
			    !memcmp(optarg, "wakeup", sizeof("wakeup") - 1))
				rtype = WAKEUP;
			else
				return (usage());
			if ((p = strchr(optarg, '=')) == NULL)
				return (usage());
			id = (u_long)atoi(p + 1);
			break;
		case 'v':
			verbose = 1;
			break;
		case '?':
		default:
			return (usage());
		}
	argc -= optind;
	argv += optind;

	/*
	 * If we're not running a multi-process test, we should be running
	 * a multi-thread test.
	 */
	if (nprocs == 1 && nthreads == 1) {
		fprintf(stderr,
	    "%s: running in a single process requires multiple threads\n",
		    progname);
		return (EXIT_FAILURE);
	}

	/* One global TM, one per locker thread, one per backing lock. */
	len = sizeof(TM) * (1 + nthreads * nprocs + maxlocks);

	/*
	 * In the multi-process test, the parent spawns processes that exec
	 * the original binary, ending up here.  Each process joins the DB
	 * environment separately and then calls the supporting function.
	 */
	if (rtype == LOCKER || rtype == WAKEUP) {
		__os_yield(env, 3, 0);		/* Let everyone catch up. */
						/* Initialize random numbers. */
		srand((u_int)time(NULL) % (u_int)getpid());

		if (tm_env_init() != 0)		/* Join the environment. */
			exit(EXIT_FAILURE);
						/* Join the backing data. */
		data_on(&gm_addr, &tm_addr, &lm_addr, &map_fhp, 0);
		if (verbose)
			printf(
	    "Backing file: global (%#lx), threads (%#lx), locks (%#lx)\n",
			    (u_long)gm_addr, (u_long)tm_addr, (u_long)lm_addr);

		if ((rtype == LOCKER ?
		    locker_start(id) : wakeup_start(id)) != 0)
			exit(EXIT_FAILURE);
		if ((rtype == LOCKER ? locker_wait() : wakeup_wait()) != 0)
			exit(EXIT_FAILURE);

		data_off(gm_addr, map_fhp);	/* Detach from backing data. */

		tm_env_close();			/* Detach from environment. */

		exit(EXIT_SUCCESS);
	}

	/*
	 * The following code is only executed by the original parent process.
	 *
	 * Clean up from any previous runs.
	 */
	snprintf(cmd, sizeof(cmd), "rm -rf %s", TESTDIR);
	(void)system(cmd);
	snprintf(cmd, sizeof(cmd), "mkdir %s", TESTDIR);
	(void)system(cmd);

	printf(
    "%s: %u processes, %u threads/process, %u lock requests from %u locks\n",
	    progname, nprocs, nthreads, nlocks, maxlocks);
	printf("%s: backing data %lu bytes\n", progname, (u_long)len);

	if (tm_env_init() != 0)		/* Create the environment. */
		exit(EXIT_FAILURE);
					/* Create the backing data. */
	data_on(&gm_addr, &tm_addr, &lm_addr, &map_fhp, 1);
	if (verbose)
		printf(
	    "backing data: global (%#lx), threads (%#lx), locks (%#lx)\n",
		    (u_long)gm_addr, (u_long)tm_addr, (u_long)lm_addr);

	tm_mutex_init();		/* Initialize mutexes. */

	if (nprocs > 1) {		/* Run the multi-process test. */
		/* Allocate array of locker process IDs. */
		if ((pids = calloc(nprocs, sizeof(os_pid_t))) == NULL) {
			fprintf(stderr, "%s: %s\n", progname, strerror(errno));
			goto fail;
		}

		/* Spawn locker processes and threads. */
		for (i = 0; i < nprocs; ++i) {
			if ((pids[i] =
			    spawn_proc(id, tmpath, "locker")) == OS_BAD_PID) {
				fprintf(stderr,
				    "%s: failed to spawn a locker\n", progname);
				goto fail;
			}
			id += nthreads;		/* Each child gets nthreads IDs. */
		}

		/* Spawn wakeup process/thread. */
		if ((wakeup_pid =
		    spawn_proc(id, tmpath, "wakeup")) == OS_BAD_PID) {
			fprintf(stderr,
			    "%s: failed to spawn waker\n", progname);
			goto fail;
		}
		++id;

		/* Wait for all lockers to exit. */
		if ((err = os_wait(pids, nprocs)) != 0) {
			fprintf(stderr, "%s: locker wait failed with %d\n",
			    progname, err);
			goto fail;
		}

		/* Signal wakeup process to exit by creating the quit file. */
		if ((err = __os_open(
		    env, MT_FILE_QUIT, 0, DB_OSO_CREATE, 0664, &fhp)) != 0) {
			fprintf(stderr,
			    "%s: open %s\n", progname, db_strerror(err));
			goto fail;
		}
		(void)__os_closehandle(env, fhp);

		/* Wait for wakeup process/thread. */
		if ((err = os_wait(&wakeup_pid, 1)) != 0) {
			fprintf(stderr, "%s: %lu: exited %d\n",
			    progname, (u_long)wakeup_pid, err);
			goto fail;
		}
	} else {			/* Run the single-process test. */
		/* Spawn locker threads. */
		if (locker_start(0) != 0)
			goto fail;

		/* Spawn wakeup thread. */
		if (wakeup_start(nthreads) != 0)
			goto fail;

		/* Wait for all lockers to exit. */
		if (locker_wait() != 0)
			goto fail;

		/* Signal wakeup thread to exit by creating the quit file. */
		if ((err = __os_open(
		    env, MT_FILE_QUIT, 0, DB_OSO_CREATE, 0664, &fhp)) != 0) {
			fprintf(stderr,
			    "%s: open %s\n", progname, db_strerror(err));
			goto fail;
		}
		(void)__os_closehandle(env, fhp);

		/* Wait for wakeup thread. */
		if (wakeup_wait() != 0)
			goto fail;
	}

	tm_mutex_stats();		/* Display run statistics. */
	tm_mutex_destroy();		/* Destroy mutexes. */

	data_off(gm_addr, map_fhp);	/* Detach from backing data. */

	tm_env_close();			/* Detach from environment. */

	printf("%s: test succeeded\n", progname);
	return (EXIT_SUCCESS);

fail:	printf("%s: FAILED!\n", progname);
	return (EXIT_FAILURE);
}
355 
356 int
locker_start(id)357 locker_start(id)
358 	u_long id;
359 {
360 #if defined(MUTEX_THREAD_TEST)
361 	u_int i;
362 	int err;
363 
364 	/*
365 	 * Spawn off threads.  We have nthreads all locking and going to
366 	 * sleep, and one other thread cycling through and waking them up.
367 	 */
368 	if ((kidsp =
369 	    (os_thread_t *)calloc(sizeof(os_thread_t), nthreads)) == NULL) {
370 		fprintf(stderr, "%s: %s\n", progname, strerror(errno));
371 		return (1);
372 	}
373 	for (i = 0; i < nthreads; i++)
374 		if ((err = os_thread_create(
375 		    &kidsp[i], NULL, run_lthread, (void *)(id + i))) != 0) {
376 			fprintf(stderr, "%s: failed spawning thread: %s\n",
377 			    progname, db_strerror(err));
378 			return (1);
379 		}
380 	return (0);
381 #else
382 	return (run_lthread((void *)id) == NULL ? 0 : 1);
383 #endif
384 }
385 
/*
 * locker_wait --
 *	Join all locker threads; a non-NULL thread exit value means the
 *	locker detected an error.  Returns 0 on success, 1 on failure.
 */
int
locker_wait()
{
#if defined(MUTEX_THREAD_TEST)
	void *status;
	u_int n;

	for (n = 0; n < nthreads; ++n) {
		(void)os_thread_join(kidsp[n], &status);
		if (status != NULL) {
			fprintf(stderr,
			    "%s: thread exited with error\n", progname);
			return (1);
		}
	}
	free(kidsp);
#endif
	return (0);
}
406 
407 void *
run_lthread(arg)408 run_lthread(arg)
409 	void *arg;
410 {
411 	TM *gp, *mp, *tp;
412 	u_long id, tid;
413 	u_int lock, nl;
414 	int err, i;
415 
416 	id = (u_long)arg;
417 #if defined(MUTEX_THREAD_TEST)
418 	tid = (u_long)os_thread_self();
419 #else
420 	tid = 0;
421 #endif
422 	printf("Locker: ID %03lu (PID: %lu; TID: %lx)\n",
423 	    id, (u_long)getpid(), tid);
424 
425 	gp = (TM *)gm_addr;
426 	tp = (TM *)(tm_addr + id * sizeof(TM));
427 
428 	for (nl = nlocks; nl > 0;) {
429 		/* Select and acquire a data lock. */
430 		lock = (u_int)rand() % maxlocks;
431 		mp = (TM *)(lm_addr + lock * sizeof(TM));
432 		if (verbose)
433 			printf("%03lu: lock %d (mtx: %lu)\n",
434 			    id, lock, (u_long)mp->mutex);
435 
436 		if ((err = dbenv->mutex_lock(dbenv, mp->mutex)) != 0) {
437 			fprintf(stderr, "%s: %03lu: never got lock %d: %s\n",
438 			    progname, id, lock, db_strerror(err));
439 			return ((void *)1);
440 		}
441 		if (mp->id != 0) {
442 			fprintf(stderr,
443 			    "%s: RACE! (%03lu granted lock %d held by %03lu)\n",
444 			    progname, id, lock, mp->id);
445 			return ((void *)1);
446 		}
447 		mp->id = id;
448 
449 		/*
450 		 * Pretend to do some work, periodically checking to see if
451 		 * we still hold the mutex.
452 		 */
453 		for (i = 0; i < 3; ++i) {
454 			__os_yield(env, 0, (u_long)rand() % 3);
455 			if (mp->id != id) {
456 				fprintf(stderr,
457 			    "%s: RACE! (%03lu stole lock %d from %03lu)\n",
458 				    progname, mp->id, lock, id);
459 				return ((void *)1);
460 			}
461 		}
462 
463 		/*
464 		 * Test self-blocking and unlocking by other threads/processes:
465 		 *
466 		 *	acquire the global lock
467 		 *	set our wakeup flag
468 		 *	release the global lock
469 		 *	acquire our per-thread lock
470 		 *
471 		 * The wakeup thread will wake us up.
472 		 */
473 		if ((err = dbenv->mutex_lock(dbenv, gp->mutex)) != 0) {
474 			fprintf(stderr, "%s: %03lu: global lock: %s\n",
475 			    progname, id, db_strerror(err));
476 			return ((void *)1);
477 		}
478 		if (tp->id != 0 && tp->id != id) {
479 			fprintf(stderr,
480 		    "%s: %03lu: per-thread mutex isn't mine, owned by %03lu\n",
481 			    progname, id, tp->id);
482 			return ((void *)1);
483 		}
484 		tp->id = id;
485 		if (verbose)
486 			printf("%03lu: self-blocking (mtx: %lu)\n",
487 			    id, (u_long)tp->mutex);
488 		if (tp->wakeme) {
489 			fprintf(stderr,
490 			    "%s: %03lu: wakeup flag incorrectly set\n",
491 			    progname, id);
492 			return ((void *)1);
493 		}
494 		tp->wakeme = 1;
495 		if ((err = dbenv->mutex_unlock(dbenv, gp->mutex)) != 0) {
496 			fprintf(stderr,
497 			    "%s: %03lu: global unlock: %s\n",
498 			    progname, id, db_strerror(err));
499 			return ((void *)1);
500 		}
501 		if ((err = dbenv->mutex_lock(dbenv, tp->mutex)) != 0) {
502 			fprintf(stderr, "%s: %03lu: per-thread lock: %s\n",
503 			    progname, id, db_strerror(err));
504 			return ((void *)1);
505 		}
506 		/* Time passes... */
507 		if (tp->wakeme) {
508 			fprintf(stderr, "%s: %03lu: wakeup flag not cleared\n",
509 			    progname, id);
510 			return ((void *)1);
511 		}
512 
513 		if (verbose)
514 			printf("%03lu: release %d (mtx: %lu)\n",
515 			    id, lock, (u_long)mp->mutex);
516 
517 		/* Release the data lock. */
518 		mp->id = 0;
519 		if ((err = dbenv->mutex_unlock(dbenv, mp->mutex)) != 0) {
520 			fprintf(stderr,
521 			    "%s: %03lu: lock release: %s\n",
522 			    progname, id, db_strerror(err));
523 			return ((void *)1);
524 		}
525 
526 		if (--nl % 1000 == 0)
527 			printf("%03lu: %d\n", id, nl);
528 	}
529 
530 	return (NULL);
531 }
532 
533 int
wakeup_start(id)534 wakeup_start(id)
535 	u_long id;
536 {
537 #if defined(MUTEX_THREAD_TEST)
538 	int err;
539 
540 	/*
541 	 * Spawn off wakeup thread.
542 	 */
543 	if ((err = os_thread_create(
544 	    &wakep, NULL, run_wthread, (void *)id)) != 0) {
545 		fprintf(stderr, "%s: failed spawning wakeup thread: %s\n",
546 		    progname, db_strerror(err));
547 		return (1);
548 	}
549 	return (0);
550 #else
551 	return (run_wthread((void *)id) == NULL ? 0 : 1);
552 #endif
553 }
554 
/*
 * wakeup_wait --
 *	Join the wakeup thread; a non-NULL exit value means it failed.
 *	Returns 0 on success, 1 on failure.
 */
int
wakeup_wait()
{
#if defined(MUTEX_THREAD_TEST)
	void *status;

	/*
	 * The wakeup thread exits on its own once the quit file has been
	 * created; all we do here is reap it and check its exit value.
	 */
	(void)os_thread_join(wakep, &status);
	if (status == NULL)
		return (0);

	fprintf(stderr,
	    "%s: wakeup thread exited with error\n", progname);
	return (1);
#else
	return (0);
#endif
}
573 
574 /*
575  * run_wthread --
576  *	Thread to wake up other threads that are sleeping.
577  */
578 void *
run_wthread(arg)579 run_wthread(arg)
580 	void *arg;
581 {
582 	TM *gp, *tp;
583 	u_long id, tid;
584 	u_int check_id;
585 	int err, quitcheck;
586 
587 	id = (u_long)arg;
588 	quitcheck = 0;
589 #if defined(MUTEX_THREAD_TEST)
590 	tid = (u_long)os_thread_self();
591 #else
592 	tid = 0;
593 #endif
594 	printf("Wakeup: ID %03lu (PID: %lu; TID: %lx)\n",
595 	    id, (u_long)getpid(), tid);
596 
597 	gp = (TM *)gm_addr;
598 
599 	/* Loop, waking up sleepers and periodically sleeping ourselves. */
600 	for (check_id = 0;; ++check_id) {
601 		/* Check to see if the locking threads have finished. */
602 		if (++quitcheck >= 100) {
603 			quitcheck = 0;
604 		if (__os_exists(env, MT_FILE_QUIT, NULL) == 0)
605 			break;
606 		}
607 
608 		/* Check for ID wraparound. */
609 		if (check_id == nthreads * nprocs)
610 			check_id = 0;
611 
612 		/* Check for a thread that needs a wakeup. */
613 		tp = (TM *)(tm_addr + check_id * sizeof(TM));
614 		if (!tp->wakeme)
615 			continue;
616 
617 		if (verbose) {
618 			printf("%03lu: wakeup thread %03lu (mtx: %lu)\n",
619 			    id, tp->id, (u_long)tp->mutex);
620 			(void)fflush(stdout);
621 		}
622 
623 		/* Acquire the global lock. */
624 		if ((err = dbenv->mutex_lock(dbenv, gp->mutex)) != 0) {
625 			fprintf(stderr, "%s: wakeup: global lock: %s\n",
626 			    progname, db_strerror(err));
627 			return ((void *)1);
628 		}
629 
630 		tp->wakeme = 0;
631 		if ((err = dbenv->mutex_unlock(dbenv, tp->mutex)) != 0) {
632 			fprintf(stderr, "%s: wakeup: unlock: %s\n",
633 			    progname, db_strerror(err));
634 			return ((void *)1);
635 		}
636 
637 		if ((err = dbenv->mutex_unlock(dbenv, gp->mutex)) != 0) {
638 			fprintf(stderr, "%s: wakeup: global unlock: %s\n",
639 			    progname, db_strerror(err));
640 			return ((void *)1);
641 		}
642 
643 		__os_yield(env, 0, (u_long)rand() % 3);
644 	}
645 	return (NULL);
646 }
647 
648 /*
649  * tm_env_init --
650  *	Create the backing database environment.
651  */
652 int
tm_env_init()653 tm_env_init()
654 {
655 	u_int32_t flags;
656 	int ret;
657 	char *home;
658 
659 	/*
660 	 * Create an environment object and initialize it for error
661 	 * reporting.
662 	 */
663 	if ((ret = db_env_create(&dbenv, 0)) != 0) {
664 		fprintf(stderr, "%s: %s\n", progname, db_strerror(ret));
665 		return (1);
666 	}
667 	env = dbenv->env;
668 	dbenv->set_errfile(dbenv, stderr);
669 	dbenv->set_errpfx(dbenv, progname);
670 
671 	/* Allocate enough mutexes. */
672 	if ((ret = dbenv->mutex_set_increment(dbenv,
673 	    1 + nthreads * nprocs + maxlocks)) != 0) {
674 		dbenv->err(dbenv, ret, "dbenv->mutex_set_increment");
675 		return (1);
676 	}
677 
678 	flags = DB_CREATE;
679 	if (nprocs == 1) {
680 		home = NULL;
681 		flags |= DB_PRIVATE;
682 	} else
683 		home = TESTDIR;
684 	if (nthreads != 1)
685 		flags |= DB_THREAD;
686 	if ((ret = dbenv->open(dbenv, home, flags, 0)) != 0) {
687 		dbenv->err(dbenv, ret, "environment open: %s", home);
688 		return (1);
689 	}
690 
691 	return (0);
692 }
693 
/*
 * tm_env_close --
 *	Close the backing database environment.
 */
void
tm_env_close()
{
	/* The close return is deliberately ignored; callers exit next. */
	(void)dbenv->close(dbenv, 0);
}
703 
704 /*
705  * tm_mutex_init --
706  *	Initialize the mutexes.
707  */
708 void
tm_mutex_init()709 tm_mutex_init()
710 {
711 	TM *mp;
712 	u_int i;
713 	int err;
714 
715 	if (verbose)
716 		printf("Allocate the global mutex: ");
717 	mp = (TM *)gm_addr;
718 	if ((err = dbenv->mutex_alloc(dbenv, 0, &mp->mutex)) != 0) {
719 		fprintf(stderr, "%s: DB_ENV->mutex_alloc (global): %s\n",
720 		    progname, db_strerror(err));
721 		exit(EXIT_FAILURE);
722 	}
723 	if (verbose)
724 		printf("%lu\n", (u_long)mp->mutex);
725 
726 	if (verbose)
727 		printf(
728 		    "Allocate %d per-thread, self-blocking mutexes: ",
729 		    nthreads * nprocs);
730 	for (i = 0; i < nthreads * nprocs; ++i) {
731 		mp = (TM *)(tm_addr + i * sizeof(TM));
732 		if ((err = dbenv->mutex_alloc(
733 		    dbenv, DB_MUTEX_SELF_BLOCK, &mp->mutex)) != 0) {
734 			fprintf(stderr,
735 			    "%s: DB_ENV->mutex_alloc (per-thread %d): %s\n",
736 			    progname, i, db_strerror(err));
737 			exit(EXIT_FAILURE);
738 		}
739 		if ((err = dbenv->mutex_lock(dbenv, mp->mutex)) != 0) {
740 			fprintf(stderr,
741 			    "%s: DB_ENV->mutex_lock (per-thread %d): %s\n",
742 			    progname, i, db_strerror(err));
743 			exit(EXIT_FAILURE);
744 		}
745 		if (verbose)
746 			printf("%lu ", (u_long)mp->mutex);
747 	}
748 	if (verbose)
749 		printf("\n");
750 
751 	if (verbose)
752 		printf("Allocate %d per-lock mutexes: ", maxlocks);
753 	for (i = 0; i < maxlocks; ++i) {
754 		mp = (TM *)(lm_addr + i * sizeof(TM));
755 		if ((err = dbenv->mutex_alloc(dbenv, 0, &mp->mutex)) != 0) {
756 			fprintf(stderr,
757 			    "%s: DB_ENV->mutex_alloc (per-lock: %d): %s\n",
758 			    progname, i, db_strerror(err));
759 			exit(EXIT_FAILURE);
760 		}
761 		if (verbose)
762 			printf("%lu ", (u_long)mp->mutex);
763 	}
764 	if (verbose)
765 		printf("\n");
766 }
767 
768 /*
769  * tm_mutex_destroy --
770  *	Destroy the mutexes.
771  */
772 void
tm_mutex_destroy()773 tm_mutex_destroy()
774 {
775 	TM *gp, *mp;
776 	u_int i;
777 	int err;
778 
779 	if (verbose)
780 		printf("Destroy the global mutex.\n");
781 	gp = (TM *)gm_addr;
782 	if ((err = dbenv->mutex_free(dbenv, gp->mutex)) != 0) {
783 		fprintf(stderr, "%s: DB_ENV->mutex_free (global): %s\n",
784 		    progname, db_strerror(err));
785 		exit(EXIT_FAILURE);
786 	}
787 
788 	if (verbose)
789 		printf("Destroy the per-thread mutexes.\n");
790 	for (i = 0; i < nthreads * nprocs; ++i) {
791 		mp = (TM *)(tm_addr + i * sizeof(TM));
792 		if ((err = dbenv->mutex_free(dbenv, mp->mutex)) != 0) {
793 			fprintf(stderr,
794 			    "%s: DB_ENV->mutex_free (per-thread %d): %s\n",
795 			    progname, i, db_strerror(err));
796 			exit(EXIT_FAILURE);
797 		}
798 	}
799 
800 	if (verbose)
801 		printf("Destroy the per-lock mutexes.\n");
802 	for (i = 0; i < maxlocks; ++i) {
803 		mp = (TM *)(lm_addr + i * sizeof(TM));
804 		if ((err = dbenv->mutex_free(dbenv, mp->mutex)) != 0) {
805 			fprintf(stderr,
806 			    "%s: DB_ENV->mutex_free (per-lock: %d): %s\n",
807 			    progname, i, db_strerror(err));
808 			exit(EXIT_FAILURE);
809 		}
810 	}
811 }
812 
/*
 * tm_mutex_stats --
 *	Display mutex statistics.
 *
 * A no-op when the library is built without statistics support.
 *
 * Fix: the index "i" is u_int, so it is printed with %2u rather than
 * %2d (mismatched signedness in printf is undefined behavior).
 */
void
tm_mutex_stats()
{
#ifdef HAVE_STATISTICS
	TM *mp;
	uintmax_t set_wait, set_nowait;
	u_int i;

	printf("Per-lock mutex statistics.\n");
	for (i = 0; i < maxlocks; ++i) {
		mp = (TM *)(lm_addr + i * sizeof(TM));
		__mutex_set_wait_info(env, mp->mutex, &set_wait, &set_nowait);
		printf("mutex %2u: wait: %lu; no wait %lu\n", i,
		    (u_long)set_wait, (u_long)set_nowait);
	}
#endif
}
834 
835 /*
836  * data_on --
837  *	Map in or allocate the backing data space.
838  */
839 void
data_on(gm_addrp,tm_addrp,lm_addrp,fhpp,init)840 data_on(gm_addrp, tm_addrp, lm_addrp, fhpp, init)
841 	u_int8_t **gm_addrp, **tm_addrp, **lm_addrp;
842 	DB_FH **fhpp;
843 	int init;
844 {
845 	DB_FH *fhp;
846 	size_t nwrite;
847 	int err;
848 	void *addr;
849 
850 	fhp = NULL;
851 
852 	/*
853 	 * In a single process, use heap memory.
854 	 */
855 	if (nprocs == 1) {
856 		if (init) {
857 			if ((err =
858 			    __os_calloc(env, (size_t)len, 1, &addr)) != 0)
859 				exit(EXIT_FAILURE);
860 		} else {
861 			fprintf(stderr,
862 			    "%s: init should be set for single process call\n",
863 			    progname);
864 			exit(EXIT_FAILURE);
865 		}
866 	} else {
867 		if (init) {
868 			if (verbose)
869 				printf("Create the backing file.\n");
870 
871 			if ((err = __os_open(env, MT_FILE, 0,
872 			    DB_OSO_CREATE | DB_OSO_TRUNC, 0666, &fhp)) == -1) {
873 				fprintf(stderr, "%s: %s: open: %s\n",
874 				    progname, MT_FILE, db_strerror(err));
875 				exit(EXIT_FAILURE);
876 			}
877 
878 			if ((err =
879 			    __os_seek(env, fhp, 0, 0, (u_int32_t)len)) != 0 ||
880 			    (err =
881 			    __os_write(env, fhp, &err, 1, &nwrite)) != 0 ||
882 			    nwrite != 1) {
883 				fprintf(stderr, "%s: %s: seek/write: %s\n",
884 				    progname, MT_FILE, db_strerror(err));
885 				exit(EXIT_FAILURE);
886 			}
887 		} else
888 			if ((err = __os_open(env, MT_FILE, 0, 0, 0, &fhp)) != 0)
889 				exit(EXIT_FAILURE);
890 
891 		if ((err =
892 		    __os_mapfile(env, MT_FILE, fhp, len, 0, &addr)) != 0)
893 			exit(EXIT_FAILURE);
894 	}
895 
896 	*gm_addrp = (u_int8_t *)addr;
897 	addr = (u_int8_t *)addr + sizeof(TM);
898 	*tm_addrp = (u_int8_t *)addr;
899 	addr = (u_int8_t *)addr + sizeof(TM) * (nthreads * nprocs);
900 	*lm_addrp = (u_int8_t *)addr;
901 
902 	if (fhpp != NULL)
903 		*fhpp = fhp;
904 }
905 
906 /*
907  * data_off --
908  *	Discard or de-allocate the backing data space.
909  */
910 void
data_off(addr,fhp)911 data_off(addr, fhp)
912 	u_int8_t *addr;
913 	DB_FH *fhp;
914 {
915 	if (nprocs == 1)
916 		__os_free(env, addr);
917 	else {
918 		if (__os_unmapfile(env, addr, len) != 0)
919 			exit(EXIT_FAILURE);
920 		if (__os_closehandle(env, fhp) != 0)
921 			exit(EXIT_FAILURE);
922 	}
923 }
924 
/*
 * usage --
 *	Print a usage message to stderr and return EXIT_FAILURE for the
 *	caller to hand back from main.
 */
int
usage()
{
	fprintf(stderr, "usage: %s %s\n\t%s\n", progname,
	    "[-v] [-l maxlocks]",
	    "[-n locks] [-p procs] [-T locker=ID|wakeup=ID] [-t threads]");
	return (EXIT_FAILURE);
}
937 
938 /*
939  * os_wait --
940  *	Wait for an array of N procs.
941  */
942 int
os_wait(procs,n)943 os_wait(procs, n)
944 	os_pid_t *procs;
945 	u_int n;
946 {
947 	u_int i;
948 	int status;
949 #if defined(DB_WIN32)
950 	DWORD ret;
951 #endif
952 
953 	status = 0;
954 
955 #if defined(DB_WIN32)
956 	do {
957 		ret = WaitForMultipleObjects(n, procs, FALSE, INFINITE);
958 		i = ret - WAIT_OBJECT_0;
959 		if (i < 0 || i >= n)
960 			return (__os_posix_err(__os_get_syserr()));
961 
962 		if ((GetExitCodeProcess(procs[i], &ret) == 0) || (ret != 0))
963 			return (ret);
964 
965 		/* remove the process handle from the list */
966 		while (++i < n)
967 			procs[i - 1] = procs[i];
968 	} while (--n);
969 #elif !defined(HAVE_VXWORKS)
970 	do {
971 		if (wait(&status) == -1)
972 			return (__os_posix_err(__os_get_syserr()));
973 
974 		if (WIFEXITED(status) == 0 || WEXITSTATUS(status) != 0) {
975 			for (i = 0; i < n; i++)
976 				(void)kill(procs[i], SIGKILL);
977 			return (WEXITSTATUS(status));
978 		}
979 	} while (--n);
980 #endif
981 
982 	return (0);
983 }
984 
985 os_pid_t
spawn_proc(id,tmpath,typearg)986 spawn_proc(id, tmpath, typearg)
987 	u_long id;
988 	char *tmpath, *typearg;
989 {
990 	char *const vbuf = verbose ?  "-v" : NULL;
991 	char *args[13], lbuf[16], nbuf[16], pbuf[16], tbuf[16], Tbuf[256];
992 
993 	args[0] = tmpath;
994 	args[1] = "-l";
995 	snprintf(lbuf, sizeof(lbuf),  "%d", maxlocks);
996 	args[2] = lbuf;
997 	args[3] = "-n";
998 	snprintf(nbuf, sizeof(nbuf),  "%d", nlocks);
999 	args[4] = nbuf;
1000 	args[5] = "-p";
1001 	snprintf(pbuf, sizeof(pbuf),  "%d", nprocs);
1002 	args[6] = pbuf;
1003 	args[7] = "-t";
1004 	snprintf(tbuf, sizeof(tbuf),  "%d", nthreads);
1005 	args[8] = tbuf;
1006 	args[9] = "-T";
1007 	snprintf(Tbuf, sizeof(Tbuf),  "%s=%lu", typearg, id);
1008 	args[10] = Tbuf;
1009 	args[11] = vbuf;
1010 	args[12] = NULL;
1011 
1012 	return (os_spawn(tmpath, args));
1013 }
1014 
/*
 * os_spawn --
 *	Start a new process running this binary with the given argv.
 *
 * Returns the new process's ID, or OS_BAD_PID on failure.  On POSIX
 * systems the child execs "path" and never returns from this function.
 */
os_pid_t
os_spawn(path, argv)
	const char *path;
	char *const argv[];
{
	os_pid_t pid;
	int status;

	COMPQUIET(pid, 0);
	COMPQUIET(status, 0);

#ifdef HAVE_VXWORKS
	fprintf(stderr, "%s: os_spawn not supported for VxWorks.\n", progname);
	return (OS_BAD_PID);
#elif defined(HAVE_QNX)
	/*
	 * For QNX, we cannot fork if we've ever used threads.  So
	 * we'll use their spawn function.  We use 'spawnv' which
	 * is NOT a POSIX function.
	 *
	 * The return value of spawnv is just what we want depending
	 * on the value of the 'wait' arg.
	 */
	return (spawnv(P_NOWAIT, path, argv));
#elif defined(DB_WIN32)
	return (os_pid_t)(_spawnv(P_NOWAIT, path, argv));
#else
	if ((pid = fork()) != 0) {
		/* Parent: hand back the child pid (or the fork failure). */
		if (pid == -1)
			return (OS_BAD_PID);
		return (pid);
	} else {
		/* Child: exec the binary; exit only if the exec fails. */
		(void)execv(path, argv);
		exit(EXIT_FAILURE);
	}
#endif
}
1052