/*-
 * Copyright (c) 2000 Michael Smith
 * Copyright (c) 2000 BSDi
 * Copyright (c) 2007-2009 Jung-uk Kim <jkim@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * 6.1 : Mutual Exclusion and Synchronisation
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>

#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>

#define	_COMPONENT	ACPI_OS_SERVICES
ACPI_MODULE_NAME("SYNCH")

MALLOC_DEFINE(M_ACPISEM, "acpisem", "ACPI semaphore");

/*
 * Convert milliseconds to ticks.
 */
static int
timeout2hz(UINT16 Timeout)
{
	struct timeval		tv;

	tv.tv_sec = (time_t)(Timeout / 1000);
	tv.tv_usec = (suseconds_t)(Timeout % 1000) * 1000;

	return (tvtohz(&tv));
}
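
/*
 * Rough worked example (the exact result depends on the configured HZ
 * value and on tvtohz()'s rounding): with hz = 1000, a 1500 ms timeout
 * becomes tv = { 1 s, 500000 us } and yields on the order of 1500 ticks;
 * tvtohz() rounds up, so even a 1 ms timeout sleeps for at least one tick.
 */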

/*
 * ACPI_SEMAPHORE
 */
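/*
 * This is a counting semaphore built from a sleep mutex and a condition
 * variable: as_units holds the units currently available (bounded by
 * as_maxunits), as_waiters counts threads sleeping on as_cv, and as_reset
 * is raised by AcpiOsDeleteSemaphore() to tell those sleepers to give up
 * so the semaphore can be torn down.
 */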
struct acpi_sema {
	struct mtx	as_lock;
	char		as_name[32];
	struct cv	as_cv;
	UINT32		as_maxunits;
	UINT32		as_units;
	int		as_waiters;
	int		as_reset;
};

ACPI_STATUS
AcpiOsCreateSemaphore(UINT32 MaxUnits, UINT32 InitialUnits,
    ACPI_SEMAPHORE *OutHandle)
{
	struct acpi_sema	*as;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (OutHandle == NULL || MaxUnits == 0 || InitialUnits > MaxUnits)
		return_ACPI_STATUS (AE_BAD_PARAMETER);

	if ((as = malloc(sizeof(*as), M_ACPISEM, M_NOWAIT | M_ZERO)) == NULL)
		return_ACPI_STATUS (AE_NO_MEMORY);

	snprintf(as->as_name, sizeof(as->as_name), "ACPI sema (%p)", as);
	mtx_init(&as->as_lock, as->as_name, NULL, MTX_DEF);
	cv_init(&as->as_cv, as->as_name);
	as->as_maxunits = MaxUnits;
	as->as_units = InitialUnits;

	*OutHandle = (ACPI_SEMAPHORE)as;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "created %s, max %u, initial %u\n",
	    as->as_name, MaxUnits, InitialUnits));

	return_ACPI_STATUS (AE_OK);
}
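
/*
 * A minimal usage sketch of the semaphore OSL calls (hypothetical caller,
 * not part of this file), shown only to illustrate the expected sequence:
 *
 *	ACPI_SEMAPHORE sem;
 *
 *	if (ACPI_SUCCESS(AcpiOsCreateSemaphore(1, 1, &sem))) {
 *		(void)AcpiOsWaitSemaphore(sem, 1, ACPI_WAIT_FOREVER);
 *		... guarded work ...
 *		(void)AcpiOsSignalSemaphore(sem, 1);
 *		(void)AcpiOsDeleteSemaphore(sem);
 *	}
 */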

ACPI_STATUS
AcpiOsDeleteSemaphore(ACPI_SEMAPHORE Handle)
{
	struct acpi_sema	*as = (struct acpi_sema *)Handle;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (as == NULL)
		return_ACPI_STATUS (AE_BAD_PARAMETER);

	mtx_lock(&as->as_lock);

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "delete %s\n", as->as_name));

	if (as->as_waiters > 0) {
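		/*
		 * Flag the reset and wake every sleeper: each waiter sees
		 * as_reset, bails out with AE_ERROR and drops as_waiters.
		 * The waiters do not wake us back up, so poll roughly once
		 * a second (the hz timeout below) until they have drained.
		 */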
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "reset %s, units %u, waiters %d\n",
		    as->as_name, as->as_units, as->as_waiters));
		as->as_reset = 1;
		cv_broadcast(&as->as_cv);
		while (as->as_waiters > 0) {
			if (mtx_sleep(&as->as_reset, &as->as_lock,
			    PCATCH, "acsrst", hz) == EINTR) {
				ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				    "failed to reset %s, waiters %d\n",
				    as->as_name, as->as_waiters));
				mtx_unlock(&as->as_lock);
				return_ACPI_STATUS (AE_ERROR);
			}
			ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
			    "wait %s, units %u, waiters %d\n",
			    as->as_name, as->as_units, as->as_waiters));
		}
	}

	mtx_unlock(&as->as_lock);

	mtx_destroy(&as->as_lock);
	cv_destroy(&as->as_cv);
	free(as, M_ACPISEM);

	return_ACPI_STATUS (AE_OK);
}

#define	ACPISEM_AVAIL(s, u)	((s)->as_units >= (u))

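/*
 * Take Units units from the semaphore, sleeping on the condition variable
 * while not enough are available.  For a finite timeout the remaining
 * budget is re-armed after every wakeup: the ticks actually slept are
 * subtracted from tmo, and a negative difference, which can happen if the
 * tick counter wraps, is treated as expiry.
 */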
ACPI_STATUS
AcpiOsWaitSemaphore(ACPI_SEMAPHORE Handle, UINT32 Units, UINT16 Timeout)
{
	struct acpi_sema	*as = (struct acpi_sema *)Handle;
	int			error, prevtick, slptick, tmo;
	ACPI_STATUS		status = AE_OK;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (as == NULL || Units == 0)
		return_ACPI_STATUS (AE_BAD_PARAMETER);

	mtx_lock(&as->as_lock);

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
	    "get %u unit(s) from %s, units %u, waiters %d, timeout %u\n",
	    Units, as->as_name, as->as_units, as->as_waiters, Timeout));

	if (as->as_maxunits != ACPI_NO_UNIT_LIMIT && as->as_maxunits < Units) {
		mtx_unlock(&as->as_lock);
		return_ACPI_STATUS (AE_LIMIT);
	}

	switch (Timeout) {
	case ACPI_DO_NOT_WAIT:
		if (!ACPISEM_AVAIL(as, Units))
			status = AE_TIME;
		break;
	case ACPI_WAIT_FOREVER:
		while (!ACPISEM_AVAIL(as, Units)) {
			as->as_waiters++;
			error = cv_wait_sig(&as->as_cv, &as->as_lock);
			as->as_waiters--;
			if (error == EINTR || as->as_reset) {
				status = AE_ERROR;
				break;
			}
		}
		break;
	default:
		tmo = timeout2hz(Timeout);
		while (!ACPISEM_AVAIL(as, Units)) {
			prevtick = ticks;
			as->as_waiters++;
			error = cv_timedwait_sig(&as->as_cv, &as->as_lock, tmo);
			as->as_waiters--;
			if (error == EINTR || as->as_reset) {
				status = AE_ERROR;
				break;
			}
			if (ACPISEM_AVAIL(as, Units))
				break;
			slptick = ticks - prevtick;
			if (slptick >= tmo || slptick < 0) {
				status = AE_TIME;
				break;
			}
			tmo -= slptick;
		}
	}
	if (status == AE_OK)
		as->as_units -= Units;

	mtx_unlock(&as->as_lock);

	return_ACPI_STATUS (status);
}

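/*
 * Return Units units to the semaphore.  The release is rejected with
 * AE_LIMIT if it would push the count past as_maxunits; otherwise up to
 * Units sleepers are signalled, and each one re-checks whether its own
 * request can now be satisfied.
 */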
ACPI_STATUS
AcpiOsSignalSemaphore(ACPI_SEMAPHORE Handle, UINT32 Units)
{
	struct acpi_sema	*as = (struct acpi_sema *)Handle;
	UINT32			i;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (as == NULL || Units == 0)
		return_ACPI_STATUS (AE_BAD_PARAMETER);

	mtx_lock(&as->as_lock);

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
	    "return %u units to %s, units %u, waiters %d\n",
	    Units, as->as_name, as->as_units, as->as_waiters));

	if (as->as_maxunits != ACPI_NO_UNIT_LIMIT &&
	    (as->as_maxunits < Units ||
	    as->as_maxunits - Units < as->as_units)) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "exceeded max units %u\n", as->as_maxunits));
		mtx_unlock(&as->as_lock);
		return_ACPI_STATUS (AE_LIMIT);
	}

	as->as_units += Units;
	if (as->as_waiters > 0 && ACPISEM_AVAIL(as, Units))
		for (i = 0; i < Units; i++)
			cv_signal(&as->as_cv);

	mtx_unlock(&as->as_lock);

	return_ACPI_STATUS (AE_OK);
}

#undef ACPISEM_AVAIL

/*
 * ACPI_MUTEX
 */
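/*
 * The ACPI mutex is built from a sleep mutex plus mtx_sleep()/wakeup():
 * am_owner records the owning thread so the owner can re-acquire the mutex
 * recursively (counted in am_nested), am_waiters counts sleeping threads,
 * and am_reset is raised by AcpiOsDeleteMutex() to flush them out before
 * the mutex is freed.
 */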
struct acpi_mutex {
	struct mtx	am_lock;
	char		am_name[32];
	struct thread	*am_owner;
	int		am_nested;
	int		am_waiters;
	int		am_reset;
};

ACPI_STATUS
AcpiOsCreateMutex(ACPI_MUTEX *OutHandle)
{
	struct acpi_mutex	*am;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (OutHandle == NULL)
		return_ACPI_STATUS (AE_BAD_PARAMETER);

	if ((am = malloc(sizeof(*am), M_ACPISEM, M_NOWAIT | M_ZERO)) == NULL)
		return_ACPI_STATUS (AE_NO_MEMORY);

	snprintf(am->am_name, sizeof(am->am_name), "ACPI mutex (%p)", am);
	mtx_init(&am->am_lock, am->am_name, NULL, MTX_DEF);

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "created %s\n", am->am_name));

	*OutHandle = (ACPI_MUTEX)am;

	return_ACPI_STATUS (AE_OK);
}

#define	ACPIMTX_AVAIL(m)	((m)->am_owner == NULL)
#define	ACPIMTX_OWNED(m)	((m)->am_owner == curthread)

void
AcpiOsDeleteMutex(ACPI_MUTEX Handle)
{
	struct acpi_mutex	*am = (struct acpi_mutex *)Handle;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (am == NULL) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "cannot delete null mutex\n"));
		return_VOID;
	}

	mtx_lock(&am->am_lock);

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "delete %s\n", am->am_name));

	if (am->am_waiters > 0) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "reset %s, owner %p\n", am->am_name, am->am_owner));
		am->am_reset = 1;
		wakeup(am);
		while (am->am_waiters > 0) {
			if (mtx_sleep(&am->am_reset, &am->am_lock,
			    PCATCH, "acmrst", hz) == EINTR) {
				ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				    "failed to reset %s, waiters %d\n",
				    am->am_name, am->am_waiters));
				mtx_unlock(&am->am_lock);
				return_VOID;
			}
			if (ACPIMTX_AVAIL(am))
				ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				    "wait %s, waiters %d\n",
				    am->am_name, am->am_waiters));
			else
				ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				    "wait %s, owner %p, waiters %d\n",
				    am->am_name, am->am_owner, am->am_waiters));
		}
	}

	mtx_unlock(&am->am_lock);

	mtx_destroy(&am->am_lock);
	free(am, M_ACPISEM);
}

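/*
 * Acquire the mutex, honoring the same three timeout cases as the
 * semaphore code above: fail immediately, wait forever, or wait with a
 * tick budget that is re-armed after each wakeup.  A recursive acquire by
 * the current owner only bumps am_nested and returns AE_OK.
 */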
ACPI_STATUS
AcpiOsAcquireMutex(ACPI_MUTEX Handle, UINT16 Timeout)
{
	struct acpi_mutex	*am = (struct acpi_mutex *)Handle;
	int			error, prevtick, slptick, tmo;
	ACPI_STATUS		status = AE_OK;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (am == NULL)
		return_ACPI_STATUS (AE_BAD_PARAMETER);

	mtx_lock(&am->am_lock);

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "acquire %s\n", am->am_name));

	if (ACPIMTX_OWNED(am)) {
		am->am_nested++;
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "acquire nested %s, depth %d\n",
		    am->am_name, am->am_nested));
		mtx_unlock(&am->am_lock);
		return_ACPI_STATUS (AE_OK);
	}

	switch (Timeout) {
	case ACPI_DO_NOT_WAIT:
		if (!ACPIMTX_AVAIL(am))
			status = AE_TIME;
		break;
	case ACPI_WAIT_FOREVER:
		while (!ACPIMTX_AVAIL(am)) {
			am->am_waiters++;
			error = mtx_sleep(am, &am->am_lock, PCATCH, "acmtx", 0);
			am->am_waiters--;
			if (error == EINTR || am->am_reset) {
				status = AE_ERROR;
				break;
			}
		}
		break;
	default:
		tmo = timeout2hz(Timeout);
		while (!ACPIMTX_AVAIL(am)) {
			prevtick = ticks;
			am->am_waiters++;
			error = mtx_sleep(am, &am->am_lock, PCATCH,
			    "acmtx", tmo);
			am->am_waiters--;
			if (error == EINTR || am->am_reset) {
				status = AE_ERROR;
				break;
			}
			if (ACPIMTX_AVAIL(am))
				break;
			slptick = ticks - prevtick;
			if (slptick >= tmo || slptick < 0) {
				status = AE_TIME;
				break;
			}
			tmo -= slptick;
		}
	}
	if (status == AE_OK)
		am->am_owner = curthread;

	mtx_unlock(&am->am_lock);

	return_ACPI_STATUS (status);
}

void
AcpiOsReleaseMutex(ACPI_MUTEX Handle)
{
	struct acpi_mutex	*am = (struct acpi_mutex *)Handle;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (am == NULL) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "cannot release null mutex\n"));
		return_VOID;
	}

	mtx_lock(&am->am_lock);

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "release %s\n", am->am_name));

	if (ACPIMTX_OWNED(am)) {
		if (am->am_nested > 0) {
			ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
			    "release nested %s, depth %d\n",
			    am->am_name, am->am_nested));
			am->am_nested--;
		} else
			am->am_owner = NULL;
	} else {
		if (ACPIMTX_AVAIL(am))
			ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
			    "release already available %s\n", am->am_name));
		else
			ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
			    "release unowned %s from %p, depth %d\n",
			    am->am_name, am->am_owner, am->am_nested));
	}
	if (am->am_waiters > 0 && ACPIMTX_AVAIL(am))
		wakeup_one(am);

	mtx_unlock(&am->am_lock);
}

#undef ACPIMTX_AVAIL
#undef ACPIMTX_OWNED

/*
 * ACPI_SPINLOCK
 */
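/*
 * The ACPI "spinlock" wraps a spin mutex.  Recursive acquisitions by the
 * lock owner are counted in al_nested rather than re-locking the mutex,
 * and the matching releases unwind that count before the spin mutex
 * itself is dropped.
 */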
struct acpi_spinlock {
	struct mtx	al_lock;
	char		al_name[32];
	int		al_nested;
};

ACPI_STATUS
AcpiOsCreateLock(ACPI_SPINLOCK *OutHandle)
{
	struct acpi_spinlock	*al;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (OutHandle == NULL)
		return_ACPI_STATUS (AE_BAD_PARAMETER);

	if ((al = malloc(sizeof(*al), M_ACPISEM, M_NOWAIT | M_ZERO)) == NULL)
		return_ACPI_STATUS (AE_NO_MEMORY);

#ifdef ACPI_DEBUG
	if (OutHandle == &AcpiGbl_GpeLock)
		snprintf(al->al_name, sizeof(al->al_name), "ACPI lock (GPE)");
	else if (OutHandle == &AcpiGbl_HardwareLock)
		snprintf(al->al_name, sizeof(al->al_name), "ACPI lock (HW)");
	else
#endif
	snprintf(al->al_name, sizeof(al->al_name), "ACPI lock (%p)", al);
	mtx_init(&al->al_lock, al->al_name, NULL, MTX_SPIN);

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "created %s\n", al->al_name));

	*OutHandle = (ACPI_SPINLOCK)al;

	return_ACPI_STATUS (AE_OK);
}

void
AcpiOsDeleteLock(ACPI_SPINLOCK Handle)
{
	struct acpi_spinlock	*al = (struct acpi_spinlock *)Handle;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (al == NULL) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "cannot delete null spinlock\n"));
		return_VOID;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "delete %s\n", al->al_name));

	mtx_destroy(&al->al_lock);
	free(al, M_ACPISEM);
}

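/*
 * The ACPI_CPU_FLAGS cookie is not needed here and is always returned as
 * zero: a FreeBSD spin mutex already saves and restores the local
 * interrupt state around the critical section, so there is nothing for
 * the caller to hand back to AcpiOsReleaseLock().
 */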
ACPI_CPU_FLAGS
AcpiOsAcquireLock(ACPI_SPINLOCK Handle)
{
	struct acpi_spinlock	*al = (struct acpi_spinlock *)Handle;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (al == NULL) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "cannot acquire null spinlock\n"));
		return (0);
	}

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "acquire %s\n", al->al_name));

	if (mtx_owned(&al->al_lock)) {
		al->al_nested++;
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "acquire nested %s, depth %d\n",
		    al->al_name, al->al_nested));
	} else
		mtx_lock_spin(&al->al_lock);

	return (0);
}

void
AcpiOsReleaseLock(ACPI_SPINLOCK Handle, ACPI_CPU_FLAGS Flags)
{
	struct acpi_spinlock	*al = (struct acpi_spinlock *)Handle;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (al == NULL) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "cannot release null spinlock\n"));
		return_VOID;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "release %s\n", al->al_name));

	if (mtx_owned(&al->al_lock)) {
		if (al->al_nested > 0) {
			ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
			    "release nested %s, depth %d\n",
			    al->al_name, al->al_nested));
			al->al_nested--;
		} else
			mtx_unlock_spin(&al->al_lock);
	} else
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "cannot release unowned %s\n", al->al_name));
}

/* Section 5.2.10.1: global lock acquire/release functions */
#define	GL_ACQUIRED	(-1)
#define	GL_BUSY		0
#define	GL_BIT_PENDING	0x01
#define	GL_BIT_OWNED	0x02
#define	GL_BIT_MASK	(GL_BIT_PENDING | GL_BIT_OWNED)
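
/*
 * The lock word is the Global Lock field of the FACS: bit 0 is the
 * pending flag and bit 1 the owned flag, with the remaining bits
 * reserved.  GL_ACQUIRED and GL_BUSY are the local return values of
 * acpi_acquire_global_lock() below.
 */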

/*
 * Acquire the global lock.  If busy, set the pending bit.  The caller
 * will wait for notification from the BIOS that the lock is available
 * and then attempt to acquire it again.
 */
int
acpi_acquire_global_lock(uint32_t *lock)
{
	uint32_t	new, old;

	do {
		old = *lock;
		new = ((old & ~GL_BIT_MASK) | GL_BIT_OWNED) |
			((old >> 1) & GL_BIT_PENDING);
	} while (atomic_cmpset_acq_int(lock, old, new) == 0);

	return ((new < GL_BIT_MASK) ? GL_ACQUIRED : GL_BUSY);
}
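
/*
 * Worked example of the two possible low-bit transitions above: if the
 * lock was free (owned bit clear), the new value has only the owned bit
 * set and the function returns GL_ACQUIRED; if it was already owned, the
 * old owned bit is shifted down into the pending bit, both bits end up
 * set, and GL_BUSY tells the caller to wait for the BIOS to signal a
 * release before retrying.
 */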

/*
 * Release the global lock, returning whether there is a waiter pending.
 * If the BIOS set the pending bit, OSPM must notify the BIOS when it
 * releases the lock.
 */
int
acpi_release_global_lock(uint32_t *lock)
{
	uint32_t	new, old;

	do {
		old = *lock;
		new = old & ~GL_BIT_MASK;
	} while (atomic_cmpset_rel_int(lock, old, new) == 0);

	return (old & GL_BIT_PENDING);
}
611