xref: /dragonfly/sys/dev/acpica/Osd/OsdSynch.c (revision 82730a9c)
/*-
 * Copyright (c) 2000 Michael Smith
 * Copyright (c) 2000 BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/acpica/Osd/OsdSynch.c,v 1.21 2004/05/05 20:07:52 njl Exp $
 */

/*
 * 6.1 : Mutual Exclusion and Synchronisation
 */

#include "acpi.h"
#include "accommon.h"

#include "opt_acpi.h"

#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/thread.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>

#define _COMPONENT	ACPI_OS_SERVICES
ACPI_MODULE_NAME("SYNCH")

MALLOC_DEFINE(M_ACPISEM, "acpisem", "ACPI semaphore");

#define AS_LOCK(as)		spin_lock(&(as)->as_spin)
#define AS_UNLOCK(as)		spin_unlock(&(as)->as_spin)
#define AS_LOCK_DECL

/*
 * Simple counting semaphore implemented using a spinlock.  (Subsequently
 * used in the OSI code to implement a mutex.  Go figure.)
 */
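/*
 * Field notes (annotation): as_units is the number of units currently
 * available, as_maxunits the cap, as_pendings the number of sleeping
 * waiters, as_resetting a flag used by the (disabled) reset path below,
 * and as_timeouts a count of consecutive timed-out waits.
 */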
struct acpi_semaphore {
    struct	spinlock as_spin;
    UINT32	as_units;
    UINT32	as_maxunits;
    UINT32	as_pendings;
    UINT32	as_resetting;
    UINT32	as_timeouts;
};

#ifndef ACPI_NO_SEMAPHORES
#ifndef ACPI_SEMAPHORES_MAX_PENDING
#define ACPI_SEMAPHORES_MAX_PENDING	4
#endif
static int	acpi_semaphore_debug = 0;
TUNABLE_INT("debug.acpi_semaphore_debug", &acpi_semaphore_debug);
SYSCTL_DECL(_debug_acpi);
SYSCTL_INT(_debug_acpi, OID_AUTO, semaphore_debug, CTLFLAG_RW,
	   &acpi_semaphore_debug, 0, "Enable ACPI semaphore debug messages");
#endif /* !ACPI_NO_SEMAPHORES */

ACPI_STATUS
AcpiOsCreateSemaphore(UINT32 MaxUnits, UINT32 InitialUnits,
    ACPI_HANDLE *OutHandle)
{
#ifndef ACPI_NO_SEMAPHORES
    struct acpi_semaphore	*as;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    if (OutHandle == NULL)
	return_ACPI_STATUS (AE_BAD_PARAMETER);
    if (InitialUnits > MaxUnits)
	return_ACPI_STATUS (AE_BAD_PARAMETER);

    as = kmalloc(sizeof(*as), M_ACPISEM, M_INTWAIT | M_ZERO);

    spin_init(&as->as_spin);
    as->as_units = InitialUnits;
    as->as_maxunits = MaxUnits;
    as->as_pendings = as->as_resetting = as->as_timeouts = 0;

    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
	"created semaphore %p max %d, initial %d\n",
	as, MaxUnits, InitialUnits));

    *OutHandle = (ACPI_HANDLE)as;
#else
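    /* semaphores disabled: hand back a dummy, non-NULL handle */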
    *OutHandle = (ACPI_HANDLE)OutHandle;
#endif /* !ACPI_NO_SEMAPHORES */

    return_ACPI_STATUS (AE_OK);
}

ACPI_STATUS
AcpiOsDeleteSemaphore(ACPI_HANDLE Handle)
{
#ifndef ACPI_NO_SEMAPHORES
    struct acpi_semaphore *as = (struct acpi_semaphore *)Handle;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "destroyed semaphore %p\n", as));
    spin_uninit(&as->as_spin);
    kfree(as, M_ACPISEM);
#endif /* !ACPI_NO_SEMAPHORES */

    return_ACPI_STATUS (AE_OK);
}

ACPI_STATUS
AcpiOsWaitSemaphore(ACPI_HANDLE Handle, UINT32 Units, UINT16 Timeout)
{
#ifndef ACPI_NO_SEMAPHORES
    ACPI_STATUS			result;
    struct acpi_semaphore	*as = (struct acpi_semaphore *)Handle;
    int				rv, tmo;
    struct timeval		timeouttv, currenttv, timelefttv;
    AS_LOCK_DECL;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    if (as == NULL)
	return_ACPI_STATUS (AE_BAD_PARAMETER);

    if (cold)
	return_ACPI_STATUS (AE_OK);

#if 0
    if (as->as_units < Units && as->as_timeouts > 10) {
	kprintf("%s: semaphore %p too many timeouts, resetting\n", __func__, as);
	AS_LOCK(as);
	as->as_units = as->as_maxunits;
	if (as->as_pendings)
	    as->as_resetting = 1;
	as->as_timeouts = 0;
	wakeup(as);
	AS_UNLOCK(as);
	return_ACPI_STATUS (AE_TIME);
    }

    if (as->as_resetting)
	return_ACPI_STATUS (AE_TIME);
#endif

    /* a timeout of ACPI_WAIT_FOREVER means "forever" */
    if (Timeout == ACPI_WAIT_FOREVER) {
	tmo = 0;
	timeouttv.tv_sec = ((0xffff/1000) + 1);	/* cf. ACPI spec */
	timeouttv.tv_usec = 0;
    } else {
	/* compute timeout using microseconds per tick */
	tmo = (Timeout * 1000) / (1000000 / hz);
	if (tmo <= 0)
	    tmo = 1;
	timeouttv.tv_sec  = Timeout / 1000;
	timeouttv.tv_usec = (Timeout % 1000) * 1000;
    }
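    /*
     * Worked example (annotation): with hz = 100 there are 10000us per
     * tick, so Timeout = 50 (ms) gives tmo = 50000 / 10000 = 5 ticks,
     * while Timeout = 5 truncates to 0 and is clamped to the 1-tick
     * minimum above.  ACPI_WAIT_FOREVER instead sets tmo = 0 (sleep with
     * no tick limit) and a ~66s guard value in timeouttv.
     */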

    /* calculate timeout value in timeval */
    getmicrouptime(&currenttv);
    timevaladd(&timeouttv, &currenttv);

    AS_LOCK(as);
    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
	"get %d units from semaphore %p (has %d), timeout %d\n",
	Units, as, as->as_units, Timeout));
    for (;;) {
	if (as->as_maxunits == ACPI_NO_UNIT_LIMIT) {
	    result = AE_OK;
	    break;
	}
	if (as->as_units >= Units) {
	    as->as_units -= Units;
	    result = AE_OK;
	    break;
	}

	/* limit the number of pending threads */
	if (as->as_pendings >= ACPI_SEMAPHORES_MAX_PENDING) {
	    result = AE_TIME;
	    break;
	}

	/* if a timeout of zero is specified, return immediately */
	if (Timeout == 0) {
	    result = AE_TIME;
	    break;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
	    "semaphore blocked, calling ssleep(%p, %p, %d, \"acsem\", %d)\n",
	    as, &as->as_spin, PCATCH, tmo));

	as->as_pendings++;

	if (acpi_semaphore_debug) {
	    kprintf("%s: Sleep %jd, pending %jd, semaphore %p, thread %jd\n",
		__func__, (intmax_t)Timeout,
		(intmax_t)as->as_pendings, as,
		(intmax_t)AcpiOsGetThreadId());
	}

	rv = ssleep(as, &as->as_spin, PCATCH, "acsem", tmo);

	as->as_pendings--;

#if 0
	if (as->as_resetting) {
	    /* semaphore reset, return immediately */
	    if (as->as_pendings == 0) {
		as->as_resetting = 0;
	    }
	    result = AE_TIME;
	    break;
	}
#endif

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "ssleep(%d) returned %d\n", tmo, rv));
	if (rv == EWOULDBLOCK) {
	    result = AE_TIME;
	    break;
	}

	/* check whether we have already waited long enough */
	timelefttv = timeouttv;
	getmicrouptime(&currenttv);
	timevalsub(&timelefttv, &currenttv);
	if (timelefttv.tv_sec < 0) {
	    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "await semaphore %p timeout\n",
		as));
	    result = AE_TIME;
	    break;
	}

	/* adjust timeout for the next sleep */
	tmo = (timelefttv.tv_sec * 1000000 + timelefttv.tv_usec) /
	    (1000000 / hz);
	if (tmo <= 0)
	    tmo = 1;

	if (acpi_semaphore_debug) {
	    kprintf("%s: Wakeup timeleft(%jd, %jd), tmo %jd, sem %p, thread %jd\n",
		__func__,
		(intmax_t)timelefttv.tv_sec, (intmax_t)timelefttv.tv_usec,
		(intmax_t)tmo, as, (intmax_t)AcpiOsGetThreadId());
	}
    }

    if (acpi_semaphore_debug) {
	if (result == AE_TIME && Timeout > 0) {
	    kprintf("%s: Timeout %d, pending %d, semaphore %p\n",
		__func__, Timeout, as->as_pendings, as);
	}
	if (result == AE_OK && (as->as_timeouts > 0 || as->as_pendings > 0)) {
	    kprintf("%s: Acquire %d, units %d, pending %d, sem %p, thread %jd\n",
		__func__, Units, as->as_units, as->as_pendings, as,
		(intmax_t)AcpiOsGetThreadId());
	}
    }

    if (result == AE_TIME)
	as->as_timeouts++;
    else
	as->as_timeouts = 0;

    AS_UNLOCK(as);
    return_ACPI_STATUS (result);
#else
    return_ACPI_STATUS (AE_OK);
#endif /* !ACPI_NO_SEMAPHORES */
}

ACPI_STATUS
AcpiOsSignalSemaphore(ACPI_HANDLE Handle, UINT32 Units)
{
#ifndef ACPI_NO_SEMAPHORES
    struct acpi_semaphore	*as = (struct acpi_semaphore *)Handle;
    AS_LOCK_DECL;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    if (as == NULL)
	return_ACPI_STATUS(AE_BAD_PARAMETER);

    AS_LOCK(as);
    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
	"return %d units to semaphore %p (has %d)\n",
	Units, as, as->as_units));
    if (as->as_maxunits != ACPI_NO_UNIT_LIMIT) {
	as->as_units += Units;
	if (as->as_units > as->as_maxunits)
	    as->as_units = as->as_maxunits;
    }

    if (acpi_semaphore_debug && (as->as_timeouts > 0 || as->as_pendings > 0)) {
	kprintf("%s: Release %d, units %d, pending %d, semaphore %p, thread %jd\n",
	    __func__, Units, as->as_units, as->as_pendings, as,
	    (intmax_t)AcpiOsGetThreadId());
    }

    wakeup(as);
    AS_UNLOCK(as);
#endif /* !ACPI_NO_SEMAPHORES */

    return_ACPI_STATUS (AE_OK);
}
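
/*
 * Example (annotation, not part of the original file): a minimal sketch
 * of how a caller might drive the semaphore OSD layer.  The function and
 * variable names here are hypothetical.
 */
#if 0
static void
acpi_sem_usage_sketch(void)
{
    ACPI_HANDLE sem;

    /* binary semaphore: at most one unit, initially available */
    if (ACPI_FAILURE(AcpiOsCreateSemaphore(1, 1, &sem)))
	return;
    /* take one unit, waiting up to 100ms; returns AE_TIME on timeout */
    if (ACPI_SUCCESS(AcpiOsWaitSemaphore(sem, 1, 100))) {
	/* ... resource is held ... */
	AcpiOsSignalSemaphore(sem, 1);	/* give the unit back */
    }
    AcpiOsDeleteSemaphore(sem);
}
#endif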

struct acpi_spinlock {
    struct spinlock lock;
#ifdef ACPI_DEBUG_LOCKS
    thread_t	owner;
    const char *func;
    int line;
#endif
};

ACPI_STATUS
AcpiOsCreateLock(ACPI_SPINLOCK *OutHandle)
{
    ACPI_SPINLOCK spin;

    if (OutHandle == NULL)
	return (AE_BAD_PARAMETER);
    spin = kmalloc(sizeof(*spin), M_ACPISEM, M_INTWAIT|M_ZERO);
    spin_init(&spin->lock);
#ifdef ACPI_DEBUG_LOCKS
    spin->owner = NULL;
    spin->func = "";
    spin->line = 0;
#endif
    *OutHandle = spin;
    return (AE_OK);
}

void
AcpiOsDeleteLock (ACPI_SPINLOCK Spin)
{
    if (Spin == NULL)
	return;
    spin_uninit(&Spin->lock);
    kfree(Spin, M_ACPISEM);
}

/*
 * OS-dependent locking primitives.  These routines should be callable
 * from an interrupt handler or the cpu_idle thread.
 *
 * NB: some ACPI-CA functions that take locking flags, e.g. AcpiSetRegister(),
 * have been changed to call AcpiOsAcquireLock/AcpiOsReleaseLock unconditionally.
 */
ACPI_CPU_FLAGS
#ifdef ACPI_DEBUG_LOCKS
_AcpiOsAcquireLock (ACPI_SPINLOCK Spin, const char *func, int line)
#else
AcpiOsAcquireLock (ACPI_SPINLOCK Spin)
#endif
{
    spin_lock(&Spin->lock);

#ifdef ACPI_DEBUG_LOCKS
    if (Spin->owner) {
	kprintf("%p(%s:%d): acpi_spinlock %p already held by %p(%s:%d)\n",
		curthread, func, line, Spin, Spin->owner, Spin->func,
		Spin->line);
	print_backtrace(-1);
    } else {
	Spin->owner = curthread;
	Spin->func = func;
	Spin->line = line;
    }
#endif
    return(0);
}

void
AcpiOsReleaseLock (ACPI_SPINLOCK Spin, ACPI_CPU_FLAGS Flags)
{
#ifdef ACPI_DEBUG_LOCKS
    if (Flags) {
	if (Spin->owner != NULL) {
	    kprintf("%p: acpi_spinlock %p is unexpectedly held by %p(%s:%d)\n",
		    curthread, Spin, Spin->owner, Spin->func, Spin->line);
	    print_backtrace(-1);
	} else
	    return;
    }
    Spin->owner = NULL;
    Spin->func = "";
    Spin->line = 0;
#endif
    spin_unlock(&Spin->lock);
}
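
/*
 * Example (annotation, not part of the original file): the expected
 * acquire/release pairing.  The flags value returned by the acquire
 * side is threaded back into the release call; "lock" is hypothetical.
 */
#if 0
static void
acpi_lock_usage_sketch(ACPI_SPINLOCK lock)
{
    ACPI_CPU_FLAGS flags;

    flags = AcpiOsAcquireLock(lock);
    /* ... short critical section, safe in interrupt context ... */
    AcpiOsReleaseLock(lock, flags);
}
#endif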

/* Section 5.2.9.1:  global lock acquire/release functions */
#define GL_ACQUIRED	(-1)
#define GL_BUSY		0
#define GL_BIT_PENDING	0x1
#define GL_BIT_OWNED	0x2
#define GL_BIT_MASK	(GL_BIT_PENDING | GL_BIT_OWNED)

/*
 * Acquire the global lock.  If busy, set the pending bit.  The caller
 * will wait for notification from the BIOS that the lock is available
 * and then attempt to acquire it again.
 */
int
acpi_acquire_global_lock(uint32_t *lock)
{
	uint32_t new, old;

	do {
		old = *lock;
		new = ((old & ~GL_BIT_MASK) | GL_BIT_OWNED) |
			((old >> 1) & GL_BIT_PENDING);
	} while (atomic_cmpset_int(lock, old, new) == 0);

	return ((new < GL_BIT_MASK) ? GL_ACQUIRED : GL_BUSY);
}
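
/*
 * Worked example (annotation, not in the original): with
 * GL_BIT_OWNED = 0x2 and GL_BIT_PENDING = 0x1 the update above behaves
 * as follows:
 *   old = 0x0 (free)            -> new = 0x2, returns GL_ACQUIRED
 *   old = 0x2 (owned)           -> new = 0x3, returns GL_BUSY, pending set
 *   old = 0x3 (owned + pending) -> new = 0x3, returns GL_BUSY
 */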

/*
 * Release the global lock, returning whether there is a waiter pending.
 * If the BIOS set the pending bit, OSPM must notify the BIOS when it
 * releases the lock.
 */
int
acpi_release_global_lock(uint32_t *lock)
{
	uint32_t new, old;

	do {
		old = *lock;
		new = old & ~GL_BIT_MASK;
	} while (atomic_cmpset_int(lock, old, new) == 0);

	return (old & GL_BIT_PENDING);
}
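
/*
 * Example (annotation, not part of the original file): a sketch of the
 * acquire/notify protocol seen from the caller.  "gl" would normally
 * point at the GlobalLock field of the FACS; the wait for the BIOS
 * release notification is elided.
 */
#if 0
static void
acpi_global_lock_usage_sketch(uint32_t *gl)
{
	while (acpi_acquire_global_lock(gl) == GL_BUSY) {
		/*
		 * The pending bit is now set: block until the BIOS
		 * signals GBL_RELEASE, then retry the acquire.
		 */
	}
	/* ... access the hardware resource shared with the BIOS ... */
	if (acpi_release_global_lock(gl)) {
		/*
		 * A waiter was pending: OSPM must set GBL_RLS to
		 * notify the BIOS.
		 */
	}
}
#endif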