/*-
 * Copyright (c) 2000 Michael Smith
 * Copyright (c) 2000 BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/acpica/Osd/OsdSynch.c,v 1.21 2004/05/05 20:07:52 njl Exp $
 */

/*
 * Mutual Exclusion and Synchronisation
 */

#include "acpi.h"
#include "accommon.h"

#include "opt_acpi.h"

#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/thread.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>

#include <dev/acpica/acpivar.h>

#define _COMPONENT	ACPI_OS_SERVICES
ACPI_MODULE_NAME("SYNCH")

MALLOC_DEFINE(M_ACPISEM, "acpisem", "ACPI semaphore");

#define AS_LOCK(as)		spin_lock(&(as)->as_spin)
#define AS_UNLOCK(as)		spin_unlock(&(as)->as_spin)
#define AS_LOCK_DECL

/*
 * Simple counting semaphore implemented using a spinlock.  (Subsequently
 * used in the OSI code to implement a mutex.  Go figure.)
 */
struct acpi_semaphore {
    struct	spinlock as_spin;	/* protects the fields below */
    UINT32	as_units;		/* units currently available */
    UINT32	as_maxunits;		/* limit, or ACPI_NO_UNIT_LIMIT */
    UINT32	as_pendings;		/* threads blocked in wait */
    UINT32	as_resetting;		/* reset in progress (disabled code) */
    UINT32	as_timeouts;		/* consecutive timed-out waits */
};
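
/*
 * Illustrative sketch (not part of this file): ACPICA typically builds a
 * mutex out of a binary semaphore created through this interface, along
 * the lines of:
 *
 *	ACPI_HANDLE sem;
 *
 *	AcpiOsCreateSemaphore(1, 1, &sem);
 *	AcpiOsWaitSemaphore(sem, 1, ACPI_WAIT_FOREVER);	- lock
 *	AcpiOsSignalSemaphore(sem, 1);			- unlock
 *	AcpiOsDeleteSemaphore(sem);
 */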

#ifndef ACPI_SEMAPHORES_MAX_PENDING
#define ACPI_SEMAPHORES_MAX_PENDING	0x1FFFFFFF
#endif
static int	acpi_semaphore_debug = 0;
TUNABLE_INT("debug.acpi_semaphore_debug", &acpi_semaphore_debug);
SYSCTL_INT(_debug_acpi, OID_AUTO, semaphore_debug, CTLFLAG_RW,
	   &acpi_semaphore_debug, 0, "Enable ACPI semaphore debug messages");
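
/*
 * Usage note (illustrative; names taken from the declarations above, and
 * note that the tunable and sysctl names differ): the knob can be set via
 * the loader tunable "debug.acpi_semaphore_debug" in loader.conf, or at
 * runtime with
 *
 *	sysctl debug.acpi.semaphore_debug=1
 */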

ACPI_STATUS
AcpiOsCreateSemaphore(UINT32 MaxUnits, UINT32 InitialUnits,
    ACPI_HANDLE *OutHandle)
{
    struct acpi_semaphore	*as;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    if (OutHandle == NULL)
	return_ACPI_STATUS (AE_BAD_PARAMETER);
    if (InitialUnits > MaxUnits)
	return_ACPI_STATUS (AE_BAD_PARAMETER);

    as = kmalloc(sizeof(*as), M_ACPISEM, M_INTWAIT | M_ZERO);

    spin_init(&as->as_spin, "AcpiOsSem");
    as->as_units = InitialUnits;
    as->as_maxunits = MaxUnits;
    as->as_pendings = as->as_resetting = as->as_timeouts = 0;

    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
	"created semaphore %p max %d, initial %d\n",
	as, MaxUnits, InitialUnits));

    *OutHandle = (ACPI_HANDLE)as;

    return_ACPI_STATUS (AE_OK);
}

ACPI_STATUS
AcpiOsDeleteSemaphore(ACPI_HANDLE Handle)
{
    struct acpi_semaphore *as = (struct acpi_semaphore *)Handle;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "destroyed semaphore %p\n", as));
    spin_uninit(&as->as_spin);
    kfree(as, M_ACPISEM);

    return_ACPI_STATUS (AE_OK);
}

ACPI_STATUS
AcpiOsWaitSemaphore(ACPI_HANDLE Handle, UINT32 Units, UINT16 Timeout)
{
    ACPI_STATUS			result;
    struct acpi_semaphore	*as = (struct acpi_semaphore *)Handle;
    int				rv, tmo;
    struct timeval		timeouttv, currenttv, timelefttv;
    AS_LOCK_DECL;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    if (as == NULL)
	return_ACPI_STATUS (AE_BAD_PARAMETER);

    if (cold)
	return_ACPI_STATUS (AE_OK);

#if 0
    if (as->as_units < Units && as->as_timeouts > 10) {
	kprintf("%s: semaphore %p too many timeouts, resetting\n", __func__, as);
	AS_LOCK(as);
	as->as_units = as->as_maxunits;
	if (as->as_pendings)
	    as->as_resetting = 1;
	as->as_timeouts = 0;
	wakeup(as);
	AS_UNLOCK(as);
	return_ACPI_STATUS (AE_TIME);
    }

    if (as->as_resetting)
	return_ACPI_STATUS (AE_TIME);
#endif

    /* a timeout of ACPI_WAIT_FOREVER means "forever" */
    if (Timeout == ACPI_WAIT_FOREVER) {
	tmo = 0;
	timeouttv.tv_sec = ((0xffff/1000) + 1);	/* cf. ACPI spec */
	timeouttv.tv_usec = 0;
    } else {
	/* compute timeout using microseconds per tick */
	tmo = (Timeout * 1000) / (1000000 / hz);
	if (tmo <= 0)
	    tmo = 1;
	timeouttv.tv_sec  = Timeout / 1000;
	timeouttv.tv_usec = (Timeout % 1000) * 1000;
    }
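
    /*
     * Worked example (assuming hz = 100, i.e. 10000us per tick): a
     * Timeout of 50ms yields tmo = 50000 / 10000 = 5 ticks, while 5ms
     * yields 5000 / 10000 = 0, which the clamp above raises to the
     * 1-tick minimum because tmo == 0 tells ssleep() below to wait
     * without a timeout.
     */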

    /* calculate timeout value in timeval */
    getmicrouptime(&currenttv);
    timevaladd(&timeouttv, &currenttv);

    AS_LOCK(as);
    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
	"get %d units from semaphore %p (has %d), timeout %d\n",
	Units, as, as->as_units, Timeout));
    for (;;) {
	if (as->as_maxunits == ACPI_NO_UNIT_LIMIT) {
	    result = AE_OK;
	    break;
	}
	if (as->as_units >= Units) {
	    as->as_units -= Units;
	    result = AE_OK;
	    break;
	}

	/* limit the number of pending threads */
	if (as->as_pendings >= ACPI_SEMAPHORES_MAX_PENDING) {
	    result = AE_TIME;
	    break;
	}

	/* if a timeout of zero is specified, return immediately */
	if (Timeout == 0) {
	    result = AE_TIME;
	    break;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
	    "semaphore blocked, calling ssleep(%p, %p, %d, \"acsem\", %d)\n",
	    as, &as->as_spin, PCATCH, tmo));

	as->as_pendings++;

	if (acpi_semaphore_debug) {
	    kprintf("%s: Sleep %jd, pending %jd, semaphore %p, thread %#jx\n",
		__func__, (intmax_t)Timeout,
		(intmax_t)as->as_pendings, as,
		(uintmax_t)AcpiOsGetThreadId());
	}

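	/*
	 * ssleep() atomically drops as_spin while this thread sleeps and
	 * reacquires it before returning; it returns 0 on wakeup(),
	 * EWOULDBLOCK once the tmo tick timeout expires, or an
	 * interruption error because of PCATCH.
	 */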
	rv = ssleep(as, &as->as_spin, PCATCH, "acsem", tmo);

	as->as_pendings--;

#if 0
	if (as->as_resetting) {
	    /* semaphore reset, return immediately */
	    if (as->as_pendings == 0) {
		as->as_resetting = 0;
	    }
	    result = AE_TIME;
	    break;
	}
#endif

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "ssleep(%d) returned %d\n", tmo, rv));
	if (rv == EWOULDBLOCK) {
	    result = AE_TIME;
	    break;
	}

	/* check if we have already waited long enough */
	timelefttv = timeouttv;
	getmicrouptime(&currenttv);
	timevalsub(&timelefttv, &currenttv);
	if (timelefttv.tv_sec < 0) {
	    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "await semaphore %p timeout\n",
		as));
	    result = AE_TIME;
	    break;
	}

	/* adjust timeout for the next sleep */
	tmo = (timelefttv.tv_sec * 1000000 + timelefttv.tv_usec) /
	    (1000000 / hz);
	if (tmo <= 0)
	    tmo = 1;

	if (acpi_semaphore_debug) {
	    kprintf("%s: Wakeup timeleft(%jd, %jd), tmo %jd, sem %p, thread %#jx\n",
		__func__,
		(intmax_t)timelefttv.tv_sec, (intmax_t)timelefttv.tv_usec,
		(intmax_t)tmo, as, (uintmax_t)AcpiOsGetThreadId());
	}
    }

    if (acpi_semaphore_debug) {
	if (result == AE_TIME && Timeout > 0) {
	    kprintf("%s: Timeout %d, pending %d, semaphore %p\n",
		__func__, Timeout, as->as_pendings, as);
	}
	if (ACPI_SUCCESS(result) &&
	    (as->as_timeouts > 0 || as->as_pendings > 0))
	{
	    kprintf("%s: Acquire %d, units %d, pending %d, sem %p, thread %#jx\n",
		__func__, Units, as->as_units, as->as_pendings, as,
		(uintmax_t)AcpiOsGetThreadId());
	}
    }

    if (result == AE_TIME)
	as->as_timeouts++;
    else
	as->as_timeouts = 0;

    AS_UNLOCK(as);
    return_ACPI_STATUS (result);
}

ACPI_STATUS
AcpiOsSignalSemaphore(ACPI_HANDLE Handle, UINT32 Units)
{
    struct acpi_semaphore	*as = (struct acpi_semaphore *)Handle;
    AS_LOCK_DECL;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    if (as == NULL)
	return_ACPI_STATUS(AE_BAD_PARAMETER);

    AS_LOCK(as);
    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
	"return %d units to semaphore %p (has %d)\n",
	Units, as, as->as_units));
    if (as->as_maxunits != ACPI_NO_UNIT_LIMIT) {
	as->as_units += Units;
	if (as->as_units > as->as_maxunits)
	    as->as_units = as->as_maxunits;
    }

    if (acpi_semaphore_debug && (as->as_timeouts > 0 || as->as_pendings > 0)) {
	kprintf("%s: Release %d, units %d, pending %d, semaphore %p, thread %#jx\n",
	    __func__, Units, as->as_units, as->as_pendings, as,
	    (uintmax_t)AcpiOsGetThreadId());
    }

    wakeup(as);
    AS_UNLOCK(as);

    return_ACPI_STATUS (AE_OK);
}
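
/*
 * Note: the wakeup() above wakes every thread blocked in
 * AcpiOsWaitSemaphore(); each one re-runs the for (;;) loop there and
 * either claims the units it needs or goes back to sleep with a
 * recomputed timeout.
 */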

/*
 * This represents a bit of a problem: it looks like the ACPI contrib
 * code holds OS locks across potentially blocking system calls, so
 * we can't safely use spinlocks in all situations.  But any use cases
 * from the idle thread have to use spinlocks.
 *
 * For now, use the spinlock for idle-thread operation and the lockmgr
 * lock otherwise.  The only thing the idle thread can issue ACPI-wise
 * is related to cpu low power modes, so hopefully this will not
 * interfere with ACPI operations issued by threads on other cpus.
 */
struct acpi_spinlock {
    struct lock lock;
    struct spinlock slock;
#ifdef ACPI_DEBUG_LOCKS
    thread_t	owner;
    const char *func;
    int line;
#endif
};

ACPI_STATUS
AcpiOsCreateLock(ACPI_SPINLOCK *OutHandle)
{
    ACPI_SPINLOCK spin;

    if (OutHandle == NULL)
	return (AE_BAD_PARAMETER);
    spin = kmalloc(sizeof(*spin), M_ACPISEM, M_INTWAIT|M_ZERO);
    spin_init(&spin->slock, "AcpiOsLock");
    lockinit(&spin->lock, "AcpiOsLock", 0, 0);
#ifdef ACPI_DEBUG_LOCKS
    spin->owner = NULL;
    spin->func = "";
    spin->line = 0;
#endif
    *OutHandle = spin;
    return (AE_OK);
}

void
AcpiOsDeleteLock (ACPI_SPINLOCK Spin)
{
    if (Spin == NULL)
	return;
    spin_uninit(&Spin->slock);
    lockuninit(&Spin->lock);
    kfree(Spin, M_ACPISEM);
}

/*
 * OS-dependent locking primitives.  These routines should be able to be
 * called from an interrupt handler or from the cpu_idle thread.
 *
 * NB: some ACPICA functions that take locking flags, e.g. AcpiSetRegister(),
 * have been changed to unconditionally call AcpiOsAcquireLock/
 * AcpiOsReleaseLock.
 */
ACPI_CPU_FLAGS
#ifdef ACPI_DEBUG_LOCKS
_AcpiOsAcquireLock (ACPI_SPINLOCK Spin, const char *func, int line)
#else
AcpiOsAcquireLock (ACPI_SPINLOCK Spin)
#endif
{
    globaldata_t gd = mycpu;

    if (gd->gd_curthread == &gd->gd_idlethread) {
	spin_lock(&Spin->slock);
    } else {
	lockmgr(&Spin->lock, LK_EXCLUSIVE);
	crit_enter();
    }
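
    /*
     * Presumably (an assumption based on the comment above struct
     * acpi_spinlock) the critical section makes the lockmgr path look
     * spinlock-like to ACPICA by preventing this thread from being
     * preempted while the lock is held.
     */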

#ifdef ACPI_DEBUG_LOCKS
    if (Spin->owner) {
	kprintf("%p(%s:%d): acpi_spinlock %p already held by %p(%s:%d)\n",
		curthread, func, line, Spin, Spin->owner, Spin->func,
		Spin->line);
	print_backtrace(-1);
    } else {
	Spin->owner = curthread;
	Spin->func = func;
	Spin->line = line;
    }
#endif
    return(0);
}

void
AcpiOsReleaseLock (ACPI_SPINLOCK Spin, ACPI_CPU_FLAGS Flags)
{
#ifdef ACPI_DEBUG_LOCKS
    if (Flags) {
	if (Spin->owner != NULL) {
	    kprintf("%p: acpi_spinlock %p is unexpectedly held by %p(%s:%d)\n",
		    curthread, Spin, Spin->owner, Spin->func, Spin->line);
	    print_backtrace(-1);
	} else
	    return;
    }
    Spin->owner = NULL;
    Spin->func = "";
    Spin->line = 0;
#endif
    globaldata_t gd = mycpu;

    if (gd->gd_curthread == &gd->gd_idlethread) {
	spin_unlock(&Spin->slock);
    } else {
	crit_exit();
	lockmgr(&Spin->lock, LK_RELEASE);
    }
}
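
/*
 * Typical ACPICA call pattern for these primitives (illustrative sketch;
 * AcpiGbl_GpeLock is just one example of an ACPI_SPINLOCK handle):
 *
 *	ACPI_CPU_FLAGS flags;
 *
 *	flags = AcpiOsAcquireLock(AcpiGbl_GpeLock);
 *	... touch state that must stay consistent ...
 *	AcpiOsReleaseLock(AcpiGbl_GpeLock, flags);
 */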

/* Section 5.2.9.1:  global lock acquire/release functions */
#define GL_ACQUIRED	(-1)
#define GL_BUSY		0
#define GL_BIT_PENDING	0x1
#define GL_BIT_OWNED	0x2
#define GL_BIT_MASK	(GL_BIT_PENDING | GL_BIT_OWNED)
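
/*
 * The bit defines above mirror the Global Lock dword in the FACS as
 * described by the ACPI specification: bit 0 is the pending bit (a waiter
 * wants the lock) and bit 1 is the owned bit.  GL_ACQUIRED and GL_BUSY
 * are the return values of acpi_acquire_global_lock() below.
 */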

/*
 * Acquire the global lock.  If busy, set the pending bit.  The caller
 * will wait for notification from the BIOS that the lock is available
 * and then attempt to acquire it again.
 */
int
acpi_acquire_global_lock(uint32_t *lock)
{
	uint32_t new, old;

	do {
		old = *lock;
		new = ((old & ~GL_BIT_MASK) | GL_BIT_OWNED) |
			((old >> 1) & GL_BIT_PENDING);
	} while (atomic_cmpset_int(lock, old, new) == 0);

	return ((new < GL_BIT_MASK) ? GL_ACQUIRED : GL_BUSY);
}
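
/*
 * Worked example: if the lock is free (old == 0) then new becomes
 * GL_BIT_OWNED (2) and the function returns GL_ACQUIRED.  If the firmware
 * side owns it (old == GL_BIT_OWNED) then new becomes owned | pending (3),
 * the function returns GL_BUSY, and the pending bit asks the owner to
 * signal us on release.
 */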

/*
 * Release the global lock, returning whether there is a waiter pending.
 * If the BIOS set the pending bit, OSPM must notify the BIOS when it
 * releases the lock.
 */
int
acpi_release_global_lock(uint32_t *lock)
{
	uint32_t new, old;

	do {
		old = *lock;
		new = old & ~GL_BIT_MASK;
	} while (atomic_cmpset_int(lock, old, new) == 0);

	return (old & GL_BIT_PENDING);
}
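
/*
 * Hypothetical caller sketch: per the ACPI spec, if a waiter was pending
 * the OS must tell the firmware that the lock was released, roughly:
 *
 *	if (acpi_release_global_lock(&facs->GlobalLock))
 *		AcpiWriteBitRegister(ACPI_BITREG_GLOBAL_LOCK_RELEASE, 1);
 */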
474