xref: /dragonfly/sys/dev/acpica/Osd/OsdSynch.c (revision 335b9e93)
/*-
 * Copyright (c) 2000 Michael Smith
 * Copyright (c) 2000 BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/acpica/Osd/OsdSynch.c,v 1.21 2004/05/05 20:07:52 njl Exp $
 */

/*
 * Mutual Exclusion and Synchronisation
 */

#include "acpi.h"
#include "accommon.h"

#include "opt_acpi.h"

#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/thread.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>

#include <dev/acpica/acpivar.h>

#define _COMPONENT	ACPI_OS_SERVICES
ACPI_MODULE_NAME("SYNCH")

MALLOC_DEFINE(M_ACPISEM, "acpisem", "ACPI semaphore");

#define AS_LOCK(as)		spin_lock(&(as)->as_spin)
#define AS_UNLOCK(as)		spin_unlock(&(as)->as_spin)
#define AS_LOCK_DECL

/*
 * Simple counting semaphore implemented using a spinlock.  (Subsequently
 * used in the OSI code to implement a mutex.  Go figure.)
 */
struct acpi_semaphore {
    struct	spinlock as_spin;
    UINT32	as_units;
    UINT32	as_maxunits;
    UINT32	as_pendings;
    UINT32	as_resetting;
    UINT32	as_timeouts;
};

#ifndef ACPI_NO_SEMAPHORES
#ifndef ACPI_SEMAPHORES_MAX_PENDING
#define ACPI_SEMAPHORES_MAX_PENDING	0x1FFFFFFF
#endif
static int	acpi_semaphore_debug = 0;
TUNABLE_INT("debug.acpi_semaphore_debug", &acpi_semaphore_debug);
SYSCTL_INT(_debug_acpi, OID_AUTO, semaphore_debug, CTLFLAG_RW,
	   &acpi_semaphore_debug, 0, "Enable ACPI semaphore debug messages");
#endif /* !ACPI_NO_SEMAPHORES */
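
/*
 * The debug knob above can be turned on at boot via the loader tunable,
 * or at run time via the read-write sysctl it registers, e.g.:
 *
 *	set debug.acpi_semaphore_debug=1	(loader prompt / loader.conf)
 *	sysctl debug.acpi.semaphore_debug=1	(running system)
 */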

ACPI_STATUS
AcpiOsCreateSemaphore(UINT32 MaxUnits, UINT32 InitialUnits,
    ACPI_HANDLE *OutHandle)
{
#ifndef ACPI_NO_SEMAPHORES
    struct acpi_semaphore	*as;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    if (OutHandle == NULL)
	return_ACPI_STATUS (AE_BAD_PARAMETER);
    if (InitialUnits > MaxUnits)
	return_ACPI_STATUS (AE_BAD_PARAMETER);

    as = kmalloc(sizeof(*as), M_ACPISEM, M_INTWAIT | M_ZERO);

    spin_init(&as->as_spin, "AcpiOsSem");
    as->as_units = InitialUnits;
    as->as_maxunits = MaxUnits;
    as->as_pendings = as->as_resetting = as->as_timeouts = 0;

    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
	"created semaphore %p max %d, initial %d\n",
	as, MaxUnits, InitialUnits));

    *OutHandle = (ACPI_HANDLE)as;
#else
    *OutHandle = (ACPI_HANDLE)OutHandle;
#endif /* !ACPI_NO_SEMAPHORES */

    return_ACPI_STATUS (AE_OK);
}

ACPI_STATUS
AcpiOsDeleteSemaphore(ACPI_HANDLE Handle)
{
#ifndef ACPI_NO_SEMAPHORES
    struct acpi_semaphore *as = (struct acpi_semaphore *)Handle;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "destroyed semaphore %p\n", as));
    spin_uninit(&as->as_spin);
    kfree(as, M_ACPISEM);
#endif /* !ACPI_NO_SEMAPHORES */

    return_ACPI_STATUS (AE_OK);
}

ACPI_STATUS
AcpiOsWaitSemaphore(ACPI_HANDLE Handle, UINT32 Units, UINT16 Timeout)
{
#ifndef ACPI_NO_SEMAPHORES
    ACPI_STATUS			result;
    struct acpi_semaphore	*as = (struct acpi_semaphore *)Handle;
    int				rv, tmo;
    struct timeval		timeouttv, currenttv, timelefttv;
    AS_LOCK_DECL;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    if (as == NULL)
	return_ACPI_STATUS (AE_BAD_PARAMETER);

    if (cold)
	return_ACPI_STATUS (AE_OK);

#if 0
    if (as->as_units < Units && as->as_timeouts > 10) {
	kprintf("%s: semaphore %p too many timeouts, resetting\n", __func__, as);
	AS_LOCK(as);
	as->as_units = as->as_maxunits;
	if (as->as_pendings)
	    as->as_resetting = 1;
	as->as_timeouts = 0;
	wakeup(as);
	AS_UNLOCK(as);
	return_ACPI_STATUS (AE_TIME);
    }

    if (as->as_resetting)
	return_ACPI_STATUS (AE_TIME);
#endif

    /* a timeout of ACPI_WAIT_FOREVER means "forever" */
    if (Timeout == ACPI_WAIT_FOREVER) {
	tmo = 0;
	timeouttv.tv_sec = ((0xffff/1000) + 1);	/* cf. ACPI spec */
	timeouttv.tv_usec = 0;
    } else {
	/* compute timeout using microseconds per tick */
	tmo = (Timeout * 1000) / (1000000 / hz);
	if (tmo <= 0)
	    tmo = 1;
	timeouttv.tv_sec  = Timeout / 1000;
	timeouttv.tv_usec = (Timeout % 1000) * 1000;
    }
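
    /*
     * Worked example of the conversion above (hz is configuration
     * dependent; 100 is assumed here purely for illustration): a
     * Timeout of 50ms gives tmo = (50 * 1000) / (1000000 / 100)
     * = 50000 / 10000 = 5 ticks, and timeouttv = { 0 sec, 50000 usec }.
     */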

    /* calculate timeout value in timeval */
    getmicrouptime(&currenttv);
    timevaladd(&timeouttv, &currenttv);

    AS_LOCK(as);
    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
	"get %d units from semaphore %p (has %d), timeout %d\n",
	Units, as, as->as_units, Timeout));
    for (;;) {
	if (as->as_maxunits == ACPI_NO_UNIT_LIMIT) {
	    result = AE_OK;
	    break;
	}
	if (as->as_units >= Units) {
	    as->as_units -= Units;
	    result = AE_OK;
	    break;
	}

	/* limit the number of pending threads */
	if (as->as_pendings >= ACPI_SEMAPHORES_MAX_PENDING) {
	    result = AE_TIME;
	    break;
	}

	/* if a timeout of zero is specified, return immediately */
	if (Timeout == 0) {
	    result = AE_TIME;
	    break;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
	    "semaphore blocked, calling ssleep(%p, %p, %d, \"acsem\", %d)\n",
	    as, &as->as_spin, PCATCH, tmo));

	as->as_pendings++;

	if (acpi_semaphore_debug) {
	    kprintf("%s: Sleep %jd, pending %jd, semaphore %p, thread %#jx\n",
		__func__, (intmax_t)Timeout,
		(intmax_t)as->as_pendings, as,
		(uintmax_t)AcpiOsGetThreadId());
	}

	rv = ssleep(as, &as->as_spin, PCATCH, "acsem", tmo);

	as->as_pendings--;

#if 0
	if (as->as_resetting) {
	    /* semaphore reset, return immediately */
	    if (as->as_pendings == 0) {
		as->as_resetting = 0;
	    }
	    result = AE_TIME;
	    break;
	}
#endif

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "ssleep(%d) returned %d\n", tmo, rv));
	if (rv == EWOULDBLOCK) {
	    result = AE_TIME;
	    break;
	}

	/* check whether we have already waited long enough */
	timelefttv = timeouttv;
	getmicrouptime(&currenttv);
	timevalsub(&timelefttv, &currenttv);
	if (timelefttv.tv_sec < 0) {
	    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "await semaphore %p timeout\n",
		as));
	    result = AE_TIME;
	    break;
	}

	/* adjust timeout for the next sleep */
	tmo = (timelefttv.tv_sec * 1000000 + timelefttv.tv_usec) /
	    (1000000 / hz);
	if (tmo <= 0)
	    tmo = 1;

	if (acpi_semaphore_debug) {
	    kprintf("%s: Wakeup timeleft(%jd, %jd), tmo %jd, sem %p, thread %#jx\n",
		__func__,
		(intmax_t)timelefttv.tv_sec, (intmax_t)timelefttv.tv_usec,
		(intmax_t)tmo, as, (uintmax_t)AcpiOsGetThreadId());
	}
    }

    if (acpi_semaphore_debug) {
	if (result == AE_TIME && Timeout > 0) {
	    kprintf("%s: Timeout %d, pending %d, semaphore %p\n",
		__func__, Timeout, as->as_pendings, as);
	}
	if (ACPI_SUCCESS(result) &&
	    (as->as_timeouts > 0 || as->as_pendings > 0))
	{
	    kprintf("%s: Acquire %d, units %d, pending %d, sem %p, thread %#jx\n",
		__func__, Units, as->as_units, as->as_pendings, as,
		(uintmax_t)AcpiOsGetThreadId());
	}
    }

    if (result == AE_TIME)
	as->as_timeouts++;
    else
	as->as_timeouts = 0;

    AS_UNLOCK(as);
    return_ACPI_STATUS (result);
#else
    return_ACPI_STATUS (AE_OK);
#endif /* !ACPI_NO_SEMAPHORES */
}

ACPI_STATUS
AcpiOsSignalSemaphore(ACPI_HANDLE Handle, UINT32 Units)
{
#ifndef ACPI_NO_SEMAPHORES
    struct acpi_semaphore	*as = (struct acpi_semaphore *)Handle;
    AS_LOCK_DECL;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    if (as == NULL)
	return_ACPI_STATUS(AE_BAD_PARAMETER);

    AS_LOCK(as);
    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
	"return %d units to semaphore %p (has %d)\n",
	Units, as, as->as_units));
    if (as->as_maxunits != ACPI_NO_UNIT_LIMIT) {
	as->as_units += Units;
	if (as->as_units > as->as_maxunits)
	    as->as_units = as->as_maxunits;
    }

    if (acpi_semaphore_debug && (as->as_timeouts > 0 || as->as_pendings > 0)) {
	kprintf("%s: Release %d, units %d, pending %d, semaphore %p, thread %#jx\n",
	    __func__, Units, as->as_units, as->as_pendings, as,
	    (uintmax_t)AcpiOsGetThreadId());
    }

    wakeup(as);
    AS_UNLOCK(as);
#endif /* !ACPI_NO_SEMAPHORES */

    return_ACPI_STATUS (AE_OK);
}
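
#if 0
/*
 * Illustrative sketch (not compiled): the typical life cycle of the
 * semaphore interface above, created here with MaxUnits = InitialUnits = 1
 * so it behaves as the mutex the OSI code builds on top of it.  The
 * function name is hypothetical.
 */
static void
acpi_semaphore_example(void)
{
    ACPI_HANDLE sem;

    if (ACPI_FAILURE(AcpiOsCreateSemaphore(1, 1, &sem)))
	return;
    /* block for up to 100ms waiting for one unit */
    if (ACPI_SUCCESS(AcpiOsWaitSemaphore(sem, 1, 100))) {
	/* ... critical section ... */
	AcpiOsSignalSemaphore(sem, 1);	/* return the unit */
    }
    AcpiOsDeleteSemaphore(sem);
}
#endif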

/*
 * This represents a bit of a problem: the ACPI contrib code appears to
 * hold Os locks across potentially blocking system calls, so we can't
 * safely use spinlocks in all situations.  Any use from the idle thread,
 * however, must use spinlocks.
 *
 * For now use the spinlock for idle-thread operation and the lockmgr lock
 * otherwise.  The only thing the idlethread can issue ACPI-wise is related
 * to cpu low power modes; hopefully this will not interfere with ACPI
 * operations performed by other threads on other cpus.
 */
struct acpi_spinlock {
    struct lock lock;
    struct spinlock slock;
#ifdef ACPI_DEBUG_LOCKS
    thread_t	owner;
    const char *func;
    int line;
#endif
};

ACPI_STATUS
AcpiOsCreateLock(ACPI_SPINLOCK *OutHandle)
{
    ACPI_SPINLOCK spin;

    if (OutHandle == NULL)
	return (AE_BAD_PARAMETER);
    spin = kmalloc(sizeof(*spin), M_ACPISEM, M_INTWAIT|M_ZERO);
    spin_init(&spin->slock, "AcpiOsLock");
    lockinit(&spin->lock, "AcpiOsLock", 0, 0);
#ifdef ACPI_DEBUG_LOCKS
    spin->owner = NULL;
    spin->func = "";
    spin->line = 0;
#endif
    *OutHandle = spin;
    return (AE_OK);
}

void
AcpiOsDeleteLock (ACPI_SPINLOCK Spin)
{
    if (Spin == NULL)
	return;
    spin_uninit(&Spin->slock);
    lockuninit(&Spin->lock);
    kfree(Spin, M_ACPISEM);
}

/*
 * OS-dependent locking primitives.  These routines should be able to be
 * called from an interrupt handler or from the cpu_idle thread.
 *
 * NB: some ACPICA functions that take locking flags, e.g. AcpiSetRegister(),
 * have been changed to call AcpiOsAcquireLock/AcpiOsReleaseLock
 * unconditionally.
 */
ACPI_CPU_FLAGS
#ifdef ACPI_DEBUG_LOCKS
_AcpiOsAcquireLock (ACPI_SPINLOCK Spin, const char *func, int line)
#else
AcpiOsAcquireLock (ACPI_SPINLOCK Spin)
#endif
{
    globaldata_t gd = mycpu;

    if (gd->gd_curthread == &gd->gd_idlethread) {
	spin_lock(&Spin->slock);
    } else {
	lockmgr(&Spin->lock, LK_EXCLUSIVE);
	crit_enter();
    }

#ifdef ACPI_DEBUG_LOCKS
    if (Spin->owner) {
	kprintf("%p(%s:%d): acpi_spinlock %p already held by %p(%s:%d)\n",
		curthread, func, line, Spin, Spin->owner, Spin->func,
		Spin->line);
	print_backtrace(-1);
    } else {
	Spin->owner = curthread;
	Spin->func = func;
	Spin->line = line;
    }
#endif
    return(0);
}

void
AcpiOsReleaseLock (ACPI_SPINLOCK Spin, ACPI_CPU_FLAGS Flags)
{
#ifdef ACPI_DEBUG_LOCKS
    if (Flags) {
	if (Spin->owner != NULL) {
	    kprintf("%p: acpi_spinlock %p is unexpectedly held by %p(%s:%d)\n",
		    curthread, Spin, Spin->owner, Spin->func, Spin->line);
	    print_backtrace(-1);
	} else
	    return;
    }
    Spin->owner = NULL;
    Spin->func = "";
    Spin->line = 0;
#endif
    globaldata_t gd = mycpu;

    if (gd->gd_curthread == &gd->gd_idlethread) {
	spin_unlock(&Spin->slock);
    } else {
	crit_exit();
	lockmgr(&Spin->lock, LK_RELEASE);
    }
}
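
#if 0
/*
 * Illustrative sketch (not compiled): how a caller is expected to pair
 * the primitives above.  The flags value returned by AcpiOsAcquireLock()
 * is handed back to AcpiOsReleaseLock().  The function name is
 * hypothetical.
 */
static void
acpi_oslock_example(void)
{
    ACPI_SPINLOCK lock;
    ACPI_CPU_FLAGS flags;

    if (AcpiOsCreateLock(&lock) != AE_OK)
	return;
    flags = AcpiOsAcquireLock(lock);
    /* ... short critical section; may run from the idle thread ... */
    AcpiOsReleaseLock(lock, flags);
    AcpiOsDeleteLock(lock);
}
#endif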

/* Section 5.2.9.1:  global lock acquire/release functions */
#define GL_ACQUIRED	(-1)
#define GL_BUSY		0
#define GL_BIT_PENDING	0x1
#define GL_BIT_OWNED	0x2
#define GL_BIT_MASK	(GL_BIT_PENDING | GL_BIT_OWNED)

/*
 * Acquire the global lock.  If busy, set the pending bit.  The caller
 * will wait for notification from the BIOS that the lock is available
 * and then attempt to acquire it again.
 */
int
acpi_acquire_global_lock(uint32_t *lock)
{
	uint32_t new, old;

	do {
		old = *lock;
		new = ((old & ~GL_BIT_MASK) | GL_BIT_OWNED) |
			((old >> 1) & GL_BIT_PENDING);
	} while (atomic_cmpset_int(lock, old, new) == 0);

	return ((new < GL_BIT_MASK) ? GL_ACQUIRED : GL_BUSY);
}
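
/*
 * For reference, the two interesting transitions of the compare-and-swap
 * above (only the low two bits change):
 *
 *	old owned/pending	new owned/pending	result
 *	      0 / 0		      1 / 0		GL_ACQUIRED
 *	      1 / 0		      1 / 1		GL_BUSY (pending set)
 *
 * (old >> 1) & GL_BIT_PENDING copies the owned bit into the pending bit,
 * which is exactly the "set pending if already owned" rule; new < 3 then
 * means the lock was free and is now ours.
 */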

/*
 * Release the global lock, returning whether there is a waiter pending.
 * If the BIOS set the pending bit, OSPM must notify the BIOS when it
 * releases the lock.
 */
int
acpi_release_global_lock(uint32_t *lock)
{
	uint32_t new, old;

	do {
		old = *lock;
		new = old & ~GL_BIT_MASK;
	} while (atomic_cmpset_int(lock, old, new) == 0);

	return (old & GL_BIT_PENDING);
}
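
#if 0
/*
 * Illustrative sketch (not compiled): the acquire/release handshake the
 * two helpers above are written for.  "facs_lock" stands in for the
 * pointer to the global lock dword in the FACS; how the caller blocks
 * until the BIOS signals the global lock event is omitted.
 */
static void
acpi_global_lock_example(uint32_t *facs_lock)
{
	while (acpi_acquire_global_lock(facs_lock) != GL_ACQUIRED) {
		/*
		 * The pending bit is now set; sleep until the BIOS
		 * raises the global lock event, then try again.
		 */
	}
	/* ... lock held: touch BIOS-shared hardware ... */
	if (acpi_release_global_lock(facs_lock)) {
		/* pending was set: notify the BIOS (write GBL_RLS) */
	}
}
#endif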