/*-
 * Copyright (c) 2000 Michael Smith
 * Copyright (c) 2000 BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
26 * 27 * $FreeBSD: src/sys/dev/acpica/Osd/OsdSynch.c,v 1.21 2004/05/05 20:07:52 njl Exp $ 28 */ 29 30 /* 31 * Mutual Exclusion and Synchronisation 32 */ 33 34 #include "acpi.h" 35 #include "accommon.h" 36 37 #include "opt_acpi.h" 38 39 #include <sys/kernel.h> 40 #include <sys/bus.h> 41 #include <sys/malloc.h> 42 #include <sys/sysctl.h> 43 #include <sys/lock.h> 44 #include <sys/thread.h> 45 #include <sys/thread2.h> 46 #include <sys/spinlock2.h> 47 48 #include <dev/acpica/acpivar.h> 49 50 #define _COMPONENT ACPI_OS_SERVICES 51 ACPI_MODULE_NAME("SYNCH") 52 53 MALLOC_DEFINE(M_ACPISEM, "acpisem", "ACPI semaphore"); 54 55 #define AS_LOCK(as) spin_lock(&(as)->as_spin) 56 #define AS_UNLOCK(as) spin_unlock(&(as)->as_spin) 57 #define AS_LOCK_DECL 58 59 /* 60 * Simple counting semaphore implemented using a mutex. (Subsequently used 61 * in the OSI code to implement a mutex. Go figure.) 62 */ 63 struct acpi_semaphore { 64 struct spinlock as_spin; 65 UINT32 as_units; 66 UINT32 as_maxunits; 67 UINT32 as_pendings; 68 UINT32 as_resetting; 69 UINT32 as_timeouts; 70 }; 71 72 #ifndef ACPI_NO_SEMAPHORES 73 #ifndef ACPI_SEMAPHORES_MAX_PENDING 74 #define ACPI_SEMAPHORES_MAX_PENDING 0x1FFFFFFF 75 #endif 76 static int acpi_semaphore_debug = 0; 77 TUNABLE_INT("debug.acpi_semaphore_debug", &acpi_semaphore_debug); 78 SYSCTL_INT(_debug_acpi, OID_AUTO, semaphore_debug, CTLFLAG_RW, 79 &acpi_semaphore_debug, 0, "Enable ACPI semaphore debug messages"); 80 #endif /* !ACPI_NO_SEMAPHORES */ 81 82 ACPI_STATUS 83 AcpiOsCreateSemaphore(UINT32 MaxUnits, UINT32 InitialUnits, 84 ACPI_HANDLE *OutHandle) 85 { 86 #ifndef ACPI_NO_SEMAPHORES 87 struct acpi_semaphore *as; 88 89 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 90 91 if (OutHandle == NULL) 92 return_ACPI_STATUS (AE_BAD_PARAMETER); 93 if (InitialUnits > MaxUnits) 94 return_ACPI_STATUS (AE_BAD_PARAMETER); 95 96 as = kmalloc(sizeof(*as), M_ACPISEM, M_INTWAIT | M_ZERO); 97 98 spin_init(&as->as_spin, "AcpiOsSem"); 99 as->as_units = InitialUnits; 100 
as->as_maxunits = MaxUnits; 101 as->as_pendings = as->as_resetting = as->as_timeouts = 0; 102 103 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, 104 "created semaphore %p max %d, initial %d\n", 105 as, InitialUnits, MaxUnits)); 106 107 *OutHandle = (ACPI_HANDLE)as; 108 #else 109 *OutHandle = (ACPI_HANDLE)OutHandle; 110 #endif /* !ACPI_NO_SEMAPHORES */ 111 112 return_ACPI_STATUS (AE_OK); 113 } 114 115 ACPI_STATUS 116 AcpiOsDeleteSemaphore(ACPI_HANDLE Handle) 117 { 118 #ifndef ACPI_NO_SEMAPHORES 119 struct acpi_semaphore *as = (struct acpi_semaphore *)Handle; 120 121 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 122 123 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "destroyed semaphore %p\n", as)); 124 spin_uninit(&as->as_spin); 125 kfree(as, M_ACPISEM); 126 #endif /* !ACPI_NO_SEMAPHORES */ 127 128 return_ACPI_STATUS (AE_OK); 129 } 130 131 ACPI_STATUS 132 AcpiOsWaitSemaphore(ACPI_HANDLE Handle, UINT32 Units, UINT16 Timeout) 133 { 134 #ifndef ACPI_NO_SEMAPHORES 135 ACPI_STATUS result; 136 struct acpi_semaphore *as = (struct acpi_semaphore *)Handle; 137 int rv, tmo; 138 struct timeval timeouttv, currenttv, timelefttv; 139 AS_LOCK_DECL; 140 141 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 142 143 if (as == NULL) 144 return_ACPI_STATUS (AE_BAD_PARAMETER); 145 146 if (cold) 147 return_ACPI_STATUS (AE_OK); 148 149 #if 0 150 if (as->as_units < Units && as->as_timeouts > 10) { 151 kprintf("%s: semaphore %p too many timeouts, resetting\n", __func__, as); 152 AS_LOCK(as); 153 as->as_units = as->as_maxunits; 154 if (as->as_pendings) 155 as->as_resetting = 1; 156 as->as_timeouts = 0; 157 wakeup(as); 158 AS_UNLOCK(as); 159 return_ACPI_STATUS (AE_TIME); 160 } 161 162 if (as->as_resetting) 163 return_ACPI_STATUS (AE_TIME); 164 #endif 165 166 /* a timeout of ACPI_WAIT_FOREVER means "forever" */ 167 if (Timeout == ACPI_WAIT_FOREVER) { 168 tmo = 0; 169 timeouttv.tv_sec = ((0xffff/1000) + 1); /* cf. 
ACPI spec */ 170 timeouttv.tv_usec = 0; 171 } else { 172 /* compute timeout using microseconds per tick */ 173 tmo = (Timeout * 1000) / (1000000 / hz); 174 if (tmo <= 0) 175 tmo = 1; 176 timeouttv.tv_sec = Timeout / 1000; 177 timeouttv.tv_usec = (Timeout % 1000) * 1000; 178 } 179 180 /* calculate timeout value in timeval */ 181 getmicrouptime(¤ttv); 182 timevaladd(&timeouttv, ¤ttv); 183 184 AS_LOCK(as); 185 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, 186 "get %d units from semaphore %p (has %d), timeout %d\n", 187 Units, as, as->as_units, Timeout)); 188 for (;;) { 189 if (as->as_maxunits == ACPI_NO_UNIT_LIMIT) { 190 result = AE_OK; 191 break; 192 } 193 if (as->as_units >= Units) { 194 as->as_units -= Units; 195 result = AE_OK; 196 break; 197 } 198 199 /* limit number of pending treads */ 200 if (as->as_pendings >= ACPI_SEMAPHORES_MAX_PENDING) { 201 result = AE_TIME; 202 break; 203 } 204 205 /* if timeout values of zero is specified, return immediately */ 206 if (Timeout == 0) { 207 result = AE_TIME; 208 break; 209 } 210 211 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, 212 "semaphore blocked, calling ssleep(%p, %p, %d, \"acsem\", %d)\n", 213 as, &as->as_spin, PCATCH, tmo)); 214 215 as->as_pendings++; 216 217 if (acpi_semaphore_debug) { 218 kprintf("%s: Sleep %jd, pending %jd, semaphore %p, thread %#jx\n", 219 __func__, (intmax_t)Timeout, 220 (intmax_t)as->as_pendings, as, 221 (uintmax_t)AcpiOsGetThreadId()); 222 } 223 224 rv = ssleep(as, &as->as_spin, PCATCH, "acsem", tmo); 225 226 as->as_pendings--; 227 228 #if 0 229 if (as->as_resetting) { 230 /* semaphore reset, return immediately */ 231 if (as->as_pendings == 0) { 232 as->as_resetting = 0; 233 } 234 result = AE_TIME; 235 break; 236 } 237 #endif 238 239 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "ssleep(%d) returned %d\n", tmo, rv)); 240 if (rv == EWOULDBLOCK) { 241 result = AE_TIME; 242 break; 243 } 244 245 /* check if we already awaited enough */ 246 timelefttv = timeouttv; 247 getmicrouptime(¤ttv); 248 timevalsub(&timelefttv, ¤ttv); 249 if 
(timelefttv.tv_sec < 0) { 250 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "await semaphore %p timeout\n", 251 as)); 252 result = AE_TIME; 253 break; 254 } 255 256 /* adjust timeout for the next sleep */ 257 tmo = (timelefttv.tv_sec * 1000000 + timelefttv.tv_usec) / 258 (1000000 / hz); 259 if (tmo <= 0) 260 tmo = 1; 261 262 if (acpi_semaphore_debug) { 263 kprintf("%s: Wakeup timeleft(%ju, %ju), tmo %ju, sem %p, thread %#jx\n", 264 __func__, 265 (intmax_t)timelefttv.tv_sec, (intmax_t)timelefttv.tv_usec, 266 (intmax_t)tmo, as, (uintmax_t)AcpiOsGetThreadId()); 267 } 268 } 269 270 if (acpi_semaphore_debug) { 271 if (result == AE_TIME && Timeout > 0) { 272 kprintf("%s: Timeout %d, pending %d, semaphore %p\n", 273 __func__, Timeout, as->as_pendings, as); 274 } 275 if (ACPI_SUCCESS(result) && 276 (as->as_timeouts > 0 || as->as_pendings > 0)) 277 { 278 kprintf("%s: Acquire %d, units %d, pending %d, sem %p, thread %#jx\n", 279 __func__, Units, as->as_units, as->as_pendings, as, 280 (uintmax_t)AcpiOsGetThreadId()); 281 } 282 } 283 284 if (result == AE_TIME) 285 as->as_timeouts++; 286 else 287 as->as_timeouts = 0; 288 289 AS_UNLOCK(as); 290 return_ACPI_STATUS (result); 291 #else 292 return_ACPI_STATUS (AE_OK); 293 #endif /* !ACPI_NO_SEMAPHORES */ 294 } 295 296 ACPI_STATUS 297 AcpiOsSignalSemaphore(ACPI_HANDLE Handle, UINT32 Units) 298 { 299 #ifndef ACPI_NO_SEMAPHORES 300 struct acpi_semaphore *as = (struct acpi_semaphore *)Handle; 301 AS_LOCK_DECL; 302 303 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 304 305 if (as == NULL) 306 return_ACPI_STATUS(AE_BAD_PARAMETER); 307 308 AS_LOCK(as); 309 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, 310 "return %d units to semaphore %p (has %d)\n", 311 Units, as, as->as_units)); 312 if (as->as_maxunits != ACPI_NO_UNIT_LIMIT) { 313 as->as_units += Units; 314 if (as->as_units > as->as_maxunits) 315 as->as_units = as->as_maxunits; 316 } 317 318 if (acpi_semaphore_debug && (as->as_timeouts > 0 || as->as_pendings > 0)) { 319 kprintf("%s: Release %d, units %d, 
pending %d, semaphore %p, thread %#jx\n", 320 __func__, Units, as->as_units, as->as_pendings, as, 321 (uintmax_t)AcpiOsGetThreadId()); 322 } 323 324 wakeup(as); 325 AS_UNLOCK(as); 326 #endif /* !ACPI_NO_SEMAPHORES */ 327 328 return_ACPI_STATUS (AE_OK); 329 } 330 331 struct acpi_spinlock { 332 struct spinlock lock; 333 #ifdef ACPI_DEBUG_LOCKS 334 thread_t owner; 335 const char *func; 336 int line; 337 #endif 338 }; 339 340 ACPI_STATUS 341 AcpiOsCreateLock(ACPI_SPINLOCK *OutHandle) 342 { 343 ACPI_SPINLOCK spin; 344 345 if (OutHandle == NULL) 346 return (AE_BAD_PARAMETER); 347 spin = kmalloc(sizeof(*spin), M_ACPISEM, M_INTWAIT|M_ZERO); 348 spin_init(&spin->lock, "AcpiOsLock"); 349 #ifdef ACPI_DEBUG_LOCKS 350 spin->owner = NULL; 351 spin->func = ""; 352 spin->line = 0; 353 #endif 354 *OutHandle = spin; 355 return (AE_OK); 356 } 357 358 void 359 AcpiOsDeleteLock (ACPI_SPINLOCK Spin) 360 { 361 if (Spin == NULL) 362 return; 363 spin_uninit(&Spin->lock); 364 kfree(Spin, M_ACPISEM); 365 } 366 367 /* 368 * OS-dependent locking primitives. These routines should be able to be 369 * called from an interrupt-handler or cpu_idle thread. 370 * 371 * NB: some of ACPICA functions with locking flags, say AcpiSetRegister(), 372 * are changed to unconditionally call AcpiOsAcquireLock/AcpiOsReleaseLock. 
373 */ 374 ACPI_CPU_FLAGS 375 #ifdef ACPI_DEBUG_LOCKS 376 _AcpiOsAcquireLock (ACPI_SPINLOCK Spin, const char *func, int line) 377 #else 378 AcpiOsAcquireLock (ACPI_SPINLOCK Spin) 379 #endif 380 { 381 spin_lock(&Spin->lock); 382 383 #ifdef ACPI_DEBUG_LOCKS 384 if (Spin->owner) { 385 kprintf("%p(%s:%d): acpi_spinlock %p already held by %p(%s:%d)\n", 386 curthread, func, line, Spin, Spin->owner, Spin->func, 387 Spin->line); 388 print_backtrace(-1); 389 } else { 390 Spin->owner = curthread; 391 Spin->func = func; 392 Spin->line = line; 393 } 394 #endif 395 return(0); 396 } 397 398 void 399 AcpiOsReleaseLock (ACPI_SPINLOCK Spin, ACPI_CPU_FLAGS Flags) 400 { 401 #ifdef ACPI_DEBUG_LOCKS 402 if (Flags) { 403 if (Spin->owner != NULL) { 404 kprintf("%p: acpi_spinlock %p is unexectedly held by %p(%s:%d)\n", 405 curthread, Spin, Spin->owner, Spin->func, Spin->line); 406 print_backtrace(-1); 407 } else 408 return; 409 } 410 Spin->owner = NULL; 411 Spin->func = ""; 412 Spin->line = 0; 413 #endif 414 spin_unlock(&Spin->lock); 415 } 416 417 /* Section 5.2.9.1: global lock acquire/release functions */ 418 #define GL_ACQUIRED (-1) 419 #define GL_BUSY 0 420 #define GL_BIT_PENDING 0x1 421 #define GL_BIT_OWNED 0x2 422 #define GL_BIT_MASK (GL_BIT_PENDING | GL_BIT_OWNED) 423 424 /* 425 * Acquire the global lock. If busy, set the pending bit. The caller 426 * will wait for notification from the BIOS that the lock is available 427 * and then attempt to acquire it again. 428 */ 429 int 430 acpi_acquire_global_lock(uint32_t *lock) 431 { 432 uint32_t new, old; 433 434 do { 435 old = *lock; 436 new = ((old & ~GL_BIT_MASK) | GL_BIT_OWNED) | 437 ((old >> 1) & GL_BIT_PENDING); 438 } while (atomic_cmpset_int(lock, old, new) == 0); 439 440 return ((new < GL_BIT_MASK) ? GL_ACQUIRED : GL_BUSY); 441 } 442 443 /* 444 * Release the global lock, returning whether there is a waiter pending. 445 * If the BIOS set the pending bit, OSPM must notify the BIOS when it 446 * releases the lock. 
447 */ 448 int 449 acpi_release_global_lock(uint32_t *lock) 450 { 451 uint32_t new, old; 452 453 do { 454 old = *lock; 455 new = old & ~GL_BIT_MASK; 456 } while (atomic_cmpset_int(lock, old, new) == 0); 457 458 return (old & GL_BIT_PENDING); 459 } 460