/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         GPL - See COPYING in the top level directory
 * FILE:            ntoskrnl/include/internal/ke_x.h
 * PURPOSE:         Internal Inlined Functions for the Kernel
 * PROGRAMMERS:     Alex Ionescu (alex.ionescu@reactos.org)
 */

#ifdef __cplusplus
extern "C"
{
#endif

#ifndef _M_ARM
FORCEINLINE
KPROCESSOR_MODE
KeGetPreviousMode(VOID)
{
    /* Return the current mode */
    return KeGetCurrentThread()->PreviousMode;
}
#endif

//
// Enters a Guarded Region
//
#define KeEnterGuardedRegionThread(_Thread)                                 \
{                                                                           \
    /* Sanity checks */                                                     \
    ASSERT(KeGetCurrentIrql() <= APC_LEVEL);                                \
    ASSERT((_Thread) == KeGetCurrentThread());                              \
    ASSERT(((_Thread)->SpecialApcDisable <= 0) &&                           \
           ((_Thread)->SpecialApcDisable != -32768));                       \
                                                                            \
    /* Disable Special APCs */                                              \
    (_Thread)->SpecialApcDisable--;                                         \
}

#define KeEnterGuardedRegion()                                              \
{                                                                           \
    PKTHREAD _Thread = KeGetCurrentThread();                                \
    KeEnterGuardedRegionThread(_Thread);                                    \
}

//
// Leaves a Guarded Region
//
#define KeLeaveGuardedRegionThread(_Thread)                                 \
{                                                                           \
    /* Sanity checks */                                                     \
    ASSERT(KeGetCurrentIrql() <= APC_LEVEL);                                \
    ASSERT((_Thread) == KeGetCurrentThread());                              \
    ASSERT((_Thread)->SpecialApcDisable < 0);                               \
                                                                            \
    /* Leave region and check if APCs are OK now */                         \
    if (!(++(_Thread)->SpecialApcDisable))                                  \
    {                                                                       \
        /* Check for Kernel APCs on the list */                             \
        if (!IsListEmpty(&(_Thread)->ApcState.                              \
                         ApcListHead[KernelMode]))                          \
        {                                                                   \
            /* Check for APC Delivery */                                    \
            KiCheckForKernelApcDelivery();                                  \
        }                                                                   \
    }                                                                       \
}

#define KeLeaveGuardedRegion()                                              \
{                                                                           \
    PKTHREAD _Thread = KeGetCurrentThread();                                \
    KeLeaveGuardedRegionThread(_Thread);                                    \
}
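
//
// Illustrative only: a minimal usage sketch of the guarded-region macros
// above (not part of this header). The enter/leave calls must be paired on
// the same thread; the hypothetical DoWorkWithoutSpecialApcs() stands in for
// any code that must not be interrupted by special kernel APCs.
//
//     KeEnterGuardedRegion();
//     DoWorkWithoutSpecialApcs();
//     KeLeaveGuardedRegion();
//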

//
// Enters a Critical Region
//
#define KeEnterCriticalRegionThread(_Thread)                                \
{                                                                           \
    /* Sanity checks */                                                     \
    ASSERT((_Thread) == KeGetCurrentThread());                              \
    ASSERT(((_Thread)->KernelApcDisable <= 0) &&                            \
           ((_Thread)->KernelApcDisable != -32768));                        \
                                                                            \
    /* Disable Kernel APCs */                                               \
    (_Thread)->KernelApcDisable--;                                          \
}

#define KeEnterCriticalRegion()                                             \
{                                                                           \
    PKTHREAD _Thread = KeGetCurrentThread();                                \
    KeEnterCriticalRegionThread(_Thread);                                   \
}

//
// Leaves a Critical Region
//
#define KeLeaveCriticalRegionThread(_Thread)                                \
{                                                                           \
    /* Sanity checks */                                                     \
    ASSERT((_Thread) == KeGetCurrentThread());                              \
    ASSERT((_Thread)->KernelApcDisable < 0);                                \
                                                                            \
    /* Enable Kernel APCs */                                                \
    (_Thread)->KernelApcDisable++;                                          \
                                                                            \
    /* Check if Kernel APCs are now enabled */                              \
    if (!((_Thread)->KernelApcDisable))                                     \
    {                                                                       \
        /* Check if we need to request an APC Delivery */                   \
        if (!(IsListEmpty(&(_Thread)->ApcState.ApcListHead[KernelMode])) && \
            !((_Thread)->SpecialApcDisable))                                \
        {                                                                   \
            /* Check for the right environment */                           \
            KiCheckForKernelApcDelivery();                                  \
        }                                                                   \
    }                                                                       \
}

#define KeLeaveCriticalRegion()                                             \
{                                                                           \
    PKTHREAD _Thread = KeGetCurrentThread();                                \
    KeLeaveCriticalRegionThread(_Thread);                                   \
}
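
//
// Illustrative only: a critical region is typically wrapped around code that
// must not be interrupted by normal kernel APCs (special kernel APCs can
// still fire). The hypothetical TouchSharedState() is a placeholder.
//
//     KeEnterCriticalRegion();
//     TouchSharedState();
//     KeLeaveCriticalRegion();
//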

#ifndef CONFIG_SMP

//
// This routine protects against multiple CPU acquires; it's meaningless on UP.
//
FORCEINLINE
VOID
KiAcquireDispatcherObject(IN DISPATCHER_HEADER* Object)
{
    UNREFERENCED_PARAMETER(Object);
}

//
// This routine protects against multiple CPU acquires; it's meaningless on UP.
//
FORCEINLINE
VOID
KiReleaseDispatcherObject(IN DISPATCHER_HEADER* Object)
{
    UNREFERENCED_PARAMETER(Object);
}

FORCEINLINE
KIRQL
KiAcquireDispatcherLock(VOID)
{
    /* Raise to synch level */
    return KfRaiseIrql(SYNCH_LEVEL);
}

FORCEINLINE
VOID
KiReleaseDispatcherLock(IN KIRQL OldIrql)
{
    /* Just exit the dispatcher */
    KiExitDispatcher(OldIrql);
}

FORCEINLINE
VOID
KiAcquireDispatcherLockAtSynchLevel(VOID)
{
    /* This is a no-op at SYNCH_LEVEL for UP systems */
    ASSERT(KeGetCurrentIrql() >= SYNCH_LEVEL);
    return;
}

FORCEINLINE
VOID
KiReleaseDispatcherLockFromSynchLevel(VOID)
{
    /* This is a no-op at SYNCH_LEVEL for UP systems */
    return;
}

//
// This routine makes the thread deferred ready on the boot CPU.
//
FORCEINLINE
VOID
KiInsertDeferredReadyList(IN PKTHREAD Thread)
{
    /* Set the thread to deferred state and boot CPU */
    Thread->State = DeferredReady;
    Thread->DeferredProcessor = 0;

    /* Make the thread ready immediately */
    KiDeferredReadyThread(Thread);
}

FORCEINLINE
VOID
KiRescheduleThread(IN BOOLEAN NewThread,
                   IN ULONG Cpu)
{
    /* This is meaningless on UP systems */
    UNREFERENCED_PARAMETER(NewThread);
    UNREFERENCED_PARAMETER(Cpu);
}

//
// This routine protects against multiple CPU acquires; it's meaningless on UP.
//
FORCEINLINE
VOID
KiSetThreadSwapBusy(IN PKTHREAD Thread)
{
    UNREFERENCED_PARAMETER(Thread);
}

//
// This routine protects against multiple CPU acquires; it's meaningless on UP.
//
FORCEINLINE
VOID
KiAcquirePrcbLock(IN PKPRCB Prcb)
{
    UNREFERENCED_PARAMETER(Prcb);
}

//
// This routine protects against multiple CPU acquires; it's meaningless on UP.
//
FORCEINLINE
VOID
KiReleasePrcbLock(IN PKPRCB Prcb)
{
    UNREFERENCED_PARAMETER(Prcb);
}

//
// This routine protects against multiple CPU acquires; it's meaningless on UP.
//
FORCEINLINE
VOID
KiAcquireThreadLock(IN PKTHREAD Thread)
{
    UNREFERENCED_PARAMETER(Thread);
}

//
// This routine protects against multiple CPU acquires; it's meaningless on UP.
//
FORCEINLINE
VOID
KiReleaseThreadLock(IN PKTHREAD Thread)
{
    UNREFERENCED_PARAMETER(Thread);
}

//
// This routine protects against multiple CPU acquires; it's meaningless on UP.
//
FORCEINLINE
BOOLEAN
KiTryThreadLock(IN PKTHREAD Thread)
{
    UNREFERENCED_PARAMETER(Thread);
    return FALSE;
}

FORCEINLINE
VOID
KiCheckDeferredReadyList(IN PKPRCB Prcb)
{
    /* There are no deferred ready lists on UP systems */
    UNREFERENCED_PARAMETER(Prcb);
}

FORCEINLINE
VOID
KiRequestApcInterrupt(IN BOOLEAN NeedApc,
                      IN UCHAR Processor)
{
    /* We deliver instantly on UP */
    UNREFERENCED_PARAMETER(NeedApc);
    UNREFERENCED_PARAMETER(Processor);
}

FORCEINLINE
PKSPIN_LOCK_QUEUE
KiAcquireTimerLock(IN ULONG Hand)
{
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);

    /* Nothing to do on UP */
    UNREFERENCED_PARAMETER(Hand);
    return NULL;
}

FORCEINLINE
VOID
KiReleaseTimerLock(IN PKSPIN_LOCK_QUEUE LockQueue)
{
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);

    /* Nothing to do on UP */
    UNREFERENCED_PARAMETER(LockQueue);
}

#else

FORCEINLINE
VOID
KiAcquireDispatcherObject(IN DISPATCHER_HEADER* Object)
{
    LONG OldValue;

    /* Make sure we're at a safe level to touch the lock */
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);

    /* Start acquire loop */
    do
    {
        /* Loop until the other CPU releases it */
        while (TRUE)
        {
            /* Check if it got released */
            OldValue = Object->Lock;
            if ((OldValue & KOBJECT_LOCK_BIT) == 0) break;

            /* Let the CPU know that this is a loop */
            YieldProcessor();
        }

        /* Try acquiring the lock now */
    } while (InterlockedCompareExchange(&Object->Lock,
                                        OldValue | KOBJECT_LOCK_BIT,
                                        OldValue) != OldValue);
}

FORCEINLINE
VOID
KiReleaseDispatcherObject(IN DISPATCHER_HEADER* Object)
{
    /* Make sure we're at a safe level to touch the lock */
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);

    /* Release it */
    InterlockedAnd(&Object->Lock, ~KOBJECT_LOCK_BIT);
}

FORCEINLINE
KIRQL
KiAcquireDispatcherLock(VOID)
{
    /* Raise to synchronization level and acquire the dispatcher lock */
    return KeAcquireQueuedSpinLockRaiseToSynch(LockQueueDispatcherLock);
}

FORCEINLINE
VOID
KiReleaseDispatcherLock(IN KIRQL OldIrql)
{
    /* First release the lock */
    KeReleaseQueuedSpinLockFromDpcLevel(&KeGetCurrentPrcb()->
                                        LockQueue[LockQueueDispatcherLock]);

    /* Then exit the dispatcher */
    KiExitDispatcher(OldIrql);
}

FORCEINLINE
VOID
KiAcquireDispatcherLockAtSynchLevel(VOID)
{
    /* Acquire the dispatcher lock */
    ASSERT(KeGetCurrentIrql() >= SYNCH_LEVEL);
    KeAcquireQueuedSpinLockAtDpcLevel(&KeGetCurrentPrcb()->
                                      LockQueue[LockQueueDispatcherLock]);
}

FORCEINLINE
VOID
KiReleaseDispatcherLockFromSynchLevel(VOID)
{
    /* Release the dispatcher lock */
    KeReleaseQueuedSpinLockFromDpcLevel(&KeGetCurrentPrcb()->
                                        LockQueue[LockQueueDispatcherLock]);
}

//
// This routine inserts a thread into the deferred ready list of the current CPU
//
FORCEINLINE
VOID
KiInsertDeferredReadyList(IN PKTHREAD Thread)
{
    PKPRCB Prcb = KeGetCurrentPrcb();

    /* Set the thread to deferred state and CPU */
    Thread->State = DeferredReady;
    Thread->DeferredProcessor = Prcb->Number;

    /* Add it on the list */
    PushEntryList(&Prcb->DeferredReadyListHead, &Thread->SwapListEntry);
}

FORCEINLINE
VOID
KiRescheduleThread(IN BOOLEAN NewThread,
                   IN ULONG Cpu)
{
    /* Check if a new thread needs to be scheduled on a different CPU */
    if ((NewThread) && !(KeGetCurrentPrcb()->Number == Cpu))
    {
        /* Send an IPI to request delivery */
        KiIpiSend(AFFINITY_MASK(Cpu), IPI_DPC);
    }
}

//
// This routine sets the current thread in a swap busy state, which ensures that
// nobody else tries to swap it concurrently.
//
FORCEINLINE
VOID
KiSetThreadSwapBusy(IN PKTHREAD Thread)
{
    /* Make sure nobody already set it */
    ASSERT(Thread->SwapBusy == FALSE);

    /* Set it ourselves */
    Thread->SwapBusy = TRUE;
}

//
// This routine acquires the PRCB lock so that only one caller can touch
// volatile PRCB data.
//
// Since this is a simple optimized spin-lock, it must only be acquired
// at dispatcher level or higher!
//
FORCEINLINE
VOID
KiAcquirePrcbLock(IN PKPRCB Prcb)
{
    /* Make sure we're at a safe level to touch the PRCB lock */
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);

    /* Start acquire loop */
    for (;;)
    {
        /* Acquire the lock and break out if we acquired it first */
        if (!InterlockedExchange((PLONG)&Prcb->PrcbLock, 1)) break;

        /* Loop until the other CPU releases it */
        do
        {
            /* Let the CPU know that this is a loop */
            YieldProcessor();
        } while (Prcb->PrcbLock);
    }
}

//
// This routine releases the PRCB lock so that other callers can touch
// volatile PRCB data.
//
// Since this is a simple optimized spin-lock, it must only be acquired
// at dispatcher level or higher!
//
FORCEINLINE
VOID
KiReleasePrcbLock(IN PKPRCB Prcb)
{
    /* Make sure we are above dispatch and the lock is acquired! */
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
    ASSERT(Prcb->PrcbLock != 0);

    /* Release it */
    InterlockedAnd((PLONG)&Prcb->PrcbLock, 0);
}
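
//
// Illustrative only: a sketch of the intended acquire/release pattern around
// volatile PRCB fields (assumes the caller is already at DISPATCH_LEVEL or
// above, e.g. inside the dispatcher).
//
//     PKPRCB Prcb = KeGetCurrentPrcb();
//     KiAcquirePrcbLock(Prcb);
//     /* ... touch Prcb->ReadySummary, ready lists, etc. ... */
//     KiReleasePrcbLock(Prcb);
//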

//
// This routine acquires the thread lock so that only one caller can touch
// volatile thread data.
//
// Since this is a simple optimized spin-lock, it must only be acquired
// at dispatcher level or higher!
//
FORCEINLINE
VOID
KiAcquireThreadLock(IN PKTHREAD Thread)
{
    /* Make sure we're at a safe level to touch the thread lock */
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);

    /* Start acquire loop */
    for (;;)
    {
        /* Acquire the lock and break out if we acquired it first */
        if (!InterlockedExchange((PLONG)&Thread->ThreadLock, 1)) break;

        /* Loop until the other CPU releases it */
        do
        {
            /* Let the CPU know that this is a loop */
            YieldProcessor();
        } while (Thread->ThreadLock);
    }
}

//
// This routine releases the thread lock so that other callers can touch
// volatile thread data.
//
// Since this is a simple optimized spin-lock, it must only be acquired
// at dispatcher level or higher!
//
FORCEINLINE
VOID
KiReleaseThreadLock(IN PKTHREAD Thread)
{
    /* Make sure we are still above dispatch */
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);

    /* Release it */
    InterlockedAnd((PLONG)&Thread->ThreadLock, 0);
}

FORCEINLINE
BOOLEAN
KiTryThreadLock(IN PKTHREAD Thread)
{
    LONG Value;

    /* If the lock isn't acquired, return false */
    if (!Thread->ThreadLock) return FALSE;

    /* Otherwise, try to acquire it and check the result */
    Value = 1;
    Value = InterlockedExchange((PLONG)&Thread->ThreadLock, Value);

    /* Return the lock state */
    return (Value == 1);
}

FORCEINLINE
VOID
KiCheckDeferredReadyList(IN PKPRCB Prcb)
{
    /* Scan the deferred ready lists if required */
    if (Prcb->DeferredReadyListHead.Next) KiProcessDeferredReadyList(Prcb);
}

FORCEINLINE
VOID
KiRequestApcInterrupt(IN BOOLEAN NeedApc,
                      IN UCHAR Processor)
{
    /* Check if we need to request APC delivery */
    if (NeedApc)
    {
        /* Check if it's on another CPU */
        if (KeGetCurrentPrcb()->Number != Processor)
        {
            /* Send an IPI to request delivery */
            KiIpiSend(AFFINITY_MASK(Processor), IPI_APC);
        }
        else
        {
            /* Request a software interrupt */
            HalRequestSoftwareInterrupt(APC_LEVEL);
        }
    }
}

FORCEINLINE
PKSPIN_LOCK_QUEUE
KiAcquireTimerLock(IN ULONG Hand)
{
    PKSPIN_LOCK_QUEUE LockQueue;
    ULONG LockIndex;
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);

    /* Get the lock index */
    LockIndex = Hand >> LOCK_QUEUE_TIMER_LOCK_SHIFT;
    LockIndex &= (LOCK_QUEUE_TIMER_TABLE_LOCKS - 1);

    /* Now get the lock */
    LockQueue = &KeGetCurrentPrcb()->LockQueue[LockQueueTimerTableLock + LockIndex];

    /* Acquire it and return */
    KeAcquireQueuedSpinLockAtDpcLevel(LockQueue);
    return LockQueue;
}

FORCEINLINE
VOID
KiReleaseTimerLock(IN PKSPIN_LOCK_QUEUE LockQueue)
{
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);

    /* Release the lock */
    KeReleaseQueuedSpinLockFromDpcLevel(LockQueue);
}

#endif

FORCEINLINE
VOID
KiAcquireApcLockRaiseToSynch(IN PKTHREAD Thread,
                             IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Acquire the lock and raise to synchronization level */
    KeAcquireInStackQueuedSpinLockRaiseToSynch(&Thread->ApcQueueLock, Handle);
}

FORCEINLINE
VOID
KiAcquireApcLockAtSynchLevel(IN PKTHREAD Thread,
                             IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Acquire the lock */
    ASSERT(KeGetCurrentIrql() >= SYNCH_LEVEL);
    KeAcquireInStackQueuedSpinLockAtDpcLevel(&Thread->ApcQueueLock, Handle);
}

FORCEINLINE
VOID
KiAcquireApcLockRaiseToDpc(IN PKTHREAD Thread,
                           IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Acquire the lock */
    KeAcquireInStackQueuedSpinLock(&Thread->ApcQueueLock, Handle);
}

FORCEINLINE
VOID
KiReleaseApcLock(IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Release the lock */
    KeReleaseInStackQueuedSpinLock(Handle);
}

FORCEINLINE
VOID
KiReleaseApcLockFromSynchLevel(IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Release the lock */
    KeReleaseInStackQueuedSpinLockFromDpcLevel(Handle);
}

FORCEINLINE
VOID
KiAcquireProcessLockRaiseToSynch(IN PKPROCESS Process,
                                 IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Acquire the lock and raise to synchronization level */
    KeAcquireInStackQueuedSpinLockRaiseToSynch(&Process->ProcessLock, Handle);
}

FORCEINLINE
VOID
KiReleaseProcessLock(IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Release the lock and restore previous IRQL */
    KeReleaseInStackQueuedSpinLock(Handle);
}

FORCEINLINE
VOID
KiReleaseProcessLockFromSynchLevel(IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Release the lock without lowering IRQL */
    KeReleaseInStackQueuedSpinLockFromDpcLevel(Handle);
}

FORCEINLINE
VOID
KiAcquireDeviceQueueLock(IN PKDEVICE_QUEUE DeviceQueue,
                         IN PKLOCK_QUEUE_HANDLE DeviceLock)
{
    /* Check if we were called from a threaded DPC */
    if (KeGetCurrentPrcb()->DpcThreadActive)
    {
        /* Lock the Queue, we're not at DPC level */
        KeAcquireInStackQueuedSpinLock(&DeviceQueue->Lock, DeviceLock);
    }
    else
    {
        /* We must be at DPC level, acquire the lock safely */
        ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
        KeAcquireInStackQueuedSpinLockAtDpcLevel(&DeviceQueue->Lock,
                                                 DeviceLock);
    }
}

FORCEINLINE
VOID
KiReleaseDeviceQueueLock(IN PKLOCK_QUEUE_HANDLE DeviceLock)
{
    /* Check if we were called from a threaded DPC */
    if (KeGetCurrentPrcb()->DpcThreadActive)
    {
        /* Unlock the Queue, we're not at DPC level */
        KeReleaseInStackQueuedSpinLock(DeviceLock);
    }
    else
    {
        /* We must be at DPC level, release the lock safely */
        ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
        KeReleaseInStackQueuedSpinLockFromDpcLevel(DeviceLock);
    }
}

//
// Satisfies the wait of a mutant dispatcher object
//
#define KiSatisfyMutantWait(Object, Thread)                                 \
{                                                                           \
    /* Decrease the Signal State */                                         \
    (Object)->Header.SignalState--;                                         \
                                                                            \
    /* Check if it's now non-signaled */                                    \
    if (!(Object)->Header.SignalState)                                      \
    {                                                                       \
        /* Set the Owner Thread */                                          \
        (Object)->OwnerThread = Thread;                                     \
                                                                            \
        /* Disable APCs if needed */                                        \
        Thread->KernelApcDisable = Thread->KernelApcDisable -               \
                                   (Object)->ApcDisable;                    \
                                                                            \
        /* Check if it's abandoned */                                       \
        if ((Object)->Abandoned)                                            \
        {                                                                   \
            /* Unabandon it */                                              \
            (Object)->Abandoned = FALSE;                                    \
                                                                            \
            /* Return Status */                                             \
            Thread->WaitStatus = STATUS_ABANDONED;                          \
        }                                                                   \
                                                                            \
        /* Insert it into the Mutant List */                                \
        InsertHeadList(Thread->MutantListHead.Blink,                        \
                       &(Object)->MutantListEntry);                         \
    }                                                                       \
}

//
// Satisfies the wait of any nonmutant dispatcher object
//
#define KiSatisfyNonMutantWait(Object)                                      \
{                                                                           \
    if (((Object)->Header.Type & TIMER_OR_EVENT_TYPE) ==                    \
        EventSynchronizationObject)                                         \
    {                                                                       \
        /* Synchronization Timers and Events just get un-signaled */        \
        (Object)->Header.SignalState = 0;                                   \
    }                                                                       \
    else if ((Object)->Header.Type == SemaphoreObject)                      \
    {                                                                       \
        /* These ones can have multiple states, so we only decrease it */   \
        (Object)->Header.SignalState--;                                     \
    }                                                                       \
}

//
// Satisfies the wait of any dispatcher object
//
#define KiSatisfyObjectWait(Object, Thread)                                 \
{                                                                           \
    /* Special case for Mutants */                                          \
    if ((Object)->Header.Type == MutantObject)                              \
    {                                                                       \
        KiSatisfyMutantWait((Object), (Thread));                            \
    }                                                                       \
    else                                                                    \
    {                                                                       \
        KiSatisfyNonMutantWait(Object);                                     \
    }                                                                       \
}

//
// Recalculates the due time
//
FORCEINLINE
PLARGE_INTEGER
KiRecalculateDueTime(IN PLARGE_INTEGER OriginalDueTime,
                     IN PLARGE_INTEGER DueTime,
                     IN OUT PLARGE_INTEGER NewDueTime)
{
    /* Don't do anything for absolute waits */
    if (OriginalDueTime->QuadPart >= 0) return OriginalDueTime;

    /* Otherwise, query the interrupt time and recalculate */
    NewDueTime->QuadPart = KeQueryInterruptTime();
    NewDueTime->QuadPart -= DueTime->QuadPart;
    return NewDueTime;
}
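
//
// Illustrative only: relative waits carry a negative due time in 100ns units,
// e.g. -10 * 1000 * 1000 for one second. When such a wait has to be repeated,
// the remaining interval is rebuilt from the absolute interrupt-time deadline
// that was captured earlier in DueTime:
//
//     NewDueTime = KeQueryInterruptTime() - DueTime;   /* still negative */
//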

//
// Determines whether a thread should be added to the wait list
//
FORCEINLINE
BOOLEAN
KiCheckThreadStackSwap(IN PKTHREAD Thread,
                       IN KPROCESSOR_MODE WaitMode)
{
    /* Check the required conditions */
    if ((WaitMode != KernelMode) &&
        (Thread->EnableStackSwap) &&
        (Thread->Priority >= (LOW_REALTIME_PRIORITY + 9)))
    {
        /* We are go for swap */
        return TRUE;
    }
    else
    {
        /* Don't swap the thread */
        return FALSE;
    }
}

//
// Adds a thread to the wait list
//
#define KiAddThreadToWaitList(Thread, Swappable)                            \
{                                                                           \
    /* Make sure it's swappable */                                          \
    if (Swappable)                                                          \
    {                                                                       \
        /* Insert it into the PRCB's List */                                \
        InsertTailList(&KeGetCurrentPrcb()->WaitListHead,                   \
                       &Thread->WaitListEntry);                             \
    }                                                                       \
}

//
// Checks if a wait in progress should be interrupted by APCs or an alertable
// state.
//
FORCEINLINE
NTSTATUS
KiCheckAlertability(IN PKTHREAD Thread,
                    IN BOOLEAN Alertable,
                    IN KPROCESSOR_MODE WaitMode)
{
    /* Check if the wait is alertable */
    if (Alertable)
    {
        /* It is, first check if the thread is alerted in this mode */
        if (Thread->Alerted[WaitMode])
        {
            /* It is, so bail out of the wait */
            Thread->Alerted[WaitMode] = FALSE;
            return STATUS_ALERTED;
        }
        else if ((WaitMode != KernelMode) &&
                 (!IsListEmpty(&Thread->ApcState.ApcListHead[UserMode])))
        {
            /* It isn't, but this is a user wait with queued user APCs */
            Thread->ApcState.UserApcPending = TRUE;
            return STATUS_USER_APC;
        }
        else if (Thread->Alerted[KernelMode])
        {
            /* It isn't that either, but we're alerted in kernel mode */
            Thread->Alerted[KernelMode] = FALSE;
            return STATUS_ALERTED;
        }
    }
    else if ((WaitMode != KernelMode) && (Thread->ApcState.UserApcPending))
    {
        /* Not alertable, but this is a user wait with pending user APCs */
        return STATUS_USER_APC;
    }

    /* Otherwise, we're fine */
    return STATUS_WAIT_0;
}

FORCEINLINE
ULONG
KiComputeTimerTableIndex(IN ULONGLONG DueTime)
{
    return (DueTime / KeMaximumIncrement) & (TIMER_TABLE_SIZE - 1);
}
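
//
// Illustrative only (typical values, not guaranteed): with a clock increment
// of 156250 (15.625 ms in 100ns units) and, say, a TIMER_TABLE_SIZE of 256,
// a due time of 10,000,000 (one second after boot) hashes to
//
//     (10000000 / 156250) & (256 - 1) = 64 & 255 = 64
//
// i.e. timers are spread over the table in clock-tick-sized buckets.
//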

//
// Called from KiCompleteTimer, KiInsertTreeTimer, KeSetSystemTime
// to remove timer entries
// See Windows HPI blog for more information.
//
FORCEINLINE
VOID
KiRemoveEntryTimer(IN PKTIMER Timer)
{
    ULONG Hand;
    PKTIMER_TABLE_ENTRY TableEntry;

    /* Remove the timer from the timer list and check if it's empty */
    Hand = Timer->Header.Hand;
    if (RemoveEntryList(&Timer->TimerListEntry))
    {
        /* Get the respective timer table entry */
        TableEntry = &KiTimerTableListHead[Hand];
        if (&TableEntry->Entry == TableEntry->Entry.Flink)
        {
            /* Set the entry to an infinite absolute time */
            TableEntry->Time.HighPart = 0xFFFFFFFF;
        }
    }

    /* Clear the list entries on dbg builds so we can tell the timer is gone */
#if DBG
    Timer->TimerListEntry.Flink = NULL;
    Timer->TimerListEntry.Blink = NULL;
#endif
}

//
// Called by Wait and Queue code to insert a timer for dispatching.
// Also called by KeSetTimerEx to insert a timer from the caller.
//
FORCEINLINE
VOID
KxInsertTimer(IN PKTIMER Timer,
              IN ULONG Hand)
{
    PKSPIN_LOCK_QUEUE LockQueue;
    ASSERT(KeGetCurrentIrql() >= SYNCH_LEVEL);

    /* Acquire the lock and release the dispatcher lock */
    LockQueue = KiAcquireTimerLock(Hand);
    KiReleaseDispatcherLockFromSynchLevel();

    /* Try to insert the timer */
    if (KiInsertTimerTable(Timer, Hand))
    {
        /* Complete it */
        KiCompleteTimer(Timer, LockQueue);
    }
    else
    {
        /* Do nothing, just release the lock */
        KiReleaseTimerLock(LockQueue);
    }
}

//
// Called by KeSetTimerEx and KiInsertTreeTimer to calculate Due Time
// See the Windows HPI Blog for more information
//
FORCEINLINE
BOOLEAN
KiComputeDueTime(IN PKTIMER Timer,
                 IN LARGE_INTEGER DueTime,
                 OUT PULONG Hand)
{
    LARGE_INTEGER InterruptTime, SystemTime, DifferenceTime;

    /* Convert to relative time if needed */
    Timer->Header.Absolute = FALSE;
    if (DueTime.HighPart >= 0)
    {
        /* Get System Time */
        KeQuerySystemTime(&SystemTime);

        /* Do the conversion */
        DifferenceTime.QuadPart = SystemTime.QuadPart - DueTime.QuadPart;

        /* Make sure it hasn't already expired */
        Timer->Header.Absolute = TRUE;
        if (DifferenceTime.HighPart >= 0)
        {
            /* Cancel everything */
            Timer->Header.SignalState = TRUE;
            Timer->Header.Hand = 0;
            Timer->DueTime.QuadPart = 0;
            *Hand = 0;
            return FALSE;
        }

        /* Continue with the converted relative time */
        DueTime = DifferenceTime;
    }

    /* Get the Interrupt Time */
    InterruptTime.QuadPart = KeQueryInterruptTime();

    /* Recalculate due time */
    Timer->DueTime.QuadPart = InterruptTime.QuadPart - DueTime.QuadPart;

    /* Get the handle */
    *Hand = KiComputeTimerTableIndex(Timer->DueTime.QuadPart);
    Timer->Header.Hand = (UCHAR)*Hand;
    Timer->Header.Inserted = TRUE;
    return TRUE;
}

//
// Called from Unlink and Queue Insert Code.
// Also called by timer code when canceling an inserted timer.
// Removes a timer from its tree.
//
FORCEINLINE
VOID
KxRemoveTreeTimer(IN PKTIMER Timer)
{
    ULONG Hand = Timer->Header.Hand;
    PKSPIN_LOCK_QUEUE LockQueue;
    PKTIMER_TABLE_ENTRY TimerEntry;

    /* Acquire timer lock */
    LockQueue = KiAcquireTimerLock(Hand);

    /* Set the timer as non-inserted */
    Timer->Header.Inserted = FALSE;

    /* Remove it from the timer list */
    if (RemoveEntryList(&Timer->TimerListEntry))
    {
        /* Get the entry and check if it's empty */
        TimerEntry = &KiTimerTableListHead[Hand];
        if (IsListEmpty(&TimerEntry->Entry))
        {
            /* Clear the time then */
            TimerEntry->Time.HighPart = 0xFFFFFFFF;
        }
    }

    /* Release the timer lock */
    KiReleaseTimerLock(LockQueue);
}

FORCEINLINE
VOID
KxSetTimerForThreadWait(IN PKTIMER Timer,
                        IN LARGE_INTEGER Interval,
                        OUT PULONG Hand)
{
    ULONGLONG DueTime;
    LARGE_INTEGER InterruptTime, SystemTime, TimeDifference;

    /* Check the timer's interval to see if it's absolute */
    Timer->Header.Absolute = FALSE;
    if (Interval.HighPart >= 0)
    {
        /* Get the system time and calculate the relative time */
        KeQuerySystemTime(&SystemTime);
        TimeDifference.QuadPart = SystemTime.QuadPart - Interval.QuadPart;
        Timer->Header.Absolute = TRUE;

        /* Check if we've already expired */
        if (TimeDifference.HighPart >= 0)
        {
            /* Reset everything */
            Timer->DueTime.QuadPart = 0;
            *Hand = 0;
            Timer->Header.Hand = 0;
            return;
        }
        else
        {
            /* Update the interval */
            Interval = TimeDifference;
        }
    }

    /* Calculate the due time */
    InterruptTime.QuadPart = KeQueryInterruptTime();
    DueTime = InterruptTime.QuadPart - Interval.QuadPart;
    Timer->DueTime.QuadPart = DueTime;

    /* Calculate the timer handle */
    *Hand = KiComputeTimerTableIndex(DueTime);
    Timer->Header.Hand = (UCHAR)*Hand;
}

#define KxDelayThreadWait()                                                 \
                                                                            \
    /* Setup the Wait Block */                                              \
    Thread->WaitBlockList = TimerBlock;                                     \
                                                                            \
    /* Setup the timer */                                                   \
    KxSetTimerForThreadWait(Timer, *Interval, &Hand);                       \
                                                                            \
    /* Save the due time for the caller */                                  \
    DueTime.QuadPart = Timer->DueTime.QuadPart;                             \
                                                                            \
    /* Link the timer to this Wait Block */                                 \
    TimerBlock->NextWaitBlock = TimerBlock;                                 \
    Timer->Header.WaitListHead.Flink = &TimerBlock->WaitListEntry;          \
    Timer->Header.WaitListHead.Blink = &TimerBlock->WaitListEntry;          \
                                                                            \
    /* Clear wait status */                                                 \
    Thread->WaitStatus = STATUS_SUCCESS;                                    \
                                                                            \
    /* Setup wait fields */                                                 \
    Thread->Alertable = Alertable;                                          \
    Thread->WaitReason = DelayExecution;                                    \
    Thread->WaitMode = WaitMode;                                            \
                                                                            \
    /* Check if we can swap the thread's stack */                           \
    Thread->WaitListEntry.Flink = NULL;                                     \
    Swappable = KiCheckThreadStackSwap(Thread, WaitMode);                   \
                                                                            \
    /* Set the wait time */                                                 \
    Thread->WaitTime = KeTickCount.LowPart;

#define KxMultiThreadWait()                                                 \
    /* Link wait block array to the thread */                               \
    Thread->WaitBlockList = WaitBlockArray;                                 \
                                                                            \
    /* Reset the index */                                                   \
    Index = 0;                                                              \
                                                                            \
    /* Loop wait blocks */                                                  \
    do                                                                      \
    {                                                                       \
        /* Fill out the wait block */                                       \
        WaitBlock = &WaitBlockArray[Index];                                 \
        WaitBlock->Object = Object[Index];                                  \
        WaitBlock->WaitKey = (USHORT)Index;                                 \
        WaitBlock->WaitType = WaitType;                                     \
        WaitBlock->Thread = Thread;                                         \
                                                                            \
        /* Link to next block */                                            \
        WaitBlock->NextWaitBlock = &WaitBlockArray[Index + 1];              \
        Index++;                                                            \
    } while (Index < Count);                                                \
                                                                            \
    /* Link the last block */                                               \
    WaitBlock->NextWaitBlock = WaitBlockArray;                              \
                                                                            \
    /* Set default wait status */                                           \
    Thread->WaitStatus = STATUS_WAIT_0;                                     \
                                                                            \
    /* Check if we have a timer */                                          \
    if (Timeout)                                                            \
    {                                                                       \
        /* Link to the block */                                             \
        TimerBlock->NextWaitBlock = WaitBlockArray;                         \
                                                                            \
        /* Setup the timer */                                               \
        KxSetTimerForThreadWait(Timer, *Timeout, &Hand);                    \
                                                                            \
        /* Save the due time for the caller */                              \
        DueTime.QuadPart = Timer->DueTime.QuadPart;                         \
                                                                            \
        /* Initialize the list */                                           \
        InitializeListHead(&Timer->Header.WaitListHead);                    \
    }                                                                       \
                                                                            \
    /* Set wait settings */                                                 \
    Thread->Alertable = Alertable;                                          \
    Thread->WaitMode = WaitMode;                                            \
    Thread->WaitReason = WaitReason;                                        \
                                                                            \
    /* Check if we can swap the thread's stack */                           \
    Thread->WaitListEntry.Flink = NULL;                                     \
    Swappable = KiCheckThreadStackSwap(Thread, WaitMode);                   \
                                                                            \
    /* Set the wait time */                                                 \
    Thread->WaitTime = KeTickCount.LowPart;

#define KxSingleThreadWait()                                                \
    /* Setup the Wait Block */                                              \
    Thread->WaitBlockList = WaitBlock;                                      \
    WaitBlock->WaitKey = STATUS_SUCCESS;                                    \
    WaitBlock->Object = Object;                                             \
    WaitBlock->WaitType = WaitAny;                                          \
                                                                            \
    /* Clear wait status */                                                 \
    Thread->WaitStatus = STATUS_SUCCESS;                                    \
                                                                            \
    /* Check if we have a timer */                                          \
    if (Timeout)                                                            \
    {                                                                       \
        /* Setup the timer */                                               \
        KxSetTimerForThreadWait(Timer, *Timeout, &Hand);                    \
                                                                            \
        /* Save the due time for the caller */                              \
        DueTime.QuadPart = Timer->DueTime.QuadPart;                         \
                                                                            \
        /* Pointer to timer block */                                        \
        WaitBlock->NextWaitBlock = TimerBlock;                              \
        TimerBlock->NextWaitBlock = WaitBlock;                              \
                                                                            \
        /* Link the timer to this Wait Block */                             \
        Timer->Header.WaitListHead.Flink = &TimerBlock->WaitListEntry;      \
        Timer->Header.WaitListHead.Blink = &TimerBlock->WaitListEntry;      \
    }                                                                       \
    else                                                                    \
    {                                                                       \
        /* No timer block, just ourselves */                                \
        WaitBlock->NextWaitBlock = WaitBlock;                               \
    }                                                                       \
                                                                            \
    /* Set wait settings */                                                 \
    Thread->Alertable = Alertable;                                          \
    Thread->WaitMode = WaitMode;                                            \
    Thread->WaitReason = WaitReason;                                        \
                                                                            \
    /* Check if we can swap the thread's stack */                           \
    Thread->WaitListEntry.Flink = NULL;                                     \
    Swappable = KiCheckThreadStackSwap(Thread, WaitMode);                   \
                                                                            \
    /* Set the wait time */                                                 \
    Thread->WaitTime = KeTickCount.LowPart;

#define KxQueueThreadWait()                                                 \
    /* Setup the Wait Block */                                              \
    Thread->WaitBlockList = WaitBlock;                                      \
    WaitBlock->WaitKey = STATUS_SUCCESS;                                    \
    WaitBlock->Object = Queue;                                              \
    WaitBlock->WaitType = WaitAny;                                          \
    WaitBlock->Thread = Thread;                                             \
                                                                            \
    /* Clear wait status */                                                 \
    Thread->WaitStatus = STATUS_SUCCESS;                                    \
                                                                            \
    /* Check if we have a timer */                                          \
    if (Timeout)                                                            \
    {                                                                       \
        /* Setup the timer */                                               \
        KxSetTimerForThreadWait(Timer, *Timeout, &Hand);                    \
                                                                            \
        /* Save the due time for the caller */                              \
        DueTime.QuadPart = Timer->DueTime.QuadPart;                         \
                                                                            \
        /* Pointer to timer block */                                        \
        WaitBlock->NextWaitBlock = TimerBlock;                              \
        TimerBlock->NextWaitBlock = WaitBlock;                              \
                                                                            \
        /* Link the timer to this Wait Block */                             \
        Timer->Header.WaitListHead.Flink = &TimerBlock->WaitListEntry;      \
        Timer->Header.WaitListHead.Blink = &TimerBlock->WaitListEntry;      \
    }                                                                       \
    else                                                                    \
    {                                                                       \
        /* No timer block, just ourselves */                                \
        WaitBlock->NextWaitBlock = WaitBlock;                               \
    }                                                                       \
                                                                            \
    /* Set wait settings */                                                 \
    Thread->Alertable = FALSE;                                              \
    Thread->WaitMode = WaitMode;                                            \
    Thread->WaitReason = WrQueue;                                           \
                                                                            \
    /* Check if we can swap the thread's stack */                           \
    Thread->WaitListEntry.Flink = NULL;                                     \
    Swappable = KiCheckThreadStackSwap(Thread, WaitMode);                   \
                                                                            \
    /* Set the wait time */                                                 \
    Thread->WaitTime = KeTickCount.LowPart;

//
// Unwaits a Thread
//
FORCEINLINE
VOID
KxUnwaitThread(IN DISPATCHER_HEADER *Object,
               IN KPRIORITY Increment)
{
    PLIST_ENTRY WaitEntry, WaitList;
    PKWAIT_BLOCK WaitBlock;
    PKTHREAD WaitThread;
    ULONG WaitKey;

    /* Loop the Wait Entries */
    WaitList = &Object->WaitListHead;
    ASSERT(IsListEmpty(&Object->WaitListHead) == FALSE);
    WaitEntry = WaitList->Flink;
    do
    {
        /* Get the current wait block */
        WaitBlock = CONTAINING_RECORD(WaitEntry, KWAIT_BLOCK, WaitListEntry);

        /* Get the waiting thread */
        WaitThread = WaitBlock->Thread;

        /* Check the current Wait Mode */
        if (WaitBlock->WaitType == WaitAny)
        {
            /* Use the actual wait key */
            WaitKey = WaitBlock->WaitKey;
        }
        else
        {
            /* Otherwise, use STATUS_KERNEL_APC */
            WaitKey = STATUS_KERNEL_APC;
        }

        /* Unwait the thread */
        KiUnwaitThread(WaitThread, WaitKey, Increment);

        /* Next entry */
        WaitEntry = WaitList->Flink;
    } while (WaitEntry != WaitList);
}

//
// Unwaits a Thread waiting on an event
//
FORCEINLINE
VOID
KxUnwaitThreadForEvent(IN PKEVENT Event,
                       IN KPRIORITY Increment)
{
    PLIST_ENTRY WaitEntry, WaitList;
    PKWAIT_BLOCK WaitBlock;
    PKTHREAD WaitThread;

    /* Loop the Wait Entries */
    WaitList = &Event->Header.WaitListHead;
    ASSERT(IsListEmpty(&Event->Header.WaitListHead) == FALSE);
    WaitEntry = WaitList->Flink;
    do
    {
        /* Get the current wait block */
        WaitBlock = CONTAINING_RECORD(WaitEntry, KWAIT_BLOCK, WaitListEntry);

        /* Get the waiting thread */
        WaitThread = WaitBlock->Thread;

        /* Check the current Wait Mode */
        if (WaitBlock->WaitType == WaitAny)
        {
            /* Un-signal it */
            Event->Header.SignalState = 0;

            /* Un-signal the event and unwait the thread */
            KiUnwaitThread(WaitThread, WaitBlock->WaitKey, Increment);
            break;
        }

        /* Unwait the thread with STATUS_KERNEL_APC */
        KiUnwaitThread(WaitThread, STATUS_KERNEL_APC, Increment);

        /* Next entry */
        WaitEntry = WaitList->Flink;
    } while (WaitEntry != WaitList);
}

//
// This routine queues a ready thread on the PRCB's ready lists.
// If this thread cannot currently run on this CPU, then the thread is
// added to the deferred ready list instead.
//
// This routine must be entered with the PRCB lock held and it will exit
// with the PRCB lock released!
//
_Requires_lock_held_(Prcb->PrcbLock)
_Releases_lock_(Prcb->PrcbLock)
FORCEINLINE
VOID
KxQueueReadyThread(IN PKTHREAD Thread,
                   IN PKPRCB Prcb)
{
    BOOLEAN Preempted;
    KPRIORITY Priority;

    /* Sanity checks */
    ASSERT(Prcb == KeGetCurrentPrcb());
#ifdef CONFIG_SMP
    ASSERT(Prcb->PrcbLock != 0);
#endif
    ASSERT(Thread->State == Running);
    ASSERT(Thread->NextProcessor == Prcb->Number);

    /* Check if this thread is allowed to run on this CPU */
#ifdef CONFIG_SMP
    if ((Thread->Affinity) & (Prcb->SetMember))
#else
    if (TRUE)
#endif
    {
        /* Set thread ready for execution */
        Thread->State = Ready;

        /* Save current priority and if someone had pre-empted it */
        Priority = Thread->Priority;
        Preempted = Thread->Preempted;

        /* We're not pre-empting now, and set the wait time */
        Thread->Preempted = FALSE;
        Thread->WaitTime = KeTickCount.LowPart;

        /* Sanity check */
        ASSERT((Priority >= 0) && (Priority <= HIGH_PRIORITY));

        /* Insert this thread in the appropriate order */
        Preempted ? InsertHeadList(&Prcb->DispatcherReadyListHead[Priority],
                                   &Thread->WaitListEntry) :
                    InsertTailList(&Prcb->DispatcherReadyListHead[Priority],
                                   &Thread->WaitListEntry);

        /* Update the ready summary */
        Prcb->ReadySummary |= PRIORITY_MASK(Priority);

        /* Sanity check */
        ASSERT(Priority == Thread->Priority);

        /* Release the PRCB lock */
        KiReleasePrcbLock(Prcb);
    }
    else
    {
        /* Otherwise, prepare this thread to be deferred */
        Thread->State = DeferredReady;
        Thread->DeferredProcessor = Prcb->Number;

        /* Release the lock and defer scheduling */
        KiReleasePrcbLock(Prcb);
        KiDeferredReadyThread(Thread);
    }
}

//
// This routine scans for an appropriate ready thread to select at the
// given priority and for the given CPU.
//
FORCEINLINE
PKTHREAD
KiSelectReadyThread(IN KPRIORITY Priority,
                    IN PKPRCB Prcb)
{
    ULONG PrioritySet;
    LONG HighPriority;
    PLIST_ENTRY ListEntry;
    PKTHREAD Thread = NULL;

    /* Save the current mask and get the priority set for the CPU */
    PrioritySet = Prcb->ReadySummary >> Priority;
    if (!PrioritySet) goto Quickie;

    /* Get the highest priority possible */
    BitScanReverse((PULONG)&HighPriority, PrioritySet);
    ASSERT((PrioritySet & PRIORITY_MASK(HighPriority)) != 0);
    HighPriority += Priority;

    /* Make sure the list isn't empty at the highest priority */
    ASSERT(IsListEmpty(&Prcb->DispatcherReadyListHead[HighPriority]) == FALSE);

    /* Get the first thread on the list */
    ListEntry = Prcb->DispatcherReadyListHead[HighPriority].Flink;
    Thread = CONTAINING_RECORD(ListEntry, KTHREAD, WaitListEntry);

    /* Make sure this thread is here for a reason */
    ASSERT(HighPriority == Thread->Priority);
    ASSERT(Thread->Affinity & AFFINITY_MASK(Prcb->Number));
    ASSERT(Thread->NextProcessor == Prcb->Number);

    /* Remove it from the list */
    if (RemoveEntryList(&Thread->WaitListEntry))
    {
        /* The list is empty now, reset the ready summary */
        Prcb->ReadySummary ^= PRIORITY_MASK(HighPriority);
    }

    /* Sanity check and return the thread */
Quickie:
    ASSERT((Thread == NULL) ||
           (Thread->BasePriority == 0) ||
           (Thread->Priority != 0));
    return Thread;
}
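
//
// Illustrative only: ReadySummary is a bit mask with one bit per priority
// level. If, say, threads are queued at priorities 4 and 10 and the caller
// asks for Priority = 8, then
//
//     PrioritySet = ReadySummary >> 8 = 0b100 (only the priority-10 bit),
//
// BitScanReverse() finds bit 2, and HighPriority becomes 8 + 2 = 10.
//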

//
// This routine computes the new priority for a thread. It is only valid for
// threads with priorities in the dynamic priority range.
//
FORCEINLINE
SCHAR
KiComputeNewPriority(IN PKTHREAD Thread,
                     IN SCHAR Adjustment)
{
    SCHAR Priority;

    /* Priority sanity checks */
    ASSERT((Thread->PriorityDecrement >= 0) &&
           (Thread->PriorityDecrement <= Thread->Priority));
    ASSERT((Thread->Priority < LOW_REALTIME_PRIORITY) ?
           TRUE : (Thread->PriorityDecrement == 0));

    /* Get the current priority */
    Priority = Thread->Priority;
    if (Priority < LOW_REALTIME_PRIORITY)
    {
        /* Decrease priority by the priority decrement */
        Priority -= (Thread->PriorityDecrement + Adjustment);

        /* Don't go out of bounds */
        if (Priority < Thread->BasePriority) Priority = Thread->BasePriority;

        /* Reset the priority decrement */
        Thread->PriorityDecrement = 0;
    }

    /* Sanity check */
    ASSERT((Thread->BasePriority == 0) || (Priority != 0));

    /* Return the new priority */
    return Priority;
}
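
//
// Illustrative only (made-up numbers): a thread at priority 12 with a base
// priority of 8, a PriorityDecrement of 3 and an Adjustment of 1 ends up at
//
//     12 - (3 + 1) = 8,
//
// clamped to BasePriority if the subtraction would drop below it, after which
// the decrement is consumed (reset to 0).
//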

//
// Guarded Mutex Routines
//
FORCEINLINE
VOID
_KeInitializeGuardedMutex(OUT PKGUARDED_MUTEX GuardedMutex)
{
    /* Setup the Initial Data */
    GuardedMutex->Count = GM_LOCK_BIT;
    GuardedMutex->Owner = NULL;
    GuardedMutex->Contention = 0;

    /* Initialize the Wait Gate */
    KeInitializeGate(&GuardedMutex->Gate);
}

FORCEINLINE
VOID
_KeAcquireGuardedMutexUnsafe(IN OUT PKGUARDED_MUTEX GuardedMutex)
{
    PKTHREAD Thread = KeGetCurrentThread();

    /* Sanity checks */
    ASSERT((KeGetCurrentIrql() == APC_LEVEL) ||
           (Thread->SpecialApcDisable < 0) ||
           (Thread->Teb == NULL) ||
           (Thread->Teb >= (PTEB)MM_SYSTEM_RANGE_START));
    ASSERT(GuardedMutex->Owner != Thread);

    /* Remove the lock */
    if (!InterlockedBitTestAndReset(&GuardedMutex->Count, GM_LOCK_BIT_V))
    {
        /* The Guarded Mutex was already locked, enter contended case */
        KiAcquireGuardedMutex(GuardedMutex);
    }

    /* Set the Owner */
    GuardedMutex->Owner = Thread;
}

FORCEINLINE
VOID
_KeReleaseGuardedMutexUnsafe(IN OUT PKGUARDED_MUTEX GuardedMutex)
{
    LONG OldValue, NewValue;

    /* Sanity checks */
    ASSERT((KeGetCurrentIrql() == APC_LEVEL) ||
           (KeGetCurrentThread()->SpecialApcDisable < 0) ||
           (KeGetCurrentThread()->Teb == NULL) ||
           (KeGetCurrentThread()->Teb >= (PTEB)MM_SYSTEM_RANGE_START));
    ASSERT(GuardedMutex->Owner == KeGetCurrentThread());

    /* Destroy the Owner */
    GuardedMutex->Owner = NULL;

    /* Add the Lock Bit */
    OldValue = InterlockedExchangeAdd(&GuardedMutex->Count, GM_LOCK_BIT);
    ASSERT((OldValue & GM_LOCK_BIT) == 0);

    /* Check if it was already locked, but not woken */
    if ((OldValue) && !(OldValue & GM_LOCK_WAITER_WOKEN))
    {
        /* Update the OldValue to what it should be now */
        OldValue += GM_LOCK_BIT;

        /* The mutex will be woken, minus one waiter */
        NewValue = OldValue + GM_LOCK_WAITER_WOKEN -
                   GM_LOCK_WAITER_INC;

        /* Remove the Woken bit */
        if (InterlockedCompareExchange(&GuardedMutex->Count,
                                       NewValue,
                                       OldValue) == OldValue)
        {
            /* Signal the Gate */
            KeSignalGateBoostPriority(&GuardedMutex->Gate);
        }
    }
}

FORCEINLINE
VOID
_KeAcquireGuardedMutex(IN PKGUARDED_MUTEX GuardedMutex)
{
    PKTHREAD Thread = KeGetCurrentThread();

    /* Sanity checks */
    ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
    ASSERT(GuardedMutex->Owner != Thread);

    /* Disable Special APCs */
    KeEnterGuardedRegionThread(Thread);

    /* Remove the lock */
    if (!InterlockedBitTestAndReset(&GuardedMutex->Count, GM_LOCK_BIT_V))
    {
        /* The Guarded Mutex was already locked, enter contended case */
        KiAcquireGuardedMutex(GuardedMutex);
    }

    /* Set the Owner and Special APC Disable state */
    GuardedMutex->Owner = Thread;
    GuardedMutex->SpecialApcDisable = Thread->SpecialApcDisable;
}

FORCEINLINE
VOID
_KeReleaseGuardedMutex(IN OUT PKGUARDED_MUTEX GuardedMutex)
{
    PKTHREAD Thread = KeGetCurrentThread();
    LONG OldValue, NewValue;

    /* Sanity checks */
    ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
    ASSERT(GuardedMutex->Owner == Thread);
    ASSERT(Thread->SpecialApcDisable == GuardedMutex->SpecialApcDisable);

    /* Destroy the Owner */
    GuardedMutex->Owner = NULL;

    /* Add the Lock Bit */
    OldValue = InterlockedExchangeAdd(&GuardedMutex->Count, GM_LOCK_BIT);
    ASSERT((OldValue & GM_LOCK_BIT) == 0);

    /* Check if it was already locked, but not woken */
    if ((OldValue) && !(OldValue & GM_LOCK_WAITER_WOKEN))
    {
        /* Update the OldValue to what it should be now */
        OldValue += GM_LOCK_BIT;

        /* The mutex will be woken, minus one waiter */
        NewValue = OldValue + GM_LOCK_WAITER_WOKEN -
                   GM_LOCK_WAITER_INC;

        /* Remove the Woken bit */
        if (InterlockedCompareExchange(&GuardedMutex->Count,
                                       NewValue,
                                       OldValue) == OldValue)
        {
            /* Signal the Gate */
            KeSignalGateBoostPriority(&GuardedMutex->Gate);
        }
    }

    /* Re-enable APCs */
    KeLeaveGuardedRegionThread(Thread);
}

FORCEINLINE
BOOLEAN
_KeTryToAcquireGuardedMutex(IN OUT PKGUARDED_MUTEX GuardedMutex)
{
    PKTHREAD Thread = KeGetCurrentThread();

    /* Block APCs */
    KeEnterGuardedRegionThread(Thread);

    /* Remove the lock */
    if (!InterlockedBitTestAndReset(&GuardedMutex->Count, GM_LOCK_BIT_V))
    {
        /* Re-enable APCs */
        KeLeaveGuardedRegionThread(Thread);
        YieldProcessor();

        /* Return failure */
        return FALSE;
    }

    /* Set the Owner and APC State */
    GuardedMutex->Owner = Thread;
    GuardedMutex->SpecialApcDisable = Thread->SpecialApcDisable;
    return TRUE;
}
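
//
// Illustrative only: the expected calling pattern for the guarded mutex
// helpers above (the public exports wrap these). Acquire/release must be
// paired on the same thread at IRQL <= APC_LEVEL; UpdateProtectedData() is
// just a placeholder for the code the mutex protects.
//
//     KGUARDED_MUTEX Mutex;
//     _KeInitializeGuardedMutex(&Mutex);
//
//     _KeAcquireGuardedMutex(&Mutex);
//     UpdateProtectedData();
//     _KeReleaseGuardedMutex(&Mutex);
//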

FORCEINLINE
VOID
KiAcquireNmiListLock(OUT PKIRQL OldIrql)
{
    KeAcquireSpinLock(&KiNmiCallbackListLock, OldIrql);
}

FORCEINLINE
VOID
KiReleaseNmiListLock(IN KIRQL OldIrql)
{
    KeReleaseSpinLock(&KiNmiCallbackListLock, OldIrql);
}

#if defined(_M_IX86) || defined(_M_AMD64)
FORCEINLINE
VOID
KiCpuId(
    PCPU_INFO CpuInfo,
    ULONG Function)
{
    __cpuid((INT*)CpuInfo->AsUINT32, Function);
}

FORCEINLINE
VOID
KiCpuIdEx(
    PCPU_INFO CpuInfo,
    ULONG Function,
    ULONG SubFunction)
{
    __cpuidex((INT*)CpuInfo->AsUINT32, Function, SubFunction);
}
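
//
// Illustrative only: a sketch of querying the CPUID vendor leaf with the
// helper above (assumes CPU_INFO exposes the four output registers through
// its AsUINT32 array, as used by the casts above).
//
//     CPU_INFO CpuInfo;
//     KiCpuId(&CpuInfo, 0);
//     /* CpuInfo.AsUINT32[1], [3], [2] now hold the vendor string
//        (EBX, EDX, ECX), e.g. "GenuineIntel" or "AuthenticAMD". */
//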
#endif /* _M_IX86 || _M_AMD64 */

#ifdef __cplusplus
} // extern "C"
#endif
