/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS Kernel
 * FILE:            ntoskrnl/include/internal/i386/asmmacro.S
 * PURPOSE:         Assembly Macros for Spinlocks and common Trap Code
 * PROGRAMMERS:     Alex Ionescu (alex@relsoft.net)
 *                  Timo Kreuzer (timo.kreuzer@reactos.org)
 */

// Arguments for idt
#define INT_32_DPL0 HEX(08E00)
#define INT_32_DPL3 HEX(0EE00)

//
// These macros are inlined equivalents of KiAcquire/ReleaseSpinlock, that is,
// they will not be compiled into non-SMP builds. Usage is as follows:
//
// .BeginYourFunction
//     mov reg, lockaddr
//     ACQUIRE_SPINLOCK(reg, .spin)
//     <thread-safe code here>
//     RELEASE_SPINLOCK(reg)
//     <misc code here>
//     retn
// #ifdef CONFIG_SMP
// .spin
//     <any necessary steps to be able to jump back safely>
//     SPIN_ON_LOCK(reg, .BeginYourFunction)
// #endif
//
#ifdef CONFIG_SMP
#define LOCK lock
#define ACQUIRE_SPINLOCK(x, y) \
    lock bts dword ptr [x], 0; \
    jb y
#define RELEASE_SPINLOCK(x) mov byte ptr [x], 0
#define SPIN_ON_LOCK(x, y) \
1: \
    test dword ptr [x], 1; \
    jz y; \
    pause; \
    jmp 1b
#else
#define LOCK
#define ACQUIRE_SPINLOCK(x, y)
#define RELEASE_SPINLOCK(x)
#endif

//
// @name IDT
//
// This macro creates an IDT entry for the given handler
//
// @param Handler
//        Pointer to the IDT handler
//
// @param Bits
//        Descriptor Bits to associate
//
// @remark None.
//
MACRO(idt, Handler, Bits)
    .long VAL(Handler)
    .short VAL(Bits)
    .short KGDT_R0_CODE
ENDM

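//
// Usage sketch (illustrative only): the idt macro is intended to emit one
// boot-time IDT entry per vector, paired with the INT_32_DPL0/INT_32_DPL3
// bits defined above. The handler names below are placeholders for whatever
// trap stubs the including file actually declares:
//
//     idt _KiTrap00, INT_32_DPL0   /* vector 0x00: kernel-only gate */
//     idt _KiTrap03, INT_32_DPL3   /* vector 0x03: gate reachable from user mode */
//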
#define KI_PUSH_FAKE_ERROR_CODE HEX(0001)
#define KI_UNUSED               HEX(0002)
#define KI_NONVOLATILES_ONLY    HEX(0004)
#define KI_FAST_SYSTEM_CALL     HEX(0008)
#define KI_SOFTWARE_TRAP        HEX(0010)
#define KI_HARDWARE_INT         HEX(0020)
#define KI_DONT_SAVE_SEGS       HEX(0100)

MACRO(KiEnterTrap, Flags)
    LOCAL not_v86_trap
    LOCAL set_sane_segs

    /* Check what kind of trap frame this trap requires */
    if (Flags AND KI_FAST_SYSTEM_CALL)

        /* SYSENTER requires us to build a complete ring transition trap frame */
        FrameSize = KTRAP_FRAME_EIP

        /* Fixup fs; cx is free to clobber */
        mov cx, KGDT_R0_PCR
        mov fs, cx

        /* Get a pointer to the TSS */
        mov ecx, fs:[KPCR_TSS]

        /* Get the kernel stack pointer */
        mov esp, [ecx + KTSS_ESP0]

        /* Set up a fake hardware trap frame */
        push KGDT_R3_DATA or RPL_MASK
        push edx
        pushfd
        push KGDT_R3_CODE or RPL_MASK
        push dword ptr ds:[KUSER_SHARED_SYSCALL_RET]

    elseif (Flags AND KI_SOFTWARE_TRAP)

        /* Software traps need a complete non-ring transition trap frame */
        FrameSize = KTRAP_FRAME_ESP

        /* Software traps need to get their EIP from the caller's frame */
        pop eax

    elseif (Flags AND KI_PUSH_FAKE_ERROR_CODE)

        /* If the trap doesn't have an error code, we'll make space for it */
        FrameSize = KTRAP_FRAME_EIP

    else

        /* The trap already has an error code, so just make space for the rest */
        FrameSize = KTRAP_FRAME_ERROR_CODE

    endif

    /* Make space for this frame */
    sub esp, FrameSize

    /* Save nonvolatile registers */
    mov [esp + KTRAP_FRAME_EBP], ebp
    mov [esp + KTRAP_FRAME_EBX], ebx
    mov [esp + KTRAP_FRAME_ESI], esi
    mov [esp + KTRAP_FRAME_EDI], edi

    /* Save eax for system calls, for use by the C handler */
    mov [esp + KTRAP_FRAME_EAX], eax

    /* Unless the caller wants nonvolatiles only ... */
    if (NOT (Flags AND KI_NONVOLATILES_ONLY))
        /* ... save the volatile registers as well */
        mov [esp + KTRAP_FRAME_ECX], ecx
        mov [esp + KTRAP_FRAME_EDX], edx
    endif

    /* Save the segment registers? */
    if (Flags AND KI_DONT_SAVE_SEGS)

        /* No, initialize the trap frame segment registers with sane values */
        mov eax, KGDT_R3_DATA OR RPL_MASK
        mov ecx, fs
        mov [esp + KTRAP_FRAME_DS], eax
        mov [esp + KTRAP_FRAME_ES], eax
        mov [esp + KTRAP_FRAME_FS], ecx
        mov dword ptr [esp + KTRAP_FRAME_GS], 0

    else

        /* Check for V86 mode */
        test byte ptr [esp + KTRAP_FRAME_EFLAGS + 2], (EFLAGS_V86_MASK / HEX(10000))
        jz not_v86_trap

        /* Copy the V8086 segments into the protected mode segment slots */
        mov eax, [esp + KTRAP_FRAME_V86_DS]
        mov ecx, [esp + KTRAP_FRAME_V86_ES]
        mov [esp + KTRAP_FRAME_DS], eax
        mov [esp + KTRAP_FRAME_ES], ecx
        mov eax, [esp + KTRAP_FRAME_V86_FS]
        mov ecx, [esp + KTRAP_FRAME_V86_GS]
        mov [esp + KTRAP_FRAME_FS], eax
        mov [esp + KTRAP_FRAME_GS], ecx
        jmp set_sane_segs

not_v86_trap:

        /* Save the segment selectors */
        mov eax, ds
        mov ecx, es
        mov [esp + KTRAP_FRAME_DS], eax
        mov [esp + KTRAP_FRAME_ES], ecx
        mov eax, fs
        mov ecx, gs
        mov [esp + KTRAP_FRAME_FS], eax
        mov [esp + KTRAP_FRAME_GS], ecx

    endif

set_sane_segs:
    /* Load correct data segments */
    mov ax, KGDT_R3_DATA OR RPL_MASK
    mov ds, ax
    mov es, ax

    /* Fast system calls have fs already fixed up */
    if (Flags AND KI_FAST_SYSTEM_CALL)

        /* Enable interrupts in the saved EFLAGS and set a sane FS value */
        or dword ptr [esp + KTRAP_FRAME_EFLAGS], EFLAGS_INTERRUPT_MASK
        mov dword ptr [esp + KTRAP_FRAME_FS], KGDT_R3_TEB or RPL_MASK

        /* Set sane active EFLAGS */
        push 2
        popfd

        /* Point edx to the usermode parameters */
        add edx, 8
    else
        /* Otherwise, fix up fs now */
        mov ax, KGDT_R0_PCR
        mov fs, ax
    endif

    /* Save ExceptionList, since the C handler might use SEH */
    mov eax, fs:[KPCR_EXCEPTION_LIST]
    mov [esp + KTRAP_FRAME_EXCEPTION_LIST], eax

#if DBG
    /* Keep the frame chain intact */
    mov eax, [esp + KTRAP_FRAME_EIP]
    mov [esp + KTRAP_FRAME_DEBUGEIP], eax
    mov [esp + KTRAP_FRAME_DEBUGEBP], ebp
    mov ebp, esp
#endif

    /* Set parameter 1 (ECX) to point to the frame */
    mov ecx, esp

    /* Clear the direction flag */
    cld

ENDM

MACRO(KiCallHandler, Handler)
#if DBG
    /* Use a call to get the return address for backtraces */
    call Handler
#else
    /* Use the faster jmp */
    jmp Handler
#endif
    nop
ENDM

MACRO(TRAP_ENTRY, Trap, Flags)
    EXTERN @&Trap&Handler@4 :PROC
    PUBLIC _&Trap
    .PROC _&Trap
        /* Generate proper debugging symbols */
        FPO 0, 0, 0, 0, 1, FRAME_TRAP

        /* Common code to create the trap frame */
        KiEnterTrap Flags

        /* Call the C handler */
        KiCallHandler @&Trap&Handler@4
    .ENDP
ENDM

#define KI_NMI HEX(0001)

MACRO(TASK_ENTRY, Trap, Flags)
    EXTERN _&Trap&Handler :PROC
    PUBLIC _&Trap
    .PROC _&Trap
        /* Generate proper debugging symbols */
        FPO 0, 0, 0, 0, 0, FRAME_TSS

        /* Call the C handler */
        call _&Trap&Handler

        if (Flags AND KI_NMI)
            /* Return from the NMI with iret and handle NMI recursion */
            iretd
            jmp _&Trap
        endif

    .ENDP
ENDM
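//
// Usage sketch (illustrative only): TRAP_ENTRY and TASK_ENTRY are instantiated
// once per vector by the trap handler code. The handler names and flag
// combinations below only illustrate how the flags are meant to be combined;
// the kernel's actual instantiations live where these macros are invoked:
//
//     TRAP_ENTRY KiTrap00, KI_PUSH_FAKE_ERROR_CODE    /* CPU pushes no error code */
//     TRAP_ENTRY KiTrap0E, 0                          /* CPU pushes an error code */
//     TRAP_ENTRY KiGetTickCount, KI_SOFTWARE_TRAP     /* software interrupt */
//     TASK_ENTRY KiTrap02, KI_NMI                     /* NMI runs on its own TSS */
//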
#define KI_RESTORE_EAX       HEX(0001)
#define KI_RESTORE_ECX_EDX   HEX(0002)
#define KI_RESTORE_FS        HEX(0004)
#define KI_RESTORE_SEGMENTS  HEX(0008)
#define KI_RESTORE_EFLAGS    HEX(0010)
#define KI_EXIT_SYSCALL      HEX(0020)
#define KI_EXIT_JMP          HEX(0040)
#define KI_EXIT_RET          HEX(0080)
#define KI_EXIT_IRET         HEX(0100)
#define KI_EDITED_FRAME      HEX(0200)
#define KI_EXIT_RET8         HEX(0400)
#define KI_RESTORE_VOLATILES (KI_RESTORE_EAX OR KI_RESTORE_ECX_EDX)

MACRO(KiTrapExitStub, Name, Flags)
    LOCAL ret8_instruction
    LOCAL not_nested_int

PUBLIC @&Name&@4
@&Name&@4:

    if (Flags AND KI_EXIT_RET8) OR (Flags AND KI_EXIT_IRET)

        /* This is the IRET frame */
        OffsetEsp = KTRAP_FRAME_EIP

    elseif (Flags AND KI_RESTORE_EFLAGS)

        /* We will pop EFLAGS off the stack */
        OffsetEsp = KTRAP_FRAME_EFLAGS

    else

        OffsetEsp = 0

    endif

    if (Flags AND KI_EDITED_FRAME)

        /* Load the requested ESP */
        mov esp, [ecx + KTRAP_FRAME_TEMPESP]

        /* Put the return address on the new stack */
        push [ecx + KTRAP_FRAME_EIP]

        /* Put EFLAGS on the new stack */
        push [ecx + KTRAP_FRAME_EFLAGS]

    else

        /* Point esp to the appropriate member of the frame */
        lea esp, [ecx + OffsetEsp]

    endif

    /* Restore nonvolatile registers */
    mov ebx, [ecx + KTRAP_FRAME_EBX]
    mov esi, [ecx + KTRAP_FRAME_ESI]
    mov edi, [ecx + KTRAP_FRAME_EDI]
    mov ebp, [ecx + KTRAP_FRAME_EBP]

    if (Flags AND KI_RESTORE_EAX)

        /* Restore eax */
        mov eax, [ecx + KTRAP_FRAME_EAX]

    endif

    if (Flags AND KI_RESTORE_ECX_EDX)

        /* Restore the volatile registers */
        mov edx, [ecx + KTRAP_FRAME_EDX]
        mov ecx, [ecx + KTRAP_FRAME_ECX]

    elseif (Flags AND KI_EXIT_JMP)

        /* Load the return address into edx */
        mov edx, [esp - OffsetEsp + KTRAP_FRAME_EIP]

    elseif (Flags AND KI_EXIT_SYSCALL)

        /* Set the sysexit parameters */
        mov edx, [esp - OffsetEsp + KTRAP_FRAME_EIP]
        mov ecx, [esp - OffsetEsp + KTRAP_FRAME_ESP]

        /* Keep interrupts disabled until the sti / sysexit */
        and byte ptr [esp - OffsetEsp + KTRAP_FRAME_EFLAGS + 1], NOT (EFLAGS_INTERRUPT_MASK / HEX(100))

    endif

    if (Flags AND KI_RESTORE_SEGMENTS)

        /* Restore the segments for user mode */
        mov ds, [esp - OffsetEsp + KTRAP_FRAME_DS]
        mov es, [esp - OffsetEsp + KTRAP_FRAME_ES]
        mov gs, [esp - OffsetEsp + KTRAP_FRAME_GS]

    endif

    if ((Flags AND KI_RESTORE_FS) OR (Flags AND KI_RESTORE_SEGMENTS))

        /* Restore user mode FS */
        mov fs, [esp - OffsetEsp + KTRAP_FRAME_FS]

    endif

    if (Flags AND KI_RESTORE_EFLAGS)

        if (Flags AND KI_EXIT_RET8)

            /* Check if we return from a nested interrupt, i.e. an interrupt
               that occurred in the ret8 return path between restoring
               EFLAGS and returning with the ret instruction. */
            cmp dword ptr [esp], offset ret8_instruction
            jne not_nested_int

            /* This is a nested interrupt, so we have 2 IRET frames.
               Skip the first one and go directly to the previous return
               address. Do not pass Go. Do not collect $200. */
            add esp, 12

not_nested_int:
            /* We are at the IRET frame, so push EFLAGS first */
            push dword ptr [esp + 8]

        endif

        /* Restore EFLAGS */
        popfd

    endif

    if (Flags AND KI_EXIT_SYSCALL)

        /* Enable interrupts and return to user mode. The two instructions
           must immediately follow each other so the sequence is effectively
           atomic. */
        sti
        sysexit

    elseif (Flags AND KI_EXIT_IRET)

        /* Return with iret */
        iretd

    elseif (Flags AND KI_EXIT_JMP)

        /* Return to kernel mode with a jmp */
        jmp edx

    elseif (Flags AND KI_EXIT_RET8)

        /* Return to kernel mode with a ret 8 */
ret8_instruction:
        ret 8

    elseif (Flags AND KI_EXIT_RET)

        /* Return to kernel mode with a ret */
        ret

    endif

ENDM
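//
// Usage sketch (illustrative only): KiTrapExitStub generates one exit routine
// per flag combination, and the C trap handlers transfer to whichever stub
// matches the amount of state that has to be restored (with ecx pointing to
// the trap frame, per the __fastcall decoration). The stub names and flag
// combinations below are placeholders meant to show the intent of the flags,
// not the kernel's actual set of exit stubs:
//
//     /* Full exit to user mode through iret */
//     KiTrapExitStub KiExitExample1, (KI_RESTORE_VOLATILES OR KI_RESTORE_SEGMENTS OR KI_EXIT_IRET)
//
//     /* Fast system call exit through sti / sysexit */
//     KiTrapExitStub KiExitExample2, (KI_RESTORE_EAX OR KI_RESTORE_FS OR KI_RESTORE_EFLAGS OR KI_EXIT_SYSCALL)
//
//     /* Return to interrupted kernel code, popping EFLAGS and using ret 8 */
//     KiTrapExitStub KiExitExample3, (KI_RESTORE_VOLATILES OR KI_RESTORE_EFLAGS OR KI_EXIT_RET8)
//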