/*	$NetBSD: frameasm.h,v 1.15 2011/07/26 12:57:35 yamt Exp $	*/

#ifndef _I386_FRAMEASM_H_
#define _I386_FRAMEASM_H_

#ifdef _KERNEL_OPT
#include "opt_multiprocessor.h"
#include "opt_xen.h"
#endif

#if !defined(XEN)
/*
 * Native hardware: interrupt disable/enable are single instructions
 * and need no scratch register; "reg" is accepted but unused so call
 * sites are source-compatible with the Xen variants below.
 */
#define CLI(reg)	cli
#define STI(reg)	sti
#else
/*
 * Xen paravirtualized: "interrupts" are event-channel upcalls,
 * masked and tested through per-vcpu flag bytes rather than eflags.IF.
 * "reg" must hold (or is loaded with) the vcpu_info pointer.
 * XXX assym.h
 */
#define TRAP_INSTR	int $0x82
#define XEN_BLOCK_EVENTS(reg)	movb $1,EVTCHN_UPCALL_MASK(reg)
#define XEN_UNBLOCK_EVENTS(reg)	movb $0,EVTCHN_UPCALL_MASK(reg)
#define XEN_TEST_PENDING(reg)	testb $0xFF,EVTCHN_UPCALL_PENDING(reg)

/* CLI/STI clobber reg: it is loaded with the per-CPU vcpu pointer. */
#define CLI(reg)	movl CPUVAR(VCPU),reg ;  \
			XEN_BLOCK_EVENTS(reg)
#define STI(reg)	movl CPUVAR(VCPU),reg ;  \
			XEN_UNBLOCK_EVENTS(reg)
/*
 * STIC: STI plus a test of the pending flag; leaves the testb result
 * in the flags so the caller can branch to deliver pending events.
 */
#define STIC(reg)	movl CPUVAR(VCPU),reg ;  \
			XEN_UNBLOCK_EVENTS(reg) ; \
			testb $0xff,EVTCHN_UPCALL_PENDING(reg)
#endif

#ifndef TRAPLOG
/* TRAPLOG not configured: TLOG expands to nothing. */
#define TLOG		/**/
#else
/*
 * Fill in trap record.
 * Appends one record to the per-CPU circular trap log (%fs is the
 * per-CPU segment — see INTRENTRY, which loads it before TLOG runs).
 * Clobbers %eax, %ebx, %ecx, %edx and the flags.
 */
#define TLOG						\
9:							\
	/* advance offset by one record, wrapping at SIZEOF_TLOG */ \
	movl	%fs:CPU_TLOG_OFFSET, %eax;		\
	movl	%fs:CPU_TLOG_BASE, %ebx;		\
	addl	$SIZEOF_TREC,%eax;			\
	andl	$SIZEOF_TLOG-1,%eax;			\
	addl	%eax,%ebx;				\
	movl	%eax,%fs:CPU_TLOG_OFFSET;		\
	/* %ebx now points at the record slot */	\
	movl	%esp,TREC_SP(%ebx);			\
	movl	$9b,TREC_HPC(%ebx);			\
	movl	TF_EIP(%esp),%eax;			\
	movl	%eax,TREC_IPC(%ebx);			\
	rdtsc	;					\
	movl	%eax,TREC_TSC(%ebx);			\
	/* read four consecutive last-branch/last-interrupt MSRs */ \
	movl	$MSR_LASTBRANCHFROMIP,%ecx;		\
	rdmsr	;					\
	movl	%eax,TREC_LBF(%ebx);			\
	incl	%ecx	;				\
	rdmsr	;					\
	movl	%eax,TREC_LBT(%ebx);			\
	incl	%ecx	;				\
	rdmsr	;					\
	movl	%eax,TREC_IBF(%ebx);			\
	incl	%ecx	;				\
	rdmsr	;					\
	movl	%eax,TREC_IBT(%ebx)
#endif

/*
 * These are used on interrupt or trap entry or exit.
 */
/*
 * INTRENTRY: build the register portion of a trapframe on the kernel
 * stack and switch to the kernel's flat data segments.  %ds/%es/%gs
 * get GDATA_SEL, %fs gets GCPU_SEL (the per-CPU segment, which TLOG
 * and CPUVAR() accesses rely on).  Segment loads are deliberately
 * interleaved with the register saves; do not reorder.
 * Clobbers %eax (used to stage the selector values).
 */
#define	INTRENTRY \
	subl	$TF_PUSHSIZE,%esp	; \
	movw	%gs,TF_GS(%esp)	; \
	movw	%fs,TF_FS(%esp)	; \
	movl	%eax,TF_EAX(%esp)	; \
	movw	%es,TF_ES(%esp)	; \
	movw	%ds,TF_DS(%esp)	; \
	movl	$GSEL(GDATA_SEL, SEL_KPL),%eax	; \
	movl	%edi,TF_EDI(%esp)	; \
	movl	%esi,TF_ESI(%esp)	; \
	movw	%ax,%ds	; \
	movl	%ebp,TF_EBP(%esp)	; \
	movw	%ax,%es	; \
	movl	%ebx,TF_EBX(%esp)	; \
	movw	%ax,%gs	; \
	movl	%edx,TF_EDX(%esp)	; \
	movl	$GSEL(GCPU_SEL, SEL_KPL),%eax	; \
	movl	%ecx,TF_ECX(%esp)	; \
	movl	%eax,%fs	; \
	cld	; \
	TLOG

/*
 * INTRFASTEXIT should be in sync with trap(), resume_iret and friends.
 * Restores everything INTRENTRY saved, pops the trapframe — TF_PUSHSIZE
 * plus 8 extra bytes (two words pushed before the frame; presumably
 * trapno/error code — confirm against the vector stubs) — and returns
 * with iret.
 */
#define	INTRFASTEXIT \
	movw	TF_GS(%esp),%gs	; \
	movw	TF_FS(%esp),%fs	; \
	movw	TF_ES(%esp),%es	; \
	movw	TF_DS(%esp),%ds	; \
	movl	TF_EDI(%esp),%edi	; \
	movl	TF_ESI(%esp),%esi	; \
	movl	TF_EBP(%esp),%ebp	; \
	movl	TF_EBX(%esp),%ebx	; \
	movl	TF_EDX(%esp),%edx	; \
	movl	TF_ECX(%esp),%ecx	; \
	movl	TF_EAX(%esp),%eax	; \
	addl	$(TF_PUSHSIZE+8),%esp	; \
	iret

/*
 * DO_DEFERRED_SWITCH: if a pmap load has been deferred
 * (ci_want_pmapload set), perform it now by calling pmap_load().
 */
#define DO_DEFERRED_SWITCH \
	cmpl	$0, CPUVAR(WANT_PMAPLOAD)		; \
	jz	1f					; \
	call	_C_LABEL(pmap_load)			; \
1:

/*
 * DO_DEFERRED_SWITCH_RETRY: as above, but re-check the flag after
 * pmap_load() returns and loop until it stays clear.
 */
#define DO_DEFERRED_SWITCH_RETRY \
1:	; \
	cmpl	$0, CPUVAR(WANT_PMAPLOAD)		; \
	jz	1f					; \
	call	_C_LABEL(pmap_load)			; \
	jmp	1b					; \
1:

/* Sets ZF iff no pmap load is pending; caller branches on the result. */
#define CHECK_DEFERRED_SWITCH \
	cmpl	$0, CPUVAR(WANT_PMAPLOAD)

/*
 * Test / clear the current lwp's md_astpending flag.
 * CHECK_ASTPENDING clobbers reg (loaded with curlwp) and sets ZF iff
 * no AST is pending; CLEAR_ASTPENDING expects reg to still hold curlwp.
 */
#define CHECK_ASTPENDING(reg)	movl	CPUVAR(CURLWP),reg	; \
				cmpl	$0, L_MD_ASTPENDING(reg)
#define CLEAR_ASTPENDING(reg)	movl	$0, L_MD_ASTPENDING(reg)

/*
 * IDEPTH_INCR:
 *	increase ci_idepth and switch to the interrupt stack if necessary.
 *	note that the initial value of ci_idepth is -1.
 *
 *	=> should be called with interrupt disabled.
 *	=> save the old value of %esp in %eax.
 */

/*
 * Subtle: incl sets ZF exactly when idepth goes -1 -> 0 (outermost
 * entry), and the intervening movl does not modify the flags, so jne
 * is taken for nested interrupts (already on the interrupt stack).
 * The old %esp is pushed in either case and becomes the intrframe
 * pointer; IDEPTH_DECR pops it back.
 */
#define	IDEPTH_INCR \
	incl	CPUVAR(IDEPTH); \
	movl	%esp, %eax; /* mov preserves ZF from incl */ \
	jne	999f; /* nested: stay on current stack */ \
	movl	CPUVAR(INTRSTACK), %esp; \
999:	pushl	%eax; /* eax == pointer to intrframe */ \

/*
 * IDEPTH_DECR:
 *	decrement ci_idepth and switch back to
 *	the original stack saved by IDEPTH_INCR.
 *
 *	=> should be called with interrupt disabled.
 */

#define	IDEPTH_DECR \
	popl	%esp; \
	decl	CPUVAR(IDEPTH)

#endif /* _I386_FRAMEASM_H_ */