1 /////////////////////////////////////////////////////////////////////////
2 // $Id: event.cc 14233 2021-04-27 08:22:04Z sshwarts $
3 /////////////////////////////////////////////////////////////////////////
4 //
5 //   Copyright (c) 2011-2013 Stanislav Shwartsman
6 //          Written by Stanislav Shwartsman [sshwarts at sourceforge net]
7 //
8 //  This library is free software; you can redistribute it and/or
9 //  modify it under the terms of the GNU Lesser General Public
10 //  License as published by the Free Software Foundation; either
11 //  version 2 of the License, or (at your option) any later version.
12 //
13 //  This library is distributed in the hope that it will be useful,
14 //  but WITHOUT ANY WARRANTY; without even the implied warranty of
15 //  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16 //  Lesser General Public License for more details.
17 //
18 //  You should have received a copy of the GNU Lesser General Public
19 //  License along with this library; if not, write to the Free Software
20 //  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
21 /////////////////////////////////////////////////////////////////////////
22 
23 #define NEED_CPU_REG_SHORTCUTS 1
24 #include "bochs.h"
25 #include "cpu.h"
26 #define LOG_THIS BX_CPU_THIS_PTR
27 
28 #include "iodev/iodev.h"
29 
// Idle the CPU while it is in a non-active activity state (HLT, MWAIT,
// wait-for-SIPI, ...) until some event ends the wait.
//
// Returns true when control must be handed back to the caller of cpu_loop()
// (other CPUs need a time slice, the debugger requested a stop, or the
// simulation is shutting down); returns false when the waiting condition
// was broken and this CPU may resume executing instructions.
bool BX_CPU_C::handleWaitForEvent(void)
{
  if (BX_CPU_THIS_PTR activity_state == BX_ACTIVITY_STATE_WAIT_FOR_SIPI) {
    // HALT condition remains, return so other CPUs have a chance
#if BX_DEBUGGER
    BX_CPU_THIS_PTR stop_reason = STOP_CPU_HALTED;
#endif
    return 1; // Return to caller of cpu_loop.
  }

  // For one processor, pass the time as quickly as possible until
  // an interrupt wakes up the CPU.
  while (1)
  {
    // Wake-up check: a pending INTR/LAPIC interrupt ends the wait when
    // EFLAGS.IF is set (or unconditionally in the MWAIT "break on
    // interrupt even if masked" state); the listed unmasked events
    // (NMI, SMI, INIT and the VMX virtualization traps) always do.
    if ((is_pending(BX_EVENT_PENDING_INTR | BX_EVENT_PENDING_LAPIC_INTR) && (BX_CPU_THIS_PTR get_IF() || BX_CPU_THIS_PTR activity_state == BX_ACTIVITY_STATE_MWAIT_IF)) ||
         is_unmasked_event_pending(BX_EVENT_NMI | BX_EVENT_SMI | BX_EVENT_INIT |
            BX_EVENT_VMX_VTPR_UPDATE |
            BX_EVENT_VMX_VEOI_UPDATE |
            BX_EVENT_VMX_VIRTUAL_APIC_WRITE |
            BX_EVENT_VMX_MONITOR_TRAP_FLAG |
            BX_EVENT_VMX_VIRTUAL_NMI))
    {
      // interrupt ends the HALT condition
#if BX_SUPPORT_MONITOR_MWAIT
      // leaving an MWAIT state disarms the address monitor
      if (BX_CPU_THIS_PTR activity_state >= BX_ACTIVITY_STATE_MWAIT)
        BX_CPU_THIS_PTR monitor.reset_monitor();
#endif
      BX_CPU_THIS_PTR activity_state = BX_ACTIVITY_STATE_ACTIVE;
      BX_CPU_THIS_PTR inhibit_mask = 0; // clear inhibits for after resume
      break;
    }

    if (is_unmasked_event_pending(BX_EVENT_VMX_PREEMPTION_TIMER_EXPIRED)) {
      // Exit from waiting loop and proceed to VMEXIT
      break;
    }

    if (BX_CPU_THIS_PTR activity_state == BX_ACTIVITY_STATE_ACTIVE) {
      // happens also when MWAIT monitor was hit
//    BX_INFO(("handleWaitForEvent: reset detected in HLT state"));
      break;
    }

    if (BX_HRQ && BX_DBG_ASYNC_DMA) {
      // handle DMA also when CPU is halted
      DEV_dma_raise_hlda();
    }

    // for multiprocessor simulation, even if this CPU is halted we still
    // must give the others a chance to simulate.  If an interrupt has
    // arrived, then clear the HALT condition; otherwise just return from
    // the CPU loop with stop_reason STOP_CPU_HALTED.
#if BX_SUPPORT_SMP
    if (BX_SMP_PROCESSORS > 1) {
      // HALT condition remains, return so other CPUs have a chance
#if BX_DEBUGGER
      BX_CPU_THIS_PTR stop_reason = STOP_CPU_HALTED;
#endif
      return 1; // Return to caller of cpu_loop.
    }
#endif

#if BX_DEBUGGER
    // the debugger asked to break out of the simulation
    if (bx_guard.interrupt_requested)
      return 1; // Return to caller of cpu_loop.
#endif

    if (bx_pc_system.kill_bochs_request) {
      // setting kill_bochs_request causes the cpu loop to return ASAP.
      return 1; // Return to caller of cpu_loop.
    }

    BX_TICKN(10); // when in HLT run time faster for single CPU
  }

  return 0;
}
107 
// Acknowledge and deliver one pending external (maskable) hardware
// interrupt: give SVM/VMX guests a chance to intercept it first, then
// fetch the vector from the local APIC (if it holds the pending
// interrupt) or from the PIC via an interrupt-acknowledge cycle, and
// dispatch it through interrupt().
void BX_CPU_C::InterruptAcknowledge(void)
{
  Bit8u vector;

#if BX_SUPPORT_SVM
  // a physical INTR may be intercepted by the SVM guest
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if (SVM_INTERCEPT(SVM_INTERCEPT0_INTR)) Svm_Vmexit(SVM_VMEXIT_INTR);
  }
#endif

#if BX_SUPPORT_VMX
  if (BX_CPU_THIS_PTR in_vmx_guest) {

#if BX_SUPPORT_VMX >= 2
    // virtual-interrupt delivery bypasses the normal acknowledge path
    if (is_pending(BX_EVENT_PENDING_VMX_VIRTUAL_INTR)) {
      VMX_Deliver_Virtual_Interrupt();
      return;
    }
#endif

    // may VMEXIT here if external-interrupt exiting is enabled
    VMexit_ExtInterrupt();
  }
#endif

  // NOTE: similar code in ::take_irq()
#if BX_SUPPORT_APIC
  if (is_pending(BX_EVENT_PENDING_LAPIC_INTR))
    vector = BX_CPU_THIS_PTR lapic.acknowledge_int();
  else
#endif
    // if no local APIC, always acknowledge the PIC.
    vector = DEV_pic_iac(); // may set INTR with next interrupt

  BX_CPU_THIS_PTR EXT = 1; /* external event */
#if BX_SUPPORT_VMX
  VMexit_Event(BX_EXTERNAL_INTERRUPT, vector, 0, 0);
#endif

  BX_INSTR_HWINTERRUPT(BX_CPU_ID, vector,
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);
  interrupt(vector, BX_EXTERNAL_INTERRUPT, 0, 0);

  BX_CPU_THIS_PTR prev_rip = RIP; // commit new RIP
}
152 
#if BX_SUPPORT_SVM
// Acknowledge and deliver the SVM virtual interrupt (V_IRQ) injected by
// the hypervisor through the VMCB: optionally VMEXIT if the guest's
// VINTR intercept is enabled, otherwise clear the pending V_IRQ and
// dispatch the VMCB-provided vector as an external interrupt.
void BX_CPU_C::VirtualInterruptAcknowledge(void)
{
  Bit8u vector = SVM_V_INTR_VECTOR;

  if (SVM_INTERCEPT(SVM_INTERCEPT0_VINTR)) Svm_Vmexit(SVM_VMEXIT_VINTR);

  clear_event(BX_EVENT_SVM_VIRQ_PENDING);

  BX_CPU_THIS_PTR EXT = 1; /* external event */

  BX_INSTR_HWINTERRUPT(BX_CPU_ID, vector,
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);
  interrupt(vector, BX_EXTERNAL_INTERRUPT, 0, 0);

  BX_CPU_THIS_PTR prev_rip = RIP; // commit new RIP
}
#endif
171 
// Process all pending asynchronous events on an instruction boundary,
// in the architectural priority order documented inline below (debug
// traps, SMI/INIT, NMI, maskable interrupts, DMA, ...).  Exactly one
// event of the external-interrupt class (priority 5) is taken per call
// (note the if/else-if chain).
//
// Returns true when the caller of cpu_loop() must regain control
// (halted in SMP simulation, or shutdown requested); returns false to
// continue executing instructions.  Clears async_event at the end when
// nothing pending remains.
bool BX_CPU_C::handleAsyncEvent(void)
{
  //
  // This area is where we process special conditions and events.
  //
  if (BX_CPU_THIS_PTR activity_state != BX_ACTIVITY_STATE_ACTIVE) {
    // For one processor, pass the time as quickly as possible until
    // an interrupt wakes up the CPU.
    if (handleWaitForEvent()) return 1;
  }

  if (bx_pc_system.kill_bochs_request) {
    // setting kill_bochs_request causes the cpu loop to return ASAP.
    return 1; // Return to caller of cpu_loop.
  }

  // Priority 1: Hardware Reset and Machine Checks
  //   RESET
  //   Machine Check
  // (bochs doesn't support these)

#if BX_SUPPORT_SVM
  // debug exceptions or trap due to breakpoint register match
  // ignored and discarded if GIF == 0
  // debug traps due to EFLAGS.TF remain untouched
  if (! BX_CPU_THIS_PTR svm_gif)
    BX_CPU_THIS_PTR debug_trap &= BX_DEBUG_SINGLE_STEP_BIT;
#endif

  // APIC virtualization trap take priority over SMI, INIT and lower priority events and
  // not blocked by EFLAGS.IF or interrupt inhibits by MOV_SS and STI
#if BX_SUPPORT_VMX && BX_SUPPORT_X86_64
  if (is_unmasked_event_pending(BX_EVENT_VMX_VTPR_UPDATE |
                                BX_EVENT_VMX_VEOI_UPDATE | BX_EVENT_VMX_VIRTUAL_APIC_WRITE))
  {
    VMX_Virtual_Apic_Access_Trap();
  }
#endif

  // Priority 2: Trap on Task Switch
  //   T flag in TSS is set
  if (BX_CPU_THIS_PTR debug_trap & BX_DEBUG_TRAP_TASK_SWITCH_BIT) {
    exception(BX_DB_EXCEPTION, 0); // no error, not interrupt
  }

  // Priority 3: External Hardware Interventions
  //   FLUSH
  //   STOPCLK
  //   SMI
  //   INIT
  if (is_unmasked_event_pending(BX_EVENT_SMI) && SVM_GIF)
  {
#if BX_SUPPORT_SVM
    // SMI may be intercepted by an SVM guest before being taken
    if (BX_CPU_THIS_PTR in_svm_guest) {
      if (SVM_INTERCEPT(SVM_INTERCEPT0_SMI)) Svm_Vmexit(SVM_VMEXIT_SMI);
    }
#endif
    clear_event(BX_EVENT_SMI); // clear SMI pending flag
    enter_system_management_mode(); // would disable NMI when SMM was accepted
  }

  if (is_unmasked_event_pending(BX_EVENT_INIT) && SVM_GIF) {
#if BX_SUPPORT_SVM
    if (BX_CPU_THIS_PTR in_svm_guest) {
      if (SVM_INTERCEPT(SVM_INTERCEPT0_INIT)) Svm_Vmexit(SVM_VMEXIT_INIT);
    }
#endif
#if BX_SUPPORT_VMX
    // INIT in VMX non-root operation causes an unconditional VMEXIT
    if (BX_CPU_THIS_PTR in_vmx_guest) {
      VMexit(VMX_VMEXIT_INIT, 0);
    }
#endif
    // reset will clear pending INIT
    reset(BX_RESET_SOFTWARE);

#if BX_SUPPORT_SMP
    if (BX_SMP_PROCESSORS > 1) {
      // if HALT condition remains, return so other CPUs have a chance
      if (BX_CPU_THIS_PTR activity_state != BX_ACTIVITY_STATE_ACTIVE) {
#if BX_DEBUGGER
        BX_CPU_THIS_PTR stop_reason = STOP_CPU_HALTED;
#endif
        return 1; // Return to caller of cpu_loop.
      }
    }
#endif
  }

#if BX_SUPPORT_VMX
  // monitor trap flag: single-step VMEXIT requested by the hypervisor
  if (is_unmasked_event_pending(BX_EVENT_VMX_MONITOR_TRAP_FLAG)) {
    VMexit(VMX_VMEXIT_MONITOR_TRAP_FLAG, 0);
  }
#endif

  // Priority 4: Traps on Previous Instruction
  //   Breakpoints
  //   Debug Trap Exceptions (TF flag set or data/IO breakpoint)
  if (! interrupts_inhibited(BX_INHIBIT_DEBUG)) {
    // A trap may be inhibited on this boundary due to an instruction which loaded SS
#if BX_X86_DEBUGGER
    // Pages with code breakpoints always have async_event=1 and therefore come here
    BX_CPU_THIS_PTR debug_trap |= code_breakpoint_match(get_laddr(BX_SEG_REG_CS, BX_CPU_THIS_PTR prev_rip));
#endif
    // bits 0xf000 of debug_trap hold the trap-causing conditions
    if (BX_CPU_THIS_PTR debug_trap & 0xf000) {
      exception(BX_DB_EXCEPTION, 0); // no error, not interrupt
    }
    else {
      BX_CPU_THIS_PTR debug_trap = 0;
    }
  }

  // Priority 5: External Interrupts
  //   VMX Preemption Timer Expired.
  //   NMI Interrupts
  //   Maskable Hardware Interrupts
  if (interrupts_inhibited(BX_INHIBIT_INTERRUPTS) || ! SVM_GIF) {
    // Processing external interrupts is inhibited on this
    // boundary because of certain instructions like STI.
  }
#if BX_SUPPORT_VMX >= 2
  else if (is_unmasked_event_pending(BX_EVENT_VMX_PREEMPTION_TIMER_EXPIRED)) {
    VMexit(VMX_VMEXIT_VMX_PREEMPTION_TIMER_EXPIRED, 0);
  }
#endif
#if BX_SUPPORT_VMX
  else if (is_unmasked_event_pending(BX_EVENT_VMX_VIRTUAL_NMI)) {
    // NMI-window exiting requested by the hypervisor
    VMexit(VMX_VMEXIT_NMI_WINDOW, 0);
  }
#endif
  else if (is_unmasked_event_pending(BX_EVENT_NMI)) {
#if BX_SUPPORT_SVM
    if (BX_CPU_THIS_PTR in_svm_guest) {
      if (SVM_INTERCEPT(SVM_INTERCEPT0_NMI)) Svm_Vmexit(SVM_VMEXIT_NMI);
    }
#endif
    clear_event(BX_EVENT_NMI);
     mask_event(BX_EVENT_NMI); // block further NMI until next IRET-like unmask
    BX_CPU_THIS_PTR EXT = 1; /* external event */
#if BX_SUPPORT_VMX
    VMexit_Event(BX_NMI, 2, 0, 0);
#endif
    // NMI is always delivered through vector 2
    BX_INSTR_HWINTERRUPT(BX_CPU_ID, 2, BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);
    interrupt(2, BX_NMI, 0, 0);
  }
#if BX_SUPPORT_VMX
  else if (is_pending(BX_EVENT_VMX_INTERRUPT_WINDOW_EXITING) && BX_CPU_THIS_PTR get_IF()) {
    // interrupt-window exiting
    VMexit(VMX_VMEXIT_INTERRUPT_WINDOW, 0);
  }
#endif
  else if (is_unmasked_event_pending(BX_EVENT_PENDING_INTR | BX_EVENT_PENDING_LAPIC_INTR |
                                     BX_EVENT_PENDING_VMX_VIRTUAL_INTR))
  {
    InterruptAcknowledge();
  }
#if BX_SUPPORT_SVM
  else if (is_unmasked_event_pending(BX_EVENT_SVM_VIRQ_PENDING))
  {
    // virtual interrupt acknowledge
    VirtualInterruptAcknowledge();
  }
#endif
  else if (BX_HRQ && BX_DBG_ASYNC_DMA) {
    // NOTE: similar code in ::take_dma()
    // assert Hold Acknowledge (HLDA) and go into a bus hold state
    DEV_dma_raise_hlda();
  }

  if (BX_CPU_THIS_PTR get_TF())
  {
    // TF is set before execution of next instruction.  Schedule
    // a debug trap (#DB) after execution.  After completion of
    // next instruction, the code above will invoke the trap.
    BX_CPU_THIS_PTR debug_trap |= BX_DEBUG_SINGLE_STEP_BIT;
  }

  // Priority 6: Faults from fetching next instruction
  //   Code breakpoint fault
  //   Code segment limit violation (priority 7 on 486/Pentium)
  //   Code page fault (priority 7 on 486/Pentium)
  // (handled in main decode loop)

  // Priority 7: Faults from decoding next instruction
  //   Instruction length > 15 bytes
  //   Illegal opcode
  //   Coprocessor not available
  // (handled in main decode loop etc)

  // Priority 8: Faults on executing an instruction
  //   Floating point execution
  //   Overflow
  //   Bound error
  //   Invalid TSS
  //   Segment not present
  //   Stack fault
  //   General protection
  //   Data page fault
  //   Alignment check
  // (handled by rest of the code)

  // drop the async_event fast-path flag when no condition remains
  if (!((SVM_GIF && unmasked_events_pending()) || BX_CPU_THIS_PTR debug_trap ||
//      BX_CPU_THIS_PTR get_TF() || // implies debug_trap is set
        BX_HRQ))
  {
    BX_CPU_THIS_PTR async_event = 0;
  }

  return 0; // Continue executing cpu_loop.
}
381 
382 // Certain instructions inhibit interrupts, some debug exceptions and single-step traps.
inhibit_interrupts(unsigned mask)383 void BX_CPU_C::inhibit_interrupts(unsigned mask)
384 {
385   // Loading of SS disables interrupts until the next instruction completes
386   // but only under assumption that previous instruction didn't load SS also.
387   if (mask != BX_INHIBIT_INTERRUPTS_BY_MOVSS || ! interrupts_inhibited(BX_INHIBIT_INTERRUPTS_BY_MOVSS)) {
388     BX_DEBUG(("inhibit interrupts mask = %d", mask));
389     BX_CPU_THIS_PTR inhibit_mask = mask;
390     BX_CPU_THIS_PTR inhibit_icount = get_icount() + 1; // inhibit for next instruction
391   }
392 }
393 
interrupts_inhibited(unsigned mask)394 bool BX_CPU_C::interrupts_inhibited(unsigned mask)
395 {
396   return (get_icount() <= BX_CPU_THIS_PTR inhibit_icount) && (BX_CPU_THIS_PTR inhibit_mask & mask) == mask;
397 }
398 
// Deliver a Startup IPI to this CPU.  If the CPU is in the
// wait-for-SIPI state it is started at real-mode address vector*0x100:0
// and the INIT/SMI/NMI events are unmasked again; otherwise the SIPI is
// ignored (logged only).  A VMX guest VMEXITs instead of starting up.
void BX_CPU_C::deliver_SIPI(unsigned vector)
{
  if (BX_CPU_THIS_PTR activity_state == BX_ACTIVITY_STATE_WAIT_FOR_SIPI) {
#if BX_SUPPORT_VMX
    if (BX_CPU_THIS_PTR in_vmx_guest)
      VMexit(VMX_VMEXIT_SIPI, vector);
#endif
    BX_CPU_THIS_PTR activity_state = BX_ACTIVITY_STATE_ACTIVE;
    RIP = 0;
    // CS selector = vector*0x100, so execution starts at vector*0x1000 physical
    load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], vector*0x100);
    unmask_event(BX_EVENT_INIT | BX_EVENT_SMI | BX_EVENT_NMI);
    BX_INFO(("CPU %d started up at %04X:%08X by APIC",
                   BX_CPU_THIS_PTR bx_cpuid, vector*0x100, EIP));
  } else {
    BX_INFO(("CPU %d started up by APIC, but was not halted at that time", BX_CPU_THIS_PTR bx_cpuid));
  }
}
416 
deliver_INIT(void)417 void BX_CPU_C::deliver_INIT(void)
418 {
419   if (! is_masked_event(BX_EVENT_INIT)) {
420     signal_event(BX_EVENT_INIT);
421   }
422 }
423 
// Post a pending NMI event to this CPU (taken by handleAsyncEvent).
void BX_CPU_C::deliver_NMI(void)
{
  signal_event(BX_EVENT_NMI);
}
428 
// Post a pending SMI event to this CPU (taken by handleAsyncEvent).
void BX_CPU_C::deliver_SMI(void)
{
  signal_event(BX_EVENT_SMI);
}
433 
// Assert the external INTR line (PIC interrupt pending).
void BX_CPU_C::raise_INTR(void)
{
  signal_event(BX_EVENT_PENDING_INTR);
}
438 
// Deassert the external INTR line.
void BX_CPU_C::clear_INTR(void)
{
  clear_event(BX_EVENT_PENDING_INTR);
}
443 
#if BX_DEBUGGER

// Debugger helper: service a pending DMA hold request (HRQ) by raising
// HLDA, as the normal cpu_loop would.
void BX_CPU_C::dbg_take_dma(void)
{
  // NOTE: similar code in ::cpu_loop()
  if (BX_HRQ) {
    BX_CPU_THIS_PTR async_event = 1; // set in case INTR is triggered
    DEV_dma_raise_hlda();
  }
}

#endif  // #if BX_DEBUGGER
456