1 /////////////////////////////////////////////////////////////////////////
2 // $Id: vm8086.cc 13699 2019-12-20 07:42:07Z sshwarts $
3 /////////////////////////////////////////////////////////////////////////
4 //
5 //  Copyright (C) 2001-2012  The Bochs Project
6 //
7 //  This library is free software; you can redistribute it and/or
8 //  modify it under the terms of the GNU Lesser General Public
9 //  License as published by the Free Software Foundation; either
10 //  version 2 of the License, or (at your option) any later version.
11 //
12 //  This library is distributed in the hope that it will be useful,
13 //  but WITHOUT ANY WARRANTY; without even the implied warranty of
14 //  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15 //  Lesser General Public License for more details.
16 //
17 //  You should have received a copy of the GNU Lesser General Public
18 //  License along with this library; if not, write to the Free Software
19 //  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 /////////////////////////////////////////////////////////////////////////
21 
22 #define NEED_CPU_REG_SHORTCUTS 1
23 #include "bochs.h"
24 #include "cpu.h"
25 #define LOG_THIS BX_CPU_THIS_PTR
26 
27 //
28 // Notes:
29 //
30 // The high bits of the 32bit eip image are ignored by
31 // the IRET to VM.  The high bits of the 32bit esp image
32 // are loaded into ESP.  A subsequent push uses
33 // only the low 16bits since it's in VM.  In neither case
34 // did a protection fault occur during actual tests.  This
35 // is contrary to the Intel docs which claim a #GP for
36 // eIP out of code limits.
37 //
38 // IRET to VM does affect IOPL, IF, VM, and RF
39 //
40 
41 #if BX_CPU_LEVEL >= 3
42 
void BX_CPU_C::stack_return_to_v86(Bit32u new_eip, Bit32u raw_cs_selector, Bit32u flags32)
{
  // Final leg of a 32-bit IRET that returns from CPL0 protected mode into
  // virtual-8086 mode.  The caller has already popped the EIP, CS and EFLAGS
  // images (passed in as arguments, with VM set in the EFLAGS image); this
  // routine reads the remaining six dword images (ESP, SS, ES, DS, FS, GS)
  // from the CPL0 stack, commits EFLAGS and all selectors, and rebuilds the
  // segment register caches for v8086 execution via init_v8086_mode().
  Bit32u temp_ESP, new_esp;
  Bit16u raw_es_selector, raw_ds_selector, raw_fs_selector,
         raw_gs_selector, raw_ss_selector;

  // Must be 32bit effective opsize, VM is set in upper 16bits of eFLAGS
  // and CPL = 0 to get here

  BX_ASSERT(CPL == 0);
  BX_ASSERT(protected_mode());

#if BX_SUPPORT_CET
  // If shadow stack or indirect branch tracking at CPL3 in vm8086 then #GP(0)
  if (ShadowStackEnabled(3) || EndbranchEnabled(3)) {
    BX_ERROR(("stack_return_to_v86: CR4.CET and shadow stack controls enabled in v8086 mode !"));
    exception(BX_GP_EXCEPTION, 0);
  }
#endif

  // Layout of the CPL0 stack at this point:
  // ----------------
  // |     | OLD GS | eSP+32
  // |     | OLD FS | eSP+28
  // |     | OLD DS | eSP+24
  // |     | OLD ES | eSP+20
  // |     | OLD SS | eSP+16
  // |  OLD ESP     | eSP+12
  // | OLD EFLAGS   | eSP+8
  // |     | OLD CS | eSP+4
  // |  OLD EIP     | eSP+0
  // ----------------

//
//  if (new_eip > 0xffff) {
//    BX_ERROR(("stack_return_to_v86: EIP not within CS limits !"));
//    exception(BX_GP_EXCEPTION, 0);
//  }

  // Stack address size follows SS.B: a 32-bit stack segment uses the full
  // ESP, a 16-bit one only SP.
  if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
    temp_ESP = ESP;
  else
    temp_ESP = SP;

  // Read all remaining stack images before committing anything:
  // stack_read_dword may raise an exception, and at this point no
  // architectural state has been modified yet.

  // load SS:ESP from stack
  new_esp         =          stack_read_dword(temp_ESP+12);
  raw_ss_selector = (Bit16u) stack_read_dword(temp_ESP+16);

  // load ES,DS,FS,GS from stack
  raw_es_selector = (Bit16u) stack_read_dword(temp_ESP+20);
  raw_ds_selector = (Bit16u) stack_read_dword(temp_ESP+24);
  raw_fs_selector = (Bit16u) stack_read_dword(temp_ESP+28);
  raw_gs_selector = (Bit16u) stack_read_dword(temp_ESP+32);

#if BX_SUPPORT_CET
  if (ShadowStackEnabled(0)) {
    if (SSP & 0x7) {
      BX_ERROR(("stack_return_to_v86: SSP is not 8-byte aligned"));
      exception(BX_CP_EXCEPTION, BX_CP_FAR_RET_IRET);
    }
  }
#endif

  // Commit EFLAGS (including the VM bit) before reloading segments.
  writeEFlags(flags32, EFlagsValidMask);

  // load CS:IP from stack; already read and passed as args
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value = raw_cs_selector;
  // Only the low 16 bits of the popped EIP image are used (see file notes).
  EIP = new_eip & 0xffff;

  // Selector values are stored raw; init_v8086_mode() below derives the
  // cached base/limit/attributes from them (base = selector << 4, etc.).
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].selector.value = raw_es_selector;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].selector.value = raw_ds_selector;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS].selector.value = raw_fs_selector;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS].selector.value = raw_gs_selector;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector.value = raw_ss_selector;
  ESP = new_esp;	// full 32 bit are loaded

  init_v8086_mode();

#if BX_SUPPORT_CET
  // Leaving CPL0: clear the busy token on the supervisor shadow stack.
  if (ShadowStackEnabled(0))
    shadow_stack_atomic_clear_busy(SSP, 0);
#endif
}
125 
// CR4.VME (Virtual-8086 Mode Extensions) exists only on CPU level 5
// (Pentium) and later; on earlier CPUs treat it as permanently clear.
#if BX_CPU_LEVEL >= 5
  #define BX_CR4_VME_ENABLED (BX_CPU_THIS_PTR cr4.get_VME())
#else
  #define BX_CR4_VME_ENABLED (0)
#endif
131 
void BX_CPU_C::iret16_stack_return_from_v86(bxInstruction_c *i)
{
  // 16-bit IRET executed while in virtual-8086 mode.
  //
  // With IOPL < 3 and CR4.VME clear the instruction is privileged: raise
  // #GP(0) so the v86 monitor can emulate it.
  if ((BX_CPU_THIS_PTR get_IOPL() < 3) && (BX_CR4_VME_ENABLED == 0)) {
    // trap to virtual 8086 monitor
    BX_DEBUG(("IRET in vm86 with IOPL != 3, VME = 0"));
    exception(BX_GP_EXCEPTION, 0);
  }

  Bit16u ip, cs_raw, flags16;

  ip      = pop_16();
  cs_raw  = pop_16();
  flags16 = pop_16();

#if BX_CPU_LEVEL >= 5
  // VME-assisted path (CR4.VME = 1, IOPL < 3): IF is virtualized through
  // VIF instead of trapping to the monitor.
  if (BX_CPU_THIS_PTR cr4.get_VME() && BX_CPU_THIS_PTR get_IOPL() < 3)
  {
    // #GP(0) if the popped image would set IF while VIP is pending, or if
    // it would set TF (single-stepping cannot be virtualized).
    if (((flags16 & EFlagsIFMask) && BX_CPU_THIS_PTR get_VIP()) ||
         (flags16 & EFlagsTFMask))
    {
      BX_DEBUG(("iret16_stack_return_from_v86(): #GP(0) in VME mode"));
      exception(BX_GP_EXCEPTION, 0);
    }

    load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], cs_raw);
    EIP = (Bit32u) ip;

    // IF, IOPL unchanged, EFLAGS.VIF = TMP_FLAGS.IF
    Bit32u changeMask = EFlagsOSZAPCMask | EFlagsTFMask |
                            EFlagsDFMask | EFlagsNTMask | EFlagsVIFMask;
    Bit32u flags32 = (Bit32u) flags16;
    if (flags16 & EFlagsIFMask) flags32 |= EFlagsVIFMask;
    writeEFlags(flags32, changeMask);

    return;
  }
#endif

  // IOPL == 3 path: normal v8086 IRET; the popped image may change IF but
  // never IOPL.
  load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], cs_raw);
  EIP = (Bit32u) ip;
  write_flags(flags16, /*IOPL*/ 0, /*IF*/ 1);
}
174 
iret32_stack_return_from_v86(bxInstruction_c * i)175 void BX_CPU_C::iret32_stack_return_from_v86(bxInstruction_c *i)
176 {
177   if (BX_CPU_THIS_PTR get_IOPL() < 3) {
178     // trap to virtual 8086 monitor
179     BX_DEBUG(("IRET in vm86 with IOPL != 3, VME = 0"));
180     exception(BX_GP_EXCEPTION, 0);
181   }
182 
183   Bit32u eip, cs_raw, flags32;
184   // Build a mask of the following bits:
185   // ID,VIP,VIF,AC,VM,RF,x,NT,IOPL,OF,DF,IF,TF,SF,ZF,x,AF,x,PF,x,CF
186   Bit32u change_mask = EFlagsOSZAPCMask | EFlagsTFMask | EFlagsIFMask
187                          | EFlagsDFMask | EFlagsNTMask | EFlagsRFMask;
188 
189 #if BX_CPU_LEVEL >= 4
190   change_mask |= (EFlagsIDMask | EFlagsACMask);  // ID/AC
191 #endif
192 
193   eip     = pop_32();
194   cs_raw  = pop_32();
195   flags32 = pop_32();
196 
197   load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], (Bit16u) cs_raw);
198   EIP = eip;
199   // VIF, VIP, VM, IOPL unchanged
200   writeEFlags(flags32, change_mask);
201 }
202 
int BX_CPU_C::v86_redirect_interrupt(Bit8u vector)
{
  // Handle a software interrupt raised in virtual-8086 mode.
  //
  // Returns 1 when the interrupt was delivered here through the
  // virtual-mode (real-mode style) IVT at linear address 0; returns 0 when
  // the caller must deliver it through the protected-mode IDT.  Raises
  // #GP(0) when neither is permitted (IOPL < 3 and no VME redirection).
#if BX_CPU_LEVEL >= 5
  if (BX_CPU_THIS_PTR cr4.get_VME())
  {
    // The VME interrupt redirection bitmap lives in the TSS, 32 bytes
    // below the I/O permission bitmap (whose offset is the word at TSS+102).
    // One bit per vector; a SET bit means "do NOT redirect".
    bx_address tr_base = BX_CPU_THIS_PTR tr.cache.u.segment.base;
    if (BX_CPU_THIS_PTR tr.cache.u.segment.limit_scaled < 103) {
      BX_ERROR(("v86_redirect_interrupt(): TR.limit < 103 in VME"));
      exception(BX_GP_EXCEPTION, 0);
    }

    Bit32u io_base = system_read_word(tr_base + 102), offset = io_base - 32 + (vector >> 3);
    if (offset > BX_CPU_THIS_PTR tr.cache.u.segment.limit_scaled) {
      BX_ERROR(("v86_redirect_interrupt(): failed to fetch VME redirection bitmap"));
      exception(BX_GP_EXCEPTION, 0);
    }

    Bit8u vme_redirection_bitmap = system_read_byte(tr_base + offset);
    if (!(vme_redirection_bitmap & (1 << (vector & 7))))
    {
      // redirect interrupt through virtual-mode idt
      Bit16u temp_flags = (Bit16u) read_eflags();

      // Fetch the handler's CS:IP from the real-mode style IVT at 0.
      Bit16u temp_CS = system_read_word(vector*4 + 2);
      Bit16u temp_IP = system_read_word(vector*4);

      // With IOPL < 3 the pushed FLAGS image is virtualized: it shows
      // IOPL = 3 and reflects VIF in the IF position.
      if (BX_CPU_THIS_PTR get_IOPL() < 3) {
        temp_flags |= EFlagsIOPLMask;
        if (BX_CPU_THIS_PTR get_VIF())
          temp_flags |=  EFlagsIFMask;
        else
          temp_flags &= ~EFlagsIFMask;
      }

      Bit16u old_IP = IP;
      Bit16u old_CS = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value;

      // Build a real-mode style interrupt frame: FLAGS, then CS:IP.
      push_16(temp_flags);
      // push return address onto new stack
      push_16(old_CS);
      push_16(old_IP);

      load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], (Bit16u) temp_CS);
      EIP = temp_IP;

      // Interrupt entry clears TF and RF; IF is cleared directly at
      // IOPL == 3, otherwise its virtualized counterpart VIF is cleared.
      BX_CPU_THIS_PTR clear_TF();
      BX_CPU_THIS_PTR clear_RF();
      if (BX_CPU_THIS_PTR get_IOPL() == 3)
        BX_CPU_THIS_PTR clear_IF();
      else
        BX_CPU_THIS_PTR clear_VIF();

      return 1;
    }
  }
#endif
  // interrupt is not redirected or VME is OFF
  if (BX_CPU_THIS_PTR get_IOPL() < 3)
  {
    BX_DEBUG(("v86_redirect_interrupt(): interrupt cannot be redirected, generate #GP(0)"));
    exception(BX_GP_EXCEPTION, 0);
  }

  return 0;
}
268 
init_v8086_mode(void)269 void BX_CPU_C::init_v8086_mode(void)
270 {
271   for(unsigned sreg = 0; sreg < 6; sreg++) {
272     BX_CPU_THIS_PTR sregs[sreg].cache.valid   = SegValidCache | SegAccessROK | SegAccessWOK;
273     BX_CPU_THIS_PTR sregs[sreg].cache.p       = 1;
274     BX_CPU_THIS_PTR sregs[sreg].cache.dpl     = 3;
275     BX_CPU_THIS_PTR sregs[sreg].cache.segment = 1;
276     BX_CPU_THIS_PTR sregs[sreg].cache.type    = BX_DATA_READ_WRITE_ACCESSED;
277 
278     BX_CPU_THIS_PTR sregs[sreg].cache.u.segment.base =
279         BX_CPU_THIS_PTR sregs[sreg].selector.value << 4;
280     BX_CPU_THIS_PTR sregs[sreg].cache.u.segment.limit_scaled = 0xffff;
281     BX_CPU_THIS_PTR sregs[sreg].cache.u.segment.g            = 0;
282     BX_CPU_THIS_PTR sregs[sreg].cache.u.segment.d_b          = 0;
283     BX_CPU_THIS_PTR sregs[sreg].cache.u.segment.avl          = 0;
284     BX_CPU_THIS_PTR sregs[sreg].selector.rpl                 = 3;
285   }
286 
287   handleCpuModeChange();
288 
289 #if BX_CPU_LEVEL >= 4
290   handleAlignmentCheck(/* CPL change */);
291 #endif
292 
293   invalidate_stack_cache();
294 }
295 
296 #endif /* BX_CPU_LEVEL >= 3 */
297