1 /////////////////////////////////////////////////////////////////////////
2 // $Id: ctrl_xfer32.cc 13699 2019-12-20 07:42:07Z sshwarts $
3 /////////////////////////////////////////////////////////////////////////
4 //
5 //  Copyright (C) 2001-2019  The Bochs Project
6 //
7 //  This library is free software; you can redistribute it and/or
8 //  modify it under the terms of the GNU Lesser General Public
9 //  License as published by the Free Software Foundation; either
10 //  version 2 of the License, or (at your option) any later version.
11 //
12 //  This library is distributed in the hope that it will be useful,
13 //  but WITHOUT ANY WARRANTY; without even the implied warranty of
14 //  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15 //  Lesser General Public License for more details.
16 //
17 //  You should have received a copy of the GNU Lesser General Public
18 //  License along with this library; if not, write to the Free Software
//  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 /////////////////////////////////////////////////////////////////////////
21 
22 #define NEED_CPU_REG_SHORTCUTS 1
23 #include "bochs.h"
24 #include "cpu.h"
25 #define LOG_THIS BX_CPU_THIS_PTR
26 
27 #if BX_CPU_LEVEL >= 3
28 
// Perform a near branch to new_EIP (legacy/compatibility modes only;
// never called in 64-bit mode, see the assert).  Raises #GP(0) when the
// target offset lies beyond the scaled CS segment limit.
BX_CPP_INLINE void BX_CPP_AttrRegparmN(1) BX_CPU_C::branch_near32(Bit32u new_EIP)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  // check always, not only in protected mode
  if (new_EIP > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled)
  {
    BX_ERROR(("branch_near32: offset outside of CS limits"));
    exception(BX_GP_EXCEPTION, 0); // presumably does not return here -- EIP stays unchanged on fault
  }

  EIP = new_EIP;

#if BX_SUPPORT_HANDLERS_CHAINING_SPEEDUPS == 0
  // assert magic async_event to stop trace execution
  BX_CPU_THIS_PTR async_event |= BX_ASYNC_EVENT_STOP_TRACE;
#endif
}
47 
// Far CALL with 16-bit selector cs_raw and 32-bit target offset disp32.
// Protected-mode semantics (privilege checks, call gates, ...) are delegated
// to call_protected(); in real/v8086 mode the return CS:EIP is pushed here
// and CS is loaded directly.
void BX_CPU_C::call_far32(bxInstruction_c *i, Bit16u cs_raw, Bit32u disp32)
{
  BX_INSTR_FAR_BRANCH_ORIGIN();

  // control transfer invalidates any prefetched instruction bytes
  invalidate_prefetch_q();

#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_call;
#endif

  // stack-pointer updates below are speculative until RSP_COMMIT, so a
  // fault mid-instruction presumably leaves ESP unchanged -- see macro defs
  RSP_SPECULATIVE;

  if (protected_mode()) {
    call_protected(i, cs_raw, disp32);
  }
  else {
    // CS.LIMIT can't change when in real/v8086 mode
    if (disp32 > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
      BX_ERROR(("%s: instruction pointer not within code segment limits", i->getIaOpcodeNameShort()));
      exception(BX_GP_EXCEPTION, 0);
    }

    // push return address (old CS:EIP) before switching code segments
    push_32(BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
    push_32(EIP);

    load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], cs_raw);
    EIP = disp32;
  }

  RSP_COMMIT;

  BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_CALL,
                      FAR_BRANCH_PREV_CS, FAR_BRANCH_PREV_RIP,
                      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, EIP);
}
83 
// Far JMP with 16-bit selector cs_raw and 32-bit target offset disp32.
// Protected-mode transfers go through jump_protected(); real/v8086 mode
// loads CS and EIP directly after a CS-limit check.
void BX_CPU_C::jmp_far32(bxInstruction_c *i, Bit16u cs_raw, Bit32u disp32)
{
  BX_INSTR_FAR_BRANCH_ORIGIN();

  // control transfer invalidates any prefetched instruction bytes
  invalidate_prefetch_q();

  // jump_protected doesn't affect ESP so it is ESP safe
  if (protected_mode()) {
    jump_protected(i, cs_raw, disp32);
  }
  else {
    // CS.LIMIT can't change when in real/v8086 mode
    if (disp32 > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
      BX_ERROR(("%s: instruction pointer not within code segment limits", i->getIaOpcodeNameShort()));
      exception(BX_GP_EXCEPTION, 0);
    }

    load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], cs_raw);
    EIP = disp32;
  }

  BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_JMP,
                      FAR_BRANCH_PREV_CS, FAR_BRANCH_PREV_RIP,
                      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, EIP);
}
109 
// RET near with a 16-bit immediate: pop the return EIP, then release imm16
// additional bytes of stack (callee-cleans-arguments convention).
// With CET enabled the shadow-stack copy of the return address must match,
// otherwise #CP(NEAR-RET) is raised.
void BX_CPP_AttrRegparmN(1) BX_CPU_C::RETnear32_Iw(bxInstruction_c *i)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_ret;
#endif

  // ESP changes are committed only by RSP_COMMIT below
  RSP_SPECULATIVE;

  Bit32u return_EIP = pop_32();
#if BX_SUPPORT_CET
  // shadow stack must agree with the architectural return address
  if (ShadowStackEnabled(CPL)) {
    Bit32u shadow_EIP = shadow_stack_pop_32();
    if (shadow_EIP != return_EIP)
      exception(BX_CP_EXCEPTION, BX_CP_NEAR_RET);
  }
#endif

  // #GP(0) if the popped return offset is beyond CS.limit
  if (return_EIP > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled)
  {
    BX_ERROR(("%s: offset outside of CS limits", i->getIaOpcodeNameShort()));
    exception(BX_GP_EXCEPTION, 0);
  }
  EIP = return_EIP;

  // release imm16 extra stack bytes; adjustment width follows SS.B (d_b)
  Bit16u imm16 = i->Iw();
  if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
    ESP += imm16;
  else
     SP += imm16;

  RSP_COMMIT;

  BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_RET, PREV_RIP, EIP);

  BX_NEXT_TRACE(i);
}
148 
// RET far with a 16-bit immediate: pop EIP and CS, then release imm16
// additional stack bytes.  Protected-mode returns (privilege changes etc.)
// are delegated to return_protected().
void BX_CPP_AttrRegparmN(1) BX_CPU_C::RETfar32_Iw(bxInstruction_c *i)
{
  invalidate_prefetch_q();

  BX_INSTR_FAR_BRANCH_ORIGIN();

#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_ret;
#endif

  Bit16u imm16 = i->Iw();

  // ESP changes are committed only by RSP_COMMIT below
  RSP_SPECULATIVE;

  if (protected_mode()) {
    return_protected(i, imm16);
  }
  else {
    Bit32u eip    =          pop_32();
    Bit16u cs_raw = (Bit16u) pop_32(); /* 32bit pop, MSW discarded */

    // CS.LIMIT can't change when in real/v8086 mode
    if (eip > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
      BX_ERROR(("%s: instruction pointer not within code segment limits", i->getIaOpcodeNameShort()));
      exception(BX_GP_EXCEPTION, 0);
    }

    load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], cs_raw);
    EIP = eip;

    // release imm16 extra stack bytes; adjustment width follows SS.B (d_b)
    if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
      ESP += imm16;
    else
       SP += imm16;
  }

  RSP_COMMIT;

  BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_RET,
                      FAR_BRANCH_PREV_CS, FAR_BRANCH_PREV_RIP,
                      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);

  BX_NEXT_TRACE(i);
}
193 
// CALL rel32: near relative call.  Pushes the return EIP (and, with CET,
// mirrors it to the shadow stack), then branches to EIP + rel32.
void BX_CPP_AttrRegparmN(1) BX_CPU_C::CALL_Jd(bxInstruction_c *i)
{
#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_call;
#endif

  // ESP changes are committed only by RSP_COMMIT below
  RSP_SPECULATIVE;

  /* push 32 bit EA of next instruction */
  push_32(EIP);
#if BX_SUPPORT_CET
  // zero-displacement CALL (get-EIP idiom) skips the shadow-stack push,
  // presumably per the CET specification -- note the i->Id() condition
  if (ShadowStackEnabled(CPL) && i->Id())
    shadow_stack_push_32(EIP);
#endif

  Bit32u new_EIP = EIP + i->Id();
  branch_near32(new_EIP); // raises #GP(0) if target is beyond CS.limit

  RSP_COMMIT;

  BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_CALL, PREV_RIP, EIP);

  BX_LINK_TRACE(i);
}
218 
CALL32_Ap(bxInstruction_c * i)219 void BX_CPP_AttrRegparmN(1) BX_CPU_C::CALL32_Ap(bxInstruction_c *i)
220 {
221   BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);
222 
223   Bit16u cs_raw = i->Iw2();
224   Bit32u disp32 = i->Id();
225 
226   call_far32(i, cs_raw, disp32);
227 
228   BX_NEXT_TRACE(i);
229 }
230 
// CALL r32 (register form of CALL r/m32): near indirect call through a
// 32-bit register.  The target is read before the return address is pushed.
void BX_CPP_AttrRegparmN(1) BX_CPU_C::CALL_EdR(bxInstruction_c *i)
{
#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_call;
#endif

  Bit32u new_EIP = BX_READ_32BIT_REG(i->dst());

  // ESP changes are committed only by RSP_COMMIT below
  RSP_SPECULATIVE;

  /* push 32 bit EA of next instruction */
  push_32(EIP);
#if BX_SUPPORT_CET
  // indirect call always mirrors the return address to the shadow stack
  if (ShadowStackEnabled(CPL))
    shadow_stack_push_32(EIP);
#endif

  branch_near32(new_EIP); // raises #GP(0) if target is beyond CS.limit

  RSP_COMMIT;

#if BX_SUPPORT_CET
  // CET indirect-branch tracking: target must begin with ENDBRANCH
  track_indirect_if_not_suppressed(i, CPL);
#endif

  BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_CALL_INDIRECT, PREV_RIP, EIP);

  BX_NEXT_TRACE(i);
}
260 
// CALL m16:32 -- far indirect call: new EIP and CS selector are loaded
// from memory (dword offset first, then the word selector at eaddr+4,
// wrapped per the effective address size).
void BX_CPP_AttrRegparmN(1) BX_CPU_C::CALL32_Ep(bxInstruction_c *i)
{
  bx_address eaddr = BX_CPU_RESOLVE_ADDR(i);

  Bit32u op1_32 = read_virtual_dword(i->seg(), eaddr);
  Bit16u cs_raw = read_virtual_word (i->seg(), (eaddr+4) & i->asize_mask());

  call_far32(i, cs_raw, op1_32);

  BX_NEXT_TRACE(i);
}
272 
// JMP rel32: unconditional near jump, EIP-relative sign-extended displacement.
void BX_CPP_AttrRegparmN(1) BX_CPU_C::JMP_Jd(bxInstruction_c *i)
{
  Bit32u new_EIP = EIP + (Bit32s) i->Id();
  branch_near32(new_EIP); // raises #GP(0) if target is beyond CS.limit
  BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_JMP, PREV_RIP, new_EIP);

  BX_LINK_TRACE(i);
}
281 
// JO rel32: jump near if overflow (OF=1).
void BX_CPP_AttrRegparmN(1) BX_CPU_C::JO_Jd(bxInstruction_c *i)
{
  if (get_OF()) {
    Bit32u new_EIP = EIP + (Bit32s) i->Id();
    branch_near32(new_EIP); // raises #GP(0) if target is beyond CS.limit
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_EIP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}
294 
// JNO rel32: jump near if not overflow (OF=0).
void BX_CPP_AttrRegparmN(1) BX_CPU_C::JNO_Jd(bxInstruction_c *i)
{
  if (! get_OF()) {
    Bit32u new_EIP = EIP + (Bit32s) i->Id();
    branch_near32(new_EIP); // raises #GP(0) if target is beyond CS.limit
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_EIP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}
307 
// JB/JC/JNAE rel32: jump near if below/carry (CF=1).
void BX_CPP_AttrRegparmN(1) BX_CPU_C::JB_Jd(bxInstruction_c *i)
{
  if (get_CF()) {
    Bit32u new_EIP = EIP + (Bit32s) i->Id();
    branch_near32(new_EIP); // raises #GP(0) if target is beyond CS.limit
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_EIP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}
320 
// JNB/JNC/JAE rel32: jump near if not below (CF=0).
void BX_CPP_AttrRegparmN(1) BX_CPU_C::JNB_Jd(bxInstruction_c *i)
{
  if (! get_CF()) {
    Bit32u new_EIP = EIP + (Bit32s) i->Id();
    branch_near32(new_EIP); // raises #GP(0) if target is beyond CS.limit
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_EIP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}
333 
// JZ/JE rel32: jump near if zero/equal (ZF=1).
void BX_CPP_AttrRegparmN(1) BX_CPU_C::JZ_Jd(bxInstruction_c *i)
{
  if (get_ZF()) {
    Bit32u new_EIP = EIP + (Bit32s) i->Id();
    branch_near32(new_EIP); // raises #GP(0) if target is beyond CS.limit
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_EIP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}
346 
// JNZ/JNE rel32: jump near if not zero/not equal (ZF=0).
void BX_CPP_AttrRegparmN(1) BX_CPU_C::JNZ_Jd(bxInstruction_c *i)
{
  if (! get_ZF()) {
    Bit32u new_EIP = EIP + (Bit32s) i->Id();
    branch_near32(new_EIP); // raises #GP(0) if target is beyond CS.limit
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_EIP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}
359 
// JBE/JNA rel32: jump near if below or equal (CF=1 or ZF=1).
void BX_CPP_AttrRegparmN(1) BX_CPU_C::JBE_Jd(bxInstruction_c *i)
{
  if (get_CF() || get_ZF()) {
    Bit32u new_EIP = EIP + (Bit32s) i->Id();
    branch_near32(new_EIP); // raises #GP(0) if target is beyond CS.limit
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_EIP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}
372 
// JNBE/JA rel32: jump near if above (CF=0 and ZF=0).
void BX_CPP_AttrRegparmN(1) BX_CPU_C::JNBE_Jd(bxInstruction_c *i)
{
  if (! (get_CF() || get_ZF())) {
    Bit32u new_EIP = EIP + (Bit32s) i->Id();
    branch_near32(new_EIP); // raises #GP(0) if target is beyond CS.limit
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_EIP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}
385 
// JS rel32: jump near if sign (SF=1).
void BX_CPP_AttrRegparmN(1) BX_CPU_C::JS_Jd(bxInstruction_c *i)
{
  if (get_SF()) {
    Bit32u new_EIP = EIP + (Bit32s) i->Id();
    branch_near32(new_EIP); // raises #GP(0) if target is beyond CS.limit
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_EIP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}
398 
// JNS rel32: jump near if not sign (SF=0).
void BX_CPP_AttrRegparmN(1) BX_CPU_C::JNS_Jd(bxInstruction_c *i)
{
  if (! get_SF()) {
    Bit32u new_EIP = EIP + (Bit32s) i->Id();
    branch_near32(new_EIP); // raises #GP(0) if target is beyond CS.limit
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_EIP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}
411 
// JP/JPE rel32: jump near if parity even (PF=1).
void BX_CPP_AttrRegparmN(1) BX_CPU_C::JP_Jd(bxInstruction_c *i)
{
  if (get_PF()) {
    Bit32u new_EIP = EIP + (Bit32s) i->Id();
    branch_near32(new_EIP); // raises #GP(0) if target is beyond CS.limit
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_EIP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}
424 
// JNP/JPO rel32: jump near if parity odd (PF=0).
void BX_CPP_AttrRegparmN(1) BX_CPU_C::JNP_Jd(bxInstruction_c *i)
{
  if (! get_PF()) {
    Bit32u new_EIP = EIP + (Bit32s) i->Id();
    branch_near32(new_EIP); // raises #GP(0) if target is beyond CS.limit
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_EIP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}
437 
// JL/JNGE rel32: jump near if less (signed, SF != OF).
void BX_CPP_AttrRegparmN(1) BX_CPU_C::JL_Jd(bxInstruction_c *i)
{
  if (getB_SF() != getB_OF()) {
    Bit32u new_EIP = EIP + (Bit32s) i->Id();
    branch_near32(new_EIP); // raises #GP(0) if target is beyond CS.limit
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_EIP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}
450 
// JNL/JGE rel32: jump near if not less (signed, SF == OF).
void BX_CPP_AttrRegparmN(1) BX_CPU_C::JNL_Jd(bxInstruction_c *i)
{
  if (getB_SF() == getB_OF()) {
    Bit32u new_EIP = EIP + (Bit32s) i->Id();
    branch_near32(new_EIP); // raises #GP(0) if target is beyond CS.limit
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_EIP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}
463 
// JLE/JNG rel32: jump near if less or equal (signed, ZF=1 or SF != OF).
void BX_CPP_AttrRegparmN(1) BX_CPU_C::JLE_Jd(bxInstruction_c *i)
{
  if (get_ZF() || (getB_SF() != getB_OF())) {
    Bit32u new_EIP = EIP + (Bit32s) i->Id();
    branch_near32(new_EIP); // raises #GP(0) if target is beyond CS.limit
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_EIP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}
476 
// JNLE/JG rel32: jump near if greater (signed, ZF=0 and SF == OF).
void BX_CPP_AttrRegparmN(1) BX_CPU_C::JNLE_Jd(bxInstruction_c *i)
{
  if (! get_ZF() && (getB_SF() == getB_OF())) {
    Bit32u new_EIP = EIP + (Bit32s) i->Id();
    branch_near32(new_EIP); // raises #GP(0) if target is beyond CS.limit
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_EIP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}
489 
JMP_Ap(bxInstruction_c * i)490 void BX_CPP_AttrRegparmN(1) BX_CPU_C::JMP_Ap(bxInstruction_c *i)
491 {
492   BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);
493 
494   Bit32u disp32;
495   Bit16u cs_raw;
496 
497   if (i->os32L()) {
498     disp32 = i->Id();
499   }
500   else {
501     disp32 = i->Iw();
502   }
503   cs_raw = i->Iw2();
504 
505   jmp_far32(i, cs_raw, disp32);
506 
507   BX_NEXT_TRACE(i);
508 }
509 
// JMP r32 (register form of JMP r/m32): near indirect jump through a
// 32-bit register.
void BX_CPP_AttrRegparmN(1) BX_CPU_C::JMP_EdR(bxInstruction_c *i)
{
  Bit32u new_EIP = BX_READ_32BIT_REG(i->dst());
  branch_near32(new_EIP); // raises #GP(0) if target is beyond CS.limit
  BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_JMP_INDIRECT, PREV_RIP, new_EIP);

#if BX_SUPPORT_CET
  // CET indirect-branch tracking: target must begin with ENDBRANCH
  track_indirect_if_not_suppressed(i, CPL);
#endif

  BX_NEXT_TRACE(i);
}
522 
/* Far indirect jump: JMP m16:32 - new EIP and CS selector loaded from
   memory (dword offset first, then the word selector at eaddr+4, wrapped
   per the effective address size) */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::JMP32_Ep(bxInstruction_c *i)
{
  bx_address eaddr = BX_CPU_RESOLVE_ADDR(i);

  Bit32u op1_32 = read_virtual_dword(i->seg(), eaddr);
  Bit16u cs_raw = read_virtual_word (i->seg(), (eaddr+4) & i->asize_mask());

  jmp_far32(i, cs_raw, op1_32);

  BX_NEXT_TRACE(i);
}
535 
// IRET with 32-bit operand size in legacy modes (never long mode).
// Handles SVM/VMX intercept and NMI-unblocking bookkeeping first, then
// dispatches: protected mode -> iret_protected(), v8086 -> stack return
// helper, real mode -> pop EIP/CS/EFLAGS inline.
void BX_CPP_AttrRegparmN(1) BX_CPU_C::IRET32(bxInstruction_c *i)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  invalidate_prefetch_q();

  BX_INSTR_FAR_BRANCH_ORIGIN();

#if BX_SUPPORT_SVM
  // SVM guest may intercept IRET; #VMEXIT happens before any state change
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if (SVM_INTERCEPT(SVM_INTERCEPT0_IRET)) Svm_Vmexit(SVM_VMEXIT_IRET);
  }
#endif

#if BX_SUPPORT_VMX
  // record that this IRET is the source of NMI unblocking while NMIs are
  // masked (cleared again once the instruction completes, see below)
  if (BX_CPU_THIS_PTR in_vmx_guest)
    if (is_masked_event(PIN_VMEXIT(VMX_VM_EXEC_CTRL1_VIRTUAL_NMI) ? BX_EVENT_VMX_VIRTUAL_NMI : BX_EVENT_NMI))
      BX_CPU_THIS_PTR nmi_unblocking_iret = 1;

  if (BX_CPU_THIS_PTR in_vmx_guest && PIN_VMEXIT(VMX_VM_EXEC_CTRL1_NMI_EXITING)) {
    if (PIN_VMEXIT(VMX_VM_EXEC_CTRL1_VIRTUAL_NMI)) unmask_event(BX_EVENT_VMX_VIRTUAL_NMI);
  }
  else
#endif
    unmask_event(BX_EVENT_NMI); // IRET re-enables NMI delivery

#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_iret;
#endif

  // ESP changes are committed only by RSP_COMMIT below
  RSP_SPECULATIVE;

  if (protected_mode()) {
    iret_protected(i);
  }
  else {
    if (v8086_mode()) {
      // IOPL check in stack_return_from_v86()
      iret32_stack_return_from_v86(i);
    }
    else {
      // real mode: pop EIP, CS and EFLAGS (32-bit each) off the stack
      Bit32u eip      =          pop_32();
      Bit16u cs_raw   = (Bit16u) pop_32(); // #SS has higher priority
      Bit32u eflags32 =          pop_32();

      // CS.LIMIT can't change when in real/v8086 mode
      if (eip > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
        BX_ERROR(("%s: instruction pointer not within code segment limits", i->getIaOpcodeNameShort()));
        exception(BX_GP_EXCEPTION, 0);
      }

      load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], cs_raw);
      EIP = eip;
      writeEFlags(eflags32, 0x00257fd5); // VIF, VIP, VM unchanged
    }
  }

  RSP_COMMIT;

#if BX_SUPPORT_VMX
  // instruction completed - clear the mid-IRET NMI-unblocking marker
  BX_CPU_THIS_PTR nmi_unblocking_iret = 0;
#endif

  BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_IRET,
                      FAR_BRANCH_PREV_CS, FAR_BRANCH_PREV_RIP,
                      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, EIP);

  BX_NEXT_TRACE(i);
}
605 
JECXZ_Jb(bxInstruction_c * i)606 void BX_CPP_AttrRegparmN(1) BX_CPU_C::JECXZ_Jb(bxInstruction_c *i)
607 {
608   // it is impossible to get this instruction in long mode
609   BX_ASSERT(i->as64L() == 0);
610 
611   Bit32u temp_ECX;
612 
613   if (i->as32L())
614     temp_ECX = ECX;
615   else
616     temp_ECX = CX;
617 
618   if (temp_ECX == 0) {
619     Bit32u new_EIP = EIP + (Bit32s) i->Id();
620     branch_near32(new_EIP);
621     BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_EIP);
622     BX_LINK_TRACE(i);
623   }
624 
625   BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
626   BX_NEXT_TRACE(i);
627 }
628 
629 //
630 // There is some weirdness in LOOP instructions definition. If an exception
631 // was generated during the instruction execution (for example #GP fault
632 // because EIP was beyond CS segment limits) CPU state should restore the
633 // state prior to instruction execution.
634 //
// The key point is that we are not allowed to decrement the ECX register
// before it is known that no exception can happen.
637 //
638 
// LOOPNE/LOOPNZ rel8: decrement the count register and branch while it is
// non-zero and ZF=0.  The count register is written back only after
// branch_near32() can no longer fault (see the note above this function).
void BX_CPP_AttrRegparmN(1) BX_CPU_C::LOOPNE32_Jb(bxInstruction_c *i)
{
  // it is impossible to get this instruction in long mode
  BX_ASSERT(i->as64L() == 0);

  if (i->as32L()) {
    // 32-bit address size: count in ECX
    Bit32u count = ECX;

    count--;
    if (count != 0 && (get_ZF()==0)) {
      Bit32u new_EIP = EIP + (Bit32s) i->Id();
      branch_near32(new_EIP); // may raise #GP(0); ECX not yet modified
      BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_EIP);
    }
#if BX_INSTRUMENTATION
    else {
      BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
    }
#endif

    ECX = count;
  }
  else {
    // 16-bit address size: count in CX
    Bit16u count = CX;

    count--;
    if (count != 0 && (get_ZF()==0)) {
      Bit32u new_EIP = EIP + (Bit32s) i->Id();
      branch_near32(new_EIP); // may raise #GP(0); CX not yet modified
      BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_EIP);
    }
#if BX_INSTRUMENTATION
    else {
      BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
    }
#endif

    CX = count;
  }

  BX_NEXT_TRACE(i);
}
681 
// LOOPE/LOOPZ rel8: decrement the count register and branch while it is
// non-zero and ZF=1.  The count register is written back only after
// branch_near32() can no longer fault (see the note above LOOPNE32_Jb).
void BX_CPP_AttrRegparmN(1) BX_CPU_C::LOOPE32_Jb(bxInstruction_c *i)
{
  // it is impossible to get this instruction in long mode
  BX_ASSERT(i->as64L() == 0);

  if (i->as32L()) {
    // 32-bit address size: count in ECX
    Bit32u count = ECX;

    count--;
    if (count != 0 && get_ZF()) {
      Bit32u new_EIP = EIP + (Bit32s) i->Id();
      branch_near32(new_EIP); // may raise #GP(0); ECX not yet modified
      BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_EIP);
    }
#if BX_INSTRUMENTATION
    else {
      BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
    }
#endif

    ECX = count;
  }
  else {
    // 16-bit address size: count in CX
    Bit16u count = CX;

    count--;
    if (count != 0 && get_ZF()) {
      Bit32u new_EIP = EIP + (Bit32s) i->Id();
      branch_near32(new_EIP); // may raise #GP(0); CX not yet modified
      BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_EIP);
    }
#if BX_INSTRUMENTATION
    else {
      BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
    }
#endif

    CX = count;
  }

  BX_NEXT_TRACE(i);
}
724 
// LOOP rel8: decrement the count register and branch while it is non-zero.
// The count register is written back only after branch_near32() can no
// longer fault (see the note above LOOPNE32_Jb).
void BX_CPP_AttrRegparmN(1) BX_CPU_C::LOOP32_Jb(bxInstruction_c *i)
{
  // it is impossible to get this instruction in long mode
  BX_ASSERT(i->as64L() == 0);

  if (i->as32L()) {
    // 32-bit address size: count in ECX
    Bit32u count = ECX;

    count--;
    if (count != 0) {
      Bit32u new_EIP = EIP + (Bit32s) i->Id();
      branch_near32(new_EIP); // may raise #GP(0); ECX not yet modified
      BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_EIP);
    }
#if BX_INSTRUMENTATION
    else {
      BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
    }
#endif

    ECX = count;
  }
  else {
    // 16-bit address size: count in CX
    Bit16u count = CX;

    count--;
    if (count != 0) {
      Bit32u new_EIP = EIP + (Bit32s) i->Id();
      branch_near32(new_EIP); // may raise #GP(0); CX not yet modified
      BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_EIP);
    }
#if BX_INSTRUMENTATION
    else {
      BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
    }
#endif

    CX = count;
  }

  BX_NEXT_TRACE(i);
}
767 
768 #endif
769