1 /////////////////////////////////////////////////////////////////////////
2 // $Id: ctrl_xfer16.cc 13699 2019-12-20 07:42:07Z sshwarts $
3 /////////////////////////////////////////////////////////////////////////
4 //
5 //  Copyright (C) 2001-2019  The Bochs Project
6 //
7 //  This library is free software; you can redistribute it and/or
8 //  modify it under the terms of the GNU Lesser General Public
9 //  License as published by the Free Software Foundation; either
10 //  version 2 of the License, or (at your option) any later version.
11 //
12 //  This library is distributed in the hope that it will be useful,
13 //  but WITHOUT ANY WARRANTY; without even the implied warranty of
14 //  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15 //  Lesser General Public License for more details.
16 //
17 //  You should have received a copy of the GNU Lesser General Public
18 //  License along with this library; if not, write to the Free Software
//  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 /////////////////////////////////////////////////////////////////////////
21 
22 #define NEED_CPU_REG_SHORTCUTS 1
23 #include "bochs.h"
24 #include "cpu.h"
25 #define LOG_THIS BX_CPU_THIS_PTR
26 
// Perform a near branch with a 16-bit target: validate new_IP against the
// CS segment limit (raising #GP(0) on failure) and load it into EIP.
// Callers must not invoke this in long mode (see BX_ASSERT below).
BX_CPP_INLINE void BX_CPP_AttrRegparmN(1) BX_CPU_C::branch_near16(Bit16u new_IP)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  // check always, not only in protected mode
  if (new_IP > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled)
  {
    BX_ERROR(("branch_near16: offset outside of CS limits"));
    exception(BX_GP_EXCEPTION, 0);
  }

  EIP = new_IP;

#if BX_SUPPORT_HANDLERS_CHAINING_SPEEDUPS == 0
  // assert magic async_event to stop trace execution
  BX_CPU_THIS_PTR async_event |= BX_ASYNC_EVENT_STOP_TRACE;
#endif
}
45 
// Far CALL with 16-bit operand size.  In protected mode the descriptor
// checks are delegated to call_protected(); in real/v8086 mode the return
// CS:IP is pushed and the new CS:IP is loaded directly.
void BX_CPU_C::call_far16(bxInstruction_c *i, Bit16u cs_raw, Bit16u disp16)
{
  BX_INSTR_FAR_BRANCH_ORIGIN();

  invalidate_prefetch_q();

#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_call;
#endif

  // stack pointer updates are speculative until RSP_COMMIT, so a fault
  // raised below leaves the stack pointer unchanged
  RSP_SPECULATIVE;

  if (protected_mode()) {
    call_protected(i, cs_raw, disp16);
  }
  else {
    // CS.LIMIT can't change when in real/v8086 mode
    if (disp16 > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
      BX_ERROR(("%s: instruction pointer not within code segment limits", i->getIaOpcodeNameShort()));
      exception(BX_GP_EXCEPTION, 0);
    }

    // push return address: old CS selector first, then old IP
    push_16(BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
    push_16(IP);

    load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], cs_raw);
    EIP = (Bit32u) disp16;
  }

  RSP_COMMIT;

  BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_CALL,
                      FAR_BRANCH_PREV_CS, FAR_BRANCH_PREV_RIP,
                      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);
}
81 
// Far JMP with 16-bit operand size.  In protected mode the descriptor
// checks are delegated to jump_protected(); in real/v8086 mode the new
// CS:IP is loaded directly after a CS.limit check.
void BX_CPU_C::jmp_far16(bxInstruction_c *i, Bit16u cs_raw, Bit16u disp16)
{
  BX_INSTR_FAR_BRANCH_ORIGIN();

  invalidate_prefetch_q();

  // jump_protected doesn't affect RSP so it is RSP safe
  if (protected_mode()) {
    jump_protected(i, cs_raw, disp16);
  }
  else {
    // CS.LIMIT can't change when in real/v8086 mode
    if (disp16 > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
      BX_ERROR(("%s: instruction pointer not within code segment limits", i->getIaOpcodeNameShort()));
      exception(BX_GP_EXCEPTION, 0);
    }

    load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], cs_raw);
    EIP = disp16;
  }

  BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_JMP_INDIRECT,
                      FAR_BRANCH_PREV_CS, FAR_BRANCH_PREV_RIP,
                      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, EIP);
}
107 
// RET imm16 / RET (near, 16-bit): pop the return IP, verify it against
// CS.limit, then release imm16 extra bytes of parameters from the stack.
void BX_CPP_AttrRegparmN(1) BX_CPU_C::RETnear16_Iw(bxInstruction_c *i)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_ret;
#endif

  // stack pointer updates are speculative until RSP_COMMIT, so an
  // exception below leaves the stack pointer unchanged
  RSP_SPECULATIVE;

  Bit16u return_IP = pop_16();
#if BX_SUPPORT_CET
  // shadow stack must agree with the architectural return address,
  // otherwise raise #CP with the NEAR-RET error code
  if (ShadowStackEnabled(CPL)) {
    Bit32u shadow_IP = shadow_stack_pop_32();
    if (shadow_IP != Bit32u(return_IP))
      exception(BX_CP_EXCEPTION, BX_CP_NEAR_RET);
  }
#endif

  if (return_IP > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled)
  {
    BX_ERROR(("%s: offset outside of CS limits", i->getIaOpcodeNameShort()));
    exception(BX_GP_EXCEPTION, 0);
  }

  EIP = return_IP;

  // discard imm16 bytes of stack parameters
  Bit16u imm16 = i->Iw();

  if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b) /* 32bit stack */
    ESP += imm16;
  else
     SP += imm16;

  RSP_COMMIT;

  BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_RET, PREV_RIP, EIP);

  BX_NEXT_TRACE(i);
}
148 
// RETF imm16 / RETF (far, 16-bit): in protected mode delegate to
// return_protected(); in real/v8086 mode pop IP then CS, check CS.limit,
// and release imm16 extra bytes of stack parameters.
void BX_CPP_AttrRegparmN(1) BX_CPU_C::RETfar16_Iw(bxInstruction_c *i)
{
  BX_INSTR_FAR_BRANCH_ORIGIN();

  invalidate_prefetch_q();

#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_ret;
#endif

  // NOTE: immediate is kept signed here; it is added to ESP/SP below
  Bit16s imm16 = (Bit16s) i->Iw();

  // stack pointer updates are speculative until RSP_COMMIT, so a fault
  // raised below leaves the stack pointer unchanged
  RSP_SPECULATIVE;

  if (protected_mode()) {
    return_protected(i, imm16);
  }
  else {
    Bit16u ip     = pop_16();
    Bit16u cs_raw = pop_16();

    // CS.LIMIT can't change when in real/v8086 mode
    if (ip > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
      BX_ERROR(("%s: instruction pointer not within code segment limits", i->getIaOpcodeNameShort()));
      exception(BX_GP_EXCEPTION, 0);
    }

    load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], cs_raw);
    EIP = (Bit32u) ip;

    // discard imm16 bytes of stack parameters
    if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
      ESP += imm16;
    else
       SP += imm16;
  }

  RSP_COMMIT;

  BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_RET,
                      FAR_BRANCH_PREV_CS, FAR_BRANCH_PREV_RIP,
                      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);

  BX_NEXT_TRACE(i);
}
193 
// CALL rel16 — near call with a 16-bit relative displacement.
void BX_CPP_AttrRegparmN(1) BX_CPU_C::CALL_Jw(bxInstruction_c *i)
{
#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_call;
#endif

  // stack pointer updates are speculative until RSP_COMMIT
  RSP_SPECULATIVE;

  /* push 16 bit EA of next instruction */
  push_16(IP);
#if BX_SUPPORT_CET
  // shadow stack push is skipped when the displacement is zero
  // (presumably the "call next instruction" idiom) — TODO confirm
  if (ShadowStackEnabled(CPL) && i->Iw())
    shadow_stack_push_32(IP);
#endif

  Bit16u new_IP = IP + i->Iw();
  branch_near16(new_IP);   // may raise #GP(0) before RSP_COMMIT

  RSP_COMMIT;

  BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_CALL, PREV_RIP, EIP);

  BX_LINK_TRACE(i);
}
218 
CALL16_Ap(bxInstruction_c * i)219 void BX_CPP_AttrRegparmN(1) BX_CPU_C::CALL16_Ap(bxInstruction_c *i)
220 {
221   BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);
222 
223   Bit16u disp16 = i->Iw();
224   Bit16u cs_raw = i->Iw2();
225 
226   call_far16(i, cs_raw, disp16);
227 
228   BX_NEXT_TRACE(i);
229 }
230 
// CALL r/m16 (register form) — near indirect call through a 16-bit
// general-purpose register.
void BX_CPP_AttrRegparmN(1) BX_CPU_C::CALL_EwR(bxInstruction_c *i)
{
  Bit16u new_IP = BX_READ_16BIT_REG(i->dst());

#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_call;
#endif

  // stack pointer updates are speculative until RSP_COMMIT
  RSP_SPECULATIVE;

  /* push 16 bit EA of next instruction */
  push_16(IP);
#if BX_SUPPORT_CET
  if (ShadowStackEnabled(CPL))
    shadow_stack_push_32(IP);
#endif

  branch_near16(new_IP);   // may raise #GP(0) before RSP_COMMIT

  RSP_COMMIT;

#if BX_SUPPORT_CET
  // CET indirect branch tracking for this indirect call
  track_indirect_if_not_suppressed(i, CPL);
#endif

  BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_CALL_INDIRECT, PREV_RIP, EIP);

  BX_NEXT_TRACE(i);
}
260 
CALL16_Ep(bxInstruction_c * i)261 void BX_CPP_AttrRegparmN(1) BX_CPU_C::CALL16_Ep(bxInstruction_c *i)
262 {
263   bx_address eaddr = BX_CPU_RESOLVE_ADDR(i);
264 
265   Bit16u op1_16 = read_virtual_word(i->seg(), eaddr);
266   Bit16u cs_raw = read_virtual_word(i->seg(), (eaddr+2) & i->asize_mask());
267 
268   call_far16(i, cs_raw, op1_16);
269 
270   BX_NEXT_TRACE(i);
271 }
272 
JMP_Jw(bxInstruction_c * i)273 void BX_CPP_AttrRegparmN(1) BX_CPU_C::JMP_Jw(bxInstruction_c *i)
274 {
275   Bit16u new_IP = IP + i->Iw();
276   branch_near16(new_IP);
277   BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_JMP, PREV_RIP, new_IP);
278 
279   BX_LINK_TRACE(i);
280 }
281 
JO_Jw(bxInstruction_c * i)282 void BX_CPP_AttrRegparmN(1) BX_CPU_C::JO_Jw(bxInstruction_c *i)
283 {
284   if (get_OF()) {
285     Bit16u new_IP = IP + i->Iw();
286     branch_near16(new_IP);
287     BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_IP);
288     BX_LINK_TRACE(i);
289   }
290 
291   BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
292   BX_NEXT_INSTR(i); // trace can continue over non-taken branch
293 }
294 
JNO_Jw(bxInstruction_c * i)295 void BX_CPP_AttrRegparmN(1) BX_CPU_C::JNO_Jw(bxInstruction_c *i)
296 {
297   if (! get_OF()) {
298     Bit16u new_IP = IP + i->Iw();
299     branch_near16(new_IP);
300     BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_IP);
301     BX_LINK_TRACE(i);
302   }
303 
304   BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
305   BX_NEXT_INSTR(i); // trace can continue over non-taken branch
306 }
307 
JB_Jw(bxInstruction_c * i)308 void BX_CPP_AttrRegparmN(1) BX_CPU_C::JB_Jw(bxInstruction_c *i)
309 {
310   if (get_CF()) {
311     Bit16u new_IP = IP + i->Iw();
312     branch_near16(new_IP);
313     BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_IP);
314     BX_LINK_TRACE(i);
315   }
316 
317   BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
318   BX_NEXT_INSTR(i); // trace can continue over non-taken branch
319 }
320 
JNB_Jw(bxInstruction_c * i)321 void BX_CPP_AttrRegparmN(1) BX_CPU_C::JNB_Jw(bxInstruction_c *i)
322 {
323   if (! get_CF()) {
324     Bit16u new_IP = IP + i->Iw();
325     branch_near16(new_IP);
326     BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_IP);
327     BX_LINK_TRACE(i);
328   }
329 
330   BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
331   BX_NEXT_INSTR(i); // trace can continue over non-taken branch
332 }
333 
JZ_Jw(bxInstruction_c * i)334 void BX_CPP_AttrRegparmN(1) BX_CPU_C::JZ_Jw(bxInstruction_c *i)
335 {
336   if (get_ZF()) {
337     Bit16u new_IP = IP + i->Iw();
338     branch_near16(new_IP);
339     BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_IP);
340     BX_LINK_TRACE(i);
341   }
342 
343   BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
344   BX_NEXT_INSTR(i); // trace can continue over non-taken branch
345 }
346 
JNZ_Jw(bxInstruction_c * i)347 void BX_CPP_AttrRegparmN(1) BX_CPU_C::JNZ_Jw(bxInstruction_c *i)
348 {
349   if (! get_ZF()) {
350     Bit16u new_IP = IP + i->Iw();
351     branch_near16(new_IP);
352     BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_IP);
353     BX_LINK_TRACE(i);
354   }
355 
356   BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
357   BX_NEXT_INSTR(i); // trace can continue over non-taken branch
358 }
359 
JBE_Jw(bxInstruction_c * i)360 void BX_CPP_AttrRegparmN(1) BX_CPU_C::JBE_Jw(bxInstruction_c *i)
361 {
362   if (get_CF() || get_ZF()) {
363     Bit16u new_IP = IP + i->Iw();
364     branch_near16(new_IP);
365     BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_IP);
366     BX_LINK_TRACE(i);
367   }
368 
369   BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
370   BX_NEXT_INSTR(i); // trace can continue over non-taken branch
371 }
372 
JNBE_Jw(bxInstruction_c * i)373 void BX_CPP_AttrRegparmN(1) BX_CPU_C::JNBE_Jw(bxInstruction_c *i)
374 {
375   if (! (get_CF() || get_ZF())) {
376     Bit16u new_IP = IP + i->Iw();
377     branch_near16(new_IP);
378     BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_IP);
379     BX_LINK_TRACE(i);
380   }
381 
382   BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
383   BX_NEXT_INSTR(i); // trace can continue over non-taken branch
384 }
385 
JS_Jw(bxInstruction_c * i)386 void BX_CPP_AttrRegparmN(1) BX_CPU_C::JS_Jw(bxInstruction_c *i)
387 {
388   if (get_SF()) {
389     Bit16u new_IP = IP + i->Iw();
390     branch_near16(new_IP);
391     BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_IP);
392     BX_LINK_TRACE(i);
393   }
394 
395   BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
396   BX_NEXT_INSTR(i); // trace can continue over non-taken branch
397 }
398 
JNS_Jw(bxInstruction_c * i)399 void BX_CPP_AttrRegparmN(1) BX_CPU_C::JNS_Jw(bxInstruction_c *i)
400 {
401   if (! get_SF()) {
402     Bit16u new_IP = IP + i->Iw();
403     branch_near16(new_IP);
404     BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_IP);
405     BX_LINK_TRACE(i);
406   }
407 
408   BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
409   BX_NEXT_INSTR(i); // trace can continue over non-taken branch
410 }
411 
JP_Jw(bxInstruction_c * i)412 void BX_CPP_AttrRegparmN(1) BX_CPU_C::JP_Jw(bxInstruction_c *i)
413 {
414   if (get_PF()) {
415     Bit16u new_IP = IP + i->Iw();
416     branch_near16(new_IP);
417     BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_IP);
418     BX_LINK_TRACE(i);
419   }
420 
421   BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
422   BX_NEXT_INSTR(i); // trace can continue over non-taken branch
423 }
424 
JNP_Jw(bxInstruction_c * i)425 void BX_CPP_AttrRegparmN(1) BX_CPU_C::JNP_Jw(bxInstruction_c *i)
426 {
427   if (! get_PF()) {
428     Bit16u new_IP = IP + i->Iw();
429     branch_near16(new_IP);
430     BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_IP);
431     BX_LINK_TRACE(i);
432   }
433 
434   BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
435   BX_NEXT_INSTR(i); // trace can continue over non-taken branch
436 }
437 
JL_Jw(bxInstruction_c * i)438 void BX_CPP_AttrRegparmN(1) BX_CPU_C::JL_Jw(bxInstruction_c *i)
439 {
440   if (getB_SF() != getB_OF()) {
441     Bit16u new_IP = IP + i->Iw();
442     branch_near16(new_IP);
443     BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_IP);
444     BX_LINK_TRACE(i);
445   }
446 
447   BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
448   BX_NEXT_INSTR(i); // trace can continue over non-taken branch
449 }
450 
JNL_Jw(bxInstruction_c * i)451 void BX_CPP_AttrRegparmN(1) BX_CPU_C::JNL_Jw(bxInstruction_c *i)
452 {
453   if (getB_SF() == getB_OF()) {
454     Bit16u new_IP = IP + i->Iw();
455     branch_near16(new_IP);
456     BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_IP);
457     BX_LINK_TRACE(i);
458   }
459 
460   BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
461   BX_NEXT_INSTR(i); // trace can continue over non-taken branch
462 }
463 
JLE_Jw(bxInstruction_c * i)464 void BX_CPP_AttrRegparmN(1) BX_CPU_C::JLE_Jw(bxInstruction_c *i)
465 {
466   if (get_ZF() || (getB_SF() != getB_OF())) {
467     Bit16u new_IP = IP + i->Iw();
468     branch_near16(new_IP);
469     BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_IP);
470     BX_LINK_TRACE(i);
471   }
472 
473   BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
474   BX_NEXT_INSTR(i); // trace can continue over non-taken branch
475 }
476 
JNLE_Jw(bxInstruction_c * i)477 void BX_CPP_AttrRegparmN(1) BX_CPU_C::JNLE_Jw(bxInstruction_c *i)
478 {
479   if (! get_ZF() && (getB_SF() == getB_OF())) {
480     Bit16u new_IP = IP + i->Iw();
481     branch_near16(new_IP);
482     BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_IP);
483     BX_LINK_TRACE(i);
484   }
485 
486   BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
487   BX_NEXT_INSTR(i); // trace can continue over non-taken branch
488 }
489 
JMP_EwR(bxInstruction_c * i)490 void BX_CPP_AttrRegparmN(1) BX_CPU_C::JMP_EwR(bxInstruction_c *i)
491 {
492   Bit16u new_IP = BX_READ_16BIT_REG(i->dst());
493   branch_near16(new_IP);
494   BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_JMP_INDIRECT, PREV_RIP, new_IP);
495 
496 #if BX_SUPPORT_CET
497   track_indirect_if_not_suppressed(i, CPL);
498 #endif
499 
500   BX_NEXT_TRACE(i);
501 }
502 
503 /* Far indirect jump */
JMP16_Ep(bxInstruction_c * i)504 void BX_CPP_AttrRegparmN(1) BX_CPU_C::JMP16_Ep(bxInstruction_c *i)
505 {
506   bx_address eaddr = BX_CPU_RESOLVE_ADDR(i);
507 
508   Bit16u op1_16 = read_virtual_word(i->seg(), eaddr);
509   Bit16u cs_raw = read_virtual_word(i->seg(), (eaddr+2) & i->asize_mask());
510 
511   jmp_far16(i, cs_raw, op1_16);
512 
513   BX_NEXT_TRACE(i);
514 }
515 
// IRET with 16-bit operand size.  Handles SVM/VMX intercepts and NMI
// unmasking, then returns via the protected-mode, v8086 or real-mode path.
void BX_CPP_AttrRegparmN(1) BX_CPU_C::IRET16(bxInstruction_c *i)
{
  BX_INSTR_FAR_BRANCH_ORIGIN();

  invalidate_prefetch_q();

#if BX_SUPPORT_SVM
  // SVM guests may intercept IRET
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if (SVM_INTERCEPT(SVM_INTERCEPT0_IRET)) Svm_Vmexit(SVM_VMEXIT_IRET);
  }
#endif

#if BX_SUPPORT_VMX
  // remember that this IRET is unblocking a masked (virtual) NMI; the
  // flag is cleared again once the IRET completed below
  if (BX_CPU_THIS_PTR in_vmx_guest)
    if (is_masked_event(PIN_VMEXIT(VMX_VM_EXEC_CTRL1_VIRTUAL_NMI) ? BX_EVENT_VMX_VIRTUAL_NMI : BX_EVENT_NMI))
      BX_CPU_THIS_PTR nmi_unblocking_iret = 1;

  if (BX_CPU_THIS_PTR in_vmx_guest && PIN_VMEXIT(VMX_VM_EXEC_CTRL1_NMI_EXITING)) {
    if (PIN_VMEXIT(VMX_VM_EXEC_CTRL1_VIRTUAL_NMI)) unmask_event(BX_EVENT_VMX_VIRTUAL_NMI);
  }
  else
#endif
    unmask_event(BX_EVENT_NMI); // IRET re-enables NMI delivery

#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_iret;
#endif

  // stack pointer updates are speculative until RSP_COMMIT
  RSP_SPECULATIVE;

  if (protected_mode()) {
    iret_protected(i);
  }
  else {
    if (v8086_mode()) {
      // IOPL check in stack_return_from_v86()
      iret16_stack_return_from_v86(i);
    }
    else {
      // real mode: pop IP, CS and FLAGS in that order
      Bit16u ip     = pop_16();
      Bit16u cs_raw = pop_16(); // #SS has higher priority
      Bit16u flags  = pop_16();

      // CS.LIMIT can't change when in real/v8086 mode
      if(ip > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
        BX_ERROR(("%s: instruction pointer not within code segment limits", i->getIaOpcodeNameShort()));
        exception(BX_GP_EXCEPTION, 0);
      }

      load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], cs_raw);
      EIP = (Bit32u) ip;
      write_flags(flags, /* change IOPL? */ 1, /* change IF? */ 1);
    }
  }

  RSP_COMMIT;

#if BX_SUPPORT_VMX
  BX_CPU_THIS_PTR nmi_unblocking_iret = 0;
#endif

  BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_IRET,
                      FAR_BRANCH_PREV_CS, FAR_BRANCH_PREV_RIP,
                      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, EIP);

  BX_NEXT_TRACE(i);
}
583 
JCXZ_Jb(bxInstruction_c * i)584 void BX_CPP_AttrRegparmN(1) BX_CPU_C::JCXZ_Jb(bxInstruction_c *i)
585 {
586   // it is impossible to get this instruction in long mode
587   BX_ASSERT(i->as64L() == 0);
588 
589   Bit32u temp_ECX;
590 
591   if (i->as32L())
592     temp_ECX = ECX;
593   else
594     temp_ECX = CX;
595 
596   if (temp_ECX == 0) {
597     Bit16u new_IP = IP + i->Iw();
598     branch_near16(new_IP);
599     BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_IP);
600     BX_LINK_TRACE(i);
601   }
602 
603   BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
604   BX_NEXT_TRACE(i);
605 }
606 
607 //
608 // There is some weirdness in LOOP instructions definition. If an exception
609 // was generated during the instruction execution (for example #GP fault
610 // because EIP was beyond CS segment limits) CPU state should restore the
611 // state prior to instruction execution.
612 //
// The final point is that we are not allowed to decrement the ECX register
// before it is known that no exception can happen.
615 //
616 
// LOOPNE/LOOPNZ rel8 (16-bit IP): decrement the count register (ECX or CX
// per address size) and branch while the result is non-zero and ZF=0.
// The decremented value is written back only after branch_near16() had
// its chance to raise #GP — see the note above about exception recovery.
void BX_CPP_AttrRegparmN(1) BX_CPU_C::LOOPNE16_Jb(bxInstruction_c *i)
{
  // it is impossible to get this instruction in long mode
  BX_ASSERT(i->as64L() == 0);

  if (i->as32L()) {
    Bit32u count = ECX;

    count--;
    if (count != 0 && (get_ZF()==0)) {
      Bit16u new_IP = IP + i->Iw();
      branch_near16(new_IP);
      BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_IP);
    }
#if BX_INSTRUMENTATION
    else {
      BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
    }
#endif

    // commit the decrement only after no exception was raised
    ECX = count;
  }
  else {
    Bit16u count = CX;

    count--;
    if (count != 0 && (get_ZF()==0)) {
      Bit16u new_IP = IP + i->Iw();
      branch_near16(new_IP);
      BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_IP);
    }
#if BX_INSTRUMENTATION
    else {
      BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
    }
#endif

    // commit the decrement only after no exception was raised
    CX = count;
  }

  BX_NEXT_TRACE(i);
}
659 
// LOOPE/LOOPZ rel8 (16-bit IP): decrement the count register (ECX or CX
// per address size) and branch while the result is non-zero and ZF=1.
// The decremented value is written back only after branch_near16() had
// its chance to raise #GP — see the note above about exception recovery.
void BX_CPP_AttrRegparmN(1) BX_CPU_C::LOOPE16_Jb(bxInstruction_c *i)
{
  // it is impossible to get this instruction in long mode
  BX_ASSERT(i->as64L() == 0);

  if (i->as32L()) {
    Bit32u count = ECX;

    count--;
    if (count != 0 && get_ZF()) {
      Bit16u new_IP = IP + i->Iw();
      branch_near16(new_IP);
      BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_IP);
    }
#if BX_INSTRUMENTATION
    else {
      BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
    }
#endif

    // commit the decrement only after no exception was raised
    ECX = count;
  }
  else {
    Bit16u count = CX;

    count--;
    if (count != 0 && get_ZF()) {
      Bit16u new_IP = IP + i->Iw();
      branch_near16(new_IP);
      BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_IP);
    }
#if BX_INSTRUMENTATION
    else {
      BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
    }
#endif

    // commit the decrement only after no exception was raised
    CX = count;
  }

  BX_NEXT_TRACE(i);
}
702 
// LOOP rel8 (16-bit IP): decrement the count register (ECX or CX per
// address size) and branch while the result is non-zero.
// The decremented value is written back only after branch_near16() had
// its chance to raise #GP — see the note above about exception recovery.
void BX_CPP_AttrRegparmN(1) BX_CPU_C::LOOP16_Jb(bxInstruction_c *i)
{
  // it is impossible to get this instruction in long mode
  BX_ASSERT(i->as64L() == 0);

  if (i->as32L()) {
    Bit32u count = ECX;

    count--;
    if (count != 0) {
      Bit16u new_IP = IP + i->Iw();
      branch_near16(new_IP);
      BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_IP);
    }
#if BX_INSTRUMENTATION
    else {
      BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
    }
#endif

    // commit the decrement only after no exception was raised
    ECX = count;
  }
  else {
    Bit16u count = CX;

    count--;
    if (count != 0) {
      Bit16u new_IP = IP + i->Iw();
      branch_near16(new_IP);
      BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_IP);
    }
#if BX_INSTRUMENTATION
    else {
      BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
    }
#endif

    // commit the decrement only after no exception was raised
    CX = count;
  }

  BX_NEXT_TRACE(i);
}
745