/////////////////////////////////////////////////////////////////////////
// $Id: access2.cc 14086 2021-01-30 08:35:35Z sshwarts $
/////////////////////////////////////////////////////////////////////////
//
//   Copyright (c) 2008-2019 Stanislav Shwartsman
//          Written by Stanislav Shwartsman [sshwarts at sourceforge net]
//
//  This library is free software; you can redistribute it and/or
//  modify it under the terms of the GNU Lesser General Public
//  License as published by the Free Software Foundation; either
//  version 2 of the License, or (at your option) any later version.
//
//  This library is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
//  Lesser General Public License for more details.
//
//  You should have received a copy of the GNU Lesser General Public
//  License along with this library; if not, write to the Free Software
//  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
//
/////////////////////////////////////////////////////////////////////////

#define NEED_CPU_REG_SHORTCUTS 1
#include "bochs.h"
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR

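// Each accessor below follows the same pattern: first try the fast path
// through the data TLB -- if the TLB entry maps the page and grants the
// required read/write permission for the current privilege level, the
// access goes directly through the cached host pointer.  Otherwise the
// slow path (access_read_linear / access_write_linear) performs the full
// translation and raises the appropriate exception when it fails.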
  void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_linear_byte(unsigned s, bx_address laddr, Bit8u data)
{
  bx_address lpf = LPFOf(laddr);
  bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 0);
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access from this CPL
    if (isWriteOK(tlbEntry, USER_PL)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      bx_phy_address pAddr = tlbEntry->ppf | pageOffset;
      BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, pAddr, 1, tlbEntry->get_memtype(), BX_WRITE, (Bit8u*) &data);
      Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
      pageWriteStampTable.decWriteStamp(pAddr, 1);
      *hostAddr = data;
      return;
    }
  }

  if (access_write_linear(laddr, 1, CPL, BX_WRITE, 0x0, (void *) &data) < 0)
    exception(int_number(s), 0);
}

  void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_linear_word(unsigned s, bx_address laddr, Bit16u data)
{
  bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 1);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  bx_address lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
#else
  bx_address lpf = LPFOf(laddr);
#endif
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access from this CPL
    if (isWriteOK(tlbEntry, USER_PL)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      bx_phy_address pAddr = tlbEntry->ppf | pageOffset;
      BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, pAddr, 2, tlbEntry->get_memtype(), BX_WRITE, (Bit8u*) &data);
      Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
      pageWriteStampTable.decWriteStamp(pAddr, 2);
      WriteHostWordToLittleEndian(hostAddr, data);
      return;
    }
  }

  if (access_write_linear(laddr, 2, CPL, BX_WRITE, 0x1, (void *) &data) < 0)
    exception(int_number(s), 0);
}

  void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_linear_dword(unsigned s, bx_address laddr, Bit32u data)
{
  bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 3);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  bx_address lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
#else
  bx_address lpf = LPFOf(laddr);
#endif
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access from this CPL
    if (isWriteOK(tlbEntry, USER_PL)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      bx_phy_address pAddr = tlbEntry->ppf | pageOffset;
      BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, pAddr, 4, tlbEntry->get_memtype(), BX_WRITE, (Bit8u*) &data);
      Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
      pageWriteStampTable.decWriteStamp(pAddr, 4);
      WriteHostDWordToLittleEndian(hostAddr, data);
      return;
    }
  }

  if (access_write_linear(laddr, 4, CPL, BX_WRITE, 0x3, (void *) &data) < 0)
    exception(int_number(s), 0);
}

  void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_linear_qword(unsigned s, bx_address laddr, Bit64u data)
{
  bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 7);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  bx_address lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
#else
  bx_address lpf = LPFOf(laddr);
#endif
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access from this CPL
    if (isWriteOK(tlbEntry, USER_PL)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      bx_phy_address pAddr = tlbEntry->ppf | pageOffset;
      BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, pAddr, 8, tlbEntry->get_memtype(), BX_WRITE, (Bit8u*) &data);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
      pageWriteStampTable.decWriteStamp(pAddr, 8);
      WriteHostQWordToLittleEndian(hostAddr, data);
      return;
    }
  }

  if (access_write_linear(laddr, 8, CPL, BX_WRITE, 0x7, (void *) &data) < 0)
    exception(int_number(s), 0);
}

#if BX_CPU_LEVEL >= 6

  void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_linear_xmmword(unsigned s, bx_address laddr, const BxPackedXmmRegister *data)
{
  bx_address lpf = LPFOf(laddr);
  bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 15);
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access from this CPL
    if (isWriteOK(tlbEntry, USER_PL)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      bx_phy_address pAddr = tlbEntry->ppf | pageOffset;
      BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, pAddr, 16, tlbEntry->get_memtype(), BX_WRITE, (Bit8u*) data);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
      pageWriteStampTable.decWriteStamp(pAddr, 16);
      WriteHostQWordToLittleEndian(hostAddr,   data->xmm64u(0));
      WriteHostQWordToLittleEndian(hostAddr+1, data->xmm64u(1));
      return;
    }
  }

  if (access_write_linear(laddr, 16, CPL, BX_WRITE, 0x0, (void *) data) < 0)
    exception(int_number(s), 0);
}

  void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_linear_xmmword_aligned(unsigned s, bx_address laddr, const BxPackedXmmRegister *data)
{
  bx_address lpf = AlignedAccessLPFOf(laddr, 15);
  bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 0);
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access from this CPL
    if (isWriteOK(tlbEntry, USER_PL)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      bx_phy_address pAddr = tlbEntry->ppf | pageOffset;
      BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, pAddr, 16, tlbEntry->get_memtype(), BX_WRITE, (Bit8u*) data);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
      pageWriteStampTable.decWriteStamp(pAddr, 16);
      WriteHostQWordToLittleEndian(hostAddr,   data->xmm64u(0));
      WriteHostQWordToLittleEndian(hostAddr+1, data->xmm64u(1));
      return;
    }
  }

  if (laddr & 15) {
    BX_ERROR(("write_linear_xmmword_aligned(): #GP misaligned access"));
    exception(BX_GP_EXCEPTION, 0);
  }

  if (access_write_linear(laddr, 16, CPL, BX_WRITE, 0x0, (void *) data) < 0)
    exception(int_number(s), 0);
}

  void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_linear_ymmword(unsigned s, bx_address laddr, const BxPackedYmmRegister *data)
{
  bx_address lpf = LPFOf(laddr);
  bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 31);
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access from this CPL
    if (isWriteOK(tlbEntry, USER_PL)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      bx_phy_address pAddr = tlbEntry->ppf | pageOffset;
      BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, pAddr, 32, tlbEntry->get_memtype(), BX_WRITE, (Bit8u*) data);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
      pageWriteStampTable.decWriteStamp(pAddr, 32);
      for (unsigned n = 0; n < 4; n++) {
        WriteHostQWordToLittleEndian(hostAddr+n, data->ymm64u(n));
      }
      return;
    }
  }

  if (access_write_linear(laddr, 32, CPL, BX_WRITE, 0x0, (void *) data) < 0)
    exception(int_number(s), 0);
}

  void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_linear_ymmword_aligned(unsigned s, bx_address laddr, const BxPackedYmmRegister *data)
{
  bx_address lpf = AlignedAccessLPFOf(laddr, 31);
  bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 0);
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access from this CPL
    if (isWriteOK(tlbEntry, USER_PL)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      bx_phy_address pAddr = tlbEntry->ppf | pageOffset;
      BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, pAddr, 32, tlbEntry->get_memtype(), BX_WRITE, (Bit8u*) data);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
      pageWriteStampTable.decWriteStamp(pAddr, 32);
      for (unsigned n = 0; n < 4; n++) {
        WriteHostQWordToLittleEndian(hostAddr+n, data->ymm64u(n));
      }
      return;
    }
  }

  if (laddr & 31) {
    BX_ERROR(("write_linear_ymmword_aligned(): #GP misaligned access"));
    exception(BX_GP_EXCEPTION, 0);
  }

  if (access_write_linear(laddr, 32, CPL, BX_WRITE, 0x0, (void *) data) < 0)
    exception(int_number(s), 0);
}

  void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_linear_zmmword(unsigned s, bx_address laddr, const BxPackedZmmRegister *data)
{
  bx_address lpf = LPFOf(laddr);
  bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 63);
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access from this CPL
    if (isWriteOK(tlbEntry, USER_PL)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      bx_phy_address pAddr = tlbEntry->ppf | pageOffset;
      BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, pAddr, 64, tlbEntry->get_memtype(), BX_WRITE, (Bit8u*) data);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
      pageWriteStampTable.decWriteStamp(pAddr, 64);
      for (unsigned n = 0; n < 8; n++) {
        WriteHostQWordToLittleEndian(hostAddr+n, data->zmm64u(n));
      }
      return;
    }
  }

  if (access_write_linear(laddr, 64, CPL, BX_WRITE, 0x0, (void *) data) < 0)
    exception(int_number(s), 0);
}

  void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_linear_zmmword_aligned(unsigned s, bx_address laddr, const BxPackedZmmRegister *data)
{
  bx_address lpf = AlignedAccessLPFOf(laddr, 63);
  bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 0);
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access from this CPL
    if (isWriteOK(tlbEntry, USER_PL)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      bx_phy_address pAddr = tlbEntry->ppf | pageOffset;
      BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, pAddr, 64, tlbEntry->get_memtype(), BX_WRITE, (Bit8u*) data);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
      pageWriteStampTable.decWriteStamp(pAddr, 64);
      for (unsigned n = 0; n < 8; n++) {
        WriteHostQWordToLittleEndian(hostAddr+n, data->zmm64u(n));
      }
      return;
    }
  }

  if (laddr & 63) {
    BX_ERROR(("write_linear_zmmword_aligned(): #GP misaligned access"));
    exception(BX_GP_EXCEPTION, 0);
  }

  if (access_write_linear(laddr, 64, CPL, BX_WRITE, 0x0, (void *) data) < 0)
    exception(int_number(s), 0);
}

#endif

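// tickle_read_linear() verifies that a one-byte read at laddr would succeed
// (canonical check, translation and read permission for the current
// privilege level) without actually transferring any data.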
  void BX_CPP_AttrRegparmN(2)
BX_CPU_C::tickle_read_linear(unsigned s, bx_address laddr)
{
  bx_address lpf = LPFOf(laddr);
  bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 0);
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us read access from this CPL
    if (isReadOK(tlbEntry, USER_PL)) return;
  }

#if BX_SUPPORT_X86_64
  if (! IsCanonical(laddr)) {
    BX_ERROR(("tickle_read_linear(): canonical failure"));
    exception(int_number(s), 0);
  }
#endif

  // Access within single page
  BX_CPU_THIS_PTR address_xlation.paddress1 = translate_linear(tlbEntry, laddr, USER_PL, BX_READ);
  BX_CPU_THIS_PTR address_xlation.pages     = 1;
#if BX_SUPPORT_MEMTYPE
  BX_CPU_THIS_PTR address_xlation.memtype1  = tlbEntry->get_memtype();
#endif

#if BX_X86_DEBUGGER
  hwbreakpoint_match(laddr, 1, BX_READ);
#endif
}

  Bit8u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_linear_byte(unsigned s, bx_address laddr)
{
  Bit8u data;

  bx_address lpf = LPFOf(laddr);
  bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 0);
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us read access from this CPL
    if (isReadOK(tlbEntry, USER_PL)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
      data = *hostAddr;
      BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, (tlbEntry->ppf | pageOffset), 1, tlbEntry->get_memtype(), BX_READ, (Bit8u*) &data);
      return data;
    }
  }

  if (access_read_linear(laddr, 1, CPL, BX_READ, 0x0, (void *) &data) < 0)
    exception(int_number(s), 0);

  return data;
}

  Bit16u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_linear_word(unsigned s, bx_address laddr)
{
  Bit16u data;

  bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 1);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  bx_address lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
#else
  bx_address lpf = LPFOf(laddr);
#endif
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us read access from this CPL
    if (isReadOK(tlbEntry, USER_PL)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
      data = ReadHostWordFromLittleEndian(hostAddr);
      BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, (tlbEntry->ppf | pageOffset), 2, tlbEntry->get_memtype(), BX_READ, (Bit8u*) &data);
      return data;
    }
  }

  if (access_read_linear(laddr, 2, CPL, BX_READ, 0x1, (void *) &data) < 0)
    exception(int_number(s), 0);

  return data;
}

  Bit32u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_linear_dword(unsigned s, bx_address laddr)
{
  Bit32u data;

  bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 3);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  bx_address lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
#else
  bx_address lpf = LPFOf(laddr);
#endif
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us read access from this CPL
    if (isReadOK(tlbEntry, USER_PL)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
      data = ReadHostDWordFromLittleEndian(hostAddr);
      BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, (tlbEntry->ppf | pageOffset), 4, tlbEntry->get_memtype(), BX_READ, (Bit8u*) &data);
      return data;
    }
  }

  if (access_read_linear(laddr, 4, CPL, BX_READ, 0x3, (void *) &data) < 0)
    exception(int_number(s), 0);

  return data;
}

  Bit64u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_linear_qword(unsigned s, bx_address laddr)
{
  Bit64u data;

  bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 7);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  bx_address lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
#else
  bx_address lpf = LPFOf(laddr);
#endif
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us read access from this CPL
    if (isReadOK(tlbEntry, USER_PL)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
      data = ReadHostQWordFromLittleEndian(hostAddr);
      BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, (tlbEntry->ppf | pageOffset), 8, tlbEntry->get_memtype(), BX_READ, (Bit8u*) &data);
      return data;
    }
  }

  if (access_read_linear(laddr, 8, CPL, BX_READ, 0x7, (void *) &data) < 0)
    exception(int_number(s), 0);

  return data;
}

#if BX_CPU_LEVEL >= 6

  void BX_CPP_AttrRegparmN(3)
BX_CPU_C::read_linear_xmmword(unsigned s, bx_address laddr, BxPackedXmmRegister *data)
{
  bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 15);
  bx_address lpf = LPFOf(laddr);
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us read access from this CPL
    if (isReadOK(tlbEntry, USER_PL)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
      data->xmm64u(0) = ReadHostQWordFromLittleEndian(hostAddr);
      data->xmm64u(1) = ReadHostQWordFromLittleEndian(hostAddr+1);
      BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, (tlbEntry->ppf | pageOffset), 16, tlbEntry->get_memtype(), BX_READ, (Bit8u*) data);
      return;
    }
  }

  if (access_read_linear(laddr, 16, CPL, BX_READ, 0x0, (void *) data) < 0)
    exception(int_number(s), 0);
}

  void BX_CPP_AttrRegparmN(3)
BX_CPU_C::read_linear_xmmword_aligned(unsigned s, bx_address laddr, BxPackedXmmRegister *data)
{
  bx_address lpf = AlignedAccessLPFOf(laddr, 15);
  bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 0);
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us read access from this CPL
    if (isReadOK(tlbEntry, USER_PL)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
      data->xmm64u(0) = ReadHostQWordFromLittleEndian(hostAddr);
      data->xmm64u(1) = ReadHostQWordFromLittleEndian(hostAddr+1);
      BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, (tlbEntry->ppf | pageOffset), 16, tlbEntry->get_memtype(), BX_READ, (Bit8u*) data);
      return;
    }
  }

  if (laddr & 15) {
    BX_ERROR(("read_linear_xmmword_aligned(): #GP misaligned access"));
    exception(BX_GP_EXCEPTION, 0);
  }

  if (access_read_linear(laddr, 16, CPL, BX_READ, 0x0, (void *) data) < 0)
    exception(int_number(s), 0);
}

  void BX_CPP_AttrRegparmN(3)
BX_CPU_C::read_linear_ymmword(unsigned s, bx_address laddr, BxPackedYmmRegister *data)
{
  bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 31);
  bx_address lpf = LPFOf(laddr);
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us read access from this CPL
    if (isReadOK(tlbEntry, USER_PL)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
      for (unsigned n=0; n < 4; n++) {
        data->ymm64u(n) = ReadHostQWordFromLittleEndian(hostAddr+n);
      }
      BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, (tlbEntry->ppf | pageOffset), 32, tlbEntry->get_memtype(), BX_READ, (Bit8u*) data);
      return;
    }
  }

  if (access_read_linear(laddr, 32, CPL, BX_READ, 0x0, (void *) data) < 0)
    exception(int_number(s), 0);
}

  void BX_CPP_AttrRegparmN(3)
BX_CPU_C::read_linear_ymmword_aligned(unsigned s, bx_address laddr, BxPackedYmmRegister *data)
{
  bx_address lpf = AlignedAccessLPFOf(laddr, 31);
  bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 0);
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us read access from this CPL
    if (isReadOK(tlbEntry, USER_PL)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
      for (unsigned n=0; n < 4; n++) {
        data->ymm64u(n) = ReadHostQWordFromLittleEndian(hostAddr+n);
      }
      BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, (tlbEntry->ppf | pageOffset), 32, tlbEntry->get_memtype(), BX_READ, (Bit8u*) data);
      return;
    }
  }

  if (laddr & 31) {
    BX_ERROR(("read_linear_ymmword_aligned(): #GP misaligned access"));
    exception(BX_GP_EXCEPTION, 0);
  }

  if (access_read_linear(laddr, 32, CPL, BX_READ, 0x0, (void *) data) < 0)
    exception(int_number(s), 0);
}

  void BX_CPP_AttrRegparmN(3)
BX_CPU_C::read_linear_zmmword(unsigned s, bx_address laddr, BxPackedZmmRegister *data)
{
  bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 63);
  bx_address lpf = LPFOf(laddr);
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us read access from this CPL
    if (isReadOK(tlbEntry, USER_PL)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
      for (unsigned n=0; n < 8; n++) {
        data->zmm64u(n) = ReadHostQWordFromLittleEndian(hostAddr+n);
      }
      BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, (tlbEntry->ppf | pageOffset), 64, tlbEntry->get_memtype(), BX_READ, (Bit8u*) data);
      return;
    }
  }

  if (access_read_linear(laddr, 64, CPL, BX_READ, 0x0, (void *) data) < 0)
    exception(int_number(s), 0);
}

  void BX_CPP_AttrRegparmN(3)
BX_CPU_C::read_linear_zmmword_aligned(unsigned s, bx_address laddr, BxPackedZmmRegister *data)
{
  bx_address lpf = AlignedAccessLPFOf(laddr, 63);
  bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 0);
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us read access from this CPL
    if (isReadOK(tlbEntry, USER_PL)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
      for (unsigned n=0; n < 8; n++) {
        data->zmm64u(n) = ReadHostQWordFromLittleEndian(hostAddr+n);
      }
      BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, (tlbEntry->ppf | pageOffset), 64, tlbEntry->get_memtype(), BX_READ, (Bit8u*) data);
      return;
    }
  }

  if (laddr & 63) {
    BX_ERROR(("read_linear_zmmword_aligned(): #GP misaligned access"));
    exception(BX_GP_EXCEPTION, 0);
  }

  if (access_read_linear(laddr, 64, CPL, BX_READ, 0x0, (void *) data) < 0)
    exception(int_number(s), 0);
}

#endif

//////////////////////////////////////////////////////////////
// special Read-Modify-Write operations                      //
// address translation info is kept across read/write calls //
//////////////////////////////////////////////////////////////

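// The read_RMW_linear_* helpers translate with write permission and latch
// the translation in BX_CPU_THIS_PTR address_xlation: when the access maps
// to a directly accessible host page, address_xlation.pages holds the host
// pointer itself (any value > 2), otherwise it holds the number of physical
// pages (1 or 2) touched by the access.  The matching write_RMW_linear_*
// helper then commits the new value without re-translating.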
  Bit8u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_RMW_linear_byte(unsigned s, bx_address laddr)
{
  Bit8u data;
  bx_address lpf = LPFOf(laddr);
  bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 0);
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access from this CPL
    if (isWriteOK(tlbEntry, USER_PL)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      bx_phy_address pAddr = tlbEntry->ppf | pageOffset;
      Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
      pageWriteStampTable.decWriteStamp(pAddr, 1);
      data = *hostAddr;
      BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
      BX_CPU_THIS_PTR address_xlation.paddress1 = pAddr;
#if BX_SUPPORT_MEMTYPE
      BX_CPU_THIS_PTR address_xlation.memtype1 = tlbEntry->get_memtype();
#endif
      BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, pAddr, 1, tlbEntry->get_memtype(), BX_RW, (Bit8u*) &data);
      return data;
    }
  }

  if (access_read_linear(laddr, 1, CPL, BX_RW, 0x0, (void *) &data) < 0)
    exception(int_number(s), 0);

  return data;
}

  Bit16u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_RMW_linear_word(unsigned s, bx_address laddr)
{
  Bit16u data;
  bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 1);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  bx_address lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
#else
  bx_address lpf = LPFOf(laddr);
#endif
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access from this CPL
    if (isWriteOK(tlbEntry, USER_PL)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      bx_phy_address pAddr = tlbEntry->ppf | pageOffset;
      Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
      pageWriteStampTable.decWriteStamp(pAddr, 2);
      data = ReadHostWordFromLittleEndian(hostAddr);
      BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
      BX_CPU_THIS_PTR address_xlation.paddress1 = pAddr;
#if BX_SUPPORT_MEMTYPE
      BX_CPU_THIS_PTR address_xlation.memtype1 = tlbEntry->get_memtype();
#endif
      BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, pAddr, 2, tlbEntry->get_memtype(), BX_RW, (Bit8u*) &data);
      return data;
    }
  }

  if (access_read_linear(laddr, 2, CPL, BX_RW, 0x1, (void *) &data) < 0)
    exception(int_number(s), 0);

  return data;
}

  Bit32u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_RMW_linear_dword(unsigned s, bx_address laddr)
{
  Bit32u data;
  bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 3);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  bx_address lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
#else
  bx_address lpf = LPFOf(laddr);
#endif
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access from this CPL
    if (isWriteOK(tlbEntry, USER_PL)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      bx_phy_address pAddr = tlbEntry->ppf | pageOffset;
      Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
      pageWriteStampTable.decWriteStamp(pAddr, 4);
      data = ReadHostDWordFromLittleEndian(hostAddr);
      BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
      BX_CPU_THIS_PTR address_xlation.paddress1 = pAddr;
#if BX_SUPPORT_MEMTYPE
      BX_CPU_THIS_PTR address_xlation.memtype1 = tlbEntry->get_memtype();
#endif
      BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, pAddr, 4, tlbEntry->get_memtype(), BX_RW, (Bit8u*) &data);
      return data;
    }
  }

  if (access_read_linear(laddr, 4, CPL, BX_RW, 0x3, (void *) &data) < 0)
    exception(int_number(s), 0);

  return data;
}

  Bit64u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_RMW_linear_qword(unsigned s, bx_address laddr)
{
  Bit64u data;
  bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 7);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  bx_address lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
#else
  bx_address lpf = LPFOf(laddr);
#endif
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access from this CPL
    if (isWriteOK(tlbEntry, USER_PL)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      bx_phy_address pAddr = tlbEntry->ppf | pageOffset;
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
      pageWriteStampTable.decWriteStamp(pAddr, 8);
      data = ReadHostQWordFromLittleEndian(hostAddr);
      BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
      BX_CPU_THIS_PTR address_xlation.paddress1 = pAddr;
#if BX_SUPPORT_MEMTYPE
      BX_CPU_THIS_PTR address_xlation.memtype1 = tlbEntry->get_memtype();
#endif
      BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, pAddr, 8, tlbEntry->get_memtype(), BX_RW, (Bit8u*) &data);
      return data;
    }
  }

  if (access_read_linear(laddr, 8, CPL, BX_RW, 0x7, (void *) &data) < 0)
    exception(int_number(s), 0);

  return data;
}

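// The write_RMW_linear_* helpers below must only be called after the
// corresponding read_RMW_linear_* (or access_read_linear with BX_RW),
// since they rely on the translation latched in address_xlation: a host
// pointer (pages > 2), a single physical page (pages == 1), or a
// page-split access (pages == 2, described by paddress1/len1 and
// paddress2/len2).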
  void BX_CPP_AttrRegparmN(1)
BX_CPU_C::write_RMW_linear_byte(Bit8u val8)
{
  BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
    BX_CPU_THIS_PTR address_xlation.paddress1, 1, MEMTYPE(BX_CPU_THIS_PTR address_xlation.memtype1), BX_WRITE, 0, (Bit8u*) &val8);

  if (BX_CPU_THIS_PTR address_xlation.pages > 2) {
    // Pages > 2 means it stores a host address for direct access.
    Bit8u *hostAddr = (Bit8u *) BX_CPU_THIS_PTR address_xlation.pages;
    *hostAddr = val8;
  }
  else {
    // address_xlation.pages must be 1
    access_write_physical(BX_CPU_THIS_PTR address_xlation.paddress1, 1, &val8);
  }
}

  void BX_CPP_AttrRegparmN(1)
BX_CPU_C::write_RMW_linear_word(Bit16u val16)
{
  if (BX_CPU_THIS_PTR address_xlation.pages > 2) {
    // Pages > 2 means it stores a host address for direct access.
    Bit16u *hostAddr = (Bit16u *) BX_CPU_THIS_PTR address_xlation.pages;
    WriteHostWordToLittleEndian(hostAddr, val16);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress1, 2, MEMTYPE(BX_CPU_THIS_PTR address_xlation.memtype1),
        BX_WRITE, 0, (Bit8u*) &val16);
  }
  else if (BX_CPU_THIS_PTR address_xlation.pages == 1) {
    access_write_physical(BX_CPU_THIS_PTR address_xlation.paddress1, 2, &val16);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress1, 2, MEMTYPE(BX_CPU_THIS_PTR address_xlation.memtype1),
        BX_WRITE, 0, (Bit8u*) &val16);
  }
  else {
#ifdef BX_LITTLE_ENDIAN
    access_write_physical(BX_CPU_THIS_PTR address_xlation.paddress1, 1, &val16);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress1, 1, MEMTYPE(BX_CPU_THIS_PTR address_xlation.memtype1),
        BX_WRITE, 0, (Bit8u*) &val16);
    access_write_physical(BX_CPU_THIS_PTR address_xlation.paddress2, 1, ((Bit8u *) &val16) + 1);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress2, 1, MEMTYPE(BX_CPU_THIS_PTR address_xlation.memtype2),
        BX_WRITE, 0, ((Bit8u*) &val16)+1);
#else
    access_write_physical(BX_CPU_THIS_PTR address_xlation.paddress1, 1, ((Bit8u *) &val16) + 1);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress1, 1, MEMTYPE(BX_CPU_THIS_PTR address_xlation.memtype1),
        BX_WRITE, 0, ((Bit8u*) &val16)+1);
    access_write_physical(BX_CPU_THIS_PTR address_xlation.paddress2, 1, &val16);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress2, 1, MEMTYPE(BX_CPU_THIS_PTR address_xlation.memtype2),
        BX_WRITE, 0, (Bit8u*) &val16);
#endif
  }
}

  void BX_CPP_AttrRegparmN(1)
BX_CPU_C::write_RMW_linear_dword(Bit32u val32)
{
  if (BX_CPU_THIS_PTR address_xlation.pages > 2) {
    // Pages > 2 means it stores a host address for direct access.
    Bit32u *hostAddr = (Bit32u *) BX_CPU_THIS_PTR address_xlation.pages;
    WriteHostDWordToLittleEndian(hostAddr, val32);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress1, 4, MEMTYPE(BX_CPU_THIS_PTR address_xlation.memtype1),
        BX_WRITE, 0, (Bit8u*) &val32);
  }
  else if (BX_CPU_THIS_PTR address_xlation.pages == 1) {
    access_write_physical(BX_CPU_THIS_PTR address_xlation.paddress1, 4, &val32);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress1, 4, MEMTYPE(BX_CPU_THIS_PTR address_xlation.memtype1),
        BX_WRITE, 0, (Bit8u*) &val32);
  }
  else {
#ifdef BX_LITTLE_ENDIAN
    access_write_physical(BX_CPU_THIS_PTR address_xlation.paddress1,
        BX_CPU_THIS_PTR address_xlation.len1, &val32);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress1,
        BX_CPU_THIS_PTR address_xlation.len1, MEMTYPE(BX_CPU_THIS_PTR address_xlation.memtype1),
        BX_WRITE, 0, (Bit8u*) &val32);
    access_write_physical(BX_CPU_THIS_PTR address_xlation.paddress2,
        BX_CPU_THIS_PTR address_xlation.len2,
        ((Bit8u *) &val32) + BX_CPU_THIS_PTR address_xlation.len1);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress2,
        BX_CPU_THIS_PTR address_xlation.len2, MEMTYPE(BX_CPU_THIS_PTR address_xlation.memtype2),
        BX_WRITE, 0, ((Bit8u *) &val32) + BX_CPU_THIS_PTR address_xlation.len1);
#else
    access_write_physical(BX_CPU_THIS_PTR address_xlation.paddress1,
        BX_CPU_THIS_PTR address_xlation.len1,
        ((Bit8u *) &val32) + (4 - BX_CPU_THIS_PTR address_xlation.len1));
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress1,
        BX_CPU_THIS_PTR address_xlation.len1, MEMTYPE(BX_CPU_THIS_PTR address_xlation.memtype1),
        BX_WRITE, 0, ((Bit8u *) &val32) + (4 - BX_CPU_THIS_PTR address_xlation.len1));
    access_write_physical(BX_CPU_THIS_PTR address_xlation.paddress2,
        BX_CPU_THIS_PTR address_xlation.len2, &val32);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress2,
        BX_CPU_THIS_PTR address_xlation.len2, MEMTYPE(BX_CPU_THIS_PTR address_xlation.memtype2),
        BX_WRITE, 0, (Bit8u*) &val32);
#endif
  }
}

  void BX_CPP_AttrRegparmN(1)
BX_CPU_C::write_RMW_linear_qword(Bit64u val64)
{
  if (BX_CPU_THIS_PTR address_xlation.pages > 2) {
    // Pages > 2 means it stores a host address for direct access.
    Bit64u *hostAddr = (Bit64u *) BX_CPU_THIS_PTR address_xlation.pages;
    WriteHostQWordToLittleEndian(hostAddr, val64);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress1, 8, MEMTYPE(BX_CPU_THIS_PTR address_xlation.memtype1),
        BX_WRITE, 0, (Bit8u*) &val64);
  }
  else if (BX_CPU_THIS_PTR address_xlation.pages == 1) {
    access_write_physical(BX_CPU_THIS_PTR address_xlation.paddress1, 8, &val64);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress1, 8, MEMTYPE(BX_CPU_THIS_PTR address_xlation.memtype1),
        BX_WRITE, 0, (Bit8u*) &val64);
  }
  else {
#ifdef BX_LITTLE_ENDIAN
    access_write_physical(BX_CPU_THIS_PTR address_xlation.paddress1,
        BX_CPU_THIS_PTR address_xlation.len1, &val64);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress1,
        BX_CPU_THIS_PTR address_xlation.len1, MEMTYPE(BX_CPU_THIS_PTR address_xlation.memtype1),
        BX_WRITE, 0, (Bit8u*) &val64);
    access_write_physical(BX_CPU_THIS_PTR address_xlation.paddress2,
        BX_CPU_THIS_PTR address_xlation.len2,
        ((Bit8u *) &val64) + BX_CPU_THIS_PTR address_xlation.len1);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress2,
        BX_CPU_THIS_PTR address_xlation.len2, MEMTYPE(BX_CPU_THIS_PTR address_xlation.memtype2),
        BX_WRITE, 0, ((Bit8u *) &val64) + BX_CPU_THIS_PTR address_xlation.len1);
#else
    access_write_physical(BX_CPU_THIS_PTR address_xlation.paddress1,
        BX_CPU_THIS_PTR address_xlation.len1,
        ((Bit8u *) &val64) + (8 - BX_CPU_THIS_PTR address_xlation.len1));
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress1,
        BX_CPU_THIS_PTR address_xlation.len1, MEMTYPE(BX_CPU_THIS_PTR address_xlation.memtype1),
        BX_WRITE, 0, ((Bit8u *) &val64) + (8 - BX_CPU_THIS_PTR address_xlation.len1));
    access_write_physical(BX_CPU_THIS_PTR address_xlation.paddress2,
        BX_CPU_THIS_PTR address_xlation.len2, &val64);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress2,
        BX_CPU_THIS_PTR address_xlation.len2, MEMTYPE(BX_CPU_THIS_PTR address_xlation.memtype2),
        BX_WRITE, 0, (Bit8u*) &val64);
#endif
  }
}

#if BX_SUPPORT_X86_64

void BX_CPU_C::read_RMW_linear_dqword_aligned_64(unsigned s, bx_address laddr, Bit64u *hi, Bit64u *lo)
{
  bx_address lpf = AlignedAccessLPFOf(laddr, 15);
  bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 0);
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access from this CPL
    if (isWriteOK(tlbEntry, USER_PL)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      bx_phy_address pAddr = tlbEntry->ppf | pageOffset;
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
      pageWriteStampTable.decWriteStamp(pAddr, 16);
      *lo = ReadHostQWordFromLittleEndian(hostAddr);
      *hi = ReadHostQWordFromLittleEndian(hostAddr + 1);
      BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
      BX_CPU_THIS_PTR address_xlation.paddress1 = pAddr;
#if BX_SUPPORT_MEMTYPE
      BX_CPU_THIS_PTR address_xlation.memtype1 = tlbEntry->get_memtype();
#endif
      BX_NOTIFY_LIN_MEMORY_ACCESS(laddr,     pAddr,     8, tlbEntry->get_memtype(), BX_RW, (Bit8u*) lo);
      BX_NOTIFY_LIN_MEMORY_ACCESS(laddr + 8, pAddr + 8, 8, tlbEntry->get_memtype(), BX_RW, (Bit8u*) hi);
      return;
    }
  }

  if (laddr & 15) {
    BX_ERROR(("read_RMW_linear_dqword_aligned_64(): #GP misaligned access"));
    exception(BX_GP_EXCEPTION, 0);
  }

  BxPackedXmmRegister data;
  if (access_read_linear(laddr, 16, CPL, BX_RW, 0x0, (void *) &data) < 0)
    exception(int_number(s), 0);

  *lo = data.xmm64u(0);
  *hi = data.xmm64u(1);
}

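// write_RMW_linear_dqword() commits the 16-byte RMW as two qword writes:
// after the low half, paddress1 (and the host pointer, when pages > 2) is
// advanced by 8 so that the high half lands in the following qword.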
void BX_CPU_C::write_RMW_linear_dqword(Bit64u hi, Bit64u lo)
{
  write_RMW_linear_qword(lo);

  BX_CPU_THIS_PTR address_xlation.paddress1 += 8;
  if (BX_CPU_THIS_PTR address_xlation.pages > 2) {
    // Pages > 2 means it stores a host address for direct access
    BX_CPU_THIS_PTR address_xlation.pages += 8;
  }
  else {
    BX_ASSERT(BX_CPU_THIS_PTR address_xlation.pages == 1);
  }

  write_RMW_linear_qword(hi);
}

#endif

//
// Write data to a new stack; these methods are required for emulation
// correctness but are not performance critical.
//

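// Note that the target privilege level is passed explicitly (curr_pl) and a
// failing write raises #SS(0): the new stack may belong to a different
// privilege level than the one currently in CPL.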
void BX_CPU_C::write_new_stack_word(bx_address laddr, unsigned curr_pl, Bit16u data)
{
  bool user = (curr_pl == 3);
  bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 1);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  bx_address lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
#else
  bx_address lpf = LPFOf(laddr);
#endif
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access from this CPL
    if (isWriteOK(tlbEntry, user)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      bx_phy_address pAddr = tlbEntry->ppf | pageOffset;
      BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, pAddr, 2, tlbEntry->get_memtype(), BX_WRITE, (Bit8u*) &data);
      Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
      pageWriteStampTable.decWriteStamp(pAddr, 2);
      WriteHostWordToLittleEndian(hostAddr, data);
      return;
    }
  }

  if (access_write_linear(laddr, 2, curr_pl, BX_WRITE, 0x1, (void *) &data) < 0)
    exception(BX_SS_EXCEPTION, 0);
}

void BX_CPU_C::write_new_stack_dword(bx_address laddr, unsigned curr_pl, Bit32u data)
{
  bool user = (curr_pl == 3);
  bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 3);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  bx_address lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
#else
  bx_address lpf = LPFOf(laddr);
#endif
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access from this CPL
    if (isWriteOK(tlbEntry, user)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      bx_phy_address pAddr = tlbEntry->ppf | pageOffset;
      BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, pAddr, 4, tlbEntry->get_memtype(), BX_WRITE, (Bit8u*) &data);
      Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
      pageWriteStampTable.decWriteStamp(pAddr, 4);
      WriteHostDWordToLittleEndian(hostAddr, data);
      return;
    }
  }

  if (access_write_linear(laddr, 4, curr_pl, BX_WRITE, 0x3, (void *) &data) < 0)
    exception(BX_SS_EXCEPTION, 0);
}

void BX_CPU_C::write_new_stack_qword(bx_address laddr, unsigned curr_pl, Bit64u data)
{
  bool user = (curr_pl == 3);
  bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 7);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  bx_address lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
#else
  bx_address lpf = LPFOf(laddr);
#endif
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access from this CPL
    if (isWriteOK(tlbEntry, user)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      bx_phy_address pAddr = tlbEntry->ppf | pageOffset;
      BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, pAddr, 8, tlbEntry->get_memtype(), BX_WRITE, (Bit8u*) &data);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
      pageWriteStampTable.decWriteStamp(pAddr, 8);
      WriteHostQWordToLittleEndian(hostAddr, data);
      return;
    }
  }

  if (access_write_linear(laddr, 8, curr_pl, BX_WRITE, 0x7, (void *) &data) < 0)
    exception(BX_SS_EXCEPTION, 0);
}

// assuming the write happens in 32-bit mode
void BX_CPU_C::write_new_stack_word(bx_segment_reg_t *seg, Bit32u offset, unsigned curr_pl, Bit16u data)
{
  Bit32u laddr;

  if (seg->cache.valid & SegAccessWOK4G) {
    goto accessOK;
  }

  if (seg->cache.valid & SegAccessWOK) {
    if (offset < seg->cache.u.segment.limit_scaled) {
accessOK:
      laddr = (Bit32u)(seg->cache.u.segment.base) + offset;
      write_new_stack_word(laddr, curr_pl, data);
      return;
    }
  }

  // supply an error code when a segment violation occurs while pushing onto the new stack
  if (!write_virtual_checks(seg, offset, 2)) {
    BX_ERROR(("write_new_stack_word(): segment limit violation"));
    exception(BX_SS_EXCEPTION,
         seg->selector.rpl != CPL ? (seg->selector.value & 0xfffc) : 0);
  }
  goto accessOK;
}

// assuming the write happens in 32-bit mode
void BX_CPU_C::write_new_stack_dword(bx_segment_reg_t *seg, Bit32u offset, unsigned curr_pl, Bit32u data)
{
  Bit32u laddr;

  if (seg->cache.valid & SegAccessWOK4G) {
    goto accessOK;
  }

  if (seg->cache.valid & SegAccessWOK) {
    if (offset < (seg->cache.u.segment.limit_scaled-2)) {
accessOK:
      laddr = (Bit32u)(seg->cache.u.segment.base) + offset;
      write_new_stack_dword(laddr, curr_pl, data);
      return;
    }
  }

  // supply an error code when a segment violation occurs while pushing onto the new stack
  if (!write_virtual_checks(seg, offset, 4)) {
    BX_ERROR(("write_new_stack_dword(): segment limit violation"));
    exception(BX_SS_EXCEPTION,
         seg->selector.rpl != CPL ? (seg->selector.value & 0xfffc) : 0);
  }
  goto accessOK;
}

// assuming the write happens in 32-bit mode
void BX_CPU_C::write_new_stack_qword(bx_segment_reg_t *seg, Bit32u offset, unsigned curr_pl, Bit64u data)
{
  Bit32u laddr;

  if (seg->cache.valid & SegAccessWOK4G) {
    goto accessOK;
  }

  if (seg->cache.valid & SegAccessWOK) {
    if (offset <= (seg->cache.u.segment.limit_scaled-7)) {
accessOK:
      laddr = (Bit32u)(seg->cache.u.segment.base) + offset;
      write_new_stack_qword(laddr, curr_pl, data);
      return;
    }
  }

  // supply an error code when a segment violation occurs while pushing onto the new stack
  if (!write_virtual_checks(seg, offset, 8)) {
    BX_ERROR(("write_new_stack_qword(): segment limit violation"));
    exception(BX_SS_EXCEPTION,
        seg->selector.rpl != CPL ? (seg->selector.value & 0xfffc) : 0);
  }
  goto accessOK;
}

#if BX_SUPPORT_CET
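// CET shadow stack accesses.  These helpers check shadow-stack read/write
// permission in the TLB entry (isShadowStackReadOK / isShadowStackWriteOK)
// and raise #GP(0) when the slow-path access fails.  The busy-bit helpers
// at the end toggle bit 0 of the shadow stack token with an 8-byte
// compare-and-exchange.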
Bit32u BX_CPP_AttrRegparmN(2) BX_CPU_C::shadow_stack_read_dword(bx_address offset, unsigned curr_pl)
{
  Bit32u data;

  bool user = (curr_pl == 3);
  bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(offset, 3);
  bx_address lpf = AlignedAccessLPFOf(offset, 3);
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us read access from this CPL
    if (isShadowStackReadOK(tlbEntry, user)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(offset);
      Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
      data = ReadHostDWordFromLittleEndian(hostAddr);
      BX_NOTIFY_LIN_MEMORY_ACCESS(offset, (tlbEntry->ppf | pageOffset), 4, tlbEntry->get_memtype(), BX_SHADOW_STACK_READ, (Bit8u*) &data);
      return data;
    }
  }

  if (access_read_linear(offset, 4, curr_pl, BX_SHADOW_STACK_READ, 0, (void *) &data) < 0)
    exception(BX_GP_EXCEPTION, 0);

  return data;
}

Bit64u BX_CPP_AttrRegparmN(2) BX_CPU_C::shadow_stack_read_qword(bx_address offset, unsigned curr_pl)
{
  Bit64u data;

  bool user = (curr_pl == 3);
  bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(offset, 7);
  bx_address lpf = AlignedAccessLPFOf(offset, 7);
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us read access from this CPL
    if (isShadowStackReadOK(tlbEntry, user)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(offset);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
      data = ReadHostQWordFromLittleEndian(hostAddr);
      BX_NOTIFY_LIN_MEMORY_ACCESS(offset, (tlbEntry->ppf | pageOffset), 8, tlbEntry->get_memtype(), BX_SHADOW_STACK_READ, (Bit8u*) &data);
      return data;
    }
  }

  if (access_read_linear(offset, 8, curr_pl, BX_SHADOW_STACK_READ, 0, (void *) &data) < 0)
    exception(BX_GP_EXCEPTION, 0);

  return data;
}

void BX_CPP_AttrRegparmN(3) BX_CPU_C::shadow_stack_write_dword(bx_address offset, unsigned curr_pl, Bit32u data)
{
  bool user = (curr_pl == 3);
  bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(offset, 3);
  bx_address lpf = AlignedAccessLPFOf(offset, 3);
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access from this CPL
    if (isShadowStackWriteOK(tlbEntry, user)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(offset);
      bx_phy_address pAddr = tlbEntry->ppf | pageOffset;
      BX_NOTIFY_LIN_MEMORY_ACCESS(offset, pAddr, 4, tlbEntry->get_memtype(), BX_SHADOW_STACK_WRITE, (Bit8u*) &data);
      Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
      pageWriteStampTable.decWriteStamp(pAddr, 4);
      WriteHostDWordToLittleEndian(hostAddr, data);
      return;
    }
  }

  if (access_write_linear(offset, 4, curr_pl, BX_SHADOW_STACK_WRITE, 0, (void *) &data) < 0)
    exception(BX_GP_EXCEPTION, 0);
}

void BX_CPP_AttrRegparmN(3) BX_CPU_C::shadow_stack_write_qword(bx_address offset, unsigned curr_pl, Bit64u data)
{
  bool user = (curr_pl == 3);
  bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(offset, 7);
  bx_address lpf = AlignedAccessLPFOf(offset, 7);
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access from this CPL
    if (isShadowStackWriteOK(tlbEntry, user)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(offset);
      bx_phy_address pAddr = tlbEntry->ppf | pageOffset;
      BX_NOTIFY_LIN_MEMORY_ACCESS(offset, pAddr, 8, tlbEntry->get_memtype(), BX_SHADOW_STACK_WRITE, (Bit8u*) &data);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
      pageWriteStampTable.decWriteStamp(pAddr, 8);
      WriteHostQWordToLittleEndian(hostAddr, data);
      return;
    }
  }

  if (access_write_linear(offset, 8, curr_pl, BX_SHADOW_STACK_WRITE, 0, (void *) &data) < 0)
    exception(BX_GP_EXCEPTION, 0);
}

bool BX_CPP_AttrRegparmN(4) BX_CPU_C::shadow_stack_lock_cmpxchg8b(bx_address offset, unsigned curr_pl, Bit64u data, Bit64u expected_data)
{
  Bit64u val64 = shadow_stack_read_qword(offset, curr_pl);
  if (val64 == expected_data) {
    shadow_stack_write_qword(offset, curr_pl, data);
    return true;
  }
  else {
    shadow_stack_write_qword(offset, curr_pl, val64);
    return false;
  }
}

bool BX_CPP_AttrRegparmN(2) BX_CPU_C::shadow_stack_atomic_set_busy(bx_address offset, unsigned curr_pl)
{
  return shadow_stack_lock_cmpxchg8b(offset, curr_pl, offset | 0x1, offset);
}

bool BX_CPP_AttrRegparmN(2) BX_CPU_C::shadow_stack_atomic_clear_busy(bx_address offset, unsigned curr_pl)
{
  return shadow_stack_lock_cmpxchg8b(offset, curr_pl, offset, offset | 0x1);
}
#endif