/////////////////////////////////////////////////////////////////////////
// $Id: cet.cc 14086 2021-01-30 08:35:35Z sshwarts $
/////////////////////////////////////////////////////////////////////////
//
//   Copyright (c) 2019 Stanislav Shwartsman
//          Written by Stanislav Shwartsman [sshwarts at sourceforge net]
//
//  This library is free software; you can redistribute it and/or
//  modify it under the terms of the GNU Lesser General Public
//  License as published by the Free Software Foundation; either
//  version 2 of the License, or (at your option) any later version.
//
//  This library is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
//  Lesser General Public License for more details.
//
//  You should have received a copy of the GNU Lesser General Public
//  License along with this library; if not, write to the Free Software
//  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
//
/////////////////////////////////////////////////////////////////////////

#define NEED_CPU_REG_SHORTCUTS 1
#include "bochs.h"
#include "cpu.h"
#include "msr.h"
#define LOG_THIS BX_CPU_THIS_PTR

#if BX_SUPPORT_CET

const Bit64u BX_CET_SHADOW_STACK_ENABLED                   = (1 << 0);
const Bit64u BX_CET_SHADOW_STACK_WRITE_ENABLED             = (1 << 1);
const Bit64u BX_CET_ENDBRANCH_ENABLED                      = (1 << 2);
const Bit64u BX_CET_LEGACY_INDIRECT_BRANCH_TREATMENT       = (1 << 3);
const Bit64u BX_CET_ENABLE_NO_TRACK_INDIRECT_BRANCH_PREFIX = (1 << 4);
const Bit64u BX_CET_SUPPRESS_DIS                           = (1 << 5);
const Bit64u BX_CET_SUPPRESS_INDIRECT_BRANCH_TRACKING      = (1 << 10);
const Bit64u BX_CET_WAIT_FOR_ENBRANCH                      = (1 << 11);

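// A CET control MSR value (IA32_U_CET / IA32_S_CET) is rejected if the tracker
// state (BX_CET_WAIT_FOR_ENBRANCH) and the suppress bit are set at the same
// time, or if any of the reserved bits 9:6 are set.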
bool is_invalid_cet_control(bx_address val)
{
  if ((val & (BX_CET_SUPPRESS_INDIRECT_BRANCH_TRACKING | BX_CET_WAIT_FOR_ENBRANCH)) ==
             (BX_CET_SUPPRESS_INDIRECT_BRANCH_TRACKING | BX_CET_WAIT_FOR_ENBRANCH)) return true;

  if (val & 0x3c0) return true; // reserved bits check
  return false;
}

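// CET feature predicates. The controls are kept per privilege domain:
// ia32_cet_control[1] holds the user-mode MSR (IA32_U_CET, selected when
// cpl==3) and ia32_cet_control[0] holds the supervisor MSR (IA32_S_CET).
// All checks additionally require CR4.CET and protected mode.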
bool BX_CPP_AttrRegparmN(1) BX_CPU_C::ShadowStackEnabled(unsigned cpl)
{
  return BX_CPU_THIS_PTR cr4.get_CET() && protected_mode() &&
         BX_CPU_THIS_PTR msr.ia32_cet_control[cpl==3] & BX_CET_SHADOW_STACK_ENABLED;
}

bool BX_CPP_AttrRegparmN(1) BX_CPU_C::ShadowStackWriteEnabled(unsigned cpl)
{
  return BX_CPU_THIS_PTR cr4.get_CET() && protected_mode() &&
        (BX_CPU_THIS_PTR msr.ia32_cet_control[cpl==3] & (BX_CET_SHADOW_STACK_ENABLED | BX_CET_SHADOW_STACK_WRITE_ENABLED)) == (BX_CET_SHADOW_STACK_ENABLED | BX_CET_SHADOW_STACK_WRITE_ENABLED);
}

bool BX_CPP_AttrRegparmN(1) BX_CPU_C::EndbranchEnabled(unsigned cpl)
{
  return BX_CPU_THIS_PTR cr4.get_CET() && protected_mode() &&
         BX_CPU_THIS_PTR msr.ia32_cet_control[cpl==3] & BX_CET_ENDBRANCH_ENABLED;
}

bool BX_CPP_AttrRegparmN(1) BX_CPU_C::EndbranchEnabledAndNotSuppressed(unsigned cpl)
{
  return BX_CPU_THIS_PTR cr4.get_CET() && protected_mode() &&
        (BX_CPU_THIS_PTR msr.ia32_cet_control[cpl==3] & (BX_CET_ENDBRANCH_ENABLED | BX_CET_SUPPRESS_INDIRECT_BRANCH_TRACKING)) == BX_CET_ENDBRANCH_ENABLED;
}

bool BX_CPP_AttrRegparmN(1) BX_CPU_C::WaitingForEndbranch(unsigned cpl)
{
  return BX_CPU_THIS_PTR cr4.get_CET() && protected_mode() &&
        (BX_CPU_THIS_PTR msr.ia32_cet_control[cpl==3] & (BX_CET_ENDBRANCH_ENABLED | BX_CET_WAIT_FOR_ENBRANCH)) == (BX_CET_ENDBRANCH_ENABLED | BX_CET_WAIT_FOR_ENBRANCH);
}

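// Indirect branch tracking helpers. Taking an indirect CALL/JMP arms the
// tracker (BX_CET_WAIT_FOR_ENBRANCH) and clears the suppress bit; the
// instruction executed next is then expected to be ENDBR32/ENDBR64.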
void BX_CPP_AttrRegparmN(1) BX_CPU_C::track_indirect(unsigned cpl)
{
  if (EndbranchEnabled(cpl)) {
    BX_CPU_THIS_PTR msr.ia32_cet_control[cpl==3] |= BX_CET_WAIT_FOR_ENBRANCH;
    BX_CPU_THIS_PTR msr.ia32_cet_control[cpl==3] &= ~BX_CET_SUPPRESS_INDIRECT_BRANCH_TRACKING;
  }
}

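// Same as track_indirect(), but honors the "no-track" prefix (3EH, seen here
// as a DS segment override on the indirect branch): when
// BX_CET_ENABLE_NO_TRACK_INDIRECT_BRANCH_PREFIX is set and the prefix is
// present, the branch does not arm the tracker.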
void BX_CPP_AttrRegparmN(2) BX_CPU_C::track_indirect_if_not_suppressed(bxInstruction_c *i, unsigned cpl)
{
  if (EndbranchEnabledAndNotSuppressed(cpl)) {
    if (i->segOverrideCet() == BX_SEG_REG_DS && (BX_CPU_THIS_PTR msr.ia32_cet_control[cpl==3] & BX_CET_ENABLE_NO_TRACK_INDIRECT_BRANCH_PREFIX) != 0)
      return;

    BX_CPU_THIS_PTR msr.ia32_cet_control[cpl==3] |= BX_CET_WAIT_FOR_ENBRANCH;
  }
}

void BX_CPP_AttrRegparmN(2) BX_CPU_C::reset_endbranch_tracker(unsigned cpl, bool suppress)
{
  BX_CPU_THIS_PTR msr.ia32_cet_control[cpl==3] &= ~(BX_CET_WAIT_FOR_ENBRANCH | BX_CET_SUPPRESS_INDIRECT_BRANCH_TRACKING);
  if (suppress && !(BX_CPU_THIS_PTR msr.ia32_cet_control[cpl==3] & BX_CET_SUPPRESS_DIS))
    BX_CPU_THIS_PTR msr.ia32_cet_control[cpl==3] |= BX_CET_SUPPRESS_INDIRECT_BRANCH_TRACKING;
}

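// Legacy code page handling for a missed ENDBRANCH: when legacy treatment is
// enabled, the page-aligned upper bits of the CET control MSR hold the base of
// a legacy code page bitmap with one bit per 4K page of linear address space.
// If the bit for the current page is set, the page is legacy code: the tracker
// is reset (and suppressed) and no fault is due. Returns true when the missing
// ENDBRANCH must still be treated as a control protection violation.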
bool BX_CPP_AttrRegparmN(1) BX_CPU_C::LegacyEndbranchTreatment(unsigned cpl)
{
  if (BX_CPU_THIS_PTR msr.ia32_cet_control[cpl==3] & BX_CET_LEGACY_INDIRECT_BRANCH_TREATMENT)
  {
    bx_address lip = get_laddr(BX_SEG_REG_CS, RIP);
    bx_address bitmap_addr = LPFOf(BX_CPU_THIS_PTR msr.ia32_cet_control[cpl==3]) + ((lip & BX_CONST64(0xFFFFFFFFFFFF)) >> 15);
    unsigned bitmap_index = (lip>>12) & 0x7;
    Bit8u bitmap = system_read_byte(bitmap_addr);
    if ((bitmap & (1 << bitmap_index)) != 0) {
      reset_endbranch_tracker(cpl, true);
      return false;
    }
  }
  return true;
}

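// INCSSPD/INCSSPQ: pop up to 255 elements off the shadow stack by advancing
// SSP. The low 8 bits of the register operand give the element count; the
// first and last elements to be popped are read (and discarded) so that any
// shadow-stack access fault is reported before SSP is moved.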
void BX_CPP_AttrRegparmN(1) BX_CPU_C::INCSSPD(bxInstruction_c *i)
{
  if (! ShadowStackEnabled(CPL)) {
    BX_ERROR(("%s: shadow stack not enabled", i->getIaOpcodeNameShort()));
    exception(BX_UD_EXCEPTION, 0);
  }

  Bit32u src = BX_READ_32BIT_REG(i->dst()) & 0xff;
  Bit32u tmpsrc = (src == 0) ? 1 : src;

  shadow_stack_read_dword(SSP, CPL);
  shadow_stack_read_dword(SSP + (tmpsrc-1) * 4, CPL);
  SSP += src*4;

  BX_NEXT_INSTR(i);
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::INCSSPQ(bxInstruction_c *i)
{
  if (! ShadowStackEnabled(CPL)) {
    BX_ERROR(("%s: shadow stack not enabled", i->getIaOpcodeNameShort()));
    exception(BX_UD_EXCEPTION, 0);
  }

  Bit32u src = BX_READ_32BIT_REG(i->dst()) & 0xff;
  Bit32u tmpsrc = (src == 0) ? 1 : src;

  shadow_stack_read_qword(SSP, CPL);
  shadow_stack_read_qword(SSP + (tmpsrc-1) * 8, CPL);
  SSP += src*8;

  BX_NEXT_INSTR(i);
}

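// RDSSPD/RDSSPQ: read the current shadow stack pointer into a general purpose
// register. When shadow stacks are not enabled at the current CPL the
// instruction behaves as a NOP and the destination is left unchanged.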
void BX_CPP_AttrRegparmN(1) BX_CPU_C::RDSSPD(bxInstruction_c *i)
{
  if (ShadowStackEnabled(CPL)) {
    BX_WRITE_32BIT_REGZ(i->dst(), BX_READ_32BIT_REG(BX_32BIT_REG_SSP));
  }

  BX_NEXT_INSTR(i);
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::RDSSPQ(bxInstruction_c *i)
{
  if (ShadowStackEnabled(CPL)) {
    BX_WRITE_64BIT_REG(i->dst(), SSP);
  }

  BX_NEXT_INSTR(i);
}

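// SAVEPREVSSP: consume the "previous SSP" token sitting at the top of the
// current shadow stack (left there by RSTORSSP) and write a restore token onto
// the outgoing shadow stack at the address recorded in that token, so the old
// stack can later be switched back to with RSTORSSP.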
void BX_CPP_AttrRegparmN(1) BX_CPU_C::SAVEPREVSSP(bxInstruction_c *i)
{
  if (! ShadowStackEnabled(CPL)) {
    BX_ERROR(("%s: shadow stack not enabled", i->getIaOpcodeNameShort()));
    exception(BX_UD_EXCEPTION, 0);
  }

  if (SSP & 7) {
    BX_ERROR(("%s: shadow stack not aligned to 8 byte boundary", i->getIaOpcodeNameShort()));
    exception(BX_GP_EXCEPTION, 0);
  }

  Bit64u previous_ssp_token = shadow_stack_read_qword(SSP, CPL);

  // If the CF flag indicates there was an alignment hole on the current shadow stack then pop that alignment hole
  // Note that the alignment hole can be present only when in legacy/compatibility mode
  if (BX_CPU_THIS_PTR get_CF()) {
    if (long64_mode()) {
      BX_ERROR(("%s: shadow stack alignment hole in long64 mode", i->getIaOpcodeNameShort()));
      exception(BX_GP_EXCEPTION, 0);
    }
    else {
      // pop the alignment hole
      if (shadow_stack_pop_32() != 0) {
        BX_ERROR(("%s: shadow stack alignment hole must be zero", i->getIaOpcodeNameShort()));
        exception(BX_GP_EXCEPTION, 0);
      }
    }
  }

  if ((previous_ssp_token & 0x02) == 0) {
    BX_ERROR(("%s: previous SSP token must have bit 1 set", i->getIaOpcodeNameShort()));
    exception(BX_GP_EXCEPTION, 0);
  }

  if (!long64_mode() && GET32H(previous_ssp_token) != 0) {
    BX_ERROR(("%s: previous SSP token reserved bits set", i->getIaOpcodeNameShort()));
    exception(BX_GP_EXCEPTION, 0);
  }

  // Save Prev SSP from previous_ssp_token to the old shadow stack at the next 8 byte aligned address
  Bit64u old_ssp = previous_ssp_token & ~BX_CONST64(0x03);
  Bit64u tmp = old_ssp | long64_mode();
  shadow_stack_write_dword(old_ssp - 4, CPL, 0);
  old_ssp = old_ssp & ~BX_CONST64(0x07);
  shadow_stack_write_qword(old_ssp - 8, CPL, tmp);

  SSP += 8;

  BX_NEXT_INSTR(i);
}

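// RSTORSSP: switch SSP to another shadow stack. The memory operand points at a
// "restore token" on the target stack; the token's mode bit must match CS.L,
// the SSP value recorded in the token must point just past the token itself,
// and the token is replaced with a "previous SSP" token recording the outgoing
// SSP. CF is set when the restored SSP had a 4-byte alignment hole
// (legacy/compatibility mode only).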
void BX_CPP_AttrRegparmN(1) BX_CPU_C::RSTORSSP(bxInstruction_c *i)
{
  if (! ShadowStackEnabled(CPL)) {
    BX_ERROR(("%s: shadow stack not enabled", i->getIaOpcodeNameShort()));
    exception(BX_UD_EXCEPTION, 0);
  }

  bx_address eaddr = BX_CPU_RESOLVE_ADDR(i);
  bx_address laddr = agen_read_aligned(i->seg(), eaddr, 8);
  if (laddr & 0x7) {
    BX_ERROR(("%s: SSP_LA must be 8 bytes aligned", i->getIaOpcodeNameShort()));
    exception(BX_GP_EXCEPTION, 0);
  }

  Bit64u previous_ssp_token = SSP | long64_mode() | 0x02;

// should be done atomically
  Bit64u SSP_tmp = shadow_stack_read_qword(laddr, CPL); // should be LWSI
  if ((SSP_tmp & 0x03) != long64_mode()) {
    BX_ERROR(("%s: CS.L of shadow stack token doesn't match or bit1 is not 0", i->getIaOpcodeNameShort()));
    exception(BX_CP_EXCEPTION, BX_CP_RSTORSSP);
  }
  if (!long64_mode() && GET32H(SSP_tmp) != 0) {
    BX_ERROR(("%s: 64-bit SSP token not in 64-bit mode", i->getIaOpcodeNameShort()));
    exception(BX_CP_EXCEPTION, BX_CP_RSTORSSP);
  }

  Bit64u tmp = SSP_tmp & ~BX_CONST64(0x01);
  tmp = (tmp-8) & ~BX_CONST64(0x07);
  if (tmp != laddr) {
    BX_ERROR(("%s: address in SSP token doesn't match requested top of stack", i->getIaOpcodeNameShort()));
    exception(BX_CP_EXCEPTION, BX_CP_RSTORSSP);
  }
  shadow_stack_write_qword(laddr, CPL, previous_ssp_token);
// should be done atomically

  SSP = laddr;

  clearEFlagsOSZAPC();
  // Set the CF if the SSP in the restore token was 4 byte aligned and not 8 byte aligned, i.e. there is an alignment hole
  if (SSP_tmp & 0x04) assert_CF();

  BX_NEXT_INSTR(i);
}

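// WRSSD/WRSSQ: store a general purpose register to shadow-stack memory.
// Requires the write-enable control (BX_CET_SHADOW_STACK_WRITE_ENABLED) in
// addition to shadow stacks being enabled at the current CPL, and the
// destination must be naturally aligned.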
void BX_CPP_AttrRegparmN(1) BX_CPU_C::WRSSD(bxInstruction_c *i)
{
  if (! ShadowStackWriteEnabled(CPL)) {
    BX_ERROR(("%s: shadow stack not enabled", i->getIaOpcodeNameShort()));
    exception(BX_UD_EXCEPTION, 0);
  }

  bx_address eaddr = BX_CPU_RESOLVE_ADDR(i);
  bx_address laddr = agen_read_aligned(i->seg(), eaddr, 4);
  if (laddr & 0x3) {
    BX_ERROR(("%s: must be 4 bytes aligned", i->getIaOpcodeNameShort()));
    exception(BX_GP_EXCEPTION, 0);
  }
  shadow_stack_write_dword(laddr, CPL, BX_READ_32BIT_REG(i->src()));

  BX_NEXT_INSTR(i);
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::WRSSQ(bxInstruction_c *i)
{
  if (! ShadowStackWriteEnabled(CPL)) {
    BX_ERROR(("%s: shadow stack not enabled", i->getIaOpcodeNameShort()));
    exception(BX_UD_EXCEPTION, 0);
  }

  bx_address eaddr = BX_CPU_RESOLVE_ADDR(i);
  bx_address laddr = agen_read_aligned(i->seg(), eaddr, 8);
  if (laddr & 0x7) {
    BX_ERROR(("%s: must be 8 bytes aligned", i->getIaOpcodeNameShort()));
    exception(BX_GP_EXCEPTION, 0);
  }
  shadow_stack_write_qword(laddr, CPL, BX_READ_64BIT_REG(i->src()));

  BX_NEXT_INSTR(i);
}

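// WRUSSD/WRUSSQ: supervisor-only (CPL 0) store to user shadow-stack memory.
// Only CR4.CET is checked here; the write itself is performed with user
// privilege (CPL 3 is passed to the shadow stack write helper), so it must
// target pages mapped as user shadow stack.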
void BX_CPP_AttrRegparmN(1) BX_CPU_C::WRUSSD(bxInstruction_c *i)
{
  if (!BX_CPU_THIS_PTR cr4.get_CET()) {
    BX_ERROR(("%s: shadow stack not enabled", i->getIaOpcodeNameShort()));
    exception(BX_UD_EXCEPTION, 0);
  }

  if (CPL > 0) {
    BX_ERROR(("%s: CPL != 0", i->getIaOpcodeNameShort()));
    exception(BX_GP_EXCEPTION, 0);
  }

  bx_address eaddr = BX_CPU_RESOLVE_ADDR(i);
  bx_address laddr = agen_read_aligned(i->seg(), eaddr, 4);
  if (laddr & 0x3) {
    BX_ERROR(("%s: must be 4 bytes aligned", i->getIaOpcodeNameShort()));
    exception(BX_GP_EXCEPTION, 0);
  }
  shadow_stack_write_dword(laddr, 3, BX_READ_32BIT_REG(i->src()));

  BX_NEXT_INSTR(i);
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::WRUSSQ(bxInstruction_c *i)
{
  if (!BX_CPU_THIS_PTR cr4.get_CET()) {
    BX_ERROR(("%s: shadow stack not enabled", i->getIaOpcodeNameShort()));
    exception(BX_UD_EXCEPTION, 0);
  }

  if (CPL > 0) {
    BX_ERROR(("%s: CPL != 0", i->getIaOpcodeNameShort()));
    exception(BX_GP_EXCEPTION, 0);
  }

  bx_address eaddr = BX_CPU_RESOLVE_ADDR(i);
  bx_address laddr = agen_read_aligned(i->seg(), eaddr, 8);
  if (laddr & 0x7) {
    BX_ERROR(("%s: must be 8 bytes aligned", i->getIaOpcodeNameShort()));
    exception(BX_GP_EXCEPTION, 0);
  }
  shadow_stack_write_qword(laddr, 3, BX_READ_64BIT_REG(i->src()));

  BX_NEXT_INSTR(i);
}

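// SETSSBSY: CPL 0 only. Atomically mark the supervisor shadow stack token
// pointed to by IA32_PL0_SSP as busy and make that address the current SSP;
// a #CP(SETSSBSY) fault is raised if the token cannot be claimed.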
void BX_CPP_AttrRegparmN(1) BX_CPU_C::SETSSBSY(bxInstruction_c *i)
{
  if (! ShadowStackEnabled(0)) {
    BX_ERROR(("%s: shadow stack not enabled", i->getIaOpcodeNameShort()));
    exception(BX_UD_EXCEPTION, 0);
  }

  if (CPL > 0) {
    BX_ERROR(("%s: CPL != 0", i->getIaOpcodeNameShort()));
    exception(BX_GP_EXCEPTION, 0);
  }

  Bit64u ssp_laddr = BX_CPU_THIS_PTR msr.ia32_pl_ssp[0];
  if (ssp_laddr & 0x7) {
    BX_ERROR(("%s: SSP_LA not aligned to 8 bytes boundary", i->getIaOpcodeNameShort()));
    exception(BX_GP_EXCEPTION, 0);
  }

  if (!shadow_stack_atomic_set_busy(ssp_laddr, CPL)) {
    BX_ERROR(("%s: failed to set SSP busy bit", i->getIaOpcodeNameShort()));
    exception(BX_CP_EXCEPTION, BX_CP_SETSSBSY);
  }

  SSP = ssp_laddr;

  BX_NEXT_INSTR(i);
}

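// CLRSSBSY: CPL 0 only. Atomically clear the busy bit of the shadow stack
// token at the memory operand; CF reports an invalid token. SSP is left at 0
// afterwards, so a subsequent SETSSBSY is needed before the supervisor shadow
// stack can be used again.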
void BX_CPP_AttrRegparmN(1) BX_CPU_C::CLRSSBSY(bxInstruction_c *i)
{
  if (! ShadowStackEnabled(0)) {
    BX_ERROR(("%s: shadow stack not enabled", i->getIaOpcodeNameShort()));
    exception(BX_UD_EXCEPTION, 0);
  }

  if (CPL > 0) {
    BX_ERROR(("%s: CPL != 0", i->getIaOpcodeNameShort()));
    exception(BX_GP_EXCEPTION, 0);
  }

  bx_address eaddr = BX_CPU_RESOLVE_ADDR(i);
  bx_address laddr = agen_read_aligned(i->seg(), eaddr, 8);
  if (laddr & 0x7) {
    BX_ERROR(("%s: SSP_LA not aligned to 8 bytes boundary", i->getIaOpcodeNameShort()));
    exception(BX_GP_EXCEPTION, 0);
  }

  bool invalid_token = shadow_stack_atomic_clear_busy(laddr, CPL);
  clearEFlagsOSZAPC();
  if (invalid_token) assert_CF();
  SSP = 0;

  BX_NEXT_INSTR(i);
}

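// ENDBR32/ENDBR64: terminate the wait-for-endbranch state of the indirect
// branch tracker in the matching operating mode; in the non-matching mode the
// instruction executes as a NOP.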
void BX_CPP_AttrRegparmN(1) BX_CPU_C::ENDBRANCH32(bxInstruction_c *i)
{
  if (! long64_mode()) {
    reset_endbranch_tracker(CPL);
    BX_NEXT_INSTR(i);
  }

  BX_NEXT_TRACE(i);
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::ENDBRANCH64(bxInstruction_c *i)
{
  if (long64_mode()) {
    reset_endbranch_tracker(CPL);
    BX_NEXT_INSTR(i);
  }

  BX_NEXT_TRACE(i);
}

#endif