/*
 *  x86 SMM helpers (sysemu-only)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/log.h"
#include "tcg/helper-tcg.h"


/* SMM support */

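/*
 * SMM revision ID, stored in the state save area.  Bit 17 (0x20000)
 * advertises SMBASE relocation support; helper_rsm() below only honors
 * a new SMBASE when this bit is set in the saved value.  The low word
 * distinguishes the 32-bit and 64-bit save map layouts.
 */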
#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif

void do_smm_enter(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
    log_cpu_state_mask(CPU_LOG_INT, CPU(cpu), CPU_DUMP_CCOP);

    env->msr_smi_count++;
    env->hflags |= HF_SMM_MASK;
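    /*
     * Entering SMM masks NMIs.  If NMIs were already blocked (the SMI
     * interrupted an NMI handler), remember that fact so that RSM can
     * restore the previous blocking state.
     */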
    if (env->hflags2 & HF2_NMI_MASK) {
        env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK;
    } else {
        env->hflags2 |= HF2_NMI_MASK;
    }

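    /*
     * The state save area occupies the top of the 64 KiB SMRAM segment;
     * all offsets below are relative to SMBASE + 0x8000, so e.g. offset
     * 0x7efc is physical address SMBASE + 0xfefc.
     */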
    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
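    /*
     * 64-bit state save map.  Segment attributes are stored packed into
     * 16 bits: type/S/DPL/P in the low byte, AVL/L/D-B/G in bits 12-15,
     * with the limit nibble (descriptor flag bits 8-11) masked out.
     */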
    for (i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        x86_stw_phys(cs, sm_state + offset, dt->selector);
        x86_stw_phys(cs, sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        x86_stl_phys(cs, sm_state + offset + 4, dt->limit);
        x86_stq_phys(cs, sm_state + offset + 8, dt->base);
    }

    x86_stq_phys(cs, sm_state + 0x7e68, env->gdt.base);
    x86_stl_phys(cs, sm_state + 0x7e64, env->gdt.limit);

    x86_stw_phys(cs, sm_state + 0x7e70, env->ldt.selector);
    x86_stq_phys(cs, sm_state + 0x7e78, env->ldt.base);
    x86_stl_phys(cs, sm_state + 0x7e74, env->ldt.limit);
    x86_stw_phys(cs, sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    x86_stq_phys(cs, sm_state + 0x7e88, env->idt.base);
    x86_stl_phys(cs, sm_state + 0x7e84, env->idt.limit);

    x86_stw_phys(cs, sm_state + 0x7e90, env->tr.selector);
    x86_stq_phys(cs, sm_state + 0x7e98, env->tr.base);
    x86_stl_phys(cs, sm_state + 0x7e94, env->tr.limit);
    x86_stw_phys(cs, sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    /* ??? Vol 1, 16.5.6 Intel MPX and SMM says that IA32_BNDCFGS
       is saved at offset 7ED0.  Vol 3, 34.4.1.1, Table 32-2, has
       7EA0-7ED7 as "reserved".  What's this, and what's really
       supposed to happen?  */
    x86_stq_phys(cs, sm_state + 0x7ed0, env->efer);

    x86_stq_phys(cs, sm_state + 0x7ff8, env->regs[R_EAX]);
    x86_stq_phys(cs, sm_state + 0x7ff0, env->regs[R_ECX]);
    x86_stq_phys(cs, sm_state + 0x7fe8, env->regs[R_EDX]);
    x86_stq_phys(cs, sm_state + 0x7fe0, env->regs[R_EBX]);
    x86_stq_phys(cs, sm_state + 0x7fd8, env->regs[R_ESP]);
    x86_stq_phys(cs, sm_state + 0x7fd0, env->regs[R_EBP]);
    x86_stq_phys(cs, sm_state + 0x7fc8, env->regs[R_ESI]);
    x86_stq_phys(cs, sm_state + 0x7fc0, env->regs[R_EDI]);
    for (i = 8; i < 16; i++) {
        x86_stq_phys(cs, sm_state + 0x7ff8 - i * 8, env->regs[i]);
    }
    x86_stq_phys(cs, sm_state + 0x7f78, env->eip);
    x86_stl_phys(cs, sm_state + 0x7f70, cpu_compute_eflags(env));
    x86_stl_phys(cs, sm_state + 0x7f68, env->dr[6]);
    x86_stl_phys(cs, sm_state + 0x7f60, env->dr[7]);

    x86_stl_phys(cs, sm_state + 0x7f48, env->cr[4]);
    x86_stq_phys(cs, sm_state + 0x7f50, env->cr[3]);
    x86_stl_phys(cs, sm_state + 0x7f58, env->cr[0]);

    x86_stl_phys(cs, sm_state + 0x7efc, SMM_REVISION_ID);
    x86_stl_phys(cs, sm_state + 0x7f00, env->smbase);
#else
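    /* 32-bit (legacy) state save map. */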
    x86_stl_phys(cs, sm_state + 0x7ffc, env->cr[0]);
    x86_stl_phys(cs, sm_state + 0x7ff8, env->cr[3]);
    x86_stl_phys(cs, sm_state + 0x7ff4, cpu_compute_eflags(env));
    x86_stl_phys(cs, sm_state + 0x7ff0, env->eip);
    x86_stl_phys(cs, sm_state + 0x7fec, env->regs[R_EDI]);
    x86_stl_phys(cs, sm_state + 0x7fe8, env->regs[R_ESI]);
    x86_stl_phys(cs, sm_state + 0x7fe4, env->regs[R_EBP]);
    x86_stl_phys(cs, sm_state + 0x7fe0, env->regs[R_ESP]);
    x86_stl_phys(cs, sm_state + 0x7fdc, env->regs[R_EBX]);
    x86_stl_phys(cs, sm_state + 0x7fd8, env->regs[R_EDX]);
    x86_stl_phys(cs, sm_state + 0x7fd4, env->regs[R_ECX]);
    x86_stl_phys(cs, sm_state + 0x7fd0, env->regs[R_EAX]);
    x86_stl_phys(cs, sm_state + 0x7fcc, env->dr[6]);
    x86_stl_phys(cs, sm_state + 0x7fc8, env->dr[7]);

    x86_stl_phys(cs, sm_state + 0x7fc4, env->tr.selector);
    x86_stl_phys(cs, sm_state + 0x7f64, env->tr.base);
    x86_stl_phys(cs, sm_state + 0x7f60, env->tr.limit);
    x86_stl_phys(cs, sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    x86_stl_phys(cs, sm_state + 0x7fc0, env->ldt.selector);
    x86_stl_phys(cs, sm_state + 0x7f80, env->ldt.base);
    x86_stl_phys(cs, sm_state + 0x7f7c, env->ldt.limit);
    x86_stl_phys(cs, sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    x86_stl_phys(cs, sm_state + 0x7f74, env->gdt.base);
    x86_stl_phys(cs, sm_state + 0x7f70, env->gdt.limit);

    x86_stl_phys(cs, sm_state + 0x7f58, env->idt.base);
    x86_stl_phys(cs, sm_state + 0x7f54, env->idt.limit);

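    /*
     * In the legacy map the six selectors sit together at 0x7fa8, while
     * the base/limit/attributes for ES/CS/SS and DS/FS/GS live in two
     * separate 12-byte-per-segment blocks.
     */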
    for (i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3) {
            offset = 0x7f84 + i * 12;
        } else {
            offset = 0x7f2c + (i - 3) * 12;
        }
        x86_stl_phys(cs, sm_state + 0x7fa8 + i * 4, dt->selector);
        x86_stl_phys(cs, sm_state + offset + 8, dt->base);
        x86_stl_phys(cs, sm_state + offset + 4, dt->limit);
        x86_stl_phys(cs, sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    x86_stl_phys(cs, sm_state + 0x7f14, env->cr[4]);

    x86_stl_phys(cs, sm_state + 0x7efc, SMM_REVISION_ID);
    x86_stl_phys(cs, sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

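    /*
     * The SMI handler starts at SMBASE + 0x8000 in a real-mode-like
     * environment ("big real mode"): protection and paging disabled,
     * flat 4 GiB segment limits, interrupts and debug traps off.
     */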
#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    cpu_load_eflags(env, 0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C |
                              DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK |
                                      CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
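    /* Architectural reset value of DR7. */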
    env->dr[7] = 0x00000400;

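    /*
     * CS mirrors real-mode semantics: base = SMBASE with the selector
     * taken from SMBASE >> 4.  All segments are loaded as present,
     * writable data segments with 4 GiB limits.
     */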
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_G_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_G_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_G_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_G_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_G_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_G_MASK | DESC_A_MASK);
}

void helper_rsm(CPUX86State *env)
{
    X86CPU *cpu = env_archcpu(env);
    CPUState *cs = env_cpu(env);
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

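    /* Reload the CPU state from the save area at the top of SMRAM. */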
    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, x86_ldq_phys(cs, sm_state + 0x7ed0));

    env->gdt.base = x86_ldq_phys(cs, sm_state + 0x7e68);
    env->gdt.limit = x86_ldl_phys(cs, sm_state + 0x7e64);

    env->ldt.selector = x86_lduw_phys(cs, sm_state + 0x7e70);
    env->ldt.base = x86_ldq_phys(cs, sm_state + 0x7e78);
    env->ldt.limit = x86_ldl_phys(cs, sm_state + 0x7e74);
    env->ldt.flags = (x86_lduw_phys(cs, sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = x86_ldq_phys(cs, sm_state + 0x7e88);
    env->idt.limit = x86_ldl_phys(cs, sm_state + 0x7e84);

    env->tr.selector = x86_lduw_phys(cs, sm_state + 0x7e90);
    env->tr.base = x86_ldq_phys(cs, sm_state + 0x7e98);
    env->tr.limit = x86_ldl_phys(cs, sm_state + 0x7e94);
    env->tr.flags = (x86_lduw_phys(cs, sm_state + 0x7e92) & 0xf0ff) << 8;

    env->regs[R_EAX] = x86_ldq_phys(cs, sm_state + 0x7ff8);
    env->regs[R_ECX] = x86_ldq_phys(cs, sm_state + 0x7ff0);
    env->regs[R_EDX] = x86_ldq_phys(cs, sm_state + 0x7fe8);
    env->regs[R_EBX] = x86_ldq_phys(cs, sm_state + 0x7fe0);
    env->regs[R_ESP] = x86_ldq_phys(cs, sm_state + 0x7fd8);
    env->regs[R_EBP] = x86_ldq_phys(cs, sm_state + 0x7fd0);
    env->regs[R_ESI] = x86_ldq_phys(cs, sm_state + 0x7fc8);
    env->regs[R_EDI] = x86_ldq_phys(cs, sm_state + 0x7fc0);
    for (i = 8; i < 16; i++) {
        env->regs[i] = x86_ldq_phys(cs, sm_state + 0x7ff8 - i * 8);
    }
    env->eip = x86_ldq_phys(cs, sm_state + 0x7f78);
    cpu_load_eflags(env, x86_ldl_phys(cs, sm_state + 0x7f70),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = x86_ldl_phys(cs, sm_state + 0x7f68);
    env->dr[7] = x86_ldl_phys(cs, sm_state + 0x7f60);

    cpu_x86_update_cr4(env, x86_ldl_phys(cs, sm_state + 0x7f48));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs, sm_state + 0x7f50));
    cpu_x86_update_cr0(env, x86_ldl_phys(cs, sm_state + 0x7f58));

    for (i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               x86_lduw_phys(cs, sm_state + offset),
                               x86_ldq_phys(cs, sm_state + offset + 8),
                               x86_ldl_phys(cs, sm_state + offset + 4),
                               (x86_lduw_phys(cs, sm_state + offset + 2) &
                                0xf0ff) << 8);
    }

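    /*
     * Honor a new SMBASE value only if the saved revision ID has the
     * SMBASE relocation bit (bit 17) set.
     */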
    val = x86_ldl_phys(cs, sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = x86_ldl_phys(cs, sm_state + 0x7f00);
    }
#else
    cpu_x86_update_cr0(env, x86_ldl_phys(cs, sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, x86_ldl_phys(cs, sm_state + 0x7ff8));
    cpu_load_eflags(env, x86_ldl_phys(cs, sm_state + 0x7ff4),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = x86_ldl_phys(cs, sm_state + 0x7ff0);
    env->regs[R_EDI] = x86_ldl_phys(cs, sm_state + 0x7fec);
    env->regs[R_ESI] = x86_ldl_phys(cs, sm_state + 0x7fe8);
    env->regs[R_EBP] = x86_ldl_phys(cs, sm_state + 0x7fe4);
    env->regs[R_ESP] = x86_ldl_phys(cs, sm_state + 0x7fe0);
    env->regs[R_EBX] = x86_ldl_phys(cs, sm_state + 0x7fdc);
    env->regs[R_EDX] = x86_ldl_phys(cs, sm_state + 0x7fd8);
    env->regs[R_ECX] = x86_ldl_phys(cs, sm_state + 0x7fd4);
    env->regs[R_EAX] = x86_ldl_phys(cs, sm_state + 0x7fd0);
    env->dr[6] = x86_ldl_phys(cs, sm_state + 0x7fcc);
    env->dr[7] = x86_ldl_phys(cs, sm_state + 0x7fc8);

    env->tr.selector = x86_ldl_phys(cs, sm_state + 0x7fc4) & 0xffff;
    env->tr.base = x86_ldl_phys(cs, sm_state + 0x7f64);
    env->tr.limit = x86_ldl_phys(cs, sm_state + 0x7f60);
    env->tr.flags = (x86_ldl_phys(cs, sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = x86_ldl_phys(cs, sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = x86_ldl_phys(cs, sm_state + 0x7f80);
    env->ldt.limit = x86_ldl_phys(cs, sm_state + 0x7f7c);
    env->ldt.flags = (x86_ldl_phys(cs, sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = x86_ldl_phys(cs, sm_state + 0x7f74);
    env->gdt.limit = x86_ldl_phys(cs, sm_state + 0x7f70);

    env->idt.base = x86_ldl_phys(cs, sm_state + 0x7f58);
    env->idt.limit = x86_ldl_phys(cs, sm_state + 0x7f54);

    for (i = 0; i < 6; i++) {
        if (i < 3) {
            offset = 0x7f84 + i * 12;
        } else {
            offset = 0x7f2c + (i - 3) * 12;
        }
        cpu_x86_load_seg_cache(env, i,
                               x86_ldl_phys(cs,
                                        sm_state + 0x7fa8 + i * 4) & 0xffff,
                               x86_ldl_phys(cs, sm_state + offset + 8),
                               x86_ldl_phys(cs, sm_state + offset + 4),
                               (x86_ldl_phys(cs,
                                         sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, x86_ldl_phys(cs, sm_state + 0x7f14));

    val = x86_ldl_phys(cs, sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = x86_ldl_phys(cs, sm_state + 0x7ef8);
    }
#endif
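    /*
     * Leaving SMM unmasks NMIs again, unless NMIs were already masked
     * when the SMI was taken (i.e. the SMI interrupted an NMI handler).
     */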
    if ((env->hflags2 & HF2_SMM_INSIDE_NMI_MASK) == 0) {
        env->hflags2 &= ~HF2_NMI_MASK;
    }
    env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
    env->hflags &= ~HF_SMM_MASK;

    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
    log_cpu_state_mask(CPU_LOG_INT, CPU(cpu), CPU_DUMP_CCOP);
}