// System Management Mode support (on emulators)
//
// Copyright (C) 2008-2014  Kevin O'Connor <kevin@koconnor.net>
// Copyright (C) 2006 Fabrice Bellard
//
// This file may be distributed under the terms of the GNU LGPLv3 license.

#include "config.h" // CONFIG_*
#include "dev-q35.h"
#include "dev-piix.h"
#include "hw/pci.h" // pci_config_writel
#include "hw/pcidevice.h" // pci_find_device
#include "hw/pci_ids.h" // PCI_VENDOR_ID_INTEL
#include "hw/pci_regs.h" // PCI_DEVICE_ID
#include "output.h" // dprintf
#include "paravirt.h" // PORT_SMI_STATUS
#include "stacks.h" // HaveSmmCall32
#include "string.h" // memcpy
#include "util.h" // smm_setup
#include "x86.h" // wbinvd

/*
 * Check the SMM state save area format (bits 0-15) and require support
 * for SMBASE relocation (bit 17 of the revision field).
 */
#define SMM_REV_MASK 0x0002ffff

#define SMM_REV_I32  0x00020000
#define SMM_REV_I64  0x00020064

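/*
 * The 512-byte CPU state save area that the hardware writes at
 * SMBASE+0xfe00 on SMI entry.  Only the fields SeaBIOS needs are
 * named; the pad_* arrays cover the rest of each format.
 */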
struct smm_state {
    union {
        struct {
            u8 pad_000[0xf8];
            u32 smm_base;
            u32 smm_rev;
            u8 pad_100[0xd0];
            u32 eax, ecx, edx, ebx, esp, ebp, esi, edi, eip, eflags;
            u8 pad_1f8[0x08];
        } i32;
        struct {
            u8 pad_000[0xfc];
            u32 smm_rev;
            u32 smm_base;
            u8 pad_104[0x6c];
            u64 rflags, rip, r15, r14, r13, r12, r11, r10, r9, r8;
            u64 rdi, rsi, rbp, rsp, rbx, rdx, rcx, rax;
        } i64;
    };
};

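/*
 * Layout of a full 64KiB SMBASE region: the CPU begins SMM execution
 * at SMBASE+0x8000 (codeentry) and writes its state save area at
 * SMBASE+0xfe00 (cpu).  The space below the entry point holds the
 * trampoline backups, the a20 flag, and a stack for 32-bit calls.
 */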
struct smm_layout {
    struct smm_state backup1;
    struct smm_state backup2;
    u32 backup_a20;
    u8 stack[0x8000 - sizeof(struct smm_state)*2 - sizeof(u32)];
    u64 codeentry;
    u8 pad_8008[0x7df8];
    struct smm_state cpu;
};

void VISIBLE32FLAT
handle_smi(u16 cs)
{
    if (!CONFIG_USE_SMM)
        return;
    u8 cmd = inb(PORT_SMI_CMD);
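    // On SMI entry %cs holds SMBASE>>4, so this forms a flat pointer
    // to the base of the current SMRAM region.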
    struct smm_layout *smm = MAKE_FLATPTR(cs, 0);
    u32 rev = smm->cpu.i32.smm_rev & SMM_REV_MASK;
    dprintf(DEBUG_HDL_smi, "handle_smi cmd=%x smbase=%p\n", cmd, smm);

    if (smm == (void*)BUILD_SMM_INIT_ADDR) {
        // First SMI at the power-on default SMBASE - relocate SMBASE
        // to 0xa0000 by patching the save area; the CPU loads the new
        // base when it executes RSM.
        if (rev == SMM_REV_I32) {
            smm->cpu.i32.smm_base = BUILD_SMM_ADDR;
        } else if (rev == SMM_REV_I64) {
            smm->cpu.i64.smm_base = BUILD_SMM_ADDR;
        } else {
            warn_internalerror();
            return;
        }
        // indicate to smm_relocate_and_restore() that the SMM code was executed
        outb(0x00, PORT_SMI_STATUS);

        if (CONFIG_CALL32_SMM) {
            // Backup current cpu state for SMM trampolining
            struct smm_layout *newsmm = (void*)BUILD_SMM_ADDR;
            memcpy(&newsmm->backup1, &smm->cpu, sizeof(newsmm->backup1));
            memcpy(&newsmm->backup2, &smm->cpu, sizeof(newsmm->backup2));
            HaveSmmCall32 = 1;
        }

        return;
    }

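    // Trampoline for the call32_smm()/call16_smm() helpers in stacks.c:
    // the caller raises an SMI by writing CALL32SMM_CMDID to the APM
    // control port with an ENTERID/RETURNID marker in %ecx; the handler
    // below swaps the saved cpu state so that RSM resumes in the other
    // execution mode.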
    if (CONFIG_CALL32_SMM && cmd == CALL32SMM_CMDID) {
        if (rev == SMM_REV_I32) {
            u32 regs[8];
            memcpy(regs, &smm->cpu.i32.eax, sizeof(regs));
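            // regs[] mirrors the eax..edi block of the save area; the
            // caller passes the target eip in %ebx (regs[3]) and its
            // stack pointer in %esp (regs[4]).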
            if (smm->cpu.i32.ecx == CALL32SMM_ENTERID) {
                dprintf(9, "smm cpu call pc=%x esp=%x\n", regs[3], regs[4]);
                memcpy(&smm->backup2, &smm->cpu, sizeof(smm->backup2));
                memcpy(&smm->cpu, &smm->backup1, sizeof(smm->cpu));
                memcpy(&smm->cpu.i32.eax, regs, sizeof(regs));
                smm->cpu.i32.eip = regs[3];
                // Enable a20 and backup its previous state
                smm->backup_a20 = set_a20(1);
            } else if (smm->cpu.i32.ecx == CALL32SMM_RETURNID) {
                dprintf(9, "smm cpu ret %x esp=%x\n", regs[3], regs[4]);
                memcpy(&smm->cpu, &smm->backup2, sizeof(smm->cpu));
                memcpy(&smm->cpu.i32.eax, regs, sizeof(regs));
                if (!smm->backup_a20)
                    set_a20(0);
                smm->cpu.i32.eip = regs[3];
            }
        } else if (rev == SMM_REV_I64) {
            u64 regs[8];
            memcpy(regs, &smm->cpu.i64.rdi, sizeof(regs));
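            // regs[] mirrors the rdi..rax block of the save area; the
            // caller passes the target address in %rbx (regs[4]).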
            if ((u32)smm->cpu.i64.rcx == CALL32SMM_ENTERID) {
                memcpy(&smm->backup2, &smm->cpu, sizeof(smm->backup2));
                memcpy(&smm->cpu, &smm->backup1, sizeof(smm->cpu));
                memcpy(&smm->cpu.i64.rdi, regs, sizeof(regs));
                smm->cpu.i64.rip = (u32)regs[4];
                // Enable a20 and backup its previous state
                smm->backup_a20 = set_a20(1);
            } else if ((u32)smm->cpu.i64.rcx == CALL32SMM_RETURNID) {
                memcpy(&smm->cpu, &smm->backup2, sizeof(smm->cpu));
                memcpy(&smm->cpu.i64.rdi, regs, sizeof(regs));
                if (!smm->backup_a20)
                    set_a20(0);
                smm->cpu.i64.rip = (u32)regs[4];
            }
        }
    }
}

extern void entry_smi(void);
// movw %cs, %ax; ljmpw $SEG_BIOS, $(entry_smi - BUILD_BIOS_ADDR)
#define SMI_INSN (0xeac88c | ((u64)SEG_BIOS<<40) \
                  | ((u64)((u32)entry_smi - BUILD_BIOS_ADDR) << 24))
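// Encoded little-endian: bytes 0x8c 0xc8 ("movw %cs, %ax") followed by
// the 0xea far-jump opcode with 16-bit offset and 16-bit segment.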

static void
smm_save_and_copy(void)
{
    // Stash the RAM at the initial SMBASE that the relocation SMI will
    // clobber (code entry point and cpu save area) into the future
    // SMRAM area so it can be restored afterwards.
    struct smm_layout *initsmm = (void*)BUILD_SMM_INIT_ADDR;
    struct smm_layout *smm = (void*)BUILD_SMM_ADDR;
    memcpy(&smm->cpu, &initsmm->cpu, sizeof(smm->cpu));
    memcpy(&smm->codeentry, &initsmm->codeentry, sizeof(smm->codeentry));

    // Setup code entry point.
    initsmm->codeentry = SMI_INSN;
}

static void
smm_relocate_and_restore(void)
{
    /* init APM status port */
    outb(0x01, PORT_SMI_STATUS);

    /* raise an SMI */
    outb(0x00, PORT_SMI_CMD);

    /* wait until the relocation SMI handler has run and cleared the
     * status port */
    while (inb(PORT_SMI_STATUS) != 0x00)
        ;

    /* restore original memory content */
    struct smm_layout *initsmm = (void*)BUILD_SMM_INIT_ADDR;
    struct smm_layout *smm = (void*)BUILD_SMM_ADDR;
    memcpy(&initsmm->cpu, &smm->cpu, sizeof(initsmm->cpu));
    memcpy(&initsmm->codeentry, &smm->codeentry, sizeof(initsmm->codeentry));

    // Setup code entry point.
    smm->codeentry = SMI_INSN;
    wbinvd();
}

// This code is hardcoded for the PIIX4 Power Management device.
static void piix4_apmc_smm_setup(int isabdf, int i440_bdf)
{
    /* check if SMM init is already done */
    u32 value = pci_config_readl(isabdf, PIIX_DEVACTB);
    if (value & PIIX_DEVACTB_APMC_EN)
        return;

    /* enable the SMM memory window */
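    /* 0x48 = D_OPEN | G_SMRAME, 0x02 = C_BASE_SEG=010b (0xa0000):
     * make SMRAM accessible outside of SMM so it can be initialized */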
    pci_config_writeb(i440_bdf, I440FX_SMRAM, 0x02 | 0x48);

    smm_save_and_copy();

    /* enable SMI generation when writing to the APMC register */
    pci_config_writel(isabdf, PIIX_DEVACTB, value | PIIX_DEVACTB_APMC_EN);

    /* enable SMI generation */
    value = inl(acpi_pm_base + PIIX_PMIO_GLBCTL);
    outl(value | PIIX_PMIO_GLBCTL_SMI_EN, acpi_pm_base + PIIX_PMIO_GLBCTL);

    smm_relocate_and_restore();

    /* close the SMM memory window and enable normal SMM */
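    /* 0x08 = G_SMRAME without D_OPEN: SMRAM is now SMM-only */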
    pci_config_writeb(i440_bdf, I440FX_SMRAM, 0x02 | 0x08);
}

/* PCI_VENDOR_ID_INTEL && PCI_DEVICE_ID_INTEL_ICH9_LPC */
void ich9_lpc_apmc_smm_setup(int isabdf, int mch_bdf)
{
    /* check if SMM init is already done */
    u32 value = inl(acpi_pm_base + ICH9_PMIO_SMI_EN);
    if (value & ICH9_PMIO_SMI_EN_APMC_EN)
        return;

    /* enable the SMM memory window */
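    /* Q35's SMRAM register uses the same bit layout as the i440FX:
     * D_OPEN | G_SMRAME with C_BASE_SEG=010b (0xa0000) */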
    pci_config_writeb(mch_bdf, Q35_HOST_BRIDGE_SMRAM, 0x02 | 0x48);

    smm_save_and_copy();

    /* enable SMI generation when writing to the APMC register */
    outl(value | ICH9_PMIO_SMI_EN_APMC_EN | ICH9_PMIO_SMI_EN_GLB_SMI_EN,
         acpi_pm_base + ICH9_PMIO_SMI_EN);

    /* lock SMI generation */
    value = pci_config_readw(isabdf, ICH9_LPC_GEN_PMCON_1);
    pci_config_writel(isabdf, ICH9_LPC_GEN_PMCON_1,
                      value | ICH9_LPC_GEN_PMCON_1_SMI_LOCK);

    smm_relocate_and_restore();

    /* close the SMM memory window and enable normal SMM */
    pci_config_writeb(mch_bdf, Q35_HOST_BRIDGE_SMRAM, 0x02 | 0x08);
}

static int SMMISADeviceBDF = -1, SMMPMDeviceBDF = -1;

void
smm_device_setup(void)
{
    if (!CONFIG_USE_SMM)
        return;

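    /* Probe for a PIIX4 PM function paired with an i440FX host bridge
     * first, then for an ICH9 LPC bridge paired with a Q35 MCH. */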
    struct pci_device *isapci, *pmpci;
    isapci = pci_find_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3);
    pmpci = pci_find_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441);
    if (isapci && pmpci) {
        SMMISADeviceBDF = isapci->bdf;
        SMMPMDeviceBDF = pmpci->bdf;
        return;
    }
    isapci = pci_find_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_LPC);
    pmpci = pci_find_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_Q35_MCH);
    if (isapci && pmpci) {
        SMMISADeviceBDF = isapci->bdf;
        SMMPMDeviceBDF = pmpci->bdf;
    }
}

void
smm_setup(void)
{
    if (!CONFIG_USE_SMM || SMMISADeviceBDF < 0)
        return;

    dprintf(3, "init smm\n");
    u16 device = pci_config_readw(SMMISADeviceBDF, PCI_DEVICE_ID);
    if (device == PCI_DEVICE_ID_INTEL_82371AB_3)
        piix4_apmc_smm_setup(SMMISADeviceBDF, SMMPMDeviceBDF);
    else
        ich9_lpc_apmc_smm_setup(SMMISADeviceBDF, SMMPMDeviceBDF);
}