1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright 2019 Google LLC
4 */
5
6 #include <common.h>
7 #include <cpu.h>
8 #include <dm.h>
9 #include <log.h>
10 #include <acpi/acpigen.h>
11 #include <acpi/acpi_table.h>
12 #include <asm/cpu_common.h>
13 #include <asm/cpu_x86.h>
14 #include <asm/global_data.h>
15 #include <asm/intel_acpi.h>
16 #include <asm/msr.h>
17 #include <asm/mtrr.h>
18 #include <asm/arch/cpu.h>
19 #include <asm/arch/iomap.h>
20 #include <dm/acpi.h>
21
22 #ifdef CONFIG_ACPIGEN
/*
 * CSTATE_RES() - Initialiser for a C-state's ACPI generic-address resource
 *
 * Shorthand for filling the .resource member of struct acpi_cstate; any
 * field not listed here (e.g. access size, addrh) is left zero.
 */
#define CSTATE_RES(address_space, width, offset, address) \
	{ \
	.space_id = address_space, \
	.bit_width = width, \
	.bit_offset = offset, \
	.addrl = address, \
	}
30
31 static struct acpi_cstate cstate_map[] = {
32 {
33 /* C1 */
34 .ctype = 1, /* ACPI C1 */
35 .latency = 1,
36 .power = 1000,
37 .resource = {
38 .space_id = ACPI_ADDRESS_SPACE_FIXED,
39 },
40 }, {
41 .ctype = 2, /* ACPI C2 */
42 .latency = 50,
43 .power = 10,
44 .resource = {
45 .space_id = ACPI_ADDRESS_SPACE_IO,
46 .bit_width = 8,
47 .addrl = 0x415,
48 },
49 }, {
50 .ctype = 3, /* ACPI C3 */
51 .latency = 150,
52 .power = 10,
53 .resource = {
54 .space_id = ACPI_ADDRESS_SPACE_IO,
55 .bit_width = 8,
56 .addrl = 0x419,
57 },
58 },
59 };
60
acpi_cpu_fill_ssdt(const struct udevice * dev,struct acpi_ctx * ctx)61 static int acpi_cpu_fill_ssdt(const struct udevice *dev, struct acpi_ctx *ctx)
62 {
63 uint core_id = dev_seq(dev);
64 int cores_per_package;
65 int ret;
66
67 cores_per_package = cpu_get_cores_per_package();
68 ret = acpi_generate_cpu_header(ctx, core_id, cstate_map,
69 ARRAY_SIZE(cstate_map));
70
71 /* Generate P-state tables */
72 generate_p_state_entries(ctx, core_id, cores_per_package);
73
74 /* Generate T-state tables */
75 generate_t_state_entries(ctx, core_id, cores_per_package, NULL, 0);
76
77 acpigen_pop_len(ctx);
78
79 if (device_is_last_sibling(dev)) {
80 ret = acpi_generate_cpu_package_final(ctx, cores_per_package);
81
82 if (ret)
83 return ret;
84 }
85
86 return 0;
87 }
88 #endif /* CONFIG_ACPIGEN */
89
apl_get_info(const struct udevice * dev,struct cpu_info * info)90 static int apl_get_info(const struct udevice *dev, struct cpu_info *info)
91 {
92 return cpu_intel_get_info(info, INTEL_BCLK_MHZ);
93 }
94
update_fixed_mtrrs(void)95 static void update_fixed_mtrrs(void)
96 {
97 native_write_msr(MTRR_FIX_64K_00000_MSR,
98 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK),
99 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK));
100 native_write_msr(MTRR_FIX_16K_80000_MSR,
101 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK),
102 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK));
103 native_write_msr(MTRR_FIX_4K_E0000_MSR,
104 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK),
105 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK));
106 native_write_msr(MTRR_FIX_4K_E8000_MSR,
107 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK),
108 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK));
109 native_write_msr(MTRR_FIX_4K_F0000_MSR,
110 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK),
111 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK));
112 native_write_msr(MTRR_FIX_4K_F8000_MSR,
113 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK),
114 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK));
115 }
116
/*
 * setup_core_msrs() - Program per-core power-management MSRs
 *
 * Sets C-state limits and locks the C-state configuration, points the
 * PM I/O capture range at the ACPI C-state register block, disables C1E
 * and MWAIT, locks the AES-NI feature register and finally marks the
 * fixed MTRR ranges write-back. NOTE(review): the exact write order is
 * assumed to matter (locks are set last in each register) — preserve it.
 */
static void setup_core_msrs(void)
{
	/* Limit package/core C-states, redirect MWAIT to I/O, lock config */
	wrmsrl(MSR_PMG_CST_CONFIG_CONTROL,
	       PKG_C_STATE_LIMIT_C2_MASK | CORE_C_STATE_LIMIT_C10_MASK |
	       IO_MWAIT_REDIRECT_MASK | CST_CFG_LOCK_MASK);
	/* Power Management I/O base address for I/O trapping to C-states */
	wrmsrl(MSR_PMG_IO_CAPTURE_ADR, ACPI_PMIO_CST_REG |
	       (PMG_IO_BASE_CST_RNG_BLK_SIZE << 16));
	/* Disable C1E (clear bit 1 of MSR_POWER_CTL) */
	msr_clrsetbits_64(MSR_POWER_CTL, 0x2, 0);
	/* Disable support for MONITOR and MWAIT instructions */
	msr_clrsetbits_64(MSR_IA32_MISC_ENABLE, MISC_ENABLE_MWAIT, 0);
	/*
	 * Enable and Lock the Advanced Encryption Standard (AES-NI)
	 * feature register
	 */
	msr_clrsetbits_64(MSR_FEATURE_CONFIG, FEATURE_CONFIG_RESERVED_MASK,
			  FEATURE_CONFIG_LOCK);

	update_fixed_mtrrs();
}
138
/*
 * soc_core_init() - Post-relocation CPU/SoC core setup
 *
 * Clears pending machine-check state, programs the power-management MSRs
 * and enables ACPI PM-timer emulation through the PMC device. Steps are
 * performed in this order; MSR setup happens before the PMC lookup so it
 * runs even if no PMC device is bound.
 *
 * Return: 0 on success, -ve if the ACPI PMC device cannot be found
 */
static int soc_core_init(void)
{
	struct udevice *pmc;
	int ret;

	/* Clear out pending MCEs */
	cpu_mca_configure();

	/* Set core MSRs */
	setup_core_msrs();
	/*
	 * Enable ACPI PM timer emulation, which also lets microcode know
	 * location of ACPI_BASE_ADDRESS. This also enables other features
	 * implemented in microcode.
	 */
	ret = uclass_first_device_err(UCLASS_ACPI_PMC, &pmc);
	if (ret)
		return log_msg_ret("PMC", ret);
	enable_pm_timer_emulation(pmc);

	return 0;
}
161
cpu_apl_probe(struct udevice * dev)162 static int cpu_apl_probe(struct udevice *dev)
163 {
164 if (gd->flags & GD_FLG_RELOC) {
165 int ret;
166
167 ret = soc_core_init();
168 if (ret)
169 return log_ret(ret);
170 }
171
172 return 0;
173 }
174
#ifdef CONFIG_ACPIGEN
/* ACPI generation hooks: per-core SSDT content comes from acpi_cpu_fill_ssdt() */
struct acpi_ops apl_cpu_acpi_ops = {
	.fill_ssdt = acpi_cpu_fill_ssdt,
};
#endif
180
/* CPU uclass operations: generic x86 helpers except the SoC-specific info */
static const struct cpu_ops cpu_x86_apl_ops = {
	.get_desc = cpu_x86_get_desc,
	.get_info = apl_get_info,
	.get_count = cpu_x86_get_count,
	.get_vendor = cpu_x86_get_vendor,
};
187
/* Device-tree match table for Apollo Lake CPU nodes */
static const struct udevice_id cpu_x86_apl_ids[] = {
	{ .compatible = "intel,apl-cpu" },
	{ }
};
192
/*
 * Apollo Lake CPU driver. Bound pre-relocation (DM_FLAG_PRE_RELOC); the
 * ACPI ops pointer is only present when CONFIG_ACPIGEN is enabled (the
 * ACPI_OPS_PTR macro expands to nothing otherwise).
 */
U_BOOT_DRIVER(intel_apl_cpu) = {
	.name = "intel_apl_cpu",
	.id = UCLASS_CPU,
	.of_match = cpu_x86_apl_ids,
	.bind = cpu_x86_bind,
	.probe = cpu_apl_probe,
	.ops = &cpu_x86_apl_ops,
	ACPI_OPS_PTR(&apl_cpu_acpi_ops)
	.flags = DM_FLAG_PRE_RELOC,
};
203