xref: /linux/arch/x86/kernel/cpu/topology_common.c (revision 1e525507)
// SPDX-License-Identifier: GPL-2.0
#include <linux/cpu.h>

#include <xen/xen.h>

#include <asm/apic.h>
#include <asm/processor.h>
#include <asm/smp.h>

#include "cpu.h"

struct x86_topology_system x86_topo_system __ro_after_init;
EXPORT_SYMBOL_GPL(x86_topo_system);

unsigned int __amd_nodes_per_pkg __ro_after_init;
EXPORT_SYMBOL_GPL(__amd_nodes_per_pkg);

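/*
 * Record the APIC ID shift and CPU count for @dom in the scan data and
 * propagate both values to all higher domains, so that levels which are
 * not explicitly enumerated by CPUID inherit the values of the highest
 * enumerated level below them.
 */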
void topology_set_dom(struct topo_scan *tscan, enum x86_topology_domains dom,
		      unsigned int shift, unsigned int ncpus)
{
	topology_update_dom(tscan, dom, shift, ncpus);

	/* Propagate to the upper levels */
	for (dom++; dom < TOPO_MAX_DOMAIN; dom++) {
		tscan->dom_shifts[dom] = tscan->dom_shifts[dom - 1];
		tscan->dom_ncpus[dom] = tscan->dom_ncpus[dom - 1];
	}
}

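/*
 * Legacy core count: CPUID leaf 0x4 subleaf 0 EAX[31:26] holds the maximum
 * number of addressable core IDs in the physical package minus one. Fall
 * back to a single core when leaf 0x4 or a valid cache type is not
 * available.
 */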
static unsigned int __maybe_unused parse_num_cores_legacy(struct cpuinfo_x86 *c)
{
	struct {
		u32	cache_type	:  5,
			unused		: 21,
			ncores		:  6;
	} eax;

	if (c->cpuid_level < 4)
		return 1;

	cpuid_subleaf_reg(4, 0, CPUID_EAX, &eax);
	if (!eax.cache_type)
		return 1;

	return eax.ncores + 1;
}

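/*
 * Fallback for CPUs without the extended topology leaves: derive the core
 * count from leaf 0x4 and the SMT shift from the leaf 0x1 EBX logical
 * processor count, converted to the leaf 0xb/0x1f convention where the
 * core level counts threads rather than cores.
 */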
static void parse_legacy(struct topo_scan *tscan)
{
	unsigned int cores, core_shift, smt_shift = 0;
	struct cpuinfo_x86 *c = tscan->c;

	cores = parse_num_cores_legacy(c);
	core_shift = get_count_order(cores);

	if (cpu_has(c, X86_FEATURE_HT)) {
		if (!WARN_ON_ONCE(tscan->ebx1_nproc_shift < core_shift))
			smt_shift = tscan->ebx1_nproc_shift - core_shift;
		/*
		 * The parser expects leaf 0xb/0x1f format, which means
		 * the number of logical processors at the core level
		 * counts threads.
		 */
		core_shift += smt_shift;
		cores <<= smt_shift;
	}

	topology_set_dom(tscan, TOPO_SMT_DOMAIN, smt_shift, 1U << smt_shift);
	topology_set_dom(tscan, TOPO_CORE_DOMAIN, core_shift, cores);
}

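/*
 * Preset single CPU defaults for the SMT and CORE domains and return true
 * when CPUID leaf 0x1 is not available, in which case the caller skips any
 * further topology parsing.
 */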
static bool fake_topology(struct topo_scan *tscan)
{
	/*
	 * Preset the CORE level shift for CPUID-less systems and XEN_PV,
	 * which has useless CPUID information.
	 */
	topology_set_dom(tscan, TOPO_SMT_DOMAIN, 0, 1);
	topology_set_dom(tscan, TOPO_CORE_DOMAIN, 0, 1);

	return tscan->c->cpuid_level < 1;
}

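/*
 * Vendor independent part of the topology evaluation: establish the
 * defaults, read the initial APIC ID from CPUID leaf 0x1 EBX[31:24],
 * select the effective APIC ID depending on @early and then hand off to
 * the vendor specific parsers.
 */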
static void parse_topology(struct topo_scan *tscan, bool early)
{
	const struct cpuinfo_topology topo_defaults = {
		.cu_id			= 0xff,
		.llc_id			= BAD_APICID,
		.l2c_id			= BAD_APICID,
	};
	struct cpuinfo_x86 *c = tscan->c;
	struct {
		u32	unused0		: 16,
			nproc		:  8,
			apicid		:  8;
	} ebx;

	c->topo = topo_defaults;

	if (fake_topology(tscan))
		return;

	/* Preset Initial APIC ID from CPUID leaf 1 */
	cpuid_leaf_reg(1, CPUID_EBX, &ebx);
	c->topo.initial_apicid = ebx.apicid;

	/*
	 * The initial invocation from early_identify_cpu() happens before
	 * the APIC is mapped or X2APIC enabled. For establishing the
	 * topology, that's not required. Use the initial APIC ID.
	 */
	if (early)
		c->topo.apicid = c->topo.initial_apicid;
	else
		c->topo.apicid = read_apic_id();

	/* The above is sufficient for UP */
	if (!IS_ENABLED(CONFIG_SMP))
		return;

	tscan->ebx1_nproc_shift = get_count_order(ebx.nproc);

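	/*
	 * Hand off to the vendor specific parsers. Intel falls back to the
	 * legacy leaf 0x1/0x4 evaluation when the extended topology leaves
	 * are not usable; Centaur and Zhaoxin use the legacy path directly.
	 */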
	switch (c->x86_vendor) {
	case X86_VENDOR_AMD:
		if (IS_ENABLED(CONFIG_CPU_SUP_AMD))
			cpu_parse_topology_amd(tscan);
		break;
	case X86_VENDOR_CENTAUR:
	case X86_VENDOR_ZHAOXIN:
		parse_legacy(tscan);
		break;
	case X86_VENDOR_INTEL:
		if (!IS_ENABLED(CONFIG_CPU_SUP_INTEL) || !cpu_parse_topology_ext(tscan))
			parse_legacy(tscan);
		break;
	case X86_VENDOR_HYGON:
		if (IS_ENABLED(CONFIG_CPU_SUP_HYGON))
			cpu_parse_topology_amd(tscan);
		break;
	}
}

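/*
 * Extract the package and die IDs from the APIC ID and compute the
 * package relative core ID by masking off the package bits and shifting
 * out the SMT bits. Logical package/die IDs are only assigned once the
 * topology is fully established, i.e. not in the early path.
 */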
static void topo_set_ids(struct topo_scan *tscan, bool early)
{
	struct cpuinfo_x86 *c = tscan->c;
	u32 apicid = c->topo.apicid;

	c->topo.pkg_id = topo_shift_apicid(apicid, TOPO_PKG_DOMAIN);
	c->topo.die_id = topo_shift_apicid(apicid, TOPO_DIE_DOMAIN);

	if (!early) {
		c->topo.logical_pkg_id = topology_get_logical_id(apicid, TOPO_PKG_DOMAIN);
		c->topo.logical_die_id = topology_get_logical_id(apicid, TOPO_DIE_DOMAIN);
	}

	/* Package relative core ID */
	c->topo.core_id = (apicid & topo_domain_mask(TOPO_PKG_DOMAIN)) >>
		x86_topo_system.dom_shifts[TOPO_SMT_DOMAIN];

	c->topo.amd_node_id = tscan->amd_node_id;

	if (c->x86_vendor == X86_VENDOR_AMD)
		cpu_topology_fixup_amd(tscan);
}

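/*
 * Re-evaluate the topology of a CPU and cross check the result against
 * the system wide state established by cpu_init_topology(). Mismatching
 * APIC IDs or topology domain shifts indicate inconsistent firmware and
 * are reported as firmware bugs.
 */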
void cpu_parse_topology(struct cpuinfo_x86 *c)
{
	unsigned int dom, cpu = smp_processor_id();
	struct topo_scan tscan = { .c = c, };

	parse_topology(&tscan, false);

	if (IS_ENABLED(CONFIG_X86_LOCAL_APIC)) {
		if (c->topo.initial_apicid != c->topo.apicid) {
			pr_err(FW_BUG "CPU%4u: APIC ID mismatch. CPUID: 0x%04x APIC: 0x%04x\n",
			       cpu, c->topo.initial_apicid, c->topo.apicid);
		}

		if (c->topo.apicid != cpuid_to_apicid[cpu]) {
			pr_err(FW_BUG "CPU%4u: APIC ID mismatch. Firmware: 0x%04x APIC: 0x%04x\n",
			       cpu, cpuid_to_apicid[cpu], c->topo.apicid);
		}
	}

	for (dom = TOPO_SMT_DOMAIN; dom < TOPO_MAX_DOMAIN; dom++) {
		if (tscan.dom_shifts[dom] == x86_topo_system.dom_shifts[dom])
			continue;
		pr_err(FW_BUG "CPU%d: Topology domain %u shift %u != %u\n", cpu, dom,
		       tscan.dom_shifts[dom], x86_topo_system.dom_shifts[dom]);
	}

	topo_set_ids(&tscan, false);
}

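/*
 * Establish the system wide topology state from the boot CPU scan: copy
 * the domain shifts into x86_topo_system, derive the per domain unit
 * sizes and record the number of AMD nodes per package.
 */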
void __init cpu_init_topology(struct cpuinfo_x86 *c)
{
	struct topo_scan tscan = { .c = c, };
	unsigned int dom, sft;

	parse_topology(&tscan, true);

	/* Copy the shift values and calculate the unit sizes. */
	memcpy(x86_topo_system.dom_shifts, tscan.dom_shifts, sizeof(x86_topo_system.dom_shifts));

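	/*
	 * dom_size[] is the width of each domain's APIC ID field expressed
	 * as a CPU count relative to the next lower domain. E.g. with
	 * dom_shifts[SMT] == 1 and dom_shifts[CORE] == 5 this yields
	 * dom_size[SMT] == 2 threads per core and
	 * dom_size[CORE] == 1 << (5 - 1) == 16 cores at the next level.
	 */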
	dom = TOPO_SMT_DOMAIN;
	x86_topo_system.dom_size[dom] = 1U << x86_topo_system.dom_shifts[dom];

	for (dom++; dom < TOPO_MAX_DOMAIN; dom++) {
		sft = x86_topo_system.dom_shifts[dom] - x86_topo_system.dom_shifts[dom - 1];
		x86_topo_system.dom_size[dom] = 1U << sft;
	}

	topo_set_ids(&tscan, true);

	/*
	 * AMD systems have Nodes per package which cannot be mapped to
	 * APIC ID.
	 */
	__amd_nodes_per_pkg = tscan.amd_nodes_per_pkg;
}