/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 */

/*
 * Copyright (c) 2018, Joyent, Inc.
 */

/*
 * Generic x86 CPU Module
 *
 * This CPU module is used for generic x86 CPUs when Solaris has no other
 * CPU-specific support module available.  Code in this module should be the
 * absolute bare-bones support and must be cognizant of Intel, AMD, and any
 * other x86 CPU vendors.
 */

#include <sys/types.h>
#include <sys/cpu_module_impl.h>
#include <sys/cpuvar.h>
#include <sys/kmem.h>
#include <sys/modctl.h>
#include <sys/pghw.h>
#include <sys/x86_archext.h>

#include "gcpu.h"

/*
 * Prevent generic cpu support from loading.
 */
int gcpu_disable = 0;

#define	GCPU_MAX_CHIPID		32
static struct gcpu_chipshared *gcpu_shared[GCPU_MAX_CHIPID];
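
/*
 * Debug-only tunables: gcpu_id_disable suppresses synthetic chip ID
 * generation entirely, while gcpu_id_override[] supplies a per-chip
 * replacement identifier (see gcpu_init_ident() below).
 */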
#ifdef	DEBUG
int gcpu_id_disable = 0;
static const char *gcpu_id_override[GCPU_MAX_CHIPID] = { NULL };
#endif

#ifndef	__xpv
/*
 * This should probably be delegated to a CPU specific module. However, as those
 * haven't been developed as actively for recent CPUs, we should revisit this
 * when we do have such support and move this out of gcpu.
 *
 * This method is only supported on Intel Xeon platforms. It relies on a
 * combination of the PPIN and the cpuid signature; both are required to form
 * the synthetic ID. The ID is prefixed with iv0-INTC to indicate that this is
 * an Intel synthetic ID. The iv0 is the illumos version zero of the ID scheme
 * for Intel. If we introduce a new scheme for a new generation of processors,
 * that should rev the version field; otherwise, for a given processor, this
 * synthetic ID should not change. For more information on the PPIN and these
 * MSRs, see the relevant processor's external design specification.
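 *
 * The generated identifier has the form iv0-INTC-<cpuid signature>-<PPIN>,
 * e.g. iv0-INTC-50654-1234567890abcdef (hypothetical example values).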
 */
static char *
gcpu_init_ident_intc(cmi_hdl_t hdl)
{
	uint64_t msr;

	/*
	 * This list should be extended as new Intel Xeon family processors come
	 * out.
	 */
	switch (cmi_hdl_model(hdl)) {
	case INTC_MODEL_IVYBRIDGE_XEON:
	case INTC_MODEL_HASWELL_XEON:
	case INTC_MODEL_BROADWELL_XEON:
	case INTC_MODEL_BROADWELL_XEON_D:
	case INTC_MODEL_SKYLAKE_XEON:
		break;
	default:
		return (NULL);
	}

	if (cmi_hdl_rdmsr(hdl, MSR_PLATFORM_INFO, &msr) != CMI_SUCCESS) {
		return (NULL);
	}

	if ((msr & MSR_PLATFORM_INFO_PPIN) == 0) {
		return (NULL);
	}

	if (cmi_hdl_rdmsr(hdl, MSR_PPIN_CTL, &msr) != CMI_SUCCESS) {
		return (NULL);
	}

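	/*
	 * The PPIN is supported but not currently enabled.  If the control
	 * MSR has been locked we cannot change that; otherwise attempt to
	 * enable it before reading the PPIN.
	 */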
	if ((msr & MSR_PPIN_CTL_ENABLED) == 0) {
		if ((msr & MSR_PPIN_CTL_LOCKED) != 0) {
			return (NULL);
		}

		if (cmi_hdl_wrmsr(hdl, MSR_PPIN_CTL, MSR_PPIN_CTL_ENABLED) !=
		    CMI_SUCCESS) {
			return (NULL);
		}
	}

	if (cmi_hdl_rdmsr(hdl, MSR_PPIN, &msr) != CMI_SUCCESS) {
		return (NULL);
	}

	/*
	 * Now that we've read the data, lock the PPIN. Don't worry about the
	 * success or failure of this step, as we already have everything we
	 * need. It is possible, for example, that it was already locked in
	 * the enabled state.
	 */
	(void) cmi_hdl_wrmsr(hdl, MSR_PPIN_CTL, MSR_PPIN_CTL_LOCKED);

	return (kmem_asprintf("iv0-INTC-%x-%llx", cmi_hdl_chipsig(hdl), msr));
}
#endif	/* __xpv */

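/*
 * Record a synthetic chip identifier in the shared per-chip state, if one
 * can be generated for this CPU vendor.
 */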
static void
gcpu_init_ident(cmi_hdl_t hdl, struct gcpu_chipshared *sp)
{
#ifdef	DEBUG
	uint_t chipid;

	/*
	 * On debug, allow a developer to override the string to more
	 * easily test CPU autoreplace without needing to physically
	 * replace a CPU.
	 */
	if (gcpu_id_disable != 0) {
		return;
	}

	chipid = cmi_hdl_chipid(hdl);
	if (gcpu_id_override[chipid] != NULL) {
		sp->gcpus_ident = strdup(gcpu_id_override[chipid]);
		return;
	}
#endif

#ifndef __xpv
	switch (cmi_hdl_vendor(hdl)) {
	case X86_VENDOR_Intel:
		sp->gcpus_ident = gcpu_init_ident_intc(hdl);
		break;
	default:
		break;
	}
#endif	/* __xpv */
}

/*
 * Our cmi_init entry point, called during startup of each cpu instance.
 */
int
gcpu_init(cmi_hdl_t hdl, void **datap)
{
	uint_t chipid = cmi_hdl_chipid(hdl);
	struct gcpu_chipshared *sp, *osp;
	gcpu_data_t *gcpu;

	if (gcpu_disable || chipid >= GCPU_MAX_CHIPID)
		return (ENOTSUP);

	/*
	 * Allocate the state structure for this cpu.  We will only
	 * allocate the bank logout areas in gcpu_mca_init once we
	 * know how many banks there are.
	 */
	gcpu = *datap = kmem_zalloc(sizeof (gcpu_data_t), KM_SLEEP);
	cmi_hdl_hold(hdl);	/* release in gcpu_fini */
	gcpu->gcpu_hdl = hdl;

	/*
	 * Allocate a chipshared structure if no sibling cpu has already
	 * allocated it, but allow for the fact that a sibling core may
	 * be starting up in parallel.
	 */
	if ((sp = gcpu_shared[chipid]) == NULL) {
		sp = kmem_zalloc(sizeof (struct gcpu_chipshared), KM_SLEEP);
		mutex_init(&sp->gcpus_poll_lock, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&sp->gcpus_cfglock, NULL, MUTEX_DRIVER, NULL);
		osp = atomic_cas_ptr(&gcpu_shared[chipid], NULL, sp);
		if (osp != NULL) {
			mutex_destroy(&sp->gcpus_cfglock);
			mutex_destroy(&sp->gcpus_poll_lock);
			kmem_free(sp, sizeof (struct gcpu_chipshared));
			sp = osp;
		} else {
			gcpu_init_ident(hdl, sp);
		}
	}

	atomic_inc_32(&sp->gcpus_actv_cnt);
	gcpu->gcpu_shared = sp;

	return (0);
}

/*
 * Our cmi_fini entry point, called to undo the work done by gcpu_init().
 */
void
gcpu_fini(cmi_hdl_t hdl)
{
	uint_t chipid = cmi_hdl_chipid(hdl);
	gcpu_data_t *gcpu = cmi_hdl_getcmidata(hdl);
	struct gcpu_chipshared *sp;

	if (gcpu_disable || chipid >= GCPU_MAX_CHIPID)
		return;

	gcpu_mca_fini(hdl);

	/*
	 * Keep shared data in cache for reuse.
	 */
	sp = gcpu_shared[chipid];
	ASSERT(sp != NULL);
	atomic_dec_32(&sp->gcpus_actv_cnt);

	if (gcpu != NULL)
		kmem_free(gcpu, sizeof (gcpu_data_t));

	/* Release reference count held in gcpu_init(). */
	cmi_hdl_rele(hdl);
}

void
gcpu_post_startup(cmi_hdl_t hdl)
{
	gcpu_data_t *gcpu = cmi_hdl_getcmidata(hdl);

	if (gcpu_disable)
		return;

	if (gcpu != NULL)
		cms_post_startup(hdl);
#ifdef __xpv
	/*
	 * All cpu handles are initialized so we can begin polling now.
	 * Furthermore, our virq mechanism requires that everything be run
	 * on cpu 0, which we ensure by starting the polling from here.
	 */
	gcpu_mca_poll_start(hdl);
#endif
}

void
gcpu_post_mpstartup(cmi_hdl_t hdl)
{
	if (gcpu_disable)
		return;

	cms_post_mpstartup(hdl);

#ifndef __xpv
	/*
	 * All cpu handles are initialized only once all cpus are started, so we
	 * can begin polling post mp startup.
	 */
	gcpu_mca_poll_start(hdl);
#endif
}

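/*
 * Return the synthetic chip identifier recorded for this handle's chip, or
 * NULL if none is available.
 */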
const char *
gcpu_ident(cmi_hdl_t hdl)
{
	uint_t chipid;
	struct gcpu_chipshared *sp;

	if (gcpu_disable)
		return (NULL);

	chipid = cmi_hdl_chipid(hdl);
	if (chipid >= GCPU_MAX_CHIPID)
		return (NULL);

	if (cmi_hdl_getcmidata(hdl) == NULL)
		return (NULL);

	sp = gcpu_shared[chipid];
	return (sp->gcpus_ident);
}

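/*
 * GCPU_OP() selects the native or Xen (xpv) variant of an operation,
 * depending on which platform this module is built for.
 */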
#ifdef __xpv
#define	GCPU_OP(ntvop, xpvop)	xpvop
#else
#define	GCPU_OP(ntvop, xpvop)	ntvop
#endif

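/*
 * These exported symbols form the module's interface to the common cmi
 * framework, which dispatches into the handlers listed below.
 */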
cmi_api_ver_t _cmi_api_version = CMI_API_VERSION_3;

const cmi_ops_t _cmi_ops = {
	gcpu_init,				/* cmi_init */
	gcpu_post_startup,			/* cmi_post_startup */
	gcpu_post_mpstartup,			/* cmi_post_mpstartup */
	gcpu_faulted_enter,			/* cmi_faulted_enter */
	gcpu_faulted_exit,			/* cmi_faulted_exit */
	gcpu_mca_init,				/* cmi_mca_init */
	GCPU_OP(gcpu_mca_trap, NULL),		/* cmi_mca_trap */
	GCPU_OP(gcpu_cmci_trap, NULL),		/* cmi_cmci_trap */
	gcpu_msrinject,				/* cmi_msrinject */
	GCPU_OP(gcpu_hdl_poke, NULL),		/* cmi_hdl_poke */
	gcpu_fini,				/* cmi_fini */
	GCPU_OP(NULL, gcpu_xpv_panic_callback),	/* cmi_panic_callback */
	gcpu_ident				/* cmi_ident */
};

static struct modlcpu modlcpu = {
	&mod_cpuops,
	"Generic x86 CPU Module"
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modlcpu,
	NULL
};

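/*
 * Standard loadable module entry points.
 */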
int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_fini(void)
{
	return (mod_remove(&modlinkage));
}