/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 */

/*
 * Generic x86 CPU Module
 *
 * This CPU module is used for generic x86 CPUs when Solaris has no other
 * CPU-specific support module available.  Code in this module should be the
 * absolute bare-bones support and must be cognizant of Intel, AMD, and
 * other x86 vendors.
 */

#include <sys/types.h>
#include <sys/cpu_module_impl.h>
#include <sys/cpuvar.h>
#include <sys/kmem.h>
#include <sys/modctl.h>
#include <sys/pghw.h>
#include <sys/x86_archext.h>

#include "gcpu.h"

/*
 * Set to a non-zero value to prevent generic cpu support from loading.
 */
int gcpu_disable = 0;

#define	GCPU_MAX_CHIPID		32
static struct gcpu_chipshared *gcpu_shared[GCPU_MAX_CHIPID];
#ifdef	DEBUG
int gcpu_id_disable = 0;
static const char *gcpu_id_override[GCPU_MAX_CHIPID] = { NULL };
#endif
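
/*
 * As an example of intended use on a DEBUG kernel, a developer might set
 * gcpu_id_disable to suppress ident generation entirely, or point
 * gcpu_id_override[chipid] at a replacement string (e.g. with mdb -kw)
 * before the corresponding chip is configured.
 */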

#ifndef	__xpv
/*
 * This should probably be delegated to a CPU-specific module. However, as
 * those haven't been developed as actively for recent CPUs, we should
 * revisit this once such a module exists and move this logic out of gcpu.
 *
 * This method is only supported on Intel Xeon platforms. It relies on a
 * combination of the PPIN and the cpuid signature. Both are required to form
 * the synthetic ID. This ID is prefixed with iv0-INTC to represent that this
 * is an Intel synthetic ID. The iv0 is the illumos version zero of the ID
 * for Intel. If we have a new scheme for a new generation of processors,
 * then that should rev the version field; otherwise, for a given processor,
 * this synthetic ID should not change. For more information on the PPIN and
 * these MSRs, see the relevant processor external design specification.
 */
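/*
 * As a purely hypothetical example, a chip with a cpuid signature of
 * 0x50654 and a PPIN of 0xdeadbeefcafe would yield the ident string
 * "iv0-INTC-50654-deadbeefcafe".
 */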
static char *
gcpu_init_ident_intc(cmi_hdl_t hdl)
{
	uint64_t msr;

	/*
	 * This list should be extended as new Intel Xeon family processors come
	 * out.
	 */
	switch (cmi_hdl_model(hdl)) {
	case INTC_MODEL_IVYBRIDGE_XEON:
	case INTC_MODEL_HASWELL_XEON:
	case INTC_MODEL_BROADWELL_XEON:
	case INTC_MODEL_BROADWELL_XEON_D:
	case INTC_MODEL_SKYLAKE_XEON:
		break;
	default:
		return (NULL);
	}

	if (cmi_hdl_rdmsr(hdl, MSR_PLATFORM_INFO, &msr) != CMI_SUCCESS) {
		return (NULL);
	}

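	/*
	 * MSR_PLATFORM_INFO advertises whether this part implements a PPIN
	 * at all; if the capability bit is clear there is nothing further
	 * we can do.
	 */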
	if ((msr & MSR_PLATFORM_INFO_PPIN) == 0) {
		return (NULL);
	}

	if (cmi_hdl_rdmsr(hdl, MSR_PPIN_CTL, &msr) != CMI_SUCCESS) {
		return (NULL);
	}

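	/*
	 * If the PPIN is not currently enabled, we may only turn it on if
	 * the control MSR has not been locked; if it was locked with the
	 * PPIN disabled, there is nothing more we can do.
	 */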
	if ((msr & MSR_PPIN_CTL_ENABLED) == 0) {
		if ((msr & MSR_PPIN_CTL_LOCKED) != 0) {
			return (NULL);
		}

		if (cmi_hdl_wrmsr(hdl, MSR_PPIN_CTL, MSR_PPIN_CTL_ENABLED) !=
		    CMI_SUCCESS) {
			return (NULL);
		}
	}

	if (cmi_hdl_rdmsr(hdl, MSR_PPIN, &msr) != CMI_SUCCESS) {
		return (NULL);
	}

	/*
	 * Now that we've read the data, lock the PPIN. Don't worry about the
	 * success or failure of this part, as we will have gotten everything
	 * that we need. It is possible that it was locked in the enabled
	 * state, for example.
	 */
	(void) cmi_hdl_wrmsr(hdl, MSR_PPIN_CTL, MSR_PPIN_CTL_LOCKED);

	return (kmem_asprintf("iv0-INTC-%x-%llx", cmi_hdl_chipsig(hdl), msr));
}
#endif	/* __xpv */

static void
gcpu_init_ident(cmi_hdl_t hdl, struct gcpu_chipshared *sp)
{
#ifdef	DEBUG
	uint_t chipid;

	/*
	 * On debug, allow a developer to override the string to more
	 * easily test CPU autoreplace without needing to physically
	 * replace a CPU.
	 */
	if (gcpu_id_disable != 0) {
		return;
	}

	chipid = cmi_hdl_chipid(hdl);
	if (gcpu_id_override[chipid] != NULL) {
		sp->gcpus_ident = strdup(gcpu_id_override[chipid]);
		return;
	}
#endif

#ifndef __xpv
	switch (cmi_hdl_vendor(hdl)) {
	case X86_VENDOR_Intel:
		sp->gcpus_ident = gcpu_init_ident_intc(hdl);
		break;
	default:
		break;
	}
#endif	/* __xpv */
}

/*
 * Our cmi_init entry point, called during startup of each cpu instance.
 */
int
gcpu_init(cmi_hdl_t hdl, void **datap)
{
	uint_t chipid = cmi_hdl_chipid(hdl);
	struct gcpu_chipshared *sp, *osp;
	gcpu_data_t *gcpu;

	if (gcpu_disable || chipid >= GCPU_MAX_CHIPID)
		return (ENOTSUP);

	/*
	 * Allocate the state structure for this cpu.  We will only
	 * allocate the bank logout areas in gcpu_mca_init once we
	 * know how many banks there are.
	 */
	gcpu = *datap = kmem_zalloc(sizeof (gcpu_data_t), KM_SLEEP);
	cmi_hdl_hold(hdl);	/* release in gcpu_fini */
	gcpu->gcpu_hdl = hdl;

	/*
	 * Allocate a chipshared structure if no sibling cpu has already
	 * allocated it, but allow for the fact that a sibling core may
	 * be starting up in parallel.
	 */
	if ((sp = gcpu_shared[chipid]) == NULL) {
		sp = kmem_zalloc(sizeof (struct gcpu_chipshared), KM_SLEEP);
		mutex_init(&sp->gcpus_poll_lock, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&sp->gcpus_cfglock, NULL, MUTEX_DRIVER, NULL);
		osp = atomic_cas_ptr(&gcpu_shared[chipid], NULL, sp);
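		/*
		 * If a sibling core installed its structure first, the
		 * compare-and-swap returns that existing pointer; discard
		 * our allocation and use the winner's copy instead.
		 */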
		if (osp != NULL) {
			mutex_destroy(&sp->gcpus_cfglock);
			mutex_destroy(&sp->gcpus_poll_lock);
			kmem_free(sp, sizeof (struct gcpu_chipshared));
			sp = osp;
		} else {
			gcpu_init_ident(hdl, sp);
		}
	}

	atomic_inc_32(&sp->gcpus_actv_cnt);
	gcpu->gcpu_shared = sp;

	return (0);
}

/*
 * Undo the configuration performed by gcpu_init().
 */
void
gcpu_fini(cmi_hdl_t hdl)
{
	uint_t chipid = cmi_hdl_chipid(hdl);
	gcpu_data_t *gcpu = cmi_hdl_getcmidata(hdl);
	struct gcpu_chipshared *sp;

	if (gcpu_disable || chipid >= GCPU_MAX_CHIPID)
		return;

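	/* Tear down the MCA state set up in gcpu_mca_init(). */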
	gcpu_mca_fini(hdl);

	/*
	 * The chipshared structure is deliberately not freed: it is kept
	 * cached so it can be reused if a cpu in this socket is
	 * reconfigured later.
	 */
	sp = gcpu_shared[chipid];
	ASSERT(sp != NULL);
	atomic_dec_32(&sp->gcpus_actv_cnt);

	if (gcpu != NULL)
		kmem_free(gcpu, sizeof (gcpu_data_t));

	/* Release reference count held in gcpu_init(). */
	cmi_hdl_rele(hdl);
}

void
gcpu_post_startup(cmi_hdl_t hdl)
{
	gcpu_data_t *gcpu = cmi_hdl_getcmidata(hdl);

	if (gcpu_disable)
		return;

	if (gcpu != NULL)
		cms_post_startup(hdl);
#ifdef __xpv
	/*
	 * All cpu handles are initialized, so we can begin polling now.
	 * Furthermore, our virq mechanism requires that everything be run
	 * on cpu 0, which we ensure by starting the poller from here.
	 */
	gcpu_mca_poll_start(hdl);
#endif
}

void
gcpu_post_mpstartup(cmi_hdl_t hdl)
{
	if (gcpu_disable)
		return;

	cms_post_mpstartup(hdl);

#ifndef __xpv
	/*
	 * All cpu handles are initialized only once all cpus are started,
	 * so we can begin polling post mp startup.
	 */
	gcpu_mca_poll_start(hdl);
#endif
}

const char *
gcpu_ident(cmi_hdl_t hdl)
{
	uint_t chipid;
	struct gcpu_chipshared *sp;

	if (gcpu_disable)
		return (NULL);

	chipid = cmi_hdl_chipid(hdl);
	if (chipid >= GCPU_MAX_CHIPID)
		return (NULL);

	if (cmi_hdl_getcmidata(hdl) == NULL)
		return (NULL);

	sp = gcpu_shared[chipid];
	return (sp->gcpus_ident);
}

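/*
 * Select the native or xVM (Xen) variant of an operation at compile time;
 * entry points with no counterpart on a given platform are NULL.
 */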
#ifdef __xpv
#define	GCPU_OP(ntvop, xpvop)	xpvop
#else
#define	GCPU_OP(ntvop, xpvop)	ntvop
#endif

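/* The version of the cpu module interface (cmi) API that we implement. */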
cmi_api_ver_t _cmi_api_version = CMI_API_VERSION_3;

const cmi_ops_t _cmi_ops = {
	gcpu_init,				/* cmi_init */
	gcpu_post_startup,			/* cmi_post_startup */
	gcpu_post_mpstartup,			/* cmi_post_mpstartup */
	gcpu_faulted_enter,			/* cmi_faulted_enter */
	gcpu_faulted_exit,			/* cmi_faulted_exit */
	gcpu_mca_init,				/* cmi_mca_init */
	GCPU_OP(gcpu_mca_trap, NULL),		/* cmi_mca_trap */
	GCPU_OP(gcpu_cmci_trap, NULL),		/* cmi_cmci_trap */
	gcpu_msrinject,				/* cmi_msrinject */
	GCPU_OP(gcpu_hdl_poke, NULL),		/* cmi_hdl_poke */
	gcpu_fini,				/* cmi_fini */
	GCPU_OP(NULL, gcpu_xpv_panic_callback),	/* cmi_panic_callback */
	gcpu_ident				/* cmi_ident */
};

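/*
 * Loadable module linkage: gcpu is delivered as a CPU-class module
 * (modlcpu) and provides only the standard _init/_info/_fini entry points.
 */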
static struct modlcpu modlcpu = {
	&mod_cpuops,
	"Generic x86 CPU Module"
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modlcpu,
	NULL
};

int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_fini(void)
{
	return (mod_remove(&modlinkage));
}