1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 #ifndef __xpv
28 #error "This file is for i86xpv only"
29 #endif
30
31 #include <sys/types.h>
32 #include <sys/mca_x86.h>
33 #include <sys/archsystm.h>
34 #include <sys/hypervisor.h>
35
36 #include "../../i86pc/cpu/generic_cpu/gcpu.h"
37
38 extern xpv_mca_panic_data_t *xpv_mca_panic_data;
39
40 mc_info_t gcpu_mce_data;
41
/*
 * Direction in which mctelem_traverse() walks a linked telemetry list
 * captured in xpv_mca_panic_data: follow each node's forward pointer
 * (mpd_fwdptr_offset) or its reverse pointer (mpd_revptr_offset).
 */
enum mctelem_direction {
	MCTELEM_FORWARD,
	MCTELEM_REVERSE
};
46
47 static uint32_t gcpu_xpv_hdl_lookupfails;
48 static uint32_t gcpu_xpv_bankhdr_found;
49 static uint32_t gcpu_xpv_spechdr_found;
50
51 static uint32_t gcpu_xpv_mca_hcall_fails[16];
52 static uint32_t gcpu_xpv_globalhdr_found;
53
54 static cmi_mca_regs_t *gcpu_xpv_bankregs;
55 size_t gcpu_xpv_bankregs_sz;
56
57 #define GCPU_XPV_ARCH_NREGS 3
58
59 void
gcpu_xpv_mca_init(int nbanks)60 gcpu_xpv_mca_init(int nbanks)
61 {
62 if (gcpu_xpv_bankregs == NULL) {
63 gcpu_xpv_bankregs_sz = nbanks * GCPU_XPV_ARCH_NREGS *
64 sizeof (cmi_mca_regs_t);
65
66 gcpu_xpv_bankregs = kmem_zalloc(gcpu_xpv_bankregs_sz, KM_SLEEP);
67 }
68 }
69
70 static void
gcpu_xpv_proxy_logout(int what,struct mc_info * mi,struct mcinfo_common ** micp,int * idxp,cmi_mca_regs_t * bankregs,size_t bankregs_sz)71 gcpu_xpv_proxy_logout(int what, struct mc_info *mi, struct mcinfo_common **micp,
72 int *idxp, cmi_mca_regs_t *bankregs, size_t bankregs_sz)
73 {
74 struct mcinfo_global *mgi = (struct mcinfo_global *)(uintptr_t)*micp;
75 struct mcinfo_common *mic;
76 struct mcinfo_bank *mib;
77 cmi_hdl_t hdl = NULL;
78 cmi_mca_regs_t *mcrp;
79 int idx = *idxp;
80 int tried = 0;
81 int j;
82
83 /* Skip over the MC_TYPE_GLOBAL record */
84 ASSERT(mgi->common.type == MC_TYPE_GLOBAL);
85 mcrp = NULL;
86 mic = x86_mcinfo_next((struct mcinfo_common *)(uintptr_t)mgi);
87 idx++;
88
89 /*
90 * Process all MC_TYPE_BANK and MC_TYPE_EXTENDED records that
91 * follow the MC_TYPE_GLOBAL record, ending when we reach any
92 * other record type or when we're out of record.
93 *
94 * We skip over MC_TYPE_EXTENDED for now - nothing consumes
95 * the extended MSR data even in native Solaris.
96 */
97 while (idx < x86_mcinfo_nentries(mi) &&
98 (mic->type == MC_TYPE_BANK || mic->type == MC_TYPE_EXTENDED)) {
99 if (mic->type == MC_TYPE_EXTENDED) {
100 gcpu_xpv_spechdr_found++;
101 goto next_record;
102 } else {
103 gcpu_xpv_bankhdr_found++;
104 }
105
106 if (hdl == NULL && !tried++) {
107 if ((hdl = cmi_hdl_lookup(CMI_HDL_SOLARIS_xVM_MCA,
108 mgi->mc_socketid, mgi->mc_coreid,
109 mgi->mc_core_threadid)) == NULL) {
110 gcpu_xpv_hdl_lookupfails++;
111 goto next_record;
112 } else {
113 bzero(bankregs, bankregs_sz);
114 mcrp = bankregs;
115 }
116 }
117
118 mib = (struct mcinfo_bank *)(uintptr_t)mic;
119
120 mcrp->cmr_msrnum = IA32_MSR_MC(mib->mc_bank, STATUS);
121 mcrp->cmr_msrval = mib->mc_status;
122 mcrp++;
123
124 mcrp->cmr_msrnum = IA32_MSR_MC(mib->mc_bank, ADDR);
125 mcrp->cmr_msrval = mib->mc_addr;
126 mcrp++;
127
128 mcrp->cmr_msrnum = IA32_MSR_MC(mib->mc_bank, MISC);
129 mcrp->cmr_msrval = mib->mc_misc;
130 mcrp++;
131
132 next_record:
133 idx++;
134 mic = x86_mcinfo_next(mic);
135 }
136
137 /*
138 * If we found some telemetry and a handle to associate it with
139 * then "forward" that telemetry into the MSR interpose layer
140 * and then request logout which will find that interposed
141 * telemetry. Indicate that logout code should clear bank
142 * status registers so that it can invalidate them in the interpose
143 * layer - they won't actually make it as far as real MSR writes.
144 */
145 if (hdl != NULL) {
146 cmi_mca_regs_t gsr;
147 gcpu_mce_status_t mce;
148
149 gsr.cmr_msrnum = IA32_MSR_MCG_STATUS;
150 gsr.cmr_msrval = mgi->mc_gstatus;
151 cmi_hdl_msrforward(hdl, &gsr, 1);
152
153 cmi_hdl_msrforward(hdl, bankregs, mcrp - bankregs);
154 gcpu_mca_logout(hdl, NULL, (uint64_t)-1, &mce, B_TRUE, what);
155 cmi_hdl_rele(hdl);
156 }
157
158 /*
159 * We must move the index on at least one record or our caller
160 * may loop forever; our initial increment over the global
161 * record assures this.
162 */
163 ASSERT(idx > *idxp);
164 *idxp = idx;
165 *micp = mic;
166 }
167
168 /*
169 * Process a struct mc_info.
170 *
171 * There are x86_mcinfo_nentries(mi) entries. An entry of type
172 * MC_TYPE_GLOBAL precedes a number (potentially zero) of
173 * entries of type MC_TYPE_BANK for telemetry from MCA banks
174 * of the resource identified in the MC_TYPE_GLOBAL entry.
175 * I think there can be multiple MC_TYPE_GLOBAL entries per buffer.
176 */
177 void
gcpu_xpv_mci_process(mc_info_t * mi,int type,cmi_mca_regs_t * bankregs,size_t bankregs_sz)178 gcpu_xpv_mci_process(mc_info_t *mi, int type,
179 cmi_mca_regs_t *bankregs, size_t bankregs_sz)
180 {
181 struct mcinfo_common *mic;
182 int idx;
183
184 mic = x86_mcinfo_first(mi);
185
186 idx = 0;
187 while (idx < x86_mcinfo_nentries(mi)) {
188 if (mic->type == MC_TYPE_GLOBAL) {
189 gcpu_xpv_globalhdr_found++;
190 gcpu_xpv_proxy_logout(type == XEN_MC_URGENT ?
191 GCPU_MPT_WHAT_MC_ERR : GCPU_MPT_WHAT_XPV_VIRQ,
192 mi, &mic, &idx, bankregs, bankregs_sz);
193 } else {
194 idx++;
195 mic = x86_mcinfo_next(mic);
196 }
197 }
198 }
199
200 int
gcpu_xpv_telem_read(mc_info_t * mci,int type,uint64_t * idp)201 gcpu_xpv_telem_read(mc_info_t *mci, int type, uint64_t *idp)
202 {
203 xen_mc_t xmc;
204 xen_mc_fetch_t *mcf = &xmc.u.mc_fetch;
205 long err;
206
207 mcf->flags = type;
208 set_xen_guest_handle(mcf->data, mci);
209
210 if ((err = HYPERVISOR_mca(XEN_MC_fetch, &xmc)) != 0) {
211 gcpu_xpv_mca_hcall_fails[err < 16 ? err : 0]++;
212 return (0);
213 }
214
215 if (mcf->flags == XEN_MC_OK) {
216 *idp = mcf->fetch_id;
217 return (1);
218 } else {
219 *idp = 0;
220 return (0);
221 }
222 }
223
224 void
gcpu_xpv_telem_ack(int type,uint64_t fetch_id)225 gcpu_xpv_telem_ack(int type, uint64_t fetch_id)
226 {
227 xen_mc_t xmc;
228 struct xen_mc_fetch *mcf = &xmc.u.mc_fetch;
229
230 mcf->flags = type | XEN_MC_ACK;
231 mcf->fetch_id = fetch_id;
232 (void) HYPERVISOR_mca(XEN_MC_fetch, &xmc);
233 }
234
235 static void
mctelem_traverse(void * head,enum mctelem_direction direction,boolean_t urgent)236 mctelem_traverse(void *head, enum mctelem_direction direction,
237 boolean_t urgent)
238 {
239 char *tep = head, **ntepp;
240 int noff = (direction == MCTELEM_FORWARD) ?
241 xpv_mca_panic_data->mpd_fwdptr_offset :
242 xpv_mca_panic_data->mpd_revptr_offset;
243
244
245 while (tep != NULL) {
246 struct mc_info **mcip = (struct mc_info **)
247 (tep + xpv_mca_panic_data->mpd_dataptr_offset);
248
249 gcpu_xpv_mci_process(*mcip,
250 urgent ? XEN_MC_URGENT : XEN_MC_NONURGENT,
251 gcpu_xpv_bankregs, gcpu_xpv_bankregs_sz);
252
253 ntepp = (char **)(tep + noff);
254 tep = *ntepp;
255 }
256 }
257
258 /*
259 * Callback made from panicsys. We may have reached panicsys from a
260 * Solaris-initiated panic or a hypervisor-initiated panic; for the
261 * latter we may not perform any hypercalls. Our task is to retrieve
262 * unprocessed MCA telemetry from the hypervisor and shovel it into
263 * errorqs for later processing during panic.
264 */
265 void
gcpu_xpv_panic_callback(void)266 gcpu_xpv_panic_callback(void)
267 {
268 if (IN_XPV_PANIC()) {
269 xpv_mca_panic_data_t *ti = xpv_mca_panic_data;
270
271 if (ti == NULL ||
272 ti->mpd_magic != MCA_PANICDATA_MAGIC ||
273 ti->mpd_version != MCA_PANICDATA_VERS)
274 return;
275
276 mctelem_traverse(ti->mpd_urgent_processing, MCTELEM_FORWARD,
277 B_TRUE);
278 mctelem_traverse(ti->mpd_urgent_dangling, MCTELEM_REVERSE,
279 B_TRUE);
280 mctelem_traverse(ti->mpd_urgent_committed, MCTELEM_REVERSE,
281 B_TRUE);
282
283 mctelem_traverse(ti->mpd_nonurgent_processing, MCTELEM_FORWARD,
284 B_FALSE);
285 mctelem_traverse(ti->mpd_nonurgent_dangling, MCTELEM_REVERSE,
286 B_FALSE);
287 mctelem_traverse(ti->mpd_nonurgent_committed, MCTELEM_REVERSE,
288 B_FALSE);
289 } else {
290 int types[] = { XEN_MC_URGENT, XEN_MC_NONURGENT };
291 uint64_t fetch_id;
292 int i;
293
294 for (i = 0; i < sizeof (types) / sizeof (types[0]); i++) {
295 while (gcpu_xpv_telem_read(&gcpu_mce_data,
296 types[i], &fetch_id)) {
297 gcpu_xpv_mci_process(&gcpu_mce_data, types[i],
298 gcpu_xpv_bankregs, gcpu_xpv_bankregs_sz);
299 gcpu_xpv_telem_ack(types[i], fetch_id);
300 }
301 }
302 }
303 }
304