/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sepherosa Ziehau <sepherosa@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/globaldata.h>

#include <machine/md_var.h>
#include <machine/cpufunc.h>
#include <machine/cpufreq.h>
#include <machine/cputypes.h>
#include <machine/specialreg.h>

#include "acpi.h"
#include "acpi_cpu_pstate.h"

#define AMD_APMI_HWPSTATE		0x80

#define AMD_MSR_PSTATE_CSR_MASK		0x7ULL
#define AMD1X_MSR_PSTATE_CTL		0xc0010062
#define AMD1X_MSR_PSTATE_ST		0xc0010063

#define AMD_MSR_PSTATE_EN		0x8000000000000000ULL

#define AMD1X_MSR_PSTATE_START		0xc0010064
#define AMD10_MSR_PSTATE_COUNT		5
#define AMD11_MSR_PSTATE_COUNT		8	/* starting from 11h */

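/*
 * Field extractors for the family 0fh _PSS control/status values,
 * which pack the FID/VID pair and the transition timing parameters
 * (VST, MVS, PLL lock time, RVO, IRT) into one 32 bit word.
 */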
#define AMD0F_PST_CTL_FID(cval)		(((cval) >> 0)  & 0x3f)
#define AMD0F_PST_CTL_VID(cval)		(((cval) >> 6)  & 0x1f)
#define AMD0F_PST_CTL_VST(cval)		(((cval) >> 11) & 0x7f)
#define AMD0F_PST_CTL_MVS(cval)		(((cval) >> 18) & 0x3)
#define AMD0F_PST_CTL_PLLTIME(cval)	(((cval) >> 20) & 0x7f)
#define AMD0F_PST_CTL_RVO(cval)		(((cval) >> 28) & 0x3)
#define AMD0F_PST_CTL_IRT(cval)		(((cval) >> 30) & 0x3)

#define AMD0F_PST_ST_FID(sval)		(((sval) >> 0) & 0x3f)
#define AMD0F_PST_ST_VID(sval)		(((sval) >> 6) & 0x3f)

#define INTEL_MSR_MISC_ENABLE		0x1a0
#define INTEL_MSR_MISC_EST_EN		0x10000ULL

#define INTEL_MSR_PERF_STATUS		0x198
#define INTEL_MSR_PERF_CTL		0x199
#define INTEL_MSR_PERF_MASK		0xffffULL

static const struct acpi_pst_md *
		acpi_pst_amd_probe(void);
static int	acpi_pst_amd_check_csr(const struct acpi_pst_res *,
		    const struct acpi_pst_res *);
static int	acpi_pst_amd1x_check_pstates1(const struct acpi_pstate *, int,
		    uint32_t, uint32_t);
static int	acpi_pst_amd1x_check_pstates(const struct acpi_pstate *, int);
static int	acpi_pst_amd0f_check_pstates(const struct acpi_pstate *, int);
static int	acpi_pst_amd_init(const struct acpi_pst_res *,
		    const struct acpi_pst_res *);
static int	acpi_pst_amd1x_set_pstate(const struct acpi_pst_res *,
		    const struct acpi_pst_res *, const struct acpi_pstate *);
static int	acpi_pst_amd0f_set_pstate(const struct acpi_pst_res *,
		    const struct acpi_pst_res *, const struct acpi_pstate *);
static const struct acpi_pstate *
		acpi_pst_amd1x_get_pstate(const struct acpi_pst_res *,
		    const struct acpi_pstate *, int);
static const struct acpi_pstate *
		acpi_pst_amd0f_get_pstate(const struct acpi_pst_res *,
		    const struct acpi_pstate *, int);

static const struct acpi_pst_md *
		acpi_pst_intel_probe(void);
static int	acpi_pst_intel_check_csr(const struct acpi_pst_res *,
		    const struct acpi_pst_res *);
static int	acpi_pst_intel_check_pstates(const struct acpi_pstate *, int);
static int	acpi_pst_intel_init(const struct acpi_pst_res *,
		    const struct acpi_pst_res *);
static int	acpi_pst_intel_set_pstate(const struct acpi_pst_res *,
		    const struct acpi_pst_res *, const struct acpi_pstate *);
static const struct acpi_pstate *
		acpi_pst_intel_get_pstate(const struct acpi_pst_res *,
		    const struct acpi_pstate *, int);

static int	acpi_pst_md_gas_asz(const ACPI_GENERIC_ADDRESS *);
static int	acpi_pst_md_gas_verify(const ACPI_GENERIC_ADDRESS *);
static uint32_t	acpi_pst_md_res_read(const struct acpi_pst_res *);
static void	acpi_pst_md_res_write(const struct acpi_pst_res *, uint32_t);

static const struct acpi_pst_md	acpi_pst_amd1x = {
	.pmd_check_csr		= acpi_pst_amd_check_csr,
	.pmd_check_pstates	= acpi_pst_amd1x_check_pstates,
	.pmd_init		= acpi_pst_amd_init,
	.pmd_set_pstate		= acpi_pst_amd1x_set_pstate,
	.pmd_get_pstate		= acpi_pst_amd1x_get_pstate
};

static const struct acpi_pst_md	acpi_pst_amd0f = {
	.pmd_check_csr		= acpi_pst_amd_check_csr,
	.pmd_check_pstates	= acpi_pst_amd0f_check_pstates,
	.pmd_init		= acpi_pst_amd_init,
	.pmd_set_pstate		= acpi_pst_amd0f_set_pstate,
	.pmd_get_pstate		= acpi_pst_amd0f_get_pstate
};

static const struct acpi_pst_md acpi_pst_intel = {
	.pmd_check_csr		= acpi_pst_intel_check_csr,
	.pmd_check_pstates	= acpi_pst_intel_check_pstates,
	.pmd_init		= acpi_pst_intel_init,
	.pmd_set_pstate		= acpi_pst_intel_set_pstate,
	.pmd_get_pstate		= acpi_pst_intel_get_pstate
};

static int acpi_pst_stringent_check = 1;
TUNABLE_INT("hw.acpi.cpu.pstate.stringent_check", &acpi_pst_stringent_check);

static int acpi_pst_amd1x_msr_pstate_count = AMD10_MSR_PSTATE_COUNT;

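/*
 * Return the machine dependent P-state method table for the CPU
 * vendor at hand, or NULL if P-state control is not supported.
 */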
const struct acpi_pst_md *
acpi_pst_md_probe(void)
{
	if (cpu_vendor_id == CPU_VENDOR_AMD)
		return acpi_pst_amd_probe();
	else if (cpu_vendor_id == CPU_VENDOR_INTEL)
		return acpi_pst_intel_probe();
	return NULL;
}

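/*
 * CPUID 0x80000007 EDX advertises the power management features:
 * bits 1 and 2 (FID/VID control) for family 0fh, bit 7 (hardware
 * P-state control) for family 10h and later.
 */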
static const struct acpi_pst_md *
acpi_pst_amd_probe(void)
{
	uint32_t regs[4];

	/* Only Family >= 0fh has P-State support */
	if (CPUID_TO_FAMILY(cpu_id) < 0xf)
		return NULL;

	/* Check whether APMI exists */
	if (cpu_exthigh < 0x80000007)
		return NULL;

	/* Fetch APMI */
	do_cpuid(0x80000007, regs);

	if (CPUID_TO_FAMILY(cpu_id) == 0xf) {		/* Family 0fh */
		if ((regs[3] & 0x06) == 0x06)
			return &acpi_pst_amd0f;
	} else if (CPUID_TO_FAMILY(cpu_id) >= 0x10) {	/* Family >= 10h */
		if (CPUID_TO_FAMILY(cpu_id) >= 0x11) {
			acpi_pst_amd1x_msr_pstate_count =
			    AMD11_MSR_PSTATE_COUNT;
		}
		if (regs[3] & 0x80)
			return &acpi_pst_amd1x;
	}
	return NULL;
}

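/*
 * AMD P-state control/status registers must be FFixedHW (MSR based);
 * any other address space in the _PCT is rejected.
 */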
static int
acpi_pst_amd_check_csr(const struct acpi_pst_res *ctrl,
		       const struct acpi_pst_res *status)
{
	if (ctrl->pr_gas.SpaceId != ACPI_ADR_SPACE_FIXED_HARDWARE) {
		kprintf("cpu%d: Invalid P-State control register\n", mycpuid);
		return EINVAL;
	}
	if (status->pr_gas.SpaceId != ACPI_ADR_SPACE_FIXED_HARDWARE) {
		kprintf("cpu%d: Invalid P-State status register\n", mycpuid);
		return EINVAL;
	}
	return 0;
}

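/*
 * Make sure each _PSS control value selects an MSR P-state register
 * within [msr_start, msr_end) and that the selected P-state has its
 * enable bit set.
 */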
static int
acpi_pst_amd1x_check_pstates1(const struct acpi_pstate *pstates, int npstates,
    uint32_t msr_start, uint32_t msr_end)
{
	int i;

	/*
	 * Make sure that related MSR P-State registers are enabled.
	 *
	 * NOTE:
	 * We don't check status register value here;
	 * it will not be used.
	 */
	for (i = 0; i < npstates; ++i) {
		uint64_t pstate;
		uint32_t msr;

		msr = msr_start +
		      (pstates[i].st_cval & AMD_MSR_PSTATE_CSR_MASK);
		if (msr >= msr_end) {
			kprintf("cpu%d: MSR P-State register %#08x "
				"does not exist\n", mycpuid, msr);
			return EINVAL;
		}

		pstate = rdmsr(msr);
		if ((pstate & AMD_MSR_PSTATE_EN) == 0) {
			kprintf("cpu%d: MSR P-State register %#08x "
				"is not enabled\n", mycpuid, msr);
			return EINVAL;
		}
	}
	return 0;
}

static int
acpi_pst_amd1x_check_pstates(const struct acpi_pstate *pstates, int npstates)
{
	if (npstates > acpi_pst_amd1x_msr_pstate_count) {
		kprintf("cpu%d: only %d P-states are allowed\n", mycpuid,
		    acpi_pst_amd1x_msr_pstate_count);
		return EINVAL;
	}

	return acpi_pst_amd1x_check_pstates1(pstates, npstates,
	    AMD1X_MSR_PSTATE_START,
	    AMD1X_MSR_PSTATE_START + acpi_pst_amd1x_msr_pstate_count);
}

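/*
 * Family 10h and later: write the P-state index to the P-state
 * control MSR; the hardware carries out the actual transition.
 */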
static int
acpi_pst_amd1x_set_pstate(const struct acpi_pst_res *ctrl __unused,
			  const struct acpi_pst_res *status __unused,
			  const struct acpi_pstate *pstate)
{
	uint64_t cval;

	cval = pstate->st_cval & AMD_MSR_PSTATE_CSR_MASK;
	wrmsr(AMD1X_MSR_PSTATE_CTL, cval);

	/*
	 * Don't check AMD1X_MSR_PSTATE_ST here, since it is
	 * affected by various P-State limits.
	 *
	 * For details:
	 * AMD Family 10h Processor BKDG Rev 3.20 (#31116)
	 * 2.4.2.4 P-state Transition Behavior
	 */

	return 0;
}

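/*
 * Family 10h and later: read the current P-state index from the
 * P-state status MSR and map it back to the matching ACPI P-state.
 */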
static const struct acpi_pstate *
acpi_pst_amd1x_get_pstate(const struct acpi_pst_res *status __unused,
			  const struct acpi_pstate *pstates, int npstates)
{
	uint64_t sval;
	int i;

	sval = rdmsr(AMD1X_MSR_PSTATE_ST) & AMD_MSR_PSTATE_CSR_MASK;
	for (i = 0; i < npstates; ++i) {
		if ((pstates[i].st_sval & AMD_MSR_PSTATE_CSR_MASK) == sval)
			return &pstates[i];
	}
	return NULL;
}

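/*
 * Family 0fh: sanity check the FID/VID of each _PSS entry against
 * the limits reported by the processor and validate the MVS/RVO
 * stepping.  Out-of-range intermediate entries are rejected unless
 * stringent checking is disabled.
 */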
static int
acpi_pst_amd0f_check_pstates(const struct acpi_pstate *pstates, int npstates)
{
	struct amd0f_fidvid fv_max, fv_min;
	int i;

	amd0f_fidvid_limit(&fv_min, &fv_max);

	if (fv_min.fid == fv_max.fid && fv_min.vid == fv_max.vid) {
		kprintf("cpu%d: only one P-State is supported\n", mycpuid);
		if (acpi_pst_stringent_check)
			return EOPNOTSUPP;
	}

	for (i = 0; i < npstates; ++i) {
		const struct acpi_pstate *p = &pstates[i];
		uint32_t fid, vid, mvs, rvo;
		int mvs_mv, rvo_mv;

		fid = AMD0F_PST_CTL_FID(p->st_cval);
		vid = AMD0F_PST_CTL_VID(p->st_cval);

		if (i == 0) {
			if (vid != fv_max.vid) {
				kprintf("cpu%d: max VID mismatch "
					"real %u, lim %d\n", mycpuid,
					vid, fv_max.vid);
			}
			if (fid != fv_max.fid) {
				kprintf("cpu%d: max FID mismatch "
					"real %u, lim %d\n", mycpuid,
					fid, fv_max.fid);
			}
		} else if (i == npstates - 1) {
			if (vid != fv_min.vid) {
				kprintf("cpu%d: min VID mismatch "
					"real %u, lim %d\n", mycpuid,
					vid, fv_min.vid);
			}
			if (fid != fv_min.fid) {
				kprintf("cpu%d: min FID mismatch "
					"real %u, lim %d\n", mycpuid,
					fid, fv_min.fid);
			}
		} else {
			if (fid >= fv_max.fid || fid < (fv_min.fid + 0x8)) {
				kprintf("cpu%d: Invalid FID %#x, "
					"out [%#x, %#x]\n", mycpuid, fid,
					fv_min.fid + 0x8, fv_max.fid);
				if (acpi_pst_stringent_check)
					return EINVAL;
			}
			if (vid < fv_max.vid || vid > fv_min.vid) {
				kprintf("cpu%d: Invalid VID %#x, "
					"in [%#x, %#x]\n", mycpuid, vid,
					fv_max.vid, fv_min.vid);
				if (acpi_pst_stringent_check)
					return EINVAL;
			}
		}

		mvs = AMD0F_PST_CTL_MVS(p->st_cval);
		rvo = AMD0F_PST_CTL_RVO(p->st_cval);

		/* Only 0 is allowed, i.e. 25mV stepping */
		if (mvs != 0) {
			kprintf("cpu%d: Invalid MVS %#x\n", mycpuid, mvs);
			return EINVAL;
		}

		/* -> mV */
		mvs_mv = 25 * (1 << mvs);
		rvo_mv = 25 * rvo;
		if (rvo_mv % mvs_mv != 0) {
			kprintf("cpu%d: Invalid MVS/RVO (%#x/%#x)\n",
				mycpuid, mvs, rvo);
			return EINVAL;
		}
	}
	return 0;
}

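/*
 * Family 0fh: extract the target FID/VID and the transition timing
 * parameters from the _PSS control value and let the FID/VID
 * transition code do the work.
 */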
static int
acpi_pst_amd0f_set_pstate(const struct acpi_pst_res *ctrl __unused,
			  const struct acpi_pst_res *status __unused,
			  const struct acpi_pstate *pstate)
{
	struct amd0f_fidvid fv;
	struct amd0f_xsit xsit;

	fv.fid = AMD0F_PST_CTL_FID(pstate->st_cval);
	fv.vid = AMD0F_PST_CTL_VID(pstate->st_cval);

	xsit.rvo = AMD0F_PST_CTL_RVO(pstate->st_cval);
	xsit.mvs = AMD0F_PST_CTL_MVS(pstate->st_cval);
	xsit.vst = AMD0F_PST_CTL_VST(pstate->st_cval);
	xsit.pll_time = AMD0F_PST_CTL_PLLTIME(pstate->st_cval);
	xsit.irt = AMD0F_PST_CTL_IRT(pstate->st_cval);

	return amd0f_set_fidvid(&fv, &xsit);
}

static const struct acpi_pstate *
acpi_pst_amd0f_get_pstate(const struct acpi_pst_res *status __unused,
			  const struct acpi_pstate *pstates, int npstates)
{
	struct amd0f_fidvid fv;
	int error, i;

	error = amd0f_get_fidvid(&fv);
	if (error)
		return NULL;

	for (i = 0; i < npstates; ++i) {
		const struct acpi_pstate *p = &pstates[i];

		if (fv.fid == AMD0F_PST_ST_FID(p->st_sval) &&
		    fv.vid == AMD0F_PST_ST_VID(p->st_sval))
			return p;
	}
	return NULL;
}

static int
acpi_pst_amd_init(const struct acpi_pst_res *ctrl __unused,
		  const struct acpi_pst_res *status __unused)
{
	return 0;
}

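/*
 * Enhanced SpeedStep requires the EST CPUID feature; family 6 and
 * family 0fh (or newer) CPUs are accepted.
 */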
static const struct acpi_pst_md *
acpi_pst_intel_probe(void)
{
	if ((cpu_feature2 & CPUID2_EST) == 0)
		return NULL;

	if (CPUID_TO_FAMILY(cpu_id) >= 0xf || CPUID_TO_FAMILY(cpu_id) == 0x6)
		return &acpi_pst_intel;

	return NULL;
}

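/*
 * Intel _PCT registers may be either FFixedHW (MSR based) or
 * SystemIO.  Both registers must use the same address space; for
 * SystemIO the allocated ioport resources and the GAS layout are
 * verified as well.
 */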
static int
acpi_pst_intel_check_csr(const struct acpi_pst_res *ctrl,
			 const struct acpi_pst_res *status)
{
	int error;

	if (ctrl->pr_gas.SpaceId != status->pr_gas.SpaceId) {
		kprintf("cpu%d: P-State control(%d)/status(%d) registers have "
			"different SpaceId\n", mycpuid,
			ctrl->pr_gas.SpaceId, status->pr_gas.SpaceId);
		return EINVAL;
	}

	switch (ctrl->pr_gas.SpaceId) {
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		if (ctrl->pr_res != NULL || status->pr_res != NULL) {
			/* XXX should panic() */
			kprintf("cpu%d: Allocated resource for fixed hardware "
				"registers\n", mycpuid);
			return EINVAL;
		}
		break;

	case ACPI_ADR_SPACE_SYSTEM_IO:
		if (ctrl->pr_res == NULL) {
			kprintf("cpu%d: ioport allocation failed for control "
				"register\n", mycpuid);
			return ENXIO;
		}
		error = acpi_pst_md_gas_verify(&ctrl->pr_gas);
		if (error) {
			kprintf("cpu%d: Invalid control register GAS\n",
				mycpuid);
			return error;
		}

		if (status->pr_res == NULL) {
			kprintf("cpu%d: ioport allocation failed for status "
				"register\n", mycpuid);
			return ENXIO;
		}
		error = acpi_pst_md_gas_verify(&status->pr_gas);
		if (error) {
			kprintf("cpu%d: Invalid status register GAS\n",
				mycpuid);
			return error;
		}
		break;

	default:
		kprintf("cpu%d: Invalid P-State control/status register "
			"SpaceId %d\n", mycpuid, ctrl->pr_gas.SpaceId);
		return EOPNOTSUPP;
	}
	return 0;
}

static int
acpi_pst_intel_check_pstates(const struct acpi_pstate *pstates __unused,
			     int npstates __unused)
{
	return 0;
}

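/*
 * Enable Enhanced SpeedStep through IA32_MISC_ENABLE where the enable
 * bit is implemented; it is reserved on family 0fh and on family 6
 * models before 0dh.
 */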
static int
acpi_pst_intel_init(const struct acpi_pst_res *ctrl __unused,
		    const struct acpi_pst_res *status __unused)
{
	uint64_t misc_enable;

	if (CPUID_TO_FAMILY(cpu_id) == 0xf ||
	    (CPUID_TO_FAMILY(cpu_id) == 0x6 && CPUID_TO_MODEL(cpu_id) < 0xd)) {
		/* EST enable bit is reserved in INTEL_MSR_MISC_ENABLE */
		return 0;
	}

	misc_enable = rdmsr(INTEL_MSR_MISC_ENABLE);
	if ((misc_enable & INTEL_MSR_MISC_EST_EN) == 0) {
		misc_enable |= INTEL_MSR_MISC_EST_EN;
		wrmsr(INTEL_MSR_MISC_ENABLE, misc_enable);

		misc_enable = rdmsr(INTEL_MSR_MISC_ENABLE);
		if ((misc_enable & INTEL_MSR_MISC_EST_EN) == 0) {
			kprintf("cpu%d: Can't enable EST\n", mycpuid);
			return EIO;
		}
	}
	return 0;
}

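/*
 * Write the _PSS control value either to the ioport based control
 * register or to the performance control MSR; in the MSR case the
 * bits outside the performance mask are preserved.
 */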
static int
acpi_pst_intel_set_pstate(const struct acpi_pst_res *ctrl,
			  const struct acpi_pst_res *status __unused,
			  const struct acpi_pstate *pstate)
{
	if (ctrl->pr_res != NULL) {
		acpi_pst_md_res_write(ctrl, pstate->st_cval);
	} else {
		uint64_t ctl;

		ctl = rdmsr(INTEL_MSR_PERF_CTL);
		ctl &= ~INTEL_MSR_PERF_MASK;
		ctl |= (pstate->st_cval & INTEL_MSR_PERF_MASK);
		wrmsr(INTEL_MSR_PERF_CTL, ctl);
	}
	return 0;
}

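/*
 * Read the current performance status from the ioport based status
 * register or from the performance status MSR and map it back to the
 * matching ACPI P-state.
 */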
static const struct acpi_pstate *
acpi_pst_intel_get_pstate(const struct acpi_pst_res *status,
			  const struct acpi_pstate *pstates, int npstates)
{
	int i;

	if (status->pr_res != NULL) {
		uint32_t st;

		st = acpi_pst_md_res_read(status);
		for (i = 0; i < npstates; ++i) {
			if (pstates[i].st_sval == st)
				return &pstates[i];
		}
	} else {
		uint64_t sval;

		sval = rdmsr(INTEL_MSR_PERF_STATUS) & INTEL_MSR_PERF_MASK;
		for (i = 0; i < npstates; ++i) {
			if ((pstates[i].st_sval & INTEL_MSR_PERF_MASK) == sval)
				return &pstates[i];
		}
	}
	return NULL;
}

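/*
 * Return the access size in bytes for a GAS: the AccessWidth field
 * if it is non-zero, else BitWidth / NBBY.  Only 1, 2 and 4 byte
 * accesses are supported; 0 is returned for anything else.
 */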
static int
acpi_pst_md_gas_asz(const ACPI_GENERIC_ADDRESS *gas)
{
	int asz;

	if (gas->AccessWidth != 0)
		asz = gas->AccessWidth;
	else
		asz = gas->BitWidth / NBBY;
	switch (asz) {
	case 1:
	case 2:
	case 4:
		break;
	default:
		asz = 0;
		break;
	}
	return asz;
}

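/*
 * Verify that the GAS is byte aligned and that the access window
 * implied by BitOffset and the access size fits within BitWidth.
 */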
static int
acpi_pst_md_gas_verify(const ACPI_GENERIC_ADDRESS *gas)
{
	int reg, end, asz;

	if (gas->BitOffset % NBBY != 0)
		return EINVAL;

	end = gas->BitWidth / NBBY;
	reg = gas->BitOffset / NBBY;

	if (reg >= end)
		return EINVAL;

	asz = acpi_pst_md_gas_asz(gas);
	if (asz == 0)
		return EINVAL;

	if (reg + asz > end)
		return EINVAL;
	return 0;
}

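/*
 * Read an ioport backed P-state register through bus space, using
 * the access size derived from its GAS.
 */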
static uint32_t
acpi_pst_md_res_read(const struct acpi_pst_res *res)
{
	int asz, reg;

	KKASSERT(res->pr_res != NULL);
	asz = acpi_pst_md_gas_asz(&res->pr_gas);
	reg = res->pr_gas.BitOffset / NBBY;

	switch (asz) {
	case 1:
		return bus_space_read_1(res->pr_bt, res->pr_bh, reg);
	case 2:
		return bus_space_read_2(res->pr_bt, res->pr_bh, reg);
	case 4:
		return bus_space_read_4(res->pr_bt, res->pr_bh, reg);
	}
	panic("unsupported access width %d", asz);

	/* NEVER REACHED */
	return 0;
}

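/*
 * Write an ioport backed P-state register through bus space, using
 * the access size derived from its GAS.
 */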
static void
acpi_pst_md_res_write(const struct acpi_pst_res *res, uint32_t val)
{
	int asz, reg;

	KKASSERT(res->pr_res != NULL);
	asz = acpi_pst_md_gas_asz(&res->pr_gas);
	reg = res->pr_gas.BitOffset / NBBY;

	switch (asz) {
	case 1:
		bus_space_write_1(res->pr_bt, res->pr_bh, reg, val);
		break;
	case 2:
		bus_space_write_2(res->pr_bt, res->pr_bh, reg, val);
		break;
	case 4:
		bus_space_write_4(res->pr_bt, res->pr_bh, reg, val);
		break;
	default:
		panic("unsupported access width %d", asz);
	}
}