/*	$OpenBSD: powernow-k8.c,v 1.29 2022/02/21 08:16:08 jsg Exp $ */
/*
 * Copyright (c) 2004 Martin Végiard.
 * Copyright (c) 2004-2005 Bruno Ducrot
 * Copyright (c) 2004 FUKUDA Nobuhiko <nfukuda@spa.is.uec.ac.jp>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/* AMD POWERNOW K8 driver */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>

#include <dev/isa/isareg.h>
#include <amd64/include/isa_machdep.h>

#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/bus.h>

#include "acpicpu.h"

#if NACPICPU > 0
#include <dev/acpi/acpidev.h>
#endif

#define BIOS_START			0xe0000
#define	BIOS_LEN			0x20000

extern int setperf_prio;
extern int perflevel;

/*
 * MSRs and bits used by PowerNow technology
 */
#define MSR_AMDK7_FIDVID_CTL		0xc0010041
#define MSR_AMDK7_FIDVID_STATUS		0xc0010042

/* Bitfields used by K8 */

#define PN8_CTR_FID(x)			((x) & 0x3f)
#define PN8_CTR_VID(x)			(((x) & 0x1f) << 8)
#define PN8_CTR_PENDING(x)		(((x) & 1) << 32)

#define PN8_STA_CFID(x)			((x) & 0x3f)
#define PN8_STA_SFID(x)			(((x) >> 8) & 0x3f)
#define PN8_STA_MFID(x)			(((x) >> 16) & 0x3f)
#define PN8_STA_PENDING(x)		(((x) >> 31) & 0x01)
#define PN8_STA_CVID(x)			(((x) >> 32) & 0x1f)
#define PN8_STA_SVID(x)			(((x) >> 40) & 0x1f)
#define PN8_STA_MVID(x)			(((x) >> 48) & 0x1f)
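
/*
 * Layout of MSR_AMDK7_FIDVID_STATUS as decoded by the macros above:
 * current/startup/max FID live in bits 0-5, 8-13 and 16-21, the
 * change-pending flag is bit 31, and current/startup/max VID live in
 * bits 32-36, 40-44 and 48-52.
 */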

/* PSB "reserved" byte to PowerNow K8 configuration */
#define PN8_PSB_TO_RVO(x)		((x) & 0x03)
#define PN8_PSB_TO_IRT(x)		(((x) >> 2) & 0x03)
#define PN8_PSB_TO_MVS(x)		(((x) >> 4) & 0x03)
#define PN8_PSB_TO_BATT(x)		(((x) >> 6) & 0x03)

/* ACPI _PSS control value to PowerNow K8 configuration */
#define PN8_ACPI_CTRL_TO_FID(x)		((x) & 0x3f)
#define PN8_ACPI_CTRL_TO_VID(x)		(((x) >> 6) & 0x1f)
#define PN8_ACPI_CTRL_TO_VST(x)		(((x) >> 11) & 0x1f)
#define PN8_ACPI_CTRL_TO_MVS(x)		(((x) >> 18) & 0x03)
#define PN8_ACPI_CTRL_TO_PLL(x)		(((x) >> 20) & 0x7f)
#define PN8_ACPI_CTRL_TO_RVO(x)		(((x) >> 28) & 0x03)
#define PN8_ACPI_CTRL_TO_IRT(x)		(((x) >> 30) & 0x03)

#define PN8_PSS_CFID(x)			((x) & 0x3f)
#define PN8_PSS_CVID(x)			(((x) >> 6) & 0x1f)

#define WRITE_FIDVID(fid, vid, ctrl)	\
	wrmsr(MSR_AMDK7_FIDVID_CTL,	\
	    (((ctrl) << 32) | (1ULL << 16) | ((vid) << 8) | (fid)))
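
/*
 * The FID/VID control MSR takes the target FID in bits 0-5, the target
 * VID in bits 8-12 and the transition timing parameter in the upper 32
 * bits; bit 16 is always set here to initiate the change.
 */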


#define COUNT_OFF_IRT(irt)	DELAY(10 * (1 << (irt)))
#define COUNT_OFF_VST(vst)	DELAY(20 * (vst))

#define FID_TO_VCO_FID(fid)	\
	(((fid) < 8) ? (8 + ((fid) << 1)) : (fid))
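
/*
 * FIDs below 8 are mapped onto an equivalent "VCO FID" (8 + 2 * fid) so
 * that the frequency-stepping loop in k8pnow_transition() can compare
 * distances between operating points on a single scale.
 */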

#define POWERNOW_MAX_STATES		16

struct k8pnow_state {
	int freq;
	uint8_t fid;
	uint8_t vid;
};

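/*
 * Rough glossary for the transition parameters kept per CPU: vst is the
 * voltage stabilization time, mvs the maximum voltage step, pll the PLL
 * lock time, rvo the ramp voltage offset, irt the isochronous relief
 * time and sgtc the stop-grant time-out count.
 */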
struct k8pnow_cpu_state {
	struct k8pnow_state state_table[POWERNOW_MAX_STATES];
	unsigned int n_states;
	unsigned int sgtc;
	unsigned int vst;
	unsigned int mvs;
	unsigned int pll;
	unsigned int rvo;
	unsigned int irt;
	int low;
};

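/*
 * Header of the PowerNow! Specification Block (PSB) that some BIOSes
 * place in the 0xe0000-0xfffff ROM area; k8pnow_states() locates it by
 * scanning for the "AMDK7PNOW!" signature.
 */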
struct psb_s {
	char signature[10];     /* AMDK7PNOW! */
	uint8_t version;
	uint8_t flags;
	uint16_t ttime;		/* Min Settling time */
	uint8_t reserved;
	uint8_t n_pst;
};

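/*
 * Each PSB is followed by n_pst Performance State Table (PST) entries;
 * every entry is immediately followed by n_states fid/vid byte pairs,
 * which k8pnow_decode_pst() turns into the sorted state table.
 */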
struct pst_s {
	uint32_t cpuid;
	uint8_t pll;
	uint8_t fid;
	uint8_t vid;
	uint8_t n_states;
};

struct k8pnow_cpu_state *k8pnow_current_state;

int k8pnow_read_pending_wait(uint64_t *);
int k8pnow_decode_pst(struct k8pnow_cpu_state *, uint8_t *);
int k8pnow_states(struct k8pnow_cpu_state *, uint32_t, unsigned int, unsigned int);
void k8pnow_transition(struct k8pnow_cpu_state *e, int);

#if NACPICPU > 0
int k8pnow_acpi_init(struct k8pnow_cpu_state *, uint64_t);
void k8pnow_acpi_pss_changed(struct acpicpu_pss *, int);
int k8pnow_acpi_states(struct k8pnow_cpu_state *, struct acpicpu_pss *, int,
    uint64_t);
#endif

int
k8pnow_read_pending_wait(uint64_t *status)
{
	unsigned int i = 100000;

	while (i--) {
		*status = rdmsr(MSR_AMDK7_FIDVID_STATUS);
		if (!PN8_STA_PENDING(*status))
			return 0;

	}
	printf("k8pnow_read_pending_wait: change pending stuck.\n");
	return 1;
}

void
k8_powernow_setperf(int level)
{
	unsigned int i;
	struct k8pnow_cpu_state *cstate;

	cstate = k8pnow_current_state;

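	/*
	 * level is a percentage (0-100); map it onto an index into the
	 * state table, which is kept sorted by ascending frequency.
	 */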
	i = ((level * cstate->n_states) + 1) / 101;
	if (i >= cstate->n_states)
		i = cstate->n_states - 1;

	k8pnow_transition(cstate, i);
}

void
k8pnow_transition(struct k8pnow_cpu_state *cstate, int level)
{
	uint64_t status;
	int cfid, cvid, fid = 0, vid = 0;
	int rvo;
	u_int val;

	/*
	 * We don't do a k8pnow_read_pending_wait() here; we only need to
	 * make sure the change-pending bit isn't stuck.
	 */
	status = rdmsr(MSR_AMDK7_FIDVID_STATUS);
	if (PN8_STA_PENDING(status))
		return;
	cfid = PN8_STA_CFID(status);
	cvid = PN8_STA_CVID(status);

	fid = cstate->state_table[level].fid;
	vid = cstate->state_table[level].vid;

	if (fid == cfid && vid == cvid)
		return;

	/*
	 * Phase 1: Raise core voltage to requested VID if frequency is
	 * going up.
	 */
	while (cvid > vid) {
		val = cvid - (1 << cstate->mvs);
		WRITE_FIDVID(cfid, (val > 0) ? val : 0, 1ULL);
		if (k8pnow_read_pending_wait(&status))
			return;
		cvid = PN8_STA_CVID(status);
		COUNT_OFF_VST(cstate->vst);
	}

	/* ... then raise to voltage + RVO (if required) */
	for (rvo = cstate->rvo; rvo > 0 && cvid > 0; --rvo) {
		/* XXX It's not clear from the spec whether we have to do
		 * this in 0.25 steps or in MVS steps.  Therefore do it as
		 * it's done under Linux. */
		WRITE_FIDVID(cfid, cvid - 1, 1ULL);
		if (k8pnow_read_pending_wait(&status))
			return;
		cvid = PN8_STA_CVID(status);
		COUNT_OFF_VST(cstate->vst);
	}

	/* Phase 2: change to requested core frequency */
	if (cfid != fid) {
		int vco_fid, vco_cfid;

		vco_fid = FID_TO_VCO_FID(fid);
		vco_cfid = FID_TO_VCO_FID(cfid);

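		/*
		 * Step the FID toward the target so that the VCO FID never
		 * moves by more than 2 (roughly 200 MHz) per write; the PLL
		 * lock time (cstate->pll, presumably in us) is scaled to
		 * the 5 ns units the control MSR appears to expect.
		 */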
		while (abs(vco_fid - vco_cfid) > 2) {
			if (fid > cfid) {
				if (cfid > 6)
					val = cfid + 2;
				else
					val = FID_TO_VCO_FID(cfid) + 2;
			} else
				val = cfid - 2;
			WRITE_FIDVID(val, cvid, (uint64_t)cstate->pll * 1000 / 5);

			if (k8pnow_read_pending_wait(&status))
				return;
			cfid = PN8_STA_CFID(status);
			COUNT_OFF_IRT(cstate->irt);

			vco_cfid = FID_TO_VCO_FID(cfid);
		}

		WRITE_FIDVID(fid, cvid, (uint64_t) cstate->pll * 1000 / 5);
		if (k8pnow_read_pending_wait(&status))
			return;
		cfid = PN8_STA_CFID(status);
		COUNT_OFF_IRT(cstate->irt);
	}

	/* Phase 3: change to requested voltage */
	if (cvid != vid) {
		WRITE_FIDVID(cfid, vid, 1ULL);
		if (k8pnow_read_pending_wait(&status))
			return;
		cvid = PN8_STA_CVID(status);
		COUNT_OFF_VST(cstate->vst);
	}

	if (cfid == fid || cvid == vid)
		cpuspeed = cstate->state_table[level].freq;
}

/*
 * Given a set of fid/vid pairs and the number of performance states,
 * compute state_table via an insertion sort.
 */
int
k8pnow_decode_pst(struct k8pnow_cpu_state *cstate, uint8_t *p)
{
	int i, j, n;
	struct k8pnow_state state;
	for (n = 0, i = 0; i < cstate->n_states; i++) {
		state.fid = *p++;
		state.vid = *p++;

		/*
		 * The minimum supported frequency per the data sheet is
		 * 800MHz; the maximum supported frequency is 5000MHz.
		 */
		state.freq = 800 + state.fid * 100;
		j = n;
		while (j > 0 && cstate->state_table[j - 1].freq > state.freq) {
			memcpy(&cstate->state_table[j],
			    &cstate->state_table[j - 1],
			    sizeof(struct k8pnow_state));
			--j;
		}
		memcpy(&cstate->state_table[j], &state,
		    sizeof(struct k8pnow_state));
		n++;
	}
	return 1;
}

#if NACPICPU > 0

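/*
 * Fill cstate->state_table from the ACPI _PSS entries, keeping it sorted
 * by ascending frequency, and return the index of the entry matching the
 * current FID/VID from the status MSR, or -1 if the current operating
 * point is not in the table.
 */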
int
k8pnow_acpi_states(struct k8pnow_cpu_state * cstate, struct acpicpu_pss * pss,
    int nstates, uint64_t status)
{
	struct k8pnow_state state;
	int j, k, n;
	uint32_t ctrl;

	k = -1;

	for (n = 0; n < cstate->n_states; n++) {
		if ((PN8_STA_CFID(status) == PN8_PSS_CFID(pss[n].pss_status)) &&
		    (PN8_STA_CVID(status) == PN8_PSS_CVID(pss[n].pss_status)))
			k = n;
		ctrl = pss[n].pss_ctrl;
		state.fid = PN8_ACPI_CTRL_TO_FID(ctrl);
		state.vid = PN8_ACPI_CTRL_TO_VID(ctrl);

		state.freq = pss[n].pss_core_freq;
		j = n;
		while (j > 0 && cstate->state_table[j - 1].freq > state.freq) {
			memcpy(&cstate->state_table[j],
			    &cstate->state_table[j - 1],
			    sizeof(struct k8pnow_state));
			--j;
		}
		memcpy(&cstate->state_table[j], &state,
		    sizeof(struct k8pnow_state));
	}

	return k;
}

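/*
 * Notify callback invoked when the _PSS package changes (e.g. on AC or
 * battery events): rebuild the state table from the new _PSS and, if the
 * current operating point no longer appears in it, transition to the
 * state closest to perflevel.
 */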
void
k8pnow_acpi_pss_changed(struct acpicpu_pss * pss, int npss)
{
	int curs, needtran;
	struct k8pnow_cpu_state *cstate, *nstate;
	uint32_t ctrl;
	uint64_t status;

	status = rdmsr(MSR_AMDK7_FIDVID_STATUS);
	cstate = k8pnow_current_state;

	nstate = malloc(sizeof(struct k8pnow_cpu_state), M_DEVBUF, M_NOWAIT);
	if (!nstate)
		return;

	curs = k8pnow_acpi_states(nstate, pss, npss, status);
	needtran = 0;

	if (curs < 0) {
		/*
		 * Our current operating state is not among
		 * the ones found in the new PSS.
		 */
		curs = ((perflevel * npss) + 1) / 101;
		if (curs >= npss)
			curs = npss - 1;
		needtran = 1;
	}

	ctrl = pss[curs].pss_ctrl;

	nstate->rvo = PN8_ACPI_CTRL_TO_RVO(ctrl);
	nstate->vst = PN8_ACPI_CTRL_TO_VST(ctrl);
	nstate->mvs = PN8_ACPI_CTRL_TO_MVS(ctrl);
	nstate->pll = PN8_ACPI_CTRL_TO_PLL(ctrl);
	nstate->irt = PN8_ACPI_CTRL_TO_IRT(ctrl);
	nstate->low = 0;
	nstate->n_states = npss;

	if (needtran)
		k8pnow_transition(nstate, curs);

	free(cstate, M_DEVBUF, sizeof(*cstate));
	k8pnow_current_state = nstate;
}

int
k8pnow_acpi_init(struct k8pnow_cpu_state * cstate, uint64_t status)
{
	int curs;
	uint32_t ctrl;
	struct acpicpu_pss *pss;

	cstate->n_states = acpicpu_fetch_pss(&pss);
	if (cstate->n_states == 0)
		return 0;
	acpicpu_set_notify(k8pnow_acpi_pss_changed);

	curs = k8pnow_acpi_states(cstate, pss, cstate->n_states, status);
	ctrl = pss[curs].pss_ctrl;

	cstate->rvo = PN8_ACPI_CTRL_TO_RVO(ctrl);
	cstate->vst = PN8_ACPI_CTRL_TO_VST(ctrl);
	cstate->mvs = PN8_ACPI_CTRL_TO_MVS(ctrl);
	cstate->pll = PN8_ACPI_CTRL_TO_PLL(ctrl);
	cstate->irt = PN8_ACPI_CTRL_TO_IRT(ctrl);
	cstate->low = 0;

	return 1;
}

#endif /* NACPICPU */

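/*
 * BIOS fallback used when ACPI is unavailable: scan the 0xe0000-0xfffff
 * ROM area in 16-byte steps for a PSB with the "AMDK7PNOW!" signature,
 * then walk its PST entries until one matches this CPU's signature and
 * maximum FID/VID, and decode that entry's fid/vid pairs.
 */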
int
k8pnow_states(struct k8pnow_cpu_state *cstate, uint32_t cpusig,
    unsigned int fid, unsigned int vid)
{
	struct psb_s *psb;
	struct pst_s *pst;
	uint8_t *p;
	int i;

	for (p = (u_int8_t *)ISA_HOLE_VADDR(BIOS_START);
	    p < (u_int8_t *)ISA_HOLE_VADDR(BIOS_START + BIOS_LEN); p += 16) {
		if (memcmp(p, "AMDK7PNOW!", 10) == 0) {
			psb = (struct psb_s *)p;
			if (psb->version != 0x14)
				return 0;

			cstate->vst = psb->ttime;
			cstate->rvo = PN8_PSB_TO_RVO(psb->reserved);
			cstate->irt = PN8_PSB_TO_IRT(psb->reserved);
			cstate->mvs = PN8_PSB_TO_MVS(psb->reserved);
			cstate->low = PN8_PSB_TO_BATT(psb->reserved);
			p += sizeof(struct psb_s);

			for (i = 0; i < psb->n_pst; ++i) {
				pst = (struct pst_s *)p;

				cstate->pll = pst->pll;
				cstate->n_states = pst->n_states;
				if (cpusig == pst->cpuid &&
				    pst->fid == fid && pst->vid == vid) {
					return (k8pnow_decode_pst(cstate,
					    p += sizeof(struct pst_s)));
				}
				p += sizeof(struct pst_s) + 2
				    * cstate->n_states;
			}
		}
	}

	return 0;

}

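/*
 * Attach-time setup: read the current FID/VID status, build the state
 * table (from ACPI _PSS when available, otherwise from the BIOS PSB)
 * and, if any states were found, install k8_powernow_setperf() as the
 * cpu_setperf hook.
 */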
void
k8_powernow_init(struct cpu_info *ci)
{
	uint64_t status;
	u_int maxfid, maxvid, i;
	u_int32_t extcpuid, dummy;
	struct k8pnow_cpu_state *cstate;
	struct k8pnow_state *state;
	char *techname = NULL;

	if (setperf_prio > 1)
		return;

	cstate = malloc(sizeof(struct k8pnow_cpu_state), M_DEVBUF, M_NOWAIT);
	if (!cstate)
		return;

	cstate->n_states = 0;
	status = rdmsr(MSR_AMDK7_FIDVID_STATUS);
	maxfid = PN8_STA_MFID(status);
	maxvid = PN8_STA_MVID(status);

	/*
	 * If the start FID is different from the max FID, then it is a
	 * mobile processor.  If not, it is a low-powered desktop
	 * processor.
	 */
	if (PN8_STA_SFID(status) != PN8_STA_MFID(status))
		techname = "PowerNow! K8";
	else
		techname = "Cool'n'Quiet K8";

#if NACPICPU > 0
	/* If we have ACPI, check ACPI first */
	if (!k8pnow_acpi_init(cstate, status))
#endif
	{
		if (!k8pnow_states(cstate, ci->ci_signature, maxfid, maxvid)) {
			/* Extended CPUID signature value */
			CPUID(0x80000001, extcpuid, dummy, dummy, dummy);
			k8pnow_states(cstate, extcpuid, maxfid, maxvid);
		}
	}
	if (cstate->n_states) {
		printf("%s: %s %d MHz: speeds:",
		    ci->ci_dev->dv_xname, techname, cpuspeed);
		for (i = cstate->n_states; i > 0; i--) {
			state = &cstate->state_table[i-1];
			printf(" %d", state->freq);
		}
		printf(" MHz\n");
		k8pnow_current_state = cstate;
		cpu_setperf = k8_powernow_setperf;
		setperf_prio = 1;
		return;
	}
	free(cstate, M_DEVBUF, sizeof(*cstate));
}