/*
 * Copyright (c) 2014 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sepherosa Ziehau <sepherosa@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/globaldata.h>

#include <machine/md_var.h>
#include <machine/cpufunc.h>
#include <machine/cpufreq.h>
#include <machine/cputypes.h>
#include <machine/specialreg.h>

#include "acpi.h"
#include "acpi_cpu_cstate.h"

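/*
 * For Intel's vendor-specific fixed-hardware C-state entries in _CST,
 * the ACPI Generic Address Structure (GAS) fields are reinterpreted as
 * indicated by the comments on the definitions below: BitWidth carries
 * the vendor code, BitOffset the C-state class, Address the MWAIT hint
 * (arg0) and AccessWidth the flag bits (arg1).
 */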
/* GAS.BitWidth */
#define ACPI_GAS_INTEL_VENDOR			1

/* GAS.BitOffset */
#define ACPI_GAS_INTEL_CLASS_C1_IO_HALT		1
#define ACPI_GAS_INTEL_CLASS_CX_NATIVE		2

/* GAS.AccessWidth */
#define ACPI_GAS_INTEL_ARG1_HWCOORD		0x1
#define ACPI_GAS_INTEL_ARG1_BM_STS		0x2

/* GAS.Address */
#define ACPI_GAS_INTEL_ARG0_MWAIT_HINTMASK	0xffffffff

static int		acpi_cst_cx_mwait_setup(struct acpi_cst_cx *);
static void		acpi_cst_cx_mwait_enter(const struct acpi_cst_cx *);

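/*
 * Machine-dependent setup and validation of one _CST C-state entry.
 * Non-Intel CPUs only get their GAS SpaceId sanity checked; on Intel
 * CPUs fixed-hardware entries are converted to native MWAIT entry and
 * the C3(+) bus-master preamble is tuned for the CPU generation.
 */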
int
acpi_cst_md_cx_setup(struct acpi_cst_cx *cx)
{
	int error;

	if (cpu_vendor_id != CPU_VENDOR_INTEL) {
		/*
		 * No optimization for non-Intel CPUs so far.
		 *
		 * Fixed-hardware resources are only accepted for C1;
		 * they are not supported for deeper C-states yet.
		 */
		if (cx->type == ACPI_STATE_C1 &&
		    cx->gas.SpaceId == ACPI_ADR_SPACE_FIXED_HARDWARE)
			return 0;
		if (cx->gas.SpaceId != ACPI_ADR_SPACE_SYSTEM_IO &&
		    cx->gas.SpaceId != ACPI_ADR_SPACE_SYSTEM_MEMORY) {
			kprintf("C%d: invalid SpaceId %d\n", cx->type,
			    cx->gas.SpaceId);
			return EINVAL;
		}
		return 0;
	}

	switch (cx->gas.SpaceId) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
		break;

	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		error = acpi_cst_cx_mwait_setup(cx);
		if (error)
			return error;
		break;

	default:
		kprintf("C%d: invalid SpaceId %d\n", cx->type, cx->gas.SpaceId);
		return EINVAL;
	}

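	/*
	 * For C3 and deeper, select the preamble operation (bus-master
	 * arbitration control or none) required before entering the
	 * C-state.
	 */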
	if (cx->type >= ACPI_STATE_C3) {
		if ((CPUID_TO_FAMILY(cpu_id) > 0xf ||
		     (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
		      CPUID_TO_MODEL(cpu_id) >= 0xf)) &&
		    !acpi_cst_force_bmarb) {
			/*
			 * Pentium dual-core, Core 2 and beyond do not
			 * need any preamble operations to enter C3(+).
			 */
			cx->preamble = ACPI_CST_CX_PREAMBLE_NONE;
		} else if ((acpi_cst_quirks & ACPI_CST_QUIRK_NO_BM) == 0) {
			/*
			 * Intel CPUs support bus master operation for
			 * entering C3(+) even on MP systems.
			 */
			cx->preamble = ACPI_CST_CX_PREAMBLE_BM_ARB;
		}
	}
	return 0;
}

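/*
 * Decode an Intel fixed-hardware (FFH) C-state entry and, if possible,
 * switch it to native MWAIT entry.  The MWAIT hint is taken from
 * GAS.Address; the flag bits in GAS.AccessWidth determine whether
 * BM_STS must be checked before entering this C-state.
 */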
static int
acpi_cst_cx_mwait_setup(struct acpi_cst_cx *cx)
{
	uint32_t eax_hint;
	int error;

	if (bootverbose) {
		kprintf("C%d: BitWidth(vendor) %d, BitOffset(class) %d, "
		    "Address(arg0) 0x%jx, AccessWidth(arg1) 0x%x\n", cx->type,
		    cx->gas.BitWidth, cx->gas.BitOffset,
		    (uintmax_t)cx->gas.Address, cx->gas.AccessWidth);
	}

	if (cx->type == ACPI_STATE_C1) {
		/* XXX mwait */
		/* XXX I/O then halt */
		return 0;
	}

	if (cx->gas.BitOffset != ACPI_GAS_INTEL_CLASS_CX_NATIVE)
		return EINVAL;

	if ((cpu_feature2 & CPUID2_MON) == 0)
		return EOPNOTSUPP;
	if ((cpu_mwait_feature & (CPUID_MWAIT_EXT | CPUID_MWAIT_INTBRK)) !=
	    (CPUID_MWAIT_EXT | CPUID_MWAIT_INTBRK))
		return EOPNOTSUPP;

	eax_hint = cx->gas.Address & ACPI_GAS_INTEL_ARG0_MWAIT_HINTMASK;
	if (bootverbose) {
		kprintf("C%d -> cpu specific C%d sub state %d\n", cx->type,
		    MWAIT_EAX_TO_CX(eax_hint), MWAIT_EAX_TO_CX_SUB(eax_hint));
	}


	if (!cpu_mwait_hint_valid(eax_hint)) {
		kprintf("C%d: invalid mwait hint 0x%08x\n", cx->type, eax_hint);
		error = EINVAL;
		goto done;
	}

	cx->md_arg0 = eax_hint;
	cx->enter = acpi_cst_cx_mwait_enter;
	error = 0;

done:
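	/*
	 * If the ARG1 BM_STS flag is not set for this entry, and BM_STS
	 * checking is not forced, bus-master status does not need to be
	 * consulted before entering this C-state.
	 */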
	if ((cx->gas.AccessWidth & ACPI_GAS_INTEL_ARG1_BM_STS) == 0 &&
	    !acpi_cst_force_bmsts) {
		cpu_mwait_cx_no_bmsts();
		if (cx->type >= ACPI_STATE_C3)
			cx->flags &= ~ACPI_CST_CX_FLAG_BM_STS;
	}

	if (cx->type < ACPI_STATE_C3 && MWAIT_EAX_TO_CX(eax_hint) >= 3) {
		/*
		 * If the BIOS maps a shallow ACPI C-state (<C3) to a
		 * deep CPU specific C-state (>=C3), it implies that no
		 * bus mastering operations are needed before entering
		 * deep CPU specific C-states.
		 */
		if (!acpi_cst_force_bmsts)
			cpu_mwait_cx_no_bmsts();
		if (!acpi_cst_force_bmarb)
			cpu_mwait_cx_no_bmarb();
	}

	return error;
}

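/*
 * Native MWAIT entry for this C-state: if no work is pending on this
 * CPU, monitor gd_reqflags and issue MWAIT with the pre-computed hint
 * in md_arg0, with interrupt break-events enabled.
 */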
static void
acpi_cst_cx_mwait_enter(const struct acpi_cst_cx *cx)
{
	struct globaldata *gd = mycpu;
	int reqflags;

	reqflags = gd->gd_reqflags;
	if ((reqflags & RQF_IDLECHECK_WK_MASK) == 0) {
		cpu_mmw_pause_int(&gd->gd_reqflags, reqflags, cx->md_arg0,
		    MWAIT_ECX_INTBRK);
	}
}