// SPDX-License-Identifier: GPL-2.0-only
/*
 * acpi_pad.c ACPI Processor Aggregator Driver
 *
 * Copyright (c) 2009, Intel Corporation.
 */

#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <asm/mwait.h>
#include <xen/xen.h>

#define ACPI_PROCESSOR_AGGREGATOR_CLASS		"acpi_pad"
#define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME	"Processor Aggregator"
#define ACPI_PROCESSOR_AGGREGATOR_NOTIFY	0x80

#define ACPI_PROCESSOR_AGGREGATOR_STATUS_SUCCESS	0
#define ACPI_PROCESSOR_AGGREGATOR_STATUS_NO_ACTION	1

static DEFINE_MUTEX(isolated_cpus_lock);
static DEFINE_MUTEX(round_robin_lock);

static unsigned long power_saving_mwait_eax;

static unsigned char tsc_detected_unstable;
static unsigned char tsc_marked_unstable;

static void power_saving_mwait_init(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int highest_cstate = 0;
	unsigned int highest_subcstate = 0;
	int i;

	if (!boot_cpu_has(X86_FEATURE_MWAIT))
		return;
	if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
		return;

	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);

	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
	    !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
		return;

	edx >>= MWAIT_SUBSTATE_SIZE;
	for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
		if (edx & MWAIT_SUBSTATE_MASK) {
			highest_cstate = i;
			highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
		}
	}
	power_saving_mwait_eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
		(highest_subcstate - 1);
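	/*
	 * Worked example (values assumed for illustration): if CPUID leaf 5
	 * reports the deepest C-state as C3 with two sub-states, the hint
	 * becomes (3 << MWAIT_SUBSTATE_SIZE) | (2 - 1) == 0x31, i.e. target
	 * C3, sub-state 1 (the sub-state field is encoded zero-based).
	 */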

#if defined(CONFIG_X86)
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_HYGON:
	case X86_VENDOR_AMD:
	case X86_VENDOR_INTEL:
	case X86_VENDOR_ZHAOXIN:
	case X86_VENDOR_CENTAUR:
		/*
		 * AMD Fam10h TSC will tick in all
		 * C/P/S0/S1 states when this bit is set.
		 */
		if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
			tsc_detected_unstable = 1;
		break;
	default:
		/* TSC could halt in idle */
		tsc_detected_unstable = 1;
	}
#endif
}

static unsigned long cpu_weight[NR_CPUS];
static int tsk_in_cpu[NR_CPUS] = {[0 ... NR_CPUS-1] = -1};
static DECLARE_BITMAP(pad_busy_cpus_bits, NR_CPUS);
static void round_robin_cpu(unsigned int tsk_index)
{
	struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits);
	cpumask_var_t tmp;
	int cpu;
	unsigned long min_weight = -1;
	unsigned long preferred_cpu;

	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
		return;

	mutex_lock(&round_robin_lock);
	cpumask_clear(tmp);
	for_each_cpu(cpu, pad_busy_cpus)
		cpumask_or(tmp, tmp, topology_sibling_cpumask(cpu));
	cpumask_andnot(tmp, cpu_online_mask, tmp);
	/* avoid HT siblings if possible */
	if (cpumask_empty(tmp))
		cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
	if (cpumask_empty(tmp)) {
		mutex_unlock(&round_robin_lock);
		free_cpumask_var(tmp);
		return;
	}
	for_each_cpu(cpu, tmp) {
		if (cpu_weight[cpu] < min_weight) {
			min_weight = cpu_weight[cpu];
			preferred_cpu = cpu;
		}
	}

	if (tsk_in_cpu[tsk_index] != -1)
		cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
	tsk_in_cpu[tsk_index] = preferred_cpu;
	cpumask_set_cpu(preferred_cpu, pad_busy_cpus);
	cpu_weight[preferred_cpu]++;
	mutex_unlock(&round_robin_lock);

	set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu));

	free_cpumask_var(tmp);
}

static void exit_round_robin(unsigned int tsk_index)
{
	struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits);

	cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
	tsk_in_cpu[tsk_index] = -1;
}

static unsigned int idle_pct = 5; /* percentage */
static unsigned int round_robin_time = 1; /* second */
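/*
 * Rough duty cycle with the defaults above (a sketch, not a guarantee):
 * each power-saving thread pins itself to one CPU, holds it in MWAIT for
 * about (100 - idle_pct)% of every second, and naps for the remaining
 * idle_pct% so ordinary tasks are not completely starved.
 * round_robin_time controls how often the thread migrates to a different
 * CPU.
 */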
static int power_saving_thread(void *data)
{
	int do_sleep;
	unsigned int tsk_index = (unsigned long)data;
	u64 last_jiffies = 0;

	sched_set_fifo_low(current);

	while (!kthread_should_stop()) {
		unsigned long expire_time;

		/* round robin to cpus */
		expire_time = last_jiffies + round_robin_time * HZ;
		if (time_before(expire_time, jiffies)) {
			last_jiffies = jiffies;
			round_robin_cpu(tsk_index);
		}

		do_sleep = 0;

		expire_time = jiffies + HZ * (100 - idle_pct) / 100;

		while (!need_resched()) {
			if (tsc_detected_unstable && !tsc_marked_unstable) {
				/* TSC could halt in idle, so notify users */
				mark_tsc_unstable("TSC halts in idle");
				tsc_marked_unstable = 1;
			}
			local_irq_disable();

			perf_lopwr_cb(true);

			tick_broadcast_enable();
			tick_broadcast_enter();
			stop_critical_timings();

			mwait_idle_with_hints(power_saving_mwait_eax, 1);

			start_critical_timings();
			tick_broadcast_exit();

			perf_lopwr_cb(false);

			local_irq_enable();

			if (time_before(expire_time, jiffies)) {
				do_sleep = 1;
				break;
			}
		}

		/*
		 * The RT scheduler throttles realtime task runtime: once an
		 * RT task has used 95% of the CPU, it is scheduled out for
		 * the remaining 5% so other tasks are not starved. That
		 * mechanism only works when every CPU runs an RT task,
		 * though: if one CPU has no RT task, RT tasks from other
		 * CPUs can borrow its CPU time and exceed the 95% limit.
		 * To keep the starvation protection effective, take a nap
		 * here.
		 */
		if (unlikely(do_sleep))
			schedule_timeout_killable(HZ * idle_pct / 100);

		/*
		 * If an external event has set the need_resched flag, then
		 * we need to deal with it, or this loop will continue to
		 * spin without calling __mwait().
		 */
		if (unlikely(need_resched()))
			schedule();
	}

	exit_round_robin(tsk_index);
	return 0;
}

static struct task_struct *ps_tsks[NR_CPUS];
static unsigned int ps_tsk_num;
static int create_power_saving_task(void)
{
	int rc;

	ps_tsks[ps_tsk_num] = kthread_run(power_saving_thread,
			(void *)(unsigned long)ps_tsk_num,
			"acpi_pad/%d", ps_tsk_num);

	if (IS_ERR(ps_tsks[ps_tsk_num])) {
		rc = PTR_ERR(ps_tsks[ps_tsk_num]);
		ps_tsks[ps_tsk_num] = NULL;
	} else {
		rc = 0;
		ps_tsk_num++;
	}

	return rc;
}

static void destroy_power_saving_task(void)
{
	if (ps_tsk_num > 0) {
		ps_tsk_num--;
		kthread_stop(ps_tsks[ps_tsk_num]);
		ps_tsks[ps_tsk_num] = NULL;
	}
}

static void set_power_saving_task_num(unsigned int num)
{
	if (num > ps_tsk_num) {
		while (ps_tsk_num < num) {
			if (create_power_saving_task())
				return;
		}
	} else if (num < ps_tsk_num) {
		while (ps_tsk_num > num)
			destroy_power_saving_task();
	}
}

static void acpi_pad_idle_cpus(unsigned int num_cpus)
{
	cpus_read_lock();

	num_cpus = min_t(unsigned int, num_cpus, num_online_cpus());
	set_power_saving_task_num(num_cpus);

	cpus_read_unlock();
}

static uint32_t acpi_pad_idle_cpus_num(void)
{
	return ps_tsk_num;
}

static ssize_t rrtime_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long num;

	if (kstrtoul(buf, 0, &num))
		return -EINVAL;
	if (num < 1 || num >= 100)
		return -EINVAL;
	mutex_lock(&isolated_cpus_lock);
	round_robin_time = num;
	mutex_unlock(&isolated_cpus_lock);
	return count;
}

static ssize_t rrtime_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", round_robin_time);
}
static DEVICE_ATTR_RW(rrtime);

static ssize_t idlepct_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long num;

	if (kstrtoul(buf, 0, &num))
		return -EINVAL;
	if (num < 1 || num >= 100)
		return -EINVAL;
	mutex_lock(&isolated_cpus_lock);
	idle_pct = num;
	mutex_unlock(&isolated_cpus_lock);
	return count;
}

static ssize_t idlepct_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", idle_pct);
}
static DEVICE_ATTR_RW(idlepct);

static ssize_t idlecpus_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long num;

	if (kstrtoul(buf, 0, &num))
		return -EINVAL;
	mutex_lock(&isolated_cpus_lock);
	acpi_pad_idle_cpus(num);
	mutex_unlock(&isolated_cpus_lock);
	return count;
}

static ssize_t idlecpus_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return cpumap_print_to_pagebuf(false, buf,
			to_cpumask(pad_busy_cpus_bits));
}

static DEVICE_ATTR_RW(idlecpus);

static struct attribute *acpi_pad_attrs[] = {
	&dev_attr_idlecpus.attr,
	&dev_attr_idlepct.attr,
	&dev_attr_rrtime.attr,
	NULL
};

ATTRIBUTE_GROUPS(acpi_pad);
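
/*
 * Example interaction from userspace (sysfs path assumed from the
 * platform driver name registered below):
 *
 *   # ask the driver to force two CPUs idle
 *   echo 2 > /sys/devices/platform/processor_aggregator/idlecpus
 *
 *   # show the CPUs currently hosting power-saving threads
 *   cat /sys/devices/platform/processor_aggregator/idlecpus
 */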

/*
 * Query firmware for the number of CPUs that should be idled.
 * Per the ACPI spec, _PUR returns a two-element package:
 * { RevisionID (currently 1), NumProcessors }.
 * Returns -1 on failure.
 */
static int acpi_pad_pur(acpi_handle handle)
{
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *package;
	int num = -1;

	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PUR", NULL, &buffer)))
		return num;

	if (!buffer.length || !buffer.pointer)
		return num;

	package = buffer.pointer;

	if (package->type == ACPI_TYPE_PACKAGE &&
	    package->package.count == 2 &&
	    package->package.elements[0].integer.value == 1) /* rev 1 */
		num = package->package.elements[1].integer.value;

	kfree(buffer.pointer);
	return num;
}
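
/*
 * For illustration only (assumed ASL, not part of this driver): a
 * firmware _PUR object asking the OS to idle two processors might look
 * like:
 *
 *   Method (_PUR, 0, NotSerialized)
 *   {
 *       Return (Package (0x02) { One, 0x02 })
 *   }
 */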

static void acpi_pad_handle_notify(acpi_handle handle)
{
	int num_cpus;
	uint32_t idle_cpus;
	struct acpi_buffer param = {
		.length = 4,
		.pointer = (void *)&idle_cpus,
	};
	u32 status;

	mutex_lock(&isolated_cpus_lock);
	num_cpus = acpi_pad_pur(handle);
	if (num_cpus < 0) {
		/*
		 * The ACPI specification says that if no action was
		 * performed when processing the _PUR object, _OST should
		 * still be evaluated, albeit with a different status code.
		 */
		status = ACPI_PROCESSOR_AGGREGATOR_STATUS_NO_ACTION;
	} else {
		status = ACPI_PROCESSOR_AGGREGATOR_STATUS_SUCCESS;
		acpi_pad_idle_cpus(num_cpus);
	}

	idle_cpus = acpi_pad_idle_cpus_num();
	acpi_evaluate_ost(handle, ACPI_PROCESSOR_AGGREGATOR_NOTIFY, status, &param);
	mutex_unlock(&isolated_cpus_lock);
}

static void acpi_pad_notify(acpi_handle handle, u32 event,
	void *data)
{
	struct acpi_device *adev = data;

	switch (event) {
	case ACPI_PROCESSOR_AGGREGATOR_NOTIFY:
		acpi_pad_handle_notify(handle);
		acpi_bus_generate_netlink_event(adev->pnp.device_class,
			dev_name(&adev->dev), event, 0);
		break;
	default:
		pr_warn("Unsupported event [0x%x]\n", event);
		break;
	}
}

static int acpi_pad_probe(struct platform_device *pdev)
{
	struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
	acpi_status status;

	strcpy(acpi_device_name(adev), ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME);
	strcpy(acpi_device_class(adev), ACPI_PROCESSOR_AGGREGATOR_CLASS);

	status = acpi_install_notify_handler(adev->handle,
		ACPI_DEVICE_NOTIFY, acpi_pad_notify, adev);

	if (ACPI_FAILURE(status))
		return -ENODEV;

	return 0;
}

static void acpi_pad_remove(struct platform_device *pdev)
{
	struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);

	mutex_lock(&isolated_cpus_lock);
	acpi_pad_idle_cpus(0);
	mutex_unlock(&isolated_cpus_lock);

	acpi_remove_notify_handler(adev->handle,
		ACPI_DEVICE_NOTIFY, acpi_pad_notify);
}

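/* ACPI000C is the Processor Aggregator Device HID defined by the ACPI spec */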
static const struct acpi_device_id pad_device_ids[] = {
	{"ACPI000C", 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, pad_device_ids);

static struct platform_driver acpi_pad_driver = {
	.probe = acpi_pad_probe,
	.remove_new = acpi_pad_remove,
	.driver = {
		.dev_groups = acpi_pad_groups,
		.name = "processor_aggregator",
		.acpi_match_table = pad_device_ids,
	},
};

static int __init acpi_pad_init(void)
{
	/* Xen ACPI PAD is used when running as Xen Dom0. */
	if (xen_initial_domain())
		return -ENODEV;

	power_saving_mwait_init();
	if (power_saving_mwait_eax == 0)
		return -EINVAL;

	return platform_driver_register(&acpi_pad_driver);
}

static void __exit acpi_pad_exit(void)
{
	platform_driver_unregister(&acpi_pad_driver);
}

module_init(acpi_pad_init);
module_exit(acpi_pad_exit);
MODULE_AUTHOR("Shaohua Li <shaohua.li@intel.com>");
MODULE_DESCRIPTION("ACPI Processor Aggregator Driver");
MODULE_LICENSE("GPL");