// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * processor_throttling.c - Throttling submodule of the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#include <asm/io.h>
#include <linux/uaccess.h>

#define PREFIX "ACPI: "

/* ignore_tpc:
 * 0 -> the ACPI processor driver honors _TPC values
 * 1 -> the ACPI processor driver ignores _TPC values
 */
static int ignore_tpc;
module_param(ignore_tpc, int, 0644);
MODULE_PARM_DESC(ignore_tpc, "Disable broken BIOS _TPC throttling support");

struct throttling_tstate {
	unsigned int cpu;		/* cpu nr */
	int target_state;		/* target T-state */
};

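/* Arguments passed via call_on_cpu() when changing a CPU's T-state. */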
struct acpi_processor_throttling_arg {
	struct acpi_processor *pr;
	int target_state;
	bool force;
};

#define THROTTLING_PRECHANGE      (1)
#define THROTTLING_POSTCHANGE     (2)

static int acpi_processor_get_throttling(struct acpi_processor *pr);
static int __acpi_processor_set_throttling(struct acpi_processor *pr,
					   int state, bool force, bool direct);

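/*
 * Walk the _TSD data gathered for all CPUs and build the shared_cpu_map
 * for every T-state domain.  On any inconsistency the coordination is
 * abandoned and each CPU falls back to a private SW_ALL domain.
 */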
static int acpi_processor_update_tsd_coord(void)
{
	int count, count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_var_t covered_cpus;
	struct acpi_processor *pr, *match_pr;
	struct acpi_tsd_package *pdomain, *match_pdomain;
	struct acpi_processor_throttling *pthrottling, *match_pthrottling;

	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * Now that we have _TSD data from all CPUs, let's set up T-state
	 * coordination between all CPUs.
	 */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		/* Basic validity check for domain info */
		pthrottling = &(pr->throttling);

		/*
		 * If the _TSD package of any CPU is invalid, the
		 * coordination among all CPUs is treated as invalid.
		 * Crude, but safe.
		 */
		if (!pthrottling->tsd_valid_flag) {
			retval = -EINVAL;
			break;
		}
	}
	if (retval)
		goto err_ret;

	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		if (cpumask_test_cpu(i, covered_cpus))
			continue;
		pthrottling = &pr->throttling;

		pdomain = &(pthrottling->domain_info);
		cpumask_set_cpu(i, pthrottling->shared_cpu_map);
		cpumask_set_cpu(i, covered_cpus);
		/*
		 * If the number of processors in the TSD domain is 1, it is
		 * unnecessary to parse the coordination for this CPU.
		 */
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;
		count = 1;

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pthrottling = &(match_pr->throttling);
			match_pdomain = &(match_pthrottling->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/* Here i and j are in the same domain.
			 * If two TSD packages have the same domain, they
			 * should have the same num_processors and
			 * coordination type.  Otherwise it will be regarded
			 * as illegal.
			 */
			if (match_pdomain->num_processors != count_target) {
				retval = -EINVAL;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EINVAL;
				goto err_ret;
			}

			cpumask_set_cpu(j, covered_cpus);
			cpumask_set_cpu(j, pthrottling->shared_cpu_map);
			count++;
		}
		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pthrottling = &(match_pr->throttling);
			match_pdomain = &(match_pthrottling->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/*
			 * If some CPUs have the same domain, they
			 * will have the same shared_cpu_map.
			 */
			cpumask_copy(match_pthrottling->shared_cpu_map,
				     pthrottling->shared_cpu_map);
		}
	}

err_ret:
	free_cpumask_var(covered_cpus);

	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		/*
		 * Assume no coordination on any error parsing domain info.
		 * The coordination type will be forced as SW_ALL.
		 */
		if (retval) {
			pthrottling = &(pr->throttling);
			cpumask_clear(pthrottling->shared_cpu_map);
			cpumask_set_cpu(i, pthrottling->shared_cpu_map);
			pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
		}
	}

	return retval;
}

/*
 * Update the T-state coordination after the _TSD
 * data for all CPUs has been obtained.
 */
void acpi_processor_throttling_init(void)
{
	if (acpi_processor_update_tsd_coord())
		pr_debug("Assume no T-state coordination\n");
}

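/*
 * Clamp a requested T-state to the thermal, user and _TPC limits on
 * PRECHANGE, and record the newly reached T-state on POSTCHANGE.
 */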
static int acpi_processor_throttling_notifier(unsigned long event, void *data)
{
	struct throttling_tstate *p_tstate = data;
	struct acpi_processor *pr;
	unsigned int cpu;
	int target_state;
	struct acpi_processor_limit *p_limit;
	struct acpi_processor_throttling *p_throttling;

	cpu = p_tstate->cpu;
	pr = per_cpu(processors, cpu);
	if (!pr) {
		pr_debug("Invalid pr pointer\n");
		return 0;
	}
	if (!pr->flags.throttling) {
		acpi_handle_debug(pr->handle,
				  "Throttling control unsupported on CPU %d\n",
				  cpu);
		return 0;
	}
	target_state = p_tstate->target_state;
	p_throttling = &(pr->throttling);
	switch (event) {
	case THROTTLING_PRECHANGE:
		/*
		 * The prechange event is used to choose one proper T-state,
		 * which meets the limits of thermal, user and _TPC.
		 */
		p_limit = &pr->limit;
		if (p_limit->thermal.tx > target_state)
			target_state = p_limit->thermal.tx;
		if (p_limit->user.tx > target_state)
			target_state = p_limit->user.tx;
		if (pr->throttling_platform_limit > target_state)
			target_state = pr->throttling_platform_limit;
		if (target_state >= p_throttling->state_count) {
			printk(KERN_WARNING
			       "Exceeded the limit of T-state\n");
			target_state = p_throttling->state_count - 1;
		}
		p_tstate->target_state = target_state;
		acpi_handle_debug(pr->handle,
				  "PreChange Event: target T-state of CPU %d is T%d\n",
				  cpu, target_state);
		break;
	case THROTTLING_POSTCHANGE:
		/*
		 * The postchange event is only used to update the
		 * T-state flag of acpi_processor_throttling.
		 */
		p_throttling->state = target_state;
		acpi_handle_debug(pr->handle,
				  "PostChange Event: CPU %d is switched to T%d\n",
				  cpu, target_state);
		break;
	default:
		printk(KERN_WARNING
		       "Unsupported Throttling notifier event\n");
		break;
	}

	return 0;
}

/*
 * _TPC - Throttling Present Capabilities
 */
static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
{
	acpi_status status = 0;
	unsigned long long tpc = 0;

	if (!pr)
		return -EINVAL;

	if (ignore_tpc)
		goto end;

	status = acpi_evaluate_integer(pr->handle, "_TPC", NULL, &tpc);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND)
			acpi_evaluation_failure_warn(pr->handle, "_TPC", status);

		return -ENODEV;
	}

end:
	pr->throttling_platform_limit = (int)tpc;
	return 0;
}

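/*
 * Re-evaluate _TPC and adjust the current T-state so that it honors
 * the new platform limit as well as the thermal and user limits.
 */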
int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
{
	int result = 0;
	int throttling_limit;
	int current_state;
	struct acpi_processor_limit *limit;
	int target_state;

	if (ignore_tpc)
		return 0;

	result = acpi_processor_get_platform_limit(pr);
	if (result) {
		/* Throttling Limit is unsupported */
		return result;
	}

	throttling_limit = pr->throttling_platform_limit;
	if (throttling_limit >= pr->throttling.state_count) {
		/* Incorrect Throttling Limit */
		return -EINVAL;
	}

	current_state = pr->throttling.state;
	if (current_state > throttling_limit) {
		/*
		 * The current state already meets the requirement of
		 * the _TPC limit, but it is reasonable for OSPM to change
		 * T-states from high to low for better performance.
		 * Of course the limit conditions of thermal
		 * and user must still be considered.
		 */
		limit = &pr->limit;
		target_state = throttling_limit;
		if (limit->thermal.tx > target_state)
			target_state = limit->thermal.tx;
		if (limit->user.tx > target_state)
			target_state = limit->user.tx;
	} else if (current_state == throttling_limit) {
		/*
		 * Unnecessary to change the throttling state
		 */
		return 0;
	} else {
		/*
		 * If the current state is lower than the limit of _TPC, it
		 * will be forced to switch to the throttling state defined
		 * by throttling_platform_limit.
		 * Because the previous state already met the limit conditions
		 * of thermal and user, it is unnecessary to check them again.
		 */
		target_state = throttling_limit;
	}
	return acpi_processor_set_throttling(pr, target_state, false);
}

/*
 * This function is used to reevaluate whether the T-state is valid
 * after one CPU is onlined/offlined.
 * Note that it won't reevaluate the following properties of
 * the T-state:
 *	1. Control method.
 *	2. The number of supported T-states.
 *	3. The TSD domain.
 */
void acpi_processor_reevaluate_tstate(struct acpi_processor *pr,
				      bool is_dead)
{
	int result = 0;

	if (is_dead) {
		/* When one CPU is offline, the T-state throttling
		 * will be invalidated.
		 */
		pr->flags.throttling = 0;
		return;
	}
	/* The following rechecks whether the T-state is valid for
	 * the online CPU.
	 */
	if (!pr->throttling.state_count) {
		/* If the number of T-states is invalid, throttling is
		 * invalidated.
		 */
		pr->flags.throttling = 0;
		return;
	}
	pr->flags.throttling = 1;

	/* Disable throttling (if enabled).  We'll let subsequent
	 * policy (e.g. thermal) decide to lower performance if it
	 * so chooses, but for now we'll crank up the speed.
	 */

	result = acpi_processor_get_throttling(pr);
	if (result)
		goto end;

	if (pr->throttling.state) {
		result = acpi_processor_set_throttling(pr, 0, false);
		if (result)
			goto end;
	}

end:
	if (result)
		pr->flags.throttling = 0;
}

/*
 * _PTC - Processor Throttling Control (and status) register location
 */
static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = 0;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *ptc = NULL;
	union acpi_object obj = { 0 };
	struct acpi_processor_throttling *throttling;

	status = acpi_evaluate_object(pr->handle, "_PTC", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND)
			acpi_evaluation_failure_warn(pr->handle, "_PTC", status);

		return -ENODEV;
	}

	ptc = (union acpi_object *)buffer.pointer;
	if (!ptc || (ptc->type != ACPI_TYPE_PACKAGE)
	    || (ptc->package.count != 2)) {
		printk(KERN_ERR PREFIX "Invalid _PTC data\n");
		result = -EFAULT;
		goto end;
	}

	/*
	 * control_register
	 */

	obj = ptc->package.elements[0];

	if ((obj.type != ACPI_TYPE_BUFFER)
	    || (obj.buffer.length < sizeof(struct acpi_ptc_register))
	    || (obj.buffer.pointer == NULL)) {
		printk(KERN_ERR PREFIX
		       "Invalid _PTC data (control_register)\n");
		result = -EFAULT;
		goto end;
	}
	memcpy(&pr->throttling.control_register, obj.buffer.pointer,
	       sizeof(struct acpi_ptc_register));

	/*
	 * status_register
	 */

	obj = ptc->package.elements[1];

	if ((obj.type != ACPI_TYPE_BUFFER)
	    || (obj.buffer.length < sizeof(struct acpi_ptc_register))
	    || (obj.buffer.pointer == NULL)) {
		printk(KERN_ERR PREFIX "Invalid _PTC data (status_register)\n");
		result = -EFAULT;
		goto end;
	}

	memcpy(&pr->throttling.status_register, obj.buffer.pointer,
	       sizeof(struct acpi_ptc_register));

	throttling = &pr->throttling;

	if ((throttling->control_register.bit_width +
	     throttling->control_register.bit_offset) > 32) {
		printk(KERN_ERR PREFIX "Invalid _PTC control register\n");
		result = -EFAULT;
		goto end;
	}

	if ((throttling->status_register.bit_width +
	     throttling->status_register.bit_offset) > 32) {
		printk(KERN_ERR PREFIX "Invalid _PTC status register\n");
		result = -EFAULT;
		goto end;
	}

end:
	kfree(buffer.pointer);

	return result;
}

/*
 * _TSS - Throttling Supported States
 */
static int acpi_processor_get_throttling_states(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
	struct acpi_buffer state = { 0, NULL };
	union acpi_object *tss = NULL;
	int i;

	status = acpi_evaluate_object(pr->handle, "_TSS", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND)
			acpi_evaluation_failure_warn(pr->handle, "_TSS", status);

		return -ENODEV;
	}

	tss = buffer.pointer;
	if (!tss || (tss->type != ACPI_TYPE_PACKAGE)) {
		printk(KERN_ERR PREFIX "Invalid _TSS data\n");
		result = -EFAULT;
		goto end;
	}

	acpi_handle_debug(pr->handle, "Found %d throttling states\n",
			  tss->package.count);

	pr->throttling.state_count = tss->package.count;
	pr->throttling.states_tss =
	    kmalloc_array(tss->package.count,
			  sizeof(struct acpi_processor_tx_tss),
			  GFP_KERNEL);
	if (!pr->throttling.states_tss) {
		result = -ENOMEM;
		goto end;
	}

	for (i = 0; i < pr->throttling.state_count; i++) {

		struct acpi_processor_tx_tss *tx =
		    (struct acpi_processor_tx_tss *)&(pr->throttling.
						      states_tss[i]);

		state.length = sizeof(struct acpi_processor_tx_tss);
		state.pointer = tx;

		acpi_handle_debug(pr->handle, "Extracting state %d\n", i);

		status = acpi_extract_package(&(tss->package.elements[i]),
					      &format, &state);
		if (ACPI_FAILURE(status)) {
			acpi_handle_warn(pr->handle, "Invalid _TSS data: %s\n",
					 acpi_format_exception(status));
			result = -EFAULT;
			kfree(pr->throttling.states_tss);
			goto end;
		}

		if (!tx->freqpercentage) {
			printk(KERN_ERR PREFIX
			       "Invalid _TSS data: freq is zero\n");
			result = -EFAULT;
			kfree(pr->throttling.states_tss);
			goto end;
		}
	}

end:
	kfree(buffer.pointer);

	return result;
}

/*
 * _TSD - T-State Dependencies
 */
static int acpi_processor_get_tsd(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
	struct acpi_buffer state = { 0, NULL };
	union acpi_object *tsd = NULL;
	struct acpi_tsd_package *pdomain;
	struct acpi_processor_throttling *pthrottling;

	pthrottling = &pr->throttling;
	pthrottling->tsd_valid_flag = 0;

	status = acpi_evaluate_object(pr->handle, "_TSD", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND)
			acpi_evaluation_failure_warn(pr->handle, "_TSD", status);

		return -ENODEV;
	}

	tsd = buffer.pointer;
	if (!tsd || (tsd->type != ACPI_TYPE_PACKAGE)) {
		printk(KERN_ERR PREFIX "Invalid _TSD data\n");
		result = -EFAULT;
		goto end;
	}

	if (tsd->package.count != 1) {
		printk(KERN_ERR PREFIX "Invalid _TSD data\n");
		result = -EFAULT;
		goto end;
	}

	pdomain = &(pr->throttling.domain_info);

	state.length = sizeof(struct acpi_tsd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(tsd->package.elements[0]),
				      &format, &state);
	if (ACPI_FAILURE(status)) {
		printk(KERN_ERR PREFIX "Invalid _TSD data\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->num_entries != ACPI_TSD_REV0_ENTRIES) {
		printk(KERN_ERR PREFIX "Unknown _TSD:num_entries\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->revision != ACPI_TSD_REV0_REVISION) {
		printk(KERN_ERR PREFIX "Unknown _TSD:revision\n");
		result = -EFAULT;
		goto end;
	}

	pthrottling = &pr->throttling;
	pthrottling->tsd_valid_flag = 1;
	pthrottling->shared_type = pdomain->coord_type;
	cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
	/*
	 * If the coordination type is not defined in the ACPI spec,
	 * tsd_valid_flag will be cleared and the coordination type
	 * will be forced to DOMAIN_COORD_TYPE_SW_ALL.
	 */
	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		pthrottling->tsd_valid_flag = 0;
		pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
	}

end:
	kfree(buffer.pointer);
	return result;
}

/* --------------------------------------------------------------------------
			      Throttling Control
   -------------------------------------------------------------------------- */
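/*
 * Read the current T-state from the FADT-described P_BLK throttling
 * register (duty-cycle based throttling on legacy systems).
 */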
static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
{
	int state = 0;
	u32 value = 0;
	u32 duty_mask = 0;
	u32 duty_value = 0;

	if (!pr)
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	/*
	 * We don't care about error returns - we just try to mark
	 * these reserved so that nobody else is confused into thinking
	 * that this region might be unused..
	 *
	 * (In particular, allocating the IO range for Cardbus)
	 */
	request_region(pr->throttling.address, 6, "ACPI CPU throttle");

	pr->throttling.state = 0;

	duty_mask = pr->throttling.state_count - 1;

	duty_mask <<= pr->throttling.duty_offset;

	local_irq_disable();

	value = inl(pr->throttling.address);

	/*
	 * Compute the current throttling state when throttling is enabled
	 * (bit 4 is on).
	 */
	if (value & 0x10) {
		duty_value = value & duty_mask;
		duty_value >>= pr->throttling.duty_offset;

		if (duty_value)
			state = pr->throttling.state_count - duty_value;
	}

	pr->throttling.state = state;

	local_irq_enable();

	acpi_handle_debug(pr->handle,
			  "Throttling state is T%d (%d%% throttling applied)\n",
			  state, pr->throttling.states[state].performance);

	return 0;
}

#ifdef CONFIG_X86
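/*
 * FIXED_HARDWARE access is implemented via the IA32_THERM_CONTROL MSR
 * and is only supported on Intel CPUs that advertise the ACPI feature.
 */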
static int acpi_throttling_rdmsr(u64 *value)
{
	u64 msr_high, msr_low;
	u64 msr = 0;
	int ret = -1;

	if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) ||
	    !this_cpu_has(X86_FEATURE_ACPI)) {
		printk(KERN_ERR PREFIX
		       "HARDWARE addr space, NOT supported yet\n");
	} else {
		msr_low = 0;
		msr_high = 0;
		rdmsr_safe(MSR_IA32_THERM_CONTROL,
			   (u32 *)&msr_low, (u32 *)&msr_high);
		msr = (msr_high << 32) | msr_low;
		*value = (u64)msr;
		ret = 0;
	}
	return ret;
}

static int acpi_throttling_wrmsr(u64 value)
{
	int ret = -1;
	u64 msr;

	if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) ||
	    !this_cpu_has(X86_FEATURE_ACPI)) {
		printk(KERN_ERR PREFIX
		       "HARDWARE addr space, NOT supported yet\n");
	} else {
		msr = value;
		wrmsr_safe(MSR_IA32_THERM_CONTROL,
			   msr & 0xffffffff, msr >> 32);
		ret = 0;
	}
	return ret;
}
#else
static int acpi_throttling_rdmsr(u64 *value)
{
	printk(KERN_ERR PREFIX
	       "HARDWARE addr space, NOT supported yet\n");
	return -1;
}

static int acpi_throttling_wrmsr(u64 value)
{
	printk(KERN_ERR PREFIX
	       "HARDWARE addr space, NOT supported yet\n");
	return -1;
}
#endif

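/*
 * Read the raw throttling status value through whichever address space
 * (SYSTEM_IO port or FIXED_HARDWARE MSR) _PTC described.
 */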
static int acpi_read_throttling_status(struct acpi_processor *pr,
				       u64 *value)
{
	u32 bit_width, bit_offset;
	u32 ptc_value;
	u64 ptc_mask;
	struct acpi_processor_throttling *throttling;
	int ret = -1;

	throttling = &pr->throttling;
	switch (throttling->status_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		bit_width = throttling->status_register.bit_width;
		bit_offset = throttling->status_register.bit_offset;

		acpi_os_read_port((acpi_io_address) throttling->status_register.
				  address, &ptc_value,
				  (u32) (bit_width + bit_offset));
		ptc_mask = (1 << bit_width) - 1;
		*value = (u64) ((ptc_value >> bit_offset) & ptc_mask);
		ret = 0;
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		ret = acpi_throttling_rdmsr(value);
		break;
	default:
		printk(KERN_ERR PREFIX "Unknown addr space %d\n",
		       (u32) (throttling->status_register.space_id));
	}
	return ret;
}

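/*
 * Write a new throttling control value through the address space
 * (SYSTEM_IO port or FIXED_HARDWARE MSR) that _PTC described.
 */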
static int acpi_write_throttling_state(struct acpi_processor *pr,
				       u64 value)
{
	u32 bit_width, bit_offset;
	u64 ptc_value;
	u64 ptc_mask;
	struct acpi_processor_throttling *throttling;
	int ret = -1;

	throttling = &pr->throttling;
	switch (throttling->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		bit_width = throttling->control_register.bit_width;
		bit_offset = throttling->control_register.bit_offset;
		ptc_mask = (1 << bit_width) - 1;
		ptc_value = value & ptc_mask;

		acpi_os_write_port((acpi_io_address) throttling->
				   control_register.address,
				   (u32) (ptc_value << bit_offset),
				   (u32) (bit_width + bit_offset));
		ret = 0;
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		ret = acpi_throttling_wrmsr(value);
		break;
	default:
		printk(KERN_ERR PREFIX "Unknown addr space %d\n",
		       (u32) (throttling->control_register.space_id));
	}
	return ret;
}

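/*
 * Map a raw throttling value to a T-state index by matching the
 * control field of the _TSS entries.
 */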
static int acpi_get_throttling_state(struct acpi_processor *pr,
				     u64 value)
{
	int i;

	for (i = 0; i < pr->throttling.state_count; i++) {
		struct acpi_processor_tx_tss *tx =
		    (struct acpi_processor_tx_tss *)&(pr->throttling.
						      states_tss[i]);
		if (tx->control == value)
			return i;
	}
	return -1;
}

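/* Look up the _TSS control value that selects the given T-state. */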
static int acpi_get_throttling_value(struct acpi_processor *pr,
				     int state, u64 *value)
{
	int ret = -1;

	if (state >= 0 && state < pr->throttling.state_count) {
		struct acpi_processor_tx_tss *tx =
		    (struct acpi_processor_tx_tss *)&(pr->throttling.
						      states_tss[state]);
		*value = tx->control;
		ret = 0;
	}
	return ret;
}

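/*
 * Read the current T-state via _PTC/_TSS.  If the hardware reports a
 * value with no matching _TSS entry, force a reset to T0.
 */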
static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
{
	int state = 0;
	int ret;
	u64 value;

	if (!pr)
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	pr->throttling.state = 0;

	value = 0;
	ret = acpi_read_throttling_status(pr, &value);
	if (ret >= 0) {
		state = acpi_get_throttling_state(pr, value);
		if (state == -1) {
			acpi_handle_debug(pr->handle,
					  "Invalid throttling state, reset\n");
			state = 0;
			ret = __acpi_processor_set_throttling(pr, state, true,
							      true);
			if (ret)
				return ret;
		}
		pr->throttling.state = state;
	}

	return 0;
}

static long __acpi_processor_get_throttling(void *data)
{
	struct acpi_processor *pr = data;

	return pr->throttling.acpi_processor_get_throttling(pr);
}

static int acpi_processor_get_throttling(struct acpi_processor *pr)
{
	if (!pr)
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	/*
	 * This is either called from the CPU hotplug callback of
	 * processor_driver or via the ACPI probe function.  In the latter
	 * case the CPU is not guaranteed to be online.  Both call sites are
	 * protected against CPU hotplug.
	 */
	if (!cpu_online(pr->id))
		return -ENODEV;

	return call_on_cpu(pr->id, __acpi_processor_get_throttling, pr, false);
}

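/*
 * Derive the FADT duty-cycle T-state table: state_count comes from
 * duty_width, and each step trades performance and power linearly.
 */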
static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
{
	int i, step;

	if (!pr->throttling.address) {
		acpi_handle_debug(pr->handle, "No throttling register\n");
		return -EINVAL;
	} else if (!pr->throttling.duty_width) {
		acpi_handle_debug(pr->handle, "No throttling states\n");
		return -EINVAL;
	} else if ((pr->throttling.duty_offset + pr->throttling.duty_width) > 4) {
		/* TBD: Support duty_cycle values that span bit 4. */
		printk(KERN_WARNING PREFIX "duty_cycle spans bit 4\n");
		return -EINVAL;
	}

	pr->throttling.state_count = 1 << acpi_gbl_FADT.duty_width;

	/*
	 * Compute state values.  Note that throttling displays a linear power
	 * performance relationship (at 50% performance the CPU will consume
	 * 50% power).  Values are in 1/10th of a percent to preserve accuracy.
	 */

	step = (1000 / pr->throttling.state_count);

	for (i = 0; i < pr->throttling.state_count; i++) {
		pr->throttling.states[i].performance = 1000 - step * i;
		pr->throttling.states[i].power = 1000 - step * i;
	}
	return 0;
}

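/*
 * Program a T-state through the FADT P_BLK duty-cycle register:
 * throttling is disabled (bit 4 cleared) before the new duty value is
 * written, and left disabled for T0.
 */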
static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
					      int state, bool force)
{
	u32 value = 0;
	u32 duty_mask = 0;
	u32 duty_value = 0;

	if (!pr)
		return -EINVAL;

	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	if (!force && (state == pr->throttling.state))
		return 0;

	if (state < pr->throttling_platform_limit)
		return -EPERM;
	/*
	 * Calculate the duty_value and duty_mask.
	 */
	if (state) {
		duty_value = pr->throttling.state_count - state;

		duty_value <<= pr->throttling.duty_offset;

		/* Used to clear all duty_value bits */
		duty_mask = pr->throttling.state_count - 1;

		duty_mask <<= acpi_gbl_FADT.duty_offset;
		duty_mask = ~duty_mask;
	}

	local_irq_disable();

	/*
	 * Disable throttling by writing a 0 to bit 4.  Note that throttling
	 * must be turned off before the duty_value can be changed.
	 */
	value = inl(pr->throttling.address);
	if (value & 0x10) {
		value &= 0xFFFFFFEF;
		outl(value, pr->throttling.address);
	}

	/*
	 * Write the new duty_value and then enable throttling.  Note
	 * that a state value of 0 leaves throttling disabled.
	 */
	if (state) {
		value &= duty_mask;
		value |= duty_value;
		outl(value, pr->throttling.address);

		value |= 0x00000010;
		outl(value, pr->throttling.address);
	}

	pr->throttling.state = state;

	local_irq_enable();

	acpi_handle_debug(pr->handle,
			  "Throttling state set to T%d (%d%%)\n", state,
			  (pr->throttling.states[state].performance ? pr->
			   throttling.states[state].performance / 10 : 0));

	return 0;
}

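/* Program a T-state by writing the matching _TSS control value via _PTC. */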
static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
					     int state, bool force)
{
	int ret;
	u64 value;

	if (!pr)
		return -EINVAL;

	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	if (!force && (state == pr->throttling.state))
		return 0;

	if (state < pr->throttling_platform_limit)
		return -EPERM;

	value = 0;
	ret = acpi_get_throttling_value(pr, state, &value);
	if (ret >= 0) {
		acpi_write_throttling_state(pr, value);
		pr->throttling.state = state;
	}

	return 0;
}

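/* Helper run via call_on_cpu() to apply a T-state on the target CPU. */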
static long acpi_processor_throttling_fn(void *data)
{
	struct acpi_processor_throttling_arg *arg = data;
	struct acpi_processor *pr = arg->pr;

	return pr->throttling.acpi_processor_set_throttling(pr,
			arg->target_state, arg->force);
}

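/*
 * Core T-state switch: run the PRECHANGE notifier on every CPU in the
 * domain to pick the final target, apply it on one CPU (SW_ANY) or on
 * all affected CPUs (SW_ALL/HW_ALL), then run the POSTCHANGE notifier.
 */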
static int __acpi_processor_set_throttling(struct acpi_processor *pr,
					   int state, bool force, bool direct)
{
	int ret = 0;
	unsigned int i;
	struct acpi_processor *match_pr;
	struct acpi_processor_throttling *p_throttling;
	struct acpi_processor_throttling_arg arg;
	struct throttling_tstate t_state;

	if (!pr)
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
		return -EINVAL;

	if (cpu_is_offline(pr->id)) {
		/*
		 * The CPU pointed to by pr->id is offline.  There is no
		 * need to change the throttling state any more.
		 */
		return -ENODEV;
	}

	t_state.target_state = state;
	p_throttling = &(pr->throttling);

	/*
	 * The throttling notifier will be called for every
	 * affected CPU in order to get one proper T-state.
	 * The notifier event is THROTTLING_PRECHANGE.
	 */
	for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
		t_state.cpu = i;
		acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
						   &t_state);
	}
	/*
	 * acpi_processor_set_throttling will be called to switch the
	 * T-state.  If the coordination type is SW_ALL or HW_ALL, it is
	 * necessary to call it for every affected CPU.  Otherwise
	 * it can be called only for the CPU pointed to by pr.
	 */
	if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
		arg.pr = pr;
		arg.target_state = state;
		arg.force = force;
		ret = call_on_cpu(pr->id, acpi_processor_throttling_fn, &arg,
				  direct);
	} else {
		/*
		 * When the T-state coordination is SW_ALL or HW_ALL,
		 * it is necessary to set the T-state for every affected
		 * CPU.
		 */
		for_each_cpu_and(i, cpu_online_mask,
				 p_throttling->shared_cpu_map) {
			match_pr = per_cpu(processors, i);
			/*
			 * If the pointer is invalid, report the
			 * error message and continue.
			 */
			if (!match_pr) {
				acpi_handle_debug(pr->handle,
					"Invalid Pointer for CPU %d\n", i);
				continue;
			}
			/*
			 * If throttling control is unsupported on CPU i,
			 * report the error message and continue.
			 */
			if (!match_pr->flags.throttling) {
				acpi_handle_debug(pr->handle,
					"Throttling Control unsupported on CPU %d\n", i);
				continue;
			}

			arg.pr = match_pr;
			arg.target_state = state;
			arg.force = force;
			ret = call_on_cpu(pr->id, acpi_processor_throttling_fn,
					  &arg, direct);
		}
	}
	/*
	 * After set_throttling has been called, the
	 * throttling notifier is called for every
	 * affected CPU to update the T-states.
	 * The notifier event is THROTTLING_POSTCHANGE.
	 */
	for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
		t_state.cpu = i;
		acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
						   &t_state);
	}

	return ret;
}

int acpi_processor_set_throttling(struct acpi_processor *pr, int state,
				  bool force)
{
	return __acpi_processor_set_throttling(pr, state, force, false);
}

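/*
 * Probe throttling support for one processor: prefer _PTC/_TSS/_TPC and
 * fall back to FADT duty-cycle throttling, parse _TSD, then make sure
 * the CPU starts out unthrottled.
 */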
int acpi_processor_get_throttling_info(struct acpi_processor *pr)
{
	int result = 0;
	struct acpi_processor_throttling *pthrottling;

	acpi_handle_debug(pr->handle,
			  "pblk_address[0x%08x] duty_offset[%d] duty_width[%d]\n",
			  pr->throttling.address,
			  pr->throttling.duty_offset,
			  pr->throttling.duty_width);

	/*
	 * Evaluate _PTC, _TSS and _TPC.
	 * They must all be present or none of them can be used.
	 */
	if (acpi_processor_get_throttling_control(pr) ||
	    acpi_processor_get_throttling_states(pr) ||
	    acpi_processor_get_platform_limit(pr)) {
		pr->throttling.acpi_processor_get_throttling =
		    &acpi_processor_get_throttling_fadt;
		pr->throttling.acpi_processor_set_throttling =
		    &acpi_processor_set_throttling_fadt;
		if (acpi_processor_get_fadt_info(pr))
			return 0;
	} else {
		pr->throttling.acpi_processor_get_throttling =
		    &acpi_processor_get_throttling_ptc;
		pr->throttling.acpi_processor_set_throttling =
		    &acpi_processor_set_throttling_ptc;
	}

	/*
	 * If the _TSD package for one CPU can't be parsed successfully,
	 * this CPU will have no coordination with other CPUs.
	 */
	if (acpi_processor_get_tsd(pr)) {
		pthrottling = &pr->throttling;
		pthrottling->tsd_valid_flag = 0;
		cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
		pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
	}

	/*
	 * PIIX4 Errata: We don't support throttling on the original PIIX4.
	 * This shouldn't be an issue as few (if any) mobile systems ever
	 * used this part.
	 */
	if (errata.piix4.throttle) {
		acpi_handle_debug(pr->handle,
				  "Throttling not supported on PIIX4 A- or B-step\n");
		return 0;
	}

	acpi_handle_debug(pr->handle, "Found %d throttling states\n",
			  pr->throttling.state_count);

	pr->flags.throttling = 1;

	/*
	 * Disable throttling (if enabled).  We'll let subsequent policy
	 * (e.g. thermal) decide to lower performance if it so chooses,
	 * but for now we'll crank up the speed.
	 */

	result = acpi_processor_get_throttling(pr);
	if (result)
		goto end;

	if (pr->throttling.state) {
		acpi_handle_debug(pr->handle,
				  "Disabling throttling (was T%d)\n",
				  pr->throttling.state);
		result = acpi_processor_set_throttling(pr, 0, false);
		if (result)
			goto end;
	}

end:
	if (result)
		pr->flags.throttling = 0;

	return result;
}