// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2020 Linaro Limited
 *
 * Author: Daniel Lezcano <daniel.lezcano@linaro.org>
 *
 * The DTPM CPU is based on the energy model. It hooks the CPU into the
 * DTPM tree which, in turn, updates the power numbers by propagating
 * them from the CPU energy model information up to the parents.
 *
 * The association between the power and the performance states allows
 * setting the power of the CPU at the OPP granularity.
 *
 * CPU hotplug is supported and the power numbers are updated when a
 * CPU is hot plugged / unplugged.
 */
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/cpuhotplug.h>
#include <linux/dtpm.h>
#include <linux/energy_model.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/units.h>

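/*
 * Parent node in the DTPM hierarchy the per performance domain nodes
 * are attached to. It is set when dtpm_register_cpu() is called.
 */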
static struct dtpm *__parent;

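/*
 * Each CPU points to the dtpm node describing its performance domain,
 * so all CPUs of the same cpufreq policy share the same node.
 */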
static DEFINE_PER_CPU(struct dtpm *, dtpm_per_cpu);

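/*
 * Private data attached to the dtpm node of a performance domain: the
 * frequency QoS request used to cap the frequency and one CPU of the
 * domain used to retrieve the energy model.
 */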
struct dtpm_cpu {
	struct freq_qos_request qos_req;
	int cpu;
};

/*
 * When a new CPU is inserted at hotplug or boot time, add the power
 * contribution and update the dtpm tree.
 */
static int power_add(struct dtpm *dtpm, struct em_perf_domain *em)
{
	u64 power_min, power_max;

	power_min = em->table[0].power;
	power_min *= MICROWATT_PER_MILLIWATT;
	power_min += dtpm->power_min;

	power_max = em->table[em->nr_perf_states - 1].power;
	power_max *= MICROWATT_PER_MILLIWATT;
	power_max += dtpm->power_max;

	return dtpm_update_power(dtpm, power_min, power_max);
}

/*
 * When a CPU is unplugged, remove its power contribution from the
 * dtpm tree.
 */
static int power_sub(struct dtpm *dtpm, struct em_perf_domain *em)
{
	u64 power_min, power_max;

	power_min = em->table[0].power;
	power_min *= MICROWATT_PER_MILLIWATT;
	power_min = dtpm->power_min - power_min;

	power_max = em->table[em->nr_perf_states - 1].power;
	power_max *= MICROWATT_PER_MILLIWATT;
	power_max = dtpm->power_max - power_max;

	return dtpm_update_power(dtpm, power_min, power_max);
}

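/*
 * Find the highest performance state whose power, scaled to microwatts
 * and multiplied by the number of online CPUs of the domain, fits in
 * the requested power limit. Cap the frequency accordingly with the
 * QoS request and return the power effectively allocated.
 */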
static u64 set_pd_power_limit(struct dtpm *dtpm, u64 power_limit)
{
	struct dtpm_cpu *dtpm_cpu = dtpm->private;
	struct em_perf_domain *pd;
	struct cpumask cpus;
	unsigned long freq;
	u64 power;
	int i, nr_cpus;

	pd = em_cpu_get(dtpm_cpu->cpu);

	cpumask_and(&cpus, cpu_online_mask, to_cpumask(pd->cpus));

	nr_cpus = cpumask_weight(&cpus);

	for (i = 0; i < pd->nr_perf_states; i++) {

		power = pd->table[i].power * MICROWATT_PER_MILLIWATT * nr_cpus;

		if (power > power_limit)
			break;
	}

	freq = pd->table[i - 1].frequency;

	freq_qos_update_request(&dtpm_cpu->qos_req, freq);

	power_limit = pd->table[i - 1].power *
		MICROWATT_PER_MILLIWATT * nr_cpus;

	return power_limit;
}

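/*
 * Compute the current power consumption of the performance domain in
 * microwatts: pick the first energy model entry at or above the
 * current frequency and scale its power by the number of online CPUs.
 */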
static u64 get_pd_power_uw(struct dtpm *dtpm)
{
	struct dtpm_cpu *dtpm_cpu = dtpm->private;
	struct em_perf_domain *pd;
	struct cpumask cpus;
	unsigned long freq;
	int i, nr_cpus;

	pd = em_cpu_get(dtpm_cpu->cpu);
	freq = cpufreq_quick_get(dtpm_cpu->cpu);
	cpumask_and(&cpus, cpu_online_mask, to_cpumask(pd->cpus));
	nr_cpus = cpumask_weight(&cpus);

	for (i = 0; i < pd->nr_perf_states; i++) {

		if (pd->table[i].frequency < freq)
			continue;

		return pd->table[i].power *
			MICROWATT_PER_MILLIWATT * nr_cpus;
	}

	return 0;
}

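/*
 * Called by the dtpm core when the node is destroyed: drop the
 * frequency QoS request if it is still active and free the private
 * data.
 */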
static void pd_release(struct dtpm *dtpm)
{
	struct dtpm_cpu *dtpm_cpu = dtpm->private;

	if (freq_qos_request_active(&dtpm_cpu->qos_req))
		freq_qos_remove_request(&dtpm_cpu->qos_req);

	kfree(dtpm_cpu);
}

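/* Callbacks connecting the performance domain to the dtpm core. */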
static struct dtpm_ops dtpm_ops = {
	.set_power_uw = set_pd_power_limit,
	.get_power_uw = get_pd_power_uw,
	.release = pd_release,
};

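/*
 * A CPU is going offline: subtract its power contribution from the
 * dtpm tree and, when it is the last online CPU of the policy,
 * unregister the node of the performance domain.
 */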
static int cpuhp_dtpm_cpu_offline(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	struct em_perf_domain *pd;
	struct dtpm *dtpm;

	policy = cpufreq_cpu_get(cpu);

	if (!policy)
		return 0;

	pd = em_cpu_get(cpu);
	if (!pd)
		return -EINVAL;

	dtpm = per_cpu(dtpm_per_cpu, cpu);

	power_sub(dtpm, pd);

	if (cpumask_weight(policy->cpus) != 1)
		return 0;

	for_each_cpu(cpu, policy->related_cpus)
		per_cpu(dtpm_per_cpu, cpu) = NULL;

	dtpm_unregister(dtpm);

	return 0;
}

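/*
 * A CPU is coming online: if the performance domain already has a dtpm
 * node, only add the CPU power contribution. Otherwise allocate and
 * register a new "cpuN" node, add the power contribution and install a
 * frequency QoS request initialized to the maximum frequency.
 */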
static int cpuhp_dtpm_cpu_online(unsigned int cpu)
{
	struct dtpm *dtpm;
	struct dtpm_cpu *dtpm_cpu;
	struct cpufreq_policy *policy;
	struct em_perf_domain *pd;
	char name[CPUFREQ_NAME_LEN];
	int ret = -ENOMEM;

	policy = cpufreq_cpu_get(cpu);

	if (!policy)
		return 0;

	pd = em_cpu_get(cpu);
	if (!pd)
		return -EINVAL;

	dtpm = per_cpu(dtpm_per_cpu, cpu);
	if (dtpm)
		return power_add(dtpm, pd);

	dtpm = dtpm_alloc(&dtpm_ops);
	if (!dtpm)
		return -EINVAL;

	dtpm_cpu = kzalloc(sizeof(*dtpm_cpu), GFP_KERNEL);
	if (!dtpm_cpu)
		goto out_kfree_dtpm;

	dtpm->private = dtpm_cpu;
	dtpm_cpu->cpu = cpu;

	for_each_cpu(cpu, policy->related_cpus)
		per_cpu(dtpm_per_cpu, cpu) = dtpm;

	sprintf(name, "cpu%d", dtpm_cpu->cpu);

	ret = dtpm_register(name, dtpm, __parent);
	if (ret)
		goto out_kfree_dtpm_cpu;

	ret = power_add(dtpm, pd);
	if (ret)
		goto out_dtpm_unregister;

	ret = freq_qos_add_request(&policy->constraints,
				   &dtpm_cpu->qos_req, FREQ_QOS_MAX,
				   pd->table[pd->nr_perf_states - 1].frequency);
	if (ret)
		goto out_power_sub;

	return 0;

out_power_sub:
	power_sub(dtpm, pd);

out_dtpm_unregister:
	dtpm_unregister(dtpm);
	dtpm_cpu = NULL;
	dtpm = NULL;

out_kfree_dtpm_cpu:
	for_each_cpu(cpu, policy->related_cpus)
		per_cpu(dtpm_per_cpu, cpu) = NULL;
	kfree(dtpm_cpu);

out_kfree_dtpm:
	kfree(dtpm);
	return ret;
}

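/*
 * Register the CPU hierarchy under the given parent node by installing
 * the CPU hotplug callbacks, so the dtpm nodes are created for the
 * CPUs already online and updated on subsequent hotplug operations.
 */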
int dtpm_register_cpu(struct dtpm *parent)
{
	__parent = parent;

	return cpuhp_setup_state(CPUHP_AP_DTPM_CPU_ONLINE,
				 "dtpm_cpu:online",
				 cpuhp_dtpm_cpu_online,
				 cpuhp_dtpm_cpu_offline);
}