/*
 * PMU emulation helpers for TCG IBM POWER chips
 *
 * Copyright IBM Corp. 2021
 *
 * Authors:
 *  Daniel Henrique Barboza <danielhb413@gmail.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "helper_regs.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "hw/ppc/ppc.h"
#include "power8-pmu.h"

#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)

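/*
 * PMC1 has its own overflow condition enable bit in MMCR0 (PMC1CE);
 * PMC2-6 share the single MMCR0_PMCjCE bit.
 */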
static bool pmc_has_overflow_enabled(CPUPPCState *env, int sprn)
{
    if (sprn == SPR_POWER_PMC1) {
        return env->spr[SPR_POWER_MMCR0] & MMCR0_PMC1CE;
    }

    return env->spr[SPR_POWER_MMCR0] & MMCR0_PMCjCE;
}

/*
 * Called after MMCR0 or MMCR1 changes to update pmc_ins_cnt and pmc_cyc_cnt.
 * hflags must subsequently be updated.
 */
static void pmu_update_summaries(CPUPPCState *env)
{
    target_ulong mmcr0 = env->spr[SPR_POWER_MMCR0];
    target_ulong mmcr1 = env->spr[SPR_POWER_MMCR1];
    int ins_cnt = 0;
    int cyc_cnt = 0;

    if (mmcr0 & MMCR0_FC) {
        goto out;
    }

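    /*
     * Event selectors of interest: 0x02 (PM_INST_CMPL) and 0x1e
     * (PM_CYC) are valid on any PMC; 0xfe and 0xf0 are the PMC1-only
     * encodings of the same two events, and 0xfa (PM_RUN_INST_CMPL)
     * counts instructions on PMC4 while the run latch is set.
     */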
    if (!(mmcr0 & MMCR0_FC14) && mmcr1 != 0) {
        target_ulong sel;

        sel = extract64(mmcr1, MMCR1_PMC1EVT_EXTR, MMCR1_EVT_SIZE);
        switch (sel) {
        case 0x02:
        case 0xfe:
            ins_cnt |= 1 << 1;
            break;
        case 0x1e:
        case 0xf0:
            cyc_cnt |= 1 << 1;
            break;
        }

        sel = extract64(mmcr1, MMCR1_PMC2EVT_EXTR, MMCR1_EVT_SIZE);
        ins_cnt |= (sel == 0x02) << 2;
        cyc_cnt |= (sel == 0x1e) << 2;

        sel = extract64(mmcr1, MMCR1_PMC3EVT_EXTR, MMCR1_EVT_SIZE);
        ins_cnt |= (sel == 0x02) << 3;
        cyc_cnt |= (sel == 0x1e) << 3;

        sel = extract64(mmcr1, MMCR1_PMC4EVT_EXTR, MMCR1_EVT_SIZE);
        ins_cnt |= ((sel == 0xfa) || (sel == 0x02)) << 4;
        cyc_cnt |= (sel == 0x1e) << 4;
    }

    ins_cnt |= !(mmcr0 & MMCR0_FC56) << 5;
    cyc_cnt |= !(mmcr0 & MMCR0_FC56) << 6;

 out:
    env->pmc_ins_cnt = ins_cnt;
    env->pmc_cyc_cnt = cyc_cnt;
}

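/*
 * Translate MMCRA[IFM] (instruction filtering mode) into the internal
 * mask consulted when recording branches into the BHRB.
 */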
static void hreg_bhrb_filter_update(CPUPPCState *env)
{
    target_long ifm;

    if (!(env->spr[SPR_POWER_MMCR0] & MMCR0_PMAE)) {
        /* disable recording to BHRB */
        env->bhrb_filter = BHRB_TYPE_NORECORD;
        return;
    }

    ifm = (env->spr[SPR_POWER_MMCRA] & MMCRA_IFM_MASK) >> MMCRA_IFM_SHIFT;
    switch (ifm) {
    case 0:
        /* record all branches */
        env->bhrb_filter = -1;
        break;
    case 1:
        /* only record calls (LK = 1) */
        env->bhrb_filter = BHRB_TYPE_CALL;
        break;
    case 2:
        /* only record indirect branches */
        env->bhrb_filter = BHRB_TYPE_INDIRECT;
        break;
    case 3:
        /* only record conditional branches */
        env->bhrb_filter = BHRB_TYPE_COND;
        break;
    }
}

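/*
 * Common post-write hook for MMCR0/MMCR1/MMCRA: refresh the counting
 * summaries, hflags, PERFM interrupt level and BHRB filter.
 */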
void pmu_mmcr01a_updated(CPUPPCState *env)
{
    PowerPCCPU *cpu = env_archcpu(env);

    pmu_update_summaries(env);
    hreg_update_pmu_hflags(env);

    if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMAO) {
        ppc_set_irq(cpu, PPC_INTERRUPT_PERFM, 1);
    } else {
        ppc_set_irq(cpu, PPC_INTERRUPT_PERFM, 0);
    }

    hreg_bhrb_filter_update(env);

    /*
     * Should this update overflow timers (if mmcr0 is updated) so they
     * get set in cpu_post_load?
     */
}

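/*
 * Add num_insns to every PMC that is currently counting instructions,
 * as summarized in pmc_ins_cnt (bit n set means PMCn is counting).
 * Returns true if any overflow-enabled counter became negative.
 */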
static bool pmu_increment_insns(CPUPPCState *env, uint32_t num_insns)
{
    target_ulong mmcr0 = env->spr[SPR_POWER_MMCR0];
    unsigned ins_cnt = env->pmc_ins_cnt;
    bool overflow_triggered = false;
    target_ulong tmp;

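    /*
     * PMCs are 32-bit counters, so "counter negative" means bit 31 is
     * set, i.e. the counter reached PMC_COUNTER_NEGATIVE_VAL. The
     * counter is clamped there so the overflow is reported only once.
     */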
    if (ins_cnt & (1 << 1)) {
        tmp = env->spr[SPR_POWER_PMC1];
        tmp += num_insns;
        if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMC1CE)) {
            tmp = PMC_COUNTER_NEGATIVE_VAL;
            overflow_triggered = true;
        }
        env->spr[SPR_POWER_PMC1] = tmp;
    }

    if (ins_cnt & (1 << 2)) {
        tmp = env->spr[SPR_POWER_PMC2];
        tmp += num_insns;
        if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMCjCE)) {
            tmp = PMC_COUNTER_NEGATIVE_VAL;
            overflow_triggered = true;
        }
        env->spr[SPR_POWER_PMC2] = tmp;
    }

    if (ins_cnt & (1 << 3)) {
        tmp = env->spr[SPR_POWER_PMC3];
        tmp += num_insns;
        if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMCjCE)) {
            tmp = PMC_COUNTER_NEGATIVE_VAL;
            overflow_triggered = true;
        }
        env->spr[SPR_POWER_PMC3] = tmp;
    }

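    /*
     * PMC4 counts every completed instruction when programmed with
     * event 0x02 (PM_INST_CMPL); with the run-latch event (0xfa) it
     * only counts while CTRL[RUN] is set.
     */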
    if (ins_cnt & (1 << 4)) {
        target_ulong mmcr1 = env->spr[SPR_POWER_MMCR1];
        int sel = extract64(mmcr1, MMCR1_PMC4EVT_EXTR, MMCR1_EVT_SIZE);
        if (sel == 0x02 || (env->spr[SPR_CTRL] & CTRL_RUN)) {
            tmp = env->spr[SPR_POWER_PMC4];
            tmp += num_insns;
            if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMCjCE)) {
                tmp = PMC_COUNTER_NEGATIVE_VAL;
                overflow_triggered = true;
            }
            env->spr[SPR_POWER_PMC4] = tmp;
        }
    }

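    /*
     * PMC5 always counts completed instructions unless frozen by
     * MMCR0[FC] or MMCR0[FC56] (reflected in pmc_ins_cnt bit 5).
     */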
    if (ins_cnt & (1 << 5)) {
        tmp = env->spr[SPR_POWER_PMC5];
        tmp += num_insns;
        if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMCjCE)) {
            tmp = PMC_COUNTER_NEGATIVE_VAL;
            overflow_triggered = true;
        }
        env->spr[SPR_POWER_PMC5] = tmp;
    }

    return overflow_triggered;
}

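/*
 * Cycle counting is based on QEMU_CLOCK_VIRTUAL: instead of being
 * bumped every cycle, the counters are lazily brought up to date
 * whenever the PMU state is read or reconfigured.
 */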
static void pmu_update_cycles(CPUPPCState *env)
{
    uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    uint64_t time_delta = now - env->pmu_base_time;
    int sprn, cyc_cnt = env->pmc_cyc_cnt;

    for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC6; sprn++) {
        if (cyc_cnt & (1 << (sprn - SPR_POWER_PMC1 + 1))) {
            /*
             * The pseries and powernv clocks run at 1 GHz, so one
             * nanosecond equals one cycle.
             */
            env->spr[sprn] += time_delta;
        }
    }

    /* Update base_time for future calculations */
    env->pmu_base_time = now;
}

/*
 * Helper function to retrieve the cycle overflow timer of the
 * 'sprn' counter.
 */
static QEMUTimer *get_cyc_overflow_timer(CPUPPCState *env, int sprn)
{
    return env->pmu_cyc_overflow_timers[sprn - SPR_POWER_PMC1];
}

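/*
 * (Re)arm or cancel the cycle overflow timer of the given PMC so that
 * the alert fires when the counter would become negative.
 */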
static void pmc_update_overflow_timer(CPUPPCState *env, int sprn)
{
    QEMUTimer *pmc_overflow_timer = get_cyc_overflow_timer(env, sprn);
    int64_t timeout;

    /*
     * PMC5 does not have an overflow timer and this pointer
     * will be NULL.
     */
    if (!pmc_overflow_timer) {
        return;
    }

    if (!(env->pmc_cyc_cnt & (1 << (sprn - SPR_POWER_PMC1 + 1))) ||
        !pmc_has_overflow_enabled(env, sprn)) {
        /* Overflow timer is not needed for this counter */
        timer_del(pmc_overflow_timer);
        return;
    }

    if (env->spr[sprn] >= PMC_COUNTER_NEGATIVE_VAL) {
        timeout = 0;
    } else {
        timeout = PMC_COUNTER_NEGATIVE_VAL - env->spr[sprn];
    }

    /*
     * Use timer_mod_anticipate() because an overflow timer might
     * be already running for this PMC.
     */
    timer_mod_anticipate(pmc_overflow_timer, env->pmu_base_time + timeout);
}

static void pmu_update_overflow_timers(CPUPPCState *env)
{
    int sprn;

    /*
     * Loop through all PMCs and start counter overflow timers for
     * PM_CYC events, if needed.
     */
    for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC6; sprn++) {
        pmc_update_overflow_timer(env, sprn);
    }
}

static void pmu_delete_timers(CPUPPCState *env)
{
    QEMUTimer *pmc_overflow_timer;
    int sprn;

    for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC6; sprn++) {
        pmc_overflow_timer = get_cyc_overflow_timer(env, sprn);

        if (pmc_overflow_timer) {
            timer_del(pmc_overflow_timer);
        }
    }
}

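/*
 * Note the ordering in the MMCR0/MMCR1 store helpers below: the
 * elapsed time is folded into the counters under the old
 * configuration before the newly written control bits take effect.
 */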
void helper_store_mmcr0(CPUPPCState *env, target_ulong value)
{
    pmu_update_cycles(env);

    env->spr[SPR_POWER_MMCR0] = value;

    pmu_mmcr01a_updated(env);

    /* Update cycle overflow timers with the current MMCR0 state */
    pmu_update_overflow_timers(env);
}

void helper_store_mmcr1(CPUPPCState *env, uint64_t value)
{
    pmu_update_cycles(env);

    env->spr[SPR_POWER_MMCR1] = value;

    pmu_mmcr01a_updated(env);
}

void helper_store_mmcrA(CPUPPCState *env, uint64_t value)
{
    env->spr[SPR_POWER_MMCRA] = value;

    pmu_mmcr01a_updated(env);
}

target_ulong helper_read_pmc(CPUPPCState *env, uint32_t sprn)
{
    pmu_update_cycles(env);

    return env->spr[sprn];
}

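/* PMCs are 32 bits wide, so the stored value is truncated. */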
void helper_store_pmc(CPUPPCState *env, uint32_t sprn, uint64_t value)
{
    pmu_update_cycles(env);

    env->spr[sprn] = (uint32_t)value;

    pmc_update_overflow_timer(env, sprn);
}

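/*
 * Raise a Performance Monitor alert: freeze the counters if FCECE is
 * set, convert an enabled alert (PMAE) into an outstanding one (PMAO)
 * with the PERFM interrupt raised, then let the EBB code deliver an
 * event-based branch if one is due.
 */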
static void perfm_alert(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    pmu_update_cycles(env);

    if (env->spr[SPR_POWER_MMCR0] & MMCR0_FCECE) {
        env->spr[SPR_POWER_MMCR0] |= MMCR0_FC;

        /* Changing MMCR0_FC requires summaries and hflags update */
        pmu_mmcr01a_updated(env);

        /*
         * Delete all pending timers if we need to freeze
         * the PMC. We'll restart them when the PMC starts
         * running again.
         */
        pmu_delete_timers(env);
    }

    if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMAE) {
        /* These MMCR0 bits do not require summaries or hflags update. */
        env->spr[SPR_POWER_MMCR0] &= ~MMCR0_PMAE;
        env->spr[SPR_POWER_MMCR0] |= MMCR0_PMAO;
        ppc_set_irq(cpu, PPC_INTERRUPT_PERFM, 1);
    }

    raise_ebb_perfm_exception(env);
}

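/*
 * PMC5 has no cycle overflow timer; its (instruction) overflow is
 * instead detected by the translated code, which calls this helper.
 */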
void helper_handle_pmc5_overflow(CPUPPCState *env)
{
    env->spr[SPR_POWER_PMC5] = PMC_COUNTER_NEGATIVE_VAL;
    perfm_alert(env_archcpu(env));
}

/* This helper assumes that the PMC is running. */
void helper_insns_inc(CPUPPCState *env, uint32_t num_insns)
{
    bool overflow_triggered;

    overflow_triggered = pmu_increment_insns(env, num_insns);
    if (overflow_triggered) {
        perfm_alert(env_archcpu(env));
    }
}

static void cpu_ppc_pmu_timer_cb(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    perfm_alert(cpu);
}

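/*
 * Allocate a cycle overflow timer for every PMC except PMC5, which
 * only counts instructions and never needs one.
 */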
void cpu_ppc_pmu_init(CPUPPCState *env)
{
    PowerPCCPU *cpu = env_archcpu(env);
    int i, sprn;

    for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC6; sprn++) {
        if (sprn == SPR_POWER_PMC5) {
            continue;
        }

        i = sprn - SPR_POWER_PMC1;

        env->pmu_cyc_overflow_timers[i] = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                                       &cpu_ppc_pmu_timer_cb,
                                                       cpu);
    }
}
#endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */