/*
 * RISC-V PMU file.
 *
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "cpu.h"
#include "pmu.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/device_tree.h"

#define RISCV_TIMEBASE_FREQ 1000000000 /* 1 GHz */

/*
 * To keep it simple, any event can be mapped to any programmable counter in
 * QEMU. The generic cycle & instruction count events can also be monitored
 * using programmable counters. In that case, mcycle & minstret must continue
 * to provide the correct value as well. Heterogeneous PMU per hart is not
 * supported yet. Thus, the number of counters is the same across all harts.
 */
void riscv_pmu_generate_fdt_node(void *fdt, uint32_t cmask, char *pmu_name)
{
    uint32_t fdt_event_ctr_map[15] = {};

    /*
     * The event encoding is specified in the SBI specification.
     * Event idx is a 20-bit wide number encoded as follows:
     * event_idx[19:16] = type
     * event_idx[15:0] = code
     * The code field for cache events is encoded as follows:
     * event_idx.code[15:3] = cache_id
     * event_idx.code[2:1] = op_id
     * event_idx.code[0:0] = result_id
     */
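    /*
     * For example, the DTLB read-miss entry below packs cache_id 0x03
     * (DTLB), op_id 0x00 (READ) and result_id 0x01 (MISS) into
     * code = (0x03 << 3) | (0x00 << 1) | 0x01 = 0x19, which together with
     * the cache event type 0x01 gives event_idx = 0x00010019.
     */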

    /* SBI_PMU_HW_CPU_CYCLES: 0x01 : type(0x00) */
    fdt_event_ctr_map[0] = cpu_to_be32(0x00000001);
    fdt_event_ctr_map[1] = cpu_to_be32(0x00000001);
    fdt_event_ctr_map[2] = cpu_to_be32(cmask | 1 << 0);

    /* SBI_PMU_HW_INSTRUCTIONS: 0x02 : type(0x00) */
    fdt_event_ctr_map[3] = cpu_to_be32(0x00000002);
    fdt_event_ctr_map[4] = cpu_to_be32(0x00000002);
    fdt_event_ctr_map[5] = cpu_to_be32(cmask | 1 << 2);

    /* SBI_PMU_HW_CACHE_DTLB : 0x03 READ : 0x00 MISS : 0x01 type(0x01) */
    fdt_event_ctr_map[6] = cpu_to_be32(0x00010019);
    fdt_event_ctr_map[7] = cpu_to_be32(0x00010019);
    fdt_event_ctr_map[8] = cpu_to_be32(cmask);

    /* SBI_PMU_HW_CACHE_DTLB : 0x03 WRITE : 0x01 MISS : 0x01 type(0x01) */
    fdt_event_ctr_map[9] = cpu_to_be32(0x0001001B);
    fdt_event_ctr_map[10] = cpu_to_be32(0x0001001B);
    fdt_event_ctr_map[11] = cpu_to_be32(cmask);

    /* SBI_PMU_HW_CACHE_ITLB : 0x04 READ : 0x00 MISS : 0x01 type(0x01) */
    fdt_event_ctr_map[12] = cpu_to_be32(0x00010021);
    fdt_event_ctr_map[13] = cpu_to_be32(0x00010021);
    fdt_event_ctr_map[14] = cpu_to_be32(cmask);

    /* This is an OpenSBI-specific DT property documented in the OpenSBI docs */
    qemu_fdt_setprop(fdt, pmu_name, "riscv,event-to-mhpmcounters",
                     fdt_event_ctr_map, sizeof(fdt_event_ctr_map));
}

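/*
 * Counter indices follow the counter CSR layout: index 0 is mcycle, index 1
 * is reserved for time, index 2 is minstret, and indices 3..31 are the
 * programmable mhpmcounters. Only the programmable counters can be mapped
 * to events, hence the lower bound check below.
 */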
static bool riscv_pmu_counter_valid(RISCVCPU *cpu, uint32_t ctr_idx)
{
    if (ctr_idx < 3 || ctr_idx >= RV_MAX_MHPMCOUNTERS ||
        !(cpu->pmu_avail_ctrs & BIT(ctr_idx))) {
        return false;
    } else {
        return true;
    }
}

static bool riscv_pmu_counter_enabled(RISCVCPU *cpu, uint32_t ctr_idx)
{
    CPURISCVState *env = &cpu->env;

    if (riscv_pmu_counter_valid(cpu, ctr_idx) &&
        !get_field(env->mcountinhibit, BIT(ctr_idx))) {
        return true;
    } else {
        return false;
    }
}

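/*
 * On RV32 the Sscofpmf control bits (the xINH filters and OF) live in the
 * upper half of the event selector, i.e. in mhpmeventh, which is why the
 * RV32 helper below checks mhpmeventh_val while the RV64 one checks
 * mhpmevent_val.
 */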
static int riscv_pmu_incr_ctr_rv32(RISCVCPU *cpu, uint32_t ctr_idx)
{
    CPURISCVState *env = &cpu->env;
    target_ulong max_val = UINT32_MAX;
    PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
    bool virt_on = env->virt_enabled;

    /* Privilege mode filtering */
    if ((env->priv == PRV_M &&
         (env->mhpmeventh_val[ctr_idx] & MHPMEVENTH_BIT_MINH)) ||
        (env->priv == PRV_S && virt_on &&
         (env->mhpmeventh_val[ctr_idx] & MHPMEVENTH_BIT_VSINH)) ||
        (env->priv == PRV_U && virt_on &&
         (env->mhpmeventh_val[ctr_idx] & MHPMEVENTH_BIT_VUINH)) ||
        (env->priv == PRV_S && !virt_on &&
         (env->mhpmeventh_val[ctr_idx] & MHPMEVENTH_BIT_SINH)) ||
        (env->priv == PRV_U && !virt_on &&
         (env->mhpmeventh_val[ctr_idx] & MHPMEVENTH_BIT_UINH))) {
        return 0;
    }

    /* Handle the overflow scenario */
    if (counter->mhpmcounter_val == max_val) {
        if (counter->mhpmcounterh_val == max_val) {
            counter->mhpmcounter_val = 0;
            counter->mhpmcounterh_val = 0;
            /* Generate interrupt only if OF bit is clear */
            if (!(env->mhpmeventh_val[ctr_idx] & MHPMEVENTH_BIT_OF)) {
                env->mhpmeventh_val[ctr_idx] |= MHPMEVENTH_BIT_OF;
                riscv_cpu_update_mip(env, MIP_LCOFIP, BOOL_TO_MASK(1));
            }
        } else {
            counter->mhpmcounterh_val++;
        }
    } else {
        counter->mhpmcounter_val++;
    }

    return 0;
}

static int riscv_pmu_incr_ctr_rv64(RISCVCPU *cpu, uint32_t ctr_idx)
{
    CPURISCVState *env = &cpu->env;
    PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
    uint64_t max_val = UINT64_MAX;
    bool virt_on = env->virt_enabled;

    /* Privilege mode filtering */
    if ((env->priv == PRV_M &&
         (env->mhpmevent_val[ctr_idx] & MHPMEVENT_BIT_MINH)) ||
        (env->priv == PRV_S && virt_on &&
         (env->mhpmevent_val[ctr_idx] & MHPMEVENT_BIT_VSINH)) ||
        (env->priv == PRV_U && virt_on &&
         (env->mhpmevent_val[ctr_idx] & MHPMEVENT_BIT_VUINH)) ||
        (env->priv == PRV_S && !virt_on &&
         (env->mhpmevent_val[ctr_idx] & MHPMEVENT_BIT_SINH)) ||
        (env->priv == PRV_U && !virt_on &&
         (env->mhpmevent_val[ctr_idx] & MHPMEVENT_BIT_UINH))) {
        return 0;
    }

    /* Handle the overflow scenario */
    if (counter->mhpmcounter_val == max_val) {
        counter->mhpmcounter_val = 0;
        /* Generate interrupt only if OF bit is clear */
        if (!(env->mhpmevent_val[ctr_idx] & MHPMEVENT_BIT_OF)) {
            env->mhpmevent_val[ctr_idx] |= MHPMEVENT_BIT_OF;
            riscv_cpu_update_mip(env, MIP_LCOFIP, BOOL_TO_MASK(1));
        }
    } else {
        counter->mhpmcounter_val++;
    }
    return 0;
}

/*
 * Information needed to update counters:
 * newpriv, new_virt: To correctly save the starting snapshot for the newly
 *                    entered mode. Look at the arrays indexed with newpriv.
 * old priv, old virt: To correctly select the previous snapshot for the old
 *                     privilege mode and compute the delta. Also to select
 *                     the correct counter to increment. Look at the arrays
 *                     indexed with env->priv.
 *
 * To avoid complicating the calling convention, we assume that env->priv
 * and env->virt_enabled contain the old priv and old virt values, and the
 * new priv and new virt values are passed in as arguments.
 */
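/*
 * pmu_fixed_ctrs[0] tracks cycles and pmu_fixed_ctrs[1] tracks retired
 * instructions. Each entry keeps a per-privilege-mode count plus the
 * matching "prev" snapshot taken when that mode was last entered, with
 * separate arrays for virtualized (VS/VU) execution.
 */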
static void riscv_pmu_icount_update_priv(CPURISCVState *env,
                                         target_ulong newpriv, bool new_virt)
{
    uint64_t *snapshot_prev, *snapshot_new;
    uint64_t current_icount;
    uint64_t *counter_arr;
    uint64_t delta;

    if (icount_enabled()) {
        current_icount = icount_get_raw();
    } else {
        current_icount = cpu_get_host_ticks();
    }

    if (env->virt_enabled) {
        g_assert(env->priv <= PRV_S);
        counter_arr = env->pmu_fixed_ctrs[1].counter_virt;
        snapshot_prev = env->pmu_fixed_ctrs[1].counter_virt_prev;
    } else {
        counter_arr = env->pmu_fixed_ctrs[1].counter;
        snapshot_prev = env->pmu_fixed_ctrs[1].counter_prev;
    }

    if (new_virt) {
        g_assert(newpriv <= PRV_S);
        snapshot_new = env->pmu_fixed_ctrs[1].counter_virt_prev;
    } else {
        snapshot_new = env->pmu_fixed_ctrs[1].counter_prev;
    }

    /*
     * new_priv can be same as env->priv. So we need to calculate
     * delta first before updating snapshot_new[new_priv].
     */
    delta = current_icount - snapshot_prev[env->priv];
    snapshot_new[newpriv] = current_icount;

    counter_arr[env->priv] += delta;
}

static void riscv_pmu_cycle_update_priv(CPURISCVState *env,
                                        target_ulong newpriv, bool new_virt)
{
    uint64_t *snapshot_prev, *snapshot_new;
    uint64_t current_ticks;
    uint64_t *counter_arr;
    uint64_t delta;

    if (icount_enabled()) {
        current_ticks = icount_get();
    } else {
        current_ticks = cpu_get_host_ticks();
    }

    if (env->virt_enabled) {
        g_assert(env->priv <= PRV_S);
        counter_arr = env->pmu_fixed_ctrs[0].counter_virt;
        snapshot_prev = env->pmu_fixed_ctrs[0].counter_virt_prev;
    } else {
        counter_arr = env->pmu_fixed_ctrs[0].counter;
        snapshot_prev = env->pmu_fixed_ctrs[0].counter_prev;
    }

    if (new_virt) {
        g_assert(newpriv <= PRV_S);
        snapshot_new = env->pmu_fixed_ctrs[0].counter_virt_prev;
    } else {
        snapshot_new = env->pmu_fixed_ctrs[0].counter_prev;
    }

    delta = current_ticks - snapshot_prev[env->priv];
    snapshot_new[newpriv] = current_ticks;

    counter_arr[env->priv] += delta;
}

void riscv_pmu_update_fixed_ctrs(CPURISCVState *env, target_ulong newpriv,
                                 bool new_virt)
{
    riscv_pmu_cycle_update_priv(env, newpriv, new_virt);
    riscv_pmu_icount_update_priv(env, newpriv, new_virt);
}
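
/*
 * This is expected to be called on every privilege or virtualization mode
 * change (typically from the mode switch path, e.g. riscv_cpu_set_mode()),
 * while env->priv and env->virt_enabled still describe the outgoing mode.
 */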

int riscv_pmu_incr_ctr(RISCVCPU *cpu, enum riscv_pmu_event_idx event_idx)
{
    uint32_t ctr_idx;
    int ret;
    CPURISCVState *env = &cpu->env;
    gpointer value;

    if (!cpu->cfg.pmu_mask) {
        return 0;
    }
    value = g_hash_table_lookup(cpu->pmu_event_ctr_map,
                                GUINT_TO_POINTER(event_idx));
    if (!value) {
        return -1;
    }

    ctr_idx = GPOINTER_TO_UINT(value);
    if (!riscv_pmu_counter_enabled(cpu, ctr_idx)) {
        return -1;
    }

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        ret = riscv_pmu_incr_ctr_rv32(cpu, ctr_idx);
    } else {
        ret = riscv_pmu_incr_ctr_rv64(cpu, ctr_idx);
    }

    return ret;
}
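
/*
 * A typical caller of riscv_pmu_incr_ctr() is the TLB fill path, which can
 * account a miss with something like:
 *
 *     riscv_pmu_incr_ctr(cpu, RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS);
 *
 * The call returns -1 (and counts nothing) when no counter is mapped to the
 * event or the mapped counter is inhibited.
 */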

bool riscv_pmu_ctr_monitor_instructions(CPURISCVState *env,
                                        uint32_t target_ctr)
{
    RISCVCPU *cpu;
    uint32_t event_idx;
    uint32_t ctr_idx;

    /* Fixed instret counter */
    if (target_ctr == 2) {
        return true;
    }

    cpu = env_archcpu(env);
    if (!cpu->pmu_event_ctr_map) {
        return false;
    }

    event_idx = RISCV_PMU_EVENT_HW_INSTRUCTIONS;
    ctr_idx = GPOINTER_TO_UINT(g_hash_table_lookup(cpu->pmu_event_ctr_map,
                                                   GUINT_TO_POINTER(event_idx)));
    if (!ctr_idx) {
        return false;
    }

    return target_ctr == ctr_idx ? true : false;
}

bool riscv_pmu_ctr_monitor_cycles(CPURISCVState *env, uint32_t target_ctr)
{
    RISCVCPU *cpu;
    uint32_t event_idx;
    uint32_t ctr_idx;

    /* Fixed mcycle counter */
    if (target_ctr == 0) {
        return true;
    }

    cpu = env_archcpu(env);
    if (!cpu->pmu_event_ctr_map) {
        return false;
    }

    event_idx = RISCV_PMU_EVENT_HW_CPU_CYCLES;
    ctr_idx = GPOINTER_TO_UINT(g_hash_table_lookup(cpu->pmu_event_ctr_map,
                                                   GUINT_TO_POINTER(event_idx)));

    /* Counter zero is not used for event_ctr_map */
    if (!ctr_idx) {
        return false;
    }

    return (target_ctr == ctr_idx) ? true : false;
}

static gboolean pmu_remove_event_map(gpointer key, gpointer value,
                                     gpointer udata)
{
    return (GPOINTER_TO_UINT(value) == GPOINTER_TO_UINT(udata)) ? true : false;
}

static int64_t pmu_icount_ticks_to_ns(int64_t value)
{
    int64_t ret = 0;

    if (icount_enabled()) {
        ret = icount_to_ns(value);
    } else {
        ret = (NANOSECONDS_PER_SECOND / RISCV_TIMEBASE_FREQ) * value;
    }

    return ret;
}
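
/*
 * With RISCV_TIMEBASE_FREQ at 1 GHz the non-icount path above works out to
 * 1 ns per counter tick, so e.g. a delta of 1000000 ticks translates into a
 * deadline 1 ms ahead on QEMU_CLOCK_VIRTUAL.
 */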

int riscv_pmu_update_event_map(CPURISCVState *env, uint64_t value,
                               uint32_t ctr_idx)
{
    uint32_t event_idx;
    RISCVCPU *cpu = env_archcpu(env);

    if (!riscv_pmu_counter_valid(cpu, ctr_idx) || !cpu->pmu_event_ctr_map) {
        return -1;
    }

    /*
     * Expected mhpmevent value is zero for reset case. Remove the current
     * mapping.
     */
    if (!value) {
        g_hash_table_foreach_remove(cpu->pmu_event_ctr_map,
                                    pmu_remove_event_map,
                                    GUINT_TO_POINTER(ctr_idx));
        return 0;
    }

    event_idx = value & MHPMEVENT_IDX_MASK;
    if (g_hash_table_lookup(cpu->pmu_event_ctr_map,
                            GUINT_TO_POINTER(event_idx))) {
        return 0;
    }

    switch (event_idx) {
    case RISCV_PMU_EVENT_HW_CPU_CYCLES:
    case RISCV_PMU_EVENT_HW_INSTRUCTIONS:
    case RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS:
    case RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS:
    case RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS:
        break;
    default:
        /* We don't support any raw events right now */
        return -1;
    }
    g_hash_table_insert(cpu->pmu_event_ctr_map, GUINT_TO_POINTER(event_idx),
                        GUINT_TO_POINTER(ctr_idx));

    return 0;
}
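
/*
 * riscv_pmu_update_event_map() is expected to be called from the mhpmevent
 * CSR write path: a zero selector drops any existing mapping for the
 * counter, while a supported event index maps that event to the counter.
 */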

static bool pmu_hpmevent_is_of_set(CPURISCVState *env, uint32_t ctr_idx)
{
    target_ulong mhpmevent_val;
    uint64_t of_bit_mask;

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        mhpmevent_val = env->mhpmeventh_val[ctr_idx];
        of_bit_mask = MHPMEVENTH_BIT_OF;
    } else {
        mhpmevent_val = env->mhpmevent_val[ctr_idx];
        of_bit_mask = MHPMEVENT_BIT_OF;
    }

    return get_field(mhpmevent_val, of_bit_mask);
}

static bool pmu_hpmevent_set_of_if_clear(CPURISCVState *env, uint32_t ctr_idx)
{
    target_ulong *mhpmevent_val;
    uint64_t of_bit_mask;

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        mhpmevent_val = &env->mhpmeventh_val[ctr_idx];
        of_bit_mask = MHPMEVENTH_BIT_OF;
    } else {
        mhpmevent_val = &env->mhpmevent_val[ctr_idx];
        of_bit_mask = MHPMEVENT_BIT_OF;
    }

    if (!get_field(*mhpmevent_val, of_bit_mask)) {
        *mhpmevent_val |= of_bit_mask;
        return true;
    }

    return false;
}

static void pmu_timer_trigger_irq(RISCVCPU *cpu,
                                  enum riscv_pmu_event_idx evt_idx)
{
    uint32_t ctr_idx;
    CPURISCVState *env = &cpu->env;
    PMUCTRState *counter;
    int64_t irq_trigger_at;
    uint64_t curr_ctr_val, curr_ctrh_val;
    uint64_t ctr_val;

    if (evt_idx != RISCV_PMU_EVENT_HW_CPU_CYCLES &&
        evt_idx != RISCV_PMU_EVENT_HW_INSTRUCTIONS) {
        return;
    }

    ctr_idx = GPOINTER_TO_UINT(g_hash_table_lookup(cpu->pmu_event_ctr_map,
                                                   GUINT_TO_POINTER(evt_idx)));
    if (!riscv_pmu_counter_enabled(cpu, ctr_idx)) {
        return;
    }

    /* Generate interrupt only if OF bit is clear */
    if (pmu_hpmevent_is_of_set(env, ctr_idx)) {
        return;
    }

    counter = &env->pmu_ctrs[ctr_idx];
    if (counter->irq_overflow_left > 0) {
        irq_trigger_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                         counter->irq_overflow_left;
        timer_mod_anticipate_ns(cpu->pmu_timer, irq_trigger_at);
        counter->irq_overflow_left = 0;
        return;
    }

    riscv_pmu_read_ctr(env, (target_ulong *)&curr_ctr_val, false, ctr_idx);
    ctr_val = counter->mhpmcounter_val;
    if (riscv_cpu_mxl(env) == MXL_RV32) {
        riscv_pmu_read_ctr(env, (target_ulong *)&curr_ctrh_val, true, ctr_idx);
        curr_ctr_val = curr_ctr_val | (curr_ctrh_val << 32);
        ctr_val = ctr_val |
                  ((uint64_t)counter->mhpmcounterh_val << 32);
    }

    /*
     * We cannot account for inhibited modes when setting up the timer. Check
     * whether the counter has actually overflowed by comparing the current
     * counter value (which does account for inhibited modes) with the
     * software-written counter value.
     */
    if (curr_ctr_val >= ctr_val) {
        riscv_pmu_setup_timer(env, curr_ctr_val, ctr_idx);
        return;
    }

    if (cpu->pmu_avail_ctrs & BIT(ctr_idx)) {
        if (pmu_hpmevent_set_of_if_clear(env, ctr_idx)) {
            riscv_cpu_update_mip(env, MIP_LCOFIP, BOOL_TO_MASK(1));
        }
    }
}

/* Timer callback for instret and cycle counter overflow */
void riscv_pmu_timer_cb(void *priv)
{
    RISCVCPU *cpu = priv;

    /* The overflow timer is only armed for these two events */
    pmu_timer_trigger_irq(cpu, RISCV_PMU_EVENT_HW_CPU_CYCLES);
    pmu_timer_trigger_irq(cpu, RISCV_PMU_EVENT_HW_INSTRUCTIONS);
}

int riscv_pmu_setup_timer(CPURISCVState *env, uint64_t value, uint32_t ctr_idx)
{
    uint64_t overflow_delta, overflow_at, curr_ns;
    int64_t overflow_ns, overflow_left = 0;
    RISCVCPU *cpu = env_archcpu(env);
    PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];

    /*
     * No need to set up a timer if Sscofpmf is absent or OF is already set,
     * since no LCOFI would be raised.
     */
    if (!riscv_pmu_counter_valid(cpu, ctr_idx) || !cpu->cfg.ext_sscofpmf ||
        pmu_hpmevent_is_of_set(env, ctr_idx)) {
        return -1;
    }

    if (value) {
        overflow_delta = UINT64_MAX - value + 1;
    } else {
        overflow_delta = UINT64_MAX;
    }
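
    /*
     * For a non-zero start value, overflow_delta is the number of increments
     * left before the 64-bit counter wraps to zero, e.g. a counter written
     * with UINT64_MAX - 99 overflows after 100 more increments.
     */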

    /*
     * QEMU supports only int64_t timers while RISC-V counters are uint64_t.
     * Compute the leftover and save it so that it can be reprogrammed again
     * when timer expires.
     */
    if (overflow_delta > INT64_MAX) {
        overflow_left = overflow_delta - INT64_MAX;
    }

    if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
        riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
        overflow_ns = pmu_icount_ticks_to_ns((int64_t)overflow_delta);
        overflow_left = pmu_icount_ticks_to_ns(overflow_left);
    } else {
        return -1;
    }
    curr_ns = (uint64_t)qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    overflow_at = curr_ns + overflow_ns;
    if (overflow_at <= curr_ns) {
        overflow_at = UINT64_MAX;
    }

    if (overflow_at > INT64_MAX) {
        overflow_left += overflow_at - INT64_MAX;
        counter->irq_overflow_left = overflow_left;
        overflow_at = INT64_MAX;
    }
    timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);

    return 0;
}

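
/*
 * pmu-mask (cpu->cfg.pmu_mask) selects which programmable counters to
 * expose. Bits 0-2 are rejected below because they correspond to the fixed
 * cycle, time and instret counters rather than to mhpmcounters.
 */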
void riscv_pmu_init(RISCVCPU *cpu, Error **errp)
{
    if (cpu->cfg.pmu_mask & (COUNTEREN_CY | COUNTEREN_TM | COUNTEREN_IR)) {
        error_setg(errp, "\"pmu-mask\" contains invalid bits (0-2) set");
        return;
    }

    if (ctpop32(cpu->cfg.pmu_mask) > (RV_MAX_MHPMCOUNTERS - 3)) {
        error_setg(errp, "Number of counters exceeds maximum available");
        return;
    }

    cpu->pmu_event_ctr_map = g_hash_table_new(g_direct_hash, g_direct_equal);
    if (!cpu->pmu_event_ctr_map) {
        error_setg(errp, "Unable to allocate PMU event hash table");
        return;
    }

    cpu->pmu_avail_ctrs = cpu->cfg.pmu_mask;
}