1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Linux performance counter support for MIPS.
4  *
5  * Copyright (C) 2010 MIPS Technologies, Inc.
6  * Copyright (C) 2011 Cavium Networks, Inc.
7  * Author: Deng-Cheng Zhu
8  *
9  * This code is based on the implementation for ARM, which is in turn
10  * based on the sparc64 perf event code and the x86 code. Performance
11  * counter access is based on the MIPS Oprofile code. And the callchain
12  * support references the code of MIPS stacktrace.c.
13  */
14 
15 #include <linux/cpumask.h>
16 #include <linux/interrupt.h>
17 #include <linux/smp.h>
18 #include <linux/kernel.h>
19 #include <linux/perf_event.h>
20 #include <linux/uaccess.h>
21 
22 #include <asm/irq.h>
23 #include <asm/irq_regs.h>
24 #include <asm/stacktrace.h>
25 #include <asm/time.h> /* For perf_irq */
26 
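/*
 * Up to four hardware counters are supported. On MT cores two TCs may
 * share each counter, so the low bit of the CPU id selects the counter
 * bank (see vpe_id() below). This is a descriptive note, not original
 * kernel commentary.
 */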
27 #define MIPS_MAX_HWEVENTS 4
28 #define MIPS_TCS_PER_COUNTER 2
29 #define MIPS_CPUID_TO_COUNTER_MASK (MIPS_TCS_PER_COUNTER - 1)
30 
31 struct cpu_hw_events {
32 	/* Array of events on this cpu. */
33 	struct perf_event	*events[MIPS_MAX_HWEVENTS];
34 
35 	/*
36 	 * Set the bit (indexed by the counter number) when the counter
37 	 * is used for an event.
38 	 */
39 	unsigned long		used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];
40 
41 	/*
42 	 * Software copy of the control register for each performance counter.
43 	 * MIPS CPUs vary in their performance counters; they use this field
44 	 * differently, and some may not use it at all.
45 	 */
46 	unsigned int		saved_ctrl[MIPS_MAX_HWEVENTS];
47 };
48 DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
49 	.saved_ctrl = {0},
50 };
51 
52 /* The description of MIPS performance events. */
53 struct mips_perf_event {
54 	unsigned int event_id;
55 	/*
56 	 * MIPS performance counters are indexed starting from 0.
57 	 * CNTR_EVEN indicates the indexes of the counters to be used are
58 	 * even numbers.
59 	 */
60 	unsigned int cntr_mask;
61 	#define CNTR_EVEN	0x55555555
62 	#define CNTR_ODD	0xaaaaaaaa
63 	#define CNTR_ALL	0xffffffff
64 	enum {
65 		T  = 0,
66 		V  = 1,
67 		P  = 2,
68 	} range;
69 };
70 
71 static struct mips_perf_event raw_event;
72 static DEFINE_MUTEX(raw_event_mutex);
73 
74 #define C(x) PERF_COUNT_HW_CACHE_##x
75 
76 struct mips_pmu {
77 	u64		max_period;
78 	u64		valid_count;
79 	u64		overflow;
80 	const char	*name;
81 	int		irq;
82 	u64		(*read_counter)(unsigned int idx);
83 	void		(*write_counter)(unsigned int idx, u64 val);
84 	const struct mips_perf_event *(*map_raw_event)(u64 config);
85 	const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
86 	const struct mips_perf_event (*cache_event_map)
87 				[PERF_COUNT_HW_CACHE_MAX]
88 				[PERF_COUNT_HW_CACHE_OP_MAX]
89 				[PERF_COUNT_HW_CACHE_RESULT_MAX];
90 	unsigned int	num_counters;
91 };
92 
93 static int counter_bits;
94 static struct mips_pmu mipspmu;
95 
96 #define M_PERFCTL_EVENT(event)		(((event) << MIPS_PERFCTRL_EVENT_S) & \
97 					 MIPS_PERFCTRL_EVENT)
98 #define M_PERFCTL_VPEID(vpe)		((vpe)	  << MIPS_PERFCTRL_VPEID_S)
99 
100 #ifdef CONFIG_CPU_BMIPS5000
101 #define M_PERFCTL_MT_EN(filter)		0
102 #else /* !CONFIG_CPU_BMIPS5000 */
103 #define M_PERFCTL_MT_EN(filter)		(filter)
104 #endif /* CONFIG_CPU_BMIPS5000 */
105 
106 #define	   M_TC_EN_ALL			M_PERFCTL_MT_EN(MIPS_PERFCTRL_MT_EN_ALL)
107 #define	   M_TC_EN_VPE			M_PERFCTL_MT_EN(MIPS_PERFCTRL_MT_EN_VPE)
108 #define	   M_TC_EN_TC			M_PERFCTL_MT_EN(MIPS_PERFCTRL_MT_EN_TC)
109 
110 #define M_PERFCTL_COUNT_EVENT_WHENEVER	(MIPS_PERFCTRL_EXL |		\
111 					 MIPS_PERFCTRL_K |		\
112 					 MIPS_PERFCTRL_U |		\
113 					 MIPS_PERFCTRL_S |		\
114 					 MIPS_PERFCTRL_IE)
115 
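/*
 * Descriptive note: these masks limit which control register bits the
 * user-supplied config may touch - the EXL/K/S/U/IE bits, plus (on
 * MT_SMP kernels) the TC/VPE-related fields.
 */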
116 #ifdef CONFIG_MIPS_MT_SMP
117 #define M_PERFCTL_CONFIG_MASK		0x3fff801f
118 #else
119 #define M_PERFCTL_CONFIG_MASK		0x1f
120 #endif
121 
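/* Mask of the low n counter bits; n == 64 is special-cased to avoid an undefined 64-bit shift. */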
122 #define CNTR_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
123 
124 #ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
125 static DEFINE_RWLOCK(pmuint_rwlock);
126 
127 #if defined(CONFIG_CPU_BMIPS5000)
128 #define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
129 			 0 : (smp_processor_id() & MIPS_CPUID_TO_COUNTER_MASK))
130 #else
131 #define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
132 			 0 : cpu_vpe_id(&current_cpu_data))
133 #endif
134 
135 /* Copied from op_model_mipsxx.c */
136 static unsigned int vpe_shift(void)
137 {
138 	if (num_possible_cpus() > 1)
139 		return 1;
140 
141 	return 0;
142 }
143 
144 static unsigned int counters_total_to_per_cpu(unsigned int counters)
145 {
146 	return counters >> vpe_shift();
147 }
148 
149 #else /* !CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */
150 #define vpe_id()	0
151 
152 #endif /* CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */
153 
154 static void resume_local_counters(void);
155 static void pause_local_counters(void);
156 static irqreturn_t mipsxx_pmu_handle_irq(int, void *);
157 static int mipsxx_pmu_handle_shared_irq(void);
158 
159 /* 0: Not Loongson-3
160  * 1: Loongson-3A1000/3B1000/3B1500
161  * 2: Loongson-3A2000/3A3000
162  * 3: Loongson-3A4000+
163  */
164 
165 #define LOONGSON_PMU_TYPE0 0
166 #define LOONGSON_PMU_TYPE1 1
167 #define LOONGSON_PMU_TYPE2 2
168 #define LOONGSON_PMU_TYPE3 3
169 
170 static inline int get_loongson3_pmu_type(void)
171 {
172 	if (boot_cpu_type() != CPU_LOONGSON64)
173 		return LOONGSON_PMU_TYPE0;
174 	if ((boot_cpu_data.processor_id & PRID_COMP_MASK) == PRID_COMP_LEGACY)
175 		return LOONGSON_PMU_TYPE1;
176 	if ((boot_cpu_data.processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64C)
177 		return LOONGSON_PMU_TYPE2;
178 	if ((boot_cpu_data.processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64G)
179 		return LOONGSON_PMU_TYPE3;
180 
181 	return LOONGSON_PMU_TYPE0;
182 }
183 
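/*
 * On cores where two VPEs share the four counters, VPE 1 uses the other
 * pair, so its logical counters 0/1 are rotated onto physical counters
 * 2/3 (and vice versa).
 */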
184 static unsigned int mipsxx_pmu_swizzle_perf_idx(unsigned int idx)
185 {
186 	if (vpe_id() == 1)
187 		idx = (idx + 2) & 3;
188 	return idx;
189 }
190 
191 static u64 mipsxx_pmu_read_counter(unsigned int idx)
192 {
193 	idx = mipsxx_pmu_swizzle_perf_idx(idx);
194 
195 	switch (idx) {
196 	case 0:
197 		/*
198 		 * The counters are unsigned; we must cast to truncate
199 		 * off the high bits.
200 		 */
201 		return (u32)read_c0_perfcntr0();
202 	case 1:
203 		return (u32)read_c0_perfcntr1();
204 	case 2:
205 		return (u32)read_c0_perfcntr2();
206 	case 3:
207 		return (u32)read_c0_perfcntr3();
208 	default:
209 		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
210 		return 0;
211 	}
212 }
213 
214 static u64 mipsxx_pmu_read_counter_64(unsigned int idx)
215 {
216 	u64 mask = CNTR_BIT_MASK(counter_bits);
217 	idx = mipsxx_pmu_swizzle_perf_idx(idx);
218 
219 	switch (idx) {
220 	case 0:
221 		return read_c0_perfcntr0_64() & mask;
222 	case 1:
223 		return read_c0_perfcntr1_64() & mask;
224 	case 2:
225 		return read_c0_perfcntr2_64() & mask;
226 	case 3:
227 		return read_c0_perfcntr3_64() & mask;
228 	default:
229 		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
230 		return 0;
231 	}
232 }
233 
234 static void mipsxx_pmu_write_counter(unsigned int idx, u64 val)
235 {
236 	idx = mipsxx_pmu_swizzle_perf_idx(idx);
237 
238 	switch (idx) {
239 	case 0:
240 		write_c0_perfcntr0(val);
241 		return;
242 	case 1:
243 		write_c0_perfcntr1(val);
244 		return;
245 	case 2:
246 		write_c0_perfcntr2(val);
247 		return;
248 	case 3:
249 		write_c0_perfcntr3(val);
250 		return;
251 	}
252 }
253 
254 static void mipsxx_pmu_write_counter_64(unsigned int idx, u64 val)
255 {
256 	val &= CNTR_BIT_MASK(counter_bits);
257 	idx = mipsxx_pmu_swizzle_perf_idx(idx);
258 
259 	switch (idx) {
260 	case 0:
261 		write_c0_perfcntr0_64(val);
262 		return;
263 	case 1:
264 		write_c0_perfcntr1_64(val);
265 		return;
266 	case 2:
267 		write_c0_perfcntr2_64(val);
268 		return;
269 	case 3:
270 		write_c0_perfcntr3_64(val);
271 		return;
272 	}
273 }
274 
275 static unsigned int mipsxx_pmu_read_control(unsigned int idx)
276 {
277 	idx = mipsxx_pmu_swizzle_perf_idx(idx);
278 
279 	switch (idx) {
280 	case 0:
281 		return read_c0_perfctrl0();
282 	case 1:
283 		return read_c0_perfctrl1();
284 	case 2:
285 		return read_c0_perfctrl2();
286 	case 3:
287 		return read_c0_perfctrl3();
288 	default:
289 		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
290 		return 0;
291 	}
292 }
293 
294 static void mipsxx_pmu_write_control(unsigned int idx, unsigned int val)
295 {
296 	idx = mipsxx_pmu_swizzle_perf_idx(idx);
297 
298 	switch (idx) {
299 	case 0:
300 		write_c0_perfctrl0(val);
301 		return;
302 	case 1:
303 		write_c0_perfctrl1(val);
304 		return;
305 	case 2:
306 		write_c0_perfctrl2(val);
307 		return;
308 	case 3:
309 		write_c0_perfctrl3(val);
310 		return;
311 	}
312 }
313 
314 static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
315 				    struct hw_perf_event *hwc)
316 {
317 	int i;
318 	unsigned long cntr_mask;
319 
320 	/*
321 	 * We only need to care about the counter mask. The range has
322 	 * already been checked.
323 	 */
324 	if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2)
325 		cntr_mask = (hwc->event_base >> 10) & 0xffff;
326 	else
327 		cntr_mask = (hwc->event_base >> 8) & 0xffff;
328 
329 	for (i = mipspmu.num_counters - 1; i >= 0; i--) {
330 		/*
331 		 * Note that some MIPS perf events can be counted by both
332 		 * even and odd counters, whereas many others can only be
333 		 * counted by even _or_ odd counters. This introduces an
334 		 * issue: when the former kind of event takes the counter
335 		 * the latter kind of event wants to use, the "counter
336 		 * allocation" for the latter event fails. If the two could
337 		 * be dynamically swapped, both events could be satisfied,
338 		 * but we leave this issue alone for now.
339 		 */
340 		if (test_bit(i, &cntr_mask) &&
341 			!test_and_set_bit(i, cpuc->used_mask))
342 			return i;
343 	}
344 
345 	return -EAGAIN;
346 }
347 
348 static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
349 {
350 	struct perf_event *event = container_of(evt, struct perf_event, hw);
351 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
352 	unsigned int range = evt->event_base >> 24;
353 
354 	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
355 
356 	if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2)
357 		cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0x3ff) |
358 			(evt->config_base & M_PERFCTL_CONFIG_MASK) |
359 			/* Make sure the interrupt is enabled. */
360 			MIPS_PERFCTRL_IE;
361 	else
362 		cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
363 			(evt->config_base & M_PERFCTL_CONFIG_MASK) |
364 			/* Make sure the interrupt is enabled. */
365 			MIPS_PERFCTRL_IE;
366 
367 	if (IS_ENABLED(CONFIG_CPU_BMIPS5000)) {
368 		/* enable the counter for the calling thread */
369 		cpuc->saved_ctrl[idx] |=
370 			(1 << (12 + vpe_id())) | BRCM_PERFCTRL_TC;
371 	} else if (IS_ENABLED(CONFIG_MIPS_MT_SMP) && range > V) {
372 		/* The counter is processor wide. Set it up to count all TCs. */
373 		pr_debug("Enabling perf counter for all TCs\n");
374 		cpuc->saved_ctrl[idx] |= M_TC_EN_ALL;
375 	} else {
376 		unsigned int cpu, ctrl;
377 
378 		/*
379 		 * Set up the counter for a particular CPU when event->cpu is
380 		 * a valid CPU number. Otherwise set up the counter for the CPU
381 		 * scheduling this thread.
382 		 */
383 		cpu = (event->cpu >= 0) ? event->cpu : smp_processor_id();
384 
385 		ctrl = M_PERFCTL_VPEID(cpu_vpe_id(&cpu_data[cpu]));
386 		ctrl |= M_TC_EN_VPE;
387 		cpuc->saved_ctrl[idx] |= ctrl;
388 		pr_debug("Enabling perf counter for CPU%d\n", cpu);
389 	}
390 	/*
391 	 * We do not actually let the counter run. Leave it until start().
392 	 */
393 }
394 
395 static void mipsxx_pmu_disable_event(int idx)
396 {
397 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
398 	unsigned long flags;
399 
400 	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
401 
402 	local_irq_save(flags);
403 	cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) &
404 		~M_PERFCTL_COUNT_EVENT_WHENEVER;
405 	mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
406 	local_irq_restore(flags);
407 }
408 
409 static int mipspmu_event_set_period(struct perf_event *event,
410 				    struct hw_perf_event *hwc,
411 				    int idx)
412 {
413 	u64 left = local64_read(&hwc->period_left);
414 	u64 period = hwc->sample_period;
415 	int ret = 0;
416 
417 	if (unlikely((left + period) & (1ULL << 63))) {
418 		/* left underflowed by more than period. */
419 		left = period;
420 		local64_set(&hwc->period_left, left);
421 		hwc->last_period = period;
422 		ret = 1;
423 	} else	if (unlikely((left + period) <= period)) {
424 		/* left underflowed by less than period. */
425 		left += period;
426 		local64_set(&hwc->period_left, left);
427 		hwc->last_period = period;
428 		ret = 1;
429 	}
430 
431 	if (left > mipspmu.max_period) {
432 		left = mipspmu.max_period;
433 		local64_set(&hwc->period_left, left);
434 	}
435 
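	/*
	 * Program the counter so that it overflows after another 'left'
	 * events have been counted.
	 */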
436 	local64_set(&hwc->prev_count, mipspmu.overflow - left);
437 
438 	if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2)
439 		mipsxx_pmu_write_control(idx,
440 				M_PERFCTL_EVENT(hwc->event_base & 0x3ff));
441 
442 	mipspmu.write_counter(idx, mipspmu.overflow - left);
443 
444 	perf_event_update_userpage(event);
445 
446 	return ret;
447 }
448 
449 static void mipspmu_event_update(struct perf_event *event,
450 				 struct hw_perf_event *hwc,
451 				 int idx)
452 {
453 	u64 prev_raw_count, new_raw_count;
454 	u64 delta;
455 
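	/*
	 * Update prev_count with a cmpxchg loop so that a racing interrupt
	 * which also updates prev_count cannot make us lose counts.
	 */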
456 again:
457 	prev_raw_count = local64_read(&hwc->prev_count);
458 	new_raw_count = mipspmu.read_counter(idx);
459 
460 	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
461 				new_raw_count) != prev_raw_count)
462 		goto again;
463 
464 	delta = new_raw_count - prev_raw_count;
465 
466 	local64_add(delta, &event->count);
467 	local64_sub(delta, &hwc->period_left);
468 }
469 
470 static void mipspmu_start(struct perf_event *event, int flags)
471 {
472 	struct hw_perf_event *hwc = &event->hw;
473 
474 	if (flags & PERF_EF_RELOAD)
475 		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
476 
477 	hwc->state = 0;
478 
479 	/* Set the period for the event. */
480 	mipspmu_event_set_period(event, hwc, hwc->idx);
481 
482 	/* Enable the event. */
483 	mipsxx_pmu_enable_event(hwc, hwc->idx);
484 }
485 
486 static void mipspmu_stop(struct perf_event *event, int flags)
487 {
488 	struct hw_perf_event *hwc = &event->hw;
489 
490 	if (!(hwc->state & PERF_HES_STOPPED)) {
491 		/* We are working on a local event. */
492 		mipsxx_pmu_disable_event(hwc->idx);
493 		barrier();
494 		mipspmu_event_update(event, hwc, hwc->idx);
495 		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
496 	}
497 }
498 
499 static int mipspmu_add(struct perf_event *event, int flags)
500 {
501 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
502 	struct hw_perf_event *hwc = &event->hw;
503 	int idx;
504 	int err = 0;
505 
506 	perf_pmu_disable(event->pmu);
507 
508 	/* Look for a free counter for this event. */
509 	idx = mipsxx_pmu_alloc_counter(cpuc, hwc);
510 	if (idx < 0) {
511 		err = idx;
512 		goto out;
513 	}
514 
515 	/*
516 	 * If there is an event in the counter we are going to use then
517 	 * make sure it is disabled.
518 	 */
519 	event->hw.idx = idx;
520 	mipsxx_pmu_disable_event(idx);
521 	cpuc->events[idx] = event;
522 
523 	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
524 	if (flags & PERF_EF_START)
525 		mipspmu_start(event, PERF_EF_RELOAD);
526 
527 	/* Propagate our changes to the userspace mapping. */
528 	perf_event_update_userpage(event);
529 
530 out:
531 	perf_pmu_enable(event->pmu);
532 	return err;
533 }
534 
535 static void mipspmu_del(struct perf_event *event, int flags)
536 {
537 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
538 	struct hw_perf_event *hwc = &event->hw;
539 	int idx = hwc->idx;
540 
541 	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
542 
543 	mipspmu_stop(event, PERF_EF_UPDATE);
544 	cpuc->events[idx] = NULL;
545 	clear_bit(idx, cpuc->used_mask);
546 
547 	perf_event_update_userpage(event);
548 }
549 
550 static void mipspmu_read(struct perf_event *event)
551 {
552 	struct hw_perf_event *hwc = &event->hw;
553 
554 	/* Don't read disabled counters! */
555 	if (hwc->idx < 0)
556 		return;
557 
558 	mipspmu_event_update(event, hwc, hwc->idx);
559 }
560 
561 static void mipspmu_enable(struct pmu *pmu)
562 {
563 #ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
564 	write_unlock(&pmuint_rwlock);
565 #endif
566 	resume_local_counters();
567 }
568 
569 /*
570  * MIPS performance counters can be per-TC. The control registers cannot
571  * be directly accessed across CPUs. Hence if we want to do global
572  * control, we need cross-CPU calls. on_each_cpu() can help us, but we
573  * cannot make sure this function is called with interrupts enabled. So
574  * here we pause local counters and then grab a rwlock and leave the
575  * counters on other CPUs alone. If a counter interrupt is raised while
576  * we hold the write lock, simply pause the local counters on that CPU
577  * and spin in the handler. Also we know we won't be switched to another
578  * CPU after pausing local counters and before grabbing the lock.
579  */
580 static void mipspmu_disable(struct pmu *pmu)
581 {
582 	pause_local_counters();
583 #ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
584 	write_lock(&pmuint_rwlock);
585 #endif
586 }
587 
588 static atomic_t active_events = ATOMIC_INIT(0);
589 static DEFINE_MUTEX(pmu_reserve_mutex);
590 static int (*save_perf_irq)(void);
591 
592 static int mipspmu_get_irq(void)
593 {
594 	int err;
595 
596 	if (mipspmu.irq >= 0) {
597 		/* Request my own irq handler. */
598 		err = request_irq(mipspmu.irq, mipsxx_pmu_handle_irq,
599 				  IRQF_PERCPU | IRQF_NOBALANCING |
600 				  IRQF_NO_THREAD | IRQF_NO_SUSPEND |
601 				  IRQF_SHARED,
602 				  "mips_perf_pmu", &mipspmu);
603 		if (err) {
604 			pr_warn("Unable to request IRQ%d for MIPS performance counters!\n",
605 				mipspmu.irq);
606 		}
607 	} else if (cp0_perfcount_irq < 0) {
608 		/*
609 		 * We are sharing the irq number with the timer interrupt.
610 		 */
611 		save_perf_irq = perf_irq;
612 		perf_irq = mipsxx_pmu_handle_shared_irq;
613 		err = 0;
614 	} else {
615 		pr_warn("The platform hasn't properly defined its interrupt controller\n");
616 		err = -ENOENT;
617 	}
618 
619 	return err;
620 }
621 
622 static void mipspmu_free_irq(void)
623 {
624 	if (mipspmu.irq >= 0)
625 		free_irq(mipspmu.irq, &mipspmu);
626 	else if (cp0_perfcount_irq < 0)
627 		perf_irq = save_perf_irq;
628 }
629 
630 /*
631  * mipsxx/rm9000/loongson2 have different performance counters; each has
632  * its own specific low-level init routine.
633  */
634 static void reset_counters(void *arg);
635 static int __hw_perf_event_init(struct perf_event *event);
636 
637 static void hw_perf_event_destroy(struct perf_event *event)
638 {
639 	if (atomic_dec_and_mutex_lock(&active_events,
640 				&pmu_reserve_mutex)) {
641 		/*
642 		 * We must not call the destroy function with interrupts
643 		 * disabled.
644 		 */
645 		on_each_cpu(reset_counters,
646 			(void *)(long)mipspmu.num_counters, 1);
647 		mipspmu_free_irq();
648 		mutex_unlock(&pmu_reserve_mutex);
649 	}
650 }
651 
652 static int mipspmu_event_init(struct perf_event *event)
653 {
654 	int err = 0;
655 
656 	/* does not support taken branch sampling */
657 	if (has_branch_stack(event))
658 		return -EOPNOTSUPP;
659 
660 	switch (event->attr.type) {
661 	case PERF_TYPE_RAW:
662 	case PERF_TYPE_HARDWARE:
663 	case PERF_TYPE_HW_CACHE:
664 		break;
665 
666 	default:
667 		return -ENOENT;
668 	}
669 
670 	if (event->cpu >= 0 && !cpu_online(event->cpu))
671 		return -ENODEV;
672 
673 	if (!atomic_inc_not_zero(&active_events)) {
674 		mutex_lock(&pmu_reserve_mutex);
675 		if (atomic_read(&active_events) == 0)
676 			err = mipspmu_get_irq();
677 
678 		if (!err)
679 			atomic_inc(&active_events);
680 		mutex_unlock(&pmu_reserve_mutex);
681 	}
682 
683 	if (err)
684 		return err;
685 
686 	return __hw_perf_event_init(event);
687 }
688 
689 static struct pmu pmu = {
690 	.pmu_enable	= mipspmu_enable,
691 	.pmu_disable	= mipspmu_disable,
692 	.event_init	= mipspmu_event_init,
693 	.add		= mipspmu_add,
694 	.del		= mipspmu_del,
695 	.start		= mipspmu_start,
696 	.stop		= mipspmu_stop,
697 	.read		= mipspmu_read,
698 };
699 
700 static unsigned int mipspmu_perf_event_encode(const struct mips_perf_event *pev)
701 {
702 /*
703  * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for
704  * event_id.
705  */
706 #ifdef CONFIG_MIPS_MT_SMP
707 	if (num_possible_cpus() > 1)
708 		return ((unsigned int)pev->range << 24) |
709 			(pev->cntr_mask & 0xffff00) |
710 			(pev->event_id & 0xff);
711 	else
712 #endif /* CONFIG_MIPS_MT_SMP */
713 	{
714 		if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2)
715 			return (pev->cntr_mask & 0xfffc00) |
716 				(pev->event_id & 0x3ff);
717 		else
718 			return (pev->cntr_mask & 0xffff00) |
719 				(pev->event_id & 0xff);
720 	}
721 }
722 
723 static const struct mips_perf_event *mipspmu_map_general_event(int idx)
724 {
725 
726 	if ((*mipspmu.general_event_map)[idx].cntr_mask == 0)
727 		return ERR_PTR(-EOPNOTSUPP);
728 	return &(*mipspmu.general_event_map)[idx];
729 }
730 
731 static const struct mips_perf_event *mipspmu_map_cache_event(u64 config)
732 {
733 	unsigned int cache_type, cache_op, cache_result;
734 	const struct mips_perf_event *pev;
735 
736 	cache_type = (config >> 0) & 0xff;
737 	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
738 		return ERR_PTR(-EINVAL);
739 
740 	cache_op = (config >> 8) & 0xff;
741 	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
742 		return ERR_PTR(-EINVAL);
743 
744 	cache_result = (config >> 16) & 0xff;
745 	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
746 		return ERR_PTR(-EINVAL);
747 
748 	pev = &((*mipspmu.cache_event_map)
749 					[cache_type]
750 					[cache_op]
751 					[cache_result]);
752 
753 	if (pev->cntr_mask == 0)
754 		return ERR_PTR(-EOPNOTSUPP);
755 
756 	return pev;
757 
758 }
759 
760 static int validate_group(struct perf_event *event)
761 {
762 	struct perf_event *sibling, *leader = event->group_leader;
763 	struct cpu_hw_events fake_cpuc;
764 
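	/*
	 * Dry-run the counter allocation for the whole group on a scratch
	 * cpu_hw_events to check that all of its events can be scheduled
	 * together.
	 */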
765 	memset(&fake_cpuc, 0, sizeof(fake_cpuc));
766 
767 	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
768 		return -EINVAL;
769 
770 	for_each_sibling_event(sibling, leader) {
771 		if (mipsxx_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
772 			return -EINVAL;
773 	}
774 
775 	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
776 		return -EINVAL;
777 
778 	return 0;
779 }
780 
781 /* This is needed by specific irq handlers in perf_event_*.c */
782 static void handle_associated_event(struct cpu_hw_events *cpuc,
783 				    int idx, struct perf_sample_data *data,
784 				    struct pt_regs *regs)
785 {
786 	struct perf_event *event = cpuc->events[idx];
787 	struct hw_perf_event *hwc = &event->hw;
788 
789 	mipspmu_event_update(event, hwc, idx);
790 	data->period = event->hw.last_period;
791 	if (!mipspmu_event_set_period(event, hwc, idx))
792 		return;
793 
794 	if (perf_event_overflow(event, data, regs))
795 		mipsxx_pmu_disable_event(idx);
796 }
797 
798 
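/*
 * Probe the number of implemented counters by walking the M ("more
 * counters") bit of each successive performance control register.
 */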
799 static int __n_counters(void)
800 {
801 	if (!cpu_has_perf)
802 		return 0;
803 	if (!(read_c0_perfctrl0() & MIPS_PERFCTRL_M))
804 		return 1;
805 	if (!(read_c0_perfctrl1() & MIPS_PERFCTRL_M))
806 		return 2;
807 	if (!(read_c0_perfctrl2() & MIPS_PERFCTRL_M))
808 		return 3;
809 
810 	return 4;
811 }
812 
813 static int n_counters(void)
814 {
815 	int counters;
816 
817 	switch (current_cpu_type()) {
818 	case CPU_R10000:
819 		counters = 2;
820 		break;
821 
822 	case CPU_R12000:
823 	case CPU_R14000:
824 	case CPU_R16000:
825 		counters = 4;
826 		break;
827 
828 	default:
829 		counters = __n_counters();
830 	}
831 
832 	return counters;
833 }
834 
835 static void loongson3_reset_counters(void *arg)
836 {
837 	int counters = (int)(long)arg;
838 
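	/*
	 * Descriptive note: these cores appear to need the control register
	 * programmed with a series of different event selections (the values
	 * below are event numbers shifted into the event field, which starts
	 * at bit 5), zeroing the counter after each one, to fully reset it.
	 */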
839 	switch (counters) {
840 	case 4:
841 		mipsxx_pmu_write_control(3, 0);
842 		mipspmu.write_counter(3, 0);
843 		mipsxx_pmu_write_control(3, 127<<5);
844 		mipspmu.write_counter(3, 0);
845 		mipsxx_pmu_write_control(3, 191<<5);
846 		mipspmu.write_counter(3, 0);
847 		mipsxx_pmu_write_control(3, 255<<5);
848 		mipspmu.write_counter(3, 0);
849 		mipsxx_pmu_write_control(3, 319<<5);
850 		mipspmu.write_counter(3, 0);
851 		mipsxx_pmu_write_control(3, 383<<5);
852 		mipspmu.write_counter(3, 0);
853 		mipsxx_pmu_write_control(3, 575<<5);
854 		mipspmu.write_counter(3, 0);
855 		fallthrough;
856 	case 3:
857 		mipsxx_pmu_write_control(2, 0);
858 		mipspmu.write_counter(2, 0);
859 		mipsxx_pmu_write_control(2, 127<<5);
860 		mipspmu.write_counter(2, 0);
861 		mipsxx_pmu_write_control(2, 191<<5);
862 		mipspmu.write_counter(2, 0);
863 		mipsxx_pmu_write_control(2, 255<<5);
864 		mipspmu.write_counter(2, 0);
865 		mipsxx_pmu_write_control(2, 319<<5);
866 		mipspmu.write_counter(2, 0);
867 		mipsxx_pmu_write_control(2, 383<<5);
868 		mipspmu.write_counter(2, 0);
869 		mipsxx_pmu_write_control(2, 575<<5);
870 		mipspmu.write_counter(2, 0);
871 		fallthrough;
872 	case 2:
873 		mipsxx_pmu_write_control(1, 0);
874 		mipspmu.write_counter(1, 0);
875 		mipsxx_pmu_write_control(1, 127<<5);
876 		mipspmu.write_counter(1, 0);
877 		mipsxx_pmu_write_control(1, 191<<5);
878 		mipspmu.write_counter(1, 0);
879 		mipsxx_pmu_write_control(1, 255<<5);
880 		mipspmu.write_counter(1, 0);
881 		mipsxx_pmu_write_control(1, 319<<5);
882 		mipspmu.write_counter(1, 0);
883 		mipsxx_pmu_write_control(1, 383<<5);
884 		mipspmu.write_counter(1, 0);
885 		mipsxx_pmu_write_control(1, 575<<5);
886 		mipspmu.write_counter(1, 0);
887 		fallthrough;
888 	case 1:
889 		mipsxx_pmu_write_control(0, 0);
890 		mipspmu.write_counter(0, 0);
891 		mipsxx_pmu_write_control(0, 127<<5);
892 		mipspmu.write_counter(0, 0);
893 		mipsxx_pmu_write_control(0, 191<<5);
894 		mipspmu.write_counter(0, 0);
895 		mipsxx_pmu_write_control(0, 255<<5);
896 		mipspmu.write_counter(0, 0);
897 		mipsxx_pmu_write_control(0, 319<<5);
898 		mipspmu.write_counter(0, 0);
899 		mipsxx_pmu_write_control(0, 383<<5);
900 		mipspmu.write_counter(0, 0);
901 		mipsxx_pmu_write_control(0, 575<<5);
902 		mipspmu.write_counter(0, 0);
903 		break;
904 	}
905 }
906 
907 static void reset_counters(void *arg)
908 {
909 	int counters = (int)(long)arg;
910 
911 	if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2) {
912 		loongson3_reset_counters(arg);
913 		return;
914 	}
915 
916 	switch (counters) {
917 	case 4:
918 		mipsxx_pmu_write_control(3, 0);
919 		mipspmu.write_counter(3, 0);
920 		fallthrough;
921 	case 3:
922 		mipsxx_pmu_write_control(2, 0);
923 		mipspmu.write_counter(2, 0);
924 		fallthrough;
925 	case 2:
926 		mipsxx_pmu_write_control(1, 0);
927 		mipspmu.write_counter(1, 0);
928 		fallthrough;
929 	case 1:
930 		mipsxx_pmu_write_control(0, 0);
931 		mipspmu.write_counter(0, 0);
932 		break;
933 	}
934 }
935 
936 /* 24K/34K/1004K/interAptiv/loongson1 cores share the same event map. */
937 static const struct mips_perf_event mipsxxcore_event_map
938 				[PERF_COUNT_HW_MAX] = {
939 	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
940 	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
941 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T },
942 	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
943 };
944 
945 /* 74K/proAptiv core has different branch event code. */
946 static const struct mips_perf_event mipsxxcore_event_map2
947 				[PERF_COUNT_HW_MAX] = {
948 	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
949 	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
950 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T },
951 	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
952 };
953 
954 static const struct mips_perf_event i6x00_event_map[PERF_COUNT_HW_MAX] = {
955 	[PERF_COUNT_HW_CPU_CYCLES]          = { 0x00, CNTR_EVEN | CNTR_ODD },
956 	[PERF_COUNT_HW_INSTRUCTIONS]        = { 0x01, CNTR_EVEN | CNTR_ODD },
957 	/* These only count dcache, not icache */
958 	[PERF_COUNT_HW_CACHE_REFERENCES]    = { 0x45, CNTR_EVEN | CNTR_ODD },
959 	[PERF_COUNT_HW_CACHE_MISSES]        = { 0x48, CNTR_EVEN | CNTR_ODD },
960 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x15, CNTR_EVEN | CNTR_ODD },
961 	[PERF_COUNT_HW_BRANCH_MISSES]       = { 0x16, CNTR_EVEN | CNTR_ODD },
962 };
963 
964 static const struct mips_perf_event loongson3_event_map1[PERF_COUNT_HW_MAX] = {
965 	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN },
966 	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, CNTR_ODD },
967 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x01, CNTR_EVEN },
968 	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x01, CNTR_ODD },
969 };
970 
971 static const struct mips_perf_event loongson3_event_map2[PERF_COUNT_HW_MAX] = {
972 	[PERF_COUNT_HW_CPU_CYCLES] = { 0x80, CNTR_ALL },
973 	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x81, CNTR_ALL },
974 	[PERF_COUNT_HW_CACHE_MISSES] = { 0x18, CNTR_ALL },
975 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x94, CNTR_ALL },
976 	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x9c, CNTR_ALL },
977 };
978 
979 static const struct mips_perf_event loongson3_event_map3[PERF_COUNT_HW_MAX] = {
980 	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_ALL },
981 	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_ALL },
982 	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x1c, CNTR_ALL },
983 	[PERF_COUNT_HW_CACHE_MISSES] = { 0x1d, CNTR_ALL },
984 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_ALL },
985 	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x08, CNTR_ALL },
986 };
987 
988 static const struct mips_perf_event octeon_event_map[PERF_COUNT_HW_MAX] = {
989 	[PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
990 	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x03, CNTR_ALL },
991 	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x2b, CNTR_ALL },
992 	[PERF_COUNT_HW_CACHE_MISSES] = { 0x2e, CNTR_ALL	 },
993 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x08, CNTR_ALL },
994 	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x09, CNTR_ALL },
995 	[PERF_COUNT_HW_BUS_CYCLES] = { 0x25, CNTR_ALL },
996 };
997 
998 static const struct mips_perf_event bmips5000_event_map
999 				[PERF_COUNT_HW_MAX] = {
1000 	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, T },
1001 	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
1002 	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
1003 };
1004 
1005 static const struct mips_perf_event xlp_event_map[PERF_COUNT_HW_MAX] = {
1006 	[PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
1007 	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x18, CNTR_ALL }, /* PAPI_TOT_INS */
1008 	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x04, CNTR_ALL }, /* PAPI_L1_ICA */
1009 	[PERF_COUNT_HW_CACHE_MISSES] = { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */
1010 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x1b, CNTR_ALL }, /* PAPI_BR_CN */
1011 	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x1c, CNTR_ALL }, /* PAPI_BR_MSP */
1012 };
1013 
1014 /* 24K/34K/1004K/interAptiv/loongson1 cores share the same cache event map. */
1015 static const struct mips_perf_event mipsxxcore_cache_map
1016 				[PERF_COUNT_HW_CACHE_MAX]
1017 				[PERF_COUNT_HW_CACHE_OP_MAX]
1018 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1019 [C(L1D)] = {
1020 	/*
1021 	 * Like some other architectures (e.g. ARM), the performance
1022 	 * counters don't differentiate between read and write
1023 	 * accesses/misses, so this isn't strictly correct, but it's the
1024 	 * best we can do. Writes and reads get combined.
1025 	 */
1026 	[C(OP_READ)] = {
1027 		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN, T },
1028 		[C(RESULT_MISS)]	= { 0x0b, CNTR_EVEN | CNTR_ODD, T },
1029 	},
1030 	[C(OP_WRITE)] = {
1031 		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN, T },
1032 		[C(RESULT_MISS)]	= { 0x0b, CNTR_EVEN | CNTR_ODD, T },
1033 	},
1034 },
1035 [C(L1I)] = {
1036 	[C(OP_READ)] = {
1037 		[C(RESULT_ACCESS)]	= { 0x09, CNTR_EVEN, T },
1038 		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD, T },
1039 	},
1040 	[C(OP_WRITE)] = {
1041 		[C(RESULT_ACCESS)]	= { 0x09, CNTR_EVEN, T },
1042 		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD, T },
1043 	},
1044 	[C(OP_PREFETCH)] = {
1045 		[C(RESULT_ACCESS)]	= { 0x14, CNTR_EVEN, T },
1046 		/*
1047 		 * Note that MIPS has only "hit" events countable for
1048 		 * the prefetch operation.
1049 		 */
1050 	},
1051 },
1052 [C(LL)] = {
1053 	[C(OP_READ)] = {
1054 		[C(RESULT_ACCESS)]	= { 0x15, CNTR_ODD, P },
1055 		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN, P },
1056 	},
1057 	[C(OP_WRITE)] = {
1058 		[C(RESULT_ACCESS)]	= { 0x15, CNTR_ODD, P },
1059 		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN, P },
1060 	},
1061 },
1062 [C(DTLB)] = {
1063 	[C(OP_READ)] = {
1064 		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
1065 		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
1066 	},
1067 	[C(OP_WRITE)] = {
1068 		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
1069 		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
1070 	},
1071 },
1072 [C(ITLB)] = {
1073 	[C(OP_READ)] = {
1074 		[C(RESULT_ACCESS)]	= { 0x05, CNTR_EVEN, T },
1075 		[C(RESULT_MISS)]	= { 0x05, CNTR_ODD, T },
1076 	},
1077 	[C(OP_WRITE)] = {
1078 		[C(RESULT_ACCESS)]	= { 0x05, CNTR_EVEN, T },
1079 		[C(RESULT_MISS)]	= { 0x05, CNTR_ODD, T },
1080 	},
1081 },
1082 [C(BPU)] = {
1083 	/* Using the same code for *HW_BRANCH* */
1084 	[C(OP_READ)] = {
1085 		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN, T },
1086 		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
1087 	},
1088 	[C(OP_WRITE)] = {
1089 		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN, T },
1090 		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
1091 	},
1092 },
1093 };
1094 
1095 /* 74K/proAptiv core has completely different cache event map. */
1096 static const struct mips_perf_event mipsxxcore_cache_map2
1097 				[PERF_COUNT_HW_CACHE_MAX]
1098 				[PERF_COUNT_HW_CACHE_OP_MAX]
1099 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1100 [C(L1D)] = {
1101 	/*
1102 	 * Like some other architectures (e.g. ARM), the performance
1103 	 * counters don't differentiate between read and write
1104 	 * accesses/misses, so this isn't strictly correct, but it's the
1105 	 * best we can do. Writes and reads get combined.
1106 	 */
1107 	[C(OP_READ)] = {
1108 		[C(RESULT_ACCESS)]	= { 0x17, CNTR_ODD, T },
1109 		[C(RESULT_MISS)]	= { 0x18, CNTR_ODD, T },
1110 	},
1111 	[C(OP_WRITE)] = {
1112 		[C(RESULT_ACCESS)]	= { 0x17, CNTR_ODD, T },
1113 		[C(RESULT_MISS)]	= { 0x18, CNTR_ODD, T },
1114 	},
1115 },
1116 [C(L1I)] = {
1117 	[C(OP_READ)] = {
1118 		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
1119 		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
1120 	},
1121 	[C(OP_WRITE)] = {
1122 		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
1123 		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
1124 	},
1125 	[C(OP_PREFETCH)] = {
1126 		[C(RESULT_ACCESS)]	= { 0x34, CNTR_EVEN, T },
1127 		/*
1128 		 * Note that MIPS has only "hit" events countable for
1129 		 * the prefetch operation.
1130 		 */
1131 	},
1132 },
1133 [C(LL)] = {
1134 	[C(OP_READ)] = {
1135 		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
1136 		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN, P },
1137 	},
1138 	[C(OP_WRITE)] = {
1139 		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
1140 		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN, P },
1141 	},
1142 },
1143 /*
1144  * 74K core does not have specific DTLB events. proAptiv core has
1145  * "speculative" DTLB events which are numbered 0x63 (even/odd) and
1146  * not included here. One can use raw events if really needed.
1147  */
1148 [C(ITLB)] = {
1149 	[C(OP_READ)] = {
1150 		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
1151 		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD, T },
1152 	},
1153 	[C(OP_WRITE)] = {
1154 		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
1155 		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD, T },
1156 	},
1157 },
1158 [C(BPU)] = {
1159 	/* Using the same code for *HW_BRANCH* */
1160 	[C(OP_READ)] = {
1161 		[C(RESULT_ACCESS)]	= { 0x27, CNTR_EVEN, T },
1162 		[C(RESULT_MISS)]	= { 0x27, CNTR_ODD, T },
1163 	},
1164 	[C(OP_WRITE)] = {
1165 		[C(RESULT_ACCESS)]	= { 0x27, CNTR_EVEN, T },
1166 		[C(RESULT_MISS)]	= { 0x27, CNTR_ODD, T },
1167 	},
1168 },
1169 };
1170 
1171 static const struct mips_perf_event i6x00_cache_map
1172 				[PERF_COUNT_HW_CACHE_MAX]
1173 				[PERF_COUNT_HW_CACHE_OP_MAX]
1174 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1175 [C(L1D)] = {
1176 	[C(OP_READ)] = {
1177 		[C(RESULT_ACCESS)]	= { 0x46, CNTR_EVEN | CNTR_ODD },
1178 		[C(RESULT_MISS)]	= { 0x49, CNTR_EVEN | CNTR_ODD },
1179 	},
1180 	[C(OP_WRITE)] = {
1181 		[C(RESULT_ACCESS)]	= { 0x47, CNTR_EVEN | CNTR_ODD },
1182 		[C(RESULT_MISS)]	= { 0x4a, CNTR_EVEN | CNTR_ODD },
1183 	},
1184 },
1185 [C(L1I)] = {
1186 	[C(OP_READ)] = {
1187 		[C(RESULT_ACCESS)]	= { 0x84, CNTR_EVEN | CNTR_ODD },
1188 		[C(RESULT_MISS)]	= { 0x85, CNTR_EVEN | CNTR_ODD },
1189 	},
1190 },
1191 [C(DTLB)] = {
1192 	/* Can't distinguish read & write */
1193 	[C(OP_READ)] = {
1194 		[C(RESULT_ACCESS)]	= { 0x40, CNTR_EVEN | CNTR_ODD },
1195 		[C(RESULT_MISS)]	= { 0x41, CNTR_EVEN | CNTR_ODD },
1196 	},
1197 	[C(OP_WRITE)] = {
1198 		[C(RESULT_ACCESS)]	= { 0x40, CNTR_EVEN | CNTR_ODD },
1199 		[C(RESULT_MISS)]	= { 0x41, CNTR_EVEN | CNTR_ODD },
1200 	},
1201 },
1202 [C(BPU)] = {
1203 	/* Conditional branches / mispredicted */
1204 	[C(OP_READ)] = {
1205 		[C(RESULT_ACCESS)]	= { 0x15, CNTR_EVEN | CNTR_ODD },
1206 		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN | CNTR_ODD },
1207 	},
1208 },
1209 };
1210 
1211 static const struct mips_perf_event loongson3_cache_map1
1212 				[PERF_COUNT_HW_CACHE_MAX]
1213 				[PERF_COUNT_HW_CACHE_OP_MAX]
1214 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1215 [C(L1D)] = {
1216 	/*
1217 	 * Like some other architectures (e.g. ARM), the performance
1218 	 * counters don't differentiate between read and write
1219 	 * accesses/misses, so this isn't strictly correct, but it's the
1220 	 * best we can do. Writes and reads get combined.
1221 	 */
1222 	[C(OP_READ)] = {
1223 		[C(RESULT_MISS)]        = { 0x04, CNTR_ODD },
1224 	},
1225 	[C(OP_WRITE)] = {
1226 		[C(RESULT_MISS)]        = { 0x04, CNTR_ODD },
1227 	},
1228 },
1229 [C(L1I)] = {
1230 	[C(OP_READ)] = {
1231 		[C(RESULT_MISS)]        = { 0x04, CNTR_EVEN },
1232 	},
1233 	[C(OP_WRITE)] = {
1234 		[C(RESULT_MISS)]        = { 0x04, CNTR_EVEN },
1235 	},
1236 },
1237 [C(DTLB)] = {
1238 	[C(OP_READ)] = {
1239 		[C(RESULT_MISS)]        = { 0x09, CNTR_ODD },
1240 	},
1241 	[C(OP_WRITE)] = {
1242 		[C(RESULT_MISS)]        = { 0x09, CNTR_ODD },
1243 	},
1244 },
1245 [C(ITLB)] = {
1246 	[C(OP_READ)] = {
1247 		[C(RESULT_MISS)]        = { 0x0c, CNTR_ODD },
1248 	},
1249 	[C(OP_WRITE)] = {
1250 		[C(RESULT_MISS)]        = { 0x0c, CNTR_ODD },
1251 	},
1252 },
1253 [C(BPU)] = {
1254 	/* Using the same code for *HW_BRANCH* */
1255 	[C(OP_READ)] = {
1256 		[C(RESULT_ACCESS)]      = { 0x01, CNTR_EVEN },
1257 		[C(RESULT_MISS)]        = { 0x01, CNTR_ODD },
1258 	},
1259 	[C(OP_WRITE)] = {
1260 		[C(RESULT_ACCESS)]      = { 0x01, CNTR_EVEN },
1261 		[C(RESULT_MISS)]        = { 0x01, CNTR_ODD },
1262 	},
1263 },
1264 };
1265 
1266 static const struct mips_perf_event loongson3_cache_map2
1267 				[PERF_COUNT_HW_CACHE_MAX]
1268 				[PERF_COUNT_HW_CACHE_OP_MAX]
1269 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1270 [C(L1D)] = {
1271 	/*
1272 	 * Like some other architectures (e.g. ARM), the performance
1273 	 * counters don't differentiate between read and write
1274 	 * accesses/misses, so this isn't strictly correct, but it's the
1275 	 * best we can do. Writes and reads get combined.
1276 	 */
1277 	[C(OP_READ)] = {
1278 		[C(RESULT_ACCESS)]	= { 0x156, CNTR_ALL },
1279 	},
1280 	[C(OP_WRITE)] = {
1281 		[C(RESULT_ACCESS)]	= { 0x155, CNTR_ALL },
1282 		[C(RESULT_MISS)]        = { 0x153, CNTR_ALL },
1283 	},
1284 },
1285 [C(L1I)] = {
1286 	[C(OP_READ)] = {
1287 		[C(RESULT_MISS)]	= { 0x18, CNTR_ALL },
1288 	},
1289 	[C(OP_WRITE)] = {
1290 		[C(RESULT_MISS)]        = { 0x18, CNTR_ALL },
1291 	},
1292 },
1293 [C(LL)] = {
1294 	[C(OP_READ)] = {
1295 		[C(RESULT_ACCESS)]	= { 0x1b6, CNTR_ALL },
1296 	},
1297 	[C(OP_WRITE)] = {
1298 		[C(RESULT_ACCESS)]	= { 0x1b7, CNTR_ALL },
1299 	},
1300 	[C(OP_PREFETCH)] = {
1301 		[C(RESULT_ACCESS)]	= { 0x1bf, CNTR_ALL },
1302 	},
1303 },
1304 [C(DTLB)] = {
1305 	[C(OP_READ)] = {
1306 		[C(RESULT_MISS)]        = { 0x92, CNTR_ALL },
1307 	},
1308 	[C(OP_WRITE)] = {
1309 		[C(RESULT_MISS)]        = { 0x92, CNTR_ALL },
1310 	},
1311 },
1312 [C(ITLB)] = {
1313 	[C(OP_READ)] = {
1314 		[C(RESULT_MISS)]	= { 0x1a, CNTR_ALL },
1315 	},
1316 	[C(OP_WRITE)] = {
1317 		[C(RESULT_MISS)]	= { 0x1a, CNTR_ALL },
1318 	},
1319 },
1320 [C(BPU)] = {
1321 	/* Using the same code for *HW_BRANCH* */
1322 	[C(OP_READ)] = {
1323 		[C(RESULT_ACCESS)]      = { 0x94, CNTR_ALL },
1324 		[C(RESULT_MISS)]        = { 0x9c, CNTR_ALL },
1325 	},
1326 },
1327 };
1328 
1329 static const struct mips_perf_event loongson3_cache_map3
1330 				[PERF_COUNT_HW_CACHE_MAX]
1331 				[PERF_COUNT_HW_CACHE_OP_MAX]
1332 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1333 [C(L1D)] = {
1334 	/*
1335 	 * Like some other architectures (e.g. ARM), the performance
1336 	 * counters don't differentiate between read and write
1337 	 * accesses/misses, so this isn't strictly correct, but it's the
1338 	 * best we can do. Writes and reads get combined.
1339 	 */
1340 	[C(OP_READ)] = {
1341 		[C(RESULT_ACCESS)]      = { 0x1e, CNTR_ALL },
1342 		[C(RESULT_MISS)]        = { 0x1f, CNTR_ALL },
1343 	},
1344 	[C(OP_PREFETCH)] = {
1345 		[C(RESULT_ACCESS)]	= { 0xaa, CNTR_ALL },
1346 		[C(RESULT_MISS)]	= { 0xa9, CNTR_ALL },
1347 	},
1348 },
1349 [C(L1I)] = {
1350 	[C(OP_READ)] = {
1351 		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ALL },
1352 		[C(RESULT_MISS)]	= { 0x1d, CNTR_ALL },
1353 	},
1354 },
1355 [C(LL)] = {
1356 	[C(OP_READ)] = {
1357 		[C(RESULT_ACCESS)]	= { 0x2e, CNTR_ALL },
1358 		[C(RESULT_MISS)]	= { 0x2f, CNTR_ALL },
1359 	},
1360 },
1361 [C(DTLB)] = {
1362 	[C(OP_READ)] = {
1363 		[C(RESULT_ACCESS)]      = { 0x14, CNTR_ALL },
1364 		[C(RESULT_MISS)]	= { 0x1b, CNTR_ALL },
1365 	},
1366 },
1367 [C(ITLB)] = {
1368 	[C(OP_READ)] = {
1369 		[C(RESULT_MISS)]	= { 0x1a, CNTR_ALL },
1370 	},
1371 },
1372 [C(BPU)] = {
1373 	/* Using the same code for *HW_BRANCH* */
1374 	[C(OP_READ)] = {
1375 		[C(RESULT_ACCESS)]      = { 0x02, CNTR_ALL },
1376 		[C(RESULT_MISS)]        = { 0x08, CNTR_ALL },
1377 	},
1378 },
1379 };
1380 
1381 /* BMIPS5000 */
1382 static const struct mips_perf_event bmips5000_cache_map
1383 				[PERF_COUNT_HW_CACHE_MAX]
1384 				[PERF_COUNT_HW_CACHE_OP_MAX]
1385 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1386 [C(L1D)] = {
1387 	/*
1388 	 * Like some other architectures (e.g. ARM), the performance
1389 	 * counters don't differentiate between read and write
1390 	 * accesses/misses, so this isn't strictly correct, but it's the
1391 	 * best we can do. Writes and reads get combined.
1392 	 */
1393 	[C(OP_READ)] = {
1394 		[C(RESULT_ACCESS)]	= { 12, CNTR_EVEN, T },
1395 		[C(RESULT_MISS)]	= { 12, CNTR_ODD, T },
1396 	},
1397 	[C(OP_WRITE)] = {
1398 		[C(RESULT_ACCESS)]	= { 12, CNTR_EVEN, T },
1399 		[C(RESULT_MISS)]	= { 12, CNTR_ODD, T },
1400 	},
1401 },
1402 [C(L1I)] = {
1403 	[C(OP_READ)] = {
1404 		[C(RESULT_ACCESS)]	= { 10, CNTR_EVEN, T },
1405 		[C(RESULT_MISS)]	= { 10, CNTR_ODD, T },
1406 	},
1407 	[C(OP_WRITE)] = {
1408 		[C(RESULT_ACCESS)]	= { 10, CNTR_EVEN, T },
1409 		[C(RESULT_MISS)]	= { 10, CNTR_ODD, T },
1410 	},
1411 	[C(OP_PREFETCH)] = {
1412 		[C(RESULT_ACCESS)]	= { 23, CNTR_EVEN, T },
1413 		/*
1414 		 * Note that MIPS has only "hit" events countable for
1415 		 * the prefetch operation.
1416 		 */
1417 	},
1418 },
1419 [C(LL)] = {
1420 	[C(OP_READ)] = {
1421 		[C(RESULT_ACCESS)]	= { 28, CNTR_EVEN, P },
1422 		[C(RESULT_MISS)]	= { 28, CNTR_ODD, P },
1423 	},
1424 	[C(OP_WRITE)] = {
1425 		[C(RESULT_ACCESS)]	= { 28, CNTR_EVEN, P },
1426 		[C(RESULT_MISS)]	= { 28, CNTR_ODD, P },
1427 	},
1428 },
1429 [C(BPU)] = {
1430 	/* Using the same code for *HW_BRANCH* */
1431 	[C(OP_READ)] = {
1432 		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
1433 	},
1434 	[C(OP_WRITE)] = {
1435 		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
1436 	},
1437 },
1438 };
1439 
1440 static const struct mips_perf_event octeon_cache_map
1441 				[PERF_COUNT_HW_CACHE_MAX]
1442 				[PERF_COUNT_HW_CACHE_OP_MAX]
1443 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1444 [C(L1D)] = {
1445 	[C(OP_READ)] = {
1446 		[C(RESULT_ACCESS)]	= { 0x2b, CNTR_ALL },
1447 		[C(RESULT_MISS)]	= { 0x2e, CNTR_ALL },
1448 	},
1449 	[C(OP_WRITE)] = {
1450 		[C(RESULT_ACCESS)]	= { 0x30, CNTR_ALL },
1451 	},
1452 },
1453 [C(L1I)] = {
1454 	[C(OP_READ)] = {
1455 		[C(RESULT_ACCESS)]	= { 0x18, CNTR_ALL },
1456 	},
1457 	[C(OP_PREFETCH)] = {
1458 		[C(RESULT_ACCESS)]	= { 0x19, CNTR_ALL },
1459 	},
1460 },
1461 [C(DTLB)] = {
1462 	/*
1463 	 * Only general DTLB misses are counted; use the same event for
1464 	 * read and write.
1465 	 */
1466 	[C(OP_READ)] = {
1467 		[C(RESULT_MISS)]	= { 0x35, CNTR_ALL },
1468 	},
1469 	[C(OP_WRITE)] = {
1470 		[C(RESULT_MISS)]	= { 0x35, CNTR_ALL },
1471 	},
1472 },
1473 [C(ITLB)] = {
1474 	[C(OP_READ)] = {
1475 		[C(RESULT_MISS)]	= { 0x37, CNTR_ALL },
1476 	},
1477 },
1478 };
1479 
1480 static const struct mips_perf_event xlp_cache_map
1481 				[PERF_COUNT_HW_CACHE_MAX]
1482 				[PERF_COUNT_HW_CACHE_OP_MAX]
1483 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1484 [C(L1D)] = {
1485 	[C(OP_READ)] = {
1486 		[C(RESULT_ACCESS)]	= { 0x31, CNTR_ALL }, /* PAPI_L1_DCR */
1487 		[C(RESULT_MISS)]	= { 0x30, CNTR_ALL }, /* PAPI_L1_LDM */
1488 	},
1489 	[C(OP_WRITE)] = {
1490 		[C(RESULT_ACCESS)]	= { 0x2f, CNTR_ALL }, /* PAPI_L1_DCW */
1491 		[C(RESULT_MISS)]	= { 0x2e, CNTR_ALL }, /* PAPI_L1_STM */
1492 	},
1493 },
1494 [C(L1I)] = {
1495 	[C(OP_READ)] = {
1496 		[C(RESULT_ACCESS)]	= { 0x04, CNTR_ALL }, /* PAPI_L1_ICA */
1497 		[C(RESULT_MISS)]	= { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */
1498 	},
1499 },
1500 [C(LL)] = {
1501 	[C(OP_READ)] = {
1502 		[C(RESULT_ACCESS)]	= { 0x35, CNTR_ALL }, /* PAPI_L2_DCR */
1503 		[C(RESULT_MISS)]	= { 0x37, CNTR_ALL }, /* PAPI_L2_LDM */
1504 	},
1505 	[C(OP_WRITE)] = {
1506 		[C(RESULT_ACCESS)]	= { 0x34, CNTR_ALL }, /* PAPI_L2_DCA */
1507 		[C(RESULT_MISS)]	= { 0x36, CNTR_ALL }, /* PAPI_L2_DCM */
1508 	},
1509 },
1510 [C(DTLB)] = {
1511 	/*
1512 	 * Only general DTLB misses are counted; use the same event for
1513 	 * read and write.
1514 	 */
1515 	[C(OP_READ)] = {
1516 		[C(RESULT_MISS)]	= { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */
1517 	},
1518 	[C(OP_WRITE)] = {
1519 		[C(RESULT_MISS)]	= { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */
1520 	},
1521 },
1522 [C(ITLB)] = {
1523 	[C(OP_READ)] = {
1524 		[C(RESULT_MISS)]	= { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */
1525 	},
1526 	[C(OP_WRITE)] = {
1527 		[C(RESULT_MISS)]	= { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */
1528 	},
1529 },
1530 [C(BPU)] = {
1531 	[C(OP_READ)] = {
1532 		[C(RESULT_MISS)]	= { 0x25, CNTR_ALL },
1533 	},
1534 },
1535 };
1536 
1537 static int __hw_perf_event_init(struct perf_event *event)
1538 {
1539 	struct perf_event_attr *attr = &event->attr;
1540 	struct hw_perf_event *hwc = &event->hw;
1541 	const struct mips_perf_event *pev;
1542 	int err;
1543 
1544 	/* Return the MIPS event descriptor for a generic perf event. */
1545 	if (PERF_TYPE_HARDWARE == event->attr.type) {
1546 		if (event->attr.config >= PERF_COUNT_HW_MAX)
1547 			return -EINVAL;
1548 		pev = mipspmu_map_general_event(event->attr.config);
1549 	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
1550 		pev = mipspmu_map_cache_event(event->attr.config);
1551 	} else if (PERF_TYPE_RAW == event->attr.type) {
1552 		/* We are working on the global raw event. */
1553 		mutex_lock(&raw_event_mutex);
1554 		pev = mipspmu.map_raw_event(event->attr.config);
1555 	} else {
1556 		/* The event type is not (yet) supported. */
1557 		return -EOPNOTSUPP;
1558 	}
1559 
1560 	if (IS_ERR(pev)) {
1561 		if (PERF_TYPE_RAW == event->attr.type)
1562 			mutex_unlock(&raw_event_mutex);
1563 		return PTR_ERR(pev);
1564 	}
1565 
1566 	/*
1567 	 * We allow maximum flexibility in how each individual counter shared
1568 	 * by a single CPU operates (the mode exclusion and the range).
1569 	 */
1570 	hwc->config_base = MIPS_PERFCTRL_IE;
1571 
1572 	hwc->event_base = mipspmu_perf_event_encode(pev);
1573 	if (PERF_TYPE_RAW == event->attr.type)
1574 		mutex_unlock(&raw_event_mutex);
1575 
1576 	if (!attr->exclude_user)
1577 		hwc->config_base |= MIPS_PERFCTRL_U;
1578 	if (!attr->exclude_kernel) {
1579 		hwc->config_base |= MIPS_PERFCTRL_K;
1580 		/* MIPS kernel mode: KSU == 00b || EXL == 1 || ERL == 1 */
1581 		hwc->config_base |= MIPS_PERFCTRL_EXL;
1582 	}
1583 	if (!attr->exclude_hv)
1584 		hwc->config_base |= MIPS_PERFCTRL_S;
1585 
1586 	hwc->config_base &= M_PERFCTL_CONFIG_MASK;
1587 	/*
1588 	 * The event may belong to another CPU. We do not assign a local
1589 	 * counter to it for now.
1590 	 */
1591 	hwc->idx = -1;
1592 	hwc->config = 0;
1593 
1594 	if (!hwc->sample_period) {
1595 		hwc->sample_period  = mipspmu.max_period;
1596 		hwc->last_period    = hwc->sample_period;
1597 		local64_set(&hwc->period_left, hwc->sample_period);
1598 	}
1599 
1600 	err = 0;
1601 	if (event->group_leader != event)
1602 		err = validate_group(event);
1603 
1604 	event->destroy = hw_perf_event_destroy;
1605 
1606 	if (err)
1607 		event->destroy(event);
1608 
1609 	return err;
1610 }
1611 
1612 static void pause_local_counters(void)
1613 {
1614 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1615 	int ctr = mipspmu.num_counters;
1616 	unsigned long flags;
1617 
1618 	local_irq_save(flags);
1619 	do {
1620 		ctr--;
1621 		cpuc->saved_ctrl[ctr] = mipsxx_pmu_read_control(ctr);
1622 		mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] &
1623 					 ~M_PERFCTL_COUNT_EVENT_WHENEVER);
1624 	} while (ctr > 0);
1625 	local_irq_restore(flags);
1626 }
1627 
1628 static void resume_local_counters(void)
1629 {
1630 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1631 	int ctr = mipspmu.num_counters;
1632 
1633 	do {
1634 		ctr--;
1635 		mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]);
1636 	} while (ctr > 0);
1637 }
1638 
1639 static int mipsxx_pmu_handle_shared_irq(void)
1640 {
1641 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1642 	struct perf_sample_data data;
1643 	unsigned int counters = mipspmu.num_counters;
1644 	u64 counter;
1645 	int n, handled = IRQ_NONE;
1646 	struct pt_regs *regs;
1647 
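	/*
	 * If the core can flag pending counter interrupts in Cause.PCI,
	 * bail out early when no counter interrupt is actually pending.
	 */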
1648 	if (cpu_has_perf_cntr_intr_bit && !(read_c0_cause() & CAUSEF_PCI))
1649 		return handled;
1650 	/*
1651 	 * First pause the local counters, so that when we are blocked on
1652 	 * the lock here, the counters are all paused. When the lock is held
1653 	 * because of perf_disable(), the timer interrupt handler will be delayed.
1654 	 *
1655 	 * See also mipsxx_pmu_start().
1656 	 */
1657 	pause_local_counters();
1658 #ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
1659 	read_lock(&pmuint_rwlock);
1660 #endif
1661 
1662 	regs = get_irq_regs();
1663 
1664 	perf_sample_data_init(&data, 0, 0);
1665 
1666 	for (n = counters - 1; n >= 0; n--) {
1667 		if (!test_bit(n, cpuc->used_mask))
1668 			continue;
1669 
1670 		counter = mipspmu.read_counter(n);
1671 		if (!(counter & mipspmu.overflow))
1672 			continue;
1673 
1674 		handle_associated_event(cpuc, n, &data, regs);
1675 		handled = IRQ_HANDLED;
1676 	}
1677 
1678 #ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
1679 	read_unlock(&pmuint_rwlock);
1680 #endif
1681 	resume_local_counters();
1682 
1683 	/*
1684 	 * Do all the work for the pending perf events. We can do this
1685 	 * in here because the performance counter interrupt is a regular
1686 	 * interrupt, not an NMI.
1687 	 */
1688 	if (handled == IRQ_HANDLED)
1689 		irq_work_run();
1690 
1691 	return handled;
1692 }
1693 
1694 static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
1695 {
1696 	return mipsxx_pmu_handle_shared_irq();
1697 }
1698 
1699 /* 24K */
1700 #define IS_BOTH_COUNTERS_24K_EVENT(b)					\
1701 	((b) == 0 || (b) == 1 || (b) == 11)
1702 
1703 /* 34K */
1704 #define IS_BOTH_COUNTERS_34K_EVENT(b)					\
1705 	((b) == 0 || (b) == 1 || (b) == 11)
1706 #ifdef CONFIG_MIPS_MT_SMP
1707 #define IS_RANGE_P_34K_EVENT(r, b)					\
1708 	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
1709 	 (b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 ||		\
1710 	 (r) == 176 || ((b) >= 50 && (b) <= 55) ||			\
1711 	 ((b) >= 64 && (b) <= 67))
1712 #define IS_RANGE_V_34K_EVENT(r) ((r) == 47)
1713 #endif
1714 
1715 /* 74K */
1716 #define IS_BOTH_COUNTERS_74K_EVENT(b)					\
1717 	((b) == 0 || (b) == 1)
1718 
1719 /* proAptiv */
1720 #define IS_BOTH_COUNTERS_PROAPTIV_EVENT(b)				\
1721 	((b) == 0 || (b) == 1)
1722 /* P5600 */
1723 #define IS_BOTH_COUNTERS_P5600_EVENT(b)					\
1724 	((b) == 0 || (b) == 1)
1725 
1726 /* 1004K */
1727 #define IS_BOTH_COUNTERS_1004K_EVENT(b)					\
1728 	((b) == 0 || (b) == 1 || (b) == 11)
1729 #ifdef CONFIG_MIPS_MT_SMP
1730 #define IS_RANGE_P_1004K_EVENT(r, b)					\
1731 	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
1732 	 (b) == 25 || (b) == 36 || (b) == 39 || (r) == 44 ||		\
1733 	 (r) == 174 || (r) == 176 || ((b) >= 50 && (b) <= 59) ||	\
1734 	 (r) == 188 || (b) == 61 || (b) == 62 ||			\
1735 	 ((b) >= 64 && (b) <= 67))
1736 #define IS_RANGE_V_1004K_EVENT(r)	((r) == 47)
1737 #endif
1738 
1739 /* interAptiv */
1740 #define IS_BOTH_COUNTERS_INTERAPTIV_EVENT(b)				\
1741 	((b) == 0 || (b) == 1 || (b) == 11)
1742 #ifdef CONFIG_MIPS_MT_SMP
1743 /* The P/V/T info is not provided for "(b) == 38" in the SUM; assume P. */
1744 #define IS_RANGE_P_INTERAPTIV_EVENT(r, b)				\
1745 	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
1746 	 (b) == 25 || (b) == 36 || (b) == 38 || (b) == 39 ||		\
1747 	 (r) == 44 || (r) == 174 || (r) == 176 || ((b) >= 50 &&		\
1748 	 (b) <= 59) || (r) == 188 || (b) == 61 || (b) == 62 ||		\
1749 	 ((b) >= 64 && (b) <= 67))
1750 #define IS_RANGE_V_INTERAPTIV_EVENT(r)	((r) == 47 || (r) == 175)
1751 #endif
1752 
1753 /* BMIPS5000 */
1754 #define IS_BOTH_COUNTERS_BMIPS5000_EVENT(b)				\
1755 	((b) == 0 || (b) == 1)
1756 
1757 
1758 /*
1759  * For most cores the user can use raw events 0-255, where 0-127 are for
1760  * the events of even counters and 128-255 are for odd counters. Bit 7 is
1761  * used as the even/odd bank selector. So, for example, when the user wants
1762  * event number 15 on an odd counter (by referring to the user manual),
1763  * 128 needs to be added to 15 to form the event config, i.e. 143 (0x8F)
1764  * is used.
1765  *
1766  * Some newer cores have even more events, in which case the user can use
1767  * raw events 0-511, where 0-255 are for the events of even counters and
1768  * 256-511 are for odd counters, so bit 8 is used as the even/odd bank selector.
1769  */
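/*
 * Descriptive note: with the perf tool, raw events are given as r<hex>;
 * for example "perf stat -e r8f" on such a core requests event 15 on an
 * odd counter.
 */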
1770 static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
1771 {
1772 	/* currently most cores have 7-bit event numbers */
1773 	int pmu_type;
1774 	unsigned int raw_id = config & 0xff;
1775 	unsigned int base_id = raw_id & 0x7f;
1776 
1777 	switch (current_cpu_type()) {
1778 	case CPU_24K:
1779 		if (IS_BOTH_COUNTERS_24K_EVENT(base_id))
1780 			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1781 		else
1782 			raw_event.cntr_mask =
1783 				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1784 #ifdef CONFIG_MIPS_MT_SMP
1785 		/*
1786 		 * This actually does nothing: non-multithreading
1787 		 * CPUs will not check or calculate the range.
1788 		 */
1789 		raw_event.range = P;
1790 #endif
1791 		break;
1792 	case CPU_34K:
1793 		if (IS_BOTH_COUNTERS_34K_EVENT(base_id))
1794 			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1795 		else
1796 			raw_event.cntr_mask =
1797 				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1798 #ifdef CONFIG_MIPS_MT_SMP
1799 		if (IS_RANGE_P_34K_EVENT(raw_id, base_id))
1800 			raw_event.range = P;
1801 		else if (unlikely(IS_RANGE_V_34K_EVENT(raw_id)))
1802 			raw_event.range = V;
1803 		else
1804 			raw_event.range = T;
1805 #endif
1806 		break;
1807 	case CPU_74K:
1808 	case CPU_1074K:
1809 		if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
1810 			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1811 		else
1812 			raw_event.cntr_mask =
1813 				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1814 #ifdef CONFIG_MIPS_MT_SMP
1815 		raw_event.range = P;
1816 #endif
1817 		break;
1818 	case CPU_PROAPTIV:
1819 		if (IS_BOTH_COUNTERS_PROAPTIV_EVENT(base_id))
1820 			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1821 		else
1822 			raw_event.cntr_mask =
1823 				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1824 #ifdef CONFIG_MIPS_MT_SMP
1825 		raw_event.range = P;
1826 #endif
1827 		break;
1828 	case CPU_P5600:
1829 	case CPU_P6600:
1830 		/* 8-bit event numbers */
1831 		raw_id = config & 0x1ff;
1832 		base_id = raw_id & 0xff;
1833 		if (IS_BOTH_COUNTERS_P5600_EVENT(base_id))
1834 			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1835 		else
1836 			raw_event.cntr_mask =
1837 				raw_id > 255 ? CNTR_ODD : CNTR_EVEN;
1838 #ifdef CONFIG_MIPS_MT_SMP
1839 		raw_event.range = P;
1840 #endif
1841 		break;
1842 	case CPU_I6400:
1843 	case CPU_I6500:
1844 		/* 8-bit event numbers */
1845 		base_id = config & 0xff;
1846 		raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1847 		break;
1848 	case CPU_1004K:
1849 		if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
1850 			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1851 		else
1852 			raw_event.cntr_mask =
1853 				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1854 #ifdef CONFIG_MIPS_MT_SMP
1855 		if (IS_RANGE_P_1004K_EVENT(raw_id, base_id))
1856 			raw_event.range = P;
1857 		else if (unlikely(IS_RANGE_V_1004K_EVENT(raw_id)))
1858 			raw_event.range = V;
1859 		else
1860 			raw_event.range = T;
1861 #endif
1862 		break;
1863 	case CPU_INTERAPTIV:
1864 		if (IS_BOTH_COUNTERS_INTERAPTIV_EVENT(base_id))
1865 			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1866 		else
1867 			raw_event.cntr_mask =
1868 				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1869 #ifdef CONFIG_MIPS_MT_SMP
1870 		if (IS_RANGE_P_INTERAPTIV_EVENT(raw_id, base_id))
1871 			raw_event.range = P;
1872 		else if (unlikely(IS_RANGE_V_INTERAPTIV_EVENT(raw_id)))
1873 			raw_event.range = V;
1874 		else
1875 			raw_event.range = T;
1876 #endif
1877 		break;
1878 	case CPU_BMIPS5000:
1879 		if (IS_BOTH_COUNTERS_BMIPS5000_EVENT(base_id))
1880 			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1881 		else
1882 			raw_event.cntr_mask =
1883 				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1884 		break;
1885 	case CPU_LOONGSON64:
1886 		pmu_type = get_loongson3_pmu_type();
1887 
1888 		switch (pmu_type) {
1889 		case LOONGSON_PMU_TYPE1:
1890 			raw_event.cntr_mask =
1891 				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1892 			break;
1893 		case LOONGSON_PMU_TYPE2:
1894 			base_id = config & 0x3ff;
1895 			raw_event.cntr_mask = CNTR_ALL;
1896 
1897 			if ((base_id >= 1 && base_id < 28) ||
1898 				(base_id >= 64 && base_id < 90) ||
1899 				(base_id >= 128 && base_id < 164) ||
1900 				(base_id >= 192 && base_id < 200) ||
1901 				(base_id >= 256 && base_id < 275) ||
1902 				(base_id >= 320 && base_id < 361) ||
1903 				(base_id >= 384 && base_id < 574))
1904 				break;
1905 
1906 			return ERR_PTR(-EOPNOTSUPP);
1907 		case LOONGSON_PMU_TYPE3:
1908 			base_id = raw_id;
1909 			raw_event.cntr_mask = CNTR_ALL;
1910 			break;
1911 		}
1912 		break;
1913 	}
1914 
1915 	raw_event.event_id = base_id;
1916 
1917 	return &raw_event;
1918 }
1919 
1920 static const struct mips_perf_event *octeon_pmu_map_raw_event(u64 config)
1921 {
1922 	unsigned int base_id = config & 0x7f;
1923 	unsigned int event_max;
1924 
1925 
1926 	raw_event.cntr_mask = CNTR_ALL;
1927 	raw_event.event_id = base_id;
1928 
1929 	if (current_cpu_type() == CPU_CAVIUM_OCTEON3)
1930 		event_max = 0x5f;
1931 	else if (current_cpu_type() == CPU_CAVIUM_OCTEON2)
1932 		event_max = 0x42;
1933 	else
1934 		event_max = 0x3a;
1935 
1936 	if (base_id > event_max)
1937 		return ERR_PTR(-EOPNOTSUPP);
1939 
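	/*
	 * Some event codes inside the otherwise valid range are also
	 * rejected as unsupported.
	 */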
1940 	switch (base_id) {
1941 	case 0x00:
1942 	case 0x0f:
1943 	case 0x1e:
1944 	case 0x1f:
1945 	case 0x2f:
1946 	case 0x34:
1947 	case 0x3e ... 0x3f:
1948 		return ERR_PTR(-EOPNOTSUPP);
1949 	default:
1950 		break;
1951 	}
1952 
1953 	return &raw_event;
1954 }
1955 
1956 static const struct mips_perf_event *xlp_pmu_map_raw_event(u64 config)
1957 {
1958 	unsigned int raw_id = config & 0xff;
1959 
1960 	/* Only 1-63 are defined */
1961 	if ((raw_id < 0x01) || (raw_id > 0x3f))
1962 		return ERR_PTR(-EOPNOTSUPP);
1963 
1964 	raw_event.cntr_mask = CNTR_ALL;
1965 	raw_event.event_id = raw_id;
1966 
1967 	return &raw_event;
1968 }
1969 
1970 static int __init
1971 init_hw_perf_events(void)
1972 {
1973 	int counters, irq, pmu_type;
1974 
1975 	pr_info("Performance counters: ");
1976 
1977 	counters = n_counters();
1978 	if (counters == 0) {
1979 		pr_cont("No available PMU.\n");
1980 		return -ENODEV;
1981 	}
1982 
1983 #ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
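	/*
	 * Unless the CPU implements per-TC counters, the counters are
	 * shared by the TCs of a core, so convert the core-wide total
	 * into the number available to each CPU.
	 */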
1984 	if (!cpu_has_mipsmt_pertccounters)
1985 		counters = counters_total_to_per_cpu(counters);
1986 #endif
1987 
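	/*
	 * Choose the performance counter interrupt: prefer the platform's
	 * get_c0_perfcount_int() callback, then the CP0 perfcount IRQ,
	 * and otherwise fall back to -1, in which case counter overflow
	 * is handled via the shared timer interrupt.
	 */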
1988 	if (get_c0_perfcount_int)
1989 		irq = get_c0_perfcount_int();
1990 	else if (cp0_perfcount_irq >= 0)
1991 		irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
1992 	else
1993 		irq = -1;
1994 
1995 	mipspmu.map_raw_event = mipsxx_pmu_map_raw_event;
1996 
1997 	switch (current_cpu_type()) {
1998 	case CPU_24K:
1999 		mipspmu.name = "mips/24K";
2000 		mipspmu.general_event_map = &mipsxxcore_event_map;
2001 		mipspmu.cache_event_map = &mipsxxcore_cache_map;
2002 		break;
2003 	case CPU_34K:
2004 		mipspmu.name = "mips/34K";
2005 		mipspmu.general_event_map = &mipsxxcore_event_map;
2006 		mipspmu.cache_event_map = &mipsxxcore_cache_map;
2007 		break;
2008 	case CPU_74K:
2009 		mipspmu.name = "mips/74K";
2010 		mipspmu.general_event_map = &mipsxxcore_event_map2;
2011 		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
2012 		break;
2013 	case CPU_PROAPTIV:
2014 		mipspmu.name = "mips/proAptiv";
2015 		mipspmu.general_event_map = &mipsxxcore_event_map2;
2016 		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
2017 		break;
2018 	case CPU_P5600:
2019 		mipspmu.name = "mips/P5600";
2020 		mipspmu.general_event_map = &mipsxxcore_event_map2;
2021 		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
2022 		break;
2023 	case CPU_P6600:
2024 		mipspmu.name = "mips/P6600";
2025 		mipspmu.general_event_map = &mipsxxcore_event_map2;
2026 		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
2027 		break;
2028 	case CPU_I6400:
2029 		mipspmu.name = "mips/I6400";
2030 		mipspmu.general_event_map = &i6x00_event_map;
2031 		mipspmu.cache_event_map = &i6x00_cache_map;
2032 		break;
2033 	case CPU_I6500:
2034 		mipspmu.name = "mips/I6500";
2035 		mipspmu.general_event_map = &i6x00_event_map;
2036 		mipspmu.cache_event_map = &i6x00_cache_map;
2037 		break;
2038 	case CPU_1004K:
2039 		mipspmu.name = "mips/1004K";
2040 		mipspmu.general_event_map = &mipsxxcore_event_map;
2041 		mipspmu.cache_event_map = &mipsxxcore_cache_map;
2042 		break;
2043 	case CPU_1074K:
2044 		mipspmu.name = "mips/1074K";
2045 		mipspmu.general_event_map = &mipsxxcore_event_map;
2046 		mipspmu.cache_event_map = &mipsxxcore_cache_map;
2047 		break;
2048 	case CPU_INTERAPTIV:
2049 		mipspmu.name = "mips/interAptiv";
2050 		mipspmu.general_event_map = &mipsxxcore_event_map;
2051 		mipspmu.cache_event_map = &mipsxxcore_cache_map;
2052 		break;
2053 	case CPU_LOONGSON32:
2054 		mipspmu.name = "mips/loongson1";
2055 		mipspmu.general_event_map = &mipsxxcore_event_map;
2056 		mipspmu.cache_event_map = &mipsxxcore_cache_map;
2057 		break;
2058 	case CPU_LOONGSON64:
2059 		mipspmu.name = "mips/loongson3";
2060 		pmu_type = get_loongson3_pmu_type();
2061 
2062 		switch (pmu_type) {
2063 		case LOONGSON_PMU_TYPE1:
2064 			counters = 2;
2065 			mipspmu.general_event_map = &loongson3_event_map1;
2066 			mipspmu.cache_event_map = &loongson3_cache_map1;
2067 			break;
2068 		case LOONGSON_PMU_TYPE2:
2069 			counters = 4;
2070 			mipspmu.general_event_map = &loongson3_event_map2;
2071 			mipspmu.cache_event_map = &loongson3_cache_map2;
2072 			break;
2073 		case LOONGSON_PMU_TYPE3:
2074 			counters = 4;
2075 			mipspmu.general_event_map = &loongson3_event_map3;
2076 			mipspmu.cache_event_map = &loongson3_cache_map3;
2077 			break;
2078 		}
2079 		break;
2080 	case CPU_CAVIUM_OCTEON:
2081 	case CPU_CAVIUM_OCTEON_PLUS:
2082 	case CPU_CAVIUM_OCTEON2:
2083 	case CPU_CAVIUM_OCTEON3:
2084 		mipspmu.name = "octeon";
2085 		mipspmu.general_event_map = &octeon_event_map;
2086 		mipspmu.cache_event_map = &octeon_cache_map;
2087 		mipspmu.map_raw_event = octeon_pmu_map_raw_event;
2088 		break;
2089 	case CPU_BMIPS5000:
2090 		mipspmu.name = "BMIPS5000";
2091 		mipspmu.general_event_map = &bmips5000_event_map;
2092 		mipspmu.cache_event_map = &bmips5000_cache_map;
2093 		break;
2094 	case CPU_XLP:
2095 		mipspmu.name = "xlp";
2096 		mipspmu.general_event_map = &xlp_event_map;
2097 		mipspmu.cache_event_map = &xlp_cache_map;
2098 		mipspmu.map_raw_event = xlp_pmu_map_raw_event;
2099 		break;
2100 	default:
2101 		pr_cont("Either the hardware does not support performance "
2102 			"counters, or support is not yet implemented.\n");
2103 		return -ENODEV;
2104 	}
2105 
2106 	mipspmu.num_counters = counters;
2107 	mipspmu.irq = irq;
2108 
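	/*
	 * The W bit in PerfCtl0 advertises wide (64-bit) counters.
	 * Loongson-3 PMU type 2 implements only 48 counter bits, so its
	 * period/overflow limits are sized accordingly; other wide
	 * implementations are treated as full 64-bit, and without W the
	 * counters are 32-bit.
	 */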
2109 	if (read_c0_perfctrl0() & MIPS_PERFCTRL_W) {
2110 		if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2) {
2111 			counter_bits = 48;
2112 			mipspmu.max_period = (1ULL << 47) - 1;
2113 			mipspmu.valid_count = (1ULL << 47) - 1;
2114 			mipspmu.overflow = 1ULL << 47;
2115 		} else {
2116 			counter_bits = 64;
2117 			mipspmu.max_period = (1ULL << 63) - 1;
2118 			mipspmu.valid_count = (1ULL << 63) - 1;
2119 			mipspmu.overflow = 1ULL << 63;
2120 		}
2121 		mipspmu.read_counter = mipsxx_pmu_read_counter_64;
2122 		mipspmu.write_counter = mipsxx_pmu_write_counter_64;
2123 	} else {
2124 		counter_bits = 32;
2125 		mipspmu.max_period = (1ULL << 31) - 1;
2126 		mipspmu.valid_count = (1ULL << 31) - 1;
2127 		mipspmu.overflow = 1ULL << 31;
2128 		mipspmu.read_counter = mipsxx_pmu_read_counter;
2129 		mipspmu.write_counter = mipsxx_pmu_write_counter;
2130 	}
2131 
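	/* Reset (stop and clear) every CPU's counters before registering. */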
2132 	on_each_cpu(reset_counters, (void *)(long)counters, 1);
2133 
2134 	pr_cont("%s PMU enabled, %d %d-bit counters available to each "
2135 		"CPU, irq %d%s\n", mipspmu.name, counters, counter_bits, irq,
2136 		irq < 0 ? " (shared with timer interrupt)" : "");
2137 
2138 	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
2139 
2140 	return 0;
2141 }
2142 early_initcall(init_hw_perf_events);
2143