xref: /linux/arch/sparc/kernel/pcr.c (revision cdd6c482)
/* pcr.c: Generic sparc64 performance counter infrastructure.
 *
 * Copyright (C) 2009 David S. Miller (davem@davemloft.net)
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/irq.h>

#include <linux/perf_event.h>

#include <asm/pil.h>
#include <asm/pcr.h>
#include <asm/nmi.h>

/* This code is shared between various users of the performance
 * counters.  Users will be oprofile, pseudo-NMI watchdog, and the
 * perf_event support layer.
 */

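/* PCR settings used when the counters are enabled.  Both masks keep
 * PIC access privileged and count in user as well as system mode; the
 * Niagara-2 mask additionally enables the overflow trap and programs
 * the event select and mask fields for counter one.
 */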
#define PCR_SUN4U_ENABLE	(PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE)
#define PCR_N2_ENABLE		(PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE | \
				 PCR_N2_TOE_OV1 | \
				 (2 << PCR_N2_SL1_SHIFT) | \
				 (0xff << PCR_N2_MASK1_SHIFT))

u64 pcr_enable;
unsigned int picl_shift;

/* Performance counter interrupts run unmasked at PIL level 15.
 * Therefore we can't do things like wakeups and other work
 * that expects IRQ disabling to be adhered to in locking etc.
 *
 * Instead, in such situations we defer the work by signalling
 * a lower level cpu IRQ.
 */
void deferred_pcr_work_irq(int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	clear_softint(1 << PIL_DEFERRED_PCR_WORK);

	old_regs = set_irq_regs(regs);
	irq_enter();
#ifdef CONFIG_PERF_EVENTS
	perf_event_do_pending();
#endif
	irq_exit();
	set_irq_regs(old_regs);
}

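/* Request that the deferred work above be run.  This is safe to call
 * from the PIL 15 counter interrupt since it only raises a softint,
 * which fires once the interrupt level drops back below
 * PIL_DEFERRED_PCR_WORK.
 */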
void set_perf_event_pending(void)
{
	set_softint(1 << PIL_DEFERRED_PCR_WORK);
}

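/* PCR access methods for the current cpu, selected in pcr_arch_init(). */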
const struct pcr_ops *pcr_ops;
EXPORT_SYMBOL_GPL(pcr_ops);

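/* On sun4u (Cheetah and Cheetah+) the %pcr register can be read and
 * written directly.
 */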
static u64 direct_pcr_read(void)
{
	u64 val;

	read_pcr(val);
	return val;
}

static void direct_pcr_write(u64 val)
{
	write_pcr(val);
}

static const struct pcr_ops direct_pcr_ops = {
	.read	= direct_pcr_read,
	.write	= direct_pcr_write,
};

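/* Niagara-2 writes the PCR via a hypervisor service; if the call is
 * not accepted we fall back to writing the register directly.
 */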
static void n2_pcr_write(u64 val)
{
	unsigned long ret;

	ret = sun4v_niagara2_setperf(HV_N2_PERF_SPARC_CTL, val);
	if (ret != HV_EOK)
		write_pcr(val);
}

static const struct pcr_ops n2_pcr_ops = {
	.read	= direct_pcr_read,
	.write	= n2_pcr_write,
};

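/* Hypervisor API group and version negotiated for perf counter access. */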
static unsigned long perf_hsvc_group;
static unsigned long perf_hsvc_major;
static unsigned long perf_hsvc_minor;

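/* Negotiate the hypervisor API group that provides performance counter
 * access for this sun4v chip.  Nothing to do on sun4u systems.
 */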
static int __init register_perf_hsvc(void)
{
	if (tlb_type == hypervisor) {
		switch (sun4v_chip_type) {
		case SUN4V_CHIP_NIAGARA1:
			perf_hsvc_group = HV_GRP_NIAG_PERF;
			break;

		case SUN4V_CHIP_NIAGARA2:
			perf_hsvc_group = HV_GRP_N2_CPU;
			break;

		default:
			return -ENODEV;
		}

		perf_hsvc_major = 1;
		perf_hsvc_minor = 0;
		if (sun4v_hvapi_register(perf_hsvc_group,
					 perf_hsvc_major,
					 &perf_hsvc_minor)) {
			printk(KERN_ERR "perfmon: Could not register hvapi.\n");
			return -ENODEV;
		}
	}
	return 0;
}

static void __init unregister_perf_hsvc(void)
{
	if (tlb_type != hypervisor)
		return;
	sun4v_hvapi_unregister(perf_hsvc_group);
}

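/* Select the PCR access methods and enable bits for this cpu type,
 * then let the NMI watchdog initialize.
 */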
int __init pcr_arch_init(void)
{
	int err = register_perf_hsvc();

	if (err)
		return err;

	switch (tlb_type) {
	case hypervisor:
		pcr_ops = &n2_pcr_ops;
		pcr_enable = PCR_N2_ENABLE;
		picl_shift = 2;
		break;

	case cheetah:
	case cheetah_plus:
		pcr_ops = &direct_pcr_ops;
		pcr_enable = PCR_SUN4U_ENABLE;
		break;

	case spitfire:
		/* UltraSPARC-I/II and derivatives lack a profile
		 * counter overflow interrupt so we can't make use of
		 * their hardware currently.
		 */
		/* fallthrough */
	default:
		err = -ENODEV;
		goto out_unregister;
	}

	return nmi_init();

out_unregister:
	unregister_perf_hsvc();
	return err;
}

arch_initcall(pcr_arch_init);