xref: /linux/arch/sparc/kernel/pcr.c (revision e360adbe)
/* pcr.c: Generic sparc64 performance counter infrastructure.
 *
 * Copyright (C) 2009 David S. Miller (davem@davemloft.net)
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/irq.h>

#include <linux/irq_work.h>
#include <linux/ftrace.h>

#include <asm/pil.h>
#include <asm/pcr.h>
#include <asm/nmi.h>

/* This code is shared between various users of the performance
 * counters.  Users will be oprofile, pseudo-NMI watchdog, and the
 * perf_event support layer.
 */

#define PCR_SUN4U_ENABLE	(PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE)
#define PCR_N2_ENABLE		(PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE | \
				 PCR_N2_TOE_OV1 | \
				 (2 << PCR_N2_SL1_SHIFT) | \
				 (0xff << PCR_N2_MASK1_SHIFT))

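/* PCR enable bits and PIC counter shift selected for the detected
 * CPU type in pcr_arch_init() below.
 */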
u64 pcr_enable;
unsigned int picl_shift;

/* Performance counter interrupts run unmasked at PIL level 15.
 * Therefore we can't do things like wakeups and other work that
 * expects IRQ disabling to be respected by the locking code.
 *
 * In such situations we defer the work by signalling a lower
 * level cpu IRQ.
 */
void __irq_entry deferred_pcr_work_irq(int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs;

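	/* Acknowledge the deferred-work softint before running the
	 * queued callbacks.
	 */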
	clear_softint(1 << PIL_DEFERRED_PCR_WORK);

	old_regs = set_irq_regs(regs);
	irq_enter();
#ifdef CONFIG_IRQ_WORK
	irq_work_run();
#endif
	irq_exit();
	set_irq_regs(old_regs);
}

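/* Arch hook used by the generic irq_work code; it raises the softint
 * that is serviced by deferred_pcr_work_irq() above.
 */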
void arch_irq_work_raise(void)
{
	set_softint(1 << PIL_DEFERRED_PCR_WORK);
}

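/* PCR access methods in use on this system; chosen at boot time in
 * pcr_arch_init() based on the CPU type.
 */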
const struct pcr_ops *pcr_ops;
EXPORT_SYMBOL_GPL(pcr_ops);

static u64 direct_pcr_read(void)
{
	u64 val;

	read_pcr(val);
	return val;
}

static void direct_pcr_write(u64 val)
{
	write_pcr(val);
}

static const struct pcr_ops direct_pcr_ops = {
	.read	= direct_pcr_read,
	.write	= direct_pcr_write,
};

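/* On Niagara-2 the PCR is written via the hypervisor's SPARC
 * performance counter control service; if that call fails we fall
 * back to writing the register directly.
 */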
static void n2_pcr_write(u64 val)
{
	unsigned long ret;

	ret = sun4v_niagara2_setperf(HV_N2_PERF_SPARC_CTL, val);
	if (ret != HV_EOK)
		write_pcr(val);
}

static const struct pcr_ops n2_pcr_ops = {
	.read	= direct_pcr_read,
	.write	= n2_pcr_write,
};

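/* State for the sun4v hypervisor performance counter API group
 * negotiated in register_perf_hsvc() below.
 */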
static unsigned long perf_hsvc_group;
static unsigned long perf_hsvc_major;
static unsigned long perf_hsvc_minor;

static int __init register_perf_hsvc(void)
{
	if (tlb_type == hypervisor) {
		switch (sun4v_chip_type) {
		case SUN4V_CHIP_NIAGARA1:
			perf_hsvc_group = HV_GRP_NIAG_PERF;
			break;

		case SUN4V_CHIP_NIAGARA2:
			perf_hsvc_group = HV_GRP_N2_CPU;
			break;

		default:
			return -ENODEV;
		}

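		/* Negotiate major version 1 of the selected API group with
		 * the hypervisor; perf_hsvc_minor is updated with the minor
		 * version that was granted.
		 */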
		perf_hsvc_major = 1;
		perf_hsvc_minor = 0;
		if (sun4v_hvapi_register(perf_hsvc_group,
					 perf_hsvc_major,
					 &perf_hsvc_minor)) {
			printk("perfmon: Could not register hvapi.\n");
			return -ENODEV;
		}
	}
	return 0;
}

static void __init unregister_perf_hsvc(void)
{
	if (tlb_type != hypervisor)
		return;
	sun4v_hvapi_unregister(perf_hsvc_group);
}

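/* Select the PCR access methods and enable settings for the CPU we
 * are running on, then initialize the NMI watchdog which depends on
 * them.
 */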
int __init pcr_arch_init(void)
{
	int err = register_perf_hsvc();

	if (err)
		return err;

	switch (tlb_type) {
	case hypervisor:
		pcr_ops = &n2_pcr_ops;
		pcr_enable = PCR_N2_ENABLE;
		picl_shift = 2;
		break;


	case cheetah:
	case cheetah_plus:
		pcr_ops = &direct_pcr_ops;
		pcr_enable = PCR_SUN4U_ENABLE;
		break;

	case spitfire:
		/* UltraSPARC-I/II and derivatives lack a profile
		 * counter overflow interrupt so we can't make use of
		 * their hardware currently.
		 */
		/* fallthrough */
	default:
		err = -ENODEV;
		goto out_unregister;
	}

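	/* With pcr_ops in place, bring up the performance counter based
	 * NMI watchdog.
	 */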
	return nmi_init();

out_unregister:
	unregister_perf_hsvc();
	return err;
}

arch_initcall(pcr_arch_init);