/* pcr.c: Generic sparc64 performance counter infrastructure.
 *
 * Copyright (C) 2009 David S. Miller (davem@davemloft.net)
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/irq.h>

#include <linux/irq_work.h>
#include <linux/ftrace.h>

#include <asm/pil.h>
#include <asm/pcr.h>
#include <asm/nmi.h>
#include <asm/asi.h>
#include <asm/spitfire.h>

/* This code is shared between various users of the performance
 * counters.  The users are oprofile, the pseudo-NMI watchdog, and
 * the perf_event support layer.
 */

/* Performance counter interrupts run unmasked at PIL level 15,
 * so we can't do things like wakeups or other work that expects
 * IRQ disabling to be respected by the locking done elsewhere.
 *
 * In such situations we instead defer the work by signalling a
 * lower level cpu IRQ.
 */
void __irq_entry deferred_pcr_work_irq(int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	clear_softint(1 << PIL_DEFERRED_PCR_WORK);

	old_regs = set_irq_regs(regs);
	irq_enter();
#ifdef CONFIG_IRQ_WORK
	irq_work_run();
#endif
	irq_exit();
	set_irq_regs(old_regs);
}

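/* Hook called by the generic irq_work code when work is queued for
 * this cpu.  Signal the deferred PCR work softint so that the queued
 * work runs from a normal, maskable IRQ level instead of PIL 15.
 */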
void arch_irq_work_raise(void)
{
	set_softint(1 << PIL_DEFERRED_PCR_WORK);
}

const struct pcr_ops *pcr_ops;
EXPORT_SYMBOL_GPL(pcr_ops);

static u64 direct_pcr_read(unsigned long reg_num)
{
	u64 val;

	WARN_ON_ONCE(reg_num != 0);
	__asm__ __volatile__("rd %%pcr, %0" : "=r" (val));
	return val;
}

static void direct_pcr_write(unsigned long reg_num, u64 val)
{
	WARN_ON_ONCE(reg_num != 0);
	__asm__ __volatile__("wr %0, 0x0, %%pcr" : : "r" (val));
}

static u64 direct_pic_read(unsigned long reg_num)
{
	u64 val;

	WARN_ON_ONCE(reg_num != 0);
	__asm__ __volatile__("rd %%pic, %0" : "=r" (val));
	return val;
}

static void direct_pic_write(unsigned long reg_num, u64 val)
{
	WARN_ON_ONCE(reg_num != 0);

	/* Blackbird errata workaround.  See commentary in
	 * arch/sparc64/kernel/smp.c:smp_percpu_timer_interrupt()
	 * for more information.
	 */
	__asm__ __volatile__("ba,pt	%%xcc, 99f\n\t"
			     " nop\n\t"
			     ".align	64\n"
			  "99:wr	%0, 0x0, %%pic\n\t"
			     "rd	%%pic, %%g0" : : "r" (val));
}

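/* Program the upper 32 bits of %pic with the two's complement of the
 * desired tick count, so the counter overflows (and the overflow
 * interrupt fires) roughly nmi_hz times per second.
 */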
static u64 direct_picl_value(unsigned int nmi_hz)
{
	u32 delta = local_cpu_data().clock_tick / nmi_hz;

	return ((u64)((0 - delta) & 0xffffffff)) << 32;
}

static const struct pcr_ops direct_pcr_ops = {
	.read_pcr		= direct_pcr_read,
	.write_pcr		= direct_pcr_write,
	.read_pic		= direct_pic_read,
	.write_pic		= direct_pic_write,
	.nmi_picl_value		= direct_picl_value,
	.pcr_nmi_enable		= (PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE),
	.pcr_nmi_disable	= PCR_PIC_PRIV,
};

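/* When hypervisor tracing is requested the PCR must be programmed
 * via the hypervisor service; fall back to a direct %pcr write if
 * the call fails or hypervisor tracing is not requested.
 */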
static void n2_pcr_write(unsigned long reg_num, u64 val)
{
	unsigned long ret;

	WARN_ON_ONCE(reg_num != 0);
	if (val & PCR_N2_HTRACE) {
		ret = sun4v_niagara2_setperf(HV_N2_PERF_SPARC_CTL, val);
		if (ret != HV_EOK)
			direct_pcr_write(reg_num, val);
	} else
		direct_pcr_write(reg_num, val);
}

static u64 n2_picl_value(unsigned int nmi_hz)
{
	u32 delta = local_cpu_data().clock_tick / (nmi_hz << 2);

	return ((u64)((0 - delta) & 0xffffffff)) << 32;
}

static const struct pcr_ops n2_pcr_ops = {
	.read_pcr		= direct_pcr_read,
	.write_pcr		= n2_pcr_write,
	.read_pic		= direct_pic_read,
	.write_pic		= direct_pic_write,
	.nmi_picl_value		= n2_picl_value,
	.pcr_nmi_enable		= (PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE |
				   PCR_N2_TOE_OV1 |
				   (2 << PCR_N2_SL1_SHIFT) |
				   (0xff << PCR_N2_MASK1_SHIFT)),
	.pcr_nmi_disable	= PCR_PIC_PRIV,
};

static u64 n4_pcr_read(unsigned long reg_num)
{
	unsigned long val;

	(void) sun4v_vt_get_perfreg(reg_num, &val);

	return val;
}

static void n4_pcr_write(unsigned long reg_num, u64 val)
{
	(void) sun4v_vt_set_perfreg(reg_num, val);
}

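/* On Niagara-4 and later the performance counters are read and
 * written through the ASI_PIC address space, 8 bytes per counter.
 */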
static u64 n4_pic_read(unsigned long reg_num)
{
	unsigned long val;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=r" (val)
			     : "r" (reg_num * 0x8UL), "i" (ASI_PIC));

	return val;
}

static void n4_pic_write(unsigned long reg_num, u64 val)
{
	__asm__ __volatile__("stxa %0, [%1] %2"
			     : /* no outputs */
			     : "r" (val), "r" (reg_num * 0x8UL), "i" (ASI_PIC));
}

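/* Same overflow delta calculation as n2_picl_value, but the value is
 * programmed into the low 32 bits of the counter rather than the
 * upper half of %pic.
 */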
static u64 n4_picl_value(unsigned int nmi_hz)
{
	u32 delta = local_cpu_data().clock_tick / (nmi_hz << 2);

	return ((u64)((0 - delta) & 0xffffffff));
}

static const struct pcr_ops n4_pcr_ops = {
	.read_pcr		= n4_pcr_read,
	.write_pcr		= n4_pcr_write,
	.read_pic		= n4_pic_read,
	.write_pic		= n4_pic_write,
	.nmi_picl_value		= n4_picl_value,
	.pcr_nmi_enable		= (PCR_N4_PICNPT | PCR_N4_STRACE |
				   PCR_N4_UTRACE | PCR_N4_TOE |
				   (26 << PCR_N4_SL_SHIFT)),
	.pcr_nmi_disable	= PCR_N4_PICNPT,
};

static u64 n5_pcr_read(unsigned long reg_num)
{
	unsigned long val;

	(void) sun4v_t5_get_perfreg(reg_num, &val);

	return val;
}

static void n5_pcr_write(unsigned long reg_num, u64 val)
{
	(void) sun4v_t5_set_perfreg(reg_num, val);
}

static const struct pcr_ops n5_pcr_ops = {
	.read_pcr		= n5_pcr_read,
	.write_pcr		= n5_pcr_write,
	.read_pic		= n4_pic_read,
	.write_pic		= n4_pic_write,
	.nmi_picl_value		= n4_picl_value,
	.pcr_nmi_enable		= (PCR_N4_PICNPT | PCR_N4_STRACE |
				   PCR_N4_UTRACE | PCR_N4_TOE |
				   (26 << PCR_N4_SL_SHIFT)),
	.pcr_nmi_disable	= PCR_N4_PICNPT,
};

static u64 m7_pcr_read(unsigned long reg_num)
{
	unsigned long val;

	(void) sun4v_m7_get_perfreg(reg_num, &val);

	return val;
}

static void m7_pcr_write(unsigned long reg_num, u64 val)
{
	(void) sun4v_m7_set_perfreg(reg_num, val);
}

static const struct pcr_ops m7_pcr_ops = {
	.read_pcr		= m7_pcr_read,
	.write_pcr		= m7_pcr_write,
	.read_pic		= n4_pic_read,
	.write_pic		= n4_pic_write,
	.nmi_picl_value		= n4_picl_value,
	.pcr_nmi_enable		= (PCR_N4_PICNPT | PCR_N4_STRACE |
				   PCR_N4_UTRACE | PCR_N4_TOE |
				   (26 << PCR_N4_SL_SHIFT)),
	.pcr_nmi_disable	= PCR_N4_PICNPT,
};

static unsigned long perf_hsvc_group;
static unsigned long perf_hsvc_major;
static unsigned long perf_hsvc_minor;

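/* Negotiate the chip-specific performance counter hypervisor API
 * group before any of the sun4v counter registers are touched.
 */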
static int __init register_perf_hsvc(void)
{
	unsigned long hverror;

	if (tlb_type == hypervisor) {
		switch (sun4v_chip_type) {
		case SUN4V_CHIP_NIAGARA1:
			perf_hsvc_group = HV_GRP_NIAG_PERF;
			break;

		case SUN4V_CHIP_NIAGARA2:
			perf_hsvc_group = HV_GRP_N2_CPU;
			break;

		case SUN4V_CHIP_NIAGARA3:
			perf_hsvc_group = HV_GRP_KT_CPU;
			break;

		case SUN4V_CHIP_NIAGARA4:
			perf_hsvc_group = HV_GRP_VT_CPU;
			break;

		case SUN4V_CHIP_NIAGARA5:
			perf_hsvc_group = HV_GRP_T5_CPU;
			break;

		case SUN4V_CHIP_SPARC_M7:
			perf_hsvc_group = HV_GRP_M7_PERF;
			break;

		default:
			return -ENODEV;
		}

		perf_hsvc_major = 1;
		perf_hsvc_minor = 0;
		hverror = sun4v_hvapi_register(perf_hsvc_group,
					       perf_hsvc_major,
					       &perf_hsvc_minor);
		if (hverror) {
			pr_err("perfmon: Could not register hvapi(0x%lx).\n",
			       hverror);
			return -ENODEV;
		}
	}
	return 0;
}

static void __init unregister_perf_hsvc(void)
{
	if (tlb_type != hypervisor)
		return;
	sun4v_hvapi_unregister(perf_hsvc_group);
}

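/* Select the pcr_ops implementation matching the sun4v chip type. */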
static int __init setup_sun4v_pcr_ops(void)
{
	int ret = 0;

	switch (sun4v_chip_type) {
	case SUN4V_CHIP_NIAGARA1:
	case SUN4V_CHIP_NIAGARA2:
	case SUN4V_CHIP_NIAGARA3:
		pcr_ops = &n2_pcr_ops;
		break;

	case SUN4V_CHIP_NIAGARA4:
		pcr_ops = &n4_pcr_ops;
		break;

	case SUN4V_CHIP_NIAGARA5:
		pcr_ops = &n5_pcr_ops;
		break;

	case SUN4V_CHIP_SPARC_M7:
		pcr_ops = &m7_pcr_ops;
		break;

	default:
		ret = -ENODEV;
		break;
	}

	return ret;
}

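/* Probe the cpu type, install the matching pcr_ops, and then let the
 * NMI watchdog initialize on top of them.
 */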
int __init pcr_arch_init(void)
{
	int err = register_perf_hsvc();

	if (err)
		return err;

	switch (tlb_type) {
	case hypervisor:
		err = setup_sun4v_pcr_ops();
		if (err)
			goto out_unregister;
		break;

	case cheetah:
	case cheetah_plus:
		pcr_ops = &direct_pcr_ops;
		break;

	case spitfire:
		/* UltraSPARC-I/II and derivatives lack a profile
		 * counter overflow interrupt, so we can't make use
		 * of their hardware currently.
		 */
		/* fallthrough */
	default:
		err = -ENODEV;
		goto out_unregister;
	}

	return nmi_init();

out_unregister:
	unregister_perf_hsvc();
	return err;
}