// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Freescale Embedded oprofile support, based on ppc64 oprofile support
 * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * Copyright (c) 2004, 2010 Freescale Semiconductor, Inc
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala <galak@kernel.crashing.org>
 */

#include <linux/oprofile.h>
#include <linux/smp.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/reg_fsl_emb.h>
#include <asm/page.h>
#include <asm/pmc.h>
#include <asm/oprofile_impl.h>

static unsigned long reset_value[OP_MAX_COUNTER];

static int num_counters;
static int oprofile_running;

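/* Read the local control A register (PMLCAn) for the given counter. */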
static inline u32 get_pmlca(int ctr)
{
	u32 pmlca;

	switch (ctr) {
	case 0:
		pmlca = mfpmr(PMRN_PMLCA0);
		break;
	case 1:
		pmlca = mfpmr(PMRN_PMLCA1);
		break;
	case 2:
		pmlca = mfpmr(PMRN_PMLCA2);
		break;
	case 3:
		pmlca = mfpmr(PMRN_PMLCA3);
		break;
	case 4:
		pmlca = mfpmr(PMRN_PMLCA4);
		break;
	case 5:
		pmlca = mfpmr(PMRN_PMLCA5);
		break;
	default:
		panic("Bad ctr number\n");
	}

	return pmlca;
}

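/* Write the local control A register (PMLCAn) for the given counter. */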
static inline void set_pmlca(int ctr, u32 pmlca)
{
	switch (ctr) {
	case 0:
		mtpmr(PMRN_PMLCA0, pmlca);
		break;
	case 1:
		mtpmr(PMRN_PMLCA1, pmlca);
		break;
	case 2:
		mtpmr(PMRN_PMLCA2, pmlca);
		break;
	case 3:
		mtpmr(PMRN_PMLCA3, pmlca);
		break;
	case 4:
		mtpmr(PMRN_PMLCA4, pmlca);
		break;
	case 5:
		mtpmr(PMRN_PMLCA5, pmlca);
		break;
	default:
		panic("Bad ctr number\n");
	}
}

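/* Read the current value of performance monitor counter i. */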
static inline unsigned int ctr_read(unsigned int i)
{
	switch (i) {
	case 0:
		return mfpmr(PMRN_PMC0);
	case 1:
		return mfpmr(PMRN_PMC1);
	case 2:
		return mfpmr(PMRN_PMC2);
	case 3:
		return mfpmr(PMRN_PMC3);
	case 4:
		return mfpmr(PMRN_PMC4);
	case 5:
		return mfpmr(PMRN_PMC5);
	default:
		return 0;
	}
}

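/* Write a new value into performance monitor counter i. */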
static inline void ctr_write(unsigned int i, unsigned int val)
{
	switch (i) {
	case 0:
		mtpmr(PMRN_PMC0, val);
		break;
	case 1:
		mtpmr(PMRN_PMC1, val);
		break;
	case 2:
		mtpmr(PMRN_PMC2, val);
		break;
	case 3:
		mtpmr(PMRN_PMC3, val);
		break;
	case 4:
		mtpmr(PMRN_PMC4, val);
		break;
	case 5:
		mtpmr(PMRN_PMC5, val);
		break;
	default:
		break;
	}
}

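/*
 * Stop a counter by setting every freeze bit in its PMLCA register
 * (frozen unconditionally, in supervisor and user state, and for both
 * values of the mark bit) and clearing its PMLCB register.
 */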
static void init_pmc_stop(int ctr)
{
	u32 pmlca = (PMLCA_FC | PMLCA_FCS | PMLCA_FCU |
		     PMLCA_FCM1 | PMLCA_FCM0);
	u32 pmlcb = 0;

	switch (ctr) {
	case 0:
		mtpmr(PMRN_PMLCA0, pmlca);
		mtpmr(PMRN_PMLCB0, pmlcb);
		break;
	case 1:
		mtpmr(PMRN_PMLCA1, pmlca);
		mtpmr(PMRN_PMLCB1, pmlcb);
		break;
	case 2:
		mtpmr(PMRN_PMLCA2, pmlca);
		mtpmr(PMRN_PMLCB2, pmlcb);
		break;
	case 3:
		mtpmr(PMRN_PMLCA3, pmlca);
		mtpmr(PMRN_PMLCB3, pmlcb);
		break;
	case 4:
		mtpmr(PMRN_PMLCA4, pmlca);
		mtpmr(PMRN_PMLCB4, pmlcb);
		break;
	case 5:
		mtpmr(PMRN_PMLCA5, pmlca);
		mtpmr(PMRN_PMLCB5, pmlcb);
		break;
	default:
		panic("Bad ctr number!\n");
	}
}

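/* Program the event selector field of a counter's PMLCA register. */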
static void set_pmc_event(int ctr, int event)
{
	u32 pmlca;

	pmlca = get_pmlca(ctr);

	pmlca = (pmlca & ~PMLCA_EVENT_MASK) |
		((event << PMLCA_EVENT_SHIFT) &
		 PMLCA_EVENT_MASK);

	set_pmlca(ctr, pmlca);
}

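/*
 * Enable or inhibit counting in user and supervisor state by clearing
 * or setting the FCU (freeze in user state) and FCS (freeze in
 * supervisor state) bits.
 */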
static void set_pmc_user_kernel(int ctr, int user, int kernel)
{
	u32 pmlca;

	pmlca = get_pmlca(ctr);

	if (user)
		pmlca &= ~PMLCA_FCU;
	else
		pmlca |= PMLCA_FCU;

	if (kernel)
		pmlca &= ~PMLCA_FCS;
	else
		pmlca |= PMLCA_FCS;

	set_pmlca(ctr, pmlca);
}

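/*
 * Control counting relative to the mark (MSR[PMM]) bit: mark0 allows
 * counting while the mark bit is clear, mark1 while it is set.
 */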
static void set_pmc_marked(int ctr, int mark0, int mark1)
{
	u32 pmlca = get_pmlca(ctr);

	if (mark0)
		pmlca &= ~PMLCA_FCM0;
	else
		pmlca |= PMLCA_FCM0;

	if (mark1)
		pmlca &= ~PMLCA_FCM1;
	else
		pmlca |= PMLCA_FCM1;

	set_pmlca(ctr, pmlca);
}

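/*
 * Unfreeze a single counter, optionally enabling its overflow
 * (condition) interrupt via PMLCA[CE].
 */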
static void pmc_start_ctr(int ctr, int enable)
{
	u32 pmlca = get_pmlca(ctr);

	pmlca &= ~PMLCA_FC;

	if (enable)
		pmlca |= PMLCA_CE;
	else
		pmlca &= ~PMLCA_CE;

	set_pmlca(ctr, pmlca);
}

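/*
 * Unfreeze all counters (clear PMGC0[FAC]), have them freeze again on
 * an enabled overflow condition (FCECE), and enable or disable the
 * performance monitor interrupt.
 */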
static void pmc_start_ctrs(int enable)
{
	u32 pmgc0 = mfpmr(PMRN_PMGC0);

	pmgc0 &= ~PMGC0_FAC;
	pmgc0 |= PMGC0_FCECE;

	if (enable)
		pmgc0 |= PMGC0_PMIE;
	else
		pmgc0 &= ~PMGC0_PMIE;

	mtpmr(PMRN_PMGC0, pmgc0);
}

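/* Freeze all counters and disable the performance monitor interrupt. */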
static void pmc_stop_ctrs(void)
{
	u32 pmgc0 = mfpmr(PMRN_PMGC0);

	pmgc0 |= PMGC0_FAC;

	pmgc0 &= ~(PMGC0_PMIE | PMGC0_FCECE);

	mtpmr(PMRN_PMGC0, pmgc0);
}

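/*
 * Per-CPU setup: freeze everything, then program each counter's event
 * and user/kernel filtering from the oprofile configuration.
 */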
static int fsl_emb_cpu_setup(struct op_counter_config *ctr)
{
	int i;

	/* freeze all counters */
	pmc_stop_ctrs();

	for (i = 0; i < num_counters; i++) {
		init_pmc_stop(i);

		set_pmc_event(i, ctr[i].event);

		set_pmc_user_kernel(i, ctr[i].user, ctr[i].kernel);
	}

	return 0;
}

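/*
 * Record the number of counters and precompute each counter's reset
 * value.  For example, a requested count of 100000 gives a reset value
 * of 0x80000000 - 100000, so the counter overflows into bit 31 (and
 * raises an interrupt) after 100000 events.
 */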
static int fsl_emb_reg_setup(struct op_counter_config *ctr,
			     struct op_system_config *sys,
			     int num_ctrs)
{
	int i;

	num_counters = num_ctrs;

	/* Our counters count up, and "count" refers to how many
	 * events remain before the next interrupt; we interrupt on
	 * overflow.  So we calculate the starting value which will
	 * give us "count" events until overflow.  The events
	 * themselves are programmed per-cpu in fsl_emb_cpu_setup(). */
	for (i = 0; i < num_counters; ++i)
		reset_value[i] = 0x80000000UL - ctr[i].count;

	return 0;
}

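/*
 * Start profiling: arm each enabled counter with its reset value and
 * mark-bit filtering, park the disabled ones, then unfreeze the
 * counters and enable the performance monitor interrupt.
 */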
static int fsl_emb_start(struct op_counter_config *ctr)
{
	int i;

	mtmsr(mfmsr() | MSR_PMM);

	for (i = 0; i < num_counters; ++i) {
		if (ctr[i].enabled) {
			ctr_write(i, reset_value[i]);
			/* Set each enabled counter to only
			 * count when the Mark bit is *not* set */
			set_pmc_marked(i, 1, 0);
			pmc_start_ctr(i, 1);
		} else {
			ctr_write(i, 0);

			/* Set the ctr to be stopped */
			pmc_start_ctr(i, 0);
		}
	}

	/* Clear the freeze bit, and enable the interrupt.
	 * The counters won't actually start until the rfi clears
	 * the PMM bit */
	pmc_start_ctrs(1);

	oprofile_running = 1;

	pr_debug("start on cpu %d, pmgc0 %x\n", smp_processor_id(),
		 mfpmr(PMRN_PMGC0));

	return 0;
}

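/* Stop profiling by freezing all counters. */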
static void fsl_emb_stop(void)
{
	/* freeze counters */
	pmc_stop_ctrs();

	oprofile_running = 0;

	pr_debug("stop on cpu %d, pmgc0 %x\n", smp_processor_id(),
		 mfpmr(PMRN_PMGC0));

	mb();
}

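/*
 * Performance monitor exception handler: record a sample for each
 * overflowed (negative) counter, re-arm it, and restart the counters.
 */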
static void fsl_emb_handle_interrupt(struct pt_regs *regs,
				     struct op_counter_config *ctr)
{
	unsigned long pc;
	int is_kernel;
	int val;
	int i;

	pc = regs->nip;
	is_kernel = is_kernel_addr(pc);

	for (i = 0; i < num_counters; ++i) {
		val = ctr_read(i);
		if (val < 0) {
			if (oprofile_running && ctr[i].enabled) {
				oprofile_add_ext_sample(pc, regs, i, is_kernel);
				ctr_write(i, reset_value[i]);
			} else {
				ctr_write(i, 0);
			}
		}
	}

	/* The freeze bit was set by the interrupt.  Clear the freeze
	 * bit, and reenable the interrupt.  The counters won't actually
	 * start until the rfi clears the PMM bit.  The PMM bit should
	 * not be set until after the interrupt is cleared to avoid it
	 * getting lost in some hypervisor environments.
	 */
	mtmsr(mfmsr() | MSR_PMM);
	pmc_start_ctrs(1);
}

struct op_powerpc_model op_model_fsl_emb = {
	.reg_setup = fsl_emb_reg_setup,
	.cpu_setup = fsl_emb_cpu_setup,
	.start = fsl_emb_start,
	.stop = fsl_emb_stop,
	.handle_interrupt = fsl_emb_handle_interrupt,
};