/**
 * @file arch/alpha/oprofile/op_model_ev67.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author Richard Henderson <rth@twiddle.net>
 * @author Falk Hueffner <falk@debian.org>
 */

#include <linux/oprofile.h>
#include <linux/smp.h>
#include <asm/ptrace.h>

#include "op_impl.h"

/* Compute all of the registers in preparation for enabling profiling.  */

static void
ev67_reg_setup(struct op_register_config *reg,
	       struct op_counter_config *ctr,
	       struct op_system_config *sys)
{
	unsigned long ctl, reset, need_reset, i;

	/* Select desired events.  */
	ctl = 1UL << 4;		/* Enable ProfileMe mode.  */

	/* The event numbers are chosen so we can use them directly if
	   PCTR1 is enabled.  */
	if (ctr[1].enabled) {
		ctl |= (ctr[1].event & 3) << 2;
	} else {
		if (ctr[0].event == 0) /* cycles */
			ctl |= 1UL << 2;
	}
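	/* At this point CTL carries the MUX selection written to the
	   hardware in ev67_cpu_setup(): bit 4 enables ProfileMe mode,
	   and bits 2-3 hold the event selection (ctr[1].event when
	   PCTR1 is enabled, otherwise the "cycles" encoding for PCTR0).  */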
	reg->mux_select = ctl;

	/* Select logging options.  */
	/* ??? Need to come up with some mechanism to trace only
	   selected processes.  EV67 does not have a mechanism to
	   select kernel or user mode only.  For now, enable always.  */
	reg->proc_mode = 0;

	/* EV67 cannot change the width of the counters as with the
	   other implementations.  But fortunately, we can write to
	   the counters and set the value such that it will overflow
	   at the right time.  */
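	/* The code below treats each counter as 20 bits wide (it wraps
	   at 0x100000), so preloading it with 0x100000 - count makes it
	   overflow after COUNT events; e.g. for count = 0x10000 the
	   preload is 0xF0000.  PCTR0's preload field is packed starting
	   at bit 28, PCTR1's at bit 6.  */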
	reset = need_reset = 0;
	for (i = 0; i < 2; ++i) {
		unsigned long count = ctr[i].count;
		if (!ctr[i].enabled)
			continue;

		if (count > 0x100000)
			count = 0x100000;
		ctr[i].count = count;
		reset |= (0x100000 - count) << (i ? 6 : 28);
		if (count != 0x100000)
			need_reset |= 1 << i;
	}
	reg->reset_values = reset;
	reg->need_reset = need_reset;
}

/* Program all of the registers in preparation for enabling profiling.  */

static void
ev67_cpu_setup (void *x)
{
	struct op_register_config *reg = x;

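	/* wrperfmon() PALcall subfunctions as used by this driver:
	   2 writes the event MUX selection, 3 the process-mode
	   restrictions, and 6 the counter preload values.  The low two
	   bits of the value passed to subfunction 6 appear to select
	   which of PCTR0/PCTR1 gets written ("| 3" writes both;
	   cf. ev67_reset_ctr below, which writes just one).  */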
	wrperfmon(2, reg->mux_select);
	wrperfmon(3, reg->proc_mode);
	wrperfmon(6, reg->reset_values | 3);
}

/* CTR is a counter for which the user has requested an interrupt count
   in between one of the widths selectable in hardware.  Reset the count
   for CTR to the value stored in REG->RESET_VALUES.  */

static void
ev67_reset_ctr(struct op_register_config *reg, unsigned long ctr)
{
	wrperfmon(6, reg->reset_values | (1 << ctr));
}

/* ProfileMe conditions which will show up as counters.  We can also
   detect the following, but it seems unlikely that anybody is
   interested in counting them:
    * Reset
    * MT_FPCR (write to floating point control register)
    * Arithmetic trap
    * Dstream Fault
    * Machine Check (ECC fault, etc.)
    * OPCDEC (illegal opcode)
    * Floating point disabled
    * Differentiate between DTB single/double misses and 3 or 4 level
      page tables
    * Istream access violation
    * Interrupt
    * Icache Parity Error
    * Instruction killed (nop, trapb)

   Unfortunately, there seems to be no way to detect Dcache and Bcache
   misses; the latter could be approximated by making the counter
   count Bcache misses, but that is not precise.

   We model this as 20 counters:
    * PCTR0
    * PCTR1
    * 9 ProfileMe events, induced by PCTR0
    * 9 ProfileMe events, induced by PCTR1
*/

enum profileme_counters {
	PM_STALLED,		/* Stalled for at least one cycle
				   between the fetch and map stages  */
	PM_TAKEN,		/* Conditional branch taken  */
	PM_MISPREDICT,		/* Branch caused mispredict trap  */
	PM_ITB_MISS,		/* ITB miss  */
	PM_DTB_MISS,		/* DTB miss  */
	PM_REPLAY,		/* Replay trap  */
	PM_LOAD_STORE,		/* Load-store order trap  */
	PM_ICACHE_MISS,		/* Icache miss  */
	PM_UNALIGNED,		/* Unaligned Load/Store  */
	PM_NUM_COUNTERS
};

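/* Attribute a ProfileMe event to its "fake" counter slot.  Counters 0
   and 1 are PCTR0/PCTR1 themselves; slots 2..10 hold the nine ProfileMe
   events induced by PCTR0, and slots 11..19 the same events induced by
   PCTR1, giving the 20 counters advertised in op_model_ev67 below.  */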
static inline void
op_add_pm(unsigned long pc, int kern, unsigned long counter,
	  struct op_counter_config *ctr, unsigned long event)
{
	unsigned long fake_counter = 2 + event;
	if (counter == 1)
		fake_counter += PM_NUM_COUNTERS;
	if (ctr[fake_counter].enabled)
		oprofile_add_pc(pc, kern, fake_counter);
}

static void
ev67_handle_interrupt(unsigned long which, struct pt_regs *regs,
		      struct op_counter_config *ctr)
{
	unsigned long pmpc, pctr_ctl;
	int kern = !user_mode(regs);
	int mispredict = 0;
	union {
		unsigned long v;
		struct {
			unsigned reserved:	30; /*  0-29 */
			unsigned overcount:	3;  /* 30-32 */
			unsigned icache_miss:	1;  /*    33 */
			unsigned trap_type:	4;  /* 34-37 */
			unsigned load_store:	1;  /*    38 */
			unsigned trap:		1;  /*    39 */
			unsigned mispredict:	1;  /*    40 */
		} fields;
	} i_stat;

	enum trap_types {
		TRAP_REPLAY,
		TRAP_INVALID0,
		TRAP_DTB_DOUBLE_MISS_3,
		TRAP_DTB_DOUBLE_MISS_4,
		TRAP_FP_DISABLED,
		TRAP_UNALIGNED,
		TRAP_DTB_SINGLE_MISS,
		TRAP_DSTREAM_FAULT,
		TRAP_OPCDEC,
		TRAP_INVALID1,
		TRAP_MACHINE_CHECK,
		TRAP_INVALID2,
		TRAP_ARITHMETIC,
		TRAP_INVALID3,
		TRAP_MT_FPCR,
		TRAP_RESET
	};

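	/* Read back the ProfileMe state via wrperfmon(): subfunction 9
	   returns the PMPC (address of the profiled instruction), 8 the
	   I_STAT trap information decoded through the union above, and
	   5 the PCTR_CTL flags tested at the end of this handler.  */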
	pmpc = wrperfmon(9, 0);
	/* ??? Don't know how to handle physical-mode PALcode address.  */
	if (pmpc & 1)
		return;
	pmpc &= ~2;		/* clear reserved bit  */

	i_stat.v = wrperfmon(8, 0);
	if (i_stat.fields.trap) {
		switch (i_stat.fields.trap_type) {
		case TRAP_INVALID1:
		case TRAP_INVALID2:
		case TRAP_INVALID3:
			/* Pipeline redirection occurred.  PMPC points
			   to PALcode.  Recognize ITB miss by PALcode
			   offset address, and get actual PC from
			   EXC_ADDR.  */
			oprofile_add_pc(regs->pc, kern, which);
			if ((pmpc & ((1 << 15) - 1)) == 581)
				op_add_pm(regs->pc, kern, which,
					  ctr, PM_ITB_MISS);
			/* Most other bit and counter values will be
			   those for the first instruction in the
			   fault handler, so we're done.  */
			return;
		case TRAP_REPLAY:
			op_add_pm(pmpc, kern, which, ctr,
				  (i_stat.fields.load_store
				   ? PM_LOAD_STORE : PM_REPLAY));
			break;
		case TRAP_DTB_DOUBLE_MISS_3:
		case TRAP_DTB_DOUBLE_MISS_4:
		case TRAP_DTB_SINGLE_MISS:
			op_add_pm(pmpc, kern, which, ctr, PM_DTB_MISS);
			break;
		case TRAP_UNALIGNED:
			op_add_pm(pmpc, kern, which, ctr, PM_UNALIGNED);
			break;
		case TRAP_INVALID0:
		case TRAP_FP_DISABLED:
		case TRAP_DSTREAM_FAULT:
		case TRAP_OPCDEC:
		case TRAP_MACHINE_CHECK:
		case TRAP_ARITHMETIC:
		case TRAP_MT_FPCR:
		case TRAP_RESET:
			break;
		}

		/* ??? JSR/JMP/RET/COR or HW_JSR/HW_JMP/HW_RET/HW_COR
		   mispredicts do not set this bit but can be
		   recognized by the presence of one of these
		   instructions at the PMPC location with bit 39
		   set.  */
		if (i_stat.fields.mispredict) {
			mispredict = 1;
			op_add_pm(pmpc, kern, which, ctr, PM_MISPREDICT);
		}
	}

	oprofile_add_pc(pmpc, kern, which);

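	/* PCTR_CTL carries per-instruction ProfileMe flags: this handler
	   uses bit 27 as the "stalled" indication and bit 0 as the
	   conditional-branch-taken indication.  */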
	pctr_ctl = wrperfmon(5, 0);
	if (pctr_ctl & (1UL << 27))
		op_add_pm(pmpc, kern, which, ctr, PM_STALLED);

	/* Unfortunately, TAK is undefined on mispredicted branches.
	   ??? It is also undefined for non-cbranch insns, should
	   check that.  */
	if (!mispredict && pctr_ctl & (1UL << 0))
		op_add_pm(pmpc, kern, which, ctr, PM_TAKEN);
}

struct op_axp_model op_model_ev67 = {
	.reg_setup		= ev67_reg_setup,
	.cpu_setup		= ev67_cpu_setup,
	.reset_ctr		= ev67_reset_ctr,
	.handle_interrupt	= ev67_handle_interrupt,
	.cpu_type		= "alpha/ev67",
	.num_counters		= 20,
	.can_set_proc_mode	= 0,
};