/*
 * palinfo.c
 *
 * Prints processor specific information reported by PAL.
 * This code is based on specification of PAL as of the
 * Intel IA-64 Architecture Software Developer's Manual v1.0.
 *
 *
 * Copyright (C) 2000-2001, 2003 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2004 Intel Corporation
 *	Ashok Raj <ashok.raj@intel.com>
 *
 * 05/26/2000	S.Eranian	initial release
 * 08/21/2000	S.Eranian	updated to July 2000 PAL specs
 * 02/05/2001	S.Eranian	fixed module support
 * 10/23/2001	S.Eranian	updated pal_perf_mon_info bug fixes
 * 03/24/2004	Ashok Raj	updated to work with CPU Hotplug
 * 10/26/2006	Russ Anderson	updated processor features to rev 2.2 spec
 */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/efi.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>

#include <asm/pal.h>
#include <asm/sal.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/smp.h>

MODULE_AUTHOR("Stephane Eranian <eranian@hpl.hp.com>");
MODULE_DESCRIPTION("/proc interface to IA-64 PAL");
MODULE_LICENSE("GPL");

#define PALINFO_VERSION "0.5"

typedef int (*palinfo_func_t)(struct seq_file *);

typedef struct {
        const char              *name;          /* name of the proc entry */
        palinfo_func_t          proc_read;      /* function to call for reading */
        struct proc_dir_entry   *entry;         /* registered entry (removal) */
} palinfo_entry_t;


/*
 * A bunch of string arrays for pretty printing
 */

static const char *cache_types[] = {
        "",                     /* not used */
        "Instruction",
        "Data",
        "Data/Instruction"      /* unified */
};

static const char *cache_mattrib[] = {
        "WriteThrough",
        "WriteBack",
        "",             /* reserved */
        ""              /* reserved */
};

static const char *cache_st_hints[] = {
        "Temporal, level 1",
        "Reserved",
        "Reserved",
        "Non-temporal, all levels",
        "Reserved",
        "Reserved",
        "Reserved",
        "Reserved"
};

static const char *cache_ld_hints[] = {
        "Temporal, level 1",
        "Non-temporal, level 1",
        "Reserved",
        "Non-temporal, all levels",
        "Reserved",
        "Reserved",
        "Reserved",
        "Reserved"
};

static const char *rse_hints[] = {
        "enforced lazy",
        "eager stores",
        "eager loads",
        "eager loads and stores"
};

#define RSE_HINTS_COUNT ARRAY_SIZE(rse_hints)

static const char *mem_attrib[] = {
        "WB",           /* 000 */
        "SW",           /* 001 */
        "010",          /* 010 */
        "011",          /* 011 */
        "UC",           /* 100 */
        "UCE",          /* 101 */
        "WC",           /* 110 */
        "NaTPage"       /* 111 */
};

/*
 * Takes a 64-bit vector and produces a string such that
 * if bit n is set then 2^n in clear text is generated. The adjustment
 * to the right unit (K, M, G, T) is also done.
 *
 * Input:
 *      - the seq_file to print to
 *      - a 64-bit vector
 * Output:
 *      - the textual representation, emitted to the seq_file
 */
static void bitvector_process(struct seq_file *m, u64 vector)
{
        int i, j;
        static const char *units[] = { "", "K", "M", "G", "T" };

        for (i = 0, j = 0; i < 64; i++, j = i/10) {
                if (vector & 0x1)
                        seq_printf(m, "%d%s ", 1 << (i - j*10), units[j]);
                vector >>= 1;
        }
}
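
/*
 * Illustrative example (added, not in the original): for a vector with bits
 * 12 and 16 set, the loop above emits "4K 64K " -- bit 12 is 2^12 = 4K and
 * bit 16 is 2^16 = 64K, with units[] supplying the K/M/G/T suffix.
 */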

/*
 * Takes a 64-bit vector and produces a string such that
 * if bit n is set then register n is present. The function
 * takes into account consecutive registers and prints out ranges.
 *
 * Input:
 *      - the seq_file to print to
 *      - a pointer to the register bit vector(s)
 *      - the number of bits to scan
 * Output:
 *      - the list of register ranges, emitted to the seq_file
 */
static void bitregister_process(struct seq_file *m, u64 *reg_info, int max)
{
        int i, begin, skip = 0;
        u64 value = reg_info[0];

        value >>= i = begin = ffs(value) - 1;

        for (; i < max; i++) {

                if (i != 0 && (i % 64) == 0)
                        value = *++reg_info;

                if ((value & 0x1) == 0 && skip == 0) {
                        if (begin <= i - 2)
                                seq_printf(m, "%d-%d ", begin, i - 1);
                        else
                                seq_printf(m, "%d ", i - 1);
                        skip = 1;
                        begin = -1;
                } else if ((value & 0x1) && skip == 1) {
                        skip = 0;
                        begin = i;
                }
                value >>= 1;
        }
        if (begin > -1) {
                if (begin < 127)
                        seq_printf(m, "%d-127", begin);
                else
                        seq_puts(m, "127");
        }
}
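
/*
 * Illustrative example (added, not in the original), assuming max == 128 as
 * in register_info(): with bits 0-31 of reg_info[0] and bit 0 of reg_info[1]
 * set, the routine above prints "0-31 64 "; a fully populated map prints
 * "0-127".
 */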

static int power_info(struct seq_file *m)
{
        s64 status;
        u64 halt_info_buffer[8];
        pal_power_mgmt_info_u_t *halt_info = (pal_power_mgmt_info_u_t *)halt_info_buffer;
        int i;

        status = ia64_pal_halt_info(halt_info);
        if (status != 0)
                return 0;

        for (i = 0; i < 8; i++) {
                if (halt_info[i].pal_power_mgmt_info_s.im == 1) {
                        seq_printf(m,
                                   "Power level %d:\n"
                                   "\tentry_latency : %d cycles\n"
                                   "\texit_latency : %d cycles\n"
                                   "\tpower consumption : %d mW\n"
                                   "\tCache+TLB coherency : %s\n", i,
                                   halt_info[i].pal_power_mgmt_info_s.entry_latency,
                                   halt_info[i].pal_power_mgmt_info_s.exit_latency,
                                   halt_info[i].pal_power_mgmt_info_s.power_consumption,
                                   halt_info[i].pal_power_mgmt_info_s.co ? "Yes" : "No");
                } else {
                        seq_printf(m, "Power level %d: not implemented\n", i);
                }
        }
        return 0;
}
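
/*
 * Illustrative output shape (values are hypothetical, not read from PAL):
 *
 *   Power level 1:
 *       entry_latency : 10 cycles
 *       exit_latency : 10 cycles
 *       power consumption : 500 mW
 *       Cache+TLB coherency : Yes
 */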

static int cache_info(struct seq_file *m)
{
        unsigned long i, levels, unique_caches;
        pal_cache_config_info_t cci;
        int j, k;
        long status;

        if ((status = ia64_pal_cache_summary(&levels, &unique_caches)) != 0) {
                printk(KERN_ERR "ia64_pal_cache_summary=%ld\n", status);
                return 0;
        }

        seq_printf(m, "Cache levels : %ld\nUnique caches : %ld\n\n",
                   levels, unique_caches);

        for (i = 0; i < levels; i++) {
                for (j = 2; j > 0; j--) {
                        /* even without unification some level may not be present */
                        if ((status = ia64_pal_cache_config_info(i, j, &cci)) != 0)
                                continue;

                        seq_printf(m,
                                   "%s Cache level %lu:\n"
                                   "\tSize : %u bytes\n"
                                   "\tAttributes : ",
                                   cache_types[j+cci.pcci_unified], i+1,
                                   cci.pcci_cache_size);

                        if (cci.pcci_unified)
                                seq_puts(m, "Unified ");

                        seq_printf(m, "%s\n", cache_mattrib[cci.pcci_cache_attr]);

                        seq_printf(m,
                                   "\tAssociativity : %d\n"
                                   "\tLine size : %d bytes\n"
                                   "\tStride : %d bytes\n",
                                   cci.pcci_assoc,
                                   1<<cci.pcci_line_size,
                                   1<<cci.pcci_stride);
                        if (j == 1)
                                seq_puts(m, "\tStore latency : N/A\n");
                        else
                                seq_printf(m, "\tStore latency : %d cycle(s)\n",
                                           cci.pcci_st_latency);

                        seq_printf(m,
                                   "\tLoad latency : %d cycle(s)\n"
                                   "\tStore hints : ", cci.pcci_ld_latency);

                        for (k = 0; k < 8; k++) {
                                if (cci.pcci_st_hints & 0x1)
                                        seq_printf(m, "[%s]", cache_st_hints[k]);
                                cci.pcci_st_hints >>= 1;
                        }
                        seq_puts(m, "\n\tLoad hints : ");

                        for (k = 0; k < 8; k++) {
                                if (cci.pcci_ld_hints & 0x1)
                                        seq_printf(m, "[%s]", cache_ld_hints[k]);
                                cci.pcci_ld_hints >>= 1;
                        }
                        seq_printf(m,
                                   "\n\tAlias boundary : %d byte(s)\n"
                                   "\tTag LSB : %d\n"
                                   "\tTag MSB : %d\n",
                                   1<<cci.pcci_alias_boundary, cci.pcci_tag_lsb,
                                   cci.pcci_tag_msb);

                        /* when unified, data (j=2) is enough */
                        if (cci.pcci_unified)
                                break;
                }
        }
        return 0;
}


static int vm_info(struct seq_file *m)
{
        u64 tr_pages = 0, vw_pages = 0, tc_pages;
        u64 attrib;
        pal_vm_info_1_u_t vm_info_1;
        pal_vm_info_2_u_t vm_info_2;
        pal_tc_info_u_t tc_info;
        ia64_ptce_info_t ptce;
        const char *sep;
        int i, j;
        long status;

        if ((status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2)) != 0) {
                printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);
        } else {

                seq_printf(m,
                           "Physical Address Space : %d bits\n"
                           "Virtual Address Space : %d bits\n"
                           "Protection Key Registers(PKR) : %d\n"
                           "Implemented bits in PKR.key : %d\n"
                           "Hash Tag ID : 0x%x\n"
                           "Size of RR.rid : %d\n"
                           "Max Purges : ",
                           vm_info_1.pal_vm_info_1_s.phys_add_size,
                           vm_info_2.pal_vm_info_2_s.impl_va_msb+1,
                           vm_info_1.pal_vm_info_1_s.max_pkr+1,
                           vm_info_1.pal_vm_info_1_s.key_size,
                           vm_info_1.pal_vm_info_1_s.hash_tag_id,
                           vm_info_2.pal_vm_info_2_s.rid_size);
                if (vm_info_2.pal_vm_info_2_s.max_purges == PAL_MAX_PURGES)
                        seq_puts(m, "unlimited\n");
                else
                        seq_printf(m, "%d\n",
                                   vm_info_2.pal_vm_info_2_s.max_purges ?
                                   vm_info_2.pal_vm_info_2_s.max_purges : 1);
        }

        if (ia64_pal_mem_attrib(&attrib) == 0) {
                seq_puts(m, "Supported memory attributes : ");
                sep = "";
                for (i = 0; i < 8; i++) {
                        if (attrib & (1 << i)) {
                                seq_printf(m, "%s%s", sep, mem_attrib[i]);
                                sep = ", ";
                        }
                }
                seq_putc(m, '\n');
        }

        if ((status = ia64_pal_vm_page_size(&tr_pages, &vw_pages)) != 0) {
                printk(KERN_ERR "ia64_pal_vm_page_size=%ld\n", status);
        } else {

                seq_printf(m,
                           "\nTLB walker : %simplemented\n"
                           "Number of DTR : %d\n"
                           "Number of ITR : %d\n"
                           "TLB insertable page sizes : ",
                           vm_info_1.pal_vm_info_1_s.vw ? "" : "not ",
                           vm_info_1.pal_vm_info_1_s.max_dtr_entry+1,
                           vm_info_1.pal_vm_info_1_s.max_itr_entry+1);

                bitvector_process(m, tr_pages);

                seq_puts(m, "\nTLB purgeable page sizes : ");

                bitvector_process(m, vw_pages);
        }

        if ((status = ia64_get_ptce(&ptce)) != 0) {
                printk(KERN_ERR "ia64_get_ptce=%ld\n", status);
        } else {
                seq_printf(m,
                           "\nPurge base address : 0x%016lx\n"
                           "Purge outer loop count : %d\n"
                           "Purge inner loop count : %d\n"
                           "Purge outer loop stride : %d\n"
                           "Purge inner loop stride : %d\n",
                           ptce.base, ptce.count[0], ptce.count[1],
                           ptce.stride[0], ptce.stride[1]);

                seq_printf(m,
                           "TC Levels : %d\n"
                           "Unique TC(s) : %d\n",
                           vm_info_1.pal_vm_info_1_s.num_tc_levels,
                           vm_info_1.pal_vm_info_1_s.max_unique_tcs);

                for (i = 0; i < vm_info_1.pal_vm_info_1_s.num_tc_levels; i++) {
                        for (j = 2; j > 0; j--) {
                                tc_pages = 0; /* just in case */

                                /* even without unification, some levels may not be present */
                                if ((status = ia64_pal_vm_info(i, j, &tc_info, &tc_pages)) != 0)
                                        continue;

                                seq_printf(m,
                                           "\n%s Translation Cache Level %d:\n"
                                           "\tHash sets : %d\n"
                                           "\tAssociativity : %d\n"
                                           "\tNumber of entries : %d\n"
                                           "\tFlags : ",
                                           cache_types[j+tc_info.tc_unified], i+1,
                                           tc_info.tc_num_sets,
                                           tc_info.tc_associativity,
                                           tc_info.tc_num_entries);

                                if (tc_info.tc_pf)
                                        seq_puts(m, "PreferredPageSizeOptimized ");
                                if (tc_info.tc_unified)
                                        seq_puts(m, "Unified ");
                                if (tc_info.tc_reduce_tr)
                                        seq_puts(m, "TCReduction");

                                seq_puts(m, "\n\tSupported page sizes: ");

                                bitvector_process(m, tc_pages);

                                /* when unified, data (j=2) is enough */
                                if (tc_info.tc_unified)
                                        break;
                        }
                }
        }

        seq_putc(m, '\n');
        return 0;
}


static int register_info(struct seq_file *m)
{
        u64 reg_info[2];
        u64 info;
        unsigned long phys_stacked;
        pal_hints_u_t hints;
        unsigned long iregs, dregs;
        static const char * const info_type[] = {
                "Implemented AR(s)",
                "AR(s) with read side-effects",
                "Implemented CR(s)",
                "CR(s) with read side-effects",
        };

        for (info = 0; info < 4; info++) {
                if (ia64_pal_register_info(info, &reg_info[0], &reg_info[1]) != 0)
                        return 0;
                seq_printf(m, "%-32s : ", info_type[info]);
                bitregister_process(m, reg_info, 128);
                seq_putc(m, '\n');
        }

        if (ia64_pal_rse_info(&phys_stacked, &hints) == 0)
                seq_printf(m,
                           "RSE stacked physical registers : %ld\n"
                           "RSE load/store hints : %ld (%s)\n",
                           phys_stacked, hints.ph_data,
                           hints.ph_data < RSE_HINTS_COUNT ? rse_hints[hints.ph_data] : "(??)");

        if (ia64_pal_debug_info(&iregs, &dregs))
                return 0;

        seq_printf(m,
                   "Instruction debug register pairs : %ld\n"
                   "Data debug register pairs : %ld\n", iregs, dregs);

        return 0;
}

static const char *const proc_features_0[] = {  /* Feature set 0 */
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        "Unimplemented instruction address fault",
        "INIT, PMI, and LINT pins",
        "Simple unimplemented instr addresses",
        "Variable P-state performance",
        "Virtual machine features implemented",
        "XIP,XPSR,XFS implemented",
        "XR1-XR3 implemented",
        "Disable dynamic predicate prediction",
        "Disable processor physical number",
        "Disable dynamic data cache prefetch",
        "Disable dynamic inst cache prefetch",
        "Disable dynamic branch prediction",
        NULL, NULL, NULL, NULL,
        "Disable P-states",
        "Enable MCA on Data Poisoning",
        "Enable vmsw instruction",
        "Enable extern environmental notification",
        "Disable BINIT on processor time-out",
        "Disable dynamic power management (DPM)",
        "Disable coherency",
        "Disable cache",
        "Enable CMCI promotion",
        "Enable MCA to BINIT promotion",
        "Enable MCA promotion",
        "Enable BERR promotion"
};

static const char *const proc_features_16[] = { /* Feature set 16 */
        "Disable ETM",
        "Enable ETM",
        "Enable MCA on half-way timer",
        "Enable snoop WC",
        NULL,
        "Enable Fast Deferral",
        "Disable MCA on memory aliasing",
        "Enable RSB",
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        "DP system processor",
        "Low Voltage",
        "HT supported",
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL
};

static const char *const *const proc_features[] = {
        proc_features_0,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        proc_features_16,
        NULL, NULL, NULL, NULL,
};


static void feature_set_info(struct seq_file *m, u64 avail, u64 status, u64 control,
                             unsigned long set)
{
        const char *const *vf, *const *v;
        int i;

        vf = v = proc_features[set];
        for (i = 0; i < 64; i++, avail >>= 1, status >>= 1, control >>= 1) {

                if (!(control))         /* No remaining bits set */
                        break;
                if (!(avail & 0x1))     /* Print only bits that are available */
                        continue;
                if (vf)
                        v = vf + i;
                if (v && *v) {
                        seq_printf(m, "%-40s : %s %s\n", *v,
                                   avail & 0x1 ? (status & 0x1 ?
                                                  "On " : "Off") : "",
                                   avail & 0x1 ? (control & 0x1 ?
                                                  "Ctrl" : "NoCtrl") : "");
                } else {
                        seq_printf(m, "Feature set %2ld bit %2d\t\t\t"
                                   " : %s %s\n",
                                   set, i,
                                   avail & 0x1 ? (status & 0x1 ?
                                                  "On " : "Off") : "",
                                   avail & 0x1 ? (control & 0x1 ?
                                                  "Ctrl" : "NoCtrl") : "");
                }
        }
}
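
/*
 * Illustrative line produced by the format above (feature name and state
 * are hypothetical): "Disable dynamic branch prediction        : Off Ctrl"
 */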

static int processor_info(struct seq_file *m)
{
        u64 avail = 1, status = 1, control = 1, feature_set = 0;
        s64 ret;

        do {
                ret = ia64_pal_proc_get_features(&avail, &status, &control,
                                                 feature_set);
                if (ret < 0)
                        return 0;

                if (ret == 1) {
                        feature_set++;
                        continue;
                }

                feature_set_info(m, avail, status, control, feature_set);
                feature_set++;
        } while (1);

        return 0;
}
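
/*
 * Added note: the loop above walks feature sets 0, 1, 2, ... until
 * PAL_PROC_GET_FEATURES returns a negative status (no further sets).
 * A return value of 1 appears to mean the queried set is not implemented
 * on this processor, so it is skipped and the scan moves on to the next set.
 */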

static const char *const bus_features[] = {
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL,
        "Request Bus Parking",
        "Bus Lock Mask",
        "Enable Half Transfer",
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        "Enable Cache Line Repl. Shared",
        "Enable Cache Line Repl. Exclusive",
        "Disable Transaction Queuing",
        "Disable Response Error Checking",
        "Disable Bus Error Checking",
        "Disable Bus Requester Internal Error Signalling",
        "Disable Bus Requester Error Signalling",
        "Disable Bus Initialization Event Checking",
        "Disable Bus Initialization Event Signalling",
        "Disable Bus Address Error Checking",
        "Disable Bus Address Error Signalling",
        "Disable Bus Data Error Checking"
};


static int bus_info(struct seq_file *m)
{
        const char *const *v = bus_features;
        pal_bus_features_u_t av, st, ct;
        u64 avail, status, control;
        int i;
        s64 ret;

        if ((ret = ia64_pal_bus_get_features(&av, &st, &ct)) != 0)
                return 0;

        avail = av.pal_bus_features_val;
        status = st.pal_bus_features_val;
        control = ct.pal_bus_features_val;

        for (i = 0; i < 64; i++, v++, avail >>= 1, status >>= 1, control >>= 1) {
                if (!*v)
                        continue;
                seq_printf(m, "%-48s : %s%s %s\n", *v,
                           avail & 0x1 ? "" : "NotImpl",
                           avail & 0x1 ? (status & 0x1 ? "On" : "Off") : "",
                           avail & 0x1 ? (control & 0x1 ? "Ctrl" : "NoCtrl") : "");
        }
        return 0;
}

static int version_info(struct seq_file *m)
{
        pal_version_u_t min_ver, cur_ver;

        if (ia64_pal_version(&min_ver, &cur_ver) != 0)
                return 0;

        seq_printf(m,
                   "PAL_vendor : 0x%02x (min=0x%02x)\n"
                   "PAL_A : %02x.%02x (min=%02x.%02x)\n"
                   "PAL_B : %02x.%02x (min=%02x.%02x)\n",
                   cur_ver.pal_version_s.pv_pal_vendor,
                   min_ver.pal_version_s.pv_pal_vendor,
                   cur_ver.pal_version_s.pv_pal_a_model,
                   cur_ver.pal_version_s.pv_pal_a_rev,
                   min_ver.pal_version_s.pv_pal_a_model,
                   min_ver.pal_version_s.pv_pal_a_rev,
                   cur_ver.pal_version_s.pv_pal_b_model,
                   cur_ver.pal_version_s.pv_pal_b_rev,
                   min_ver.pal_version_s.pv_pal_b_model,
                   min_ver.pal_version_s.pv_pal_b_rev);
        return 0;
}

static int perfmon_info(struct seq_file *m)
{
        u64 pm_buffer[16];
        pal_perf_mon_info_u_t pm_info;

        if (ia64_pal_perf_mon_info(pm_buffer, &pm_info) != 0)
                return 0;

        seq_printf(m,
                   "PMC/PMD pairs : %d\n"
                   "Counter width : %d bits\n"
                   "Cycle event number : %d\n"
                   "Retired event number : %d\n"
                   "Implemented PMC : ",
                   pm_info.pal_perf_mon_info_s.generic,
                   pm_info.pal_perf_mon_info_s.width,
                   pm_info.pal_perf_mon_info_s.cycles,
                   pm_info.pal_perf_mon_info_s.retired);

        bitregister_process(m, pm_buffer, 256);
        seq_puts(m, "\nImplemented PMD : ");
        bitregister_process(m, pm_buffer+4, 256);
        seq_puts(m, "\nCycles count capable : ");
        bitregister_process(m, pm_buffer+8, 256);
        seq_puts(m, "\nRetired bundles count capable : ");

#ifdef CONFIG_ITANIUM
        /*
         * PAL_PERF_MON_INFO reports that only PMC4 can be used to count CPU_CYCLES
         * which is wrong, both PMC4 and PMD5 support it.
         */
        if (pm_buffer[12] == 0x10)
                pm_buffer[12] = 0x30;
#endif

        bitregister_process(m, pm_buffer+12, 256);
        seq_putc(m, '\n');
        return 0;
}

static int frequency_info(struct seq_file *m)
{
        struct pal_freq_ratio proc, itc, bus;
        unsigned long base;

        if (ia64_pal_freq_base(&base) == -1)
                seq_puts(m, "Output clock : not implemented\n");
        else
                seq_printf(m, "Output clock : %ld ticks/s\n", base);

        if (ia64_pal_freq_ratios(&proc, &bus, &itc) != 0)
                return 0;

        seq_printf(m,
                   "Processor/Clock ratio : %d/%d\n"
                   "Bus/Clock ratio : %d/%d\n"
                   "ITC/Clock ratio : %d/%d\n",
                   proc.num, proc.den, bus.num, bus.den, itc.num, itc.den);
        return 0;
}
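
/*
 * Added note: the ratios above are relative to the base (output) clock
 * reported by PAL_FREQ_BASE, so the processor frequency, for instance,
 * works out to roughly base * proc.num / proc.den ticks per second.
 */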

static int tr_info(struct seq_file *m)
{
        long status;
        pal_tr_valid_u_t tr_valid;
        u64 tr_buffer[4];
        pal_vm_info_1_u_t vm_info_1;
        pal_vm_info_2_u_t vm_info_2;
        unsigned long i, j;
        unsigned long max[3], pgm;
        struct ifa_reg {
                unsigned long valid:1;
                unsigned long ig:11;
                unsigned long vpn:52;
        } *ifa_reg;
        struct itir_reg {
                unsigned long rv1:2;
                unsigned long ps:6;
                unsigned long key:24;
                unsigned long rv2:32;
        } *itir_reg;
        struct gr_reg {
                unsigned long p:1;
                unsigned long rv1:1;
                unsigned long ma:3;
                unsigned long a:1;
                unsigned long d:1;
                unsigned long pl:2;
                unsigned long ar:3;
                unsigned long ppn:38;
                unsigned long rv2:2;
                unsigned long ed:1;
                unsigned long ig:11;
        } *gr_reg;
        struct rid_reg {
                unsigned long ig1:1;
                unsigned long rv1:1;
                unsigned long ig2:6;
                unsigned long rid:24;
                unsigned long rv2:32;
        } *rid_reg;

        if ((status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2)) != 0) {
                printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);
                return 0;
        }
        max[0] = vm_info_1.pal_vm_info_1_s.max_itr_entry+1;
        max[1] = vm_info_1.pal_vm_info_1_s.max_dtr_entry+1;

        for (i = 0; i < 2; i++) {
                for (j = 0; j < max[i]; j++) {

                        status = ia64_pal_tr_read(j, i, tr_buffer, &tr_valid);
                        if (status != 0) {
                                printk(KERN_ERR "palinfo: pal call failed on tr[%lu:%lu]=%ld\n",
                                       i, j, status);
                                continue;
                        }

                        ifa_reg = (struct ifa_reg *)&tr_buffer[2];

                        if (ifa_reg->valid == 0)
                                continue;

                        gr_reg = (struct gr_reg *)tr_buffer;
                        itir_reg = (struct itir_reg *)&tr_buffer[1];
                        rid_reg = (struct rid_reg *)&tr_buffer[3];

                        pgm = -1 << (itir_reg->ps - 12);
                        seq_printf(m,
                                   "%cTR%lu: av=%d pv=%d dv=%d mv=%d\n"
                                   "\tppn : 0x%lx\n"
                                   "\tvpn : 0x%lx\n"
                                   "\tps : ",
                                   "ID"[i], j,
                                   tr_valid.pal_tr_valid_s.access_rights_valid,
                                   tr_valid.pal_tr_valid_s.priv_level_valid,
                                   tr_valid.pal_tr_valid_s.dirty_bit_valid,
                                   tr_valid.pal_tr_valid_s.mem_attr_valid,
                                   (gr_reg->ppn & pgm) << 12, (ifa_reg->vpn & pgm) << 12);

                        bitvector_process(m, 1 << itir_reg->ps);

                        seq_printf(m,
                                   "\n\tpl : %d\n"
                                   "\tar : %d\n"
                                   "\trid : %x\n"
                                   "\tp : %d\n"
                                   "\tma : %d\n"
                                   "\td : %d\n",
                                   gr_reg->pl, gr_reg->ar, rid_reg->rid, gr_reg->p, gr_reg->ma,
                                   gr_reg->d);
                }
        }
        return 0;
}



/*
 * List {name,function} pairs for every entry in /proc/pal/cpu*
 */
static const palinfo_entry_t palinfo_entries[] = {
        { "version_info",       version_info, },
        { "vm_info",            vm_info, },
        { "cache_info",         cache_info, },
        { "power_info",         power_info, },
        { "register_info",      register_info, },
        { "processor_info",     processor_info, },
        { "perfmon_info",       perfmon_info, },
        { "frequency_info",     frequency_info, },
        { "bus_info",           bus_info },
        { "tr_info",            tr_info, }
};

#define NR_PALINFO_ENTRIES      (int) ARRAY_SIZE(palinfo_entries)

static struct proc_dir_entry *palinfo_dir;

/*
 * This data structure is used to pass which CPU and which function is being
 * requested. It must fit in a 64-bit quantity to be passed to the proc
 * callback routine.
 *
 * In SMP mode, when we get a request for another CPU, we must call that
 * other CPU using IPI and wait for the result before returning.
 */
typedef union {
        u64 value;
        struct {
                unsigned req_cpu: 32;   /* for which CPU this info is */
                unsigned func_id: 32;   /* which function is requested */
        } pal_func_cpu;
} pal_func_cpu_u_t;

#define req_cpu pal_func_cpu.req_cpu
#define func_id pal_func_cpu.func_id
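
/*
 * Illustrative use of the union above (hypothetical values): packing
 * req_cpu = 3 and func_id = 2 gives a single 64-bit f.value, which is what
 * proc_create_single_data() stores below; proc_palinfo_show() later recovers
 * both fields by treating &m->private as a pal_func_cpu_u_t again.
 */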

#ifdef CONFIG_SMP

/*
 * used to hold information about final function to call
 */
typedef struct {
        palinfo_func_t  func;   /* pointer to function to call */
        struct seq_file *m;     /* buffer to store results */
        int             ret;    /* return value from call */
} palinfo_smp_data_t;


/*
 * this function does the actual final call and is called
 * from the smp code, i.e., this is the palinfo callback routine
 */
static void
palinfo_smp_call(void *info)
{
        palinfo_smp_data_t *data = (palinfo_smp_data_t *)info;
        data->ret = (*data->func)(data->m);
}

/*
 * function called to trigger the IPI, we need to access a remote CPU
 * Return:
 *      0 : error or nothing to output
 *      otherwise the return value of the remote handler
 */
static
int palinfo_handle_smp(struct seq_file *m, pal_func_cpu_u_t *f)
{
        palinfo_smp_data_t ptr;
        int ret;

        ptr.func = palinfo_entries[f->func_id].proc_read;
        ptr.m = m;
        ptr.ret = 0; /* just in case */


        /* will send IPI to other CPU and wait for completion of remote call */
        if ((ret = smp_call_function_single(f->req_cpu, palinfo_smp_call, &ptr, 1))) {
                printk(KERN_ERR "palinfo: remote CPU call from %d to %d on function %d: "
                       "error %d\n", smp_processor_id(), f->req_cpu, f->func_id, ret);
                return 0;
        }
        return ptr.ret;
}
#else /* ! CONFIG_SMP */
static
int palinfo_handle_smp(struct seq_file *m, pal_func_cpu_u_t *f)
{
        printk(KERN_ERR "palinfo: should not be called with non SMP kernel\n");
        return 0;
}
#endif /* CONFIG_SMP */

/*
 * Entry point routine: all calls go through this function
 */
static int proc_palinfo_show(struct seq_file *m, void *v)
{
        pal_func_cpu_u_t *f = (pal_func_cpu_u_t *)&m->private;

        /*
         * in SMP mode, we may need to call another CPU to get correct
         * information. PAL, by definition, is processor specific
         */
        if (f->req_cpu == get_cpu())
                (*palinfo_entries[f->func_id].proc_read)(m);
        else
                palinfo_handle_smp(m, f);

        put_cpu();
        return 0;
}

static int palinfo_add_proc(unsigned int cpu)
{
        pal_func_cpu_u_t f;
        struct proc_dir_entry *cpu_dir;
        int j;
        char cpustr[3+4+1];     /* cpu numbers are up to 4095 on itanic */

        sprintf(cpustr, "cpu%d", cpu);

        cpu_dir = proc_mkdir(cpustr, palinfo_dir);
        if (!cpu_dir)
                return -EINVAL;

        f.req_cpu = cpu;

        for (j = 0; j < NR_PALINFO_ENTRIES; j++) {
                f.func_id = j;
                proc_create_single_data(palinfo_entries[j].name, 0, cpu_dir,
                                        proc_palinfo_show, (void *)f.value);
        }
        return 0;
}
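
/*
 * Resulting layout (illustrative): each online CPU gets a directory such as
 * /proc/pal/cpu0 with one file per palinfo_entries[] slot, so for example
 * "cat /proc/pal/cpu0/version_info" ends up invoking version_info() on CPU 0.
 */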

static int palinfo_del_proc(unsigned int hcpu)
{
        char cpustr[3+4+1];     /* cpu numbers are up to 4095 on itanic */

        sprintf(cpustr, "cpu%d", hcpu);
        remove_proc_subtree(cpustr, palinfo_dir);
        return 0;
}

static enum cpuhp_state hp_online;

static int __init palinfo_init(void)
{
        int i = 0;

        printk(KERN_INFO "PAL Information Facility v%s\n", PALINFO_VERSION);
        palinfo_dir = proc_mkdir("pal", NULL);
        if (!palinfo_dir)
                return -ENOMEM;

        i = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "ia64/palinfo:online",
                              palinfo_add_proc, palinfo_del_proc);
        if (i < 0) {
                remove_proc_subtree("pal", NULL);
                return i;
        }
        hp_online = i;
        return 0;
}

static void __exit palinfo_exit(void)
{
        cpuhp_remove_state(hp_online);
        remove_proc_subtree("pal", NULL);
}

module_init(palinfo_init);
module_exit(palinfo_exit);