1 /*
2 * Copyright (c) 2012 Intel Corporation. All rights reserved.
3 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
4 * Copyright (c) 2006 PathScale, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34 #include <linux/ctype.h>
35
36 #include "qib.h"
37 #include "qib_mad.h"
38
39 /* start of per-port functions */
40 /*
41 * Get/Set heartbeat enable. OR of 1=enabled, 2=auto
42 */
show_hrtbt_enb(struct qib_pportdata * ppd,char * buf)43 static ssize_t show_hrtbt_enb(struct qib_pportdata *ppd, char *buf)
44 {
45 struct qib_devdata *dd = ppd->dd;
46 int ret;
47
48 ret = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_HRTBT);
49 ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
50 return ret;
51 }
52
store_hrtbt_enb(struct qib_pportdata * ppd,const char * buf,size_t count)53 static ssize_t store_hrtbt_enb(struct qib_pportdata *ppd, const char *buf,
54 size_t count)
55 {
56 struct qib_devdata *dd = ppd->dd;
57 int ret;
58 u16 val;
59
60 ret = kstrtou16(buf, 0, &val);
61 if (ret) {
62 qib_dev_err(dd, "attempt to set invalid Heartbeat enable\n");
63 return ret;
64 }
65
66 /*
67 * Set the "intentional" heartbeat enable per either of
68 * "Enable" and "Auto", as these are normally set together.
69 * This bit is consulted when leaving loopback mode,
70 * because entering loopback mode overrides it and automatically
71 * disables heartbeat.
72 */
73 ret = dd->f_set_ib_cfg(ppd, QIB_IB_CFG_HRTBT, val);
74 return ret < 0 ? ret : count;
75 }
76
store_loopback(struct qib_pportdata * ppd,const char * buf,size_t count)77 static ssize_t store_loopback(struct qib_pportdata *ppd, const char *buf,
78 size_t count)
79 {
80 struct qib_devdata *dd = ppd->dd;
81 int ret = count, r;
82
83 r = dd->f_set_ib_loopback(ppd, buf);
84 if (r < 0)
85 ret = r;
86
87 return ret;
88 }
89
store_led_override(struct qib_pportdata * ppd,const char * buf,size_t count)90 static ssize_t store_led_override(struct qib_pportdata *ppd, const char *buf,
91 size_t count)
92 {
93 struct qib_devdata *dd = ppd->dd;
94 int ret;
95 u16 val;
96
97 ret = kstrtou16(buf, 0, &val);
98 if (ret) {
99 qib_dev_err(dd, "attempt to set invalid LED override\n");
100 return ret;
101 }
102
103 qib_set_led_override(ppd, val);
104 return count;
105 }
106
show_status(struct qib_pportdata * ppd,char * buf)107 static ssize_t show_status(struct qib_pportdata *ppd, char *buf)
108 {
109 ssize_t ret;
110
111 if (!ppd->statusp)
112 ret = -EINVAL;
113 else
114 ret = scnprintf(buf, PAGE_SIZE, "0x%llx\n",
115 (unsigned long long) *(ppd->statusp));
116 return ret;
117 }
118
/*
 * For userland compatibility, these offsets must remain fixed.
 * They are strings for QIB_STATUS_*
 * Index i names status bit i — show_status_str() walks the status word
 * one bit at a time.  Empty strings are reserved/unnamed bits.
 */
static const char * const qib_status_str[] = {
	"Initted",
	"",
	"",
	"",
	"",
	"Present",
	"IB_link_up",
	"IB_configured",
	"",
	"Fatal_Hardware_Error",
	NULL,
};
136
/*
 * Render the port status word as a space-separated list of the names in
 * qib_status_str[] for each set bit.  Returns -EINVAL if the status page
 * has not been set up; otherwise the length of the generated string.
 */
static ssize_t show_status_str(struct qib_pportdata *ppd, char *buf)
{
	int i, any;
	u64 s;
	ssize_t ret;

	if (!ppd->statusp) {
		ret = -EINVAL;
		goto bail;
	}

	s = *(ppd->statusp);
	*buf = '\0';
	/* Stop early once all remaining bits are clear (s == 0). */
	for (any = i = 0; s && qib_status_str[i]; i++) {
		if (s & 1) {
			/* if overflow */
			if (any && strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE)
				break;
			if (strlcat(buf, qib_status_str[i], PAGE_SIZE) >=
					PAGE_SIZE)
				break;
			any = 1;	/* at least one name emitted */
		}
		s >>= 1;
	}
	if (any)
		strlcat(buf, "\n", PAGE_SIZE);

	ret = strlen(buf);

bail:
	return ret;
}
170
171 /* end of per-port functions */
172
/*
 * Start of per-port file structures and support code
 * Because we are fitting into other infrastructure, we have to supply the
 * full set of kobject/sysfs_ops structures and routines.
 */
/* Declare a named qib_port_attr with the given mode and show/store ops. */
#define QIB_PORT_ATTR(name, mode, show, store) \
	static struct qib_port_attr qib_port_attr_##name = \
		__ATTR(name, mode, show, store)

struct qib_port_attr {
	struct attribute attr;
	ssize_t (*show)(struct qib_pportdata *, char *);
	ssize_t (*store)(struct qib_pportdata *, const char *, size_t);
};

QIB_PORT_ATTR(loopback, S_IWUSR, NULL, store_loopback);
QIB_PORT_ATTR(led_override, S_IWUSR, NULL, store_led_override);
QIB_PORT_ATTR(hrtbt_enable, S_IWUSR | S_IRUGO, show_hrtbt_enb,
	      store_hrtbt_enb);
QIB_PORT_ATTR(status, S_IRUGO, show_status, NULL);
QIB_PORT_ATTR(status_str, S_IRUGO, show_status_str, NULL);

/* Attributes attached to each port's "linkcontrol" kobject. */
static struct attribute *port_default_attributes[] = {
	&qib_port_attr_loopback.attr,
	&qib_port_attr_led_override.attr,
	&qib_port_attr_hrtbt_enable.attr,
	&qib_port_attr_status.attr,
	&qib_port_attr_status_str.attr,
	NULL
};
203
204 /*
205 * Start of per-port congestion control structures and support code
206 */
207
208 /*
209 * Congestion control table size followed by table entries
210 */
read_cc_table_bin(struct file * filp,struct kobject * kobj,struct bin_attribute * bin_attr,char * buf,loff_t pos,size_t count)211 static ssize_t read_cc_table_bin(struct file *filp, struct kobject *kobj,
212 struct bin_attribute *bin_attr,
213 char *buf, loff_t pos, size_t count)
214 {
215 int ret;
216 struct qib_pportdata *ppd =
217 container_of(kobj, struct qib_pportdata, pport_cc_kobj);
218
219 if (!qib_cc_table_size || !ppd->ccti_entries_shadow)
220 return -EINVAL;
221
222 ret = ppd->total_cct_entry * sizeof(struct ib_cc_table_entry_shadow)
223 + sizeof(__be16);
224
225 if (pos > ret)
226 return -EINVAL;
227
228 if (count > ret - pos)
229 count = ret - pos;
230
231 if (!count)
232 return count;
233
234 spin_lock(&ppd->cc_shadow_lock);
235 memcpy(buf, ppd->ccti_entries_shadow, count);
236 spin_unlock(&ppd->cc_shadow_lock);
237
238 return count;
239 }
240
/*
 * Kobject release callback for all per-port kobjects: the backing memory
 * is owned and freed by qib_free_devdata(), so there is nothing to do.
 */
static void qib_port_release(struct kobject *kobj)
{
	/* nothing to do since memory is freed by qib_free_devdata() */
}

static struct kobj_type qib_port_cc_ktype = {
	.release = qib_port_release,
};

/* Read-only binary file exposing the congestion control table. */
static const struct bin_attribute cc_table_bin_attr = {
	.attr = {.name = "cc_table_bin", .mode = 0444},
	.read = read_cc_table_bin,
	.size = PAGE_SIZE,
};
255
256 /*
257 * Congestion settings: port control, control map and an array of 16
258 * entries for the congestion entries - increase, timer, event log
259 * trigger threshold and the minimum injection rate delay.
260 */
read_cc_setting_bin(struct file * filp,struct kobject * kobj,struct bin_attribute * bin_attr,char * buf,loff_t pos,size_t count)261 static ssize_t read_cc_setting_bin(struct file *filp, struct kobject *kobj,
262 struct bin_attribute *bin_attr,
263 char *buf, loff_t pos, size_t count)
264 {
265 int ret;
266 struct qib_pportdata *ppd =
267 container_of(kobj, struct qib_pportdata, pport_cc_kobj);
268
269 if (!qib_cc_table_size || !ppd->congestion_entries_shadow)
270 return -EINVAL;
271
272 ret = sizeof(struct ib_cc_congestion_setting_attr_shadow);
273
274 if (pos > ret)
275 return -EINVAL;
276 if (count > ret - pos)
277 count = ret - pos;
278
279 if (!count)
280 return count;
281
282 spin_lock(&ppd->cc_shadow_lock);
283 memcpy(buf, ppd->congestion_entries_shadow, count);
284 spin_unlock(&ppd->cc_shadow_lock);
285
286 return count;
287 }
288
/* Read-only binary file exposing the congestion control settings. */
static const struct bin_attribute cc_setting_bin_attr = {
	.attr = {.name = "cc_settings_bin", .mode = 0444},
	.read = read_cc_setting_bin,
	.size = PAGE_SIZE,
};
294
295
qib_portattr_show(struct kobject * kobj,struct attribute * attr,char * buf)296 static ssize_t qib_portattr_show(struct kobject *kobj,
297 struct attribute *attr, char *buf)
298 {
299 struct qib_port_attr *pattr =
300 container_of(attr, struct qib_port_attr, attr);
301 struct qib_pportdata *ppd =
302 container_of(kobj, struct qib_pportdata, pport_kobj);
303
304 return pattr->show(ppd, buf);
305 }
306
qib_portattr_store(struct kobject * kobj,struct attribute * attr,const char * buf,size_t len)307 static ssize_t qib_portattr_store(struct kobject *kobj,
308 struct attribute *attr, const char *buf, size_t len)
309 {
310 struct qib_port_attr *pattr =
311 container_of(attr, struct qib_port_attr, attr);
312 struct qib_pportdata *ppd =
313 container_of(kobj, struct qib_pportdata, pport_kobj);
314
315 return pattr->store(ppd, buf, len);
316 }
317
318
/* sysfs_ops dispatching to the show/store members of qib_port_attr. */
static const struct sysfs_ops qib_port_ops = {
	.show = qib_portattr_show,
	.store = qib_portattr_store,
};

/* Ktype for the per-port "linkcontrol" kobject. */
static struct kobj_type qib_port_ktype = {
	.release = qib_port_release,
	.sysfs_ops = &qib_port_ops,
	.default_attrs = port_default_attributes
};
329
/* Start sl2vl */

/*
 * Declare a read-only sl2vl attribute named after its service level N;
 * sl2vl_attr_show() uses .sl to index the port's SL-to-VL table.
 */
#define QIB_SL2VL_ATTR(N) \
	static struct qib_sl2vl_attr qib_sl2vl_attr_##N = { \
		.attr = { .name = __stringify(N), .mode = 0444 }, \
		.sl = N \
	}

struct qib_sl2vl_attr {
	struct attribute attr;
	int sl;		/* service level this file reports */
};

QIB_SL2VL_ATTR(0);
QIB_SL2VL_ATTR(1);
QIB_SL2VL_ATTR(2);
QIB_SL2VL_ATTR(3);
QIB_SL2VL_ATTR(4);
QIB_SL2VL_ATTR(5);
QIB_SL2VL_ATTR(6);
QIB_SL2VL_ATTR(7);
QIB_SL2VL_ATTR(8);
QIB_SL2VL_ATTR(9);
QIB_SL2VL_ATTR(10);
QIB_SL2VL_ATTR(11);
QIB_SL2VL_ATTR(12);
QIB_SL2VL_ATTR(13);
QIB_SL2VL_ATTR(14);
QIB_SL2VL_ATTR(15);

/* One file per service level under the "sl2vl" kobject. */
static struct attribute *sl2vl_default_attributes[] = {
	&qib_sl2vl_attr_0.attr,
	&qib_sl2vl_attr_1.attr,
	&qib_sl2vl_attr_2.attr,
	&qib_sl2vl_attr_3.attr,
	&qib_sl2vl_attr_4.attr,
	&qib_sl2vl_attr_5.attr,
	&qib_sl2vl_attr_6.attr,
	&qib_sl2vl_attr_7.attr,
	&qib_sl2vl_attr_8.attr,
	&qib_sl2vl_attr_9.attr,
	&qib_sl2vl_attr_10.attr,
	&qib_sl2vl_attr_11.attr,
	&qib_sl2vl_attr_12.attr,
	&qib_sl2vl_attr_13.attr,
	&qib_sl2vl_attr_14.attr,
	&qib_sl2vl_attr_15.attr,
	NULL
};
379
sl2vl_attr_show(struct kobject * kobj,struct attribute * attr,char * buf)380 static ssize_t sl2vl_attr_show(struct kobject *kobj, struct attribute *attr,
381 char *buf)
382 {
383 struct qib_sl2vl_attr *sattr =
384 container_of(attr, struct qib_sl2vl_attr, attr);
385 struct qib_pportdata *ppd =
386 container_of(kobj, struct qib_pportdata, sl2vl_kobj);
387 struct qib_ibport *qibp = &ppd->ibport_data;
388
389 return sprintf(buf, "%u\n", qibp->sl_to_vl[sattr->sl]);
390 }
391
/* sl2vl files are read-only; no store op is provided. */
static const struct sysfs_ops qib_sl2vl_ops = {
	.show = sl2vl_attr_show,
};

/* Ktype for the per-port "sl2vl" kobject. */
static struct kobj_type qib_sl2vl_ktype = {
	.release = qib_port_release,
	.sysfs_ops = &qib_sl2vl_ops,
	.default_attrs = sl2vl_default_attributes
};
401
402 /* End sl2vl */
403
/* Start diag_counters */

/*
 * Declare a diag counter attribute whose value is a plain u32 stored at
 * offsetof(struct qib_ibport, rvp.n_<N>).
 */
#define QIB_DIAGC_ATTR(N) \
	static struct qib_diagc_attr qib_diagc_attr_##N = { \
		.attr = { .name = __stringify(N), .mode = 0664 }, \
		.counter = offsetof(struct qib_ibport, rvp.n_##N) \
	}

/*
 * Per-CPU counters record a "zero" baseline at rvp.z_<N>; reads sum the
 * per-CPU values and subtract that baseline (see READ_PER_CPU_CNTR).
 */
#define QIB_DIAGC_ATTR_PER_CPU(N) \
	static struct qib_diagc_attr qib_diagc_attr_##N = { \
		.attr = { .name = __stringify(N), .mode = 0664 }, \
		.counter = offsetof(struct qib_ibport, rvp.z_##N) \
	}

struct qib_diagc_attr {
	struct attribute attr;
	size_t counter;	/* byte offset of the counter in struct qib_ibport */
};
422
/* Per-CPU counters: handled specially in diagc_attr_show()/store(). */
QIB_DIAGC_ATTR_PER_CPU(rc_acks);
QIB_DIAGC_ATTR_PER_CPU(rc_qacks);
QIB_DIAGC_ATTR_PER_CPU(rc_delayed_comp);

/* Plain u32 counters read/written directly at their struct offset. */
QIB_DIAGC_ATTR(rc_resends);
QIB_DIAGC_ATTR(seq_naks);
QIB_DIAGC_ATTR(rdma_seq);
QIB_DIAGC_ATTR(rnr_naks);
QIB_DIAGC_ATTR(other_naks);
QIB_DIAGC_ATTR(rc_timeouts);
QIB_DIAGC_ATTR(loop_pkts);
QIB_DIAGC_ATTR(pkt_drops);
QIB_DIAGC_ATTR(dmawait);
QIB_DIAGC_ATTR(unaligned);
QIB_DIAGC_ATTR(rc_dupreq);
QIB_DIAGC_ATTR(rc_seqnak);
QIB_DIAGC_ATTR(rc_crwaits);

/* Files under the per-port "diag_counters" kobject. */
static struct attribute *diagc_default_attributes[] = {
	&qib_diagc_attr_rc_resends.attr,
	&qib_diagc_attr_rc_acks.attr,
	&qib_diagc_attr_rc_qacks.attr,
	&qib_diagc_attr_rc_delayed_comp.attr,
	&qib_diagc_attr_seq_naks.attr,
	&qib_diagc_attr_rdma_seq.attr,
	&qib_diagc_attr_rnr_naks.attr,
	&qib_diagc_attr_other_naks.attr,
	&qib_diagc_attr_rc_timeouts.attr,
	&qib_diagc_attr_loop_pkts.attr,
	&qib_diagc_attr_pkt_drops.attr,
	&qib_diagc_attr_dmawait.attr,
	&qib_diagc_attr_unaligned.attr,
	&qib_diagc_attr_rc_dupreq.attr,
	&qib_diagc_attr_rc_seqnak.attr,
	&qib_diagc_attr_rc_crwaits.attr,
	NULL
};
460
/* Sum a per-CPU u64 counter over all possible CPUs. */
static u64 get_all_cpu_total(u64 __percpu *cntr)
{
	u64 total = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		total += *per_cpu_ptr(cntr, cpu);

	return total;
}
470
/*
 * Generate write_per_cpu_<cntr>(): writing 0 "zeroes" the counter by
 * snapshotting the current per-CPU total into the z_<cntr> baseline (the
 * live per-CPU values are never reset); any other value is rejected with
 * an error message.
 */
#define def_write_per_cpu(cntr) \
static void write_per_cpu_##cntr(struct qib_pportdata *ppd, u32 data) \
{ \
	struct qib_devdata *dd = ppd->dd; \
	struct qib_ibport *qibp = &ppd->ibport_data; \
	/* A write can only zero the counter */ \
	if (data == 0) \
		qibp->rvp.z_##cntr = get_all_cpu_total(qibp->rvp.cntr); \
	else \
		qib_dev_err(dd, "Per CPU cntrs can only be zeroed"); \
}

def_write_per_cpu(rc_acks)
def_write_per_cpu(rc_qacks)
def_write_per_cpu(rc_delayed_comp)

/* Effective counter value = live per-CPU total minus the zero baseline. */
#define READ_PER_CPU_CNTR(cntr) (get_all_cpu_total(qibp->rvp.cntr) - \
			qibp->rvp.z_##cntr)
489
490 static ssize_t diagc_attr_show(struct kobject *kobj, struct attribute *attr,
491 char *buf)
492 {
493 struct qib_diagc_attr *dattr =
494 container_of(attr, struct qib_diagc_attr, attr);
495 struct qib_pportdata *ppd =
496 container_of(kobj, struct qib_pportdata, diagc_kobj);
497 struct qib_ibport *qibp = &ppd->ibport_data;
498
499 if (!strncmp(dattr->attr.name, "rc_acks", 7))
500 return sprintf(buf, "%llu\n", READ_PER_CPU_CNTR(rc_acks));
501 else if (!strncmp(dattr->attr.name, "rc_qacks", 8))
502 return sprintf(buf, "%llu\n", READ_PER_CPU_CNTR(rc_qacks));
503 else if (!strncmp(dattr->attr.name, "rc_delayed_comp", 15))
504 return sprintf(buf, "%llu\n",
505 READ_PER_CPU_CNTR(rc_delayed_comp));
506 else
507 return sprintf(buf, "%u\n",
508 *(u32 *)((char *)qibp + dattr->counter));
509 }
510
/*
 * Store handler for diag counters.  Per-CPU counters can only be zeroed
 * (delegated to the write_per_cpu_* helpers, which reject non-zero
 * values); plain u32 counters accept any value and are written directly
 * at their offset within struct qib_ibport.
 */
static ssize_t diagc_attr_store(struct kobject *kobj, struct attribute *attr,
				const char *buf, size_t size)
{
	struct qib_diagc_attr *dattr =
		container_of(attr, struct qib_diagc_attr, attr);
	struct qib_pportdata *ppd =
		container_of(kobj, struct qib_pportdata, diagc_kobj);
	struct qib_ibport *qibp = &ppd->ibport_data;
	u32 val;
	int ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	/* Dispatch on the attribute name, same scheme as diagc_attr_show(). */
	if (!strncmp(dattr->attr.name, "rc_acks", 7))
		write_per_cpu_rc_acks(ppd, val);
	else if (!strncmp(dattr->attr.name, "rc_qacks", 8))
		write_per_cpu_rc_qacks(ppd, val);
	else if (!strncmp(dattr->attr.name, "rc_delayed_comp", 15))
		write_per_cpu_rc_delayed_comp(ppd, val);
	else
		*(u32 *)((char *)qibp + dattr->counter) = val;
	return size;
}
536
/* sysfs_ops for the diag counter files. */
static const struct sysfs_ops qib_diagc_ops = {
	.show = diagc_attr_show,
	.store = diagc_attr_store,
};

/* Ktype for the per-port "diag_counters" kobject. */
static struct kobj_type qib_diagc_ktype = {
	.release = qib_port_release,
	.sysfs_ops = &qib_diagc_ops,
	.default_attrs = diagc_default_attributes
};
547
548 /* End diag_counters */
549
550 /* end of per-port file structures and support code */
551
552 /*
553 * Start of per-unit (or driver, in some cases, but replicated
554 * per unit) functions (these get a device *)
555 */
hw_rev_show(struct device * device,struct device_attribute * attr,char * buf)556 static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
557 char *buf)
558 {
559 struct qib_ibdev *dev =
560 rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
561
562 return sprintf(buf, "%x\n", dd_from_dev(dev)->minrev);
563 }
564 static DEVICE_ATTR_RO(hw_rev);
565
hca_type_show(struct device * device,struct device_attribute * attr,char * buf)566 static ssize_t hca_type_show(struct device *device,
567 struct device_attribute *attr, char *buf)
568 {
569 struct qib_ibdev *dev =
570 rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
571 struct qib_devdata *dd = dd_from_dev(dev);
572 int ret;
573
574 if (!dd->boardname)
575 ret = -EINVAL;
576 else
577 ret = scnprintf(buf, PAGE_SIZE, "%s\n", dd->boardname);
578 return ret;
579 }
580 static DEVICE_ATTR_RO(hca_type);
581 static DEVICE_ATTR(board_id, 0444, hca_type_show, NULL);
582
version_show(struct device * device,struct device_attribute * attr,char * buf)583 static ssize_t version_show(struct device *device,
584 struct device_attribute *attr, char *buf)
585 {
586 /* The string printed here is already newline-terminated. */
587 return scnprintf(buf, PAGE_SIZE, "%s", (char *)ib_qib_version);
588 }
589 static DEVICE_ATTR_RO(version);
590
boardversion_show(struct device * device,struct device_attribute * attr,char * buf)591 static ssize_t boardversion_show(struct device *device,
592 struct device_attribute *attr, char *buf)
593 {
594 struct qib_ibdev *dev =
595 rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
596 struct qib_devdata *dd = dd_from_dev(dev);
597
598 /* The string printed here is already newline-terminated. */
599 return scnprintf(buf, PAGE_SIZE, "%s", dd->boardversion);
600 }
601 static DEVICE_ATTR_RO(boardversion);
602
localbus_info_show(struct device * device,struct device_attribute * attr,char * buf)603 static ssize_t localbus_info_show(struct device *device,
604 struct device_attribute *attr, char *buf)
605 {
606 struct qib_ibdev *dev =
607 rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
608 struct qib_devdata *dd = dd_from_dev(dev);
609
610 /* The string printed here is already newline-terminated. */
611 return scnprintf(buf, PAGE_SIZE, "%s", dd->lbus_info);
612 }
613 static DEVICE_ATTR_RO(localbus_info);
614
nctxts_show(struct device * device,struct device_attribute * attr,char * buf)615 static ssize_t nctxts_show(struct device *device,
616 struct device_attribute *attr, char *buf)
617 {
618 struct qib_ibdev *dev =
619 rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
620 struct qib_devdata *dd = dd_from_dev(dev);
621
622 /* Return the number of user ports (contexts) available. */
623 /* The calculation below deals with a special case where
624 * cfgctxts is set to 1 on a single-port board. */
625 return scnprintf(buf, PAGE_SIZE, "%u\n",
626 (dd->first_user_ctxt > dd->cfgctxts) ? 0 :
627 (dd->cfgctxts - dd->first_user_ctxt));
628 }
629 static DEVICE_ATTR_RO(nctxts);
630
nfreectxts_show(struct device * device,struct device_attribute * attr,char * buf)631 static ssize_t nfreectxts_show(struct device *device,
632 struct device_attribute *attr, char *buf)
633 {
634 struct qib_ibdev *dev =
635 rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
636 struct qib_devdata *dd = dd_from_dev(dev);
637
638 /* Return the number of free user ports (contexts) available. */
639 return scnprintf(buf, PAGE_SIZE, "%u\n", dd->freectxts);
640 }
641 static DEVICE_ATTR_RO(nfreectxts);
642
/*
 * Report the board serial number followed by a newline.
 * dd->serial is a fixed-size char array that may not be NUL-terminated,
 * so a terminator is placed just past the copied bytes before any string
 * functions touch the buffer.
 */
static ssize_t serial_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct qib_ibdev *dev =
		rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);

	buf[sizeof(dd->serial)] = '\0';
	memcpy(buf, dd->serial, sizeof(dd->serial));
	/* Appends at the first NUL, which is at most sizeof(dd->serial). */
	strcat(buf, "\n");
	return strlen(buf);
}
static DEVICE_ATTR_RO(serial);
656
chip_reset_store(struct device * device,struct device_attribute * attr,const char * buf,size_t count)657 static ssize_t chip_reset_store(struct device *device,
658 struct device_attribute *attr, const char *buf,
659 size_t count)
660 {
661 struct qib_ibdev *dev =
662 rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
663 struct qib_devdata *dd = dd_from_dev(dev);
664 int ret;
665
666 if (count < 5 || memcmp(buf, "reset", 5) || !dd->diag_client) {
667 ret = -EINVAL;
668 goto bail;
669 }
670
671 ret = qib_reset_device(dd->unit);
672 bail:
673 return ret < 0 ? ret : count;
674 }
675 static DEVICE_ATTR_WO(chip_reset);
676
/*
 * Dump tempsense regs. in decimal, to ease shell-scripts.
 * Reads registers 0-7 (index 6 is skipped and never printed); on any
 * read failure the chip layer's error code is returned unchanged.
 */
static ssize_t tempsense_show(struct device *device,
			      struct device_attribute *attr, char *buf)
{
	struct qib_ibdev *dev =
		rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);
	int ret;
	int idx;
	u8 regvals[8];

	ret = -ENXIO;
	for (idx = 0; idx < 8; ++idx) {
		if (idx == 6)
			continue;
		ret = dd->f_tempsense_rd(dd, idx);
		if (ret < 0)
			break;	/* leave the error in ret */
		regvals[idx] = ret;
	}
	/* Only format output if the loop ran to completion (all reads OK). */
	if (idx == 8)
		ret = scnprintf(buf, PAGE_SIZE, "%d %d %02X %02X %d %d\n",
				*(signed char *)(regvals),
				*(signed char *)(regvals + 1),
				regvals[2], regvals[3],
				*(signed char *)(regvals + 5),
				*(signed char *)(regvals + 7));
	return ret;
}
static DEVICE_ATTR_RO(tempsense);
709
710 /*
711 * end of per-unit (or driver, in some cases, but replicated
712 * per unit) functions
713 */
714
/* start of per-unit file structures and support code */

/* Device-level attributes exposed in the IB device's sysfs directory. */
static struct attribute *qib_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_hca_type.attr,
	&dev_attr_board_id.attr,
	&dev_attr_version.attr,
	&dev_attr_nctxts.attr,
	&dev_attr_nfreectxts.attr,
	&dev_attr_serial.attr,
	&dev_attr_boardversion.attr,
	&dev_attr_tempsense.attr,
	&dev_attr_localbus_info.attr,
	&dev_attr_chip_reset.attr,
	NULL,
};

/* Registered by the verbs layer when the IB device is added. */
const struct attribute_group qib_attr_group = {
	.attrs = qib_attributes,
};
734
/*
 * Create the per-port sysfs hierarchy under the IB device's port kobject:
 * "linkcontrol", "sl2vl", "diag_counters" and, when congestion control
 * is configured, "CCMgtA" with its two binary attribute files.  On any
 * failure, previously created kobjects are unwound via the goto chain.
 * Returns 0 on success or a negative errno.
 */
int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
			  struct kobject *kobj)
{
	struct qib_pportdata *ppd;
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	int ret;

	if (!port_num || port_num > dd->num_pports) {
		qib_dev_err(dd,
			"Skipping infiniband class with invalid port %u\n",
			port_num);
		ret = -ENODEV;
		goto bail;
	}
	/* port_num is 1-based; pport[] is 0-based. */
	ppd = &dd->pport[port_num - 1];

	ret = kobject_init_and_add(&ppd->pport_kobj, &qib_port_ktype, kobj,
				   "linkcontrol");
	if (ret) {
		qib_dev_err(dd,
			"Skipping linkcontrol sysfs info, (err %d) port %u\n",
			ret, port_num);
		goto bail;
	}
	kobject_uevent(&ppd->pport_kobj, KOBJ_ADD);

	ret = kobject_init_and_add(&ppd->sl2vl_kobj, &qib_sl2vl_ktype, kobj,
				   "sl2vl");
	if (ret) {
		qib_dev_err(dd,
			"Skipping sl2vl sysfs info, (err %d) port %u\n",
			ret, port_num);
		goto bail_link;
	}
	kobject_uevent(&ppd->sl2vl_kobj, KOBJ_ADD);

	ret = kobject_init_and_add(&ppd->diagc_kobj, &qib_diagc_ktype, kobj,
				   "diag_counters");
	if (ret) {
		qib_dev_err(dd,
			"Skipping diag_counters sysfs info, (err %d) port %u\n",
			ret, port_num);
		goto bail_sl;
	}
	kobject_uevent(&ppd->diagc_kobj, KOBJ_ADD);

	/* Congestion control files are optional; done if not configured. */
	if (!qib_cc_table_size || !ppd->congestion_entries_shadow)
		return 0;

	ret = kobject_init_and_add(&ppd->pport_cc_kobj, &qib_port_cc_ktype,
				kobj, "CCMgtA");
	if (ret) {
		qib_dev_err(dd,
			"Skipping Congestion Control sysfs info, (err %d) port %u\n",
			ret, port_num);
		goto bail_diagc;
	}

	kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD);

	ret = sysfs_create_bin_file(&ppd->pport_cc_kobj,
				&cc_setting_bin_attr);
	if (ret) {
		qib_dev_err(dd,
			"Skipping Congestion Control setting sysfs info, (err %d) port %u\n",
			ret, port_num);
		goto bail_cc;
	}

	ret = sysfs_create_bin_file(&ppd->pport_cc_kobj,
				&cc_table_bin_attr);
	if (ret) {
		qib_dev_err(dd,
			"Skipping Congestion Control table sysfs info, (err %d) port %u\n",
			ret, port_num);
		goto bail_cc_entry_bin;
	}

	qib_devinfo(dd->pcidev,
		"IB%u: Congestion Control Agent enabled for port %d\n",
		dd->unit, port_num);

	return 0;

	/* Unwind in reverse order of creation. */
bail_cc_entry_bin:
	sysfs_remove_bin_file(&ppd->pport_cc_kobj, &cc_setting_bin_attr);
bail_cc:
	kobject_put(&ppd->pport_cc_kobj);
bail_diagc:
	kobject_put(&ppd->diagc_kobj);
bail_sl:
	kobject_put(&ppd->sl2vl_kobj);
bail_link:
	kobject_put(&ppd->pport_kobj);
bail:
	return ret;
}
832
/*
 * Unregister and remove our files in /sys/class/infiniband.
 * Drops every per-port kobject created by qib_create_port_files(); the
 * congestion control files are removed only when they were created
 * (same condition as at creation time).
 */
void qib_verbs_unregister_sysfs(struct qib_devdata *dd)
{
	struct qib_pportdata *ppd;
	int i;

	for (i = 0; i < dd->num_pports; i++) {
		ppd = &dd->pport[i];
		if (qib_cc_table_size &&
			ppd->congestion_entries_shadow) {
			sysfs_remove_bin_file(&ppd->pport_cc_kobj,
				&cc_setting_bin_attr);
			sysfs_remove_bin_file(&ppd->pport_cc_kobj,
				&cc_table_bin_attr);
			kobject_put(&ppd->pport_cc_kobj);
		}
		kobject_put(&ppd->sl2vl_kobj);
		kobject_put(&ppd->pport_kobj);
	}
}
855