/*
 * Copyright (c) 2012 Intel Corporation. All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/ctype.h>

#include "qib.h"
#include "qib_mad.h"

/* start of per-port functions */
/*
 * Get/Set heartbeat enable. OR of 1=enabled, 2=auto
 */
static ssize_t show_hrtbt_enb(struct qib_pportdata *ppd, char *buf)
{
        struct qib_devdata *dd = ppd->dd;
        int ret;

        ret = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_HRTBT);
        ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
        return ret;
}

static ssize_t store_hrtbt_enb(struct qib_pportdata *ppd, const char *buf,
                               size_t count)
{
        struct qib_devdata *dd = ppd->dd;
        int ret;
        u16 val;

        ret = kstrtou16(buf, 0, &val);
        if (ret) {
                qib_dev_err(dd, "attempt to set invalid Heartbeat enable\n");
                return ret;
        }

        /*
         * Set the "intentional" heartbeat enable per either of
         * "Enable" and "Auto", as these are normally set together.
         * This bit is consulted when leaving loopback mode,
         * because entering loopback mode overrides it and automatically
         * disables heartbeat.
         */
        ret = dd->f_set_ib_cfg(ppd, QIB_IB_CFG_HRTBT, val);
        return ret < 0 ? ret : count;
}

static ssize_t store_loopback(struct qib_pportdata *ppd, const char *buf,
                              size_t count)
{
        struct qib_devdata *dd = ppd->dd;
        int ret = count, r;

        r = dd->f_set_ib_loopback(ppd, buf);
        if (r < 0)
                ret = r;

        return ret;
}

static ssize_t store_led_override(struct qib_pportdata *ppd, const char *buf,
                                  size_t count)
{
        struct qib_devdata *dd = ppd->dd;
        int ret;
        u16 val;

        ret = kstrtou16(buf, 0, &val);
        if (ret) {
                qib_dev_err(dd, "attempt to set invalid LED override\n");
                return ret;
        }

        qib_set_led_override(ppd, val);
        return count;
}

static ssize_t show_status(struct qib_pportdata *ppd, char *buf)
{
        ssize_t ret;

        if (!ppd->statusp)
                ret = -EINVAL;
        else
                ret = scnprintf(buf, PAGE_SIZE, "0x%llx\n",
                                (unsigned long long) *(ppd->statusp));
        return ret;
}

/*
 * For userland compatibility, these offsets must remain fixed.
 * They are strings for QIB_STATUS_*
 */
static const char * const qib_status_str[] = {
        "Initted",
        "",
        "",
        "",
        "",
        "Present",
        "IB_link_up",
        "IB_configured",
        "",
        "Fatal_Hardware_Error",
        NULL,
};

static ssize_t show_status_str(struct qib_pportdata *ppd, char *buf)
{
        int i, any;
        u64 s;
        ssize_t ret;

        if (!ppd->statusp) {
                ret = -EINVAL;
                goto bail;
        }

        s = *(ppd->statusp);
        *buf = '\0';
        for (any = i = 0; s && qib_status_str[i]; i++) {
                if (s & 1) {
                        /* if overflow */
                        if (any && strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE)
                                break;
                        if (strlcat(buf, qib_status_str[i], PAGE_SIZE) >=
                                        PAGE_SIZE)
                                break;
                        any = 1;
                }
                s >>= 1;
        }
        if (any)
                strlcat(buf, "\n", PAGE_SIZE);

        ret = strlen(buf);

bail:
        return ret;
}

/* end of per-port functions */

/*
 * Start of per-port file structures and support code
 * Because we are fitting into other infrastructure, we have to supply the
 * full set of kobject/sysfs_ops structures and routines.
 */
#define QIB_PORT_ATTR(name, mode, show, store) \
        static struct qib_port_attr qib_port_attr_##name = \
                __ATTR(name, mode, show, store)
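
/*
 * Illustrative note (a sketch, not driver code): via __ATTR, an
 * invocation such as QIB_PORT_ATTR(status, S_IRUGO, show_status, NULL)
 * expands to roughly
 *
 *      static struct qib_port_attr qib_port_attr_status = {
 *              .attr  = { .name = "status", .mode = S_IRUGO },
 *              .show  = show_status,
 *              .store = NULL,
 *      };
 *
 * i.e. one sysfs file per attribute, dispatched through the
 * qib_portattr_show()/qib_portattr_store() wrappers further below.
 */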

struct qib_port_attr {
        struct attribute attr;
        ssize_t (*show)(struct qib_pportdata *, char *);
        ssize_t (*store)(struct qib_pportdata *, const char *, size_t);
};

QIB_PORT_ATTR(loopback, S_IWUSR, NULL, store_loopback);
QIB_PORT_ATTR(led_override, S_IWUSR, NULL, store_led_override);
QIB_PORT_ATTR(hrtbt_enable, S_IWUSR | S_IRUGO, show_hrtbt_enb,
              store_hrtbt_enb);
QIB_PORT_ATTR(status, S_IRUGO, show_status, NULL);
QIB_PORT_ATTR(status_str, S_IRUGO, show_status_str, NULL);

static struct attribute *port_default_attributes[] = {
        &qib_port_attr_loopback.attr,
        &qib_port_attr_led_override.attr,
        &qib_port_attr_hrtbt_enable.attr,
        &qib_port_attr_status.attr,
        &qib_port_attr_status_str.attr,
        NULL
};
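
/*
 * Sketch of the resulting sysfs layout (paths are illustrative; the
 * actual device and port names come from the IB core), once the
 * "linkcontrol" kobject is registered in qib_create_port_files():
 *
 *      /sys/class/infiniband/qib0/ports/1/linkcontrol/loopback       (write-only)
 *      /sys/class/infiniband/qib0/ports/1/linkcontrol/led_override   (write-only)
 *      /sys/class/infiniband/qib0/ports/1/linkcontrol/hrtbt_enable   (read/write)
 *      /sys/class/infiniband/qib0/ports/1/linkcontrol/status         (read-only)
 *      /sys/class/infiniband/qib0/ports/1/linkcontrol/status_str     (read-only)
 *
 * e.g. "echo 1 > .../linkcontrol/hrtbt_enable" or
 * "cat .../linkcontrol/status_str".
 */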

/*
 * Start of per-port congestion control structures and support code
 */

/*
 * Congestion control table size followed by table entries
 */
static ssize_t read_cc_table_bin(struct file *filp, struct kobject *kobj,
                                 struct bin_attribute *bin_attr,
                                 char *buf, loff_t pos, size_t count)
{
        int ret;
        struct qib_pportdata *ppd =
                container_of(kobj, struct qib_pportdata, pport_cc_kobj);

        if (!qib_cc_table_size || !ppd->ccti_entries_shadow)
                return -EINVAL;

        ret = ppd->total_cct_entry * sizeof(struct ib_cc_table_entry_shadow)
                + sizeof(__be16);

        if (pos > ret)
                return -EINVAL;

        if (count > ret - pos)
                count = ret - pos;

        if (!count)
                return count;

        spin_lock(&ppd->cc_shadow_lock);
        memcpy(buf, ppd->ccti_entries_shadow, count);
        spin_unlock(&ppd->cc_shadow_lock);

        return count;
}
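
/*
 * Illustrative note, inferred from the size computation above rather
 * than from a separate spec: the blob is the shadow copy returned
 * verbatim -- a __be16 word followed by total_cct_entry
 * ib_cc_table_entry_shadow entries -- so a congestion control manager
 * can simply read the whole cc_table_bin file in one go, e.g.
 * (path illustrative):
 *
 *      hexdump -C /sys/class/infiniband/qib0/ports/1/CCMgtA/cc_table_bin
 */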

static void qib_port_release(struct kobject *kobj)
{
        /* nothing to do since memory is freed by qib_free_devdata() */
}

static struct kobj_type qib_port_cc_ktype = {
        .release = qib_port_release,
};

static struct bin_attribute cc_table_bin_attr = {
        .attr = {.name = "cc_table_bin", .mode = 0444},
        .read = read_cc_table_bin,
        .size = PAGE_SIZE,
};

/*
 * Congestion settings: port control, control map and an array of 16
 * entries for the congestion entries - increase, timer, event log
 * trigger threshold and the minimum injection rate delay.
 */
static ssize_t read_cc_setting_bin(struct file *filp, struct kobject *kobj,
                                   struct bin_attribute *bin_attr,
                                   char *buf, loff_t pos, size_t count)
{
        int ret;
        struct qib_pportdata *ppd =
                container_of(kobj, struct qib_pportdata, pport_cc_kobj);

        if (!qib_cc_table_size || !ppd->congestion_entries_shadow)
                return -EINVAL;

        ret = sizeof(struct ib_cc_congestion_setting_attr_shadow);

        if (pos > ret)
                return -EINVAL;
        if (count > ret - pos)
                count = ret - pos;

        if (!count)
                return count;

        spin_lock(&ppd->cc_shadow_lock);
        memcpy(buf, ppd->congestion_entries_shadow, count);
        spin_unlock(&ppd->cc_shadow_lock);

        return count;
}

static struct bin_attribute cc_setting_bin_attr = {
        .attr = {.name = "cc_settings_bin", .mode = 0444},
        .read = read_cc_setting_bin,
        .size = PAGE_SIZE,
};


static ssize_t qib_portattr_show(struct kobject *kobj,
        struct attribute *attr, char *buf)
{
        struct qib_port_attr *pattr =
                container_of(attr, struct qib_port_attr, attr);
        struct qib_pportdata *ppd =
                container_of(kobj, struct qib_pportdata, pport_kobj);

        return pattr->show(ppd, buf);
}

static ssize_t qib_portattr_store(struct kobject *kobj,
        struct attribute *attr, const char *buf, size_t len)
{
        struct qib_port_attr *pattr =
                container_of(attr, struct qib_port_attr, attr);
        struct qib_pportdata *ppd =
                container_of(kobj, struct qib_pportdata, pport_kobj);

        return pattr->store(ppd, buf, len);
}


static const struct sysfs_ops qib_port_ops = {
        .show = qib_portattr_show,
        .store = qib_portattr_store,
};

static struct kobj_type qib_port_ktype = {
        .release = qib_port_release,
        .sysfs_ops = &qib_port_ops,
        .default_attrs = port_default_attributes
};

/* Start sl2vl */

#define QIB_SL2VL_ATTR(N) \
        static struct qib_sl2vl_attr qib_sl2vl_attr_##N = { \
                .attr = { .name = __stringify(N), .mode = 0444 }, \
                .sl = N \
        }

struct qib_sl2vl_attr {
        struct attribute attr;
        int sl;
};

QIB_SL2VL_ATTR(0);
QIB_SL2VL_ATTR(1);
QIB_SL2VL_ATTR(2);
QIB_SL2VL_ATTR(3);
QIB_SL2VL_ATTR(4);
QIB_SL2VL_ATTR(5);
QIB_SL2VL_ATTR(6);
QIB_SL2VL_ATTR(7);
QIB_SL2VL_ATTR(8);
QIB_SL2VL_ATTR(9);
QIB_SL2VL_ATTR(10);
QIB_SL2VL_ATTR(11);
QIB_SL2VL_ATTR(12);
QIB_SL2VL_ATTR(13);
QIB_SL2VL_ATTR(14);
QIB_SL2VL_ATTR(15);

static struct attribute *sl2vl_default_attributes[] = {
        &qib_sl2vl_attr_0.attr,
        &qib_sl2vl_attr_1.attr,
        &qib_sl2vl_attr_2.attr,
        &qib_sl2vl_attr_3.attr,
        &qib_sl2vl_attr_4.attr,
        &qib_sl2vl_attr_5.attr,
        &qib_sl2vl_attr_6.attr,
        &qib_sl2vl_attr_7.attr,
        &qib_sl2vl_attr_8.attr,
        &qib_sl2vl_attr_9.attr,
        &qib_sl2vl_attr_10.attr,
        &qib_sl2vl_attr_11.attr,
        &qib_sl2vl_attr_12.attr,
        &qib_sl2vl_attr_13.attr,
        &qib_sl2vl_attr_14.attr,
        &qib_sl2vl_attr_15.attr,
        NULL
};

static ssize_t sl2vl_attr_show(struct kobject *kobj, struct attribute *attr,
                               char *buf)
{
        struct qib_sl2vl_attr *sattr =
                container_of(attr, struct qib_sl2vl_attr, attr);
        struct qib_pportdata *ppd =
                container_of(kobj, struct qib_pportdata, sl2vl_kobj);
        struct qib_ibport *qibp = &ppd->ibport_data;

        return sprintf(buf, "%u\n", qibp->sl_to_vl[sattr->sl]);
}

static const struct sysfs_ops qib_sl2vl_ops = {
        .show = sl2vl_attr_show,
};

static struct kobj_type qib_sl2vl_ktype = {
        .release = qib_port_release,
        .sysfs_ops = &qib_sl2vl_ops,
        .default_attrs = sl2vl_default_attributes
};
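
/*
 * Example usage (path illustrative): each service level gets its own
 * read-only file named 0..15, so
 *
 *      cat /sys/class/infiniband/qib0/ports/1/sl2vl/3
 *
 * prints the VL that SL 3 currently maps to.
 */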

/* End sl2vl */

/* Start diag_counters */

#define QIB_DIAGC_ATTR(N) \
        static struct qib_diagc_attr qib_diagc_attr_##N = { \
                .attr = { .name = __stringify(N), .mode = 0664 }, \
                .counter = offsetof(struct qib_ibport, n_##N) \
        }

struct qib_diagc_attr {
        struct attribute attr;
        size_t counter;
};

QIB_DIAGC_ATTR(rc_resends);
QIB_DIAGC_ATTR(rc_acks);
QIB_DIAGC_ATTR(rc_qacks);
QIB_DIAGC_ATTR(rc_delayed_comp);
QIB_DIAGC_ATTR(seq_naks);
QIB_DIAGC_ATTR(rdma_seq);
QIB_DIAGC_ATTR(rnr_naks);
QIB_DIAGC_ATTR(other_naks);
QIB_DIAGC_ATTR(rc_timeouts);
QIB_DIAGC_ATTR(loop_pkts);
QIB_DIAGC_ATTR(pkt_drops);
QIB_DIAGC_ATTR(dmawait);
QIB_DIAGC_ATTR(unaligned);
QIB_DIAGC_ATTR(rc_dupreq);
QIB_DIAGC_ATTR(rc_seqnak);

static struct attribute *diagc_default_attributes[] = {
        &qib_diagc_attr_rc_resends.attr,
        &qib_diagc_attr_rc_acks.attr,
        &qib_diagc_attr_rc_qacks.attr,
        &qib_diagc_attr_rc_delayed_comp.attr,
        &qib_diagc_attr_seq_naks.attr,
        &qib_diagc_attr_rdma_seq.attr,
        &qib_diagc_attr_rnr_naks.attr,
        &qib_diagc_attr_other_naks.attr,
        &qib_diagc_attr_rc_timeouts.attr,
        &qib_diagc_attr_loop_pkts.attr,
        &qib_diagc_attr_pkt_drops.attr,
        &qib_diagc_attr_dmawait.attr,
        &qib_diagc_attr_unaligned.attr,
        &qib_diagc_attr_rc_dupreq.attr,
        &qib_diagc_attr_rc_seqnak.attr,
        NULL
};

static ssize_t diagc_attr_show(struct kobject *kobj, struct attribute *attr,
                               char *buf)
{
        struct qib_diagc_attr *dattr =
                container_of(attr, struct qib_diagc_attr, attr);
        struct qib_pportdata *ppd =
                container_of(kobj, struct qib_pportdata, diagc_kobj);
        struct qib_ibport *qibp = &ppd->ibport_data;

        return sprintf(buf, "%u\n", *(u32 *)((char *)qibp + dattr->counter));
}

static ssize_t diagc_attr_store(struct kobject *kobj, struct attribute *attr,
                                const char *buf, size_t size)
{
        struct qib_diagc_attr *dattr =
                container_of(attr, struct qib_diagc_attr, attr);
        struct qib_pportdata *ppd =
                container_of(kobj, struct qib_pportdata, diagc_kobj);
        struct qib_ibport *qibp = &ppd->ibport_data;
        u32 val;
        int ret;

        ret = kstrtou32(buf, 0, &val);
        if (ret)
                return ret;
        *(u32 *)((char *) qibp + dattr->counter) = val;
        return size;
}
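
/*
 * Example usage (path illustrative): the diag counters are writable
 * (mode 0664), so a test script can zero one before a run and read it
 * back afterwards:
 *
 *      echo 0 > /sys/class/infiniband/qib0/ports/1/diag_counters/rc_resends
 *      cat /sys/class/infiniband/qib0/ports/1/diag_counters/rc_resends
 */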

static const struct sysfs_ops qib_diagc_ops = {
        .show = diagc_attr_show,
        .store = diagc_attr_store,
};

static struct kobj_type qib_diagc_ktype = {
        .release = qib_port_release,
        .sysfs_ops = &qib_diagc_ops,
        .default_attrs = diagc_default_attributes
};

/* End diag_counters */

/* end of per-port file structures and support code */

/*
 * Start of per-unit (or driver, in some cases, but replicated
 * per unit) functions (these get a device *)
 */
static ssize_t show_rev(struct device *device, struct device_attribute *attr,
                        char *buf)
{
        struct qib_ibdev *dev =
                container_of(device, struct qib_ibdev, ibdev.dev);

        return sprintf(buf, "%x\n", dd_from_dev(dev)->minrev);
}

static ssize_t show_hca(struct device *device, struct device_attribute *attr,
                        char *buf)
{
        struct qib_ibdev *dev =
                container_of(device, struct qib_ibdev, ibdev.dev);
        struct qib_devdata *dd = dd_from_dev(dev);
        int ret;

        if (!dd->boardname)
                ret = -EINVAL;
        else
                ret = scnprintf(buf, PAGE_SIZE, "%s\n", dd->boardname);
        return ret;
}

static ssize_t show_version(struct device *device,
                            struct device_attribute *attr, char *buf)
{
        /* The string printed here is already newline-terminated. */
        return scnprintf(buf, PAGE_SIZE, "%s", (char *)ib_qib_version);
}

static ssize_t show_boardversion(struct device *device,
                                 struct device_attribute *attr, char *buf)
{
        struct qib_ibdev *dev =
                container_of(device, struct qib_ibdev, ibdev.dev);
        struct qib_devdata *dd = dd_from_dev(dev);

        /* The string printed here is already newline-terminated. */
        return scnprintf(buf, PAGE_SIZE, "%s", dd->boardversion);
}


static ssize_t show_localbus_info(struct device *device,
                                  struct device_attribute *attr, char *buf)
{
        struct qib_ibdev *dev =
                container_of(device, struct qib_ibdev, ibdev.dev);
        struct qib_devdata *dd = dd_from_dev(dev);

        /* The string printed here is already newline-terminated. */
        return scnprintf(buf, PAGE_SIZE, "%s", dd->lbus_info);
}


static ssize_t show_nctxts(struct device *device,
                           struct device_attribute *attr, char *buf)
{
        struct qib_ibdev *dev =
                container_of(device, struct qib_ibdev, ibdev.dev);
        struct qib_devdata *dd = dd_from_dev(dev);

        /* Return the number of user ports (contexts) available. */
        /* The calculation below deals with a special case where
         * cfgctxts is set to 1 on a single-port board. */
        return scnprintf(buf, PAGE_SIZE, "%u\n",
                         (dd->first_user_ctxt > dd->cfgctxts) ? 0 :
                         (dd->cfgctxts - dd->first_user_ctxt));
}

static ssize_t show_nfreectxts(struct device *device,
                               struct device_attribute *attr, char *buf)
{
        struct qib_ibdev *dev =
                container_of(device, struct qib_ibdev, ibdev.dev);
        struct qib_devdata *dd = dd_from_dev(dev);

        /* Return the number of free user ports (contexts) available. */
        return scnprintf(buf, PAGE_SIZE, "%u\n", dd->freectxts);
}

static ssize_t show_serial(struct device *device,
                           struct device_attribute *attr, char *buf)
{
        struct qib_ibdev *dev =
                container_of(device, struct qib_ibdev, ibdev.dev);
        struct qib_devdata *dd = dd_from_dev(dev);

        buf[sizeof dd->serial] = '\0';
        memcpy(buf, dd->serial, sizeof dd->serial);
        strcat(buf, "\n");
        return strlen(buf);
}

static ssize_t store_chip_reset(struct device *device,
                                struct device_attribute *attr, const char *buf,
                                size_t count)
{
        struct qib_ibdev *dev =
                container_of(device, struct qib_ibdev, ibdev.dev);
        struct qib_devdata *dd = dd_from_dev(dev);
        int ret;

        if (count < 5 || memcmp(buf, "reset", 5) || !dd->diag_client) {
                ret = -EINVAL;
                goto bail;
        }

        ret = qib_reset_device(dd->unit);
bail:
        return ret < 0 ? ret : count;
}

static ssize_t show_logged_errs(struct device *device,
                                struct device_attribute *attr, char *buf)
{
        struct qib_ibdev *dev =
                container_of(device, struct qib_ibdev, ibdev.dev);
        struct qib_devdata *dd = dd_from_dev(dev);
        int idx, count;

        /* force consistency with actual EEPROM */
        if (qib_update_eeprom_log(dd) != 0)
                return -ENXIO;

        count = 0;
        for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
                count += scnprintf(buf + count, PAGE_SIZE - count, "%d%c",
                                   dd->eep_st_errs[idx],
                                   idx == (QIB_EEP_LOG_CNT - 1) ? '\n' : ' ');
        }

        return count;
}

/*
 * Dump tempsense regs. in decimal, to ease shell-scripts.
 */
static ssize_t show_tempsense(struct device *device,
                              struct device_attribute *attr, char *buf)
{
        struct qib_ibdev *dev =
                container_of(device, struct qib_ibdev, ibdev.dev);
        struct qib_devdata *dd = dd_from_dev(dev);
        int ret;
        int idx;
        u8 regvals[8];

        ret = -ENXIO;
        for (idx = 0; idx < 8; ++idx) {
                if (idx == 6)
                        continue;
                ret = dd->f_tempsense_rd(dd, idx);
                if (ret < 0)
                        break;
                regvals[idx] = ret;
        }
        if (idx == 8)
                ret = scnprintf(buf, PAGE_SIZE, "%d %d %02X %02X %d %d\n",
                                *(signed char *)(regvals),
                                *(signed char *)(regvals + 1),
                                regvals[2], regvals[3],
                                *(signed char *)(regvals + 5),
                                *(signed char *)(regvals + 7));
        return ret;
}

/*
 * end of per-unit (or driver, in some cases, but replicated
 * per unit) functions
 */

/* start of per-unit file structures and support code */
static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(version, S_IRUGO, show_version, NULL);
static DEVICE_ATTR(nctxts, S_IRUGO, show_nctxts, NULL);
static DEVICE_ATTR(nfreectxts, S_IRUGO, show_nfreectxts, NULL);
static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL);
static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL);
static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL);
static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset);

static struct device_attribute *qib_attributes[] = {
        &dev_attr_hw_rev,
        &dev_attr_hca_type,
        &dev_attr_board_id,
        &dev_attr_version,
        &dev_attr_nctxts,
        &dev_attr_nfreectxts,
        &dev_attr_serial,
        &dev_attr_boardversion,
        &dev_attr_logged_errors,
        &dev_attr_tempsense,
        &dev_attr_localbus_info,
        &dev_attr_chip_reset,
};
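
/*
 * These per-unit attributes appear directly under the IB device's
 * sysfs directory (path illustrative), e.g.:
 *
 *      /sys/class/infiniband/qib0/hw_rev
 *      /sys/class/infiniband/qib0/tempsense
 *      /sys/class/infiniband/qib0/chip_reset   (accepts "reset", and only
 *                                               while a diag client is open)
 */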

int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
                          struct kobject *kobj)
{
        struct qib_pportdata *ppd;
        struct qib_devdata *dd = dd_from_ibdev(ibdev);
        int ret;

        if (!port_num || port_num > dd->num_pports) {
                qib_dev_err(dd,
                        "Skipping infiniband class with invalid port %u\n",
                        port_num);
                ret = -ENODEV;
                goto bail;
        }
        ppd = &dd->pport[port_num - 1];

        ret = kobject_init_and_add(&ppd->pport_kobj, &qib_port_ktype, kobj,
                                   "linkcontrol");
        if (ret) {
                qib_dev_err(dd,
                        "Skipping linkcontrol sysfs info, (err %d) port %u\n",
                        ret, port_num);
                goto bail;
        }
        kobject_uevent(&ppd->pport_kobj, KOBJ_ADD);

        ret = kobject_init_and_add(&ppd->sl2vl_kobj, &qib_sl2vl_ktype, kobj,
                                   "sl2vl");
        if (ret) {
                qib_dev_err(dd,
                        "Skipping sl2vl sysfs info, (err %d) port %u\n",
                        ret, port_num);
                goto bail_link;
        }
        kobject_uevent(&ppd->sl2vl_kobj, KOBJ_ADD);

        ret = kobject_init_and_add(&ppd->diagc_kobj, &qib_diagc_ktype, kobj,
                                   "diag_counters");
        if (ret) {
                qib_dev_err(dd,
                        "Skipping diag_counters sysfs info, (err %d) port %u\n",
                        ret, port_num);
                goto bail_sl;
        }
        kobject_uevent(&ppd->diagc_kobj, KOBJ_ADD);

        if (!qib_cc_table_size || !ppd->congestion_entries_shadow)
                return 0;

        ret = kobject_init_and_add(&ppd->pport_cc_kobj, &qib_port_cc_ktype,
                                   kobj, "CCMgtA");
        if (ret) {
                qib_dev_err(dd,
                        "Skipping Congestion Control sysfs info, (err %d) port %u\n",
                        ret, port_num);
                goto bail_diagc;
        }

        kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD);

        ret = sysfs_create_bin_file(&ppd->pport_cc_kobj,
                                    &cc_setting_bin_attr);
        if (ret) {
                qib_dev_err(dd,
                        "Skipping Congestion Control setting sysfs info, (err %d) port %u\n",
                        ret, port_num);
                goto bail_cc;
        }

        ret = sysfs_create_bin_file(&ppd->pport_cc_kobj,
                                    &cc_table_bin_attr);
        if (ret) {
                qib_dev_err(dd,
                        "Skipping Congestion Control table sysfs info, (err %d) port %u\n",
                        ret, port_num);
                goto bail_cc_entry_bin;
        }

        qib_devinfo(dd->pcidev,
                "IB%u: Congestion Control Agent enabled for port %d\n",
                dd->unit, port_num);

        return 0;

bail_cc_entry_bin:
        sysfs_remove_bin_file(&ppd->pport_cc_kobj, &cc_setting_bin_attr);
bail_cc:
        kobject_put(&ppd->pport_cc_kobj);
bail_diagc:
        kobject_put(&ppd->diagc_kobj);
bail_sl:
        kobject_put(&ppd->sl2vl_kobj);
bail_link:
        kobject_put(&ppd->pport_kobj);
bail:
        return ret;
}
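
/*
 * Sketch of the per-port hierarchy created above (the parent kobject is
 * whatever the IB core hands us for the port, typically the
 * .../ports/<N> directory):
 *
 *      <port>/linkcontrol/     loopback, led_override, hrtbt_enable,
 *                              status, status_str
 *      <port>/sl2vl/           0 .. 15
 *      <port>/diag_counters/   rc_resends, rc_acks, ...
 *      <port>/CCMgtA/          cc_settings_bin, cc_table_bin
 *                              (only when congestion control is configured)
 */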

/*
 * Register and create our files in /sys/class/infiniband.
 */
int qib_verbs_register_sysfs(struct qib_devdata *dd)
{
        struct ib_device *dev = &dd->verbs_dev.ibdev;
        int i, ret;

        for (i = 0; i < ARRAY_SIZE(qib_attributes); ++i) {
                ret = device_create_file(&dev->dev, qib_attributes[i]);
                if (ret)
                        goto bail;
        }

        return 0;
bail:
        for (i = 0; i < ARRAY_SIZE(qib_attributes); ++i)
                device_remove_file(&dev->dev, qib_attributes[i]);
        return ret;
}

/*
 * Unregister and remove our files in /sys/class/infiniband.
 */
void qib_verbs_unregister_sysfs(struct qib_devdata *dd)
{
        struct qib_pportdata *ppd;
        int i;

        for (i = 0; i < dd->num_pports; i++) {
                ppd = &dd->pport[i];
                if (qib_cc_table_size &&
                    ppd->congestion_entries_shadow) {
                        sysfs_remove_bin_file(&ppd->pport_cc_kobj,
                                              &cc_setting_bin_attr);
                        sysfs_remove_bin_file(&ppd->pport_cc_kobj,
                                              &cc_table_bin_attr);
                        kobject_put(&ppd->pport_cc_kobj);
                }
                kobject_put(&ppd->sl2vl_kobj);
                kobject_put(&ppd->pport_kobj);
        }
}