/*
 * drivers/infiniband/hw/qib/qib_sysfs.c
 * (as of commit "RDMA/qib: Validate ->show()/store() callbacks before
 *  calling them")
 */
/*
 * Copyright (c) 2012 Intel Corporation. All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/ctype.h>

#include "qib.h"
#include "qib_mad.h"

/* start of per-port functions */
/*
 * Get/Set heartbeat enable. OR of 1=enabled, 2=auto
 */
static ssize_t show_hrtbt_enb(struct qib_pportdata *ppd, char *buf)
{
        struct qib_devdata *dd = ppd->dd;
        int ret;

        ret = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_HRTBT);
        ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
        return ret;
}

static ssize_t store_hrtbt_enb(struct qib_pportdata *ppd, const char *buf,
                               size_t count)
{
        struct qib_devdata *dd = ppd->dd;
        int ret;
        u16 val;

        ret = kstrtou16(buf, 0, &val);
        if (ret) {
                qib_dev_err(dd, "attempt to set invalid Heartbeat enable\n");
                return ret;
        }

        /*
         * Set the "intentional" heartbeat enable per either of
         * "Enable" and "Auto", as these are normally set together.
         * This bit is consulted when leaving loopback mode,
         * because entering loopback mode overrides it and automatically
         * disables heartbeat.
         */
        ret = dd->f_set_ib_cfg(ppd, QIB_IB_CFG_HRTBT, val);
        return ret < 0 ? ret : count;
}

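/*
 * Pass the user's loopback request straight through to the chip-specific
 * handler; the raw buffer is parsed by f_set_ib_loopback().
 */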
static ssize_t store_loopback(struct qib_pportdata *ppd, const char *buf,
                              size_t count)
{
        struct qib_devdata *dd = ppd->dd;
        int ret = count, r;

        r = dd->f_set_ib_loopback(ppd, buf);
        if (r < 0)
                ret = r;

        return ret;
}

static ssize_t store_led_override(struct qib_pportdata *ppd, const char *buf,
                                  size_t count)
{
        struct qib_devdata *dd = ppd->dd;
        int ret;
        u16 val;

        ret = kstrtou16(buf, 0, &val);
        if (ret) {
                qib_dev_err(dd, "attempt to set invalid LED override\n");
                return ret;
        }

        qib_set_led_override(ppd, val);
        return count;
}

static ssize_t show_status(struct qib_pportdata *ppd, char *buf)
{
        ssize_t ret;

        if (!ppd->statusp)
                ret = -EINVAL;
        else
                ret = scnprintf(buf, PAGE_SIZE, "0x%llx\n",
                                (unsigned long long) *(ppd->statusp));
        return ret;
}

/*
 * For userland compatibility, these offsets must remain fixed.
 * They are strings for QIB_STATUS_*
 */
static const char * const qib_status_str[] = {
        "Initted",
        "",
        "",
        "",
        "",
        "Present",
        "IB_link_up",
        "IB_configured",
        "",
        "Fatal_Hardware_Error",
        NULL,
};

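/*
 * Decode *ppd->statusp into a space-separated list of the QIB_STATUS_*
 * names above, one name per set bit.
 */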
static ssize_t show_status_str(struct qib_pportdata *ppd, char *buf)
{
        int i, any;
        u64 s;
        ssize_t ret;

        if (!ppd->statusp) {
                ret = -EINVAL;
                goto bail;
        }

        s = *(ppd->statusp);
        *buf = '\0';
        for (any = i = 0; s && qib_status_str[i]; i++) {
                if (s & 1) {
                        /* if overflow */
                        if (any && strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE)
                                break;
                        if (strlcat(buf, qib_status_str[i], PAGE_SIZE) >=
                                        PAGE_SIZE)
                                break;
                        any = 1;
                }
                s >>= 1;
        }
        if (any)
                strlcat(buf, "\n", PAGE_SIZE);

        ret = strlen(buf);

bail:
        return ret;
}

/* end of per-port functions */

/*
 * Start of per-port file structures and support code
 * Because we are fitting into other infrastructure, we have to supply the
 * full set of kobject/sysfs_ops structures and routines.
 */
#define QIB_PORT_ATTR(name, mode, show, store) \
        static struct qib_port_attr qib_port_attr_##name = \
                __ATTR(name, mode, show, store)

struct qib_port_attr {
        struct attribute attr;
        ssize_t (*show)(struct qib_pportdata *, char *);
        ssize_t (*store)(struct qib_pportdata *, const char *, size_t);
};

QIB_PORT_ATTR(loopback, S_IWUSR, NULL, store_loopback);
QIB_PORT_ATTR(led_override, S_IWUSR, NULL, store_led_override);
QIB_PORT_ATTR(hrtbt_enable, S_IWUSR | S_IRUGO, show_hrtbt_enb,
              store_hrtbt_enb);
QIB_PORT_ATTR(status, S_IRUGO, show_status, NULL);
QIB_PORT_ATTR(status_str, S_IRUGO, show_status_str, NULL);

static struct attribute *port_default_attributes[] = {
        &qib_port_attr_loopback.attr,
        &qib_port_attr_led_override.attr,
        &qib_port_attr_hrtbt_enable.attr,
        &qib_port_attr_status.attr,
        &qib_port_attr_status_str.attr,
        NULL
};

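/*
 * Illustrative usage from a shell.  The parent directory is an assumption
 * (it depends on the device name and on where the IB core parents the
 * port kobject passed to qib_create_port_files()), but is typically
 * /sys/class/infiniband/qib0/ports/<port>/:
 *
 *   $ cat linkcontrol/hrtbt_enable        # heartbeat bits: 1=enabled, 2=auto
 *   # echo 3 > linkcontrol/hrtbt_enable   # set both "Enable" and "Auto"
 *   $ cat linkcontrol/status_str          # decoded QIB_STATUS_* flags
 */
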
/*
 * Start of per-port congestion control structures and support code
 */

/*
 * Congestion control table size followed by table entries
 */
static ssize_t read_cc_table_bin(struct file *filp, struct kobject *kobj,
                                 struct bin_attribute *bin_attr,
                                 char *buf, loff_t pos, size_t count)
{
        int ret;
        struct qib_pportdata *ppd =
                container_of(kobj, struct qib_pportdata, pport_cc_kobj);

        if (!qib_cc_table_size || !ppd->ccti_entries_shadow)
                return -EINVAL;

        ret = ppd->total_cct_entry * sizeof(struct ib_cc_table_entry_shadow)
                 + sizeof(__be16);

        if (pos > ret)
                return -EINVAL;

        if (count > ret - pos)
                count = ret - pos;

        if (!count)
                return count;

        spin_lock(&ppd->cc_shadow_lock);
        memcpy(buf, ppd->ccti_entries_shadow, count);
        spin_unlock(&ppd->cc_shadow_lock);

        return count;
}

static void qib_port_release(struct kobject *kobj)
{
        /* nothing to do since memory is freed by qib_free_devdata() */
}

static struct kobj_type qib_port_cc_ktype = {
        .release = qib_port_release,
};

static const struct bin_attribute cc_table_bin_attr = {
        .attr = {.name = "cc_table_bin", .mode = 0444},
        .read = read_cc_table_bin,
        .size = PAGE_SIZE,
};

/*
 * Congestion settings: port control, control map and an array of 16
 * entries for the congestion entries - increase, timer, event log
 * trigger threshold and the minimum injection rate delay.
 */
static ssize_t read_cc_setting_bin(struct file *filp, struct kobject *kobj,
                                   struct bin_attribute *bin_attr,
                                   char *buf, loff_t pos, size_t count)
{
        int ret;
        struct qib_pportdata *ppd =
                container_of(kobj, struct qib_pportdata, pport_cc_kobj);

        if (!qib_cc_table_size || !ppd->congestion_entries_shadow)
                return -EINVAL;

        ret = sizeof(struct ib_cc_congestion_setting_attr_shadow);

        if (pos > ret)
                return -EINVAL;
        if (count > ret - pos)
                count = ret - pos;

        if (!count)
                return count;

        spin_lock(&ppd->cc_shadow_lock);
        memcpy(buf, ppd->congestion_entries_shadow, count);
        spin_unlock(&ppd->cc_shadow_lock);

        return count;
}

static const struct bin_attribute cc_setting_bin_attr = {
        .attr = {.name = "cc_settings_bin", .mode = 0444},
        .read = read_cc_setting_bin,
        .size = PAGE_SIZE,
};

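/*
 * sysfs_ops wrappers for the per-port attributes.  Some attributes are
 * registered with a NULL ->show or ->store (e.g. "loopback" is
 * write-only), so validate the callback before invoking it and return
 * -EIO otherwise.
 */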
static ssize_t qib_portattr_show(struct kobject *kobj,
        struct attribute *attr, char *buf)
{
        struct qib_port_attr *pattr =
                container_of(attr, struct qib_port_attr, attr);
        struct qib_pportdata *ppd =
                container_of(kobj, struct qib_pportdata, pport_kobj);

        if (!pattr->show)
                return -EIO;

        return pattr->show(ppd, buf);
}

static ssize_t qib_portattr_store(struct kobject *kobj,
        struct attribute *attr, const char *buf, size_t len)
{
        struct qib_port_attr *pattr =
                container_of(attr, struct qib_port_attr, attr);
        struct qib_pportdata *ppd =
                container_of(kobj, struct qib_pportdata, pport_kobj);

        if (!pattr->store)
                return -EIO;

        return pattr->store(ppd, buf, len);
}


static const struct sysfs_ops qib_port_ops = {
        .show = qib_portattr_show,
        .store = qib_portattr_store,
};

static struct kobj_type qib_port_ktype = {
        .release = qib_port_release,
        .sysfs_ops = &qib_port_ops,
        .default_attrs = port_default_attributes
};

/* Start sl2vl */

#define QIB_SL2VL_ATTR(N) \
        static struct qib_sl2vl_attr qib_sl2vl_attr_##N = { \
                .attr = { .name = __stringify(N), .mode = 0444 }, \
                .sl = N \
        }

struct qib_sl2vl_attr {
        struct attribute attr;
        int sl;
};

QIB_SL2VL_ATTR(0);
QIB_SL2VL_ATTR(1);
QIB_SL2VL_ATTR(2);
QIB_SL2VL_ATTR(3);
QIB_SL2VL_ATTR(4);
QIB_SL2VL_ATTR(5);
QIB_SL2VL_ATTR(6);
QIB_SL2VL_ATTR(7);
QIB_SL2VL_ATTR(8);
QIB_SL2VL_ATTR(9);
QIB_SL2VL_ATTR(10);
QIB_SL2VL_ATTR(11);
QIB_SL2VL_ATTR(12);
QIB_SL2VL_ATTR(13);
QIB_SL2VL_ATTR(14);
QIB_SL2VL_ATTR(15);

static struct attribute *sl2vl_default_attributes[] = {
        &qib_sl2vl_attr_0.attr,
        &qib_sl2vl_attr_1.attr,
        &qib_sl2vl_attr_2.attr,
        &qib_sl2vl_attr_3.attr,
        &qib_sl2vl_attr_4.attr,
        &qib_sl2vl_attr_5.attr,
        &qib_sl2vl_attr_6.attr,
        &qib_sl2vl_attr_7.attr,
        &qib_sl2vl_attr_8.attr,
        &qib_sl2vl_attr_9.attr,
        &qib_sl2vl_attr_10.attr,
        &qib_sl2vl_attr_11.attr,
        &qib_sl2vl_attr_12.attr,
        &qib_sl2vl_attr_13.attr,
        &qib_sl2vl_attr_14.attr,
        &qib_sl2vl_attr_15.attr,
        NULL
};

static ssize_t sl2vl_attr_show(struct kobject *kobj, struct attribute *attr,
                               char *buf)
{
        struct qib_sl2vl_attr *sattr =
                container_of(attr, struct qib_sl2vl_attr, attr);
        struct qib_pportdata *ppd =
                container_of(kobj, struct qib_pportdata, sl2vl_kobj);
        struct qib_ibport *qibp = &ppd->ibport_data;

        return sprintf(buf, "%u\n", qibp->sl_to_vl[sattr->sl]);
}

static const struct sysfs_ops qib_sl2vl_ops = {
        .show = sl2vl_attr_show,
};

static struct kobj_type qib_sl2vl_ktype = {
        .release = qib_port_release,
        .sysfs_ops = &qib_sl2vl_ops,
        .default_attrs = sl2vl_default_attributes
};

/* End sl2vl */

/* Start diag_counters */

#define QIB_DIAGC_ATTR(N) \
        static struct qib_diagc_attr qib_diagc_attr_##N = { \
                .attr = { .name = __stringify(N), .mode = 0664 }, \
                .counter = offsetof(struct qib_ibport, rvp.n_##N) \
        }

#define QIB_DIAGC_ATTR_PER_CPU(N) \
        static struct qib_diagc_attr qib_diagc_attr_##N = { \
                .attr = { .name = __stringify(N), .mode = 0664 }, \
                .counter = offsetof(struct qib_ibport, rvp.z_##N) \
        }

struct qib_diagc_attr {
        struct attribute attr;
        size_t counter;
};

QIB_DIAGC_ATTR_PER_CPU(rc_acks);
QIB_DIAGC_ATTR_PER_CPU(rc_qacks);
QIB_DIAGC_ATTR_PER_CPU(rc_delayed_comp);

QIB_DIAGC_ATTR(rc_resends);
QIB_DIAGC_ATTR(seq_naks);
QIB_DIAGC_ATTR(rdma_seq);
QIB_DIAGC_ATTR(rnr_naks);
QIB_DIAGC_ATTR(other_naks);
QIB_DIAGC_ATTR(rc_timeouts);
QIB_DIAGC_ATTR(loop_pkts);
QIB_DIAGC_ATTR(pkt_drops);
QIB_DIAGC_ATTR(dmawait);
QIB_DIAGC_ATTR(unaligned);
QIB_DIAGC_ATTR(rc_dupreq);
QIB_DIAGC_ATTR(rc_seqnak);
QIB_DIAGC_ATTR(rc_crwaits);

static struct attribute *diagc_default_attributes[] = {
        &qib_diagc_attr_rc_resends.attr,
        &qib_diagc_attr_rc_acks.attr,
        &qib_diagc_attr_rc_qacks.attr,
        &qib_diagc_attr_rc_delayed_comp.attr,
        &qib_diagc_attr_seq_naks.attr,
        &qib_diagc_attr_rdma_seq.attr,
        &qib_diagc_attr_rnr_naks.attr,
        &qib_diagc_attr_other_naks.attr,
        &qib_diagc_attr_rc_timeouts.attr,
        &qib_diagc_attr_loop_pkts.attr,
        &qib_diagc_attr_pkt_drops.attr,
        &qib_diagc_attr_dmawait.attr,
        &qib_diagc_attr_unaligned.attr,
        &qib_diagc_attr_rc_dupreq.attr,
        &qib_diagc_attr_rc_seqnak.attr,
        &qib_diagc_attr_rc_crwaits.attr,
        NULL
};

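/*
 * rc_acks, rc_qacks and rc_delayed_comp are kept as per-CPU counters in
 * rvp; the matching z_* fields record the total captured the last time
 * the counter was "zeroed".  Reads report (sum over all CPUs) - z_*, and
 * only a write of 0 is accepted, which simply resets that baseline.
 */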
static u64 get_all_cpu_total(u64 __percpu *cntr)
{
        int cpu;
        u64 counter = 0;

        for_each_possible_cpu(cpu)
                counter += *per_cpu_ptr(cntr, cpu);
        return counter;
}

#define def_write_per_cpu(cntr) \
static void write_per_cpu_##cntr(struct qib_pportdata *ppd, u32 data) \
{ \
        struct qib_devdata *dd = ppd->dd; \
        struct qib_ibport *qibp = &ppd->ibport_data; \
        /* A write can only zero the counter */ \
        if (data == 0) \
                qibp->rvp.z_##cntr = get_all_cpu_total(qibp->rvp.cntr); \
        else \
                qib_dev_err(dd, "Per CPU cntrs can only be zeroed"); \
}

def_write_per_cpu(rc_acks)
def_write_per_cpu(rc_qacks)
def_write_per_cpu(rc_delayed_comp)

#define READ_PER_CPU_CNTR(cntr) (get_all_cpu_total(qibp->rvp.cntr) - \
                                 qibp->rvp.z_##cntr)

static ssize_t diagc_attr_show(struct kobject *kobj, struct attribute *attr,
                               char *buf)
{
        struct qib_diagc_attr *dattr =
                container_of(attr, struct qib_diagc_attr, attr);
        struct qib_pportdata *ppd =
                container_of(kobj, struct qib_pportdata, diagc_kobj);
        struct qib_ibport *qibp = &ppd->ibport_data;

        if (!strncmp(dattr->attr.name, "rc_acks", 7))
                return sprintf(buf, "%llu\n", READ_PER_CPU_CNTR(rc_acks));
        else if (!strncmp(dattr->attr.name, "rc_qacks", 8))
                return sprintf(buf, "%llu\n", READ_PER_CPU_CNTR(rc_qacks));
        else if (!strncmp(dattr->attr.name, "rc_delayed_comp", 15))
                return sprintf(buf, "%llu\n",
                               READ_PER_CPU_CNTR(rc_delayed_comp));
        else
                return sprintf(buf, "%u\n",
                               *(u32 *)((char *)qibp + dattr->counter));
}

static ssize_t diagc_attr_store(struct kobject *kobj, struct attribute *attr,
                                const char *buf, size_t size)
{
        struct qib_diagc_attr *dattr =
                container_of(attr, struct qib_diagc_attr, attr);
        struct qib_pportdata *ppd =
                container_of(kobj, struct qib_pportdata, diagc_kobj);
        struct qib_ibport *qibp = &ppd->ibport_data;
        u32 val;
        int ret;

        ret = kstrtou32(buf, 0, &val);
        if (ret)
                return ret;

        if (!strncmp(dattr->attr.name, "rc_acks", 7))
                write_per_cpu_rc_acks(ppd, val);
        else if (!strncmp(dattr->attr.name, "rc_qacks", 8))
                write_per_cpu_rc_qacks(ppd, val);
        else if (!strncmp(dattr->attr.name, "rc_delayed_comp", 15))
                write_per_cpu_rc_delayed_comp(ppd, val);
        else
                *(u32 *)((char *)qibp + dattr->counter) = val;
        return size;
}

static const struct sysfs_ops qib_diagc_ops = {
        .show = diagc_attr_show,
        .store = diagc_attr_store,
};

static struct kobj_type qib_diagc_ktype = {
        .release = qib_port_release,
        .sysfs_ops = &qib_diagc_ops,
        .default_attrs = diagc_default_attributes
};

/* End diag_counters */

/* end of per-port file structures and support code */

/*
 * Start of per-unit (or driver, in some cases, but replicated
 * per unit) functions (these get a device *)
 */
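/*
 * Illustrative usage for the per-unit attributes defined below.  They are
 * collected into qib_attr_group at the end of this file and exposed when
 * the device is registered (outside this file); the path is an assumption
 * based on a device named qib0:
 *
 *   $ cat /sys/class/infiniband/qib0/hca_type
 *   $ cat /sys/class/infiniband/qib0/tempsense
 *   # echo reset > /sys/class/infiniband/qib0/chip_reset
 *     (only honoured while a diag client has the device open,
 *      see chip_reset_store())
 */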
static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
                           char *buf)
{
        struct qib_ibdev *dev =
                rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);

        return sprintf(buf, "%x\n", dd_from_dev(dev)->minrev);
}
static DEVICE_ATTR_RO(hw_rev);

static ssize_t hca_type_show(struct device *device,
                             struct device_attribute *attr, char *buf)
{
        struct qib_ibdev *dev =
                rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
        struct qib_devdata *dd = dd_from_dev(dev);
        int ret;

        if (!dd->boardname)
                ret = -EINVAL;
        else
                ret = scnprintf(buf, PAGE_SIZE, "%s\n", dd->boardname);
        return ret;
}
static DEVICE_ATTR_RO(hca_type);
static DEVICE_ATTR(board_id, 0444, hca_type_show, NULL);

static ssize_t version_show(struct device *device,
                            struct device_attribute *attr, char *buf)
{
        /* The string printed here is already newline-terminated. */
        return scnprintf(buf, PAGE_SIZE, "%s", (char *)ib_qib_version);
}
static DEVICE_ATTR_RO(version);

static ssize_t boardversion_show(struct device *device,
                                 struct device_attribute *attr, char *buf)
{
        struct qib_ibdev *dev =
                rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
        struct qib_devdata *dd = dd_from_dev(dev);

        /* The string printed here is already newline-terminated. */
        return scnprintf(buf, PAGE_SIZE, "%s", dd->boardversion);
}
static DEVICE_ATTR_RO(boardversion);

static ssize_t localbus_info_show(struct device *device,
                                  struct device_attribute *attr, char *buf)
{
        struct qib_ibdev *dev =
                rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
        struct qib_devdata *dd = dd_from_dev(dev);

        /* The string printed here is already newline-terminated. */
        return scnprintf(buf, PAGE_SIZE, "%s", dd->lbus_info);
}
static DEVICE_ATTR_RO(localbus_info);

static ssize_t nctxts_show(struct device *device,
                           struct device_attribute *attr, char *buf)
{
        struct qib_ibdev *dev =
                rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
        struct qib_devdata *dd = dd_from_dev(dev);

        /* Return the number of user ports (contexts) available. */
        /* The calculation below deals with a special case where
         * cfgctxts is set to 1 on a single-port board. */
        return scnprintf(buf, PAGE_SIZE, "%u\n",
                         (dd->first_user_ctxt > dd->cfgctxts) ? 0 :
                         (dd->cfgctxts - dd->first_user_ctxt));
}
static DEVICE_ATTR_RO(nctxts);

static ssize_t nfreectxts_show(struct device *device,
                               struct device_attribute *attr, char *buf)
{
        struct qib_ibdev *dev =
                rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
        struct qib_devdata *dd = dd_from_dev(dev);

        /* Return the number of free user ports (contexts) available. */
        return scnprintf(buf, PAGE_SIZE, "%u\n", dd->freectxts);
}
static DEVICE_ATTR_RO(nfreectxts);

static ssize_t serial_show(struct device *device,
                           struct device_attribute *attr, char *buf)
{
        struct qib_ibdev *dev =
                rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
        struct qib_devdata *dd = dd_from_dev(dev);

        buf[sizeof(dd->serial)] = '\0';
        memcpy(buf, dd->serial, sizeof(dd->serial));
        strcat(buf, "\n");
        return strlen(buf);
}
static DEVICE_ATTR_RO(serial);

static ssize_t chip_reset_store(struct device *device,
                                struct device_attribute *attr, const char *buf,
                                size_t count)
{
        struct qib_ibdev *dev =
                rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
        struct qib_devdata *dd = dd_from_dev(dev);
        int ret;

        if (count < 5 || memcmp(buf, "reset", 5) || !dd->diag_client) {
                ret = -EINVAL;
                goto bail;
        }

        ret = qib_reset_device(dd->unit);
bail:
        return ret < 0 ? ret : count;
}
static DEVICE_ATTR_WO(chip_reset);

/*
 * Dump tempsense regs. in decimal, to ease shell-scripts.
 */
static ssize_t tempsense_show(struct device *device,
                              struct device_attribute *attr, char *buf)
{
        struct qib_ibdev *dev =
                rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
        struct qib_devdata *dd = dd_from_dev(dev);
        int ret;
        int idx;
        u8 regvals[8];

        ret = -ENXIO;
        for (idx = 0; idx < 8; ++idx) {
                if (idx == 6)
                        continue;
                ret = dd->f_tempsense_rd(dd, idx);
                if (ret < 0)
                        break;
                regvals[idx] = ret;
        }
        if (idx == 8)
                ret = scnprintf(buf, PAGE_SIZE, "%d %d %02X %02X %d %d\n",
                                *(signed char *)(regvals),
                                *(signed char *)(regvals + 1),
                                regvals[2], regvals[3],
                                *(signed char *)(regvals + 5),
                                *(signed char *)(regvals + 7));
        return ret;
}
static DEVICE_ATTR_RO(tempsense);

/*
 * end of per-unit (or driver, in some cases, but replicated
 * per unit) functions
 */

/* start of per-unit file structures and support code */
static struct attribute *qib_attributes[] = {
        &dev_attr_hw_rev.attr,
        &dev_attr_hca_type.attr,
        &dev_attr_board_id.attr,
        &dev_attr_version.attr,
        &dev_attr_nctxts.attr,
        &dev_attr_nfreectxts.attr,
        &dev_attr_serial.attr,
        &dev_attr_boardversion.attr,
        &dev_attr_tempsense.attr,
        &dev_attr_localbus_info.attr,
        &dev_attr_chip_reset.attr,
        NULL,
};

const struct attribute_group qib_attr_group = {
        .attrs = qib_attributes,
};

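/*
 * Create the per-port sysfs hierarchy under the port kobject supplied by
 * the caller: "linkcontrol", "sl2vl" and "diag_counters" directories,
 * plus a "CCMgtA" directory with the two congestion-control binary
 * attributes when a congestion-control table has been allocated.
 */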
int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
                          struct kobject *kobj)
{
        struct qib_pportdata *ppd;
        struct qib_devdata *dd = dd_from_ibdev(ibdev);
        int ret;

        if (!port_num || port_num > dd->num_pports) {
                qib_dev_err(dd,
                        "Skipping infiniband class with invalid port %u\n",
                        port_num);
                ret = -ENODEV;
                goto bail;
        }
        ppd = &dd->pport[port_num - 1];

        ret = kobject_init_and_add(&ppd->pport_kobj, &qib_port_ktype, kobj,
                                   "linkcontrol");
        if (ret) {
                qib_dev_err(dd,
                        "Skipping linkcontrol sysfs info, (err %d) port %u\n",
                        ret, port_num);
                goto bail;
        }
        kobject_uevent(&ppd->pport_kobj, KOBJ_ADD);

        ret = kobject_init_and_add(&ppd->sl2vl_kobj, &qib_sl2vl_ktype, kobj,
                                   "sl2vl");
        if (ret) {
                qib_dev_err(dd,
                        "Skipping sl2vl sysfs info, (err %d) port %u\n",
                        ret, port_num);
                goto bail_link;
        }
        kobject_uevent(&ppd->sl2vl_kobj, KOBJ_ADD);

        ret = kobject_init_and_add(&ppd->diagc_kobj, &qib_diagc_ktype, kobj,
                                   "diag_counters");
        if (ret) {
                qib_dev_err(dd,
                        "Skipping diag_counters sysfs info, (err %d) port %u\n",
                        ret, port_num);
                goto bail_sl;
        }
        kobject_uevent(&ppd->diagc_kobj, KOBJ_ADD);

        if (!qib_cc_table_size || !ppd->congestion_entries_shadow)
                return 0;

        ret = kobject_init_and_add(&ppd->pport_cc_kobj, &qib_port_cc_ktype,
                                   kobj, "CCMgtA");
        if (ret) {
                qib_dev_err(dd,
                        "Skipping Congestion Control sysfs info, (err %d) port %u\n",
                        ret, port_num);
                goto bail_diagc;
        }

        kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD);

        ret = sysfs_create_bin_file(&ppd->pport_cc_kobj,
                                    &cc_setting_bin_attr);
        if (ret) {
                qib_dev_err(dd,
                        "Skipping Congestion Control setting sysfs info, (err %d) port %u\n",
                        ret, port_num);
                goto bail_cc;
        }

        ret = sysfs_create_bin_file(&ppd->pport_cc_kobj,
                                    &cc_table_bin_attr);
        if (ret) {
                qib_dev_err(dd,
                        "Skipping Congestion Control table sysfs info, (err %d) port %u\n",
                        ret, port_num);
                goto bail_cc_entry_bin;
        }

        qib_devinfo(dd->pcidev,
                "IB%u: Congestion Control Agent enabled for port %d\n",
                dd->unit, port_num);

        return 0;

bail_cc_entry_bin:
        sysfs_remove_bin_file(&ppd->pport_cc_kobj, &cc_setting_bin_attr);
bail_cc:
        kobject_put(&ppd->pport_cc_kobj);
bail_diagc:
        kobject_put(&ppd->diagc_kobj);
bail_sl:
        kobject_put(&ppd->sl2vl_kobj);
bail_link:
        kobject_put(&ppd->pport_kobj);
bail:
        return ret;
}

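/*
 * Tear down exactly what qib_create_port_files() set up; the
 * congestion-control kobject and its binary files only exist when
 * qib_cc_table_size and the shadow table were allocated.
 */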
/*
 * Unregister and remove our files in /sys/class/infiniband.
 */
void qib_verbs_unregister_sysfs(struct qib_devdata *dd)
{
        struct qib_pportdata *ppd;
        int i;

        for (i = 0; i < dd->num_pports; i++) {
                ppd = &dd->pport[i];
                if (qib_cc_table_size &&
                    ppd->congestion_entries_shadow) {
                        sysfs_remove_bin_file(&ppd->pport_cc_kobj,
                                              &cc_setting_bin_attr);
                        sysfs_remove_bin_file(&ppd->pport_cc_kobj,
                                              &cc_table_bin_attr);
                        kobject_put(&ppd->pport_cc_kobj);
                }
                kobject_put(&ppd->sl2vl_kobj);
                kobject_put(&ppd->pport_kobj);
        }
}