/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"

enum {
        QP_PID,
        QP_STATE,
        QP_XPORT,
        QP_MTU,
        QP_N_RECV,
        QP_RECV_SZ,
        QP_N_SEND,
        QP_LOG_PG_SZ,
        QP_RQPN,
};

static char *qp_fields[] = {
        [QP_PID]        = "pid",
        [QP_STATE]      = "state",
        [QP_XPORT]      = "transport",
        [QP_MTU]        = "mtu",
        [QP_N_RECV]     = "num_recv",
        [QP_RECV_SZ]    = "rcv_wqe_sz",
        [QP_N_SEND]     = "num_send",
        [QP_LOG_PG_SZ]  = "log2_page_sz",
        [QP_RQPN]       = "remote_qpn",
};

enum {
        EQ_NUM_EQES,
        EQ_INTR,
        EQ_LOG_PG_SZ,
};

static char *eq_fields[] = {
        [EQ_NUM_EQES]   = "num_eqes",
        [EQ_INTR]       = "intr",
        [EQ_LOG_PG_SZ]  = "log_page_size",
};

enum {
        CQ_PID,
        CQ_NUM_CQES,
        CQ_LOG_PG_SZ,
};

static char *cq_fields[] = {
        [CQ_PID]        = "pid",
        [CQ_NUM_CQES]   = "num_cqes",
        [CQ_LOG_PG_SZ]  = "log_page_size",
};

struct dentry *mlx5_debugfs_root;
EXPORT_SYMBOL(mlx5_debugfs_root);

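/*
 * mlx5_debugfs_root is the driver-wide debugfs root ("mlx5"); it stays NULL
 * when debugfs is unavailable, and the helpers below quietly do nothing in
 * that case.
 */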
void mlx5_register_debugfs(void)
{
        mlx5_debugfs_root = debugfs_create_dir("mlx5", NULL);
        if (IS_ERR_OR_NULL(mlx5_debugfs_root))
                mlx5_debugfs_root = NULL;
}

void mlx5_unregister_debugfs(void)
{
        debugfs_remove(mlx5_debugfs_root);
}

int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev)
{
        if (!mlx5_debugfs_root)
                return 0;

        atomic_set(&dev->num_qps, 0);

        dev->priv.qp_debugfs = debugfs_create_dir("QPs", dev->priv.dbg_root);
        if (!dev->priv.qp_debugfs)
                return -ENOMEM;

        return 0;
}

void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev)
{
        if (!mlx5_debugfs_root)
                return;

        debugfs_remove_recursive(dev->priv.qp_debugfs);
}

int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev)
{
        if (!mlx5_debugfs_root)
                return 0;

        dev->priv.eq_debugfs = debugfs_create_dir("EQs", dev->priv.dbg_root);
        if (!dev->priv.eq_debugfs)
                return -ENOMEM;

        return 0;
}

void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev)
{
        if (!mlx5_debugfs_root)
                return;

        debugfs_remove_recursive(dev->priv.eq_debugfs);
}

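/*
 * Report the running average (sum / n) of the samples accumulated for one
 * command opcode, taken under stats->lock.
 */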
static ssize_t average_read(struct file *filp, char __user *buf, size_t count,
                            loff_t *pos)
{
        struct mlx5_cmd_stats *stats;
        u64 field = 0;
        int ret;
        char tbuf[22];

        if (*pos)
                return 0;

        stats = filp->private_data;
        spin_lock_irq(&stats->lock);
        if (stats->n)
                field = div64_u64(stats->sum, stats->n);
        spin_unlock_irq(&stats->lock);
        ret = snprintf(tbuf, sizeof(tbuf), "%llu\n", field);
        if (ret > 0) {
                if (copy_to_user(buf, tbuf, ret))
                        return -EFAULT;
        }

        *pos += ret;
        return ret;
}

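/* Any write resets the accumulated sum and sample count for this command. */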
static ssize_t average_write(struct file *filp, const char __user *buf,
                             size_t count, loff_t *pos)
{
        struct mlx5_cmd_stats *stats;

        stats = filp->private_data;
        spin_lock_irq(&stats->lock);
        stats->sum = 0;
        stats->n = 0;
        spin_unlock_irq(&stats->lock);

        *pos += count;

        return count;
}

static const struct file_operations stats_fops = {
        .owner  = THIS_MODULE,
        .open   = simple_open,
        .read   = average_read,
        .write  = average_write,
};

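/*
 * Build the command-interface statistics tree: a "commands" directory with
 * one subdirectory per known command opcode, each holding an "average" file
 * (read the mean, write to reset) and an "n" counter of samples.
 */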
int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
{
        struct mlx5_cmd_stats *stats;
        struct dentry **cmd;
        const char *namep;
        int err;
        int i;

        if (!mlx5_debugfs_root)
                return 0;

        cmd = &dev->priv.cmdif_debugfs;
        *cmd = debugfs_create_dir("commands", dev->priv.dbg_root);
        if (!*cmd)
                return -ENOMEM;

        for (i = 0; i < ARRAY_SIZE(dev->cmd.stats); i++) {
                stats = &dev->cmd.stats[i];
                namep = mlx5_command_str(i);
                if (strcmp(namep, "unknown command opcode")) {
                        stats->root = debugfs_create_dir(namep, *cmd);
                        if (!stats->root) {
                                mlx5_core_warn(dev, "failed adding command %d\n",
                                               i);
                                err = -ENOMEM;
                                goto out;
                        }

                        stats->avg = debugfs_create_file("average", 0400,
                                                         stats->root, stats,
                                                         &stats_fops);
                        if (!stats->avg) {
                                mlx5_core_warn(dev, "failed creating debugfs file\n");
                                err = -ENOMEM;
                                goto out;
                        }

                        stats->count = debugfs_create_u64("n", 0400,
                                                          stats->root,
                                                          &stats->n);
                        if (!stats->count) {
                                mlx5_core_warn(dev, "failed creating debugfs file\n");
                                err = -ENOMEM;
                                goto out;
                        }
                }
        }

        return 0;
out:
        debugfs_remove_recursive(dev->priv.cmdif_debugfs);
        return err;
}

void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev)
{
        if (!mlx5_debugfs_root)
                return;

        debugfs_remove_recursive(dev->priv.cmdif_debugfs);
}

int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev)
{
        if (!mlx5_debugfs_root)
                return 0;

        dev->priv.cq_debugfs = debugfs_create_dir("CQs", dev->priv.dbg_root);
        if (!dev->priv.cq_debugfs)
                return -ENOMEM;

        return 0;
}

void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev)
{
        if (!mlx5_debugfs_root)
                return;

        debugfs_remove_recursive(dev->priv.cq_debugfs);
}

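/*
 * Query the QP from firmware and pull out the field selected by @index.
 * String-valued fields (state, transport) return a pointer disguised as a
 * u64 and set *is_str so the caller prints it with %s.
 */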
static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
                         int index, int *is_str)
{
        int outlen = MLX5_ST_SZ_BYTES(query_qp_out);
        struct mlx5_qp_context *ctx;
        u64 param = 0;
        u32 *out;
        int err;
        int no_sq;

        out = kzalloc(outlen, GFP_KERNEL);
        if (!out)
                return param;

        err = mlx5_core_qp_query(dev, qp, out, outlen);
        if (err) {
                mlx5_core_warn(dev, "failed to query qp err=%d\n", err);
                goto out;
        }

        *is_str = 0;

        /* FIXME: use MLX5_GET rather than mlx5_qp_context manual struct */
        ctx = (struct mlx5_qp_context *)MLX5_ADDR_OF(query_qp_out, out, qpc);

        switch (index) {
        case QP_PID:
                param = qp->pid;
                break;
        case QP_STATE:
                param = (unsigned long)mlx5_qp_state_str(be32_to_cpu(ctx->flags) >> 28);
                *is_str = 1;
                break;
        case QP_XPORT:
                param = (unsigned long)mlx5_qp_type_str((be32_to_cpu(ctx->flags) >> 16) & 0xff);
                *is_str = 1;
                break;
        case QP_MTU:
                switch (ctx->mtu_msgmax >> 5) {
                case IB_MTU_256:
                        param = 256;
                        break;
                case IB_MTU_512:
                        param = 512;
                        break;
                case IB_MTU_1024:
                        param = 1024;
                        break;
                case IB_MTU_2048:
                        param = 2048;
                        break;
                case IB_MTU_4096:
                        param = 4096;
                        break;
                default:
                        param = 0;
                }
                break;
        case QP_N_RECV:
                param = 1 << ((ctx->rq_size_stride >> 3) & 0xf);
                break;
        case QP_RECV_SZ:
                param = 1 << ((ctx->rq_size_stride & 7) + 4);
                break;
        case QP_N_SEND:
                no_sq = be16_to_cpu(ctx->sq_crq_size) >> 15;
                if (!no_sq)
                        param = 1 << (be16_to_cpu(ctx->sq_crq_size) >> 11);
                else
                        param = 0;
                break;
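        /* firmware reports the page size as log2 in 4K units; +12 gives log2(bytes) */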
        case QP_LOG_PG_SZ:
                param = (be32_to_cpu(ctx->log_pg_sz_remote_qpn) >> 24) & 0x1f;
                param += 12;
                break;
        case QP_RQPN:
                param = be32_to_cpu(ctx->log_pg_sz_remote_qpn) & 0xffffff;
                break;
        }

out:
        kfree(out);
        return param;
}

static u64 eq_read_field(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
                         int index)
{
        int outlen = MLX5_ST_SZ_BYTES(query_eq_out);
        u64 param = 0;
        void *ctx;
        u32 *out;
        int err;

        out = kzalloc(outlen, GFP_KERNEL);
        if (!out)
                return param;

        err = mlx5_core_eq_query(dev, eq, out, outlen);
        if (err) {
                mlx5_core_warn(dev, "failed to query eq\n");
                goto out;
        }
        ctx = MLX5_ADDR_OF(query_eq_out, out, eq_context_entry);

        switch (index) {
        case EQ_NUM_EQES:
                param = 1 << MLX5_GET(eqc, ctx, log_eq_size);
                break;
        case EQ_INTR:
                param = MLX5_GET(eqc, ctx, intr);
                break;
        case EQ_LOG_PG_SZ:
                param = MLX5_GET(eqc, ctx, log_page_size) + 12;
                break;
        }

out:
        kfree(out);
        return param;
}

static u64 cq_read_field(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
                         int index)
{
        int outlen = MLX5_ST_SZ_BYTES(query_cq_out);
        u64 param = 0;
        void *ctx;
        u32 *out;
        int err;

        out = mlx5_vzalloc(outlen);
        if (!out)
                return param;

        err = mlx5_core_query_cq(dev, cq, out, outlen);
        if (err) {
                mlx5_core_warn(dev, "failed to query cq\n");
                goto out;
        }
        ctx = MLX5_ADDR_OF(query_cq_out, out, cq_context);

        switch (index) {
        case CQ_PID:
                param = cq->pid;
                break;
        case CQ_NUM_CQES:
                param = 1 << MLX5_GET(cqc, ctx, log_cq_size);
                break;
        case CQ_LOG_PG_SZ:
                param = MLX5_GET(cqc, ctx, log_page_size);
                break;
        }

out:
        kvfree(out);
        return param;
}

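/*
 * Common read handler for every per-field debugfs file; recovers the owning
 * mlx5_rsc_debug and dispatches on its resource type (QP, EQ or CQ).
 */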
static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count,
                        loff_t *pos)
{
        struct mlx5_field_desc *desc;
        struct mlx5_rsc_debug *d;
        char tbuf[18];
        int is_str = 0;
        u64 field;
        int ret;

        if (*pos)
                return 0;

        desc = filp->private_data;
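        /*
         * desc is d->fields[desc->i]; step back desc->i entries and then past
         * the mlx5_rsc_debug header to recover the containing structure.
         */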
        d = (void *)(desc - desc->i) - sizeof(*d);
        switch (d->type) {
        case MLX5_DBG_RSC_QP:
                field = qp_read_field(d->dev, d->object, desc->i, &is_str);
                break;

        case MLX5_DBG_RSC_EQ:
                field = eq_read_field(d->dev, d->object, desc->i);
                break;

        case MLX5_DBG_RSC_CQ:
                field = cq_read_field(d->dev, d->object, desc->i);
                break;

        default:
                mlx5_core_warn(d->dev, "invalid resource type %d\n", d->type);
                return -EINVAL;
        }

        if (is_str)
                ret = snprintf(tbuf, sizeof(tbuf), "%s\n", (const char *)(unsigned long)field);
        else
                ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field);

        if (ret > 0) {
                if (copy_to_user(buf, tbuf, ret))
                        return -EFAULT;
        }

        *pos += ret;
        return ret;
}

static const struct file_operations fops = {
        .owner  = THIS_MODULE,
        .open   = simple_open,
        .read   = dbg_read,
};

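/*
 * Create one debugfs directory per resource, named by its number in hex,
 * with a read-only file for each field; the field descriptors live in the
 * flexible array at the end of mlx5_rsc_debug so dbg_read() can find its
 * way back to the container.
 */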
static int add_res_tree(struct mlx5_core_dev *dev, enum dbg_rsc_type type,
                        struct dentry *root, struct mlx5_rsc_debug **dbg,
                        int rsn, char **field, int nfile, void *data)
{
        struct mlx5_rsc_debug *d;
        char resn[32];
        int err;
        int i;

        d = kzalloc(sizeof(*d) + nfile * sizeof(d->fields[0]), GFP_KERNEL);
        if (!d)
                return -ENOMEM;

        d->dev = dev;
        d->object = data;
        d->type = type;
        sprintf(resn, "0x%x", rsn);
        d->root = debugfs_create_dir(resn, root);
        if (!d->root) {
                err = -ENOMEM;
                goto out_free;
        }

        for (i = 0; i < nfile; i++) {
                d->fields[i].i = i;
                d->fields[i].dent = debugfs_create_file(field[i], 0400,
                                                        d->root, &d->fields[i],
                                                        &fops);
                if (!d->fields[i].dent) {
                        err = -ENOMEM;
                        goto out_rem;
                }
        }
        *dbg = d;

        return 0;
out_rem:
        debugfs_remove_recursive(d->root);

out_free:
        kfree(d);
        return err;
}

static void rem_res_tree(struct mlx5_rsc_debug *d)
{
        debugfs_remove_recursive(d->root);
        kfree(d);
}

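/*
 * Add a per-QP directory (named by the QPN) under the device's QPs debugfs
 * directory; silently does nothing when debugfs is unavailable.
 */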
int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
{
        int err;

        if (!mlx5_debugfs_root)
                return 0;

        err = add_res_tree(dev, MLX5_DBG_RSC_QP, dev->priv.qp_debugfs,
                           &qp->dbg, qp->qpn, qp_fields,
                           ARRAY_SIZE(qp_fields), qp);
        if (err)
                qp->dbg = NULL;

        return err;
}

void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
{
        if (!mlx5_debugfs_root)
                return;

        if (qp->dbg)
                rem_res_tree(qp->dbg);
}

int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
        int err;

        if (!mlx5_debugfs_root)
                return 0;

        err = add_res_tree(dev, MLX5_DBG_RSC_EQ, dev->priv.eq_debugfs,
                           &eq->dbg, eq->eqn, eq_fields,
                           ARRAY_SIZE(eq_fields), eq);
        if (err)
                eq->dbg = NULL;

        return err;
}

void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
        if (!mlx5_debugfs_root)
                return;

        if (eq->dbg)
                rem_res_tree(eq->dbg);
}

int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
        int err;

        if (!mlx5_debugfs_root)
                return 0;

        err = add_res_tree(dev, MLX5_DBG_RSC_CQ, dev->priv.cq_debugfs,
                           &cq->dbg, cq->cqn, cq_fields,
                           ARRAY_SIZE(cq_fields), cq);
        if (err)
                cq->dbg = NULL;

        return err;
}

void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
        if (!mlx5_debugfs_root)
                return;

        if (cq->dbg)
                rem_res_tree(cq->dbg);
}