/* drivers/net/ethernet/mellanox/mlx5/core/debugfs.c */

/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"

enum {
	QP_PID,
	QP_STATE,
	QP_XPORT,
	QP_MTU,
	QP_N_RECV,
	QP_RECV_SZ,
	QP_N_SEND,
	QP_LOG_PG_SZ,
	QP_RQPN,
};

static char *qp_fields[] = {
	[QP_PID]	= "pid",
	[QP_STATE]	= "state",
	[QP_XPORT]	= "transport",
	[QP_MTU]	= "mtu",
	[QP_N_RECV]	= "num_recv",
	[QP_RECV_SZ]	= "rcv_wqe_sz",
	[QP_N_SEND]	= "num_send",
	[QP_LOG_PG_SZ]	= "log2_page_sz",
	[QP_RQPN]	= "remote_qpn",
};

enum {
	EQ_NUM_EQES,
	EQ_INTR,
	EQ_LOG_PG_SZ,
};

static char *eq_fields[] = {
	[EQ_NUM_EQES]	= "num_eqes",
	[EQ_INTR]	= "intr",
	[EQ_LOG_PG_SZ]	= "log_page_size",
};

enum {
	CQ_PID,
	CQ_NUM_CQES,
	CQ_LOG_PG_SZ,
};

static char *cq_fields[] = {
	[CQ_PID]	= "pid",
	[CQ_NUM_CQES]	= "num_cqes",
	[CQ_LOG_PG_SZ]	= "log_page_size",
};

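/* Top-level "mlx5" debugfs directory; exported so that other mlx5 modules
 * can create their own entries beneath it.
 */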
struct dentry *mlx5_debugfs_root;
EXPORT_SYMBOL(mlx5_debugfs_root);

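/* Create/remove the global "mlx5" directory. If creation fails, the root is
 * left NULL, which turns every other debugfs hook in this file into a no-op.
 */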
void mlx5_register_debugfs(void)
{
	mlx5_debugfs_root = debugfs_create_dir("mlx5", NULL);
	if (IS_ERR_OR_NULL(mlx5_debugfs_root))
		mlx5_debugfs_root = NULL;
}

void mlx5_unregister_debugfs(void)
{
	debugfs_remove(mlx5_debugfs_root);
}

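/* Per-device "QPs" directory under the device's debugfs root; individual
 * QPs are added below it by mlx5_debug_qp_add().
 */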
int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev)
{
	if (!mlx5_debugfs_root)
		return 0;

	atomic_set(&dev->num_qps, 0);

	dev->priv.qp_debugfs = debugfs_create_dir("QPs", dev->priv.dbg_root);
	if (!dev->priv.qp_debugfs)
		return -ENOMEM;

	return 0;
}

void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev)
{
	if (!mlx5_debugfs_root)
		return;

	debugfs_remove_recursive(dev->priv.qp_debugfs);
}

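/* Per-device "EQs" directory; individual EQs are added below it by
 * mlx5_debug_eq_add().
 */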
int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev)
{
	if (!mlx5_debugfs_root)
		return 0;

	dev->priv.eq_debugfs = debugfs_create_dir("EQs", dev->priv.dbg_root);
	if (!dev->priv.eq_debugfs)
		return -ENOMEM;

	return 0;
}

void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev)
{
	if (!mlx5_debugfs_root)
		return;

	debugfs_remove_recursive(dev->priv.eq_debugfs);
}

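/* Read handler for the per-command "average" file: report the mean of the
 * accumulated command statistics (sum / n), or 0 if nothing has been
 * accounted yet.
 */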
static ssize_t average_read(struct file *filp, char __user *buf, size_t count,
			    loff_t *pos)
{
	struct mlx5_cmd_stats *stats;
	u64 field = 0;
	int ret;
	char tbuf[22];

	if (*pos)
		return 0;

	stats = filp->private_data;
	spin_lock_irq(&stats->lock);
	if (stats->n)
		field = div64_u64(stats->sum, stats->n);
	spin_unlock_irq(&stats->lock);
	ret = snprintf(tbuf, sizeof(tbuf), "%llu\n", field);
	if (ret > 0) {
		if (copy_to_user(buf, tbuf, ret))
			return -EFAULT;
	}

	*pos += ret;
	return ret;
}

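/* Any write to the "average" file resets the accumulated statistics. */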
static ssize_t average_write(struct file *filp, const char __user *buf,
			     size_t count, loff_t *pos)
{
	struct mlx5_cmd_stats *stats;

	stats = filp->private_data;
	spin_lock_irq(&stats->lock);
	stats->sum = 0;
	stats->n = 0;
	spin_unlock_irq(&stats->lock);

	*pos += count;

	return count;
}

static const struct file_operations stats_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.read	= average_read,
	.write	= average_write,
};

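/* Build the "commands" directory: one sub-directory per known command
 * opcode, each holding an "average" file (stats_fops above) and an "n"
 * file exposing the raw sample count.
 */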
int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_stats *stats;
	struct dentry **cmd;
	const char *namep;
	int err;
	int i;

	if (!mlx5_debugfs_root)
		return 0;

	cmd = &dev->priv.cmdif_debugfs;
	*cmd = debugfs_create_dir("commands", dev->priv.dbg_root);
	if (!*cmd)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(dev->cmd.stats); i++) {
		stats = &dev->cmd.stats[i];
		namep = mlx5_command_str(i);
		if (strcmp(namep, "unknown command opcode")) {
			stats->root = debugfs_create_dir(namep, *cmd);
			if (!stats->root) {
				mlx5_core_warn(dev, "failed adding command %d\n",
					       i);
				err = -ENOMEM;
				goto out;
			}

			stats->avg = debugfs_create_file("average", 0400,
							 stats->root, stats,
							 &stats_fops);
			if (!stats->avg) {
				mlx5_core_warn(dev, "failed creating debugfs file\n");
				err = -ENOMEM;
				goto out;
			}

			stats->count = debugfs_create_u64("n", 0400,
							  stats->root,
							  &stats->n);
			if (!stats->count) {
				mlx5_core_warn(dev, "failed creating debugfs file\n");
				err = -ENOMEM;
				goto out;
			}
		}
	}

	return 0;
out:
	debugfs_remove_recursive(dev->priv.cmdif_debugfs);
	return err;
}

void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev)
{
	if (!mlx5_debugfs_root)
		return;

	debugfs_remove_recursive(dev->priv.cmdif_debugfs);
}

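/* Per-device "CQs" directory; individual CQs are added below it by
 * mlx5_debug_cq_add().
 */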
int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev)
{
	if (!mlx5_debugfs_root)
		return 0;

	dev->priv.cq_debugfs = debugfs_create_dir("CQs", dev->priv.dbg_root);
	if (!dev->priv.cq_debugfs)
		return -ENOMEM;

	return 0;
}

void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev)
{
	if (!mlx5_debugfs_root)
		return;

	debugfs_remove_recursive(dev->priv.cq_debugfs);
}

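/* Query the QP from firmware and decode the field selected by @index.
 * String-valued fields (state, transport) return a pointer cast to u64
 * and set *is_str. On query or allocation failure, 0 is returned.
 */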
static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
			 int index, int *is_str)
{
	int outlen = MLX5_ST_SZ_BYTES(query_qp_out);
	struct mlx5_qp_context *ctx;
	u64 param = 0;
	u32 *out;
	int err;
	int no_sq;

	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return param;

	err = mlx5_core_qp_query(dev, qp, out, outlen);
	if (err) {
		mlx5_core_warn(dev, "failed to query qp err=%d\n", err);
		goto out;
	}

	*is_str = 0;

	/* FIXME: use MLX5_GET rather than mlx5_qp_context manual struct */
	ctx = (struct mlx5_qp_context *)MLX5_ADDR_OF(query_qp_out, out, qpc);

	switch (index) {
	case QP_PID:
		param = qp->pid;
		break;
	case QP_STATE:
		param = (unsigned long)mlx5_qp_state_str(be32_to_cpu(ctx->flags) >> 28);
		*is_str = 1;
		break;
	case QP_XPORT:
		param = (unsigned long)mlx5_qp_type_str((be32_to_cpu(ctx->flags) >> 16) & 0xff);
		*is_str = 1;
		break;
	case QP_MTU:
		switch (ctx->mtu_msgmax >> 5) {
		case IB_MTU_256:
			param = 256;
			break;
		case IB_MTU_512:
			param = 512;
			break;
		case IB_MTU_1024:
			param = 1024;
			break;
		case IB_MTU_2048:
			param = 2048;
			break;
		case IB_MTU_4096:
			param = 4096;
			break;
		default:
			param = 0;
		}
		break;
	case QP_N_RECV:
		param = 1 << ((ctx->rq_size_stride >> 3) & 0xf);
		break;
	case QP_RECV_SZ:
		param = 1 << ((ctx->rq_size_stride & 7) + 4);
		break;
	case QP_N_SEND:
		no_sq = be16_to_cpu(ctx->sq_crq_size) >> 15;
		if (!no_sq)
			param = 1 << (be16_to_cpu(ctx->sq_crq_size) >> 11);
		else
			param = 0;
		break;
	case QP_LOG_PG_SZ:
		param = (be32_to_cpu(ctx->log_pg_sz_remote_qpn) >> 24) & 0x1f;
		param += 12;
		break;
	case QP_RQPN:
		param = be32_to_cpu(ctx->log_pg_sz_remote_qpn) & 0xffffff;
		break;
	}

out:
	kfree(out);
	return param;
}

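/* Query the EQ context and decode the field selected by @index. */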
static u64 eq_read_field(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
			 int index)
{
	int outlen = MLX5_ST_SZ_BYTES(query_eq_out);
	u64 param = 0;
	void *ctx;
	u32 *out;
	int err;

	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return param;

	err = mlx5_core_eq_query(dev, eq, out, outlen);
	if (err) {
		mlx5_core_warn(dev, "failed to query eq\n");
		goto out;
	}
	ctx = MLX5_ADDR_OF(query_eq_out, out, eq_context_entry);

	switch (index) {
	case EQ_NUM_EQES:
		param = 1 << MLX5_GET(eqc, ctx, log_eq_size);
		break;
	case EQ_INTR:
		param = MLX5_GET(eqc, ctx, intr);
		break;
	case EQ_LOG_PG_SZ:
		param = MLX5_GET(eqc, ctx, log_page_size) + 12;
		break;
	}

out:
	kfree(out);
	return param;
}

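/* Query the CQ context and decode the field selected by @index. */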
static u64 cq_read_field(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
			 int index)
{
	int outlen = MLX5_ST_SZ_BYTES(query_cq_out);
	u64 param = 0;
	void *ctx;
	u32 *out;
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return param;

	err = mlx5_core_query_cq(dev, cq, out, outlen);
	if (err) {
		mlx5_core_warn(dev, "failed to query cq\n");
		goto out;
	}
	ctx = MLX5_ADDR_OF(query_cq_out, out, cq_context);

	switch (index) {
	case CQ_PID:
		param = cq->pid;
		break;
	case CQ_NUM_CQES:
		param = 1 << MLX5_GET(cqc, ctx, log_cq_size);
		break;
	case CQ_LOG_PG_SZ:
		param = MLX5_GET(cqc, ctx, log_page_size);
		break;
	}

out:
	kvfree(out);
	return param;
}

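/* Generic read handler for the per-field files created by add_res_tree():
 * recover the owning resource from the field descriptor and dispatch to
 * the matching *_read_field() helper.
 */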
static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count,
			loff_t *pos)
{
	struct mlx5_field_desc *desc;
	struct mlx5_rsc_debug *d;
	char tbuf[18];
	int is_str = 0;
	u64 field;
	int ret;

	if (*pos)
		return 0;

	desc = filp->private_data;
	/* The fields[] array sits right behind struct mlx5_rsc_debug and
	 * desc->i is this descriptor's index, so step back to fields[0]
	 * and then over the struct header to reach the owning resource.
	 */
	d = (void *)(desc - desc->i) - sizeof(*d);
	switch (d->type) {
	case MLX5_DBG_RSC_QP:
		field = qp_read_field(d->dev, d->object, desc->i, &is_str);
		break;

	case MLX5_DBG_RSC_EQ:
		field = eq_read_field(d->dev, d->object, desc->i);
		break;

	case MLX5_DBG_RSC_CQ:
		field = cq_read_field(d->dev, d->object, desc->i);
		break;

	default:
		mlx5_core_warn(d->dev, "invalid resource type %d\n", d->type);
		return -EINVAL;
	}

	if (is_str)
		ret = snprintf(tbuf, sizeof(tbuf), "%s\n", (const char *)(unsigned long)field);
	else
		ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field);

	if (ret > 0) {
		if (copy_to_user(buf, tbuf, ret))
			return -EFAULT;
	}

	*pos += ret;
	return ret;
}

static const struct file_operations fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.read	= dbg_read,
};

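/* Create a debugfs directory for a single resource, named after its number
 * in hex, with one read-only file per entry in @field.
 */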
static int add_res_tree(struct mlx5_core_dev *dev, enum dbg_rsc_type type,
			struct dentry *root, struct mlx5_rsc_debug **dbg,
			int rsn, char **field, int nfile, void *data)
{
	struct mlx5_rsc_debug *d;
	char resn[32];
	int err;
	int i;

	d = kzalloc(sizeof(*d) + nfile * sizeof(d->fields[0]), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->dev = dev;
	d->object = data;
	d->type = type;
	sprintf(resn, "0x%x", rsn);
	d->root = debugfs_create_dir(resn, root);
	if (!d->root) {
		err = -ENOMEM;
		goto out_free;
	}

	for (i = 0; i < nfile; i++) {
		d->fields[i].i = i;
		d->fields[i].dent = debugfs_create_file(field[i], 0400,
							d->root, &d->fields[i],
							&fops);
		if (!d->fields[i].dent) {
			err = -ENOMEM;
			goto out_rem;
		}
	}
	*dbg = d;

	return 0;
out_rem:
	debugfs_remove_recursive(d->root);

out_free:
	kfree(d);
	return err;
}

static void rem_res_tree(struct mlx5_rsc_debug *d)
{
	debugfs_remove_recursive(d->root);
	kfree(d);
}

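/* Add/remove an individual QP under the device's "QPs" directory; the EQ
 * and CQ variants below do the same for "EQs" and "CQs". All of these are
 * no-ops when the mlx5 debugfs root is unavailable.
 */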
int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
{
	int err;

	if (!mlx5_debugfs_root)
		return 0;

	err = add_res_tree(dev, MLX5_DBG_RSC_QP, dev->priv.qp_debugfs,
			   &qp->dbg, qp->qpn, qp_fields,
			   ARRAY_SIZE(qp_fields), qp);
	if (err)
		qp->dbg = NULL;

	return err;
}

void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
{
	if (!mlx5_debugfs_root)
		return;

	if (qp->dbg)
		rem_res_tree(qp->dbg);
}

int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	int err;

	if (!mlx5_debugfs_root)
		return 0;

	err = add_res_tree(dev, MLX5_DBG_RSC_EQ, dev->priv.eq_debugfs,
			   &eq->dbg, eq->eqn, eq_fields,
			   ARRAY_SIZE(eq_fields), eq);
	if (err)
		eq->dbg = NULL;

	return err;
}

void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	if (!mlx5_debugfs_root)
		return;

	if (eq->dbg)
		rem_res_tree(eq->dbg);
}

int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
	int err;

	if (!mlx5_debugfs_root)
		return 0;

	err = add_res_tree(dev, MLX5_DBG_RSC_CQ, dev->priv.cq_debugfs,
			   &cq->dbg, cq->cqn, cq_fields,
			   ARRAY_SIZE(cq_fields), cq);
	if (err)
		cq->dbg = NULL;

	return err;
}

void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
	if (!mlx5_debugfs_root)
		return;

	if (cq->dbg)
		rem_res_tree(cq->dbg);
}