/*
 * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <asm-generic/kmap_types.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
#include <linux/debugfs.h>

#include "mlx5_core.h"
enum {
	CMD_IF_REV = 5,
};

enum {
	CMD_MODE_POLLING,
	CMD_MODE_EVENTS
};

enum {
	NUM_LONG_LISTS	  = 2,
	NUM_MED_LISTS	  = 64,
	LONG_LIST_SIZE	  = (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 +
				MLX5_CMD_DATA_BLOCK_SIZE,
	MED_LIST_SIZE	  = 16 + MLX5_CMD_DATA_BLOCK_SIZE,
};
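/*
 * Sizing note (inferred from the expression above, not from a spec):
 * LONG_LIST_SIZE appears to be dimensioned so that one cached message
 * can carry a MANAGE_PAGES command covering 2GB of memory -- that is,
 * (2GB / PAGE_SIZE) page addresses of 8 bytes each -- plus the 16 bytes
 * that travel inline in the descriptor and one extra data block of
 * slack.
 */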
enum {
	MLX5_CMD_DELIVERY_STAT_OK			= 0x0,
	MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR		= 0x1,
	MLX5_CMD_DELIVERY_STAT_TOK_ERR			= 0x2,
	MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR		= 0x3,
	MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR	= 0x4,
	MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR		= 0x5,
	MLX5_CMD_DELIVERY_STAT_FW_ERR			= 0x6,
	MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR		= 0x7,
	MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR		= 0x8,
	MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR	= 0x9,
	MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR		= 0x10,
};
enum {
	MLX5_CMD_STAT_OK			= 0x0,
	MLX5_CMD_STAT_INT_ERR			= 0x1,
	MLX5_CMD_STAT_BAD_OP_ERR		= 0x2,
	MLX5_CMD_STAT_BAD_PARAM_ERR		= 0x3,
	MLX5_CMD_STAT_BAD_SYS_STATE_ERR		= 0x4,
	MLX5_CMD_STAT_BAD_RES_ERR		= 0x5,
	MLX5_CMD_STAT_RES_BUSY			= 0x6,
	MLX5_CMD_STAT_LIM_ERR			= 0x8,
	MLX5_CMD_STAT_BAD_RES_STATE_ERR		= 0x9,
	MLX5_CMD_STAT_IX_ERR			= 0xa,
	MLX5_CMD_STAT_NO_RES_ERR		= 0xf,
	MLX5_CMD_STAT_BAD_INP_LEN_ERR		= 0x50,
	MLX5_CMD_STAT_BAD_OUTP_LEN_ERR		= 0x51,
	MLX5_CMD_STAT_BAD_QP_STATE_ERR		= 0x10,
	MLX5_CMD_STAT_BAD_PKT_ERR		= 0x30,
	MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR	= 0x40,
};
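/*
 * The two status enums above live at different layers: a delivery
 * status reports whether the HCA could fetch and parse the command at
 * all (signature, token, block numbering, alignment), while a command
 * status is the semantic return code of a command that was delivered
 * successfully. deliv_status_to_str() and cmd_status_str() below map
 * them to text.
 */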
static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
					   struct mlx5_cmd_msg *in,
					   struct mlx5_cmd_msg *out,
					   void *uout, int uout_size,
					   mlx5_cmd_cbk_t cbk,
					   void *context, int page_queue)
{
	gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
	struct mlx5_cmd_work_ent *ent;

	ent = kzalloc(sizeof(*ent), alloc_flags);
	if (!ent)
		return ERR_PTR(-ENOMEM);

	ent->in		= in;
	ent->out	= out;
	ent->uout	= uout;
	ent->uout_size	= uout_size;
	ent->callback	= cbk;
	ent->context	= context;
	ent->cmd	= cmd;
	ent->page_queue	= page_queue;

	return ent;
}
static u8 alloc_token(struct mlx5_cmd *cmd)
{
	u8 token;

	spin_lock(&cmd->token_lock);
	token = cmd->token++ % 255 + 1;
	spin_unlock(&cmd->token_lock);

	return token;
}
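/*
 * Note (following from the arithmetic above): tokens cycle through
 * 1..255 and are never 0, so a zeroed-out descriptor cannot
 * accidentally look like it belongs to a live command.
 */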
static int alloc_ent(struct mlx5_cmd *cmd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
	if (ret < cmd->max_reg_cmds)
		clear_bit(ret, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);

	return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
}
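/*
 * Free command-queue slots are tracked in cmd->bitmask: a set bit means
 * the slot is free. Regular commands take the lowest free slot; the
 * last slot (index cmd->max_reg_cmds) is deliberately kept out of the
 * bitmask and reserved for page-queue commands (see cmd_work_handler()).
 */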
static void free_ent(struct mlx5_cmd *cmd, int idx)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	set_bit(idx, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}
static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
{
	return cmd->cmd_buf + (idx << cmd->log_stride);
}
static u8 xor8_buf(void *buf, int len)
{
	u8 *ptr = buf;
	u8 sum = 0;
	int i;

	for (i = 0; i < len; i++)
		sum ^= ptr[i];

	return sum;
}
static int verify_block_sig(struct mlx5_cmd_prot_block *block)
{
	if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff)
		return -EINVAL;

	if (xor8_buf(block, sizeof(*block)) != 0xff)
		return -EINVAL;

	return 0;
}
static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token,
			   int csum)
{
	block->token = token;
	if (csum) {
		block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) -
					    sizeof(block->data) - 2);
		block->sig = ~xor8_buf(block, sizeof(*block) - 1);
	}
}
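/*
 * Signature scheme, as implied by the checks in verify_block_sig():
 * each signature byte is written as the complement of the XOR of the
 * bytes it covers, so XOR-ing the covered region together with its
 * signature byte always yields 0xff on an intact block.
 */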
static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum)
{
	struct mlx5_cmd_mailbox *next = msg->next;

	while (next) {
		calc_block_sig(next->buf, token, csum);
		next = next->next;
	}
}
static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
{
	ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
	calc_chain_sig(ent->in, ent->token, csum);
	calc_chain_sig(ent->out, ent->token, csum);
}
static void poll_timeout(struct mlx5_cmd_work_ent *ent)
{
	unsigned long poll_end = jiffies + msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000);
	u8 own;

	do {
		own = ent->lay->status_own;
		if (!(own & CMD_OWNER_HW)) {
			ent->ret = 0;
			return;
		}
		usleep_range(5000, 10000);
	} while (time_before(jiffies, poll_end));

	ent->ret = -ETIMEDOUT;
}
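/*
 * In polling mode, completion is detected purely by watching the
 * ownership bit: firmware clears CMD_OWNER_HW in status_own when it
 * hands the descriptor back to software. The extra 1000ms on top of
 * MLX5_CMD_TIMEOUT_MSEC gives firmware a grace period before the
 * command is declared dead.
 */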
static void free_cmd(struct mlx5_cmd_work_ent *ent)
{
	kfree(ent);
}
static int verify_signature(struct mlx5_cmd_work_ent *ent)
{
	struct mlx5_cmd_mailbox *next = ent->out->next;
	int err;
	u8 sig;

	sig = xor8_buf(ent->lay, sizeof(*ent->lay));
	if (sig != 0xff)
		return -EINVAL;

	while (next) {
		err = verify_block_sig(next->buf);
		if (err)
			return err;

		next = next->next;
	}

	return 0;
}
static void dump_buf(void *buf, int size, int data_only, int offset)
{
	__be32 *p = buf;
	int i;

	for (i = 0; i < size; i += 16) {
		pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]),
			 be32_to_cpu(p[1]), be32_to_cpu(p[2]),
			 be32_to_cpu(p[3]));
		p += 4;
		offset += 16;
	}
	if (!data_only)
		pr_debug("\n");
}
const char *mlx5_command_str(int command)
{
	switch (command) {
	case MLX5_CMD_OP_QUERY_HCA_CAP:
		return "QUERY_HCA_CAP";

	case MLX5_CMD_OP_SET_HCA_CAP:
		return "SET_HCA_CAP";

	case MLX5_CMD_OP_QUERY_ADAPTER:
		return "QUERY_ADAPTER";

	case MLX5_CMD_OP_INIT_HCA:
		return "INIT_HCA";

	case MLX5_CMD_OP_TEARDOWN_HCA:
		return "TEARDOWN_HCA";

	case MLX5_CMD_OP_ENABLE_HCA:
		return "MLX5_CMD_OP_ENABLE_HCA";

	case MLX5_CMD_OP_DISABLE_HCA:
		return "MLX5_CMD_OP_DISABLE_HCA";

	case MLX5_CMD_OP_QUERY_PAGES:
		return "QUERY_PAGES";

	case MLX5_CMD_OP_MANAGE_PAGES:
		return "MANAGE_PAGES";

	case MLX5_CMD_OP_CREATE_MKEY:
		return "CREATE_MKEY";

	case MLX5_CMD_OP_QUERY_MKEY:
		return "QUERY_MKEY";

	case MLX5_CMD_OP_DESTROY_MKEY:
		return "DESTROY_MKEY";

	case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
		return "QUERY_SPECIAL_CONTEXTS";

	case MLX5_CMD_OP_CREATE_EQ:
		return "CREATE_EQ";

	case MLX5_CMD_OP_DESTROY_EQ:
		return "DESTROY_EQ";

	case MLX5_CMD_OP_QUERY_EQ:
		return "QUERY_EQ";

	case MLX5_CMD_OP_CREATE_CQ:
		return "CREATE_CQ";

	case MLX5_CMD_OP_DESTROY_CQ:
		return "DESTROY_CQ";

	case MLX5_CMD_OP_QUERY_CQ:
		return "QUERY_CQ";

	case MLX5_CMD_OP_MODIFY_CQ:
		return "MODIFY_CQ";

	case MLX5_CMD_OP_CREATE_QP:
		return "CREATE_QP";

	case MLX5_CMD_OP_DESTROY_QP:
		return "DESTROY_QP";

	case MLX5_CMD_OP_RST2INIT_QP:
		return "RST2INIT_QP";

	case MLX5_CMD_OP_INIT2RTR_QP:
		return "INIT2RTR_QP";

	case MLX5_CMD_OP_RTR2RTS_QP:
		return "RTR2RTS_QP";

	case MLX5_CMD_OP_RTS2RTS_QP:
		return "RTS2RTS_QP";

	case MLX5_CMD_OP_SQERR2RTS_QP:
		return "SQERR2RTS_QP";

	case MLX5_CMD_OP_2ERR_QP:
		return "2ERR_QP";

	case MLX5_CMD_OP_RTS2SQD_QP:
		return "RTS2SQD_QP";

	case MLX5_CMD_OP_SQD2RTS_QP:
		return "SQD2RTS_QP";

	case MLX5_CMD_OP_2RST_QP:
		return "2RST_QP";

	case MLX5_CMD_OP_QUERY_QP:
		return "QUERY_QP";

	case MLX5_CMD_OP_CONF_SQP:
		return "CONF_SQP";

	case MLX5_CMD_OP_MAD_IFC:
		return "MAD_IFC";

	case MLX5_CMD_OP_INIT2INIT_QP:
		return "INIT2INIT_QP";

	case MLX5_CMD_OP_SUSPEND_QP:
		return "SUSPEND_QP";

	case MLX5_CMD_OP_UNSUSPEND_QP:
		return "UNSUSPEND_QP";

	case MLX5_CMD_OP_SQD2SQD_QP:
		return "SQD2SQD_QP";

	case MLX5_CMD_OP_ALLOC_QP_COUNTER_SET:
		return "ALLOC_QP_COUNTER_SET";

	case MLX5_CMD_OP_DEALLOC_QP_COUNTER_SET:
		return "DEALLOC_QP_COUNTER_SET";

	case MLX5_CMD_OP_QUERY_QP_COUNTER_SET:
		return "QUERY_QP_COUNTER_SET";

	case MLX5_CMD_OP_CREATE_PSV:
		return "CREATE_PSV";

	case MLX5_CMD_OP_DESTROY_PSV:
		return "DESTROY_PSV";

	case MLX5_CMD_OP_QUERY_PSV:
		return "QUERY_PSV";

	case MLX5_CMD_OP_QUERY_SIG_RULE_TABLE:
		return "QUERY_SIG_RULE_TABLE";

	case MLX5_CMD_OP_QUERY_BLOCK_SIZE_TABLE:
		return "QUERY_BLOCK_SIZE_TABLE";

	case MLX5_CMD_OP_CREATE_SRQ:
		return "CREATE_SRQ";

	case MLX5_CMD_OP_DESTROY_SRQ:
		return "DESTROY_SRQ";

	case MLX5_CMD_OP_QUERY_SRQ:
		return "QUERY_SRQ";

	case MLX5_CMD_OP_ARM_RQ:
		return "ARM_RQ";

	case MLX5_CMD_OP_RESIZE_SRQ:
		return "RESIZE_SRQ";

	case MLX5_CMD_OP_ALLOC_PD:
		return "ALLOC_PD";

	case MLX5_CMD_OP_DEALLOC_PD:
		return "DEALLOC_PD";

	case MLX5_CMD_OP_ALLOC_UAR:
		return "ALLOC_UAR";

	case MLX5_CMD_OP_DEALLOC_UAR:
		return "DEALLOC_UAR";

	case MLX5_CMD_OP_ATTACH_TO_MCG:
		return "ATTACH_TO_MCG";

	case MLX5_CMD_OP_DETACH_FROM_MCG:
		return "DETACH_FROM_MCG";

	case MLX5_CMD_OP_ALLOC_XRCD:
		return "ALLOC_XRCD";

	case MLX5_CMD_OP_DEALLOC_XRCD:
		return "DEALLOC_XRCD";

	case MLX5_CMD_OP_ACCESS_REG:
		return "MLX5_CMD_OP_ACCESS_REG";

	default: return "unknown command opcode";
	}
}
static void dump_command(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_work_ent *ent, int input)
{
	u16 op = be16_to_cpu(((struct mlx5_inbox_hdr *)(ent->lay->in))->opcode);
	struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
	struct mlx5_cmd_mailbox *next = msg->next;
	int data_only;
	int offset = 0;
	int dump_len;

	data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));

	if (data_only)
		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
				   "dump command data %s(0x%x) %s\n",
				   mlx5_command_str(op), op,
				   input ? "INPUT" : "OUTPUT");
	else
		mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n",
			      mlx5_command_str(op), op,
			      input ? "INPUT" : "OUTPUT");

	if (data_only) {
		if (input) {
			dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset);
			offset += sizeof(ent->lay->in);
		} else {
			dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset);
			offset += sizeof(ent->lay->out);
		}
	} else {
		dump_buf(ent->lay, sizeof(*ent->lay), 0, offset);
		offset += sizeof(*ent->lay);
	}

	while (next && offset < msg->len) {
		if (data_only) {
			dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset);
			dump_buf(next->buf, dump_len, 1, offset);
			offset += MLX5_CMD_DATA_BLOCK_SIZE;
		} else {
			mlx5_core_dbg(dev, "command block:\n");
			dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset);
			offset += sizeof(struct mlx5_cmd_prot_block);
		}
		next = next->next;
	}

	if (data_only)
		pr_debug("\n");
}
static void cmd_work_handler(struct work_struct *work)
{
	struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
	struct mlx5_cmd *cmd = ent->cmd;
	struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
	struct mlx5_cmd_layout *lay;
	struct semaphore *sem;

	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
	down(sem);
	if (!ent->page_queue) {
		ent->idx = alloc_ent(cmd);
		if (ent->idx < 0) {
			mlx5_core_err(dev, "failed to allocate command entry\n");
			up(sem);
			return;
		}
	} else {
		ent->idx = cmd->max_reg_cmds;
	}

	ent->token = alloc_token(cmd);
	cmd->ent_arr[ent->idx] = ent;
	lay = get_inst(cmd, ent->idx);
	ent->lay = lay;
	memset(lay, 0, sizeof(*lay));
	memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
	ent->op = be32_to_cpu(lay->in[0]) >> 16;
	if (ent->in->next)
		lay->in_ptr = cpu_to_be64(ent->in->next->dma);
	lay->inlen = cpu_to_be32(ent->in->len);
	if (ent->out->next)
		lay->out_ptr = cpu_to_be64(ent->out->next->dma);
	lay->outlen = cpu_to_be32(ent->out->len);
	lay->type = MLX5_PCI_CMD_XPORT;
	lay->token = ent->token;
	lay->status_own = CMD_OWNER_HW;
	set_signature(ent, !cmd->checksum_disabled);
	dump_command(dev, ent, 1);
	ktime_get_ts(&ent->ts1);

	/* ring doorbell after the descriptor is valid */
	wmb();
	iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
	mlx5_core_dbg(dev, "write 0x%x to command doorbell\n", 1 << ent->idx);
	if (cmd->mode == CMD_MODE_POLLING) {
		poll_timeout(ent);
		/* make sure we read the descriptor after ownership is SW */
		rmb();
		mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
	}
}
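/*
 * Ownership handoff, briefly: software fills the descriptor, sets
 * CMD_OWNER_HW and only then rings the doorbell; the wmb() keeps the
 * device from observing the doorbell write before the descriptor is
 * fully visible. Firmware clears the ownership bit on completion, and
 * the rmb() orders that observation against reading the rest of the
 * descriptor.
 */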
static const char *deliv_status_to_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_DELIVERY_STAT_OK:
		return "no errors";
	case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
		return "signature error";
	case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
		return "token error";
	case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
		return "bad block number";
	case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
		return "output pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
		return "input pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_FW_ERR:
		return "firmware internal error";
	case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
		return "command input length error";
	case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
		return "command output length error";
	case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
		return "reserved fields not cleared";
	case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
		return "bad command descriptor type";
	default:
		return "unknown status code";
	}
}
static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
{
	struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);

	return be16_to_cpu(hdr->opcode);
}
static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
	unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
	struct mlx5_cmd *cmd = &dev->cmd;
	int err;

	if (cmd->mode == CMD_MODE_POLLING) {
		wait_for_completion(&ent->done);
		err = ent->ret;
	} else {
		if (!wait_for_completion_timeout(&ent->done, timeout))
			err = -ETIMEDOUT;
		else
			err = ent->ret;
	}
	if (err == -ETIMEDOUT) {
		mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
			       mlx5_command_str(msg_to_opcode(ent->in)),
			       msg_to_opcode(ent->in));
	}
	mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n", err,
		      deliv_status_to_str(ent->status), ent->status);

	return err;
}
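/*
 * On timeout the entry and its queue slot are intentionally not
 * reclaimed -- the "leak of a command resource" called out in the
 * warning above -- presumably because firmware may still DMA into the
 * entry's buffers, so recycling them could corrupt memory.
 */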
/*  Notes:
 *	1. Callback functions may not sleep
 *	2. page queue commands do not support asynchronous completion
 */
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
			   struct mlx5_cmd_msg *out, void *uout, int uout_size,
			   mlx5_cmd_cbk_t callback,
			   void *context, int page_queue, u8 *status)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	ktime_t t1, t2, delta;
	struct mlx5_cmd_stats *stats;
	int err = 0;
	s64 ds;
	u16 op;

	if (callback && page_queue)
		return -EINVAL;

	ent = alloc_cmd(cmd, in, out, uout, uout_size, callback, context,
			page_queue);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	if (!callback)
		init_completion(&ent->done);

	INIT_WORK(&ent->work, cmd_work_handler);
	if (page_queue) {
		cmd_work_handler(&ent->work);
	} else if (!queue_work(cmd->wq, &ent->work)) {
		mlx5_core_warn(dev, "failed to queue work\n");
		err = -ENOMEM;
		goto out_free;
	}

	if (!callback) {
		err = wait_func(dev, ent);
		if (err == -ETIMEDOUT)
			goto out;

		t1 = timespec_to_ktime(ent->ts1);
		t2 = timespec_to_ktime(ent->ts2);
		delta = ktime_sub(t2, t1);
		ds = ktime_to_ns(delta);
		op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
		if (op < ARRAY_SIZE(cmd->stats)) {
			stats = &cmd->stats[op];
			spin_lock_irq(&stats->lock);
			stats->sum += ds;
			++stats->n;
			spin_unlock_irq(&stats->lock);
		}
		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
				   "fw exec time for %s is %lld nsec\n",
				   mlx5_command_str(op), ds);
		*status = ent->status;
		free_cmd(ent);
	}

	return err;

out_free:
	free_cmd(ent);
out:
	return err;
}
static ssize_t dbg_write(struct file *filp, const char __user *buf,
			 size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char lbuf[3];
	int err;

	if (!dbg->in_msg || !dbg->out_msg)
		return -ENOMEM;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	lbuf[sizeof(lbuf) - 1] = 0;

	if (strcmp(lbuf, "go"))
		return -EINVAL;

	err = mlx5_cmd_exec(dev, dbg->in_msg, dbg->inlen, dbg->out_msg, dbg->outlen);

	return err ? err : count;
}
static const struct file_operations fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= dbg_write,
};
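/*
 * Message layout assumed by the two copy helpers below: the first 16
 * bytes of a command payload travel inline in the descriptor
 * (msg->first.data); everything beyond that is carried in a chain of
 * DMA-able mailbox blocks of MLX5_CMD_DATA_BLOCK_SIZE bytes each.
 */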
static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_mailbox *next;
	int copy;

	if (!to || !from)
		return -ENOMEM;

	copy = min_t(int, size, sizeof(to->first.data));
	memcpy(to->first.data, from, copy);
	size -= copy;
	from += copy;

	next = to->next;
	while (size) {
		if (!next) {
			/* this is a BUG */
			return -ENOMEM;
		}

		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
		block = next->buf;
		memcpy(block->data, from, copy);
		from += copy;
		size -= copy;
		next = next->next;
	}

	return 0;
}
static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_mailbox *next;
	int copy;

	if (!to || !from)
		return -ENOMEM;

	copy = min_t(int, size, sizeof(from->first.data));
	memcpy(to, from->first.data, copy);
	size -= copy;
	to += copy;

	next = from->next;
	while (size) {
		if (!next) {
			/* this is a BUG */
			return -ENOMEM;
		}

		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
		block = next->buf;

		memcpy(to, block->data, copy);
		to += copy;
		size -= copy;
		next = next->next;
	}

	return 0;
}
static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev,
					      gfp_t flags)
{
	struct mlx5_cmd_mailbox *mailbox;

	mailbox = kmalloc(sizeof(*mailbox), flags);
	if (!mailbox)
		return ERR_PTR(-ENOMEM);

	mailbox->buf = pci_pool_alloc(dev->cmd.pool, flags,
				      &mailbox->dma);
	if (!mailbox->buf) {
		mlx5_core_dbg(dev, "failed allocation\n");
		kfree(mailbox);
		return ERR_PTR(-ENOMEM);
	}
	memset(mailbox->buf, 0, sizeof(struct mlx5_cmd_prot_block));
	mailbox->next = NULL;

	return mailbox;
}
static void free_cmd_box(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_mailbox *mailbox)
{
	pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
	kfree(mailbox);
}
static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
					       gfp_t flags, int size)
{
	struct mlx5_cmd_mailbox *tmp, *head = NULL;
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_msg *msg;
	int blen;
	int err;
	int n;
	int i;

	msg = kzalloc(sizeof(*msg), flags);
	if (!msg)
		return ERR_PTR(-ENOMEM);

	blen = size - min_t(int, sizeof(msg->first.data), size);
	n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1) / MLX5_CMD_DATA_BLOCK_SIZE;

	for (i = 0; i < n; i++) {
		tmp = alloc_cmd_box(dev, flags);
		if (IS_ERR(tmp)) {
			mlx5_core_warn(dev, "failed allocating block\n");
			err = PTR_ERR(tmp);
			goto err_alloc;
		}

		block = tmp->buf;
		tmp->next = head;
		block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
		block->block_num = cpu_to_be32(n - i - 1);
		head = tmp;
	}
	msg->next = head;
	msg->len = size;
	return msg;

err_alloc:
	while (head) {
		tmp = head->next;
		free_cmd_box(dev, head);
		head = tmp;
	}
	kfree(msg);

	return ERR_PTR(err);
}
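/*
 * Blocks are prepended as they are allocated, so the loop above numbers
 * them n-1 down to 0: the block allocated last becomes the head of the
 * chain and carries block_num 0, which matches the order in which the
 * device follows the next pointers.
 */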
static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
			      struct mlx5_cmd_msg *msg)
{
	struct mlx5_cmd_mailbox *head = msg->next;
	struct mlx5_cmd_mailbox *next;

	while (head) {
		next = head->next;
		free_cmd_box(dev, head);
		head = next;
	}
	kfree(msg);
}
static ssize_t data_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	void *ptr;
	int err;

	if (*pos != 0)
		return -EINVAL;

	kfree(dbg->in_msg);
	dbg->in_msg = NULL;
	dbg->inlen = 0;

	ptr = kzalloc(count, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	if (copy_from_user(ptr, buf, count)) {
		err = -EFAULT;
		goto out;
	}
	dbg->in_msg = ptr;
	dbg->inlen = count;

	*pos = count;

	return count;

out:
	kfree(ptr);
	return err;
}
static ssize_t data_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	int copy;

	if (*pos)
		return 0;

	if (!dbg->out_msg)
		return -ENOMEM;

	copy = min_t(int, count, dbg->outlen);
	if (copy_to_user(buf, dbg->out_msg, copy))
		return -EFAULT;

	*pos += copy;

	return copy;
}
static const struct file_operations dfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= data_write,
	.read	= data_read,
};
static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count,
			   loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char outlen[8];
	int err;

	if (*pos)
		return 0;

	err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen);
	if (err < 0)
		return err;

	if (copy_to_user(buf, &outlen, err))
		return -EFAULT;

	*pos += err;

	return err;
}
static ssize_t outlen_write(struct file *filp, const char __user *buf,
			    size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char outlen_str[8];
	int outlen;
	void *ptr;
	int err;

	if (*pos != 0 || count > 6)
		return -EINVAL;

	kfree(dbg->out_msg);
	dbg->out_msg = NULL;
	dbg->outlen = 0;

	if (copy_from_user(outlen_str, buf, count))
		return -EFAULT;

	outlen_str[7] = 0;

	err = sscanf(outlen_str, "%d", &outlen);
	if (err < 0)
		return err;

	ptr = kzalloc(outlen, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	dbg->out_msg = ptr;
	dbg->outlen = outlen;

	*pos = count;

	return count;
}
static const struct file_operations olfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= outlen_write,
	.read	= outlen_read,
};
static void set_wqname(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
		 dev_name(&dev->pdev->dev));
}
static void clean_debug_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;

	if (!mlx5_debugfs_root)
		return;

	mlx5_cmdif_debugfs_cleanup(dev);
	debugfs_remove_recursive(dbg->dbg_root);
}
static int create_debugfs_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	int err = -ENOMEM;

	if (!mlx5_debugfs_root)
		return 0;

	dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root);
	if (!dbg->dbg_root)
		return -ENOMEM;

	dbg->dbg_in = debugfs_create_file("in", 0400, dbg->dbg_root,
					  dev, &dfops);
	if (!dbg->dbg_in)
		goto err_dbg;

	dbg->dbg_out = debugfs_create_file("out", 0200, dbg->dbg_root,
					   dev, &dfops);
	if (!dbg->dbg_out)
		goto err_dbg;

	dbg->dbg_outlen = debugfs_create_file("out_len", 0600, dbg->dbg_root,
					      dev, &olfops);
	if (!dbg->dbg_outlen)
		goto err_dbg;

	dbg->dbg_status = debugfs_create_u8("status", 0600, dbg->dbg_root,
					    &dbg->status);
	if (!dbg->dbg_status)
		goto err_dbg;

	dbg->dbg_run = debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops);
	if (!dbg->dbg_run)
		goto err_dbg;

	mlx5_cmdif_debugfs_init(dev);

	return 0;

err_dbg:
	clean_debug_files(dev);
	return err;
}
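/*
 * Rough debugfs workflow (inferred from the fops wiring above): stage a
 * raw command mailbox through the "in" file, size the response buffer
 * through "out_len", write "go" to "run" to execute via mlx5_cmd_exec(),
 * then read the response back from "out" and the command status from
 * "status".
 */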
void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	for (i = 0; i < cmd->max_reg_cmds; i++)
		down(&cmd->sem);

	down(&cmd->pages_sem);

	flush_workqueue(cmd->wq);

	cmd->mode = CMD_MODE_EVENTS;

	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}
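/*
 * Mode switching quiesces the command interface by taking every unit of
 * cmd->sem plus the pages semaphore: once all are held no new command
 * can be posted, flushing the workqueue drains what is in flight, and
 * flipping cmd->mode is then safe. mlx5_cmd_use_polling() below does
 * the same dance.
 */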
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	for (i = 0; i < cmd->max_reg_cmds; i++)
		down(&cmd->sem);

	down(&cmd->pages_sem);

	flush_workqueue(cmd->wq);

	cmd->mode = CMD_MODE_POLLING;

	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}
static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
{
	unsigned long flags;

	if (msg->cache) {
		spin_lock_irqsave(&msg->cache->lock, flags);
		list_add_tail(&msg->list, &msg->cache->head);
		spin_unlock_irqrestore(&msg->cache->lock, flags);
	} else {
		mlx5_free_cmd_msg(dev, msg);
	}
}
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	mlx5_cmd_cbk_t callback;
	void *context;
	int err;
	int i;
	ktime_t t1, t2, delta;
	s64 ds;
	struct mlx5_cmd_stats *stats;
	unsigned long flags;

	for (i = 0; i < (1 << cmd->log_sz); i++) {
		if (test_bit(i, &vector)) {
			struct semaphore *sem;

			ent = cmd->ent_arr[i];
			if (ent->page_queue)
				sem = &cmd->pages_sem;
			else
				sem = &cmd->sem;
			ktime_get_ts(&ent->ts2);
			memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
			dump_command(dev, ent, 0);
			if (!ent->ret) {
				if (!cmd->checksum_disabled)
					ent->ret = verify_signature(ent);
				else
					ent->ret = 0;
				ent->status = ent->lay->status_own >> 1;
				mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
					      ent->ret, deliv_status_to_str(ent->status), ent->status);
			}
			free_ent(cmd, ent->idx);
			if (ent->callback) {
				t1 = timespec_to_ktime(ent->ts1);
				t2 = timespec_to_ktime(ent->ts2);
				delta = ktime_sub(t2, t1);
				ds = ktime_to_ns(delta);
				if (ent->op < ARRAY_SIZE(cmd->stats)) {
					stats = &cmd->stats[ent->op];
					spin_lock_irqsave(&stats->lock, flags);
					stats->sum += ds;
					++stats->n;
					spin_unlock_irqrestore(&stats->lock, flags);
				}

				callback = ent->callback;
				context = ent->context;
				err = ent->ret;
				if (!err)
					err = mlx5_copy_from_msg(ent->uout,
								 ent->out,
								 ent->uout_size);

				mlx5_free_cmd_msg(dev, ent->out);
				free_msg(dev, ent->in);

				free_cmd(ent);
				callback(err, context);
			} else {
				complete(&ent->done);
			}
			up(sem);
		}
	}
}
EXPORT_SYMBOL(mlx5_cmd_comp_handler);
static int status_to_err(u8 status)
{
	return status ? -1 : 0; /* TBD more meaningful codes */
}
static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
				      gfp_t gfp)
{
	struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
	struct mlx5_cmd *cmd = &dev->cmd;
	struct cache_ent *ent = NULL;

	if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE)
		ent = &cmd->cache.large;
	else if (in_size > 16 && in_size <= MED_LIST_SIZE)
		ent = &cmd->cache.med;

	if (ent) {
		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			msg = list_entry(ent->head.next, typeof(*msg), list);
			/* For cached lists, we must explicitly state what is
			 * the real size
			 */
			msg->len = in_size;
			list_del(&msg->list);
		}
		spin_unlock_irq(&ent->lock);
	}

	if (IS_ERR(msg))
		msg = mlx5_alloc_cmd_msg(dev, gfp, in_size);

	return msg;
}
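/*
 * Cache selection above mirrors the size classes defined at the top of
 * the file: medium inputs reuse a MED_LIST_SIZE message, large inputs a
 * LONG_LIST_SIZE one. Inputs of at most 16 bytes fit entirely in the
 * inline descriptor area (no mailbox blocks needed), and anything above
 * LONG_LIST_SIZE is not cached; both fall through to a fresh allocation.
 */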
static int is_manage_pages(struct mlx5_inbox_hdr *in)
{
	return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES;
}
static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		    int out_size, mlx5_cmd_cbk_t callback, void *context)
{
	struct mlx5_cmd_msg *inb;
	struct mlx5_cmd_msg *outb;
	int pages_queue;
	gfp_t gfp;
	int err;
	u8 status = 0;

	pages_queue = is_manage_pages(in);
	gfp = callback ? GFP_ATOMIC : GFP_KERNEL;

	inb = alloc_msg(dev, in_size, gfp);
	if (IS_ERR(inb)) {
		err = PTR_ERR(inb);
		return err;
	}

	err = mlx5_copy_to_msg(inb, in, in_size);
	if (err) {
		mlx5_core_warn(dev, "err %d\n", err);
		goto out_in;
	}

	outb = mlx5_alloc_cmd_msg(dev, gfp, out_size);
	if (IS_ERR(outb)) {
		err = PTR_ERR(outb);
		goto out_in;
	}

	err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
			      pages_queue, &status);
	if (err)
		goto out_out;

	mlx5_core_dbg(dev, "err %d, status %d\n", err, status);
	if (status) {
		err = status_to_err(status);
		goto out_out;
	}

	if (callback)
		return err;

	err = mlx5_copy_from_msg(out, outb, out_size);

out_out:
	if (!callback)
		mlx5_free_cmd_msg(dev, outb);

out_in:
	if (!callback)
		free_msg(dev, inb);
	return err;
}
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size)
{
	return cmd_exec(dev, in, in_size, out, out_size, NULL, NULL);
}
EXPORT_SYMBOL(mlx5_cmd_exec);
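/*
 * Example call pattern (an illustrative sketch only -- the anonymous
 * structs below stand in for the real mailbox layouts declared in
 * linux/mlx5/driver.h):
 *
 *	struct { struct mlx5_inbox_hdr hdr;  u8 rsvd[8]; } in;
 *	struct { struct mlx5_outbox_hdr hdr; u8 rsvd[8]; } out;
 *	int err;
 *
 *	memset(&in, 0, sizeof(in));
 *	memset(&out, 0, sizeof(out));
 *	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ENABLE_HCA);
 *	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
 *	if (!err)
 *		err = mlx5_cmd_status_to_err(&out.hdr);
 */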
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
		     void *out, int out_size, mlx5_cmd_cbk_t callback,
		     void *context)
{
	return cmd_exec(dev, in, in_size, out, out_size, callback, context);
}
EXPORT_SYMBOL(mlx5_cmd_exec_cb);
static void destroy_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_msg *msg;
	struct mlx5_cmd_msg *n;

	list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) {
		list_del(&msg->list);
		mlx5_free_cmd_msg(dev, msg);
	}

	list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) {
		list_del(&msg->list);
		mlx5_free_cmd_msg(dev, msg);
	}
}
static int create_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_msg *msg;
	int err;
	int i;

	spin_lock_init(&cmd->cache.large.lock);
	INIT_LIST_HEAD(&cmd->cache.large.head);
	spin_lock_init(&cmd->cache.med.lock);
	INIT_LIST_HEAD(&cmd->cache.med.head);

	for (i = 0; i < NUM_LONG_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE);
		if (IS_ERR(msg)) {
			err = PTR_ERR(msg);
			goto ex_err;
		}
		msg->cache = &cmd->cache.large;
		list_add_tail(&msg->list, &cmd->cache.large.head);
	}

	for (i = 0; i < NUM_MED_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE);
		if (IS_ERR(msg)) {
			err = PTR_ERR(msg);
			goto ex_err;
		}
		msg->cache = &cmd->cache.med;
		list_add_tail(&msg->list, &cmd->cache.med.head);
	}

	return 0;

ex_err:
	destroy_msg_cache(dev);
	return err;
}
int mlx5_cmd_init(struct mlx5_core_dev *dev)
{
	int size = sizeof(struct mlx5_cmd_prot_block);
	int align = roundup_pow_of_two(size);
	struct mlx5_cmd *cmd = &dev->cmd;
	u32 cmd_h, cmd_l;
	u16 cmd_if_rev;
	int err;
	int i;

	cmd_if_rev = cmdif_rev(dev);
	if (cmd_if_rev != CMD_IF_REV) {
		dev_err(&dev->pdev->dev,
			"Driver cmdif rev(%d) differs from firmware's(%d)\n",
			CMD_IF_REV, cmd_if_rev);
		return -EINVAL;
	}

	cmd->pool = pci_pool_create("mlx5_cmd", dev->pdev, size, align, 0);
	if (!cmd->pool)
		return -ENOMEM;

	cmd->cmd_buf = (void *)__get_free_pages(GFP_ATOMIC, 0);
	if (!cmd->cmd_buf) {
		err = -ENOMEM;
		goto err_free_pool;
	}

	cmd->dma = dma_map_single(&dev->pdev->dev, cmd->cmd_buf, PAGE_SIZE,
				  DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&dev->pdev->dev, cmd->dma)) {
		err = -ENOMEM;
		goto err_free;
	}

	cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
	cmd->log_sz = cmd_l >> 4 & 0xf;
	cmd->log_stride = cmd_l & 0xf;
	if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
		dev_err(&dev->pdev->dev, "firmware reports too many outstanding commands %d\n",
			1 << cmd->log_sz);
		err = -EINVAL;
		goto err_map;
	}

	if (cmd->log_sz + cmd->log_stride > PAGE_SHIFT) {
		dev_err(&dev->pdev->dev, "command queue size overflow\n");
		err = -EINVAL;
		goto err_map;
	}

	cmd->checksum_disabled = 1;
	cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
	cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;

	cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
	if (cmd->cmdif_rev > CMD_IF_REV) {
		dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
			CMD_IF_REV, cmd->cmdif_rev);
		err = -ENOTSUPP;
		goto err_map;
	}

	spin_lock_init(&cmd->alloc_lock);
	spin_lock_init(&cmd->token_lock);
	for (i = 0; i < ARRAY_SIZE(cmd->stats); i++)
		spin_lock_init(&cmd->stats[i].lock);

	sema_init(&cmd->sem, cmd->max_reg_cmds);
	sema_init(&cmd->pages_sem, 1);

	cmd_h = (u32)((u64)(cmd->dma) >> 32);
	cmd_l = (u32)(cmd->dma);
	if (cmd_l & 0xfff) {
		dev_err(&dev->pdev->dev, "invalid command queue address\n");
		err = -ENOMEM;
		goto err_map;
	}

	iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
	iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);

	/* Make sure firmware sees the complete address before we proceed */
	wmb();

	mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));

	cmd->mode = CMD_MODE_POLLING;

	err = create_msg_cache(dev);
	if (err) {
		dev_err(&dev->pdev->dev, "failed to create command cache\n");
		goto err_map;
	}

	set_wqname(dev);
	cmd->wq = create_singlethread_workqueue(cmd->wq_name);
	if (!cmd->wq) {
		dev_err(&dev->pdev->dev, "failed to create command workqueue\n");
		err = -ENOMEM;
		goto err_cache;
	}

	err = create_debugfs_files(dev);
	if (err) {
		err = -ENOMEM;
		goto err_wq;
	}

	return 0;

err_wq:
	destroy_workqueue(cmd->wq);

err_cache:
	destroy_msg_cache(dev);

err_map:
	dma_unmap_single(&dev->pdev->dev, cmd->dma, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);

err_free:
	free_pages((unsigned long)cmd->cmd_buf, 0);

err_free_pool:
	pci_pool_destroy(cmd->pool);

	return err;
}
EXPORT_SYMBOL(mlx5_cmd_init);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	clean_debug_files(dev);
	destroy_workqueue(cmd->wq);
	destroy_msg_cache(dev);
	dma_unmap_single(&dev->pdev->dev, cmd->dma, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_pages((unsigned long)cmd->cmd_buf, 0);
	pci_pool_destroy(cmd->pool);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup);
static const char *cmd_status_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:
		return "OK";
	case MLX5_CMD_STAT_INT_ERR:
		return "internal error";
	case MLX5_CMD_STAT_BAD_OP_ERR:
		return "bad operation";
	case MLX5_CMD_STAT_BAD_PARAM_ERR:
		return "bad parameter";
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
		return "bad system state";
	case MLX5_CMD_STAT_BAD_RES_ERR:
		return "bad resource";
	case MLX5_CMD_STAT_RES_BUSY:
		return "resource busy";
	case MLX5_CMD_STAT_LIM_ERR:
		return "limits exceeded";
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
		return "bad resource state";
	case MLX5_CMD_STAT_IX_ERR:
		return "bad index";
	case MLX5_CMD_STAT_NO_RES_ERR:
		return "no resources";
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
		return "bad input length";
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
		return "bad output length";
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
		return "bad QP state";
	case MLX5_CMD_STAT_BAD_PKT_ERR:
		return "bad packet (discarded)";
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
		return "bad size too many outstanding CQEs";
	default:
		return "unknown status";
	}
}
int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr)
{
	if (!hdr->status)
		return 0;

	pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
		cmd_status_str(hdr->status), hdr->status,
		be32_to_cpu(hdr->syndrome));

	switch (hdr->status) {
	case MLX5_CMD_STAT_OK:				return 0;
	case MLX5_CMD_STAT_INT_ERR:			return -EIO;
	case MLX5_CMD_STAT_BAD_OP_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_PARAM_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_RES_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_RES_BUSY:			return -EBUSY;
	case MLX5_CMD_STAT_LIM_ERR:			return -ENOMEM;
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_IX_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_NO_RES_ERR:			return -EAGAIN;
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_PKT_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:	return -EINVAL;
	default:					return -EIO;
	}
}