/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <asm-generic/kmap_types.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
#include <linux/debugfs.h>

#include "mlx5_core.h"
enum {
	CMD_IF_REV = 5,
};

enum {
	CMD_MODE_POLLING,
	CMD_MODE_EVENTS
};

enum {
	NUM_LONG_LISTS	  = 16,
	NUM_MED_LISTS	  = 64,
	LONG_LIST_SIZE	  = (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 +
				MLX5_CMD_DATA_BLOCK_SIZE,
	MED_LIST_SIZE	  = 16 + MLX5_CMD_DATA_BLOCK_SIZE,
};
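/* Sizing note (inferred from the expression above, not stated in the
 * source): LONG_LIST_SIZE appears chosen so that one cached large message
 * can carry the 8-byte addresses for 2GB worth of PAGE_SIZE pages in a
 * single MANAGE_PAGES command, plus the 16 inline bytes and one extra
 * data block of slack.
 */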
enum {
	MLX5_CMD_DELIVERY_STAT_OK			= 0x0,
	MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR		= 0x1,
	MLX5_CMD_DELIVERY_STAT_TOK_ERR			= 0x2,
	MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR		= 0x3,
	MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR	= 0x4,
	MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR		= 0x5,
	MLX5_CMD_DELIVERY_STAT_FW_ERR			= 0x6,
	MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR		= 0x7,
	MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR		= 0x8,
	MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR	= 0x9,
	MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR		= 0x10,
};
static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
					   struct mlx5_cmd_msg *in,
					   struct mlx5_cmd_msg *out,
					   void *uout, int uout_size,
					   mlx5_cmd_cbk_t cbk,
					   void *context, int page_queue)
{
	gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
	struct mlx5_cmd_work_ent *ent;

	ent = kzalloc(sizeof(*ent), alloc_flags);
	if (!ent)
		return ERR_PTR(-ENOMEM);

	ent->in		= in;
	ent->out	= out;
	ent->uout	= uout;
	ent->uout_size	= uout_size;
	ent->callback	= cbk;
	ent->context	= context;
	ent->cmd	= cmd;
	ent->page_queue	= page_queue;

	return ent;
}
static u8 alloc_token(struct mlx5_cmd *cmd)
{
	u8 token;

	spin_lock(&cmd->token_lock);
	cmd->token++;
	if (cmd->token == 0)
		cmd->token++;
	token = cmd->token;
	spin_unlock(&cmd->token_lock);

	return token;
}
static int alloc_ent(struct mlx5_cmd *cmd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
	if (ret < cmd->max_reg_cmds)
		clear_bit(ret, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);

	return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
}
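/* Entry allocation above: cmd->bitmask holds one bit per regular command
 * slot; find_first_bit() locates a free slot and clear_bit() claims it,
 * while free_ent() below simply sets the bit again. Slot cmd->max_reg_cmds
 * is reserved for page commands in cmd_work_handler() and never enters
 * this bitmap.
 */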
static void free_ent(struct mlx5_cmd *cmd, int idx)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	set_bit(idx, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}
static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
{
	return cmd->cmd_buf + (idx << cmd->log_stride);
}
static u8 xor8_buf(void *buf, int len)
{
	u8 *ptr = buf;
	u8 sum = 0;
	int i;

	for (i = 0; i < len; i++)
		sum ^= ptr[i];

	return sum;
}
static int verify_block_sig(struct mlx5_cmd_prot_block *block)
{
	if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff)
		return -EINVAL;

	if (xor8_buf(block, sizeof(*block)) != 0xff)
		return -EINVAL;

	return 0;
}
static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token,
			   int csum)
{
	block->token = token;
	if (csum) {
		block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) -
					    sizeof(block->data) - 2);
		block->sig = ~xor8_buf(block, sizeof(*block) - 1);
	}
}
static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum)
{
	struct mlx5_cmd_mailbox *next = msg->next;

	while (next) {
		calc_block_sig(next->buf, token, csum);
		next = next->next;
	}
}
static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
{
	ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
	calc_chain_sig(ent->in, ent->token, csum);
	calc_chain_sig(ent->out, ent->token, csum);
}
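/* Signature scheme (derivable from the code above): each signed region
 * carries a one-byte signature set to the bitwise complement of the XOR
 * of all its other bytes, so XOR-ing the whole region, signature byte
 * included, yields 0xff. verify_block_sig() relies on exactly this
 * invariant when it compares xor8_buf() results against 0xff.
 */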
static void poll_timeout(struct mlx5_cmd_work_ent *ent)
{
	unsigned long poll_end = jiffies + msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000);
	u8 own;

	do {
		own = ent->lay->status_own;
		if (!(own & CMD_OWNER_HW)) {
			ent->ret = 0;
			return;
		}
		usleep_range(5000, 10000);
	} while (time_before(jiffies, poll_end));

	ent->ret = -ETIMEDOUT;
}
static void free_cmd(struct mlx5_cmd_work_ent *ent)
{
	kfree(ent);
}
static int verify_signature(struct mlx5_cmd_work_ent *ent)
{
	struct mlx5_cmd_mailbox *next = ent->out->next;
	int err;
	u8 sig;

	sig = xor8_buf(ent->lay, sizeof(*ent->lay));
	if (sig != 0xff)
		return -EINVAL;

	while (next) {
		err = verify_block_sig(next->buf);
		if (err)
			return err;

		next = next->next;
	}

	return 0;
}
static void dump_buf(void *buf, int size, int data_only, int offset)
{
	__be32 *p = buf;
	int i;

	for (i = 0; i < size; i += 16) {
		pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]),
			 be32_to_cpu(p[1]), be32_to_cpu(p[2]),
			 be32_to_cpu(p[3]));
		p += 4;
		offset += 16;
	}
	if (!data_only)
		pr_debug("\n");
}
const char *mlx5_command_str(int command)
{
	switch (command) {
	case MLX5_CMD_OP_QUERY_HCA_CAP:
		return "QUERY_HCA_CAP";

	case MLX5_CMD_OP_SET_HCA_CAP:
		return "SET_HCA_CAP";

	case MLX5_CMD_OP_QUERY_ADAPTER:
		return "QUERY_ADAPTER";

	case MLX5_CMD_OP_INIT_HCA:
		return "INIT_HCA";

	case MLX5_CMD_OP_TEARDOWN_HCA:
		return "TEARDOWN_HCA";

	case MLX5_CMD_OP_ENABLE_HCA:
		return "MLX5_CMD_OP_ENABLE_HCA";

	case MLX5_CMD_OP_DISABLE_HCA:
		return "MLX5_CMD_OP_DISABLE_HCA";

	case MLX5_CMD_OP_QUERY_PAGES:
		return "QUERY_PAGES";

	case MLX5_CMD_OP_MANAGE_PAGES:
		return "MANAGE_PAGES";

	case MLX5_CMD_OP_CREATE_MKEY:
		return "CREATE_MKEY";

	case MLX5_CMD_OP_QUERY_MKEY:
		return "QUERY_MKEY";

	case MLX5_CMD_OP_DESTROY_MKEY:
		return "DESTROY_MKEY";

	case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
		return "QUERY_SPECIAL_CONTEXTS";

	case MLX5_CMD_OP_CREATE_EQ:
		return "CREATE_EQ";

	case MLX5_CMD_OP_DESTROY_EQ:
		return "DESTROY_EQ";

	case MLX5_CMD_OP_QUERY_EQ:
		return "QUERY_EQ";

	case MLX5_CMD_OP_CREATE_CQ:
		return "CREATE_CQ";

	case MLX5_CMD_OP_DESTROY_CQ:
		return "DESTROY_CQ";

	case MLX5_CMD_OP_QUERY_CQ:
		return "QUERY_CQ";

	case MLX5_CMD_OP_MODIFY_CQ:
		return "MODIFY_CQ";

	case MLX5_CMD_OP_CREATE_QP:
		return "CREATE_QP";

	case MLX5_CMD_OP_DESTROY_QP:
		return "DESTROY_QP";

	case MLX5_CMD_OP_RST2INIT_QP:
		return "RST2INIT_QP";

	case MLX5_CMD_OP_INIT2RTR_QP:
		return "INIT2RTR_QP";

	case MLX5_CMD_OP_RTR2RTS_QP:
		return "RTR2RTS_QP";

	case MLX5_CMD_OP_RTS2RTS_QP:
		return "RTS2RTS_QP";

	case MLX5_CMD_OP_SQERR2RTS_QP:
		return "SQERR2RTS_QP";

	case MLX5_CMD_OP_2ERR_QP:
		return "2ERR_QP";

	case MLX5_CMD_OP_2RST_QP:
		return "2RST_QP";

	case MLX5_CMD_OP_QUERY_QP:
		return "QUERY_QP";

	case MLX5_CMD_OP_MAD_IFC:
		return "MAD_IFC";

	case MLX5_CMD_OP_INIT2INIT_QP:
		return "INIT2INIT_QP";

	case MLX5_CMD_OP_CREATE_PSV:
		return "CREATE_PSV";

	case MLX5_CMD_OP_DESTROY_PSV:
		return "DESTROY_PSV";

	case MLX5_CMD_OP_CREATE_SRQ:
		return "CREATE_SRQ";

	case MLX5_CMD_OP_DESTROY_SRQ:
		return "DESTROY_SRQ";

	case MLX5_CMD_OP_QUERY_SRQ:
		return "QUERY_SRQ";

	case MLX5_CMD_OP_ARM_RQ:
		return "ARM_RQ";

	case MLX5_CMD_OP_CREATE_XRC_SRQ:
		return "CREATE_XRC_SRQ";

	case MLX5_CMD_OP_DESTROY_XRC_SRQ:
		return "DESTROY_XRC_SRQ";

	case MLX5_CMD_OP_QUERY_XRC_SRQ:
		return "QUERY_XRC_SRQ";

	case MLX5_CMD_OP_ARM_XRC_SRQ:
		return "ARM_XRC_SRQ";

	case MLX5_CMD_OP_ALLOC_PD:
		return "ALLOC_PD";

	case MLX5_CMD_OP_DEALLOC_PD:
		return "DEALLOC_PD";

	case MLX5_CMD_OP_ALLOC_UAR:
		return "ALLOC_UAR";

	case MLX5_CMD_OP_DEALLOC_UAR:
		return "DEALLOC_UAR";

	case MLX5_CMD_OP_ATTACH_TO_MCG:
		return "ATTACH_TO_MCG";

	case MLX5_CMD_OP_DETTACH_FROM_MCG:
		return "DETTACH_FROM_MCG";

	case MLX5_CMD_OP_ALLOC_XRCD:
		return "ALLOC_XRCD";

	case MLX5_CMD_OP_DEALLOC_XRCD:
		return "DEALLOC_XRCD";

	case MLX5_CMD_OP_ACCESS_REG:
		return "MLX5_CMD_OP_ACCESS_REG";

	default: return "unknown command opcode";
	}
}
static void dump_command(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_work_ent *ent, int input)
{
	u16 op = be16_to_cpu(((struct mlx5_inbox_hdr *)(ent->lay->in))->opcode);
	struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
	struct mlx5_cmd_mailbox *next = msg->next;
	int data_only;
	u32 offset = 0;
	int dump_len;

	data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));

	if (data_only)
		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
				   "dump command data %s(0x%x) %s\n",
				   mlx5_command_str(op), op,
				   input ? "INPUT" : "OUTPUT");
	else
		mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n",
			      mlx5_command_str(op), op,
			      input ? "INPUT" : "OUTPUT");

	if (data_only) {
		if (input) {
			dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset);
			offset += sizeof(ent->lay->in);
		} else {
			dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset);
			offset += sizeof(ent->lay->out);
		}
	} else {
		dump_buf(ent->lay, sizeof(*ent->lay), 0, offset);
		offset += sizeof(*ent->lay);
	}

	while (next && offset < msg->len) {
		if (data_only) {
			dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset);
			dump_buf(next->buf, dump_len, 1, offset);
			offset += MLX5_CMD_DATA_BLOCK_SIZE;
		} else {
			mlx5_core_dbg(dev, "command block:\n");
			dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset);
			offset += sizeof(struct mlx5_cmd_prot_block);
		}
		next = next->next;
	}

	if (data_only)
		pr_debug("\n");
}
static void cmd_work_handler(struct work_struct *work)
{
	struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
	struct mlx5_cmd *cmd = ent->cmd;
	struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
	struct mlx5_cmd_layout *lay;
	struct semaphore *sem;

	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
	down(sem);
	if (!ent->page_queue) {
		ent->idx = alloc_ent(cmd);
		if (ent->idx < 0) {
			mlx5_core_err(dev, "failed to allocate command entry\n");
			up(sem);
			return;
		}
	} else {
		ent->idx = cmd->max_reg_cmds;
	}

	ent->token = alloc_token(cmd);
	cmd->ent_arr[ent->idx] = ent;
	lay = get_inst(cmd, ent->idx);
	ent->lay = lay;
	memset(lay, 0, sizeof(*lay));
	memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
	ent->op = be32_to_cpu(lay->in[0]) >> 16;
	if (ent->in->next)
		lay->in_ptr = cpu_to_be64(ent->in->next->dma);
	lay->inlen = cpu_to_be32(ent->in->len);
	if (ent->out->next)
		lay->out_ptr = cpu_to_be64(ent->out->next->dma);
	lay->outlen = cpu_to_be32(ent->out->len);
	lay->type = MLX5_PCI_CMD_XPORT;
	lay->token = ent->token;
	lay->status_own = CMD_OWNER_HW;
	set_signature(ent, !cmd->checksum_disabled);
	dump_command(dev, ent, 1);
	ent->ts1 = ktime_get_ns();

	/* ring doorbell after the descriptor is valid */
	mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
	wmb();
	iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);

	/* if not in polling don't use ent after this point */
	if (cmd->mode == CMD_MODE_POLLING) {
		poll_timeout(ent);
		/* make sure we read the descriptor after ownership is SW */
		rmb();
		mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
	}
}
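/* Note on the handler above: once the doorbell is rung, the hardware owns
 * the descriptor (CMD_OWNER_HW), so in event mode this context must not
 * touch the entry again; completion is processed in
 * mlx5_cmd_comp_handler() from the EQ path. Only in polling mode does
 * this function keep using the entry, and only after poll_timeout() has
 * observed ownership return to software.
 */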
static const char *deliv_status_to_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_DELIVERY_STAT_OK:
		return "no errors";
	case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
		return "signature error";
	case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
		return "token error";
	case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
		return "bad block number";
	case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
		return "output pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
		return "input pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_FW_ERR:
		return "firmware internal error";
	case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
		return "command input length error";
	case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
		return "command output length error";
	case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
		return "reserved fields not cleared";
	case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
		return "bad command descriptor type";
	default:
		return "unknown status code";
	}
}
static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
{
	struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);

	return be16_to_cpu(hdr->opcode);
}
static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
	unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
	struct mlx5_cmd *cmd = &dev->cmd;
	int err;

	if (cmd->mode == CMD_MODE_POLLING) {
		wait_for_completion(&ent->done);
		err = ent->ret;
	} else {
		if (!wait_for_completion_timeout(&ent->done, timeout))
			err = -ETIMEDOUT;
		else
			err = ent->ret;
	}
	if (err == -ETIMEDOUT) {
		mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
			       mlx5_command_str(msg_to_opcode(ent->in)),
			       msg_to_opcode(ent->in));
	}
	mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
		      err, deliv_status_to_str(ent->status), ent->status);

	return err;
}
/*	Notes:
 *	1. Callback functions may not sleep
 *	2. Page queue commands do not support asynchronous completion
 */
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
			   struct mlx5_cmd_msg *out, void *uout, int uout_size,
			   mlx5_cmd_cbk_t callback,
			   void *context, int page_queue, u8 *status)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	struct mlx5_cmd_stats *stats;
	int err = 0;
	s64 ds;
	u16 op;

	if (callback && page_queue)
		return -EINVAL;

	ent = alloc_cmd(cmd, in, out, uout, uout_size, callback, context,
			page_queue);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	if (!callback)
		init_completion(&ent->done);

	INIT_WORK(&ent->work, cmd_work_handler);
	if (page_queue) {
		cmd_work_handler(&ent->work);
	} else if (!queue_work(cmd->wq, &ent->work)) {
		mlx5_core_warn(dev, "failed to queue work\n");
		err = -ENOMEM;
		goto out_free;
	}

	if (!callback) {
		err = wait_func(dev, ent);
		if (err == -ETIMEDOUT)
			goto out;

		ds = ent->ts2 - ent->ts1;
		op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
		if (op < ARRAY_SIZE(cmd->stats)) {
			stats = &cmd->stats[op];
			spin_lock_irq(&stats->lock);
			stats->sum += ds;
			++stats->n;
			spin_unlock_irq(&stats->lock);
		}
		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
				   "fw exec time for %s is %lld nsec\n",
				   mlx5_command_str(op), ds);
		*status = ent->status;
		free_cmd(ent);
	}

	return err;

out_free:
	free_cmd(ent);
out:
	return err;
}
static ssize_t dbg_write(struct file *filp, const char __user *buf,
			 size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char lbuf[3];
	int err;

	if (!dbg->in_msg || !dbg->out_msg)
		return -ENOMEM;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	lbuf[sizeof(lbuf) - 1] = 0;

	if (strcmp(lbuf, "go"))
		return -EINVAL;

	err = mlx5_cmd_exec(dev, dbg->in_msg, dbg->inlen, dbg->out_msg, dbg->outlen);

	return err ? err : count;
}
static const struct file_operations fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= dbg_write,
};
static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_mailbox *next;
	int copy;

	if (!to || !from)
		return -ENOMEM;

	copy = min_t(int, size, sizeof(to->first.data));
	memcpy(to->first.data, from, copy);
	size -= copy;
	from += copy;

	next = to->next;
	while (size) {
		if (!next)
			return -ENOMEM;

		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
		block = next->buf;
		memcpy(block->data, from, copy);
		from += copy;
		size -= copy;
		next = next->next;
	}

	return 0;
}
static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_mailbox *next;
	int copy;

	if (!to || !from)
		return -ENOMEM;

	copy = min_t(int, size, sizeof(from->first.data));
	memcpy(to, from->first.data, copy);
	size -= copy;
	to += copy;

	next = from->next;
	while (size) {
		if (!next)
			return -ENOMEM;

		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
		block = next->buf;
		memcpy(to, block->data, copy);
		to += copy;
		size -= copy;
		next = next->next;
	}

	return 0;
}
static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev,
					      gfp_t flags)
{
	struct mlx5_cmd_mailbox *mailbox;

	mailbox = kmalloc(sizeof(*mailbox), flags);
	if (!mailbox)
		return ERR_PTR(-ENOMEM);

	mailbox->buf = pci_pool_alloc(dev->cmd.pool, flags,
				      &mailbox->dma);
	if (!mailbox->buf) {
		mlx5_core_dbg(dev, "failed allocation\n");
		kfree(mailbox);
		return ERR_PTR(-ENOMEM);
	}
	memset(mailbox->buf, 0, sizeof(struct mlx5_cmd_prot_block));
	mailbox->next = NULL;

	return mailbox;
}
static void free_cmd_box(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_mailbox *mailbox)
{
	pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
	kfree(mailbox);
}
static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
					       gfp_t flags, int size)
{
	struct mlx5_cmd_mailbox *tmp, *head = NULL;
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_msg *msg;
	int blen;
	int err;
	int n;
	int i;

	msg = kzalloc(sizeof(*msg), flags);
	if (!msg)
		return ERR_PTR(-ENOMEM);

	blen = size - min_t(int, sizeof(msg->first.data), size);
	n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1) / MLX5_CMD_DATA_BLOCK_SIZE;

	for (i = 0; i < n; i++) {
		tmp = alloc_cmd_box(dev, flags);
		if (IS_ERR(tmp)) {
			mlx5_core_warn(dev, "failed allocating block\n");
			err = PTR_ERR(tmp);
			goto err_alloc;
		}

		block = tmp->buf;
		tmp->next = head;
		block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
		block->block_num = cpu_to_be32(n - i - 1);
		head = tmp;
	}
	msg->next = head;
	msg->len = size;
	return msg;

err_alloc:
	while (head) {
		tmp = head->next;
		free_cmd_box(dev, head);
		head = tmp;
	}
	kfree(msg);

	return ERR_PTR(err);
}
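/* Layout note (inferred from the allocator above): payload beyond the 16
 * inline bytes lives in a chain of mailbox blocks. Blocks are allocated
 * and prepended in reverse, so walking msg->next yields block_num 0, 1,
 * ..., n-1, and each block's "next" field carries the bus address of the
 * following block (0 terminates the chain).
 */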
static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
			      struct mlx5_cmd_msg *msg)
{
	struct mlx5_cmd_mailbox *head = msg->next;
	struct mlx5_cmd_mailbox *next;

	while (head) {
		next = head->next;
		free_cmd_box(dev, head);
		head = next;
	}
	kfree(msg);
}
static ssize_t data_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	void *ptr;
	int err;

	if (*pos != 0)
		return -EINVAL;

	kfree(dbg->in_msg);
	dbg->in_msg = NULL;
	dbg->inlen = 0;

	ptr = kzalloc(count, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	if (copy_from_user(ptr, buf, count)) {
		err = -EFAULT;
		goto out;
	}

	dbg->in_msg = ptr;
	dbg->inlen = count;

	*pos = count;

	return count;

out:
	kfree(ptr);
	return err;
}
static ssize_t data_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	int copy;

	if (*pos)
		return 0;

	if (!dbg->out_msg)
		return -ENOMEM;

	copy = min_t(int, count, dbg->outlen);
	if (copy_to_user(buf, dbg->out_msg, copy))
		return -EFAULT;

	*pos += copy;

	return copy;
}
static const struct file_operations dfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= data_write,
	.read	= data_read,
};
static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count,
			   loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char outlen[8];
	int err;

	if (*pos)
		return 0;

	err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen);
	if (err < 0)
		return err;

	if (copy_to_user(buf, &outlen, err))
		return -EFAULT;

	*pos += err;

	return err;
}
static ssize_t outlen_write(struct file *filp, const char __user *buf,
			    size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char outlen_str[8];
	int outlen;
	void *ptr;
	int err;

	if (*pos != 0 || count > 6)
		return -EINVAL;

	kfree(dbg->out_msg);
	dbg->out_msg = NULL;
	dbg->outlen = 0;

	if (copy_from_user(outlen_str, buf, count))
		return -EFAULT;

	outlen_str[7] = 0;

	err = sscanf(outlen_str, "%d", &outlen);
	if (err < 0)
		return err;

	ptr = kzalloc(outlen, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	dbg->out_msg = ptr;
	dbg->outlen = outlen;

	*pos = count;

	return count;
}
= {
964 .owner
= THIS_MODULE
,
966 .write
= outlen_write
,
static void set_wqname(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
		 dev_name(&dev->pdev->dev));
}
static void clean_debug_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;

	if (!mlx5_debugfs_root)
		return;

	mlx5_cmdif_debugfs_cleanup(dev);
	debugfs_remove_recursive(dbg->dbg_root);
}
static int create_debugfs_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	int err = -ENOMEM;

	if (!mlx5_debugfs_root)
		return 0;

	dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root);
	if (!dbg->dbg_root)
		return err;

	dbg->dbg_in = debugfs_create_file("in", 0400, dbg->dbg_root,
					  dev, &dfops);
	if (!dbg->dbg_in)
		goto err_dbg;

	dbg->dbg_out = debugfs_create_file("out", 0200, dbg->dbg_root,
					   dev, &dfops);
	if (!dbg->dbg_out)
		goto err_dbg;

	dbg->dbg_outlen = debugfs_create_file("out_len", 0600, dbg->dbg_root,
					      dev, &olfops);
	if (!dbg->dbg_outlen)
		goto err_dbg;

	dbg->dbg_status = debugfs_create_u8("status", 0600, dbg->dbg_root,
					    &dbg->status);
	if (!dbg->dbg_status)
		goto err_dbg;

	dbg->dbg_run = debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops);
	if (!dbg->dbg_run)
		goto err_dbg;

	mlx5_cmdif_debugfs_init(dev);

	return 0;

err_dbg:
	clean_debug_files(dev);
	return err;
}
void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	for (i = 0; i < cmd->max_reg_cmds; i++)
		down(&cmd->sem);

	down(&cmd->pages_sem);

	flush_workqueue(cmd->wq);

	cmd->mode = CMD_MODE_EVENTS;

	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	for (i = 0; i < cmd->max_reg_cmds; i++)
		down(&cmd->sem);

	down(&cmd->pages_sem);

	flush_workqueue(cmd->wq);
	cmd->mode = CMD_MODE_POLLING;

	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}
static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
{
	unsigned long flags;

	if (msg->cache) {
		spin_lock_irqsave(&msg->cache->lock, flags);
		list_add_tail(&msg->list, &msg->cache->head);
		spin_unlock_irqrestore(&msg->cache->lock, flags);
	} else {
		mlx5_free_cmd_msg(dev, msg);
	}
}
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	mlx5_cmd_cbk_t callback;
	void *context;
	int err;
	int i;
	s64 ds;
	struct mlx5_cmd_stats *stats;
	unsigned long flags;

	for (i = 0; i < (1 << cmd->log_sz); i++) {
		if (test_bit(i, &vector)) {
			struct semaphore *sem;

			ent = cmd->ent_arr[i];
			if (ent->page_queue)
				sem = &cmd->pages_sem;
			else
				sem = &cmd->sem;
			ent->ts2 = ktime_get_ns();
			memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
			dump_command(dev, ent, 0);
			if (!ent->ret) {
				if (!cmd->checksum_disabled)
					ent->ret = verify_signature(ent);
				else
					ent->ret = 0;
				ent->status = ent->lay->status_own >> 1;
				mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
					      ent->ret, deliv_status_to_str(ent->status), ent->status);
			}
			free_ent(cmd, ent->idx);
			if (ent->callback) {
				ds = ent->ts2 - ent->ts1;
				if (ent->op < ARRAY_SIZE(cmd->stats)) {
					stats = &cmd->stats[ent->op];
					spin_lock_irqsave(&stats->lock, flags);
					stats->sum += ds;
					++stats->n;
					spin_unlock_irqrestore(&stats->lock, flags);
				}

				callback = ent->callback;
				context = ent->context;
				err = ent->ret;
				if (!err)
					err = mlx5_copy_from_msg(ent->uout,
								 ent->out,
								 ent->uout_size);

				mlx5_free_cmd_msg(dev, ent->out);
				free_msg(dev, ent->in);

				free_cmd(ent);
				callback(err, context);
			} else {
				complete(&ent->done);
			}
			up(sem);
		}
	}
}
EXPORT_SYMBOL(mlx5_cmd_comp_handler);
static int status_to_err(u8 status)
{
	return status ? -1 : 0; /* TBD more meaningful codes */
}
static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
				      gfp_t gfp)
{
	struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
	struct mlx5_cmd *cmd = &dev->cmd;
	struct cache_ent *ent = NULL;

	if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE)
		ent = &cmd->cache.large;
	else if (in_size > 16 && in_size <= MED_LIST_SIZE)
		ent = &cmd->cache.med;

	if (ent) {
		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			msg = list_entry(ent->head.next, typeof(*msg), list);
			/* For cached lists, we must explicitly state what is
			 * the real size
			 */
			msg->len = in_size;
			list_del(&msg->list);
		}
		spin_unlock_irq(&ent->lock);
	}

	if (IS_ERR(msg))
		msg = mlx5_alloc_cmd_msg(dev, gfp, in_size);

	return msg;
}
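/* Cache selection, from the bounds above: requests of 17..MED_LIST_SIZE
 * bytes are served from the medium cache, MED_LIST_SIZE+1..LONG_LIST_SIZE
 * from the large cache, and anything else (including the inline-only
 * 16-byte case) gets a fresh allocation.
 */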
static int is_manage_pages(struct mlx5_inbox_hdr *in)
{
	return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES;
}
static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		    int out_size, mlx5_cmd_cbk_t callback, void *context)
{
	struct mlx5_cmd_msg *inb;
	struct mlx5_cmd_msg *outb;
	int pages_queue;
	gfp_t gfp;
	int err;
	u8 status = 0;

	pages_queue = is_manage_pages(in);
	gfp = callback ? GFP_ATOMIC : GFP_KERNEL;

	inb = alloc_msg(dev, in_size, gfp);
	if (IS_ERR(inb)) {
		err = PTR_ERR(inb);
		return err;
	}

	err = mlx5_copy_to_msg(inb, in, in_size);
	if (err) {
		mlx5_core_warn(dev, "err %d\n", err);
		goto out_in;
	}

	outb = mlx5_alloc_cmd_msg(dev, gfp, out_size);
	if (IS_ERR(outb)) {
		err = PTR_ERR(outb);
		goto out_in;
	}

	err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
			      pages_queue, &status);
	if (err)
		goto out_out;

	mlx5_core_dbg(dev, "err %d, status %d\n", err, status);
	if (status) {
		err = status_to_err(status);
		goto out_out;
	}

	if (!callback)
		err = mlx5_copy_from_msg(out, outb, out_size);

out_out:
	if (!callback)
		mlx5_free_cmd_msg(dev, outb);

out_in:
	if (!callback)
		free_msg(dev, inb);
	return err;
}
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size)
{
	return cmd_exec(dev, in, in_size, out, out_size, NULL, NULL);
}
EXPORT_SYMBOL(mlx5_cmd_exec);
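/* Usage sketch (illustrative only, not part of this file): a caller
 * builds an inbox/outbox pair, sets the opcode, and funnels the returned
 * status through mlx5_cmd_status_to_err(). The mailbox struct names
 * below follow the definitions in linux/mlx5/driver.h of this era and
 * should be treated as an assumption:
 *
 *	struct mlx5_query_adapter_mbox_in in;
 *	struct mlx5_query_adapter_mbox_out out;
 *	int err;
 *
 *	memset(&in, 0, sizeof(in));
 *	memset(&out, 0, sizeof(out));
 *	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_ADAPTER);
 *	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
 *	if (!err)
 *		err = mlx5_cmd_status_to_err(&out.hdr);
 */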
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
		     void *out, int out_size, mlx5_cmd_cbk_t callback,
		     void *context)
{
	return cmd_exec(dev, in, in_size, out, out_size, callback, context);
}
EXPORT_SYMBOL(mlx5_cmd_exec_cb);
static void destroy_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_msg *msg;
	struct mlx5_cmd_msg *n;

	list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) {
		list_del(&msg->list);
		mlx5_free_cmd_msg(dev, msg);
	}

	list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) {
		list_del(&msg->list);
		mlx5_free_cmd_msg(dev, msg);
	}
}
static int create_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_msg *msg;
	int err;
	int i;

	spin_lock_init(&cmd->cache.large.lock);
	INIT_LIST_HEAD(&cmd->cache.large.head);
	spin_lock_init(&cmd->cache.med.lock);
	INIT_LIST_HEAD(&cmd->cache.med.head);

	for (i = 0; i < NUM_LONG_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE);
		if (IS_ERR(msg)) {
			err = PTR_ERR(msg);
			goto ex_err;
		}
		msg->cache = &cmd->cache.large;
		list_add_tail(&msg->list, &cmd->cache.large.head);
	}

	for (i = 0; i < NUM_MED_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE);
		if (IS_ERR(msg)) {
			err = PTR_ERR(msg);
			goto ex_err;
		}
		msg->cache = &cmd->cache.med;
		list_add_tail(&msg->list, &cmd->cache.med.head);
	}

	return 0;

ex_err:
	destroy_msg_cache(dev);
	return err;
}
static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	struct device *ddev = &dev->pdev->dev;

	cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
						 &cmd->alloc_dma, GFP_KERNEL);
	if (!cmd->cmd_alloc_buf)
		return -ENOMEM;

	/* make sure it is aligned to 4K */
	if (!((uintptr_t)cmd->cmd_alloc_buf & (MLX5_ADAPTER_PAGE_SIZE - 1))) {
		cmd->cmd_buf = cmd->cmd_alloc_buf;
		cmd->dma = cmd->alloc_dma;
		cmd->alloc_size = MLX5_ADAPTER_PAGE_SIZE;
		return 0;
	}

	dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
			  cmd->alloc_dma);
	cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev,
						 2 * MLX5_ADAPTER_PAGE_SIZE - 1,
						 &cmd->alloc_dma, GFP_KERNEL);
	if (!cmd->cmd_alloc_buf)
		return -ENOMEM;

	cmd->cmd_buf = PTR_ALIGN(cmd->cmd_alloc_buf, MLX5_ADAPTER_PAGE_SIZE);
	cmd->dma = ALIGN(cmd->alloc_dma, MLX5_ADAPTER_PAGE_SIZE);
	cmd->alloc_size = 2 * MLX5_ADAPTER_PAGE_SIZE - 1;
	return 0;
}
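/* Allocation strategy above: try a single 4K coherent allocation first,
 * since dma_zalloc_coherent() usually returns page-aligned memory. If
 * the buffer happens to be misaligned, fall back to allocating
 * 2 * 4K - 1 bytes, which is guaranteed to contain a 4K-aligned window,
 * and point cmd_buf/cmd->dma at that window while remembering the real
 * allocation size for the free path.
 */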
static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	struct device *ddev = &dev->pdev->dev;

	dma_free_coherent(ddev, cmd->alloc_size, cmd->cmd_alloc_buf,
			  cmd->alloc_dma);
}
int mlx5_cmd_init(struct mlx5_core_dev *dev)
{
	int size = sizeof(struct mlx5_cmd_prot_block);
	int align = roundup_pow_of_two(size);
	struct mlx5_cmd *cmd = &dev->cmd;
	u32 cmd_h, cmd_l;
	u16 cmd_if_rev;
	int err;
	int i;

	cmd_if_rev = cmdif_rev(dev);
	if (cmd_if_rev != CMD_IF_REV) {
		dev_err(&dev->pdev->dev,
			"Driver cmdif rev(%d) differs from firmware's(%d)\n",
			CMD_IF_REV, cmd_if_rev);
		return -EINVAL;
	}

	cmd->pool = pci_pool_create("mlx5_cmd", dev->pdev, size, align, 0);
	if (!cmd->pool)
		return -ENOMEM;

	err = alloc_cmd_page(dev, cmd);
	if (err)
		goto err_free_pool;

	cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
	cmd->log_sz = cmd_l >> 4 & 0xf;
	cmd->log_stride = cmd_l & 0xf;
	if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
		dev_err(&dev->pdev->dev, "firmware reports too many outstanding commands %d\n",
			1 << cmd->log_sz);
		err = -EINVAL;
		goto err_free_page;
	}

	if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
		dev_err(&dev->pdev->dev, "command queue size overflow\n");
		err = -EINVAL;
		goto err_free_page;
	}

	cmd->checksum_disabled = 1;
	cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
	cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;

	cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
	if (cmd->cmdif_rev > CMD_IF_REV) {
		dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
			CMD_IF_REV, cmd->cmdif_rev);
		err = -ENOTSUPP;
		goto err_free_page;
	}

	spin_lock_init(&cmd->alloc_lock);
	spin_lock_init(&cmd->token_lock);
	for (i = 0; i < ARRAY_SIZE(cmd->stats); i++)
		spin_lock_init(&cmd->stats[i].lock);

	sema_init(&cmd->sem, cmd->max_reg_cmds);
	sema_init(&cmd->pages_sem, 1);

	cmd_h = (u32)((u64)(cmd->dma) >> 32);
	cmd_l = (u32)(cmd->dma);
	if (cmd_l & 0xfff) {
		dev_err(&dev->pdev->dev, "invalid command queue address\n");
		err = -ENOMEM;
		goto err_free_page;
	}

	iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
	iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);

	/* Make sure firmware sees the complete address before we proceed */
	wmb();

	mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));

	cmd->mode = CMD_MODE_POLLING;

	err = create_msg_cache(dev);
	if (err) {
		dev_err(&dev->pdev->dev, "failed to create command cache\n");
		goto err_free_page;
	}

	set_wqname(dev);
	cmd->wq = create_singlethread_workqueue(cmd->wq_name);
	if (!cmd->wq) {
		dev_err(&dev->pdev->dev, "failed to create command workqueue\n");
		err = -ENOMEM;
		goto err_cache;
	}

	err = create_debugfs_files(dev);
	if (err) {
		err = -ENOMEM;
		goto err_wq;
	}

	return 0;

err_wq:
	destroy_workqueue(cmd->wq);

err_cache:
	destroy_msg_cache(dev);

err_free_page:
	free_cmd_page(dev, cmd);

err_free_pool:
	pci_pool_destroy(cmd->pool);

	return err;
}
EXPORT_SYMBOL(mlx5_cmd_init);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	clean_debug_files(dev);
	destroy_workqueue(cmd->wq);
	destroy_msg_cache(dev);
	free_cmd_page(dev, cmd);
	pci_pool_destroy(cmd->pool);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup);
static const char *cmd_status_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:
		return "OK";
	case MLX5_CMD_STAT_INT_ERR:
		return "internal error";
	case MLX5_CMD_STAT_BAD_OP_ERR:
		return "bad operation";
	case MLX5_CMD_STAT_BAD_PARAM_ERR:
		return "bad parameter";
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
		return "bad system state";
	case MLX5_CMD_STAT_BAD_RES_ERR:
		return "bad resource";
	case MLX5_CMD_STAT_RES_BUSY:
		return "resource busy";
	case MLX5_CMD_STAT_LIM_ERR:
		return "limits exceeded";
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
		return "bad resource state";
	case MLX5_CMD_STAT_IX_ERR:
		return "bad index";
	case MLX5_CMD_STAT_NO_RES_ERR:
		return "no resources";
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
		return "bad input length";
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
		return "bad output length";
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
		return "bad QP state";
	case MLX5_CMD_STAT_BAD_PKT_ERR:
		return "bad packet (discarded)";
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
		return "bad size too many outstanding CQEs";
	default:
		return "unknown status";
	}
}
static int cmd_status_to_err(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:				return 0;
	case MLX5_CMD_STAT_INT_ERR:			return -EIO;
	case MLX5_CMD_STAT_BAD_OP_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_PARAM_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_RES_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_RES_BUSY:			return -EBUSY;
	case MLX5_CMD_STAT_LIM_ERR:			return -ENOMEM;
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_IX_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_NO_RES_ERR:			return -EAGAIN;
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_PKT_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:	return -EINVAL;
	default:					return -EIO;
	}
}
/* this will be available till all the commands use set/get macros */
int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr)
{
	if (!hdr->status)
		return 0;

	pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
		cmd_status_str(hdr->status), hdr->status,
		be32_to_cpu(hdr->syndrome));

	return cmd_status_to_err(hdr->status);
}
int mlx5_cmd_status_to_err_v2(void *ptr)
{
	u32 syndrome;
	u8  status;

	status = be32_to_cpu(*(__be32 *)ptr) >> 24;
	if (!status)
		return 0;

	syndrome = be32_to_cpu(*(__be32 *)(ptr + 4));

	pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
		cmd_status_str(status), status, syndrome);

	return cmd_status_to_err(status);
}