/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/errno.h>

#include <linux/mlx4/cmd.h>
#include <linux/mlx4/device.h>
#include <linux/semaphore.h>
#include <rdma/ib_smi.h>
#include <linux/delay.h>

#define CMD_POLL_TOKEN 0xffff
#define INBOX_MASK	0xffffffffffffff00ULL

#define CMD_CHAN_VER 1
#define CMD_CHAN_IF_REV 1
enum {
	/* command completed successfully: */
	CMD_STAT_OK		= 0x00,
	/* Internal error (such as a bus error) occurred while processing command: */
	CMD_STAT_INTERNAL_ERR	= 0x01,
	/* Operation/command not supported or opcode modifier not supported: */
	CMD_STAT_BAD_OP		= 0x02,
	/* Parameter not supported or parameter out of range: */
	CMD_STAT_BAD_PARAM	= 0x03,
	/* System not enabled or bad system state: */
	CMD_STAT_BAD_SYS_STATE	= 0x04,
	/* Attempt to access reserved or unallocated resource: */
	CMD_STAT_BAD_RESOURCE	= 0x05,
	/* Requested resource is currently executing a command, or is otherwise busy: */
	CMD_STAT_RESOURCE_BUSY	= 0x06,
	/* Required capability exceeds device limits: */
	CMD_STAT_EXCEED_LIM	= 0x08,
	/* Resource is not in the appropriate state or ownership: */
	CMD_STAT_BAD_RES_STATE	= 0x09,
	/* Index out of range: */
	CMD_STAT_BAD_INDEX	= 0x0a,
	/* FW image corrupted: */
	CMD_STAT_BAD_NVMEM	= 0x0b,
	/* Error in ICM mapping (e.g. not enough auxiliary ICM pages to execute command): */
	CMD_STAT_ICM_ERROR	= 0x0c,
	/* Attempt to modify a QP/EE which is not in the presumed state: */
	CMD_STAT_BAD_QP_STATE	= 0x10,
	/* Bad segment parameters (Address/Size): */
	CMD_STAT_BAD_SEG_PARAM	= 0x20,
	/* Memory Region has Memory Windows bound to: */
	CMD_STAT_REG_BOUND	= 0x21,
	/* HCA local attached memory not present: */
	CMD_STAT_LAM_NOT_PRE	= 0x22,
	/* Bad management packet (silently discarded): */
	CMD_STAT_BAD_PKT	= 0x30,
	/* More outstanding CQEs in CQ than new CQ size: */
	CMD_STAT_BAD_SIZE	= 0x40,
	/* Multi Function device support required: */
	CMD_STAT_MULTI_FUNC_REQ	= 0x50,
};
enum {
	HCR_IN_PARAM_OFFSET	= 0x00,
	HCR_IN_MODIFIER_OFFSET	= 0x08,
	HCR_OUT_PARAM_OFFSET	= 0x0c,
	HCR_TOKEN_OFFSET	= 0x14,
	HCR_STATUS_OFFSET	= 0x18,

	HCR_OPMOD_SHIFT		= 12,
	HCR_T_BIT		= 21,
	HCR_E_BIT		= 22,
	HCR_GO_BIT		= 23
};

enum {
	GO_BIT_TIMEOUT_MSECS	= 10000
};
enum mlx4_vlan_transition {
	MLX4_VLAN_TRANSITION_VST_VST = 0,
	MLX4_VLAN_TRANSITION_VST_VGT = 1,
	MLX4_VLAN_TRANSITION_VGT_VST = 2,
	MLX4_VLAN_TRANSITION_VGT_VGT = 3,
};
struct mlx4_cmd_context {
	struct completion	done;
	int			result;
	int			next;
	u64			out_param;
	u16			token;
	u8			fw_status;
};
static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
				    struct mlx4_vhcr_cmd *in_vhcr);

static int mlx4_status_to_errno(u8 status)
{
	static const int trans_table[] = {
		[CMD_STAT_INTERNAL_ERR]	  = -EIO,
		[CMD_STAT_BAD_OP]	  = -EPERM,
		[CMD_STAT_BAD_PARAM]	  = -EINVAL,
		[CMD_STAT_BAD_SYS_STATE]  = -ENXIO,
		[CMD_STAT_BAD_RESOURCE]	  = -EBADF,
		[CMD_STAT_RESOURCE_BUSY]  = -EBUSY,
		[CMD_STAT_EXCEED_LIM]	  = -ENOMEM,
		[CMD_STAT_BAD_RES_STATE]  = -EBADF,
		[CMD_STAT_BAD_INDEX]	  = -EBADF,
		[CMD_STAT_BAD_NVMEM]	  = -EFAULT,
		[CMD_STAT_ICM_ERROR]	  = -ENFILE,
		[CMD_STAT_BAD_QP_STATE]   = -EINVAL,
		[CMD_STAT_BAD_SEG_PARAM]  = -EFAULT,
		[CMD_STAT_REG_BOUND]	  = -EBUSY,
		[CMD_STAT_LAM_NOT_PRE]	  = -EAGAIN,
		[CMD_STAT_BAD_PKT]	  = -EINVAL,
		[CMD_STAT_BAD_SIZE]	  = -ENOMEM,
		[CMD_STAT_MULTI_FUNC_REQ] = -EACCES,
	};

	if (status >= ARRAY_SIZE(trans_table) ||
	    (status != CMD_STAT_OK && trans_table[status] == 0))
		return -EIO;

	return trans_table[status];
}

static u8 mlx4_errno_to_status(int errno)
{
	switch (errno) {
	case -EPERM:
		return CMD_STAT_BAD_OP;
	case -EINVAL:
		return CMD_STAT_BAD_PARAM;
	case -ENXIO:
		return CMD_STAT_BAD_SYS_STATE;
	case -EBUSY:
		return CMD_STAT_RESOURCE_BUSY;
	case -ENOMEM:
		return CMD_STAT_EXCEED_LIM;
	case -ENFILE:
		return CMD_STAT_ICM_ERROR;
	default:
		return CMD_STAT_INTERNAL_ERR;
	}
}
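
/*
 * Illustrative note (not from the original source): the two helpers above
 * form a round trip between firmware status codes and Linux errnos.  On the
 * master side, a wrapper that fails with, say, -EINVAL is reported back to
 * the slave as CMD_STAT_BAD_PARAM in the VHCR status byte, roughly:
 *
 *	err = cmd->wrapper(dev, slave, vhcr, inbox, outbox, cmd);
 *	if (err)
 *		vhcr_cmd->status = mlx4_errno_to_status(err);
 *
 * and the slave converts that status back into an errno with
 * mlx4_status_to_errno() when it picks up the VHCR result.
 */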

static int mlx4_internal_err_ret_value(struct mlx4_dev *dev, u16 op,
				       u8 op_modifier)
{
	switch (op) {
	case MLX4_CMD_UNMAP_ICM:
	case MLX4_CMD_UNMAP_ICM_AUX:
	case MLX4_CMD_UNMAP_FA:
	case MLX4_CMD_2RST_QP:
	case MLX4_CMD_HW2SW_EQ:
	case MLX4_CMD_HW2SW_CQ:
	case MLX4_CMD_HW2SW_SRQ:
	case MLX4_CMD_HW2SW_MPT:
	case MLX4_CMD_CLOSE_HCA:
	case MLX4_QP_FLOW_STEERING_DETACH:
	case MLX4_CMD_FREE_RES:
	case MLX4_CMD_CLOSE_PORT:
		return CMD_STAT_OK;

	case MLX4_CMD_QP_ATTACH:
		/* On Detach case return success */
		if (op_modifier == 0)
			return CMD_STAT_OK;
		return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);

	default:
		return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
	}
}

static int mlx4_closing_cmd_fatal_error(u16 op, u8 fw_status)
{
	/* Any error during the closing commands below is considered fatal */
	if (op == MLX4_CMD_CLOSE_HCA ||
	    op == MLX4_CMD_HW2SW_EQ ||
	    op == MLX4_CMD_HW2SW_CQ ||
	    op == MLX4_CMD_2RST_QP ||
	    op == MLX4_CMD_HW2SW_SRQ ||
	    op == MLX4_CMD_SYNC_TPT ||
	    op == MLX4_CMD_UNMAP_ICM ||
	    op == MLX4_CMD_UNMAP_ICM_AUX ||
	    op == MLX4_CMD_UNMAP_FA)
		return 1;
	/* Error on MLX4_CMD_HW2SW_MPT is fatal except when fw status equals
	 * CMD_STAT_REG_BOUND.
	 * This status indicates that memory region has memory windows bound to it
	 * which may result from invalid user space usage and is not fatal.
	 */
	if (op == MLX4_CMD_HW2SW_MPT && fw_status != CMD_STAT_REG_BOUND)
		return 1;
	return 0;
}

static int mlx4_cmd_reset_flow(struct mlx4_dev *dev, u16 op, u8 op_modifier,
			       int err)
{
	/* Only if reset flow is really active return code is based on
	 * command, otherwise current error code is returned.
	 */
	if (mlx4_internal_err_reset) {
		mlx4_enter_error_state(dev->persist);
		err = mlx4_internal_err_ret_value(dev, op, op_modifier);
	}

	return err;
}

static int comm_pending(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u32 status = readl(&priv->mfunc.comm->slave_read);

	return (swab32(status) >> 31) != priv->cmd.comm_toggle;
}

static int mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u32 val;

	/* To avoid writing to unknown addresses after the device state was
	 * changed to internal error and the function was reset,
	 * check the INTERNAL_ERROR flag which is updated under
	 * device_state_mutex lock.
	 */
	mutex_lock(&dev->persist->device_state_mutex);

	if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
		mutex_unlock(&dev->persist->device_state_mutex);
		return -EIO;
	}

	priv->cmd.comm_toggle ^= 1;
	val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31);
	__raw_writel((__force u32) cpu_to_be32(val),
		     &priv->mfunc.comm->slave_write);
	mmiowb();
	mutex_unlock(&dev->persist->device_state_mutex);
	return 0;
}
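
/*
 * Comm channel word layout used above (sketch, derived from the code in this
 * file): the slave posts a single big-endian 32-bit word to slave_write, and
 * the master acknowledges by echoing the toggle back through slave_read:
 *
 *	bit  31      - toggle, flipped by the slave for every new command
 *	bits 23..16  - comm channel command (MLX4_COMM_CMD_*)
 *	bits 15..0   - command parameter (e.g. a chunk of the VHCR DMA address)
 *
 *	val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31);
 *
 * comm_pending() reports the command as still outstanding as long as the
 * toggle read back from slave_read differs from the locally stored one.
 */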
287 static int mlx4_comm_cmd_poll(struct mlx4_dev
*dev
, u8 cmd
, u16 param
,
288 unsigned long timeout
)
290 struct mlx4_priv
*priv
= mlx4_priv(dev
);
293 int ret_from_pending
= 0;
295 /* First, verify that the master reports correct status */
296 if (comm_pending(dev
)) {
297 mlx4_warn(dev
, "Communication channel is not idle - my toggle is %d (cmd:0x%x)\n",
298 priv
->cmd
.comm_toggle
, cmd
);
303 down(&priv
->cmd
.poll_sem
);
304 if (mlx4_comm_cmd_post(dev
, cmd
, param
)) {
305 /* Only in case the device state is INTERNAL_ERROR,
306 * mlx4_comm_cmd_post returns with an error
308 err
= mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR
);
312 end
= msecs_to_jiffies(timeout
) + jiffies
;
313 while (comm_pending(dev
) && time_before(jiffies
, end
))
315 ret_from_pending
= comm_pending(dev
);
316 if (ret_from_pending
) {
317 /* check if the slave is trying to boot in the middle of
318 * FLR process. The only non-zero result in the RESET command
319 * is MLX4_DELAY_RESET_SLAVE*/
320 if ((MLX4_COMM_CMD_RESET
== cmd
)) {
321 err
= MLX4_DELAY_RESET_SLAVE
;
324 mlx4_warn(dev
, "Communication channel command 0x%x timed out\n",
326 err
= mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR
);
331 mlx4_enter_error_state(dev
->persist
);
333 up(&priv
->cmd
.poll_sem
);
337 static int mlx4_comm_cmd_wait(struct mlx4_dev
*dev
, u8 vhcr_cmd
,
338 u16 param
, u16 op
, unsigned long timeout
)
340 struct mlx4_cmd
*cmd
= &mlx4_priv(dev
)->cmd
;
341 struct mlx4_cmd_context
*context
;
345 down(&cmd
->event_sem
);
347 spin_lock(&cmd
->context_lock
);
348 BUG_ON(cmd
->free_head
< 0);
349 context
= &cmd
->context
[cmd
->free_head
];
350 context
->token
+= cmd
->token_mask
+ 1;
351 cmd
->free_head
= context
->next
;
352 spin_unlock(&cmd
->context_lock
);
354 reinit_completion(&context
->done
);
356 if (mlx4_comm_cmd_post(dev
, vhcr_cmd
, param
)) {
357 /* Only in case the device state is INTERNAL_ERROR,
358 * mlx4_comm_cmd_post returns with an error
360 err
= mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR
);
364 if (!wait_for_completion_timeout(&context
->done
,
365 msecs_to_jiffies(timeout
))) {
366 mlx4_warn(dev
, "communication channel command 0x%x (op=0x%x) timed out\n",
371 err
= context
->result
;
372 if (err
&& context
->fw_status
!= CMD_STAT_MULTI_FUNC_REQ
) {
373 mlx4_err(dev
, "command 0x%x failed: fw status = 0x%x\n",
374 vhcr_cmd
, context
->fw_status
);
375 if (mlx4_closing_cmd_fatal_error(op
, context
->fw_status
))
	/* wait for comm channel ready
	 * this is necessary for preventing the race
	 * when switching between event and polling mode
	 * Skipping this section in case the device is in FATAL_ERROR state,
	 * In this state, no commands are sent via the comm channel until
	 * the device has returned from reset.
	 */
386 if (!(dev
->persist
->state
& MLX4_DEVICE_STATE_INTERNAL_ERROR
)) {
387 end
= msecs_to_jiffies(timeout
) + jiffies
;
388 while (comm_pending(dev
) && time_before(jiffies
, end
))
394 err
= mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR
);
395 mlx4_enter_error_state(dev
->persist
);
397 spin_lock(&cmd
->context_lock
);
398 context
->next
= cmd
->free_head
;
399 cmd
->free_head
= context
- cmd
->context
;
400 spin_unlock(&cmd
->context_lock
);
406 int mlx4_comm_cmd(struct mlx4_dev
*dev
, u8 cmd
, u16 param
,
407 u16 op
, unsigned long timeout
)
409 if (dev
->persist
->state
& MLX4_DEVICE_STATE_INTERNAL_ERROR
)
410 return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR
);
412 if (mlx4_priv(dev
)->cmd
.use_events
)
413 return mlx4_comm_cmd_wait(dev
, cmd
, param
, op
, timeout
);
414 return mlx4_comm_cmd_poll(dev
, cmd
, param
, timeout
);

static int cmd_pending(struct mlx4_dev *dev)
{
	u32 status;

	if (pci_channel_offline(dev->persist->pdev))
		return -EIO;

	status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);

	return (status & swab32(1 << HCR_GO_BIT)) ||
		(mlx4_priv(dev)->cmd.toggle ==
		 !!(status & swab32(1 << HCR_T_BIT)));
}

static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
			 u32 in_modifier, u8 op_modifier, u16 op, u16 token,
			 int event)
{
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	u32 __iomem *hcr = cmd->hcr;
	int ret = -EIO;
	unsigned long end;

	mutex_lock(&dev->persist->device_state_mutex);
	/* To avoid writing to unknown addresses after the device state was
	 * changed to internal error and the chip was reset,
	 * check the INTERNAL_ERROR flag which is updated under
	 * device_state_mutex lock.
	 */
	if (pci_channel_offline(dev->persist->pdev) ||
	    (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
		/*
		 * Device is going through error recovery
		 * and cannot accept commands.
		 */
		goto out;
	}

	end = jiffies;
	if (event)
		end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);

	while (cmd_pending(dev)) {
		if (pci_channel_offline(dev->persist->pdev)) {
			/*
			 * Device is going through error recovery
			 * and cannot accept commands.
			 */
			goto out;
		}

		if (time_after_eq(jiffies, end)) {
			mlx4_err(dev, "%s:cmd_pending failed\n", __func__);
			goto out;
		}
		cond_resched();
	}

	/*
	 * We use writel (instead of something like memcpy_toio)
	 * because writes of less than 32 bits to the HCR don't work
	 * (and some architectures such as ia64 implement memcpy_toio
	 * in terms of writeb).
	 */
	__raw_writel((__force u32) cpu_to_be32(in_param >> 32),		  hcr + 0);
	__raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful),  hcr + 1);
	__raw_writel((__force u32) cpu_to_be32(in_modifier),		  hcr + 2);
	__raw_writel((__force u32) cpu_to_be32(out_param >> 32),	  hcr + 3);
	__raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4);
	__raw_writel((__force u32) cpu_to_be32(token << 16),		  hcr + 5);

	/* __raw_writel may not order writes. */
	wmb();

	__raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT)		|
					       (cmd->toggle << HCR_T_BIT)	|
					       (event ? (1 << HCR_E_BIT) : 0)	|
					       (op_modifier << HCR_OPMOD_SHIFT) |
					       op), hcr + 6);

	/*
	 * Make sure that our HCR writes don't get mixed in with
	 * writes from another CPU starting a FW command.
	 */
	mmiowb();

	cmd->toggle = cmd->toggle ^ 1;

	ret = 0;

out:
	if (ret)
		mlx4_warn(dev, "Could not post command 0x%x: ret=%d, in_param=0x%llx, in_mod=0x%x, op_mod=0x%x\n",
			  op, ret, in_param, in_modifier, op_modifier);
	mutex_unlock(&dev->persist->device_state_mutex);

	return ret;
}
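
/*
 * HCR layout as programmed by mlx4_cmd_post() (sketch, reconstructed from the
 * writes above); each slot is one big-endian 32-bit word:
 *
 *	hcr[0..1]  in_param  (high word, then low word)
 *	hcr[2]     in_modifier
 *	hcr[3..4]  out_param (high word, then low word)
 *	hcr[5]     token << 16
 *	hcr[6]     go bit | toggle | event bit | (op_modifier << HCR_OPMOD_SHIFT) | opcode
 *
 * The go bit belongs to the driver until firmware clears it; cmd_pending()
 * polls word 6 (HCR_STATUS_OFFSET) for the go and toggle bits, and the
 * polling path later reads the status and out_param back from the same
 * registers.
 */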
516 static int mlx4_slave_cmd(struct mlx4_dev
*dev
, u64 in_param
, u64
*out_param
,
517 int out_is_imm
, u32 in_modifier
, u8 op_modifier
,
518 u16 op
, unsigned long timeout
)
520 struct mlx4_priv
*priv
= mlx4_priv(dev
);
521 struct mlx4_vhcr_cmd
*vhcr
= priv
->mfunc
.vhcr
;
524 mutex_lock(&priv
->cmd
.slave_cmd_mutex
);
526 vhcr
->in_param
= cpu_to_be64(in_param
);
527 vhcr
->out_param
= out_param
? cpu_to_be64(*out_param
) : 0;
528 vhcr
->in_modifier
= cpu_to_be32(in_modifier
);
529 vhcr
->opcode
= cpu_to_be16((((u16
) op_modifier
) << 12) | (op
& 0xfff));
530 vhcr
->token
= cpu_to_be16(CMD_POLL_TOKEN
);
532 vhcr
->flags
= !!(priv
->cmd
.use_events
) << 6;
534 if (mlx4_is_master(dev
)) {
535 ret
= mlx4_master_process_vhcr(dev
, dev
->caps
.function
, vhcr
);
540 be64_to_cpu(vhcr
->out_param
);
542 mlx4_err(dev
, "response expected while output mailbox is NULL for command 0x%x\n",
544 vhcr
->status
= CMD_STAT_BAD_PARAM
;
547 ret
= mlx4_status_to_errno(vhcr
->status
);
550 dev
->persist
->state
& MLX4_DEVICE_STATE_INTERNAL_ERROR
)
551 ret
= mlx4_internal_err_ret_value(dev
, op
, op_modifier
);
553 ret
= mlx4_comm_cmd(dev
, MLX4_COMM_CMD_VHCR_POST
, 0, op
,
554 MLX4_COMM_TIME
+ timeout
);
559 be64_to_cpu(vhcr
->out_param
);
561 mlx4_err(dev
, "response expected while output mailbox is NULL for command 0x%x\n",
563 vhcr
->status
= CMD_STAT_BAD_PARAM
;
566 ret
= mlx4_status_to_errno(vhcr
->status
);
568 if (dev
->persist
->state
&
569 MLX4_DEVICE_STATE_INTERNAL_ERROR
)
570 ret
= mlx4_internal_err_ret_value(dev
, op
,
573 mlx4_err(dev
, "failed execution of VHCR_POST command opcode 0x%x\n", op
);
577 mutex_unlock(&priv
->cmd
.slave_cmd_mutex
);
581 static int mlx4_cmd_poll(struct mlx4_dev
*dev
, u64 in_param
, u64
*out_param
,
582 int out_is_imm
, u32 in_modifier
, u8 op_modifier
,
583 u16 op
, unsigned long timeout
)
585 struct mlx4_priv
*priv
= mlx4_priv(dev
);
586 void __iomem
*hcr
= priv
->cmd
.hcr
;
591 down(&priv
->cmd
.poll_sem
);
593 if (dev
->persist
->state
& MLX4_DEVICE_STATE_INTERNAL_ERROR
) {
595 * Device is going through error recovery
596 * and cannot accept commands.
598 err
= mlx4_internal_err_ret_value(dev
, op
, op_modifier
);
602 if (out_is_imm
&& !out_param
) {
603 mlx4_err(dev
, "response expected while output mailbox is NULL for command 0x%x\n",
609 err
= mlx4_cmd_post(dev
, in_param
, out_param
? *out_param
: 0,
610 in_modifier
, op_modifier
, op
, CMD_POLL_TOKEN
, 0);
614 end
= msecs_to_jiffies(timeout
) + jiffies
;
615 while (cmd_pending(dev
) && time_before(jiffies
, end
)) {
616 if (pci_channel_offline(dev
->persist
->pdev
)) {
618 * Device is going through error recovery
619 * and cannot accept commands.
625 if (dev
->persist
->state
& MLX4_DEVICE_STATE_INTERNAL_ERROR
) {
626 err
= mlx4_internal_err_ret_value(dev
, op
, op_modifier
);
633 if (cmd_pending(dev
)) {
634 mlx4_warn(dev
, "command 0x%x timed out (go bit not cleared)\n",
642 (u64
) be32_to_cpu((__force __be32
)
643 __raw_readl(hcr
+ HCR_OUT_PARAM_OFFSET
)) << 32 |
644 (u64
) be32_to_cpu((__force __be32
)
645 __raw_readl(hcr
+ HCR_OUT_PARAM_OFFSET
+ 4));
646 stat
= be32_to_cpu((__force __be32
)
647 __raw_readl(hcr
+ HCR_STATUS_OFFSET
)) >> 24;
648 err
= mlx4_status_to_errno(stat
);
650 mlx4_err(dev
, "command 0x%x failed: fw status = 0x%x\n",
652 if (mlx4_closing_cmd_fatal_error(op
, stat
))
659 err
= mlx4_cmd_reset_flow(dev
, op
, op_modifier
, err
);
661 up(&priv
->cmd
.poll_sem
);
665 void mlx4_cmd_event(struct mlx4_dev
*dev
, u16 token
, u8 status
, u64 out_param
)
667 struct mlx4_priv
*priv
= mlx4_priv(dev
);
668 struct mlx4_cmd_context
*context
=
669 &priv
->cmd
.context
[token
& priv
->cmd
.token_mask
];
671 /* previously timed out command completing at long last */
672 if (token
!= context
->token
)
675 context
->fw_status
= status
;
676 context
->result
= mlx4_status_to_errno(status
);
677 context
->out_param
= out_param
;
679 complete(&context
->done
);
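
/*
 * Token handling (sketch, not from the original source): every event-mode
 * command borrows a context from cmd->context[] and bumps context->token by
 * token_mask + 1, so the low bits still index the array while the high bits
 * act as a generation count.  A completion EQE that carries a stale token -
 * i.e. a command that already timed out and whose context has been reused -
 * is therefore silently dropped by the check above.
 */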
682 static int mlx4_cmd_wait(struct mlx4_dev
*dev
, u64 in_param
, u64
*out_param
,
683 int out_is_imm
, u32 in_modifier
, u8 op_modifier
,
684 u16 op
, unsigned long timeout
)
686 struct mlx4_cmd
*cmd
= &mlx4_priv(dev
)->cmd
;
687 struct mlx4_cmd_context
*context
;
690 down(&cmd
->event_sem
);
692 spin_lock(&cmd
->context_lock
);
693 BUG_ON(cmd
->free_head
< 0);
694 context
= &cmd
->context
[cmd
->free_head
];
695 context
->token
+= cmd
->token_mask
+ 1;
696 cmd
->free_head
= context
->next
;
697 spin_unlock(&cmd
->context_lock
);
699 if (out_is_imm
&& !out_param
) {
700 mlx4_err(dev
, "response expected while output mailbox is NULL for command 0x%x\n",
706 reinit_completion(&context
->done
);
708 err
= mlx4_cmd_post(dev
, in_param
, out_param
? *out_param
: 0,
709 in_modifier
, op_modifier
, op
, context
->token
, 1);
713 if (!wait_for_completion_timeout(&context
->done
,
714 msecs_to_jiffies(timeout
))) {
715 mlx4_warn(dev
, "command 0x%x timed out (go bit not cleared)\n",
721 err
= context
->result
;
	/* Since we do not want to have this error message always
	 * displayed at driver start when there are ConnectX2 HCAs
	 * on the host, we demote the error message for this
	 * specific command/input_mod/opcode_mod/fw-status to debug level.
	 */
728 if (op
== MLX4_CMD_SET_PORT
&&
729 (in_modifier
== 1 || in_modifier
== 2) &&
730 op_modifier
== MLX4_SET_PORT_IB_OPCODE
&&
731 context
->fw_status
== CMD_STAT_BAD_SIZE
)
732 mlx4_dbg(dev
, "command 0x%x failed: fw status = 0x%x\n",
733 op
, context
->fw_status
);
735 mlx4_err(dev
, "command 0x%x failed: fw status = 0x%x\n",
736 op
, context
->fw_status
);
737 if (dev
->persist
->state
& MLX4_DEVICE_STATE_INTERNAL_ERROR
)
738 err
= mlx4_internal_err_ret_value(dev
, op
, op_modifier
);
739 else if (mlx4_closing_cmd_fatal_error(op
, context
->fw_status
))
746 *out_param
= context
->out_param
;
750 err
= mlx4_cmd_reset_flow(dev
, op
, op_modifier
, err
);
752 spin_lock(&cmd
->context_lock
);
753 context
->next
= cmd
->free_head
;
754 cmd
->free_head
= context
- cmd
->context
;
755 spin_unlock(&cmd
->context_lock
);
761 int __mlx4_cmd(struct mlx4_dev
*dev
, u64 in_param
, u64
*out_param
,
762 int out_is_imm
, u32 in_modifier
, u8 op_modifier
,
763 u16 op
, unsigned long timeout
, int native
)
765 if (pci_channel_offline(dev
->persist
->pdev
))
766 return mlx4_cmd_reset_flow(dev
, op
, op_modifier
, -EIO
);
768 if (!mlx4_is_mfunc(dev
) || (native
&& mlx4_is_master(dev
))) {
769 if (dev
->persist
->state
& MLX4_DEVICE_STATE_INTERNAL_ERROR
)
770 return mlx4_internal_err_ret_value(dev
, op
,
772 if (mlx4_priv(dev
)->cmd
.use_events
)
773 return mlx4_cmd_wait(dev
, in_param
, out_param
,
774 out_is_imm
, in_modifier
,
775 op_modifier
, op
, timeout
);
777 return mlx4_cmd_poll(dev
, in_param
, out_param
,
778 out_is_imm
, in_modifier
,
779 op_modifier
, op
, timeout
);
781 return mlx4_slave_cmd(dev
, in_param
, out_param
, out_is_imm
,
782 in_modifier
, op_modifier
, op
, timeout
);
784 EXPORT_SYMBOL_GPL(__mlx4_cmd
);
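
/*
 * Callers normally reach __mlx4_cmd() through the thin wrappers declared in
 * <linux/mlx4/cmd.h>; roughly (sketch, see the header for the authoritative
 * prototypes):
 *
 *	mlx4_cmd()     - no output, e.g. mlx4_ARM_COMM_CHANNEL() below;
 *	mlx4_cmd_box() - output returned through a DMA mailbox
 *	                 (out_is_imm == 0);
 *	mlx4_cmd_imm() - output returned as an immediate 64-bit value
 *	                 (out_is_imm == 1).
 *
 * The "native" argument selects MLX4_CMD_NATIVE (issue directly on the HCR)
 * versus MLX4_CMD_WRAPPED (route through the slave/VHCR path above).
 */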
787 int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev
*dev
)
789 return mlx4_cmd(dev
, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL
,
790 MLX4_CMD_TIME_CLASS_B
, MLX4_CMD_NATIVE
);
793 static int mlx4_ACCESS_MEM(struct mlx4_dev
*dev
, u64 master_addr
,
794 int slave
, u64 slave_addr
,
795 int size
, int is_read
)
800 if ((slave_addr
& 0xfff) | (master_addr
& 0xfff) |
801 (slave
& ~0x7f) | (size
& 0xff)) {
802 mlx4_err(dev
, "Bad access mem params - slave_addr:0x%llx master_addr:0x%llx slave_id:%d size:%d\n",
803 slave_addr
, master_addr
, slave
, size
);
808 in_param
= (u64
) slave
| slave_addr
;
809 out_param
= (u64
) dev
->caps
.function
| master_addr
;
811 in_param
= (u64
) dev
->caps
.function
| master_addr
;
812 out_param
= (u64
) slave
| slave_addr
;
815 return mlx4_cmd_imm(dev
, in_param
, &out_param
, size
, 0,
817 MLX4_CMD_TIME_CLASS_A
, MLX4_CMD_NATIVE
);
820 static int query_pkey_block(struct mlx4_dev
*dev
, u8 port
, u16 index
, u16
*pkey
,
821 struct mlx4_cmd_mailbox
*inbox
,
822 struct mlx4_cmd_mailbox
*outbox
)
824 struct ib_smp
*in_mad
= (struct ib_smp
*)(inbox
->buf
);
825 struct ib_smp
*out_mad
= (struct ib_smp
*)(outbox
->buf
);
832 in_mad
->attr_mod
= cpu_to_be32(index
/ 32);
834 err
= mlx4_cmd_box(dev
, inbox
->dma
, outbox
->dma
, port
, 3,
835 MLX4_CMD_MAD_IFC
, MLX4_CMD_TIME_CLASS_C
,
840 for (i
= 0; i
< 32; ++i
)
841 pkey
[i
] = be16_to_cpu(((__be16
*) out_mad
->data
)[i
]);
846 static int get_full_pkey_table(struct mlx4_dev
*dev
, u8 port
, u16
*table
,
847 struct mlx4_cmd_mailbox
*inbox
,
848 struct mlx4_cmd_mailbox
*outbox
)
853 for (i
= 0; i
< dev
->caps
.pkey_table_len
[port
]; i
+= 32) {
854 err
= query_pkey_block(dev
, port
, i
, table
+ i
, inbox
, outbox
);
861 #define PORT_CAPABILITY_LOCATION_IN_SMP 20
862 #define PORT_STATE_OFFSET 32
864 static enum ib_port_state
vf_port_state(struct mlx4_dev
*dev
, int port
, int vf
)
866 if (mlx4_get_slave_port_state(dev
, vf
, port
) == SLAVE_PORT_UP
)
867 return IB_PORT_ACTIVE
;
872 static int mlx4_MAD_IFC_wrapper(struct mlx4_dev
*dev
, int slave
,
873 struct mlx4_vhcr
*vhcr
,
874 struct mlx4_cmd_mailbox
*inbox
,
875 struct mlx4_cmd_mailbox
*outbox
,
876 struct mlx4_cmd_info
*cmd
)
878 struct ib_smp
*smp
= inbox
->buf
;
886 struct mlx4_priv
*priv
= mlx4_priv(dev
);
887 struct ib_smp
*outsmp
= outbox
->buf
;
888 __be16
*outtab
= (__be16
*)(outsmp
->data
);
889 __be32 slave_cap_mask
;
890 __be64 slave_node_guid
;
892 port
= vhcr
->in_modifier
;
894 /* network-view bit is for driver use only, and should not be passed to FW */
895 opcode_modifier
= vhcr
->op_modifier
& ~0x8; /* clear netw view bit */
896 network_view
= !!(vhcr
->op_modifier
& 0x8);
898 if (smp
->base_version
== 1 &&
899 smp
->mgmt_class
== IB_MGMT_CLASS_SUBN_LID_ROUTED
&&
900 smp
->class_version
== 1) {
901 /* host view is paravirtualized */
902 if (!network_view
&& smp
->method
== IB_MGMT_METHOD_GET
) {
903 if (smp
->attr_id
== IB_SMP_ATTR_PKEY_TABLE
) {
904 index
= be32_to_cpu(smp
->attr_mod
);
905 if (port
< 1 || port
> dev
->caps
.num_ports
)
907 table
= kcalloc((dev
->caps
.pkey_table_len
[port
] / 32) + 1,
908 sizeof(*table
) * 32, GFP_KERNEL
);
912 /* need to get the full pkey table because the paravirtualized
913 * pkeys may be scattered among several pkey blocks.
915 err
= get_full_pkey_table(dev
, port
, table
, inbox
, outbox
);
917 for (vidx
= index
* 32; vidx
< (index
+ 1) * 32; ++vidx
) {
918 pidx
= priv
->virt2phys_pkey
[slave
][port
- 1][vidx
];
919 outtab
[vidx
% 32] = cpu_to_be16(table
[pidx
]);
925 if (smp
->attr_id
== IB_SMP_ATTR_PORT_INFO
) {
926 /*get the slave specific caps:*/
928 err
= mlx4_cmd_box(dev
, inbox
->dma
, outbox
->dma
,
929 vhcr
->in_modifier
, opcode_modifier
,
930 vhcr
->op
, MLX4_CMD_TIME_CLASS_C
, MLX4_CMD_NATIVE
);
931 /* modify the response for slaves */
932 if (!err
&& slave
!= mlx4_master_func_num(dev
)) {
933 u8
*state
= outsmp
->data
+ PORT_STATE_OFFSET
;
935 *state
= (*state
& 0xf0) | vf_port_state(dev
, port
, slave
);
936 slave_cap_mask
= priv
->mfunc
.master
.slave_state
[slave
].ib_cap_mask
[port
];
937 memcpy(outsmp
->data
+ PORT_CAPABILITY_LOCATION_IN_SMP
, &slave_cap_mask
, 4);
941 if (smp
->attr_id
== IB_SMP_ATTR_GUID_INFO
) {
942 __be64 guid
= mlx4_get_admin_guid(dev
, slave
,
945 /* set the PF admin guid to the FW/HW burned
946 * GUID, if it wasn't yet set
948 if (slave
== 0 && guid
== 0) {
950 err
= mlx4_cmd_box(dev
,
956 MLX4_CMD_TIME_CLASS_C
,
960 mlx4_set_admin_guid(dev
,
964 memcpy(outsmp
->data
, &guid
, 8);
967 /* clean all other gids */
968 memset(outsmp
->data
+ 8, 0, 56);
971 if (smp
->attr_id
== IB_SMP_ATTR_NODE_INFO
) {
972 err
= mlx4_cmd_box(dev
, inbox
->dma
, outbox
->dma
,
973 vhcr
->in_modifier
, opcode_modifier
,
974 vhcr
->op
, MLX4_CMD_TIME_CLASS_C
, MLX4_CMD_NATIVE
);
976 slave_node_guid
= mlx4_get_slave_node_guid(dev
, slave
);
977 memcpy(outsmp
->data
+ 12, &slave_node_guid
, 8);
984 /* Non-privileged VFs are only allowed "host" view LID-routed 'Get' MADs.
985 * These are the MADs used by ib verbs (such as ib_query_gids).
987 if (slave
!= mlx4_master_func_num(dev
) &&
988 !mlx4_vf_smi_enabled(dev
, slave
, port
)) {
989 if (!(smp
->mgmt_class
== IB_MGMT_CLASS_SUBN_LID_ROUTED
&&
990 smp
->method
== IB_MGMT_METHOD_GET
) || network_view
) {
991 mlx4_err(dev
, "Unprivileged slave %d is trying to execute a Subnet MGMT MAD, class 0x%x, method 0x%x, view=%s for attr 0x%x. Rejecting\n",
992 slave
, smp
->method
, smp
->mgmt_class
,
993 network_view
? "Network" : "Host",
994 be16_to_cpu(smp
->attr_id
));
999 return mlx4_cmd_box(dev
, inbox
->dma
, outbox
->dma
,
1000 vhcr
->in_modifier
, opcode_modifier
,
1001 vhcr
->op
, MLX4_CMD_TIME_CLASS_C
, MLX4_CMD_NATIVE
);
1004 static int mlx4_CMD_EPERM_wrapper(struct mlx4_dev
*dev
, int slave
,
1005 struct mlx4_vhcr
*vhcr
,
1006 struct mlx4_cmd_mailbox
*inbox
,
1007 struct mlx4_cmd_mailbox
*outbox
,
1008 struct mlx4_cmd_info
*cmd
)
1013 int mlx4_DMA_wrapper(struct mlx4_dev
*dev
, int slave
,
1014 struct mlx4_vhcr
*vhcr
,
1015 struct mlx4_cmd_mailbox
*inbox
,
1016 struct mlx4_cmd_mailbox
*outbox
,
1017 struct mlx4_cmd_info
*cmd
)
1023 in_param
= cmd
->has_inbox
? (u64
) inbox
->dma
: vhcr
->in_param
;
1024 out_param
= cmd
->has_outbox
? (u64
) outbox
->dma
: vhcr
->out_param
;
1025 if (cmd
->encode_slave_id
) {
1026 in_param
&= 0xffffffffffffff00ll
;
1030 err
= __mlx4_cmd(dev
, in_param
, &out_param
, cmd
->out_is_imm
,
1031 vhcr
->in_modifier
, vhcr
->op_modifier
, vhcr
->op
,
1032 MLX4_CMD_TIME_CLASS_A
, MLX4_CMD_NATIVE
);
1034 if (cmd
->out_is_imm
)
1035 vhcr
->out_param
= out_param
;
1040 static struct mlx4_cmd_info cmd_info
[] = {
1042 .opcode
= MLX4_CMD_QUERY_FW
,
1045 .out_is_imm
= false,
1046 .encode_slave_id
= false,
1048 .wrapper
= mlx4_QUERY_FW_wrapper
1051 .opcode
= MLX4_CMD_QUERY_HCA
,
1054 .out_is_imm
= false,
1055 .encode_slave_id
= false,
1060 .opcode
= MLX4_CMD_QUERY_DEV_CAP
,
1063 .out_is_imm
= false,
1064 .encode_slave_id
= false,
1066 .wrapper
= mlx4_QUERY_DEV_CAP_wrapper
1069 .opcode
= MLX4_CMD_QUERY_FUNC_CAP
,
1072 .out_is_imm
= false,
1073 .encode_slave_id
= false,
1075 .wrapper
= mlx4_QUERY_FUNC_CAP_wrapper
1078 .opcode
= MLX4_CMD_QUERY_ADAPTER
,
1081 .out_is_imm
= false,
1082 .encode_slave_id
= false,
1087 .opcode
= MLX4_CMD_INIT_PORT
,
1089 .has_outbox
= false,
1090 .out_is_imm
= false,
1091 .encode_slave_id
= false,
1093 .wrapper
= mlx4_INIT_PORT_wrapper
1096 .opcode
= MLX4_CMD_CLOSE_PORT
,
1098 .has_outbox
= false,
1099 .out_is_imm
= false,
1100 .encode_slave_id
= false,
1102 .wrapper
= mlx4_CLOSE_PORT_wrapper
1105 .opcode
= MLX4_CMD_QUERY_PORT
,
1108 .out_is_imm
= false,
1109 .encode_slave_id
= false,
1111 .wrapper
= mlx4_QUERY_PORT_wrapper
1114 .opcode
= MLX4_CMD_SET_PORT
,
1116 .has_outbox
= false,
1117 .out_is_imm
= false,
1118 .encode_slave_id
= false,
1120 .wrapper
= mlx4_SET_PORT_wrapper
1123 .opcode
= MLX4_CMD_MAP_EQ
,
1125 .has_outbox
= false,
1126 .out_is_imm
= false,
1127 .encode_slave_id
= false,
1129 .wrapper
= mlx4_MAP_EQ_wrapper
1132 .opcode
= MLX4_CMD_SW2HW_EQ
,
1134 .has_outbox
= false,
1135 .out_is_imm
= false,
1136 .encode_slave_id
= true,
1138 .wrapper
= mlx4_SW2HW_EQ_wrapper
1141 .opcode
= MLX4_CMD_HW_HEALTH_CHECK
,
1143 .has_outbox
= false,
1144 .out_is_imm
= false,
1145 .encode_slave_id
= false,
1150 .opcode
= MLX4_CMD_NOP
,
1152 .has_outbox
= false,
1153 .out_is_imm
= false,
1154 .encode_slave_id
= false,
1159 .opcode
= MLX4_CMD_CONFIG_DEV
,
1162 .out_is_imm
= false,
1163 .encode_slave_id
= false,
1165 .wrapper
= mlx4_CONFIG_DEV_wrapper
1168 .opcode
= MLX4_CMD_ALLOC_RES
,
1170 .has_outbox
= false,
1172 .encode_slave_id
= false,
1174 .wrapper
= mlx4_ALLOC_RES_wrapper
1177 .opcode
= MLX4_CMD_FREE_RES
,
1179 .has_outbox
= false,
1180 .out_is_imm
= false,
1181 .encode_slave_id
= false,
1183 .wrapper
= mlx4_FREE_RES_wrapper
1186 .opcode
= MLX4_CMD_SW2HW_MPT
,
1188 .has_outbox
= false,
1189 .out_is_imm
= false,
1190 .encode_slave_id
= true,
1192 .wrapper
= mlx4_SW2HW_MPT_wrapper
1195 .opcode
= MLX4_CMD_QUERY_MPT
,
1198 .out_is_imm
= false,
1199 .encode_slave_id
= false,
1201 .wrapper
= mlx4_QUERY_MPT_wrapper
1204 .opcode
= MLX4_CMD_HW2SW_MPT
,
1206 .has_outbox
= false,
1207 .out_is_imm
= false,
1208 .encode_slave_id
= false,
1210 .wrapper
= mlx4_HW2SW_MPT_wrapper
1213 .opcode
= MLX4_CMD_READ_MTT
,
1216 .out_is_imm
= false,
1217 .encode_slave_id
= false,
1222 .opcode
= MLX4_CMD_WRITE_MTT
,
1224 .has_outbox
= false,
1225 .out_is_imm
= false,
1226 .encode_slave_id
= false,
1228 .wrapper
= mlx4_WRITE_MTT_wrapper
1231 .opcode
= MLX4_CMD_SYNC_TPT
,
1233 .has_outbox
= false,
1234 .out_is_imm
= false,
1235 .encode_slave_id
= false,
1240 .opcode
= MLX4_CMD_HW2SW_EQ
,
1242 .has_outbox
= false,
1243 .out_is_imm
= false,
1244 .encode_slave_id
= true,
1246 .wrapper
= mlx4_HW2SW_EQ_wrapper
1249 .opcode
= MLX4_CMD_QUERY_EQ
,
1252 .out_is_imm
= false,
1253 .encode_slave_id
= true,
1255 .wrapper
= mlx4_QUERY_EQ_wrapper
1258 .opcode
= MLX4_CMD_SW2HW_CQ
,
1260 .has_outbox
= false,
1261 .out_is_imm
= false,
1262 .encode_slave_id
= true,
1264 .wrapper
= mlx4_SW2HW_CQ_wrapper
1267 .opcode
= MLX4_CMD_HW2SW_CQ
,
1269 .has_outbox
= false,
1270 .out_is_imm
= false,
1271 .encode_slave_id
= false,
1273 .wrapper
= mlx4_HW2SW_CQ_wrapper
1276 .opcode
= MLX4_CMD_QUERY_CQ
,
1279 .out_is_imm
= false,
1280 .encode_slave_id
= false,
1282 .wrapper
= mlx4_QUERY_CQ_wrapper
1285 .opcode
= MLX4_CMD_MODIFY_CQ
,
1287 .has_outbox
= false,
1289 .encode_slave_id
= false,
1291 .wrapper
= mlx4_MODIFY_CQ_wrapper
1294 .opcode
= MLX4_CMD_SW2HW_SRQ
,
1296 .has_outbox
= false,
1297 .out_is_imm
= false,
1298 .encode_slave_id
= true,
1300 .wrapper
= mlx4_SW2HW_SRQ_wrapper
1303 .opcode
= MLX4_CMD_HW2SW_SRQ
,
1305 .has_outbox
= false,
1306 .out_is_imm
= false,
1307 .encode_slave_id
= false,
1309 .wrapper
= mlx4_HW2SW_SRQ_wrapper
1312 .opcode
= MLX4_CMD_QUERY_SRQ
,
1315 .out_is_imm
= false,
1316 .encode_slave_id
= false,
1318 .wrapper
= mlx4_QUERY_SRQ_wrapper
1321 .opcode
= MLX4_CMD_ARM_SRQ
,
1323 .has_outbox
= false,
1324 .out_is_imm
= false,
1325 .encode_slave_id
= false,
1327 .wrapper
= mlx4_ARM_SRQ_wrapper
1330 .opcode
= MLX4_CMD_RST2INIT_QP
,
1332 .has_outbox
= false,
1333 .out_is_imm
= false,
1334 .encode_slave_id
= true,
1336 .wrapper
= mlx4_RST2INIT_QP_wrapper
1339 .opcode
= MLX4_CMD_INIT2INIT_QP
,
1341 .has_outbox
= false,
1342 .out_is_imm
= false,
1343 .encode_slave_id
= false,
1345 .wrapper
= mlx4_INIT2INIT_QP_wrapper
1348 .opcode
= MLX4_CMD_INIT2RTR_QP
,
1350 .has_outbox
= false,
1351 .out_is_imm
= false,
1352 .encode_slave_id
= false,
1354 .wrapper
= mlx4_INIT2RTR_QP_wrapper
1357 .opcode
= MLX4_CMD_RTR2RTS_QP
,
1359 .has_outbox
= false,
1360 .out_is_imm
= false,
1361 .encode_slave_id
= false,
1363 .wrapper
= mlx4_RTR2RTS_QP_wrapper
1366 .opcode
= MLX4_CMD_RTS2RTS_QP
,
1368 .has_outbox
= false,
1369 .out_is_imm
= false,
1370 .encode_slave_id
= false,
1372 .wrapper
= mlx4_RTS2RTS_QP_wrapper
1375 .opcode
= MLX4_CMD_SQERR2RTS_QP
,
1377 .has_outbox
= false,
1378 .out_is_imm
= false,
1379 .encode_slave_id
= false,
1381 .wrapper
= mlx4_SQERR2RTS_QP_wrapper
1384 .opcode
= MLX4_CMD_2ERR_QP
,
1386 .has_outbox
= false,
1387 .out_is_imm
= false,
1388 .encode_slave_id
= false,
1390 .wrapper
= mlx4_GEN_QP_wrapper
1393 .opcode
= MLX4_CMD_RTS2SQD_QP
,
1395 .has_outbox
= false,
1396 .out_is_imm
= false,
1397 .encode_slave_id
= false,
1399 .wrapper
= mlx4_GEN_QP_wrapper
1402 .opcode
= MLX4_CMD_SQD2SQD_QP
,
1404 .has_outbox
= false,
1405 .out_is_imm
= false,
1406 .encode_slave_id
= false,
1408 .wrapper
= mlx4_SQD2SQD_QP_wrapper
1411 .opcode
= MLX4_CMD_SQD2RTS_QP
,
1413 .has_outbox
= false,
1414 .out_is_imm
= false,
1415 .encode_slave_id
= false,
1417 .wrapper
= mlx4_SQD2RTS_QP_wrapper
1420 .opcode
= MLX4_CMD_2RST_QP
,
1422 .has_outbox
= false,
1423 .out_is_imm
= false,
1424 .encode_slave_id
= false,
1426 .wrapper
= mlx4_2RST_QP_wrapper
1429 .opcode
= MLX4_CMD_QUERY_QP
,
1432 .out_is_imm
= false,
1433 .encode_slave_id
= false,
1435 .wrapper
= mlx4_GEN_QP_wrapper
1438 .opcode
= MLX4_CMD_SUSPEND_QP
,
1440 .has_outbox
= false,
1441 .out_is_imm
= false,
1442 .encode_slave_id
= false,
1444 .wrapper
= mlx4_GEN_QP_wrapper
1447 .opcode
= MLX4_CMD_UNSUSPEND_QP
,
1449 .has_outbox
= false,
1450 .out_is_imm
= false,
1451 .encode_slave_id
= false,
1453 .wrapper
= mlx4_GEN_QP_wrapper
1456 .opcode
= MLX4_CMD_UPDATE_QP
,
1458 .has_outbox
= false,
1459 .out_is_imm
= false,
1460 .encode_slave_id
= false,
1462 .wrapper
= mlx4_UPDATE_QP_wrapper
1465 .opcode
= MLX4_CMD_GET_OP_REQ
,
1467 .has_outbox
= false,
1468 .out_is_imm
= false,
1469 .encode_slave_id
= false,
1471 .wrapper
= mlx4_CMD_EPERM_wrapper
,
1474 .opcode
= MLX4_CMD_ALLOCATE_VPP
,
1477 .out_is_imm
= false,
1478 .encode_slave_id
= false,
1480 .wrapper
= mlx4_CMD_EPERM_wrapper
,
1483 .opcode
= MLX4_CMD_SET_VPORT_QOS
,
1486 .out_is_imm
= false,
1487 .encode_slave_id
= false,
1489 .wrapper
= mlx4_CMD_EPERM_wrapper
,
1492 .opcode
= MLX4_CMD_CONF_SPECIAL_QP
,
1494 .has_outbox
= false,
1495 .out_is_imm
= false,
1496 .encode_slave_id
= false,
1497 .verify
= NULL
, /* XXX verify: only demux can do this */
1501 .opcode
= MLX4_CMD_MAD_IFC
,
1504 .out_is_imm
= false,
1505 .encode_slave_id
= false,
1507 .wrapper
= mlx4_MAD_IFC_wrapper
1510 .opcode
= MLX4_CMD_MAD_DEMUX
,
1512 .has_outbox
= false,
1513 .out_is_imm
= false,
1514 .encode_slave_id
= false,
1516 .wrapper
= mlx4_CMD_EPERM_wrapper
1519 .opcode
= MLX4_CMD_QUERY_IF_STAT
,
1522 .out_is_imm
= false,
1523 .encode_slave_id
= false,
1525 .wrapper
= mlx4_QUERY_IF_STAT_wrapper
1528 .opcode
= MLX4_CMD_ACCESS_REG
,
1531 .out_is_imm
= false,
1532 .encode_slave_id
= false,
1534 .wrapper
= mlx4_ACCESS_REG_wrapper
,
1537 .opcode
= MLX4_CMD_CONGESTION_CTRL_OPCODE
,
1539 .has_outbox
= false,
1540 .out_is_imm
= false,
1541 .encode_slave_id
= false,
1543 .wrapper
= mlx4_CMD_EPERM_wrapper
,
1545 /* Native multicast commands are not available for guests */
1547 .opcode
= MLX4_CMD_QP_ATTACH
,
1549 .has_outbox
= false,
1550 .out_is_imm
= false,
1551 .encode_slave_id
= false,
1553 .wrapper
= mlx4_QP_ATTACH_wrapper
1556 .opcode
= MLX4_CMD_PROMISC
,
1558 .has_outbox
= false,
1559 .out_is_imm
= false,
1560 .encode_slave_id
= false,
1562 .wrapper
= mlx4_PROMISC_wrapper
1564 /* Ethernet specific commands */
1566 .opcode
= MLX4_CMD_SET_VLAN_FLTR
,
1568 .has_outbox
= false,
1569 .out_is_imm
= false,
1570 .encode_slave_id
= false,
1572 .wrapper
= mlx4_SET_VLAN_FLTR_wrapper
1575 .opcode
= MLX4_CMD_SET_MCAST_FLTR
,
1577 .has_outbox
= false,
1578 .out_is_imm
= false,
1579 .encode_slave_id
= false,
1581 .wrapper
= mlx4_SET_MCAST_FLTR_wrapper
1584 .opcode
= MLX4_CMD_DUMP_ETH_STATS
,
1587 .out_is_imm
= false,
1588 .encode_slave_id
= false,
1590 .wrapper
= mlx4_DUMP_ETH_STATS_wrapper
1593 .opcode
= MLX4_CMD_INFORM_FLR_DONE
,
1595 .has_outbox
= false,
1596 .out_is_imm
= false,
1597 .encode_slave_id
= false,
1601 /* flow steering commands */
1603 .opcode
= MLX4_QP_FLOW_STEERING_ATTACH
,
1605 .has_outbox
= false,
1607 .encode_slave_id
= false,
1609 .wrapper
= mlx4_QP_FLOW_STEERING_ATTACH_wrapper
1612 .opcode
= MLX4_QP_FLOW_STEERING_DETACH
,
1614 .has_outbox
= false,
1615 .out_is_imm
= false,
1616 .encode_slave_id
= false,
1618 .wrapper
= mlx4_QP_FLOW_STEERING_DETACH_wrapper
1621 .opcode
= MLX4_FLOW_STEERING_IB_UC_QP_RANGE
,
1623 .has_outbox
= false,
1624 .out_is_imm
= false,
1625 .encode_slave_id
= false,
1627 .wrapper
= mlx4_CMD_EPERM_wrapper
1630 .opcode
= MLX4_CMD_VIRT_PORT_MAP
,
1632 .has_outbox
= false,
1633 .out_is_imm
= false,
1634 .encode_slave_id
= false,
1636 .wrapper
= mlx4_CMD_EPERM_wrapper
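
/*
 * Each entry in the cmd_info[] table above describes how the master handles
 * one command received from a slave (sketch of the fields, based on the
 * initializers used in the table):
 *
 *	.opcode          - firmware opcode the entry matches;
 *	.has_inbox/.has_outbox - whether a mailbox is DMA'd from/to the slave
 *	                   around execution;
 *	.out_is_imm      - the result is an immediate value in the VHCR rather
 *	                   than a mailbox;
 *	.encode_slave_id - the slave function id is folded into in_param;
 *	.verify          - optional permission/bounds check run before the
 *	                   command is executed;
 *	.wrapper         - paravirtualization handler; entries that use
 *	                   mlx4_CMD_EPERM_wrapper are refused for guests.
 */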
1640 static int mlx4_master_process_vhcr(struct mlx4_dev
*dev
, int slave
,
1641 struct mlx4_vhcr_cmd
*in_vhcr
)
1643 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1644 struct mlx4_cmd_info
*cmd
= NULL
;
1645 struct mlx4_vhcr_cmd
*vhcr_cmd
= in_vhcr
? in_vhcr
: priv
->mfunc
.vhcr
;
1646 struct mlx4_vhcr
*vhcr
;
1647 struct mlx4_cmd_mailbox
*inbox
= NULL
;
1648 struct mlx4_cmd_mailbox
*outbox
= NULL
;
1655 /* Create sw representation of Virtual HCR */
1656 vhcr
= kzalloc(sizeof(struct mlx4_vhcr
), GFP_KERNEL
);
1660 /* DMA in the vHCR */
1662 ret
= mlx4_ACCESS_MEM(dev
, priv
->mfunc
.vhcr_dma
, slave
,
1663 priv
->mfunc
.master
.slave_state
[slave
].vhcr_dma
,
1664 ALIGN(sizeof(struct mlx4_vhcr_cmd
),
1665 MLX4_ACCESS_MEM_ALIGN
), 1);
1667 if (!(dev
->persist
->state
&
1668 MLX4_DEVICE_STATE_INTERNAL_ERROR
))
1669 mlx4_err(dev
, "%s: Failed reading vhcr ret: 0x%x\n",
1676 /* Fill SW VHCR fields */
1677 vhcr
->in_param
= be64_to_cpu(vhcr_cmd
->in_param
);
1678 vhcr
->out_param
= be64_to_cpu(vhcr_cmd
->out_param
);
1679 vhcr
->in_modifier
= be32_to_cpu(vhcr_cmd
->in_modifier
);
1680 vhcr
->token
= be16_to_cpu(vhcr_cmd
->token
);
1681 vhcr
->op
= be16_to_cpu(vhcr_cmd
->opcode
) & 0xfff;
1682 vhcr
->op_modifier
= (u8
) (be16_to_cpu(vhcr_cmd
->opcode
) >> 12);
1683 vhcr
->e_bit
= vhcr_cmd
->flags
& (1 << 6);
1685 /* Lookup command */
1686 for (i
= 0; i
< ARRAY_SIZE(cmd_info
); ++i
) {
1687 if (vhcr
->op
== cmd_info
[i
].opcode
) {
1693 mlx4_err(dev
, "Unknown command:0x%x accepted from slave:%d\n",
1695 vhcr_cmd
->status
= CMD_STAT_BAD_PARAM
;
1700 if (cmd
->has_inbox
) {
1701 vhcr
->in_param
&= INBOX_MASK
;
1702 inbox
= mlx4_alloc_cmd_mailbox(dev
);
1703 if (IS_ERR(inbox
)) {
1704 vhcr_cmd
->status
= CMD_STAT_BAD_SIZE
;
1709 ret
= mlx4_ACCESS_MEM(dev
, inbox
->dma
, slave
,
1711 MLX4_MAILBOX_SIZE
, 1);
1713 if (!(dev
->persist
->state
&
1714 MLX4_DEVICE_STATE_INTERNAL_ERROR
))
1715 mlx4_err(dev
, "%s: Failed reading inbox (cmd:0x%x)\n",
1716 __func__
, cmd
->opcode
);
1717 vhcr_cmd
->status
= CMD_STAT_INTERNAL_ERR
;
1722 /* Apply permission and bound checks if applicable */
1723 if (cmd
->verify
&& cmd
->verify(dev
, slave
, vhcr
, inbox
)) {
1724 mlx4_warn(dev
, "Command:0x%x from slave: %d failed protection checks for resource_id:%d\n",
1725 vhcr
->op
, slave
, vhcr
->in_modifier
);
1726 vhcr_cmd
->status
= CMD_STAT_BAD_OP
;
1730 /* Allocate outbox */
1731 if (cmd
->has_outbox
) {
1732 outbox
= mlx4_alloc_cmd_mailbox(dev
);
1733 if (IS_ERR(outbox
)) {
1734 vhcr_cmd
->status
= CMD_STAT_BAD_SIZE
;
1740 /* Execute the command! */
1742 err
= cmd
->wrapper(dev
, slave
, vhcr
, inbox
, outbox
,
1744 if (cmd
->out_is_imm
)
1745 vhcr_cmd
->out_param
= cpu_to_be64(vhcr
->out_param
);
1747 in_param
= cmd
->has_inbox
? (u64
) inbox
->dma
:
1749 out_param
= cmd
->has_outbox
? (u64
) outbox
->dma
:
1751 err
= __mlx4_cmd(dev
, in_param
, &out_param
,
1752 cmd
->out_is_imm
, vhcr
->in_modifier
,
1753 vhcr
->op_modifier
, vhcr
->op
,
1754 MLX4_CMD_TIME_CLASS_A
,
1757 if (cmd
->out_is_imm
) {
1758 vhcr
->out_param
= out_param
;
1759 vhcr_cmd
->out_param
= cpu_to_be64(vhcr
->out_param
);
1764 if (!(dev
->persist
->state
& MLX4_DEVICE_STATE_INTERNAL_ERROR
))
1765 mlx4_warn(dev
, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n",
1766 vhcr
->op
, slave
, vhcr
->errno
, err
);
1767 vhcr_cmd
->status
= mlx4_errno_to_status(err
);
1772 /* Write outbox if command completed successfully */
1773 if (cmd
->has_outbox
&& !vhcr_cmd
->status
) {
1774 ret
= mlx4_ACCESS_MEM(dev
, outbox
->dma
, slave
,
1776 MLX4_MAILBOX_SIZE
, MLX4_CMD_WRAPPED
);
			/* If we failed to write back the outbox after the
			 * command was successfully executed, we must fail this
			 * slave, as it is now in an undefined state */
1781 if (!(dev
->persist
->state
&
1782 MLX4_DEVICE_STATE_INTERNAL_ERROR
))
1783 mlx4_err(dev
, "%s:Failed writing outbox\n", __func__
);
1789 /* DMA back vhcr result */
1791 ret
= mlx4_ACCESS_MEM(dev
, priv
->mfunc
.vhcr_dma
, slave
,
1792 priv
->mfunc
.master
.slave_state
[slave
].vhcr_dma
,
1793 ALIGN(sizeof(struct mlx4_vhcr
),
1794 MLX4_ACCESS_MEM_ALIGN
),
1797 mlx4_err(dev
, "%s:Failed writing vhcr result\n",
1799 else if (vhcr
->e_bit
&&
1800 mlx4_GEN_EQE(dev
, slave
, &priv
->mfunc
.master
.cmd_eqe
))
1801 mlx4_warn(dev
, "Failed to generate command completion eqe for slave %d\n",
1807 mlx4_free_cmd_mailbox(dev
, inbox
);
1808 mlx4_free_cmd_mailbox(dev
, outbox
);
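
/*
 * Summary of the master VHCR flow implemented above (sketch):
 *
 *	1. DMA the slave's VHCR into the master (mlx4_ACCESS_MEM);
 *	2. decode opcode/modifier/token and look the opcode up in cmd_info[];
 *	3. if the entry has an inbox, DMA the slave's mailbox in;
 *	4. run the optional ->verify() check, then either the ->wrapper() or a
 *	   native __mlx4_cmd() on behalf of the slave;
 *	5. on success, DMA the outbox back to the slave (if any);
 *	6. write the result and status back into the slave's VHCR and, when the
 *	   e-bit was set, generate a command-completion EQE for the slave.
 */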
1812 static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv
*priv
,
1813 int slave
, int port
)
1815 struct mlx4_vport_oper_state
*vp_oper
;
1816 struct mlx4_vport_state
*vp_admin
;
1817 struct mlx4_vf_immed_vlan_work
*work
;
1818 struct mlx4_dev
*dev
= &(priv
->dev
);
1820 int admin_vlan_ix
= NO_INDX
;
1822 vp_oper
= &priv
->mfunc
.master
.vf_oper
[slave
].vport
[port
];
1823 vp_admin
= &priv
->mfunc
.master
.vf_admin
[slave
].vport
[port
];
1825 if (vp_oper
->state
.default_vlan
== vp_admin
->default_vlan
&&
1826 vp_oper
->state
.default_qos
== vp_admin
->default_qos
&&
1827 vp_oper
->state
.link_state
== vp_admin
->link_state
&&
1828 vp_oper
->state
.qos_vport
== vp_admin
->qos_vport
)
1831 if (!(priv
->mfunc
.master
.slave_state
[slave
].active
&&
1832 dev
->caps
.flags2
& MLX4_DEV_CAP_FLAG2_UPDATE_QP
)) {
1833 /* even if the UPDATE_QP command isn't supported, we still want
1834 * to set this VF link according to the admin directive
1836 vp_oper
->state
.link_state
= vp_admin
->link_state
;
1840 mlx4_dbg(dev
, "updating immediately admin params slave %d port %d\n",
1842 mlx4_dbg(dev
, "vlan %d QoS %d link down %d\n",
1843 vp_admin
->default_vlan
, vp_admin
->default_qos
,
1844 vp_admin
->link_state
);
1846 work
= kzalloc(sizeof(*work
), GFP_KERNEL
);
1850 if (vp_oper
->state
.default_vlan
!= vp_admin
->default_vlan
) {
1851 if (MLX4_VGT
!= vp_admin
->default_vlan
) {
1852 err
= __mlx4_register_vlan(&priv
->dev
, port
,
1853 vp_admin
->default_vlan
,
1857 mlx4_warn(&priv
->dev
,
1858 "No vlan resources slave %d, port %d\n",
1863 admin_vlan_ix
= NO_INDX
;
1865 work
->flags
|= MLX4_VF_IMMED_VLAN_FLAG_VLAN
;
1866 mlx4_dbg(&priv
->dev
,
1867 "alloc vlan %d idx %d slave %d port %d\n",
1868 (int)(vp_admin
->default_vlan
),
1869 admin_vlan_ix
, slave
, port
);
1872 /* save original vlan ix and vlan id */
1873 work
->orig_vlan_id
= vp_oper
->state
.default_vlan
;
1874 work
->orig_vlan_ix
= vp_oper
->vlan_idx
;
1876 /* handle new qos */
1877 if (vp_oper
->state
.default_qos
!= vp_admin
->default_qos
)
1878 work
->flags
|= MLX4_VF_IMMED_VLAN_FLAG_QOS
;
1880 if (work
->flags
& MLX4_VF_IMMED_VLAN_FLAG_VLAN
)
1881 vp_oper
->vlan_idx
= admin_vlan_ix
;
1883 vp_oper
->state
.default_vlan
= vp_admin
->default_vlan
;
1884 vp_oper
->state
.default_qos
= vp_admin
->default_qos
;
1885 vp_oper
->state
.link_state
= vp_admin
->link_state
;
1886 vp_oper
->state
.qos_vport
= vp_admin
->qos_vport
;
1888 if (vp_admin
->link_state
== IFLA_VF_LINK_STATE_DISABLE
)
1889 work
->flags
|= MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE
;
1891 /* iterate over QPs owned by this slave, using UPDATE_QP */
1893 work
->slave
= slave
;
1894 work
->qos
= vp_oper
->state
.default_qos
;
1895 work
->qos_vport
= vp_oper
->state
.qos_vport
;
1896 work
->vlan_id
= vp_oper
->state
.default_vlan
;
1897 work
->vlan_ix
= vp_oper
->vlan_idx
;
1899 INIT_WORK(&work
->work
, mlx4_vf_immed_vlan_work_handler
);
1900 queue_work(priv
->mfunc
.master
.comm_wq
, &work
->work
);
1905 static void mlx4_set_default_port_qos(struct mlx4_dev
*dev
, int port
)
1907 struct mlx4_qos_manager
*port_qos_ctl
;
1908 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1910 port_qos_ctl
= &priv
->mfunc
.master
.qos_ctl
[port
];
1911 bitmap_zero(port_qos_ctl
->priority_bm
, MLX4_NUM_UP
);
1913 /* Enable only default prio at PF init routine */
1914 set_bit(MLX4_DEFAULT_QOS_PRIO
, port_qos_ctl
->priority_bm
);
1917 static void mlx4_allocate_port_vpps(struct mlx4_dev
*dev
, int port
)
1923 u8 vpp_param
[MLX4_NUM_UP
];
1924 struct mlx4_qos_manager
*port_qos
;
1925 struct mlx4_priv
*priv
= mlx4_priv(dev
);
	err = mlx4_ALLOCATE_VPP_get(dev, port, &availible_vpp, vpp_param);
	if (err) {
		mlx4_info(dev, "Failed to query available VPPs\n");
		return;
	}
1933 port_qos
= &priv
->mfunc
.master
.qos_ctl
[port
];
1934 num_vfs
= (availible_vpp
/
1935 bitmap_weight(port_qos
->priority_bm
, MLX4_NUM_UP
));
1937 for (i
= 0; i
< MLX4_NUM_UP
; i
++) {
1938 if (test_bit(i
, port_qos
->priority_bm
))
1939 vpp_param
[i
] = num_vfs
;
1942 err
= mlx4_ALLOCATE_VPP_set(dev
, port
, vpp_param
);
1944 mlx4_info(dev
, "Failed allocating VPPs\n");
	/* Query actual allocated VPP, just to make sure */
	err = mlx4_ALLOCATE_VPP_get(dev, port, &availible_vpp, vpp_param);
	if (err) {
		mlx4_info(dev, "Failed to query available VPPs\n");
		return;
	}
1955 port_qos
->num_of_qos_vfs
= num_vfs
;
	mlx4_dbg(dev, "Port %d Available VPPs %d\n", port, availible_vpp);
1958 for (i
= 0; i
< MLX4_NUM_UP
; i
++)
1959 mlx4_dbg(dev
, "Port %d UP %d Allocated %d VPPs\n", port
, i
,
1963 static int mlx4_master_activate_admin_state(struct mlx4_priv
*priv
, int slave
)
1966 struct mlx4_vport_state
*vp_admin
;
1967 struct mlx4_vport_oper_state
*vp_oper
;
1968 struct mlx4_active_ports actv_ports
= mlx4_get_active_ports(
1970 int min_port
= find_first_bit(actv_ports
.ports
,
1971 priv
->dev
.caps
.num_ports
) + 1;
1972 int max_port
= min_port
- 1 +
1973 bitmap_weight(actv_ports
.ports
, priv
->dev
.caps
.num_ports
);
1975 for (port
= min_port
; port
<= max_port
; port
++) {
1976 if (!test_bit(port
- 1, actv_ports
.ports
))
1978 priv
->mfunc
.master
.vf_oper
[slave
].smi_enabled
[port
] =
1979 priv
->mfunc
.master
.vf_admin
[slave
].enable_smi
[port
];
1980 vp_oper
= &priv
->mfunc
.master
.vf_oper
[slave
].vport
[port
];
1981 vp_admin
= &priv
->mfunc
.master
.vf_admin
[slave
].vport
[port
];
1982 vp_oper
->state
= *vp_admin
;
1983 if (MLX4_VGT
!= vp_admin
->default_vlan
) {
1984 err
= __mlx4_register_vlan(&priv
->dev
, port
,
1985 vp_admin
->default_vlan
, &(vp_oper
->vlan_idx
));
1987 vp_oper
->vlan_idx
= NO_INDX
;
1988 mlx4_warn(&priv
->dev
,
1989 "No vlan resources slave %d, port %d\n",
1993 mlx4_dbg(&priv
->dev
, "alloc vlan %d idx %d slave %d port %d\n",
1994 (int)(vp_oper
->state
.default_vlan
),
1995 vp_oper
->vlan_idx
, slave
, port
);
1997 if (vp_admin
->spoofchk
) {
1998 vp_oper
->mac_idx
= __mlx4_register_mac(&priv
->dev
,
2001 if (0 > vp_oper
->mac_idx
) {
2002 err
= vp_oper
->mac_idx
;
2003 vp_oper
->mac_idx
= NO_INDX
;
2004 mlx4_warn(&priv
->dev
,
2005 "No mac resources slave %d, port %d\n",
2009 mlx4_dbg(&priv
->dev
, "alloc mac %llx idx %d slave %d port %d\n",
2010 vp_oper
->state
.mac
, vp_oper
->mac_idx
, slave
, port
);
2016 static void mlx4_master_deactivate_admin_state(struct mlx4_priv
*priv
, int slave
)
2019 struct mlx4_vport_oper_state
*vp_oper
;
2020 struct mlx4_active_ports actv_ports
= mlx4_get_active_ports(
2022 int min_port
= find_first_bit(actv_ports
.ports
,
2023 priv
->dev
.caps
.num_ports
) + 1;
2024 int max_port
= min_port
- 1 +
2025 bitmap_weight(actv_ports
.ports
, priv
->dev
.caps
.num_ports
);
2028 for (port
= min_port
; port
<= max_port
; port
++) {
2029 if (!test_bit(port
- 1, actv_ports
.ports
))
2031 priv
->mfunc
.master
.vf_oper
[slave
].smi_enabled
[port
] =
2032 MLX4_VF_SMI_DISABLED
;
2033 vp_oper
= &priv
->mfunc
.master
.vf_oper
[slave
].vport
[port
];
2034 if (NO_INDX
!= vp_oper
->vlan_idx
) {
2035 __mlx4_unregister_vlan(&priv
->dev
,
2036 port
, vp_oper
->state
.default_vlan
);
2037 vp_oper
->vlan_idx
= NO_INDX
;
2039 if (NO_INDX
!= vp_oper
->mac_idx
) {
2040 __mlx4_unregister_mac(&priv
->dev
, port
, vp_oper
->state
.mac
);
2041 vp_oper
->mac_idx
= NO_INDX
;
2047 static void mlx4_master_do_cmd(struct mlx4_dev
*dev
, int slave
, u8 cmd
,
2048 u16 param
, u8 toggle
)
2050 struct mlx4_priv
*priv
= mlx4_priv(dev
);
2051 struct mlx4_slave_state
*slave_state
= priv
->mfunc
.master
.slave_state
;
2053 u8 is_going_down
= 0;
2055 unsigned long flags
;
2057 slave_state
[slave
].comm_toggle
^= 1;
2058 reply
= (u32
) slave_state
[slave
].comm_toggle
<< 31;
2059 if (toggle
!= slave_state
[slave
].comm_toggle
) {
2060 mlx4_warn(dev
, "Incorrect toggle %d from slave %d. *** MASTER STATE COMPROMISED ***\n",
2064 if (cmd
== MLX4_COMM_CMD_RESET
) {
2065 mlx4_warn(dev
, "Received reset from slave:%d\n", slave
);
2066 slave_state
[slave
].active
= false;
2067 slave_state
[slave
].old_vlan_api
= false;
2068 mlx4_master_deactivate_admin_state(priv
, slave
);
2069 for (i
= 0; i
< MLX4_EVENT_TYPES_NUM
; ++i
) {
2070 slave_state
[slave
].event_eq
[i
].eqn
= -1;
2071 slave_state
[slave
].event_eq
[i
].token
= 0;
2073 /*check if we are in the middle of FLR process,
2074 if so return "retry" status to the slave*/
2075 if (MLX4_COMM_CMD_FLR
== slave_state
[slave
].last_cmd
)
2076 goto inform_slave_state
;
2078 mlx4_dispatch_event(dev
, MLX4_DEV_EVENT_SLAVE_SHUTDOWN
, slave
);
2080 /* write the version in the event field */
2081 reply
|= mlx4_comm_get_version();
2085 /*command from slave in the middle of FLR*/
2086 if (cmd
!= MLX4_COMM_CMD_RESET
&&
2087 MLX4_COMM_CMD_FLR
== slave_state
[slave
].last_cmd
) {
2088 mlx4_warn(dev
, "slave:%d is Trying to run cmd(0x%x) in the middle of FLR\n",
2094 case MLX4_COMM_CMD_VHCR0
:
2095 if (slave_state
[slave
].last_cmd
!= MLX4_COMM_CMD_RESET
)
2097 slave_state
[slave
].vhcr_dma
= ((u64
) param
) << 48;
2098 priv
->mfunc
.master
.slave_state
[slave
].cookie
= 0;
2100 case MLX4_COMM_CMD_VHCR1
:
2101 if (slave_state
[slave
].last_cmd
!= MLX4_COMM_CMD_VHCR0
)
2103 slave_state
[slave
].vhcr_dma
|= ((u64
) param
) << 32;
2105 case MLX4_COMM_CMD_VHCR2
:
2106 if (slave_state
[slave
].last_cmd
!= MLX4_COMM_CMD_VHCR1
)
2108 slave_state
[slave
].vhcr_dma
|= ((u64
) param
) << 16;
2110 case MLX4_COMM_CMD_VHCR_EN
:
2111 if (slave_state
[slave
].last_cmd
!= MLX4_COMM_CMD_VHCR2
)
2113 slave_state
[slave
].vhcr_dma
|= param
;
2114 if (mlx4_master_activate_admin_state(priv
, slave
))
2116 slave_state
[slave
].active
= true;
2117 mlx4_dispatch_event(dev
, MLX4_DEV_EVENT_SLAVE_INIT
, slave
);
2119 case MLX4_COMM_CMD_VHCR_POST
:
2120 if ((slave_state
[slave
].last_cmd
!= MLX4_COMM_CMD_VHCR_EN
) &&
2121 (slave_state
[slave
].last_cmd
!= MLX4_COMM_CMD_VHCR_POST
)) {
2122 mlx4_warn(dev
, "slave:%d is out of sync, cmd=0x%x, last command=0x%x, reset is needed\n",
2123 slave
, cmd
, slave_state
[slave
].last_cmd
);
2127 mutex_lock(&priv
->cmd
.slave_cmd_mutex
);
2128 if (mlx4_master_process_vhcr(dev
, slave
, NULL
)) {
2129 mlx4_err(dev
, "Failed processing vhcr for slave:%d, resetting slave\n",
2131 mutex_unlock(&priv
->cmd
.slave_cmd_mutex
);
2134 mutex_unlock(&priv
->cmd
.slave_cmd_mutex
);
2137 mlx4_warn(dev
, "Bad comm cmd:%d from slave:%d\n", cmd
, slave
);
2140 spin_lock_irqsave(&priv
->mfunc
.master
.slave_state_lock
, flags
);
2141 if (!slave_state
[slave
].is_slave_going_down
)
2142 slave_state
[slave
].last_cmd
= cmd
;
2145 spin_unlock_irqrestore(&priv
->mfunc
.master
.slave_state_lock
, flags
);
2146 if (is_going_down
) {
2147 mlx4_warn(dev
, "Slave is going down aborting command(%d) executing from slave:%d\n",
2151 __raw_writel((__force u32
) cpu_to_be32(reply
),
2152 &priv
->mfunc
.comm
[slave
].slave_read
);
2158 /* cleanup any slave resources */
2159 if (dev
->persist
->interface_state
& MLX4_INTERFACE_STATE_UP
)
2160 mlx4_delete_all_resources_for_slave(dev
, slave
);
2162 if (cmd
!= MLX4_COMM_CMD_RESET
) {
2163 mlx4_warn(dev
, "Turn on internal error to force reset, slave=%d, cmd=0x%x\n",
		/* Turn on internal error letting slave reset itself immediately,
		 * otherwise it might take till timeout on command is passed
		 */
		reply |= ((u32)COMM_CHAN_EVENT_INTERNAL_ERR);
2171 spin_lock_irqsave(&priv
->mfunc
.master
.slave_state_lock
, flags
);
2172 if (!slave_state
[slave
].is_slave_going_down
)
2173 slave_state
[slave
].last_cmd
= MLX4_COMM_CMD_RESET
;
2174 spin_unlock_irqrestore(&priv
->mfunc
.master
.slave_state_lock
, flags
);
2175 /*with slave in the middle of flr, no need to clean resources again.*/
2177 memset(&slave_state
[slave
].event_eq
, 0,
2178 sizeof(struct mlx4_slave_event_eq_info
));
2179 __raw_writel((__force u32
) cpu_to_be32(reply
),
2180 &priv
->mfunc
.comm
[slave
].slave_read
);
2184 /* master command processing */
2185 void mlx4_master_comm_channel(struct work_struct
*work
)
2187 struct mlx4_mfunc_master_ctx
*master
=
2189 struct mlx4_mfunc_master_ctx
,
2191 struct mlx4_mfunc
*mfunc
=
2192 container_of(master
, struct mlx4_mfunc
, master
);
2193 struct mlx4_priv
*priv
=
2194 container_of(mfunc
, struct mlx4_priv
, mfunc
);
2195 struct mlx4_dev
*dev
= &priv
->dev
;
2205 bit_vec
= master
->comm_arm_bit_vector
;
2206 for (i
= 0; i
< COMM_CHANNEL_BIT_ARRAY_SIZE
; i
++) {
2207 vec
= be32_to_cpu(bit_vec
[i
]);
2208 for (j
= 0; j
< 32; j
++) {
2209 if (!(vec
& (1 << j
)))
2212 slave
= (i
* 32) + j
;
2213 comm_cmd
= swab32(readl(
2214 &mfunc
->comm
[slave
].slave_write
));
2215 slt
= swab32(readl(&mfunc
->comm
[slave
].slave_read
))
2217 toggle
= comm_cmd
>> 31;
2218 if (toggle
!= slt
) {
2219 if (master
->slave_state
[slave
].comm_toggle
2221 pr_info("slave %d out of sync. read toggle %d, state toggle %d. Resynching.\n",
2223 master
->slave_state
[slave
].comm_toggle
);
2224 master
->slave_state
[slave
].comm_toggle
=
2227 mlx4_master_do_cmd(dev
, slave
,
2228 comm_cmd
>> 16 & 0xff,
2229 comm_cmd
& 0xffff, toggle
);
2235 if (reported
&& reported
!= served
)
2236 mlx4_warn(dev
, "Got command event with bitmask from %d slaves but %d were served\n",
2239 if (mlx4_ARM_COMM_CHANNEL(dev
))
2240 mlx4_warn(dev
, "Failed to arm comm channel events\n");
2243 static int sync_toggles(struct mlx4_dev
*dev
)
2245 struct mlx4_priv
*priv
= mlx4_priv(dev
);
2250 wr_toggle
= swab32(readl(&priv
->mfunc
.comm
->slave_write
));
2251 if (wr_toggle
== 0xffffffff)
2252 end
= jiffies
+ msecs_to_jiffies(30000);
2254 end
= jiffies
+ msecs_to_jiffies(5000);
2256 while (time_before(jiffies
, end
)) {
2257 rd_toggle
= swab32(readl(&priv
->mfunc
.comm
->slave_read
));
2258 if (wr_toggle
== 0xffffffff || rd_toggle
== 0xffffffff) {
2259 /* PCI might be offline */
2261 wr_toggle
= swab32(readl(&priv
->mfunc
.comm
->
2266 if (rd_toggle
>> 31 == wr_toggle
>> 31) {
2267 priv
->cmd
.comm_toggle
= rd_toggle
>> 31;
2275 * we could reach here if for example the previous VM using this
2276 * function misbehaved and left the channel with unsynced state. We
2277 * should fix this here and give this VM a chance to use a properly
2280 mlx4_warn(dev
, "recovering from previously mis-behaved VM\n");
2281 __raw_writel((__force u32
) 0, &priv
->mfunc
.comm
->slave_read
);
2282 __raw_writel((__force u32
) 0, &priv
->mfunc
.comm
->slave_write
);
2283 priv
->cmd
.comm_toggle
= 0;
int mlx4_multi_func_init(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state;
	int i, j, err, port;

	if (mlx4_is_master(dev))
		priv->mfunc.comm =
			ioremap(pci_resource_start(dev->persist->pdev,
						   priv->fw.comm_bar) +
				priv->fw.comm_base, MLX4_COMM_PAGESIZE);
	else
		priv->mfunc.comm =
			ioremap(pci_resource_start(dev->persist->pdev, 2) +
				MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
	if (!priv->mfunc.comm) {
		mlx4_err(dev, "Couldn't map communication vector\n");
		goto err_vhcr;
	}

	if (mlx4_is_master(dev)) {
		struct mlx4_vf_oper_state *vf_oper;
		struct mlx4_vf_admin_state *vf_admin;

		priv->mfunc.master.slave_state =
			kzalloc(dev->num_slaves *
				sizeof(struct mlx4_slave_state), GFP_KERNEL);
		if (!priv->mfunc.master.slave_state)
			goto err_comm;

		priv->mfunc.master.vf_admin =
			kzalloc(dev->num_slaves *
				sizeof(struct mlx4_vf_admin_state), GFP_KERNEL);
		if (!priv->mfunc.master.vf_admin)
			goto err_comm_admin;

		priv->mfunc.master.vf_oper =
			kzalloc(dev->num_slaves *
				sizeof(struct mlx4_vf_oper_state), GFP_KERNEL);
		if (!priv->mfunc.master.vf_oper)
			goto err_comm_oper;

		for (i = 0; i < dev->num_slaves; ++i) {
			vf_admin = &priv->mfunc.master.vf_admin[i];
			vf_oper = &priv->mfunc.master.vf_oper[i];
			s_state = &priv->mfunc.master.slave_state[i];
			s_state->last_cmd = MLX4_COMM_CMD_RESET;
			mutex_init(&priv->mfunc.master.gen_eqe_mutex[i]);
			for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j)
				s_state->event_eq[j].eqn = -1;
			__raw_writel((__force u32) 0,
				     &priv->mfunc.comm[i].slave_write);
			__raw_writel((__force u32) 0,
				     &priv->mfunc.comm[i].slave_read);
			for (port = 1; port <= MLX4_MAX_PORTS; port++) {
				struct mlx4_vport_state *admin_vport;
				struct mlx4_vport_state *oper_vport;

				s_state->vlan_filter[port] =
					kzalloc(sizeof(struct mlx4_vlan_fltr),
						GFP_KERNEL);
				if (!s_state->vlan_filter[port]) {
					if (--port)
						kfree(s_state->vlan_filter[port]);
					goto err_slaves;
				}

				admin_vport = &vf_admin->vport[port];
				oper_vport = &vf_oper->vport[port].state;
				INIT_LIST_HEAD(&s_state->mcast_filters[port]);
				admin_vport->default_vlan = MLX4_VGT;
				oper_vport->default_vlan = MLX4_VGT;
				admin_vport->qos_vport =
						MLX4_VPP_DEFAULT_VPORT;
				oper_vport->qos_vport = MLX4_VPP_DEFAULT_VPORT;
				vf_oper->vport[port].vlan_idx = NO_INDX;
				vf_oper->vport[port].mac_idx = NO_INDX;
				mlx4_set_random_admin_guid(dev, i, port);
			}
			spin_lock_init(&s_state->lock);
		}

		if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP) {
			for (port = 1; port <= dev->caps.num_ports; port++) {
				if (mlx4_is_eth(dev, port)) {
					mlx4_set_default_port_qos(dev, port);
					mlx4_allocate_port_vpps(dev, port);
				}
			}
		}

		memset(&priv->mfunc.master.cmd_eqe, 0, dev->caps.eqe_size);
		priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
		INIT_WORK(&priv->mfunc.master.comm_work,
			  mlx4_master_comm_channel);
		INIT_WORK(&priv->mfunc.master.slave_event_work,
			  mlx4_gen_slave_eqe);
		INIT_WORK(&priv->mfunc.master.slave_flr_event_work,
			  mlx4_master_handle_slave_flr);
		spin_lock_init(&priv->mfunc.master.slave_state_lock);
		spin_lock_init(&priv->mfunc.master.slave_eq.event_lock);
		priv->mfunc.master.comm_wq =
			create_singlethread_workqueue("mlx4_comm");
		if (!priv->mfunc.master.comm_wq)
			goto err_slaves;

		if (mlx4_init_resource_tracker(dev))
			goto err_thread;
	} else {
		err = sync_toggles(dev);
		if (err) {
			mlx4_err(dev, "Couldn't sync toggles\n");
			goto err_comm;
		}
	}
	return 0;

err_thread:
	flush_workqueue(priv->mfunc.master.comm_wq);
	destroy_workqueue(priv->mfunc.master.comm_wq);
err_slaves:
	while (i--) {
		for (port = 1; port <= MLX4_MAX_PORTS; port++)
			kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
	}
	kfree(priv->mfunc.master.vf_oper);
err_comm_oper:
	kfree(priv->mfunc.master.vf_admin);
err_comm_admin:
	kfree(priv->mfunc.master.slave_state);
err_comm:
	iounmap(priv->mfunc.comm);
err_vhcr:
	dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
			  priv->mfunc.vhcr,
			  priv->mfunc.vhcr_dma);
	priv->mfunc.vhcr = NULL;
	return -ENOMEM;
}
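/*
 * Added note: each piece of the command interface that gets set up below
 * (the polling semaphore and bookkeeping, the HCR mapping on the PF, the
 * VHCR DMA page for multi-function mode, the mailbox pci_pool) is recorded
 * in 'flags', so that a failure can unwind exactly what was created by
 * calling mlx4_cmd_cleanup() with that mask.
 */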
int mlx4_cmd_init(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int flags = 0;

	if (!priv->cmd.initialized) {
		mutex_init(&priv->cmd.slave_cmd_mutex);
		sema_init(&priv->cmd.poll_sem, 1);
		priv->cmd.use_events = 0;
		priv->cmd.toggle     = 1;
		priv->cmd.initialized = 1;
		flags |= MLX4_CMD_CLEANUP_STRUCT;
	}

	if (!mlx4_is_slave(dev) && !priv->cmd.hcr) {
		priv->cmd.hcr = ioremap(pci_resource_start(dev->persist->pdev,
					0) + MLX4_HCR_BASE, MLX4_HCR_SIZE);
		if (!priv->cmd.hcr) {
			mlx4_err(dev, "Couldn't map command register\n");
			goto err;
		}
		flags |= MLX4_CMD_CLEANUP_HCR;
	}

	if (mlx4_is_mfunc(dev) && !priv->mfunc.vhcr) {
		priv->mfunc.vhcr = dma_alloc_coherent(&dev->persist->pdev->dev,
						      PAGE_SIZE,
						      &priv->mfunc.vhcr_dma,
						      GFP_KERNEL);
		if (!priv->mfunc.vhcr)
			goto err;

		flags |= MLX4_CMD_CLEANUP_VHCR;
	}

	if (!priv->cmd.pool) {
		priv->cmd.pool = pci_pool_create("mlx4_cmd",
						 dev->persist->pdev,
						 MLX4_MAILBOX_SIZE,
						 MLX4_MAILBOX_SIZE, 0);
		if (!priv->cmd.pool)
			goto err;

		flags |= MLX4_CMD_CLEANUP_POOL;
	}

	return 0;

err:
	mlx4_cmd_cleanup(dev, flags);
	return -ENOMEM;
}
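/*
 * Added note: on a fatal/internal error, poke every slave's comm-channel
 * word so guests see COMM_CHAN_EVENT_INTERNAL_ERR and can start their own
 * reset flow instead of waiting on a channel that will never answer.
 */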
void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int slave;
	u32 slave_read;

	/* Report an internal error event to all
	 * communication channels.
	 */
	for (slave = 0; slave < dev->num_slaves; slave++) {
		slave_read = swab32(readl(&priv->mfunc.comm[slave].slave_read));
		slave_read |= (u32)COMM_CHAN_EVENT_INTERNAL_ERR;
		__raw_writel((__force u32)cpu_to_be32(slave_read),
			     &priv->mfunc.comm[slave].slave_read);
		/* Make sure that our comm channel write doesn't
		 * get mixed in with writes from another CPU.
		 */
		mmiowb();
	}
}
void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, port;

	if (mlx4_is_master(dev)) {
		flush_workqueue(priv->mfunc.master.comm_wq);
		destroy_workqueue(priv->mfunc.master.comm_wq);
		for (i = 0; i < dev->num_slaves; i++) {
			for (port = 1; port <= MLX4_MAX_PORTS; port++)
				kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
		}
		kfree(priv->mfunc.master.slave_state);
		kfree(priv->mfunc.master.vf_admin);
		kfree(priv->mfunc.master.vf_oper);
		dev->num_slaves = 0;
	}

	iounmap(priv->mfunc.comm);
}
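/*
 * Added note: tear down only the pieces of the command interface selected
 * by cleanup_mask (the same MLX4_CMD_CLEANUP_* bits that mlx4_cmd_init()
 * accumulates), so partial initializations can be unwound safely.
 */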
void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (priv->cmd.pool && (cleanup_mask & MLX4_CMD_CLEANUP_POOL)) {
		pci_pool_destroy(priv->cmd.pool);
		priv->cmd.pool = NULL;
	}

	if (!mlx4_is_slave(dev) && priv->cmd.hcr &&
	    (cleanup_mask & MLX4_CMD_CLEANUP_HCR)) {
		iounmap(priv->cmd.hcr);
		priv->cmd.hcr = NULL;
	}
	if (mlx4_is_mfunc(dev) && priv->mfunc.vhcr &&
	    (cleanup_mask & MLX4_CMD_CLEANUP_VHCR)) {
		dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
				  priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
		priv->mfunc.vhcr = NULL;
	}
	if (priv->cmd.initialized && (cleanup_mask & MLX4_CMD_CLEANUP_STRUCT))
		priv->cmd.initialized = 0;
}
/*
 * Switch to using events to issue FW commands (can only be called
 * after event queue for command events has been initialized).
 */
int mlx4_cmd_use_events(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	priv->cmd.context = kmalloc(priv->cmd.max_cmds *
				    sizeof(struct mlx4_cmd_context),
				    GFP_KERNEL);
	if (!priv->cmd.context)
		return -ENOMEM;

	for (i = 0; i < priv->cmd.max_cmds; ++i) {
		priv->cmd.context[i].token = i;
		priv->cmd.context[i].next  = i + 1;
		/* To support fatal error flow, initialize all
		 * cmd contexts to allow simulating completions
		 * with complete() at any time.
		 */
		init_completion(&priv->cmd.context[i].done);
	}

	priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
	priv->cmd.free_head = 0;

	sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
	spin_lock_init(&priv->cmd.context_lock);

	for (priv->cmd.token_mask = 1;
	     priv->cmd.token_mask < priv->cmd.max_cmds;
	     priv->cmd.token_mask <<= 1)
		; /* nothing */
	--priv->cmd.token_mask;

	down(&priv->cmd.poll_sem);
	priv->cmd.use_events = 1;

	return 0;
}
/*
 * Switch back to polling (used when shutting down the device)
 */
void mlx4_cmd_use_polling(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	priv->cmd.use_events = 0;

	for (i = 0; i < priv->cmd.max_cmds; ++i)
		down(&priv->cmd.event_sem);

	kfree(priv->cmd.context);

	up(&priv->cmd.poll_sem);
}
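/*
 * Added note: command mailboxes are MLX4_MAILBOX_SIZE DMA-coherent buffers
 * drawn from the "mlx4_cmd" pci_pool.  A typical caller (sketch only, not
 * copied from any one call site; 'opcode' stands in for a real MLX4_CMD_*
 * value) fills the buffer and hands the DMA address to the command layer:
 *
 *	mailbox = mlx4_alloc_cmd_mailbox(dev);
 *	if (IS_ERR(mailbox))
 *		return PTR_ERR(mailbox);
 *	// ... fill mailbox->buf ...
 *	err = mlx4_cmd(dev, mailbox->dma, 0, 0, opcode,
 *		       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 *	mlx4_free_cmd_mailbox(dev, mailbox);
 */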
struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
{
	struct mlx4_cmd_mailbox *mailbox;

	mailbox = kmalloc(sizeof *mailbox, GFP_KERNEL);
	if (!mailbox)
		return ERR_PTR(-ENOMEM);

	mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
				      &mailbox->dma);
	if (!mailbox->buf) {
		kfree(mailbox);
		return ERR_PTR(-ENOMEM);
	}

	memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);

	return mailbox;
}
EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);

void mlx4_free_cmd_mailbox(struct mlx4_dev *dev,
			   struct mlx4_cmd_mailbox *mailbox)
{
	if (!mailbox)
		return;

	pci_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma);
	kfree(mailbox);
}
EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);
u32 mlx4_comm_get_version(void)
{
	return ((u32) CMD_CHAN_IF_REV << 8) | (u32) CMD_CHAN_VER;
}
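/*
 * Added note: slave indices and VF numbers are off by one; slave 0 is the
 * PF itself, so VF n corresponds to slave n + 1.  The helpers below convert
 * between the two and validate the range against the number of activated
 * VFs.
 */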
static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf)
{
	if ((vf < 0) || (vf >= dev->persist->num_vfs)) {
		mlx4_err(dev, "Bad vf number:%d (number of activated vf: %d)\n",
			 vf, dev->persist->num_vfs);
		return -EINVAL;
	}

	return vf + 1;
}

int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave)
{
	if (slave < 1 || slave > dev->persist->num_vfs) {
		mlx4_err(dev,
			 "Bad slave number:%d (number of activated slaves: %lu)\n",
			 slave, dev->num_slaves);
		return -EINVAL;
	}
	return slave - 1;
}
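/*
 * Added note: fatal-error helper.  Complete every event-mode command
 * context with CMD_STAT_INTERNAL_ERR so no caller stays blocked waiting
 * for a completion the (dead) firmware will never deliver.
 */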
void mlx4_cmd_wake_completions(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_context *context;
	int i;

	spin_lock(&priv->cmd.context_lock);
	if (priv->cmd.context) {
		for (i = 0; i < priv->cmd.max_cmds; ++i) {
			context = &priv->cmd.context[i];
			context->fw_status = CMD_STAT_INTERNAL_ERR;
			context->result    =
				mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
			complete(&context->done);
		}
	}
	spin_unlock(&priv->cmd.context_lock);
}
struct mlx4_active_ports mlx4_get_active_ports(struct mlx4_dev *dev, int slave)
{
	struct mlx4_active_ports actv_ports;
	int vf;

	bitmap_zero(actv_ports.ports, MLX4_MAX_PORTS);

	if (slave == 0) {
		bitmap_fill(actv_ports.ports, dev->caps.num_ports);
		return actv_ports;
	}

	vf = mlx4_get_vf_indx(dev, slave);
	if (vf < 0)
		return actv_ports;

	bitmap_set(actv_ports.ports, dev->dev_vfs[vf].min_port - 1,
		   min((int)dev->dev_vfs[mlx4_get_vf_indx(dev, slave)].n_ports,
		       dev->caps.num_ports));

	return actv_ports;
}
EXPORT_SYMBOL_GPL(mlx4_get_active_ports);
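/*
 * Added note: slaves number their ports starting at 1 within their own
 * active-port bitmap; the helpers below translate between that
 * slave-relative view and physical port numbers.
 */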
int mlx4_slave_convert_port(struct mlx4_dev *dev, int slave, int port)
{
	unsigned n;
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
	unsigned m = bitmap_weight(actv_ports.ports, dev->caps.num_ports);

	if (port <= 0 || port > m)
		return -EINVAL;

	n = find_first_bit(actv_ports.ports, dev->caps.num_ports);
	if (port <= n)
		port = n + 1;

	return port;
}
EXPORT_SYMBOL_GPL(mlx4_slave_convert_port);
int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port)
{
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);

	if (test_bit(port - 1, actv_ports.ports))
		return port -
			find_first_bit(actv_ports.ports, dev->caps.num_ports);

	return -1;
}
EXPORT_SYMBOL_GPL(mlx4_phys_to_slave_port);
struct mlx4_slaves_pport mlx4_phys_to_slaves_pport(struct mlx4_dev *dev,
						   int port)
{
	unsigned i;
	struct mlx4_slaves_pport slaves_pport;

	bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);

	if (port <= 0 || port > dev->caps.num_ports)
		return slaves_pport;

	for (i = 0; i < dev->persist->num_vfs + 1; i++) {
		struct mlx4_active_ports actv_ports =
			mlx4_get_active_ports(dev, i);
		if (test_bit(port - 1, actv_ports.ports))
			set_bit(i, slaves_pport.slaves);
	}

	return slaves_pport;
}
EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport);
struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv(
		struct mlx4_dev *dev,
		const struct mlx4_active_ports *crit_ports)
{
	unsigned i;
	struct mlx4_slaves_pport slaves_pport;

	bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);

	for (i = 0; i < dev->persist->num_vfs + 1; i++) {
		struct mlx4_active_ports actv_ports =
			mlx4_get_active_ports(dev, i);
		if (bitmap_equal(crit_ports->ports, actv_ports.ports,
				 dev->caps.num_ports))
			set_bit(i, slaves_pport.slaves);
	}

	return slaves_pport;
}
EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv);
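/*
 * Added note: clamp a caller-supplied port number into the range of ports
 * actually assigned to this slave, so that e.g. a single-port VF always
 * ends up operating on its own port.
 */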
static int mlx4_slaves_closest_port(struct mlx4_dev *dev, int slave, int port)
{
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
	int min_port = find_first_bit(actv_ports.ports, dev->caps.num_ports)
			+ 1;
	int max_port = min_port +
		bitmap_weight(actv_ports.ports, dev->caps.num_ports);

	if (port < min_port)
		port = min_port;
	else if (port >= max_port)
		port = max_port - 1;

	return port;
}
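/*
 * Added note: program per-priority VPP rate limiting for a slave.  Query
 * the vport 0 defaults, then enable max_avg_bw = max_tx_rate on every
 * priority this port's QoS manager allows; a zero rate leaves the queried
 * defaults in place with the limiter disabled.
 */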
static int mlx4_set_vport_qos(struct mlx4_priv *priv, int slave, int port,
			      int max_tx_rate)
{
	int i;
	int err;
	struct mlx4_qos_manager *port_qos;
	struct mlx4_dev *dev = &priv->dev;
	struct mlx4_vport_qos_param vpp_qos[MLX4_NUM_UP];

	port_qos = &priv->mfunc.master.qos_ctl[port];
	memset(vpp_qos, 0, sizeof(struct mlx4_vport_qos_param) * MLX4_NUM_UP);

	if (slave > port_qos->num_of_qos_vfs) {
		mlx4_info(dev, "No available VPP resources for this VF\n");
		return -EINVAL;
	}

	/* Query for default QoS values from Vport 0 is needed */
	err = mlx4_SET_VPORT_QOS_get(dev, port, 0, vpp_qos);
	if (err) {
		mlx4_info(dev, "Failed to query Vport 0 QoS values\n");
		return err;
	}

	for (i = 0; i < MLX4_NUM_UP; i++) {
		if (test_bit(i, port_qos->priority_bm) && max_tx_rate) {
			vpp_qos[i].max_avg_bw = max_tx_rate;
			vpp_qos[i].enable = 1;
		} else {
			/* if user supplied tx_rate == 0, no rate limit
			 * configuration is required, so we leave the
			 * value of max_avg_bw as queried from Vport 0.
			 */
			vpp_qos[i].enable = 0;
		}
	}

	err = mlx4_SET_VPORT_QOS_set(dev, port, slave, vpp_qos);
	if (err) {
		mlx4_info(dev, "Failed to set Vport %d QoS values\n", slave);
		return err;
	}

	return 0;
}
static bool mlx4_is_vf_vst_and_prio_qos(struct mlx4_dev *dev, int port,
					struct mlx4_vport_state *vf_admin)
{
	struct mlx4_qos_manager *info;
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (!mlx4_is_master(dev) ||
	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP))
		return false;

	info = &priv->mfunc.master.qos_ctl[port];

	if (vf_admin->default_vlan != MLX4_VGT &&
	    test_bit(vf_admin->default_qos, info->priority_bm))
		return true;

	return false;
}
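/*
 * Added note: while a TX rate limit is configured, only allow a VST/VGT
 * change if the requested vlan/qos pair would still be in a QoS-capable
 * VST state; the check is done by probing a dummy admin state with the
 * requested values.
 */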
static bool mlx4_valid_vf_state_change(struct mlx4_dev *dev, int port,
				       struct mlx4_vport_state *vf_admin,
				       int vlan, int qos)
{
	struct mlx4_vport_state dummy_admin = {0};

	if (!mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin) ||
	    !vf_admin->tx_rate)
		return true;

	dummy_admin.default_qos = qos;
	dummy_admin.default_vlan = vlan;

	/* VF wants to move to other VST state which is valid with current
	 * rate limit. Either different default vlan in VST or other
	 * supported QoS priority. Otherwise we don't allow this change when
	 * the TX rate is still configured.
	 */
	if (mlx4_is_vf_vst_and_prio_qos(dev, port, &dummy_admin))
		return true;

	mlx4_info(dev, "Cannot change VF state to %s while rate is set\n",
		  (vlan == MLX4_VGT) ? "VGT" : "VST");

	if (vlan != MLX4_VGT)
		mlx4_info(dev, "VST priority %d not supported for QoS\n", qos);

	mlx4_info(dev, "Please set rate to 0 prior to this VF state change\n");

	return false;
}
int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vport_state *s_info;
	int slave;

	if (!mlx4_is_master(dev))
		return -EPROTONOSUPPORT;

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	port = mlx4_slaves_closest_port(dev, slave, port);
	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
	s_info->mac = mac;
	mlx4_info(dev, "default mac on vf %d port %d to %llX will take effect only after vf restart\n",
		  vf, port, s_info->mac);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_mac);
int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vport_state *vf_admin;
	int slave;

	if ((!mlx4_is_master(dev)) ||
	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VLAN_CONTROL))
		return -EPROTONOSUPPORT;

	if ((vlan > 4095) || (qos > 7))
		return -EINVAL;

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	port = mlx4_slaves_closest_port(dev, slave, port);
	vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];

	if (!mlx4_valid_vf_state_change(dev, port, vf_admin, vlan, qos))
		return -EPERM;

	if ((0 == vlan) && (0 == qos))
		vf_admin->default_vlan = MLX4_VGT;
	else
		vf_admin->default_vlan = vlan;
	vf_admin->default_qos = qos;

	/* If rate was configured prior to VST, we saved the configured rate
	 * in vf_admin->rate and now, if priority supported we enforce the QoS
	 */
	if (mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin) &&
	    vf_admin->tx_rate)
		vf_admin->qos_vport = slave;

	if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
		mlx4_info(dev,
			  "updating vf %d port %d config will take effect on next VF restart\n",
			  vf, port);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan);
int mlx4_set_vf_rate(struct mlx4_dev *dev, int port, int vf, int min_tx_rate,
		     int max_tx_rate)
{
	int err;
	int slave;
	struct mlx4_vport_state *vf_admin;
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (!mlx4_is_master(dev) ||
	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP))
		return -EPROTONOSUPPORT;

	if (min_tx_rate) {
		mlx4_info(dev, "Minimum BW share not supported\n");
		return -EPROTONOSUPPORT;
	}

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	port = mlx4_slaves_closest_port(dev, slave, port);
	vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];

	err = mlx4_set_vport_qos(priv, slave, port, max_tx_rate);
	if (err) {
		mlx4_info(dev, "vf %d failed to set rate %d\n", vf,
			  max_tx_rate);
		return err;
	}

	vf_admin->tx_rate = max_tx_rate;
	/* if VF is not in supported mode (VST with supported prio),
	 * we do not change vport configuration for its QPs, but save
	 * the rate, so it will be enforced when it moves to supported
	 * mode next time.
	 */
	if (!mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin)) {
		mlx4_info(dev,
			  "rate set for VF %d when not in valid state\n", vf);

		if (vf_admin->default_vlan != MLX4_VGT)
			mlx4_info(dev, "VST priority not supported by QoS\n");
		else
			mlx4_info(dev, "VF in VGT mode (needed VST)\n");

		mlx4_info(dev,
			  "rate %d will take effect when VF moves to valid state\n",
			  max_tx_rate);
		return 0;
	}

	/* If user sets rate 0 assigning default vport for its QPs */
	vf_admin->qos_vport = max_tx_rate ? slave : MLX4_VPP_DEFAULT_VPORT;

	if (priv->mfunc.master.slave_state[slave].active &&
	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)
		mlx4_master_immediate_activate_vlan_qos(priv, slave, port);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_rate);
/* mlx4_get_slave_default_vlan -
 * return true if VST (i.e. a default vlan is set);
 * if VST, also return the vlan & qos (if not NULL)
 */
bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave,
				 u16 *vlan, u8 *qos)
{
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_priv *priv;

	priv = mlx4_priv(dev);
	port = mlx4_slaves_closest_port(dev, slave, port);
	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];

	if (MLX4_VGT != vp_oper->state.default_vlan) {
		if (vlan)
			*vlan = vp_oper->state.default_vlan;
		if (qos)
			*qos = vp_oper->state.default_qos;
		return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(mlx4_get_slave_default_vlan);
int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vport_state *s_info;
	int slave;

	if ((!mlx4_is_master(dev)) ||
	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FSM))
		return -EPROTONOSUPPORT;

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	port = mlx4_slaves_closest_port(dev, slave, port);
	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
	s_info->spoofchk = setting;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_spoofchk);
int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vport_state *s_info;
	int slave;

	if (!mlx4_is_master(dev))
		return -EPROTONOSUPPORT;

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
	ivf->vf = vf;

	/* need to convert it to a func */
	ivf->mac[0] = ((s_info->mac >> (5*8)) & 0xff);
	ivf->mac[1] = ((s_info->mac >> (4*8)) & 0xff);
	ivf->mac[2] = ((s_info->mac >> (3*8)) & 0xff);
	ivf->mac[3] = ((s_info->mac >> (2*8)) & 0xff);
	ivf->mac[4] = ((s_info->mac >> (1*8)) & 0xff);
	ivf->mac[5] = ((s_info->mac) & 0xff);

	ivf->vlan = s_info->default_vlan;
	ivf->qos  = s_info->default_qos;

	if (mlx4_is_vf_vst_and_prio_qos(dev, port, s_info))
		ivf->max_tx_rate = s_info->tx_rate;
	else
		ivf->max_tx_rate = 0;

	ivf->min_tx_rate = 0;
	ivf->spoofchk    = s_info->spoofchk;
	ivf->linkstate   = s_info->link_state;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_get_vf_config);
int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vport_state *s_info;
	int slave;
	u8 link_stat_event;

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	port = mlx4_slaves_closest_port(dev, slave, port);
	switch (link_state) {
	case IFLA_VF_LINK_STATE_AUTO:
		/* get current link state */
		if (!priv->sense.do_sense_port[port])
			link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
		else
			link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
		break;

	case IFLA_VF_LINK_STATE_ENABLE:
		link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
		break;

	case IFLA_VF_LINK_STATE_DISABLE:
		link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
		break;

	default:
		mlx4_warn(dev, "unknown value for link_state %02x on slave %d port %d\n",
			  link_state, slave, port);
		return -EINVAL;
	}
	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
	s_info->link_state = link_state;

	/* send event */
	mlx4_gen_port_state_change_eqe(dev, slave, port, link_stat_event);

	if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
		mlx4_dbg(dev,
			 "updating vf %d port %d no link state HW enforcement\n",
			 vf, port);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_link_state);
int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (slave < 1 || slave >= dev->num_slaves ||
	    port < 1 || port > MLX4_MAX_PORTS)
		return 0;

	return priv->mfunc.master.vf_oper[slave].smi_enabled[port] ==
		MLX4_VF_SMI_ENABLED;
}
EXPORT_SYMBOL_GPL(mlx4_vf_smi_enabled);

int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (slave == mlx4_master_func_num(dev))
		return 1;

	if (slave < 1 || slave >= dev->num_slaves ||
	    port < 1 || port > MLX4_MAX_PORTS)
		return 0;

	return priv->mfunc.master.vf_admin[slave].enable_smi[port] ==
		MLX4_VF_SMI_ENABLED;
}
EXPORT_SYMBOL_GPL(mlx4_vf_get_enable_smi_admin);

int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
				 int enabled)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (slave == mlx4_master_func_num(dev))
		return 0;

	if (slave < 1 || slave >= dev->num_slaves ||
	    port < 1 || port > MLX4_MAX_PORTS ||
	    enabled < 0 || enabled > 1)
		return -EINVAL;

	priv->mfunc.master.vf_admin[slave].enable_smi[port] = enabled;
	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_vf_set_enable_smi_admin);