/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/errno.h>

#include <linux/mlx4/cmd.h>
#include <linux/mlx4/device.h>
#include <linux/semaphore.h>
#include <rdma/ib_smi.h>
#include <linux/delay.h>

#include "mlx4.h"
#include "fw.h"
#include "fw_qos.h"
#include "mlx4_stats.h"
#define CMD_POLL_TOKEN 0xffff
#define INBOX_MASK	0xffffffffffffff00ULL

#define CMD_CHAN_VER 1
#define CMD_CHAN_IF_REV 1
enum {
	/* command completed successfully: */
	CMD_STAT_OK		= 0x00,
	/* Internal error (such as a bus error) occurred while processing command: */
	CMD_STAT_INTERNAL_ERR	= 0x01,
	/* Operation/command not supported or opcode modifier not supported: */
	CMD_STAT_BAD_OP		= 0x02,
	/* Parameter not supported or parameter out of range: */
	CMD_STAT_BAD_PARAM	= 0x03,
	/* System not enabled or bad system state: */
	CMD_STAT_BAD_SYS_STATE	= 0x04,
	/* Attempt to access reserved or unallocated resource: */
	CMD_STAT_BAD_RESOURCE	= 0x05,
	/* Requested resource is currently executing a command, or is otherwise busy: */
	CMD_STAT_RESOURCE_BUSY	= 0x06,
	/* Required capability exceeds device limits: */
	CMD_STAT_EXCEED_LIM	= 0x08,
	/* Resource is not in the appropriate state or ownership: */
	CMD_STAT_BAD_RES_STATE	= 0x09,
	/* Index out of range: */
	CMD_STAT_BAD_INDEX	= 0x0a,
	/* FW image corrupted: */
	CMD_STAT_BAD_NVMEM	= 0x0b,
	/* Error in ICM mapping (e.g. not enough auxiliary ICM pages to execute command): */
	CMD_STAT_ICM_ERROR	= 0x0c,
	/* Attempt to modify a QP/EE which is not in the presumed state: */
	CMD_STAT_BAD_QP_STATE	= 0x10,
	/* Bad segment parameters (Address/Size): */
	CMD_STAT_BAD_SEG_PARAM	= 0x20,
	/* Memory Region has Memory Windows bound to: */
	CMD_STAT_REG_BOUND	= 0x21,
	/* HCA local attached memory not present: */
	CMD_STAT_LAM_NOT_PRE	= 0x22,
	/* Bad management packet (silently discarded): */
	CMD_STAT_BAD_PKT	= 0x30,
	/* More outstanding CQEs in CQ than new CQ size: */
	CMD_STAT_BAD_SIZE	= 0x40,
	/* Multi Function device support required: */
	CMD_STAT_MULTI_FUNC_REQ	= 0x50,
};
enum {
	HCR_IN_PARAM_OFFSET	= 0x00,
	HCR_IN_MODIFIER_OFFSET	= 0x08,
	HCR_OUT_PARAM_OFFSET	= 0x0c,
	HCR_TOKEN_OFFSET	= 0x14,
	HCR_STATUS_OFFSET	= 0x18,

	HCR_OPMOD_SHIFT		= 12,
	HCR_T_BIT		= 21,
	HCR_E_BIT		= 22,
	HCR_GO_BIT		= 23
};

enum {
	GO_BIT_TIMEOUT_MSECS	= 10000
};
enum mlx4_vlan_transition {
	MLX4_VLAN_TRANSITION_VST_VST = 0,
	MLX4_VLAN_TRANSITION_VST_VGT = 1,
	MLX4_VLAN_TRANSITION_VGT_VST = 2,
	MLX4_VLAN_TRANSITION_VGT_VGT = 3,
};
struct mlx4_cmd_context {
	struct completion	done;
	int			result;
	int			next;
	u64			out_param;
	u16			token;
	u8			fw_status;
};
static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
				    struct mlx4_vhcr_cmd *in_vhcr);
static int mlx4_status_to_errno(u8 status)
{
	static const int trans_table[] = {
		[CMD_STAT_INTERNAL_ERR]	  = -EIO,
		[CMD_STAT_BAD_OP]	  = -EPERM,
		[CMD_STAT_BAD_PARAM]	  = -EINVAL,
		[CMD_STAT_BAD_SYS_STATE]  = -ENXIO,
		[CMD_STAT_BAD_RESOURCE]	  = -EBADF,
		[CMD_STAT_RESOURCE_BUSY]  = -EBUSY,
		[CMD_STAT_EXCEED_LIM]	  = -ENOMEM,
		[CMD_STAT_BAD_RES_STATE]  = -EBADF,
		[CMD_STAT_BAD_INDEX]	  = -EBADF,
		[CMD_STAT_BAD_NVMEM]	  = -EFAULT,
		[CMD_STAT_ICM_ERROR]	  = -ENFILE,
		[CMD_STAT_BAD_QP_STATE]	  = -EINVAL,
		[CMD_STAT_BAD_SEG_PARAM]  = -EFAULT,
		[CMD_STAT_REG_BOUND]	  = -EBUSY,
		[CMD_STAT_LAM_NOT_PRE]	  = -EAGAIN,
		[CMD_STAT_BAD_PKT]	  = -EINVAL,
		[CMD_STAT_BAD_SIZE]	  = -ENOMEM,
		[CMD_STAT_MULTI_FUNC_REQ] = -EACCES,
	};

	if (status >= ARRAY_SIZE(trans_table) ||
	    (status != CMD_STAT_OK && trans_table[status] == 0))
		return -EIO;

	return trans_table[status];
}
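
/*
 * Usage sketch (illustrative only, not part of the driver): callers
 * translate a raw FW status byte into a kernel errno before
 * propagating it, e.g.:
 *
 *	u8 fw_status = CMD_STAT_RESOURCE_BUSY;
 *	int err = mlx4_status_to_errno(fw_status);	 becomes -EBUSY
 */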
static u8 mlx4_errno_to_status(int errno)
{
	switch (errno) {
	case -EPERM:
		return CMD_STAT_BAD_OP;
	case -EINVAL:
		return CMD_STAT_BAD_PARAM;
	case -ENXIO:
		return CMD_STAT_BAD_SYS_STATE;
	case -EBUSY:
		return CMD_STAT_RESOURCE_BUSY;
	case -ENOMEM:
		return CMD_STAT_EXCEED_LIM;
	case -ENFILE:
		return CMD_STAT_ICM_ERROR;
	default:
		return CMD_STAT_INTERNAL_ERR;
	}
}
static int mlx4_internal_err_ret_value(struct mlx4_dev *dev, u16 op,
				       u8 op_modifier)
{
	switch (op) {
	case MLX4_CMD_UNMAP_ICM:
	case MLX4_CMD_UNMAP_ICM_AUX:
	case MLX4_CMD_UNMAP_FA:
	case MLX4_CMD_2RST_QP:
	case MLX4_CMD_HW2SW_EQ:
	case MLX4_CMD_HW2SW_CQ:
	case MLX4_CMD_HW2SW_SRQ:
	case MLX4_CMD_HW2SW_MPT:
	case MLX4_CMD_CLOSE_HCA:
	case MLX4_QP_FLOW_STEERING_DETACH:
	case MLX4_CMD_FREE_RES:
	case MLX4_CMD_CLOSE_PORT:
		return CMD_STAT_OK;

	case MLX4_CMD_QP_ATTACH:
		/* On Detach case return success */
		if (op_modifier == 0)
			return CMD_STAT_OK;
		return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);

	default:
		return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
	}
}
static int mlx4_closing_cmd_fatal_error(u16 op, u8 fw_status)
{
	/* Any error during the closing commands below is considered fatal */
	if (op == MLX4_CMD_CLOSE_HCA ||
	    op == MLX4_CMD_HW2SW_EQ ||
	    op == MLX4_CMD_HW2SW_CQ ||
	    op == MLX4_CMD_2RST_QP ||
	    op == MLX4_CMD_HW2SW_SRQ ||
	    op == MLX4_CMD_SYNC_TPT ||
	    op == MLX4_CMD_UNMAP_ICM ||
	    op == MLX4_CMD_UNMAP_ICM_AUX ||
	    op == MLX4_CMD_UNMAP_FA)
		return 1;
	/* Error on MLX4_CMD_HW2SW_MPT is fatal except when fw status equals
	 * CMD_STAT_REG_BOUND.  That status indicates that the memory region
	 * has memory windows bound to it, which may result from invalid
	 * user space usage and is not fatal.
	 */
	if (op == MLX4_CMD_HW2SW_MPT && fw_status != CMD_STAT_REG_BOUND)
		return 1;
	return 0;
}
static int mlx4_cmd_reset_flow(struct mlx4_dev *dev, u16 op, u8 op_modifier,
			       int err)
{
	/* Only if the reset flow is really active is the return code based
	 * on the command; otherwise the current error code is returned.
	 */
	if (mlx4_internal_err_reset) {
		mlx4_enter_error_state(dev->persist);
		err = mlx4_internal_err_ret_value(dev, op, op_modifier);
	}

	return err;
}
static int comm_pending(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u32 status = readl(&priv->mfunc.comm->slave_read);

	return (swab32(status) >> 31) != priv->cmd.comm_toggle;
}
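
/*
 * Comm-channel toggle sketch: each posted command writes
 * val = param | (cmd << 16) | (toggle << 31) to slave_write (see
 * mlx4_comm_cmd_post() below), and the master echoes the toggle in
 * slave_read when the command completes.  comm_pending() therefore
 * returns true while the echoed bit 31 still differs from the local
 * priv->cmd.comm_toggle.
 */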
static int mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u32 val;

	/* To avoid writing to unknown addresses after the device state was
	 * changed to internal error and the function was reset,
	 * check the INTERNAL_ERROR flag which is updated under
	 * device_state_mutex lock.
	 */
	mutex_lock(&dev->persist->device_state_mutex);

	if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
		mutex_unlock(&dev->persist->device_state_mutex);
		return -EIO;
	}

	priv->cmd.comm_toggle ^= 1;
	val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31);
	__raw_writel((__force u32) cpu_to_be32(val),
		     &priv->mfunc.comm->slave_write);
	mutex_unlock(&dev->persist->device_state_mutex);
	return 0;
}
static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
			      unsigned long timeout)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	unsigned long end;
	int err = 0;
	int ret_from_pending = 0;

	/* First, verify that the master reports correct status */
	if (comm_pending(dev)) {
		mlx4_warn(dev, "Communication channel is not idle - my toggle is %d (cmd:0x%x)\n",
			  priv->cmd.comm_toggle, cmd);
		return -EAGAIN;
	}

	/* Write command */
	down(&priv->cmd.poll_sem);
	if (mlx4_comm_cmd_post(dev, cmd, param)) {
		/* Only in case the device state is INTERNAL_ERROR,
		 * mlx4_comm_cmd_post returns with an error
		 */
		err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
		goto out;
	}

	end = msecs_to_jiffies(timeout) + jiffies;
	while (comm_pending(dev) && time_before(jiffies, end))
		cond_resched();
	ret_from_pending = comm_pending(dev);
	if (ret_from_pending) {
		/* check if the slave is trying to boot in the middle of
		 * FLR process. The only non-zero result in the RESET command
		 * is MLX4_DELAY_RESET_SLAVE.
		 */
		if (MLX4_COMM_CMD_RESET == cmd) {
			err = MLX4_DELAY_RESET_SLAVE;
			goto out;
		} else {
			mlx4_warn(dev, "Communication channel command 0x%x timed out\n",
				  cmd);
			err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
		}
	}

	if (err)
		mlx4_enter_error_state(dev->persist);
out:
	up(&priv->cmd.poll_sem);
	return err;
}
static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 vhcr_cmd,
			      u16 param, u16 op, unsigned long timeout)
{
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	struct mlx4_cmd_context *context;
	unsigned long end;
	int err = 0;

	down(&cmd->event_sem);

	spin_lock(&cmd->context_lock);
	BUG_ON(cmd->free_head < 0);
	context = &cmd->context[cmd->free_head];
	context->token += cmd->token_mask + 1;
	cmd->free_head = context->next;
	spin_unlock(&cmd->context_lock);

	reinit_completion(&context->done);

	if (mlx4_comm_cmd_post(dev, vhcr_cmd, param)) {
		/* Only in case the device state is INTERNAL_ERROR,
		 * mlx4_comm_cmd_post returns with an error
		 */
		err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
		goto out;
	}

	if (!wait_for_completion_timeout(&context->done,
					 msecs_to_jiffies(timeout))) {
		mlx4_warn(dev, "communication channel command 0x%x (op=0x%x) timed out\n",
			  vhcr_cmd, op);
		goto out_reset;
	}

	err = context->result;
	if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) {
		mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
			 vhcr_cmd, context->fw_status);
		if (mlx4_closing_cmd_fatal_error(op, context->fw_status))
			goto out_reset;
	}

	/* Wait for the comm channel to become ready.  This is necessary to
	 * prevent a race when switching between event and polling mode.
	 * Skip this section when the device is in FATAL_ERROR state; in
	 * that state no commands are sent via the comm channel until the
	 * device has returned from reset.
	 */
	if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
		end = msecs_to_jiffies(timeout) + jiffies;
		while (comm_pending(dev) && time_before(jiffies, end))
			cond_resched();
	}
	goto out;

out_reset:
	err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
	mlx4_enter_error_state(dev->persist);
out:
	spin_lock(&cmd->context_lock);
	context->next = cmd->free_head;
	cmd->free_head = context - cmd->context;
	spin_unlock(&cmd->context_lock);

	up(&cmd->event_sem);
	return err;
}
int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
		  u16 op, unsigned long timeout)
{
	if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
		return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);

	if (mlx4_priv(dev)->cmd.use_events)
		return mlx4_comm_cmd_wait(dev, cmd, param, op, timeout);
	return mlx4_comm_cmd_poll(dev, cmd, param, timeout);
}
static int cmd_pending(struct mlx4_dev *dev)
{
	u32 status;

	if (pci_channel_offline(dev->persist->pdev))
		return -EIO;

	status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);

	return (status & swab32(1 << HCR_GO_BIT)) ||
		(mlx4_priv(dev)->cmd.toggle ==
		 !!(status & swab32(1 << HCR_T_BIT)));
}
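
/*
 * HCR ownership sketch (bit numbers from the HCR_* enum above): the
 * HCR is free for a new command only when the go bit is clear AND the
 * hardware's t bit differs from the driver's cmd.toggle, i.e. the
 * command we posted with our toggle has been consumed:
 *
 *	go = status & swab32(1 << HCR_GO_BIT);
 *	t  = !!(status & swab32(1 << HCR_T_BIT));
 *	pending = go || (t == cmd.toggle);
 */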
static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
			 u32 in_modifier, u8 op_modifier, u16 op, u16 token,
			 int event)
{
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	u32 __iomem *hcr = cmd->hcr;
	int ret = -EIO;
	unsigned long end;

	mutex_lock(&dev->persist->device_state_mutex);
	/* To avoid writing to unknown addresses after the device state was
	 * changed to internal error and the chip was reset,
	 * check the INTERNAL_ERROR flag which is updated under
	 * device_state_mutex lock.
	 */
	if (pci_channel_offline(dev->persist->pdev) ||
	    (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
		/*
		 * Device is going through error recovery
		 * and cannot accept commands.
		 */
		goto out;
	}

	end = jiffies;
	end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);

	while (cmd_pending(dev)) {
		if (pci_channel_offline(dev->persist->pdev)) {
			/*
			 * Device is going through error recovery
			 * and cannot accept commands.
			 */
			goto out;
		}

		if (time_after_eq(jiffies, end)) {
			mlx4_err(dev, "%s:cmd_pending failed\n", __func__);
			goto out;
		}
		cond_resched();
	}

	/*
	 * We use writel (instead of something like memcpy_toio)
	 * because writes of less than 32 bits to the HCR don't work
	 * (and some architectures such as ia64 implement memcpy_toio
	 * in terms of writeb).
	 */
	__raw_writel((__force u32) cpu_to_be32(in_param >> 32),		  hcr + 0);
	__raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful),  hcr + 1);
	__raw_writel((__force u32) cpu_to_be32(in_modifier),		  hcr + 2);
	__raw_writel((__force u32) cpu_to_be32(out_param >> 32),	  hcr + 3);
	__raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4);
	__raw_writel((__force u32) cpu_to_be32(token << 16),		  hcr + 5);

	/* __raw_writel may not order writes. */
	wmb();

	__raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT)		|
					       (cmd->toggle << HCR_T_BIT)	|
					       (event ? (1 << HCR_E_BIT) : 0)	|
					       (op_modifier << HCR_OPMOD_SHIFT) |
					       op), hcr + 6);

	/*
	 * Make sure that our HCR writes don't get mixed in with
	 * writes from another CPU starting a FW command.
	 */
	mmiowb();

	cmd->toggle = cmd->toggle ^ 1;

	ret = 0;

out:
	if (ret)
		mlx4_warn(dev, "Could not post command 0x%x: ret=%d, in_param=0x%llx, in_mod=0x%x, op_mod=0x%x\n",
			  op, ret, in_param, in_modifier, op_modifier);
	mutex_unlock(&dev->persist->device_state_mutex);

	return ret;
}
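
/*
 * HCR mailbox layout as programmed by mlx4_cmd_post() above (each
 * slot is one big-endian 32-bit word):
 *
 *	hcr + 0/1: in_param  (high dword / low dword)
 *	hcr + 2:   in_modifier
 *	hcr + 3/4: out_param (high dword / low dword)
 *	hcr + 5:   token << 16
 *	hcr + 6:   go | toggle | event | op_modifier | opcode
 *
 * The word at hcr + 6 must be written last; setting the go bit hands
 * ownership of the HCR to firmware, hence the wmb() before it.
 */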
static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
			  int out_is_imm, u32 in_modifier, u8 op_modifier,
			  u16 op, unsigned long timeout)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vhcr_cmd *vhcr = priv->mfunc.vhcr;
	int ret;

	mutex_lock(&priv->cmd.slave_cmd_mutex);

	vhcr->in_param = cpu_to_be64(in_param);
	vhcr->out_param = out_param ? cpu_to_be64(*out_param) : 0;
	vhcr->in_modifier = cpu_to_be32(in_modifier);
	vhcr->opcode = cpu_to_be16((((u16) op_modifier) << 12) | (op & 0xfff));
	vhcr->token = cpu_to_be16(CMD_POLL_TOKEN);
	vhcr->status = 0;
	vhcr->flags = !!(priv->cmd.use_events) << 6;

	if (mlx4_is_master(dev)) {
		ret = mlx4_master_process_vhcr(dev, dev->caps.function, vhcr);
		if (!ret) {
			if (out_is_imm) {
				if (out_param)
					*out_param =
						be64_to_cpu(vhcr->out_param);
				else {
					mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
						 op);
					vhcr->status = CMD_STAT_BAD_PARAM;
				}
			}
			ret = mlx4_status_to_errno(vhcr->status);
		}
		if (ret &&
		    dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
			ret = mlx4_internal_err_ret_value(dev, op, op_modifier);
	} else {
		ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0, op,
				    MLX4_COMM_TIME + timeout);
		if (!ret) {
			if (out_is_imm) {
				if (out_param)
					*out_param =
						be64_to_cpu(vhcr->out_param);
				else {
					mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
						 op);
					vhcr->status = CMD_STAT_BAD_PARAM;
				}
			}
			ret = mlx4_status_to_errno(vhcr->status);
		} else {
			if (dev->persist->state &
			    MLX4_DEVICE_STATE_INTERNAL_ERROR)
				ret = mlx4_internal_err_ret_value(dev, op,
								  op_modifier);
			else
				mlx4_err(dev, "failed execution of VHCR_POST command opcode 0x%x\n", op);
		}
	}

	mutex_unlock(&priv->cmd.slave_cmd_mutex);
	return ret;
}
static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
			 int out_is_imm, u32 in_modifier, u8 op_modifier,
			 u16 op, unsigned long timeout)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	void __iomem *hcr = priv->cmd.hcr;
	int err = 0;
	unsigned long end;
	u32 stat;

	down(&priv->cmd.poll_sem);

	if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
		/*
		 * Device is going through error recovery
		 * and cannot accept commands.
		 */
		err = mlx4_internal_err_ret_value(dev, op, op_modifier);
		goto out;
	}

	if (out_is_imm && !out_param) {
		mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
			 op);
		err = -EINVAL;
		goto out;
	}

	err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
			    in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
	if (err)
		goto out_reset;

	end = msecs_to_jiffies(timeout) + jiffies;
	while (cmd_pending(dev) && time_before(jiffies, end)) {
		if (pci_channel_offline(dev->persist->pdev)) {
			/*
			 * Device is going through error recovery
			 * and cannot accept commands.
			 */
			err = -EIO;
			goto out_reset;
		}

		if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
			err = mlx4_internal_err_ret_value(dev, op, op_modifier);
			goto out;
		}

		cond_resched();
	}

	if (cmd_pending(dev)) {
		mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
			  op);
		err = -EIO;
		goto out_reset;
	}

	if (out_is_imm)
		*out_param =
			(u64) be32_to_cpu((__force __be32)
					  __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
			(u64) be32_to_cpu((__force __be32)
					  __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));
	stat = be32_to_cpu((__force __be32)
			   __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24;
	err = mlx4_status_to_errno(stat);
	if (err) {
		mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
			 op, stat);
		if (mlx4_closing_cmd_fatal_error(op, stat))
			goto out_reset;
	}
	goto out;

out_reset:
	err = mlx4_cmd_reset_flow(dev, op, op_modifier, err);
out:
	up(&priv->cmd.poll_sem);
	return err;
}
void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_context *context =
		&priv->cmd.context[token & priv->cmd.token_mask];

	/* previously timed out command completing at long last */
	if (token != context->token)
		return;

	context->fw_status = status;
	context->result    = mlx4_status_to_errno(status);
	context->out_param = out_param;

	complete(&context->done);
}
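
/*
 * Token bookkeeping sketch: context->token is advanced by
 * (token_mask + 1) on every reuse, so the low bits always select the
 * same context slot while the high bits act as a generation counter.
 * E.g. with token_mask = 0x3f and slot 5 (values hypothetical):
 *
 *	1st use: token = 0x005	(slot 5, generation 0)
 *	2nd use: token = 0x045	(slot 5, generation 1)
 *
 * A completion carrying a stale generation fails the
 * "token != context->token" check above and is ignored.
 */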
static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
			 int out_is_imm, u32 in_modifier, u8 op_modifier,
			 u16 op, unsigned long timeout)
{
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	struct mlx4_cmd_context *context;
	long ret_wait;
	int err = 0;

	down(&cmd->event_sem);

	spin_lock(&cmd->context_lock);
	BUG_ON(cmd->free_head < 0);
	context = &cmd->context[cmd->free_head];
	context->token += cmd->token_mask + 1;
	cmd->free_head = context->next;
	spin_unlock(&cmd->context_lock);

	if (out_is_imm && !out_param) {
		mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
			 op);
		err = -EINVAL;
		goto out;
	}

	reinit_completion(&context->done);

	err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
			    in_modifier, op_modifier, op, context->token, 1);
	if (err)
		goto out_reset;

	if (op == MLX4_CMD_SENSE_PORT) {
		ret_wait =
			wait_for_completion_interruptible_timeout(&context->done,
								  msecs_to_jiffies(timeout));
		if (ret_wait < 0) {
			context->fw_status = 0;
			context->out_param = 0;
			context->result = 0;
		}
	} else {
		ret_wait = (long)wait_for_completion_timeout(&context->done,
							     msecs_to_jiffies(timeout));
	}
	if (!ret_wait) {
		mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
			  op);
		if (op == MLX4_CMD_NOP) {
			err = -EBUSY;
			goto out;
		} else {
			err = -EIO;
			goto out_reset;
		}
	}

	err = context->result;
	if (err) {
		/* Since we do not want to have this error message always
		 * displayed at driver start when there are ConnectX2 HCAs
		 * on the host, we deprecate the error message for this
		 * specific command/input_mod/opcode_mod/fw-status to be debug.
		 */
		if (op == MLX4_CMD_SET_PORT &&
		    (in_modifier == 1 || in_modifier == 2) &&
		    op_modifier == MLX4_SET_PORT_IB_OPCODE &&
		    context->fw_status == CMD_STAT_BAD_SIZE)
			mlx4_dbg(dev, "command 0x%x failed: fw status = 0x%x\n",
				 op, context->fw_status);
		else
			mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
				 op, context->fw_status);
		if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
			err = mlx4_internal_err_ret_value(dev, op, op_modifier);
		else if (mlx4_closing_cmd_fatal_error(op, context->fw_status))
			goto out_reset;

		goto out;
	}

	if (out_is_imm)
		*out_param = context->out_param;

	goto out;

out_reset:
	err = mlx4_cmd_reset_flow(dev, op, op_modifier, err);
out:
	spin_lock(&cmd->context_lock);
	context->next = cmd->free_head;
	cmd->free_head = context - cmd->context;
	spin_unlock(&cmd->context_lock);

	up(&cmd->event_sem);
	return err;
}
int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
	       int out_is_imm, u32 in_modifier, u8 op_modifier,
	       u16 op, unsigned long timeout, int native)
{
	if (pci_channel_offline(dev->persist->pdev))
		return mlx4_cmd_reset_flow(dev, op, op_modifier, -EIO);

	if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
		int ret;

		if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
			return mlx4_internal_err_ret_value(dev, op,
							   op_modifier);
		down_read(&mlx4_priv(dev)->cmd.switch_sem);
		if (mlx4_priv(dev)->cmd.use_events)
			ret = mlx4_cmd_wait(dev, in_param, out_param,
					    out_is_imm, in_modifier,
					    op_modifier, op, timeout);
		else
			ret = mlx4_cmd_poll(dev, in_param, out_param,
					    out_is_imm, in_modifier,
					    op_modifier, op, timeout);

		up_read(&mlx4_priv(dev)->cmd.switch_sem);
		return ret;
	}
	return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm,
			      in_modifier, op_modifier, op, timeout);
}
EXPORT_SYMBOL_GPL(__mlx4_cmd);
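
/*
 * Callers normally reach __mlx4_cmd() through the mlx4_cmd(),
 * mlx4_cmd_box() and mlx4_cmd_imm() wrappers from <linux/mlx4/cmd.h>.
 * A minimal fire-and-forget sketch, mirroring mlx4_ARM_COMM_CHANNEL()
 * below (time class chosen only for illustration):
 *
 *	err = mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_NOP,
 *		       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 */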
int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
			   int slave, u64 slave_addr,
			   int size, int is_read)
{
	u64 in_param;
	u64 out_param;

	if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
	    (slave & ~0x7f) | (size & 0xff)) {
		mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx master_addr:0x%llx slave_id:%d size:%d\n",
			 slave_addr, master_addr, slave, size);
		return -EINVAL;
	}

	if (is_read) {
		in_param = (u64) slave | slave_addr;
		out_param = (u64) dev->caps.function | master_addr;
	} else {
		in_param = (u64) dev->caps.function | master_addr;
		out_param = (u64) slave | slave_addr;
	}

	return mlx4_cmd_imm(dev, in_param, &out_param, size, 0,
			    MLX4_CMD_ACCESS_MEM,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
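
/*
 * The parameter check above ORs four conditions and tests them once;
 * it is equivalent to requiring:
 *
 *	(slave_addr  & 0xfff) == 0	(4KB-aligned address)
 *	(master_addr & 0xfff) == 0	(4KB-aligned address)
 *	(slave & ~0x7f)       == 0	(slave id fits in 7 bits)
 *	(size & 0xff)         == 0	(size is a multiple of 256)
 */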
static int query_pkey_block(struct mlx4_dev *dev, u8 port, u16 index, u16 *pkey,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox)
{
	struct ib_smp *in_mad  = (struct ib_smp *)(inbox->buf);
	struct ib_smp *out_mad = (struct ib_smp *)(outbox->buf);
	int err;
	int i;

	if (index & 0x1f)
		return -EINVAL;

	in_mad->attr_mod = cpu_to_be32(index / 32);

	err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_NATIVE);
	if (err)
		return err;

	for (i = 0; i < 32; ++i)
		pkey[i] = be16_to_cpu(((__be16 *) out_mad->data)[i]);

	return err;
}
static int get_full_pkey_table(struct mlx4_dev *dev, u8 port, u16 *table,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox)
{
	int i;
	int err;

	for (i = 0; i < dev->caps.pkey_table_len[port]; i += 32) {
		err = query_pkey_block(dev, port, i, table + i, inbox, outbox);
		if (err)
			return err;
	}

	return 0;
}
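
/*
 * Worked example: with dev->caps.pkey_table_len[port] == 128, the
 * loop above issues four query_pkey_block() calls (index 0, 32, 64,
 * 96), each filling one 32-entry block, so "table" must hold the full
 * 128 entries.
 */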
#define PORT_CAPABILITY_LOCATION_IN_SMP 20
#define PORT_STATE_OFFSET 32

static enum ib_port_state vf_port_state(struct mlx4_dev *dev, int port, int vf)
{
	if (mlx4_get_slave_port_state(dev, vf, port) == SLAVE_PORT_UP)
		return IB_PORT_ACTIVE;
	else
		return IB_PORT_DOWN;
}
static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd)
{
	struct ib_smp *smp = inbox->buf;
	u32 index;
	u8 port, slave_port;
	u8 opcode_modifier;
	u16 *table;
	int err;
	int vidx, pidx;
	int network_view;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct ib_smp *outsmp = outbox->buf;
	__be16 *outtab = (__be16 *)(outsmp->data);
	__be32 slave_cap_mask;
	__be64 slave_node_guid;

	slave_port = vhcr->in_modifier;
	port = mlx4_slave_convert_port(dev, slave, slave_port);

	/* network-view bit is for driver use only, and should not be passed to FW */
	opcode_modifier = vhcr->op_modifier & ~0x8; /* clear netw view bit */
	network_view = !!(vhcr->op_modifier & 0x8);

	if (smp->base_version == 1 &&
	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
	    smp->class_version == 1) {
		/* host view is paravirtualized */
		if (!network_view && smp->method == IB_MGMT_METHOD_GET) {
			if (smp->attr_id == IB_SMP_ATTR_PKEY_TABLE) {
				index = be32_to_cpu(smp->attr_mod);
				if (port < 1 || port > dev->caps.num_ports)
					return -EINVAL;
				table = kcalloc((dev->caps.pkey_table_len[port] / 32) + 1,
						sizeof(*table) * 32, GFP_KERNEL);
				if (!table)
					return -ENOMEM;
				/* need to get the full pkey table because the paravirtualized
				 * pkeys may be scattered among several pkey blocks.
				 */
				err = get_full_pkey_table(dev, port, table, inbox, outbox);
				if (!err) {
					for (vidx = index * 32; vidx < (index + 1) * 32; ++vidx) {
						pidx = priv->virt2phys_pkey[slave][port - 1][vidx];
						outtab[vidx % 32] = cpu_to_be16(table[pidx]);
					}
				}
				kfree(table);
				return err;
			}
			if (smp->attr_id == IB_SMP_ATTR_PORT_INFO) {
				/* get the slave specific caps */
				smp->attr_mod = cpu_to_be32(port);
				err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
						   port, opcode_modifier,
						   vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
				/* modify the response for slaves */
				if (!err && slave != mlx4_master_func_num(dev)) {
					u8 *state = outsmp->data + PORT_STATE_OFFSET;

					*state = (*state & 0xf0) | vf_port_state(dev, port, slave);
					slave_cap_mask = priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
					memcpy(outsmp->data + PORT_CAPABILITY_LOCATION_IN_SMP, &slave_cap_mask, 4);
				}
				return err;
			}
			if (smp->attr_id == IB_SMP_ATTR_GUID_INFO) {
				__be64 guid = mlx4_get_admin_guid(dev, slave,
								  port);

				/* set the PF admin guid to the FW/HW burned
				 * GUID, if it wasn't yet set
				 */
				if (slave == 0 && guid == 0) {
					smp->attr_mod = 0;
					err = mlx4_cmd_box(dev,
							   inbox->dma,
							   outbox->dma,
							   vhcr->in_modifier,
							   opcode_modifier,
							   vhcr->op,
							   MLX4_CMD_TIME_CLASS_C,
							   MLX4_CMD_NATIVE);
					if (err)
						return err;
					mlx4_set_admin_guid(dev,
							    *(__be64 *)outsmp->data,
							    slave, port);
				} else {
					memcpy(outsmp->data, &guid, 8);
				}

				/* clean all other gids */
				memset(outsmp->data + 8, 0, 56);
				return 0;
			}
			if (smp->attr_id == IB_SMP_ATTR_NODE_INFO) {
				err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
						   port, opcode_modifier,
						   vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
				if (!err) {
					slave_node_guid = mlx4_get_slave_node_guid(dev, slave);
					memcpy(outsmp->data + 12, &slave_node_guid, 8);
				}
				return err;
			}
		}
	}

	/* Non-privileged VFs are only allowed "host" view LID-routed 'Get' MADs.
	 * These are the MADs used by ib verbs (such as ib_query_gids).
	 */
	if (slave != mlx4_master_func_num(dev) &&
	    !mlx4_vf_smi_enabled(dev, slave, port)) {
		if (!(smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
		      smp->method == IB_MGMT_METHOD_GET) || network_view) {
			mlx4_err(dev, "Unprivileged slave %d is trying to execute a Subnet MGMT MAD, class 0x%x, method 0x%x, view=%s for attr 0x%x. Rejecting\n",
				 slave, smp->mgmt_class, smp->method,
				 network_view ? "Network" : "Host",
				 be16_to_cpu(smp->attr_id));
			return -EPERM;
		}
	}

	return mlx4_cmd_box(dev, inbox->dma, outbox->dma,
			    vhcr->in_modifier, opcode_modifier,
			    vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
}
static int mlx4_CMD_EPERM_wrapper(struct mlx4_dev *dev, int slave,
				  struct mlx4_vhcr *vhcr,
				  struct mlx4_cmd_mailbox *inbox,
				  struct mlx4_cmd_mailbox *outbox,
				  struct mlx4_cmd_info *cmd)
{
	return -EPERM;
}
int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
		     struct mlx4_vhcr *vhcr,
		     struct mlx4_cmd_mailbox *inbox,
		     struct mlx4_cmd_mailbox *outbox,
		     struct mlx4_cmd_info *cmd)
{
	u64 in_param;
	u64 out_param;
	int err;

	in_param = cmd->has_inbox ? (u64) inbox->dma : vhcr->in_param;
	out_param = cmd->has_outbox ? (u64) outbox->dma : vhcr->out_param;
	if (cmd->encode_slave_id) {
		in_param &= 0xffffffffffffff00ll;
		in_param |= slave;
	}

	err = __mlx4_cmd(dev, in_param, &out_param, cmd->out_is_imm,
			 vhcr->in_modifier, vhcr->op_modifier, vhcr->op,
			 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	if (cmd->out_is_imm)
		vhcr->out_param = out_param;

	return err;
}
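
/*
 * encode_slave_id sketch: entries flagged with .encode_slave_id in
 * cmd_info[] below carry the slave's function id in the low byte of
 * in_param, which is why the wrapper masks with 0xffffffffffffff00
 * and ORs in the slave number before forwarding the command.
 */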
static struct mlx4_cmd_info cmd_info[] = {
	{ .opcode = MLX4_CMD_QUERY_FW, .has_inbox = false, .has_outbox = true,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = mlx4_QUERY_FW_wrapper },
	{ .opcode = MLX4_CMD_QUERY_HCA, .has_inbox = false, .has_outbox = true,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = NULL },
	{ .opcode = MLX4_CMD_QUERY_DEV_CAP, .has_inbox = false, .has_outbox = true,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = mlx4_QUERY_DEV_CAP_wrapper },
	{ .opcode = MLX4_CMD_QUERY_FUNC_CAP, .has_inbox = false, .has_outbox = true,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = mlx4_QUERY_FUNC_CAP_wrapper },
	{ .opcode = MLX4_CMD_QUERY_ADAPTER, .has_inbox = false, .has_outbox = true,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = NULL },
	{ .opcode = MLX4_CMD_INIT_PORT, .has_inbox = false, .has_outbox = false,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = mlx4_INIT_PORT_wrapper },
	{ .opcode = MLX4_CMD_CLOSE_PORT, .has_inbox = false, .has_outbox = false,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = mlx4_CLOSE_PORT_wrapper },
	{ .opcode = MLX4_CMD_QUERY_PORT, .has_inbox = false, .has_outbox = true,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = mlx4_QUERY_PORT_wrapper },
	{ .opcode = MLX4_CMD_SET_PORT, .has_inbox = true, .has_outbox = false,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = mlx4_SET_PORT_wrapper },
	{ .opcode = MLX4_CMD_MAP_EQ, .has_inbox = false, .has_outbox = false,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = mlx4_MAP_EQ_wrapper },
	{ .opcode = MLX4_CMD_SW2HW_EQ, .has_inbox = true, .has_outbox = false,
	  .out_is_imm = false, .encode_slave_id = true, .verify = NULL,
	  .wrapper = mlx4_SW2HW_EQ_wrapper },
	{ .opcode = MLX4_CMD_HW_HEALTH_CHECK, .has_inbox = false, .has_outbox = false,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = NULL },
	{ .opcode = MLX4_CMD_NOP, .has_inbox = false, .has_outbox = false,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = NULL },
	{ .opcode = MLX4_CMD_CONFIG_DEV, .has_inbox = false, .has_outbox = true,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = mlx4_CONFIG_DEV_wrapper },
	{ .opcode = MLX4_CMD_ALLOC_RES, .has_inbox = false, .has_outbox = false,
	  .out_is_imm = true, .encode_slave_id = false, .verify = NULL,
	  .wrapper = mlx4_ALLOC_RES_wrapper },
	{ .opcode = MLX4_CMD_FREE_RES, .has_inbox = false, .has_outbox = false,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = mlx4_FREE_RES_wrapper },
	{ .opcode = MLX4_CMD_SW2HW_MPT, .has_inbox = true, .has_outbox = false,
	  .out_is_imm = false, .encode_slave_id = true, .verify = NULL,
	  .wrapper = mlx4_SW2HW_MPT_wrapper },
	{ .opcode = MLX4_CMD_QUERY_MPT, .has_inbox = false, .has_outbox = true,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = mlx4_QUERY_MPT_wrapper },
	{ .opcode = MLX4_CMD_HW2SW_MPT, .has_inbox = false, .has_outbox = false,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = mlx4_HW2SW_MPT_wrapper },
	{ .opcode = MLX4_CMD_READ_MTT, .has_inbox = false, .has_outbox = true,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = NULL },
	{ .opcode = MLX4_CMD_WRITE_MTT, .has_inbox = true, .has_outbox = false,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = mlx4_WRITE_MTT_wrapper },
	{ .opcode = MLX4_CMD_SYNC_TPT, .has_inbox = true, .has_outbox = false,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = NULL },
	{ .opcode = MLX4_CMD_HW2SW_EQ, .has_inbox = false, .has_outbox = false,
	  .out_is_imm = false, .encode_slave_id = true, .verify = NULL,
	  .wrapper = mlx4_HW2SW_EQ_wrapper },
	{ .opcode = MLX4_CMD_QUERY_EQ, .has_inbox = false, .has_outbox = true,
	  .out_is_imm = false, .encode_slave_id = true, .verify = NULL,
	  .wrapper = mlx4_QUERY_EQ_wrapper },
	{ .opcode = MLX4_CMD_SW2HW_CQ, .has_inbox = true, .has_outbox = false,
	  .out_is_imm = false, .encode_slave_id = true, .verify = NULL,
	  .wrapper = mlx4_SW2HW_CQ_wrapper },
	{ .opcode = MLX4_CMD_HW2SW_CQ, .has_inbox = false, .has_outbox = false,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = mlx4_HW2SW_CQ_wrapper },
	{ .opcode = MLX4_CMD_QUERY_CQ, .has_inbox = false, .has_outbox = true,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = mlx4_QUERY_CQ_wrapper },
	{ .opcode = MLX4_CMD_MODIFY_CQ, .has_inbox = true, .has_outbox = false,
	  .out_is_imm = true, .encode_slave_id = false, .verify = NULL,
	  .wrapper = mlx4_MODIFY_CQ_wrapper },
	{ .opcode = MLX4_CMD_SW2HW_SRQ, .has_inbox = true, .has_outbox = false,
	  .out_is_imm = false, .encode_slave_id = true, .verify = NULL,
	  .wrapper = mlx4_SW2HW_SRQ_wrapper },
	{ .opcode = MLX4_CMD_HW2SW_SRQ, .has_inbox = false, .has_outbox = false,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = mlx4_HW2SW_SRQ_wrapper },
	{ .opcode = MLX4_CMD_QUERY_SRQ, .has_inbox = false, .has_outbox = true,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = mlx4_QUERY_SRQ_wrapper },
	{ .opcode = MLX4_CMD_ARM_SRQ, .has_inbox = false, .has_outbox = false,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = mlx4_ARM_SRQ_wrapper },
	{ .opcode = MLX4_CMD_RST2INIT_QP, .has_inbox = true, .has_outbox = false,
	  .out_is_imm = false, .encode_slave_id = true, .verify = NULL,
	  .wrapper = mlx4_RST2INIT_QP_wrapper },
	{ .opcode = MLX4_CMD_INIT2INIT_QP, .has_inbox = true, .has_outbox = false,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = mlx4_INIT2INIT_QP_wrapper },
	{ .opcode = MLX4_CMD_INIT2RTR_QP, .has_inbox = true, .has_outbox = false,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = mlx4_INIT2RTR_QP_wrapper },
	{ .opcode = MLX4_CMD_RTR2RTS_QP, .has_inbox = true, .has_outbox = false,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = mlx4_RTR2RTS_QP_wrapper },
	{ .opcode = MLX4_CMD_RTS2RTS_QP, .has_inbox = true, .has_outbox = false,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = mlx4_RTS2RTS_QP_wrapper },
	{ .opcode = MLX4_CMD_SQERR2RTS_QP, .has_inbox = true, .has_outbox = false,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = mlx4_SQERR2RTS_QP_wrapper },
	{ .opcode = MLX4_CMD_2ERR_QP, .has_inbox = false, .has_outbox = false,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = mlx4_GEN_QP_wrapper },
	{ .opcode = MLX4_CMD_RTS2SQD_QP, .has_inbox = false, .has_outbox = false,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = mlx4_GEN_QP_wrapper },
	{ .opcode = MLX4_CMD_SQD2SQD_QP, .has_inbox = true, .has_outbox = false,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = mlx4_SQD2SQD_QP_wrapper },
	{ .opcode = MLX4_CMD_SQD2RTS_QP, .has_inbox = true, .has_outbox = false,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = mlx4_SQD2RTS_QP_wrapper },
	{ .opcode = MLX4_CMD_2RST_QP, .has_inbox = false, .has_outbox = false,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = mlx4_2RST_QP_wrapper },
	{ .opcode = MLX4_CMD_QUERY_QP, .has_inbox = false, .has_outbox = true,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = mlx4_GEN_QP_wrapper },
	{ .opcode = MLX4_CMD_SUSPEND_QP, .has_inbox = false, .has_outbox = false,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = mlx4_GEN_QP_wrapper },
	{ .opcode = MLX4_CMD_UNSUSPEND_QP, .has_inbox = false, .has_outbox = false,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = mlx4_GEN_QP_wrapper },
	{ .opcode = MLX4_CMD_UPDATE_QP, .has_inbox = true, .has_outbox = false,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = mlx4_UPDATE_QP_wrapper },
	{ .opcode = MLX4_CMD_GET_OP_REQ, .has_inbox = false, .has_outbox = false,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = mlx4_CMD_EPERM_wrapper },
	{ .opcode = MLX4_CMD_ALLOCATE_VPP, .has_inbox = false, .has_outbox = true,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = mlx4_CMD_EPERM_wrapper },
	{ .opcode = MLX4_CMD_SET_VPORT_QOS, .has_inbox = false, .has_outbox = true,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = mlx4_CMD_EPERM_wrapper },
	{ .opcode = MLX4_CMD_CONF_SPECIAL_QP, .has_inbox = false, .has_outbox = false,
	  .out_is_imm = false, .encode_slave_id = false,
	  .verify = NULL, /* XXX verify: only demux can do this */
	  .wrapper = NULL },
	{ .opcode = MLX4_CMD_MAD_IFC, .has_inbox = true, .has_outbox = true,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = mlx4_MAD_IFC_wrapper },
	{ .opcode = MLX4_CMD_MAD_DEMUX, .has_inbox = false, .has_outbox = false,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = mlx4_CMD_EPERM_wrapper },
	{ .opcode = MLX4_CMD_QUERY_IF_STAT, .has_inbox = false, .has_outbox = true,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = mlx4_QUERY_IF_STAT_wrapper },
	{ .opcode = MLX4_CMD_ACCESS_REG, .has_inbox = true, .has_outbox = true,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = mlx4_ACCESS_REG_wrapper },
	{ .opcode = MLX4_CMD_CONGESTION_CTRL_OPCODE, .has_inbox = false,
	  .has_outbox = false, .out_is_imm = false, .encode_slave_id = false,
	  .verify = NULL, .wrapper = mlx4_CMD_EPERM_wrapper },
	/* Native multicast commands are not available for guests */
	{ .opcode = MLX4_CMD_QP_ATTACH, .has_inbox = true, .has_outbox = false,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = mlx4_QP_ATTACH_wrapper },
	{ .opcode = MLX4_CMD_PROMISC, .has_inbox = false, .has_outbox = false,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = mlx4_PROMISC_wrapper },
	/* Ethernet specific commands */
	{ .opcode = MLX4_CMD_SET_VLAN_FLTR, .has_inbox = true, .has_outbox = false,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = mlx4_SET_VLAN_FLTR_wrapper },
	{ .opcode = MLX4_CMD_SET_MCAST_FLTR, .has_inbox = false, .has_outbox = false,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = mlx4_SET_MCAST_FLTR_wrapper },
	{ .opcode = MLX4_CMD_DUMP_ETH_STATS, .has_inbox = false, .has_outbox = true,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = mlx4_DUMP_ETH_STATS_wrapper },
	{ .opcode = MLX4_CMD_INFORM_FLR_DONE, .has_inbox = false, .has_outbox = false,
	  .out_is_imm = false, .encode_slave_id = false, .verify = NULL,
	  .wrapper = NULL },
	/* flow steering commands */
	{ .opcode = MLX4_QP_FLOW_STEERING_ATTACH, .has_inbox = true,
	  .has_outbox = false, .out_is_imm = true, .encode_slave_id = false,
	  .verify = NULL, .wrapper = mlx4_QP_FLOW_STEERING_ATTACH_wrapper },
	{ .opcode = MLX4_QP_FLOW_STEERING_DETACH, .has_inbox = false,
	  .has_outbox = false, .out_is_imm = false, .encode_slave_id = false,
	  .verify = NULL, .wrapper = mlx4_QP_FLOW_STEERING_DETACH_wrapper },
	{ .opcode = MLX4_FLOW_STEERING_IB_UC_QP_RANGE, .has_inbox = false,
	  .has_outbox = false, .out_is_imm = false, .encode_slave_id = false,
	  .verify = NULL, .wrapper = mlx4_CMD_EPERM_wrapper },
	{ .opcode = MLX4_CMD_VIRT_PORT_MAP, .has_inbox = false,
	  .has_outbox = false, .out_is_imm = false, .encode_slave_id = false,
	  .verify = NULL, .wrapper = mlx4_CMD_EPERM_wrapper },
};
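
/*
 * Dispatch sketch: mlx4_master_process_vhcr() below scans cmd_info[]
 * linearly for the VHCR opcode; e.g. an opcode of MLX4_CMD_QUERY_FW
 * matches the first entry and is handled by mlx4_QUERY_FW_wrapper().
 * Unknown opcodes fail with CMD_STAT_BAD_PARAM.
 */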
static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
				    struct mlx4_vhcr_cmd *in_vhcr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_info *cmd = NULL;
	struct mlx4_vhcr_cmd *vhcr_cmd = in_vhcr ? in_vhcr : priv->mfunc.vhcr;
	struct mlx4_vhcr *vhcr;
	struct mlx4_cmd_mailbox *inbox = NULL;
	struct mlx4_cmd_mailbox *outbox = NULL;
	u64 in_param;
	u64 out_param;
	int ret = 0;
	int i;
	int err = 0;

	/* Create sw representation of Virtual HCR */
	vhcr = kzalloc(sizeof(struct mlx4_vhcr), GFP_KERNEL);
	if (!vhcr)
		return -ENOMEM;

	/* DMA in the vHCR */
	if (!in_vhcr) {
		ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
				      priv->mfunc.master.slave_state[slave].vhcr_dma,
				      ALIGN(sizeof(struct mlx4_vhcr_cmd),
					    MLX4_ACCESS_MEM_ALIGN), 1);
		if (ret) {
			if (!(dev->persist->state &
			      MLX4_DEVICE_STATE_INTERNAL_ERROR))
				mlx4_err(dev, "%s: Failed reading vhcr ret: 0x%x\n",
					 __func__, ret);
			kfree(vhcr);
			return ret;
		}
	}

	/* Fill SW VHCR fields */
	vhcr->in_param = be64_to_cpu(vhcr_cmd->in_param);
	vhcr->out_param = be64_to_cpu(vhcr_cmd->out_param);
	vhcr->in_modifier = be32_to_cpu(vhcr_cmd->in_modifier);
	vhcr->token = be16_to_cpu(vhcr_cmd->token);
	vhcr->op = be16_to_cpu(vhcr_cmd->opcode) & 0xfff;
	vhcr->op_modifier = (u8) (be16_to_cpu(vhcr_cmd->opcode) >> 12);
	vhcr->e_bit = vhcr_cmd->flags & (1 << 6);

	/* Lookup command */
	for (i = 0; i < ARRAY_SIZE(cmd_info); ++i) {
		if (vhcr->op == cmd_info[i].opcode) {
			cmd = &cmd_info[i];
			break;
		}
	}
	if (!cmd) {
		mlx4_err(dev, "Unknown command:0x%x accepted from slave:%d\n",
			 vhcr->op, slave);
		vhcr_cmd->status = CMD_STAT_BAD_PARAM;
		goto out_status;
	}

	/* Read inbox */
	if (cmd->has_inbox) {
		vhcr->in_param &= INBOX_MASK;
		inbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(inbox)) {
			vhcr_cmd->status = CMD_STAT_BAD_SIZE;
			inbox = NULL;
			goto out_status;
		}

		ret = mlx4_ACCESS_MEM(dev, inbox->dma, slave,
				      vhcr->in_param,
				      MLX4_MAILBOX_SIZE, 1);
		if (ret) {
			if (!(dev->persist->state &
			      MLX4_DEVICE_STATE_INTERNAL_ERROR))
				mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n",
					 __func__, cmd->opcode);
			vhcr_cmd->status = CMD_STAT_INTERNAL_ERR;
			goto out_status;
		}
	}

	/* Apply permission and bound checks if applicable */
	if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
		mlx4_warn(dev, "Command:0x%x from slave: %d failed protection checks for resource_id:%d\n",
			  vhcr->op, slave, vhcr->in_modifier);
		vhcr_cmd->status = CMD_STAT_BAD_OP;
		goto out_status;
	}

	/* Allocate outbox */
	if (cmd->has_outbox) {
		outbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(outbox)) {
			vhcr_cmd->status = CMD_STAT_BAD_SIZE;
			outbox = NULL;
			goto out_status;
		}
	}

	/* Execute the command! */
	if (cmd->wrapper) {
		err = cmd->wrapper(dev, slave, vhcr, inbox, outbox,
				   cmd);
		if (cmd->out_is_imm)
			vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
	} else {
		in_param = cmd->has_inbox ? (u64) inbox->dma :
			vhcr->in_param;
		out_param = cmd->has_outbox ? (u64) outbox->dma :
			vhcr->out_param;
		err = __mlx4_cmd(dev, in_param, &out_param,
				 cmd->out_is_imm, vhcr->in_modifier,
				 vhcr->op_modifier, vhcr->op,
				 MLX4_CMD_TIME_CLASS_A,
				 MLX4_CMD_NATIVE);

		if (cmd->out_is_imm) {
			vhcr->out_param = out_param;
			vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
		}
	}

	if (err) {
		if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR))
			mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n",
				  vhcr->op, slave, vhcr->errno, err);
		vhcr_cmd->status = mlx4_errno_to_status(err);
		goto out_status;
	}

out_status:
	/* Write outbox if command completed successfully */
	if (cmd && cmd->has_outbox && !vhcr_cmd->status) {
		ret = mlx4_ACCESS_MEM(dev, outbox->dma, slave,
				      vhcr->out_param,
				      MLX4_MAILBOX_SIZE, MLX4_CMD_WRAPPED);
		if (ret) {
			/* If we failed to write back the outbox after the
			 * command was successfully executed, we must fail this
			 * slave, as it is now in an undefined state.
			 */
			if (!(dev->persist->state &
			      MLX4_DEVICE_STATE_INTERNAL_ERROR))
				mlx4_err(dev, "%s:Failed writing outbox\n", __func__);
			ret = -EINVAL;
			goto out;
		}
	}

	/* DMA back vhcr result */
	if (!in_vhcr) {
		ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
				      priv->mfunc.master.slave_state[slave].vhcr_dma,
				      ALIGN(sizeof(struct mlx4_vhcr),
					    MLX4_ACCESS_MEM_ALIGN),
				      MLX4_CMD_WRAPPED);
		if (ret)
			mlx4_err(dev, "%s:Failed writing vhcr result\n",
				 __func__);
		else if (vhcr->e_bit &&
			 mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
			mlx4_warn(dev, "Failed to generate command completion eqe for slave %d\n",
				  slave);
	}

out:
	kfree(vhcr);
	mlx4_free_cmd_mailbox(dev, inbox);
	mlx4_free_cmd_mailbox(dev, outbox);
	return ret;
}
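
/*
 * VHCR opcode packing, as decoded above: the 16-bit opcode field
 * carries the op_modifier in its top 4 bits and the command opcode in
 * the low 12 bits:
 *
 *	opcode_field = (op_modifier << 12) | (op & 0xfff)
 */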
static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
						   int slave, int port)
{
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_vport_state *vp_admin;
	struct mlx4_vf_immed_vlan_work *work;
	struct mlx4_dev *dev = &(priv->dev);
	int err;
	int admin_vlan_ix = NO_INDX;

	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
	vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];

	if (vp_oper->state.default_vlan == vp_admin->default_vlan &&
	    vp_oper->state.default_qos == vp_admin->default_qos &&
	    vp_oper->state.vlan_proto == vp_admin->vlan_proto &&
	    vp_oper->state.link_state == vp_admin->link_state &&
	    vp_oper->state.qos_vport == vp_admin->qos_vport)
		return 0;

	if (!(priv->mfunc.master.slave_state[slave].active &&
	      dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)) {
		/* even if the UPDATE_QP command isn't supported, we still want
		 * to set this VF link according to the admin directive
		 */
		vp_oper->state.link_state = vp_admin->link_state;
		return -1;
	}

	mlx4_dbg(dev, "updating immediately admin params slave %d port %d\n",
		 slave, port);
	mlx4_dbg(dev, "vlan %d QoS %d link down %d\n",
		 vp_admin->default_vlan, vp_admin->default_qos,
		 vp_admin->link_state);

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	if (vp_oper->state.default_vlan != vp_admin->default_vlan) {
		if (MLX4_VGT != vp_admin->default_vlan) {
			err = __mlx4_register_vlan(&priv->dev, port,
						   vp_admin->default_vlan,
						   &admin_vlan_ix);
			if (err) {
				kfree(work);
				mlx4_warn(&priv->dev,
					  "No vlan resources slave %d, port %d\n",
					  slave, port);
				return err;
			}
		} else {
			admin_vlan_ix = NO_INDX;
		}
		work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
		mlx4_dbg(&priv->dev,
			 "alloc vlan %d idx %d slave %d port %d\n",
			 (int)(vp_admin->default_vlan),
			 admin_vlan_ix, slave, port);
	}

	/* save original vlan ix and vlan id */
	work->orig_vlan_id = vp_oper->state.default_vlan;
	work->orig_vlan_ix = vp_oper->vlan_idx;

	/* handle new qos */
	if (vp_oper->state.default_qos != vp_admin->default_qos)
		work->flags |= MLX4_VF_IMMED_VLAN_FLAG_QOS;

	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN)
		vp_oper->vlan_idx = admin_vlan_ix;

	vp_oper->state.default_vlan = vp_admin->default_vlan;
	vp_oper->state.default_qos = vp_admin->default_qos;
	vp_oper->state.vlan_proto = vp_admin->vlan_proto;
	vp_oper->state.link_state = vp_admin->link_state;
	vp_oper->state.qos_vport = vp_admin->qos_vport;

	if (vp_admin->link_state == IFLA_VF_LINK_STATE_DISABLE)
		work->flags |= MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE;

	/* iterate over QPs owned by this slave, using UPDATE_QP */
	work->port = port;
	work->slave = slave;
	work->qos = vp_oper->state.default_qos;
	work->qos_vport = vp_oper->state.qos_vport;
	work->vlan_id = vp_oper->state.default_vlan;
	work->vlan_ix = vp_oper->vlan_idx;
	work->vlan_proto = vp_oper->state.vlan_proto;
	work->priv = priv;
	INIT_WORK(&work->work, mlx4_vf_immed_vlan_work_handler);
	queue_work(priv->mfunc.master.comm_wq, &work->work);

	return 0;
}
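
/*
 * VPP allocation sketch for mlx4_allocate_port_vpps() below (numbers
 * hypothetical): with 64 VPPs reported by firmware and two priorities
 * set in priority_bm, num_vfs = 64 / 2 = 32, so each enabled priority
 * is granted 32 VPPs via vpp_param[].
 */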
static void mlx4_set_default_port_qos(struct mlx4_dev *dev, int port)
{
	struct mlx4_qos_manager *port_qos_ctl;
	struct mlx4_priv *priv = mlx4_priv(dev);

	port_qos_ctl = &priv->mfunc.master.qos_ctl[port];
	bitmap_zero(port_qos_ctl->priority_bm, MLX4_NUM_UP);

	/* Enable only default prio at PF init routine */
	set_bit(MLX4_DEFAULT_QOS_PRIO, port_qos_ctl->priority_bm);
}

static void mlx4_allocate_port_vpps(struct mlx4_dev *dev, int port)
{
	int i;
	int err;
	int num_vfs;
	u16 availible_vpp;
	u8 vpp_param[MLX4_NUM_UP];
	struct mlx4_qos_manager *port_qos;
	struct mlx4_priv *priv = mlx4_priv(dev);

	err = mlx4_ALLOCATE_VPP_get(dev, port, &availible_vpp, vpp_param);
	if (err) {
		mlx4_info(dev, "Failed query available VPPs\n");
		return;
	}

	port_qos = &priv->mfunc.master.qos_ctl[port];
	num_vfs = (availible_vpp /
		   bitmap_weight(port_qos->priority_bm, MLX4_NUM_UP));

	for (i = 0; i < MLX4_NUM_UP; i++) {
		if (test_bit(i, port_qos->priority_bm))
			vpp_param[i] = num_vfs;
	}

	err = mlx4_ALLOCATE_VPP_set(dev, port, vpp_param);
	if (err) {
		mlx4_info(dev, "Failed allocating VPPs\n");
		return;
	}

	/* Query actual allocated VPP, just to make sure */
	err = mlx4_ALLOCATE_VPP_get(dev, port, &availible_vpp, vpp_param);
	if (err) {
		mlx4_info(dev, "Failed query available VPPs\n");
		return;
	}

	port_qos->num_of_qos_vfs = num_vfs;
	mlx4_dbg(dev, "Port %d Available VPPs %d\n", port, availible_vpp);

	for (i = 0; i < MLX4_NUM_UP; i++)
		mlx4_dbg(dev, "Port %d UP %d Allocated %d VPPs\n", port, i,
			 vpp_param[i]);
}

static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
{
	int port, err;
	struct mlx4_vport_state *vp_admin;
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_slave_state *slave_state =
		&priv->mfunc.master.slave_state[slave];
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
			&priv->dev, slave);
	int min_port = find_first_bit(actv_ports.ports,
				      priv->dev.caps.num_ports) + 1;
	int max_port = min_port - 1 +
		bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);

	for (port = min_port; port <= max_port; port++) {
		if (!test_bit(port - 1, actv_ports.ports))
			continue;
		priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
			priv->mfunc.master.vf_admin[slave].enable_smi[port];
		vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
		vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
		if (vp_admin->vlan_proto != htons(ETH_P_8021AD) ||
		    slave_state->vst_qinq_supported) {
			vp_oper->state.vlan_proto   = vp_admin->vlan_proto;
			vp_oper->state.default_vlan = vp_admin->default_vlan;
			vp_oper->state.default_qos  = vp_admin->default_qos;
		}
		vp_oper->state.link_state = vp_admin->link_state;
		vp_oper->state.mac        = vp_admin->mac;
		vp_oper->state.spoofchk   = vp_admin->spoofchk;
		vp_oper->state.tx_rate    = vp_admin->tx_rate;
		vp_oper->state.qos_vport  = vp_admin->qos_vport;
		vp_oper->state.guid       = vp_admin->guid;

		if (MLX4_VGT != vp_admin->default_vlan) {
			err = __mlx4_register_vlan(&priv->dev, port,
						   vp_admin->default_vlan, &(vp_oper->vlan_idx));
			if (err) {
				vp_oper->vlan_idx = NO_INDX;
				vp_oper->state.default_vlan = MLX4_VGT;
				vp_oper->state.vlan_proto = htons(ETH_P_8021Q);
				mlx4_warn(&priv->dev,
					  "No vlan resources slave %d, port %d\n",
					  slave, port);
				return err;
			}
			mlx4_dbg(&priv->dev, "alloc vlan %d idx %d slave %d port %d\n",
				 (int)(vp_oper->state.default_vlan),
				 vp_oper->vlan_idx, slave, port);
		}
		if (vp_admin->spoofchk) {
			vp_oper->mac_idx = __mlx4_register_mac(&priv->dev,
							       port,
							       vp_admin->mac);
			if (0 > vp_oper->mac_idx) {
				err = vp_oper->mac_idx;
				vp_oper->mac_idx = NO_INDX;
				mlx4_warn(&priv->dev,
					  "No mac resources slave %d, port %d\n",
					  slave, port);
				return err;
			}
			mlx4_dbg(&priv->dev, "alloc mac %llx idx %d slave %d port %d\n",
				 vp_oper->state.mac, vp_oper->mac_idx, slave, port);
		}
	}
	return 0;
}

static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave)
{
	int port;
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
			&priv->dev, slave);
	int min_port = find_first_bit(actv_ports.ports,
				      priv->dev.caps.num_ports) + 1;
	int max_port = min_port - 1 +
		bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);

	for (port = min_port; port <= max_port; port++) {
		if (!test_bit(port - 1, actv_ports.ports))
			continue;
		priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
			MLX4_VF_SMI_DISABLED;
		vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
		if (NO_INDX != vp_oper->vlan_idx) {
			__mlx4_unregister_vlan(&priv->dev,
					       port, vp_oper->state.default_vlan);
			vp_oper->vlan_idx = NO_INDX;
		}
		if (NO_INDX != vp_oper->mac_idx) {
			__mlx4_unregister_mac(&priv->dev, port, vp_oper->state.mac);
			vp_oper->mac_idx = NO_INDX;
		}
	}
}
static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
			       u16 param, u8 toggle)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	u32 reply;
	u8 is_going_down = 0;
	int i;
	unsigned long flags;

	slave_state[slave].comm_toggle ^= 1;
	reply = (u32) slave_state[slave].comm_toggle << 31;
	if (toggle != slave_state[slave].comm_toggle) {
		mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER STATE COMPROMISED ***\n",
			  toggle, slave);
		goto reset_slave;
	}
	if (cmd == MLX4_COMM_CMD_RESET) {
		mlx4_warn(dev, "Received reset from slave:%d\n", slave);
		slave_state[slave].active = false;
		slave_state[slave].old_vlan_api = false;
		slave_state[slave].vst_qinq_supported = false;
		mlx4_master_deactivate_admin_state(priv, slave);
		for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) {
			slave_state[slave].event_eq[i].eqn = -1;
			slave_state[slave].event_eq[i].token = 0;
		}
		/* check if we are in the middle of the FLR process;
		 * if so, return "retry" status to the slave
		 */
		if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd)
			goto inform_slave_state;

		mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN, slave);

		/* write the version in the event field */
		reply |= mlx4_comm_get_version();

		goto reset_slave;
	}
	/* command from slave in the middle of FLR */
	if (cmd != MLX4_COMM_CMD_RESET &&
	    MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
		mlx4_warn(dev, "slave:%d is trying to run cmd(0x%x) in the middle of FLR\n",
			  slave, cmd);
		return;
	}

	switch (cmd) {
	case MLX4_COMM_CMD_VHCR0:
		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_RESET)
			goto reset_slave;
		slave_state[slave].vhcr_dma = ((u64) param) << 48;
		priv->mfunc.master.slave_state[slave].cookie = 0;
		break;
	case MLX4_COMM_CMD_VHCR1:
		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)
			goto reset_slave;
		slave_state[slave].vhcr_dma |= ((u64) param) << 32;
		break;
	case MLX4_COMM_CMD_VHCR2:
		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR1)
			goto reset_slave;
		slave_state[slave].vhcr_dma |= ((u64) param) << 16;
		break;
	case MLX4_COMM_CMD_VHCR_EN:
		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2)
			goto reset_slave;
		slave_state[slave].vhcr_dma |= param;
		if (mlx4_master_activate_admin_state(priv, slave))
			goto reset_slave;
		slave_state[slave].active = true;
		mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_INIT, slave);
		break;
	case MLX4_COMM_CMD_VHCR_POST:
		if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) &&
		    (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST)) {
			mlx4_warn(dev, "slave:%d is out of sync, cmd=0x%x, last command=0x%x, reset is needed\n",
				  slave, cmd, slave_state[slave].last_cmd);
			goto reset_slave;
		}

		mutex_lock(&priv->cmd.slave_cmd_mutex);
		if (mlx4_master_process_vhcr(dev, slave, NULL)) {
			mlx4_err(dev, "Failed processing vhcr for slave:%d, resetting slave\n",
				 slave);
			mutex_unlock(&priv->cmd.slave_cmd_mutex);
			goto reset_slave;
		}
		mutex_unlock(&priv->cmd.slave_cmd_mutex);
		break;
	default:
		mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave);
		goto reset_slave;
	}
	spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
	if (!slave_state[slave].is_slave_going_down)
		slave_state[slave].last_cmd = cmd;
	else
		is_going_down = 1;
	spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
	if (is_going_down) {
		mlx4_warn(dev, "Slave is going down aborting command(%d) executing from slave:%d\n",
			  cmd, slave);
		return;
	}
	__raw_writel((__force u32) cpu_to_be32(reply),
		     &priv->mfunc.comm[slave].slave_read);

	return;

reset_slave:
	/* cleanup any slave resources */
	if (dev->persist->interface_state & MLX4_INTERFACE_STATE_UP)
		mlx4_delete_all_resources_for_slave(dev, slave);

	if (cmd != MLX4_COMM_CMD_RESET) {
		mlx4_warn(dev, "Turn on internal error to force reset, slave=%d, cmd=0x%x\n",
			  slave, cmd);
		/* Turn on internal error letting the slave reset itself
		 * immediately, otherwise it might wait until the command
		 * times out.
		 */
		reply |= ((u32)COMM_CHAN_EVENT_INTERNAL_ERR);
	}

	spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
	if (!slave_state[slave].is_slave_going_down)
		slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
	spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
	/* with slave in the middle of flr, no need to clean resources again. */
inform_slave_state:
	memset(&slave_state[slave].event_eq, 0,
	       sizeof(struct mlx4_slave_event_eq_info));
	__raw_writel((__force u32) cpu_to_be32(reply),
		     &priv->mfunc.comm[slave].slave_read);
	wmb();
}
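
/*
 * VHCR address handshake sketch: the slave delivers its 64-bit
 * vhcr_dma in four 16-bit chunks, strictly ordered by the state
 * machine above:
 *
 *	VHCR0:   vhcr_dma  = param << 48
 *	VHCR1:   vhcr_dma |= param << 32
 *	VHCR2:   vhcr_dma |= param << 16
 *	VHCR_EN: vhcr_dma |= param, then the slave is activated
 *
 * Any out-of-order command resets the slave.
 */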
/* master command processing */
void mlx4_master_comm_channel(struct work_struct *work)
{
	struct mlx4_mfunc_master_ctx *master =
		container_of(work,
			     struct mlx4_mfunc_master_ctx,
			     comm_work);
	struct mlx4_mfunc *mfunc =
		container_of(master, struct mlx4_mfunc, master);
	struct mlx4_priv *priv =
		container_of(mfunc, struct mlx4_priv, mfunc);
	struct mlx4_dev *dev = &priv->dev;
	__be32 *bit_vec;
	u32 comm_cmd;
	u32 vec;
	int i, j, slave;
	int toggle;
	int served = 0;
	int reported = 0;
	u32 slt;

	bit_vec = master->comm_arm_bit_vector;
	for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++) {
		vec = be32_to_cpu(bit_vec[i]);
		for (j = 0; j < 32; j++) {
			if (!(vec & (1 << j)))
				continue;
			++reported;
			slave = (i * 32) + j;
			comm_cmd = swab32(readl(
					  &mfunc->comm[slave].slave_write));
			slt = swab32(readl(&mfunc->comm[slave].slave_read))
				>> 31;
			toggle = comm_cmd >> 31;
			if (toggle != slt) {
				if (master->slave_state[slave].comm_toggle
				    != slt) {
					pr_info("slave %d out of sync. read toggle %d, state toggle %d. Resynching.\n",
						slave, slt,
						master->slave_state[slave].comm_toggle);
					master->slave_state[slave].comm_toggle =
						slt;
				}
				mlx4_master_do_cmd(dev, slave,
						   comm_cmd >> 16 & 0xff,
						   comm_cmd & 0xffff, toggle);
				++served;
			}
		}
	}

	if (reported && reported != served)
		mlx4_warn(dev, "Got command event with bitmask from %d slaves but %d were served\n",
			  reported, served);

	if (mlx4_ARM_COMM_CHANNEL(dev))
		mlx4_warn(dev, "Failed to arm comm channel events\n");
}
static int sync_toggles(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u32 wr_toggle;
	u32 rd_toggle;
	unsigned long end;

	wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write));
	if (wr_toggle == 0xffffffff)
		end = jiffies + msecs_to_jiffies(30000);
	else
		end = jiffies + msecs_to_jiffies(5000);

	while (time_before(jiffies, end)) {
		rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read));
		if (wr_toggle == 0xffffffff || rd_toggle == 0xffffffff) {
			/* PCI might be offline */

			/* If device removal has been requested,
			 * do not continue retrying.
			 */
			if (dev->persist->interface_state &
			    MLX4_INTERFACE_STATE_NOWAIT) {
				mlx4_warn(dev,
					  "communication channel is offline\n");
				return -EIO;
			}

			msleep(100);
			wr_toggle = swab32(readl(&priv->mfunc.comm->
					   slave_write));
			continue;
		}

		if (rd_toggle >> 31 == wr_toggle >> 31) {
			priv->cmd.comm_toggle = rd_toggle >> 31;
			return 0;
		}

		cond_resched();
	}

	/*
	 * We could reach here if, for example, the previous VM using this
	 * function misbehaved and left the channel with unsynced state. We
	 * should fix this here and give this VM a chance to use a properly
	 * synced channel.
	 */
	mlx4_warn(dev, "recovering from previously mis-behaved VM\n");
	__raw_writel((__force u32) 0, &priv->mfunc.comm->slave_read);
	__raw_writel((__force u32) 0, &priv->mfunc.comm->slave_write);
	priv->cmd.comm_toggle = 0;

	return 0;
}
int mlx4_multi_func_init(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state;
	int i, j, err, port;

	if (mlx4_is_master(dev))
		priv->mfunc.comm =
			ioremap(pci_resource_start(dev->persist->pdev,
						   priv->fw.comm_bar) +
				priv->fw.comm_base, MLX4_COMM_PAGESIZE);
	else
		priv->mfunc.comm =
			ioremap(pci_resource_start(dev->persist->pdev, 2) +
				MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
	if (!priv->mfunc.comm) {
		mlx4_err(dev, "Couldn't map communication vector\n");
		goto err_vhcr;
	}

	if (mlx4_is_master(dev)) {
		struct mlx4_vf_oper_state *vf_oper;
		struct mlx4_vf_admin_state *vf_admin;

		priv->mfunc.master.slave_state =
			kzalloc(dev->num_slaves *
				sizeof(struct mlx4_slave_state), GFP_KERNEL);
		if (!priv->mfunc.master.slave_state)
			goto err_comm;

		priv->mfunc.master.vf_admin =
			kzalloc(dev->num_slaves *
				sizeof(struct mlx4_vf_admin_state), GFP_KERNEL);
		if (!priv->mfunc.master.vf_admin)
			goto err_comm_admin;

		priv->mfunc.master.vf_oper =
			kzalloc(dev->num_slaves *
				sizeof(struct mlx4_vf_oper_state), GFP_KERNEL);
		if (!priv->mfunc.master.vf_oper)
			goto err_comm_oper;

		for (i = 0; i < dev->num_slaves; ++i) {
			vf_admin = &priv->mfunc.master.vf_admin[i];
			vf_oper = &priv->mfunc.master.vf_oper[i];
			s_state = &priv->mfunc.master.slave_state[i];
			s_state->last_cmd = MLX4_COMM_CMD_RESET;
			s_state->vst_qinq_supported = false;
			mutex_init(&priv->mfunc.master.gen_eqe_mutex[i]);
			for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j)
				s_state->event_eq[j].eqn = -1;
			__raw_writel((__force u32) 0,
				     &priv->mfunc.comm[i].slave_write);
			__raw_writel((__force u32) 0,
				     &priv->mfunc.comm[i].slave_read);
			for (port = 1; port <= MLX4_MAX_PORTS; port++) {
				struct mlx4_vport_state *admin_vport;
				struct mlx4_vport_state *oper_vport;

				s_state->vlan_filter[port] =
					kzalloc(sizeof(struct mlx4_vlan_fltr),
						GFP_KERNEL);
				if (!s_state->vlan_filter[port]) {
					if (--port)
						kfree(s_state->vlan_filter[port]);
					goto err_slaves;
				}

				admin_vport = &vf_admin->vport[port];
				oper_vport = &vf_oper->vport[port].state;
				INIT_LIST_HEAD(&s_state->mcast_filters[port]);
				admin_vport->default_vlan = MLX4_VGT;
				oper_vport->default_vlan = MLX4_VGT;
				admin_vport->qos_vport =
						MLX4_VPP_DEFAULT_VPORT;
				oper_vport->qos_vport = MLX4_VPP_DEFAULT_VPORT;
				admin_vport->vlan_proto = htons(ETH_P_8021Q);
				oper_vport->vlan_proto = htons(ETH_P_8021Q);
				vf_oper->vport[port].vlan_idx = NO_INDX;
				vf_oper->vport[port].mac_idx = NO_INDX;
				mlx4_set_random_admin_guid(dev, i, port);
			}
			spin_lock_init(&s_state->lock);
		}

		if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP) {
			for (port = 1; port <= dev->caps.num_ports; port++) {
				if (mlx4_is_eth(dev, port)) {
					mlx4_set_default_port_qos(dev, port);
					mlx4_allocate_port_vpps(dev, port);
				}
			}
		}

		memset(&priv->mfunc.master.cmd_eqe, 0, sizeof(struct mlx4_eqe));
		priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
		INIT_WORK(&priv->mfunc.master.comm_work,
			  mlx4_master_comm_channel);
		INIT_WORK(&priv->mfunc.master.slave_event_work,
			  mlx4_gen_slave_eqe);
		INIT_WORK(&priv->mfunc.master.slave_flr_event_work,
			  mlx4_master_handle_slave_flr);
		spin_lock_init(&priv->mfunc.master.slave_state_lock);
		spin_lock_init(&priv->mfunc.master.slave_eq.event_lock);
		priv->mfunc.master.comm_wq =
			create_singlethread_workqueue("mlx4_comm");
		if (!priv->mfunc.master.comm_wq)
			goto err_slaves;

		if (mlx4_init_resource_tracker(dev))
			goto err_thread;
	} else {
		err = sync_toggles(dev);
		if (err) {
			mlx4_err(dev, "Couldn't sync toggles\n");
			goto err_comm;
		}
	}
	return 0;

err_thread:
	flush_workqueue(priv->mfunc.master.comm_wq);
	destroy_workqueue(priv->mfunc.master.comm_wq);
err_slaves:
	while (i--) {
		for (port = 1; port <= MLX4_MAX_PORTS; port++)
			kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
	}
	kfree(priv->mfunc.master.vf_oper);
err_comm_oper:
	kfree(priv->mfunc.master.vf_admin);
err_comm_admin:
	kfree(priv->mfunc.master.slave_state);
err_comm:
	iounmap(priv->mfunc.comm);
	priv->mfunc.comm = NULL;
err_vhcr:
	dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
			  priv->mfunc.vhcr,
			  priv->mfunc.vhcr_dma);
	priv->mfunc.vhcr = NULL;
	return -ENOMEM;
}
int mlx4_cmd_init(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int flags = 0;

	if (!priv->cmd.initialized) {
		init_rwsem(&priv->cmd.switch_sem);
		mutex_init(&priv->cmd.slave_cmd_mutex);
		sema_init(&priv->cmd.poll_sem, 1);
		priv->cmd.use_events = 0;
		priv->cmd.toggle = 1;
		priv->cmd.initialized = 1;
		flags |= MLX4_CMD_CLEANUP_STRUCT;
	}

	if (!mlx4_is_slave(dev) && !priv->cmd.hcr) {
		priv->cmd.hcr = ioremap(pci_resource_start(dev->persist->pdev,
					0) + MLX4_HCR_BASE, MLX4_HCR_SIZE);
		if (!priv->cmd.hcr) {
			mlx4_err(dev, "Couldn't map command register\n");
			goto err;
		}
		flags |= MLX4_CMD_CLEANUP_HCR;
	}

	if (mlx4_is_mfunc(dev) && !priv->mfunc.vhcr) {
		priv->mfunc.vhcr = dma_alloc_coherent(&dev->persist->pdev->dev,
						      PAGE_SIZE,
						      &priv->mfunc.vhcr_dma,
						      GFP_KERNEL);
		if (!priv->mfunc.vhcr)
			goto err;

		flags |= MLX4_CMD_CLEANUP_VHCR;
	}

	if (!priv->cmd.pool) {
		priv->cmd.pool = pci_pool_create("mlx4_cmd",
						 dev->persist->pdev,
						 MLX4_MAILBOX_SIZE,
						 MLX4_MAILBOX_SIZE, 0);
		if (!priv->cmd.pool)
			goto err;

		flags |= MLX4_CMD_CLEANUP_POOL;
	}

	return 0;

err:
	mlx4_cmd_cleanup(dev, flags);
	return -ENOMEM;
}
void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int slave;
	u32 slave_read;

	/* If the comm channel has not yet been initialized,
	 * skip reporting the internal error event to all
	 * the communication channels.
	 */
	if (!priv->mfunc.comm)
		return;

	/* Report an internal error event to all
	 * communication channels.
	 */
	for (slave = 0; slave < dev->num_slaves; slave++) {
		slave_read = swab32(readl(&priv->mfunc.comm[slave].slave_read));
		slave_read |= (u32)COMM_CHAN_EVENT_INTERNAL_ERR;
		__raw_writel((__force u32)cpu_to_be32(slave_read),
			     &priv->mfunc.comm[slave].slave_read);
		/* Make sure that our comm channel write doesn't
		 * get mixed in with writes from another CPU.
		 */
		mmiowb();
	}
}
void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, port;

	if (mlx4_is_master(dev)) {
		flush_workqueue(priv->mfunc.master.comm_wq);
		destroy_workqueue(priv->mfunc.master.comm_wq);
		for (i = 0; i < dev->num_slaves; i++) {
			for (port = 1; port <= MLX4_MAX_PORTS; port++)
				kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
		}
		kfree(priv->mfunc.master.slave_state);
		kfree(priv->mfunc.master.vf_admin);
		kfree(priv->mfunc.master.vf_oper);
		dev->num_slaves = 0;
	}

	iounmap(priv->mfunc.comm);
	priv->mfunc.comm = NULL;
}
void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (priv->cmd.pool && (cleanup_mask & MLX4_CMD_CLEANUP_POOL)) {
		pci_pool_destroy(priv->cmd.pool);
		priv->cmd.pool = NULL;
	}

	if (!mlx4_is_slave(dev) && priv->cmd.hcr &&
	    (cleanup_mask & MLX4_CMD_CLEANUP_HCR)) {
		iounmap(priv->cmd.hcr);
		priv->cmd.hcr = NULL;
	}
	if (mlx4_is_mfunc(dev) && priv->mfunc.vhcr &&
	    (cleanup_mask & MLX4_CMD_CLEANUP_VHCR)) {
		dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
				  priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
		priv->mfunc.vhcr = NULL;
	}
	if (priv->cmd.initialized && (cleanup_mask & MLX4_CMD_CLEANUP_STRUCT))
		priv->cmd.initialized = 0;
}
/*
 * Switch to using events to issue FW commands (can only be called
 * after event queue for command events has been initialized).
 */
int mlx4_cmd_use_events(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;
	int err = 0;

	priv->cmd.context = kmalloc(priv->cmd.max_cmds *
				    sizeof(struct mlx4_cmd_context),
				    GFP_KERNEL);
	if (!priv->cmd.context)
		return -ENOMEM;

	down_write(&priv->cmd.switch_sem);
	for (i = 0; i < priv->cmd.max_cmds; ++i) {
		priv->cmd.context[i].token = i;
		priv->cmd.context[i].next = i + 1;
		/* To support fatal error flow, initialize all
		 * cmd contexts to allow simulating completions
		 * with complete() at any time.
		 */
		init_completion(&priv->cmd.context[i].done);
	}

	priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
	priv->cmd.free_head = 0;

	sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);

	for (priv->cmd.token_mask = 1;
	     priv->cmd.token_mask < priv->cmd.max_cmds;
	     priv->cmd.token_mask <<= 1)
		; /* nothing */
	--priv->cmd.token_mask;

	down(&priv->cmd.poll_sem);
	priv->cmd.use_events = 1;
	up_write(&priv->cmd.switch_sem);

	return err;
}
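/*
 * Worked example (illustrative): for priv->cmd.max_cmds == 10 the loop
 * above leaves token_mask at the first power of two >= 10, i.e. 16, and
 * the decrement yields 15 (0xf). Tokens then cycle through the low four
 * bits, so a completion's token can be matched back to its context with
 * an index computed along the lines of:
 *
 *	context = &priv->cmd.context[token & priv->cmd.token_mask];
 */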
/*
 * Switch back to polling (used when shutting down the device)
 */
void mlx4_cmd_use_polling(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	down_write(&priv->cmd.switch_sem);
	priv->cmd.use_events = 0;

	for (i = 0; i < priv->cmd.max_cmds; ++i)
		down(&priv->cmd.event_sem);

	kfree(priv->cmd.context);

	up(&priv->cmd.poll_sem);
	up_write(&priv->cmd.switch_sem);
}
struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
{
	struct mlx4_cmd_mailbox *mailbox;

	mailbox = kmalloc(sizeof(*mailbox), GFP_KERNEL);
	if (!mailbox)
		return ERR_PTR(-ENOMEM);

	mailbox->buf = pci_pool_zalloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
				       &mailbox->dma);
	if (!mailbox->buf) {
		kfree(mailbox);
		return ERR_PTR(-ENOMEM);
	}

	return mailbox;
}
EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);

void mlx4_free_cmd_mailbox(struct mlx4_dev *dev,
			   struct mlx4_cmd_mailbox *mailbox)
{
	if (!mailbox)
		return;

	pci_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma);
	kfree(mailbox);
}
EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);
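/*
 * Typical mailbox round-trip (illustrative sketch only): callers allocate
 * a DMA-able mailbox, point a firmware command at mailbox->dma, and free
 * the mailbox on all paths. The QUERY_PORT opcode here is just one
 * plausible example of an output-mailbox command:
 *
 *	struct mlx4_cmd_mailbox *mailbox;
 *	int err;
 *
 *	mailbox = mlx4_alloc_cmd_mailbox(dev);
 *	if (IS_ERR(mailbox))
 *		return PTR_ERR(mailbox);
 *
 *	err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0,
 *			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
 *			   MLX4_CMD_WRAPPED);
 *
 *	mlx4_free_cmd_mailbox(dev, mailbox);
 *	return err;
 */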
u32 mlx4_comm_get_version(void)
{
	return ((u32)CMD_CHAN_IF_REV << 8) | (u32)CMD_CHAN_VER;
}
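/*
 * Example: with CMD_CHAN_IF_REV == 1 and CMD_CHAN_VER == 1 this returns
 * 0x0101 - the interface revision in bits 8..15 and the channel version
 * in bits 0..7.
 */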
static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf)
{
	if ((vf < 0) || (vf >= dev->persist->num_vfs)) {
		mlx4_err(dev, "Bad vf number:%d (number of activated vf: %d)\n",
			 vf, dev->persist->num_vfs);
		return -EINVAL;
	}

	return vf + 1;
}

int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave)
{
	if (slave < 1 || slave > dev->persist->num_vfs) {
		mlx4_err(dev,
			 "Bad slave number:%d (number of activated slaves: %lu)\n",
			 slave, dev->num_slaves);
		return -EINVAL;
	}
	return slave - 1;
}
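/*
 * The two helpers above are inverses of each other: the PF occupies slave
 * index 0, so VF n maps to slave n + 1 and back. For example, with
 * num_vfs == 2: vf 0 <-> slave 1, vf 1 <-> slave 2, and
 * mlx4_get_slave_indx(dev, 2) fails with -EINVAL.
 */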
void mlx4_cmd_wake_completions(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_context *context;
	int i;

	spin_lock(&priv->cmd.context_lock);
	if (priv->cmd.context) {
		for (i = 0; i < priv->cmd.max_cmds; ++i) {
			context = &priv->cmd.context[i];
			context->fw_status = CMD_STAT_INTERNAL_ERR;
			context->result =
				mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
			complete(&context->done);
		}
	}
	spin_unlock(&priv->cmd.context_lock);
}
struct mlx4_active_ports mlx4_get_active_ports(struct mlx4_dev *dev, int slave)
{
	struct mlx4_active_ports actv_ports;
	int vf;

	bitmap_zero(actv_ports.ports, MLX4_MAX_PORTS);

	if (slave == 0) {
		bitmap_fill(actv_ports.ports, dev->caps.num_ports);
		return actv_ports;
	}

	vf = mlx4_get_vf_indx(dev, slave);
	if (vf < 0)
		return actv_ports;

	bitmap_set(actv_ports.ports, dev->dev_vfs[vf].min_port - 1,
		   min((int)dev->dev_vfs[mlx4_get_vf_indx(dev, slave)].n_ports,
		       dev->caps.num_ports));

	return actv_ports;
}
EXPORT_SYMBOL_GPL(mlx4_get_active_ports);
int mlx4_slave_convert_port(struct mlx4_dev *dev, int slave, int port)
{
	unsigned n;
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
	unsigned m = bitmap_weight(actv_ports.ports, dev->caps.num_ports);

	if (port <= 0 || port > m)
		return -EINVAL;

	n = find_first_bit(actv_ports.ports, dev->caps.num_ports);
	if (port <= n)
		port = n + 1;

	return port;
}
EXPORT_SYMBOL_GPL(mlx4_slave_convert_port);
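/*
 * Worked example (illustrative): a single-port VF assigned only physical
 * port 2 has m == 1 and n == 1 (first set bit), so its slave-relative
 * port 1 converts to physical port 2. mlx4_phys_to_slave_port() below is
 * the inverse: physical port 2 maps back to 2 - 1 == slave port 1.
 */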
int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port)
{
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);

	if (test_bit(port - 1, actv_ports.ports))
		return port -
			find_first_bit(actv_ports.ports, dev->caps.num_ports);

	return -1;
}
EXPORT_SYMBOL_GPL(mlx4_phys_to_slave_port);
struct mlx4_slaves_pport mlx4_phys_to_slaves_pport(struct mlx4_dev *dev,
						   int port)
{
	unsigned i;
	struct mlx4_slaves_pport slaves_pport;

	bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);

	if (port <= 0 || port > dev->caps.num_ports)
		return slaves_pport;

	for (i = 0; i < dev->persist->num_vfs + 1; i++) {
		struct mlx4_active_ports actv_ports =
			mlx4_get_active_ports(dev, i);
		if (test_bit(port - 1, actv_ports.ports))
			set_bit(i, slaves_pport.slaves);
	}

	return slaves_pport;
}
EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport);
struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv(
		struct mlx4_dev *dev,
		const struct mlx4_active_ports *crit_ports)
{
	unsigned i;
	struct mlx4_slaves_pport slaves_pport;

	bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);

	for (i = 0; i < dev->persist->num_vfs + 1; i++) {
		struct mlx4_active_ports actv_ports =
			mlx4_get_active_ports(dev, i);
		if (bitmap_equal(crit_ports->ports, actv_ports.ports,
				 dev->caps.num_ports))
			set_bit(i, slaves_pport.slaves);
	}

	return slaves_pport;
}
EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv);
static int mlx4_slaves_closest_port(struct mlx4_dev *dev, int slave, int port)
{
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
	int min_port = find_first_bit(actv_ports.ports, dev->caps.num_ports)
			+ 1;
	int max_port = min_port +
		bitmap_weight(actv_ports.ports, dev->caps.num_ports);

	if (port < min_port)
		port = min_port;
	else if (port >= max_port)
		port = max_port - 1;

	return port;
}
static int mlx4_set_vport_qos(struct mlx4_priv *priv, int slave, int port,
			      int max_tx_rate)
{
	int i;
	int err;
	struct mlx4_qos_manager *port_qos;
	struct mlx4_dev *dev = &priv->dev;
	struct mlx4_vport_qos_param vpp_qos[MLX4_NUM_UP];

	port_qos = &priv->mfunc.master.qos_ctl[port];
	memset(vpp_qos, 0, sizeof(struct mlx4_vport_qos_param) * MLX4_NUM_UP);

	if (slave > port_qos->num_of_qos_vfs) {
		mlx4_info(dev, "No available VPP resources for this VF\n");
		return -EINVAL;
	}

	/* Query for default QoS values from Vport 0 is needed */
	err = mlx4_SET_VPORT_QOS_get(dev, port, 0, vpp_qos);
	if (err) {
		mlx4_info(dev, "Failed to query Vport 0 QoS values\n");
		return err;
	}

	for (i = 0; i < MLX4_NUM_UP; i++) {
		if (test_bit(i, port_qos->priority_bm) && max_tx_rate) {
			vpp_qos[i].max_avg_bw = max_tx_rate;
			vpp_qos[i].enable = 1;
		} else {
			/* If the user supplied tx_rate == 0, no rate limit
			 * configuration is required, so we leave the value
			 * of max_avg_bw as queried from Vport 0.
			 */
			vpp_qos[i].enable = 0;
		}
	}

	err = mlx4_SET_VPORT_QOS_set(dev, port, slave, vpp_qos);
	if (err) {
		mlx4_info(dev, "Failed to set Vport %d QoS values\n", slave);
		return err;
	}

	return 0;
}
static bool mlx4_is_vf_vst_and_prio_qos(struct mlx4_dev *dev, int port,
					struct mlx4_vport_state *vf_admin)
{
	struct mlx4_qos_manager *info;
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (!mlx4_is_master(dev) ||
	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP))
		return false;

	info = &priv->mfunc.master.qos_ctl[port];

	if (vf_admin->default_vlan != MLX4_VGT &&
	    test_bit(vf_admin->default_qos, info->priority_bm))
		return true;

	return false;
}
static bool mlx4_valid_vf_state_change(struct mlx4_dev *dev, int port,
				       struct mlx4_vport_state *vf_admin,
				       int vlan, int qos)
{
	struct mlx4_vport_state dummy_admin = {0};

	if (!mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin) ||
	    !vf_admin->tx_rate)
		return true;

	dummy_admin.default_qos = qos;
	dummy_admin.default_vlan = vlan;

	/* VF wants to move to another VST state which is valid with the
	 * current rate limit: either a different default vlan in VST or
	 * another supported QoS priority. Otherwise we don't allow the
	 * change while the TX rate is still configured.
	 */
	if (mlx4_is_vf_vst_and_prio_qos(dev, port, &dummy_admin))
		return true;

	mlx4_info(dev, "Cannot change VF state to %s while rate is set\n",
		  (vlan == MLX4_VGT) ? "VGT" : "VST");

	if (vlan != MLX4_VGT)
		mlx4_info(dev, "VST priority %d not supported for QoS\n", qos);

	mlx4_info(dev, "Please set rate to 0 prior to this VF state change\n");

	return false;
}
int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vport_state *s_info;
	int slave;

	if (!mlx4_is_master(dev))
		return -EPROTONOSUPPORT;

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	port = mlx4_slaves_closest_port(dev, slave, port);
	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
	s_info->mac = mac;
	mlx4_info(dev, "default mac on vf %d port %d to %llX will take effect only after vf restart\n",
		  vf, port, s_info->mac);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_mac);
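/*
 * Sketch of how a netdev driver might wire this into .ndo_set_vf_mac
 * (minimal and illustrative; the general shape mirrors mlx4_en, but the
 * private-struct layout and names here are assumptions):
 *
 *	static int en_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
 *	{
 *		struct en_priv *priv = netdev_priv(netdev);	// hypothetical
 *		u64 mac_u64 = mlx4_mac_to_u64(mac);
 *
 *		if (is_multicast_ether_addr(mac))
 *			return -EINVAL;
 *
 *		return mlx4_set_vf_mac(priv->mdev, priv->port, vf, mac_u64);
 *	}
 */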
int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos,
		     __be16 proto)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vport_state *vf_admin;
	struct mlx4_slave_state *slave_state;
	struct mlx4_vport_oper_state *vf_oper;
	int slave;

	if ((!mlx4_is_master(dev)) ||
	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VLAN_CONTROL))
		return -EPROTONOSUPPORT;

	if ((vlan > 4095) || (qos > 7))
		return -EINVAL;

	if (proto == htons(ETH_P_8021AD) &&
	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SVLAN_BY_QP))
		return -EPROTONOSUPPORT;

	if (proto != htons(ETH_P_8021Q) &&
	    proto != htons(ETH_P_8021AD))
		return -EINVAL;

	if ((proto == htons(ETH_P_8021AD)) &&
	    ((vlan == 0) || (vlan == MLX4_VGT)))
		return -EINVAL;

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	slave_state = &priv->mfunc.master.slave_state[slave];
	if ((proto == htons(ETH_P_8021AD)) && (slave_state->active) &&
	    (!slave_state->vst_qinq_supported)) {
		mlx4_err(dev, "vf %d does not support VST QinQ mode\n", vf);
		return -EPROTONOSUPPORT;
	}
	port = mlx4_slaves_closest_port(dev, slave, port);
	vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
	vf_oper = &priv->mfunc.master.vf_oper[slave].vport[port];

	if (!mlx4_valid_vf_state_change(dev, port, vf_admin, vlan, qos))
		return -EPERM;

	if ((0 == vlan) && (0 == qos))
		vf_admin->default_vlan = MLX4_VGT;
	else
		vf_admin->default_vlan = vlan;
	vf_admin->default_qos = qos;
	vf_admin->vlan_proto = proto;

	/* If rate was configured prior to VST, we saved the configured rate
	 * in vf_admin->rate and now, if priority is supported, we enforce
	 * the QoS.
	 */
	if (mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin) &&
	    vf_admin->tx_rate)
		vf_admin->qos_vport = slave;

	/* Try to activate new vf state without restart,
	 * this option is not supported while moving to VST QinQ mode.
	 */
	if ((proto == htons(ETH_P_8021AD) &&
	     vf_oper->state.vlan_proto != proto) ||
	    mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
		mlx4_info(dev,
			  "updating vf %d port %d config will take effect on next VF restart\n",
			  vf, port);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan);
int mlx4_set_vf_rate(struct mlx4_dev *dev, int port, int vf, int min_tx_rate,
		     int max_tx_rate)
{
	int err;
	int slave;
	struct mlx4_vport_state *vf_admin;
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (!mlx4_is_master(dev) ||
	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP))
		return -EPROTONOSUPPORT;

	if (min_tx_rate) {
		mlx4_info(dev, "Minimum BW share not supported\n");
		return -EPROTONOSUPPORT;
	}

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	port = mlx4_slaves_closest_port(dev, slave, port);
	vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];

	err = mlx4_set_vport_qos(priv, slave, port, max_tx_rate);
	if (err) {
		mlx4_info(dev, "vf %d failed to set rate %d\n", vf,
			  max_tx_rate);
		return err;
	}

	vf_admin->tx_rate = max_tx_rate;
	/* if VF is not in supported mode (VST with supported prio),
	 * we do not change vport configuration for its QPs, but save
	 * the rate, so it will be enforced when it moves to supported
	 * mode next time.
	 */
	if (!mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin)) {
		mlx4_info(dev,
			  "rate set for VF %d when not in valid state\n", vf);

		if (vf_admin->default_vlan != MLX4_VGT)
			mlx4_info(dev, "VST priority not supported by QoS\n");
		else
			mlx4_info(dev, "VF in VGT mode (needed VST)\n");

		mlx4_info(dev,
			  "rate %d takes effect when VF moves to valid state\n",
			  max_tx_rate);
		return 0;
	}

	/* If user sets rate 0, assign the default vport for its QPs */
	vf_admin->qos_vport = max_tx_rate ? slave : MLX4_VPP_DEFAULT_VPORT;

	if (priv->mfunc.master.slave_state[slave].active &&
	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)
		mlx4_master_immediate_activate_vlan_qos(priv, slave, port);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_rate);
/* mlx4_get_slave_default_vlan -
 * return true if VST (default vlan)
 * if VST, will return vlan & qos (if not NULL)
 */
bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave,
				 u16 *vlan, u8 *qos)
{
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_priv *priv;

	priv = mlx4_priv(dev);
	port = mlx4_slaves_closest_port(dev, slave, port);
	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];

	if (MLX4_VGT != vp_oper->state.default_vlan) {
		if (vlan)
			*vlan = vp_oper->state.default_vlan;
		if (qos)
			*qos = vp_oper->state.default_qos;
		return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(mlx4_get_slave_default_vlan);
int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vport_state *s_info;
	int slave;

	if ((!mlx4_is_master(dev)) ||
	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FSM))
		return -EPROTONOSUPPORT;

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	port = mlx4_slaves_closest_port(dev, slave, port);
	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
	s_info->spoofchk = setting;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_spoofchk);
int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vport_state *s_info;
	int slave;

	if (!mlx4_is_master(dev))
		return -EPROTONOSUPPORT;

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
	ivf->vf = vf;

	/* need to convert it to a func */
	ivf->mac[0] = ((s_info->mac >> (5*8)) & 0xff);
	ivf->mac[1] = ((s_info->mac >> (4*8)) & 0xff);
	ivf->mac[2] = ((s_info->mac >> (3*8)) & 0xff);
	ivf->mac[3] = ((s_info->mac >> (2*8)) & 0xff);
	ivf->mac[4] = ((s_info->mac >> (1*8)) & 0xff);
	ivf->mac[5] = ((s_info->mac) & 0xff);

	ivf->vlan = s_info->default_vlan;
	ivf->qos = s_info->default_qos;
	ivf->vlan_proto = s_info->vlan_proto;

	if (mlx4_is_vf_vst_and_prio_qos(dev, port, s_info))
		ivf->max_tx_rate = s_info->tx_rate;
	else
		ivf->max_tx_rate = 0;

	ivf->min_tx_rate = 0;
	ivf->spoofchk = s_info->spoofchk;
	ivf->linkstate = s_info->link_state;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_get_vf_config);
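/*
 * The "need to convert it to a func" note above suggests factoring the
 * byte extraction out; a possible helper (illustrative only, not part of
 * the driver) that is equivalent to the six assignments above:
 *
 *	static void mlx4_u64_to_mac(u8 *addr, u64 mac)
 *	{
 *		int i;
 *
 *		// Lowest byte of the u64 is the last MAC octet.
 *		for (i = ETH_ALEN - 1; i >= 0; i--) {
 *			addr[i] = mac & 0xff;
 *			mac >>= 8;
 *		}
 *	}
 */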
int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vport_state *s_info;
	int slave;
	u8 link_stat_event;

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	port = mlx4_slaves_closest_port(dev, slave, port);
	switch (link_state) {
	case IFLA_VF_LINK_STATE_AUTO:
		/* get current link state */
		if (!priv->sense.do_sense_port[port])
			link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
		else
			link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
		break;

	case IFLA_VF_LINK_STATE_ENABLE:
		link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
		break;

	case IFLA_VF_LINK_STATE_DISABLE:
		link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
		break;

	default:
		mlx4_warn(dev, "unknown value for link_state %02x on slave %d port %d\n",
			  link_state, slave, port);
		return -EINVAL;
	}
	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
	s_info->link_state = link_state;

	/* send event */
	mlx4_gen_port_state_change_eqe(dev, slave, port, link_stat_event);

	if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
		mlx4_dbg(dev,
			 "updating vf %d port %d no link state HW enforcement\n",
			 vf, port);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_link_state);
int mlx4_get_counter_stats(struct mlx4_dev *dev, int counter_index,
			   struct mlx4_counter *counter_stats, int reset)
{
	struct mlx4_cmd_mailbox *mailbox = NULL;
	struct mlx4_counter *tmp_counter;
	int err;
	u32 if_stat_in_mod;

	if (!counter_stats)
		return -EINVAL;

	if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
		return 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memset(mailbox->buf, 0, sizeof(struct mlx4_counter));
	if_stat_in_mod = counter_index;
	if (reset)
		if_stat_in_mod |= MLX4_QUERY_IF_STAT_RESET;
	err = mlx4_cmd_box(dev, 0, mailbox->dma,
			   if_stat_in_mod, 0,
			   MLX4_CMD_QUERY_IF_STAT,
			   MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_NATIVE);
	if (err) {
		mlx4_dbg(dev, "%s: failed to read statistics for counter index %d\n",
			 __func__, counter_index);
		goto if_stat_out;
	}
	tmp_counter = (struct mlx4_counter *)mailbox->buf;
	counter_stats->counter_mode = tmp_counter->counter_mode;
	if (counter_stats->counter_mode == 0) {
		counter_stats->rx_frames =
			cpu_to_be64(be64_to_cpu(counter_stats->rx_frames) +
				    be64_to_cpu(tmp_counter->rx_frames));
		counter_stats->tx_frames =
			cpu_to_be64(be64_to_cpu(counter_stats->tx_frames) +
				    be64_to_cpu(tmp_counter->tx_frames));
		counter_stats->rx_bytes =
			cpu_to_be64(be64_to_cpu(counter_stats->rx_bytes) +
				    be64_to_cpu(tmp_counter->rx_bytes));
		counter_stats->tx_bytes =
			cpu_to_be64(be64_to_cpu(counter_stats->tx_bytes) +
				    be64_to_cpu(tmp_counter->tx_bytes));
	}

if_stat_out:
	mlx4_free_cmd_mailbox(dev, mailbox);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_get_counter_stats);
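/*
 * Usage sketch (illustrative): because the routine accumulates into
 * *counter_stats rather than overwriting it, callers zero the struct once
 * and may sum several counter indices into it:
 *
 *	struct mlx4_counter stats;
 *	int err;
 *
 *	memset(&stats, 0, sizeof(stats));
 *	err = mlx4_get_counter_stats(dev, counter_index, &stats, 0);
 *	if (!err)
 *		rx_packets = be64_to_cpu(stats.rx_frames);
 */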
int mlx4_get_vf_stats(struct mlx4_dev *dev, int port, int vf_idx,
		      struct ifla_vf_stats *vf_stats)
{
	struct mlx4_counter tmp_vf_stats;
	int slave;
	int err = 0;

	if (!vf_stats)
		return -EINVAL;

	if (!mlx4_is_master(dev))
		return -EPROTONOSUPPORT;

	slave = mlx4_get_slave_indx(dev, vf_idx);
	if (slave < 0)
		return -EINVAL;

	port = mlx4_slaves_closest_port(dev, slave, port);
	err = mlx4_calc_vf_counters(dev, slave, port, &tmp_vf_stats);
	if (!err && tmp_vf_stats.counter_mode == 0) {
		vf_stats->rx_packets = be64_to_cpu(tmp_vf_stats.rx_frames);
		vf_stats->tx_packets = be64_to_cpu(tmp_vf_stats.tx_frames);
		vf_stats->rx_bytes = be64_to_cpu(tmp_vf_stats.rx_bytes);
		vf_stats->tx_bytes = be64_to_cpu(tmp_vf_stats.tx_bytes);
	}

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_get_vf_stats);
int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (slave < 1 || slave >= dev->num_slaves ||
	    port < 1 || port > MLX4_MAX_PORTS)
		return 0;

	return priv->mfunc.master.vf_oper[slave].smi_enabled[port] ==
		MLX4_VF_SMI_ENABLED;
}
EXPORT_SYMBOL_GPL(mlx4_vf_smi_enabled);
int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (slave == mlx4_master_func_num(dev))
		return 1;

	if (slave < 1 || slave >= dev->num_slaves ||
	    port < 1 || port > MLX4_MAX_PORTS)
		return 0;

	return priv->mfunc.master.vf_admin[slave].enable_smi[port] ==
		MLX4_VF_SMI_ENABLED;
}
EXPORT_SYMBOL_GPL(mlx4_vf_get_enable_smi_admin);
int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
				 int enabled)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
			&priv->dev, slave);
	int min_port = find_first_bit(actv_ports.ports,
				      priv->dev.caps.num_ports) + 1;
	int max_port = min_port - 1 +
		bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);

	if (slave == mlx4_master_func_num(dev))
		return 0;

	if (slave < 1 || slave >= dev->num_slaves ||
	    port < 1 || port > MLX4_MAX_PORTS ||
	    enabled < 0 || enabled > 1)
		return -EINVAL;

	if (min_port == max_port && dev->caps.num_ports > 1) {
		mlx4_info(dev, "SMI access disallowed for single ported VFs\n");
		return -EPROTONOSUPPORT;
	}

	priv->mfunc.master.vf_admin[slave].enable_smi[port] = enabled;
	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_vf_set_enable_smi_admin);