/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2015 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"

#ifdef CONFIG_BNXT_SRIOV
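
/* Common sanity checks for the PF-side VF ndo entry points: the PF must be
 * up, SR-IOV must be enabled, and vf_id must be within range.
 */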
static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
{
	if (bp->state != BNXT_STATE_OPEN) {
		netdev_err(bp->dev, "vf ndo called though PF is down\n");
		return -EINVAL;
	}
	if (!bp->pf.active_vfs) {
		netdev_err(bp->dev, "vf ndo called though sriov is disabled\n");
		return -EINVAL;
	}
	if (vf_id >= bp->pf.max_vfs) {
		netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
		return -EINVAL;
	}
	return 0;
}
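
/* .ndo_set_vf_spoofchk handler: toggle the firmware's source MAC address
 * check for the VF via HWRM_FUNC_CFG and mirror the result in vf->flags.
 */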
int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	bool old_setting = false;
	u32 func_flags;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];
	if (vf->flags & BNXT_VF_SPOOFCHK)
		old_setting = true;
	if (old_setting == setting)
		return 0;

	func_flags = vf->func_flags;
	if (setting)
		func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK;
	else
		func_flags &= ~FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK;
	/* TODO: if the driver supports VLAN filter on guest VLAN,
	 * the spoof check should also include vlan anti-spoofing
	 */
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.vf_id = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(func_flags);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		vf->func_flags = func_flags;
		if (setting)
			vf->flags |= BNXT_VF_SPOOFCHK;
		else
			vf->flags &= ~BNXT_VF_SPOOFCHK;
	}
	return rc;
}
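
/* .ndo_get_vf_config handler: report the cached VF MAC, VLAN, rate limits,
 * spoof-check and link-state settings back through struct ifla_vf_info.
 */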
int bnxt_get_vf_config(struct net_device *dev, int vf_id,
		       struct ifla_vf_info *ivi)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	ivi->vf = vf_id;
	vf = &bp->pf.vf[vf_id];

	memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
	ivi->max_tx_rate = vf->max_tx_rate;
	ivi->min_tx_rate = vf->min_tx_rate;
	ivi->vlan = vf->vlan;
	ivi->qos = vf->flags & BNXT_VF_QOS;
	ivi->spoofchk = vf->flags & BNXT_VF_SPOOFCHK;
	if (!(vf->flags & BNXT_VF_LINK_FORCED))
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->flags & BNXT_VF_LINK_UP)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;

	return 0;
}
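
/* .ndo_set_vf_mac handler: cache the administratively assigned MAC and
 * program it as the VF's default MAC address in firmware.
 */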
int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;
	/* reject bc or mc mac addr, zero mac addr means allow
	 * VF to use its own mac addr
	 */
	if (is_multicast_ether_addr(mac)) {
		netdev_err(dev, "Invalid VF ethernet address\n");
		return -EINVAL;
	}
	vf = &bp->pf.vf[vf_id];

	memcpy(vf->mac_addr, mac, ETH_ALEN);
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.vf_id = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(vf->func_flags);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
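
/* .ndo_set_vf_vlan handler: program a default VLAN for the VF.  User
 * priority (qos) is not supported yet and is rejected.
 */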
int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	u16 vlan_tag;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	/* TODO: needed to implement proper handling of user priority,
	 * currently fail the command if there is valid priority
	 */
	if (vlan_id > 4095 || qos)
		return -EINVAL;

	vf = &bp->pf.vf[vf_id];
	vlan_tag = vlan_id;
	if (vlan_tag == vf->vlan)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.vf_id = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(vf->func_flags);
	req.dflt_vlan = cpu_to_le16(vlan_tag);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		vf->vlan = vlan_tag;
	return rc;
}
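
/* .ndo_set_vf_rate handler: validate the requested min/max TX rates against
 * the PF link speed, then program them via HWRM_FUNC_CFG.
 */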
int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
		   int max_tx_rate)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	u32 pf_link_speed;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];
	pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
	if (max_tx_rate > pf_link_speed) {
		netdev_info(bp->dev, "max tx rate %d exceeds PF link speed for VF %d\n",
			    max_tx_rate, vf_id);
		return -EINVAL;
	}

	if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) {
		netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
			    min_tx_rate, vf_id);
		return -EINVAL;
	}
	if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
		return 0;
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.vf_id = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(vf->func_flags);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
	req.max_bw = cpu_to_le32(max_tx_rate);
	req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
	req.min_bw = cpu_to_le32(min_tx_rate);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		vf->min_tx_rate = min_tx_rate;
		vf->max_tx_rate = max_tx_rate;
	}
	return rc;
}
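
/* .ndo_set_vf_link_state handler: record the requested link state (auto,
 * forced up, or forced down) in vf->flags; the VF sees the result on its
 * next PORT_PHY_QCFG query (see bnxt_vf_set_link() below).
 */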
int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];

	vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED);
	switch (link) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->flags |= BNXT_VF_LINK_UP;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->flags |= BNXT_VF_LINK_FORCED;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->flags |= BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED;
		break;
	default:
		netdev_err(bp->dev, "Invalid link option\n");
		rc = -EINVAL;
		break;
	}
	/* CHIMP TODO: send msg to VF to update new link state */

	return rc;
}

static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
{
	int i;
	struct bnxt_vf_info *vf;

	for (i = 0; i < num_vfs; i++) {
		vf = &bp->pf.vf[i];
		memset(vf, 0, sizeof(*vf));
		vf->flags = BNXT_VF_QOS | BNXT_VF_LINK_UP;
	}
	return 0;
}

static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs)
{
	int i, rc = 0;
	struct bnxt_pf_info *pf = &bp->pf;
	struct hwrm_func_vf_resc_free_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) {
		req.vf_id = cpu_to_le16(i);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static void bnxt_free_vf_resources(struct bnxt *bp)
{
	struct pci_dev *pdev = bp->pdev;
	int i;

	kfree(bp->pf.vf_event_bmap);
	bp->pf.vf_event_bmap = NULL;

	for (i = 0; i < 4; i++) {
		if (bp->pf.hwrm_cmd_req_addr[i]) {
			dma_free_coherent(&pdev->dev, BNXT_PAGE_SIZE,
					  bp->pf.hwrm_cmd_req_addr[i],
					  bp->pf.hwrm_cmd_req_dma_addr[i]);
			bp->pf.hwrm_cmd_req_addr[i] = NULL;
		}
	}

	kfree(bp->pf.vf);
	bp->pf.vf = NULL;
}
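
/* Allocate the per-VF state array plus the DMA-coherent pages that back
 * each VF's forwarded HWRM command buffer; each VF gets one
 * BNXT_HWRM_REQ_MAX_SIZE slice of a page.
 */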
static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs)
{
	struct pci_dev *pdev = bp->pdev;
	u32 nr_pages, size, i, j, k = 0;

	bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL);
	if (!bp->pf.vf)
		return -ENOMEM;

	bnxt_set_vf_attr(bp, num_vfs);

	size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE;
	nr_pages = size / BNXT_PAGE_SIZE;
	if (size & (BNXT_PAGE_SIZE - 1))
		nr_pages++;

	for (i = 0; i < nr_pages; i++) {
		bp->pf.hwrm_cmd_req_addr[i] =
			dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE,
					   &bp->pf.hwrm_cmd_req_dma_addr[i],
					   GFP_KERNEL);

		if (!bp->pf.hwrm_cmd_req_addr[i])
			return -ENOMEM;

		for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) {
			struct bnxt_vf_info *vf = &bp->pf.vf[k];

			vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] +
						j * BNXT_HWRM_REQ_MAX_SIZE;
			vf->hwrm_cmd_req_dma_addr =
				bp->pf.hwrm_cmd_req_dma_addr[i] + j *
				BNXT_HWRM_REQ_MAX_SIZE;
			k++;
		}
	}

	/* Max 128 VF's */
	bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL);
	if (!bp->pf.vf_event_bmap)
		return -ENOMEM;

	bp->pf.hwrm_cmd_req_pages = nr_pages;
	return 0;
}
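
/* Register the VF command buffer pages with firmware so that HWRM requests
 * issued by VFs are placed in the PF's buffers for inspection.
 */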
static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
{
	struct hwrm_func_buf_rgtr_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1);

	req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
	req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
	req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
	req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
	req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
	req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
	req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

/* Only called by the PF to reserve resources for VFs */
static int bnxt_hwrm_func_cfg(struct bnxt *bp, int *num_vfs)
{
	u32 rc = 0, mtu, i;
	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
	struct hwrm_func_cfg_input req = {0};
	struct bnxt_pf_info *pf = &bp->pf;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);

	/* Remaining rings are distributed equally amongst VFs for now */
	/* TODO: the following workaround is needed to restrict the total
	 * number of vf_cp_rings so that it does not exceed the number of HW
	 * ring groups.  This WA should be removed once the new HWRM provides
	 * the HW ring groups capability in hwrm_func_qcap.
	 */
	vf_cp_rings = min_t(u16, bp->pf.max_cp_rings, bp->pf.max_stat_ctxs);
	vf_cp_rings = (vf_cp_rings - bp->cp_nr_rings) / *num_vfs;
	/* TODO: restore this logic below once the WA above is removed */
	/* vf_cp_rings = (bp->pf.max_cp_rings - bp->cp_nr_rings) / *num_vfs; */
	vf_stat_ctx = (bp->pf.max_stat_ctxs - bp->num_stat_ctxs) / *num_vfs;
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		vf_rx_rings = (bp->pf.max_rx_rings - bp->rx_nr_rings * 2) /
			      *num_vfs;
	else
		vf_rx_rings = (bp->pf.max_rx_rings - bp->rx_nr_rings) /
			      *num_vfs;
	vf_tx_rings = (bp->pf.max_tx_rings - bp->tx_nr_rings) / *num_vfs;

	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
				  FUNC_CFG_REQ_ENABLES_MRU |
				  FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_VNICS);

	mtu = bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	req.mru = cpu_to_le16(mtu);
	req.mtu = cpu_to_le16(mtu);

	req.num_rsscos_ctxs = cpu_to_le16(1);
	req.num_cmpl_rings = cpu_to_le16(vf_cp_rings);
	req.num_tx_rings = cpu_to_le16(vf_tx_rings);
	req.num_rx_rings = cpu_to_le16(vf_rx_rings);
	req.num_l2_ctxs = cpu_to_le16(4);
	vf_vnics = 1;

	req.num_vnics = cpu_to_le16(vf_vnics);
	/* FIXME spec currently uses 1 bit for stats ctx */
	req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < *num_vfs; i++) {
		req.vf_id = cpu_to_le16(pf->first_vf_id + i);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
		bp->pf.active_vfs = i + 1;
		bp->pf.vf[i].fw_fid = le16_to_cpu(req.vf_id);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	if (!rc) {
		bp->pf.max_pf_tx_rings = bp->tx_nr_rings;
		if (bp->flags & BNXT_FLAG_AGG_RINGS)
			bp->pf.max_pf_rx_rings = bp->rx_nr_rings * 2;
		else
			bp->pf.max_pf_rx_rings = bp->rx_nr_rings;
	}
	return rc;
}
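
/* Enable SR-IOV: trim the VF count to what spare PF resources allow, then
 * allocate VF state, reserve firmware resources, register the VF command
 * buffers, and finally enable the VFs in PCI config space.
 */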
static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
{
	int rc = 0, vfs_supported;
	int min_rx_rings, min_tx_rings, min_rss_ctxs;
	int tx_ok = 0, rx_ok = 0, rss_ok = 0;

	/* Check if we can enable the requested num of vf's.  At a minimum
	 * we require 1 RX and 1 TX ring for each VF.  In this minimum conf
	 * features like TPA will not be available.
	 */
	vfs_supported = *num_vfs;

	while (vfs_supported) {
		min_rx_rings = vfs_supported;
		min_tx_rings = vfs_supported;
		min_rss_ctxs = vfs_supported;

		if (bp->flags & BNXT_FLAG_AGG_RINGS) {
			if (bp->pf.max_rx_rings - bp->rx_nr_rings * 2 >=
			    min_rx_rings)
				rx_ok = 1;
		} else {
			if (bp->pf.max_rx_rings - bp->rx_nr_rings >=
			    min_rx_rings)
				rx_ok = 1;
		}

		if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings)
			tx_ok = 1;

		if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs)
			rss_ok = 1;

		if (tx_ok && rx_ok && rss_ok)
			break;

		vfs_supported--;
	}

	if (!vfs_supported) {
		netdev_err(bp->dev, "Cannot enable VF's as all resources are used by PF\n");
		return -EINVAL;
	}

	if (vfs_supported != *num_vfs) {
		netdev_info(bp->dev, "Requested VFs %d, can enable %d\n",
			    *num_vfs, vfs_supported);
		*num_vfs = vfs_supported;
	}

	rc = bnxt_alloc_vf_resources(bp, *num_vfs);
	if (rc)
		goto err_out1;

	/* Reserve resources for VFs */
	rc = bnxt_hwrm_func_cfg(bp, num_vfs);
	if (rc)
		goto err_out2;

	/* Register buffers for VFs */
	rc = bnxt_hwrm_func_buf_rgtr(bp);
	if (rc)
		goto err_out2;

	rc = pci_enable_sriov(bp->pdev, *num_vfs);
	if (rc)
		goto err_out2;

	return 0;

err_out2:
	/* Free the resources reserved for various VF's */
	bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);

err_out1:
	bnxt_free_vf_resources(bp);

	return rc;
}

void bnxt_sriov_disable(struct bnxt *bp)
{
	u16 num_vfs = pci_num_vf(bp->pdev);

	if (!num_vfs)
		return;

	if (pci_vfs_assigned(bp->pdev)) {
		netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
			    num_vfs);
	} else {
		pci_disable_sriov(bp->pdev);
		/* Free the HW resources reserved for various VF's */
		bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
	}

	bnxt_free_vf_resources(bp);

	bp->pf.active_vfs = 0;
	bp->pf.max_pf_rx_rings = bp->pf.max_rx_rings;
	bp->pf.max_pf_tx_rings = bp->pf.max_tx_rings;
}
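
/* PCI sriov_configure hook, invoked when the admin writes to the device's
 * sriov_numvfs sysfs attribute.  Returns the number of VFs enabled.
 */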
int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);

	if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
		netdev_warn(dev, "Not allowing SRIOV if the irq mode is not MSIX\n");
		return 0;
	}

	rtnl_lock();
	if (!netif_running(dev)) {
		netdev_warn(dev, "Reject SRIOV config request since interface is down!\n");
		rtnl_unlock();
		return 0;
	}
	bp->sriov_cfg = true;
	rtnl_unlock();

	if (pci_vfs_assigned(bp->pdev)) {
		netdev_warn(dev, "Unable to configure SRIOV since some VFs are assigned to VMs.\n");
		num_vfs = 0;
		goto sriov_cfg_exit;
	}

	/* Check if the enabled VF count is the same as requested */
	if (num_vfs && num_vfs == bp->pf.active_vfs)
		goto sriov_cfg_exit;

	/* if there are previously existing VFs, clean them up */
	bnxt_sriov_disable(bp);
	if (!num_vfs)
		goto sriov_cfg_exit;

	bnxt_sriov_enable(bp, &num_vfs);

sriov_cfg_exit:
	bp->sriov_cfg = false;
	wake_up(&bp->sriov_cfg_wait);

	return num_vfs;
}
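
/* Forward a prepared response to a VF-originated HWRM request; firmware
 * delivers it to the VF's response buffer at encap_resp_addr.
 */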
static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
			      void *encap_resp, __le64 encap_resp_addr,
			      __le16 encap_resp_cpr, u32 msg_size)
{
	int rc = 0;
	struct hwrm_fwd_resp_input req = {0};
	struct hwrm_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1);

	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_len = cpu_to_le16(msg_size);
	req.encap_resp_addr = encap_resp_addr;
	req.encap_resp_cmpl_ring = encap_resp_cpr;
	memcpy(req.encap_resp, encap_resp, msg_size);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);

	if (rc) {
		netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
		goto fwd_resp_exit;
	}

	if (resp->error_code) {
		netdev_err(bp->dev, "hwrm_fwd_resp error %d\n",
			   resp->error_code);
		rc = -1;
	}

fwd_resp_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
				  u32 msg_size)
{
	int rc = 0;
	struct hwrm_reject_fwd_resp_input req = {0};
	struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);
	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);

	if (rc) {
		netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
		goto fwd_err_resp_exit;
	}

	if (resp->error_code) {
		netdev_err(bp->dev, "hwrm_fwd_err_resp error %d\n",
			   resp->error_code);
		rc = -1;
	}

fwd_err_resp_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
				   u32 msg_size)
{
	int rc = 0;
	struct hwrm_exec_fwd_resp_input req = {0};
	struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);
	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);

	if (rc) {
		netdev_err(bp->dev, "hwrm_exec_fw_resp failed. rc:%d\n", rc);
		goto exec_fwd_resp_exit;
	}

	if (resp->error_code) {
		netdev_err(bp->dev, "hwrm_exec_fw_resp error %d\n",
			   resp->error_code);
		rc = -1;
	}

exec_fwd_resp_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
	struct hwrm_cfa_l2_filter_alloc_input *req =
		(struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;

	/* Allow the filter only if no MAC has been administratively set, or
	 * if the requested MAC matches the administratively set one.
	 */
	if (!is_valid_ether_addr(vf->mac_addr) ||
	    ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
		return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);

	return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
}
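
/* Handle a PORT_PHY_QCFG query from a VF.  If the VF link state is not
 * forced, forward the query to firmware; otherwise synthesize a response
 * from the PF's cached PHY state with the link forced up or down.
 */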
static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	int rc = 0;

	if (!(vf->flags & BNXT_VF_LINK_FORCED)) {
		/* real link */
		rc = bnxt_hwrm_exec_fwd_resp(
			bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
	} else {
		struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
		struct hwrm_port_phy_qcfg_input *phy_qcfg_req;

		phy_qcfg_req =
		(struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr;
		mutex_lock(&bp->hwrm_cmd_lock);
		memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp,
		       sizeof(phy_qcfg_resp));
		mutex_unlock(&bp->hwrm_cmd_lock);
		phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;

		if (vf->flags & BNXT_VF_LINK_UP) {
			/* if physical link is down, force link up on VF */
			if (phy_qcfg_resp.link ==
			    PORT_PHY_QCFG_RESP_LINK_NO_LINK) {
				phy_qcfg_resp.link =
					PORT_PHY_QCFG_RESP_LINK_LINK;
				if (phy_qcfg_resp.auto_link_speed)
					phy_qcfg_resp.link_speed =
						phy_qcfg_resp.auto_link_speed;
				else
					phy_qcfg_resp.link_speed =
						phy_qcfg_resp.force_link_speed;
				phy_qcfg_resp.duplex =
					PORT_PHY_QCFG_RESP_DUPLEX_FULL;
				phy_qcfg_resp.pause =
					(PORT_PHY_QCFG_RESP_PAUSE_TX |
					 PORT_PHY_QCFG_RESP_PAUSE_RX);
			}
		} else {
			/* force link down */
			phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK;
			phy_qcfg_resp.link_speed = 0;
			phy_qcfg_resp.duplex = PORT_PHY_QCFG_RESP_DUPLEX_HALF;
			phy_qcfg_resp.pause = 0;
		}
		rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp,
					phy_qcfg_req->resp_addr,
					phy_qcfg_req->cmpl_ring,
					sizeof(phy_qcfg_resp));
	}
	return rc;
}

static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	int rc = 0;
	struct hwrm_cmd_req_hdr *encap_req = vf->hwrm_cmd_req_addr;
	u32 req_type = le32_to_cpu(encap_req->cmpl_ring_req_type) & 0xffff;

	switch (req_type) {
	case HWRM_CFA_L2_FILTER_ALLOC:
		rc = bnxt_vf_validate_set_mac(bp, vf);
		break;
	case HWRM_FUNC_CFG:
		/* TODO Validate if VF is allowed to change mac address,
		 * mtu, num of rings etc
		 */
		rc = bnxt_hwrm_exec_fwd_resp(
			bp, vf, sizeof(struct hwrm_func_cfg_input));
		break;
	case HWRM_PORT_PHY_QCFG:
		rc = bnxt_vf_set_link(bp, vf);
		break;
	default:
		break;
	}
	return rc;
}
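
/* Drain pending VF commands: walk the VF event bitmap, clear each set bit,
 * and validate and forward that VF's request.
 */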
void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
	u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id;

	/* Scan through VF's and process commands */
	while (1) {
		vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i);
		if (vf_id >= active_vfs)
			break;

		clear_bit(vf_id, bp->pf.vf_event_bmap);
		bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]);
		i = vf_id + 1;
	}
}

void bnxt_update_vf_mac(struct bnxt *bp)
{
	struct hwrm_func_qcaps_input req = {0};
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
	req.fid = cpu_to_le16(0xffff);

	mutex_lock(&bp->hwrm_cmd_lock);
	if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
		goto update_vf_mac_exit;

	if (!is_valid_ether_addr(resp->perm_mac_address))
		goto update_vf_mac_exit;

	if (ether_addr_equal(resp->perm_mac_address, bp->vf.mac_addr))
		goto update_vf_mac_exit;

	memcpy(bp->vf.mac_addr, resp->perm_mac_address, ETH_ALEN);
	memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
update_vf_mac_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
}

#else

void bnxt_sriov_disable(struct bnxt *bp)
{
}

void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
	netdev_err(bp->dev, "Invalid VF message received when SRIOV is not enabled\n");
}

void bnxt_update_vf_mac(struct bnxt *bp)
{
}
#endif