/* bnx2x_sriov.c: Broadcom Everest network driver.
 *
 * Copyright 2009-2013 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Shmulik Ravid <shmulikr@broadcom.com>
 *	       Ariel Elior <ariele@broadcom.com>
 */
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sp.h"
#include <linux/crc32.h>
#include <linux/if_vlan.h>
/* General service functions */
static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
				  u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		enable);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
				 u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}
int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	int idx;

	for_each_vf(bp, idx)
		if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
			break;
	return idx;
}
struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
	return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
}
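
/* Ack a VF status block on the VF's behalf: the VF's IGU doorbell pages
 * are not mapped in the PF, so the producer update is posted through the
 * IGU command registers over GRC, with the target VF encoded in the FID
 * field of the control word (func_encode below).
 */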
static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
				u8 igu_sb_id, u8 segment, u16 index, u8 op,
				u8 update)
{
	/* acking a VF sb through the PF - use the GRC */
	u32 ctl;
	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
	u32 func_encode = vf->abs_vfid;
	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id;
	struct igu_regular cmd_data = {0};

	cmd_data.sb_id_and_flags =
			((index << IGU_REGULAR_SB_INDEX_SHIFT) |
			 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
			 (update << IGU_REGULAR_BUPDATE_SHIFT) |
			 (op << IGU_REGULAR_ENABLE_INT_SHIFT));

	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT		|
	      func_encode << IGU_CTRL_REG_FID_SHIFT		|
	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   cmd_data.sb_id_and_flags, igu_addr_data);
	REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
	mmiowb();
	barrier();

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   ctl, igu_addr_ctl);
	REG_WR(bp, igu_addr_ctl, ctl);
	mmiowb();
	barrier();
}
static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp,
				      struct bnx2x_virtf *vf,
				      bool print_err)
{
	if (!bnx2x_leading_vfq(vf, sp_initialized)) {
		if (print_err)
			BNX2X_ERR("Slowpath objects not yet initialized!\n");
		else
			DP(BNX2X_MSG_IOV, "Slowpath objects not yet initialized!\n");
		return false;
	}
	return true;
}
/* VFOP operations states */
void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_queue_init_params *init_params,
			      struct bnx2x_queue_setup_params *setup_params,
			      u16 q_idx, u16 sb_idx)
{
	DP(BNX2X_MSG_IOV,
	   "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d",
	   vf->abs_vfid,
	   q_idx,
	   sb_idx,
	   init_params->tx.sb_cq_index,
	   init_params->tx.hc_rate,
	   setup_params->flags,
	   setup_params->txq_params.traffic_type);
}

void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_queue_init_params *init_params,
			      struct bnx2x_queue_setup_params *setup_params,
			      u16 q_idx, u16 sb_idx)
{
	struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params;

	DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n"
	   "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n",
	   vf->abs_vfid,
	   q_idx,
	   sb_idx,
	   init_params->rx.sb_cq_index,
	   init_params->rx.hc_rate,
	   setup_params->gen_params.mtu,
	   rxq_params->buf_sz,
	   rxq_params->sge_buf_sz,
	   rxq_params->max_sges_pkt,
	   rxq_params->tpa_agg_sz,
	   setup_params->flags,
	   rxq_params->drop_flags,
	   rxq_params->cache_line_log);
}
void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
			   struct bnx2x_virtf *vf,
			   struct bnx2x_vf_queue *q,
			   struct bnx2x_vf_queue_construct_params *p,
			   unsigned long q_type)
{
	struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
	struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup;

	/* Enable host coalescing in the transition to INIT state */
	if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags))
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags);

	if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags))
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags);

	init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
	init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);

	init_p->cxts[0] = q->cxt;

	/* Setup-op general parameters */
	setup_p->gen_params.spcl_id = vf->sp_cl_id;
	setup_p->gen_params.stat_id = vfq_stat_id(vf, q);

	/* Setup-op pause params:
	 * Nothing to do, the pause thresholds are set by default to 0 which
	 * effectively turns off the feature for this queue. We don't want
	 * one queue (VF) to interfere with another queue (another VF)
	 */
	if (vf->cfg_flags & VF_CFG_FW_FC)
		BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n",
			  vf->abs_vfid);

	/* Setup-op flags:
	 * collect statistics, zero statistics, local-switching, security,
	 * OV for Flex10, RSS and MCAST for leading
	 */
	if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags))
		__set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags);

	/* for VFs, enable tx switching, bd coherency, and mac address
	 * anti-spoofing
	 */
	__set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags);
	__set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
	__set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);

	/* Setup-op rx parameters */
	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
		struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;

		rxq_p->cl_qzone_id = vfq_qzone_id(vf, q);
		rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx);
		rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid);

		if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags))
			rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES;
	}

	/* Setup-op tx parameters */
	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) {
		setup_p->txq_params.tss_leading_cl_id = vf->leading_rss;
		setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
	}
}
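
/* Construct a VF queue: an INIT ramrod takes the queue object out of
 * RESET, then a SETUP ramrod applies the parameters prepared by
 * bnx2x_vfop_qctor_prep(). RAMROD_COMP_WAIT makes both steps synchronous.
 */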
static int bnx2x_vf_queue_create(struct bnx2x *bp,
				 struct bnx2x_virtf *vf, int qid,
				 struct bnx2x_vf_queue_construct_params *qctor)
{
	struct bnx2x_queue_state_params *q_params;
	int rc = 0;

	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);

	/* Prepare ramrod information */
	q_params = &qctor->qstate;
	q_params->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
	set_bit(RAMROD_COMP_WAIT, &q_params->ramrod_flags);

	if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
	    BNX2X_Q_LOGICAL_STATE_ACTIVE) {
		DP(BNX2X_MSG_IOV, "queue was already up. Aborting gracefully\n");
		goto out;
	}

	/* Run Queue 'construction' ramrods */
	q_params->cmd = BNX2X_Q_CMD_INIT;
	rc = bnx2x_queue_state_change(bp, q_params);
	if (rc)
		goto out;

	memcpy(&q_params->params.setup, &qctor->prep_qsetup,
	       sizeof(struct bnx2x_queue_setup_params));
	q_params->cmd = BNX2X_Q_CMD_SETUP;
	rc = bnx2x_queue_state_change(bp, q_params);
	if (rc)
		goto out;

	/* enable interrupts */
	bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, bnx2x_vfq(vf, qid, sb_idx)),
			    USTORM_ID, 0, IGU_INT_ENABLE, 0);
out:
	return rc;
}
static int bnx2x_vf_queue_destroy(struct bnx2x *bp, struct bnx2x_virtf *vf,
				  int qid)
{
	enum bnx2x_queue_cmd cmds[] = {BNX2X_Q_CMD_HALT,
				       BNX2X_Q_CMD_TERMINATE,
				       BNX2X_Q_CMD_CFC_DEL};
	struct bnx2x_queue_state_params q_params;
	int rc, i;

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	/* Prepare ramrod information */
	memset(&q_params, 0, sizeof(struct bnx2x_queue_state_params));
	q_params.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
	set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);

	if (bnx2x_get_q_logical_state(bp, q_params.q_obj) ==
	    BNX2X_Q_LOGICAL_STATE_STOPPED) {
		DP(BNX2X_MSG_IOV, "queue was already stopped. Aborting gracefully\n");
		goto out;
	}

	/* Run Queue 'destruction' ramrods */
	for (i = 0; i < ARRAY_SIZE(cmds); i++) {
		q_params.cmd = cmds[i];
		rc = bnx2x_queue_state_change(bp, &q_params);
		if (rc) {
			BNX2X_ERR("Failed to run Queue command %d\n", cmds[i]);
			return rc;
		}
	}
out:
	/* Clean Context */
	if (bnx2x_vfq(vf, qid, cxt)) {
		bnx2x_vfq(vf, qid, cxt)->ustorm_ag_context.cdu_usage = 0;
		bnx2x_vfq(vf, qid, cxt)->xstorm_ag_context.cdu_reserved = 0;
	}

	return 0;
}
static void
bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
{
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (vf) {
		/* the first igu entry belonging to VFs of this PF */
		if (!BP_VFDB(bp)->first_vf_igu_entry)
			BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id;

		/* the first igu entry belonging to this VF */
		if (!vf_sb_count(vf))
			vf->igu_base_id = igu_sb_id;

		++vf_sb_count(vf);
		++vf->sb_count;
	}
	BP_VFDB(bp)->vf_sbs_pool++;
}
static inline void bnx2x_vf_vlan_credit(struct bnx2x *bp,
					struct bnx2x_vlan_mac_obj *obj,
					atomic_t *counter)
{
	struct list_head *pos;
	int read_lock;
	int cnt = 0;

	read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
	if (read_lock)
		DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n");

	list_for_each(pos, &obj->head)
		cnt++;

	if (!read_lock)
		bnx2x_vlan_mac_h_read_unlock(bp, obj);

	atomic_set(counter, cnt);
}
static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf,
				   int qid, bool drv_only, bool mac)
{
	struct bnx2x_vlan_mac_ramrod_params ramrod;
	int rc;

	DP(BNX2X_MSG_IOV, "vf[%d] - deleting all %s\n", vf->abs_vfid,
	   mac ? "MACs" : "VLANs");

	/* Prepare ramrod params */
	memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
	if (mac) {
		set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
	} else {
		set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
			&ramrod.user_req.vlan_mac_flags);
		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
	}
	ramrod.user_req.cmd = BNX2X_VLAN_MAC_DEL;

	set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
	if (drv_only)
		set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
	else
		set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);

	/* Start deleting */
	rc = ramrod.vlan_mac_obj->delete_all(bp,
					     ramrod.vlan_mac_obj,
					     &ramrod.user_req.vlan_mac_flags,
					     &ramrod.ramrod_flags);
	if (rc) {
		BNX2X_ERR("Failed to delete all %s\n",
			  mac ? "MACs" : "VLANs");
		return rc;
	}

	/* Clear the vlan counters */
	if (!mac)
		atomic_set(&bnx2x_vfq(vf, qid, vlan_count), 0);

	return 0;
}
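
/* Configure a single MAC/VLAN filter for a VF queue. VLAN rules are
 * flagged BNX2X_DONT_CONSUME_CAM_CREDIT, so instead of CAM credit the PF
 * enforces a per-VF quota by checking the queue's vlan_count against
 * vf_vlan_rules_cnt(vf) before allowing an add.
 */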
static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
				    struct bnx2x_virtf *vf, int qid,
				    struct bnx2x_vf_mac_vlan_filter *filter,
				    bool drv_only)
{
	struct bnx2x_vlan_mac_ramrod_params ramrod;
	int rc;

	DP(BNX2X_MSG_IOV, "vf[%d] - %s a %s filter\n",
	   vf->abs_vfid, filter->add ? "Adding" : "Deleting",
	   filter->type == BNX2X_VF_FILTER_MAC ? "MAC" : "VLAN");

	/* Prepare ramrod params */
	memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
	if (filter->type == BNX2X_VF_FILTER_VLAN) {
		set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
			&ramrod.user_req.vlan_mac_flags);
		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
		ramrod.user_req.u.vlan.vlan = filter->vid;
	} else {
		set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
		memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN);
	}
	ramrod.user_req.cmd = filter->add ? BNX2X_VLAN_MAC_ADD :
					    BNX2X_VLAN_MAC_DEL;

	/* Verify there are available vlan credits */
	if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN &&
	    (atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >=
	     vf_vlan_rules_cnt(vf))) {
		BNX2X_ERR("No credits for vlan\n");
		return -ENOMEM;
	}

	set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
	if (drv_only)
		set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
	else
		set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);

	/* Add/Remove the filter */
	rc = bnx2x_config_vlan_mac(bp, &ramrod);
	if (rc && rc != -EEXIST) {
		BNX2X_ERR("Failed to %s %s\n",
			  filter->add ? "add" : "delete",
			  filter->type == BNX2X_VF_FILTER_MAC ? "MAC" :
								"VLAN");
		return rc;
	}

	/* Update the vlan counters */
	if (filter->type == BNX2X_VF_FILTER_VLAN)
		bnx2x_vf_vlan_credit(bp, ramrod.vlan_mac_obj,
				     &bnx2x_vfq(vf, qid, vlan_count));

	return 0;
}
int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf,
				  struct bnx2x_vf_mac_vlan_filters *filters,
				  int qid, bool drv_only)
{
	int rc = 0, i;

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
		return -EINVAL;

	/* Prepare ramrod params */
	for (i = 0; i < filters->count; i++) {
		rc = bnx2x_vf_mac_vlan_config(bp, vf, qid,
					      &filters->filters[i], drv_only);
		if (rc)
			break;
	}

	/* Rollback if needed */
	if (i != filters->count) {
		BNX2X_ERR("Managed only %d/%d filters - rolling back\n",
			  i, filters->count);
		while (--i >= 0) {
			filters->filters[i].add = !filters->filters[i].add;
			bnx2x_vf_mac_vlan_config(bp, vf, qid,
						 &filters->filters[i],
						 drv_only);
		}
	}

	/* It's our responsibility to free the filters */
	kfree(filters);

	return rc;
}
int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid,
			 struct bnx2x_vf_queue_construct_params *qctor)
{
	int rc;

	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);

	rc = bnx2x_vf_queue_create(bp, vf, qid, qctor);
	if (rc)
		goto op_err;

	/* Configure vlan0 for leading queue */
	if (!qid) {
		struct bnx2x_vf_mac_vlan_filter filter;

		memset(&filter, 0, sizeof(struct bnx2x_vf_mac_vlan_filter));
		filter.type = BNX2X_VF_FILTER_VLAN;
		filter.add = true;
		filter.vid = 0;
		rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, &filter, false);
		if (rc)
			goto op_err;
	}

	/* Schedule the configuration of any pending vlan filters */
	vf->cfg_flags |= VF_CFG_VLAN;
	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN,
			       BNX2X_MSG_IOV);
	return 0;
op_err:
	BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
	return rc;
}
static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      int qid)
{
	int rc;

	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);

	/* If needed, clean the filtering data base */
	if ((qid == LEADING_IDX) &&
	    bnx2x_validate_vf_sp_objs(bp, vf, false)) {
		rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, false);
		if (rc)
			goto op_err;
		rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, true);
		if (rc)
			goto op_err;
	}

	/* Terminate queue */
	if (bnx2x_vfq(vf, qid, sp_obj).state != BNX2X_Q_STATE_RESET) {
		struct bnx2x_queue_state_params qstate;

		memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params));
		qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
		qstate.q_obj->state = BNX2X_Q_STATE_STOPPED;
		qstate.cmd = BNX2X_Q_CMD_TERMINATE;
		set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);
		rc = bnx2x_queue_state_change(bp, &qstate);
		if (rc)
			goto op_err;
	}

	return 0;
op_err:
	BNX2X_ERR("vf[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
	return rc;
}
int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
		   bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only)
{
	struct bnx2x_mcast_list_elem *mc = NULL;
	struct bnx2x_mcast_ramrod_params mcast;
	int rc, i;

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	/* Prepare Multicast command */
	memset(&mcast, 0, sizeof(struct bnx2x_mcast_ramrod_params));
	mcast.mcast_obj = &vf->mcast_obj;
	if (drv_only)
		set_bit(RAMROD_DRV_CLR_ONLY, &mcast.ramrod_flags);
	else
		set_bit(RAMROD_COMP_WAIT, &mcast.ramrod_flags);
	if (mc_num) {
		mc = kzalloc(mc_num * sizeof(struct bnx2x_mcast_list_elem),
			     GFP_KERNEL);
		if (!mc) {
			BNX2X_ERR("Cannot configure multicasts due to lack of memory\n");
			return -ENOMEM;
		}
	}

	/* clear existing mcasts */
	mcast.mcast_list_len = vf->mcast_list_len;
	vf->mcast_list_len = mc_num;
	rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL);
	if (rc) {
		BNX2X_ERR("Failed to remove multicasts\n");
		kfree(mc);
		return rc;
	}

	/* update mcast list on the ramrod params */
	if (mc_num) {
		INIT_LIST_HEAD(&mcast.mcast_list);
		for (i = 0; i < mc_num; i++) {
			mc[i].mac = mcasts[i];
			list_add_tail(&mc[i].link,
				      &mcast.mcast_list);
		}

		/* add new mcasts */
		mcast.mcast_list_len = mc_num;
		rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD);
		if (rc)
			BNX2X_ERR("Failed to add multicasts\n");
		kfree(mc);
	}

	return rc;
}
static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
				  struct bnx2x_rx_mode_ramrod_params *ramrod,
				  struct bnx2x_virtf *vf,
				  unsigned long accept_flags)
{
	struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);

	memset(ramrod, 0, sizeof(*ramrod));
	ramrod->cid = vfq->cid;
	ramrod->cl_id = vfq_cl_id(vf, vfq);
	ramrod->rx_mode_obj = &bp->rx_mode_obj;
	ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
	ramrod->rx_accept_flags = accept_flags;
	ramrod->tx_accept_flags = accept_flags;
	ramrod->pstate = &vf->filter_state;
	ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;

	set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
	set_bit(RAMROD_RX, &ramrod->ramrod_flags);
	set_bit(RAMROD_TX, &ramrod->ramrod_flags);

	ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
	ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
}
int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf,
		    int qid, unsigned long accept_flags)
{
	struct bnx2x_rx_mode_ramrod_params ramrod;

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	bnx2x_vf_prep_rx_mode(bp, qid, &ramrod, vf, accept_flags);
	set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
	vfq_get(vf, qid)->accept_flags = ramrod.rx_accept_flags;
	return bnx2x_config_rx_mode(bp, &ramrod);
}
int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid)
{
	int rc;

	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);

	/* Remove all classification configuration for leading queue */
	if (qid == LEADING_IDX) {
		rc = bnx2x_vf_rxmode(bp, vf, qid, 0);
		if (rc)
			goto op_err;

		/* Remove filtering if feasible */
		if (bnx2x_validate_vf_sp_objs(bp, vf, true)) {
			rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
						     false, false);
			if (rc)
				goto op_err;
			rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
						     false, true);
			if (rc)
				goto op_err;
			rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false);
			if (rc)
				goto op_err;
		}
	}

	/* Destroy queue */
	rc = bnx2x_vf_queue_destroy(bp, vf, qid);
	if (rc)
		goto op_err;
	return rc;
op_err:
	BNX2X_ERR("vf[%d:%d] error: rc %d\n",
		  vf->abs_vfid, qid, rc);
	return rc;
}
/* VF enable primitives
 * when pretend is required the caller is responsible
 * for calling pretend prior to calling these routines
 */

/* internal vf enable - until vf is enabled internally all transactions
 * are blocked. This routine should always be called last with pretend.
 */
static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
{
	REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
}
/* clears vf error in all semi blocks */
static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
}
static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
	u32 was_err_reg = 0;

	switch (was_err_group) {
	case 0:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
		break;
	case 1:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
		break;
	case 2:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
		break;
	case 3:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
		break;
	}
	REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
}
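
/* A note on the pretend mechanism used below: bnx2x_pretend_func() makes
 * this PF's subsequent GRC accesses execute with the identity of the given
 * HW function (here, a VF), and every pretend call is paired with a second
 * call that restores BP_ABS_FUNC(bp).
 */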
static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i;
	u32 val;

	/* Set VF masks and configuration - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));

	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);

	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
	val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
	if (vf->cfg_flags & VF_CFG_INT_SIMD)
		val |= IGU_VF_CONF_SINGLE_ISR_EN;
	val &= ~IGU_VF_CONF_PARENT_MASK;
	val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT;
	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);

	DP(BNX2X_MSG_IOV,
	   "value in IGU_REG_VF_CONFIGURATION of vf %d after write is 0x%08x\n",
	   vf->abs_vfid, val);

	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf_sb_count(vf); i++) {
		u8 igu_sb_id = vf_igu_sb(vf, i);

		/* zero prod memory */
		REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0);

		/* clear sb state machine */
		bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id,
				       false /* VF */);

		/* disable + update */
		bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0,
				    IGU_INT_DISABLE, 1);
	}
}
void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
{
	/* set the VF-PF association in the FW */
	storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
	storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);

	/* clear vf errors */
	bnx2x_vf_semi_clear_err(bp, abs_vfid);
	bnx2x_vf_pglue_clear_err(bp, abs_vfid);

	/* internal vf-enable - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
	DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
	bnx2x_vf_enable_internal(bp, true);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}
static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	/* Reset vf in IGU; interrupts are still disabled */
	bnx2x_vf_igu_reset(bp, vf);

	/* pretend to enable the vf with the PBF */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	REG_WR(bp, PBF_REG_DISABLE_VF, 0);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}
static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
{
	struct pci_dev *dev;
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (!vf)
		return false;

	dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
	if (dev)
		return bnx2x_is_pcie_pending(dev);
	return false;
}
int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
{
	/* Verify no pending pci transactions */
	if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
		BNX2X_ERR("PCIE Transactions still pending\n");

	return 0;
}
/* must be called after the number of PF queues and the number of VFs are
 * both known
 */
static void
bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct vf_pf_resc_request *resc = &vf->alloc_resc;
	u16 vlan_count = 0;

	/* will be set only during VF-ACQUIRE */
	resc->num_rxqs = 0;
	resc->num_txqs = 0;

	/* no credit calculations for macs (just yet) */
	resc->num_mac_filters = 1;

	/* divvy up vlan rules */
	vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
	vlan_count = 1 << ilog2(vlan_count);
	resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp);

	/* no real limitation */
	resc->num_mc_filters = 0;

	/* num_sbs already set */
	resc->num_sbs = vf->sb_count;
}
static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	/* reset the state variables */
	bnx2x_iov_static_resc(bp, vf);
	vf->state = VF_FREE;
}
static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);

	/* DQ usage counter */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT,
					"DQ VF usage counter timed out",
					poll_cnt);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* FW cleanup command - poll for the results */
	if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid),
				   poll_cnt))
		BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid);

	/* verify TX hw is flushed */
	bnx2x_tx_hw_flushed(bp, poll_cnt);
}
static void bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int rc, i;

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	/* the cleanup operations are valid if and only if the VF
	 * was first acquired.
	 */
	for (i = 0; i < vf_rxq_count(vf); i++) {
		rc = bnx2x_vf_queue_flr(bp, vf, i);
		if (rc)
			goto out;
	}

	/* remove multicasts */
	bnx2x_vf_mcast(bp, vf, NULL, 0, true);

	/* dispatch final cleanup and wait for HW queues to flush */
	bnx2x_vf_flr_clnup_hw(bp, vf);

	/* release VF resources */
	bnx2x_vf_free_resc(bp, vf);

	/* re-open the mailbox */
	bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
	return;
out:
	BNX2X_ERR("vf[%d:%d] failed flr: rc %d\n",
		  vf->abs_vfid, i, rc);
}
static void bnx2x_vf_flr_clnup(struct bnx2x *bp)
{
	struct bnx2x_virtf *vf;
	int i;

	for (i = 0; i < BNX2X_NR_VIRTFN(bp); i++) {
		/* VF should be RESET & in FLR cleanup states */
		if (bnx2x_vf(bp, i, state) != VF_RESET ||
		    !bnx2x_vf(bp, i, flr_clnup_stage))
			continue;

		DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n",
		   i, BNX2X_NR_VIRTFN(bp));

		vf = BP_VF(bp, i);

		/* lock the vf pf channel */
		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);

		/* invoke the VF FLR SM */
		bnx2x_vf_flr(bp, vf);

		/* mark the VF to be ACKED and continue */
		vf->flr_clnup_stage = false;
		bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
	}

	/* Acknowledge the handled VFs.
	 * We acknowledge all the vfs for which an flr was requested, even
	 * if amongst them there are such that we never opened, since the mcp
	 * will interrupt us immediately again if we only ack some of the bits,
	 * resulting in an endless loop. This can happen for example in KVM
	 * where an 'all ones' flr request is sometimes given by the hypervisor.
	 */
	DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n",
	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i],
			  bp->vfdb->flrd_vfs[i]);

	bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0);

	/* clear the acked bits - better yet if the MCP implemented
	 * write to clear semantics
	 */
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0);
}
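
/* FLR handling is two-stage: bnx2x_vf_handle_flr_event() below latches the
 * MCP's disabled-VF bitmap and marks each affected VF VF_RESET, then
 * bnx2x_vf_flr_clnup() above walks the marked VFs under the vf-pf channel
 * lock and acks the entire bitmap back to the MCP.
 */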
void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
{
	int i;

	/* Read FLR'd VFs */
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]);

	DP(BNX2X_MSG_MCP,
	   "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n",
	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);

	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);
		u32 reset = 0;

		if (vf->abs_vfid < 32)
			reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid);
		else
			reset = bp->vfdb->flrd_vfs[1] &
				(1 << (vf->abs_vfid - 32));

		if (reset) {
			/* set as reset and ready for cleanup */
			vf->state = VF_RESET;
			vf->flr_clnup_stage = true;

			DP(BNX2X_MSG_IOV,
			   "Initiating Final cleanup for VF %d\n",
			   vf->abs_vfid);
		}
	}

	/* do the FLR cleanup for all marked VFs */
	bnx2x_vf_flr_clnup(bp);
}
/* IOV global initialization routines */
void bnx2x_iov_init_dq(struct bnx2x *bp)
{
	if (!IS_SRIOV(bp))
		return;

	/* Set the DQ such that the CID reflect the abs_vfid */
	REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
	REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));

	/* Set VFs starting CID. If it's > 0, the preceding CIDs belong to
	 * the PF L2 queues
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);

	/* The VF window size is the log2 of the max number of CIDs per VF */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);

	/* The VF doorbell size  0 - *B, 4 - 128B. We set it here to match
	 * the Pf doorbell size although the 2 are independent.
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3);

	/* No security checks for now -
	 * configure single rule (out of 16) mask = 0x1, value = 0x0,
	 * CID range 0 - 0x1ffff
	 */
	REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
	REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);

	/* set the VF doorbell threshold */
	REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
}
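
/* A note on the CID layout configured above: the VF normal-CID base is
 * BNX2X_FIRST_VF_CID and each VF owns a window of BNX2X_VF_CID_WND bits,
 * so a VF CID carries the queue index in its low window bits with the
 * abs_vfid above them. The decode in bnx2x_iov_eq_sp_event() and
 * bnx2x_vf_by_cid() below relies on this layout.
 */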
void bnx2x_iov_init_dmae(struct bnx2x *bp)
{
	if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
		REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
}
static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return dev->bus->number + ((dev->devfn + iov->offset +
				    iov->stride * vfid) >> 8);
}

static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
}
static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i, n;
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
		u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
		u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);

		size /= iov->total;
		vf->bars[n].bar = start + size * vf->abs_vfid;
		vf->bars[n].size = size;
	}
}
static int bnx2x_ari_enabled(struct pci_dev *dev)
{
	return dev->bus->self && dev->bus->self->ari_enabled;
}
static void
bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
{
	int sb_id;
	u32 val;
	u8 fid, current_pf = 0;

	/* IGU in normal mode - read CAM */
	for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
			continue;
		fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
		if (fid & IGU_FID_ENCODE_IS_PF)
			current_pf = fid & IGU_FID_PF_NUM_MASK;
		else if (current_pf == BP_FUNC(bp))
			bnx2x_vf_set_igu_info(bp, sb_id,
					      (fid & IGU_FID_VF_NUM_MASK));
		DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
		   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
		   ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
		    (fid & IGU_FID_VF_NUM_MASK)), sb_id,
		   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
	}
	DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool);
}
static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
{
	if (bp->vfdb) {
		kfree(bp->vfdb->vfqs);
		kfree(bp->vfdb->vfs);
		kfree(bp->vfdb);
	}
	bp->vfdb = NULL;
}
static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	int pos;
	struct pci_dev *dev = bp->pdev;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		BNX2X_ERR("failed to find SRIOV capability in device\n");
		return -ENODEV;
	}

	iov->pos = pos;
	DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
	pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	return 0;
}
static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	u32 val;

	/* read the SRIOV capability structure
	 * The fields can be read via configuration read or
	 * directly from the device (starting at offset PCICFG_OFFSET)
	 */
	if (bnx2x_sriov_pci_cfg_info(bp, iov))
		return -ENODEV;

	/* get the number of SRIOV bars */
	iov->nres = 0;

	/* read the first_vfid */
	val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
	iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
			       * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));

	DP(BNX2X_MSG_IOV,
	   "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
	   BP_FUNC(bp),
	   iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
	   iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

	return 0;
}
/* must be called after PF bars are mapped */
int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
		       int num_vfs_param)
{
	int err, i;
	struct bnx2x_sriov *iov;
	struct pci_dev *dev = bp->pdev;

	bp->vfdb = NULL;

	/* verify is pf */
	if (IS_VF(bp))
		return 0;

	/* verify sriov capability is present in configuration space */
	if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV))
		return 0;

	/* verify chip revision */
	if (CHIP_IS_E1x(bp))
		return 0;

	/* check if SRIOV support is turned off */
	if (!num_vfs_param)
		return 0;

	/* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
	if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
		BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
			  BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
		return 0;
	}

	/* SRIOV can be enabled only with MSIX */
	if (int_mode_param == BNX2X_INT_MODE_MSI ||
	    int_mode_param == BNX2X_INT_MODE_INTX) {
		BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
		return 0;
	}

	err = -EIO;
	/* verify ari is enabled */
	if (!bnx2x_ari_enabled(bp->pdev)) {
		BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV can not be enabled\n");
		return 0;
	}

	/* verify igu is in normal mode */
	if (CHIP_INT_MODE_IS_BC(bp)) {
		BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n");
		return 0;
	}

	/* allocate the vfs database */
	bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
	if (!bp->vfdb) {
		BNX2X_ERR("failed to allocate vf database\n");
		err = -ENOMEM;
		goto failed;
	}

	/* get the sriov info - Linux already collected all the pertinent
	 * information, however the sriov structure is for the private use
	 * of the pci module. Also we want this information regardless
	 * of the hyper-visor.
	 */
	iov = &(bp->vfdb->sriov);
	err = bnx2x_sriov_info(bp, iov);
	if (err)
		goto failed;

	/* SR-IOV capability was enabled but there are no VFs */
	if (iov->total == 0)
		goto failed;

	iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param);

	DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n",
	   num_vfs_param, iov->nr_virtfn);

	/* allocate the vf array */
	bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
				BNX2X_NR_VIRTFN(bp), GFP_KERNEL);
	if (!bp->vfdb->vfs) {
		BNX2X_ERR("failed to allocate vf array\n");
		err = -ENOMEM;
		goto failed;
	}

	/* Initial VF init - index and abs_vfid - nr_virtfn must be set */
	for_each_vf(bp, i) {
		bnx2x_vf(bp, i, index) = i;
		bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
		bnx2x_vf(bp, i, state) = VF_FREE;
		mutex_init(&bnx2x_vf(bp, i, op_mutex));
		bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
	}

	/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
	bnx2x_get_vf_igu_cam_info(bp);

	/* allocate the queue arrays for all VFs */
	bp->vfdb->vfqs = kzalloc(
		BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue),
		GFP_KERNEL);

	DP(BNX2X_MSG_IOV, "bp->vfdb->vfqs was %p\n", bp->vfdb->vfqs);

	if (!bp->vfdb->vfqs) {
		BNX2X_ERR("failed to allocate vf queue array\n");
		err = -ENOMEM;
		goto failed;
	}

	/* Prepare the VFs event synchronization mechanism */
	mutex_init(&bp->vfdb->event_mutex);
	return 0;
failed:
	DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
	__bnx2x_iov_free_vfdb(bp);
	return err;
}
void bnx2x_iov_remove_one(struct bnx2x *bp)
{
	int vf_idx;

	/* if SRIOV is not enabled there's nothing to do */
	if (!IS_SRIOV(bp))
		return;

	DP(BNX2X_MSG_IOV, "about to call disable sriov\n");
	pci_disable_sriov(bp->pdev);
	DP(BNX2X_MSG_IOV, "sriov disabled\n");

	/* disable access to all VFs */
	for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) {
		bnx2x_pretend_func(bp,
				   HW_VF_HANDLE(bp,
						bp->vfdb->sriov.first_vf_in_pf +
						vf_idx));
		DP(BNX2X_MSG_IOV, "disabling internal access for vf %d\n",
		   bp->vfdb->sriov.first_vf_in_pf + vf_idx);
		bnx2x_vf_enable_internal(bp, 0);
		bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
	}

	/* free vf database */
	__bnx2x_iov_free_vfdb(bp);
}
void bnx2x_iov_free_mem(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return;

	/* free vfs hw contexts */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = &bp->vfdb->context[i];
		BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
	}

	BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
		       BP_VFDB(bp)->sp_dma.mapping,
		       BP_VFDB(bp)->sp_dma.size);

	BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
		       BP_VF_MBX_DMA(bp)->mapping,
		       BP_VF_MBX_DMA(bp)->size);

	BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr,
		       BP_VF_BULLETIN_DMA(bp)->mapping,
		       BP_VF_BULLETIN_DMA(bp)->size);
}
int bnx2x_iov_alloc_mem(struct bnx2x *bp)
{
	size_t tot_size;
	int i, rc = 0;

	if (!IS_SRIOV(bp))
		return rc;

	/* allocate vfs hw contexts */
	tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
		BNX2X_CIDS_PER_VF * sizeof(union cdu_context);

	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);
		cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);

		if (cxt->size) {
			cxt->addr = BNX2X_PCI_ALLOC(&cxt->mapping, cxt->size);
			if (!cxt->addr)
				goto alloc_mem_err;
		} else {
			cxt->addr = NULL;
			cxt->mapping = 0;
		}
		tot_size -= cxt->size;
	}

	/* allocate vfs ramrods dma memory - client_init and set_mac */
	tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
	BP_VFDB(bp)->sp_dma.addr = BNX2X_PCI_ALLOC(&BP_VFDB(bp)->sp_dma.mapping,
						   tot_size);
	if (!BP_VFDB(bp)->sp_dma.addr)
		goto alloc_mem_err;
	BP_VFDB(bp)->sp_dma.size = tot_size;

	/* allocate mailboxes */
	tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
	BP_VF_MBX_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_MBX_DMA(bp)->mapping,
						  tot_size);
	if (!BP_VF_MBX_DMA(bp)->addr)
		goto alloc_mem_err;

	BP_VF_MBX_DMA(bp)->size = tot_size;

	/* allocate local bulletin boards */
	tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE;
	BP_VF_BULLETIN_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_BULLETIN_DMA(bp)->mapping,
						       tot_size);
	if (!BP_VF_BULLETIN_DMA(bp)->addr)
		goto alloc_mem_err;

	BP_VF_BULLETIN_DMA(bp)->size = tot_size;

	return 0;

alloc_mem_err:
	return -ENOMEM;
}
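
/* The per-VF context memory allocated above is carved into CDU_ILT_PAGE_SZ
 * chunks so that each chunk can back a single ILT line; bnx2x_iov_init_ilt()
 * later points the PF's ILT at these pages.
 */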
static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
			   struct bnx2x_vf_queue *q)
{
	u8 cl_id = vfq_cl_id(vf, q);
	u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
	unsigned long q_type = 0;

	set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
	set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);

	/* Queue State object */
	bnx2x_init_queue_obj(bp, &q->sp_obj,
			     cl_id, &q->cid, 1, func_id,
			     bnx2x_vf_sp(bp, vf, q_data),
			     bnx2x_vf_sp_map(bp, vf, q_data),
			     q_type);

	/* sp indication is set only when vlan/mac/etc. are initialized */
	q->sp_initialized = false;

	DP(BNX2X_MSG_IOV,
	   "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n",
	   vf->abs_vfid, q->sp_obj.func_id, q->cid);
}
/* called by bnx2x_nic_load */
int bnx2x_iov_nic_init(struct bnx2x *bp)
{
	int vfid;

	if (!IS_SRIOV(bp)) {
		DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
		return 0;
	}

	DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);

	/* let FLR complete ... */
	msleep(100);

	/* initialize vf database */
	for_each_vf(bp, vfid) {
		struct bnx2x_virtf *vf = BP_VF(bp, vfid);

		int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
			BNX2X_CIDS_PER_VF;

		union cdu_context *base_cxt = (union cdu_context *)
			BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
			(base_vf_cid & (ILT_PAGE_CIDS-1));

		DP(BNX2X_MSG_IOV,
		   "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
		   vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
		   BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);

		/* init statically provisioned resources */
		bnx2x_iov_static_resc(bp, vf);

		/* queues are initialized during VF-ACQUIRE */

		/* reserve the vf vlan credit */
		bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf));

		vf->filter_state = 0;
		vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);

		/* init mcast object - This object will be re-initialized
		 * during VF-ACQUIRE with the proper cl_id and cid.
		 * It needs to be initialized here so that it can be safely
		 * handled by a subsequent FLR flow.
		 */
		vf->mcast_list_len = 0;
		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
				     0xFF, 0xFF, 0xFF,
				     bnx2x_vf_sp(bp, vf, mcast_rdata),
				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
				     BNX2X_FILTER_MCAST_PENDING,
				     &vf->filter_state,
				     BNX2X_OBJ_TYPE_RX_TX);

		/* set the mailbox message addresses */
		BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
			(((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid *
			 MBX_MSG_ALIGNED_SIZE);

		BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
			vfid * MBX_MSG_ALIGNED_SIZE;

		/* Enable vf mailbox */
		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
	}

	/* Final VF init */
	for_each_vf(bp, vfid) {
		struct bnx2x_virtf *vf = BP_VF(bp, vfid);

		/* fill in the BDF and bars */
		vf->bus = bnx2x_vf_bus(bp, vfid);
		vf->devfn = bnx2x_vf_devfn(bp, vfid);
		bnx2x_vf_set_bars(bp, vf);

		DP(BNX2X_MSG_IOV,
		   "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
		   vf->abs_vfid, vf->bus, vf->devfn,
		   (unsigned)vf->bars[0].bar, vf->bars[0].size,
		   (unsigned)vf->bars[1].bar, vf->bars[1].size,
		   (unsigned)vf->bars[2].bar, vf->bars[2].size);
	}

	return 0;
}
/* called by bnx2x_chip_cleanup */
int bnx2x_iov_chip_cleanup(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return 0;

	/* release all the VFs */
	for_each_vf(bp, i)
		bnx2x_vf_release(bp, BP_VF(bp, i));

	return 0;
}
/* called by bnx2x_init_hw_func, returns the next ilt line */
int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
{
	int i;
	struct bnx2x_ilt *ilt = BP_ILT(bp);

	if (!IS_SRIOV(bp))
		return line;

	/* set vfs ilt lines */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);

		ilt->lines[line+i].page = hw_cxt->addr;
		ilt->lines[line+i].page_mapping = hw_cxt->mapping;
		ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
	}
	return line + i;
}
static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid)
{
	return ((cid >= BNX2X_FIRST_VF_CID) &&
		((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS));
}
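
/* EQE handlers: completions for VF slowpath ramrods arrive on the PF's
 * event queue. For classification completions, the bits of the echo field
 * above BNX2X_SWCID_SHIFT carry the pending command type that drives the
 * dispatch in bnx2x_vf_handle_classification_eqe().
 */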
static void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
					       struct bnx2x_vf_queue *vfq,
					       union event_ring_elem *elem)
{
	unsigned long ramrod_flags = 0;
	int rc = 0;

	/* Always push next commands out, don't wait here */
	set_bit(RAMROD_CONT, &ramrod_flags);

	switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
	case BNX2X_FILTER_MAC_PENDING:
		rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
					   &ramrod_flags);
		break;
	case BNX2X_FILTER_VLAN_PENDING:
		rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem,
					    &ramrod_flags);
		break;
	default:
		BNX2X_ERR("Unsupported classification command: %d\n",
			  elem->message.data.eth_event.echo);
		return;
	}
	if (rc < 0)
		BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
	else if (rc > 0)
		DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n");
}
static void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp,
				      struct bnx2x_virtf *vf)
{
	struct bnx2x_mcast_ramrod_params rparam = {NULL};
	int rc;

	rparam.mcast_obj = &vf->mcast_obj;
	vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw);

	/* If there are pending mcast commands - send them */
	if (vf->mcast_obj.check_pending(&vf->mcast_obj)) {
		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
		if (rc < 0)
			BNX2X_ERR("Failed to send pending mcast commands: %d\n",
				  rc);
	}
}
static void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
					struct bnx2x_virtf *vf)
{
	smp_mb__before_clear_bit();
	clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
	smp_mb__after_clear_bit();
}
static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp,
					   struct bnx2x_virtf *vf)
{
	vf->rss_conf_obj.raw.clear_pending(&vf->rss_conf_obj.raw);
}
int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
{
	struct bnx2x_virtf *vf;
	int qidx = 0, abs_vfid;
	u8 opcode;
	u16 cid = 0xffff;

	if (!IS_SRIOV(bp))
		return 1;

	/* first get the cid - the only events we handle here are cfc-delete
	 * and set-mac completion
	 */
	opcode = elem->message.opcode;

	switch (opcode) {
	case EVENT_RING_OPCODE_CFC_DEL:
		cid = SW_CID((__force __le32)
			     elem->message.data.cfc_del_event.cid);
		DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
		break;
	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
	case EVENT_RING_OPCODE_MULTICAST_RULES:
	case EVENT_RING_OPCODE_FILTERS_RULES:
	case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
		cid = (elem->message.data.eth_event.echo &
		       BNX2X_SWCID_MASK);
		DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
		break;
	case EVENT_RING_OPCODE_VF_FLR:
		abs_vfid = elem->message.data.vf_flr_event.vf_id;
		DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n",
		   abs_vfid);
		goto get_vf;
	case EVENT_RING_OPCODE_MALICIOUS_VF:
		abs_vfid = elem->message.data.malicious_vf_event.vf_id;
		BNX2X_ERR("Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n",
			  abs_vfid,
			  elem->message.data.malicious_vf_event.err_id);
		goto get_vf;
	default:
		return 1;
	}

	/* check if the cid is the VF range */
	if (!bnx2x_iov_is_vf_cid(bp, cid)) {
		DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid);
		return 1;
	}

	/* extract vf and rxq index from vf_cid - relies on the following:
	 * 1. vfid on cid reflects the true abs_vfid
	 * 2. The max number of VFs (per path) is 64
	 */
	qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
	abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
get_vf:
	vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (!vf) {
		BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n",
			  cid, abs_vfid);
		return 0;
	}

	switch (opcode) {
	case EVENT_RING_OPCODE_CFC_DEL:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n",
		   vf->abs_vfid, qidx);
		vfq_get(vf, qidx)->sp_obj.complete_cmd(bp,
						       &vfq_get(vf,
								qidx)->sp_obj,
						       BNX2X_Q_CMD_CFC_DEL);
		break;
	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem);
		break;
	case EVENT_RING_OPCODE_MULTICAST_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_mcast_eqe(bp, vf);
		break;
	case EVENT_RING_OPCODE_FILTERS_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_filters_eqe(bp, vf);
		break;
	case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_rss_update_eqe(bp, vf);
	case EVENT_RING_OPCODE_VF_FLR:
	case EVENT_RING_OPCODE_MALICIOUS_VF:
		/* Do nothing for now */
		break;
	}

	return 0;
}
static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
{
	/* extract the vf from vf_cid - relies on the following:
	 * 1. vfid on cid reflects the true abs_vfid
	 * 2. The max number of VFs (per path) is 64
	 */
	int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
	return bnx2x_vf_by_abs_fid(bp, abs_vfid);
}
void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
				struct bnx2x_queue_sp_obj **q_obj)
{
	struct bnx2x_virtf *vf;

	if (!IS_SRIOV(bp))
		return;

	vf = bnx2x_vf_by_cid(bp, vf_cid);

	if (vf) {
		/* extract queue index from vf_cid - relies on the following:
		 * 1. vfid on cid reflects the true abs_vfid
		 * 2. The max number of VFs (per path) is 64
		 */
		int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
		*q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
	} else {
		BNX2X_ERR("No vf matching cid %d\n", vf_cid);
	}
}
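
/* Stats queries for VF queues are appended after the PF's own queries in
 * the FW stats request; each entry points at the matching slot in the VF's
 * DMA-able stats buffer (fw_stat_map + queue index * stats_stride).
 */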
void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
{
	int i;
	int first_queue_query_index, num_queues_req;
	dma_addr_t cur_data_offset;
	struct stats_query_entry *cur_query_entry;
	u8 stats_count = 0;
	bool is_fcoe = false;

	if (!IS_SRIOV(bp))
		return;

	if (!NO_FCOE(bp))
		is_fcoe = true;

	/* fcoe adds one global request and one queue request */
	num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe;
	first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
		(is_fcoe ? 0 : 1);

	DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
	       "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
	       BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
	       first_queue_query_index + num_queues_req);

	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, queue_stats) +
		num_queues_req * sizeof(struct per_queue_stats);

	cur_query_entry = &bp->fw_stats_req->
		query[first_queue_query_index + num_queues_req];

	for_each_vf(bp, i) {
		int j;
		struct bnx2x_virtf *vf = BP_VF(bp, i);

		if (vf->state != VF_ENABLED) {
			DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
			       "vf %d not enabled so no stats for it\n",
			       vf->abs_vfid);
			continue;
		}

		DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid);
		for_each_vfq(vf, j) {
			struct bnx2x_vf_queue *rxq = vfq_get(vf, j);

			dma_addr_t q_stats_addr =
				vf->fw_stat_map + j * vf->stats_stride;

			/* collect stats for active queues only */
			if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
			    BNX2X_Q_LOGICAL_STATE_STOPPED)
				continue;

			/* create stats query entry for this queue */
			cur_query_entry->kind = STATS_TYPE_QUEUE;
			cur_query_entry->index = vfq_stat_id(vf, rxq);
			cur_query_entry->funcID =
				cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
			cur_query_entry->address.hi =
				cpu_to_le32(U64_HI(q_stats_addr));
			cur_query_entry->address.lo =
				cpu_to_le32(U64_LO(q_stats_addr));
			DP(BNX2X_MSG_IOV,
			   "added address %x %x for vf %d queue %d client %d\n",
			   cur_query_entry->address.hi,
			   cur_query_entry->address.lo, cur_query_entry->funcID,
			   j, cur_query_entry->index);
			cur_query_entry++;
			cur_data_offset += sizeof(struct per_queue_stats);
			stats_count++;

			/* all stats are coalesced to the leading queue */
			if (vf->cfg_flags & VF_CFG_STATS_COALESCE)
				break;
		}
	}
	bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
}
static inline
struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id)
{
	int i;
	struct bnx2x_virtf *vf = NULL;

	for_each_vf(bp, i) {
		vf = BP_VF(bp, i);
		if (stat_id >= vf->igu_base_id &&
		    stat_id < vf->igu_base_id + vf_sb_count(vf))
			break;
	}
	return vf;
}
/* VF API helpers */
static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid,
				u8 enable)
{
	u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4;
	u32 val = enable ? (abs_vfid | (1 << 6)) : 0;

	REG_WR(bp, reg, val);
}
static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i;

	for_each_vfq(vf, i)
		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
				    vfq_qzone_id(vf, vfq_get(vf, i)), false);
}
static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	u32 val;

	/* clear the VF configuration - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
	val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN |
		 IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK);
	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}
u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF),
		     BNX2X_VF_MAX_QUEUES);
}
static
int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
			    struct vf_pf_resc_request *req_resc)
{
	u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
	u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);

	return ((req_resc->num_rxqs <= rxq_cnt) &&
		(req_resc->num_txqs <= txq_cnt) &&
		(req_resc->num_sbs <= vf_sb_count(vf)) &&
		(req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
		(req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
}
int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
		     struct vf_pf_resc_request *resc)
{
	int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
		BNX2X_CIDS_PER_VF;

	union cdu_context *base_cxt = (union cdu_context *)
		BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
		(base_vf_cid & (ILT_PAGE_CIDS-1));
	int i;

	/* if state is 'acquired' the VF was not released or FLR'd, in
	 * this case the returned resources match the acquired already
	 * acquired resources. Verify that the requested numbers do
	 * not exceed the already acquired numbers.
	 */
	if (vf->state == VF_ACQUIRED) {
		DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n",
		   vf->abs_vfid);

		if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
			BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= then previously acquired numbers\n",
				  vf->abs_vfid);
			return -EINVAL;
		}
		return 0;
	}

	/* Otherwise vf state must be 'free' or 'reset' */
	if (vf->state != VF_FREE && vf->state != VF_RESET) {
		BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n",
			  vf->abs_vfid, vf->state);
		return -EINVAL;
	}

	/* static allocation:
	 * the global maximum number are fixed per VF. Fail the request if
	 * requested number exceed these globals
	 */
	if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
		DP(BNX2X_MSG_IOV,
		   "cannot fulfill vf resource request. Placing maximal available values in response\n");
		/* set the max resource in the vf */
		return -ENOMEM;
	}

	/* Set resources counters - 0 request means max available */
	vf_sb_count(vf) = resc->num_sbs;
	vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
	vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
	if (resc->num_mac_filters)
		vf_mac_rules_cnt(vf) = resc->num_mac_filters;
	if (resc->num_vlan_filters)
		vf_vlan_rules_cnt(vf) = resc->num_vlan_filters;

	DP(BNX2X_MSG_IOV,
	   "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
	   vf_sb_count(vf), vf_rxq_count(vf),
	   vf_txq_count(vf), vf_mac_rules_cnt(vf),
	   vf_vlan_rules_cnt(vf));

	/* Initialize the queues */
	if (!vf->vfqs) {
		DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n");
		return -EINVAL;
	}

	for_each_vfq(vf, i) {
		struct bnx2x_vf_queue *q = vfq_get(vf, i);

		if (!q) {
			BNX2X_ERR("q number %d was not allocated\n", i);
			return -EINVAL;
		}

		q->index = i;
		q->cxt = &((base_cxt + i)->eth);
		q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i;

		DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n",
		   vf->abs_vfid, i, q->index, q->cid, q->cxt);

		/* init SP objects */
		bnx2x_vfq_init(bp, vf, q);
	}
	vf->state = VF_ACQUIRED;
	return 0;
}
int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
{
	struct bnx2x_func_init_params func_init = {0};
	u16 flags = 0;
	int i;

	/* the sb resources are initialized at this point, do the
	 * FW/HW initializations
	 */
	for_each_vf_sb(vf, i)
		bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true,
			      vf_igu_sb(vf, i), vf_igu_sb(vf, i));

	if (vf->state != VF_ACQUIRED) {
		DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n",
		   vf->abs_vfid, vf->state);
		return -EINVAL;
	}

	/* let FLR complete ... */
	msleep(100);

	/* FLR cleanup epilogue */
	if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid))
		return -EBUSY;

	/* reset IGU VF statistics: MSIX */
	REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4, 0);

	if (vf->cfg_flags & VF_CFG_STATS)
		flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ);

	if (vf->cfg_flags & VF_CFG_TPA)
		flags |= FUNC_FLG_TPA;

	if (is_vf_multi(vf))
		flags |= FUNC_FLG_RSS;

	/* function setup */
	func_init.func_flgs = flags;
	func_init.pf_id = BP_FUNC(bp);
	func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
	func_init.fw_stat_map = vf->fw_stat_map;
	func_init.spq_map = vf->spq_map;
	func_init.spq_prod = 0;
	bnx2x_func_init(bp, &func_init);

	bnx2x_vf_enable_access(bp, vf->abs_vfid);
	bnx2x_vf_enable_traffic(bp, vf);

	/* queue protection table */
	for_each_vfq(vf, i)
		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
				    vfq_qzone_id(vf, vfq_get(vf, i)), true);

	vf->state = VF_ENABLED;

	/* update vf bulletin board */
	bnx2x_post_vf_bulletin(bp, vf->index);

	return 0;
}
struct set_vf_state_cookie {
	struct bnx2x_virtf *vf;
	u8 state;
};

static void bnx2x_set_vf_state(void *cookie)
{
	struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie;

	p->vf->state = p->state;
}
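
/* bnx2x_stats_safe_exec() runs the callback with the statistics machinery
 * quiesced, so flipping the VF state via this cookie cannot race a stats
 * ramrod that still references the VF's stats buffer; see the comment in
 * bnx2x_vf_close().
 */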
int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i, rc = 0;

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	/* Close all queues */
	for (i = 0; i < vf_rxq_count(vf); i++) {
		rc = bnx2x_vf_queue_teardown(bp, vf, i);
		if (rc)
			goto op_err;
	}

	/* disable the interrupts */
	DP(BNX2X_MSG_IOV, "disabling igu\n");
	bnx2x_vf_igu_disable(bp, vf);

	/* disable the VF */
	DP(BNX2X_MSG_IOV, "clearing qtbl\n");
	bnx2x_vf_clr_qtbl(bp, vf);

	/* need to make sure there are no outstanding stats ramrods which may
	 * cause the device to access the VF's stats buffer which it will free
	 * as soon as we return from the close flow.
	 */
	{
		struct set_vf_state_cookie cookie;

		cookie.vf = vf;
		cookie.state = VF_ACQUIRED;
		bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
	}

	DP(BNX2X_MSG_IOV, "set state to acquired\n");

	return 0;
op_err:
	BNX2X_ERR("vf[%d] CLOSE error: rc %d\n", vf->abs_vfid, rc);
	return rc;
}
/* VF release can be called either: 1. The VF was acquired but
 * not enabled; 2. the VF was enabled or in the process of being
 * enabled
 */
int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int rc;

	DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid,
	   vf->state == VF_FREE ? "Free" :
	   vf->state == VF_ACQUIRED ? "Acquired" :
	   vf->state == VF_ENABLED ? "Enabled" :
	   vf->state == VF_RESET ? "Reset" :
	   "Unknown");

	switch (vf->state) {
	case VF_ENABLED:
		rc = bnx2x_vf_close(bp, vf);
		if (rc)
			goto op_err;
		/* Fallthrough to release resources */
	case VF_ACQUIRED:
		DP(BNX2X_MSG_IOV, "about to free resources\n");
		bnx2x_vf_free_resc(bp, vf);
		break;

	case VF_FREE:
	case VF_RESET:
	default:
		break;
	}
	return 0;
op_err:
	BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, rc);
	return rc;
}
int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
			struct bnx2x_config_rss_params *rss)
{
	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
	set_bit(RAMROD_COMP_WAIT, &rss->ramrod_flags);
	return bnx2x_config_rss(bp, rss);
}
int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
			struct vfpf_tpa_tlv *tlv,
			struct bnx2x_queue_update_tpa_params *params)
{
	aligned_u64 *sge_addr = tlv->tpa_client_info.sge_addr;
	struct bnx2x_queue_state_params qstate;
	int qid, rc = 0;

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	/* Set ramrod params */
	memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params));
	memcpy(&qstate.params.update_tpa, params,
	       sizeof(struct bnx2x_queue_update_tpa_params));
	qstate.cmd = BNX2X_Q_CMD_UPDATE_TPA;
	set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);

	for (qid = 0; qid < vf_rxq_count(vf); qid++) {
		qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
		qstate.params.update_tpa.sge_map = sge_addr[qid];
		DP(BNX2X_MSG_IOV, "sge_addr[%d:%d] %08x:%08x\n",
		   vf->abs_vfid, qid, U64_HI(sge_addr[qid]),
		   U64_LO(sge_addr[qid]));
		rc = bnx2x_queue_state_change(bp, &qstate);
		if (rc) {
			BNX2X_ERR("Failed to configure sge_addr %08x:%08x for [%d:%d]\n",
				  U64_HI(sge_addr[qid]), U64_LO(sge_addr[qid]),
				  vf->abs_vfid, qid);
			return rc;
		}
	}

	return rc;
}
/* VF release ~ VF close + VF release-resources
 * Release is the ultimate SW shutdown and is called whenever an
 * irrecoverable error is encountered.
 */
int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int rc;

	DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid);
	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);

	rc = bnx2x_vf_free(bp, vf);
	if (rc)
		WARN(rc,
		     "VF[%d] Failed to allocate resources for release op- rc=%d\n",
		     vf->abs_vfid, rc);
	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
	return rc;
}
static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp,
				     struct bnx2x_virtf *vf, u32 *sbdf)
{
	*sbdf = vf->devfn | (vf->bus << 8);
}
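
/* Illustrative example (not driver logic): a VF at bus 0x05, devfn 0x12
 * (slot 2, function 2) encodes as *sbdf = 0x12 | (0x05 << 8) = 0x0512,
 * i.e. bus in bits 15:8 and devfn in bits 7:0.
 */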
void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      enum channel_tlvs tlv)
{
	/* we don't lock the channel for unsupported tlvs */
	if (!bnx2x_tlv_supported(tlv)) {
		BNX2X_ERR("attempting to lock with unsupported tlv. Aborting\n");
		return;
	}

	/* lock the channel */
	mutex_lock(&vf->op_mutex);

	/* record the locking op */
	vf->op_current = tlv;

	/* log the lock */
	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n",
	   vf->abs_vfid, tlv);
}
void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
				enum channel_tlvs expected_tlv)
{
	enum channel_tlvs current_tlv;

	if (!vf) {
		BNX2X_ERR("VF was %p\n", vf);
		return;
	}

	current_tlv = vf->op_current;

	/* we don't unlock the channel for unsupported tlvs */
	if (!bnx2x_tlv_supported(expected_tlv))
		return;

	WARN(expected_tlv != vf->op_current,
	     "lock mismatch: expected %d found %d", expected_tlv,
	     vf->op_current);

	/* record the locking op */
	vf->op_current = CHANNEL_TLV_NONE;

	/* unlock the channel */
	mutex_unlock(&vf->op_mutex);

	/* log the unlock */
	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
	   vf->abs_vfid, current_tlv);
}
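
/* Illustrative sketch (compiled out): PF-side flows in this file bracket a
 * VF operation with the channel lock/unlock above. The placeholder op and
 * the choice of CHANNEL_TLV_PF_SET_MAC are examples only; the TLV passed to
 * unlock must match the one used to lock.
 */
#if 0
	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);

	/* ... perform the PF-side operation on behalf of the VF ... */

	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
#endif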
static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable)
{
	struct bnx2x_queue_state_params q_params;
	u32 prev_flags;
	int i, rc;

	/* Verify changes are needed and record current Tx switching state */
	prev_flags = bp->flags;
	if (enable)
		bp->flags |= TX_SWITCHING;
	else
		bp->flags &= ~TX_SWITCHING;
	if (prev_flags == bp->flags)
		return 0;

	/* Verify state enables the sending of queue ramrods */
	if ((bp->state != BNX2X_STATE_OPEN) ||
	    (bnx2x_get_q_logical_state(bp,
				      &bnx2x_sp_obj(bp, &bp->fp[0]).q_obj) !=
	     BNX2X_Q_LOGICAL_STATE_ACTIVE))
		return 0;

	/* send q. update ramrod to configure Tx switching */
	memset(&q_params, 0, sizeof(q_params));
	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
	q_params.cmd = BNX2X_Q_CMD_UPDATE;
	__set_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
		  &q_params.params.update.update_flags);
	if (enable)
		__set_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
			  &q_params.params.update.update_flags);
	else
		__clear_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
			    &q_params.params.update.update_flags);

	/* send the ramrod on all the queues of the PF */
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* Set the appropriate Queue object */
		q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;

		/* Update the Queue state */
		rc = bnx2x_queue_state_change(bp, &q_params);
		if (rc) {
			BNX2X_ERR("Failed to configure Tx switching\n");
			return rc;
		}
	}

	DP(BNX2X_MSG_IOV, "%s Tx Switching\n", enable ? "Enabled" : "Disabled");
	return 0;
}
int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
{
	struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));

	if (!IS_SRIOV(bp)) {
		BNX2X_ERR("failed to configure SR-IOV since vfdb was not allocated. Check dmesg for errors in probe stage\n");
		return -EINVAL;
	}

	DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
	   num_vfs_param, BNX2X_NR_VIRTFN(bp));

	/* HW channel is only operational when PF is up */
	if (bp->state != BNX2X_STATE_OPEN) {
		BNX2X_ERR("VF num configuration via sysfs not supported while PF is down\n");
		return -EINVAL;
	}

	/* we are always bound by the total_vfs in the configuration space */
	if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) {
		BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n",
			  num_vfs_param, BNX2X_NR_VIRTFN(bp));
		num_vfs_param = BNX2X_NR_VIRTFN(bp);
	}

	bp->requested_nr_virtfn = num_vfs_param;
	if (num_vfs_param == 0) {
		bnx2x_set_pf_tx_switching(bp, false);
		pci_disable_sriov(dev);
		return 0;
	} else {
		return bnx2x_enable_sriov(bp);
	}
}
#define IGU_ENTRY_SIZE 4

int bnx2x_enable_sriov(struct bnx2x *bp)
{
	int rc = 0, req_vfs = bp->requested_nr_virtfn;
	int vf_idx, sb_idx, vfq_idx, qcount, first_vf;
	u32 igu_entry, address;
	u16 num_vf_queues;

	if (req_vfs == 0)
		return 0;

	first_vf = bp->vfdb->sriov.first_vf_in_pf;

	/* statically distribute vf sb pool between VFs */
	num_vf_queues = min_t(u16, BNX2X_VF_MAX_QUEUES,
			      BP_VFDB(bp)->vf_sbs_pool / req_vfs);

	/* zero previous values learned from igu cam */
	for (vf_idx = 0; vf_idx < req_vfs; vf_idx++) {
		struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);

		vf->sb_count = 0;
		vf_sb_count(BP_VF(bp, vf_idx)) = 0;
	}
	bp->vfdb->vf_sbs_pool = 0;

	/* prepare IGU cam */
	sb_idx = BP_VFDB(bp)->first_vf_igu_entry;
	address = IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE;
	for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
		for (vfq_idx = 0; vfq_idx < num_vf_queues; vfq_idx++) {
			igu_entry = vf_idx << IGU_REG_MAPPING_MEMORY_FID_SHIFT |
				vfq_idx << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT |
				IGU_REG_MAPPING_MEMORY_VALID;
			DP(BNX2X_MSG_IOV, "assigning sb %d to vf %d\n",
			   sb_idx, vf_idx);
			REG_WR(bp, address, igu_entry);
			sb_idx++;
			address += IGU_ENTRY_SIZE;
		}
	}

	/* Reinitialize vf database according to igu cam */
	bnx2x_get_vf_igu_cam_info(bp);

	DP(BNX2X_MSG_IOV, "vf_sbs_pool %d, num_vf_queues %d\n",
	   BP_VFDB(bp)->vf_sbs_pool, num_vf_queues);

	qcount = 0;
	for_each_vf(bp, vf_idx) {
		struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);

		/* set local queue arrays */
		vf->vfqs = &bp->vfdb->vfqs[qcount];
		qcount += vf_sb_count(vf);
		bnx2x_iov_static_resc(bp, vf);
	}

	/* prepare msix vectors in VF configuration space - the value in the
	 * PCI configuration space should be the index of the last entry,
	 * namely one less than the actual size of the table
	 */
	for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
		bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
		REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
		       num_vf_queues - 1);
		DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n",
		   vf_idx, num_vf_queues - 1);
	}
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* enable sriov. This will probe all the VFs, and consequently cause
	 * the "acquire" messages to appear on the VF PF channel.
	 */
	DP(BNX2X_MSG_IOV, "about to call enable sriov\n");
	bnx2x_disable_sriov(bp);

	rc = bnx2x_set_pf_tx_switching(bp, true);
	if (rc)
		return rc;

	rc = pci_enable_sriov(bp->pdev, req_vfs);
	if (rc) {
		BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
		return rc;
	}
	DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs);
	return req_vfs;
}
void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
{
	int vfidx;
	struct pf_vf_bulletin_content *bulletin;

	DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n");
	for_each_vf(bp, vfidx) {
		bulletin = BP_VF_BULLETIN(bp, vfidx);
		if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN)
			bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0);
	}
}
void bnx2x_disable_sriov(struct bnx2x *bp)
{
	pci_disable_sriov(bp->pdev);
}
static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx,
			     struct bnx2x_virtf **vf,
			     struct pf_vf_bulletin_content **bulletin)
{
	if (bp->state != BNX2X_STATE_OPEN) {
		BNX2X_ERR("vf ndo called though PF is down\n");
		return -EINVAL;
	}

	if (!IS_SRIOV(bp)) {
		BNX2X_ERR("vf ndo called though sriov is disabled\n");
		return -EINVAL;
	}

	if (vfidx >= BNX2X_NR_VIRTFN(bp)) {
		BNX2X_ERR("vf ndo called for uninitialized VF. vfidx was %d BNX2X_NR_VIRTFN was %d\n",
			  vfidx, BNX2X_NR_VIRTFN(bp));
		return -EINVAL;
	}

	/* init members */
	*vf = BP_VF(bp, vfidx);
	*bulletin = BP_VF_BULLETIN(bp, vfidx);

	if (!*vf) {
		BNX2X_ERR("vf ndo called but vf struct is null. vfidx was %d\n",
			  vfidx);
		return -EINVAL;
	}

	if (!(*vf)->vfqs) {
		BNX2X_ERR("vf ndo called but vfqs struct is null. Was ndo invoked before dynamically enabling SR-IOV? vfidx was %d\n",
			  vfidx);
		return -EINVAL;
	}

	if (!*bulletin) {
		BNX2X_ERR("vf ndo called but Bulletin Board struct is null. vfidx was %d\n",
			  vfidx);
		return -EINVAL;
	}

	return 0;
}
int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
			struct ifla_vf_info *ivi)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_virtf *vf = NULL;
	struct pf_vf_bulletin_content *bulletin = NULL;
	struct bnx2x_vlan_mac_obj *mac_obj;
	struct bnx2x_vlan_mac_obj *vlan_obj;
	int rc;

	/* sanity and init */
	rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
	if (rc)
		return rc;

	mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
	vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
	if (!mac_obj || !vlan_obj) {
		BNX2X_ERR("VF partially initialized\n");
		return -EINVAL;
	}

	ivi->vf = vfidx;
	ivi->qos = 0;
	ivi->tx_rate = 10000; /* always 10G. TBA take from link struct */
	ivi->spoofchk = 1; /* always enabled */
	if (vf->state == VF_ENABLED) {
		/* mac and vlan are in vlan_mac objects */
		if (bnx2x_validate_vf_sp_objs(bp, vf, false)) {
			mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
						0, ETH_ALEN);
			vlan_obj->get_n_elements(bp, vlan_obj, 1,
						 (u8 *)&ivi->vlan, 0,
						 VLAN_HLEN);
		}
	} else {
		/* mac */
		if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
			/* mac configured by ndo so its in bulletin board */
			memcpy(&ivi->mac, bulletin->mac, ETH_ALEN);
		else
			/* function has not been loaded yet. Show mac as 0s */
			memset(&ivi->mac, 0, ETH_ALEN);

		/* vlan */
		if (bulletin->valid_bitmap & (1 << VLAN_VALID))
			/* vlan configured by ndo so its in bulletin board */
			memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN);
		else
			/* function has not been loaded yet. Show vlans as 0s */
			memset(&ivi->vlan, 0, VLAN_HLEN);
	}

	return 0;
}
/* New mac for VF. Consider these cases:
 * 1. VF hasn't been acquired yet - save the mac in local bulletin board and
 *    supply at acquire.
 * 2. VF has already been acquired but has not yet initialized - store in local
 *    bulletin board. mac will be posted on VF bulletin board after VF init. VF
 *    will configure this mac when it is ready.
 * 3. VF has already initialized but has not yet setup a queue - post the new
 *    mac on VF's bulletin board right now. VF will configure this mac when it
 *    is ready.
 * 4. VF has already set a queue - delete any macs already configured for this
 *    queue and manually config the new mac.
 * In any event, once this function has been called refuse any attempts by the
 * VF to configure any mac for itself except for this mac. In case of a race
 * where the VF fails to see the new post on its bulletin board before sending a
 * mac configuration request, the PF will simply fail the request and VF can try
 * again after consulting its bulletin board.
 */
int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc, q_logical_state;
	struct bnx2x_virtf *vf = NULL;
	struct pf_vf_bulletin_content *bulletin = NULL;

	/* sanity and init */
	rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
	if (rc)
		return rc;
	if (!is_valid_ether_addr(mac)) {
		BNX2X_ERR("mac address invalid\n");
		return -EINVAL;
	}

	/* update PF's copy of the VF's bulletin. Will no longer accept mac
	 * configuration requests from vf unless match this mac
	 */
	bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID;
	memcpy(bulletin->mac, mac, ETH_ALEN);

	/* Post update on VF's bulletin board */
	rc = bnx2x_post_vf_bulletin(bp, vfidx);
	if (rc) {
		BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
		return rc;
	}

	q_logical_state =
		bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
	if (vf->state == VF_ENABLED &&
	    q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
		/* configure the mac in device on this vf's queue */
		unsigned long ramrod_flags = 0;
		struct bnx2x_vlan_mac_obj *mac_obj;

		/* User should be able to see failure reason in system logs */
		if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
			return -EINVAL;

		/* must lock vfpf channel to protect against vf flows */
		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);

		/* remove existing eth macs */
		mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
		rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
		if (rc) {
			BNX2X_ERR("failed to delete eth macs\n");
			rc = -EINVAL;
			goto out;
		}

		/* remove existing uc list macs */
		rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
		if (rc) {
			BNX2X_ERR("failed to delete uc_list macs\n");
			rc = -EINVAL;
			goto out;
		}

		/* configure the new mac to device */
		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
		bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
				  BNX2X_ETH_MAC, &ramrod_flags);

out:
		bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
	}

	return rc;
}
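
/* Usage note (assuming this serves as the ndo_set_vf_mac callback): the
 * hypervisor admin typically triggers this path via iproute2, e.g.
 *
 *   ip link set dev <pf-netdev> vf 0 mac aa:bb:cc:dd:ee:01
 *
 * The netdev name, VF index and mac are examples only.
 */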
int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
{
	struct bnx2x_queue_state_params q_params = {NULL};
	struct bnx2x_vlan_mac_ramrod_params ramrod_param;
	struct bnx2x_queue_update_params *update_params;
	struct pf_vf_bulletin_content *bulletin = NULL;
	struct bnx2x_rx_mode_ramrod_params rx_ramrod;
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_vlan_mac_obj *vlan_obj;
	unsigned long vlan_mac_flags = 0;
	unsigned long ramrod_flags = 0;
	struct bnx2x_virtf *vf = NULL;
	unsigned long accept_flags;
	int rc;

	/* sanity and init */
	rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
	if (rc)
		return rc;

	if (vlan > 4095) {
		BNX2X_ERR("illegal vlan value %d\n", vlan);
		return -EINVAL;
	}

	DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n",
	   vfidx, vlan, 0);

	/* update PF's copy of the VF's bulletin. No point in posting the vlan
	 * to the VF since it doesn't have anything to do with it. But it is
	 * useful to store it here in case the VF is not up yet and we can only
	 * configure the vlan later when it does. Treat vlan id 0 as remove the
	 * Host tag.
	 */
	if (vlan > 0)
		bulletin->valid_bitmap |= 1 << VLAN_VALID;
	else
		bulletin->valid_bitmap &= ~(1 << VLAN_VALID);
	bulletin->vlan = vlan;

	/* is vf initialized and queue set up? */
	if (vf->state != VF_ENABLED ||
	    bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) !=
	    BNX2X_Q_LOGICAL_STATE_ACTIVE)
		return rc;

	/* User should be able to see error in system logs */
	if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
		return -EINVAL;

	/* must lock vfpf channel to protect against vf flows */
	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);

	/* remove existing vlans */
	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
	vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
	rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
				  &ramrod_flags);
	if (rc) {
		BNX2X_ERR("failed to delete vlans\n");
		rc = -EINVAL;
		goto out;
	}

	/* need to remove/add the VF's accept_any_vlan bit */
	accept_flags = bnx2x_leading_vfq(vf, accept_flags);
	if (vlan)
		clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
	else
		set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);

	bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
			      accept_flags);
	bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
	bnx2x_config_rx_mode(bp, &rx_ramrod);

	/* configure the new vlan to device */
	memset(&ramrod_param, 0, sizeof(ramrod_param));
	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
	ramrod_param.vlan_mac_obj = vlan_obj;
	ramrod_param.ramrod_flags = ramrod_flags;
	set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		&ramrod_param.user_req.vlan_mac_flags);
	ramrod_param.user_req.u.vlan.vlan = vlan;
	ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
	rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
	if (rc) {
		BNX2X_ERR("failed to configure vlan\n");
		rc = -EINVAL;
		goto out;
	}

	/* send queue update ramrod to configure default vlan and silent
	 * vlan removal
	 */
	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
	q_params.cmd = BNX2X_Q_CMD_UPDATE;
	q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
	update_params = &q_params.params.update;
	__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
		  &update_params->update_flags);
	__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
		  &update_params->update_flags);
	if (vlan == 0) {
		/* if vlan is 0 then we want to leave the VF traffic
		 * untagged, and leave the incoming traffic untouched
		 * (i.e. do not remove any vlan tags).
		 */
		__clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
			    &update_params->update_flags);
		__clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
			    &update_params->update_flags);
	} else {
		/* configure default vlan to vf queue and set silent
		 * vlan removal (the vf remains unaware of this vlan).
		 */
		__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
			  &update_params->update_flags);
		__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
			  &update_params->update_flags);
		update_params->def_vlan = vlan;
		update_params->silent_removal_value =
			vlan & VLAN_VID_MASK;
		update_params->silent_removal_mask = VLAN_VID_MASK;
	}

	/* Update the Queue state */
	rc = bnx2x_queue_state_change(bp, &q_params);
	if (rc) {
		BNX2X_ERR("Failed to configure default VLAN\n");
		goto out;
	}

	/* clear the flag indicating that this VF needs its vlan
	 * (will only be set if the HV configured the Vlan before vf was
	 * up and we were called because the VF came up later)
	 */
out:
	vf->cfg_flags &= ~VF_CFG_VLAN;
	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);

	return rc;
}
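
/* Usage note (assuming this serves as the ndo_set_vf_vlan callback):
 *
 *   ip link set dev <pf-netdev> vf 0 vlan 100  (tag VF 0 traffic, vid 100)
 *   ip link set dev <pf-netdev> vf 0 vlan 0    (remove the host-side tag)
 *
 * The netdev name and ids are examples only.
 */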
/* crc is the first field in the bulletin board. Compute the crc over the
 * entire bulletin board excluding the crc field itself. Use the length field
 * as the Bulletin Board was posted by a PF with possibly a different version
 * from the vf which will sample it. Therefore, the length is computed by the
 * PF and used blindly by the VF.
 */
u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp,
			  struct pf_vf_bulletin_content *bulletin)
{
	return crc32(BULLETIN_CRC_SEED,
		     ((u8 *)bulletin) + sizeof(bulletin->crc),
		     bulletin->length - sizeof(bulletin->crc));
}
/* Check for new posts on the bulletin board */
enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
{
	struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content;
	int attempts;

	/* bulletin board hasn't changed since last sample */
	if (bp->old_bulletin.version == bulletin.version)
		return PFVF_BULLETIN_UNCHANGED;

	/* validate crc of new bulletin board */
	if (bp->old_bulletin.version != bp->pf2vf_bulletin->content.version) {
		/* sampling structure in mid post may result in corrupted data;
		 * validate crc to ensure coherency.
		 */
		for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) {
			bulletin = bp->pf2vf_bulletin->content;
			if (bulletin.crc == bnx2x_crc_vf_bulletin(bp,
								  &bulletin))
				break;
			BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n",
				  bulletin.crc,
				  bnx2x_crc_vf_bulletin(bp, &bulletin));
		}
		if (attempts >= BULLETIN_ATTEMPTS) {
			BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n",
				  attempts);
			return PFVF_BULLETIN_CRC_ERR;
		}
	}

	/* the mac address in bulletin board is valid and is new */
	if (bulletin.valid_bitmap & 1 << MAC_ADDR_VALID &&
	    !ether_addr_equal(bulletin.mac, bp->old_bulletin.mac)) {
		/* update new mac to net device */
		memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
	}

	/* the vlan in bulletin board is valid and is new */
	if (bulletin.valid_bitmap & 1 << VLAN_VALID)
		memcpy(&bulletin.vlan, &bp->old_bulletin.vlan, VLAN_HLEN);

	/* copy new bulletin board to bp */
	bp->old_bulletin = bulletin;

	return PFVF_BULLETIN_UPDATED;
}
void bnx2x_timer_sriov(struct bnx2x *bp)
{
	bnx2x_sample_bulletin(bp);

	/* if channel is down we need to self destruct */
	if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN)
		bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
				       BNX2X_MSG_IOV);
}
void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
{
	/* vf doorbells are embedded within the regview */
	return bp->regview + PXP_VF_ADDR_DB_START;
}
int bnx2x_vf_pci_alloc(struct bnx2x *bp)
{
	mutex_init(&bp->vf2pf_mutex);

	/* allocate vf2pf mailbox for vf to pf channel */
	bp->vf2pf_mbox = BNX2X_PCI_ALLOC(&bp->vf2pf_mbox_mapping,
					 sizeof(struct bnx2x_vf_mbx_msg));
	if (!bp->vf2pf_mbox)
		goto alloc_mem_err;

	/* allocate pf 2 vf bulletin board */
	bp->pf2vf_bulletin = BNX2X_PCI_ALLOC(&bp->pf2vf_bulletin_mapping,
					     sizeof(union pf_vf_bulletin));
	if (!bp->pf2vf_bulletin)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
		       sizeof(struct bnx2x_vf_mbx_msg));
	BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping,
		       sizeof(union pf_vf_bulletin));
	return -ENOMEM;
}
void bnx2x_iov_channel_down(struct bnx2x *bp)
{
	int vf_idx;
	struct pf_vf_bulletin_content *bulletin;

	if (!IS_SRIOV(bp))
		return;

	for_each_vf(bp, vf_idx) {
		/* locate this VF's bulletin board and update the channel down
		 * bit
		 */
		bulletin = BP_VF_BULLETIN(bp, vf_idx);
		bulletin->valid_bitmap |= 1 << CHANNEL_DOWN;

		/* update vf bulletin board */
		bnx2x_post_vf_bulletin(bp, vf_idx);
	}
}
void bnx2x_iov_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, iov_task.work);

	if (!netif_running(bp->dev))
		return;

	if (test_and_clear_bit(BNX2X_IOV_HANDLE_FLR,
			       &bp->iov_task_state))
		bnx2x_vf_handle_flr_event(bp);

	if (test_and_clear_bit(BNX2X_IOV_HANDLE_VF_MSG,
			       &bp->iov_task_state))
		bnx2x_vf_mbx(bp);
}
void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag)
{
	smp_mb__before_clear_bit();
	set_bit(flag, &bp->iov_task_state);
	smp_mb__after_clear_bit();
	DP(BNX2X_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
	queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0);
}
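
/* Illustrative sketch (compiled out): a producer of iov work, e.g. an
 * interrupt path noticing a VF message, would hand it off to the workqueue
 * like this; the flag value is an example only.
 */
#if 0
	bnx2x_schedule_iov_task(bp, BNX2X_IOV_HANDLE_VF_MSG);
#endif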