/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
 * Copyright (c) 2015-2018 Cavium Inc.
 * All rights reserved.
 */
10 /* calculate the crc in the bulletin board */
11 static inline uint32_t
12 bnx2x_vf_crc(struct bnx2x_vf_bulletin
*bull
)
14 uint32_t crc_sz
= sizeof(bull
->crc
), length
= bull
->length
- crc_sz
;
16 return ECORE_CRC32_LE(0, (uint8_t *)bull
+ crc_sz
, length
);
19 /* Checks are there mac/channel updates for VF
20 * returns TRUE if something was updated
23 bnx2x_check_bull(struct bnx2x_softc
*sc
)
25 struct bnx2x_vf_bulletin
*bull
;
27 uint16_t old_version
= sc
->old_bulletin
.version
;
28 uint64_t valid_bitmap
;
30 bull
= sc
->pf2vf_bulletin
;
31 if (old_version
== bull
->version
) {
34 /* Check the crc until we get the correct data */
35 while (tries
< BNX2X_VF_BULLETIN_TRIES
) {
36 bull
= sc
->pf2vf_bulletin
;
37 if (bull
->crc
== bnx2x_vf_crc(bull
))
40 PMD_DRV_LOG(ERR
, sc
, "bad crc on bulletin board. contained %x computed %x",
41 bull
->crc
, bnx2x_vf_crc(bull
));
44 if (tries
== BNX2X_VF_BULLETIN_TRIES
) {
45 PMD_DRV_LOG(ERR
, sc
, "pf to vf bulletin board crc was wrong %d consecutive times. Aborting",
51 valid_bitmap
= bull
->valid_bitmap
;
53 /* check the mac address and VLAN and allocate memory if valid */
54 if (valid_bitmap
& (1 << MAC_ADDR_VALID
) && memcmp(bull
->mac
, sc
->old_bulletin
.mac
, ETH_ALEN
))
55 rte_memcpy(&sc
->link_params
.mac_addr
, bull
->mac
, ETH_ALEN
);
56 if (valid_bitmap
& (1 << VLAN_VALID
))
57 rte_memcpy(&bull
->vlan
, &sc
->old_bulletin
.vlan
, VLAN_HLEN
);
59 sc
->old_bulletin
= *bull
;
64 /* place a given tlv on the tlv buffer at a given offset */
66 bnx2x_add_tlv(__rte_unused
struct bnx2x_softc
*sc
, void *tlvs_list
,
67 uint16_t offset
, uint16_t type
, uint16_t length
)
69 struct channel_tlv
*tl
= (struct channel_tlv
*)
70 ((unsigned long)tlvs_list
+ offset
);
76 /* Initiliaze header of the first tlv and clear mailbox*/
78 bnx2x_vf_prep(struct bnx2x_softc
*sc
, struct vf_first_tlv
*first_tlv
,
79 uint16_t type
, uint16_t length
)
81 struct bnx2x_vf_mbx_msg
*mbox
= sc
->vf2pf_mbox
;
83 rte_spinlock_lock(&sc
->vf2pf_lock
);
85 PMD_DRV_LOG(DEBUG
, sc
, "Preparing %d tlv for sending", type
);
87 memset(mbox
, 0, sizeof(struct bnx2x_vf_mbx_msg
));
89 bnx2x_add_tlv(sc
, &first_tlv
->tl
, 0, type
, length
);
91 /* Initialize header of the first tlv */
92 first_tlv
->reply_offset
= sizeof(mbox
->query
);
95 /* releases the mailbox */
97 bnx2x_vf_finalize(struct bnx2x_softc
*sc
,
98 __rte_unused
struct vf_first_tlv
*first_tlv
)
100 PMD_DRV_LOG(DEBUG
, sc
, "done sending [%d] tlv over vf pf channel",
103 rte_spinlock_unlock(&sc
->vf2pf_lock
);
/* VF->PF command channel registers and polling parameters.
 * The arithmetic macros are parenthesized so they expand safely inside
 * larger expressions (the originals, "LO + 4", could misbind when the
 * macro is used next to a higher-precedence operator).
 */
#define BNX2X_VF_CMD_ADDR_LO PXP_VF_ADDR_CSDM_GLOBAL_START
#define BNX2X_VF_CMD_ADDR_HI (BNX2X_VF_CMD_ADDR_LO + 4)
#define BNX2X_VF_CMD_TRIGGER (BNX2X_VF_CMD_ADDR_HI + 4)
#define BNX2X_VF_CHANNEL_DELAY 100 /* ms between reply-status polls */
#define BNX2X_VF_CHANNEL_TRIES 100 /* max polls before giving up */
113 bnx2x_do_req4pf(struct bnx2x_softc
*sc
, rte_iova_t phys_addr
)
115 uint8_t *status
= &sc
->vf2pf_mbox
->resp
.common_reply
.status
;
119 PMD_DRV_LOG(ERR
, sc
, "status should be zero before message"
124 bnx2x_check_bull(sc
);
125 if (sc
->old_bulletin
.valid_bitmap
& (1 << CHANNEL_DOWN
)) {
126 PMD_DRV_LOG(ERR
, sc
, "channel is down. Aborting message sending");
130 REG_WR(sc
, BNX2X_VF_CMD_ADDR_LO
, U64_LO(phys_addr
));
131 REG_WR(sc
, BNX2X_VF_CMD_ADDR_HI
, U64_HI(phys_addr
));
133 /* memory barrier to ensure that FW can read phys_addr */
136 REG_WR8(sc
, BNX2X_VF_CMD_TRIGGER
, 1);
138 /* Do several attempts until PF completes */
139 for (i
= 0; i
< BNX2X_VF_CHANNEL_TRIES
; i
++) {
140 DELAY_MS(BNX2X_VF_CHANNEL_DELAY
);
146 PMD_DRV_LOG(ERR
, sc
, "Response from PF timed out");
150 PMD_DRV_LOG(DEBUG
, sc
, "Response from PF was received");
154 static inline uint16_t bnx2x_check_me_flags(uint32_t val
)
156 if (((val
) & ME_REG_VF_VALID
) && (!((val
) & ME_REG_VF_ERR
)))
157 return ME_REG_VF_VALID
;
#define BNX2X_ME_ANSWER_DELAY 100 /* ms between doorbell reads */
#define BNX2X_ME_ANSWER_TRIES 10  /* attempts before failing */

/* Read this VF's id from the doorbell "me" register.
 *
 * Polls the doorbell until the value passes bnx2x_check_me_flags, then
 * extracts the VF id. Returns the non-negative VF id on success, or
 * -EINVAL when no valid answer arrives within the retry budget.
 *
 * NOTE(review): the success/failure return statements were reconstructed —
 * confirm against the upstream driver.
 */
static inline int bnx2x_read_vf_id(struct bnx2x_softc *sc)
{
	uint32_t val;
	uint8_t i = 0;

	while (i <= BNX2X_ME_ANSWER_TRIES) {
		val = BNX2X_DB_READ(DOORBELL_ADDR(sc, 0));
		if (bnx2x_check_me_flags(val))
			return VF_ID(val);

		DELAY_MS(BNX2X_ME_ANSWER_DELAY);
		i++;
	}

	return -EINVAL;
}
182 #define BNX2X_VF_OBTAIN_MAX_TRIES 3
183 #define BNX2X_VF_OBTAIN_MAC_FILTERS 1
184 #define BNX2X_VF_OBTAIN_MC_FILTERS 10
187 int bnx2x_loop_obtain_resources(struct bnx2x_softc
*sc
)
189 struct vf_acquire_resp_tlv
*resp
= &sc
->vf2pf_mbox
->resp
.acquire_resp
,
190 *sc_resp
= &sc
->acquire_resp
;
191 struct vf_resource_query
*res_query
;
192 struct vf_resc
*resc
;
193 int res_obtained
= false;
198 PMD_DRV_LOG(DEBUG
, sc
, "trying to get resources");
200 rc
= bnx2x_do_req4pf(sc
, sc
->vf2pf_mbox_mapping
.paddr
);
204 memcpy(sc_resp
, resp
, sizeof(sc
->acquire_resp
));
208 /* check PF to request acceptance */
209 if (sc_resp
->status
== BNX2X_VF_STATUS_SUCCESS
) {
210 PMD_DRV_LOG(DEBUG
, sc
, "resources obtained successfully");
212 } else if (sc_resp
->status
== BNX2X_VF_STATUS_NO_RESOURCES
&&
213 tries
< BNX2X_VF_OBTAIN_MAX_TRIES
) {
214 PMD_DRV_LOG(DEBUG
, sc
,
215 "PF cannot allocate requested amount of resources");
217 res_query
= &sc
->vf2pf_mbox
->query
[0].acquire
.res_query
;
218 resc
= &sc_resp
->resc
;
220 /* PF refused our request. Try to decrease request params */
221 res_query
->num_txqs
= min(res_query
->num_txqs
, resc
->num_txqs
);
222 res_query
->num_rxqs
= min(res_query
->num_rxqs
, resc
->num_rxqs
);
223 res_query
->num_sbs
= min(res_query
->num_sbs
, resc
->num_sbs
);
224 res_query
->num_mac_filters
= min(res_query
->num_mac_filters
, resc
->num_mac_filters
);
225 res_query
->num_vlan_filters
= min(res_query
->num_vlan_filters
, resc
->num_vlan_filters
);
226 res_query
->num_mc_filters
= min(res_query
->num_mc_filters
, resc
->num_mc_filters
);
228 memset(&sc
->vf2pf_mbox
->resp
, 0, sizeof(union resp_tlvs
));
230 PMD_DRV_LOG(ERR
, sc
, "Failed to get the requested "
231 "amount of resources: %d.",
235 } while (!res_obtained
);
240 int bnx2x_vf_get_resources(struct bnx2x_softc
*sc
, uint8_t tx_count
, uint8_t rx_count
)
242 struct vf_acquire_tlv
*acq
= &sc
->vf2pf_mbox
->query
[0].acquire
;
247 bnx2x_vf_prep(sc
, &acq
->first_tlv
, BNX2X_VF_TLV_ACQUIRE
, sizeof(*acq
));
249 vf_id
= bnx2x_read_vf_id(sc
);
257 acq
->res_query
.num_rxqs
= rx_count
;
258 acq
->res_query
.num_txqs
= tx_count
;
259 acq
->res_query
.num_sbs
= sc
->igu_sb_cnt
;
260 acq
->res_query
.num_mac_filters
= BNX2X_VF_OBTAIN_MAC_FILTERS
;
261 acq
->res_query
.num_mc_filters
= BNX2X_VF_OBTAIN_MC_FILTERS
;
263 acq
->bulletin_addr
= sc
->pf2vf_bulletin_mapping
.paddr
;
265 /* Request physical port identifier */
266 bnx2x_add_tlv(sc
, acq
, acq
->first_tlv
.tl
.length
,
267 BNX2X_VF_TLV_PHYS_PORT_ID
,
268 sizeof(struct channel_tlv
));
270 bnx2x_add_tlv(sc
, acq
,
271 (acq
->first_tlv
.tl
.length
+ sizeof(struct channel_tlv
)),
272 BNX2X_VF_TLV_LIST_END
,
273 sizeof(struct channel_list_end_tlv
));
275 /* requesting the resources in loop */
276 rc
= bnx2x_loop_obtain_resources(sc
);
280 struct vf_acquire_resp_tlv sc_resp
= sc
->acquire_resp
;
282 sc
->devinfo
.chip_id
|= (sc_resp
.chip_num
& 0xFFFF);
283 sc
->devinfo
.int_block
= INT_BLOCK_IGU
;
284 sc
->devinfo
.chip_port_mode
= CHIP_2_PORT_MODE
;
285 sc
->devinfo
.mf_info
.mf_ov
= 0;
286 sc
->devinfo
.mf_info
.mf_mode
= 0;
287 sc
->devinfo
.flash_size
= 0;
289 sc
->igu_sb_cnt
= sc_resp
.resc
.num_sbs
;
290 sc
->igu_base_sb
= sc_resp
.resc
.hw_sbs
[0] & 0xFF;
292 sc
->max_tx_queues
= sc_resp
.resc
.num_txqs
;
293 sc
->max_rx_queues
= sc_resp
.resc
.num_rxqs
;
295 sc
->link_params
.chip_id
= sc
->devinfo
.chip_id
;
296 sc
->doorbell_size
= sc_resp
.db_size
;
297 sc
->flags
|= BNX2X_NO_WOL_FLAG
| BNX2X_NO_ISCSI_OOO_FLAG
| BNX2X_NO_ISCSI_FLAG
| BNX2X_NO_FCOE_FLAG
;
299 PMD_DRV_LOG(DEBUG
, sc
, "status block count = %d, base status block = %x",
300 sc
->igu_sb_cnt
, sc
->igu_base_sb
);
301 strncpy(sc
->fw_ver
, sc_resp
.fw_ver
, sizeof(sc
->fw_ver
));
303 if (is_valid_assigned_ether_addr(&sc_resp
.resc
.current_mac_addr
))
304 ether_addr_copy(&sc_resp
.resc
.current_mac_addr
,
305 (struct ether_addr
*)sc
->link_params
.mac_addr
);
307 eth_random_addr(sc
->link_params
.mac_addr
);
310 bnx2x_vf_finalize(sc
, &acq
->first_tlv
);
315 /* Ask PF to release VF's resources */
317 bnx2x_vf_close(struct bnx2x_softc
*sc
)
319 struct vf_release_tlv
*query
;
320 struct vf_common_reply_tlv
*reply
= &sc
->vf2pf_mbox
->resp
.common_reply
;
321 int vf_id
= bnx2x_read_vf_id(sc
);
325 query
= &sc
->vf2pf_mbox
->query
[0].release
;
326 bnx2x_vf_prep(sc
, &query
->first_tlv
, BNX2X_VF_TLV_RELEASE
,
329 query
->vf_id
= vf_id
;
330 bnx2x_add_tlv(sc
, query
, query
->first_tlv
.tl
.length
,
331 BNX2X_VF_TLV_LIST_END
,
332 sizeof(struct channel_list_end_tlv
));
334 rc
= bnx2x_do_req4pf(sc
, sc
->vf2pf_mbox_mapping
.paddr
);
335 if (rc
|| reply
->status
!= BNX2X_VF_STATUS_SUCCESS
)
336 PMD_DRV_LOG(ERR
, sc
, "Failed to release VF");
338 bnx2x_vf_finalize(sc
, &query
->first_tlv
);
342 /* Let PF know the VF status blocks phys_addrs */
344 bnx2x_vf_init(struct bnx2x_softc
*sc
)
346 struct vf_init_tlv
*query
;
347 struct vf_common_reply_tlv
*reply
= &sc
->vf2pf_mbox
->resp
.common_reply
;
350 query
= &sc
->vf2pf_mbox
->query
[0].init
;
351 bnx2x_vf_prep(sc
, &query
->first_tlv
, BNX2X_VF_TLV_INIT
,
354 FOR_EACH_QUEUE(sc
, i
) {
355 query
->sb_addr
[i
] = (unsigned long)(sc
->fp
[i
].sb_dma
.paddr
);
358 query
->stats_step
= sizeof(struct per_queue_stats
);
359 query
->stats_addr
= sc
->fw_stats_data_mapping
+
360 offsetof(struct bnx2x_fw_stats_data
, queue_stats
);
362 bnx2x_add_tlv(sc
, query
, query
->first_tlv
.tl
.length
,
363 BNX2X_VF_TLV_LIST_END
,
364 sizeof(struct channel_list_end_tlv
));
366 rc
= bnx2x_do_req4pf(sc
, sc
->vf2pf_mbox_mapping
.paddr
);
369 if (reply
->status
!= BNX2X_VF_STATUS_SUCCESS
) {
370 PMD_DRV_LOG(ERR
, sc
, "Failed to init VF");
375 PMD_DRV_LOG(DEBUG
, sc
, "VF was initialized");
377 bnx2x_vf_finalize(sc
, &query
->first_tlv
);
382 bnx2x_vf_unload(struct bnx2x_softc
*sc
)
384 struct vf_close_tlv
*query
;
385 struct vf_common_reply_tlv
*reply
= &sc
->vf2pf_mbox
->resp
.common_reply
;
386 struct vf_q_op_tlv
*query_op
;
389 vf_id
= bnx2x_read_vf_id(sc
);
391 FOR_EACH_QUEUE(sc
, i
) {
392 query_op
= &sc
->vf2pf_mbox
->query
[0].q_op
;
393 bnx2x_vf_prep(sc
, &query_op
->first_tlv
,
394 BNX2X_VF_TLV_TEARDOWN_Q
,
397 query_op
->vf_qid
= i
;
399 bnx2x_add_tlv(sc
, query_op
,
400 query_op
->first_tlv
.tl
.length
,
401 BNX2X_VF_TLV_LIST_END
,
402 sizeof(struct channel_list_end_tlv
));
404 rc
= bnx2x_do_req4pf(sc
, sc
->vf2pf_mbox_mapping
.paddr
);
405 if (rc
|| reply
->status
!= BNX2X_VF_STATUS_SUCCESS
)
407 "Bad reply for vf_q %d teardown", i
);
409 bnx2x_vf_finalize(sc
, &query_op
->first_tlv
);
412 bnx2x_vf_set_mac(sc
, false);
414 query
= &sc
->vf2pf_mbox
->query
[0].close
;
415 bnx2x_vf_prep(sc
, &query
->first_tlv
, BNX2X_VF_TLV_CLOSE
,
418 query
->vf_id
= vf_id
;
420 bnx2x_add_tlv(sc
, query
, query
->first_tlv
.tl
.length
,
421 BNX2X_VF_TLV_LIST_END
,
422 sizeof(struct channel_list_end_tlv
));
424 rc
= bnx2x_do_req4pf(sc
, sc
->vf2pf_mbox_mapping
.paddr
);
425 if (rc
|| reply
->status
!= BNX2X_VF_STATUS_SUCCESS
)
427 "Bad reply from PF for close message");
429 bnx2x_vf_finalize(sc
, &query
->first_tlv
);
433 static inline uint16_t
434 bnx2x_vf_q_flags(uint8_t leading
)
436 uint16_t flags
= leading
? BNX2X_VF_Q_FLAG_LEADING_RSS
: 0;
438 flags
|= BNX2X_VF_Q_FLAG_CACHE_ALIGN
;
439 flags
|= BNX2X_VF_Q_FLAG_STATS
;
440 flags
|= BNX2X_VF_Q_FLAG_VLAN
;
446 bnx2x_vf_rx_q_prep(struct bnx2x_softc
*sc
, struct bnx2x_fastpath
*fp
,
447 struct vf_rxq_params
*rxq_init
, uint16_t flags
)
449 struct bnx2x_rx_queue
*rxq
;
451 rxq
= sc
->rx_queues
[fp
->index
];
453 PMD_DRV_LOG(ERR
, sc
, "RX queue %d is NULL", fp
->index
);
457 rxq_init
->rcq_addr
= rxq
->cq_ring_phys_addr
;
458 rxq_init
->rcq_np_addr
= rxq
->cq_ring_phys_addr
+ BNX2X_PAGE_SIZE
;
459 rxq_init
->rxq_addr
= rxq
->rx_ring_phys_addr
;
460 rxq_init
->vf_sb_id
= fp
->index
;
461 rxq_init
->sb_cq_index
= HC_INDEX_ETH_RX_CQ_CONS
;
462 rxq_init
->mtu
= sc
->mtu
;
463 rxq_init
->buf_sz
= fp
->rx_buf_size
;
464 rxq_init
->flags
= flags
;
465 rxq_init
->stat_id
= -1;
466 rxq_init
->cache_line_log
= BNX2X_RX_ALIGN_SHIFT
;
470 bnx2x_vf_tx_q_prep(struct bnx2x_softc
*sc
, struct bnx2x_fastpath
*fp
,
471 struct vf_txq_params
*txq_init
, uint16_t flags
)
473 struct bnx2x_tx_queue
*txq
;
475 txq
= sc
->tx_queues
[fp
->index
];
477 PMD_DRV_LOG(ERR
, sc
, "TX queue %d is NULL", fp
->index
);
481 txq_init
->txq_addr
= txq
->tx_ring_phys_addr
;
482 txq_init
->sb_index
= HC_INDEX_ETH_TX_CQ_CONS_COS0
;
483 txq_init
->flags
= flags
;
484 txq_init
->traffic_type
= LLFC_TRAFFIC_TYPE_NW
;
485 txq_init
->vf_sb_id
= fp
->index
;
489 bnx2x_vf_setup_queue(struct bnx2x_softc
*sc
, struct bnx2x_fastpath
*fp
, int leading
)
491 struct vf_setup_q_tlv
*query
;
492 struct vf_common_reply_tlv
*reply
= &sc
->vf2pf_mbox
->resp
.common_reply
;
493 uint16_t flags
= bnx2x_vf_q_flags(leading
);
496 query
= &sc
->vf2pf_mbox
->query
[0].setup_q
;
497 bnx2x_vf_prep(sc
, &query
->first_tlv
, BNX2X_VF_TLV_SETUP_Q
,
500 query
->vf_qid
= fp
->index
;
501 query
->param_valid
= VF_RXQ_VALID
| VF_TXQ_VALID
;
503 bnx2x_vf_rx_q_prep(sc
, fp
, &query
->rxq
, flags
);
504 bnx2x_vf_tx_q_prep(sc
, fp
, &query
->txq
, flags
);
506 bnx2x_add_tlv(sc
, query
, query
->first_tlv
.tl
.length
,
507 BNX2X_VF_TLV_LIST_END
,
508 sizeof(struct channel_list_end_tlv
));
510 rc
= bnx2x_do_req4pf(sc
, sc
->vf2pf_mbox_mapping
.paddr
);
513 if (reply
->status
!= BNX2X_VF_STATUS_SUCCESS
) {
514 PMD_DRV_LOG(ERR
, sc
, "Failed to setup VF queue[%d]",
519 bnx2x_vf_finalize(sc
, &query
->first_tlv
);
525 bnx2x_vf_set_mac(struct bnx2x_softc
*sc
, int set
)
527 struct vf_set_q_filters_tlv
*query
;
528 struct vf_common_reply_tlv
*reply
;
531 query
= &sc
->vf2pf_mbox
->query
[0].set_q_filters
;
532 bnx2x_vf_prep(sc
, &query
->first_tlv
, BNX2X_VF_TLV_SET_Q_FILTERS
,
535 query
->vf_qid
= sc
->fp
->index
;
536 query
->mac_filters_cnt
= 1;
537 query
->flags
= BNX2X_VF_MAC_VLAN_CHANGED
;
539 query
->filters
[0].flags
= (set
? BNX2X_VF_Q_FILTER_SET_MAC
: 0) |
540 BNX2X_VF_Q_FILTER_DEST_MAC_VALID
;
542 bnx2x_check_bull(sc
);
544 rte_memcpy(query
->filters
[0].mac
, sc
->link_params
.mac_addr
, ETH_ALEN
);
546 bnx2x_add_tlv(sc
, query
, query
->first_tlv
.tl
.length
,
547 BNX2X_VF_TLV_LIST_END
,
548 sizeof(struct channel_list_end_tlv
));
550 rc
= bnx2x_do_req4pf(sc
, sc
->vf2pf_mbox_mapping
.paddr
);
553 reply
= &sc
->vf2pf_mbox
->resp
.common_reply
;
555 while (BNX2X_VF_STATUS_FAILURE
== reply
->status
&&
556 bnx2x_check_bull(sc
)) {
557 /* A new mac was configured by PF for us */
558 rte_memcpy(sc
->link_params
.mac_addr
, sc
->pf2vf_bulletin
->mac
,
560 rte_memcpy(query
->filters
[0].mac
, sc
->pf2vf_bulletin
->mac
,
563 rc
= bnx2x_do_req4pf(sc
, sc
->vf2pf_mbox_mapping
.paddr
);
568 if (BNX2X_VF_STATUS_SUCCESS
!= reply
->status
) {
569 PMD_DRV_LOG(ERR
, sc
, "Bad reply from PF for SET MAC message: %d",
574 bnx2x_vf_finalize(sc
, &query
->first_tlv
);
580 bnx2x_vf_config_rss(struct bnx2x_softc
*sc
,
581 struct ecore_config_rss_params
*params
)
583 struct vf_rss_tlv
*query
;
584 struct vf_common_reply_tlv
*reply
= &sc
->vf2pf_mbox
->resp
.common_reply
;
587 query
= &sc
->vf2pf_mbox
->query
[0].update_rss
;
589 bnx2x_vf_prep(sc
, &query
->first_tlv
, BNX2X_VF_TLV_UPDATE_RSS
,
592 /* add list termination tlv */
593 bnx2x_add_tlv(sc
, query
, query
->first_tlv
.tl
.length
,
594 BNX2X_VF_TLV_LIST_END
,
595 sizeof(struct channel_list_end_tlv
));
597 rte_memcpy(query
->rss_key
, params
->rss_key
, sizeof(params
->rss_key
));
598 query
->rss_key_size
= T_ETH_RSS_KEY
;
600 rte_memcpy(query
->ind_table
, params
->ind_table
, T_ETH_INDIRECTION_TABLE_SIZE
);
601 query
->ind_table_size
= T_ETH_INDIRECTION_TABLE_SIZE
;
603 query
->rss_result_mask
= params
->rss_result_mask
;
604 query
->rss_flags
= params
->rss_flags
;
606 rc
= bnx2x_do_req4pf(sc
, sc
->vf2pf_mbox_mapping
.paddr
);
610 if (reply
->status
!= BNX2X_VF_STATUS_SUCCESS
) {
611 PMD_DRV_LOG(ERR
, sc
, "Failed to configure RSS");
615 bnx2x_vf_finalize(sc
, &query
->first_tlv
);
621 bnx2x_vf_set_rx_mode(struct bnx2x_softc
*sc
)
623 struct vf_set_q_filters_tlv
*query
;
624 struct vf_common_reply_tlv
*reply
= &sc
->vf2pf_mbox
->resp
.common_reply
;
627 query
= &sc
->vf2pf_mbox
->query
[0].set_q_filters
;
628 bnx2x_vf_prep(sc
, &query
->first_tlv
, BNX2X_VF_TLV_SET_Q_FILTERS
,
632 query
->flags
= BNX2X_VF_RX_MASK_CHANGED
;
634 switch (sc
->rx_mode
) {
635 case BNX2X_RX_MODE_NONE
: /* no Rx */
636 query
->rx_mask
= VFPF_RX_MASK_ACCEPT_NONE
;
638 case BNX2X_RX_MODE_NORMAL
:
639 query
->rx_mask
= VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST
;
640 query
->rx_mask
|= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST
;
641 query
->rx_mask
|= VFPF_RX_MASK_ACCEPT_BROADCAST
;
643 case BNX2X_RX_MODE_ALLMULTI
:
644 query
->rx_mask
= VFPF_RX_MASK_ACCEPT_ALL_MULTICAST
;
645 query
->rx_mask
|= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST
;
646 query
->rx_mask
|= VFPF_RX_MASK_ACCEPT_BROADCAST
;
648 case BNX2X_RX_MODE_ALLMULTI_PROMISC
:
649 case BNX2X_RX_MODE_PROMISC
:
650 query
->rx_mask
= VFPF_RX_MASK_ACCEPT_ALL_UNICAST
;
651 query
->rx_mask
|= VFPF_RX_MASK_ACCEPT_ALL_MULTICAST
;
652 query
->rx_mask
|= VFPF_RX_MASK_ACCEPT_BROADCAST
;
655 PMD_DRV_LOG(ERR
, sc
, "BAD rx mode (%d)", sc
->rx_mode
);
660 bnx2x_add_tlv(sc
, query
, query
->first_tlv
.tl
.length
,
661 BNX2X_VF_TLV_LIST_END
,
662 sizeof(struct channel_list_end_tlv
));
664 rc
= bnx2x_do_req4pf(sc
, sc
->vf2pf_mbox_mapping
.paddr
);
668 if (reply
->status
!= BNX2X_VF_STATUS_SUCCESS
) {
669 PMD_DRV_LOG(ERR
, sc
, "Failed to set RX mode");
674 bnx2x_vf_finalize(sc
, &query
->first_tlv
);