BNX2X_VFOP_QTEARDOWN_DONE
};
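+/* rss configuration is a single ramrod, so the vfop only needs a
+ * config state and a done state
+ */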
+enum bnx2x_vfop_rss_state {
+ BNX2X_VFOP_RSS_CONFIG,
+ BNX2X_VFOP_RSS_DONE
+};
+
#define bnx2x_vfop_reset_wq(vf) atomic_set(&vf->op_in_progress, 0)
void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
__set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
__set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);
- if (vfq_is_leading(q)) {
- __set_bit(BNX2X_Q_FLG_LEADING_RSS, &setup_p->flags);
- __set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags);
- }
-
/* Setup-op rx parameters */
if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;
BNX2X_Q_LOGICAL_STATE_STOPPED) {
DP(BNX2X_MSG_IOV,
"Entered qdtor but queue was already stopped. Aborting gracefully\n");
- goto op_done;
+
+ /* next state */
+ vfop->state = BNX2X_VFOP_QDTOR_DONE;
+
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
}
/* next state */
op_done:
case BNX2X_VFOP_QDTOR_DONE:
/* invalidate the context */
- qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
- qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
+ if (qdtor->cxt) {
+ qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
+ qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
+ }
bnx2x_vfop_end(bp, vf, vfop);
return;
default:
return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
cmd->block);
}
- DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop.\n", vf->abs_vfid);
+ DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop. rc %d\n",
+ vf->abs_vfid, vfop->rc);
return -ENOMEM;
}
{
struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
if (vf) {
+ /* the first igu entry belonging to VFs of this PF */
+ if (!BP_VFDB(bp)->first_vf_igu_entry)
+ BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id;
+
+ /* the first igu entry belonging to this VF */
if (!vf_sb_count(vf))
vf->igu_base_id = igu_sb_id;
+
++vf_sb_count(vf);
+ ++vf->sb_count;
}
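+ /* every igu entry owned by a vf of this pf enlarges the sb pool
+ * that is later divided between the vfs
+ */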
+ BP_VFDB(bp)->vf_sbs_pool++;
}
/* VFOP MAC/VLAN helpers */
int qid, bool drv_only)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+ int rc;
if (vfop) {
struct bnx2x_vfop_args_filters filters = {
bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
/* set object */
+ rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
+ if (rc)
+ return rc;
ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
/* set extra args */
int qid, bool drv_only)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+ int rc;
if (vfop) {
struct bnx2x_vfop_args_filters filters = {
bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
/* set object */
+ rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
+ if (rc)
+ return rc;
ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
/* set extra args */
int qid, u16 vid, bool add)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+ int rc;
if (vfop) {
struct bnx2x_vfop_args_filters filters = {
ramrod->user_req.u.vlan.vlan = vid;
/* set object */
+ rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
+ if (rc)
+ return rc;
ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
/* set extra args */
int qid, bool drv_only)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+ int rc;
if (vfop) {
struct bnx2x_vfop_args_filters filters = {
bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
/* set object */
+ rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
+ if (rc)
+ return rc;
ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
/* set extra args */
int qid, bool drv_only)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+ int rc;
if (vfop) {
struct bnx2x_vfop_args_filters filters = {
bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
/* set object */
+ rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
+ if (rc)
+ return rc;
ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
/* set extra args */
case BNX2X_VFOP_QFLR_CLR_VLAN:
/* vlan-clear-all: driver-only, don't consume credit */
vfop->state = BNX2X_VFOP_QFLR_CLR_MAC;
- vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, true);
+ if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj)))
+ vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid,
+ true);
if (vfop->rc)
goto op_err;
- return;
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
case BNX2X_VFOP_QFLR_CLR_MAC:
/* mac-clear-all: driver only consume credit */
vfop->state = BNX2X_VFOP_QFLR_TERMINATE;
- vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, true);
+ if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj)))
+ vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid,
+ true);
DP(BNX2X_MSG_IOV,
"VF[%d] vfop->rc after bnx2x_vfop_mac_delall_cmd was %d",
vf->abs_vfid, vfop->rc);
if (vfop->rc)
goto op_err;
- return;
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
case BNX2X_VFOP_QFLR_TERMINATE:
qstate = &vfop->op_p->qctor.qstate;
{
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+ /* for non-leading queues skip directly to the qdown state */
if (vfop) {
vfop->args.qx.qid = qid;
- bnx2x_vfop_opset(BNX2X_VFOP_QTEARDOWN_RXMODE,
- bnx2x_vfop_qdown, cmd->done);
+ bnx2x_vfop_opset(qid == LEADING_IDX ?
+ BNX2X_VFOP_QTEARDOWN_RXMODE :
+ BNX2X_VFOP_QTEARDOWN_QDTOR, bnx2x_vfop_qdown,
+ cmd->done);
return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown,
cmd->block);
}
* both known
*/
static void
-bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc)
+bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
+ struct vf_pf_resc_request *resc = &vf->alloc_resc;
u16 vlan_count = 0;
/* will be set only during VF-ACQUIRE */
resc->num_rxqs = 0;
resc->num_txqs = 0;
- /* no credit calculcis for macs (just yet) */
+ /* no credit calculations for macs (just yet) */
resc->num_mac_filters = 1;
/* divvy up vlan rules */
resc->num_mc_filters = 0;
- /* num_sbs already set */
+ /* num_sbs is taken from the igu cam scan results */
+ resc->num_sbs = vf->sb_count;
}
/* FLR routines: */
static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
/* reset the state variables */
- bnx2x_iov_static_resc(bp, &vf->alloc_resc);
+ bnx2x_iov_static_resc(bp, vf);
vf->state = VF_FREE;
}
/* The VF doorbell size 0 - *B, 4 - 128B. We set it here to match
* the Pf doorbell size although the 2 are independent.
*/
- REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST,
- BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT);
+ REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3);
/* No security checks for now -
* configure single rule (out of 16) mask = 0x1, value = 0x0,
{
int sb_id;
u32 val;
- u8 fid;
+ u8 fid, current_pf = 0;
/* IGU in normal mode - read CAM */
for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
continue;
fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
- if (!(fid & IGU_FID_ENCODE_IS_PF))
+ if (fid & IGU_FID_ENCODE_IS_PF)
+ current_pf = fid & IGU_FID_PF_NUM_MASK;
+ else if (current_pf == BP_ABS_FUNC(bp))
bnx2x_vf_set_igu_info(bp, sb_id,
(fid & IGU_FID_VF_NUM_MASK));
-
DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
(fid & IGU_FID_VF_NUM_MASK)), sb_id,
GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
}
+ DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool);
}
static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
return 0;
}
-static u8 bnx2x_iov_get_max_queue_count(struct bnx2x *bp)
-{
- int i;
- u8 queue_count = 0;
-
- if (IS_SRIOV(bp))
- for_each_vf(bp, i)
- queue_count += bnx2x_vf(bp, i, alloc_resc.num_sbs);
-
- return queue_count;
-}
-
/* must be called after PF bars are mapped */
int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
- int num_vfs_param)
+ int num_vfs_param)
{
- int err, i, qcount;
+ int err, i;
struct bnx2x_sriov *iov;
struct pci_dev *dev = bp->pdev;
/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
bnx2x_get_vf_igu_cam_info(bp);
- /* get the total queue count and allocate the global queue arrays */
- qcount = bnx2x_iov_get_max_queue_count(bp);
-
/* allocate the queue arrays for all VFs */
- bp->vfdb->vfqs = kzalloc(qcount * sizeof(struct bnx2x_vf_queue),
- GFP_KERNEL);
+ bp->vfdb->vfqs = kzalloc(
+ BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue),
+ GFP_KERNEL);
+
+ DP(BNX2X_MSG_IOV, "bp->vfdb->vfqs was %p\n", bp->vfdb->vfqs);
+
if (!bp->vfdb->vfqs) {
BNX2X_ERR("failed to allocate vf queue array\n");
err = -ENOMEM;
q_type);
DP(BNX2X_MSG_IOV,
- "initialized vf %d's queue object. func id set to %d\n",
- vf->abs_vfid, q->sp_obj.func_id);
-
- /* mac/vlan objects are per queue, but only those
- * that belong to the leading queue are initialized
- */
- if (vfq_is_leading(q)) {
- /* mac */
- bnx2x_init_mac_obj(bp, &q->mac_obj,
- cl_id, q->cid, func_id,
- bnx2x_vf_sp(bp, vf, mac_rdata),
- bnx2x_vf_sp_map(bp, vf, mac_rdata),
- BNX2X_FILTER_MAC_PENDING,
- &vf->filter_state,
- BNX2X_OBJ_TYPE_RX_TX,
- &bp->macs_pool);
- /* vlan */
- bnx2x_init_vlan_obj(bp, &q->vlan_obj,
- cl_id, q->cid, func_id,
- bnx2x_vf_sp(bp, vf, vlan_rdata),
- bnx2x_vf_sp_map(bp, vf, vlan_rdata),
- BNX2X_FILTER_VLAN_PENDING,
- &vf->filter_state,
- BNX2X_OBJ_TYPE_RX_TX,
- &bp->vlans_pool);
-
- /* mcast */
- bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
- q->cid, func_id, func_id,
- bnx2x_vf_sp(bp, vf, mcast_rdata),
- bnx2x_vf_sp_map(bp, vf, mcast_rdata),
- BNX2X_FILTER_MCAST_PENDING,
- &vf->filter_state,
- BNX2X_OBJ_TYPE_RX_TX);
-
- vf->leading_rss = cl_id;
- }
+ "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n",
+ vf->abs_vfid, q->sp_obj.func_id, q->cid);
}
/* called by bnx2x_nic_load */
int bnx2x_iov_nic_init(struct bnx2x *bp)
{
- int vfid, qcount, i;
+ int vfid;
if (!IS_SRIOV(bp)) {
DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);
/* init statically provisioned resources */
- bnx2x_iov_static_resc(bp, &vf->alloc_resc);
+ bnx2x_iov_static_resc(bp, vf);
/* queues are initialized during VF-ACQUIRE */
}
/* Final VF init */
- qcount = 0;
- for_each_vf(bp, i) {
- struct bnx2x_virtf *vf = BP_VF(bp, i);
+ for_each_vf(bp, vfid) {
+ struct bnx2x_virtf *vf = BP_VF(bp, vfid);
/* fill in the BDF and bars */
- vf->bus = bnx2x_vf_bus(bp, i);
- vf->devfn = bnx2x_vf_devfn(bp, i);
+ vf->bus = bnx2x_vf_bus(bp, vfid);
+ vf->devfn = bnx2x_vf_devfn(bp, vfid);
bnx2x_vf_set_bars(bp, vf);
DP(BNX2X_MSG_IOV,
(unsigned)vf->bars[0].bar, vf->bars[0].size,
(unsigned)vf->bars[1].bar, vf->bars[1].size,
(unsigned)vf->bars[2].bar, vf->bars[2].size);
-
- /* set local queue arrays */
- vf->vfqs = &bp->vfdb->vfqs[qcount];
- qcount += bnx2x_vf(bp, i, alloc_resc.num_sbs);
}
return 0;
for_each_vfq(vf, j) {
struct bnx2x_vf_queue *rxq = vfq_get(vf, j);
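+ /* each queue's stats live at a vf-provided stride inside the
+ * vf's stats buffer
+ */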
+ dma_addr_t q_stats_addr =
+ vf->fw_stat_map + j * vf->stats_stride;
+
/* collect stats fro active queues only */
if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
BNX2X_Q_LOGICAL_STATE_STOPPED)
/* create stats query entry for this queue */
cur_query_entry->kind = STATS_TYPE_QUEUE;
- cur_query_entry->index = vfq_cl_id(vf, rxq);
+ cur_query_entry->index = vfq_stat_id(vf, rxq);
cur_query_entry->funcID =
cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
cur_query_entry->address.hi =
- cpu_to_le32(U64_HI(vf->fw_stat_map));
+ cpu_to_le32(U64_HI(q_stats_addr));
cur_query_entry->address.lo =
- cpu_to_le32(U64_LO(vf->fw_stat_map));
+ cpu_to_le32(U64_LO(q_stats_addr));
DP(BNX2X_MSG_IOV,
"added address %x %x for vf %d queue %d client %d\n",
cur_query_entry->address.hi,
cur_query_entry++;
cur_data_offset += sizeof(struct per_queue_stats);
stats_count++;
+
+ /* all stats are coalesced to the leading queue */
+ if (vf->cfg_flags & VF_CFG_STATS_COALESCE)
+ break;
}
}
bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
for_each_vf(bp, i) {
struct bnx2x_virtf *vf = BP_VF(bp, i);
+ if (!vf) {
+ BNX2X_ERR("VF was null! skipping...\n");
+ continue;
+ }
+
if (!list_empty(&vf->op_list_head) &&
atomic_read(&vf->op_in_progress)) {
DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i);
struct bnx2x_vf_queue *q = vfq_get(vf, i);
if (!q) {
- DP(BNX2X_MSG_IOV, "q number %d was not allocated\n", i);
+ BNX2X_ERR("q number %d was not allocated\n", i);
return -EINVAL;
}
bnx2x_vfop_end(bp, vf, vfop);
}
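+/* vfop to configure the vf's rss engine: a single rss-config ramrod
+ * followed by completion
+ */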
+static void bnx2x_vfop_rss(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+ enum bnx2x_vfop_rss_state state;
+
+ if (!vfop) {
+ BNX2X_ERR("vfop was null\n");
+ return;
+ }
+
+ state = vfop->state;
+ bnx2x_vfop_reset_wq(vf);
+
+ if (vfop->rc < 0)
+ goto op_err;
+
+ DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+
+ switch (state) {
+ case BNX2X_VFOP_RSS_CONFIG:
+ /* next state */
+ vfop->state = BNX2X_VFOP_RSS_DONE;
+ bnx2x_config_rss(bp, &vfop->op_p->rss);
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+op_err:
+ BNX2X_ERR("RSS error: rc %d\n", vfop->rc);
+op_done:
+ case BNX2X_VFOP_RSS_DONE:
+ bnx2x_vfop_end(bp, vf, vfop);
+ return;
+ default:
+ bnx2x_vfop_default(state);
+ }
+op_pending:
+ return;
+}
+
int bnx2x_vfop_release_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd)
return -ENOMEM;
}
+int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+ if (vfop) {
+ bnx2x_vfop_opset(BNX2X_VFOP_RSS_CONFIG, bnx2x_vfop_rss,
+ cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rss,
+ cmd->block);
+ }
+ return -ENOMEM;
+}
+
/* VF release ~ VF close + VF release-resources
* Release is the ultimate SW shutdown and is called whenever an
* irrecoverable error is encountered.
.block = block,
};
int rc;
+
+ DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid);
bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
enum channel_tlvs tlv)
{
+ /* we don't lock the channel for unsupported tlvs */
+ if (!bnx2x_tlv_supported(tlv)) {
+ BNX2X_ERR("attempting to lock with unsupported tlv. Aborting\n");
+ return;
+ }
+
/* lock the channel */
mutex_lock(&vf->op_mutex);
void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
enum channel_tlvs expected_tlv)
{
+ enum channel_tlvs current_tlv;
+
+ if (!vf) {
+ BNX2X_ERR("VF was %p\n", vf);
+ return;
+ }
+
+ current_tlv = vf->op_current;
+
+ /* we don't unlock the channel for unsupported tlvs */
+ if (!bnx2x_tlv_supported(expected_tlv))
+ return;
+
WARN(expected_tlv != vf->op_current,
"lock mismatch: expected %d found %d", expected_tlv,
vf->op_current);
+ /* clear the record of the current locking op */
+ vf->op_current = CHANNEL_TLV_NONE;
+
- /* lock the channel */
+ /* unlock the channel */
mutex_unlock(&vf->op_mutex);
/* log the unlock */
DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
- vf->abs_vfid, vf->op_current);
+ vf->abs_vfid, current_tlv);
-
- /* record the locking op */
- vf->op_current = CHANNEL_TLV_NONE;
}
int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
return bnx2x_enable_sriov(bp);
}
}
+#define IGU_ENTRY_SIZE 4
int bnx2x_enable_sriov(struct bnx2x *bp)
{
int rc = 0, req_vfs = bp->requested_nr_virtfn;
+ int vf_idx, sb_idx, vfq_idx, qcount, first_vf;
+ u32 igu_entry, address;
+ u16 num_vf_queues;
+
+ if (req_vfs == 0)
+ return 0;
+
+ first_vf = bp->vfdb->sriov.first_vf_in_pf;
+
+ /* statically distribute vf sb pool between VFs */
+ num_vf_queues = min_t(u16, BNX2X_VF_MAX_QUEUES,
+ BP_VFDB(bp)->vf_sbs_pool / req_vfs);
+
+ /* zero previous values learned from igu cam */
+ for (vf_idx = 0; vf_idx < req_vfs; vf_idx++) {
+ struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
+
+ vf->sb_count = 0;
+ vf_sb_count(BP_VF(bp, vf_idx)) = 0;
+ }
+ bp->vfdb->vf_sbs_pool = 0;
+
+ /* prepare IGU cam */
+ sb_idx = BP_VFDB(bp)->first_vf_igu_entry;
+ address = IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE;
+ for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
+ for (vfq_idx = 0; vfq_idx < num_vf_queues; vfq_idx++) {
+ igu_entry = vf_idx << IGU_REG_MAPPING_MEMORY_FID_SHIFT |
+ vfq_idx << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT |
+ IGU_REG_MAPPING_MEMORY_VALID;
+ DP(BNX2X_MSG_IOV, "assigning sb %d to vf %d\n",
+ sb_idx, vf_idx);
+ REG_WR(bp, address, igu_entry);
+ sb_idx++;
+ address += IGU_ENTRY_SIZE;
+ }
+ }
+
+ /* Reinitialize vf database according to igu cam */
+ bnx2x_get_vf_igu_cam_info(bp);
+
+ DP(BNX2X_MSG_IOV, "vf_sbs_pool %d, num_vf_queues %d\n",
+ BP_VFDB(bp)->vf_sbs_pool, num_vf_queues);
+
+ qcount = 0;
+ for_each_vf(bp, vf_idx) {
+ struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
+ /* set local queue arrays */
+ vf->vfqs = &bp->vfdb->vfqs[qcount];
+ qcount += vf_sb_count(vf);
+ }
+
+ /* prepare msix vectors in VF configuration space */
+ for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
+ bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
+ REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
+ num_vf_queues);
+ }
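+ /* stop pretending; restore the pf's own function view */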
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+
+ /* enable sriov. This will probe all the VFs, and consequently cause
+ * the "acquire" messages to appear on the VF PF channel.
+ */
+ DP(BNX2X_MSG_IOV, "about to call enable sriov\n");
+ pci_disable_sriov(bp->pdev);
rc = pci_enable_sriov(bp->pdev, req_vfs);
if (rc) {
BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
pci_disable_sriov(bp->pdev);
}
-static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx,
- struct bnx2x_virtf **vf,
- struct pf_vf_bulletin_content **bulletin)
+int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, struct bnx2x_virtf **vf,
+ struct pf_vf_bulletin_content **bulletin)
{
if (bp->state != BNX2X_STATE_OPEN) {
BNX2X_ERR("vf ndo called though PF is down\n");
*bulletin = BP_VF_BULLETIN(bp, vfidx);
if (!*vf) {
- BNX2X_ERR("vf ndo called but vf was null. vfidx was %d\n",
+ BNX2X_ERR("vf ndo called but vf struct is null. vfidx was %d\n",
+ vfidx);
+ return -EINVAL;
+ }
+
+ if (!(*vf)->vfqs) {
+ BNX2X_ERR("vf ndo called but vfqs struct is null. Was ndo invoked before dynamically enabling SR-IOV? vfidx was %d\n",
vfidx);
return -EINVAL;
}
rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
if (rc)
return rc;
- mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
- vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj);
+ mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
+ vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
if (!mac_obj || !vlan_obj) {
BNX2X_ERR("VF partially initialized\n");
return -EINVAL;
ivi->spoofchk = 1; /*always enabled */
if (vf->state == VF_ENABLED) {
/* mac and vlan are in vlan_mac objects */
- mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
- 0, ETH_ALEN);
- vlan_obj->get_n_elements(bp, vlan_obj, 1, (u8 *)&ivi->vlan,
- 0, VLAN_HLEN);
+ if (!validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)))
+ mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
+ 0, ETH_ALEN);
+ if (!validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, vlan_obj)))
+ vlan_obj->get_n_elements(bp, vlan_obj, 1,
+ (u8 *)&ivi->vlan, 0,
+ VLAN_HLEN);
} else {
/* mac */
if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
return rc;
}
- /* is vf initialized and queue set up? */
q_logical_state =
- bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj));
+ bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
if (vf->state == VF_ENABLED &&
q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
/* configure the mac in device on this vf's queue */
unsigned long ramrod_flags = 0;
- struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
+ struct bnx2x_vlan_mac_obj *mac_obj =
+ &bnx2x_leading_vfq(vf, mac_obj);
+
+ rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
+ if (rc)
+ return rc;
/* must lock vfpf channel to protect against vf flows */
bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
/* is vf initialized and queue set up? */
q_logical_state =
- bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj));
+ bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
if (vf->state == VF_ENABLED &&
q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
/* configure the vlan in device on this vf's queue */
unsigned long ramrod_flags = 0;
unsigned long vlan_mac_flags = 0;
struct bnx2x_vlan_mac_obj *vlan_obj =
- &bnx2x_vfq(vf, 0, vlan_obj);
+ &bnx2x_leading_vfq(vf, vlan_obj);
struct bnx2x_vlan_mac_ramrod_params ramrod_param;
struct bnx2x_queue_state_params q_params = {NULL};
struct bnx2x_queue_update_params *update_params;
+ rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, vlan_obj));
+ if (rc)
+ return rc;
memset(&ramrod_param, 0, sizeof(ramrod_param));
/* must lock vfpf channel to protect against vf flows */
*/
__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
q_params.cmd = BNX2X_Q_CMD_UPDATE;
- q_params.q_obj = &bnx2x_vfq(vf, 0, sp_obj);
+ q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
update_params = &q_params.params.update;
__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
&update_params->update_flags);
/* humble our request */
req->resc_request.num_txqs =
- bp->acquire_resp.resc.num_txqs;
+ min(req->resc_request.num_txqs,
+ bp->acquire_resp.resc.num_txqs);
req->resc_request.num_rxqs =
- bp->acquire_resp.resc.num_rxqs;
+ min(req->resc_request.num_rxqs,
+ bp->acquire_resp.resc.num_rxqs);
req->resc_request.num_sbs =
- bp->acquire_resp.resc.num_sbs;
+ min(req->resc_request.num_sbs,
+ bp->acquire_resp.resc.num_sbs);
req->resc_request.num_mac_filters =
- bp->acquire_resp.resc.num_mac_filters;
+ min(req->resc_request.num_mac_filters,
+ bp->acquire_resp.resc.num_mac_filters);
req->resc_request.num_vlan_filters =
- bp->acquire_resp.resc.num_vlan_filters;
+ min(req->resc_request.num_vlan_filters,
+ bp->acquire_resp.resc.num_vlan_filters);
req->resc_request.num_mc_filters =
- bp->acquire_resp.resc.num_mc_filters;
+ min(req->resc_request.num_mc_filters,
+ bp->acquire_resp.resc.num_mc_filters);
/* Clear response buffer */
memset(&bp->vf2pf_mbox->resp, 0,
bp->common.flash_size = 0;
bp->flags |=
NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG;
- bp->igu_sb_cnt = 1;
+ bp->igu_sb_cnt = bp->acquire_resp.resc.num_sbs;
bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
sizeof(bp->fw_ver));
bnx2x_free_irq(bp);
}
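+/* the leading queue carries the vf's classification objects (mac, vlan,
+ * mcast) and its rss configuration object
+ */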
+static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_queue *q)
+{
+ u8 cl_id = vfq_cl_id(vf, q);
+ u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
+
+ /* mac */
+ bnx2x_init_mac_obj(bp, &q->mac_obj,
+ cl_id, q->cid, func_id,
+ bnx2x_vf_sp(bp, vf, mac_rdata),
+ bnx2x_vf_sp_map(bp, vf, mac_rdata),
+ BNX2X_FILTER_MAC_PENDING,
+ &vf->filter_state,
+ BNX2X_OBJ_TYPE_RX_TX,
+ &bp->macs_pool);
+ /* vlan */
+ bnx2x_init_vlan_obj(bp, &q->vlan_obj,
+ cl_id, q->cid, func_id,
+ bnx2x_vf_sp(bp, vf, vlan_rdata),
+ bnx2x_vf_sp_map(bp, vf, vlan_rdata),
+ BNX2X_FILTER_VLAN_PENDING,
+ &vf->filter_state,
+ BNX2X_OBJ_TYPE_RX_TX,
+ &bp->vlans_pool);
+
+ /* mcast */
+ bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
+ q->cid, func_id, func_id,
+ bnx2x_vf_sp(bp, vf, mcast_rdata),
+ bnx2x_vf_sp_map(bp, vf, mcast_rdata),
+ BNX2X_FILTER_MCAST_PENDING,
+ &vf->filter_state,
+ BNX2X_OBJ_TYPE_RX_TX);
+
+ /* rss */
+ bnx2x_init_rss_config_obj(bp, &vf->rss_conf_obj, cl_id, q->cid,
+ func_id, func_id,
+ bnx2x_vf_sp(bp, vf, rss_rdata),
+ bnx2x_vf_sp_map(bp, vf, rss_rdata),
+ BNX2X_FILTER_RSS_CONF_PENDING,
+ &vf->filter_state,
+ BNX2X_OBJ_TYPE_RX_TX);
+
+ vf->leading_rss = cl_id;
+ q->is_leading = true;
+}
+
/* ask the pf to open a queue for the vf */
int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx)
{
/* fill in pfdev info */
resp->pfdev_info.chip_num = bp->common.chip_id;
- resp->pfdev_info.db_size = (1 << BNX2X_DB_SHIFT);
+ resp->pfdev_info.db_size = bp->db_size;
resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
/* PFVF_CAP_DHC |*/ PFVF_CAP_TPA);
/* record ghost addresses from vf message */
vf->spq_map = init->spq_addr;
vf->fw_stat_map = init->stats_addr;
+ vf->stats_stride = init->stats_stride;
vf->op_rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);
+ /* set VF multiqueue statistics collection mode */
+ if (init->flags & VFPF_INIT_FLG_STATS_COALESCE)
+ vf->cfg_flags |= VF_CFG_STATS_COALESCE;
+
/* response */
bnx2x_vf_mbx_resp(bp, vf);
}
__set_bit(BNX2X_Q_FLG_HC, sp_q_flags);
if (mbx_q_flags & VFPF_QUEUE_FLG_DHC)
__set_bit(BNX2X_Q_FLG_DHC, sp_q_flags);
+ if (mbx_q_flags & VFPF_QUEUE_FLG_LEADING_RSS)
+ __set_bit(BNX2X_Q_FLG_LEADING_RSS, sp_q_flags);
/* outer vlan removal is set according to PF's multi function mode */
if (IS_MF_SD(bp))
struct bnx2x_queue_init_params *init_p;
struct bnx2x_queue_setup_params *setup_p;
+ if (bnx2x_vfq_is_leading(q))
+ bnx2x_leading_vfq_init(bp, vf, q);
+
/* re-init the VF operation context */
memset(&vf->op_params.qctor, 0 , sizeof(vf->op_params.qctor));
setup_p = &vf->op_params.qctor.prep_qsetup;
bnx2x_vf_mbx_resp(bp, vf);
}
+static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ struct bnx2x_vfop_cmd cmd = {
+ .done = bnx2x_vf_mbx_resp,
+ .block = false,
+ };
+ struct bnx2x_config_rss_params *vf_op_params = &vf->op_params.rss;
+ struct vfpf_rss_tlv *rss_tlv = &mbx->msg->req.update_rss;
+
+ if (rss_tlv->ind_table_size != T_ETH_INDIRECTION_TABLE_SIZE ||
+ rss_tlv->rss_key_size != T_ETH_RSS_KEY) {
+ BNX2X_ERR("failing rss configuration of vf %d due to size mismatch\n",
+ vf->index);
+ vf->op_rc = -EINVAL;
+ goto mbx_resp;
+ }
+
+ /* set vfop params according to rss tlv */
+ memcpy(vf_op_params->ind_table, rss_tlv->ind_table,
+ T_ETH_INDIRECTION_TABLE_SIZE);
+ memcpy(vf_op_params->rss_key, rss_tlv->rss_key,
+ sizeof(rss_tlv->rss_key));
+ vf_op_params->rss_obj = &vf->rss_conf_obj;
+ vf_op_params->rss_result_mask = rss_tlv->rss_result_mask;
+
+ /* flags handled individually for backward/forward compatibility */
+ if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED)
+ __set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR)
+ __set_bit(BNX2X_RSS_MODE_REGULAR, &vf_op_params->rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_SET_SRCH)
+ __set_bit(BNX2X_RSS_SET_SRCH, &vf_op_params->rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_IPV4)
+ __set_bit(BNX2X_RSS_IPV4, &vf_op_params->rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP)
+ __set_bit(BNX2X_RSS_IPV4_TCP, &vf_op_params->rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP)
+ __set_bit(BNX2X_RSS_IPV4_UDP, &vf_op_params->rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_IPV6)
+ __set_bit(BNX2X_RSS_IPV6, &vf_op_params->rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP)
+ __set_bit(BNX2X_RSS_IPV6_TCP, &vf_op_params->rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)
+ __set_bit(BNX2X_RSS_IPV6_UDP, &vf_op_params->rss_flags);
+
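+ /* the fw asserts if udp rss is requested without the matching
+ * tcp rss of the same ip version
+ */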
+ if ((!(rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) &&
+ rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) ||
+ (!(rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) &&
+ rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)) {
+ BNX2X_ERR("about to hit a FW assert. aborting...\n");
+ vf->op_rc = -EINVAL;
+ goto mbx_resp;
+ }
+
+ vf->op_rc = bnx2x_vfop_rss_cmd(bp, vf, &cmd);
+
+mbx_resp:
+ if (vf->op_rc)
+ bnx2x_vf_mbx_resp(bp, vf);
+}
+
/* dispatch request */
static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mbx *mbx)
case CHANNEL_TLV_RELEASE:
bnx2x_vf_mbx_release_vf(bp, vf, mbx);
break;
+ case CHANNEL_TLV_UPDATE_RSS:
+ bnx2x_vf_mbx_update_rss(bp, vf, mbx);
+ break;
}
} else {
/* test whether we can respond to the VF (do we have an address
* for it?)
*/
- if (vf->state == VF_ACQUIRED) {
+ if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) {
/* mbx_resp uses the op_rc of the VF */
vf->op_rc = PFVF_STATUS_NOT_SUPPORTED;