QEDE_TQSTAT(stopped_cnt),
};
-#define QEDE_TQSTATS_DATA(dev, sindex, tssid, tcid) \
- (*((u64 *)(((void *)(&dev->fp_array[tssid].txqs[tcid])) +\
+#define QEDE_TQSTATS_DATA(dev, sindex, tssid) \
+ (*((u64 *)(((void *)((dev)->fp_array[tssid].txq)) + \
qede_tqstats_arr[(sindex)].offset)))
static const struct {
int i, j, k;
for (i = 0, k = 0; i < QEDE_QUEUE_CNT(edev); i++) {
- int tc;
if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
for (j = 0; j < QEDE_NUM_RQSTATS; j++)
}
if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
- for (tc = 0; tc < edev->num_tc; tc++) {
- for (j = 0; j < QEDE_NUM_TQSTATS; j++)
- sprintf(buf + (k + j) *
- ETH_GSTRING_LEN,
- "%d.%d: %s", i, tc,
- qede_tqstats_arr[j].string);
- k += QEDE_NUM_TQSTATS;
- }
+ for (j = 0; j < QEDE_NUM_TQSTATS; j++)
+ sprintf(buf + (k + j) * ETH_GSTRING_LEN,
+ "%d: %s", i,
+ qede_tqstats_arr[j].string);
+ k += QEDE_NUM_TQSTATS;
}
}
mutex_lock(&edev->qede_lock);
for (qid = 0; qid < QEDE_QUEUE_CNT(edev); qid++) {
- int tc;
- if (edev->fp_array[qid].type & QEDE_FASTPATH_RX) {
+ if (edev->fp_array[qid].type & QEDE_FASTPATH_RX)
for (sidx = 0; sidx < QEDE_NUM_RQSTATS; sidx++)
buf[cnt++] = QEDE_RQSTATS_DATA(edev, sidx, qid);
- }
- if (edev->fp_array[qid].type & QEDE_FASTPATH_TX) {
- for (tc = 0; tc < edev->num_tc; tc++) {
- for (sidx = 0; sidx < QEDE_NUM_TQSTATS; sidx++)
- buf[cnt++] = QEDE_TQSTATS_DATA(edev,
- sidx,
- qid, tc);
- }
- }
+ if (edev->fp_array[qid].type & QEDE_FASTPATH_TX)
+ for (sidx = 0; sidx < QEDE_NUM_TQSTATS; sidx++)
+ buf[cnt++] = QEDE_TQSTATS_DATA(edev, sidx, qid);
}
for (sidx = 0; sidx < QEDE_NUM_STATS; sidx++) {
num_stats--;
}
return num_stats + QEDE_RSS_COUNT(edev) * QEDE_NUM_RQSTATS +
- QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS * edev->num_tc;
+ QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS;
case ETH_SS_PRIV_FLAGS:
return QEDE_PRI_FLAG_LEN;
case ETH_SS_TEST:
for_each_queue(i) {
if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
- txq = edev->fp_array[i].txqs;
+ txq = edev->fp_array[i].txq;
break;
}
}
/* Get tx-queue context and netdev index */
txq_index = skb_get_queue_mapping(skb);
WARN_ON(txq_index >= QEDE_TSS_COUNT(edev));
- txq = QEDE_TX_QUEUE(edev, txq_index);
+ txq = edev->fp_array[edev->fp_num_rx + txq_index].txq;
netdev_txq = netdev_get_tx_queue(ndev, txq_index);
WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1));
return hw_comp_cons != sw_comp_cons;
}
-static bool qede_has_tx_work(struct qede_fastpath *fp)
-{
- u8 tc;
-
- for (tc = 0; tc < fp->edev->num_tc; tc++)
- if (qede_txq_has_work(&fp->txqs[tc]))
- return true;
- return false;
-}
-
static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
{
qed_chain_consume(&rxq->rx_bd_ring);
napi);
struct qede_dev *edev = fp->edev;
int rx_work_done = 0;
- u8 tc;
- for (tc = 0; tc < edev->num_tc; tc++)
- if (likely(fp->type & QEDE_FASTPATH_TX) &&
- qede_txq_has_work(&fp->txqs[tc]))
- qede_tx_int(edev, &fp->txqs[tc]);
+ if (likely(fp->type & QEDE_FASTPATH_TX) && qede_txq_has_work(fp->txq))
+ qede_tx_int(edev, fp->txq);
rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) &&
qede_has_rx_work(fp->rxq)) ?
if (!((likely(fp->type & QEDE_FASTPATH_RX) &&
qede_has_rx_work(fp->rxq)) ||
(likely(fp->type & QEDE_FASTPATH_TX) &&
- qede_has_tx_work(fp)))) {
+ qede_txq_has_work(fp->txq)))) {
napi_complete(napi);
/* Update and reenable interrupts */
memset(&edev->stats, 0, sizeof(edev->stats));
memcpy(&edev->dev_info, info, sizeof(*info));
- edev->num_tc = edev->dev_info.num_tc;
-
INIT_LIST_HEAD(&edev->vlan_list);
return edev;
kfree(fp->sb_info);
kfree(fp->rxq);
- kfree(fp->txqs);
+ kfree(fp->txq);
}
kfree(edev->fp_array);
}
for_each_queue(i) {
fp = &edev->fp_array[i];
- fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL);
+ fp->sb_info = kzalloc(sizeof(*fp->sb_info), GFP_KERNEL);
if (!fp->sb_info) {
DP_NOTICE(edev, "sb info struct allocation failed\n");
goto err;
}
if (fp->type & QEDE_FASTPATH_TX) {
- fp->txqs = kcalloc(edev->num_tc, sizeof(*fp->txqs),
- GFP_KERNEL);
- if (!fp->txqs) {
- DP_NOTICE(edev,
- "TXQ array allocation failed\n");
+ fp->txq = kzalloc(sizeof(*fp->txq), GFP_KERNEL);
+ if (!fp->txq)
goto err;
- }
}
if (fp->type & QEDE_FASTPATH_RX) {
- fp->rxq = kcalloc(1, sizeof(*fp->rxq), GFP_KERNEL);
- if (!fp->rxq) {
- DP_NOTICE(edev,
- "RXQ struct allocation failed\n");
+ fp->rxq = kzalloc(sizeof(*fp->rxq), GFP_KERNEL);
+ if (!fp->rxq)
goto err;
- }
}
}
/* This function frees all memory of a single fp */
static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
{
- int tc;
-
qede_free_mem_sb(edev, fp->sb_info);
if (fp->type & QEDE_FASTPATH_RX)
qede_free_mem_rxq(edev, fp->rxq);
if (fp->type & QEDE_FASTPATH_TX)
- for (tc = 0; tc < edev->num_tc; tc++)
- qede_free_mem_txq(edev, &fp->txqs[tc]);
+ qede_free_mem_txq(edev, fp->txq);
}
/* This function allocates all memory needed for a single fp (i.e. an entity
*/
static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
{
- int rc, tc;
+ int rc;
rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->id);
if (rc)
}
if (fp->type & QEDE_FASTPATH_TX) {
- for (tc = 0; tc < edev->num_tc; tc++) {
- rc = qede_alloc_mem_txq(edev, &fp->txqs[tc]);
- if (rc)
- goto err;
- }
+ rc = qede_alloc_mem_txq(edev, fp->txq);
+ if (rc)
+ goto err;
}
return 0;
/* This function inits fp content and resets the SB, RXQ and TXQ structures */
static void qede_init_fp(struct qede_dev *edev)
{
- int queue_id, rxq_index = 0, txq_index = 0, tc;
+ int queue_id, rxq_index = 0, txq_index = 0;
struct qede_fastpath *fp;
for_each_queue(queue_id) {
fp->edev = edev;
fp->id = queue_id;
- memset((void *)&fp->napi, 0, sizeof(fp->napi));
-
- memset((void *)fp->sb_info, 0, sizeof(*fp->sb_info));
if (fp->type & QEDE_FASTPATH_RX) {
- memset((void *)fp->rxq, 0, sizeof(*fp->rxq));
fp->rxq->rxq_id = rxq_index++;
}
if (fp->type & QEDE_FASTPATH_TX) {
- memset((void *)fp->txqs, 0,
- (edev->num_tc * sizeof(*fp->txqs)));
- for (tc = 0; tc < edev->num_tc; tc++) {
- fp->txqs[tc].index = txq_index +
- tc * QEDE_TSS_COUNT(edev);
- if (edev->dev_info.is_legacy)
- fp->txqs[tc].is_legacy = true;
- }
- txq_index++;
+ fp->txq->index = txq_index++;
+ if (edev->dev_info.is_legacy)
+ fp->txq->is_legacy = true;
}
snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
{
struct qed_update_vport_params vport_update_params;
struct qed_dev *cdev = edev->cdev;
- int rc, tc, i;
+ struct qede_fastpath *fp;
+ int rc, i;
/* Disable the vport */
memset(&vport_update_params, 0, sizeof(vport_update_params));
/* Flush Tx queues. If needed, request drain from MCP */
for_each_queue(i) {
- struct qede_fastpath *fp = &edev->fp_array[i];
+ fp = &edev->fp_array[i];
if (fp->type & QEDE_FASTPATH_TX) {
- for (tc = 0; tc < edev->num_tc; tc++) {
- struct qede_tx_queue *txq = &fp->txqs[tc];
-
- rc = qede_drain_txq(edev, txq, true);
- if (rc)
- return rc;
- }
+ rc = qede_drain_txq(edev, fp->txq, true);
+ if (rc)
+ return rc;
}
}
for (i = QEDE_QUEUE_CNT(edev) - 1; i >= 0; i--) {
struct qed_stop_rxq_params rx_params;
+ fp = &edev->fp_array[i];
+
/* Stop the Tx Queue(s) */
- if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
- for (tc = 0; tc < edev->num_tc; tc++) {
- struct qed_stop_txq_params tx_params;
- u8 val;
-
- tx_params.rss_id = i;
- val = edev->fp_array[i].txqs[tc].index;
- tx_params.tx_queue_id = val;
+ if (fp->type & QEDE_FASTPATH_TX) {
+ struct qed_stop_txq_params tx_params;
+
+ tx_params.rss_id = i;
+ tx_params.tx_queue_id = fp->txq->index;
rc = edev->ops->q_tx_stop(cdev, &tx_params);
- if (rc) {
- DP_ERR(edev, "Failed to stop TXQ #%d\n",
- tx_params.tx_queue_id);
+ if (rc)
return rc;
- }
- }
}
/* Stop the Rx Queue */
- if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
+ if (fp->type & QEDE_FASTPATH_RX) {
memset(&rx_params, 0, sizeof(rx_params));
rx_params.rss_id = i;
- rx_params.rx_queue_id = edev->fp_array[i].rxq->rxq_id;
+ rx_params.rx_queue_id = fp->rxq->rxq_id;
rc = edev->ops->q_rx_stop(cdev, &rx_params);
if (rc) {
static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
{
- int rc, tc, i;
int vlan_removal_en = 1;
struct qed_dev *cdev = edev->cdev;
struct qed_update_vport_params vport_update_params;
struct qed_dev_info *qed_info = &edev->dev_info.common;
struct qed_start_vport_params start = {0};
bool reset_rss_indir = false;
+ int rc, i;
if (!edev->num_queues) {
DP_ERR(edev,
qede_update_rx_prod(edev, rxq);
}
- if (!(fp->type & QEDE_FASTPATH_TX))
- continue;
-
- for (tc = 0; tc < edev->num_tc; tc++) {
- struct qede_tx_queue *txq = &fp->txqs[tc];
+ if (fp->type & QEDE_FASTPATH_TX) {
+ struct qede_tx_queue *txq = fp->txq;
p_phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl);
page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl);
q_params.queue_id = txq->index;
q_params.vport_id = 0;
q_params.sb = fp->sb_info->igu_sb_id;
- q_params.sb_idx = TX_PI(tc);
+ q_params.sb_idx = TX_PI(0);
rc = edev->ops->q_tx_start(cdev, &q_params,
p_phys_table, page_cnt,
}
txq->hw_cons_ptr =
- &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
+ &fp->sb_info->sb_virt->pi_array[TX_PI(0)];
SET_FIELD(txq->tx_db.data.params,
ETH_DB_DATA_DEST, DB_DEST_XCM);
SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
rc = qede_alloc_mem_load(edev);
if (rc)
goto err1;
- DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n",
- QEDE_QUEUE_CNT(edev), edev->num_tc);
+ DP_INFO(edev, "Allocated %d Rx, %d Tx queues\n",
+ QEDE_RSS_COUNT(edev), QEDE_TSS_COUNT(edev));
rc = qede_set_real_num_queues(edev);
if (rc)