if (err)
goto err_destroy_rq;
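+ /* Mark the RQ enabled before moving it RST->RDY; the datapath now keys off this bit. */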
+ set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
if (err)
goto err_disable_rq;
return 0;
err_disable_rq:
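+ /* Roll back the ENABLED bit before tearing the RQ down. */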
+ clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
mlx5e_disable_rq(rq);
err_destroy_rq:
mlx5e_destroy_rq(rq);
static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
- set_bit(MLX5E_RQ_STATE_FLUSH, &rq->state);
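+ /* Stop the datapath from posting to the RQ before synchronizing NAPI. */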
+ clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
cancel_work_sync(&rq->am.work);
if (err)
goto err_destroy_sq;
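+ /* Mark the SQ enabled before moving it RST->RDY; the datapath now keys off this bit. */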
+ set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY,
false, 0);
if (err)
goto err_disable_sq;
return 0;
err_disable_sq:
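+ /* Roll back the ENABLED bit before tearing the SQ down. */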
+ clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
mlx5e_disable_sq(sq);
err_destroy_sq:
mlx5e_destroy_sq(sq);
static void mlx5e_close_sq(struct mlx5e_sq *sq)
{
- set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state);
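+ /* Stop the datapath from using the SQ before synchronizing NAPI. */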
+ clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
/* prevent netif_tx_wake_queue */
napi_synchronize(&sq->channel->napi);
if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
continue;
sched_work = true;
- set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state);
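+ /* Take the hung SQ out of service. */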
+ clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
}
for (i = 0; i < priv->params.num_channels; i++) {
struct mlx5e_channel *c = priv->channel[i];
- set_bit(MLX5E_RQ_STATE_FLUSH, &c->rq.state);
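+ /* Quiesce the RQ while the XDP program is swapped. */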
+ clear_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
napi_synchronize(&c->napi);
/* prevent mlx5e_poll_rx_cq from accessing rq->xdp_prog */
old_prog = xchg(&c->rq.xdp_prog, prog);
- clear_bit(MLX5E_RQ_STATE_FLUSH, &c->rq.state);
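+ /* Re-enable the RQ now that the new XDP program is in place. */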
+ set_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
/* napi_schedule in case we have missed anything */
set_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags);
napi_schedule(&c->napi);
clear_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
- if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state))) {
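+ /* The RQ was disabled while the UMR WQE was in flight; release the MPWQE. */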
+ if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state))) {
mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);
return;
}
}
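+ /* An RQ may be posted to only while it is enabled and has no UMR WQE in flight. */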
#define RQ_CANNOT_POST(rq) \
- (test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state) || \
+ (!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state) || \
test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
struct mlx5e_sq *xdp_sq = &rq->channel->xdp_sq;
int work_done = 0;
- if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state)))
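+ /* No RX completion processing on a disabled RQ. */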
+ if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
return 0;
if (cq->decmprs_left)
struct mlx5_cqe64 *cqe;
u16 sqcc;
- if (unlikely(test_bit(MLX5E_SQ_STATE_FLUSH, &sq->state)))
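+ /* Nothing to poll on a disabled SQ. */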
+ if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
return;
cqe = mlx5e_get_cqe(cq);
sq = container_of(cq, struct mlx5e_sq, cq);
- if (unlikely(test_bit(MLX5E_SQ_STATE_FLUSH, &sq->state)))
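+ /* No TX completion processing on a disabled SQ. */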
+ if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
return false;
/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),