/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "en.h"

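/* Peek at the CQE at the current consumer index without advancing the
 * queue.  Ownership is decided by comparing the CQE ownership bit with
 * the low bit of the software wrap counter; NULL means hardware has not
 * produced a new CQE yet.
 */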
struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq)
{
        struct mlx5_cqwq *wq = &cq->wq;
        u32 ci = mlx5_cqwq_get_ci(wq);
        struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
        int cqe_ownership_bit = cqe->op_own & MLX5_CQE_OWNER_MASK;
        int sw_ownership_val = mlx5_cqwq_get_wrap_cnt(wq) & 1;

        if (cqe_ownership_bit != sw_ownership_val)
                return NULL;

        /* ensure cqe content is read after cqe ownership bit */
        rmb();

        return cqe;
}

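/* Drain the internal control operations SQ (ICOSQ) completion queue.
 * NOP completions are simply consumed; a UMR completion means the
 * memory mapping for a multi-packet RX WQE is ready, so a new MPWQE is
 * posted to the channel's RQ.
 */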
static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
{
        struct mlx5e_sq *sq = container_of(cq, struct mlx5e_sq, cq);
        struct mlx5_wq_cyc *wq;
        struct mlx5_cqe64 *cqe;
        u16 sqcc;

        if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
                return;

        cqe = mlx5e_get_cqe(cq);
        if (likely(!cqe))
                return;

        wq = &sq->wq;

        /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
         * otherwise a cq overrun may occur
         */
        sqcc = sq->cc;

        do {
                u16 ci = be16_to_cpu(cqe->wqe_counter) & wq->sz_m1;
                struct mlx5e_sq_wqe_info *icowi = &sq->db.ico_wqe[ci];

                mlx5_cqwq_pop(&cq->wq);
                sqcc += icowi->num_wqebbs;

                if (unlikely((cqe->op_own >> 4) != MLX5_CQE_REQ)) {
                        WARN_ONCE(true, "mlx5e: Bad OP in ICOSQ CQE: 0x%x\n",
                                  cqe->op_own);
                        break;
                }

                switch (icowi->opcode) {
                case MLX5_OPCODE_NOP:
                        break;
                case MLX5_OPCODE_UMR:
                        mlx5e_post_rx_mpwqe(&sq->channel->rq);
                        break;
                default:
                        WARN_ONCE(true,
                                  "mlx5e: Bad OPCODE in ICOSQ WQE info: 0x%x\n",
                                  icowi->opcode);
                }
        } while ((cqe = mlx5e_get_cqe(cq)));

        mlx5_cqwq_update_db_record(&cq->wq);

        /* ensure cq space is freed before enabling more cqes */
        wmb();

        sq->cc = sqcc;
}

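/* Reclaim completed XDP_TX WQEs, releasing their RX pages back for
 * recycling.  Polls at most MLX5E_TX_CQ_POLL_BUDGET CQEs and returns
 * true when the budget was exhausted, i.e. more work may be pending.
 */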
static inline bool mlx5e_poll_xdp_tx_cq(struct mlx5e_cq *cq)
{
        struct mlx5e_sq *sq;
        u16 sqcc;
        int i;

        sq = container_of(cq, struct mlx5e_sq, cq);

        if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
                return false;

        /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
         * otherwise a cq overrun may occur
         */
        sqcc = sq->cc;

        for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
                struct mlx5_cqe64 *cqe;
                u16 wqe_counter;
                bool last_wqe;

                cqe = mlx5e_get_cqe(cq);
                if (!cqe)
                        break;

                mlx5_cqwq_pop(&cq->wq);

                wqe_counter = be16_to_cpu(cqe->wqe_counter);

                do {
                        struct mlx5e_sq_wqe_info *wi;
                        struct mlx5e_dma_info *di;
                        u16 ci;

                        last_wqe = (sqcc == wqe_counter);

                        ci = sqcc & sq->wq.sz_m1;
                        di = &sq->db.xdp.di[ci];
                        wi = &sq->db.xdp.wqe_info[ci];

                        if (unlikely(wi->opcode == MLX5_OPCODE_NOP)) {
                                sqcc++;
                                continue;
                        }

                        sqcc += wi->num_wqebbs;
                        /* Recycle RX page */
                        mlx5e_page_release(&sq->channel->rq, di, true);
                } while (!last_wqe);
        }

        mlx5_cqwq_update_db_record(&cq->wq);

        /* ensure cq space is freed before enabling more cqes */
        wmb();

        sq->cc = sqcc;

        return (i == MLX5E_TX_CQ_POLL_BUDGET);
}

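/* Per-channel NAPI poll handler: drain the TX, RX, XDP-TX and ICOSQ
 * completion queues and refill the RQ.  CQs are re-armed only after all
 * queues report they are idle and no completion event raced with the
 * poll (MLX5E_CHANNEL_NAPI_SCHED re-check).
 */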
int mlx5e_napi_poll(struct napi_struct *napi, int budget)
{
        struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
                                               napi);
        bool busy = false;
        int work_done;
        int i;

        clear_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags);

        for (i = 0; i < c->num_tc; i++)
                busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget);

        work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget);
        busy |= work_done == budget;

        if (c->xdp)
                busy |= mlx5e_poll_xdp_tx_cq(&c->xdp_sq.cq);

        mlx5e_poll_ico_cq(&c->icosq.cq);

        busy |= mlx5e_post_rx_wqes(&c->rq);

        if (busy)
                return budget;

        napi_complete_done(napi, work_done);

        /* avoid losing completion event during/after polling cqs */
        if (test_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags)) {
                napi_schedule(napi);
                return work_done;
        }

        for (i = 0; i < c->num_tc; i++)
                mlx5e_cq_arm(&c->sq[i].cq);

        if (test_bit(MLX5E_RQ_STATE_AM, &c->rq.state))
                mlx5e_rx_am(&c->rq);

        mlx5e_cq_arm(&c->rq.cq);
        mlx5e_cq_arm(&c->icosq.cq);

        return work_done;
}

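/* CQ completion event handler (IRQ context): flag the channel and
 * schedule NAPI so the CQs get polled in softirq context.
 */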
void mlx5e_completion_event(struct mlx5_core_cq *mcq)
{
        struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);

        cq->event_ctr++;
        set_bit(MLX5E_CHANNEL_NAPI_SCHED, &cq->channel->flags);
        napi_schedule(cq->napi);
}

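/* CQ error event handler: no recovery is attempted here, the event is
 * only logged against the owning net_device.
 */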
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event)
{
        struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
        struct mlx5e_channel *c = cq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct net_device *netdev = priv->netdev;

        netdev_err(netdev, "%s: cqn=0x%.6x event=0x%.2x\n",
                   __func__, mcq->cqn, event);
}