/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include "en.h"

#define MLX5E_SQ_NOPS_ROOM  MLX5_SEND_WQE_MAX_WQEBBS
#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
			    MLX5E_SQ_NOPS_ROOM)
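
/* The SQ is stopped once fewer than MLX5E_SQ_STOP_ROOM WQEBBs are free:
 * enough room is kept for one maximal WQE plus the NOPs used to pad out
 * to the ring edge, so a descriptor never has to wrap around.
 */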
void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
{
	struct mlx5_wq_cyc       *wq   = &sq->wq;

	u16 pi = sq->pc & wq->sz_m1;
	struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);

	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;

	memset(cseg, 0, sizeof(*cseg));

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
	cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | 0x01);

	sq->skb[pi] = NULL;
	sq->pc++;

	if (notify_hw) {
		cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
		mlx5e_tx_notify_hw(sq, wqe, 0);
	}
}
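
/* The dma_fifo is a ring of (addr, size) records, one entry per DMA
 * mapping pushed while building a WQE; the completion path (and the
 * error unwind) walks it in the same order to unmap.
 */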
static void mlx5e_dma_pop_last_pushed(struct mlx5e_sq *sq, dma_addr_t *addr,
				      u32 *size)
{
	sq->dma_fifo_pc--;
	*addr = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr;
	*size = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size;
}

static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, struct sk_buff *skb)
{
	dma_addr_t addr;
	u32 size;
	int i;

	for (i = 0; i < MLX5E_TX_SKB_CB(skb)->num_dma; i++) {
		mlx5e_dma_pop_last_pushed(sq, &addr, &size);
		dma_unmap_single(sq->pdev, addr, size, DMA_TO_DEVICE);
	}
}

static inline void mlx5e_dma_push(struct mlx5e_sq *sq, dma_addr_t addr,
				  u32 size)
{
	sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr = addr;
	sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size = size;
	sq->dma_fifo_pc++;
}

static inline void mlx5e_dma_get(struct mlx5e_sq *sq, u32 i, dma_addr_t *addr,
				 u32 *size)
{
	*addr = sq->dma_fifo[i & sq->dma_fifo_mask].addr;
	*size = sq->dma_fifo[i & sq->dma_fifo_mask].size;
}
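
/* Queue selection: the channel comes from the stack's fallback hash, the
 * traffic class from the VLAN priority (or the driver's default priority).
 */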
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
		       void *accel_priv, select_queue_fallback_t fallback)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int channel_ix = fallback(dev, skb);
	int up = skb_vlan_tag_present(skb) ?
		 skb->vlan_tci >> VLAN_PRIO_SHIFT :
		 priv->default_vlan_prio;
	int tc = netdev_get_prio_tc_map(dev, up);

	return priv->channeltc_to_txq_map[channel_ix][tc];
}

static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
					    struct sk_buff *skb, bool bf)
{
	/* Some NIC TX decisions, e.g. loopback, are based on the packet
	 * headers and occur before the data gather.
	 * Therefore these headers must be copied into the WQE.
	 */
#define MLX5E_MIN_INLINE ETH_HLEN

	if (bf && (skb_headlen(skb) <= sq->max_inline))
		return skb_headlen(skb);

	return MLX5E_MIN_INLINE;
}
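
/* The VLAN tag travels out of band in the skb, so when inlining the
 * headers it is spliced back in between the MAC addresses and the
 * encapsulated protocol field.
 */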
static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
{
	struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
	int cpy1_sz = 2 * ETH_ALEN;
	int cpy2_sz = ihs - cpy1_sz;

	skb_copy_from_linear_data(skb, vhdr, cpy1_sz);
	skb_pull_inline(skb, cpy1_sz);
	vhdr->h_vlan_proto = skb->vlan_proto;
	vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
	skb_copy_from_linear_data(skb, &vhdr->h_vlan_encapsulated_proto,
				  cpy2_sz);
	skb_pull_inline(skb, cpy2_sz);
}
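
/* Build one send WQE per skb: a ctrl segment, an eth segment holding the
 * inlined headers, then one data segment per DMA-mapped buffer (linear
 * part and page fragments).
 */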
static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
{
	struct mlx5_wq_cyc       *wq   = &sq->wq;

	u16 pi = sq->pc & wq->sz_m1;
	struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);

	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
	struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
	struct mlx5_wqe_data_seg *dseg;

	u8  opcode = MLX5_OPCODE_SEND;
	dma_addr_t dma_addr = 0;
	bool bf = false;
	u16 headlen;
	u16 ds_cnt;
	u16 ihs;
	int i;

	memset(wqe, 0, sizeof(*wqe));

	if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
	else
		sq->stats.csum_offload_none++;

	if (sq->cc != sq->prev_cc) {
		sq->prev_cc = sq->cc;
		sq->bf_budget = (sq->cc == sq->pc) ? MLX5E_SQ_BF_BUDGET : 0;
	}
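
	/* For LSO the device replicates the inlined headers once per segment,
	 * so num_bytes (used for BQL accounting) is skb->len plus one extra
	 * copy of the headers for every segment after the first.
	 */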
	if (skb_is_gso(skb)) {
		u32 payload_len;

		eseg->mss    = cpu_to_be16(skb_shinfo(skb)->gso_size);
		opcode       = MLX5_OPCODE_LSO;
		ihs          = skb_transport_offset(skb) + tcp_hdrlen(skb);
		payload_len  = skb->len - ihs;
		MLX5E_TX_SKB_CB(skb)->num_bytes = skb->len +
					(skb_shinfo(skb)->gso_segs - 1) * ihs;
		sq->stats.tso_packets++;
		sq->stats.tso_bytes += payload_len;
	} else {
		bf = sq->bf_budget &&
		     !skb->xmit_more &&
		     !skb_shinfo(skb)->nr_frags;
		ihs = mlx5e_get_inline_hdr_size(sq, skb, bf);
		MLX5E_TX_SKB_CB(skb)->num_bytes = max_t(unsigned int, skb->len,
							ETH_ZLEN);
	}

	if (skb_vlan_tag_present(skb)) {
		mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs);
		ihs += VLAN_HLEN;
	} else {
		skb_copy_from_linear_data(skb, eseg->inline_hdr_start, ihs);
		skb_pull_inline(skb, ihs);
	}

	eseg->inline_hdr_sz = cpu_to_be16(ihs);
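
	/* ds_cnt counts 16-byte data segments: those making up the ctrl/eth
	 * part of the WQE itself, the extra ones occupied by inlined headers
	 * beyond inline_hdr_start, and later one per mapped buffer.
	 */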
	ds_cnt  = sizeof(*wqe) / MLX5_SEND_WQE_DS;
	ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr_start),
			       MLX5_SEND_WQE_DS);
	dseg    = (struct mlx5_wqe_data_seg *)cseg + ds_cnt;

	MLX5E_TX_SKB_CB(skb)->num_dma = 0;

	headlen = skb_headlen(skb);
	if (headlen) {
		dma_addr = dma_map_single(sq->pdev, skb->data, headlen,
					  DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr       = cpu_to_be64(dma_addr);
		dseg->lkey       = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(headlen);

		mlx5e_dma_push(sq, dma_addr, headlen);
		MLX5E_TX_SKB_CB(skb)->num_dma++;

		dseg++;
	}
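
	/* Each page fragment is mapped separately; every mapping gets its own
	 * data segment and a dma_fifo entry for the completion path to unmap.
	 */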
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		int fsz = skb_frag_size(frag);

		dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
					    DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr       = cpu_to_be64(dma_addr);
		dseg->lkey       = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(fsz);

		mlx5e_dma_push(sq, dma_addr, fsz);
		MLX5E_TX_SKB_CB(skb)->num_dma++;

		dseg++;
	}

	ds_cnt += MLX5E_TX_SKB_CB(skb)->num_dma;

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
	cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | ds_cnt);

	sq->skb[pi] = skb;

	MLX5E_TX_SKB_CB(skb)->num_wqebbs = DIV_ROUND_UP(ds_cnt,
							MLX5_SEND_WQEBB_NUM_DS);
	sq->pc += MLX5E_TX_SKB_CB(skb)->num_wqebbs;

	netdev_tx_sent_queue(sq->txq, MLX5E_TX_SKB_CB(skb)->num_bytes);
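
	/* Ring the doorbell only when no further packets are pending
	 * (xmit_more is clear) or the queue has just been stopped. When a
	 * BlueFlame budget and a mapped BF register are available, the WQE
	 * itself is written through the doorbell page (bf_sz bytes), which
	 * lets the device skip the DMA read of the descriptor.
	 */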
	if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM))) {
		netif_tx_stop_queue(sq->txq);
		sq->stats.stopped++;
	}

	if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
		int bf_sz = 0;

		if (bf && sq->uar_bf_map)
			bf_sz = MLX5E_TX_SKB_CB(skb)->num_wqebbs << 3;

		cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
		mlx5e_tx_notify_hw(sq, wqe, bf_sz);
	}

	/* fill sq edge with nops to avoid wqe wrap around */
	while ((sq->pc & wq->sz_m1) > sq->edge)
		mlx5e_send_nop(sq, false);

	sq->bf_budget = bf ? sq->bf_budget - 1 : 0;

	sq->stats.packets++;
	return NETDEV_TX_OK;

dma_unmap_wqe_err:
	sq->stats.dropped++;
	mlx5e_dma_unmap_wqe_err(sq, skb);

	dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}

netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_sq *sq = priv->txq_to_sq_map[skb_get_queue_mapping(skb)];

	return mlx5e_sq_xmit(sq, skb);
}
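
/* TX completion: drain up to MLX5E_TX_CQ_POLL_BUDGET CQEs, unmap and free
 * the skbs they cover, update the CQ doorbell record, and wake the txq if
 * it was stopped and enough room has been freed.
 */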
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
{
	struct mlx5e_sq *sq;
	u32 dma_fifo_cc;
	u32 nbytes;
	u16 npkts;
	u16 sqcc;
	int i;

	/* avoid accessing cq (dma coherent memory) if not needed */
	if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags))
		return false;

	sq = container_of(cq, struct mlx5e_sq, cq);

	npkts = 0;
	nbytes = 0;

	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	sqcc = sq->cc;

	/* avoid dirtying sq cache line every cqe */
	dma_fifo_cc = sq->dma_fifo_cc;
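
	/* A single CQE may complete several WQEs: it carries the wqe_counter
	 * of the last one, so everything from the current consumer counter up
	 * to and including that index is freed in one pass.
	 */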
	for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
		struct mlx5_cqe64 *cqe;
		u16 wqe_counter;
		bool last_wqe;

		cqe = mlx5e_get_cqe(cq);
		if (!cqe)
			break;

		mlx5_cqwq_pop(&cq->wq);

		wqe_counter = be16_to_cpu(cqe->wqe_counter);

		do {
			struct sk_buff *skb;
			u16 ci;
			int j;

			last_wqe = (sqcc == wqe_counter);

			ci = sqcc & sq->wq.sz_m1;
			skb = sq->skb[ci];

			if (unlikely(!skb)) { /* nop */
				sq->stats.nop++;
				sqcc++;
				continue;
			}

			for (j = 0; j < MLX5E_TX_SKB_CB(skb)->num_dma; j++) {
				dma_addr_t addr;
				u32 size;

				mlx5e_dma_get(sq, dma_fifo_cc, &addr, &size);
				dma_fifo_cc++;
				dma_unmap_single(sq->pdev, addr, size,
						 DMA_TO_DEVICE);
			}

			npkts++;
			nbytes += MLX5E_TX_SKB_CB(skb)->num_bytes;
			sqcc += MLX5E_TX_SKB_CB(skb)->num_wqebbs;
			dev_kfree_skb(skb);
		} while (!last_wqe);
	}

	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	sq->dma_fifo_cc = dma_fifo_cc;
	sq->cc = sqcc;

	netdev_tx_completed_queue(sq->txq, npkts, nbytes);

	if (netif_tx_queue_stopped(sq->txq) &&
	    mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM) &&
	    likely(test_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state))) {
		netif_tx_wake_queue(sq->txq);
		sq->stats.wake++;
	}

	if (i == MLX5E_TX_CQ_POLL_BUDGET) {
		set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
		return true;
	}

	return false;
}