/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include "en.h"

#define MLX5E_SQ_NOPS_ROOM  MLX5_SEND_WQE_MAX_WQEBBS
#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
			    MLX5E_SQ_NOPS_ROOM)
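/* Post a NOP WQE on the SQ.  NOPs are used to pad the queue, e.g. so that a
 * real WQE never has to wrap around the ring edge; the doorbell is rung only
 * when notify_hw is set.
 */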
void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
{
	struct mlx5_wq_cyc       *wq   = &sq->wq;

	u16 pi = sq->pc & wq->sz_m1;
	struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);

	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;

	memset(cseg, 0, sizeof(*cseg));

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
	cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | 0x01);

	sq->skb[pi] = NULL;
	sq->pc++;

	if (notify_hw) {
		cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
		mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
	}
}
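/* DMA bookkeeping: every mapping taken while building a WQE is recorded in
 * the sq->dma_fifo ring, so that completion processing (or error unwind) can
 * later undo exactly the mappings that were pushed.
 */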
static inline void mlx5e_tx_dma_unmap(struct device *pdev,
				      struct mlx5e_sq_dma *dma)
{
	switch (dma->type) {
	case MLX5E_DMA_MAP_SINGLE:
		dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	case MLX5E_DMA_MAP_PAGE:
		dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	default:
		WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
	}
}
static inline void mlx5e_dma_push(struct mlx5e_sq *sq,
				  dma_addr_t addr,
				  u32 size,
				  enum mlx5e_dma_map_type map_type)
{
	sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr = addr;
	sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size = size;
	sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].type = map_type;
	sq->dma_fifo_pc++;
}
static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_sq *sq, u32 i)
{
	return &sq->dma_fifo[i & sq->dma_fifo_mask];
}
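/* Unwind the mappings pushed for a WQE whose construction failed mid-way. */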
static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, u8 num_dma)
{
	int i;

	for (i = 0; i < num_dma; i++) {
		struct mlx5e_sq_dma *last_pushed_dma =
			mlx5e_dma_get(sq, --sq->dma_fifo_pc);

		mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma);
	}
}
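/* TX queue selection: pick a channel via the stack's fallback hash, clamp it
 * to the number of channels, and map (channel, priority) to the actual txq.
 */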
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
		       void *accel_priv, select_queue_fallback_t fallback)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int channel_ix = fallback(dev, skb);
	int up = 0;

	if (!netdev_get_num_tc(dev))
		return channel_ix;

	if (skb_vlan_tag_present(skb))
		up = skb->vlan_tci >> VLAN_PRIO_SHIFT;

	/* channel_ix can be larger than num_channels since
	 * dev->num_real_tx_queues = num_channels * num_tc
	 */
	if (channel_ix >= priv->params.num_channels)
		channel_ix = reciprocal_scale(channel_ix,
					      priv->params.num_channels);

	return priv->channeltc_to_txq_map[channel_ix][up];
}
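/* Decide how many packet bytes to copy inline into the WQE.  When a
 * BlueFlame doorbell is intended (bf) and the whole linear part fits within
 * sq->max_inline, inline all of it; otherwise inline only up to the network
 * header, and at least ETH_HLEN + VLAN_HLEN bytes.
 */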
static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
					    struct sk_buff *skb, bool bf)
{
	/* Some NIC TX decisions, e.g loopback, are based on the packet
	 * headers and occur before the data gather.
	 * Therefore these headers must be copied into the WQE
	 */
#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)

	if (bf) {
		u16 ihs = skb_headlen(skb);

		if (skb_vlan_tag_present(skb))
			ihs += VLAN_HLEN;

		if (ihs <= sq->max_inline)
			return skb_headlen(skb);
	}

	return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
}
static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
					    unsigned int *skb_len,
					    unsigned int len)
{
	*skb_len  -= len;
	*skb_data += len;
}
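/* Build the inline header with a VLAN tag inserted in software: copy the MAC
 * addresses, splice in the 802.1Q tag from the skb metadata, then copy the
 * rest of the original headers.
 */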
static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs,
				     unsigned char **skb_data,
				     unsigned int *skb_len)
{
	struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
	int cpy1_sz = 2 * ETH_ALEN;
	int cpy2_sz = ihs - cpy1_sz;

	memcpy(vhdr, *skb_data, cpy1_sz);
	mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy1_sz);
	vhdr->h_vlan_proto = skb->vlan_proto;
	vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
	memcpy(&vhdr->h_vlan_encapsulated_proto, *skb_data, cpy2_sz);
	mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy2_sz);
}
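/* Transmit one skb on an SQ: fill the control and eth segments, copy (or
 * VLAN-expand) the inline header, DMA-map the remaining linear data and each
 * page fragment into data segments, then ring the doorbell unless more
 * packets are pending (xmit_more).
 */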
static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
{
	struct mlx5_wq_cyc       *wq   = &sq->wq;

	u16 pi = sq->pc & wq->sz_m1;
	struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5e_tx_wqe_info *wi   = &sq->wqe_info[pi];

	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
	struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
	struct mlx5_wqe_data_seg *dseg;

	unsigned char *skb_data = skb->data;
	unsigned int skb_len = skb->len;
	u8  opcode = MLX5_OPCODE_SEND;
	dma_addr_t dma_addr = 0;
	unsigned int num_bytes;
	bool bf = false;
	u16 headlen;
	u16 ds_cnt;
	u16 ihs;
	int i;

	memset(wqe, 0, sizeof(*wqe));
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
		if (skb->encapsulation) {
			eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
					  MLX5_ETH_WQE_L4_INNER_CSUM;
			sq->stats.csum_partial_inner++;
		} else {
			eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
		}
	} else
		sq->stats.csum_none++;

	if (sq->cc != sq->prev_cc) {
		sq->prev_cc = sq->cc;
		sq->bf_budget = (sq->cc == sq->pc) ? MLX5E_SQ_BF_BUDGET : 0;
	}

	if (skb_is_gso(skb)) {
		eseg->mss    = cpu_to_be16(skb_shinfo(skb)->gso_size);
		opcode       = MLX5_OPCODE_LSO;

		if (skb->encapsulation) {
			ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
			sq->stats.tso_inner_packets++;
			sq->stats.tso_inner_bytes += skb->len - ihs;
		} else {
			ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
			sq->stats.tso_packets++;
			sq->stats.tso_bytes += skb->len - ihs;
		}

		num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
	} else {
		bf = sq->bf_budget &&
		     !skb->xmit_more &&
		     !skb_shinfo(skb)->nr_frags;
		ihs = mlx5e_get_inline_hdr_size(sq, skb, bf);
		num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
	}

	wi->num_bytes = num_bytes;

	if (skb_vlan_tag_present(skb)) {
		mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs, &skb_data,
				  &skb_len);
		ihs += VLAN_HLEN;
	} else {
		memcpy(eseg->inline_hdr_start, skb_data, ihs);
		mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
	}

	eseg->inline_hdr_sz = cpu_to_be16(ihs);
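	/* Data segments: count the ctrl/eth/inline part of the WQE in 16-byte
	 * units, then add one data segment for the not-yet-inlined linear
	 * bytes and one per page fragment.
	 */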
	ds_cnt  = sizeof(*wqe) / MLX5_SEND_WQE_DS;
	ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr_start),
			       MLX5_SEND_WQE_DS);
	dseg    = (struct mlx5_wqe_data_seg *)cseg + ds_cnt;

	wi->num_dma = 0;

	headlen = skb_len - skb->data_len;
	if (headlen) {
		dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
					  DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr       = cpu_to_be64(dma_addr);
		dseg->lkey       = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(headlen);

		mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
		wi->num_dma++;

		dseg++;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		int fsz = skb_frag_size(frag);

		dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
					    DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr       = cpu_to_be64(dma_addr);
		dseg->lkey       = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(fsz);

		mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
		wi->num_dma++;

		dseg++;
	}

	ds_cnt += wi->num_dma;

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
	cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | ds_cnt);

	sq->skb[pi] = skb;

	wi->num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->pc += wi->num_wqebbs;

	netdev_tx_sent_queue(sq->txq, wi->num_bytes);

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM))) {
		netif_tx_stop_queue(sq->txq);
		sq->stats.stopped++;
	}

	if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
		int bf_sz = 0;

		if (bf && test_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state))
			bf_sz = wi->num_wqebbs << 3;

		cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
		mlx5e_tx_notify_hw(sq, &wqe->ctrl, bf_sz);
	}

	/* fill sq edge with nops to avoid wqe wrap around */
	while ((sq->pc & wq->sz_m1) > sq->edge)
		mlx5e_send_nop(sq, false);

	sq->bf_budget = bf ? sq->bf_budget - 1 : 0;

	sq->stats.packets++;
	sq->stats.bytes += num_bytes;
	return NETDEV_TX_OK;

dma_unmap_wqe_err:
	sq->stats.dropped++;
	mlx5e_dma_unmap_wqe_err(sq, wi->num_dma);

	dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}
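/* ndo_start_xmit entry point: look up the SQ that backs the skb's TX queue
 * and post the packet on it.
 */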
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_sq *sq = priv->txq_to_sq_map[skb_get_queue_mapping(skb)];

	return mlx5e_sq_xmit(sq, skb);
}
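/* Reclaim every WQE still outstanding on the SQ (consumer to producer):
 * unmap the DMA recorded for it and free its skb.  NOP WQEs carry no skb
 * and are simply skipped.
 */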
void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
{
	struct mlx5e_tx_wqe_info *wi;
	struct sk_buff *skb;
	u16 ci;
	int i;

	while (sq->cc != sq->pc) {
		ci = sq->cc & sq->wq.sz_m1;
		skb = sq->skb[ci];
		wi = &sq->wqe_info[ci];

		if (!skb) { /* nop */
			sq->cc++;
			continue;
		}

		for (i = 0; i < wi->num_dma; i++) {
			struct mlx5e_sq_dma *dma =
				mlx5e_dma_get(sq, sq->dma_fifo_cc++);

			mlx5e_tx_dma_unmap(sq->pdev, dma);
		}

		dev_kfree_skb_any(skb);
		sq->cc += wi->num_wqebbs;
	}
}
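/* TX completion handler, run from NAPI context.  Polls up to
 * MLX5E_TX_CQ_POLL_BUDGET CQEs, unmaps the DMA pushed for each completed
 * WQE, frees the skbs, and wakes the TX queue once enough room is available
 * again.  Returns true if the budget was exhausted.
 */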
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
	struct mlx5e_sq *sq;
	u32 dma_fifo_cc;
	u32 nbytes;
	u16 npkts;
	u16 sqcc;
	int i;

	sq = container_of(cq, struct mlx5e_sq, cq);

	if (unlikely(test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)))
		return false;

	npkts = 0;
	nbytes = 0;

	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	sqcc = sq->cc;

	/* avoid dirtying sq cache line every cqe */
	dma_fifo_cc = sq->dma_fifo_cc;

	for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
		struct mlx5_cqe64 *cqe;
		u16 wqe_counter;
		bool last_wqe;

		cqe = mlx5e_get_cqe(cq);
		if (!cqe)
			break;

		mlx5_cqwq_pop(&cq->wq);

		wqe_counter = be16_to_cpu(cqe->wqe_counter);

		do {
			struct mlx5e_tx_wqe_info *wi;
			struct sk_buff *skb;
			u16 ci;
			int j;

			last_wqe = (sqcc == wqe_counter);

			ci = sqcc & sq->wq.sz_m1;
			skb = sq->skb[ci];
			wi = &sq->wqe_info[ci];

			if (unlikely(!skb)) { /* nop */
				sqcc++;
				continue;
			}

			if (unlikely(skb_shinfo(skb)->tx_flags &
				     SKBTX_HW_TSTAMP)) {
				struct skb_shared_hwtstamps hwts = {};

				mlx5e_fill_hwstamp(sq->tstamp,
						   get_cqe_ts(cqe), &hwts);
				skb_tstamp_tx(skb, &hwts);
			}

			for (j = 0; j < wi->num_dma; j++) {
				struct mlx5e_sq_dma *dma =
					mlx5e_dma_get(sq, dma_fifo_cc++);

				mlx5e_tx_dma_unmap(sq->pdev, dma);
			}

			npkts++;
			nbytes += wi->num_bytes;
			sqcc += wi->num_wqebbs;
			napi_consume_skb(skb, napi_budget);
		} while (!last_wqe);
	}

	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	sq->dma_fifo_cc = dma_fifo_cc;
	sq->cc = sqcc;

	netdev_tx_completed_queue(sq->txq, npkts, nbytes);

	if (netif_tx_queue_stopped(sq->txq) &&
	    mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM) &&
	    likely(test_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state))) {
		netif_tx_wake_queue(sq->txq);
		sq->stats.wake++;
	}

	return (i == MLX5E_TX_CQ_POLL_BUDGET);
}