/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include "en.h"

#define MLX5E_SQ_NOPS_ROOM  MLX5_SEND_WQE_MAX_WQEBBS
#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
			    MLX5E_SQ_NOPS_ROOM)
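
/* Post a NOP WQE on the SQ. With notify_hw set, the doorbell is rung right
 * away; with notify_hw == false it is only used to pad the ring up to the
 * edge so that a following multi-WQEBB WQE never wraps around (see the
 * edge-fill loop in mlx5e_sq_xmit()).
 */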
void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
{
	struct mlx5_wq_cyc       *wq   = &sq->wq;

	u16 pi = sq->pc & wq->sz_m1;
	struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);

	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;

	memset(cseg, 0, sizeof(*cseg));

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
	cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | 0x01);

	sq->pc++;

	if (notify_hw) {
		cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
		mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
	}
}
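
/* TX DMA bookkeeping: every buffer mapped for a WQE (linear part or frag) is
 * recorded in the per-SQ dma_fifo ring. dma_fifo_pc is the producer counter
 * (advanced in mlx5e_dma_push()), dma_fifo_cc the consumer counter (advanced
 * on completion, or rewound on mapping errors); both are wrapped with
 * dma_fifo_mask.
 */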
static inline void mlx5e_tx_dma_unmap(struct device *pdev,
				      struct mlx5e_sq_dma *dma)
{
	switch (dma->type) {
	case MLX5E_DMA_MAP_SINGLE:
		dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	case MLX5E_DMA_MAP_PAGE:
		dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	default:
		WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
	}
}
static inline void mlx5e_dma_push(struct mlx5e_sq *sq,
				  dma_addr_t addr,
				  u32 size,
				  enum mlx5e_dma_map_type map_type)
{
	u32 i = sq->dma_fifo_pc & sq->dma_fifo_mask;

	sq->db.txq.dma_fifo[i].addr = addr;
	sq->db.txq.dma_fifo[i].size = size;
	sq->db.txq.dma_fifo[i].type = map_type;
	sq->dma_fifo_pc++;
}
static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_sq *sq, u32 i)
{
	return &sq->db.txq.dma_fifo[i & sq->dma_fifo_mask];
}
static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, u8 num_dma)
{
	int i;

	for (i = 0; i < num_dma; i++) {
		struct mlx5e_sq_dma *last_pushed_dma =
			mlx5e_dma_get(sq, --sq->dma_fifo_pc);

		mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma);
	}
}
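
/* ndo_select_queue: the stack's fallback picks a channel, and when multiple
 * traffic classes are configured the VLAN priority selects the user priority;
 * the final txq index comes from the channel/TC to txq mapping.
 */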
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
		       void *accel_priv, select_queue_fallback_t fallback)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int channel_ix = fallback(dev, skb);
	int up = 0;

	if (!netdev_get_num_tc(dev))
		return channel_ix;

	if (skb_vlan_tag_present(skb))
		up = skb->vlan_tci >> VLAN_PRIO_SHIFT;

	/* channel_ix can be larger than num_channels since
	 * dev->num_real_tx_queues = num_channels * num_tc
	 */
	if (channel_ix >= priv->params.num_channels)
		channel_ix = reciprocal_scale(channel_ix,
					      priv->params.num_channels);

	return priv->channeltc_to_txq_map[channel_ix][up];
}
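
/* Minimal-inline helpers: depending on the SQ inline mode, a number of packet
 * header bytes must be copied into the WQE itself (see
 * mlx5e_get_inline_hdr_size() below) so the device can inspect them before the
 * data gather.
 */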
static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
{
#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)

	return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
}
static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb)
{
	struct flow_keys keys;

	if (skb_transport_header_was_set(skb))
		return skb_transport_offset(skb);
	else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
		return keys.control.thoff;
	else
		return mlx5e_skb_l2_header_offset(skb);
}
static inline unsigned int mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
						 struct sk_buff *skb)
{
	u16 hlen;

	switch (mode) {
	case MLX5_INLINE_MODE_NONE:
		return 0;
	case MLX5_INLINE_MODE_TCP_UDP:
		hlen = eth_get_headlen(skb->data, skb_headlen(skb));
		if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
			hlen += VLAN_HLEN;
		return hlen;
	case MLX5_INLINE_MODE_IP:
		/* When transport header is set to zero, it means no transport
		 * header. When transport header is set to 0xff's, it means
		 * transport header wasn't set.
		 */
		if (skb_transport_offset(skb))
			return mlx5e_skb_l3_header_offset(skb);
		/* fall through */
	case MLX5_INLINE_MODE_L2:
	default:
		return mlx5e_skb_l2_header_offset(skb);
	}
}
static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
					    struct sk_buff *skb, bool bf)
{
	/* Some NIC TX decisions, e.g loopback, are based on the packet
	 * headers and occur before the data gather.
	 * Therefore these headers must be copied into the WQE
	 */
	if (bf) {
		u16 ihs = skb_headlen(skb);

		if (skb_vlan_tag_present(skb))
			ihs += VLAN_HLEN;

		if (ihs <= sq->max_inline)
			return skb_headlen(skb);
	}
	return mlx5e_calc_min_inline(sq->min_inline_mode, skb);
}
static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
					    unsigned int *skb_len,
					    unsigned int len)
{
	*skb_len  -= len;
	*skb_data += len;
}
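
/* Build the inlined headers with a VLAN tag inserted in software: copy the two
 * MAC addresses, write the 802.1Q tag (protocol + TCI), then copy the
 * remaining ihs bytes of headers after the tag.
 */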
static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs,
				     unsigned char **skb_data,
				     unsigned int *skb_len)
{
	struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
	int cpy1_sz = 2 * ETH_ALEN;
	int cpy2_sz = ihs - cpy1_sz;

	memcpy(vhdr, *skb_data, cpy1_sz);
	mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy1_sz);
	vhdr->h_vlan_proto = skb->vlan_proto;
	vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
	memcpy(&vhdr->h_vlan_encapsulated_proto, *skb_data, cpy2_sz);
	mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy2_sz);
}
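
/* Build and post a single send WQE: a ctrl segment plus an eth segment
 * (optionally carrying inlined headers), followed by one data segment per
 * mapped buffer. ds_cnt counts 16-byte (MLX5_SEND_WQE_DS) units and is
 * rounded up to whole 64-byte WQE basic blocks for wi->num_wqebbs. For
 * example, if the ctrl + eth segments take, say, 3 DS units and two buffers
 * are mapped, ds_cnt = 5 and the WQE spans DIV_ROUND_UP(5, 4) = 2 WQEBBs
 * (MLX5_SEND_WQEBB_NUM_DS == 64 / 16).
 */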
static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
{
	struct mlx5_wq_cyc       *wq   = &sq->wq;

	u16 pi = sq->pc & wq->sz_m1;
	struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5e_tx_wqe_info *wi   = &sq->db.txq.wqe_info[pi];

	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
	struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
	struct mlx5_wqe_data_seg *dseg;

	unsigned char *skb_data = skb->data;
	unsigned int skb_len = skb->len;
	u8  opcode = MLX5_OPCODE_SEND;
	dma_addr_t dma_addr = 0;
	unsigned int num_bytes;
	bool bf = false;
	u16 headlen;
	u16 ds_cnt;
	u16 ihs;
	int i;

	memset(wqe, 0, sizeof(*wqe));
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
		if (skb->encapsulation) {
			eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
					  MLX5_ETH_WQE_L4_INNER_CSUM;
			sq->stats.csum_partial_inner++;
		} else {
			eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
		}
	} else
		sq->stats.csum_none++;

	if (sq->cc != sq->prev_cc) {
		sq->prev_cc = sq->cc;
		sq->bf_budget = (sq->cc == sq->pc) ? MLX5E_SQ_BF_BUDGET : 0;
	}
	if (skb_is_gso(skb)) {
		eseg->mss    = cpu_to_be16(skb_shinfo(skb)->gso_size);
		opcode       = MLX5_OPCODE_LSO;

		if (skb->encapsulation) {
			ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
			sq->stats.tso_inner_packets++;
			sq->stats.tso_inner_bytes += skb->len - ihs;
		} else {
			ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
			sq->stats.tso_packets++;
			sq->stats.tso_bytes += skb->len - ihs;
		}

		num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
	} else {
		bf = sq->bf_budget &&
		     !skb->xmit_more &&
		     !skb_shinfo(skb)->nr_frags;
		ihs = mlx5e_get_inline_hdr_size(sq, skb, bf);
		num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
	}

	wi->num_bytes = num_bytes;
	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
	if (ihs) {
		if (skb_vlan_tag_present(skb)) {
			mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs, &skb_data, &skb_len);
			ihs += VLAN_HLEN;
		} else {
			memcpy(eseg->inline_hdr.start, skb_data, ihs);
			mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
		}
		eseg->inline_hdr.sz = cpu_to_be16(ihs);
		ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS);
	} else if (skb_vlan_tag_present(skb)) {
		eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
		eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
	}
	dseg = (struct mlx5_wqe_data_seg *)cseg + ds_cnt;

	wi->num_dma = 0;

	headlen = skb_len - skb->data_len;
	if (headlen) {
		dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
					  DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr       = cpu_to_be64(dma_addr);
		dseg->lkey       = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(headlen);

		mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
		wi->num_dma++;

		dseg++;
	}
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		int fsz = skb_frag_size(frag);

		dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
					    DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr       = cpu_to_be64(dma_addr);
		dseg->lkey       = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(fsz);

		mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
		wi->num_dma++;

		dseg++;
	}
	ds_cnt += wi->num_dma;

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
	cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | ds_cnt);

	sq->db.txq.skb[pi] = skb;

	wi->num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->pc += wi->num_wqebbs;

	netdev_tx_sent_queue(sq->txq, wi->num_bytes);

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
	if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM))) {
		netif_tx_stop_queue(sq->txq);
		sq->stats.stopped++;
	}

	sq->stats.xmit_more += skb->xmit_more;
	if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
		int bf_sz = 0;

		if (bf && test_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state))
			bf_sz = wi->num_wqebbs << 3;

		cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
		mlx5e_tx_notify_hw(sq, &wqe->ctrl, bf_sz);
	}
	/* fill sq edge with nops to avoid wqe wrap around */
	while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
		sq->db.txq.skb[pi] = NULL;
		mlx5e_send_nop(sq, false);
	}

	if (bf)
		sq->bf_budget--;

	sq->stats.packets++;
	sq->stats.bytes += num_bytes;
	return NETDEV_TX_OK;

dma_unmap_wqe_err:
	sq->stats.dropped++;
	mlx5e_dma_unmap_wqe_err(sq, wi->num_dma);

	dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}
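
/* ndo_start_xmit entry point: resolve the SQ from the skb's queue mapping and
 * hand the skb to mlx5e_sq_xmit().
 */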
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_sq *sq = priv->txq_to_sq_map[skb_get_queue_mapping(skb)];

	return mlx5e_sq_xmit(sq, skb);
}
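
/* TX completion handling, called from NAPI poll. Up to MLX5E_TX_CQ_POLL_BUDGET
 * CQEs are processed per call; sq->cc is written back only after the CQ
 * doorbell record update (see the ordering comment below) so the hardware
 * cannot overrun the CQ.
 */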
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
	struct mlx5e_sq *sq;
	u32 dma_fifo_cc;
	u32 nbytes;
	u16 npkts;
	u16 sqcc;
	int i;

	sq = container_of(cq, struct mlx5e_sq, cq);

	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
		return false;

	npkts = 0;
	nbytes = 0;

	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	sqcc = sq->cc;

	/* avoid dirtying sq cache line every cqe */
	dma_fifo_cc = sq->dma_fifo_cc;
	for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
		struct mlx5_cqe64 *cqe;
		u16 wqe_counter;
		bool last_wqe;

		cqe = mlx5e_get_cqe(cq);
		if (!cqe)
			break;

		mlx5_cqwq_pop(&cq->wq);

		wqe_counter = be16_to_cpu(cqe->wqe_counter);

		do {
			struct mlx5e_tx_wqe_info *wi;
			struct sk_buff *skb;
			u16 ci;
			int j;

			last_wqe = (sqcc == wqe_counter);

			ci = sqcc & sq->wq.sz_m1;
			skb = sq->db.txq.skb[ci];
			wi = &sq->db.txq.wqe_info[ci];

			if (unlikely(!skb)) { /* nop */
				sqcc++;
				continue;
			}

			if (unlikely(skb_shinfo(skb)->tx_flags &
				     SKBTX_HW_TSTAMP)) {
				struct skb_shared_hwtstamps hwts = {};

				mlx5e_fill_hwstamp(sq->tstamp,
						   get_cqe_ts(cqe), &hwts);
				skb_tstamp_tx(skb, &hwts);
			}

			for (j = 0; j < wi->num_dma; j++) {
				struct mlx5e_sq_dma *dma =
					mlx5e_dma_get(sq, dma_fifo_cc++);

				mlx5e_tx_dma_unmap(sq->pdev, dma);
			}

			npkts++;
			nbytes += wi->num_bytes;
			sqcc += wi->num_wqebbs;
			napi_consume_skb(skb, napi_budget);
		} while (!last_wqe);
	}
	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	sq->dma_fifo_cc = dma_fifo_cc;
	sq->cc = sqcc;

	netdev_tx_completed_queue(sq->txq, npkts, nbytes);

	if (netif_tx_queue_stopped(sq->txq) &&
	    mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM)) {
		netif_tx_wake_queue(sq->txq);
		sq->stats.wake++;
	}

	return (i == MLX5E_TX_CQ_POLL_BUDGET);
}
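
/* Reclaim descriptors still outstanding on a TXQ SQ (e.g. when the SQ is torn
 * down): walk from cc to pc, unmap the DMA buffers recorded in the dma_fifo
 * and free the skbs.
 */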
static void mlx5e_free_txq_sq_descs(struct mlx5e_sq *sq)
{
	struct mlx5e_tx_wqe_info *wi;
	struct sk_buff *skb;
	u16 ci;
	int i;

	while (sq->cc != sq->pc) {
		ci = sq->cc & sq->wq.sz_m1;
		skb = sq->db.txq.skb[ci];
		wi = &sq->db.txq.wqe_info[ci];

		if (!skb) { /* nop */
			sq->cc++;
			continue;
		}

		for (i = 0; i < wi->num_dma; i++) {
			struct mlx5e_sq_dma *dma =
				mlx5e_dma_get(sq, sq->dma_fifo_cc++);

			mlx5e_tx_dma_unmap(sq->pdev, dma);
		}

		dev_kfree_skb_any(skb);
		sq->cc += wi->num_wqebbs;
	}
}
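
/* Same walk for an XDP SQ: there are no skbs to free here, instead the pages
 * backing the transmitted frames are released via mlx5e_page_release().
 */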
static void mlx5e_free_xdp_sq_descs(struct mlx5e_sq *sq)
{
	struct mlx5e_sq_wqe_info *wi;
	struct mlx5e_dma_info *di;
	u16 ci;

	while (sq->cc != sq->pc) {
		ci = sq->cc & sq->wq.sz_m1;
		di = &sq->db.xdp.di[ci];
		wi = &sq->db.xdp.wqe_info[ci];

		if (wi->opcode == MLX5_OPCODE_NOP) {
			sq->cc++;
			continue;
		}

		sq->cc += wi->num_wqebbs;

		mlx5e_page_release(&sq->channel->rq, di, false);
	}
}
void mlx5e_free_sq_descs(struct mlx5e_sq *sq)
{
	switch (sq->type) {
	case MLX5E_SQ_TXQ:
		mlx5e_free_txq_sq_descs(sq);
		break;
	case MLX5E_SQ_XDP:
		mlx5e_free_xdp_sq_descs(sq);
		break;
	}
}