1 /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
2 /* Copyright (c) 2019 Mellanox Technologies. */
4 #ifndef __MLX5_EN_TXRX_H___
5 #define __MLX5_EN_TXRX_H___
/* Room (in WQEBBs) reserved so a maximal WQE plus trailing NOPs always fit
 * before the SQ is stopped.
 */
#define MLX5E_SQ_NOPS_ROOM  MLX5_SEND_WQE_MAX_WQEBBS
#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
			    MLX5E_SQ_NOPS_ROOM)

#ifndef CONFIG_MLX5_EN_TLS
#define MLX5E_SQ_TLS_ROOM (0)
#else
/* TLS offload requires additional stop_room for:
 *  - a resync SKB.
 */
#define MLX5E_SQ_TLS_ROOM  \
	(MLX5_SEND_WQE_MAX_WQEBBS)
#endif

/* Size of the inline-header start area inside the eth segment of a WQE;
 * used when copying packet headers inline.
 */
#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
26 mlx5e_wqc_has_room_for(struct mlx5_wq_cyc
*wq
, u16 cc
, u16 pc
, u16 n
)
28 return (mlx5_wq_cyc_ctr2ix(wq
, cc
- pc
) >= n
) || (cc
== pc
);
32 mlx5e_sq_fetch_wqe(struct mlx5e_txqsq
*sq
, size_t size
, u16
*pi
)
34 struct mlx5_wq_cyc
*wq
= &sq
->wq
;
37 *pi
= mlx5_wq_cyc_ctr2ix(wq
, sq
->pc
);
38 wqe
= mlx5_wq_cyc_get_wqe(wq
, *pi
);
44 static inline struct mlx5e_tx_wqe
*
45 mlx5e_post_nop(struct mlx5_wq_cyc
*wq
, u32 sqn
, u16
*pc
)
47 u16 pi
= mlx5_wq_cyc_ctr2ix(wq
, *pc
);
48 struct mlx5e_tx_wqe
*wqe
= mlx5_wq_cyc_get_wqe(wq
, pi
);
49 struct mlx5_wqe_ctrl_seg
*cseg
= &wqe
->ctrl
;
51 memset(cseg
, 0, sizeof(*cseg
));
53 cseg
->opmod_idx_opcode
= cpu_to_be32((*pc
<< 8) | MLX5_OPCODE_NOP
);
54 cseg
->qpn_ds
= cpu_to_be32((sqn
<< 8) | 0x01);
61 static inline struct mlx5e_tx_wqe
*
62 mlx5e_post_nop_fence(struct mlx5_wq_cyc
*wq
, u32 sqn
, u16
*pc
)
64 u16 pi
= mlx5_wq_cyc_ctr2ix(wq
, *pc
);
65 struct mlx5e_tx_wqe
*wqe
= mlx5_wq_cyc_get_wqe(wq
, pi
);
66 struct mlx5_wqe_ctrl_seg
*cseg
= &wqe
->ctrl
;
68 memset(cseg
, 0, sizeof(*cseg
));
70 cseg
->opmod_idx_opcode
= cpu_to_be32((*pc
<< 8) | MLX5_OPCODE_NOP
);
71 cseg
->qpn_ds
= cpu_to_be32((sqn
<< 8) | 0x01);
72 cseg
->fm_ce_se
= MLX5_FENCE_MODE_INITIATOR_SMALL
;
80 mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq
*sq
, struct mlx5_wq_cyc
*wq
,
83 struct mlx5e_tx_wqe_info
*edge_wi
, *wi
= &sq
->db
.wqe_info
[pi
];
87 /* fill sq frag edge with nops to avoid wqe wrapping two pages */
88 for (; wi
< edge_wi
; wi
++) {
91 mlx5e_post_nop(wq
, sq
->sqn
, &sq
->pc
);
93 sq
->stats
->nop
+= nnops
;
97 mlx5e_notify_hw(struct mlx5_wq_cyc
*wq
, u16 pc
, void __iomem
*uar_map
,
98 struct mlx5_wqe_ctrl_seg
*ctrl
)
100 ctrl
->fm_ce_se
= MLX5_WQE_CTRL_CQ_UPDATE
;
101 /* ensure wqe is visible to device before updating doorbell record */
104 *wq
->db
= cpu_to_be32(pc
);
106 /* ensure doorbell record is visible to device before ringing the
111 mlx5_write64((__be32
*)ctrl
, uar_map
);
114 static inline bool mlx5e_transport_inline_tx_wqe(struct mlx5e_tx_wqe
*wqe
)
116 return !!wqe
->ctrl
.tisn
;
119 static inline void mlx5e_cq_arm(struct mlx5e_cq
*cq
)
121 struct mlx5_core_cq
*mcq
;
124 mlx5_cq_arm(mcq
, MLX5_CQ_DB_REQ_NOT
, mcq
->uar
->map
, cq
->wq
.cc
);
127 static inline struct mlx5e_sq_dma
*
128 mlx5e_dma_get(struct mlx5e_txqsq
*sq
, u32 i
)
130 return &sq
->db
.dma_fifo
[i
& sq
->dma_fifo_mask
];
134 mlx5e_dma_push(struct mlx5e_txqsq
*sq
, dma_addr_t addr
, u32 size
,
135 enum mlx5e_dma_map_type map_type
)
137 struct mlx5e_sq_dma
*dma
= mlx5e_dma_get(sq
, sq
->dma_fifo_pc
++);
141 dma
->type
= map_type
;
145 mlx5e_tx_dma_unmap(struct device
*pdev
, struct mlx5e_sq_dma
*dma
)
148 case MLX5E_DMA_MAP_SINGLE
:
149 dma_unmap_single(pdev
, dma
->addr
, dma
->size
, DMA_TO_DEVICE
);
151 case MLX5E_DMA_MAP_PAGE
:
152 dma_unmap_page(pdev
, dma
->addr
, dma
->size
, DMA_TO_DEVICE
);
155 WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
159 /* SW parser related functions */
161 struct mlx5e_swp_spec
{
170 mlx5e_set_eseg_swp(struct sk_buff
*skb
, struct mlx5_wqe_eth_seg
*eseg
,
171 struct mlx5e_swp_spec
*swp_spec
)
173 /* SWP offsets are in 2-bytes words */
174 eseg
->swp_outer_l3_offset
= skb_network_offset(skb
) / 2;
175 if (swp_spec
->l3_proto
== htons(ETH_P_IPV6
))
176 eseg
->swp_flags
|= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6
;
177 if (swp_spec
->l4_proto
) {
178 eseg
->swp_outer_l4_offset
= skb_transport_offset(skb
) / 2;
179 if (swp_spec
->l4_proto
== IPPROTO_UDP
)
180 eseg
->swp_flags
|= MLX5_ETH_WQE_SWP_OUTER_L4_UDP
;
183 if (swp_spec
->is_tun
) {
184 eseg
->swp_inner_l3_offset
= skb_inner_network_offset(skb
) / 2;
185 if (swp_spec
->tun_l3_proto
== htons(ETH_P_IPV6
))
186 eseg
->swp_flags
|= MLX5_ETH_WQE_SWP_INNER_L3_IPV6
;
187 } else { /* typically for ipsec when xfrm mode != XFRM_MODE_TUNNEL */
188 eseg
->swp_inner_l3_offset
= skb_network_offset(skb
) / 2;
189 if (swp_spec
->l3_proto
== htons(ETH_P_IPV6
))
190 eseg
->swp_flags
|= MLX5_ETH_WQE_SWP_INNER_L3_IPV6
;
192 switch (swp_spec
->tun_l4_proto
) {
194 eseg
->swp_flags
|= MLX5_ETH_WQE_SWP_INNER_L4_UDP
;
197 eseg
->swp_inner_l4_offset
= skb_inner_transport_offset(skb
) / 2;