/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <net/dsfield.h>
#include "en.h"
#include "ipoib/ipoib.h"
#include "en_accel/ipsec_rxtx.h"
#include "lib/clock.h"

#define MLX5E_SQ_NOPS_ROOM  MLX5_SEND_WQE_MAX_WQEBBS
#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
			    MLX5E_SQ_NOPS_ROOM)

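/* Release a single DMA mapping recorded in the SQ's DMA fifo, using the
 * unmap call that matches how the buffer was originally mapped.
 */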
static inline void mlx5e_tx_dma_unmap(struct device *pdev,
				      struct mlx5e_sq_dma *dma)
{
	switch (dma->type) {
	case MLX5E_DMA_MAP_SINGLE:
		dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	case MLX5E_DMA_MAP_PAGE:
		dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	default:
		WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
	}
}

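/* Record a DMA mapping in the SQ's circular DMA fifo so it can be
 * unmapped at completion time (or unwound on error). The producer
 * counter wraps via dma_fifo_mask, so the fifo size must be a power of
 * two.
 */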
static inline void mlx5e_dma_push(struct mlx5e_txqsq *sq,
				  dma_addr_t addr,
				  u32 size,
				  enum mlx5e_dma_map_type map_type)
{
	u32 i = sq->dma_fifo_pc & sq->dma_fifo_mask;

	sq->db.dma_fifo[i].addr = addr;
	sq->db.dma_fifo[i].size = size;
	sq->db.dma_fifo[i].type = map_type;
	sq->dma_fifo_pc++;
}

static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
{
	return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
}

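/* Error unwind: pop the last num_dma entries off the DMA fifo and unmap
 * them, walking backwards from the producer counter.
 */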
static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
{
	int i;

	for (i = 0; i < num_dma; i++) {
		struct mlx5e_sq_dma *last_pushed_dma =
			mlx5e_dma_get(sq, --sq->dma_fifo_pc);

		mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma);
	}
}

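/* Map the packet's DSCP value to a user priority via the dcbx dscp2prio
 * table. Non-IP packets fall through with DSCP 0.
 */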
#ifdef CONFIG_MLX5_CORE_EN_DCB
static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb)
{
	int dscp_cp = 0;

	if (skb->protocol == htons(ETH_P_IP))
		dscp_cp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
	else if (skb->protocol == htons(ETH_P_IPV6))
		dscp_cp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;

	return priv->dcbx_dp.dscp2prio[dscp_cp];
}
#endif

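/* ndo_select_queue: pick a channel with the stack's fallback hash, derive
 * the user priority (from DSCP when the port trust state is DSCP, else
 * from the VLAN PCP bits), and combine the two through channel_tc2txq.
 */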
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
		       void *accel_priv, select_queue_fallback_t fallback)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int channel_ix = fallback(dev, skb);
	u16 num_channels;
	int up = 0;

	if (!netdev_get_num_tc(dev))
		return channel_ix;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP)
		up = mlx5e_get_dscp_up(priv, skb);
	else
#endif
		if (skb_vlan_tag_present(skb))
			up = skb->vlan_tci >> VLAN_PRIO_SHIFT;

	/* channel_ix can be larger than num_channels since
	 * dev->real_num_tx_queues = num_channels * num_tc
	 */
	num_channels = priv->channels.params.num_channels;
	if (channel_ix >= num_channels)
		channel_ix = reciprocal_scale(channel_ix, num_channels);

	return priv->channel_tc2txq[channel_ix][up];
}

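/* Inline-header sizing helpers: the L2 offset is at least the Ethernet
 * header plus room for a VLAN tag, and the L3 helper falls back to flow
 * dissection when the transport header has not been set yet.
 */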
static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
{
#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)

	return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
}

static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb)
{
	struct flow_keys keys;

	if (skb_transport_header_was_set(skb))
		return skb_transport_offset(skb);
	else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
		return keys.control.thoff;
	else
		return mlx5e_skb_l2_header_offset(skb);
}

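/* Compute how many header bytes must be inlined into the WQE for the
 * SQ's configured inline mode, clamped to the skb's linear part.
 */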
static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
					struct sk_buff *skb)
{
	u16 hlen;

	switch (mode) {
	case MLX5_INLINE_MODE_NONE:
		return 0;
	case MLX5_INLINE_MODE_TCP_UDP:
		hlen = eth_get_headlen(skb->data, skb_headlen(skb));
		if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
			hlen += VLAN_HLEN;
		break;
	case MLX5_INLINE_MODE_IP:
		/* When transport header is set to zero, it means no transport
		 * header. When transport header is set to 0xff's, it means
		 * transport header wasn't set.
		 */
		if (skb_transport_offset(skb)) {
			hlen = mlx5e_skb_l3_header_offset(skb);
			break;
		}
		/* fall through */
	case MLX5_INLINE_MODE_L2:
	default:
		hlen = mlx5e_skb_l2_header_offset(skb);
	}
	return min_t(u16, hlen, skb_headlen(skb));
}

static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
					    unsigned int *skb_len,
					    unsigned int len)
{
	*skb_len -= len;
	*skb_data += len;
}

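/* Copy the inlined headers into the WQE while inserting an 802.1Q tag in
 * software: the destination/source MACs (2 * ETH_ALEN bytes) are copied
 * first, then the 4-byte VLAN header, then the remaining ihs - cpy1_sz
 * header bytes, advancing skb_data/skb_len past everything inlined.
 */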
static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs,
				     unsigned char **skb_data,
				     unsigned int *skb_len)
{
	struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
	int cpy1_sz = 2 * ETH_ALEN;
	int cpy2_sz = ihs - cpy1_sz;

	memcpy(vhdr, *skb_data, cpy1_sz);
	mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy1_sz);
	vhdr->h_vlan_proto = skb->vlan_proto;
	vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
	memcpy(&vhdr->h_vlan_encapsulated_proto, *skb_data, cpy2_sz);
	mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy2_sz);
}

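/* Fill the eth segment's checksum flags: request L3 (and L4, or inner
 * L3/L4 for encapsulated packets) checksum offload for CHECKSUM_PARTIAL
 * skbs, and count everything else as csum_none.
 */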
static inline void
mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
{
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
		if (skb->encapsulation) {
			eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
					  MLX5_ETH_WQE_L4_INNER_CSUM;
			sq->stats.csum_partial_inner++;
		} else {
			eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
			sq->stats.csum_partial++;
		}
	} else
		sq->stats.csum_none++;
}

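/* Set up TSO: program the MSS into the eth segment, compute the inline
 * header size up to the end of the (inner) TCP header, and derive the
 * total wire byte count, which includes one replicated header per extra
 * segment. Returns the inline header size.
 */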
static inline u16
mlx5e_txwqe_build_eseg_gso(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			   struct mlx5_wqe_eth_seg *eseg, unsigned int *num_bytes)
{
	u16 ihs;

	eseg->mss = cpu_to_be16(skb_shinfo(skb)->gso_size);

	if (skb->encapsulation) {
		ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
		sq->stats.tso_inner_packets++;
		sq->stats.tso_inner_bytes += skb->len - ihs;
	} else {
		ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
		sq->stats.tso_packets++;
		sq->stats.tso_bytes += skb->len - ihs;
	}

	*num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
	return ihs;
}

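/* DMA-map the skb's remaining linear part and each page fragment, and
 * write one data segment per mapping into the WQE. Every mapping is also
 * pushed onto the DMA fifo for release at completion time. Returns the
 * number of data segments written, or -ENOMEM after unwinding on a
 * mapping failure.
 */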
static inline int
mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			unsigned char *skb_data, u16 headlen,
			struct mlx5_wqe_data_seg *dseg)
{
	dma_addr_t dma_addr = 0;
	u8 num_dma = 0;
	int i;

	if (headlen) {
		dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
					  DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr       = cpu_to_be64(dma_addr);
		dseg->lkey       = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(headlen);

		mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
		num_dma++;
		dseg++;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		int fsz = skb_frag_size(frag);

		dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
					    DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr       = cpu_to_be64(dma_addr);
		dseg->lkey       = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(fsz);

		mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
		num_dma++;
		dseg++;
	}

	return num_dma;

dma_unmap_wqe_err:
	mlx5e_dma_unmap_wqe_err(sq, num_dma);
	return -ENOMEM;
}

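/* Finalize and post the WQE: fill the wqe_info consumed at completion,
 * write the control segment, account the bytes with BQL, advance the
 * producer counter, stop the queue when fewer than MLX5E_SQ_STOP_ROOM
 * WQEBBs remain, ring the doorbell unless more skbs are pending
 * (xmit_more), and pad the SQ edge with NOPs so no WQE wraps around the
 * work queue.
 */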
static inline void
mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
		     u8 opcode, u16 ds_cnt, u32 num_bytes, u8 num_dma,
		     struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi;

	wi->num_bytes = num_bytes;
	wi->num_dma = num_dma;
	wi->num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	wi->skb = skb;

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
	cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | ds_cnt);

	netdev_tx_sent_queue(sq->txq, num_bytes);

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	sq->pc += wi->num_wqebbs;
	if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, MLX5E_SQ_STOP_ROOM))) {
		netif_tx_stop_queue(sq->txq);
		sq->stats.stopped++;
	}

	if (!skb->xmit_more || netif_xmit_stopped(sq->txq))
		mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);

	/* fill sq edge with nops to avoid wqe wrap around */
	while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
		sq->db.wqe_info[pi].skb = NULL;
		mlx5e_post_nop(wq, sq->sqn, &sq->pc);
		sq->stats.nop++;
	}
}

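/* Build and post a single send WQE for an Ethernet skb: checksum flags,
 * TSO or minimal header inlining, VLAN handling (inlined in software or
 * offloaded via the WQE's insert field), then the gather list. On a DMA
 * mapping failure the skb is dropped and counted.
 */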
static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
				 struct mlx5e_tx_wqe *wqe, u16 pi)
{
	struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];

	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
	struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;

	unsigned char *skb_data = skb->data;
	unsigned int skb_len = skb->len;
	u8  opcode = MLX5_OPCODE_SEND;
	unsigned int num_bytes;
	int num_dma;
	u16 headlen;
	u16 ds_cnt;
	u16 ihs;

	mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);

	if (skb_is_gso(skb)) {
		opcode = MLX5_OPCODE_LSO;
		ihs = mlx5e_txwqe_build_eseg_gso(sq, skb, eseg, &num_bytes);
		sq->stats.packets += skb_shinfo(skb)->gso_segs;
	} else {
		ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
		num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
		sq->stats.packets++;
	}
	sq->stats.bytes += num_bytes;
	sq->stats.xmit_more += skb->xmit_more;

	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
	if (ihs) {
		if (skb_vlan_tag_present(skb)) {
			mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs, &skb_data, &skb_len);
			ihs += VLAN_HLEN;
			sq->stats.added_vlan_packets++;
		} else {
			memcpy(eseg->inline_hdr.start, skb_data, ihs);
			mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
		}
		eseg->inline_hdr.sz = cpu_to_be16(ihs);
		ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS);
	} else if (skb_vlan_tag_present(skb)) {
		eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
		if (skb->vlan_proto == cpu_to_be16(ETH_P_8021AD))
			eseg->insert.type |= cpu_to_be16(MLX5_ETH_WQE_SVLAN);
		eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
		sq->stats.added_vlan_packets++;
	}

	headlen = skb_len - skb->data_len;
	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen,
					  (struct mlx5_wqe_data_seg *)cseg + ds_cnt);
	if (unlikely(num_dma < 0))
		goto err_drop;

	mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma,
			     num_bytes, num_dma, wi, cseg);

	return NETDEV_TX_OK;

err_drop:
	sq->stats.dropped++;
	dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}

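/* ndo_start_xmit entry point: resolve the SQ from the skb's queue
 * mapping, grab and zero the next WQE, let IPsec rewrite the skb if the
 * SQ has IPsec offload enabled, then hand off to mlx5e_sq_xmit().
 */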
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_txqsq *sq = priv->txq2sq[skb_get_queue_mapping(skb)];
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi = sq->pc & wq->sz_m1;
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);

	memset(wqe, 0, sizeof(*wqe));

#ifdef CONFIG_MLX5_EN_IPSEC
	if (sq->state & BIT(MLX5E_SQ_STATE_IPSEC)) {
		skb = mlx5e_ipsec_handle_tx_skb(dev, wqe, skb);
		if (unlikely(!skb))
			return NETDEV_TX_OK;
	}
#endif

	return mlx5e_sq_xmit(sq, skb, wqe, pi);
}

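/* TX completion poll, called from NAPI: walk up to
 * MLX5E_TX_CQ_POLL_BUDGET CQEs, and for each one retire every WQE up to
 * and including the one the CQE reports. Retiring a WQE delivers a HW TX
 * timestamp when requested, unmaps its DMA fifo entries, and frees the
 * skb. Returns true if the budget was exhausted, i.e. more work may
 * remain.
 */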
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
	struct mlx5e_txqsq *sq;
	struct mlx5_cqe64 *cqe;
	u32 dma_fifo_cc;
	u32 nbytes;
	u16 npkts;
	u16 sqcc;
	int i;

	sq = container_of(cq, struct mlx5e_txqsq, cq);

	if (unlikely(!MLX5E_TEST_BIT(sq->state, MLX5E_SQ_STATE_ENABLED)))
		return false;

	cqe = mlx5_cqwq_get_cqe(&cq->wq);
	if (!cqe)
		return false;

	npkts = 0;
	nbytes = 0;

	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	sqcc = sq->cc;

	/* avoid dirtying sq cache line every cqe */
	dma_fifo_cc = sq->dma_fifo_cc;

	i = 0;
	do {
		u16 wqe_counter;
		bool last_wqe;

		mlx5_cqwq_pop(&cq->wq);

		wqe_counter = be16_to_cpu(cqe->wqe_counter);

		do {
			struct mlx5e_tx_wqe_info *wi;
			struct sk_buff *skb;
			u16 ci;
			int j;

			last_wqe = (sqcc == wqe_counter);

			ci = sqcc & sq->wq.sz_m1;
			wi = &sq->db.wqe_info[ci];
			skb = wi->skb;

			if (unlikely(!skb)) { /* nop */
				sqcc++;
				continue;
			}

			if (unlikely(skb_shinfo(skb)->tx_flags &
				     SKBTX_HW_TSTAMP)) {
				struct skb_shared_hwtstamps hwts = {};

				hwts.hwtstamp =
					mlx5_timecounter_cyc2time(sq->clock,
								  get_cqe_ts(cqe));
				skb_tstamp_tx(skb, &hwts);
			}

			for (j = 0; j < wi->num_dma; j++) {
				struct mlx5e_sq_dma *dma =
					mlx5e_dma_get(sq, dma_fifo_cc++);

				mlx5e_tx_dma_unmap(sq->pdev, dma);
			}

			npkts++;
			nbytes += wi->num_bytes;
			sqcc += wi->num_wqebbs;
			napi_consume_skb(skb, napi_budget);
		} while (!last_wqe);

	} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));

	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	sq->dma_fifo_cc = dma_fifo_cc;
	sq->cc = sqcc;

	netdev_tx_completed_queue(sq->txq, npkts, nbytes);

	if (netif_tx_queue_stopped(sq->txq) &&
	    mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, MLX5E_SQ_STOP_ROOM)) {
		netif_tx_wake_queue(sq->txq);
		sq->stats.wake++;
	}

	return (i == MLX5E_TX_CQ_POLL_BUDGET);
}

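/* Teardown path: drain all outstanding WQEs between cc and pc without
 * waiting for completions, unmapping their DMA and freeing their skbs.
 */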
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
{
	struct mlx5e_tx_wqe_info *wi;
	struct sk_buff *skb;
	u16 ci;
	int i;

	while (sq->cc != sq->pc) {
		ci = sq->cc & sq->wq.sz_m1;
		wi = &sq->db.wqe_info[ci];
		skb = wi->skb;

		if (!skb) { /* nop */
			sq->cc++;
			continue;
		}

		for (i = 0; i < wi->num_dma; i++) {
			struct mlx5e_sq_dma *dma =
				mlx5e_dma_get(sq, sq->dma_fifo_cc++);

			mlx5e_tx_dma_unmap(sq->pdev, dma);
		}

		dev_kfree_skb_any(skb);
		sq->cc += wi->num_wqebbs;
	}
}

#ifdef CONFIG_MLX5_CORE_IPOIB

struct mlx5_wqe_eth_pad {
	u8 rsvd0[16];
};

struct mlx5i_tx_wqe {
	struct mlx5_wqe_ctrl_seg     ctrl;
	struct mlx5_wqe_datagram_seg datagram;
	struct mlx5_wqe_eth_pad      pad;
	struct mlx5_wqe_eth_seg      eth;
};

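/* Fill the datagram segment from the address vector, encoding the
 * destination QP number (flagged as an extended UD AV) and Q_Key.
 */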
static inline void
mlx5i_txwqe_build_datagram(struct mlx5_av *av, u32 dqpn, u32 dqkey,
			   struct mlx5_wqe_datagram_seg *dseg)
{
	memcpy(&dseg->av, av, sizeof(struct mlx5_av));
	dseg->av.dqp_dct = cpu_to_be32(dqpn | MLX5_EXTENDED_UD_AV);
	dseg->av.key.qkey.qkey = cpu_to_be32(dqkey);
}

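/* IPoIB transmit path: same flow as mlx5e_sq_xmit(), but for a UD QP the
 * WQE carries an extra datagram segment and no VLAN handling is needed.
 */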
netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			  struct mlx5_av *av, u32 dqpn, u32 dqkey)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi = sq->pc & wq->sz_m1;
	struct mlx5i_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];

	struct mlx5_wqe_ctrl_seg     *cseg = &wqe->ctrl;
	struct mlx5_wqe_datagram_seg *datagram = &wqe->datagram;
	struct mlx5_wqe_eth_seg      *eseg = &wqe->eth;

	unsigned char *skb_data = skb->data;
	unsigned int skb_len = skb->len;
	u8  opcode = MLX5_OPCODE_SEND;
	unsigned int num_bytes;
	int num_dma;
	u16 headlen;
	u16 ds_cnt;
	u16 ihs;

	memset(wqe, 0, sizeof(*wqe));

	mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram);

	mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);

	if (skb_is_gso(skb)) {
		opcode = MLX5_OPCODE_LSO;
		ihs = mlx5e_txwqe_build_eseg_gso(sq, skb, eseg, &num_bytes);
		sq->stats.packets += skb_shinfo(skb)->gso_segs;
	} else {
		ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
		num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
		sq->stats.packets++;
	}

	sq->stats.bytes += num_bytes;
	sq->stats.xmit_more += skb->xmit_more;

	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
	if (ihs) {
		memcpy(eseg->inline_hdr.start, skb_data, ihs);
		mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
		eseg->inline_hdr.sz = cpu_to_be16(ihs);
		ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS);
	}

	headlen = skb_len - skb->data_len;
	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen,
					  (struct mlx5_wqe_data_seg *)cseg + ds_cnt);
	if (unlikely(num_dma < 0))
		goto err_drop;

	mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma,
			     num_bytes, num_dma, wi, cseg);

	return NETDEV_TX_OK;

err_drop:
	sq->stats.dropped++;
	dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}

#endif