/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <net/busy_poll.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/mlx4/cq.h>
#include <linux/slab.h>
#include <linux/mlx4/qp.h>
#include <linux/skbuff.h>
#include <linux/rculist.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <linux/irq.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_checksum.h>
#endif

#include "mlx4_en.h"

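/* Allocate a fresh page for one Rx fragment and DMA-map it for receive;
 * the payload starts at the configured rx_headroom offset.
 */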
static int mlx4_alloc_page(struct mlx4_en_priv *priv,
			   struct mlx4_en_rx_alloc *frag,
			   gfp_t gfp)
{
	struct page *page;
	dma_addr_t dma;

	page = alloc_page(gfp);
	if (unlikely(!page))
		return -ENOMEM;
	dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE, priv->dma_dir);
	if (unlikely(dma_mapping_error(priv->ddev, dma))) {
		__free_page(page);
		return -ENOMEM;
	}
	frag->page = page;
	frag->dma = dma;
	frag->page_offset = priv->rx_headroom;
	return 0;
}

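/* Populate every fragment of one Rx descriptor, allocating pages as needed
 * and writing the mapped addresses into the hardware descriptor.
 */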
static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
			       struct mlx4_en_rx_ring *ring,
			       struct mlx4_en_rx_desc *rx_desc,
			       struct mlx4_en_rx_alloc *frags,
			       gfp_t gfp)
{
	int i;

	for (i = 0; i < priv->num_frags; i++, frags++) {
		if (!frags->page) {
			if (mlx4_alloc_page(priv, frags, gfp))
				return -ENOMEM;
			ring->rx_alloc_pages++;
		}
		rx_desc->data[i].addr = cpu_to_be64(frags->dma +
						    frags->page_offset);
	}
	return 0;
}

static void mlx4_en_free_frag(const struct mlx4_en_priv *priv,
			      struct mlx4_en_rx_alloc *frag)
{
	if (frag->page) {
		dma_unmap_page(priv->ddev, frag->dma,
			       PAGE_SIZE, priv->dma_dir);
		__free_page(frag->page);
	}
	/* We need to clear all fields, otherwise a change of priv->log_rx_info
	 * could lead to seeing garbage later in frag->page.
	 */
	memset(frag, 0, sizeof(*frag));
}

static void mlx4_en_init_rx_desc(const struct mlx4_en_priv *priv,
				 struct mlx4_en_rx_ring *ring, int index)
{
	struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
	int possible_frags;
	int i;

	/* Set size and memtype fields */
	for (i = 0; i < priv->num_frags; i++) {
		rx_desc->data[i].byte_count =
			cpu_to_be32(priv->frag_info[i].frag_size);
		rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key);
	}

	/* If the number of used fragments does not fill up the ring stride,
	 * remaining (unused) fragments must be padded with null address/size
	 * and a special memory key */
	possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE;
	for (i = priv->num_frags; i < possible_frags; i++) {
		rx_desc->data[i].byte_count = 0;
		rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD);
		rx_desc->data[i].addr = 0;
	}
}

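/* Prepare one Rx descriptor: reuse a page from the XDP page cache when one
 * is available, otherwise fall back to allocating fresh fragments.
 */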
static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
				   struct mlx4_en_rx_ring *ring, int index,
				   gfp_t gfp)
{
	struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
	struct mlx4_en_rx_alloc *frags = ring->rx_info +
					(index << priv->log_rx_info);

	if (ring->page_cache.index > 0) {
		/* XDP uses a single page per frame */
		ring->page_cache.index--;
		frags->page = ring->page_cache.buf[ring->page_cache.index].page;
		frags->dma  = ring->page_cache.buf[ring->page_cache.index].dma;
		frags->page_offset = XDP_PACKET_HEADROOM;
		rx_desc->data[0].addr = cpu_to_be64(frags->dma +
						    XDP_PACKET_HEADROOM);
		return 0;
	}

	return mlx4_en_alloc_frags(priv, ring, rx_desc, frags, gfp);
}

static bool mlx4_en_is_ring_empty(const struct mlx4_en_rx_ring *ring)
{
	return ring->prod == ring->cons;
}

static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
{
	*ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
}

static void mlx4_en_free_rx_desc(const struct mlx4_en_priv *priv,
				 struct mlx4_en_rx_ring *ring,
				 int index)
{
	struct mlx4_en_rx_alloc *frags;
	int nr;

	frags = ring->rx_info + (index << priv->log_rx_info);
	for (nr = 0; nr < priv->num_frags; nr++) {
		en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
		mlx4_en_free_frag(priv, frags + nr);
	}
}

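/* Fill all Rx rings with descriptors; if allocation runs short, shrink the
 * rings to the largest power of two that was actually filled.
 */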
static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int ring_ind;
	int buf_ind;
	int new_size;

	for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
		for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
			ring = priv->rx_ring[ring_ind];

			if (mlx4_en_prepare_rx_desc(priv, ring,
						    ring->actual_size,
						    GFP_KERNEL | __GFP_COLD)) {
				if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
					en_err(priv, "Failed to allocate enough rx buffers\n");
					return -ENOMEM;
				}
				new_size = rounddown_pow_of_two(ring->actual_size);
				en_warn(priv, "Only %d buffers allocated reducing ring size to %d\n",
					ring->actual_size, new_size);
				goto reduce_rings;
			}
			ring->actual_size++;
			ring->prod++;
		}
	}
	return 0;

reduce_rings:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];
		while (ring->actual_size > new_size) {
			ring->actual_size--;
			ring->prod--;
			mlx4_en_free_rx_desc(priv, ring, ring->actual_size);
		}
	}

	return 0;
}

static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
{
	int index;

	en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
	       ring->cons, ring->prod);

	/* Unmap and free Rx buffers */
	for (index = 0; index < ring->size; index++) {
		en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
		mlx4_en_free_rx_desc(priv, ring, index);
	}
	ring->cons = 0;
	ring->prod = 0;
}

void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
{
	int i;
	int num_of_eqs;
	int num_rx_rings;
	struct mlx4_dev *dev = mdev->dev;

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
		num_of_eqs = max_t(int, MIN_RX_RINGS,
				   min_t(int,
					 mlx4_get_eqs_per_port(mdev->dev, i),
					 DEF_RX_RINGS));

		num_rx_rings = mlx4_low_memory_profile() ? MIN_RX_RINGS :
			min_t(int, num_of_eqs,
			      netif_get_num_default_rss_queues());
		mdev->profile.prof[i].rx_ring_num =
			rounddown_pow_of_two(num_rx_rings);
	}
}

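/* Allocate the Rx ring structure, the rx_info array and the HW queue
 * resources, preferring the requested NUMA node.
 */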
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_rx_ring **pring,
			   u32 size, u16 stride, int node)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring;
	int err = -ENOMEM;
	int tmp;

	ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
	if (!ring) {
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring) {
			en_err(priv, "Failed to allocate RX ring structure\n");
			return -ENOMEM;
		}
	}

	ring->prod = 0;
	ring->cons = 0;
	ring->size = size;
	ring->size_mask = size - 1;
	ring->stride = stride;
	ring->log_stride = ffs(ring->stride) - 1;
	ring->buf_size = ring->size * ring->stride + TXBB_SIZE;

	tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
					sizeof(struct mlx4_en_rx_alloc));
	ring->rx_info = vzalloc_node(tmp, node);
	if (!ring->rx_info) {
		ring->rx_info = vzalloc(tmp);
		if (!ring->rx_info) {
			err = -ENOMEM;
			goto err_ring;
		}
	}

	en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
	       ring->rx_info, tmp);

	/* Allocate HW buffers on provided NUMA node */
	set_dev_node(&mdev->dev->persist->pdev->dev, node);
	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
	set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
	if (err)
		goto err_info;

	ring->buf = ring->wqres.buf.direct.buf;

	ring->hwtstamp_rx_filter = priv->hwtstamp_config.rx_filter;

	*pring = ring;
	return 0;

err_info:
	vfree(ring->rx_info);
	ring->rx_info = NULL;
err_ring:
	kfree(ring);
	*pring = NULL;

	return err;
}

int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int i;
	int ring_ind;
	int err;
	int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					DS_SIZE * priv->num_frags);

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];

		ring->prod = 0;
		ring->cons = 0;
		ring->actual_size = 0;
		ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;

		ring->stride = stride;
		if (ring->stride <= TXBB_SIZE) {
			/* Stamp first unused send wqe */
			__be32 *ptr = (__be32 *)ring->buf;
			__be32 stamp = cpu_to_be32(1 << STAMP_SHIFT);
			*ptr = stamp;
			/* Move pointer to start of rx section */
			ring->buf += TXBB_SIZE;
		}

		ring->log_stride = ffs(ring->stride) - 1;
		ring->buf_size = ring->size * ring->stride;

		memset(ring->buf, 0, ring->buf_size);
		mlx4_en_update_rx_prod_db(ring);

		/* Initialize all descriptors */
		for (i = 0; i < ring->size; i++)
			mlx4_en_init_rx_desc(priv, ring, i);
	}
	err = mlx4_en_fill_rx_buffers(priv);
	if (err)
		goto err_buffers;

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];

		ring->size_mask = ring->actual_size - 1;
		mlx4_en_update_rx_prod_db(ring);
	}

	return 0;

err_buffers:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
		mlx4_en_free_rx_buf(priv, priv->rx_ring[ring_ind]);

	ring_ind = priv->rx_ring_num - 1;
	while (ring_ind >= 0) {
		if (priv->rx_ring[ring_ind]->stride <= TXBB_SIZE)
			priv->rx_ring[ring_ind]->buf -= TXBB_SIZE;
		ring_ind--;
	}
	return err;
}

/* We recover from out of memory by scheduling our napi poll
 * function (mlx4_en_process_cq), which tries to allocate
 * all missing RX buffers (call to mlx4_en_refill_rx_buffers).
 */
void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
{
	int ring;

	if (!priv->port_up)
		return;

	for (ring = 0; ring < priv->rx_ring_num; ring++) {
		if (mlx4_en_is_ring_empty(priv->rx_ring[ring])) {
			local_bh_disable();
			napi_reschedule(&priv->rx_cq[ring]->napi);
			local_bh_enable();
		}
	}
}

/* When the rx ring is running in page-per-packet mode, a released frame can go
 * directly into a small cache, to avoid unmapping or touching the page
 * allocator. In bpf prog performance scenarios, buffers are either forwarded
 * or dropped, never converted to skbs, so every page can come directly from
 * this cache when it is sized to be a multiple of the napi budget.
 */
bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring,
			struct mlx4_en_rx_alloc *frame)
{
	struct mlx4_en_page_cache *cache = &ring->page_cache;

	if (cache->index >= MLX4_EN_CACHE_SIZE)
		return false;

	cache->buf[cache->index].page = frame->page;
	cache->buf[cache->index].dma = frame->dma;
	cache->index++;
	return true;
}

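/* Release an Rx ring: drop any attached XDP program and free the HW queue
 * resources and the rx_info array.
 */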
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_rx_ring **pring,
			     u32 size, u16 stride)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring = *pring;
	struct bpf_prog *old_prog;

	old_prog = rcu_dereference_protected(ring->xdp_prog,
					     lockdep_is_held(&mdev->state_lock));
	if (old_prog)
		bpf_prog_put(old_prog);
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
	vfree(ring->rx_info);
	ring->rx_info = NULL;
	kfree(ring);
	*pring = NULL;
}

void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
{
	int i;

	for (i = 0; i < ring->page_cache.index; i++) {
		dma_unmap_page(priv->ddev, ring->page_cache.buf[i].dma,
			       PAGE_SIZE, priv->dma_dir);
		put_page(ring->page_cache.buf[i].page);
	}
	ring->page_cache.index = 0;
	mlx4_en_free_rx_buf(priv, ring);
	if (ring->stride <= TXBB_SIZE)
		ring->buf -= TXBB_SIZE;
}

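/* Move the used fragments of one received packet into the skb, deciding per
 * fragment whether its page can be reused or must be unmapped and released.
 */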
static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
				    struct mlx4_en_rx_alloc *frags,
				    struct sk_buff *skb,
				    int length)
{
	const struct mlx4_en_frag_info *frag_info = priv->frag_info;
	unsigned int truesize = 0;
	int nr, frag_size;
	struct page *page;
	dma_addr_t dma;
	bool release;

	/* Collect used fragments while replacing them in the HW descriptors */
	for (nr = 0;; frags++) {
		frag_size = min_t(int, length, frag_info->frag_size);

		page = frags->page;
		if (unlikely(!page))
			goto fail;

		dma = frags->dma;
		dma_sync_single_range_for_cpu(priv->ddev, dma, frags->page_offset,
					      frag_size, priv->dma_dir);

		__skb_fill_page_desc(skb, nr, page, frags->page_offset,
				     frag_size);

		truesize += frag_info->frag_stride;
		if (frag_info->frag_stride == PAGE_SIZE / 2) {
			frags->page_offset ^= PAGE_SIZE / 2;
			release = page_count(page) != 1 ||
				  page_is_pfmemalloc(page) ||
				  page_to_nid(page) != numa_mem_id();
		} else {
			u32 sz_align = ALIGN(frag_size, SMP_CACHE_BYTES);

			frags->page_offset += sz_align;
			release = frags->page_offset + frag_info->frag_size > PAGE_SIZE;
		}
		if (release) {
			dma_unmap_page(priv->ddev, dma, PAGE_SIZE, priv->dma_dir);
			frags->page = NULL;
		} else {
			page_ref_inc(page);
		}

		nr++;
		length -= frag_size;
		if (!length)
			break;
		frag_info++;
	}
	skb->truesize += truesize;
	return nr;

fail:
	while (nr > 0) {
		nr--;
		__skb_frag_unref(skb_shinfo(skb)->frags + nr);
	}
	return 0;
}

static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
				      struct mlx4_en_rx_alloc *frags,
				      unsigned int length)
{
	struct sk_buff *skb;
	void *va;
	int used_frags;
	dma_addr_t dma;

	skb = netdev_alloc_skb(priv->dev, SMALL_PACKET_SIZE + NET_IP_ALIGN);
	if (unlikely(!skb)) {
		en_dbg(RX_ERR, priv, "Failed allocating skb\n");
		return NULL;
	}
	skb_reserve(skb, NET_IP_ALIGN);
	skb->len = length;

	/* Get pointer to first fragment so we could copy the headers into the
	 * (linear part of the) skb */
	va = page_address(frags[0].page) + frags[0].page_offset;

	if (length <= SMALL_PACKET_SIZE) {
		/* We are copying all relevant data to the skb - temporarily
		 * sync buffers for the copy */
		dma = frags[0].dma + frags[0].page_offset;
		dma_sync_single_for_cpu(priv->ddev, dma, length,
					DMA_FROM_DEVICE);
		skb_copy_to_linear_data(skb, va, length);
		skb->tail += length;
	} else {
		unsigned int pull_len;

		/* Move relevant fragments to skb */
		used_frags = mlx4_en_complete_rx_desc(priv, frags,
						      skb, length);
		if (unlikely(!used_frags)) {
			kfree_skb(skb);
			return NULL;
		}
		skb_shinfo(skb)->nr_frags = used_frags;

		pull_len = eth_get_headlen(va, SMALL_PACKET_SIZE);
		/* Copy headers into the skb linear buffer */
		memcpy(skb->data, va, pull_len);
		skb->tail += pull_len;

		/* Skip headers in first fragment */
		skb_shinfo(skb)->frags[0].page_offset += pull_len;

		/* Adjust size of first fragment */
		skb_frag_size_sub(&skb_shinfo(skb)->frags[0], pull_len);
		skb->data_len = length - pull_len;
	}
	return skb;
}

static void validate_loopback(struct mlx4_en_priv *priv, void *va)
{
	const unsigned char *data = va + ETH_HLEN;
	int i;

	for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++) {
		if (data[i] != (unsigned char)i)
			return;
	}
	/* Loopback found */
	priv->loopback_ok = 1;
}

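/* Repost missing Rx buffers in batches of at least eight; the caller updates
 * the producer doorbell when this returns true.
 */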
static bool mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
				      struct mlx4_en_rx_ring *ring)
{
	u32 missing = ring->actual_size - (ring->prod - ring->cons);

	/* Try to batch allocations, but not too much. */
	if (missing < 8)
		return false;
	do {
		if (mlx4_en_prepare_rx_desc(priv, ring,
					    ring->prod & ring->size_mask,
					    GFP_ATOMIC | __GFP_COLD |
					    __GFP_MEMALLOC))
			break;
		ring->prod++;
	} while (--missing);

	return true;
}

/* When hardware doesn't strip the vlan, we need to calculate the checksum
 * over it and add it to the hardware's checksum calculation
 */
static inline __wsum get_fixed_vlan_csum(__wsum hw_checksum,
					 struct vlan_hdr *vlanh)
{
	return csum_add(hw_checksum, *(__wsum *)vlanh);
}

/* Although the stack expects checksum which doesn't include the pseudo
 * header, the HW adds it. To address that, we are subtracting the pseudo
 * header checksum from the checksum value provided by the HW.
 */
static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
				struct iphdr *iph)
{
	__u16 length_for_csum = 0;
	__wsum csum_pseudo_header = 0;

	length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2));
	csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr,
						length_for_csum, iph->protocol, 0);
	skb->csum = csum_sub(hw_checksum, csum_pseudo_header);
}

#if IS_ENABLED(CONFIG_IPV6)
/* In IPv6 packets, besides subtracting the pseudo header checksum,
 * we also compute/add the IP header checksum which
 * is not added by the HW.
 */
static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
			       struct ipv6hdr *ipv6h)
{
	__wsum csum_pseudo_hdr = 0;

	if (unlikely(ipv6h->nexthdr == IPPROTO_FRAGMENT ||
		     ipv6h->nexthdr == IPPROTO_HOPOPTS))
		return -1;
	hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr));

	csum_pseudo_hdr = csum_partial(&ipv6h->saddr,
				       sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0);
	csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ipv6h->payload_len);
	csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ntohs(ipv6h->nexthdr));

	skb->csum = csum_sub(hw_checksum, csum_pseudo_hdr);
	skb->csum = csum_add(skb->csum, csum_partial(ipv6h, sizeof(struct ipv6hdr), 0));
	return 0;
}
#endif

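/* Fix up the HW checksum for CHECKSUM_COMPLETE: fold in a VLAN header the HW
 * did not strip and remove the pseudo-header contribution the stack does not
 * expect.
 */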
static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
		      netdev_features_t dev_features)
{
	__wsum hw_checksum = 0;
	void *hdr = (u8 *)va + sizeof(struct ethhdr);

	hw_checksum = csum_unfold((__force __sum16)cqe->checksum);

	if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) &&
	    !(dev_features & NETIF_F_HW_VLAN_CTAG_RX)) {
		hw_checksum = get_fixed_vlan_csum(hw_checksum, hdr);
		hdr += sizeof(struct vlan_hdr);
	}

	if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4))
		get_fixed_ipv4_csum(hw_checksum, skb, hdr);
#if IS_ENABLED(CONFIG_IPV6)
	else if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
		if (unlikely(get_fixed_ipv6_csum(hw_checksum, skb, hdr)))
			return -1;
#endif
	return 0;
}

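/* Main Rx completion handler: runs the attached XDP program first, then
 * builds skbs (via GRO frags when possible) and hands them to the stack,
 * and finally refills the ring. Returns the number of completions handled.
 */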
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_cqe *cqe;
	struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
	struct mlx4_en_rx_alloc *frags;
	struct bpf_prog *xdp_prog;
	int doorbell_pending;
	struct sk_buff *skb;
	int index;
	int nr;
	unsigned int length;
	int polled = 0;
	int ip_summed;
	int factor = priv->cqe_factor;
	u64 timestamp;
	bool l2_tunnel;

	if (unlikely(!priv->port_up))
		return 0;

	if (unlikely(budget <= 0))
		return polled;

	/* Protect accesses to: ring->xdp_prog, priv->mac_hash list */
	rcu_read_lock();
	xdp_prog = rcu_dereference(ring->xdp_prog);
	doorbell_pending = 0;

	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
	 * descriptor offset can be deduced from the CQE index instead of
	 * reading 'cqe->index' */
	index = cq->mcq.cons_index & ring->size_mask;
	cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
		    cq->mcq.cons_index & cq->size)) {
		void *va;

		frags = ring->rx_info + (index << priv->log_rx_info);
		va = page_address(frags[0].page) + frags[0].page_offset;

		/*
		 * make sure we read the CQE after we read the ownership bit
		 */
		dma_rmb();

		/* Drop packet on bad receive or bad checksum */
		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
			     MLX4_CQE_OPCODE_ERROR)) {
			en_err(priv, "CQE completed in error - vendor syndrom:%d syndrom:%d\n",
			       ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
			       ((struct mlx4_err_cqe *)cqe)->syndrome);
			goto next;
		}
		if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
			en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
			goto next;
		}

		/* Check if we need to drop the packet if SRIOV is not enabled
		 * and not performing the selftest or flb disabled
		 */
		if (priv->flags & MLX4_EN_FLAG_RX_FILTER_NEEDED) {
			const struct ethhdr *ethh = va;
			dma_addr_t dma;

			/* Get pointer to first fragment since we haven't
			 * skb yet and cast it to ethhdr struct
			 */
			dma = frags[0].dma + frags[0].page_offset;
			dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
						DMA_FROM_DEVICE);

			if (is_multicast_ether_addr(ethh->h_dest)) {
				struct mlx4_mac_entry *entry;
				struct hlist_head *bucket;
				unsigned int mac_hash;

				/* Drop the packet, since HW loopback-ed it */
				mac_hash = ethh->h_source[MLX4_EN_MAC_HASH_IDX];
				bucket = &priv->mac_hash[mac_hash];
				hlist_for_each_entry_rcu(entry, bucket, hlist) {
					if (ether_addr_equal_64bits(entry->mac,
								    ethh->h_source))
						goto next;
				}
			}
		}

		if (unlikely(priv->validate_loopback)) {
			validate_loopback(priv, va);
			goto next;
		}

		/*
		 * Packet is OK - process it.
		 */
		length = be32_to_cpu(cqe->byte_cnt);
		length -= ring->fcs_del;
		l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
			(cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));

		/* A bpf program gets first chance to drop the packet. It may
		 * read bytes but not past the end of the frag.
		 */
		if (xdp_prog) {
			struct xdp_buff xdp;
			dma_addr_t dma;
			void *orig_data;
			u32 act;

			dma = frags[0].dma + frags[0].page_offset;
			dma_sync_single_for_cpu(priv->ddev, dma,
						priv->frag_info[0].frag_size,
						DMA_FROM_DEVICE);

			xdp.data_hard_start = va - frags[0].page_offset;
			xdp.data = va;
			xdp.data_end = xdp.data + length;
			orig_data = xdp.data;

			act = bpf_prog_run_xdp(xdp_prog, &xdp);

			if (xdp.data != orig_data) {
				length = xdp.data_end - xdp.data;
				frags[0].page_offset = xdp.data -
					xdp.data_hard_start;
				va = xdp.data;
			}

			switch (act) {
			case XDP_PASS:
				break;
			case XDP_TX:
				if (likely(!mlx4_en_xmit_frame(ring, frags, dev,
							length, cq->ring,
							&doorbell_pending))) {
					frags[0].page = NULL;
					goto next;
				}
				trace_xdp_exception(dev, xdp_prog, act);
				goto xdp_drop_no_cnt; /* Drop on xmit failure */
			default:
				bpf_warn_invalid_xdp_action(act);
			case XDP_ABORTED:
				trace_xdp_exception(dev, xdp_prog, act);
			case XDP_DROP:
				ring->xdp_drop++;
xdp_drop_no_cnt:
				goto next;
			}
		}

		ring->bytes += length;
		ring->packets++;

		if (likely(dev->features & NETIF_F_RXCSUM)) {
			if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
						      MLX4_CQE_STATUS_UDP)) {
				if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
				    cqe->checksum == cpu_to_be16(0xffff)) {
					ip_summed = CHECKSUM_UNNECESSARY;
					ring->csum_ok++;
				} else {
					ip_summed = CHECKSUM_NONE;
					ring->csum_none++;
				}
			} else {
				if (priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP &&
				    (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
							       MLX4_CQE_STATUS_IPV6))) {
					ip_summed = CHECKSUM_COMPLETE;
					ring->csum_complete++;
				} else {
					ip_summed = CHECKSUM_NONE;
					ring->csum_none++;
				}
			}
		} else {
			ip_summed = CHECKSUM_NONE;
			ring->csum_none++;
		}

		/* This packet is eligible for GRO if it is:
		 * - DIX Ethernet (type interpretation)
		 * - TCP/IP (v4)
		 * - without IP options
		 * - not an IP fragment
		 */
		if (dev->features & NETIF_F_GRO) {
			struct sk_buff *gro_skb = napi_get_frags(&cq->napi);

			if (!gro_skb)
				goto next;

			nr = mlx4_en_complete_rx_desc(priv, frags, gro_skb,
						      length);
			if (!nr)
				goto next;

			if (ip_summed == CHECKSUM_COMPLETE) {
				if (check_csum(cqe, gro_skb, va,
					       dev->features)) {
					ip_summed = CHECKSUM_NONE;
					ring->csum_none++;
					ring->csum_complete--;
				}
			}

			skb_shinfo(gro_skb)->nr_frags = nr;
			gro_skb->len = length;
			gro_skb->data_len = length;
			gro_skb->ip_summed = ip_summed;

			if (l2_tunnel && ip_summed == CHECKSUM_UNNECESSARY)
				gro_skb->csum_level = 1;

			if ((cqe->vlan_my_qpn &
			     cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK)) &&
			    (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
				u16 vid = be16_to_cpu(cqe->sl_vid);

				__vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid);
			} else if ((be32_to_cpu(cqe->vlan_my_qpn) &
				    MLX4_CQE_SVLAN_PRESENT_MASK) &&
				   (dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
				__vlan_hwaccel_put_tag(gro_skb,
						       htons(ETH_P_8021AD),
						       be16_to_cpu(cqe->sl_vid));
			}

			if (dev->features & NETIF_F_RXHASH)
				skb_set_hash(gro_skb,
					     be32_to_cpu(cqe->immed_rss_invalid),
					     (ip_summed == CHECKSUM_UNNECESSARY) ?
						PKT_HASH_TYPE_L4 :
						PKT_HASH_TYPE_L3);

			skb_record_rx_queue(gro_skb, cq->ring);

			if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
				timestamp = mlx4_en_get_cqe_ts(cqe);
				mlx4_en_fill_hwtstamps(mdev,
						       skb_hwtstamps(gro_skb),
						       timestamp);
			}

			napi_gro_frags(&cq->napi);
			goto next;
		}

		/* GRO not possible, complete processing here */
		skb = mlx4_en_rx_skb(priv, frags, length);
		if (unlikely(!skb)) {
			ring->dropped++;
			goto next;
		}

		if (ip_summed == CHECKSUM_COMPLETE) {
			if (check_csum(cqe, skb, va, dev->features)) {
				ip_summed = CHECKSUM_NONE;
				ring->csum_complete--;
				ring->csum_none++;
			}
		}

		skb->ip_summed = ip_summed;
		skb->protocol = eth_type_trans(skb, dev);
		skb_record_rx_queue(skb, cq->ring);

		if (l2_tunnel && ip_summed == CHECKSUM_UNNECESSARY)
			skb->csum_level = 1;

		if (dev->features & NETIF_F_RXHASH)
			skb_set_hash(skb,
				     be32_to_cpu(cqe->immed_rss_invalid),
				     (ip_summed == CHECKSUM_UNNECESSARY) ?
					PKT_HASH_TYPE_L4 :
					PKT_HASH_TYPE_L3);

		if ((be32_to_cpu(cqe->vlan_my_qpn) &
		     MLX4_CQE_CVLAN_PRESENT_MASK) &&
		    (dev->features & NETIF_F_HW_VLAN_CTAG_RX))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->sl_vid));
		else if ((be32_to_cpu(cqe->vlan_my_qpn) &
			  MLX4_CQE_SVLAN_PRESENT_MASK) &&
			 (dev->features & NETIF_F_HW_VLAN_STAG_RX))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD),
					       be16_to_cpu(cqe->sl_vid));

		if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
			timestamp = mlx4_en_get_cqe_ts(cqe);
			mlx4_en_fill_hwtstamps(mdev, skb_hwtstamps(skb),
					       timestamp);
		}

		napi_gro_receive(&cq->napi, skb);
next:
		++cq->mcq.cons_index;
		index = (cq->mcq.cons_index) & ring->size_mask;
		cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;
		if (++polled == budget)
			break;
	}

	rcu_read_unlock();

	if (doorbell_pending)
		mlx4_en_xmit_doorbell(priv->tx_ring[TX_XDP][cq->ring]);

	mlx4_cq_set_ci(&cq->mcq);
	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
	ring->cons = cq->mcq.cons_index;

	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);

	if (mlx4_en_refill_rx_buffers(priv, ring))
		mlx4_en_update_rx_prod_db(ring);

	return polled;
}

void mlx4_en_rx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);

	if (likely(priv->port_up))
		napi_schedule_irqoff(&cq->napi);
	else
		mlx4_en_arm_cq(priv, cq);
}

/* Rx CQ polling - called by NAPI */
int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
{
	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
	struct net_device *dev = cq->dev;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int done;

	done = mlx4_en_process_rx_cq(dev, cq, budget);

	/* If we used up all the quota - we're probably not done yet... */
	if (done == budget) {
		const struct cpumask *aff;
		struct irq_data *idata;
		int cpu_curr;

		INC_PERF_COUNTER(priv->pstats.napi_quota);

		cpu_curr = smp_processor_id();
		idata = irq_desc_get_irq_data(cq->irq_desc);
		aff = irq_data_get_affinity_mask(idata);

		if (likely(cpumask_test_cpu(cpu_curr, aff)))
			return budget;

		/* Current cpu is not according to smp_irq_affinity -
		 * probably affinity changed. Need to stop this NAPI
		 * poll, and restart it on the right CPU.
		 * Try to avoid returning a too small value (like 0),
		 * to not fool net_rx_action() and its netdev_budget
		 */
		if (done)
			done--;
	}
	/* Done for now */
	if (napi_complete_done(napi, done))
		mlx4_en_arm_cq(priv, cq);
	return done;
}

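/* Compute the Rx fragment layout for the current MTU: one page per packet
 * when XDP is enabled, otherwise a scatter-list of up to
 * MLX4_EN_MAX_RX_FRAGS fragments.
 */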
void mlx4_en_calc_rx_buf(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int eff_mtu = MLX4_EN_EFF_MTU(dev->mtu);
	int i = 0;

	/* bpf requires buffers to be set up as 1 packet per page.
	 * This only works when num_frags == 1.
	 */
	if (priv->tx_ring_num[TX_XDP]) {
		priv->frag_info[0].frag_size = eff_mtu;
		/* This will gain efficient xdp frame recycling at the
		 * expense of more costly truesize accounting
		 */
		priv->frag_info[0].frag_stride = PAGE_SIZE;
		priv->dma_dir = PCI_DMA_BIDIRECTIONAL;
		priv->rx_headroom = XDP_PACKET_HEADROOM;
		i = 1;
	} else {
		int frag_size_max = 2048, buf_size = 0;

		/* should not happen, right ? */
		if (eff_mtu > PAGE_SIZE + (MLX4_EN_MAX_RX_FRAGS - 1) * 2048)
			frag_size_max = PAGE_SIZE;

		while (buf_size < eff_mtu) {
			int frag_stride, frag_size = eff_mtu - buf_size;
			int pad, nb;

			if (i < MLX4_EN_MAX_RX_FRAGS - 1)
				frag_size = min(frag_size, frag_size_max);

			priv->frag_info[i].frag_size = frag_size;

			frag_stride = ALIGN(frag_size, SMP_CACHE_BYTES);
			/* We can only pack two 1536-byte frames in a 4K page.
			 * Therefore, each frame would consume more bytes (truesize)
			 */
			nb = PAGE_SIZE / frag_stride;
			pad = (PAGE_SIZE - nb * frag_stride) / nb;
			pad &= ~(SMP_CACHE_BYTES - 1);
			priv->frag_info[i].frag_stride = frag_stride + pad;

			buf_size += frag_size;
			i++;
		}
		priv->dma_dir = PCI_DMA_FROMDEVICE;
		priv->rx_headroom = 0;
	}

	priv->num_frags = i;
	priv->rx_skb_size = eff_mtu;
	priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct mlx4_en_rx_alloc));

	en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d num_frags:%d):\n",
	       eff_mtu, priv->num_frags);
	for (i = 0; i < priv->num_frags; i++) {
		en_dbg(DRV,
		       priv,
		       "  frag:%d - size:%d stride:%d\n",
		       i,
		       priv->frag_info[i].frag_size,
		       priv->frag_info[i].frag_stride);
	}
}

/* RSS related functions */

static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
				 struct mlx4_en_rx_ring *ring,
				 enum mlx4_qp_state *state,
				 struct mlx4_qp *qp)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_qp_context *context;
	int err = 0;

	context = kmalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	err = mlx4_qp_alloc(mdev->dev, qpn, qp, GFP_KERNEL);
	if (err) {
		en_err(priv, "Failed to allocate qp #%x\n", qpn);
		goto out;
	}
	qp->event = mlx4_en_sqp_event;

	memset(context, 0, sizeof(*context));
	mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
				qpn, ring->cqn, -1, context);
	context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);

	/* Cancel FCS removal if FW allows */
	if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) {
		context->param3 |= cpu_to_be32(1 << 29);
		if (priv->dev->features & NETIF_F_RXFCS)
			ring->fcs_del = 0;
		else
			ring->fcs_del = ETH_FCS_LEN;
	} else {
		ring->fcs_del = 0;
	}

	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
	if (err) {
		mlx4_qp_remove(mdev->dev, qp);
		mlx4_qp_free(mdev->dev, qp);
	}
	mlx4_en_update_rx_prod_db(ring);
out:
	kfree(context);
	return err;
}

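/* Reserve and allocate the special drop QP used as a steering target for
 * discarding traffic.
 */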
int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv)
{
	int err;
	u32 qpn;

	err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn,
				    MLX4_RESERVE_A0_QP);
	if (err) {
		en_err(priv, "Failed reserving drop qpn\n");
		return err;
	}
	err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp, GFP_KERNEL);
	if (err) {
		en_err(priv, "Failed allocating drop qp\n");
		mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
		return err;
	}

	return 0;
}

void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv)
{
	u32 qpn;

	qpn = priv->drop_qp.qpn;
	mlx4_qp_remove(priv->mdev->dev, &priv->drop_qp);
	mlx4_qp_free(priv->mdev->dev, &priv->drop_qp);
	mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
}

/* Allocate rx qp's and configure them according to rss map */
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	struct mlx4_qp_context context;
	struct mlx4_rss_context *rss_context;
	int rss_rings;
	void *ptr;
	u8 rss_mask = (MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 | MLX4_RSS_IPV6 |
			MLX4_RSS_TCP_IPV6);
	int i, qpn;
	int err = 0;
	int good_qps = 0;

	en_dbg(DRV, priv, "Configuring rss steering\n");
	err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num,
				    priv->rx_ring_num,
				    &rss_map->base_qpn, 0);
	if (err) {
		en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num);
		return err;
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		qpn = rss_map->base_qpn + i;
		err = mlx4_en_config_rss_qp(priv, qpn, priv->rx_ring[i],
					    &rss_map->state[i],
					    &rss_map->qps[i]);
		if (err)
			goto rss_err;

		++good_qps;
	}

	/* Configure RSS indirection qp */
	err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp, GFP_KERNEL);
	if (err) {
		en_err(priv, "Failed to allocate RSS indirection QP\n");
		goto rss_err;
	}
	rss_map->indir_qp.event = mlx4_en_sqp_event;
	mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
				priv->rx_ring[0]->cqn, -1, &context);

	if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num)
		rss_rings = priv->rx_ring_num;
	else
		rss_rings = priv->prof->rss_rings;

	ptr = ((void *) &context) + offsetof(struct mlx4_qp_context, pri_path)
					+ MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
	rss_context = ptr;
	rss_context->base_qpn = cpu_to_be32(ilog2(rss_rings) << 24 |
					    (rss_map->base_qpn));
	rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
	if (priv->mdev->profile.udp_rss) {
		rss_mask |= MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6;
		rss_context->base_qpn_udp = rss_context->default_qpn;
	}

	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
		en_info(priv, "Setting RSS context tunnel type to RSS on inner headers\n");
		rss_mask |= MLX4_RSS_BY_INNER_HEADERS;
	}

	rss_context->flags = rss_mask;
	rss_context->hash_fn = MLX4_RSS_HASH_TOP;
	if (priv->rss_hash_fn == ETH_RSS_HASH_XOR) {
		rss_context->hash_fn = MLX4_RSS_HASH_XOR;
	} else if (priv->rss_hash_fn == ETH_RSS_HASH_TOP) {
		rss_context->hash_fn = MLX4_RSS_HASH_TOP;
		memcpy(rss_context->rss_key, priv->rss_key,
		       MLX4_EN_RSS_KEY_SIZE);
	} else {
		en_err(priv, "Unknown RSS hash function requested\n");
		err = -EINVAL;
		goto indir_err;
	}

	err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
			       &rss_map->indir_qp, &rss_map->indir_state);
	if (err)
		goto indir_err;

	return 0;

indir_err:
	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
rss_err:
	for (i = 0; i < good_qps; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
	return err;
}

void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	int i;

	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);

	for (i = 0; i < priv->rx_ring_num; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
}