/*
 * Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/u64_stats_sync.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/prefetch.h>
#include <linux/cpumask.h>
#include <asm/barrier.h>
#include "hinic_common.h"
#include "hinic_hw_if.h"
#include "hinic_hw_wqe.h"
#include "hinic_hw_wq.h"
#include "hinic_hw_qp.h"
#include "hinic_hw_dev.h"
#include "hinic_rx.h"
#include "hinic_dev.h"
#define RX_IRQ_NO_PENDING		0
#define RX_IRQ_NO_COALESC		0
#define RX_IRQ_NO_LLI_TIMER		0
#define RX_IRQ_NO_CREDIT		0
#define RX_IRQ_NO_RESEND_TIMER		0

#define HINIC_RX_BUFFER_WRITE		16
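/*
 * Note: HINIC_RX_BUFFER_WRITE acts as a refill watermark - rxq_recv()
 * below only re-posts receive buffers once more than this many WQEBBs
 * are free, which batches buffer allocation and the producer-index
 * update instead of refilling after every packet.
 */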
/**
 * hinic_rxq_clean_stats - Clean the statistics of the specific queue
 * @rxq: Logical Rx Queue
 **/
void hinic_rxq_clean_stats(struct hinic_rxq *rxq)
{
	struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;

	u64_stats_update_begin(&rxq_stats->syncp);
	rxq_stats->pkts  = 0;
	rxq_stats->bytes = 0;
	u64_stats_update_end(&rxq_stats->syncp);
}
/**
 * hinic_rxq_get_stats - get statistics of Rx Queue
 * @rxq: Logical Rx Queue
 * @stats: return updated stats here
 **/
void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats)
{
	struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;
	unsigned int start;

	u64_stats_update_begin(&stats->syncp);
	do {
		start = u64_stats_fetch_begin(&rxq_stats->syncp);
		stats->pkts = rxq_stats->pkts;
		stats->bytes = rxq_stats->bytes;
	} while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
	u64_stats_update_end(&stats->syncp);
}
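/*
 * The fetch_begin/fetch_retry loop above is the standard u64_stats
 * seqcount pattern: on 32-bit machines a 64-bit counter cannot be read
 * atomically, so the reader retries until it observes a snapshot that
 * was not concurrently updated. On 64-bit builds the sync largely
 * compiles away.
 */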
/**
 * rxq_stats_init - Initialize the statistics of the specific queue
 * @rxq: Logical Rx Queue
 **/
static void rxq_stats_init(struct hinic_rxq *rxq)
{
	struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;

	u64_stats_init(&rxq_stats->syncp);
	hinic_rxq_clean_stats(rxq);
}
static void rx_csum(struct hinic_rxq *rxq, u16 cons_idx,
		    struct sk_buff *skb)
{
	struct net_device *netdev = rxq->netdev;
	struct hinic_rq_cqe *cqe;
	struct hinic_rq *rq = rxq->rq;
	u32 csum_err;
	u32 status;

	cqe = rq->cqe[cons_idx];
	status = be32_to_cpu(cqe->status);
	csum_err = HINIC_RQ_CQE_STATUS_GET(status, CSUM_ERR);

	if (!(netdev->features & NETIF_F_RXCSUM))
		return;

	if (!csum_err)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->ip_summed = CHECKSUM_NONE;
}
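/*
 * CHECKSUM_UNNECESSARY tells the stack that the hardware has already
 * validated the checksums, so software verification is skipped.
 * CHECKSUM_NONE (also the default for a freshly allocated skb, which
 * is why the !NETIF_F_RXCSUM case can simply return) makes the stack
 * verify the checksums itself.
 */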
/**
 * rx_alloc_skb - allocate skb and map it to dma address
 * @rxq: rx queue
 * @dma_addr: returned dma address for the skb
 *
 * Return skb
 **/
static struct sk_buff *rx_alloc_skb(struct hinic_rxq *rxq,
				    dma_addr_t *dma_addr)
{
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;
	struct sk_buff *skb;
	dma_addr_t addr;
	int err;

	skb = netdev_alloc_skb_ip_align(rxq->netdev, rxq->rq->buf_sz);
	if (!skb) {
		netdev_err(rxq->netdev, "Failed to allocate Rx SKB\n");
		return NULL;
	}

	addr = dma_map_single(&pdev->dev, skb->data, rxq->rq->buf_sz,
			      DMA_FROM_DEVICE);
	err = dma_mapping_error(&pdev->dev, addr);
	if (err) {
		dev_err(&pdev->dev, "Failed to map Rx DMA, err = %d\n", err);
		goto err_rx_map;
	}

	*dma_addr = addr;
	return skb;

err_rx_map:
	dev_kfree_skb_any(skb);
	return NULL;
}
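/*
 * netdev_alloc_skb_ip_align() reserves NET_IP_ALIGN bytes of headroom
 * so that, after the 14-byte Ethernet header, the IP header lands on a
 * 4-byte boundary on architectures that care about alignment.
 */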
/**
 * rx_unmap_skb - unmap the dma address of the skb
 * @rxq: rx queue
 * @dma_addr: dma address of the skb
 **/
static void rx_unmap_skb(struct hinic_rxq *rxq, dma_addr_t dma_addr)
{
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;

	dma_unmap_single(&pdev->dev, dma_addr, rxq->rq->buf_sz,
			 DMA_FROM_DEVICE);
}
/**
 * rx_free_skb - unmap and free skb
 * @rxq: rx queue
 * @skb: skb to free
 * @dma_addr: dma address of the skb
 **/
static void rx_free_skb(struct hinic_rxq *rxq, struct sk_buff *skb,
			dma_addr_t dma_addr)
{
	rx_unmap_skb(rxq, dma_addr);
	dev_kfree_skb_any(skb);
}
/**
 * rx_alloc_pkts - allocate pkts in rx queue
 * @rxq: rx queue
 *
 * Return number of skbs allocated
 **/
static int rx_alloc_pkts(struct hinic_rxq *rxq)
{
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
	struct hinic_rq_wqe *rq_wqe;
	unsigned int free_wqebbs;
	struct hinic_sge sge;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	u16 prod_idx;
	int i;

	free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq);

	/* Limit the allocation chunks */
	if (free_wqebbs > nic_dev->rx_weight)
		free_wqebbs = nic_dev->rx_weight;

	for (i = 0; i < free_wqebbs; i++) {
		skb = rx_alloc_skb(rxq, &dma_addr);
		if (!skb) {
			netdev_err(rxq->netdev, "Failed to alloc Rx skb\n");
			goto skb_out;
		}

		hinic_set_sge(&sge, dma_addr, skb->len);

		rq_wqe = hinic_rq_get_wqe(rxq->rq, HINIC_RQ_WQE_SIZE,
					  &prod_idx);
		if (!rq_wqe) {
			rx_free_skb(rxq, skb, dma_addr);
			goto skb_out;
		}

		hinic_rq_prepare_wqe(rxq->rq, prod_idx, rq_wqe, &sge);

		hinic_rq_write_wqe(rxq->rq, prod_idx, rq_wqe, skb);
	}

skb_out:
	if (i) {
		wmb();	/* write all the wqes before update PI */

		hinic_rq_update(rxq->rq, prod_idx);
	}

	return i;
}
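/*
 * The wmb() above is the classic descriptor-ring ordering barrier: all
 * WQE stores must be visible to the device before the producer index
 * is updated, otherwise the NIC could consume descriptors it has not
 * yet fully observed.
 */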
/**
 * free_all_rx_skbs - free all skbs in rx queue
 * @rxq: rx queue
 **/
static void free_all_rx_skbs(struct hinic_rxq *rxq)
{
	struct hinic_rq *rq = rxq->rq;
	struct hinic_hw_wqe *hw_wqe;
	struct hinic_sge sge;
	u16 ci;

	while ((hw_wqe = hinic_read_wqe(rq->wq, HINIC_RQ_WQE_SIZE, &ci))) {
		if (IS_ERR(hw_wqe))
			break;

		hinic_rq_get_sge(rq, &hw_wqe->rq_wqe, ci, &sge);

		hinic_put_wqe(rq->wq, HINIC_RQ_WQE_SIZE);

		rx_free_skb(rxq, rq->saved_skb[ci], hinic_sge_to_dma(&sge));
	}
}
/**
 * rx_recv_jumbo_pkt - Rx handler for jumbo pkt
 * @rxq: rx queue
 * @head_skb: the first skb in the list
 * @left_pkt_len: remaining size of the pkt, excluding the head skb
 * @ci: consumer index
 *
 * Return number of wqes used for the remainder of the pkt
 **/
static int rx_recv_jumbo_pkt(struct hinic_rxq *rxq, struct sk_buff *head_skb,
			     unsigned int left_pkt_len, u16 ci)
{
	struct sk_buff *skb, *curr_skb = head_skb;
	struct hinic_rq_wqe *rq_wqe;
	unsigned int curr_len;
	struct hinic_sge sge;
	int num_wqes = 0;

	while (left_pkt_len > 0) {
		rq_wqe = hinic_rq_read_next_wqe(rxq->rq, HINIC_RQ_WQE_SIZE,
						&skb, &ci);

		num_wqes++;

		hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge);

		rx_unmap_skb(rxq, hinic_sge_to_dma(&sge));

		prefetch(skb->data);

		curr_len = (left_pkt_len > HINIC_RX_BUF_SZ) ? HINIC_RX_BUF_SZ :
			    left_pkt_len;

		left_pkt_len -= curr_len;

		__skb_put(skb, curr_len);

		if (curr_skb == head_skb)
			skb_shinfo(head_skb)->frag_list = skb;
		else
			curr_skb->next = skb;

		head_skb->len += skb->len;
		head_skb->data_len += skb->len;
		head_skb->truesize += skb->truesize;

		curr_skb = skb;
	}

	return num_wqes;
}
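/*
 * Jumbo frames are reassembled as a frag_list chain: the first
 * continuation buffer hangs off skb_shinfo(head_skb)->frag_list and
 * later ones are linked through skb->next. The head skb's len,
 * data_len and truesize must be updated by hand so the stack sees one
 * logical packet of the full length.
 */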
/**
 * rxq_recv - Rx handler
 * @rxq: rx queue
 * @budget: maximum pkts to process
 *
 * Return number of pkts received
 **/
static int rxq_recv(struct hinic_rxq *rxq, int budget)
{
	struct hinic_qp *qp = container_of(rxq->rq, struct hinic_qp, rq);
	u64 pkt_len = 0, rx_bytes = 0;
	struct hinic_rq_wqe *rq_wqe;
	unsigned int free_wqebbs;
	int num_wqes, pkts = 0;
	struct hinic_sge sge;
	struct sk_buff *skb;
	u16 ci;

	while (pkts < budget) {
		num_wqes = 0;

		rq_wqe = hinic_rq_read_wqe(rxq->rq, HINIC_RQ_WQE_SIZE, &skb,
					   &ci);
		if (!rq_wqe)
			break;

		hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge);

		rx_unmap_skb(rxq, hinic_sge_to_dma(&sge));

		rx_csum(rxq, ci, skb);

		prefetch(skb->data);

		pkt_len = sge.len;

		if (pkt_len <= HINIC_RX_BUF_SZ) {
			__skb_put(skb, pkt_len);
		} else {
			__skb_put(skb, HINIC_RX_BUF_SZ);
			num_wqes = rx_recv_jumbo_pkt(rxq, skb, pkt_len -
						     HINIC_RX_BUF_SZ, ci);
		}

		hinic_rq_put_wqe(rxq->rq, ci,
				 (num_wqes + 1) * HINIC_RQ_WQE_SIZE);

		skb_record_rx_queue(skb, qp->q_id);
		skb->protocol = eth_type_trans(skb, rxq->netdev);

		napi_gro_receive(&rxq->napi, skb);

		pkts++;
		rx_bytes += pkt_len;
	}

	free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq);
	if (free_wqebbs > HINIC_RX_BUFFER_WRITE)
		rx_alloc_pkts(rxq);

	u64_stats_update_begin(&rxq->rxq_stats.syncp);
	rxq->rxq_stats.pkts += pkts;
	rxq->rxq_stats.bytes += rx_bytes;
	u64_stats_update_end(&rxq->rxq_stats.syncp);

	return pkts;
}
static int rx_poll(struct napi_struct *napi, int budget)
{
	struct hinic_rxq *rxq = container_of(napi, struct hinic_rxq, napi);
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
	struct hinic_rq *rq = rxq->rq;
	int pkts;

	pkts = rxq_recv(rxq, budget);
	if (pkts >= budget)
		return budget;

	napi_complete(napi);
	hinic_hwdev_set_msix_state(nic_dev->hwdev,
				   rq->msix_entry,
				   HINIC_MSIX_ENABLE);

	return pkts;
}
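/*
 * Standard NAPI contract: if the poll consumed its full budget it
 * returns budget and stays scheduled for another round; only when the
 * ring drained early does it call napi_complete() and re-enable the
 * queue's MSI-X vector, switching back from polling to interrupts.
 */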
static void rx_add_napi(struct hinic_rxq *rxq)
{
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);

	netif_napi_add(rxq->netdev, &rxq->napi, rx_poll, nic_dev->rx_weight);
	napi_enable(&rxq->napi);
}
static void rx_del_napi(struct hinic_rxq *rxq)
{
	napi_disable(&rxq->napi);
	netif_napi_del(&rxq->napi);
}
static irqreturn_t rx_irq(int irq, void *data)
{
	struct hinic_rxq *rxq = (struct hinic_rxq *)data;
	struct hinic_rq *rq = rxq->rq;
	struct hinic_dev *nic_dev;

	/* Disable the interrupt until NAPI processing is completed */
	nic_dev = netdev_priv(rxq->netdev);
	hinic_hwdev_set_msix_state(nic_dev->hwdev,
				   rq->msix_entry,
				   HINIC_MSIX_DISABLE);

	nic_dev = netdev_priv(rxq->netdev);

	hinic_hwdev_msix_cnt_set(nic_dev->hwdev, rq->msix_entry);

	napi_schedule(&rxq->napi);
	return IRQ_HANDLED;
}
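/*
 * The hard IRQ handler does the minimum: it masks this queue's MSI-X
 * vector, appears to clear the vector's pending-event count via
 * hinic_hwdev_msix_cnt_set() (device-specific behavior), and hands the
 * real work to NAPI; rx_poll() re-enables the vector once the ring has
 * been drained.
 */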
static int rx_request_irq(struct hinic_rxq *rxq)
{
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_rq *rq = rxq->rq;
	struct hinic_qp *qp;
	struct cpumask mask;
	int err;

	rx_add_napi(rxq);

	hinic_hwdev_msix_set(hwdev, rq->msix_entry,
			     RX_IRQ_NO_PENDING, RX_IRQ_NO_COALESC,
			     RX_IRQ_NO_LLI_TIMER, RX_IRQ_NO_CREDIT,
			     RX_IRQ_NO_RESEND_TIMER);

	err = request_irq(rq->irq, rx_irq, 0, rxq->irq_name, rxq);
	if (err) {
		rx_del_napi(rxq);
		return err;
	}

	qp = container_of(rq, struct hinic_qp, rq);
	cpumask_set_cpu(qp->q_id % num_online_cpus(), &mask);
	return irq_set_affinity_hint(rq->irq, &mask);
}
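/*
 * The affinity hint pins queue q_id to CPU q_id % num_online_cpus(),
 * spreading per-queue interrupt (and NAPI) load round-robin across the
 * online CPUs; since it is only a hint, irqbalance or userspace may
 * still override it.
 */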
static void rx_free_irq(struct hinic_rxq *rxq)
{
	struct hinic_rq *rq = rxq->rq;

	irq_set_affinity_hint(rq->irq, NULL);
	free_irq(rq->irq, rxq);
	rx_del_napi(rxq);
}
/**
 * hinic_init_rxq - Initialize the Rx Queue
 * @rxq: Logical Rx Queue
 * @rq: Hardware Rx Queue to connect the Logical queue with
 * @netdev: network device to connect the Logical queue with
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,
		   struct net_device *netdev)
{
	struct hinic_qp *qp = container_of(rq, struct hinic_qp, rq);
	int err, pkts, irqname_len;

	rxq->netdev = netdev;
	rxq->rq = rq;

	rxq_stats_init(rxq);

	irqname_len = snprintf(NULL, 0, "hinic_rxq%d", qp->q_id) + 1;
	rxq->irq_name = devm_kzalloc(&netdev->dev, irqname_len, GFP_KERNEL);
	if (!rxq->irq_name)
		return -ENOMEM;

	sprintf(rxq->irq_name, "hinic_rxq%d", qp->q_id);

	pkts = rx_alloc_pkts(rxq);
	if (!pkts) {
		err = -ENOMEM;
		goto err_rx_pkts;
	}

	err = rx_request_irq(rxq);
	if (err) {
		netdev_err(netdev, "Failed to request Rx irq\n");
		goto err_req_rx_irq;
	}

	return 0;

err_req_rx_irq:
err_rx_pkts:
	free_all_rx_skbs(rxq);
	devm_kfree(&netdev->dev, rxq->irq_name);
	return err;
}
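/*
 * snprintf(NULL, 0, ...) is the standard trick for sizing a formatted
 * string: with a zero-length buffer it writes nothing but still
 * returns the number of characters the full string would need, so +1
 * gives the exact allocation size including the NUL terminator.
 */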
/**
 * hinic_clean_rxq - Clean the Rx Queue
 * @rxq: Logical Rx Queue
 **/
void hinic_clean_rxq(struct hinic_rxq *rxq)
{
	struct net_device *netdev = rxq->netdev;

	rx_free_irq(rxq);

	free_all_rx_skbs(rxq);
	devm_kfree(&netdev->dev, rxq->irq_name);
}